blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 3..616) | content_id (string, len 40) | detected_licenses (sequence, len 0..112) | license_type (string, 2 classes) | repo_name (string, len 5..115) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, 777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 .. 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 .. 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 .. 2023-09-06 01:08:06) | github_id (int64, 4.92k..681M, nullable) | star_events_count (int64, 0..209k) | fork_events_count (int64, 0..110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 .. 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 .. 2023-08-21 12:35:19, nullable) | gha_language (string, 149 classes) | src_encoding (string, 26 classes) | language (string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 3..10.2M) | extension (string, 188 classes) | content (string, len 3..10.2M) | authors (sequence, len 1) | author_id (string, len 1..132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0f57f25e6830e8a8a6c29f9389d56e4d28244ca4 | 8240abd177ece3a1cf2d753cc5694c1fec478709 | /week1/codeingBat/list-1/11.py | cf2becfbba95f6796832323d11c3203f3efb0f6e | [] | no_license | DastanB/BF-Django | 255001185d8a8318bd19b750fe662a7f86b64d92 | adcd1d968b94ea5097fd3d03338f031d5497d463 | refs/heads/master | 2022-10-27T06:44:56.648527 | 2018-11-24T18:33:35 | 2018-11-24T18:33:35 | 147,125,321 | 1 | 1 | null | 2022-10-19T08:22:54 | 2018-09-02T22:07:22 | Python | UTF-8 | Python | false | false | 52 | py | def make_ends(nums):
return [nums[0], nums[-1]]
| [
"[email protected]"
] | |
8870bd24e5ffe301721b460d9d17fd2f21eb6d2a | 904e75e2ceff81c18a432fe1b951b683e859cbed | /views/home.py | 00e8a59f5996bfd50c7e509807307c71b893ddd0 | [] | no_license | PUYUP/plutoborn | a42c65fa360de41a1236af00b5718948dc1b9940 | e6b47b7f183fcff60fa803329e11c2e87de560ef | refs/heads/master | 2022-12-05T17:06:10.049472 | 2020-08-19T09:12:45 | 2020-08-19T09:12:45 | 254,116,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,622 | py | import datetime
from django.conf import settings
from django.views import View
from django.shortcuts import render, redirect
from django.urls import reverse
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.db.models import (
Count, Prefetch, Case, When, Value, BooleanField, IntegerField,
F, Q, Subquery, OuterRef, CharField, Sum, FloatField, Max, DateTimeField)
from django.contrib.auth.mixins import LoginRequiredMixin
from django.utils import timezone
from utils.generals import get_model
from apps.market.utils.constant import PUBLISHED, NATIONAL
from apps.tryout.utils.constant import PREFERENCE, TRUE_FALSE_NONE
Simulation = get_model('tryout', 'Simulation')
Bundle = get_model('market', 'Bundle')
CMSBanner = get_model('cms', 'CMSBanner')
CMSVideo = get_model('cms', 'CMSVideo')
class HomeView(LoginRequiredMixin, View):
login_url = '/login/'
redirect_field_name = 'redirect_to'
template_name = 'home.html'
context = dict()
def latest_simulation(self):
user = self.request.user
try:
latest_simulation = Simulation.objects.filter(user_id=user.id).latest('date_created')
except ObjectDoesNotExist:
latest_simulation = None
if not latest_simulation:
return None
packet = latest_simulation.packet
chance = latest_simulation.chance
theories = packet.questions.filter(theory__isnull=False) \
.values('theory', 'theory__pk', 'theory__label', 'theory__true_score',
'theory__false_score', 'theory__none_score', 'theory__scoring_type') \
.distinct()
theories_params = dict()
theories_total_score = list()
for item in theories:
theory_id = item['theory__pk']
scoring_type = item['theory__scoring_type']
at = 'theory_{}_true_count'.format(theory_id)
af = 'theory_{}_false_count'.format(theory_id)
an = 'theory_{}_none_count'.format(theory_id)
at_s = 'theory_{}_true_score'.format(theory_id)
af_s = 'theory_{}_false_score'.format(theory_id)
an_s = 'theory_{}_none_score'.format(theory_id)
ts = 'theory_{}_total_score'.format(theory_id)
tn = 'theory_{}_verbose_name'.format(theory_id)
pt_s = 'theory_{}_preference_score_total'.format(theory_id)
# verbose name
theories_params[tn] = Value(item['theory__label'], output_field=CharField())
# score by preference
theories_params[pt_s] = Sum(
Case(
When(
Q(answers__question__theory__id=theory_id)
& Q(answers__question__theory__scoring_type=PREFERENCE)
& Q(answers__choice__isnull=False),
then=F('answers__choice__score')
),
output_field=IntegerField(),
default=Value(0)
)
)
# count right choice
theories_params[at] = Sum(
Case(
When(
Q(answers__question__theory__id=theory_id)
& Q(answers__choice__isnull=False)
& Q(answers__choice__right_choice=True),
then=Value(1)
),
output_field=IntegerField(),
default=Value(0)
)
)
theories_params[at_s] = F(at) * item['theory__true_score']
# count false choice
theories_params[af] = Sum(
Case(
When(
Q(answers__question__theory__id=theory_id)
& Q(answers__choice__isnull=False)
& Q(answers__choice__right_choice=False),
then=Value(1)
),
output_field=IntegerField(),
default=Value(0)
)
)
theories_params[af_s] = F(af) * item['theory__false_score']
# count none choice
theories_params[an] = Sum(
Case(
When(
Q(answers__question__theory__id=theory_id)
& Q(answers__choice__isnull=True),
then=Value(1)
),
output_field=IntegerField(),
default=Value(0)
)
)
theories_params[an_s] = F(an) * item['theory__none_score']
# sum all theory score
if scoring_type == TRUE_FALSE_NONE:
# sum all theory score
theories_params[ts] = (F(at) * item['theory__true_score']) \
+ (F(an) * item['theory__none_score']) \
- (F(af) * item['theory__false_score'])
elif scoring_type == PREFERENCE:
theories_params[ts] = F(pt_s)
# prepare total score
theories_total_score.append(F(ts))
simulations = Simulation.objects.filter(packet_id=packet.id, chance=chance) \
.annotate(
**theories_params,
total_score=sum(theories_total_score),
current_score=Case(
When(user_id=user.id, then=True),
default=False,
output_field=BooleanField()
),
).order_by('-total_score')
theory_ids = [item['theory__pk'] for item in theories]
simulation = simulations.filter(user_id=user.id).get()
tgs = list()
for tid in theory_ids:
st = 'theory_{}_scoring_type'.format(tid)
tn = 'theory_{}_verbose_name'.format(tid)
ts = 'theory_{}_total_score'.format(tid)
at_s = 'theory_{}_true_score'.format(tid)
af_s = 'theory_{}_false_score'.format(tid)
an_s = 'theory_{}_none_score'.format(tid)
pt_s = 'theory_{}_preference_score_total'.format(tid)
label = getattr(simulation, tn, None)
true_score = getattr(simulation, at_s, 0)
false_score = getattr(simulation, af_s, 0)
none_score = getattr(simulation, an_s, 0)
total_score = getattr(simulation, ts, 0)
            # read from the annotated simulation row (`item` here would be a
            # stale dict left over from the earlier theories loop)
            preference_total_score = getattr(simulation, pt_s, 0)
            scoring_type = getattr(simulation, st, None)
tg = {
'label': label,
'true_score': true_score,
'false_score': false_score,
'none_score': none_score,
'total_score': total_score,
'preference_total_score': preference_total_score,
'scoring_type': scoring_type,
}
tgs.append(tg)
simulation.theory_groups = tgs
rank = simulations.filter(total_score__gt=simulation.total_score).count() + 1
return {'simulation': simulation, 'rank': rank}
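    # Shape of the value returned above (illustrative sketch): a dict such as
    #   {'simulation': <Simulation annotated with per-theory scores and a
    #                   `theory_groups` list of label/score dicts>,
    #    'rank': <1-based position among simulations of the same packet/chance>}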
def get(self, request):
user = request.user
account = user.account
# redirect to admin dashboard
if user.is_staff:
return redirect(reverse('dashboard'))
if user.profile.is_empty:
return redirect(reverse('profile'))
simulation_stat = self.latest_simulation()
        # settings.TIME_ZONE (e.g. 'UTC') determines the tzinfo attached here
        aware_datetime = timezone.make_aware(timezone.datetime.today())
simulation_due = Bundle.objects.filter(start_date__gt=aware_datetime).first()
bundles = Bundle.objects \
.annotate(total_packet=Count('packet', distinct=True)) \
.filter(status=PUBLISHED) \
.exclude(boughts__user_id=user.id)[:4]
packets = user.acquireds \
.prefetch_related(Prefetch('packet'), Prefetch('user')) \
.select_related('packet', 'user') \
.annotate(
question_total=Count('packet__questions', distinct=True),
theory_total=Count('packet__questions__theory', distinct=True),
x_start_date=Case(
When(packet__bundle__start_date__isnull=False, then=F('packet__bundle__start_date')),
default=F('packet__start_date'),
                    output_field=DateTimeField()
),
x_end_date=Case(
When(packet__bundle__end_date__isnull=False, then=F('packet__bundle__end_date')),
default=F('packet__end_date'),
                    output_field=DateTimeField()
),
x_simulation_type=Case(
When(packet__bundle__simulation_type=NATIONAL, then=Value('Nasional')),
default=Value('Umum'),
output_field=CharField()
)
)[:4]
# CMS
banners = CMSBanner.objects.filter(is_active=True).order_by('sort')
videos = CMSVideo.objects.filter(is_active=True).order_by('sort')
self.context['simulation_due'] = simulation_due
self.context['simulation_stat'] = simulation_stat
self.context['my_coins'] = account.coin_amounts
self.context['my_points'] = account.points_amounts
self.context['bundles'] = bundles
self.context['packets'] = packets
# CMS
self.context['banners'] = banners
self.context['videos'] = videos
return render(request, self.template_name, self.context)
| [
"[email protected]"
] | |
87952ccd9a2eb59e81b8c92ef355b23f757f7304 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part004399.py | ecb633d07fd632fb4d5e0042ffa3c812f780ceff | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,364 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher141684(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({0: 1}), [
(VariableWithCount('i2.2.1.0', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher141684._instance is None:
CommutativeMatcher141684._instance = CommutativeMatcher141684()
return CommutativeMatcher141684._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 141683
if len(subjects) >= 1 and isinstance(subjects[0], Pow):
tmp1 = subjects.popleft()
subjects2 = deque(tmp1._args)
# State 141685
if len(subjects2) >= 1:
tmp3 = subjects2.popleft()
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.1', tmp3)
except ValueError:
pass
else:
pass
# State 141686
if len(subjects2) >= 1 and subjects2[0] == Integer(2):
tmp5 = subjects2.popleft()
# State 141687
if len(subjects2) == 0:
pass
# State 141688
if len(subjects) == 0:
pass
# 0: x**2
yield 0, subst1
subjects2.appendleft(tmp5)
subjects2.appendleft(tmp3)
subjects.appendleft(tmp1)
return
yield
from collections import deque | [
"[email protected]"
] | |
41704a03c9c525e5742496757d48362c163126ef | 26ae248d7f1ca16c51c4f34c1f67ef19be162a4e | /targAssign.py | 5ac00997505f5b61d411830f332333dfd23ee9a2 | [] | no_license | csayres/astro598 | 0d87373904da8419b90665fb84d747cf49830ef6 | 676b7ae9ae08fbeca48ded0c6f980892e907972f | refs/heads/master | 2020-11-24T05:42:13.775053 | 2019-12-14T09:22:10 | 2019-12-14T09:22:10 | 227,990,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,555 | py | import time
from multiprocessing import Pool, cpu_count
import pickle
import numpy
import matplotlib.pyplot as plt
from keras.models import Sequential, load_model
from keras.layers import Dense
from kaiju import RobotGrid, utils
nHexDia = 7
xCoords, yCoords = utils.hexFromDia(nHexDia)
nPositioners = len(xCoords)
print("using %i positioners"%nPositioners)
nTargets = nPositioners
nProcs = 10 # processors to use for multiprocessing
batchSize = 100 # The number of samples to run through the network before the weights / gradient are updated
epochs = 5 # The number of times to iterate through the complete sample of training data
trainingRatio = 0.9
def getValidAssignments(seed):
"""seed is the random seed with which to initialze the RobotGrid
return dictionary keyed by positioner id with the coordinates of the
metrology fiber. These represent valid (non-collided) xy Fiber positions
for each robot
"""
rg = RobotGrid(seed=seed)
for ii, (xp, yp) in enumerate(zip(xCoords, yCoords)):
rg.addRobot(robotID=ii, xPos=xp, yPos=yp)
rg.initGrid()
# give all robots an initial (radom) target configuration
for robot in rg.robotDict.values():
# assigns a robot a target picked uniformly in xy
# from its patrol annulus
robot.setXYUniform()
# decollide any colliding robots so that we have a completely
# non-colliding target configuration
rg.decollideGrid()
targetPos = {}
for robot in rg.robotDict.values():
targetPos[robot.id] = robot.metFiberPos[:-1] # xy coord, drop the z
return targetPos
def generateAssignments(nSeeds):
p = Pool(nProcs)
tstart = time.time()
validAssignments = p.map(getValidAssignments, range(nSeeds))
tend = time.time() - tstart
print("took %.2f seconds"%tend)
p.close()
pickle.dump(validAssignments, open("validAssign_%i.p"%nSeeds, "wb"))
def target2NN(targetDict, shuffle=True):
y = numpy.zeros((nPositioners, nTargets)) #n x 2, robot x target
# shuffle targets
x = []
shuffledInds = numpy.arange(nPositioners)
if shuffle:
numpy.random.shuffle(shuffledInds)
for targetInd, robotInd in enumerate(shuffledInds):
target = targetDict[robotInd]
x.append(target[0]) # xvalue
x.append(target[1]) # yvalue
# x is flattened!
y[robotInd, targetInd] = 1
x = numpy.array(x)
# rows and columns of y sum to 1, total sums to nPositioners
# print("sum of y", numpy.sum(y, axis=0), numpy.sum(y, axis=1), numpy.sum(y))
y = y.flatten() # consider normalizing by something? sum of the array will be 547
return x, y
def form2NN(assignFile):
"""Take valid assignments from assignFile
Format for use with the NN. Shuffle input targets
"""
numpy.random.seed(547)
with open(assignFile, "rb") as f:
validAssignments = pickle.load(f)
# generate a big array
# use reshape (nTargs, 2) to get original array
X = [] # input n x [x1, y1, x2, y2, ... xn, yn]
Y = [] # output
for targetDict in validAssignments:
x, y = target2NN(targetDict)
X.append(x)
Y.append(y)
X = numpy.array(X)
Y = numpy.array(Y)
return X, Y
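# Shape sketch for the arrays built above (illustrative): X is
# (nTrials, 2 * nPositioners) of flattened xy target coordinates and Y is
# (nTrials, nPositioners * nTargets) of flattened one-hot assignment matrices
# (each robot row of the unflattened matrix marks the target it was given).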
def runNN(X, Y):
"""X is input array xy coords
Y is output array, flattened nRobots x nTargets array indexing the answers
"""
# truncate?
# X = X[:10000,:]
# Y = Y[:10000,:]
# normalize
nTrials = X.shape[0]
nInputs = X.shape[1]
nHidden = int(nInputs*1.5)
nOutputs = Y.shape[1]
model = Sequential()
model.add(
Dense(nHidden,
activation="relu",
input_dim = nInputs,
))
model.add(
Dense(nOutputs, activation="softmax"))
model.summary()
model.compile(loss='categorical_crossentropy', # See: https://keras.io/losses/
optimizer='rmsprop', # See: https://keras.io/optimizers/
metrics=['accuracy']
)
    # split the data into training and testing; trainingRatio (90%) goes towards training
split = int(numpy.floor(nTrials*trainingRatio))
X_train = X[:split, :]
Y_train = Y[:split, :]
X_test = X[split:, :]
Y_test = Y[split:, :]
history = model.fit(X_train, Y_train,
batch_size=batchSize, epochs=epochs,
verbose=1, validation_data=(X_test, Y_test))
model.save("targAssign.h5")
def compareModeled():
model = load_model('targAssign.h5')
    newSeed = 2000000  # a seed value not used when generating the training data
ii = 0
for seed in range(newSeed, newSeed+10):
targDict = getValidAssignments(seed)
x, yTrue = target2NN(targDict, shuffle=False)
# import pdb; pdb.set_trace()
print("xhape", x.shape)
yFit = model.predict(numpy.array([x]), verbose=1)
yFit = yFit.reshape(nPositioners, nTargets)
yTrue = yTrue.reshape(nPositioners, nTargets)
plt.figure()
plt.imshow(yFit/numpy.sum(yFit))
plt.title("NN Model Fit %i"%ii)
plt.ylabel("Positioner Index")
plt.xlabel("Target Index")
plt.savefig("model_%i.png"%ii)
plt.close()
ii += 1
plt.figure()
plt.imshow(yTrue/numpy.sum(yTrue))
plt.title("True Assignment")
plt.ylabel("Positioner Index")
plt.xlabel("Target Index")
plt.savefig("true.png")
plt.close()
if __name__ == "__main__":
nSeeds = 1000000
generateAssignments(nSeeds)
X, Y = form2NN("validAssign_%i.p"%nSeeds)
runNN(X, Y)
compareModeled()
| [
"[email protected]"
] | |
2b5b147fd0ab404cd378d2b57cda206bc53c6ab4 | 1c6276c90ab97004e2435a539d011c5b9d08d134 | /electrum/simple_config.py | 83eec9befca8755f965b89e3090798660e730bf0 | [
"MIT"
] | permissive | c4pt000/electrum-ravencoin-lite | 020dedec4891293e9439f4692c0e5855466c249d | fe0f139a3708261467beb855700f56b5fbc7c2e9 | refs/heads/main | 2023-08-24T00:52:51.983733 | 2021-10-20T13:22:15 | 2021-10-20T13:22:15 | 406,919,123 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 24,218 | py | import json
import threading
import time
import os
import stat
import ssl
from decimal import Decimal
from typing import Union, Optional
from numbers import Real
from copy import deepcopy
from aiorpcx import NetAddress
from . import util
from . import constants
from .util import base_units, base_unit_name_to_decimal_point, decimal_point_to_base_unit_name, UnknownBaseUnit, DECIMAL_POINT_DEFAULT
from .util import format_satoshis, format_fee_satoshis
from .util import user_dir, make_dir, NoDynamicFeeEstimates, quantize_feerate
from .i18n import _
from .logging import get_logger, Logger
FEE_ETA_TARGETS = [
520900 * 1000,
520900 * 1000,
520900 * 1000,
520900 * 1000,
]
FEE_DEPTH_TARGETS = [
520900 * 1000,
520900 * 1000,
520900 * 1000,
520900 * 1000,
520900 * 1000,
520900 * 1000,
520900 * 1000,
520900 * 1000,
]
#10, 5000000, 2000000, 10, 500000, 200000, 10]
FEE_LN_ETA_TARGET = 520900 * 1000
#2 # note: make sure the network is asking for estimates for this target
# satoshi per kbyte
FEERATE_MAX_DYNAMIC = 520900 * 1000
FEERATE_WARNING_HIGH_FEE = 520900 * 1000
FEERATE_FALLBACK_STATIC_FEE = 520900 * 1000
FEERATE_DEFAULT_RELAY = 520900 * 1000
FEERATE_MAX_RELAY = 520900 * 1000
FEERATE_STATIC_VALUES = [
520900 * 1000,
520900 * 1000,
520900 * 1000,
520900 * 1000,
520900 * 1000,
520900 * 1000,
520900 * 1000,
520900 * 1000,
520900 * 1000,
520900 * 1000,
520900 * 1000,
]
#1000, 2000, 5000, 10000, 20000, 30000,
# 50000, 70000, 10, 150000, 200000, 300000]
# Regtest feerate doesn't need Namecoin's 100x adjustment.
FEERATE_REGTEST_HARDCODED = 180000 # for eclair compat
FEE_RATIO_HIGH_WARNING = 0.05 # warn user if fee/amount for on-chain tx is higher than this
_logger = get_logger(__name__)
FINAL_CONFIG_VERSION = 3
class SimpleConfig(Logger):
"""
The SimpleConfig class is responsible for handling operations involving
configuration files.
There are two different sources of possible configuration values:
1. Command line options.
2. User configuration (in the user's config directory)
They are taken in order (1. overrides config options set in 2.)
"""
def __init__(self, options=None, read_user_config_function=None,
read_user_dir_function=None):
if options is None:
options = {}
Logger.__init__(self)
# This lock needs to be acquired for updating and reading the config in
# a thread-safe way.
self.lock = threading.RLock()
self.mempool_fees = {}
self.fee_estimates = {}
self.fee_estimates_last_updated = {}
self.last_time_fee_estimates_requested = 0 # zero ensures immediate fees
# The following two functions are there for dependency injection when
# testing.
if read_user_config_function is None:
read_user_config_function = read_user_config
if read_user_dir_function is None:
self.user_dir = user_dir
else:
self.user_dir = read_user_dir_function
# The command line options
self.cmdline_options = deepcopy(options)
# don't allow to be set on CLI:
self.cmdline_options.pop('config_version', None)
# Set self.path and read the user config
self.user_config = {} # for self.get in electrum_path()
self.path = self.electrum_path()
self.user_config = read_user_config_function(self.path)
if not self.user_config:
# avoid new config getting upgraded
self.user_config = {'config_version': FINAL_CONFIG_VERSION}
self._not_modifiable_keys = set()
# config "upgrade" - CLI options
self.rename_config_keys(
self.cmdline_options, {'auto_cycle': 'auto_connect'}, True)
# config upgrade - user config
if self.requires_upgrade():
self.upgrade()
self._check_dependent_keys()
# units and formatting
self.decimal_point = self.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
decimal_point_to_base_unit_name(self.decimal_point)
except UnknownBaseUnit:
self.decimal_point = DECIMAL_POINT_DEFAULT
self.num_zeros = int(self.get('num_zeros', 0))
def electrum_path(self):
# Read electrum_path from command line
# Otherwise use the user's default data directory.
path = self.get('electrum_path')
if path is None:
path = self.user_dir()
make_dir(path, allow_symlink=False)
if self.get('testnet'):
path = os.path.join(path, 'testnet')
make_dir(path, allow_symlink=False)
elif self.get('regtest'):
path = os.path.join(path, 'regtest')
make_dir(path, allow_symlink=False)
elif self.get('simnet'):
path = os.path.join(path, 'simnet')
make_dir(path, allow_symlink=False)
self.logger.info(f"electrum-nmc directory {path}")
return path
def rename_config_keys(self, config, keypairs, deprecation_warning=False):
"""Migrate old key names to new ones"""
updated = False
for old_key, new_key in keypairs.items():
if old_key in config:
if new_key not in config:
config[new_key] = config[old_key]
if deprecation_warning:
self.logger.warning('Note that the {} variable has been deprecated. '
'You should use {} instead.'.format(old_key, new_key))
del config[old_key]
updated = True
return updated
def set_key(self, key, value, save=True):
if not self.is_modifiable(key):
self.logger.warning(f"not changing config key '{key}' set on the command line")
return
try:
json.dumps(key)
json.dumps(value)
except:
self.logger.info(f"json error: cannot save {repr(key)} ({repr(value)})")
return
self._set_key_in_user_config(key, value, save)
def _set_key_in_user_config(self, key, value, save=True):
with self.lock:
if value is not None:
self.user_config[key] = value
else:
self.user_config.pop(key, None)
if save:
self.save_user_config()
def get(self, key, default=None):
with self.lock:
out = self.cmdline_options.get(key)
if out is None:
out = self.user_config.get(key, default)
return out
def _check_dependent_keys(self) -> None:
if self.get('serverfingerprint'):
if not self.get('server'):
raise Exception("config key 'serverfingerprint' requires 'server' to also be set")
self.make_key_not_modifiable('server')
def requires_upgrade(self):
return self.get_config_version() < FINAL_CONFIG_VERSION
def upgrade(self):
with self.lock:
self.logger.info('upgrading config')
self.convert_version_2()
self.convert_version_3()
self.set_key('config_version', FINAL_CONFIG_VERSION, save=True)
def convert_version_2(self):
if not self._is_upgrade_method_needed(1, 1):
return
self.rename_config_keys(self.user_config, {'auto_cycle': 'auto_connect'})
try:
# change server string FROM host:port:proto TO host:port:s
server_str = self.user_config.get('server')
host, port, protocol = str(server_str).rsplit(':', 2)
assert protocol in ('s', 't')
int(port) # Throw if cannot be converted to int
server_str = '{}:{}:s'.format(host, port)
self._set_key_in_user_config('server', server_str)
except BaseException:
self._set_key_in_user_config('server', None)
self.set_key('config_version', 2)
def convert_version_3(self):
if not self._is_upgrade_method_needed(2, 2):
return
base_unit = self.user_config.get('base_unit')
if isinstance(base_unit, str):
self._set_key_in_user_config('base_unit', None)
map_ = {'nmc':8, 'mnmc':5, 'unmc':2, 'bits':2, 'noise':0}
decimal_point = map_.get(base_unit.lower())
self._set_key_in_user_config('decimal_point', decimal_point)
self.set_key('config_version', 3)
def _is_upgrade_method_needed(self, min_version, max_version):
cur_version = self.get_config_version()
if cur_version > max_version:
return False
elif cur_version < min_version:
raise Exception(
('config upgrade: unexpected version %d (should be %d-%d)'
% (cur_version, min_version, max_version)))
else:
return True
def get_config_version(self):
config_version = self.get('config_version', 1)
if config_version > FINAL_CONFIG_VERSION:
self.logger.warning('config version ({}) is higher than latest ({})'
.format(config_version, FINAL_CONFIG_VERSION))
return config_version
def is_modifiable(self, key) -> bool:
return (key not in self.cmdline_options
and key not in self._not_modifiable_keys)
def make_key_not_modifiable(self, key) -> None:
self._not_modifiable_keys.add(key)
def save_user_config(self):
if self.get('forget_config'):
return
if not self.path:
return
path = os.path.join(self.path, "config")
s = json.dumps(self.user_config, indent=4, sort_keys=True)
try:
with open(path, "w", encoding='utf-8') as f:
f.write(s)
os.chmod(path, stat.S_IREAD | stat.S_IWRITE)
except FileNotFoundError:
# datadir probably deleted while running...
if os.path.exists(self.path): # or maybe not?
raise
def get_wallet_path(self, *, use_gui_last_wallet=False):
"""Set the path of the wallet."""
# command line -w option
if self.get('wallet_path'):
return os.path.join(self.get('cwd', ''), self.get('wallet_path'))
if use_gui_last_wallet:
path = self.get('gui_last_wallet')
if path and os.path.exists(path):
return path
# default path
util.assert_datadir_available(self.path)
dirpath = os.path.join(self.path, "wallets")
make_dir(dirpath, allow_symlink=False)
new_path = os.path.join(self.path, "wallets", "default_wallet")
# default path in pre 1.9 versions
old_path = os.path.join(self.path, "electrum-nmc.dat")
if os.path.exists(old_path) and not os.path.exists(new_path):
os.rename(old_path, new_path)
return new_path
def remove_from_recently_open(self, filename):
recent = self.get('recently_open', [])
if filename in recent:
recent.remove(filename)
self.set_key('recently_open', recent)
def set_session_timeout(self, seconds):
self.logger.info(f"session timeout -> {seconds} seconds")
self.set_key('session_timeout', seconds)
def get_session_timeout(self):
return self.get('session_timeout', 300)
def save_last_wallet(self, wallet):
if self.get('wallet_path') is None:
path = wallet.storage.path
self.set_key('gui_last_wallet', path)
def impose_hard_limits_on_fee(func):
def get_fee_within_limits(self, *args, **kwargs):
fee = func(self, *args, **kwargs)
if fee is None:
return fee
fee = min(FEERATE_MAX_DYNAMIC, fee)
fee = max(FEERATE_DEFAULT_RELAY, fee)
return fee
return get_fee_within_limits
def eta_to_fee(self, slider_pos) -> Optional[int]:
"""Returns fee in sat/kbyte."""
slider_pos = max(slider_pos, 0)
slider_pos = min(slider_pos, len(FEE_ETA_TARGETS))
if slider_pos < len(FEE_ETA_TARGETS):
num_blocks = FEE_ETA_TARGETS[slider_pos]
fee = self.eta_target_to_fee(num_blocks)
else:
fee = self.eta_target_to_fee(1)
return fee
@impose_hard_limits_on_fee
def eta_target_to_fee(self, num_blocks: int) -> Optional[int]:
"""Returns fee in sat/kbyte."""
if num_blocks == 1:
fee = self.fee_estimates.get(2)
if fee is not None:
fee += fee / 2
fee = int(fee)
else:
fee = self.fee_estimates.get(num_blocks)
return fee
def fee_to_depth(self, target_fee: Real) -> int:
"""For a given sat/vbyte fee, returns an estimate of how deep
it would be in the current mempool in vbytes.
Pessimistic == overestimates the depth.
"""
depth = 0
for fee, s in self.mempool_fees:
depth += s
if fee <= target_fee:
break
return depth
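    # Worked sketch of the loop above with a made-up histogram: if mempool_fees
    # were [(50, 10000), (10, 20000)] (sat/vbyte, vbytes), fee_to_depth(20)
    # adds 10000, sees 50 > 20, adds 20000, sees 10 <= 20 and stops, returning
    # a depth of 30000 vbytes.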
def depth_to_fee(self, slider_pos) -> int:
"""Returns fee in sat/kbyte."""
target = self.depth_target(slider_pos)
return self.depth_target_to_fee(target)
@impose_hard_limits_on_fee
def depth_target_to_fee(self, target: int) -> int:
"""Returns fee in sat/kbyte.
target: desired mempool depth in vbytes
"""
depth = 0
for fee, s in self.mempool_fees:
depth += s
if depth > target:
break
else:
return 0
# add one sat/byte as currently that is
# the max precision of the histogram
fee += 1
# convert to sat/kbyte
# return fee * 1000 000 000
return fee * 1000000000
def depth_target(self, slider_pos):
slider_pos = max(slider_pos, 0)
slider_pos = min(slider_pos, len(FEE_DEPTH_TARGETS)-1)
return FEE_DEPTH_TARGETS[slider_pos]
def eta_target(self, i):
if i == len(FEE_ETA_TARGETS):
return 1
return FEE_ETA_TARGETS[i]
def fee_to_eta(self, fee_per_kb):
import operator
l = list(self.fee_estimates.items()) + [(1, self.eta_to_fee(4))]
dist = map(lambda x: (x[0], abs(x[1] - fee_per_kb)), l)
min_target, min_value = min(dist, key=operator.itemgetter(1))
if fee_per_kb < self.fee_estimates.get(25)/2:
min_target = -1
return min_target
def depth_tooltip(self, depth):
return "%.1f MB from tip"%(depth/100)
def eta_tooltip(self, x):
if x < 0:
return _('Low fee')
elif x == 1:
return _('In the next block')
else:
return _('Within {} blocks').format(x)
def get_fee_status(self):
dyn = self.is_dynfee()
mempool = self.use_mempool_fees()
pos = self.get_depth_level() if mempool else self.get_fee_level()
fee_rate = self.fee_per_kb()
target, tooltip = self.get_fee_text(pos, dyn, mempool, fee_rate)
return tooltip + ' [%s]'%target if dyn else target + ' [Static]'
def get_fee_text(self, pos, dyn, mempool, fee_rate):
"""Returns (text, tooltip) where
text is what we target: static fee / num blocks to confirm in / mempool depth
tooltip is the corresponding estimate (e.g. num blocks for a static fee)
fee_rate is in sat/kbyte
"""
if fee_rate is None:
rate_str = 'unknown'
else:
# fee_rate = fee_rate/1000
fee_rate = 1.000
rate_str = format_fee_satoshis(fee_rate) + ' noise/byte'
if dyn:
if mempool:
depth = self.depth_target(pos)
text = self.depth_tooltip(depth)
else:
eta = self.eta_target(pos)
text = self.eta_tooltip(eta)
tooltip = rate_str
else:
text = rate_str
if mempool and self.has_fee_mempool():
depth = self.fee_to_depth(fee_rate)
tooltip = self.depth_tooltip(depth)
elif not mempool and self.has_fee_etas():
eta = self.fee_to_eta(fee_rate)
tooltip = self.eta_tooltip(eta)
else:
tooltip = ''
return text, tooltip
def get_depth_level(self):
maxp = len(FEE_DEPTH_TARGETS) - 1
return min(maxp, self.get('depth_level', 2))
def get_fee_level(self):
maxp = len(FEE_ETA_TARGETS) # not (-1) to have "next block"
return min(maxp, self.get('fee_level', 2))
def get_fee_slider(self, dyn, mempool):
if dyn:
if mempool:
pos = self.get_depth_level()
maxp = len(FEE_DEPTH_TARGETS) - 1
fee_rate = self.depth_to_fee(pos)
else:
pos = self.get_fee_level()
maxp = len(FEE_ETA_TARGETS) # not (-1) to have "next block"
fee_rate = self.eta_to_fee(pos)
else:
fee_rate = self.fee_per_kb(dyn=False)
pos = self.static_fee_index(fee_rate)
maxp = len(FEERATE_STATIC_VALUES) - 1
return maxp, pos, fee_rate
def static_fee(self, i):
return FEERATE_STATIC_VALUES[i]
def static_fee_index(self, value):
if value is None:
raise TypeError('static fee cannot be None')
dist = list(map(lambda x: abs(x - value), FEERATE_STATIC_VALUES))
return min(range(len(dist)), key=dist.__getitem__)
def has_fee_etas(self):
return len(self.fee_estimates) == 4
def has_fee_mempool(self):
return bool(self.mempool_fees)
def has_dynamic_fees_ready(self):
if self.use_mempool_fees():
return self.has_fee_mempool()
else:
return self.has_fee_etas()
def is_dynfee(self):
return bool(self.get('dynamic_fees', True))
def use_mempool_fees(self):
return bool(self.get('mempool_fees', False))
def _feerate_from_fractional_slider_position(self, fee_level: float, dyn: bool,
mempool: bool) -> Union[int, None]:
fee_level = max(fee_level, 0)
fee_level = min(fee_level, 1)
if dyn:
max_pos = (len(FEE_DEPTH_TARGETS) - 1) if mempool else len(FEE_ETA_TARGETS)
slider_pos = round(fee_level * max_pos)
fee_rate = self.depth_to_fee(slider_pos) if mempool else self.eta_to_fee(slider_pos)
else:
max_pos = len(FEERATE_STATIC_VALUES) - 1
slider_pos = round(fee_level * max_pos)
fee_rate = FEERATE_STATIC_VALUES[slider_pos]
return fee_rate
def fee_per_kb(self, dyn: bool=None, mempool: bool=None, fee_level: float=None) -> Union[int, None]:
"""Returns sat/kvB fee to pay for a txn.
Note: might return None.
fee_level: float between 0.0 and 1.0, representing fee slider position
"""
if constants.net is constants.BitcoinRegtest:
return FEERATE_REGTEST_HARDCODED
if dyn is None:
dyn = self.is_dynfee()
if mempool is None:
mempool = self.use_mempool_fees()
if fee_level is not None:
return self._feerate_from_fractional_slider_position(fee_level, dyn, mempool)
# there is no fee_level specified; will use config.
# note: 'depth_level' and 'fee_level' in config are integer slider positions,
# unlike fee_level here, which (when given) is a float in [0.0, 1.0]
if dyn:
if mempool:
fee_rate = self.depth_to_fee(self.get_depth_level())
else:
fee_rate = self.eta_to_fee(self.get_fee_level())
else:
fee_rate = self.get('fee_per_kb', FEERATE_FALLBACK_STATIC_FEE)
return fee_rate
def fee_per_byte(self):
"""Returns sat/vB fee to pay for a txn.
Note: might return None.
"""
fee_per_kb = self.fee_per_kb()
return fee_per_kb / 1000 if fee_per_kb is not None else None
def estimate_fee(self, size: Union[int, float, Decimal], *,
allow_fallback_to_static_rates: bool = False) -> int:
fee_per_kb = self.fee_per_kb()
if fee_per_kb is None:
if allow_fallback_to_static_rates:
fee_per_kb = FEERATE_FALLBACK_STATIC_FEE
else:
raise NoDynamicFeeEstimates()
return self.estimate_fee_for_feerate(fee_per_kb, size)
@classmethod
def estimate_fee_for_feerate(cls, fee_per_kb: Union[int, float, Decimal],
size: Union[int, float, Decimal]) -> int:
size = Decimal(size)
fee_per_kb = Decimal(fee_per_kb)
fee_per_byte = fee_per_kb / 1000
# to be consistent with what is displayed in the GUI,
# the calculation needs to use the same precision:
fee_per_byte = quantize_feerate(fee_per_byte)
return round(fee_per_byte * size)
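    # Worked sketch of the method above: estimate_fee_for_feerate(10000, 250)
    # computes 10000 / 1000 = 10 sat/byte, quantizes it to the precision shown
    # in the GUI, and returns round(10 * 250) = 2500 sat.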
def update_fee_estimates(self, key, value):
self.fee_estimates[key] = value
self.fee_estimates_last_updated[key] = time.time()
def is_fee_estimates_update_required(self):
"""Checks time since last requested and updated fee estimates.
Returns True if an update should be requested.
"""
now = time.time()
return now - self.last_time_fee_estimates_requested > 60
def requested_fee_estimates(self):
self.last_time_fee_estimates_requested = time.time()
def get_video_device(self):
device = self.get("video_device", "default")
if device == 'default':
device = ''
return device
def get_ssl_context(self):
ssl_keyfile = self.get('ssl_keyfile')
ssl_certfile = self.get('ssl_certfile')
if ssl_keyfile and ssl_certfile:
ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_context.load_cert_chain(ssl_certfile, ssl_keyfile)
return ssl_context
def get_ssl_domain(self):
from .paymentrequest import check_ssl_config
if self.get('ssl_keyfile') and self.get('ssl_certfile'):
SSL_identity = check_ssl_config(self)
else:
SSL_identity = None
return SSL_identity
def get_netaddress(self, key: str) -> Optional[NetAddress]:
text = self.get(key)
if text:
try:
host, port = text.split(':')
return NetAddress(host, port)
except:
pass
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(
x,
num_zeros=self.num_zeros,
decimal_point=self.decimal_point,
is_diff=is_diff,
whitespaces=whitespaces,
)
def format_amount_and_units(self, amount):
return self.format_amount(amount) + ' '+ self.get_base_unit()
def format_fee_rate(self, fee_rate):
return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' noise/byte'
def get_base_unit(self):
return decimal_point_to_base_unit_name(self.decimal_point)
def set_base_unit(self, unit):
assert unit in base_units.keys()
self.decimal_point = base_unit_name_to_decimal_point(unit)
self.set_key('decimal_point', self.decimal_point, True)
def get_decimal_point(self):
return self.decimal_point
def read_user_config(path):
"""Parse and store the user config settings in electrum-nmc.conf into user_config[]."""
if not path:
return {}
config_path = os.path.join(path, "config")
if not os.path.exists(config_path):
return {}
try:
with open(config_path, "r", encoding='utf-8') as f:
data = f.read()
result = json.loads(data)
except:
_logger.warning(f"Cannot read config file. {config_path}")
return {}
if not type(result) is dict:
return {}
return result
| [
"[email protected]"
] | |
63da4abf9140ef6028f7be93dad6d9462a3652ae | 25ebc03b92df764ff0a6c70c14c2848a49fe1b0b | /daily/20200414/codes/output/code081.py | b7717f8dce6a21d9f95ef23b3b3ed26b31bdeef3 | [] | no_license | podhmo/individual-sandbox | 18db414fafd061568d0d5e993b8f8069867dfcfb | cafee43b4cf51a321f4e2c3f9949ac53eece4b15 | refs/heads/master | 2023-07-23T07:06:57.944539 | 2023-07-09T11:45:53 | 2023-07-09T11:45:53 | 61,940,197 | 6 | 0 | null | 2022-10-19T05:01:17 | 2016-06-25T11:27:04 | Python | UTF-8 | Python | false | false | 200 | py | import pygal
chart = pygal.Line(stroke_style={'width': 5, 'dasharray': '3, 6', 'linecap': 'round', 'linejoin': 'round'})
chart.add('line', [.0002, .0005, .00035])
print(chart.render(is_unicode=True))
| [
"[email protected]"
] | |
403f0e4f49753a0aec4176cc3333a60bd7a59334 | 55ab64b67d8abc02907eb43a54ff6c326ded6b72 | /scripts/addon_library/local/uvpackmaster3/overlay.py | 9f6fb0255008525ae59e25abc9636f43e1684ffc | [
"MIT"
] | permissive | Tilapiatsu/blender-custom_config | 2f03b0bb234c3b098d2830732296d199c91147d0 | 00e14fc190ebff66cf50ff911f25cf5ad3529f8f | refs/heads/master | 2023-08-16T14:26:39.990840 | 2023-08-16T01:32:41 | 2023-08-16T01:32:41 | 161,249,779 | 6 | 2 | MIT | 2023-04-12T05:33:59 | 2018-12-10T23:25:14 | Python | UTF-8 | Python | false | false | 6,194 | py | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
import blf
from .enums import OperationStatus, UvpmLogType
from .utils import in_debug_mode, print_backtrace, get_prefs
class TextOverlay:
def __init__(self, text, color):
self.coords = None
self.text = text
self.color = color
def set_coords(self, coords):
self.coords = coords
def draw(self, ov_manager):
if self.coords is None:
return
blf.color(ov_manager.font_id, *self.color)
region_coord = ov_manager.context.region.view2d.view_to_region(self.coords[0], self.coords[1])
blf.position(ov_manager.font_id, region_coord[0], region_coord[1], 0)
blf.draw(ov_manager.font_id, self.text)
class OverlayManager:
LINE_X_COORD = 10
LINE_Y_COORD = 35
LINE_TEXT_COLOR = (1, 1, 1, 1)
def __init__(self, context, callback):
prefs = get_prefs()
self.font_size = prefs.font_size_text_output
self.font_size_uv_overlay = prefs.font_size_uv_overlay
self.line_distance = int(float(25) / 15 * self.font_size)
self.font_id = 0
self.context = context
handler_args = (self, context)
self.__draw_handler = bpy.types.SpaceImageEditor.draw_handler_add(callback, handler_args, 'WINDOW', 'POST_PIXEL')
def finish(self):
if self.__draw_handler is not None:
bpy.types.SpaceImageEditor.draw_handler_remove(self.__draw_handler, 'WINDOW')
def print_text(self, coords, text, color, z_coord=0.0):
blf.size(self.font_id, self.font_size, 72)
blf.color(self.font_id, *color)
blf.position(self.font_id, coords[0], coords[1], z_coord)
blf.draw(self.font_id, text)
blf.color(self.font_id, *(0, 0, 0, 1))
def __print_text_inline(self, line_num, text, color):
x_coord = self.LINE_X_COORD
y_coord = self.LINE_Y_COORD + line_num * self.line_distance
self.print_text((x_coord, y_coord), text, color)
def print_text_inline(self, text, color=LINE_TEXT_COLOR):
self.__print_text_inline(self.next_line_num, text, color)
self.next_line_num += 1
def callback_begin(self):
self.next_line_num = 0
class EngineOverlayManager(OverlayManager):
WARNING_COLOR = (1, 0.4, 0, 1)
ERROR_COLOR = (1, 0, 0, 1)
DISABLED_DEVICE_COLOR_MULTIPLIER = 0.7
INTEND_STR = ' '
OPSTATUS_TO_COLOR = {
OperationStatus.ERROR : ERROR_COLOR,
OperationStatus.WARNING : WARNING_COLOR,
OperationStatus.CORRECT : OverlayManager.LINE_TEXT_COLOR
}
def __init__(self, op, dev_array):
super().__init__(op.p_context.context, engine_overlay_manager_draw_callback)
self.op = op
self.dev_array = dev_array
self.print_dev_progress = True
self.p_context = op.p_context
self.log_manager = op.log_manager
self.font_id = 0
def print_dev_array(self):
if self.dev_array is None:
return
for dev in reversed(self.dev_array):
dev_color = self.LINE_TEXT_COLOR
if self.print_dev_progress:
progress_str = "{}% ".format(dev.bench_entry.progress)
else:
progress_str = ''
if dev.settings.enabled:
dev_status = "{}(iterations: {})".format(progress_str, dev.bench_entry.iter_count)
else:
dev_status = 'disabled'
dev_color = tuple(self.DISABLED_DEVICE_COLOR_MULTIPLIER * c for c in dev_color)
self.print_text_inline("{}{}: {}".format(self.INTEND_STR, dev.name, dev_status), color=dev_color)
self.print_text_inline("[PACKING DEVICES]:")
def print_list(self, header, list, color):
for elem in reversed(list):
self.print_text_inline("{}* {}".format(self.INTEND_STR, elem), color=color)
self.print_text_inline("[{}]:".format(header), color=color)
def engine_overlay_manager_draw_callback(self, context):
try:
self.callback_begin()
status_str = self.log_manager.last_log(UvpmLogType.STATUS)
if status_str is None:
status_str = ''
status_color = self.OPSTATUS_TO_COLOR[self.log_manager.operation_status()]
hint_str = self.log_manager.last_log(UvpmLogType.HINT)
if hint_str:
status_str = "{} ({})".format(status_str, hint_str)
self.print_text_inline('[STATUS]: ' + status_str, color=status_color)
self.print_dev_array()
log_print_metadata = (\
(UvpmLogType.INFO, 'INFO'),
(UvpmLogType.WARNING,'WARNINGS'),
(UvpmLogType.ERROR, 'ERRORS')
)
for log_type, header in log_print_metadata:
op_status = self.log_manager.LOGTYPE_TO_OPSTATUS[log_type]
color = self.OPSTATUS_TO_COLOR[op_status]
log_list = self.log_manager.log_list(log_type)
if len(log_list) > 0:
self.print_list(header, log_list, color)
blf.size(self.font_id, self.font_size_uv_overlay, 72)
if self.p_context.p_islands is not None:
for p_island in self.p_context.p_islands:
overlay = p_island.overlay()
if overlay is not None:
overlay.draw(self)
except Exception as ex:
if in_debug_mode():
print_backtrace(ex)
| [
"[email protected]"
] | |
7a08037735251f82bbeb0a141dc986cc6be5b018 | db9cc680a60997412eae035b257cc77efbcdcb06 | /py3/leetcodeCN/tree/111. Minimum Depth of Binary Tree.py | 32a79a0af7a51bccd7310bf7ea62463e9dd2d775 | [] | no_license | Azson/machineLearning | 9630b62c73b2388a57c630644dae3ffa8e4db236 | 35662ddf39d322009f074ce8981e5f5d27786819 | refs/heads/master | 2022-05-06T07:03:23.543355 | 2021-08-20T14:57:25 | 2021-08-20T14:57:25 | 179,935,258 | 3 | 3 | null | 2019-11-04T14:26:51 | 2019-04-07T08:07:08 | Python | UTF-8 | Python | false | false | 482 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
def minDepth(root):
    # BFS level by level; the level of the first leaf reached is the minimum depth
    if root is None:
        return 0
    ls = [root]
ans = 0
while len(ls) > 0:
ans += 1
la = len(ls)
for i in range(la):
root = ls[i]
            if not root.left and not root.right:
                # first leaf on this level: shortest root-to-leaf path found
                return ans
if root.left:
ls.append(root.left)
if root.right:
ls.append(root.right)
ls = ls[la:]
return ans
if __name__ == '__main__':
pass | [
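    # Usage sketch: the module defines no tree node class, so a minimal one
    # with .val/.left/.right attributes is assumed here for illustration.
    class TreeNode:
        def __init__(self, val=0, left=None, right=None):
            self.val, self.left, self.right = val, left, right
    #     1
    #    / \
    #   2   3
    #  /
    # 4        nearest leaf is 3, so the minimum depth is 2
    root = TreeNode(1, TreeNode(2, TreeNode(4)), TreeNode(3))
    print(minDepth(root))  # expected output: 2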
"[email protected]"
] | |
58efa655d1487cd5d3f26dbe92f3eb954bf9e877 | ca42e62ce157095ace5fbaec0bf261a4fb13aa6a | /pyenv/lib/python3.6/site-packages/rest_framework/utils/serializer_helpers.py | 4734332af888219ce4589dd089dcda9352ed0871 | [
"Apache-2.0"
] | permissive | ronald-rgr/ai-chatbot-smartguide | 58f1e7c76b00248923f5fe85f87c318b45e38836 | c9c830feb6b66c2e362f8fb5d147ef0c4f4a08cf | refs/heads/master | 2021-04-18T03:15:23.720397 | 2020-03-23T17:55:47 | 2020-03-23T17:55:47 | 249,500,344 | 0 | 0 | Apache-2.0 | 2021-04-16T20:45:28 | 2020-03-23T17:35:37 | Python | UTF-8 | Python | false | false | 4,617 | py | from __future__ import unicode_literals
import collections
from collections import OrderedDict
from django.utils.encoding import force_text
from rest_framework.compat import unicode_to_repr
class ReturnDict(OrderedDict):
"""
Return object from `serializer.data` for the `Serializer` class.
Includes a backlink to the serializer instance for renderers
to use if they need richer field information.
"""
def __init__(self, *args, **kwargs):
self.serializer = kwargs.pop('serializer')
super(ReturnDict, self).__init__(*args, **kwargs)
def copy(self):
return ReturnDict(self, serializer=self.serializer)
def __repr__(self):
return dict.__repr__(self)
def __reduce__(self):
# Pickling these objects will drop the .serializer backlink,
# but preserve the raw data.
return (dict, (dict(self),))
class ReturnList(list):
"""
Return object from `serializer.data` for the `SerializerList` class.
Includes a backlink to the serializer instance for renderers
to use if they need richer field information.
"""
def __init__(self, *args, **kwargs):
self.serializer = kwargs.pop('serializer')
super(ReturnList, self).__init__(*args, **kwargs)
def __repr__(self):
return list.__repr__(self)
def __reduce__(self):
# Pickling these objects will drop the .serializer backlink,
# but preserve the raw data.
return (list, (list(self),))
class BoundField(object):
"""
A field object that also includes `.value` and `.error` properties.
Returned when iterating over a serializer instance,
providing an API similar to Django forms and form fields.
"""
def __init__(self, field, value, errors, prefix=''):
self._field = field
self._prefix = prefix
self.value = value
self.errors = errors
self.name = prefix + self.field_name
def __getattr__(self, attr_name):
return getattr(self._field, attr_name)
@property
def _proxy_class(self):
return self._field.__class__
def __repr__(self):
return unicode_to_repr('<%s value=%s errors=%s>' % (
self.__class__.__name__, self.value, self.errors
))
def as_form_field(self):
value = '' if (self.value is None or self.value is False) else self.value
return self.__class__(self._field, value, self.errors, self._prefix)
class NestedBoundField(BoundField):
"""
This `BoundField` additionally implements __iter__ and __getitem__
in order to support nested bound fields. This class is the type of
`BoundField` that is used for serializer fields.
"""
def __init__(self, field, value, errors, prefix=''):
        if value is None or value == '':
value = {}
super(NestedBoundField, self).__init__(field, value, errors, prefix)
def __iter__(self):
for field in self.fields.values():
yield self[field.field_name]
def __getitem__(self, key):
field = self.fields[key]
value = self.value.get(key) if self.value else None
error = self.errors.get(key) if self.errors else None
if hasattr(field, 'fields'):
return NestedBoundField(field, value, error, prefix=self.name + '.')
return BoundField(field, value, error, prefix=self.name + '.')
def as_form_field(self):
values = {}
for key, value in self.value.items():
if isinstance(value, (list, dict)):
values[key] = value
else:
values[key] = '' if (value is None or value is False) else force_text(value)
return self.__class__(self._field, values, self.errors, self._prefix)
class BindingDict(collections.MutableMapping):
"""
This dict-like object is used to store fields on a serializer.
This ensures that whenever fields are added to the serializer we call
`field.bind()` so that the `field_name` and `parent` attributes
can be set correctly.
"""
def __init__(self, serializer):
self.serializer = serializer
self.fields = OrderedDict()
def __setitem__(self, key, field):
self.fields[key] = field
field.bind(field_name=key, parent=self.serializer)
def __getitem__(self, key):
return self.fields[key]
def __delitem__(self, key):
del self.fields[key]
def __iter__(self):
return iter(self.fields)
def __len__(self):
return len(self.fields)
def __repr__(self):
return dict.__repr__(self.fields)
| [
"[email protected]"
] | |
febdaae751915967a6fef3b5f718c6b4c230ab89 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/008_graph_algorithms/_exercises/templates/Cracking Coding Interviews - Mastering Algorithms/clone-graph-bfs.py | 08c345f3dc7c721c8efea80633a287a2e51fb103 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,084 | py | # c_ Node
# ___ - val neighbors
# ? ?
# ? ?
#
# # Example Input:
#
# # 1 <---> 2
# # ^ ^
# # | |
# # v v
# # 4 <---> 3
#
# # Example Output:
#
# # 1 <---> 2
# # ^ ^
# # | |
# # v v
# # 4 <---> 3
#
# ___ clone node
# queue _ # LIST
#
# visited _ # DICT
#
# ?.ap.. ?
#
# w__ le. ? > 0
# cur _ ?.po. 0
#
# new_node _ N..
#
# __ c.. __ v__.k..
# n.. _ v..|c..
# ____
# new_node _ N.. c__.v.. ||
#
# neighbors _ n__.n..
#
# v..|c.. _ n..
#
# ___ i __ ra.. le. c__.n..
# __ c__.n..|? __ v__.k..
# n__.ap.. v..|c__.n..|?
# ____
# q__.ap.. c__.n..|?
# new_neighbor_node _ ? c__.n..|?.v.. ||
# n__.ap.. ?
# v..|c__.n..|? _ ?
#
# r_ v..|n..
#
#
#
#
# node = Node(1, [])
# node2 = Node(2, [])
# node3 = Node(3, [])
# node4 = Node(4, [])
#
# node.neighbors.append(node2)
# node.neighbors.append(node4)
#
# node2.neighbors.append(node)
# node2.neighbors.append(node3)
#
# node3.neighbors.append(node2)
# node3.neighbors.append(node4)
#
# node4.neighbors.append(node)
# node4.neighbors.append(node3)
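#
# One possible completion of the blanked template above, following its variable
# names (a sketch, not the repository's official solution):
#
# def clone(node):
#     queue = []
#     visited = {}
#     queue.append(node)
#     while len(queue) > 0:
#         cur = queue.pop(0)
#         if cur in visited.keys():
#             new_node = visited[cur]
#         else:
#             new_node = Node(cur.val, [])
#         visited[cur] = new_node
#         for i in range(len(cur.neighbors)):
#             if cur.neighbors[i] in visited.keys():
#                 new_node.neighbors.append(visited[cur.neighbors[i]])
#             else:
#                 queue.append(cur.neighbors[i])
#                 new_neighbor_node = Node(cur.neighbors[i].val, [])
#                 new_node.neighbors.append(new_neighbor_node)
#                 visited[cur.neighbors[i]] = new_neighbor_node
#     return visited[node]
#
# print(clone(node).val)  # expected: 1, with a structurally identical copy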
| [
"[email protected]"
] | |
9e484b15c88d0bde693c3cc2535cf40b53bbc640 | 5a0122509b4e7e15e556460d261d9d8a1cee76ad | /repository/base/docattachments_pb.py | ddad9c7ef5a420750f8e46c2777173bcc3458696 | [] | no_license | cash2one/BHWGoogleProject | cec4d5353f6ea83ecec0d0325747bed812283304 | 18ecee580e284705b642b88c8e9594535993fead | refs/heads/master | 2020-12-25T20:42:08.612393 | 2013-04-13T14:01:37 | 2013-04-13T14:01:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,677 | py | # This file automatically generated by protocol-compiler from repository/base/docattachments.proto
# DO NOT EDIT!
from google3.net.proto import ProtocolBuffer
import array
import thread
from google3.net.proto import _net_proto___parse__python
__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
unusednames=printElemNumber,debug_strs no-special"""
from google3.net.proto.message_set import MessageSet
class DocAttachments(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
self.attachments_ = MessageSet()
self.has_attachments_ = 0
if contents is not None: self.MergeFromString(contents)
def attachments(self): return self.attachments_
def mutable_attachments(self): self.has_attachments_ = 1; return self.attachments_
def clear_attachments(self):self.has_attachments_ = 0; self.attachments_.Clear()
def has_attachments(self): return self.has_attachments_
def MergeFrom(self, x):
assert x is not self
if (x.has_attachments()): self.mutable_attachments().MergeFrom(x.attachments())
def _CMergeFromString(self, s):
_net_proto___parse__python.MergeFromString(self, 'DocAttachments', s)
def _CEncode(self):
return _net_proto___parse__python.Encode(self, 'DocAttachments')
def _CToASCII(self, output_format):
return _net_proto___parse__python.ToASCII(self, 'DocAttachments', output_format)
def ParseASCII(self, s):
_net_proto___parse__python.ParseASCII(self, 'DocAttachments', s)
def ParseASCIIIgnoreUnknown(self, s):
_net_proto___parse__python.ParseASCIIIgnoreUnknown(self, 'DocAttachments', s)
def Equals(self, x):
if x is self: return 1
if self.has_attachments_ != x.has_attachments_: return 0
if self.has_attachments_ and self.attachments_ != x.attachments_: return 0
return 1
def __eq__(self, other):
return (other is not None) and (other.__class__ == self.__class__) and self.Equals(other)
def __ne__(self, other):
return not (self == other)
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_attachments_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: attachments not set.')
elif not self.attachments_.IsInitialized(debug_strs): initialized = 0
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(self.attachments_.ByteSize())
return n + 1
def Clear(self):
self.clear_attachments()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putVarInt32(self.attachments_.ByteSize())
self.attachments_.OutputUnchecked(out)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
length = d.getVarInt32()
tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
d.skip(length)
self.mutable_attachments().TryMerge(tmp)
continue
# tag 0 is special: it's used to indicate an error.
# so if we see it we raise an exception.
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_attachments_:
res+=prefix+"attachments <\n"
res+=self.attachments_.__str__(prefix + " ", printElemNumber)
res+=prefix+">\n"
return res
kattachments = 1
_TEXT = (
"ErrorCode", # 0
"attachments", # 1
)
_TYPES = (
ProtocolBuffer.Encoder.NUMERIC, # 0
ProtocolBuffer.Encoder.STRING, # 1
)
# stylesheet for XML output
_STYLE = \
""""""
_STYLE_CONTENT_TYPE = \
""""""
_SERIALIZED_DESCRIPTOR = array.array('B', [
0x5a,
0x24,
0x72,
0x65,
0x70,
0x6f,
0x73,
0x69,
0x74,
0x6f,
0x72,
0x79,
0x2f,
0x62,
0x61,
0x73,
0x65,
0x2f,
0x64,
0x6f,
0x63,
0x61,
0x74,
0x74,
0x61,
0x63,
0x68,
0x6d,
0x65,
0x6e,
0x74,
0x73,
0x2e,
0x70,
0x72,
0x6f,
0x74,
0x6f,
0x0a,
0x0e,
0x44,
0x6f,
0x63,
0x41,
0x74,
0x74,
0x61,
0x63,
0x68,
0x6d,
0x65,
0x6e,
0x74,
0x73,
0x13,
0x1a,
0x0b,
0x61,
0x74,
0x74,
0x61,
0x63,
0x68,
0x6d,
0x65,
0x6e,
0x74,
0x73,
0x20,
0x01,
0x28,
0x02,
0x30,
0x0b,
0x38,
0x02,
0x4a,
0x0a,
0x4d,
0x65,
0x73,
0x73,
0x61,
0x67,
0x65,
0x53,
0x65,
0x74,
0x14,
])
_net_proto___parse__python.RegisterType(_SERIALIZED_DESCRIPTOR.tostring())
__all__ = ['DocAttachments']
| [
"[email protected]"
] | |
6581711d6a4c030829ec7b03eb6558cac005f100 | 6bb4291e34598a83d1cd4631abd04ae00df5290b | /api/test/utils/test_utils.py | 9b440e5c4479bc56943a720ec1d824546f1bb28c | [] | no_license | Frost-Lee/order_scheduling | f1f2e69bc142a81869f70697b79cb7f2664d6b2e | 825456c1b9a95011fe3530a2fb449dffd40f5246 | refs/heads/main | 2023-01-12T12:27:45.182521 | 2020-11-16T06:06:13 | 2020-11-16T06:06:13 | 312,620,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | import unittest
from scheduler.utils import utils
class TestUtils(unittest.TestCase):
def test_aggregate_tuples(self):
        with self.assertRaisesRegex(AssertionError, 'index out of range'):
            utils.aggregate_tuples([('a', 'b', 1)], [2, 3], 0)
        # the second call needs its own context manager, otherwise it is never reached
        with self.assertRaisesRegex(AssertionError, 'index out of range'):
            utils.aggregate_tuples([('a', 'b', 1)], [1, 2], 3)
self.assertEqual(
set(utils.aggregate_tuples([('a', 'b', 1), ('a', 'b', 2)], [0, 1], 2)),
            {('a', 'b', 3)}
)
self.assertEqual(
set(utils.aggregate_tuples([('a', 'b', 1), ('a', 'b', 2), ('b', 'c', 3)], [0, 1], 2)),
            {('a', 'b', 3), ('b', 'c', 3)}
)
| [
"[email protected]"
] | |
486140921bdefd70a51c334abbc4653f45f45993 | 7967b7652dcf580d54fa891f560cdc82f1f1ca14 | /shell/tinkerforge.header | 09cdc5c5ce8dc225a07e272d7c79c5c3a120185a | [] | no_license | chuangke365/tfgenerators | 1c3a75b2a463697b54a7e618f36d36593835ba17 | 05d76d03b3499e0449b5ee65f93eab14186c2153 | refs/heads/master | 2020-12-29T02:47:11.043814 | 2015-03-19T16:27:01 | 2015-03-19T16:27:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,302 | header | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2013-2014 Matthias Bolte <[email protected]>
#
# Version <<VERSION>>
#
# Redistribution and use in source and binary forms of this file,
# with or without modification, are permitted. See the Creative
# Commons Zero (CC0 1.0) License for more details.
import sys
import os
import signal
import time
import shlex
import socket
import select
import threading
import subprocess
import textwrap
# error codes are also used as exit codes, valid values are [1..255]
ERROR_INTERRUPTED = 1
ERROR_SYNTAX_ERROR = 2
ERROR_PYTHON_VERSION = 21
ERROR_ARGPARSE_MISSING = 22
ERROR_SOCKET_ERROR = 23
ERROR_OTHER_EXCEPTION = 24
ERROR_INVALID_PLACEHOLDER = 25
ERROR_AUTHENTICATION_ERROR = 26
IPCONNECTION_ERROR_OFFSET = 200
listen_mode = False
enable_host = True
enable_port = True
enable_execute = True
line_separator = '\n'
group_terminator = '\n'
# set from environment variable
dry_run = False
def fatal_error(message, exit_code):
sys.stderr.write('tinkerforge: error: {0}\n'.format(message))
sys.exit(exit_code)
if sys.hexversion < 0x02050000:
fatal_error('requiring python 2.5 or newer', ERROR_PYTHON_VERSION)
try:
import argparse
except ImportError:
fatal_error('requiring python argparse module', ERROR_ARGPARSE_MISSING)
class Context:
abort = False
async_exception = None
host = None
port = None
secret = None
item_separator = None
group_separator = None
no_symbolic_input = None
no_symbolic_output = None
timeout = None
duration = None
uid = None
def output(self, string):
sys.stdout.write(string)
def duplicate(self):
ctx = Context()
ctx.host = self.host
ctx.port = self.port
ctx.secret = self.secret
ctx.item_separator = self.item_separator
ctx.group_separator = self.group_separator
ctx.no_symbolic_input = self.no_symbolic_input
ctx.no_symbolic_output = self.no_symbolic_output
ctx.timeout = self.timeout
ctx.duration = self.duration
ctx.uid = self.uid
return ctx
class ParserExit(Exception):
pass
class FatalError(Exception):
def __init__(self, message, exit_code):
Exception.__init__(self, message)
self.exit_code = exit_code
self.message = message
class Formatter(argparse.HelpFormatter):
def _fill_text(self, text, width, indent):
ps = []
for p in text.split('\n'):
ps.append(textwrap.fill(p, width, initial_indent=indent, subsequent_indent=indent))
return '\n'.join(ps)
class Parser(argparse.ArgumentParser):
def __init__(self, ctx, prog, description=None, epilog=None):
if not listen_mode:
if len(prog) > 0:
prog = 'tinkerforge ' + prog
else:
prog = 'tinkerforge'
argparse.ArgumentParser.__init__(self, prog=prog, add_help=False, description=description, epilog=epilog, formatter_class=Formatter)
self.ctx = ctx
self.ctx.current_parser = self
self.add_argument('--help', action='help', help='show this help message and exit')
def _print_message(self, message, file=None):
if message:
self.ctx.output(message)
def exit(self, status=0, message=None):
if status == 0:
if message:
self.ctx.output(message)
raise ParserExit
else:
if not message:
message = 'unknown error'
raise FatalError(message, ERROR_OTHER_EXCEPTION)
def error(self, message):
if not listen_mode:
self.print_usage(sys.stderr)
raise FatalError(message, ERROR_SYNTAX_ERROR)
class ParserWithExecute(Parser):
def __init__(self, ctx, prog):
Parser.__init__(self, ctx, prog)
if enable_execute:
self.add_argument('--execute', type=str, help='shell command line to execute for each incoming response', metavar='<command>')
def parse_args(self, args):
namespace = argparse.Namespace()
if not enable_execute:
setattr(namespace, 'execute', None)
return Parser.parse_args(self, args, namespace)
class ParserWithExpectResponse(Parser):
def __init__(self, ctx, prog):
Parser.__init__(self, ctx, prog)
self.add_argument('--expect-response', action='store_true', help='request response and wait for it')
def handle_ipcon_exceptions(ipcon, function):
try:
function(ipcon)
except Error as e:
raise FatalError(e.description.lower(), IPCONNECTION_ERROR_OFFSET - e.value)
except socket.error as e:
raise FatalError(str(e).lower(), ERROR_SOCKET_ERROR)
except Exception as e:
raise FatalError(str(e).lower(), ERROR_OTHER_EXCEPTION)
finally:
try:
ipcon.disconnect()
except:
pass
def authenticate(ipcon, secret, message):
# don't auto-reconnect on authentication error
ipcon.set_auto_reconnect(False)
try:
ipcon.authenticate(secret)
except:
raise FatalError(message, ERROR_AUTHENTICATION_ERROR)
ipcon.set_auto_reconnect(True)
def connect_ipcon_and_call(ctx, function, timeout=None):
def function_wrapper(ipcon):
if timeout is not None:
ipcon.set_timeout(timeout)
ipcon.connect(ctx.host, ctx.port)
if len(ctx.secret) > 0:
def callback(connect_reason):
if connect_reason == IPConnection.CONNECT_REASON_AUTO_RECONNECT:
try:
authenticate(ipcon, ctx.secret, 'could not authenticate after auto-reconnect')
except FatalError as e:
ctx.async_exception = e
ipcon.register_callback(IPConnection.CALLBACK_CONNECTED, callback)
authenticate(ipcon, ctx.secret, 'could not authenticate')
function(ipcon)
handle_ipcon_exceptions(IPConnection(), function_wrapper)
def call_generic(ctx, name, functions, argv):
parser = Parser(ctx, 'call ' + name)
function_choices = sorted(functions.keys())
class ListFunctionsAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
ctx.output(line_separator.join(function_choices) + group_terminator)
raise ParserExit()
parser.add_argument('--list-functions', action=ListFunctionsAction, nargs=0, help='show functions of {0} and exit'.format(name))
parser.add_argument('uid', type=check_base58, help='uid of a ' + name, metavar='<uid>')
parser.add_argument('function', choices=function_choices, help='{' + ', '.join(function_choices) + '}', metavar='<function>')
parser.add_argument('args', nargs=argparse.REMAINDER, help='function specific arguments', metavar='<args>')
args = parser.parse_args(argv)
ctx.uid = args.uid
functions[args.function](ctx, args.args)
def dispatch_generic(ctx, name, callbacks, argv):
parser = Parser(ctx, 'dispatch ' + name)
callback_choices = sorted(callbacks.keys())
class ListCallbacksAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
ctx.output(line_separator.join(callback_choices) + group_terminator)
raise ParserExit()
parser.add_argument('--list-callbacks', action=ListCallbacksAction, nargs=0, help='show callbacks of {0} and exit'.format(name))
parser.add_argument('uid', type=check_base58, help='uid of a ' + name, metavar='<uid>')
parser.add_argument('callback', choices=callback_choices, help='{' + ', '.join(callback_choices) + '}', metavar='<callback>')
parser.add_argument('args', nargs=argparse.REMAINDER, help='callback specific arguments', metavar='<args>')
args = parser.parse_args(argv)
ctx.uid = args.uid
callbacks[args.callback](ctx, args.args)
def device_send_request(ctx, device_class, function_id, request_data, format_in,
format_out, command, expect_response, names, symbols):
if dry_run:
return
if command is not None:
def handle_response(values):
execute_response(ctx, command, names, values)
else:
def handle_response(values):
output_response(ctx, names, values)
def function(ipcon):
device = device_class(ctx.uid, ipcon)
if expect_response:
device.set_response_expected(function_id, True)
response = ipcon.send_request(device, function_id, request_data, format_in, format_out)
if response is not None:
if len(names) == 1:
response = (response,)
response = format_symbolic_output(ctx, response, symbols)
handle_response(response)
elif listen_mode:
ctx.output(group_terminator)
connect_ipcon_and_call(ctx, function, ctx.timeout / 1000.0)
def device_callback(ctx, device_class, function_id, command, names):
if dry_run:
return
if command is not None:
def callback(*values):
execute_response(ctx, command, names, values)
else:
is_first_callback = [True]
def callback(*values):
if len(names) > 1 and not listen_mode:
if is_first_callback[0]:
is_first_callback[0] = False
else:
ctx.output(ctx.group_separator)
output_response(ctx, names, values)
def function(ipcon):
device = device_class(ctx.uid, ipcon)
if ctx.duration == 0:
exit_flag = [False]
def callback_wrapper(*args, **kwargs):
if not exit_flag[0]:
callback(*args, **kwargs)
exit_flag[0] = True
device.registered_callbacks[function_id] = callback_wrapper
while not exit_flag[0] and not ctx.abort:
time.sleep(0.1)
if ctx.async_exception is not None:
raise ctx.async_exception
elif ctx.duration < 0:
device.registered_callbacks[function_id] = callback
while not ctx.abort:
time.sleep(1)
if ctx.async_exception is not None:
raise ctx.async_exception
else:
device.registered_callbacks[function_id] = callback
# FIXME: if duration is large then it would be better to sleep
# in multiple steps here
time.sleep(ctx.duration / 1000.0)
# FIXME: only checking for an exception after the complete sleep
# is not good, sleep in shorter steps here to check for
# exception more often
if ctx.async_exception is not None:
raise ctx.async_exception
connect_ipcon_and_call(ctx, function)
# length_is_fixed = False means length is maximum length
def get_array_type_name(ctx, name, length, length_is_fixed=True):
if length_is_fixed:
if length < 7:
return ctx.item_separator.join([name]*length)
else:
return '{0}{1}{0}{1}..{2}x..{1}{0}{1}{0}'.format(name, ctx.item_separator, length - 4)
else:
return ('[' + ctx.item_separator).join([name]*length) + ']'*(length - 1)
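# Illustrative examples (assuming ',' as the item separator): a fixed length of 3
# with name 'int' yields 'int,int,int', while a maximum length of 3 yields
# 'int[,int[,int]]'.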
def format_symbolic_output(ctx, values, symbols):
if ctx.no_symbolic_output:
return values
translated_values = []
for vs in zip(values, symbols):
if vs[1] is not None:
try:
translated_values.append(vs[1][vs[0]])
except KeyError:
translated_values.append(vs[0])
else:
translated_values.append(vs[0])
return tuple(translated_values)
def check_base58(string):
try:
base58decode(string)
except:
msg = 'invalid base58 value: %r' % string
raise argparse.ArgumentTypeError(msg)
return string
check_base58.__name__ = 'base58'
def check_char(string):
if len(string) != 1:
msg = 'invalid char value: %r' % string
raise argparse.ArgumentTypeError(msg)
return string
check_char.__name__ = 'char'
def convert_int(string):
try:
return int(string, base=0)
except (ValueError, TypeError):
msg = 'invalid int value: %r' % string
raise argparse.ArgumentTypeError(msg)
convert_int.__name__ = 'int'
def convert_bool(string):
value = string.lower()
if value == 'true':
return True
elif value == 'false':
return False
else:
msg = 'invalid bool value: %r' % string
raise argparse.ArgumentTypeError(msg)
convert_bool.__name__ = 'bool'
def create_string_checker(type, length):
def check_string(string):
try:
value = type(string)
except (ValueError, TypeError):
name = getattr(type, '__name__', repr(type))
msg = 'invalid %s value: %r' % (name, string)
raise argparse.ArgumentTypeError(msg)
if len(value) > length:
msg = 'string value is too long: %r' % value
raise argparse.ArgumentTypeError(msg)
return value
check_string.__name__ = 'string'
return check_string
def create_symbol_converter(ctx, type, symbols, strict=False):
def convert_symbol(string):
if not ctx.no_symbolic_input:
try:
return symbols[string]
except KeyError:
pass
try:
value = type(string)
except (ValueError, TypeError):
name = getattr(type, '__name__', repr(type))
msg = 'invalid %s value: %r' % (name, string)
raise argparse.ArgumentTypeError(msg)
if strict and value not in symbols.values():
name = getattr(type, '__name__', repr(type))
msg = '%s value is out-of-range: %r' % (name, string)
raise argparse.ArgumentTypeError(msg)
return value
convert_symbol.__name__ = getattr(type, '__name__', repr(type))
return convert_symbol
# length_is_fixed = False means length is maximum length
def create_array_converter(ctx, type, length, length_is_fixed=True):
def convert_array(string):
array = []
for item in string.split(ctx.item_separator):
try:
value = type(item)
except (ValueError, TypeError, argparse.ArgumentTypeError):
name = getattr(type, '__name__', repr(type))
msg = 'invalid %s value: %r' % (get_array_type_name(ctx, name, length, length_is_fixed), string)
raise argparse.ArgumentTypeError(msg)
array.append(value)
if (length_is_fixed and len(array) != length) or \
(not length_is_fixed and (len(array) < 1 or len(array) > length)):
name = getattr(type, '__name__', repr(type))
msg = 'invalid %s value: %r' % (get_array_type_name(ctx, name, length, length_is_fixed), string)
raise argparse.ArgumentTypeError(msg)
return array
name = getattr(type, '__name__', repr(type))
convert_array.__name__ = get_array_type_name(ctx, name, length, length_is_fixed)
return convert_array
def execute_response(ctx, command, names, values):
formatted_values = {}
class Tuple(tuple):
def __init__(self, *args):
tuple.__init__(self, *args)
def __str__(self):
return ctx.item_separator.join(map(str, self))
for name, value in zip(names, values):
if type(value) == tuple:
formatted_values[name] = Tuple(value)
elif type(value) == bool:
formatted_values[name] = str(value).lower()
else:
formatted_values[name] = value
try:
formatted_command = command.format(**formatted_values)
except KeyError as e:
raise FatalError('invalid placeholder %s in format: %s' % (str(e).lower(), command), ERROR_INVALID_PLACEHOLDER)
except Exception as e:
raise FatalError('%s: %s' % (str(e).lower(), command), ERROR_OTHER_EXCEPTION)
try:
if listen_mode:
try:
output = subprocess.check_output(formatted_command, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
output = e.output
ctx.output(output)
else:
subprocess.call(formatted_command, shell=True)
except Exception as e:
raise FatalError('%s: %s' % (str(e).lower(), formatted_command), ERROR_OTHER_EXCEPTION)
def output_response(ctx, names, values):
lines = []
for name, value in zip(names, values):
if type(value) == tuple:
lines.append('{0}={1}'.format(name, ctx.item_separator.join(map(str, value))))
elif type(value) == bool:
lines.append('{0}={1}'.format(name, str(value).lower()))
else:
lines.append('{0}={1}'.format(name, value))
ctx.output(line_separator.join(lines) + group_terminator)
def common_get_identity(ctx, prog_prefix, klass, argv):
parser = ParserWithExecute(ctx, prog_prefix + ' get-identity')
args = parser.parse_args(argv)
device_send_request(ctx, klass, 255, (), '', '8s 8s c 3B 3B H', args.execute, False,
['uid', 'connected-uid', 'position', 'hardware-version', 'firmware-version', 'device-identifier'],
[None, None, None, None, None, device_identifier_symbols])
| [
"[email protected]"
] | |
076c5361b321dec4c40f27a6bcaae40a5b2d2e57 | e811c41caa55559d3b482f26c31fcef02ec66138 | /venv/Lib/site-packages/typed_ast/ast27.py | e11ebc20fa0017659085dcd93b58a00a972ffd12 | [
"MIT"
] | permissive | 1SouravGhosh/POC_REPO | 929ea865d60a51597966ffcfc4a7a3a350a00f54 | e486d9a1fe0e1215f24bac3aaf97517cda21a066 | refs/heads/master | 2022-11-01T09:53:56.443500 | 2019-02-17T16:21:48 | 2019-02-17T16:21:48 | 171,133,391 | 0 | 1 | MIT | 2022-10-22T04:17:54 | 2019-02-17T14:45:39 | Python | UTF-8 | Python | false | false | 12,603 | py | # -*- coding: utf-8 -*-
"""
ast27
~~~
The `ast27` module helps Python applications to process trees of the Python
abstract syntax grammar. The abstract syntax itself might change with
each Python release; this module helps to find out programmatically what
the current grammar looks like and allows modifications of it. The `ast27`
module is similar to the builtin `ast` module on Python 2.7, except `ast27`
runs on Python 3 and provides PEP 484 type comments as part of the AST.
Specifically, these changes are made to the Python 2.7 AST:
- The `FunctionDef`, `Assign`, `For`, and `With` classes all have a
`type_comment` field which contains a `str` with the text of the
associated type comment, if any.
- `arguments` has a `type_comments` list of per-argument type comments.
- `parse` has been augmented so it can parse function signature types when
called with `mode=func_type`.
- `Module` has a `type_ignores` field which contains a list of
lines which have been `# type: ignore`d.
- `Str` has a `kind` string field which preserves the original string
prefix, so that `ast27.parse('br"test"').body[0].value.kind == 'br'`.
An abstract syntax tree can be generated by using the `parse()`
function from this module. The result will be a tree of objects whose
classes all inherit from `ast27.AST`.
A modified abstract syntax tree can be compiled into a Python code object
using the built-in `compile()` function.
Additionally various helper functions are provided that make working with
the trees simpler. The main intention of the helper functions and this
module in general is to provide an easy to use interface for libraries
that work tightly with the python syntax (template engines for example).
:copyright: Copyright 2008 by Armin Ronacher.
:license: Python License.
"""
import _ast27
from _ast27 import *
def parse(source, filename='<unknown>', mode='exec'):
"""
Parse the source into an AST node with type comments.
Equivalent to compile(source, filename, mode, PyCF_ONLY_AST).
"""
return _ast27.parse(source, filename, mode)
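# A minimal illustration (the variable names are only for this example): a PEP 484
# type comment on an assignment is exposed on the resulting Assign node.
#
#   tree = parse("x = []  # type: List[int]")
#   assert tree.body[0].type_comment == 'List[int]'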
def literal_eval(node_or_string):
"""
Safely evaluate an expression node or a string containing a Python
expression. The string or node provided may only consist of the following
Python literal structures: strings, numbers, tuples, lists, dicts, booleans,
and None.
"""
_safe_names = {'None': None, 'True': True, 'False': False}
if isinstance(node_or_string, basestring):
node_or_string = parse(node_or_string, mode='eval')
if isinstance(node_or_string, Expression):
node_or_string = node_or_string.body
def _convert(node):
if isinstance(node, Str):
return node.s
elif isinstance(node, Num):
return node.n
elif isinstance(node, Tuple):
return tuple(map(_convert, node.elts))
elif isinstance(node, List):
return list(map(_convert, node.elts))
elif isinstance(node, Dict):
return dict((_convert(k), _convert(v)) for k, v
in zip(node.keys, node.values))
elif isinstance(node, Name):
if node.id in _safe_names:
return _safe_names[node.id]
elif isinstance(node, BinOp) and \
isinstance(node.op, (Add, Sub)) and \
isinstance(node.right, Num) and \
isinstance(node.right.n, complex) and \
isinstance(node.left, Num) and \
isinstance(node.left.n, (int, long, float)):
left = node.left.n
right = node.right.n
if isinstance(node.op, Add):
return left + right
else:
return left - right
raise ValueError('malformed string')
return _convert(node_or_string)
def dump(node, annotate_fields=True, include_attributes=False):
"""
Return a formatted dump of the tree in *node*. This is mainly useful for
debugging purposes. The returned string will show the names and the values
for fields. This makes the code impossible to evaluate, so if evaluation is
wanted *annotate_fields* must be set to False. Attributes such as line
numbers and column offsets are not dumped by default. If this is wanted,
*include_attributes* can be set to True.
"""
def _format(node):
if isinstance(node, AST):
fields = [(a, _format(b)) for a, b in iter_fields(node)]
rv = '%s(%s' % (node.__class__.__name__, ', '.join(
('%s=%s' % field for field in fields)
if annotate_fields else
(b for a, b in fields)
))
if include_attributes and node._attributes:
rv += fields and ', ' or ' '
rv += ', '.join('%s=%s' % (a, _format(getattr(node, a)))
for a in node._attributes)
return rv + ')'
elif isinstance(node, list):
return '[%s]' % ', '.join(_format(x) for x in node)
return repr(node)
if not isinstance(node, AST):
raise TypeError('expected AST, got %r' % node.__class__.__name__)
return _format(node)
def copy_location(new_node, old_node):
"""
Copy source location (`lineno` and `col_offset` attributes) from
*old_node* to *new_node* if possible, and return *new_node*.
"""
for attr in 'lineno', 'col_offset':
if attr in old_node._attributes and attr in new_node._attributes \
and hasattr(old_node, attr):
setattr(new_node, attr, getattr(old_node, attr))
return new_node
def fix_missing_locations(node):
"""
When you compile a node tree with compile(), the compiler expects lineno and
col_offset attributes for every node that supports them. This is rather
tedious to fill in for generated nodes, so this helper adds these attributes
recursively where not already set, by setting them to the values of the
parent node. It works recursively starting at *node*.
"""
def _fix(node, lineno, col_offset):
if 'lineno' in node._attributes:
if not hasattr(node, 'lineno'):
node.lineno = lineno
else:
lineno = node.lineno
if 'col_offset' in node._attributes:
if not hasattr(node, 'col_offset'):
node.col_offset = col_offset
else:
col_offset = node.col_offset
for child in iter_child_nodes(node):
_fix(child, lineno, col_offset)
_fix(node, 1, 0)
return node
def increment_lineno(node, n=1):
"""
Increment the line number of each node in the tree starting at *node* by *n*.
This is useful to "move code" to a different location in a file.
"""
for child in walk(node):
if 'lineno' in child._attributes:
child.lineno = getattr(child, 'lineno', 0) + n
return node
def iter_fields(node):
"""
Yield a tuple of ``(fieldname, value)`` for each field in ``node._fields``
that is present on *node*.
"""
for field in node._fields:
try:
yield field, getattr(node, field)
except AttributeError:
pass
def iter_child_nodes(node):
"""
Yield all direct child nodes of *node*, that is, all fields that are nodes
and all items of fields that are lists of nodes.
"""
for name, field in iter_fields(node):
if isinstance(field, AST):
yield field
elif isinstance(field, list):
for item in field:
if isinstance(item, AST):
yield item
def get_docstring(node, clean=True):
"""
Return the docstring for the given node or None if no docstring can
be found. If the node provided does not have docstrings a TypeError
will be raised.
"""
if not isinstance(node, (FunctionDef, ClassDef, Module)):
raise TypeError("%r can't have docstrings" % node.__class__.__name__)
if node.body and isinstance(node.body[0], Expr) and \
isinstance(node.body[0].value, Str):
if clean:
import inspect
return inspect.cleandoc(node.body[0].value.s)
return node.body[0].value.s
def walk(node):
"""
Recursively yield all descendant nodes in the tree starting at *node*
(including *node* itself), in no specified order. This is useful if you
only want to modify nodes in place and don't care about the context.
"""
from collections import deque
todo = deque([node])
while todo:
node = todo.popleft()
todo.extend(iter_child_nodes(node))
yield node
class NodeVisitor(object):
"""
A node visitor base class that walks the abstract syntax tree and calls a
visitor function for every node found. This function may return a value
which is forwarded by the `visit` method.
This class is meant to be subclassed, with the subclass adding visitor
methods.
Per default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `visit` method. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
Don't use the `NodeVisitor` if you want to apply changes to nodes during
traversing. For this a special visitor exists (`NodeTransformer`) that
allows modifications.
"""
def visit(self, node):
"""Visit a node."""
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
return visitor(node)
def generic_visit(self, node):
"""Called if no explicit visitor function exists for a node."""
for field, value in iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
self.visit(item)
elif isinstance(value, AST):
self.visit(value)
class NodeTransformer(NodeVisitor):
"""
A :class:`NodeVisitor` subclass that walks the abstract syntax tree and
allows modification of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor methods to replace or remove the old node. If the return value of
the visitor method is ``None``, the node will be removed from its location,
otherwise it is replaced with the return value. The return value may be the
original node in which case no replacement takes place.
Here is an example transformer that rewrites all occurrences of name lookups
(``foo``) to ``data['foo']``::
class RewriteName(NodeTransformer):
def visit_Name(self, node):
return copy_location(Subscript(
value=Name(id='data', ctx=Load()),
slice=Index(value=Str(s=node.id, kind='')),
ctx=node.ctx
), node)
Keep in mind that if the node you're operating on has child nodes you must
either transform the child nodes yourself or call the :meth:`generic_visit`
method for the node first.
For nodes that were part of a collection of statements (that applies to all
statement nodes), the visitor may also return a list of nodes rather than
just a single node.
Usually you use the transformer like this::
node = YourTransformer().visit(node)
"""
def generic_visit(self, node):
for field, old_value in iter_fields(node):
old_value = getattr(node, field, None)
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, AST):
value = self.visit(value)
if value is None:
continue
elif not isinstance(value, AST):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, AST):
new_node = self.visit(old_value)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
| [
"[email protected]"
] | |
33c449dc19c9effe47b503bea32359f5b42fb142 | f652cf4e0fa6fbfcca8d94cec5f942fd8bd021a0 | /mbuild/__init__.py | bac5509ac8b9754a7fb64e88b372b76f045f4dc3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Jonestj1/mbuild | 83317ab3a53f40ff6c9c69f6be542b8562602eee | 411cc60d3ef496fa26541bb0b7ea8dcf8c7449e4 | refs/heads/master | 2021-01-20T19:45:11.563610 | 2017-02-13T18:16:12 | 2017-02-13T18:16:12 | 32,886,030 | 0 | 0 | null | 2015-03-25T19:24:50 | 2015-03-25T19:24:50 | null | UTF-8 | Python | false | false | 339 | py | from mbuild.box import Box
from mbuild.coarse_graining import coarse_grain
from mbuild.coordinate_transform import *
from mbuild.compound import *
from mbuild.pattern import *
from mbuild.packing import *
from mbuild.port import Port
from mbuild.recipes import *
from mbuild.formats import *
from mbuild.version import version
| [
"[email protected]"
] | |
ea8f72a37588594d3881ecbe62617f136c7c6869 | a0c782c69420f513bd2d0c0fcea896f732b05cb2 | /account_bank_statement_advanced/res_partner_bank.py | 5f1063e39f9b4a98c855f959b4764797ab92eb80 | [] | no_license | SVQTQ/noviat-apps | 8b5116287681fabcefc5d456786c16c717de54ab | 57ec751ccd4a3e32798ec8851c3501e809c09f91 | refs/heads/8.0 | 2020-04-08T19:55:06.699350 | 2015-07-29T14:34:22 | 2015-07-29T14:34:22 | 32,148,042 | 0 | 0 | null | 2015-07-29T14:34:22 | 2015-03-13T09:45:48 | Gettext Catalog | UTF-8 | Python | false | false | 3,843 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2014-2015 Noviat nv/sa (www.noviat.com).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, api
import logging
_logger = logging.getLogger(__name__)
class res_partner_bank(models.Model):
_inherit = 'res.partner.bank'
def _acc_number_select(self, operator, number):
"""
The code below could be simplified if the Odoo standard
accounting modules would store bank account numbers
in the database in 'normalised' format (without spaces or
other formatting characters (such as '-').
"""
if operator in ['=', '=like', '=ilike']:
op = '='
else: # operator in ['like', 'ilike']
op = 'LIKE'
if len(number) == 12:
"""
Belgium BBAN is always 12 chars and subset of IBAN.
Hence we can retrieve the IBAN from a BBAN lookup.
TODO: extend logic to other countries
"""
select = \
"SELECT id FROM res_partner_bank WHERE " \
"(state='iban' AND SUBSTRING(acc_number FOR 2) = 'BE' AND " \
"REPLACE(acc_number, ' ', '') LIKE '%%'|| '%s' ||'%%' ) " \
% number
# other countries
if op == '=':
select += "OR " \
"REPLACE(REPLACE(acc_number, ' ', ''), '-','') = '%s'" \
% number
else:
select += "OR " \
"REPLACE(REPLACE(acc_number, ' ', ''), '-','') " \
"LIKE '%%'|| '%s' ||'%%' " \
% number
else:
if op == '=':
select = \
"SELECT id FROM res_partner_bank WHERE " \
"REPLACE(REPLACE(acc_number, ' ', ''), '-','') = '%s'" \
% number
else:
select = \
"SELECT id FROM res_partner_bank WHERE " \
"REPLACE(REPLACE(acc_number, ' ', ''), '-','') " \
"LIKE '%%'|| '%s' ||'%%' " \
% number
return select
@api.model
def search(self, args, offset=0, limit=None, order=None, count=False):
# _logger.warn('%s, search, args=%s', self._name, args)
for i, arg in enumerate(args):
if arg[0] == 'acc_number' and \
arg[1] in ['=', '=like', '=ilike', 'like', 'ilike']:
number = arg[2].replace(' ', '').replace('-', '').upper()
select = self._acc_number_select(arg[1], number)
self._cr.execute(select)
res = self._cr.fetchall()
if res:
rpb_ids = [x[0] for x in res]
args[i] = ['id', 'in', rpb_ids]
# _logger.warn('%s, search, args=%s', self._name, args)
return super(res_partner_bank, self).search(
args, offset, limit, order, count=count)
| [
"[email protected]"
] | |
a80777a871d9539635a4e13543e35fe55d86461d | 7f9a73533b3678f0e83dc559dee8a37474e2a289 | /aws-serverless-for-deep-learning-first-steps-workshop/notebooks/deep-learning-inference/PIL/PixarImagePlugin.py | 5ea32ba89be5253e3ad0e8349be1bdcb42bb2494 | [
"MIT"
] | permissive | ryfeus/stepfunctions2processing | 04a5e83ee9b74e029b79a3f19381ba6d9265fc48 | 0b74797402d39f4966cab278d9718bfaec3386c2 | refs/heads/master | 2022-10-08T16:20:55.459175 | 2022-09-09T05:54:47 | 2022-09-09T05:54:47 | 147,448,024 | 128 | 34 | MIT | 2022-01-04T18:56:47 | 2018-09-05T02:26:31 | Python | UTF-8 | Python | false | false | 1,657 | py | #
# The Python Imaging Library.
# $Id$
#
# PIXAR raster support for PIL
#
# history:
# 97-01-29 fl Created
#
# notes:
# This is incomplete; it is based on a few samples created with
# Photoshop 2.5 and 3.0, and a summary description provided by
# Greg Coats <[email protected]>. Hopefully, "L" and
# "RGBA" support will be added in future versions.
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
from . import Image, ImageFile
from ._binary import i16le as i16
#
# helpers
def _accept(prefix):
return prefix[:4] == b"\200\350\000\000"
##
# Image plugin for PIXAR raster images.
class PixarImageFile(ImageFile.ImageFile):
format = "PIXAR"
format_description = "PIXAR raster image"
def _open(self):
# assuming a 4-byte magic label
s = self.fp.read(4)
if s != b"\200\350\000\000":
raise SyntaxError("not a PIXAR file")
# read rest of header
s = s + self.fp.read(508)
self._size = i16(s[418:420]), i16(s[416:418])
# get channel/depth descriptions
mode = i16(s[424:426]), i16(s[426:428])
if mode == (14, 2):
self.mode = "RGB"
# FIXME: to be continued...
# create tile descriptor (assuming "dumped")
self.tile = [("raw", (0, 0) + self.size, 1024, (self.mode, 0, 1))]
#
# --------------------------------------------------------------------
Image.register_open(PixarImageFile.format, PixarImageFile, _accept)
Image.register_extension(PixarImageFile.format, ".pxr")
| [
"[email protected]"
] | |
cbb69a91ee6be1562c57094de5515967c74944d9 | 123d26781801473dc59d8be847dbac79d4b555df | /configs/swin/mask_rcnn_swin_tiny_patch4_window7_mstrain_480-800_adamw_3x_coco_fashion.py | 8c91f897f95d7b08f2b9d121d6a71c791be11601 | [
"Apache-2.0"
] | permissive | jireh-father/CBNetV2 | c2ed5358a81dde7dff3b50614371afe6045553c0 | 0b62f8107d72691a02efb7c92fc6dfcf5d0d0262 | refs/heads/main | 2023-07-17T16:42:55.126767 | 2021-08-31T10:34:15 | 2021-08-31T10:34:15 | 398,468,051 | 0 | 0 | Apache-2.0 | 2021-08-21T04:47:14 | 2021-08-21T04:47:14 | null | UTF-8 | Python | false | false | 3,037 | py | _base_ = [
'../_base_/models/mask_rcnn_swin_fpn_fashion.py',
'../_base_/datasets/coco_instance_fashion.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
embed_dim=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
ape=False,
drop_path_rate=0.2,
patch_norm=True,
use_checkpoint=False
),
neck=dict(in_channels=[96, 192, 384, 768]))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='AutoAugment',
policies=[
[
dict(type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
multiscale_mode='value',
keep_ratio=True)
],
[
dict(type='Resize',
img_scale=[(400, 1333), (500, 1333), (600, 1333)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
multiscale_mode='value',
override=True,
keep_ratio=True)
]
]),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline))
optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
'relative_position_bias_table': dict(decay_mult=0.),
'norm': dict(decay_mult=0.)}))
lr_config = dict(step=[27, 33])
runner = dict(type='EpochBasedRunnerAmp', max_epochs=36)
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=True,
)
| [
"[email protected]"
] | |
8893df317f1c158e4400b4b8eb0be1430d608747 | 130e13bfad23961739613f78a7f3e59c67d28cac | /utility/resnet.py | e5a80a23b027647809fcf951058e2e1ada2ab564 | [
"Apache-2.0"
] | permissive | dashmoment/aic_scene | 981a0faca39b2079203d24a961a491905f0f2ca9 | 3e68c03f032b3979feb17b550a953e5aafc970c1 | refs/heads/master | 2021-07-12T05:47:35.491495 | 2017-09-29T09:53:54 | 2017-09-29T09:53:54 | 103,190,682 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,689 | py |
import skimage.io # bug. need to import this before tensorflow
import skimage.transform # bug. need to import this before tensorflow
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.training import moving_averages
from resnet_config import Config
import datetime
import numpy as np
import os
import time
MOVING_AVERAGE_DECAY = 0.9997
BN_DECAY = MOVING_AVERAGE_DECAY
BN_EPSILON = 0.001
CONV_WEIGHT_DECAY = 0.00004
CONV_WEIGHT_STDDEV = 0.1
FC_WEIGHT_DECAY = 0.00004
FC_WEIGHT_STDDEV = 0.01
RESNET_VARIABLES = 'resnet_variables'
UPDATE_OPS_COLLECTION = 'resnet_update_ops' # must be grouped with training op
IMAGENET_MEAN_BGR = [103.062623801, 115.902882574, 123.151630838, ]
tf.app.flags.DEFINE_integer('input_size', 224, "input image size")
activation = tf.nn.relu
def inference(x, is_training,
num_classes=1000,
num_blocks=[3, 4, 6, 3], # defaults to 50-layer network
use_bias=False, # defaults to using batch norm
bottleneck=True):
c = {}
c['bottleneck'] = bottleneck
c['is_training'] = tf.convert_to_tensor(is_training,
dtype='bool',
name='is_training')
c['ksize'] = 3
c['stride'] = 1
c['use_bias'] = use_bias
c['fc_units_out'] = num_classes
c['num_blocks'] = num_blocks
c['stack_stride'] = 2
with tf.variable_scope('scale1'):
c['conv_filters_out'] = 64
c['ksize'] = 7
c['stride'] = 2
x = conv(x, c)
x = bn(x, c)
x = activation(x)
with tf.variable_scope('scale2'):
x = _max_pool(x, ksize=3, stride=2)
c['num_blocks'] = num_blocks[0]
c['stack_stride'] = 1
c['block_filters_internal'] = 64
x = stack(x, c)
with tf.variable_scope('scale3'):
c['num_blocks'] = num_blocks[1]
c['block_filters_internal'] = 128
        c['stack_stride'] = 2  # scale3 (and the scales after it) halve the spatial resolution
x = stack(x, c)
with tf.variable_scope('scale4'):
c['num_blocks'] = num_blocks[2]
c['block_filters_internal'] = 256
x = stack(x, c)
with tf.variable_scope('scale5'):
c['num_blocks'] = num_blocks[3]
c['block_filters_internal'] = 512
x = stack(x, c)
# post-net
avg_out = tf.reduce_mean(x, reduction_indices=[1, 2], name="avg_pool")
if num_classes != None:
with tf.variable_scope('fc'):
x = fc(avg_out, c)
return x, avg_out
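# Minimal usage sketch (placeholder shape and names are illustrative only):
#   images = tf.placeholder('float', [None, 224, 224, 3])
#   logits, features = inference(images, is_training=True, num_classes=1000)
# 'logits' are the fc outputs, 'features' the globally average-pooled activations.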
# This is what they use for CIFAR-10 and 100.
# See Section 4.2 in http://arxiv.org/abs/1512.03385
def inference_small(x,
is_training,
num_blocks=3, # 6n+2 total weight layers will be used.
use_bias=False, # defaults to using batch norm
num_classes=10):
c = {}
c['is_training'] = tf.convert_to_tensor(is_training,
dtype='bool',
name='is_training')
c['use_bias'] = use_bias
c['fc_units_out'] = num_classes
c['num_blocks'] = num_blocks
c['num_classes'] = num_classes
x, avg_out = inference_small_config(x, c)
return x, avg_out
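# Usage sketch for CIFAR-sized inputs (values are illustrative): num_blocks=3
# gives the 6n+2 = 20 weight-layer variant mentioned above.
#   logits, features = inference_small(images, is_training=True, num_blocks=3, num_classes=10)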
def inference_small_config(x, c):
c['bottleneck'] = False
c['ksize'] = 3
c['stride'] = 1
with tf.variable_scope('scale1'):
c['conv_filters_out'] = 16
c['block_filters_internal'] = 16
c['stack_stride'] = 1
x = conv(x, c)
x = bn(x, c)
x = activation(x)
x = stack(x, c)
with tf.variable_scope('scale2'):
c['block_filters_internal'] = 32
c['stack_stride'] = 2
x = stack(x, c)
with tf.variable_scope('scale3'):
c['block_filters_internal'] = 64
c['stack_stride'] = 2
x = stack(x, c)
# post-net
avg_out = tf.reduce_mean(x, reduction_indices=[1, 2], name="avg_pool")
if c['num_classes'] != None:
with tf.variable_scope('fc'):
x = fc(avg_out, c)
return x, avg_out
def _imagenet_preprocess(rgb):
"""Changes RGB [0,1] valued image to BGR [0,255] with mean subtracted."""
red, green, blue = tf.split(3, 3, rgb * 255.0)
bgr = tf.concat(3, [blue, green, red])
bgr -= IMAGENET_MEAN_BGR
return bgr
def loss(logits, labels):
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, labels)
cross_entropy_mean = tf.reduce_mean(cross_entropy)
regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
loss_ = tf.add_n([cross_entropy_mean] + regularization_losses)
tf.scalar_summary('loss', loss_)
return loss_
def stack(x, c):
for n in range(c['num_blocks']):
s = c['stack_stride'] if n == 0 else 1
c['block_stride'] = s
with tf.variable_scope('block%d' % (n + 1)):
x = block(x, c)
return x
def block(x, c):
filters_in = x.get_shape()[-1]
# Note: filters_out isn't how many filters are outputed.
# That is the case when bottleneck=False but when bottleneck is
# True, filters_internal*4 filters are outputted. filters_internal is how many filters
# the 3x3 convs output internally.
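    # For example, with bottleneck=True and block_filters_internal=64 the internal
    # convs use 64 filters and the block outputs 4 * 64 = 256 feature maps.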
m = 4 if c['bottleneck'] else 1
filters_out = m * c['block_filters_internal']
shortcut = x # branch 1
c['conv_filters_out'] = c['block_filters_internal']
if c['bottleneck']:
with tf.variable_scope('a'):
c['ksize'] = 1
c['stride'] = c['block_stride']
x = conv(x, c)
x = bn(x, c)
x = activation(x)
        with tf.variable_scope('b'):
            c['ksize'] = 3   # middle conv of the bottleneck is 3x3
            c['stride'] = 1  # only the first 1x1 conv carries the block stride
            x = conv(x, c)
            x = bn(x, c)
            x = activation(x)
with tf.variable_scope('c'):
c['conv_filters_out'] = filters_out
c['ksize'] = 1
            c['stride'] = 1  # the closing 1x1 conv never downsamples
x = conv(x, c)
x = bn(x, c)
else:
with tf.variable_scope('A'):
c['stride'] = c['block_stride']
            c['ksize'] = 3  # basic (non-bottleneck) blocks use 3x3 convs
x = conv(x, c)
x = bn(x, c)
x = activation(x)
with tf.variable_scope('B'):
c['conv_filters_out'] = filters_out
            c['ksize'] = 3
c['stride'] = 1
assert c['stride'] == 1
x = conv(x, c)
x = bn(x, c)
with tf.variable_scope('shortcut'):
if filters_out != filters_in or c['block_stride'] != 1:
c['ksize'] = 1
c['stride'] = c['block_stride']
c['conv_filters_out'] = filters_out
shortcut = conv(shortcut, c)
shortcut = bn(shortcut, c)
return activation(x + shortcut)
def bn(x, c):
x_shape = x.get_shape()
params_shape = x_shape[-1:]
if c['use_bias']:
bias = _get_variable('bias', params_shape,
initializer=tf.zeros_initializer())
return x + bias
axis = list(range(len(x_shape) - 1))
beta = _get_variable('beta',
params_shape,
initializer=tf.zeros_initializer())
gamma = _get_variable('gamma',
params_shape,
initializer=tf.ones_initializer())
moving_mean = _get_variable('moving_mean',
params_shape,
initializer=tf.zeros_initializer(),
trainable=False)
moving_variance = _get_variable('moving_variance',
params_shape,
initializer=tf.ones_initializer(),
trainable=False)
# These ops will only be preformed when training.
mean, variance = tf.nn.moments(x, axis)
update_moving_mean = moving_averages.assign_moving_average(moving_mean,
mean, BN_DECAY)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, BN_DECAY)
tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)
mean, variance = control_flow_ops.cond(
c['is_training'], lambda: (mean, variance),
lambda: (moving_mean, moving_variance))
x = tf.nn.batch_normalization(x, mean, variance, beta, gamma, BN_EPSILON)
#x.set_shape(inputs.get_shape()) ??
return x
def fc(x, c):
num_units_in = x.get_shape()[1]
num_units_out = c['fc_units_out']
weights_initializer = tf.truncated_normal_initializer(
stddev=FC_WEIGHT_STDDEV)
weights = _get_variable('weights',
shape=[num_units_in, num_units_out],
initializer=weights_initializer,
weight_decay=FC_WEIGHT_STDDEV)
biases = _get_variable('biases',
shape=[num_units_out],
initializer=tf.zeros_initializer())
x = tf.nn.xw_plus_b(x, weights, biases)
return x
def _get_variable(name,
shape,
initializer,
weight_decay=0.0,
dtype='float',
trainable=True):
"A little wrapper around tf.get_variable to do weight decay and add to"
"resnet collection"
if weight_decay > 0:
regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
else:
regularizer = None
collections = [tf.GraphKeys.VARIABLES, RESNET_VARIABLES]
return tf.get_variable(name,
shape=shape,
initializer=initializer,
dtype=dtype,
regularizer=regularizer,
collections=collections)
def conv(x, c):
ksize = c['ksize']
stride = c['stride']
filters_out = c['conv_filters_out']
filters_in = x.get_shape()[-1]
shape = [ksize, ksize, filters_in, filters_out]
initializer = tf.truncated_normal_initializer(stddev=CONV_WEIGHT_STDDEV)
weights = _get_variable('weights',
shape=shape,
dtype='float',
initializer=initializer,
weight_decay=CONV_WEIGHT_DECAY)
return tf.nn.conv2d(x, weights, [1, stride, stride, 1], padding='SAME')
def _max_pool(x, ksize=3, stride=2):
return tf.nn.max_pool(x,
ksize=[1, ksize, ksize, 1],
strides=[1, stride, stride, 1],
padding='SAME')
| [
"[email protected]"
] | |
f95aaa24aa81978b09838def7e11466bbd207ccd | eb2d683106f2a2ee6d266dd7f8fcdb4f86adb8fb | /datas/PY_gal.py | 965ade949ac6fdc78f2e7b6ed621377afd9b8f38 | [] | no_license | minicloudsky/PythonCode | 2fb001d524c8e59eefa66f476a1119ff27e7df36 | 298a6db80a74a3b5f7d75f6037306e9e06ffa092 | refs/heads/master | 2023-02-07T18:25:04.753928 | 2020-12-20T03:27:07 | 2020-12-20T03:27:07 | 106,422,450 | 0 | 0 | null | 2020-09-26T03:27:31 | 2017-10-10T13:41:28 | Python | UTF-8 | Python | false | false | 31 | py | import pygal
hist = pygal.Bar() | [
"[email protected]"
] | |
4b3aa7a1ee58238dd8a25b2a149447be16633036 | 64267b1f7ca193b0fab949089b86bc7a60e5b859 | /slehome/manage.py | 1cb83a303a492fa808560a2831d6104bd01a8931 | [] | no_license | hongdangodori/slehome | 6a9f2b4526c2783932627b982df0540762570bff | 3e558c78c3943dadf0ec485738a0cc98dea64353 | refs/heads/master | 2021-01-17T12:00:34.221088 | 2015-02-06T13:44:00 | 2015-02-06T13:44:00 | 28,847,585 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "slehome.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
ffd91bb863c0a3f67d1d3ed0d36bcd76c48916d4 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/sieve-big-1270.py | d68c3965a130f64994c9d66a570265f0edd9655d | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,757 | py | # A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A resizable list of integers
class Vector2(object):
items: [int] = None
items2: [int] = None
size: int = 0
size2: int = 0
def __init__(self:"Vector2"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector2") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector2") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector2", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector2", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector2", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector2", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector2", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector2", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector2") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector2") -> int:
return self.size
# A resizable list of integers
class Vector3(object):
items: [int] = None
    items2: [int] = None
items3: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
def __init__(self:"Vector3"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector3") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector3", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector3", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector3", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector3", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector3", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector3", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector3") -> int:
return self.size
# A resizable list of integers
class Vector4(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
def __init__(self:"Vector4"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector4") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector4", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector4", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector4", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector4", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector4", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector4", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector4") -> int:
return self.size
# A resizable list of integers
class Vector5(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
items5: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
size5: int = 0
def __init__(self:"Vector5"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity5(self:"Vector5") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity5(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector5", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector5", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector5", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
item5:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector5", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector5", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector5", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length5(self:"Vector5") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
doubling_limit5:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity5(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
v:Vector = None
v2:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve2(v:Vector, v2:Vector) -> object:
i:int = 0
i2:int = 0
j:int = 0
j2:int = 0
k:int = 0
k2:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
j:int = 0
j2:int = 0
j3:int = 0
k:int = 0
k2:int = 0
k3:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
j5:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
k5:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
| [
"[email protected]"
] | |
76ac2d60a69c2c9463da4ae6c4547c5b867dd6e8 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-network/azure/mgmt/network/v2018_12_01/models/topology_association.py | 7832c4a1904ba784c50b3cf5aae97796eb260dd0 | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 1,586 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TopologyAssociation(Model):
"""Resources that have an association with the parent resource.
:param name: The name of the resource that is associated with the parent
resource.
:type name: str
:param resource_id: The ID of the resource that is associated with the
parent resource.
:type resource_id: str
:param association_type: The association type of the child resource to the
parent resource. Possible values include: 'Associated', 'Contains'
:type association_type: str or
~azure.mgmt.network.v2018_12_01.models.AssociationType
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'association_type': {'key': 'associationType', 'type': 'str'},
}
def __init__(self, **kwargs):
super(TopologyAssociation, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.resource_id = kwargs.get('resource_id', None)
self.association_type = kwargs.get('association_type', None)
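# Illustrative usage sketch (not part of the generated SDK): the model is built
# from keyword arguments matching _attribute_map above; the values below are
# hypothetical.
# association = TopologyAssociation(
#     name='example-association',
#     resource_id='/subscriptions/<sub-id>/resourceGroups/<rg>/...',
#     association_type='Associated',  # or 'Contains'
# )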
| [
"[email protected]"
] | |
e5c53766c994f5d150cd47187531f1339035c92b | 065acd70109d206c4021954e68c960a631a6c5e3 | /shot_detector/utils/collections/sliding_windows/__init__.py | dbbbfbf2e08e76dc001cf31e3d17b64c790ba048 | [] | permissive | w495/python-video-shot-detector | bf2e3cc8175687c73cd01cf89441efc349f58d4d | 617ff45c9c3c96bbd9a975aef15f1b2697282b9c | refs/heads/master | 2022-12-12T02:29:24.771610 | 2017-05-15T00:38:22 | 2017-05-15T00:38:22 | 37,352,923 | 20 | 3 | BSD-3-Clause | 2022-11-22T01:15:45 | 2015-06-13T01:33:27 | Python | UTF-8 | Python | false | false | 347 | py | # -*- coding: utf8 -*-
"""
Different kinds of sliding windows
"""
from __future__ import absolute_import, division, print_function
from .base_sliding_window import BaseSlidingWindow
from .delayed_sliding_window import DelayedSlidingWindow
from .repeated_sliding_window import RepeatedSlidingWindow
from .sliding_window import SlidingWindow
| [
"[email protected]"
] | |
494209f5626eff8613f8403f2084829f49a30c87 | 1554150a9720ebf35cd11c746f69169b595dca10 | /package_package/package/model/fuzzy_number.py | b64b535dfc851ec40ee6a38917dddbbf78b72a3a | [] | no_license | andrewili/shape-grammar-engine | 37a809f8cf78b133f8f1c3f9cf13a7fbbb564713 | 2859d8021442542561bdd1387deebc85e26f2d03 | refs/heads/master | 2021-01-18T22:46:51.221257 | 2016-05-31T21:15:28 | 2016-05-31T21:15:28 | 14,129,359 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,214 | py | import numpy as np
almost_equal = np.allclose
class FuzzyNumber(object):
def __init__(self, number_in):
"""Receives:
number_in num
"""
method_name = '__init__'
try:
if not self._is_a_number(number_in):
raise TypeError
except TypeError:
message = "The argument must be a number"
self.__class__._print_error_message(method_name, message)
else:
self.value = number_in
def _is_a_number(self, x):
"""Receives:
x object
Returns:
value boolean. True if x is an int, a float, an
np.int64, or an np.float64. False otherwise
"""
value = False
if (type(x) == int or
type(x) == float or
type(x) == np.int64 or
type(x) == np.float64
):
value = True
return value
def __eq__(self, other):
return almost_equal(self.value, other.value)
def __ge__(self, other):
return (
almost_equal(self.value, other.value) or
self.value > other.value)
def __gt__(self, other):
if almost_equal(self.value, other.value):
value = False
elif self.value > other.value:
value = True
else:
value = False
return value
def __le__(self, other):
return(
almost_equal(self.value, other.value) or
self.value < other.value)
def __lt__(self, other):
if almost_equal(self.value, other.value):
value = False
elif self.value < other.value:
value = True
else:
value = False
return value
def __ne__(self, other):
return not almost_equal(self.value, other.value)
### utility
@classmethod
def _print_error_message(cls, method_name, message):
print '%s.%s:\n %s' % (cls.__name__, method_name, message)
### represent
def __str__(self):
return str(self.value)
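# Illustrative usage sketch (not part of the original module); comparisons are
# fuzzy because they go through np.allclose:
# a = FuzzyNumber(1.0)
# b = FuzzyNumber(1.0 + 1e-12)
# a == b   # True: the difference is within the allclose tolerance
# a < b    # False: almost-equal values are neither smaller nor greater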
if __name__ == '__main__':
import doctest
doctest.testfile('tests/fuzzy_number_test.txt')
| [
"[email protected]"
] | |
a9297cfbbfe53a5bdce5b575f72cd5880abbafce | 2154d0221e29a86850a1b83e4302f6e3e3f7fa5d | /thread_example/simple_thread_example.py | 6f5917fe97dff2fcaaacbf683f71542762a6a5f6 | [] | no_license | aaqqxx/simple_for_life | 3b8805c6791da6a3a7f42c069dc1ee7d2b8d3649 | 9ad6d61a56216d04250cd89aeaeda63c11942d0a | refs/heads/master | 2020-04-04T09:18:59.396540 | 2015-04-28T11:22:55 | 2015-04-28T11:22:55 | 20,906,518 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | # coding:utf-8
#!/usr/bin/env python
__author__ = 'XingHua'
"""
"""
import time, thread
def timer():
print('hello')
def test():
for i in range(0, 10):
thread.start_new_thread(timer, ())
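# Note: 'thread' is the Python 2 low-level module (renamed '_thread' in Python 3).
# A rough Python 3 equivalent of test() using the higher-level 'threading' module
# could look like this (illustrative sketch only):
# import threading
# def test():
#     for i in range(0, 10):
#         threading.Thread(target=timer).start()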
if __name__ == '__main__':
test()
time.sleep(10) | [
"[email protected]"
] | |
9009efdfe35f109ffbf2ed9bc2895223798d906d | 2f42c6d1fddccc481354148393b9b44165826c04 | /test/FsrAnalysis/PmvTreeMaker_cfg.py | 19afc0d561fe2b2cedabc88728d5afeb0336515a | [] | no_license | senka/MultiBosons | 2ef3dc86e9f0a90f911557638fe79631fef0b3d2 | e8413aa280d85376a2f6dfe300bf52b78b4da437 | refs/heads/master | 2021-03-12T20:15:44.118577 | 2013-11-20T21:21:01 | 2013-11-20T21:21:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,460 | py | import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as VarParsing
import FWCore.ParameterSet.Types as CfgTypes
import PhysicsTools.PythonAnalysis.LumiList as LumiList
## setup 'analysis' options
options = VarParsing.VarParsing ('analysis')
## register customized options
options.register("jsonFile",
"Cert_160404-165542_7TeV_PromptReco_Collisions11_JSON.txt", # default value
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.string, # string, int, or float
"JSON file to be applied."
)
options.register("globalTag",
"GR_R_39X_V6::All", # default value
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.string, # string, int, or float
"Global tag to be used."
)
options.register("isMC",
False, # default value
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.bool, # string, int, or float
"Is this MC?"
)
options.setupTags(tag = "of%d",
ifCond = "totalSections != 0",
tagArg = "totalSections")
options.setupTags(tag = "job%d",
ifCond = "section != 0",
tagArg = "section")
## setup any defaults you want
options.maxEvents = 10 # -1 means all events
#options.inputFiles = ["file:/uscmst1b_scratch/lpc1/3DayLifetime/veverka/mu/VGammaSkim_LyonSyncTest_Dec22ReReco_v2_DimuonSkim_1_of_4.root"]
options.outputFile = "pmvTree.root"
options.jsonFile = "Cert_160404-163869_7TeV_PromptReco_Collisions11_JSON_MuonPhys.txt"
## get and parse the command line arguments
options.parseArguments()
## define the process
process = cms.Process("TEST")
## Load standard sequence for crack corrections
process.load('CalibCalorimetry.EcalTrivialCondModules.EcalTrivialCondRetriever_cfi')
# process.load('Configuration.StandardSequences.Services_cff')
# process.load('Configuration.StandardSequences.MagneticField_38T_cff')
process.load('Configuration.StandardSequences.Geometry_cff')
# process.load('Configuration.StandardSequences.Reconstruction_cff')
# process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
## Global tag
# process.GlobalTag.globaltag = options.globalTag
## Message logger
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(False) )
process.MessageLogger.cerr.FwkReport.reportEvery = 100
## Enable LogInfo
# process.MessageLogger.cerr.INFO.limit = 100
## Enable LogDebug
### Remember to recompile with:
### scramv1 b USER_CXXFLAGS="-g\ -D=EDM_ML_DEBUG"
#process.MessageLogger.debugModules = ["pmvTree"]
#process.MessageLogger.cerr.threshold = "DEBUG"
### Geometry, Detector Conditions and Pythia Decay Tables (needed for the vertexing)
#process.load("Configuration.StandardSequences.Geometry_cff")
#process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
#process.GlobalTag.globaltag = options.globalTag
#process.load("Configuration.StandardSequences.MagneticField_cff")
#process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring() + options.inputFiles
)
# JSON file
if not options.isMC and options.jsonFile != "":
myLumis = \
LumiList.LumiList(filename = options.jsonFile
).getCMSSWString().split(',')
process.source.lumisToProcess = \
CfgTypes.untracked(CfgTypes.VLuminosityBlockRange())
process.source.lumisToProcess.extend(myLumis)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(options.maxEvents)
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string(options.outputFile)
)
from ElectroWeakAnalysis.MultiBosons.Selectors.muonSelector_cfi \
import muonSelection_FsrApr082011_PixelMatchVeto as muonSelection
from ElectroWeakAnalysis.MultiBosons.Selectors.diLeptonSelector_cfi \
import diMuonSelection_Fsr2011Apr11_PixelMatchVeto as diMuonSelection
from ElectroWeakAnalysis.MultiBosons.Selectors.photonSelector_cfi \
import photonSelection_Fsr2011Apr11_PixelMatchVeto as photonSelection
from ElectroWeakAnalysis.MultiBosons.Selectors.ZMuMuGammaSelector_cfi \
import ZMuMuGammaSelection_Fsr2011Apr11_PixelMatchVeto as ZMuMuGammaSelection
process.selectedMuons = cms.EDFilter("VGammaMuonFilter",
filterParams = muonSelection,
src = cms.InputTag("cleanPatMuonsTriggerMatch","","PAT"),
filter = cms.bool(True),
verbosity = cms.untracked.uint32(2)
)
process.goodDiMuons = cms.EDProducer("CandViewShallowClonePtrCombiner",
#process.goodDiMuons = cms.EDProducer("CandViewShallowCloneCombiner",
checkCharge = cms.bool(False),
cut = cms.string("mass > 0"), ## dummy cut
decay = cms.string("selectedMuons selectedMuons"),
roles = cms.vstring("muon1", "muon2")
)
process.selectedDiMuons = cms.EDFilter("VGammaDiLeptonFilter",
filterParams = diMuonSelection,
src = cms.InputTag("goodDiMuons"),
filter = cms.bool(True),
verbosity = cms.untracked.uint32(2)
)
process.selectedPhotons = cms.EDFilter("VGammaPhotonFilter",
filterParams = photonSelection,
src = cms.InputTag("cleanPatPhotonsTriggerMatch"),
filter = cms.bool(True),
verbosity = cms.untracked.uint32(2)
)
#process.vertexedDiMuons = cms.EDProducer("KalmanVertexFitCompositeCandProducer",
#src = cms.InputTag("selectedDiMuons")
#)
process.goodZMuMuGammas = cms.EDProducer("CandViewShallowClonePtrCombiner",
checkCharge = cms.bool(False),
cut = cms.string("mass > 0"), ## dummy cut
decay = cms.string("selectedDiMuons selectedPhotons"),
roles = cms.vstring("dimuon", "photon")
)
process.selectedZMuMuGammas = cms.EDFilter("ZMuMuGammaFilter",
filterParams = ZMuMuGammaSelection,
src = cms.InputTag("goodZMuMuGammas"),
filter = cms.bool(True),
verbosity = cms.untracked.uint32(2)
)
## Loosen the invariant mass window
process.selectedZMuMuGammas.filterParams.minMass = 50
process.selectedZMuMuGammas.filterParams.maxMass = 130
process.selectionSequence = cms.Sequence(
process.selectedMuons *
process.goodDiMuons *
process.selectedDiMuons *
process.selectedPhotons *
#process.vertexedDiMuons *
process.goodZMuMuGammas *
process.selectedZMuMuGammas
)
#process.mmgTree = cms.EDAnalyzer("MuMuGammaTreeMaker",
#photonSrc = cms.untracked.InputTag("selectedPhotons"),
#muonSrc = cms.untracked.InputTag("selectedMuons"),
#dimuonSrc = cms.untracked.InputTag("selectedDiMuons"),
#beamSpotSrc = cms.untracked.InputTag("offlineBeamSpot"),
#primaryVertexSrc = cms.untracked.InputTag("offlinePrimaryVertices"),
#ebClusterSrc = cms.untracked.InputTag("islandBasicClusters", "islandBarrelBasicClusters"),
#ebRecHitsSrc = cms.untracked.InputTag("ecalRecHit", "EcalRecHitsEB"),
#eeRecHitsSrc = cms.untracked.InputTag("ecalRecHit", "EcalRecHitsEE"),
#genParticleSrc = cms.untracked.InputTag("prunedGenParticles"),
#isMC = cms.untracked.bool(False),
#)
process.load("ElectroWeakAnalysis.MultiBosons.FsrAnalysis.PmvTreeMaker_cfi")
process.pmvTree.isMC = options.isMC
## Pileup
if options.isMC:
process.pmvTree.pileupInfoSrc = cms.untracked.InputTag("addPileupInfo")
process.pmvTree.lumiReWeighting = cms.untracked.PSet(
mcDistribution = cms.vdouble(
## from the gamma+jet sample (no filter)
## 21 numbers
# 257141., 295755., 263008., 286909., 282291., 281067.,
# 295777., 297075., 250569., 299795., 256528., 248686.,
# 203484., 137833., 117686., 76877., 62815., 35462.,
# 8381., 10012., 4233.
## from the S4 gamma + jet sample (no filter)
## 51 numbers, use only first 36
# 1.15148e+06, 582849, 629204, 642292, 658930, 666227,
# 668263, 649863, 623035, 588189, 528601, 478063,
# 412804, 351588, 285862, 231776, 181493, 139729,
# 104007, 77262, 55684, 39053, 27132, 18393,
# 12278, 8039, 5393, 3301, 2152, 1321,
# 875, 482, 317, 195, 98, 75,
# 44, 22, 15, 5, 7, 2,
# 0, 1, 0, 0, 0, 0,
# 0, 0, 0,
## In-time Poisson smeared Distribution for Fall 2011 S6 MC
## see https://twiki.cern.ch/twiki/bin/viewauth/CMS/PileupMCReweightingUtilities#Sample_Input_Distributions
0.0145837, 0.025683, 0.0384606, 0.0494145, 0.0569311,
0.0611828, 0.0625346, 0.0614769, 0.0586775, 0.0554499, #10
0.0515491, 0.047621, 0.0439238, 0.0405691, 0.0374147,
0.034227, 0.0314377, 0.0288256, 0.026219, 0.0237271, #20
0.0213656, 0.0191874, 0.0169728, 0.0149206, 0.013039,
0.0112938, 0.00961247, 0.00819356, 0.00688805, 0.00571524, #30
0.00471123, 0.00386993, 0.00315452, 0.00254742, 0.00202471,
0.00157441, 0.00124581, 0.000955206, 0.000735305, 0.000557304, #40
0.000412503, 0.000305502, 0.000231002, 0.000165701, 0.000121201,
9.30006e-05, 6.40004e-05, 4.22003e-05, 2.85002e-05, 1.96001e-05, #50
# 1.59001e-05, 1.01001e-05, 8.50006e-06, 6.60004e-06, 2.70002e-06 #55
),
dataDistribution = cms.vdouble(
## The length has to be exactly the same as for the MC!
## From pileupCalc using the analysis_AN-12-048_HggMVA_2011B.json
##+ This is the intersection of the certified lumi and lumi in files
##+ for the AN-12-048 (Hgg MVA).
##+ https://twiki.cern.ch/twiki/bin/view/CMS/PileupJSONFileforData
270698, 1.92097e+06, 7.37936e+06, 1.97546e+07, 4.12105e+07, 7.15133e+07,
1.07744e+08, 1.45221e+08, 1.78943e+08, 2.04812e+08, 2.20301e+08, 2.24587e+08,
2.18333e+08, 2.033e+08, 1.81904e+08, 1.56775e+08, 1.30395e+08, 1.04822e+08,
8.15498e+07, 6.14711e+07, 4.49426e+07, 3.19021e+07, 2.20071e+07, 1.47669e+07,
9.64664e+06, 6.14038e+06, 3.81158e+06, 2.30915e+06, 1.36638e+06, 790299,
447122, 247617, 134324, 71422.1, 37247.5, 19064.2,
9582.2, 4732.54, 2298.06, 1097.78, 516.18, 239.035,
109.077, 49.0746, 21.7801, 9.54062, 4.127, 1.76384,
0.745197, 0.31138,
## From pileupCalc using the analysis_AN-12-048_HggMVA_2011A.json
##+ This is the intersection of the certified lumi and lumi in files
##+ for the AN-12-048 (Hgg MVA).
##+ https://twiki.cern.ch/twiki/bin/view/CMS/PileupJSONFileforData
# 9.0421e+06, 4.18256e+07, 1.02775e+08, 1.78055e+08, 2.44227e+08, 2.82567e+08,
# 2.86929e+08, 2.62667e+08, 2.20922e+08, 1.73072e+08, 1.27563e+08, 8.91129e+07,
# 5.93253e+07, 3.77909e+07, 2.31054e+07, 1.3591e+07, 7.70586e+06, 4.21808e+06,
# 2.23217e+06, 1.14338e+06, 567552, 273295, 127794, 58085.6,
# 25686.5, 11061.4, 4642.54, 1900.61, 759.561, 296.542,
# 113.18, 42.2575, 15.4443, 5.52873, 1.93966, 0.667283,
# 0.225219, 0.0746155, 0.0242768, 0.00776057, 0.00243853, 0.000753497,
# 0.000229053, 6.8528e-05, 2.01861e-05, 5.85673e-06, 1.67436e-06, 4.71841e-07,
# 1.31119e-07, 3.59441e-08,
## From pileupCalc using the analysis_AN-12-048_HggMVA.json
##+ This is the intersection of the certified lumi and lumi in files
##+ for the AN-12-048 (Hgg MVA).
##+ https://twiki.cern.ch/twiki/bin/view/CMS/PileupJSONFileforData
# 9.31279e+06, 4.37466e+07, 1.10154e+08, 1.9781e+08, 2.85437e+08, 3.5408e+08, # 6
# 3.94674e+08, 4.07888e+08, 3.99865e+08, 3.77884e+08, 3.47865e+08, 3.137e+08, # 12
# 2.77658e+08, 2.41091e+08, 2.05009e+08, 1.70366e+08, 1.38101e+08, 1.0904e+08,# 18
# 8.3782e+07, 6.26145e+07, 4.55102e+07, 3.21754e+07, 2.21349e+07, 1.4825e+07, # 24
# 9.67233e+06, 6.15145e+06, 3.81622e+06, 2.31105e+06, 1.36714e+06, 790595, # 30
# 447235, 247660, 134340, 71427.6, 37249.4, 19064.9, # 36
# 9582.42, 4732.62, 2298.08, 1097.79, 516.183, 239.036, # 42
# 109.078, 49.0747, 21.7801, 9.54063, 4.127, 1.76384, # 48
# 0.745197, 0.31138, # 50
## from https://cms-service-dqm.web.cern.ch/cms-service-dqm/CAF/
##+ certification/Collisions11/7TeV/PileUp/*.pileup_v2.root
##+ Run 2011A and 2011B combined
# 1.34465e+07, 5.90653e+07, 1.40903e+08, 2.41301e+08, 3.33745e+08, 3.98711e+08,
# 4.30106e+08, 4.32283e+08, 4.1382e+08, 3.82846e+08, 3.45164e+08, 3.04344e+08,
# 2.62555e+08, 2.21331e+08, 1.81983e+08, 1.4569e+08, 1.13413e+08, 8.57789e+07,
# 6.30124e+07, 4.49596e+07, 3.1169e+07, 2.10079e+07, 1.37759e+07, 8.79641e+06,
# 5.47442e+06, 3.32378e+06, 1.97064e+06, 1.14204e+06, 647539, 359547,
# 195673, 104460, 54745.2, 28185.6, 28005.5, 0.008,
## Run 2011A only
# 1.29654e+07, 5.58514e+07, 1.29329e+08, 2.12134e+08, 2.76138e+08, 3.03604e+08,
# 2.93258e+08, 2.55633e+08, 2.0497e+08, 1.53264e+08, 1.07936e+08, 7.21006e+07,
# 4.5913e+07, 2.797e+07, 1.63426e+07, 9.17598e+06, 4.95861e+06, 2.58239e+06,
# 1.2977e+06, 629975, 295784, 134470, 59260.1, 25343.9,
# 10530.1, 4255.05, 1673.95, 641.776, 240.022, 87.6504,
# 31.281, 10.9195, 3.73146, 1.24923, 0.602368, 0.008,
## Run 2011B only
# 481142, 3.21393e+06, 1.15733e+07, 2.91676e+07, 5.76072e+07, 9.51074e+07,
# 1.36849e+08, 1.7665e+08, 2.0885e+08, 2.29582e+08, 2.37228e+08, 2.32243e+08,
# 2.16642e+08, 1.93361e+08, 1.6564e+08, 1.36514e+08, 1.08455e+08, 8.31965e+07,
# 6.17147e+07, 4.43296e+07, 3.08733e+07, 2.08734e+07, 1.37166e+07, 8.77106e+06,
# 5.46389e+06, 3.31952e+06, 1.96896e+06, 1.1414e+06, 647299, 359460,
# 195642, 104449, 54741.4, 28184.3, 28004.9, 0,
## from estimatePileupD.py for golden JSON up to run 173244
# 2.66037e+07, 6.20837e+07, 1.28931e+08, 2.00545e+08, 2.5334e+08, 2.73133e+08,
# 2.5988e+08, 2.23527e+08, 1.76897e+08, 1.30515e+08, 9.06582e+07, 5.972e+07,
# 3.75081e+07, 2.2549e+07, 1.30131e+07, 7.2248e+06, 3.86533e+06, 1.99552e+06,
# 995277, 480084, 224189, 101452, 44532.8, 18979.4,
# 7860.96, 3167.1, 1242.31, 474.86, 177.025, 64.4158,
# 22.8974, 7.95686, 2.70506, 0.900305, 0.293541, 0.0938176,
# 0.02941, 0.0090478, 0.00273311, 0.000811054, 0.000236549, 6.78354e-05,
## from estimatePileupD.py for golden JSON for runs 160404-166861
# 1.00826e+07, 1.9655e+07, 4.58762e+07, 7.63478e+07, 9.9728e+07, 1.0842e+08,
# 1.01847e+08, 8.48512e+07, 6.39051e+07, 4.41459e+07, 2.82916e+07, 1.69742e+07,
# 9.60532e+06, 5.15841e+06, 2.64284e+06, 1.29755e+06, 612859, 279413,
# 123331, 52841.1, 22026.7, 8951.4, 3552.86, 1379.43,
# 524.638, 195.694, 71.6639, 25.7868, 9.12372, 3.17583,
# 1.088, 0.36694, 0.121851, 0.0398426, 0.0128274, 0.00406596,
# 0.00126871, 0.000389638, 0.000117757, 3.50154e-05, 1.02425e-05, 2.94689e-06,
# 8.33821e-07, 2.32e-07, 6.34707e-08, 1.7073e-08, 4.51528e-09, 1.17408e-09,
# 3.00169e-10, 2.00066e-07, 0,
## from estimatePileupD.py for golden JSON for runs 136033-166861
# 1.56121e+07, 2.87272e+07, 5.46463e+07, 8.25868e+07, 1.03348e+08, 1.10229e+08,
# 1.02651e+08, 8.51755e+07, 6.40254e+07, 4.41874e+07, 2.8305e+07, 1.69782e+07,
# 9.60647e+06, 5.15872e+06, 2.64292e+06, 1.29757e+06, 612863, 279414,
# 123331, 52841.1, 22026.7,
## from estimatePileupD.py for golden JSON up to run 166502
#3.36441e+06, 6.50754e+06, 1.57837e+07, 2.75468e+07, 3.78054e+07, 4.31307e+07,
#4.2414e+07, 3.68867e+07, 2.8917e+07, 2.07353e+07, 1.37572e+07, 8.52297e+06,
#4.9674e+06, 2.74032e+06, 1.43822e+06, 721206, 346808, 160424,
#71576.4, 30874.3, 12901.2, #5231.58, 2061.91, 790.889,
## from estimatePileupD.py for golden JSON up to run 165542
#4.49103e+06, 7.50711e+06, 1.7013e+07, 2.77526e+07, 3.56721e+07, 3.82648e+07,
#3.55386e+07, 2.93206e+07, 2.18974e+07, 1.50169e+07, 9.56312e+06, 5.70622e+06,
#3.21393e+06, 1.71936e+06, 878374, 430566, 203380, 92934.5,
#41228.6, 17815.2, 7520.35,# 3109.37, 1262.01, 503.739,
#198.015, 76.7276, 29.3217, 11.0527, 4.10876, 1.50569,
#0.543606, 0.193229, 0.0675766, 0.0232364, 0.00785103, 0.0026052,
#0.000848637, 0.000271282, 8.50798e-05, 2.61736e-05, 7.8975e-06, 2.33716e-06,
#6.78371e-07, 1.93133e-07, 5.39384e-08, 1.47793e-08, 3.97367e-09, 1.04856e-09,
#2.71605e-10, 6.92423e-08, 0,
## from estimatePileupD.py for golden JSON up to run 163869
#3.6124e+06, 5.7606e+06, 1.3047e+07, 2.12065e+07, 2.71345e+07, 2.89995e+07,
#2.68765e+07, 2.21641e+07, 1.65695e+07, 1.13875e+07, 7.27332e+06, 4.35533e+06,
#2.46294e+06, 1.32354e+06, 679618, 335115, 159402, 73447,
#32906.5, 14384.3, 6152.9, #2581.8, 1064.77, 432.206,
#172.826, 68.1079, 26.4529, 10.1234, 3.81552, 1.4155,
#0.51655, 0.185307, 0.0653117, 0.0226036, 0.00767821, 0.00255903,
#0.000836568, 0.000268193, 8.43057e-05, 2.59835e-05, 7.85175e-06, 2.32636e-06,
#6.75872e-07, 1.92565e-07, 5.3812e-08, 1.47516e-08, 3.96773e-09, 1.0473e-09,
#2.71346e-10, 5.26651e-08, 0.
)
#mcFile = cms.FileInPath('pudist_G_Pt-15to3000_TuneZ2_Flat_7TeV_pythia6_Summer11.root'),
#mcHist = cms.string('pudist'),
#dataFile = cms.FileInPath('pudist_160404-163869_Cert_JSON.root'),
#dataHist = cms.string('pileup'),
)
process.p = cms.Path(
process.selectionSequence *
process.pmvTree
)
process.options.wantSummary = False
if __name__ == "__main__": import user
| [
"[email protected]"
] | |
3922747c24aeae6863311beb748f65358b035f73 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /edifact/D95A/DIRDEBD95AUN.py | 5415a9c13741232f73c37ae3b49aa4c18660d498 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 4,507 | py | #Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD95AUN import recorddefs
structure = [
{ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGM', MIN: 1, MAX: 1},
{ID: 'DTM', MIN: 1, MAX: 1},
{ID: 'BUS', MIN: 0, MAX: 1},
{ID: 'RFF', MIN: 0, MAX: 2, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 1},
]},
{ID: 'FII', MIN: 0, MAX: 5, LEVEL: [
{ID: 'CTA', MIN: 0, MAX: 1},
{ID: 'COM', MIN: 0, MAX: 5},
]},
{ID: 'NAD', MIN: 0, MAX: 3, LEVEL: [
{ID: 'CTA', MIN: 0, MAX: 1},
{ID: 'COM', MIN: 0, MAX: 5},
]},
{ID: 'LIN', MIN: 1, MAX: 9999, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 1},
{ID: 'RFF', MIN: 0, MAX: 2},
{ID: 'BUS', MIN: 0, MAX: 1},
{ID: 'FCA', MIN: 0, MAX: 1},
{ID: 'MOA', MIN: 0, MAX: 1, LEVEL: [
{ID: 'CUX', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 2},
{ID: 'RFF', MIN: 0, MAX: 1},
]},
{ID: 'FII', MIN: 1, MAX: 1, LEVEL: [
{ID: 'CTA', MIN: 0, MAX: 1},
{ID: 'COM', MIN: 0, MAX: 5},
]},
{ID: 'NAD', MIN: 0, MAX: 3, LEVEL: [
{ID: 'CTA', MIN: 0, MAX: 1},
{ID: 'COM', MIN: 0, MAX: 5},
]},
{ID: 'INP', MIN: 0, MAX: 1, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 2},
]},
{ID: 'GIS', MIN: 0, MAX: 10, LEVEL: [
{ID: 'MOA', MIN: 0, MAX: 1},
{ID: 'LOC', MIN: 0, MAX: 2},
{ID: 'NAD', MIN: 0, MAX: 1},
{ID: 'RCS', MIN: 0, MAX: 1},
{ID: 'FTX', MIN: 0, MAX: 10},
]},
{ID: 'PRC', MIN: 0, MAX: 1, LEVEL: [
{ID: 'FTX', MIN: 1, MAX: 1},
]},
{ID: 'SEQ', MIN: 1, MAX: 9999, LEVEL: [
{ID: 'MOA', MIN: 1, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 1},
{ID: 'RFF', MIN: 0, MAX: 3},
{ID: 'PAI', MIN: 0, MAX: 1},
{ID: 'FCA', MIN: 0, MAX: 1},
{ID: 'FII', MIN: 0, MAX: 3, LEVEL: [
{ID: 'CTA', MIN: 0, MAX: 1},
{ID: 'COM', MIN: 0, MAX: 5},
]},
{ID: 'NAD', MIN: 0, MAX: 3, LEVEL: [
{ID: 'CTA', MIN: 0, MAX: 1},
{ID: 'COM', MIN: 0, MAX: 5},
]},
{ID: 'INP', MIN: 0, MAX: 3, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 2},
]},
{ID: 'GIS', MIN: 0, MAX: 10, LEVEL: [
{ID: 'MOA', MIN: 0, MAX: 1},
{ID: 'LOC', MIN: 0, MAX: 2},
{ID: 'NAD', MIN: 0, MAX: 1},
{ID: 'RCS', MIN: 0, MAX: 1},
{ID: 'FTX', MIN: 0, MAX: 10},
]},
{ID: 'PRC', MIN: 0, MAX: 1, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 5},
{ID: 'DOC', MIN: 0, MAX: 9999, LEVEL: [
{ID: 'MOA', MIN: 0, MAX: 5},
{ID: 'DTM', MIN: 0, MAX: 5},
{ID: 'RFF', MIN: 0, MAX: 5},
{ID: 'NAD', MIN: 0, MAX: 2},
{ID: 'CUX', MIN: 0, MAX: 5, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 1},
]},
{ID: 'AJT', MIN: 0, MAX: 100, LEVEL: [
{ID: 'MOA', MIN: 0, MAX: 1},
{ID: 'RFF', MIN: 0, MAX: 1},
{ID: 'FTX', MIN: 0, MAX: 5},
]},
{ID: 'DLI', MIN: 0, MAX: 1000, LEVEL: [
{ID: 'MOA', MIN: 1, MAX: 5},
{ID: 'PIA', MIN: 0, MAX: 5},
{ID: 'DTM', MIN: 0, MAX: 5},
{ID: 'CUX', MIN: 0, MAX: 5, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 1},
]},
{ID: 'AJT', MIN: 0, MAX: 10, LEVEL: [
{ID: 'MOA', MIN: 1, MAX: 1},
{ID: 'RFF', MIN: 0, MAX: 1},
{ID: 'FTX', MIN: 0, MAX: 5},
]},
]},
]},
{ID: 'GIS', MIN: 0, MAX: 1, LEVEL: [
{ID: 'MOA', MIN: 0, MAX: 5},
]},
]},
]},
]},
{ID: 'CNT', MIN: 0, MAX: 5},
{ID: 'AUT', MIN: 0, MAX: 5, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 1},
]},
{ID: 'UNT', MIN: 1, MAX: 1},
]},
]
| [
"[email protected]"
] | |
bbf4965b241b4632e2d4a349b3869fba4cb9831f | ca23b411c8a046e98f64b81f6cba9e47783d2584 | /poem/core/models_test.py | a0db94e4d47fae13658d07b17e82bce15a3e7851 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | pdybczak/google-research | 1fb370a6aa4820a42a5d417a1915687a00613f9c | 0714e9a5a3934d922c0b9dd017943a8e511eb5bc | refs/heads/master | 2023-03-05T23:16:11.246574 | 2021-01-04T11:30:28 | 2021-01-04T11:30:28 | 326,629,357 | 1 | 0 | Apache-2.0 | 2021-02-01T12:39:09 | 2021-01-04T09:17:36 | Jupyter Notebook | UTF-8 | Python | false | false | 9,686 | py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests model architecture functions."""
import tensorflow.compat.v1 as tf
from poem.core import common
from poem.core import models
tf.disable_v2_behavior()
class ModelsTest(tf.test.TestCase):
def test_simple_model_shapes(self):
# Shape = [4, 2, 3].
input_features = tf.constant([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
[[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]],
[[13.0, 14.0, 15.0], [16.0, 17.0, 18.0]],
[[19.0, 20.0, 21.0], [22.0, 23.0, 24.0]]])
output_sizes = {'a': 8, 'b': [4, 3]}
outputs, activations = models.simple_model(
input_features,
output_sizes,
sequential_inputs=False,
is_training=True,
num_bottleneck_nodes=16)
expected_global_variable_shapes = {
'SimpleModel/InputFC/Linear/weight:0': ([3, 1024]),
'SimpleModel/InputFC/Linear/bias:0': ([1024]),
'SimpleModel/InputFC/BatchNorm/gamma:0': ([1024]),
'SimpleModel/InputFC/BatchNorm/beta:0': ([1024]),
'SimpleModel/InputFC/BatchNorm/moving_mean:0': ([1024]),
'SimpleModel/InputFC/BatchNorm/moving_variance:0': ([1024]),
'SimpleModel/FullyConnectedBlock_0/FC_0/Linear/weight:0': ([1024,
1024]),
'SimpleModel/FullyConnectedBlock_0/FC_0/Linear/bias:0': ([1024]),
'SimpleModel/FullyConnectedBlock_0/FC_0/BatchNorm/gamma:0': ([1024]),
'SimpleModel/FullyConnectedBlock_0/FC_0/BatchNorm/beta:0': ([1024]),
'SimpleModel/FullyConnectedBlock_0/FC_0/BatchNorm/moving_mean:0':
([1024]),
'SimpleModel/FullyConnectedBlock_0/FC_0/BatchNorm/moving_variance:0':
([1024]),
'SimpleModel/FullyConnectedBlock_0/FC_1/Linear/weight:0': ([1024,
1024]),
'SimpleModel/FullyConnectedBlock_0/FC_1/Linear/bias:0': ([1024]),
'SimpleModel/FullyConnectedBlock_0/FC_1/BatchNorm/gamma:0': ([1024]),
'SimpleModel/FullyConnectedBlock_0/FC_1/BatchNorm/beta:0': ([1024]),
'SimpleModel/FullyConnectedBlock_0/FC_1/BatchNorm/moving_mean:0':
([1024]),
'SimpleModel/FullyConnectedBlock_0/FC_1/BatchNorm/moving_variance:0':
([1024]),
'SimpleModel/FullyConnectedBlock_1/FC_0/Linear/weight:0': ([1024,
1024]),
'SimpleModel/FullyConnectedBlock_1/FC_0/Linear/bias:0': ([1024]),
'SimpleModel/FullyConnectedBlock_1/FC_0/BatchNorm/gamma:0': ([1024]),
'SimpleModel/FullyConnectedBlock_1/FC_0/BatchNorm/beta:0': ([1024]),
'SimpleModel/FullyConnectedBlock_1/FC_0/BatchNorm/moving_mean:0':
([1024]),
'SimpleModel/FullyConnectedBlock_1/FC_0/BatchNorm/moving_variance:0':
([1024]),
'SimpleModel/FullyConnectedBlock_1/FC_1/Linear/weight:0': ([1024,
1024]),
'SimpleModel/FullyConnectedBlock_1/FC_1/Linear/bias:0': ([1024]),
'SimpleModel/FullyConnectedBlock_1/FC_1/BatchNorm/gamma:0': ([1024]),
'SimpleModel/FullyConnectedBlock_1/FC_1/BatchNorm/beta:0': ([1024]),
'SimpleModel/FullyConnectedBlock_1/FC_1/BatchNorm/moving_mean:0':
([1024]),
'SimpleModel/FullyConnectedBlock_1/FC_1/BatchNorm/moving_variance:0':
([1024]),
'SimpleModel/BottleneckLogits/weight:0': ([1024, 16]),
'SimpleModel/BottleneckLogits/bias:0': ([16]),
'SimpleModel/OutputLogits/a/weight:0': ([16, 8]),
'SimpleModel/OutputLogits/a/bias:0': ([8]),
'SimpleModel/OutputLogits/b/weight:0': ([16, 12]),
'SimpleModel/OutputLogits/b/bias:0': ([12]),
}
self.assertDictEqual(
{var.name: var.shape.as_list() for var in tf.global_variables()},
expected_global_variable_shapes)
self.assertCountEqual(outputs.keys(), ['a', 'b'])
self.assertAllEqual(outputs['a'].shape.as_list(), [4, 2, 8])
self.assertAllEqual(outputs['b'].shape.as_list(), [4, 2, 4, 3])
self.assertCountEqual(activations.keys(),
['base_activations', 'bottleneck_activations'])
self.assertAllEqual(activations['base_activations'].shape.as_list(),
[4, 2, 1024])
self.assertAllEqual(activations['bottleneck_activations'].shape.as_list(),
[4, 2, 16])
def test_simple_model_forward_pass(self):
input_features = tf.constant([[1.0, 2.0, 3.0]])
output_sizes = {'a': 4}
outputs, activations = models.simple_model(
input_features,
output_sizes,
sequential_inputs=False,
is_training=True,
num_hidden_nodes=2,
weight_initializer=tf.initializers.ones(),
bias_initializer=tf.initializers.zeros(),
weight_max_norm=0.0,
use_batch_norm=False,
dropout_rate=0.0,
num_fcs_per_block=2,
num_fc_blocks=3)
with self.session() as sess:
sess.run(tf.initializers.global_variables())
outputs_result, activations_result = sess.run([outputs, activations])
self.assertCountEqual(outputs_result.keys(), ['a'])
self.assertAllClose(outputs_result['a'], [[1500.0, 1500.0, 1500.0, 1500.0]])
self.assertCountEqual(activations_result.keys(), ['base_activations'])
self.assertAllClose(activations_result['base_activations'],
[[750.0, 750.0]])
def test_get_simple_model(self):
input_features = tf.constant([[1.0, 2.0, 3.0]])
output_sizes = {'a': 4}
model_fn = models.get_model(
base_model_type=common.BASE_MODEL_TYPE_SIMPLE,
is_training=True,
num_hidden_nodes=2,
weight_initializer=tf.initializers.ones(),
bias_initializer=tf.initializers.zeros(),
weight_max_norm=0.0,
use_batch_norm=False,
dropout_rate=0.0,
num_fcs_per_block=2,
num_fc_blocks=3)
outputs, activations = model_fn(input_features, output_sizes)
with self.session() as sess:
sess.run(tf.initializers.global_variables())
outputs_result, activations_result = sess.run([outputs, activations])
self.assertCountEqual(outputs_result.keys(), ['a'])
self.assertAllClose(outputs_result['a'], [[1500.0, 1500.0, 1500.0, 1500.0]])
self.assertCountEqual(activations_result.keys(), ['base_activations'])
self.assertAllClose(activations_result['base_activations'],
[[750.0, 750.0]])
def test_get_simple_point_embedder(self):
# Shape = [4, 2, 3].
input_features = tf.constant([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
[[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]],
[[13.0, 14.0, 15.0], [16.0, 17.0, 18.0]],
[[19.0, 20.0, 21.0], [22.0, 23.0, 24.0]]])
embedder_fn = models.get_embedder(
base_model_type=common.BASE_MODEL_TYPE_SIMPLE,
embedding_type=common.EMBEDDING_TYPE_POINT,
num_embedding_components=3,
embedding_size=16,
is_training=True)
outputs, activations = embedder_fn(input_features)
self.assertCountEqual(outputs.keys(), [common.KEY_EMBEDDING_MEANS])
self.assertAllEqual(outputs[common.KEY_EMBEDDING_MEANS].shape.as_list(),
[4, 2, 3, 16])
self.assertCountEqual(activations.keys(), ['base_activations'])
self.assertAllEqual(activations['base_activations'].shape.as_list(),
[4, 2, 1024])
def test_get_simple_gaussian_embedder(self):
# Shape = [4, 2, 3].
input_features = tf.constant([[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
[[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]],
[[13.0, 14.0, 15.0], [16.0, 17.0, 18.0]],
[[19.0, 20.0, 21.0], [22.0, 23.0, 24.0]]])
embedder_fn = models.get_embedder(
base_model_type=common.BASE_MODEL_TYPE_SIMPLE,
embedding_type=common.EMBEDDING_TYPE_GAUSSIAN,
num_embedding_components=3,
embedding_size=16,
num_embedding_samples=32,
is_training=True,
weight_max_norm=0.0)
outputs, activations = embedder_fn(input_features)
self.assertCountEqual(outputs.keys(), [
common.KEY_EMBEDDING_MEANS,
common.KEY_EMBEDDING_STDDEVS,
common.KEY_EMBEDDING_SAMPLES,
])
self.assertAllEqual(outputs[common.KEY_EMBEDDING_MEANS].shape.as_list(),
[4, 2, 3, 16])
self.assertAllEqual(outputs[common.KEY_EMBEDDING_STDDEVS].shape.as_list(),
[4, 2, 3, 16])
self.assertAllEqual(outputs[common.KEY_EMBEDDING_SAMPLES].shape.as_list(),
[4, 2, 3, 32, 16])
self.assertCountEqual(activations.keys(), ['base_activations'])
self.assertAllEqual(activations['base_activations'].shape.as_list(),
[4, 2, 1024])
if __name__ == '__main__':
tf.test.main()
| [
"[email protected]"
] | |
9f5061630659beed761f1e56fb5a1083b3bb3c3d | 234c46d1249c9209f268417a19018afc12e378b4 | /tests/modules/transformer/activation_layer_test.py | 2af0338a92e9723143c9b963856628980b4971bc | [
"Apache-2.0"
] | permissive | allenai/allennlp | 1f4bcddcb6f5ce60c7ef03a9a3cd6a38bdb987cf | 80fb6061e568cb9d6ab5d45b661e86eb61b92c82 | refs/heads/main | 2023-07-07T11:43:33.781690 | 2022-11-22T00:42:46 | 2022-11-22T00:42:46 | 91,356,408 | 12,257 | 2,712 | Apache-2.0 | 2022-11-22T00:42:47 | 2017-05-15T15:52:41 | Python | UTF-8 | Python | false | false | 804 | py | import torch
import pytest
from allennlp.common import Params
from allennlp.modules.transformer import ActivationLayer
@pytest.fixture
def params_dict():
return {
"hidden_size": 5,
"intermediate_size": 3,
"activation": "relu",
}
@pytest.fixture
def params(params_dict):
return Params(params_dict)
@pytest.fixture
def activation_layer(params):
return ActivationLayer.from_params(params.duplicate())
def test_can_construct_from_params(activation_layer, params_dict):
activation_layer = activation_layer
assert activation_layer.dense.in_features == params_dict["hidden_size"]
assert activation_layer.dense.out_features == params_dict["intermediate_size"]
def test_forward_runs(activation_layer):
activation_layer.forward(torch.randn(7, 5))
| [
"[email protected]"
] | |
074aeca3d97502ed60c27a33d1803a45293f210c | c1ea75db1da4eaa485d39e9d8de480b6ed0ef40f | /app/api/app.py | bafb5b35fd82a3d3b5865aa651d5ecb12186e978 | [
"Apache-2.0"
] | permissive | gasbarroni8/VideoCrawlerEngine | a4f092b0a851dc0487e4dcf4c98b62d6282a6180 | 994933d91d85bb87ae8dfba1295f7a69f6d50097 | refs/heads/master | 2023-04-06T07:59:29.269894 | 2021-02-10T16:09:15 | 2021-02-10T16:09:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,259 | py |
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from fastapi.responses import HTMLResponse
from .routers import include_routers
from app.helper.middleware import include_exception_handler
from helper.conf import get_conf
from .helper import read_html_file
from app.helper.middleware.proxy import ReverseProxyMiddleware
from ..helper.middleware import include_middleware
from urllib.parse import urljoin
import os
app = FastAPI()
conf = get_conf('app')
htmldist = {
'static': os.path.join(conf.html['dist'], 'static'),
'index': os.path.join(conf.html['dist'], 'index.html')
}
app.mount(
'/static',
StaticFiles(directory=htmldist['static']),
name='dist'
)
include_routers(app)
include_exception_handler(app)
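# Reverse-proxy table: requests whose path starts with a 'source' prefix are
# forwarded to the corresponding gateway URL built from the app configuration.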
proxy_pass_configures = [
{
'source': '/api/task/',
'pass': urljoin(
conf.taskflow['gateway'].geturl(),
'/api/v1/task/'
),
}, {
'source': '/api/script/',
'pass': urljoin(
conf.script['gateway'].geturl(),
'/api/v1/script/'
),
}
]
include_middleware(app, ReverseProxyMiddleware(proxy_pass_configures))
@app.get('/')
async def index():
return HTMLResponse(read_html_file(htmldist['index']))
| [
"[email protected]"
] | |
13a95e0835eddd0fa3db784494dd57177d13927b | 8fcdcec1bf0f194d23bba4acd664166a04dc128f | /packages/grid_control_update.py | d21546c84a97777c2b4b0811e15519a89314cefb | [] | no_license | grid-control/grid-control | e51337dd7e5d158644a8da35923443fb0d232bfb | 1f5295cd6114f3f18958be0e0618ff6b35aa16d7 | refs/heads/master | 2022-11-13T13:29:13.226512 | 2021-10-01T14:37:59 | 2021-10-01T14:37:59 | 13,805,261 | 32 | 30 | null | 2023-02-19T16:22:47 | 2013-10-23T14:39:28 | Python | UTF-8 | Python | false | false | 1,227 | py | #!/usr/bin/env python
# | Copyright 2014-2017 Karlsruhe Institute of Technology
# |
# | Licensed under the Apache License, Version 2.0 (the "License");
# | you may not use this file except in compliance with the License.
# | You may obtain a copy of the License at
# |
# | http://www.apache.org/licenses/LICENSE-2.0
# |
# | Unless required by applicable law or agreed to in writing, software
# | distributed under the License is distributed on an "AS IS" BASIS,
# | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# | See the License for the specific language governing permissions and
# | limitations under the License.
import os, sys
def update_plugin_files():
base_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(base_dir)
from hpfwk.hpf_plugin import create_plugin_file
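	# _select skips paths that should not receive a generated plugin file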
def _select(path):
for pat in ['/share', '_compat_', '/requests', '/xmpp']:
if pat in path:
return False
return True
package_list = os.listdir(base_dir)
package_list.sort()
for package in package_list:
package = os.path.abspath(os.path.join(base_dir, package))
if os.path.isdir(package):
create_plugin_file(package, _select)
if __name__ == '__main__':
update_plugin_files()
| [
"[email protected]"
] | |
0817e8833e06cdbb3dc7357bbdcedcc83fb04a46 | 73fcadae6177ab973f1aa3ffe874ac3fadb52312 | /server/fta/utils/i18n.py | 4f91cd88e9509cabeab6ce284564a7f4a93d9ea7 | [
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSL-1.0",
"Apache-2.0"
] | permissive | huang1125677925/fta | 352cd587aaca3d3149516345559d420c41d1caf4 | a50a3c498c39b14e7df4a0a960c2a1499b1ec6bb | refs/heads/master | 2023-03-18T16:08:40.904716 | 2019-02-22T09:35:23 | 2019-02-22T09:35:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,745 | py | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
import logging
import os.path
import arrow
import pytz as tz
from babel import support
from fta.utils.lazy import LazyString
logger = logging.getLogger(__name__)
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class I18N(object):
__metaclass__ = Singleton
def __init__(self):
        # Globally unique; changing this switches the language and timezone
self.cc_biz_id = None
from fta import settings
self.default_locale = settings.DEFAULT_LOCALE
self.default_timezone = settings.DEFAULT_TIMEZONE
self.translations = {}
self.domain = None
def set_biz(self, cc_biz_id):
"""change biz method
"""
self.cc_biz_id = cc_biz_id
@property
def translation_directories(self):
"""翻译文件夹
"""
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
yield os.path.join(BASE_DIR, 'locale')
def locale_best_match(self, locale):
"""兼容不同编码
"""
if locale.lower() in ['zh', 'zh_cn', 'zh-cn']:
return 'zh_Hans_CN'
return 'en'
def get_locale(self):
"""
        Get the language based on the business ID
"""
if not self.cc_biz_id:
return self.default_locale
try:
from project.utils import query_cc
locale = query_cc.get_app_by_id(self.cc_biz_id).get('Language')
if locale:
return self.locale_best_match(locale)
else:
return self.default_locale
except Exception:
return self.default_locale
def get_timezone(self):
try:
timezone = self._get_timezone()
except Exception:
timezone = tz.timezone(self.default_timezone)
return timezone
def _get_timezone(self):
"""
        Get the timezone based on the business ID
"""
if not self.cc_biz_id:
return self.default_timezone
try:
from project.utils import query_cc
timezone = query_cc.get_app_by_id(self.cc_biz_id).get('TimeZone')
if timezone:
return timezone
else:
return self.default_timezone
except Exception:
return self.default_timezone
def get_translations(self):
"""get translation on the fly
"""
locale = self.get_locale()
if locale not in self.translations:
translations = support.Translations()
for dirname in self.translation_directories:
catalog = support.Translations.load(
dirname,
[locale],
self.domain,
)
translations.merge(catalog)
if hasattr(catalog, 'plural'):
translations.plural = catalog.plural
logger.info('load translations, %s=%s', locale, translations)
self.translations[locale] = translations
return self.translations[locale]
i18n = I18N()
def gettext(string, **variables):
"""replace stdlib
"""
t = i18n.get_translations()
if t is None:
return string if not variables else string % variables
s = t.ugettext(string)
return s if not variables else s % variables
def ngettext(singular, plural, n):
t = i18n.get_translations()
if t is None:
return singular
s = t.ngettext(singular, plural, n)
return s
def lazy_gettext(string, **variables):
"""Like :func:`gettext` but the string returned is lazy which means
it will be translated when it is used as an actual string.
Example::
hello = lazy_gettext(u'Hello World')
@app.route('/')
def index():
return unicode(hello)
"""
return LazyString(gettext, string, **variables)
_ = gettext
def arrow_localtime(value, timezone=None):
"""value必须是UTC时间, arrow转换成本地时间
"""
value = arrow.get(value).replace(tzinfo="utc")
if not timezone:
timezone = i18n.get_timezone()
value = value.to(timezone)
return value
def localtime(value, timezone=None):
"""value必须是UTC时间, datetime格式
"""
value = arrow_localtime(value, timezone)
value = value.datetime
return value
def arrow_now():
"""当前时区时间, arrow格式
"""
utcnow = arrow.utcnow()
timezone = i18n.get_timezone()
return utcnow.to(timezone)
def now():
"""当前时间, datetime格式
"""
return arrow_now().datetime
def lazy_join(iterable, word):
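    # Concatenate items with ``word`` using repeated ``+`` instead of str.join(),
    # presumably so that lazy values (e.g. LazyString from lazy_gettext) can be
    # combined without being forced to plain strings up front.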
value = ''
is_first = True
for i in iterable:
if is_first:
value = value + i
is_first = False
else:
value = value + word + i
return value
| [
"[email protected]"
] | |
cef06aa93427891f9e1de15f76de7e4aa063276f | 48ba8d0788e4ac7d4cacd7e7a2e2cf4f391c85ad | /Apple/rectangle_overlap.py | 2fe9ccc6190a3b2c9e19a0c9399b0cd7700fb388 | [] | no_license | rahulvshinde/Python_Playground | c28ac2dc0865e254caa5360c3bb97b4ff5f23b3a | 7a03b765dd440654caba1e06af5b149f584e9f08 | refs/heads/master | 2023-04-19T17:25:55.993837 | 2021-05-17T01:15:30 | 2021-05-17T01:15:30 | 280,736,898 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 886 | py | """
A rectangle is represented as a list [x1, y1, x2, y2], where (x1, y1) are the coordinates of its bottom-left corner,
and (x2, y2) are the coordinates of its top-right corner.
Two rectangles overlap if the area of their intersection is positive. To be clear, two rectangles that only touch at
the corner or edges do not overlap.
Given two (axis-aligned) rectangles, return whether they overlap.
Example 1:
Input: rec1 = [0,0,2,2], rec2 = [1,1,3,3]
Output: true
Example 2:
Input: rec1 = [0,0,1,1], rec2 = [1,0,2,1]
Output: false
Notes:
Both rectangles rec1 and rec2 are lists of 4 integers.
All coordinates in rectangles will be between -10^9 and 10^9.
"""
# rec1 = [0,0,2,2]
# rec2 = [1,1,3,3]
rec1 = [0,0,1,1]
rec2 = [1,0,2,1]
def rectOverlap(rec1, rec2):
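    # Overlap requires a positive-area intersection on both axes: each
    # rectangle's left edge must be strictly left of the other's right edge,
    # and each bottom edge strictly below the other's top edge.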
    return rec1[0] < rec2[2] and rec2[0] < rec1[2] and rec1[1] < rec2[3] and rec2[1] < rec1[3]
print(rectOverlap(rec1, rec2))
| [
"[email protected]"
] | |
fdb935308c84e6e8df3718a147bb41f284314a06 | 5be8b0f2ee392abeee6970e7a6364ac9a5b8ceaa | /xiaojian/forth_phase/Django./day03/exersice/exersice/wsgi.py | 3d56e7a2957ad8662abfa9118725486dff7fda08 | [] | no_license | Wellsjian/20180826 | 424b65f828f0174e4d568131da01dafc2a36050a | 0156ad4db891a2c4b06711748d2624080578620c | refs/heads/master | 2021-06-18T12:16:08.466177 | 2019-09-01T10:06:44 | 2019-09-01T10:06:44 | 204,462,572 | 0 | 1 | null | 2021-04-20T18:26:03 | 2019-08-26T11:38:09 | JavaScript | UTF-8 | Python | false | false | 394 | py | """
WSGI config for exersice project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "exersice.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
8bbb7896a9faa5e12fd9ed8815e374e5c0f9b90b | 61afe17201589a61c39429602ca11e3fdacf47a9 | /Chapter3/Day19/12.异常细分(了解).py | 53a37128647faf14441789776924fc9aa2b738f8 | [] | no_license | Liunrestrained/Python- | ec09315c50b395497dd9b0f83219fef6355e9b21 | 6b2cb4ae74c59820c6eabc4b0e98961ef3b941b2 | refs/heads/main | 2023-07-17T14:16:12.084304 | 2021-08-28T14:05:12 | 2021-08-28T14:05:12 | 399,408,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | import requests
from requests import exceptions
while True:
url = input("下载链接")
try:
res = requests.get(url=url)
print(res)
except exceptions.MissingSchema as e: # 细分处理
print("URL架构不存在")
except exceptions.InvalidSchema as e: # 细分处理
print("URL架构错误")
except exceptions.InvalidURL as e: # 细分处理
print("URL地址格式错误")
except exceptions.ConnectionError as e: # 细分处理
print("网络连接出错")
except Exception as e: # 模糊处理
print("代码出现错误", e)
# # 提示:如果想要写的简单一点,其实只写一个Exception捕获错误就可以了。
| [
"[email protected]"
] | |
ce554e2695eb9840c6d0399b1f782c9eb8d9d10e | d30cb6a597f6a5fad9a01da77594a225daf9a211 | /Lesson 4 - File Handling/project/attempt_1/suffix.py | 34ed3d75c984c7bdfaaf0517676f8b2ac263c7dd | [] | no_license | jmwoloso/Python_2 | 290ef8b0c7db8347fa25cf39da26f39e218d9c68 | 06c45545ed064d0e9c4fd15cc81cf454cb079c9d | refs/heads/master | 2020-04-24T02:18:34.058148 | 2015-08-02T21:02:02 | 2015-08-02T21:02:02 | 37,082,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,197 | py | #!/usr/bin/python3
# A Program Used for Assigning Suffixes
# suffix.py
#
# Created by: Jason Wolosonovich
# 02-24-2015
#
# Lesson 4 - Project 1, Attempt 1
"""
Contains a dict that houses the extensions of many common
file types.
"""
#global file_suffix_dict
file_suffix_dict = {
1 : ".txt",
2 : ".doc",
3 : ".docx",
4 : ".png",
5 : ".jpeg",
6 : ".py",
7 : ".pyc",
8 : ".rtf",
9 : ".log",
10 : ".csv",
11 : ".dat",
12 : ".ppt",
13 : ".tar",
14 : ".tar.gz",
15 : ".mpg",
16 : ".mpeg",
17 : ".mp4",
18 : ".wmv",
19 : ".svg",
20 : ".xls",
21 : ".xlsx",
22 : ".accdb",
23 : ".db",
24 : ".bat",
25 : ".sql",
26 : ".tar.bz2",
27 : ""
}
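# Hypothetical usage sketch (not part of the original script): look up the
# extension for a numeric menu choice, falling back to no suffix.
#
#     extension = file_suffix_dict.get(choice, "")
#     filename = base_name + extension
#
# ``choice`` and ``base_name`` are illustrative names only.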
| [
"[email protected]"
] | |
15ffd68d61b4a460ef95ddadae10b0d714791ef3 | bc233c24523f05708dd1e091dca817f9095e6bb5 | /bitmovin_api_sdk/models/dolby_digital_plus_downmixing_preferred_mode.py | dbe4b9349d8eabb5c12189d531b9a85ed63ac8e2 | [
"MIT"
] | permissive | bitmovin/bitmovin-api-sdk-python | e3d6cf8eb8bdad62cb83ec77c0fc4950b06b9cdd | b0860c0b1be7747cf22ad060985504da625255eb | refs/heads/main | 2023-09-01T15:41:03.628720 | 2023-08-30T10:52:13 | 2023-08-30T10:52:13 | 175,209,828 | 13 | 14 | MIT | 2021-04-29T12:30:31 | 2019-03-12T12:47:18 | Python | UTF-8 | Python | false | false | 268 | py | # coding: utf-8
from enum import Enum
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
class DolbyDigitalPlusDownmixingPreferredMode(Enum):
LO_RO = "LO_RO"
LT_RT = "LT_RT"
PRO_LOGIC_II = "PRO_LOGIC_II"
| [
"[email protected]"
] | |
db4947dd7f21941b4aac995c4fe2285f661d7466 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_noisy1448.py | ae49b0570c58881f82ac7b3f628b829ccd29533b | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,294 | py | # qubit number=5
# total number=51
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[1]) # number=26
prog.cz(input_qubit[4],input_qubit[1]) # number=27
prog.h(input_qubit[1]) # number=28
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[4]) # number=21
prog.h(input_qubit[1]) # number=34
prog.cz(input_qubit[4],input_qubit[1]) # number=35
prog.z(input_qubit[4]) # number=46
prog.rx(0.8011061266653969,input_qubit[2]) # number=37
prog.h(input_qubit[1]) # number=36
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
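    # Grover-style iteration count: roughly (pi / 4) * sqrt(2 ** n) rounds of the
    # oracle followed by the diffusion-like gate sequence built below.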
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=48
prog.cz(input_qubit[1],input_qubit[0]) # number=49
prog.h(input_qubit[0]) # number=50
prog.x(input_qubit[0]) # number=39
prog.cx(input_qubit[1],input_qubit[0]) # number=40
prog.cx(input_qubit[0],input_qubit[1]) # number=42
prog.x(input_qubit[1]) # number=43
prog.cx(input_qubit[0],input_qubit[1]) # number=44
prog.x(input_qubit[2]) # number=11
prog.y(input_qubit[1]) # number=45
prog.x(input_qubit[3]) # number=12
prog.h(input_qubit[2]) # number=41
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.cx(input_qubit[1],input_qubit[0]) # number=22
prog.x(input_qubit[4]) # number=47
prog.x(input_qubit[0]) # number=23
prog.cx(input_qubit[1],input_qubit[0]) # number=24
prog.cx(input_qubit[0],input_qubit[1]) # number=30
prog.x(input_qubit[1]) # number=31
prog.cx(input_qubit[0],input_qubit[1]) # number=32
prog.x(input_qubit[2]) # number=15
prog.h(input_qubit[4]) # number=29
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
backend = FakeVigo()
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_noisy1448.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| [
"[email protected]"
] | |
2646f88f0590dd62b8ba725e67e06c4e9c20406e | b0fce7b572c78ee67ea0e2bd27e2837fffe66891 | /setup.py | 87b23bf3cfa596e1163e4cf81c7292a8ba217f97 | [
"MIT"
] | permissive | ZizhouJia/pyson | 300bd4c68cec3c0a42c5f3135e0447149ca86ebe | ba80336e6ec43456c0d1bf3e71109609b9489181 | refs/heads/master | 2020-08-15T07:55:24.704936 | 2019-11-29T08:46:35 | 2019-11-29T08:46:35 | 215,304,822 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 725 | py | import setuptools
setuptools.setup(
name="pypyson",
version='0.01',
description="A JSON like more powerful object notation for python",
license="MIT License",
author="ZizhouJia",
author_email="[email protected]",
url="http://github.com/ZizhouJia/pyson",
packages=setuptools.find_packages(),
install_requires=["antlr4-python3-runtime"],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Operating System :: OS Independent'
],
data_files=[('pyson/init', ['pyson/init/checker_scheme.pyson'])],
python_requires='>=3.6'
)
| [
"[email protected]"
] | |
1d831cb9cfb9b9f7db96f0499fe3f0d02ab6c4ee | 6302d46032f704aa2c8bb6e2810c19e3bb90c1c4 | /server/netflix_backend/movies_api/migrations/0002_auto_20210219_1954.py | 7ccbe630791c36378645ba0357d4a4f295324d1c | [] | no_license | raghavendra-musubi/netflix-django-rest-react-redux | 304d28f68e13e9962f31593441ae1b7b36743952 | fe78061ccc1c27ff78697cb5f21d92a313b8a7c0 | refs/heads/main | 2023-03-09T21:32:30.409919 | 2021-02-24T19:03:32 | 2021-02-24T19:03:32 | 340,214,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | # Generated by Django 3.1.6 on 2021-02-19 19:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('movies_api', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='movie',
name='category_id',
field=models.PositiveSmallIntegerField(choices=[(1, 'Adventure'), (2, 'Action'), (3, 'Thriller'), (4, 'Horror'), (5, 'Comedy'), (6, 'Musical'), (7, 'Romance'), (8, 'Drama'), (9, 'Fantasy')]),
),
]
| [
"[email protected]"
] | |
353ab642a8ae08763c52fbf98af5efa618985a9d | fb8cbebdf034b2f478943752d5443afc82c6eef5 | /tuirer/venv/lib/python3.6/site-packages/pygments/lexers/int_fiction.py | 1265072250a996322b6fafbc8a93654c2281faf8 | [] | no_license | fariasjr/CitiTuirer | f64e0ec93ef088f8140bb0961d2ad4ed3b59448a | deb3f7a9c2d45b8a7f54639037f097b99abdac11 | refs/heads/master | 2020-03-24T05:10:36.261050 | 2018-08-01T20:24:30 | 2018-08-01T20:24:30 | 142,477,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 55,826 | py | # -*- coding: utf-8 -*-
"""
pygments.lexers.int_fiction
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for interactive fiction languages.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import (RegexLexer, bygroups, default, include, this,
using, words)
from pygments.token import (Comment, Error, Generic, Keyword, Name, Number,
Operator, Punctuation, String, Text)
__all__ = ['Inform6Lexer', 'Inform6TemplateLexer', 'Inform7Lexer',
'Tads3Lexer']
class Inform6Lexer(RegexLexer):
"""
For `Inform 6 <http://inform-fiction.org/>`_ source code.
.. versionadded:: 2.0
"""
name = 'Inform 6'
aliases = ['inform6', 'i6']
filenames = ['*.inf']
flags = re.MULTILINE | re.DOTALL | re.UNICODE
_name = r'[a-zA-Z_]\w*'
# Inform 7 maps these four character classes to their ASCII
# equivalents. To support Inform 6 inclusions within Inform 7,
# Inform6Lexer maps them too.
_dash = u'\\-\u2010-\u2014'
_dquote = u'"\u201c\u201d'
_squote = u"'\u2018\u2019"
_newline = u'\\n\u0085\u2028\u2029'
tokens = {
'root': [
(r'\A(!%%[^%s]*[%s])+' % (_newline, _newline), Comment.Preproc,
'directive'),
default('directive')
],
'_whitespace': [
(r'\s+', Text),
(r'![^%s]*' % _newline, Comment.Single)
],
'default': [
include('_whitespace'),
(r'\[', Punctuation, 'many-values'), # Array initialization
(r':|(?=;)', Punctuation, '#pop'),
(r'<', Punctuation), # Second angle bracket in an action statement
default(('expression', '_expression'))
],
# Expressions
'_expression': [
include('_whitespace'),
(r'(?=sp\b)', Text, '#pop'),
(r'(?=[%s%s$0-9#a-zA-Z_])' % (_dquote, _squote), Text,
('#pop', 'value')),
(r'\+\+|[%s]{1,2}(?!>)|~~?' % _dash, Operator),
(r'(?=[()\[%s,?@{:;])' % _dash, Text, '#pop')
],
'expression': [
include('_whitespace'),
(r'\(', Punctuation, ('expression', '_expression')),
(r'\)', Punctuation, '#pop'),
(r'\[', Punctuation, ('#pop', 'statements', 'locals')),
(r'>(?=(\s+|(![^%s]*))*[>;])' % _newline, Punctuation),
(r'\+\+|[%s]{2}(?!>)' % _dash, Operator),
(r',', Punctuation, '_expression'),
(r'&&?|\|\|?|[=~><]?=|[%s]{1,2}>?|\.\.?[&#]?|::|[<>+*/%%]' % _dash,
Operator, '_expression'),
(r'(has|hasnt|in|notin|ofclass|or|provides)\b', Operator.Word,
'_expression'),
(r'sp\b', Name),
(r'\?~?', Name.Label, 'label?'),
(r'[@{]', Error),
default('#pop')
],
'_assembly-expression': [
(r'\(', Punctuation, ('#push', '_expression')),
(r'[\[\]]', Punctuation),
(r'[%s]>' % _dash, Punctuation, '_expression'),
(r'sp\b', Keyword.Pseudo),
(r';', Punctuation, '#pop:3'),
include('expression')
],
'_for-expression': [
(r'\)', Punctuation, '#pop:2'),
(r':', Punctuation, '#pop'),
include('expression')
],
'_keyword-expression': [
(r'(from|near|to)\b', Keyword, '_expression'),
include('expression')
],
'_list-expression': [
(r',', Punctuation, '#pop'),
include('expression')
],
'_object-expression': [
(r'has\b', Keyword.Declaration, '#pop'),
include('_list-expression')
],
# Values
'value': [
include('_whitespace'),
# Strings
(r'[%s][^@][%s]' % (_squote, _squote), String.Char, '#pop'),
(r'([%s])(@\{[0-9a-fA-F]{1,4}\})([%s])' % (_squote, _squote),
bygroups(String.Char, String.Escape, String.Char), '#pop'),
(r'([%s])(@.{2})([%s])' % (_squote, _squote),
bygroups(String.Char, String.Escape, String.Char), '#pop'),
(r'[%s]' % _squote, String.Single, ('#pop', 'dictionary-word')),
(r'[%s]' % _dquote, String.Double, ('#pop', 'string')),
# Numbers
(r'\$[+%s][0-9]*\.?[0-9]*([eE][+%s]?[0-9]+)?' % (_dash, _dash),
Number.Float, '#pop'),
(r'\$[0-9a-fA-F]+', Number.Hex, '#pop'),
(r'\$\$[01]+', Number.Bin, '#pop'),
(r'[0-9]+', Number.Integer, '#pop'),
# Values prefixed by hashes
(r'(##|#a\$)(%s)' % _name, bygroups(Operator, Name), '#pop'),
(r'(#g\$)(%s)' % _name,
bygroups(Operator, Name.Variable.Global), '#pop'),
(r'#[nw]\$', Operator, ('#pop', 'obsolete-dictionary-word')),
(r'(#r\$)(%s)' % _name, bygroups(Operator, Name.Function), '#pop'),
(r'#', Name.Builtin, ('#pop', 'system-constant')),
# System functions
(words((
'child', 'children', 'elder', 'eldest', 'glk', 'indirect', 'metaclass',
'parent', 'random', 'sibling', 'younger', 'youngest'), suffix=r'\b'),
Name.Builtin, '#pop'),
# Metaclasses
(r'(?i)(Class|Object|Routine|String)\b', Name.Builtin, '#pop'),
# Veneer routines
(words((
'Box__Routine', 'CA__Pr', 'CDefArt', 'CInDefArt', 'Cl__Ms',
'Copy__Primitive', 'CP__Tab', 'DA__Pr', 'DB__Pr', 'DefArt', 'Dynam__String',
'EnglishNumber', 'Glk__Wrap', 'IA__Pr', 'IB__Pr', 'InDefArt', 'Main__',
'Meta__class', 'OB__Move', 'OB__Remove', 'OC__Cl', 'OP__Pr', 'Print__Addr',
'Print__PName', 'PrintShortName', 'RA__Pr', 'RA__Sc', 'RL__Pr', 'R_Process',
'RT__ChG', 'RT__ChGt', 'RT__ChLDB', 'RT__ChLDW', 'RT__ChPR', 'RT__ChPrintA',
'RT__ChPrintC', 'RT__ChPrintO', 'RT__ChPrintS', 'RT__ChPS', 'RT__ChR',
'RT__ChSTB', 'RT__ChSTW', 'RT__ChT', 'RT__Err', 'RT__TrPS', 'RV__Pr',
'Symb__Tab', 'Unsigned__Compare', 'WV__Pr', 'Z__Region'),
prefix='(?i)', suffix=r'\b'),
Name.Builtin, '#pop'),
# Other built-in symbols
(words((
'call', 'copy', 'create', 'DEBUG', 'destroy', 'DICT_CHAR_SIZE',
'DICT_ENTRY_BYTES', 'DICT_IS_UNICODE', 'DICT_WORD_SIZE', 'false',
'FLOAT_INFINITY', 'FLOAT_NAN', 'FLOAT_NINFINITY', 'GOBJFIELD_CHAIN',
'GOBJFIELD_CHILD', 'GOBJFIELD_NAME', 'GOBJFIELD_PARENT',
'GOBJFIELD_PROPTAB', 'GOBJFIELD_SIBLING', 'GOBJ_EXT_START',
'GOBJ_TOTAL_LENGTH', 'Grammar__Version', 'INDIV_PROP_START', 'INFIX',
'infix__watching', 'MODULE_MODE', 'name', 'nothing', 'NUM_ATTR_BYTES', 'print',
'print_to_array', 'recreate', 'remaining', 'self', 'sender', 'STRICT_MODE',
'sw__var', 'sys__glob0', 'sys__glob1', 'sys__glob2', 'sys_statusline_flag',
'TARGET_GLULX', 'TARGET_ZCODE', 'temp__global2', 'temp__global3',
'temp__global4', 'temp_global', 'true', 'USE_MODULES', 'WORDSIZE'),
prefix='(?i)', suffix=r'\b'),
Name.Builtin, '#pop'),
# Other values
(_name, Name, '#pop')
],
# Strings
'dictionary-word': [
(r'[~^]+', String.Escape),
(r'[^~^\\@({%s]+' % _squote, String.Single),
(r'[({]', String.Single),
(r'@\{[0-9a-fA-F]{,4}\}', String.Escape),
(r'@.{2}', String.Escape),
(r'[%s]' % _squote, String.Single, '#pop')
],
'string': [
(r'[~^]+', String.Escape),
(r'[^~^\\@({%s]+' % _dquote, String.Double),
(r'[({]', String.Double),
(r'\\', String.Escape),
(r'@(\\\s*[%s]\s*)*@((\\\s*[%s]\s*)*[0-9])*' %
(_newline, _newline), String.Escape),
(r'@(\\\s*[%s]\s*)*\{((\\\s*[%s]\s*)*[0-9a-fA-F]){,4}'
r'(\\\s*[%s]\s*)*\}' % (_newline, _newline, _newline),
String.Escape),
(r'@(\\\s*[%s]\s*)*.(\\\s*[%s]\s*)*.' % (_newline, _newline),
String.Escape),
(r'[%s]' % _dquote, String.Double, '#pop')
],
'plain-string': [
(r'[^~^\\({\[\]%s]+' % _dquote, String.Double),
(r'[~^({\[\]]', String.Double),
(r'\\', String.Escape),
(r'[%s]' % _dquote, String.Double, '#pop')
],
# Names
'_constant': [
include('_whitespace'),
(_name, Name.Constant, '#pop'),
include('value')
],
'_global': [
include('_whitespace'),
(_name, Name.Variable.Global, '#pop'),
include('value')
],
'label?': [
include('_whitespace'),
(_name, Name.Label, '#pop'),
default('#pop')
],
'variable?': [
include('_whitespace'),
(_name, Name.Variable, '#pop'),
default('#pop')
],
# Values after hashes
'obsolete-dictionary-word': [
(r'\S\w*', String.Other, '#pop')
],
'system-constant': [
include('_whitespace'),
(_name, Name.Builtin, '#pop')
],
# Directives
'directive': [
include('_whitespace'),
(r'#', Punctuation),
(r';', Punctuation, '#pop'),
(r'\[', Punctuation,
('default', 'statements', 'locals', 'routine-name?')),
(words((
'abbreviate', 'endif', 'dictionary', 'ifdef', 'iffalse', 'ifndef', 'ifnot',
'iftrue', 'ifv3', 'ifv5', 'release', 'serial', 'switches', 'system_file',
'version'), prefix='(?i)', suffix=r'\b'),
Keyword, 'default'),
(r'(?i)(array|global)\b', Keyword,
('default', 'directive-keyword?', '_global')),
(r'(?i)attribute\b', Keyword, ('default', 'alias?', '_constant')),
(r'(?i)class\b', Keyword,
('object-body', 'duplicates', 'class-name')),
(r'(?i)(constant|default)\b', Keyword,
('default', 'expression', '_constant')),
(r'(?i)(end\b)(.*)', bygroups(Keyword, Text)),
(r'(?i)(extend|verb)\b', Keyword, 'grammar'),
(r'(?i)fake_action\b', Keyword, ('default', '_constant')),
(r'(?i)import\b', Keyword, 'manifest'),
(r'(?i)(include|link)\b', Keyword,
('default', 'before-plain-string')),
(r'(?i)(lowstring|undef)\b', Keyword, ('default', '_constant')),
(r'(?i)message\b', Keyword, ('default', 'diagnostic')),
(r'(?i)(nearby|object)\b', Keyword,
('object-body', '_object-head')),
(r'(?i)property\b', Keyword,
('default', 'alias?', '_constant', 'property-keyword*')),
(r'(?i)replace\b', Keyword,
('default', 'routine-name?', 'routine-name?')),
(r'(?i)statusline\b', Keyword, ('default', 'directive-keyword?')),
(r'(?i)stub\b', Keyword, ('default', 'routine-name?')),
(r'(?i)trace\b', Keyword,
('default', 'trace-keyword?', 'trace-keyword?')),
(r'(?i)zcharacter\b', Keyword,
('default', 'directive-keyword?', 'directive-keyword?')),
(_name, Name.Class, ('object-body', '_object-head'))
],
# [, Replace, Stub
'routine-name?': [
include('_whitespace'),
(_name, Name.Function, '#pop'),
default('#pop')
],
'locals': [
include('_whitespace'),
(r';', Punctuation, '#pop'),
(r'\*', Punctuation),
(r'"', String.Double, 'plain-string'),
(_name, Name.Variable)
],
# Array
'many-values': [
include('_whitespace'),
(r';', Punctuation),
(r'\]', Punctuation, '#pop'),
(r':', Error),
default(('expression', '_expression'))
],
# Attribute, Property
'alias?': [
include('_whitespace'),
(r'alias\b', Keyword, ('#pop', '_constant')),
default('#pop')
],
# Class, Object, Nearby
'class-name': [
include('_whitespace'),
(r'(?=[,;]|(class|has|private|with)\b)', Text, '#pop'),
(_name, Name.Class, '#pop')
],
'duplicates': [
include('_whitespace'),
(r'\(', Punctuation, ('#pop', 'expression', '_expression')),
default('#pop')
],
'_object-head': [
(r'[%s]>' % _dash, Punctuation),
(r'(class|has|private|with)\b', Keyword.Declaration, '#pop'),
include('_global')
],
'object-body': [
include('_whitespace'),
(r';', Punctuation, '#pop:2'),
(r',', Punctuation),
(r'class\b', Keyword.Declaration, 'class-segment'),
(r'(has|private|with)\b', Keyword.Declaration),
(r':', Error),
default(('_object-expression', '_expression'))
],
'class-segment': [
include('_whitespace'),
(r'(?=[,;]|(class|has|private|with)\b)', Text, '#pop'),
(_name, Name.Class),
default('value')
],
# Extend, Verb
'grammar': [
include('_whitespace'),
(r'=', Punctuation, ('#pop', 'default')),
(r'\*', Punctuation, ('#pop', 'grammar-line')),
default('_directive-keyword')
],
'grammar-line': [
include('_whitespace'),
(r';', Punctuation, '#pop'),
(r'[/*]', Punctuation),
(r'[%s]>' % _dash, Punctuation, 'value'),
(r'(noun|scope)\b', Keyword, '=routine'),
default('_directive-keyword')
],
'=routine': [
include('_whitespace'),
(r'=', Punctuation, 'routine-name?'),
default('#pop')
],
# Import
'manifest': [
include('_whitespace'),
(r';', Punctuation, '#pop'),
(r',', Punctuation),
(r'(?i)global\b', Keyword, '_global'),
default('_global')
],
# Include, Link, Message
'diagnostic': [
include('_whitespace'),
(r'[%s]' % _dquote, String.Double, ('#pop', 'message-string')),
default(('#pop', 'before-plain-string', 'directive-keyword?'))
],
'before-plain-string': [
include('_whitespace'),
(r'[%s]' % _dquote, String.Double, ('#pop', 'plain-string'))
],
'message-string': [
(r'[~^]+', String.Escape),
include('plain-string')
],
# Keywords used in directives
'_directive-keyword!': [
include('_whitespace'),
(words((
'additive', 'alias', 'buffer', 'class', 'creature', 'data', 'error', 'fatalerror',
'first', 'has', 'held', 'initial', 'initstr', 'last', 'long', 'meta', 'multi',
'multiexcept', 'multiheld', 'multiinside', 'noun', 'number', 'only', 'private',
'replace', 'reverse', 'scope', 'score', 'special', 'string', 'table', 'terminating',
'time', 'topic', 'warning', 'with'), suffix=r'\b'),
Keyword, '#pop'),
(r'[%s]{1,2}>|[+=]' % _dash, Punctuation, '#pop')
],
'_directive-keyword': [
include('_directive-keyword!'),
include('value')
],
'directive-keyword?': [
include('_directive-keyword!'),
default('#pop')
],
'property-keyword*': [
include('_whitespace'),
(r'(additive|long)\b', Keyword),
default('#pop')
],
'trace-keyword?': [
include('_whitespace'),
(words((
'assembly', 'dictionary', 'expressions', 'lines', 'linker',
'objects', 'off', 'on', 'symbols', 'tokens', 'verbs'), suffix=r'\b'),
Keyword, '#pop'),
default('#pop')
],
# Statements
'statements': [
include('_whitespace'),
(r'\]', Punctuation, '#pop'),
(r'[;{}]', Punctuation),
(words((
'box', 'break', 'continue', 'default', 'give', 'inversion',
'new_line', 'quit', 'read', 'remove', 'return', 'rfalse', 'rtrue',
'spaces', 'string', 'until'), suffix=r'\b'),
Keyword, 'default'),
(r'(do|else)\b', Keyword),
(r'(font|style)\b', Keyword,
('default', 'miscellaneous-keyword?')),
(r'for\b', Keyword, ('for', '(?')),
(r'(if|switch|while)', Keyword,
('expression', '_expression', '(?')),
(r'(jump|save|restore)\b', Keyword, ('default', 'label?')),
(r'objectloop\b', Keyword,
('_keyword-expression', 'variable?', '(?')),
(r'print(_ret)?\b|(?=[%s])' % _dquote, Keyword, 'print-list'),
(r'\.', Name.Label, 'label?'),
(r'@', Keyword, 'opcode'),
(r'#(?![agrnw]\$|#)', Punctuation, 'directive'),
(r'<', Punctuation, 'default'),
(r'move\b', Keyword,
('default', '_keyword-expression', '_expression')),
default(('default', '_keyword-expression', '_expression'))
],
'miscellaneous-keyword?': [
include('_whitespace'),
(r'(bold|fixed|from|near|off|on|reverse|roman|to|underline)\b',
Keyword, '#pop'),
(r'(a|A|an|address|char|name|number|object|property|string|the|'
r'The)\b(?=(\s+|(![^%s]*))*\))' % _newline, Keyword.Pseudo,
'#pop'),
(r'%s(?=(\s+|(![^%s]*))*\))' % (_name, _newline), Name.Function,
'#pop'),
default('#pop')
],
'(?': [
include('_whitespace'),
(r'\(', Punctuation, '#pop'),
default('#pop')
],
'for': [
include('_whitespace'),
(r';', Punctuation, ('_for-expression', '_expression')),
default(('_for-expression', '_expression'))
],
'print-list': [
include('_whitespace'),
(r';', Punctuation, '#pop'),
(r':', Error),
default(('_list-expression', '_expression', '_list-expression', 'form'))
],
'form': [
include('_whitespace'),
(r'\(', Punctuation, ('#pop', 'miscellaneous-keyword?')),
default('#pop')
],
# Assembly
'opcode': [
include('_whitespace'),
(r'[%s]' % _dquote, String.Double, ('operands', 'plain-string')),
(_name, Keyword, 'operands')
],
'operands': [
(r':', Error),
default(('_assembly-expression', '_expression'))
]
}
def get_tokens_unprocessed(self, text):
# 'in' is either a keyword or an operator.
# If the token two tokens after 'in' is ')', 'in' is a keyword:
# objectloop(a in b)
# Otherwise, it is an operator:
# objectloop(a in b && true)
objectloop_queue = []
objectloop_token_count = -1
previous_token = None
for index, token, value in RegexLexer.get_tokens_unprocessed(self,
text):
if previous_token is Name.Variable and value == 'in':
objectloop_queue = [[index, token, value]]
objectloop_token_count = 2
elif objectloop_token_count > 0:
if token not in Comment and token not in Text:
objectloop_token_count -= 1
objectloop_queue.append((index, token, value))
else:
if objectloop_token_count == 0:
if objectloop_queue[-1][2] == ')':
objectloop_queue[0][1] = Keyword
while objectloop_queue:
yield objectloop_queue.pop(0)
objectloop_token_count = -1
yield index, token, value
if token not in Comment and token not in Text:
previous_token = token
while objectloop_queue:
yield objectloop_queue.pop(0)
class Inform7Lexer(RegexLexer):
"""
For `Inform 7 <http://inform7.com/>`_ source code.
.. versionadded:: 2.0
"""
name = 'Inform 7'
aliases = ['inform7', 'i7']
filenames = ['*.ni', '*.i7x']
flags = re.MULTILINE | re.DOTALL | re.UNICODE
_dash = Inform6Lexer._dash
_dquote = Inform6Lexer._dquote
_newline = Inform6Lexer._newline
_start = r'\A|(?<=[%s])' % _newline
# There are three variants of Inform 7, differing in how to
# interpret at signs and braces in I6T. In top-level inclusions, at
# signs in the first column are inweb syntax. In phrase definitions
# and use options, tokens in braces are treated as I7. Use options
# also interpret "{N}".
tokens = {}
token_variants = ['+i6t-not-inline', '+i6t-inline', '+i6t-use-option']
for level in token_variants:
tokens[level] = {
'+i6-root': list(Inform6Lexer.tokens['root']),
'+i6t-root': [ # For Inform6TemplateLexer
(r'[^%s]*' % Inform6Lexer._newline, Comment.Preproc,
('directive', '+p'))
],
'root': [
(r'(\|?\s)+', Text),
(r'\[', Comment.Multiline, '+comment'),
(r'[%s]' % _dquote, Generic.Heading,
('+main', '+titling', '+titling-string')),
default(('+main', '+heading?'))
],
'+titling-string': [
(r'[^%s]+' % _dquote, Generic.Heading),
(r'[%s]' % _dquote, Generic.Heading, '#pop')
],
'+titling': [
(r'\[', Comment.Multiline, '+comment'),
(r'[^%s.;:|%s]+' % (_dquote, _newline), Generic.Heading),
(r'[%s]' % _dquote, Generic.Heading, '+titling-string'),
(r'[%s]{2}|(?<=[\s%s])\|[\s%s]' % (_newline, _dquote, _dquote),
Text, ('#pop', '+heading?')),
(r'[.;:]|(?<=[\s%s])\|' % _dquote, Text, '#pop'),
(r'[|%s]' % _newline, Generic.Heading)
],
'+main': [
(r'(?i)[^%s:a\[(|%s]+' % (_dquote, _newline), Text),
(r'[%s]' % _dquote, String.Double, '+text'),
(r':', Text, '+phrase-definition'),
(r'(?i)\bas\b', Text, '+use-option'),
(r'\[', Comment.Multiline, '+comment'),
(r'(\([%s])(.*?)([%s]\))' % (_dash, _dash),
bygroups(Punctuation,
using(this, state=('+i6-root', 'directive'),
i6t='+i6t-not-inline'), Punctuation)),
(r'(%s|(?<=[\s;:.%s]))\|\s|[%s]{2,}' %
(_start, _dquote, _newline), Text, '+heading?'),
(r'(?i)[a(|%s]' % _newline, Text)
],
'+phrase-definition': [
(r'\s+', Text),
(r'\[', Comment.Multiline, '+comment'),
(r'(\([%s])(.*?)([%s]\))' % (_dash, _dash),
bygroups(Punctuation,
using(this, state=('+i6-root', 'directive',
'default', 'statements'),
i6t='+i6t-inline'), Punctuation), '#pop'),
default('#pop')
],
'+use-option': [
(r'\s+', Text),
(r'\[', Comment.Multiline, '+comment'),
(r'(\([%s])(.*?)([%s]\))' % (_dash, _dash),
bygroups(Punctuation,
using(this, state=('+i6-root', 'directive'),
i6t='+i6t-use-option'), Punctuation), '#pop'),
default('#pop')
],
'+comment': [
(r'[^\[\]]+', Comment.Multiline),
(r'\[', Comment.Multiline, '#push'),
(r'\]', Comment.Multiline, '#pop')
],
'+text': [
(r'[^\[%s]+' % _dquote, String.Double),
(r'\[.*?\]', String.Interpol),
(r'[%s]' % _dquote, String.Double, '#pop')
],
'+heading?': [
(r'(\|?\s)+', Text),
(r'\[', Comment.Multiline, '+comment'),
(r'[%s]{4}\s+' % _dash, Text, '+documentation-heading'),
(r'[%s]{1,3}' % _dash, Text),
(r'(?i)(volume|book|part|chapter|section)\b[^%s]*' % _newline,
Generic.Heading, '#pop'),
default('#pop')
],
'+documentation-heading': [
(r'\s+', Text),
(r'\[', Comment.Multiline, '+comment'),
(r'(?i)documentation\s+', Text, '+documentation-heading2'),
default('#pop')
],
'+documentation-heading2': [
(r'\s+', Text),
(r'\[', Comment.Multiline, '+comment'),
(r'[%s]{4}\s' % _dash, Text, '+documentation'),
default('#pop:2')
],
'+documentation': [
(r'(?i)(%s)\s*(chapter|example)\s*:[^%s]*' %
(_start, _newline), Generic.Heading),
(r'(?i)(%s)\s*section\s*:[^%s]*' % (_start, _newline),
Generic.Subheading),
(r'((%s)\t.*?[%s])+' % (_start, _newline),
using(this, state='+main')),
(r'[^%s\[]+|[%s\[]' % (_newline, _newline), Text),
(r'\[', Comment.Multiline, '+comment'),
],
'+i6t-not-inline': [
(r'(%s)@c( .*?)?([%s]|\Z)' % (_start, _newline),
Comment.Preproc),
(r'(%s)@([%s]+|Purpose:)[^%s]*' % (_start, _dash, _newline),
Comment.Preproc),
(r'(%s)@p( .*?)?([%s]|\Z)' % (_start, _newline),
Generic.Heading, '+p')
],
'+i6t-use-option': [
include('+i6t-not-inline'),
(r'(\{)(N)(\})', bygroups(Punctuation, Text, Punctuation))
],
'+i6t-inline': [
(r'(\{)(\S[^}]*)?(\})',
bygroups(Punctuation, using(this, state='+main'),
Punctuation))
],
'+i6t': [
(r'(\{[%s])(![^}]*)(\}?)' % _dash,
bygroups(Punctuation, Comment.Single, Punctuation)),
(r'(\{[%s])(lines)(:)([^}]*)(\}?)' % _dash,
bygroups(Punctuation, Keyword, Punctuation, Text,
Punctuation), '+lines'),
(r'(\{[%s])([^:}]*)(:?)([^}]*)(\}?)' % _dash,
bygroups(Punctuation, Keyword, Punctuation, Text,
Punctuation)),
(r'(\(\+)(.*?)(\+\)|\Z)',
bygroups(Punctuation, using(this, state='+main'),
Punctuation))
],
'+p': [
(r'[^@]+', Comment.Preproc),
(r'(%s)@c( .*?)?([%s]|\Z)' % (_start, _newline),
Comment.Preproc, '#pop'),
(r'(%s)@([%s]|Purpose:)' % (_start, _dash), Comment.Preproc),
(r'(%s)@p( .*?)?([%s]|\Z)' % (_start, _newline),
Generic.Heading),
(r'@', Comment.Preproc)
],
'+lines': [
(r'(%s)@c( .*?)?([%s]|\Z)' % (_start, _newline),
Comment.Preproc),
(r'(%s)@([%s]|Purpose:)[^%s]*' % (_start, _dash, _newline),
Comment.Preproc),
(r'(%s)@p( .*?)?([%s]|\Z)' % (_start, _newline),
Generic.Heading, '+p'),
(r'(%s)@\w*[ %s]' % (_start, _newline), Keyword),
(r'![^%s]*' % _newline, Comment.Single),
(r'(\{)([%s]endlines)(\})' % _dash,
bygroups(Punctuation, Keyword, Punctuation), '#pop'),
(r'[^@!{]+?([%s]|\Z)|.' % _newline, Text)
]
}
# Inform 7 can include snippets of Inform 6 template language,
# so all of Inform6Lexer's states are copied here, with
# modifications to account for template syntax. Inform7Lexer's
# own states begin with '+' to avoid name conflicts. Some of
# Inform6Lexer's states begin with '_': these are not modified.
# They deal with template syntax either by including modified
# states, or by matching r'' then pushing to modified states.
for token in Inform6Lexer.tokens:
if token == 'root':
continue
tokens[level][token] = list(Inform6Lexer.tokens[token])
if not token.startswith('_'):
tokens[level][token][:0] = [include('+i6t'), include(level)]
def __init__(self, **options):
level = options.get('i6t', '+i6t-not-inline')
if level not in self._all_tokens:
self._tokens = self.__class__.process_tokendef(level)
else:
self._tokens = self._all_tokens[level]
RegexLexer.__init__(self, **options)
class Inform6TemplateLexer(Inform7Lexer):
"""
For `Inform 6 template
<http://inform7.com/sources/src/i6template/Woven/index.html>`_ code.
.. versionadded:: 2.0
"""
name = 'Inform 6 template'
aliases = ['i6t']
filenames = ['*.i6t']
def get_tokens_unprocessed(self, text, stack=('+i6t-root',)):
return Inform7Lexer.get_tokens_unprocessed(self, text, stack)
class Tads3Lexer(RegexLexer):
"""
For `TADS 3 <http://www.tads.org/>`_ source code.
"""
name = 'TADS 3'
aliases = ['tads3']
filenames = ['*.t']
flags = re.DOTALL | re.MULTILINE
_comment_single = r'(?://(?:[^\\\n]|\\+[\w\W])*$)'
_comment_multiline = r'(?:/\*(?:[^*]|\*(?!/))*\*/)'
_escape = (r'(?:\\(?:[\n\\<>"\'^v bnrt]|u[\da-fA-F]{,4}|x[\da-fA-F]{,2}|'
r'[0-3]?[0-7]{1,2}))')
_name = r'(?:[_a-zA-Z]\w*)'
_no_quote = r'(?=\s|\\?>)'
_operator = (r'(?:&&|\|\||\+\+|--|\?\?|::|[.,@\[\]~]|'
r'(?:[=+\-*/%!&|^]|<<?|>>?>?)=?)')
_ws = r'(?:\\|\s|%s|%s)' % (_comment_single, _comment_multiline)
_ws_pp = r'(?:\\\n|[^\S\n]|%s|%s)' % (_comment_single, _comment_multiline)
def _make_string_state(triple, double, verbatim=None, _escape=_escape):
if verbatim:
verbatim = ''.join(['(?:%s|%s)' % (re.escape(c.lower()),
re.escape(c.upper()))
for c in verbatim])
char = r'"' if double else r"'"
token = String.Double if double else String.Single
escaped_quotes = r'+|%s(?!%s{2})' % (char, char) if triple else r''
prefix = '%s%s' % ('t' if triple else '', 'd' if double else 's')
tag_state_name = '%sqt' % prefix
state = []
if triple:
state += [
(r'%s{3,}' % char, token, '#pop'),
(r'\\%s+' % char, String.Escape),
(char, token)
]
else:
state.append((char, token, '#pop'))
state += [
include('s/verbatim'),
(r'[^\\<&{}%s]+' % char, token)
]
if verbatim:
# This regex can't use `(?i)` because escape sequences are
# case-sensitive. `<\XMP>` works; `<\xmp>` doesn't.
state.append((r'\\?<(/|\\\\|(?!%s)\\)%s(?=[\s=>])' %
(_escape, verbatim),
Name.Tag, ('#pop', '%sqs' % prefix, tag_state_name)))
else:
state += [
(r'\\?<!([^><\\%s]|<(?!<)|\\%s%s|%s|\\.)*>?' %
(char, char, escaped_quotes, _escape), Comment.Multiline),
(r'(?i)\\?<listing(?=[\s=>]|\\>)', Name.Tag,
('#pop', '%sqs/listing' % prefix, tag_state_name)),
(r'(?i)\\?<xmp(?=[\s=>]|\\>)', Name.Tag,
('#pop', '%sqs/xmp' % prefix, tag_state_name)),
(r'\\?<([^\s=><\\%s]|<(?!<)|\\%s%s|%s|\\.)*' %
(char, char, escaped_quotes, _escape), Name.Tag,
tag_state_name),
include('s/entity')
]
state += [
include('s/escape'),
(r'\{([^}<\\%s]|<(?!<)|\\%s%s|%s|\\.)*\}' %
(char, char, escaped_quotes, _escape), String.Interpol),
(r'[\\&{}<]', token)
]
return state
def _make_tag_state(triple, double, _escape=_escape):
char = r'"' if double else r"'"
quantifier = r'{3,}' if triple else r''
state_name = '%s%sqt' % ('t' if triple else '', 'd' if double else 's')
token = String.Double if double else String.Single
escaped_quotes = r'+|%s(?!%s{2})' % (char, char) if triple else r''
return [
(r'%s%s' % (char, quantifier), token, '#pop:2'),
(r'(\s|\\\n)+', Text),
(r'(=)(\\?")', bygroups(Punctuation, String.Double),
'dqs/%s' % state_name),
(r"(=)(\\?')", bygroups(Punctuation, String.Single),
'sqs/%s' % state_name),
(r'=', Punctuation, 'uqs/%s' % state_name),
(r'\\?>', Name.Tag, '#pop'),
(r'\{([^}<\\%s]|<(?!<)|\\%s%s|%s|\\.)*\}' %
(char, char, escaped_quotes, _escape), String.Interpol),
(r'([^\s=><\\%s]|<(?!<)|\\%s%s|%s|\\.)+' %
(char, char, escaped_quotes, _escape), Name.Attribute),
include('s/escape'),
include('s/verbatim'),
include('s/entity'),
(r'[\\{}&]', Name.Attribute)
]
def _make_attribute_value_state(terminator, host_triple, host_double,
_escape=_escape):
token = (String.Double if terminator == r'"' else
String.Single if terminator == r"'" else String.Other)
host_char = r'"' if host_double else r"'"
host_quantifier = r'{3,}' if host_triple else r''
host_token = String.Double if host_double else String.Single
escaped_quotes = (r'+|%s(?!%s{2})' % (host_char, host_char)
if host_triple else r'')
return [
(r'%s%s' % (host_char, host_quantifier), host_token, '#pop:3'),
(r'%s%s' % (r'' if token is String.Other else r'\\?', terminator),
token, '#pop'),
include('s/verbatim'),
include('s/entity'),
(r'\{([^}<\\%s]|<(?!<)|\\%s%s|%s|\\.)*\}' %
(host_char, host_char, escaped_quotes, _escape), String.Interpol),
(r'([^\s"\'<%s{}\\&])+' % (r'>' if token is String.Other else r''),
token),
include('s/escape'),
(r'["\'\s&{<}\\]', token)
]
tokens = {
'root': [
(u'\ufeff', Text),
(r'\{', Punctuation, 'object-body'),
(r';+', Punctuation),
(r'(?=(argcount|break|case|catch|continue|default|definingobj|'
r'delegated|do|else|for|foreach|finally|goto|if|inherited|'
r'invokee|local|nil|new|operator|replaced|return|self|switch|'
r'targetobj|targetprop|throw|true|try|while)\b)', Text, 'block'),
(r'(%s)(%s*)(\()' % (_name, _ws),
bygroups(Name.Function, using(this, state='whitespace'),
Punctuation),
('block?/root', 'more/parameters', 'main/parameters')),
include('whitespace'),
(r'\++', Punctuation),
(r'[^\s!"%-(*->@-_a-z{-~]+', Error), # Averts an infinite loop
(r'(?!\Z)', Text, 'main/root')
],
'main/root': [
include('main/basic'),
default(('#pop', 'object-body/no-braces', 'classes', 'class'))
],
'object-body/no-braces': [
(r';', Punctuation, '#pop'),
(r'\{', Punctuation, ('#pop', 'object-body')),
include('object-body')
],
'object-body': [
(r';', Punctuation),
(r'\{', Punctuation, '#push'),
(r'\}', Punctuation, '#pop'),
(r':', Punctuation, ('classes', 'class')),
(r'(%s?)(%s*)(\()' % (_name, _ws),
bygroups(Name.Function, using(this, state='whitespace'),
Punctuation),
('block?', 'more/parameters', 'main/parameters')),
(r'(%s)(%s*)(\{)' % (_name, _ws),
bygroups(Name.Function, using(this, state='whitespace'),
Punctuation), 'block'),
(r'(%s)(%s*)(:)' % (_name, _ws),
bygroups(Name.Variable, using(this, state='whitespace'),
Punctuation),
('object-body/no-braces', 'classes', 'class')),
include('whitespace'),
(r'->|%s' % _operator, Punctuation, 'main'),
default('main/object-body')
],
'main/object-body': [
include('main/basic'),
(r'(%s)(%s*)(=?)' % (_name, _ws),
bygroups(Name.Variable, using(this, state='whitespace'),
Punctuation), ('#pop', 'more', 'main')),
default('#pop:2')
],
'block?/root': [
(r'\{', Punctuation, ('#pop', 'block')),
include('whitespace'),
(r'(?=[[\'"<(:])', Text, # It might be a VerbRule macro.
('#pop', 'object-body/no-braces', 'grammar', 'grammar-rules')),
# It might be a macro like DefineAction.
default(('#pop', 'object-body/no-braces'))
],
'block?': [
(r'\{', Punctuation, ('#pop', 'block')),
include('whitespace'),
default('#pop')
],
'block/basic': [
(r'[;:]+', Punctuation),
(r'\{', Punctuation, '#push'),
(r'\}', Punctuation, '#pop'),
(r'default\b', Keyword.Reserved),
(r'(%s)(%s*)(:)' % (_name, _ws),
bygroups(Name.Label, using(this, state='whitespace'),
Punctuation)),
include('whitespace')
],
'block': [
include('block/basic'),
(r'(?!\Z)', Text, ('more', 'main'))
],
'block/embed': [
(r'>>', String.Interpol, '#pop'),
include('block/basic'),
(r'(?!\Z)', Text, ('more/embed', 'main'))
],
'main/basic': [
include('whitespace'),
(r'\(', Punctuation, ('#pop', 'more', 'main')),
(r'\[', Punctuation, ('#pop', 'more/list', 'main')),
(r'\{', Punctuation, ('#pop', 'more/inner', 'main/inner',
'more/parameters', 'main/parameters')),
(r'\*|\.{3}', Punctuation, '#pop'),
(r'(?i)0x[\da-f]+', Number.Hex, '#pop'),
(r'(\d+\.(?!\.)\d*|\.\d+)([eE][-+]?\d+)?|\d+[eE][-+]?\d+',
Number.Float, '#pop'),
(r'0[0-7]+', Number.Oct, '#pop'),
(r'\d+', Number.Integer, '#pop'),
(r'"""', String.Double, ('#pop', 'tdqs')),
(r"'''", String.Single, ('#pop', 'tsqs')),
(r'"', String.Double, ('#pop', 'dqs')),
(r"'", String.Single, ('#pop', 'sqs')),
(r'R"""', String.Regex, ('#pop', 'tdqr')),
(r"R'''", String.Regex, ('#pop', 'tsqr')),
(r'R"', String.Regex, ('#pop', 'dqr')),
(r"R'", String.Regex, ('#pop', 'sqr')),
# Two-token keywords
(r'(extern)(%s+)(object\b)' % _ws,
bygroups(Keyword.Reserved, using(this, state='whitespace'),
Keyword.Reserved)),
(r'(function|method)(%s*)(\()' % _ws,
bygroups(Keyword.Reserved, using(this, state='whitespace'),
Punctuation),
('#pop', 'block?', 'more/parameters', 'main/parameters')),
(r'(modify)(%s+)(grammar\b)' % _ws,
bygroups(Keyword.Reserved, using(this, state='whitespace'),
Keyword.Reserved),
('#pop', 'object-body/no-braces', ':', 'grammar')),
(r'(new)(%s+(?=(?:function|method)\b))' % _ws,
bygroups(Keyword.Reserved, using(this, state='whitespace'))),
(r'(object)(%s+)(template\b)' % _ws,
bygroups(Keyword.Reserved, using(this, state='whitespace'),
Keyword.Reserved), ('#pop', 'template')),
(r'(string)(%s+)(template\b)' % _ws,
bygroups(Keyword, using(this, state='whitespace'),
Keyword.Reserved), ('#pop', 'function-name')),
# Keywords
(r'(argcount|definingobj|invokee|replaced|targetobj|targetprop)\b',
Name.Builtin, '#pop'),
(r'(break|continue|goto)\b', Keyword.Reserved, ('#pop', 'label')),
(r'(case|extern|if|intrinsic|return|static|while)\b',
Keyword.Reserved),
(r'catch\b', Keyword.Reserved, ('#pop', 'catch')),
(r'class\b', Keyword.Reserved,
('#pop', 'object-body/no-braces', 'class')),
(r'(default|do|else|finally|try)\b', Keyword.Reserved, '#pop'),
(r'(dictionary|property)\b', Keyword.Reserved,
('#pop', 'constants')),
(r'enum\b', Keyword.Reserved, ('#pop', 'enum')),
(r'export\b', Keyword.Reserved, ('#pop', 'main')),
(r'(for|foreach)\b', Keyword.Reserved,
('#pop', 'more/inner', 'main/inner')),
(r'(function|method)\b', Keyword.Reserved,
('#pop', 'block?', 'function-name')),
(r'grammar\b', Keyword.Reserved,
('#pop', 'object-body/no-braces', 'grammar')),
(r'inherited\b', Keyword.Reserved, ('#pop', 'inherited')),
(r'local\b', Keyword.Reserved,
('#pop', 'more/local', 'main/local')),
(r'(modify|replace|switch|throw|transient)\b', Keyword.Reserved,
'#pop'),
(r'new\b', Keyword.Reserved, ('#pop', 'class')),
(r'(nil|true)\b', Keyword.Constant, '#pop'),
(r'object\b', Keyword.Reserved, ('#pop', 'object-body/no-braces')),
(r'operator\b', Keyword.Reserved, ('#pop', 'operator')),
(r'propertyset\b', Keyword.Reserved,
('#pop', 'propertyset', 'main')),
(r'self\b', Name.Builtin.Pseudo, '#pop'),
(r'template\b', Keyword.Reserved, ('#pop', 'template')),
# Operators
(r'(__objref|defined)(%s*)(\()' % _ws,
bygroups(Operator.Word, using(this, state='whitespace'),
Operator), ('#pop', 'more/__objref', 'main')),
(r'delegated\b', Operator.Word),
# Compiler-defined macros and built-in properties
(r'(__DATE__|__DEBUG|__LINE__|__FILE__|'
r'__TADS_MACRO_FORMAT_VERSION|__TADS_SYS_\w*|__TADS_SYSTEM_NAME|'
r'__TADS_VERSION_MAJOR|__TADS_VERSION_MINOR|__TADS3|__TIME__|'
r'construct|finalize|grammarInfo|grammarTag|lexicalParent|'
r'miscVocab|sourceTextGroup|sourceTextGroupName|'
r'sourceTextGroupOrder|sourceTextOrder)\b', Name.Builtin, '#pop')
],
'main': [
include('main/basic'),
(_name, Name, '#pop'),
default('#pop')
],
'more/basic': [
(r'\(', Punctuation, ('more/list', 'main')),
(r'\[', Punctuation, ('more', 'main')),
(r'\.{3}', Punctuation),
(r'->|\.\.', Punctuation, 'main'),
(r'(?=;)|[:)\]]', Punctuation, '#pop'),
include('whitespace'),
(_operator, Operator, 'main'),
(r'\?', Operator, ('main', 'more/conditional', 'main')),
(r'(is|not)(%s+)(in\b)' % _ws,
bygroups(Operator.Word, using(this, state='whitespace'),
Operator.Word)),
(r'[^\s!"%-_a-z{-~]+', Error) # Averts an infinite loop
],
'more': [
include('more/basic'),
default('#pop')
],
# Then expression (conditional operator)
'more/conditional': [
(r':(?!:)', Operator, '#pop'),
include('more')
],
# Embedded expressions
'more/embed': [
(r'>>', String.Interpol, '#pop:2'),
include('more')
],
# For/foreach loop initializer or short-form anonymous function
'main/inner': [
(r'\(', Punctuation, ('#pop', 'more/inner', 'main/inner')),
(r'local\b', Keyword.Reserved, ('#pop', 'main/local')),
include('main')
],
'more/inner': [
(r'\}', Punctuation, '#pop'),
(r',', Punctuation, 'main/inner'),
(r'(in|step)\b', Keyword, 'main/inner'),
include('more')
],
# Local
'main/local': [
(_name, Name.Variable, '#pop'),
include('whitespace')
],
'more/local': [
(r',', Punctuation, 'main/local'),
include('more')
],
# List
'more/list': [
(r'[,:]', Punctuation, 'main'),
include('more')
],
# Parameter list
'main/parameters': [
(r'(%s)(%s*)(?=:)' % (_name, _ws),
bygroups(Name.Variable, using(this, state='whitespace')), '#pop'),
(r'(%s)(%s+)(%s)' % (_name, _ws, _name),
bygroups(Name.Class, using(this, state='whitespace'),
Name.Variable), '#pop'),
(r'\[+', Punctuation),
include('main/basic'),
(_name, Name.Variable, '#pop'),
default('#pop')
],
'more/parameters': [
(r'(:)(%s*(?=[?=,:)]))' % _ws,
bygroups(Punctuation, using(this, state='whitespace'))),
(r'[?\]]+', Punctuation),
(r'[:)]', Punctuation, ('#pop', 'multimethod?')),
(r',', Punctuation, 'main/parameters'),
(r'=', Punctuation, ('more/parameter', 'main')),
include('more')
],
'more/parameter': [
(r'(?=[,)])', Text, '#pop'),
include('more')
],
'multimethod?': [
(r'multimethod\b', Keyword, '#pop'),
include('whitespace'),
default('#pop')
],
# Statements and expressions
'more/__objref': [
(r',', Punctuation, 'mode'),
(r'\)', Operator, '#pop'),
include('more')
],
'mode': [
(r'(error|warn)\b', Keyword, '#pop'),
include('whitespace')
],
'catch': [
(r'\(+', Punctuation),
(_name, Name.Exception, ('#pop', 'variables')),
include('whitespace')
],
'enum': [
include('whitespace'),
(r'token\b', Keyword, ('#pop', 'constants')),
default(('#pop', 'constants'))
],
'grammar': [
(r'\)+', Punctuation),
(r'\(', Punctuation, 'grammar-tag'),
(r':', Punctuation, 'grammar-rules'),
(_name, Name.Class),
include('whitespace')
],
'grammar-tag': [
include('whitespace'),
(r'"""([^\\"<]|""?(?!")|\\"+|\\.|<(?!<))+("{3,}|<<)|'
r'R"""([^\\"]|""?(?!")|\\"+|\\.)+"{3,}|'
r"'''([^\\'<]|''?(?!')|\\'+|\\.|<(?!<))+('{3,}|<<)|"
r"R'''([^\\']|''?(?!')|\\'+|\\.)+'{3,}|"
r'"([^\\"<]|\\.|<(?!<))+("|<<)|R"([^\\"]|\\.)+"|'
r"'([^\\'<]|\\.|<(?!<))+('|<<)|R'([^\\']|\\.)+'|"
r"([^)\s\\/]|/(?![/*]))+|\)", String.Other, '#pop')
],
'grammar-rules': [
include('string'),
include('whitespace'),
(r'(\[)(%s*)(badness)' % _ws,
bygroups(Punctuation, using(this, state='whitespace'), Keyword),
'main'),
(r'->|%s|[()]' % _operator, Punctuation),
(_name, Name.Constant),
default('#pop:2')
],
':': [
(r':', Punctuation, '#pop')
],
'function-name': [
(r'(<<([^>]|>>>|>(?!>))*>>)+', String.Interpol),
(r'(?=%s?%s*[({])' % (_name, _ws), Text, '#pop'),
(_name, Name.Function, '#pop'),
include('whitespace')
],
'inherited': [
(r'<', Punctuation, ('#pop', 'classes', 'class')),
include('whitespace'),
(_name, Name.Class, '#pop'),
default('#pop')
],
'operator': [
(r'negate\b', Operator.Word, '#pop'),
include('whitespace'),
(_operator, Operator),
default('#pop')
],
'propertyset': [
(r'\(', Punctuation, ('more/parameters', 'main/parameters')),
(r'\{', Punctuation, ('#pop', 'object-body')),
include('whitespace')
],
'template': [
(r'(?=;)', Text, '#pop'),
include('string'),
(r'inherited\b', Keyword.Reserved),
include('whitespace'),
(r'->|\?|%s' % _operator, Punctuation),
(_name, Name.Variable)
],
# Identifiers
'class': [
(r'\*|\.{3}', Punctuation, '#pop'),
(r'object\b', Keyword.Reserved, '#pop'),
(r'transient\b', Keyword.Reserved),
(_name, Name.Class, '#pop'),
include('whitespace'),
default('#pop')
],
'classes': [
(r'[:,]', Punctuation, 'class'),
include('whitespace'),
(r'>', Punctuation, '#pop'),
default('#pop')
],
'constants': [
(r',+', Punctuation),
(r';', Punctuation, '#pop'),
(r'property\b', Keyword.Reserved),
(_name, Name.Constant),
include('whitespace')
],
'label': [
(_name, Name.Label, '#pop'),
include('whitespace'),
default('#pop')
],
'variables': [
(r',+', Punctuation),
(r'\)', Punctuation, '#pop'),
include('whitespace'),
(_name, Name.Variable)
],
# Whitespace and comments
'whitespace': [
(r'^%s*#(%s|[^\n]|(?<=\\)\n)*\n?' % (_ws_pp, _comment_multiline),
Comment.Preproc),
(_comment_single, Comment.Single),
(_comment_multiline, Comment.Multiline),
(r'\\+\n+%s*#?|\n+|([^\S\n]|\\)+' % _ws_pp, Text)
],
# Strings
'string': [
(r'"""', String.Double, 'tdqs'),
(r"'''", String.Single, 'tsqs'),
(r'"', String.Double, 'dqs'),
(r"'", String.Single, 'sqs')
],
's/escape': [
(r'\{\{|\}\}|%s' % _escape, String.Escape)
],
's/verbatim': [
(r'<<\s*(as\s+decreasingly\s+likely\s+outcomes|cycling|else|end|'
r'first\s+time|one\s+of|only|or|otherwise|'
r'(sticky|(then\s+)?(purely\s+)?at)\s+random|stopping|'
r'(then\s+)?(half\s+)?shuffled|\|\|)\s*>>', String.Interpol),
(r'<<(%%(_(%s|\\?.)|[\-+ ,#]|\[\d*\]?)*\d*\.?\d*(%s|\\?.)|'
r'\s*((else|otherwise)\s+)?(if|unless)\b)?' % (_escape, _escape),
String.Interpol, ('block/embed', 'more/embed', 'main'))
],
's/entity': [
(r'(?i)&(#(x[\da-f]+|\d+)|[a-z][\da-z]*);?', Name.Entity)
],
'tdqs': _make_string_state(True, True),
'tsqs': _make_string_state(True, False),
'dqs': _make_string_state(False, True),
'sqs': _make_string_state(False, False),
'tdqs/listing': _make_string_state(True, True, 'listing'),
'tsqs/listing': _make_string_state(True, False, 'listing'),
'dqs/listing': _make_string_state(False, True, 'listing'),
'sqs/listing': _make_string_state(False, False, 'listing'),
'tdqs/xmp': _make_string_state(True, True, 'xmp'),
'tsqs/xmp': _make_string_state(True, False, 'xmp'),
'dqs/xmp': _make_string_state(False, True, 'xmp'),
'sqs/xmp': _make_string_state(False, False, 'xmp'),
# Tags
'tdqt': _make_tag_state(True, True),
'tsqt': _make_tag_state(True, False),
'dqt': _make_tag_state(False, True),
'sqt': _make_tag_state(False, False),
'dqs/tdqt': _make_attribute_value_state(r'"', True, True),
'dqs/tsqt': _make_attribute_value_state(r'"', True, False),
'dqs/dqt': _make_attribute_value_state(r'"', False, True),
'dqs/sqt': _make_attribute_value_state(r'"', False, False),
'sqs/tdqt': _make_attribute_value_state(r"'", True, True),
'sqs/tsqt': _make_attribute_value_state(r"'", True, False),
'sqs/dqt': _make_attribute_value_state(r"'", False, True),
'sqs/sqt': _make_attribute_value_state(r"'", False, False),
'uqs/tdqt': _make_attribute_value_state(_no_quote, True, True),
'uqs/tsqt': _make_attribute_value_state(_no_quote, True, False),
'uqs/dqt': _make_attribute_value_state(_no_quote, False, True),
'uqs/sqt': _make_attribute_value_state(_no_quote, False, False),
# Regular expressions
'tdqr': [
(r'[^\\"]+', String.Regex),
(r'\\"*', String.Regex),
(r'"{3,}', String.Regex, '#pop'),
(r'"', String.Regex)
],
'tsqr': [
(r"[^\\']+", String.Regex),
(r"\\'*", String.Regex),
(r"'{3,}", String.Regex, '#pop'),
(r"'", String.Regex)
],
'dqr': [
(r'[^\\"]+', String.Regex),
(r'\\"?', String.Regex),
(r'"', String.Regex, '#pop')
],
'sqr': [
(r"[^\\']+", String.Regex),
(r"\\'?", String.Regex),
(r"'", String.Regex, '#pop')
]
}
def get_tokens_unprocessed(self, text, **kwargs):
pp = r'^%s*#%s*' % (self._ws_pp, self._ws_pp)
if_false_level = 0
for index, token, value in (
RegexLexer.get_tokens_unprocessed(self, text, **kwargs)):
if if_false_level == 0: # Not in a false #if
if (token is Comment.Preproc and
re.match(r'%sif%s+(0|nil)%s*$\n?' %
(pp, self._ws_pp, self._ws_pp), value)):
if_false_level = 1
else: # In a false #if
if token is Comment.Preproc:
if (if_false_level == 1 and
re.match(r'%sel(if|se)\b' % pp, value)):
if_false_level = 0
elif re.match(r'%sif' % pp, value):
if_false_level += 1
elif re.match(r'%sendif\b' % pp, value):
if_false_level -= 1
else:
token = Comment
yield index, token, value
| [
"[email protected]"
] | |
de87b1312d042de25ad09e1c1273fad0b0bc68a4 | fa4b2b4ce915b4e58737f65efe7d18d1f45cbe27 | /home/migrations/0001_initial.py | 6f7e2af882d3cea72915015cbf036f11d7df263e | [] | no_license | Wishez/cosmeticsyou-v2.0 | 0fde09158944415b2471cb07dcf1e2cd1df85923 | a0f6a1b11622cb36a5084781ad35f4eed2778f66 | refs/heads/master | 2022-12-26T12:47:53.693887 | 2020-10-12T20:22:30 | 2020-10-12T20:27:54 | 293,092,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,946 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-10 15:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Callback',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('callback_name', models.CharField(max_length=30, verbose_name=b'\xd0\x98\xd0\xbc\xd1\x8f')),
('callback_phone', models.CharField(max_length=30, verbose_name=b'\xd0\xa2\xd0\xb5\xd0\xbb\xd0\xb5\xd1\x84\xd0\xbe\xd0\xbd')),
('callback_message', models.TextField(max_length=250, verbose_name=b'\xd0\x9a\xd0\xbe\xd0\xbc\xd0\xbc\xd0\xb5\xd0\xbd\xd1\x82\xd0\xb0\xd1\x80\xd0\xb8\xd0\xb9')),
],
options={
'verbose_name': '\u041e\u0431\u0440\u0430\u0442\u043d\u044b\u0439 \u0432\u044b\u0437\u043e\u0432',
'verbose_name_plural': '\u041e\u0431\u0440\u0430\u0442\u043d\u044b\u0435 \u0432\u044b\u0437\u043e\u0432\u044b',
},
),
migrations.CreateModel(
name='Program',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('img', models.FileField(blank=True, null=True, upload_to=b'uploads/program/', verbose_name=b'\xd0\x98\xd0\xb7\xd0\xbe\xd0\xb1\xd1\x80\xd0\xb0\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 \xd0\xbf\xd1\x80\xd0\xbe\xd0\xb3\xd1\x80\xd0\xb0\xd0\xbc\xd0\xbc\xd1\x8b')),
('title', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x97\xd0\xb0\xd0\xb3\xd0\xbe\xd0\xbb\xd0\xbe\xd0\xb2\xd0\xbe\xd0\xba')),
('p', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x9f\xd0\xb0\xd1\x80\xd0\xb0\xd0\xb3\xd1\x80\xd0\xb0\xd1\x84 \xd0\xbf\xd0\xbe\xd0\xb4 \xd0\xb7\xd0\xb0\xd0\xb3\xd0\xbe\xd0\xbb\xd0\xbe\xd0\xb2\xd0\xba\xd0\xbe\xd0\xbc')),
('offer_1', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x9f\xd1\x80\xd0\xb5\xd0\xb4\xd0\xbb\xd0\xbe\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 1 (\xd0\xbf\xd0\xbe\xd0\xb4-\xd0\xb7\xd0\xb0\xd0\xb3\xd0\xbe\xd0\xbb\xd0\xbe\xd0\xb2\xd0\xbe\xd0\xba)')),
('action_1_1', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5')),
('action_1_2', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_1_3', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_1_4', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('offer_2', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x9f\xd1\x80\xd0\xb5\xd0\xb4\xd0\xbb\xd0\xbe\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 (\xd0\xbf\xd0\xbe\xd0\xb4-\xd0\xb7\xd0\xb0\xd0\xb3\xd0\xbe\xd0\xbb\xd0\xbe\xd0\xb2\xd0\xbe\xd0\xba)')),
('action_2_1', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5')),
('action_2_2', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_2_3', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_2_4', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_2_5', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5')),
('action_2_6', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_2_7', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_2_8', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_2_9', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5')),
('action_2_10', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_2_11', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_2_12', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('offer_3', models.CharField(blank=True, max_length=300, null=True, verbose_name=b'\xd0\x9f\xd1\x80\xd0\xb5\xd0\xb4\xd0\xbb\xd0\xbe\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 3 (\xd0\xbf\xd0\xbe\xd0\xb4-\xd0\xb7\xd0\xb0\xd0\xb3\xd0\xbe\xd0\xbb\xd0\xbe\xd0\xb2\xd0\xbe\xd0\xba)')),
('action_3_1', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5')),
('action_3_2', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_3_3', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_3_4', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_3_5', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5')),
('action_3_6', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_3_7', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_3_8', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_3_9', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5')),
('action_3_10', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_3_11', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_3_12', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('offer_4', models.CharField(blank=True, max_length=300, null=True, verbose_name=b'\xd0\x9f\xd1\x80\xd0\xb5\xd0\xb4\xd0\xbb\xd0\xbe\xd0\xb6\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5 4 (\xd0\xbf\xd0\xbe\xd0\xb4-\xd0\xb7\xd0\xb0\xd0\xb3\xd0\xbe\xd0\xbb\xd0\xbe\xd0\xb2\xd0\xbe\xd0\xba)')),
('action_4_1', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5')),
('action_4_2', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_4_3', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_4_4', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_4_5', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5')),
('action_4_6', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_4_7', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_4_8', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_4_9', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xb5')),
('action_4_10', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_4_11', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
('action_4_12', models.CharField(blank=True, max_length=350, null=True, verbose_name=b'\xd0\x94\xd0\xb5\xd0\xb9\xd1\x81\xd1\x82\xd0\xb2\xd0\xb8\xd0\xb5 \xd0\xb4\xd0\xbb\xd1\x8f \xd0\xb2\xd1\x8b\xd0\xbf\xd0\xbe\xd0\xbb\xd0\xbd\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f')),
],
options={
'verbose_name': '\u0421\u0442\u0430\u0440\u0442\u043e\u0432\u0430\u044f \u043f\u0440\u043e\u0433\u0440\u0430\u043c\u043c\u0430',
'verbose_name_plural': '\u0421\u0442\u0430\u0440\u0442\u043e\u0432\u044b\u0435 \u043f\u0440\u043e\u0433\u0440\u0430\u043c\u043c\u044b',
},
),
migrations.CreateModel(
name='Slider',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slide_1', models.FileField(blank=True, null=True, upload_to=b'uploads/slider/', verbose_name=b'\xd0\xa1\xd0\xbb\xd0\xb0\xd0\xb9\xd0\xb4')),
('slide_2', models.FileField(blank=True, null=True, upload_to=b'uploads/slider/', verbose_name=b'\xd0\xa1\xd0\xbb\xd0\xb0\xd0\xb9\xd0\xb4')),
('slide_3', models.FileField(blank=True, null=True, upload_to=b'uploads/slider/', verbose_name=b'\xd0\xa1\xd0\xbb\xd0\xb0\xd0\xb9\xd0\xb4')),
('slide_4', models.FileField(blank=True, null=True, upload_to=b'uploads/slider/', verbose_name=b'\xd0\xa1\xd0\xbb\xd0\xb0\xd0\xb9\xd0\xb4')),
('slide_5', models.FileField(blank=True, null=True, upload_to=b'uploads/slider/', verbose_name=b'\xd0\xa1\xd0\xbb\xd0\xb0\xd0\xb9\xd0\xb4')),
],
options={
'verbose_name': '\u0421\u043b\u0430\u0439\u0434\u0435\u0440',
'verbose_name_plural': '\u0421\u043b\u0430\u0439\u0434\u044b',
},
),
]
| [
"[email protected]"
] | |
f1852414a1506a6b10a1010751f35b8c44e0caba | 7104726233d98dd714a445f4f516bce954680f7f | /PuThresholdTuning/python/runForest_PbPb_MIX_75X_PUThresholdVarR020.py | 8fd43dffcd346d87d79593e728a23b559bab3f6a | [
"CC0-1.0"
] | permissive | mverwe/JetRecoValidation | 7b09dada9a797b0ccf39064bdbc801639a8dd229 | ee8b3fd94bac16390b367dc5030489738ff67958 | refs/heads/master | 2021-01-10T06:54:12.312670 | 2016-02-25T10:35:19 | 2016-02-25T10:35:19 | 43,553,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,822 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process('HiForest')
process.options = cms.untracked.PSet(
# wantSummary = cms.untracked.bool(True)
#SkipEvent = cms.untracked.vstring('ProductNotFound')
)
################################################################################
# HiForest labelling info
################################################################################
process.load("HeavyIonsAnalysis.JetAnalysis.HiForest_cff")
process.HiForest.inputLines = cms.vstring("HiForest V3",)
import subprocess
version = subprocess.Popen(["(cd $CMSSW_BASE/src && git describe --tags)"], stdout=subprocess.PIPE, shell=True).stdout.read()
if version == '':
version = 'no git info'
process.HiForest.HiForestVersion = cms.untracked.string(version)
################################################################################
# Input source
################################################################################
process.source = cms.Source("PoolSource",
duplicateCheckMode = cms.untracked.string("noDuplicateCheck"),
fileNames = cms.untracked.vstring(
"/store/user/twang/Pyquen_DiJet_pt40_5020GeV_GEN_SIM_PU_20150813/Pyquen_DiJet_pt40_5020GeV_step3_RECODEBUG_20150813/3179e0200600a67eea51209589c07fdd/step3_RECODEBUG_RAW2DIGI_L1Reco_RECO_PU_100_1_ppt.root"
#"/store/relval/CMSSW_7_5_0_pre5/RelValPhotonJets_Pt_10_13_HI/GEN-SIM-RECO/MCHI2_75_V2-v2/00000/BAA0D4EC-AF0B-E511-95A6-02163E011865.root"
))
#root://cmsxrootd.fnal.gov//
# Number of events we want to process, -1 = all events
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(10))
#####################################################################################
# Load Global Tag, Geometry, etc.
#####################################################################################
process.load('Configuration.StandardSequences.Services_cff')
process.load('Configuration.Geometry.GeometryDB_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.load('Configuration.StandardSequences.Digi_cff')
process.load('Configuration.StandardSequences.SimL1Emulator_cff')
process.load('Configuration.StandardSequences.DigiToRaw_cff')
process.load('Configuration.StandardSequences.RawToDigi_cff')
#process.load('Configuration.StandardSequences.ReconstructionHeavyIons_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
# PbPb 53X MC
from Configuration.AlCa.GlobalTag_condDBv2 import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '75X_mcRun2_HeavyIon_v5', '')
#process.GlobalTag.toGet.extend([
# cms.PSet(record = cms.string("HeavyIonRcd"),
# tag = cms.string("CentralityTable_HFtowers200_HydjetDrum5_v750x02_mc"),
# connect = cms.string("frontier://FrontierProd/CMS_CONDITIONS"),
# label = cms.untracked.string("HFtowersHydjetDrum5")
# ),
#])
from HeavyIonsAnalysis.Configuration.CommonFunctions_cff import *
#overrideGT_PbPb2760(process)
overrideJEC_PbPb2760(process)
process.load("RecoHI.HiCentralityAlgos.CentralityBin_cfi")
process.centralityBin.Centrality = cms.InputTag("hiCentrality")
process.centralityBin.centralityVariable = cms.string("HFtowers")
process.centralityBin.nonDefaultGlauberModel = cms.string("HydjetDrum5")
################################################################################
# Define tree output
################################################################################
process.TFileService = cms.Service("TFileService",
fileName=cms.string("HiForest.root"))
################################################################################
# Additional Reconstruction and Analysis: Main Body
################################################################################
#begin: MV edits
## PF jets
process.load('HiRecoPFJets_PuThreshold_cff') ##creates sequence hiRecoPFJets
process.load('akPu2PFJetSequence5_cff')
process.load('akPu2PFJetSequence10_cff')
process.load('akPu2PFJetSequence15_cff')
process.load('akPu2PFJetSequence20_cff')
process.load('akPu2PFJetSequence25_cff')
process.jetSequencesPF = cms.Sequence(process.hiRecoPFJets2
*process.akPu2PFJetSequence5
+process.akPu2PFJetSequence10
+process.akPu2PFJetSequence15
+process.akPu2PFJetSequence20
+process.akPu2PFJetSequence25
)
## Calo jets
process.load('HiRecoCaloJets_PuThreshold_cff') ##creates sequence hiRecoCaloJets
process.load('akPu2CaloJetSequence2_cff')
process.load('akPu2CaloJetSequence4_cff')
process.load('akPu2CaloJetSequence6_cff')
process.load('akPu2CaloJetSequence8_cff')
process.load('akPu2CaloJetSequence10_cff')
process.jetSequencesCalo = cms.Sequence(process.hiRecoCaloJets2
+process.akPu2CaloJetSequence2
+process.akPu2CaloJetSequence4
+process.akPu2CaloJetSequence6
+process.akPu2CaloJetSequence8
+process.akPu2CaloJetSequence10
)
#end: MV edits
process.load('HeavyIonsAnalysis.EventAnalysis.hievtanalyzer_mc_cfi')
process.hiEvtAnalyzer.doMC = cms.bool(False) #the gen info dataformat has changed in 73X, we need to update hiEvtAnalyzer code
process.load('HeavyIonsAnalysis.EventAnalysis.hltanalysis_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.HiGenAnalyzer_cfi')
#####################################################################################
# To be cleaned
process.load('HeavyIonsAnalysis.JetAnalysis.ExtraTrackReco_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.TrkAnalyzers_MC_cff')
process.load("HeavyIonsAnalysis.TrackAnalysis.METAnalyzer_cff")
process.load("HeavyIonsAnalysis.JetAnalysis.pfcandAnalyzer_cfi")
process.load('HeavyIonsAnalysis.JetAnalysis.rechitanalyzer_cfi')
process.rechitAna = cms.Sequence(process.rechitanalyzer+process.pfTowers)
process.pfcandAnalyzer.skipCharged = False
process.pfcandAnalyzer.pfPtMin = 0
#####################################################################################
#########################
# Track Analyzer
#########################
process.anaTrack.qualityStrings = cms.untracked.vstring(['highPurity','tight','loose'])
process.pixelTrack.qualityStrings = cms.untracked.vstring('highPurity')
process.hiTracks.cut = cms.string('quality("highPurity")')
# set track collection to iterative tracking
process.anaTrack.trackSrc = cms.InputTag("hiGeneralTracks")
# clusters missing in recodebug - to be resolved
process.anaTrack.doPFMatching = False
process.pixelTrack.doPFMatching = False
process.anaTrack.doSimVertex = True
process.anaTrack.doSimTrack = True
# process.ppTrack.fillSimTrack = True
process.load("SimTracker.TrackAssociation.trackingParticleRecoTrackAsssociation_cff")
process.tpRecoAssocGeneralTracks = process.trackingParticleRecoTrackAsssociation.clone()
process.tpRecoAssocGeneralTracks.label_tr = cms.InputTag("hiGeneralTracks")
process.quickTrackAssociatorByHits.ComponentName = cms.string('quickTrackAssociatorByHits')
#####################
# Photons
#####################
process.load('HeavyIonsAnalysis.PhotonAnalysis.ggHiNtuplizer_cfi')
process.ggHiNtuplizer.genParticleSrc = cms.InputTag("genParticles")
#####################
# muons
######################
#process.load("HeavyIonsAnalysis.MuonAnalysis.hltMuTree_cfi")
#process.hltMuTree.doGen = cms.untracked.bool(True)
#process.load("RecoHI.HiMuonAlgos.HiRecoMuon_cff")
#process.muons.JetExtractorPSet.JetCollectionLabel = cms.InputTag("akVs3PFJets")
#process.globalMuons.TrackerCollectionLabel = "hiGeneralTracks"
#process.muons.TrackExtractorPSet.inputTrackCollection = "hiGeneralTracks"
#process.muons.inputCollectionLabels = ["hiGeneralTracks", "globalMuons", "standAloneMuons:UpdatedAtVtx", "tevMuons:firstHit", "tevMuons:picky", "tevMuons:dyt"]
# HYDJET RECO file didn't have ak2GenJets and ak6GenJets as input, so removed them
# and ran our own hiGenJets sequence
from RecoHI.HiJetAlgos.HiGenJets_cff import ak2HiGenJets, ak3HiGenJets, ak4HiGenJets
from RecoJets.Configuration.GenJetParticles_cff import genParticlesForJets
genParticlesForJets.ignoreParticleIDs += cms.vuint32( 12,14,16)
process.hiSelectGenJets = cms.Sequence(
genParticlesForJets +
ak2HiGenJets +
ak3HiGenJets +
ak4HiGenJets
)
process.anaTrack.doSimTrack = cms.untracked.bool(False)
process.HiGenParticleAna.genParticleSrc = cms.untracked.InputTag("genParticles")
process.load("GeneratorInterface.HiGenCommon.HeavyIon_cff")
process.ana_step = cms.Path(process.heavyIon*
process.hltanalysis *
#temp process.hltobject *
process.centralityBin *
process.hiEvtAnalyzer*
process.HiGenParticleAna*
#process.hiGenJetsCleaned*
process.quickTrackAssociatorByHits*
#process.tpRecoAssocGeneralTracks + #used in HiPFJetAnalyzer
process.hiSelectGenJets +
process.jetSequencesPF +
process.jetSequencesCalo +
process.ggHiNtuplizer +
process.pfcandAnalyzer +
process.rechitAna +
#temp process.hltMuTree +
process.HiForest +
# process.cutsTPForFak +
# process.cutsTPForEff +
process.anaTrack
#process.pixelTrack
)
process.load('HeavyIonsAnalysis.JetAnalysis.EventSelection_cff')
process.phltJetHI = cms.Path( process.hltJetHI )
#process.pcollisionEventSelection = cms.Path(process.collisionEventSelection)
# process.pHBHENoiseFilter = cms.Path( process.HBHENoiseFilter ) #should be put back in later
#process.pHBHENoiseFilterResultProducer = cms.Path( process.HBHENoiseFilterResultProducer )
process.phfCoincFilter = cms.Path(process.hfCoincFilter )
process.phfCoincFilter3 = cms.Path(process.hfCoincFilter3 )
process.pprimaryVertexFilter = cms.Path(process.primaryVertexFilter )
#process.phltPixelClusterShapeFilter = cms.Path(process.siPixelRecHits*process.hltPixelClusterShapeFilter )
process.phiEcalRecHitSpikeFilter = cms.Path(process.hiEcalRecHitSpikeFilter )
process.pAna = cms.EndPath(process.skimanalysis)
# Customization
| [
"[email protected]"
] | |
3ae943c05939e10eb7593fa7d5be7c5f831a76c5 | 19d47d47c9614dddcf2f8d744d883a90ade0ce82 | /pynsxt/swagger_client/models/aws_gateway_amis_list_result.py | 5882e38c27c689298103233daa9be4d54349462e | [] | no_license | darshanhuang1/pynsxt-1 | 9ed7c0da9b3a64e837a26cbbd8b228e811cee823 | fb1091dff1af7f8b8f01aec715682dea60765eb8 | refs/heads/master | 2020-05-25T14:51:09.932853 | 2018-05-16T12:43:48 | 2018-05-16T12:43:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,184 | py | # coding: utf-8
"""
NSX API
VMware NSX REST API # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.aws_gateway_ami_info import AwsGatewayAmiInfo # noqa: F401,E501
from swagger_client.models.list_result import ListResult # noqa: F401,E501
from swagger_client.models.resource_link import ResourceLink # noqa: F401,E501
from swagger_client.models.self_resource_link import SelfResourceLink # noqa: F401,E501
class AwsGatewayAmisListResult(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'_self': 'SelfResourceLink',
'links': 'list[ResourceLink]',
'schema': 'str',
'cursor': 'str',
'sort_ascending': 'bool',
'sort_by': 'str',
'result_count': 'int',
'results': 'list[AwsGatewayAmiInfo]'
}
attribute_map = {
'_self': '_self',
'links': '_links',
'schema': '_schema',
'cursor': 'cursor',
'sort_ascending': 'sort_ascending',
'sort_by': 'sort_by',
'result_count': 'result_count',
'results': 'results'
}
def __init__(self, _self=None, links=None, schema=None, cursor=None, sort_ascending=None, sort_by=None, result_count=None, results=None): # noqa: E501
"""AwsGatewayAmisListResult - a model defined in Swagger""" # noqa: E501
self.__self = None
self._links = None
self._schema = None
self._cursor = None
self._sort_ascending = None
self._sort_by = None
self._result_count = None
self._results = None
self.discriminator = None
if _self is not None:
self._self = _self
if links is not None:
self.links = links
if schema is not None:
self.schema = schema
if cursor is not None:
self.cursor = cursor
if sort_ascending is not None:
self.sort_ascending = sort_ascending
if sort_by is not None:
self.sort_by = sort_by
if result_count is not None:
self.result_count = result_count
if results is not None:
self.results = results
@property
def _self(self):
"""Gets the _self of this AwsGatewayAmisListResult. # noqa: E501
:return: The _self of this AwsGatewayAmisListResult. # noqa: E501
:rtype: SelfResourceLink
"""
return self.__self
@_self.setter
def _self(self, _self):
"""Sets the _self of this AwsGatewayAmisListResult.
:param _self: The _self of this AwsGatewayAmisListResult. # noqa: E501
:type: SelfResourceLink
"""
self.__self = _self
@property
def links(self):
"""Gets the links of this AwsGatewayAmisListResult. # noqa: E501
        The server will populate this field when returning the resource. Ignored on PUT and POST. # noqa: E501
:return: The links of this AwsGatewayAmisListResult. # noqa: E501
:rtype: list[ResourceLink]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this AwsGatewayAmisListResult.
        The server will populate this field when returning the resource. Ignored on PUT and POST. # noqa: E501
:param links: The links of this AwsGatewayAmisListResult. # noqa: E501
:type: list[ResourceLink]
"""
self._links = links
@property
def schema(self):
"""Gets the schema of this AwsGatewayAmisListResult. # noqa: E501
:return: The schema of this AwsGatewayAmisListResult. # noqa: E501
:rtype: str
"""
return self._schema
@schema.setter
def schema(self, schema):
"""Sets the schema of this AwsGatewayAmisListResult.
:param schema: The schema of this AwsGatewayAmisListResult. # noqa: E501
:type: str
"""
self._schema = schema
@property
def cursor(self):
"""Gets the cursor of this AwsGatewayAmisListResult. # noqa: E501
Opaque cursor to be used for getting next page of records (supplied by current result page) # noqa: E501
:return: The cursor of this AwsGatewayAmisListResult. # noqa: E501
:rtype: str
"""
return self._cursor
@cursor.setter
def cursor(self, cursor):
"""Sets the cursor of this AwsGatewayAmisListResult.
Opaque cursor to be used for getting next page of records (supplied by current result page) # noqa: E501
:param cursor: The cursor of this AwsGatewayAmisListResult. # noqa: E501
:type: str
"""
self._cursor = cursor
@property
def sort_ascending(self):
"""Gets the sort_ascending of this AwsGatewayAmisListResult. # noqa: E501
:return: The sort_ascending of this AwsGatewayAmisListResult. # noqa: E501
:rtype: bool
"""
return self._sort_ascending
@sort_ascending.setter
def sort_ascending(self, sort_ascending):
"""Sets the sort_ascending of this AwsGatewayAmisListResult.
:param sort_ascending: The sort_ascending of this AwsGatewayAmisListResult. # noqa: E501
:type: bool
"""
self._sort_ascending = sort_ascending
@property
def sort_by(self):
"""Gets the sort_by of this AwsGatewayAmisListResult. # noqa: E501
Field by which records are sorted # noqa: E501
:return: The sort_by of this AwsGatewayAmisListResult. # noqa: E501
:rtype: str
"""
return self._sort_by
@sort_by.setter
def sort_by(self, sort_by):
"""Sets the sort_by of this AwsGatewayAmisListResult.
Field by which records are sorted # noqa: E501
:param sort_by: The sort_by of this AwsGatewayAmisListResult. # noqa: E501
:type: str
"""
self._sort_by = sort_by
@property
def result_count(self):
"""Gets the result_count of this AwsGatewayAmisListResult. # noqa: E501
Count of results found (across all pages), set only on first page # noqa: E501
:return: The result_count of this AwsGatewayAmisListResult. # noqa: E501
:rtype: int
"""
return self._result_count
@result_count.setter
def result_count(self, result_count):
"""Sets the result_count of this AwsGatewayAmisListResult.
Count of results found (across all pages), set only on first page # noqa: E501
:param result_count: The result_count of this AwsGatewayAmisListResult. # noqa: E501
:type: int
"""
self._result_count = result_count
@property
def results(self):
"""Gets the results of this AwsGatewayAmisListResult. # noqa: E501
Aws Gateway amis list # noqa: E501
:return: The results of this AwsGatewayAmisListResult. # noqa: E501
:rtype: list[AwsGatewayAmiInfo]
"""
return self._results
@results.setter
def results(self, results):
"""Sets the results of this AwsGatewayAmisListResult.
Aws Gateway amis list # noqa: E501
:param results: The results of this AwsGatewayAmisListResult. # noqa: E501
:type: list[AwsGatewayAmiInfo]
"""
self._results = results
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AwsGatewayAmisListResult):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
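# A minimal usage sketch (not part of the generated module): the model is a
# plain container, so it can be built from keyword arguments and serialized
# with to_dict(). The field values below are made up for illustration only.
#
#   page = AwsGatewayAmisListResult(result_count=1, sort_by='display_name')
#   print(page.to_dict())   # {'result_count': 1, 'sort_by': 'display_name', ...}
#   print(page)             # pretty-printed via to_str()/__repr__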
| [
"[email protected]"
] | |
0907267e98b96a3bfb69062100eb901fb42b8d3d | f7463bd0ab18b41611d5ac725f65d3db3a3a7a1d | /Generation Python - A Beginner's Course/13_Functions/13.5(return_v2)/7.py | 05f6fc472054d62e8f0ac4d289c449cf867dab43 | [] | no_license | Sergey-Laznenko/Stepik | f81c5aeead3fbd20628129d60ccce92b34724b97 | 5e1a1a76c3f6ed487cf8fc847913c890c8eac840 | refs/heads/master | 2022-12-28T19:01:48.670540 | 2020-10-18T15:23:58 | 2020-10-18T15:23:58 | 279,022,462 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | def is_palindrome(a):
if str(a) == str(a)[::-1]:
return True
else:
return False
def is_prime(b):
    # 2 is the only even prime; anything below 2 is not prime.
    if b < 2:
        return False
    if b == 2:
        return True
    if b % 2 == 0:
        return False
    for i in range(3, round(b ** (1 / 2) + 1), 2):
        if b % i == 0:
            return False
    return True
def is_even(c):
if c % 2 == 0:
return True
else:
return False
pws = input().split(':')
a = pws[0]
b = int(pws[1])  # the prime check needs an integer, not a string
c = int(pws[2])  # the even check needs an integer, not a string
if is_palindrome(a) == is_prime(b) == is_even(c):
print('True')
else:
print('False')
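# Illustrative examples (not part of the original solution), assuming the input
# has the form "<word>:<number>:<number>":
#   input "abcba:7:10" -> palindrome True, prime True, even True  -> prints True
#   input "abc:4:10"   -> palindrome False, prime False, even True -> prints False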
| [
"[email protected]"
] | |
ad8ea5912f2475677a294ad6b496f6e6354dab53 | f7fe9c722b8fa7ed6e66080053706a495fffb2d8 | /tensorflow/python/distribute/failure_handling/failure_handling.py | 0acb76e8a30174efd1c32a7cf020cdaea0d941c2 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] | permissive | Poet-LiBai/tensorflow | 832d9d8ddb58b5560ba19119cf107bbe857208e7 | f354ef21ae067a73fbc2ab45a7a5ceda4b0a1ff4 | refs/heads/master | 2022-05-02T04:06:27.411162 | 2022-04-21T02:45:39 | 2022-04-21T02:49:33 | 155,213,121 | 0 | 0 | Apache-2.0 | 2018-10-29T13:04:55 | 2018-10-29T13:04:55 | null | UTF-8 | Python | false | false | 35,947 | py | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for `WorkerPreemptionHandler`.
This is currently under development and the API is subject to change.
WorkerPreemptionHandler reduces the loss of training progress caused by the
termination (preemption or maintenance) of workers in multi-worker synchronous
training, and avoids surfacing an error indistinguishable from application
errors to the job scheduler or users.
"""
import os
import signal
import sys
import threading
import time
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute.failure_handling import gce_util
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
_INITIAL_RUN_COUNT_KEY = 'RUN_TO_CHECKPOINT'
_FINAL_RUN_COUNT_KEY = 'LAST_RUN_TO_CHECKPOINT'
# This key is used to guarantee that only one worker (and it's the earliest
# one that receives a preemption signal) sets _received_own_sigterm,
# leads the step resolution, and controls the grace period timeline.
_PREEMPTION_WORKER_KEY = 'TERMINATED_WORKER'
_ACKNOWLEDGE_KEY = 'RECEIVED_SIGNAL'
_ITERATION_VARIABLE = 'checkpointed_runs'
_STOP_WATCHING_CLUSTER_VALUE = 'STOP_WATCHER'
def _mwms_write_checkpoint_dir(checkpoint_dir, task_type, task_id,
cluster_spec):
"""Returns checkpoint_dir for chief and a temp dir for any other worker."""
dirpath = os.path.dirname(checkpoint_dir)
base = os.path.basename(checkpoint_dir)
if not multi_worker_util.is_chief(
cluster_spec=cluster_spec, task_type=task_type, task_id=task_id):
base_dirpath = 'workertemp_' + str(task_id)
dirpath = os.path.join(dirpath, base_dirpath)
gfile.MakeDirs(dirpath)
return os.path.join(dirpath, base)
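# Illustration (not part of the original module), assuming a two-worker cluster
# and checkpoint_dir='/tmp/ckpt/ckpt-fh':
#   - the chief (task_id 0) writes to      /tmp/ckpt/ckpt-fh
#   - a non-chief worker (task_id 1) writes to /tmp/ckpt/workertemp_1/ckpt-fh
# The non-chief 'workertemp_*' directory is temporary and is deleted again
# after each checkpoint save (see _save_checkpoint below).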
# TODO(wxinyi): rename time_till_termination to grace_period.
class TerminationConfig(object):
"""Configurations to customize for a platform other than Google's Borg or GCP.
A TerminationConfig can be created and passed to the
`WorkerPreemptionHandler` to provide customization based on the platform.
It will deliver three pieces of information:
* How to decide if there is a termination event soon
How a termination notification is delivered and fetched varies across platforms. Thus
we accept a user-defined function, `termination_watcher_function`, and execute
it repeatedly to check for termination notification.
`termination_watcher_function` should be a function that returns True if a
termination notification has been made available and False otherwise. And the
function should be lightweight and non-blocking so that we can clean up the
resources properly if no termination signal is ever raised until training
finishes.
* How to exit the program
We are asking for an `exit_fn` to execute after saving the checkpoint to exit
the training program gracefully. For MultiWorkerMirroredStrategy, a restart is
inevitable to reset the program's state. However, you can configure the
`exit_fn` to facilitate the restart and make the training experience
smooth. How so? Maybe your platform has an agreement to a RESTART_CODE that’s
recognized as a program auto-restart signal, or you may have a coordinating
script that starts up the training, in which you can configure the program to
auto-restart if it ever exits with this RESTART_CODE. In both cases,
you can configure `exit_fn` to be `sys.exit(RESTART_CODE)` and then wouldn’t
even notice that the training has been interrupted and restarted.
* How long do we have from receiving a termination event notice till the
actual termination.
Some platforms have the gap time as long as, say, one hour. In this case, you
might want to utilize this time for training as much as possible until you
have to save a checkpoint and exit. We can utilize this information if you
pass it through the `time_till_termination` argument.
*The default behavior*:
If you are training with Google’s Borg system or GCP, we automatically detect
the platform and make the right configuration for you. Besides these two
platforms, the default behavior on an unrecognized platform is:
* If `termination_event` is `None`, we will treat `signal.SIGTERM` as a
termination event.
* If `exit_fn` not configured, we exit the program with an arbitrary code 42.
* If `time_till_termination` is not configured, the default is 0, and we will
wrap up the current training step, save a checkpoint, and exit the program as
soon as we receive the termination signal.
"""
def __init__(self,
termination_watcher_function=None,
exit_fn=None,
time_till_termination=None):
self.termination_watcher_function = termination_watcher_function
self.exit_fn = exit_fn
self.time_till_termination = time_till_termination
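# A minimal sketch (not part of the original module) of wiring up a custom
# TerminationConfig for an unrecognized platform. The file path, exit code and
# grace period below are made-up values used only for illustration:
#
#   def _maintenance_file_watcher():
#     # Returns True once an external agent creates this (hypothetical) file.
#     return os.path.exists('/tmp/maintenance_notice')
#
#   custom_config = TerminationConfig(
#       termination_watcher_function=_maintenance_file_watcher,
#       exit_fn=lambda: sys.exit(143),
#       time_till_termination=60)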
# TODO(wxinyi): configure the exit function based on device type (GPU or TPU).
class GCPTerminationConfig(TerminationConfig):
"""Configurations for GCP GPU VM."""
def __init__( # pylint: disable=super-init-not-called
self,
termination_watcher_function=None,
exit_fn=None,
time_till_termination=None):
self.termination_watcher_function = termination_watcher_function or gce_util.termination_watcher_function_gce
self.exit_fn = exit_fn or gce_util.gce_exit_fn
self.time_till_termination = time_till_termination or gce_util.GRACE_PERIOD_GCE
class BorgTerminationConfig(TerminationConfig):
"""Configurations for Borg."""
def __init__( # pylint: disable=super-init-not-called
self,
termination_watcher_function=None,
exit_fn=None,
time_till_termination=None):
self.termination_watcher_function = termination_watcher_function
default_exit_fn = lambda: sys.exit(42)
self.exit_fn = exit_fn or default_exit_fn
self.time_till_termination = time_till_termination or 0
def _complete_config_for_environement(platform_device, termination_config):
"""Complete un-filled fields of TerminationConfig based on platform."""
if platform_device is gce_util.PlatformDevice.GCE_GPU:
return GCPTerminationConfig(termination_config.termination_watcher_function,
termination_config.exit_fn,
termination_config.time_till_termination)
else:
# The default we chose are the same as the ones used by Borg. So we just
# return this.
return BorgTerminationConfig(
termination_config.termination_watcher_function,
termination_config.exit_fn,
termination_config.time_till_termination)
# Implementation:
# Each worker will create its own WorkerPreemptionHandler instance, and the
# instances communicate through coordination services. Each
# WorkerPreemptionHandler conduct three tasks in parallel:
# - Watches out for its own preemption signal. (_poll_termination_signal_thread)
# - Watches out for a step key from the coordination service made available
# by any member in the cluster (_cluster_wise_termination_watcher_thread)
# - The main thread for training.
#
# The life cycle of a WorkerPreemptionHandler is as below:
#
# It starts the two watcher threads described above, and then it starts
# training. Each time before it starts a training step, it will check if any
# information has been made available by the two watchers: The
# _poll_termination_signal_thread will be in charge of the _received_own_sigterm
# event, the _cluster_wise_termination_watcher_thread will be in charge of the
# _received_checkpoint_step event.
#
# If at any point the local worker receives a preemption signal,
# _poll_termination_signal_thread will set _received_own_sigterm.
# Next time before it attempts to run a training step, it will deal with the
# event, by setting its current finished step + 1 as the step after which a
# checkpoint should be saved and make it available to all the workers through
# the coordination service. It will then continue training.
#
# This step key will be picked up by the other watcher,
# _cluster_wise_termination_watcher_thread, both on the worker to be preempted
# and other workers. And it will set the _received_checkpoint_step event.
# Now, if there is a long grace period before the training
# has to terminate (e.g., an hour), we would like to keep training and save a
# checkpoint again right before the termination. Thus this watcher thread will
# move on to watch out for a final step-to-save key. Otherwise,
# it has finished all the task to do.
#
# Back to the main training thread. Again, before the next training step, the
# WorkerPreemptionHandler found that _received_checkpoint_step is set. If the
# local worker has not finished the required step after which to save a
# checkpoint, it will not do anything. Continue training and it will revisit
# after another step. If the step is met, then it will save a checkpoint,
# which requires participation of all workers.
#
# After this checkpoint is saved, if there is NO long grace period, all workers
# will just exit. If there is, all workers will enter a grace period countdown
# phase (_final_checkpoint_countdown) and clear the _received_checkpoint_step
# event. They will then continue training.
#
# For the worker to be preempted, during this countdown period, it will check
# whether the grace period is almost ending before its every step. If not,
# nothing needs to be done. If so, it will again set a step-to-save key and made
# it available to all workers. This is still watched by
# _cluster_wise_termination_watcher_thread and gestured by
# _received_checkpoint_step. A similar process is repeated: all workers save
# a checkpoint at an agreed step. And after they finish saving, they recognize
# that they have finished a countdown period for an extended grace period, and
# they all exit.
#
# When the program restarts and WorkerPreemptionHandler object is created, it
# will restore the checkpoint.
class WorkerPreemptionHandler(object):
"""Preemption and error handler for synchronous training.
The API helps coordinate all workers to save a checkpoint upon receiving a
preemption signal and helps propagate accurate error messages during training.
When the program recovers from preemption, the checkpoint that is passed to
initialize a `WorkerPreemptionHandler` object will be loaded
automatically.
Right after the initialization, a thread starts to watch out for a termination
signal for any member in the cluster, but the signal will only be handled
(which includes aligning the step to save a checkpoint, saving a checkpoint,
and exiting with a platform recognized restart code) after entering a
`WorkerPreemptionHandler.run` call.
Example usage:
```python
strategy = tf.distribute.MultiWorkerMirroredStrategy()
with strategy.scope():
dataset, model, optimizer = ...
fh_checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
worker_preemption_watcher = tf.distribute.WorkerPreemptionHandler(
cluster_resolver, fh_checkpoint, checkpoint_directory)
# `worker_preemption_watcher.total_runs` will be restored to its
# checkpointed value when training is restored after interruption.
for epoch in range(worker_preemption_watcher.total_runs //
STEPS_PER_EPOCH, num_epochs):
for step in range(worker_preemption_watcher.total_runs %
STEPS_PER_EPOCH, num_steps):
# distributed_train_step is a function wrapped by strategy.run
loss += worker_preemption_watcher.run(distributed_train_step,
args=(next(dataset),))
```
`WorkerPreemptionHandler` will create a CheckpointManager to manage the
checkpoint and only one CheckpointManager should be active in a particular
directory at a time. Thus, if the user would like to save a checkpoint for
purpose other than fault tolerance, e.g., for evaluation, they should save it
in a directory different from the one passed to a
`WorkerPreemptionHandler`.
This API targets multi-client distributed training, and right now only
`tf.distribute.MultiWorkerMirroredStrategy` is supported.
"""
def __init__(self,
cluster_resolver,
checkpoint,
checkpoint_dir,
termination_config=TerminationConfig()):
"""Creates the failure handler.
Args:
cluster_resolver: a `tf.distribute.cluster_resolver.ClusterResolver`. You
may also get it through the `cluster_resolver` attribute of the strategy
in use.
checkpoint: a `tf.train.Checkpoint` that will be saved upon preemption and
loaded upon restart by the `WorkerPreemptionHandler` API automatically.
checkpoint_dir: a directory for the `WorkerPreemptionHandler` to play with
checkpoints. `WorkerPreemptionHandler` will create a
`tf.train.CheckpointManager` to manage the passed-in `checkpoint`. Since
only one `tf.train.CheckpointManager` should be active in a particular
directory at a time, this `checkpoint_dir` arg should preferably be
separated from where the user saves their checkpoint for non-fault
tolerance purpose.
termination_config: a `TerminationConfig` object to configure for a
platform other than Google Borg or GCP.
"""
self._cluster_resolver = cluster_resolver
self._checkpoint = checkpoint
self._id_in_cluster = str(
multi_worker_util.id_in_cluster(
self._cluster_resolver.cluster_spec(),
self._cluster_resolver.task_type,
self._cluster_resolver.task_id))
# The number of calls to `WorkerPreemptionHandler.run` when the latest
# checkpoint was saved.
self._checkpointed_runs = variables.Variable(
initial_value=constant_op.constant(0, dtype=dtypes.int64),
trainable=False,
name=_ITERATION_VARIABLE)
if not hasattr(self._checkpoint,
_ITERATION_VARIABLE):
setattr(self._checkpoint, _ITERATION_VARIABLE,
self._checkpointed_runs)
# Make CheckpointManagers. MultiWorkerMirroredStrategy requires different
# setup on chief and on other workers.
self._read_checkpoint_manager = checkpoint_management.CheckpointManager(
checkpoint, directory=checkpoint_dir, max_to_keep=1)
if multi_worker_util.is_chief(
cluster_spec=cluster_resolver.cluster_spec(),
task_type=cluster_resolver.task_type,
task_id=cluster_resolver.task_id):
self._write_checkpoint_manager = self._read_checkpoint_manager
else:
self._write_checkpoint_manager = checkpoint_management.CheckpointManager(
checkpoint,
_mwms_write_checkpoint_dir(checkpoint_dir, cluster_resolver.task_type,
cluster_resolver.task_id,
cluster_resolver.cluster_spec()),
max_to_keep=1)
self._read_checkpoint_manager.restore_or_initialize()
    # Grace period countdown. Set to True for all workers once they finish
    # saving (and timing) a checkpoint. Once this phase is entered, new
    # preemption/maintenance notices will not be handled, since the whole
    # cluster goes down when the worker who first initiated the grace period
    # goes down.
self._final_checkpoint_countdown = False
self._estimated_run_time = 0
# An internal step counter that's restored to checkpointed_iterations when
# training is restored. It increments by one every time
# `WorkerPreemptionHandler.run` is called. Note that in this case, the
# user must pass a single-step training function to
# `WorkerPreemptionHandler.run` instead of a multiple-step one.
self._run_counter = self._checkpointed_runs.numpy()
    # The worker itself has received a preemption signal.
self._received_own_sigterm = threading.Event()
# Some member (could be oneself) has received preemption signal, and the
# step number to save a checkpoint has been aligned.
self._received_checkpoint_step = threading.Event()
self._platform_device = gce_util.detect_platform()
completed_termination_config = _complete_config_for_environement(
self._platform_device, termination_config)
self._termination_watcher_function = completed_termination_config.termination_watcher_function
self._exit_fn = completed_termination_config.exit_fn
self._grace_period = completed_termination_config.time_till_termination
# When training is interrupted, we explicitly call the cleanup methods for
# the thread watching for local worker's termination signal and the thread
# watching for clusterwise information before we save a checkpoint and exit.
# In the final chapter of the training where no interruption is encountered,
# we rely on __del__ to clean up. However, there is no guarantee when or
# whether __del__ is executed, thus we make the threads daemon to avoid it
# preventing program from exit.
self._cluster_wise_termination_watcher_thread = threading.Thread(
target=self._watch_step_to_save_key,
name='PeerTerminationWatcher-%s' % self._id_in_cluster,
daemon=True)
logging.info('Start watcher for peer\'s signal.')
self._cluster_wise_termination_watcher_thread.start()
self._poll_termination_signal_thread = None
if completed_termination_config.termination_watcher_function:
self._start_polling_for_termination_signal()
else:
self._start_watching_for_signal()
def _start_watching_for_signal(self):
signal.signal(signal.SIGTERM, self._sigterm_handler_fn)
def _start_polling_for_termination_signal(self):
self._poll_termination_signal_thread_should_stop = threading.Event()
self._poll_termination_signal_thread = threading.Thread(
target=self._poll_termination_signal,
name='WorkerTerminationSignalWatcher-%s' % self._id_in_cluster,
daemon=True)
logging.info('Start polling for termination signal.')
self._poll_termination_signal_thread.start()
def _poll_termination_signal(self):
"""Poll maintenance notice and notify peers if receiving one."""
while True:
if self._poll_termination_signal_thread_should_stop.is_set(
) or self._final_checkpoint_countdown:
return
if self._termination_watcher_function():
break
time.sleep(1)
self._maybe_set_received_own_sigterm()
def _maybe_set_received_own_sigterm(self):
"""Claim earliest preemption if no one else has done it before."""
try:
context.context().set_config_key_value(_PREEMPTION_WORKER_KEY,
self._id_in_cluster)
logging.info('Member %s has received termination notice.',
self._id_in_cluster)
self._received_own_sigterm_time = time.time()
self._received_own_sigterm.set()
# This is to handle the case that a worker has received termination
# notice but hasn't come to the next step to set the step key. Other
# workers might receive a termination notice too, and attempt to set the
# config key again, which causes this error. This can be safely ignored
# since checkpoint should be saved as early as the earliest call is made.
except errors.AlreadyExistsError:
logging.info('Member %s has received termination notice. But some other '
'worker has received it as well! Leaving'
' it to them to decide when to checkpoint. ',
self._id_in_cluster)
return
def _stop_poll_termination_signal_thread(self):
if self._poll_termination_signal_thread:
self._poll_termination_signal_thread_should_stop.set()
self._poll_termination_signal_thread.join()
self._poll_termination_signal_thread = None
logging.info('Shut down watcher for one\'s own termination signal')
def _stop_cluster_wise_termination_watcher_thread(self):
"""Stop the thread that is _watch_step_to_save_key."""
if self._cluster_wise_termination_watcher_thread:
try:
context.context().set_config_key_value(_INITIAL_RUN_COUNT_KEY,
_STOP_WATCHING_CLUSTER_VALUE)
except (errors.AlreadyExistsError, errors.UnavailableError):
# We'll ignore any error in the process of setting this key. There
        # certainly will be an AlreadyExistsError since all workers are trying to
# push this key. Or some worker might have exited already, leading to a
# errors.UnavailableError or errors.AbortedError.
pass
try:
context.context().set_config_key_value(_FINAL_RUN_COUNT_KEY,
_STOP_WATCHING_CLUSTER_VALUE)
except (errors.AlreadyExistsError, errors.UnavailableError):
pass
finally:
self._cluster_wise_termination_watcher_thread.join()
self._cluster_wise_termination_watcher_thread = None
logging.info('Shut down watcher for peer\'s termination signal.')
def __del__(self):
self._stop_cluster_wise_termination_watcher_thread()
self._stop_poll_termination_signal_thread()
@property
def total_runs(self):
"""Returns the number of times `WorkerPreemptionHandler.run` is called.
This value tracks the number of all calls to
`WorkerPreemptionHandler.run` including those before the program is
restarted and the training is restored. The user can compute their total
number of iterations by:
`worker_preemption_watcher.run * number_of_steps_in_train_function`,
while for tf.distribute.MultiWorkerMirroredStrategy users,
`number_of_steps_in_train_function` should be one.
"""
return self._run_counter
def run(self,
distributed_train_function,
*args,
**kwargs):
"""Runs a training function with error and preemption handling.
This function handles the preemption signal from any peer in the cluster by
saving the training progress and exiting gracefully. (Specifically, when
running on Borg, it exits with a special code so that the cluster
automatically restarts the training after the down worker is back.) It will
also propagate any program error encountered during execution of
`distributed_train_function` to all workers so that they can raise the same
error.
The `distributed_train_function` argument should be a distributed train
function (i.e., containing a call to `tf.distribute.Strategy.run`). For
`tf.distribute.MultiWorkerMirroredStrategy` users, we recommend passing in a
single-step `distributed_train_function` to
`WorkerPreemptionHandler.run` so that the checkpoint can be saved in
time in case a preemption signal or maintenance notice is sent.
Besides the preemption and error handling part,
`WorkerPreemptionHandler.run(distributed_train_function, *args,
**kwargs)` has the same effect and output as
`distributed_train_function(*args, **kwargs)`. `distributed_train_function`
can return either some or no result. The following is a shortened example:
```python
@tf.function
def distributed_train_step(iterator):
# A distributed single-step training function.
def step_fn(inputs):
# A per-replica single-step training function.
x, y = inputs
...
return loss
per_replica_losses = strategy.run(step_fn, args=(next(iterator),))
return strategy.reduce(
tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)
for epoch in range(worker_preemption_watcher.total_runs //
STEPS_PER_EPOCH, EPOCHS_TO_RUN):
iterator = iter(multi_worker_dataset)
total_loss = 0.0
num_batches = 0
for step in range(worker_preemption_watcher.total_runs %
STEPS_PER_EPOCH, STEPS_PER_EPOCH):
total_loss += worker_preemption_watcher.run(distributed_train_step)
num_batches += 1
train_loss = total_loss / num_batches
print('Epoch: %d, train_loss: %f.' %(epoch.numpy(), train_loss))
train_accuracy.reset_states()
```
Args:
distributed_train_function: A (single-step) distributed training function.
*args: args for `distributed_train_function`.
**kwargs: kwargs for `distributed_train_function`.
Raises:
Program error encountered by any member in the cluster encounters one
while executing the `distributed_train_function`, or any error from the
program error propagation process.
Returns:
Result of running the `distributed_train_function`.
"""
# TODO(wxinyi): after we support use with TPUStrategy, we should expand the
# API doc to state that `distributed_train_function` does not need to be a
# single-step training function, since a multi-step host-training loop is
# the dominant use case for TPU user. Besides, passing in a multi-step
# `distributed_train_function` will require the user to track their own
# training steps.
try:
self._checkpoint_if_preempted()
run_begin_time = time.time()
result = distributed_train_function(*args, **kwargs)
new_run_time = time.time() - run_begin_time
self._run_counter += 1
# Update the average run time with the new run.
self._estimated_run_time = self._estimated_run_time + (
new_run_time - self._estimated_run_time) / self._run_counter
except errors.OpError as e:
logging.info('Propagating error to cluster: %r: %s', e, e)
try:
context.context().report_error_to_cluster(e.error_code, e.message)
except Exception as ex: # pylint: disable=broad-except
logging.info('Ignoring error during error propagation: %r:%s', ex, ex)
raise
return result
def _save_checkpoint(self):
"""Saves the checkpoint and exit program."""
logging.info('WorkerPreemptionHandler: Starting saving a checkpoint.')
self._checkpointed_runs.assign(self.total_runs)
start_time = time.monotonic()
self._write_checkpoint_manager.save()
# All workers need to participate in saving a checkpoint to avoid
# deadlock. They need to write to different paths so that they would not
# override each other. We make temporary directories for non-chief
# workers to write to, and clean them up afterward.
if not multi_worker_util.is_chief(
cluster_spec=self._cluster_resolver.cluster_spec(),
task_type=self._cluster_resolver.task_type,
task_id=self._cluster_resolver.task_id):
gfile.DeleteRecursively(
os.path.dirname(self._write_checkpoint_manager.directory))
end_time = time.monotonic()
logging.info('Checkpoint finished at path %s',
self._write_checkpoint_manager.directory)
self._checkpoint_time = end_time - start_time
def _checkpoint_if_preempted(self):
"""Checkpoint if any worker has received a preemption signal.
This function handles preemption signal reported by any worker in the
cluster. The current implementation relies on the fact that all workers in a
MultiWorkerMirroredStrategy training cluster have a step number difference
maximum of 1.
- If the signal comes from the worker itself (i.e., where this failure
handler sits), the worker will notify all peers to checkpoint after they
finish CURRENT_STEP+1 steps, where CURRENT_STEP is the step this worker has
just finished. And the worker will wait for all peers to acknowledge that
they have received its preemption signal and the final-step number before
the worker proceeds on training the final step.
- If the signal comes from another member in the cluster but NO final-step
info is available, proceed on training, because it will be available after
finishing the next step.
- If the signal comes from some other member in the cluster, and final-step
info is available, if the worker has not finished these steps yet, keep
training; otherwise, checkpoint and exit with a cluster-recognized restart
code.
"""
if self._final_checkpoint_countdown:
run_count_config_key = _FINAL_RUN_COUNT_KEY
else:
run_count_config_key = _INITIAL_RUN_COUNT_KEY
if self._received_checkpoint_step.is_set():
run_count_key = context.context().get_config_key_value(
run_count_config_key)
if run_count_key == str(self._run_counter):
self._save_checkpoint()
if self._time_to_exit():
self._stop_poll_termination_signal_thread()
self._stop_cluster_wise_termination_watcher_thread()
logging.info('WorkerPreemptionHandler: checkpoint saved. Exiting.')
self._exit_fn()
else:
logging.info('Continue training for the grace period.')
self._final_checkpoint_countdown = True
self._received_checkpoint_step.clear()
elif self._received_own_sigterm.is_set():
# Only the worker who gets termination signal first among the cluster
# will enter this branch. The following will happen in chronological
# order:
# 1. The worker just receives a preemption signal and enters this branch
# for the first time. It will set a step-to-checkpoint and let the cluster
# know.
# 2. If there is a long grace period, it will also set
# _final_checkpoint_countdown, so that during this grace period, it will
# re-enter this branch to check if grace period is ending.
# 3. If it is, set a step-to-checkpoint key again.
if self._final_checkpoint_countdown:
if self._target_time_for_termination < time.time():
logging.info(
'Grace period almost ended. Final call to save a checkpoint!')
else:
return
step_to_save_at = str(self._run_counter + 1)
logging.info('Termination caught in main thread on preempted worker')
context.context().set_config_key_value(run_count_config_key,
step_to_save_at)
logging.info('%s set to %s', run_count_config_key, step_to_save_at)
n_workers = multi_worker_util.worker_count(
self._cluster_resolver.cluster_spec(),
self._cluster_resolver.task_type)
for i in range(n_workers):
context.context().get_config_key_value(
f'{_ACKNOWLEDGE_KEY}_{run_count_config_key}_{i}')
logging.info('Sigterm acknowledgement from replica %d received', i)
self._setup_countdown_if_has_grace_period_and_not_already_counting_down()
def _time_to_exit(self):
"""Return whether to exit: exit if no grace period or grace period ends."""
# we should directly exit in either of the two cases:
# 1. if no grace period is provided;
# 2. if there is a grace period, and we're in countdown period. This,
# together with the fact that _received_checkpoint_step is set (again),
# means it's time to exit: when there is a grace period, a worker
# receives preemption signal and sets the step key. Then all workers
# receive the step key and set their local _received_checkpoint_step
# event, enters this branch in _checkpoint_if_preempted, make a
# checkpoint. Then they set _final_checkpoint_countdown to True, clear
# _received_checkpoint_step, and continue training. New preemption
# signals anywhere in the cluster will not be handled, because
# _PREEMPTION_WORKER_KEY is occupied. The only chance that
# _received_checkpoint_step gets set again is when the worker who has
# received the preemption signal earlier decide it's time to do a final
# checkpoint (by checking if it already passes
# _target_time_for_termination). It will upload a final step key. All
# workers receive this key and again set _received_checkpoint_step. So,
# if we found out that _received_checkpoint_step is set, and also
# _final_checkpoint_countdown is true, it's checkpoint and exit time.
return (self._grace_period <= 0) or self._final_checkpoint_countdown
def _setup_countdown_if_has_grace_period_and_not_already_counting_down(self):
"""Set up at the beginning of a countdown period for long grace period."""
if self._grace_period > 0 and not self._final_checkpoint_countdown:
# A factor to provide more buffer / inaccuracy.
# TODO(wxinyi): update buffer_factor as needed. Maybe deduct a constant.
buffer_factor = 3
      # Multiply by 2: when time_till_final_call <= 0 the preempted worker
      # still needs to run 1 extra step, and the other workers may need to run
      # x more steps, where 0 < x < 2.
self._target_time_for_termination = (
self._received_own_sigterm_time + self._grace_period -
buffer_factor * self._estimated_run_time * 2)
def _sigterm_handler_fn(self, signum, frame):
"""Upload the to-be-preempted worker's id to coordination service."""
del signum, frame
self._maybe_set_received_own_sigterm()
def _watch_step_to_save_key(self):
"""Watch out for step-to-save config key and acknowledge.
All workers, including the one to be preempted, execute this function to get
step-to-save.
"""
step_value = context.context().get_config_key_value(_INITIAL_RUN_COUNT_KEY)
# get_config_key_value does not return until it gets some result. Thus at
# the time to clean up, we upload a _STOP_WATCHING_CLUSTER_VALUE as the
# value so we can join the thread executing _watch_step_to_save_key.
if step_value != _STOP_WATCHING_CLUSTER_VALUE:
# This must be set before we set the ack key below, otherwise its value
# in _checkpoint_if_preempted may be outdated.
self._received_checkpoint_step.set()
ack_key = f'{_ACKNOWLEDGE_KEY}_{_INITIAL_RUN_COUNT_KEY}_{self._id_in_cluster}'
context.context().set_config_key_value(ack_key, '1')
logging.info(
'WorkerPreemptionHandler: %s set, '
'preemption awareness acknowledged', ack_key)
# If a positive grace_period is not configured, we get the
# _INITIAL_RUN_COUNT_KEY and then we're done. _checkpoint_if_preempted
# will save a checkpoint and then exit. Otherwise, we need to move on to
# wait for the _FINAL_RUN_COUNT_KEY, the one that the preempted worker
# will set after we utilize the extended grace period to train, so that
# a final checkpoint should be made right before the termination.
if self._grace_period > 0:
# Continue to wait until a final call is made.
final_step_value = context.context().get_config_key_value(
_FINAL_RUN_COUNT_KEY)
if final_step_value != _STOP_WATCHING_CLUSTER_VALUE:
ack_key = f'{_ACKNOWLEDGE_KEY}_{_FINAL_RUN_COUNT_KEY}_{self._id_in_cluster}'
context.context().set_config_key_value(ack_key, '1')
logging.info('WorkerPreemptionHandler: %s acknowledged, final '
'checkpoint timing received.', ack_key)
self._received_checkpoint_step.set()
| [
"[email protected]"
] | |
70cb3d09402bd71b84303f0fe648479b8846a4b2 | e93d1931789c99922a6b5ff3cf7e3bfe1c8bce3d | /blog/urls.py | feda7193e8da66168c2c798b1763fd13b33d3f73 | [] | no_license | nhutphong/djangoblog | 2653fcc34285788e7b34048acc7a078c88536c5c | e4bf2a0d43727c248b2a2006910a68063f99f186 | refs/heads/master | 2023-03-16T10:59:51.700275 | 2022-10-18T03:40:39 | 2022-10-18T03:40:39 | 237,549,725 | 1 | 0 | null | 2022-03-12T01:04:49 | 2020-02-01T02:23:09 | Python | UTF-8 | Python | false | false | 1,089 | py | from django.urls import path
from django.contrib.auth.decorators import login_required
from .views import (
ArticleListView,
ArticleCreateView,
ArticleDetailView,
ArticleUpdateView,
ArticleDeleteView,
PaginationListView,
SearchResultsView,
)
from . import views_filter
app_name = 'articles'
urlpatterns = [
#/blog/
path('demo/', views_filter.demo, name='demo-list'),
path('filter/', views_filter.filter_test, name='filter-list'),
path('pagination/', PaginationListView.as_view(), name='pagination-list'),
path('timkiem/', SearchResultsView.as_view(), name='search-results'),
path('', ArticleListView.as_view(), name='article-list'),
path('create/', ArticleCreateView.as_view(), name='article-create'),
path('<slug:slug>/', ArticleDetailView.as_view(), name='article-detail'),
path(
'<slug:slug>/update/',
ArticleUpdateView.as_view(),
name='article-update'
),
path(
'<slug:slug>/delete/',
ArticleDeleteView.as_view(),
name='article-delete'
)
] | [
"[email protected]"
] | |
b2675f662be96e49ab8d4e0c301e40732a490cef | 24d070c6410fdf7212c4c37c2fadc932cd4e8aec | /trac/wiki/test.py | b5941785692974b8d7b29d2432aee83ae302c289 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | clubturbo/Trac-1.4.2 | 4f111e8df9e8007a0e02080bec560361b25fc11c | 254ce54a3c2fb86b4f31810ddeabbd4ff8b54a78 | refs/heads/master | 2023-01-20T16:20:44.724154 | 2020-12-03T08:57:08 | 2020-12-03T08:57:08 | 317,922,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,528 | py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2020 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at https://trac.edgewall.org/log/.
import difflib
import io
import os
import re
import unittest
# Python 2.7 `assertMultiLineEqual` calls `safe_repr(..., short=True)`
# which breaks our custom failure display in WikiTestCase.
try:
from unittest.util import safe_repr
except ImportError:
pass
else:
unittest.case.safe_repr = lambda obj, short=False: safe_repr(obj, False)
from trac.test import EnvironmentStub, MockRequest
from trac.util.datefmt import datetime_now, to_utimestamp, utc
from trac.util.text import strip_line_ws, to_unicode
from trac.web.chrome import web_context
from trac.wiki.formatter import (HtmlFormatter, InlineHtmlFormatter,
OutlineFormatter)
class WikiTestCase(unittest.TestCase):
generate_opts = {}
def __init__(self, title, input, expected, file, line,
setup=None, teardown=None, context=None, default_data=False,
enable_components=None, disable_components=None,
env_path='', destroying=False):
unittest.TestCase.__init__(self, 'test')
self.title = title
self.input = input
self.expected = expected
if file.endswith('.pyc'):
file = file.replace('.pyc', '.py')
self.file = file
self.line = line
self._setup = setup
self._teardown = teardown
self._context = context
self.context = None
self._env_kwargs = {'default_data': default_data,
'enable': enable_components,
'disable': disable_components,
'path': env_path, 'destroying': destroying}
def _create_env(self):
env = EnvironmentStub(**self._env_kwargs)
# -- intertrac support
env.config.set('intertrac', 'genshi.title', "Genshi's Trac")
env.config.set('intertrac', 'genshi.url', "https://genshi.edgewall.org")
env.config.set('intertrac', 't', 'trac')
env.config.set('intertrac', 'th.title', "Trac Hacks")
env.config.set('intertrac', 'th.url', "http://trac-hacks.org")
# -- safe schemes
env.config.set('wiki', 'safe_schemes',
'data,file,ftp,http,https,svn,svn+ssh,'
'rfc-2396.compatible,rfc-2396+under_score')
return env
def setUp(self):
self.env = self._create_env()
self.req = MockRequest(self.env, script_name='/')
context = self._context
if context:
if isinstance(self._context, tuple):
context = web_context(self.req, *self._context)
else:
context = web_context(self.req, 'wiki', 'WikiStart')
self.context = context
# Remove the following lines in order to discover
# all the places were we should use the req.href
# instead of env.href
self.env.href = self.req.href
self.env.abs_href = self.req.abs_href
self.env.db_transaction(
"INSERT INTO wiki VALUES(%s,%s,%s,%s,%s,%s,%s)",
('WikiStart', 1, to_utimestamp(datetime_now(utc)), 'joe',
'--', 'Entry page', 0))
if self._setup:
self._setup(self)
def tearDown(self):
self.env.reset_db()
if self._teardown:
self._teardown(self)
def test(self):
"""Testing WikiFormatter"""
formatter = self.formatter()
v = unicode(formatter.generate(**self.generate_opts))
v = v.replace('\r', '').replace(u'\u200b', '') # FIXME: keep ZWSP
v = strip_line_ws(v, leading=False)
try:
self.assertEqual(self.expected, v)
except AssertionError as e:
msg = to_unicode(e)
match = re.match(r"u?'(.*)' != u?'(.*)'", msg)
if match:
g1 = ["%s\n" % x for x in match.group(1).split(r'\n')]
g2 = ["%s\n" % x for x in match.group(2).split(r'\n')]
expected = ''.join(g1)
actual = ''.join(g2)
wiki = repr(self.input).replace(r'\n', '\n')
diff = ''.join(list(difflib.unified_diff(g1, g2, 'expected',
'actual')))
# Tip: sometimes, 'expected' and 'actual' differ only by
# whitespace, so it can be useful to visualize them, e.g.
# expected = expected.replace(' ', '.')
# actual = actual.replace(' ', '.')
def info(*args):
return '\n========== %s: ==========\n%s' % args
msg = info('expected', expected)
msg += info('actual', actual)
msg += info('wiki', ''.join(wiki))
msg += info('diff', diff)
raise AssertionError( # See below for details
'%s\n\n%s:%s: "%s" (%s flavor)' \
% (msg, self.file, self.line, self.title, formatter.flavor))
def formatter(self):
return HtmlFormatter(self.env, self.context, self.input)
def shortDescription(self):
return 'Test ' + self.title
class OneLinerTestCase(WikiTestCase):
def formatter(self):
return InlineHtmlFormatter(self.env, self.context, self.input)
class EscapeNewLinesTestCase(WikiTestCase):
generate_opts = {'escape_newlines': True}
def formatter(self):
return HtmlFormatter(self.env, self.context, self.input)
class OutlineTestCase(WikiTestCase):
def formatter(self):
class Outliner(object):
flavor = 'outliner'
def __init__(self, env, context, input):
self.outliner = OutlineFormatter(env, context)
self.input = input
def generate(self):
out = io.StringIO()
self.outliner.format(self.input, out)
return out.getvalue()
return Outliner(self.env, self.context, self.input)
def wikisyntax_test_suite(data=None, setup=None, file=None, teardown=None,
context=None, default_data=False,
enable_components=None, disable_components=None,
env_path=None, destroying=False):
suite = unittest.TestSuite()
def add_test_cases(data, filename):
tests = re.compile('^(%s.*)$' % ('=' * 30), re.MULTILINE).split(data)
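        # (Added note) Test files alternate a separator line of 30 '=' characters
        # carrying the test title with a test body; each body is then split by
        # lines of 30 '-' characters into the wiki input and the expected outputs.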
next_line = 1
line = 0
for title, test in zip(tests[1::2], tests[2::2]):
title = title.lstrip('=').strip()
if line != next_line:
line = next_line
if not test or test == '\n':
continue
next_line += len(test.split('\n')) - 1
if 'SKIP' in title or 'WONTFIX' in title:
continue
blocks = test.split('-' * 30 + '\n')
if len(blocks) < 5:
blocks.extend([None] * (5 - len(blocks)))
input, page, oneliner, page_escape_nl, outline = blocks[:5]
for cls, expected in [
(WikiTestCase, page),
(OneLinerTestCase, oneliner and oneliner[:-1]),
(EscapeNewLinesTestCase, page_escape_nl),
(OutlineTestCase, outline)]:
if expected:
tc = cls(title, input, expected, filename, line,
setup, teardown, context, default_data,
enable_components, disable_components,
env_path, destroying)
suite.addTest(tc)
if data:
add_test_cases(data, file)
else:
if os.path.exists(file):
with open(file, 'r') as fobj:
data = fobj.read().decode('utf-8')
add_test_cases(data, file)
else:
print('no ' + file)
return suite
| [
"jonn@mindhunterx"
] | jonn@mindhunterx |
71574601ac2b63d3341288b90ea931c5e3941b71 | 13a32b92b1ba8ffb07e810dcc8ccdf1b8b1671ab | /home--tommy--mypy/mypy/lib/python2.7/site-packages/scikits/statsmodels/tools/decorators.py | b67ab7f9182886af449a828b1f8d2348ab11ea16 | [
"Unlicense"
] | permissive | tommybutler/mlearnpy2 | 8ec52bcd03208c9771d8d02ede8eaa91a95bda30 | 9e5d377d0242ac5eb1e82a357e6701095a8ca1ff | refs/heads/master | 2022-10-24T23:30:18.705329 | 2022-10-17T15:41:37 | 2022-10-17T15:41:37 | 118,529,175 | 0 | 2 | Unlicense | 2022-10-15T23:32:18 | 2018-01-22T23:27:10 | Python | UTF-8 | Python | false | false | 7,973 | py | from numpy.testing import *
import warnings
__all__ = ['resettable_cache','cache_readonly', 'cache_writable']
class CacheWriteWarning(UserWarning):
pass
class ResettableCache(dict):
"""
    Dictionary whose elements may depend on one another.
    If entry `B` depends on entry `A`, changing the values of entry `A` will
    reset the value of entry `B` to a default (None); deleting entry `A` will
delete entry `B`. The connections between entries are stored in a
`_resetdict` private attribute.
Parameters
----------
reset : dictionary, optional
        An optional dictionary, associating a sequence of entries to any key
of the object.
items : var, optional
An optional dictionary used to initialize the dictionary
Examples
--------
>>> reset = dict(a=('b',), b=('c',))
>>> cache = resettable_cache(a=0, b=1, c=2, reset=reset)
>>> assert_equal(cache, dict(a=0, b=1, c=2))
>>> print "Try resetting a"
>>> cache['a'] = 1
>>> assert_equal(cache, dict(a=1, b=None, c=None))
>>> cache['c'] = 2
>>> assert_equal(cache, dict(a=1, b=None, c=2))
>>> cache['b'] = 0
>>> assert_equal(cache, dict(a=1, b=0, c=None))
>>> print "Try deleting b"
>>> del(cache['a'])
>>> assert_equal(cache, {})
"""
def __init__(self, reset=None, **items):
self._resetdict = reset or {}
dict.__init__(self, **items)
def __setitem__(self, key, value):
dict.__setitem__(self, key, value)
for mustreset in self._resetdict.get(key, []):
self[mustreset] = None
def __delitem__(self, key):
dict.__delitem__(self, key)
for mustreset in self._resetdict.get(key, []):
del(self[mustreset])
resettable_cache = ResettableCache
class CachedAttribute(object):
def __init__(self, func, cachename=None, resetlist=None):
self.fget = func
self.name = func.__name__
self.cachename = cachename or '_cache'
self.resetlist = resetlist or ()
def __get__(self, obj, type=None):
if obj is None:
return self.fget
# Get the cache or set a default one if needed
_cachename = self.cachename
_cache = getattr(obj, _cachename, None)
if _cache is None:
setattr(obj, _cachename, resettable_cache())
_cache = getattr(obj, _cachename)
# Get the name of the attribute to set and cache
name = self.name
_cachedval = _cache.get(name, None)
# print "[_cachedval=%s]" % _cachedval
if _cachedval is None:
# Call the "fget" function
_cachedval = self.fget(obj)
# Set the attribute in obj
# print "Setting %s in cache to %s" % (name, _cachedval)
try:
_cache[name] = _cachedval
except KeyError:
setattr(_cache, name, _cachedval)
# Update the reset list if needed (and possible)
resetlist = self.resetlist
if resetlist is not ():
try:
_cache._resetdict[name] = self.resetlist
except AttributeError:
pass
# else:
# print "Reading %s from cache (%s)" % (name, _cachedval)
return _cachedval
def __set__(self, obj, value):
errmsg = "The attribute '%s' cannot be overwritten" % self.name
warnings.warn(errmsg, CacheWriteWarning)
class CachedWritableAttribute(CachedAttribute):
#
def __set__(self, obj, value):
_cache = getattr(obj, self.cachename)
name = self.name
try:
_cache[name] = value
except KeyError:
setattr(_cache, name, value)
class _cache_readonly(object):
"""
Decorator for CachedAttribute
"""
def __init__(self, cachename=None, resetlist=None):
self.func = None
self.cachename = cachename
self.resetlist = resetlist or None
def __call__(self, func):
return CachedAttribute(func,
cachename=self.cachename,
resetlist=self.resetlist)
cache_readonly = _cache_readonly()
class cache_writable(_cache_readonly):
"""
Decorator for CachedWritableAttribute
"""
def __call__(self, func):
return CachedWritableAttribute(func,
cachename=self.cachename,
resetlist=self.resetlist)
#this has been copied from nitime a long time ago
#TODO: check whether the class has changed in nitime
class OneTimeProperty(object):
"""A descriptor to make special properties that become normal attributes.
This is meant to be used mostly by the auto_attr decorator in this module.
Author: Fernando Perez, copied from nitime
"""
def __init__(self,func):
"""Create a OneTimeProperty instance.
Parameters
----------
func : method
The method that will be called the first time to compute a value.
Afterwards, the method's name will be a standard attribute holding
the value of this computation.
"""
self.getter = func
self.name = func.func_name
def __get__(self,obj,type=None):
"""This will be called on attribute access on the class or instance. """
if obj is None:
# Being called on the class, return the original function. This way,
# introspection works on the class.
#return func
#print 'class access'
return self.getter
val = self.getter(obj)
#print "** auto_attr - loading '%s'" % self.name # dbg
setattr(obj, self.name, val)
return val
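# Illustrative usage sketch (added annotation; `expensive_computation` is a
# placeholder, not part of this module):
#
#     class Dataset(object):
#         @OneTimeProperty
#         def stats(self):
#             return expensive_computation()  # runs only on first access
#
# After the first access, `stats` becomes a plain instance attribute, so later
# reads bypass the descriptor entirely.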
if __name__ == "__main__":
### Tests resettable_cache ----------------------------------------------------
reset = dict(a=('b',), b=('c',))
cache = resettable_cache(a=0, b=1, c=2, reset=reset)
assert_equal(cache, dict(a=0, b=1, c=2))
#
print "Try resetting a"
cache['a'] = 1
assert_equal(cache, dict(a=1, b=None, c=None))
cache['c'] = 2
assert_equal(cache, dict(a=1, b=None, c=2))
cache['b'] = 0
assert_equal(cache, dict(a=1, b=0, c=None))
#
print "Try deleting b"
del(cache['a'])
assert_equal(cache, {})
### ---------------------------------------------------------------------------
class Example(object):
#
def __init__(self):
self._cache = resettable_cache()
self.a = 0
#
@cache_readonly
def b(self):
return 1
@cache_writable(resetlist='d')
def c(self):
return 2
@cache_writable(resetlist=('e', 'f'))
def d(self):
return self.c + 1
#
@cache_readonly
def e(self):
return 4
@cache_readonly
def f(self):
return self.e + 1
ex = Example()
print "(attrs : %s)" % str(ex.__dict__)
print "(cached : %s)" % str(ex._cache)
print "Try a :", ex.a
print "Try accessing/setting a readonly attribute"
assert_equal(ex.__dict__, dict(a=0, _cache={}))
print "Try b #1:", ex.b
b = ex.b
assert_equal(b, 1)
assert_equal(ex.__dict__, dict(a=0, _cache=dict(b=1,)))
# assert_equal(ex.__dict__, dict(a=0, b=1, _cache=dict(b=1)))
ex.b = -1
print "Try dict", ex.__dict__
assert_equal(ex._cache, dict(b=1,))
#
print "Try accessing/resetting a cachewritable attribute"
c = ex.c
assert_equal(c, 2)
assert_equal(ex._cache, dict(b=1, c=2))
d = ex.d
assert_equal(d, 3)
assert_equal(ex._cache, dict(b=1, c=2, d=3))
ex.c = 0
assert_equal(ex._cache, dict(b=1, c=0, d=None, e=None, f=None))
d = ex.d
assert_equal(ex._cache, dict(b=1, c=0, d=1, e=None, f=None))
ex.d = 5
assert_equal(ex._cache, dict(b=1, c=0, d=5, e=None, f=None))
| [
"[email protected]"
] | |
9da746164e40ff74bb887fd59775557656eb228e | 21e87dc5abaf8c8dfe7adfb72c38648f415d038c | /16_developer_tools/11_compileall/example/subfolder2/c.py | 4713d0f8c91464a958dcfae43283a515af70bba3 | [] | no_license | ariesduanmu/python3_standard_library | f2badbb6047b6003ddeccb77ba2892074510f0ff | 905ae53d0970be442bcf3d2a9dc3eadbc58367e5 | refs/heads/master | 2022-04-23T21:05:52.862076 | 2020-04-23T16:44:14 | 2020-04-23T16:44:14 | 241,277,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | # -*- coding: utf-8 -*-
# @Author: Li Qin
# @Date: 2020-02-24 09:30:22
# @Last Modified by: Li Qin
# @Last Modified time: 2020-02-24 09:30:56
def minus(a, b):
return a-b | [
"[email protected]"
] | |
d06b868fa88c5d499dd32895fd542a19fc18deb0 | eed7b5aa4861086d34e539e7bbfeff4286506692 | /src/Game/Effects/spend_power.py | 2286d1f6e407736c8ea6bf6088203090a386bc5c | [] | no_license | dfwarden/DeckBuilding | 0be2ccb68fc9a69c8eaa1d8acedeaa7cebef1a31 | 0b5a7573a3cf33430fe61e4ff8a8a7a0ae20b258 | refs/heads/master | 2021-01-18T09:52:51.880892 | 2015-02-03T03:21:17 | 2015-02-03T03:21:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py |
class SpendPower:
""" Represents an effect to spend power this turn """
def __init__(self, power):
""" Initialize the Effect with the power to spend """
self.power = power
def perform(self, context):
""" Perform the Game Effect """
context.owner.spendPower(self.power) | [
"[email protected]"
] | |
5309fa1188f170b8efbe4b43b64fe524a1b8e1e9 | db8ab70de135d8bddc2c6df865b98ed76c2b92ee | /model/toxic_comment_classifier.py | f0bf2a9509d6372736d134cd7b3551e2797e332d | [] | no_license | boyuan12/ToxicBlockPlus | 718af4970f27e9eba9c454268a75c53c007f7737 | f90a46b9748a8d4dcdfc9e8c19279cc6aeed46c5 | refs/heads/main | 2023-02-26T21:20:56.878995 | 2021-02-09T01:15:34 | 2021-02-09T01:15:34 | 335,865,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,278 | py | from typing import List
from bentoml import api, artifacts, env, BentoService
from bentoml.frameworks.keras import KerasModelArtifact
from bentoml.service.artifacts.common import PickleArtifact
from bentoml.adapters import DataframeInput, JsonOutput
from keras.preprocessing import text, sequence
import numpy as np
import pandas as pd
list_of_classes = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]
max_text_length = 400
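# Note (added): max_text_length should match the padded sequence length the
# model was trained with; 400 is assumed here to mirror the training pipeline.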
@env(pip_packages=['tensorflow==1.14.0', 'keras==2.3.1', 'pandas', 'numpy'])
@artifacts([PickleArtifact('x_tokenizer'), KerasModelArtifact('model')])
class ToxicCommentClassification(BentoService):
def tokenize_df(self, df):
comments = df['comment_text'].values
tokenized = self.artifacts.x_tokenizer.texts_to_sequences(comments)
input_data = sequence.pad_sequences(tokenized, maxlen=max_text_length)
return input_data
@api(input=DataframeInput(), output=JsonOutput(), batch=True)
def predict(self, df: pd.DataFrame) -> List[str]:
input_data = self.tokenize_df(df)
prediction = self.artifacts.model.predict(input_data)
result = []
for i in prediction:
result.append(list_of_classes[np.argmax(i)])
return result | [
"[email protected]"
] | |
434bfb4f4cc27692073954c84c66e1218f428b56 | af47e1dd1405ebd5267e7f8cf22f7b4429fcef00 | /scattertext/termscoring/g2.py | 7ba2d01a83690dfca816ad03c8a93d0365684bad | [
"MIT",
"CC-BY-NC-SA-4.0",
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] | permissive | JasonKessler/scattertext | 72ce3b35d71af595f7797de845ba93b4bb0091b4 | b41e3a875faf6dd886e49e524345202432db1b21 | refs/heads/master | 2023-05-11T06:42:51.108527 | 2023-05-06T19:23:59 | 2023-05-06T19:23:59 | 63,827,736 | 2,187 | 303 | Apache-2.0 | 2023-05-06T19:24:00 | 2016-07-21T01:47:12 | Python | UTF-8 | Python | false | false | 4,143 | py | import numpy as np
import pandas as pd
from scipy.stats import chi2
from statsmodels.stats.multitest import fdrcorrection
from scattertext.termscoring.CorpusBasedTermScorer import CorpusBasedTermScorer
def g2_term(O, E):
res = O.astype(np.float64) * (np.log(O) - np.log(E))
res[O == 0] = 0
return res
def sign(a: np.array) -> np.array:
return np.nan_to_num(a / np.abs(a), 0)
def qchisq(alpha: np.array, df: int) -> np.array:
return chi2.ppf(1 - alpha, df=df) # qchisq(alpha, df=1, lower.tail=FALSE)
class G2(CorpusBasedTermScorer):
"""
G^2 (log likelihood ratio)s from (Rayson and Garside 2000)
A direct translation of the R function from (Evert 2023)
Stephanie Evert. 2023. Measuring Keyness. https://osf.io/x8z9n.
G2.term <- function (O, E) {
res <- O * log(O / E)
res[O == 0] <- 0
res
}
G2 <- function (f1, f2, N1, N2, alpha=NULL, correct=TRUE) {
stopifnot(length(f1) == length(f2))
## observed and expected contingency tables
N <- N1 + N2
R1 <- f1 + f2
O11 <- f1; E11 <- R1 * N1 / N
O12 <- f2; E12 <- R1 * N2 / N
O21 <- N1 - f1; E21 <- N1 - E11
O22 <- N2 - f2; E22 <- N2 - E12
## log-likelihood statistic (simplest formula)
G2 <- 2 * (G2.term(O11, E11) + G2.term(O12, E12) + G2.term(O21, E21) + G2.term(O22, E22))
res <- sign(O11 - E11) * G2 # set sign to distinguish positive vs. negative keywords
## weed out non-significant items if alpha is specified
if (!is.null(alpha)) {
if (correct) alpha <- alpha / length(f1)
theta <- qchisq(alpha, df=1, lower.tail=FALSE)
res[G2 < theta] <- 0 # set to 0 if not significant at level alpha
}
res
}
"""
def _set_scorer_args(self, **kwargs):
self.alpha_ = kwargs.get('alpha', None)
self.correct_ = kwargs.get('correct', True)
def get_score_df(self, label_append=''):
N1, N2, f1, f2 = self._get_ns_and_fs(())
gsquare, res = self._get_g2_and_res(N1, N2, f1, f2)
df = pd.DataFrame({
'G2': gsquare,
'Score': res,
'P': chi2.sf(gsquare, df=1),
})
return df.assign(
CorrectedP = lambda df: fdrcorrection(pvals=df.P.values, alpha=0.05, method='indep')[1]
)
def get_scores(self, *args) -> pd.Series:
N1, N2, f1, f2 = self._get_ns_and_fs(args)
gsquare, res = self._get_g2_and_res(N1, N2, f1, f2)
## weed out non-significant items if alpha is specified
if self.alpha_ is not None:
alpha = self.alpha_
if self.correct_:
alpha = alpha / len(f1)
theta = qchisq(alpha, df=1)
res[gsquare < theta] = 0 # set to 0 if not significant at level alpha
return pd.Series(res, index=self._get_terms())
def _get_g2_and_res(self, N1, N2, f1, f2):
N = N1 + N2
R1 = f1 + f2
E11, E12, E21, E22, O11, O12, O21, O22 = self.__get_contingency_table(N, N1, N2, R1, f1, f2)
## log-likelihood statistic (simplest formula)
gsquare = 2 * (g2_term(O11, E11) + g2_term(O12, E12) + g2_term(O21, E21) + g2_term(O22, E22))
res = sign(O11 - E11) * gsquare # set sign to distinguish positive vs. negative keywords
return gsquare, res
def __get_contingency_table(self, N, N1, N2, R1, f1, f2):
O11 = f1
E11 = R1 * N1 / N
O12 = f2
E12 = R1 * N2 / N
O21 = N1 - f1
E21 = N1 - E11
O22 = N2 - f2
E22 = N2 - E12
return E11, E12, E21, E22, O11, O12, O21, O22
def _get_ns_and_fs(self, args):
cat_X, ncat_X = self._get_cat_and_ncat(self._get_X())
N1 = self._get_cat_size()
N2 = self._get_ncat_size()
if len(args) == 0:
f1 = cat_X.sum(axis=0).A1
f2 = ncat_X.sum(axis=0).A1
else:
f1, f2 = self.__get_f1_f2_from_args(args)
f1 = np.array(f1).astype(np.float64)
f2 = np.array(f2).astype(np.float64)
return N1, N2, f1, f2
def get_name(self):
return 'G2'
| [
"[email protected]"
] | |
2ef11f6cdbf8403c0d448a2d67022c40b83c6620 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=3.5_rd=0.8_rw=0.06_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=60/sched.py | 48c7bde40359bfb09d24223c9a5ccb1161b938c2 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | -X FMLP -Q 0 -L 3 104 400
-X FMLP -Q 0 -L 3 78 300
-X FMLP -Q 0 -L 3 69 300
-X FMLP -Q 0 -L 3 64 300
-X FMLP -Q 1 -L 2 62 250
-X FMLP -Q 1 -L 2 54 175
-X FMLP -Q 1 -L 2 49 300
-X FMLP -Q 2 -L 2 49 150
-X FMLP -Q 2 -L 2 43 150
-X FMLP -Q 2 -L 2 37 250
-X FMLP -Q 3 -L 1 35 250
-X FMLP -Q 3 -L 1 34 100
-X FMLP -Q 3 -L 1 30 150
22 100
21 200
8 175
| [
"[email protected]"
] | |
2b0b60fa7dc054457ed41a697b42028cc176aa8f | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /ml_debiaser/randomized_threshold.py | 11e80f5069ecc7299769e3dd9478b15431da9356 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 10,142 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for debiasing ML models."""
import math
from typing import Optional
import numpy as np
class RandomizedThreshold:
"""Threshold optimizer (RTO) to debias models via postprocessing.
See: https://arxiv.org/abs/2106.12887.
This is a solver to the following optimiation problem:
minimize gamma/2 ||x||^2 - y^Tx
s.t. x satisfying DP constraints with tolerance eps and parameter rho.
There are no assumptions about y in this code but, in general, y should be the
predictions of the original classifier.
"""
def __init__(self, gamma: float = 1.0,
eps: float = 0.0,
rho: Optional[float] = None
) -> None:
"""Instantiate object.
Args:
gamma: The regularization parameter gamma (for randomization). Set this to
1 if the goal is to minmize l2 difference from the original scores.
eps: Tolerance parameter for bias between 0 and 1 inclusive.
rho: The rho parameter in the post-hoc rule. If None, rho = E[y].
"""
if eps < 0:
raise ValueError('eps must be non-negative.')
if gamma <= 0:
raise ValueError('gamma must be a strictly positive number.')
if rho is not None and rho <= 0:
raise ValueError('rho must be either None or a strictly positive number.')
self.num_groups = 1
self.gamma = gamma
self.eps = eps
self.rho = rho
self.avrg_y_score = 0
# model paramters (Lagrange dual variables)
self.lambdas = []
self.mus = []
def fit(self,
y_orig: np.ndarray,
group_feature: np.ndarray,
sgd_steps: int = 10_000,
full_gradient_epochs: int = 1_000,
verbose: bool = True,
batch_size: int = 256,
ignore_warnings: bool = False
) -> None:
"""Debias predictions w.r.t. the sensitive class in each demographic group.
IMPORTANT: If this is used for postprocessing a classifier,
the scores y_orig need to be rescaled linearly to [-1, +1].
Training proceeds in two rounds. First is SGD. Second is full gradient
descent. Full gradient descent is recommended when debiasing deep neural
nets because the scores are concentrated around the extremes
so high preciseion might be needed. Because the loss is smooth, lr
in full gradient method does not need tuning. It can be set to gamma / 2.0.
Args:
y_orig: A vector of the original probability scores. If this is used for
debiasing binary classifiers, y_orig = 2 * p(y=1) - 1.
group_feature: An array containing the group id of each instance starting
from group 0 to group K-1.
sgd_steps: Number of minibatch steps in SGD.
full_gradient_epochs: Number of epochs in full gradient descent phase.
verbose: Set to True to display progress.
batch_size: Size of minibatches in SGD.
ignore_warnings: Set to True to suppress warnings.
"""
if min(y_orig) >= 0: # use this to catch a common bug
self.yscale = 'positive'
else:
self.yscale = 'negative'
y_orig = np.array(y_orig)
num_groups = len(set(group_feature)) # number of demographic groups
# warnings against common bugs/errors
if (min(y_orig) < -1 or max(y_orig) > 1) and not ignore_warnings:
print('Warning: the scores y_orig are not in the range [-1, +1]. '
'To suppress this message, set ignore_warnings=True.')
if self.yscale == 'positive' and not ignore_warnings:
print('Warning: if this is for postprocessing a binary classifier, '
'the scores need to be rescaled to [-1, +1]. To suppress this '
'message, set ignore_warnings=True.')
# assert that group_feature is of the right form and no group is empty
if min(group_feature) != 0 or (max(group_feature) != num_groups - 1):
raise ValueError('group_feature should be in {0, 1, .. K-1} where '
'K is the nubmer of groups. Some groups are missing.')
self.num_groups = num_groups
eps0 = self.eps / 2.0
gamma = self.gamma
# Store group membership ids in a dictionary.
xk_groups = {k: [] for k in range(num_groups)}
for i in range(len(group_feature)):
xk_groups[group_feature[i]].append(i)
self.avrg_y_score = float(sum(y_orig))/len(y_orig)
if self.rho is None: # by default: self.rho = E[y] in [0, 1] not [-1, 1]
if self.yscale == 'positive':
self.rho = self.avrg_y_score
else:
self.rho = self.avrg_y_score / 2.0 + 0.5
# The parameters we optimize in the algorithm are lambdas and mus.
# lambdas_final and mus_final are running averages (final output).
lambdas = np.zeros((num_groups,))
mus = np.zeros((num_groups,))
lambdas_final = np.zeros_like(lambdas) # running averages
mus_final = np.zeros_like(mus) # running averages
# SGD is carried out in each group separately due to decomposition of the
# optimization problem.
num_samples_sgd = sgd_steps * batch_size
lr = gamma * math.sqrt(1.0 / num_samples_sgd)
# Begin the projected SGD phase.
if verbose:
print('SGD phase started:')
for k in range(num_groups):
if verbose:
print('Group %d.\t\t%02d%%'%(k, int(100*k/num_groups)), end='\r')
idx = np.array(list(xk_groups[k])) # instance IDs in group k
group_size = len(idx)
for _ in range(sgd_steps):
# random.randint is 10x faster than random.choice.
batch_ids = np.random.randint(0, group_size, batch_size)
batch_ids = idx[batch_ids]
# The code below is a faster implementation of:
# xi_arg = y_orig[batch_ids] - (lambdas[k] - mus[k])
# xi_gradient = xi_arg/gamma
# xi_gradient = np.maximum(xi_gradient, 0.)
# xi_gradient = np.minimum(xi_gradient, 1.)
lambda_minus_mu = lambdas[k] - mus[k]
xi_arg = np.maximum(y_orig[batch_ids], lambda_minus_mu)
xi_arg = np.minimum(xi_arg, gamma + lambda_minus_mu)
mean_xi = (np.mean(xi_arg) - lambda_minus_mu) / gamma
lambda_gradient = eps0 + self.rho - mean_xi
mu_gradient = eps0 - self.rho + mean_xi
# stochastic gradient descent
if eps0 > 1e-3:
lambdas[k] = max(0, lambdas[k] - lr * batch_size * lambda_gradient)
mus[k] = max(0, mus[k] - lr * batch_size * mu_gradient)
else:
# If self.eps=0, we can drop mus and optimize lambdas only but
# lambdas will not be constrained to be non-negative in this case.
lambdas[k] = lambdas[k] - lr * batch_size * lambda_gradient
# lambdas_final and mus_final are running averages.
lambdas_final[k] += lambdas[k] / sgd_steps
mus_final[k] += mus[k] / sgd_steps
# Now switch to full gradient descent.
# Because the objective is smooth, lr = gamma / 2 works.
if verbose and full_gradient_epochs:
print('\nFull gradient descent phase started:')
for k in range(num_groups):
if verbose:
print('Group {}.'.format(k))
idx = np.array(list(xk_groups[k]))
for _ in range(full_gradient_epochs):
lambda_minus_mu = lambdas_final[k] - mus_final[k]
xi_arg = np.maximum(y_orig[idx], lambda_minus_mu)
xi_arg = np.minimum(xi_arg, gamma + lambda_minus_mu)
mean_xi = (np.mean(xi_arg) - lambda_minus_mu) / gamma
full_grad_lambda = eps0 + self.rho - mean_xi
full_grad_mu = eps0 - self.rho + mean_xi
if eps0 > 1e-3:
lambdas_final[k] = max(0,
lambdas_final[k] - 0.5*gamma*full_grad_lambda)
mus_final[k] = max(0, mus_final[k] - 0.5*gamma*full_grad_mu)
else:
lambdas_final[k] = lambdas_final[k] - 0.5*gamma*full_grad_lambda
self.lambdas = lambdas_final
self.mus = mus_final
def predict(self,
y_orig: np.ndarray,
group_feature: np.ndarray,
ignore_warnings: bool = False
) -> np.ndarray:
"""Debiases the predictions.
Given the original scores y, post-process them such that the predictions
satisfy the desired fairness criteria.
Args:
y_orig: Original classifier scores. If this is for postprocessing binary
classifiers, y_orig = 2 * p(y = 1) - 1.
group_feature: An array containing the group id of each instance starting
from group 0 to group K-1.
ignore_warnings: Set to True to suppress warnings.
Returns:
y_new_prob: y_new_prob[i] is the probability of predicting the positive
class for instance i.
"""
if (((min(y_orig) >= 0 and self.yscale == 'negative') or
(min(y_orig) < 0 and self.yscale == 'positive')) and
not ignore_warnings):
print('Warning: the scores seem to have a different scale from the '
'training data. '
'If the data is scaled in [0, 1], e.g. for preprocessing, or '
'in [-1, +1], e.g. for postprocessing, make sure that test labels '
'are scaled similarly.')
num_examples = len(y_orig) # number of training examples
gamma = self.gamma
lambdas = self.lambdas
mus = self.mus
y_new_prob = np.zeros((num_examples,))
for i in range(num_examples):
k = group_feature[i]
if y_orig[i] < (lambdas[k] - mus[k]):
y_new_prob[i] = 0
elif y_orig[i] < (lambdas[k] - mus[k]) + gamma:
y_new_prob[i] = (1.0 / gamma) * (y_orig[i] - (lambdas[k] - mus[k]))
else:
y_new_prob[i] = 1.0
return y_new_prob
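# Minimal usage sketch (added for illustration; the synthetic data and the
# parameter values below are assumptions, not part of the original module).
if __name__ == '__main__':
  rng = np.random.RandomState(0)
  scores = rng.uniform(-1.0, 1.0, size=2000)   # scores rescaled to [-1, +1]
  groups = rng.randint(0, 2, size=2000)        # two demographic groups: 0 and 1
  rto = RandomizedThreshold(gamma=1.0, eps=0.05)
  rto.fit(scores, groups, sgd_steps=500, full_gradient_epochs=100, verbose=False)
  debiased = rto.predict(scores, groups)
  print('Mean debiased score per group:',
        [float(debiased[groups == k].mean()) for k in range(2)])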
| [
"[email protected]"
] | |
0397c9f0d2e40acf497622b8b4cb2e5299202bba | 471ea669e21abdb4e4915610b4b5eb43ea3cffe9 | /剑指Offer/31.整数中1出现的次数.py | d663b71492aabfba5cd8ae82b899c772a9d0eb39 | [] | no_license | JiahuaLink/nowcoder-leetcode | 26aed099e215cfc1d8e8afffc62fafa26b26b06f | 0155fc33511cbe892f58550d561d3aa3efcd56b9 | refs/heads/master | 2023-07-09T03:05:31.227720 | 2021-08-03T06:50:36 | 2021-08-03T06:50:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 883 | py | # 方法一:判断整数每个数字
class Solution:
def NumberOf1Between1AndN_Solution(self, n):
count = 0
for i in range(1, n+1):
temp = i
while(temp):
if temp%10 == 1:
count += 1
temp /= 10
return count
# Method 2: convert each integer to a string and check it digit by digit
class Solution:
def NumberOf1Between1AndN_Solution(self, n):
count = 0
for i in range(1, n+1):
s = str(i)
for j in s:
if j == '1':
count += 1
return count
# Method 3: convert the integers to strings, keep those containing '1', then count the '1's
class Solution:
    def NumberOf1Between1AndN_Solution(self, n):
        a = map(str, range(n+1))
        ones = [i for i in a if '1' in i]
return ''.join(ones).count('1') | [
"[email protected]"
] | |
3220c6fedfbdef66c2d1bc9c9c4a39bc047ce8ae | 40c2bce56832d97797c115f60d1e0459fd4ebf93 | /Eclipse_Project_2/Section_1_5/database.py | 3c56f2e590f4f519ae6e0c1a2f4d52010d0af71a | [] | no_license | amanoj319319319/Eclipse_Python_LastSeleniumTest | 0be2e7f615160248f329b4df0e9d109612b29560 | 4d0978e4c2dfe9c3a9d4b429f7ff6340278c0252 | refs/heads/master | 2023-04-27T09:14:38.726807 | 2021-05-19T08:18:40 | 2021-05-19T08:18:40 | 267,038,244 | 0 | 0 | null | 2021-05-19T08:17:45 | 2020-05-26T12:35:36 | Python | UTF-8 | Python | false | false | 6,224 | py | #connecting to the database using database credentials and finding version of the database
'''
import cx_Oracle
con=cx_Oracle.connect('system/Manoj319319319')
if con!=None:
print ("successfully connected")
print ("Version is:-",con.version)
else:
print ("connection failed")
'''
#creating a table name in the database
'''
import cx_Oracle
try:
con=cx_Oracle.connect('system/Manoj319319319')
query="create table employees(eno number,ename varchar2(10),esal number(10,2))"
cursor=con.cursor()
cursor.execute(query)
print ("Table created succesfully")
except Exception as e:
print (e)
finally:
if cursor:
cursor.close()
if con:
con.close()
'''
#dropping a particular table in the database
'''
import cx_Oracle
try:
con=cx_Oracle.connect('system/Manoj319319319')
query="drop table employees"
cursor=con.cursor()
cursor.execute(query)
print ("Table dropped succesfully")
except Exception as e:
print (e)
finally:
if cursor:
cursor.close()
if con:
con.close()
'''
#creating a table in the database
'''
import cx_Oracle
try:
con=cx_Oracle.connect('system/Manoj319319319')
query="create table employees(eno number,ename varchar2(10),esal number(10,2))"
cursor=con.cursor()
cursor.execute(query)
print ("Table created succesfully")
except Exception as e:
print (e)
finally:
if cursor:
cursor.close()
if con:
con.close()
'''
#Inserting multiple values to the required paramters in the employees table
'''
import cx_Oracle
try:
con=cx_Oracle.connect('system/Manoj320320320')
cursor=con.cursor()
query = "insert into employees values(:eno,:ename,:esal)"
records=[(101,"manoj",10000),(102,"anki",20000),(103,"jyothi",30000)]
cursor.executemany(query,records)
con.commit()
print ("Record Inserted succesfully")
except Exception as e:
print (e)
finally:
if cursor:
cursor.close()
if con:
con.close()
'''
#Reading input from the console
'''
import cx_Oracle
try:
con=cx_Oracle.connect('system/Manoj320320320')
cursor=con.cursor()
while True:
eno=int(input("Enter employee number:-"))
ename =(input("Enter employee name:-"))
esal = float(input("Enter employee salary:-"))
query = "insert into employees values(%d,'%s',%f)"
cursor.execute(query %(eno,ename,esal))
con.commit()
print ("Records Inserted succesfully")
option=input("Do you want to insert one more record[yes/no]")
if option == "no":
break
except Exception as e:
print (e)
finally:
if cursor:
cursor.close()
if con:
con.close()
'''
#Updating records in the database using SQL query
#The employees whose salary was less than 5000,i i had to increment Rs 1000 to their existing salary
'''
import cx_Oracle
try:
con=cx_Oracle.connect('system/Manoj320320320')
cursor=con.cursor()
increment=float(input("Enter increment amount:-"))
salaryrange=float(input("Enter salary range:-"))
query="update employees set esal=esal+%f where esal<%f"
cursor.execute(query %(increment, salaryrange))
con.commit()
print ("Records are updated successfully")
except Exception as e:
print (e)
finally:
if cursor:
cursor.close()
if con:
con.close()
'''
#Deleting records from the employees table based on their salary ranges
#in the temployees table whose salary was greater than 5000 they were deleted from the table by me
'''
import cx_Oracle
try:
con=cx_Oracle.connect('system/Manoj320320320')
cursor=con.cursor()
cutoff=float(input("Enter cutoff amount:-"))
query="delete from employees where esal>%f"
cursor.execute(query %(cutoff))
con.commit()
print ("Records are deleted successfully")
except Exception as e:
print (e)
finally:
if cursor:#if cursor means if cursor is not equal to None
cursor.close()
if con:
con.close()
'''
'''
DDL commands are: create table, drop table
DML commands are: insert operation, update operation, delete operation (for these,
the commit() method is a must)
'''
#desc employees
#select * from employees;
#how to use fetchone() method to retrieve data from the table
'''
import cx_Oracle
try:
con=cx_Oracle.connect('system/Manoj320320320')
cursor=con.cursor()
query="select * from employees"
cursor.execute(query)
row=cursor.fetchone()
while row is not None:
print(row)
row = cursor.fetchone()
except Exception as e:
if con:
con.rollback()
print ("There is a problem:-",e)
finally:
if cursor:#if cursor means if cursor is not equal to None
cursor.close()
if con:
con.close()
'''
#how to use fetchall() method to retrieve data from the table
'''
import cx_Oracle
try:
con=cx_Oracle.connect('system/Manoj320320320')
cursor=con.cursor()
query="select * from employees"
cursor.execute(query)
rows=cursor.fetchall()
print (rows)
for row in rows:
print ("Employee number is:-",row[0])
print("Employee name is:-", row[1])
print("Employee salary is:-", row[2])
print ("***************")
except Exception as e:
if con:
con.rollback()
print ("There is a problem:-",e)
finally:
if cursor:#if cursor means if cursor is not equal to None
cursor.close()
if con:
con.close()
'''
#how to use fetchmany() method to retrieve data from the table
'''
import cx_Oracle
try:
con=cx_Oracle.connect('system/Manoj320320320')
cursor=con.cursor()
query="select * from employees"
cursor.execute(query)
n=int(input("How many rows do you want:-"))
data = cursor.fetchmany(n)
for row in data:
print ("Employee number is:-",row[0])
print("Employee name is:-", row[1])
print("Employee salary is:-", row[2])
print ("***************")
except Exception as e:
if con:
con.rollback()
print ("There is a problem:-",e)
finally:
if cursor:#if cursor means if cursor is not equal to None
cursor.close()
if con:
con.close()
''' | [
"[email protected]"
] | |
57c166495a5ba9c4d4d739bff152b1a67f6e3fea | 5ceea4106e0df754ae581c1f5e2d16082d7b6386 | /hackerRank/Algorithms/Implementation/bon-appetit.py | 0da733b5b6475b0511073b0a9b33e4e31f2c3664 | [] | no_license | vikramlance/Python-Programming | b0d4bd70145bfaa7a66434656c5970fbc57e8bd3 | 4094961e3c613e33f2d8a6d30281c60ed09d8c80 | refs/heads/master | 2022-06-17T00:58:50.646615 | 2022-06-03T03:39:35 | 2022-06-03T03:39:35 | 53,989,511 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | '''
https://www.hackerrank.com/challenges/bon-appetit
'''
n,k=raw_input().split()
n=int(n)
k=int(k)
a=map(int, raw_input().split())
b=int(raw_input())
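# Anna skipped item k, so her fair share is half of the bill without a[k];
# if the charged amount b equals that share nothing is owed, otherwise the
# overcharge (refund due) is printed.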
if (2*b== (sum(a) - a[k])):
print "Bon Appetit"
else:
print ( b - ((sum(a) - a[k])//2))
| [
"[email protected]"
] | |
76f8185eb90a42766f86ea066b38f022fd6156e5 | 131688c1006670be2bab5ce062521ce9b79b64af | /week2/design_hashset.py | ff2fc0f9e3e5b52cdb2f8f1875abad001dd4aa75 | [
"MIT"
] | permissive | ravichalla/wallbreaker | 4e3dc98ff02fd8a7bace2466c071c65a37124426 | 0d587f12c60df5e4bca47f9183484a69d284d1f5 | refs/heads/master | 2020-06-08T05:44:35.510146 | 2020-01-29T02:25:19 | 2020-01-29T02:25:19 | 193,169,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 959 | py | class MyHashSet:
def __init__(self):
self.capacity = 5000
self.arr = [None] * self.capacity
def add(self, key):
hash_val = hash(key) % self.capacity
if self.arr[hash_val] == None:
self.arr[hash_val] = [key]
else:
if key not in self.arr[hash_val]:
self.arr[hash_val].append(key)
def remove(self, key) -> None:
hash_val = hash(key) % self.capacity
if self.arr[hash_val] == None:
return
for ind in range(len(self.arr[hash_val])):
if self.arr[hash_val][ind] == key:
del self.arr[hash_val][ind]
return
def contains(self, key):
hash_val = hash(key) % self.capacity
if self.arr[hash_val] == None:
return False
else:
for h_key in self.arr[hash_val]:
if h_key == key:
return True
return False
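# Minimal usage sketch (added for illustration; not part of the original file).
if __name__ == "__main__":
    hs = MyHashSet()
    hs.add(1)
    hs.add(2)
    print(hs.contains(1))  # expected: True
    hs.remove(2)
    print(hs.contains(2))  # expected: False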
| [
"[email protected]"
] | |
3c8d65c57a7bdbf95d8cdf533395ad17aa5f6a99 | 03e115c1937ec7bd1e249f82db0225828eaaa186 | /2-GUI (tkinter)/3imagenes.py | 5d2a2f4a214587d17e084764b7496fb9400deb31 | [] | no_license | mivargas/Master-python | 236c04205637ddd44d1cc879de2b7c48418153f9 | 9d1c04a8d658aa0dd8620ed792fa2133adfa57e7 | refs/heads/master | 2023-03-06T13:35:58.177058 | 2021-02-16T00:06:00 | 2021-02-16T00:06:00 | 321,731,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | from tkinter import *
from PIL import Image, ImageTk
ventana = Tk()
ventana.geometry("700x500")
Label(ventana, text="HOLA SOY MIGUEL").pack(anchor=W)
imagen = Image.open("./imagenes/labsxd.png")
render = ImageTk.PhotoImage(imagen)
Label(ventana, image=render).pack(anchor=E)
ventana.mainloop() | [
"[email protected]"
] | |
5c02e94311a37dbaf15d56d180884328cdaf081d | 761a20a79420bc6da491c5a487f6cf218f598b66 | /DemoTest/graphicsTkinter003.py | d8eb0e19c8eb84a4b963313d1955a8126b63903c | [] | no_license | namexiaohuihui/linuxlogin | 50b21e247d2e06c479907aa2f94f4b5979b4025d | ad8ffce5e87624f40f89eedc0229ba70cd66699b | refs/heads/master | 2020-03-24T16:56:06.313441 | 2018-08-03T14:06:48 | 2018-08-03T14:06:48 | 142,843,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | # -*- coding: utf-8 -*-
"""
@__author__ :DingDong
@file: graphicsTkinter001.py
@time: 2018/5/17 21:37
@Entry Name:operating
"""
from tkinter import *
from tkinter.messagebox import showinfo
def reply(name):
showinfo(title='弹窗',message='你的名字: %s !' % name)
top = Tk()
top.title('账号登陆')
# top.iconbitma('1178420.gif')
Label(top,text='请输入你的名字 :').pack(side=TOP)
ent = Entry(top)
ent.pack(side=TOP)
btn = Button(top,text='登陆',command=lambda :reply(ent.get()))
btn.pack(side=TOP)
top.mainloop()
| [
"[email protected]"
] | |
aef37705f286e46903ffcd71491000c635addd56 | 2dc17d12ff6ea9794177c81aa4f385e4e09a4aa5 | /archive/531LonelyPixelI.py | 85d2ad88ff367da5b050933632ef6d2bb1308b12 | [] | no_license | doraemon1293/Leetcode | 924b19f840085a80a9e8c0092d340b69aba7a764 | 48ba21799f63225c104f649c3871444a29ab978a | refs/heads/master | 2022-10-01T16:20:07.588092 | 2022-09-08T02:44:56 | 2022-09-08T02:44:56 | 122,086,222 | 0 | 0 | null | null | null | null | WINDOWS-1252 | Python | false | false | 913 | py | # coding=utf-8
'''
Created on 2017-04-11
@author: Administrator
'''
class Solution(object):
def findLonelyPixel(self, picture):
"""
:type picture: List[List[str]]
:rtype: int
"""
if picture:
rows = [0] * len(picture)
cols = [0] * len(picture[0])
for row in range(len(picture)):
for col in range(len(picture[row])):
if picture[row][col] == "B":
rows[row] += 1
cols[col] += 1
ans = 0
for row in range(len(picture)):
for col in range(len(picture[row])):
if rows[row] == 1 and cols[col] == 1 and picture[row][col] == "B":
ans += 1
return ans
else:
return 0
picture = ["BBB"]
print Solution().findLonelyPixel(picture)
| [
"[email protected]"
] | |
b09d2c84b36ef30c97f1bc81ce017ce57b4ec3d9 | 7f863ca7ed47981f69e42fc1add75ba4acad921a | /code-lab/DSA - Long Hike(Fractional Knapsack).py | f4d12e06067fcbb5aa404a68e6bcd836d7d613b3 | [
"CC0-1.0"
] | permissive | Nahid-Hassan/fullstack-software-development | e9f920be9a999c78f156e6102683b93a50c4e597 | 892ffb33e46795061ea63378279a6469de317b1a | refs/heads/main | 2023-08-19T14:16:57.801056 | 2021-09-29T15:34:35 | 2021-09-29T15:34:35 | 376,595,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,049 | py | weights = 1
weights_values = [(14, 2), (20, 4), (18, 3)]
def knapsack_slow(weights_values, weights):
values_by_weights = [(x / y, y) for x, y in weights_values]
values_by_weights.sort(reverse=True)
# print(values_by_weights)
print(values_by_weights)
bags = []
for i in range(len(values_by_weights)):
if sum(bags) == weights:
break
if values_by_weights[i][1] <= weights - sum(bags):
bags.append(values_by_weights[i][1])
# weights -= values_by_weights[i][1]
else:
# temp = values_by_weights[i][1]
            bags.append(weights - sum(bags))  # take only the remaining capacity
            print(weights, '----------')
return bags
def knapsack_fast(weights_values, weights):
bags = []
volume = 0
temp_weights = weights
values_by_weights = [(x/y, y) for x, y in weights_values]
    values_by_weights.sort(reverse=True)  # highest value/weight ratio first
    for ratio, weight in values_by_weights:
        if temp_weights == 0:
            break
        take = min(weight, temp_weights)  # whole item, or whatever capacity is left
        bags.append(take)
        volume += ratio * take
        temp_weights -= take
    return (bags, volume)
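# Illustrative run (added for clarity): the capacity of 1 comes from the
# module-level `weights` variable above; the expected greedy result is one
# unit taken from the highest value-per-weight item.
if __name__ == "__main__":
    print(knapsack_slow(weights_values, weights))
    print(knapsack_fast(weights_values, weights))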
| [
"[email protected]"
] | |
f1033a3a96ab29a179996c21324e2e9a90a9b91e | 8ecd899a8558ad0a644ecefa28faf93e0710f6fb | /ABC163/ABC163_A.py | bb4bb6c963e85231718a3704fce4761be0b06a79 | [] | no_license | yut-inoue/AtCoder_ABC | b93885547049788d452e86b442a4a9f5ee191b0e | 3d2c4b2b2f8871c75f86040ad07ccd7736ad3dbe | refs/heads/master | 2021-07-03T09:09:20.478613 | 2021-02-21T13:20:31 | 2021-02-21T13:20:31 | 227,140,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | r=int(input())
#a,b=map(int,input().split())
#l=list(map(int,input().split()))
#l=[list(map(int,input().split())) for i in range(n)]
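# circumference of a circle of radius r: 2 * pi * r (a truncated value of pi
# is enough for the precision required here)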
ans=2*r*3.14159
print('{:.5f}'.format(ans))
| [
"[email protected]"
] | |
bdd60ed37f1b70cbf11c34d6e1db1b4d9f5d6caa | ef0917d016a1a2b60b2ccbb18325eadab37b61a8 | /Mplayer_Reproductor.py | 8e3074068ccd5aa85d1863d99f949ea05e5a9ca3 | [] | no_license | sugar-activities/4355-activity | a8dcab94e01c5e45b78196a69df4391e48263e04 | acefbd1ea5eabaf2783326bb1c9e2bd8f4255007 | refs/heads/master | 2021-01-19T23:14:25.746896 | 2017-04-21T05:54:35 | 2017-04-21T05:54:35 | 88,936,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,529 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Mplayer_Reproductor.py por:
# Flavio Danesse <[email protected]>
# CeibalJAM! - Uruguay
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import gobject, time, os, subprocess, platform
from gettext import gettext as _
UPDATE_TIME = 30
STDOUT = "/tmp/jamediaout%d" % time.time()
STDERR = "/dev/null"
MPLAYER = "mplayer"
if "olpc" in platform.platform(): MPLAYER = "./mplayer"
class Mplayer_Reproductor(gobject.GObject):
__gsignals__ = {"cambio_estado":(gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, (gobject.TYPE_STRING,)),
"update_progress":(gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, (gobject.TYPE_FLOAT,)),
"video":(gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, (gobject.TYPE_BOOLEAN,)),
"mplayer_info":(gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, (gobject.TYPE_STRING,))}
    ''' Possible states: "playing Audio_Video", "paused Audio_Video", "stoped Audio_Video", "playing Radio", "playing TV", None '''
def __init__(self, id_pantalla):
self.__gobject_init__()
self.ejecutable = MPLAYER
self.id_pantalla = id_pantalla
self.tipo_de_datos = None
self.mplayer = None
self.salida = None
self.entrada = None
self.Actualizador = False
self.pista_actual = None
self.estado= None
self.progress= 0.0
#self.name_origen= None
estado= property(self.get_estado, self.set_estado)
progress= property(self.get_progress, self.set_progress)
'''
        estructura= "%s -wid %i -slave -idle -nolirc -quiet -volume 100" % (self.ejecutable, self.id_pantalla)
self.mplayer= subprocess.Popen(estructura, shell=True, stdin=subprocess.PIPE,
stdout=open(STDOUT,"w+b"), stderr=open(STDERR,"r+b"), universal_newlines=True)
self.entrada= self.mplayer.stdin
self.salida= open(STDOUT,"r")'''
self.set_estado(None)
    # ----------- Properties -----------
def get_estado(self):
return self.estado
def set_estado(self, valor= None):
self.estado = valor
self.emit("cambio_estado", self.get_estado())
def get_progress(self):
return self.progress
def set_progress(self, valor):
self.progress = valor
self.emit("update_progress", self.get_progress())
    # ----------- Properties -----------
    # ------------------------ PLAYBACK PROGRESS UPDATES ------------------------
def update_progress(self):
if not self.entrada.closed:
self.entrada.write("%s 0\n" % ("get_percent_pos"))
self.entrada.flush()
linea = self.salida.readline()
if linea:
if "ANS_PERCENT_POSITION" in linea:
self.get_progress_in_mplayer(linea)
elif "Video: no video" in linea or "Audio only file format detected" in linea:
self.emit("video", False)
elif "Cache" in linea:
self.get_progress_cache_in_mplayer(linea)
elif "Movie-Aspect" in linea:
self.emit("video", True)
elif "Starting playback" in linea:
#self.emit("cambio_estado", self.get_estado())
pass
elif "Position:" in linea or "VO:" in linea or "AO:" in linea:
# AO: [pulse] 22050Hz 2ch s16le (2 bytes per sample)
# VO: [xv] 540x360 => 540x360 Planar YV12
#self.emit("mplayer_info", linea)
pass
elif "Resolving" in linea or "Connecting" in linea:
#self.emit("mplayer_info", linea)
pass
elif "Name" in linea:
#self.name_origen= linea.split(": ")[-1]
pass
elif "Playing" in linea:
#self.name_origen= linea.split("Playing ")[-1]
pass
elif "Opening" in linea or "AUDIO" in linea or "Selected" in linea \
or "Genre" in linea or "Website" in linea or "Bitrate" in linea:
'''
Opening video decoder: [ffmpeg] FFmpeg's libavcodec codec family
Selected video codec: [ffh264] vfm: ffmpeg (FFmpeg H.264)
Opening audio decoder: [faad] AAC (MPEG2/4 Advanced Audio Coding)
AUDIO: 44100 Hz, 2 ch, s16le, 119.9 kbit/8.50% (ratio: 14989->176400)
Selected audio codec: [faad] afm: faad (FAAD AAC (MPEG-2/MPEG-4 Audio))'''
#self.emit("mplayer_info", linea)
pass
else:
'''
mplayer: Symbol `ff_codec_bmp_tags' has different size in shared object, consider re-linking
eos/Beautiful Liar - Beyonce ft Shakira.
stream 0: video (h264), -vid 0
[lavf] stream 1: audio (aac), -aid 0
VIDEOopen: No such file or directory
[MGA] Couldn't open: /dev/mga_vid
open: No such file or directory
[MGA] Couldn't open: /dev/mga_vid
[VO_TDFXFB] Can't open /dev/fb0: Permission denied.
[VO_3DFX] Unable to open /dev/3dfx.
Failed to open VDPAU backend libvdpau_nvidia.so: cannot open shared object file: No such file or directo==========================================================================
==========================================================================
==========================================================================
==========================================================================
AO: [pulse] 44100Hz 2ch s16le (2 bytes per sample)
Starting playback...
VO: [xv] 320x240 => 320x240 Planar YV12'''
pass
return True
def get_progress_in_mplayer(self, linea):
try:
if "Cache size" in linea:
return
try:
progress = float(linea[linea.index('=')+1:-1])
if self.get_progress()!= progress:
self.set_progress(progress)
if self.get_progress() >= 100.0:
self.set_estado("stoped Audio_Video")
except Exception, e:
print "Error en Progreso de Reproducción: %s" % (e)
#print linea
except Exception, e:
print "Error en Progreso de Reproducción: %s" % (e)
#print linea
def get_progress_cache_in_mplayer(self, linea):
if "Cache not responding" in linea: return
try:
if "Cache size" in linea:
return
try:
progress = float((linea.split(": ")[-1]).split("%")[0])/20*100
except:
return
if self.get_progress()!= progress:
self.set_progress(progress)
#self.emit("mplayer_info", "Cargando Caché")
except Exception, e:
print "Error en Carga de Caché: %s" % (e)
#print linea
    # ------------------------ PLAYBACK PROGRESS UPDATES ------------------------
    # ------------------------ PLAYBACK -------------------------------------------
def seek(self, valor):
if self.Actualizador:
gobject.source_remove(self.Actualizador)
self.Actualizador = False
self.entrada.write('seek %s 1 0\n' % (float(valor)))
self.entrada.flush()
self.set_estado("playing Audio_Video")
self.Actualizador = gobject.timeout_add(UPDATE_TIME, self.update_progress)
def play(self, direccion, tipo_de_datos):
self.tipo_de_datos = tipo_de_datos
if tipo_de_datos == "Radio":
self.play_radio(direccion)
elif tipo_de_datos == "TV":
self.play_tv(direccion)
elif tipo_de_datos == "Audio_Video":
self.play_Audio_Video(direccion)
def play_Audio_Video(self, direccion):
self.pista_actual = "%s%s%s" % ("\"", direccion, "\"")
self.play_archivo(self.pista_actual)
def play_archivo(self, direccion):
ejecutable_cache_pantalla = "%s -cache %i -wid %i" % (self.ejecutable, 1024, self.id_pantalla)
estructura= "%s -slave -idle -nolirc -rtc -nomouseinput -noconsolecontrols -nojoystick" % (ejecutable_cache_pantalla)
self.mplayer = subprocess.Popen(estructura, shell=True, stdin=subprocess.PIPE,
stdout=open(STDOUT,"w+b"), stderr=open(STDOUT,"r+b"), universal_newlines=True)
self.entrada = self.mplayer.stdin
self.salida = open(STDOUT,"r")
self.entrada.write("loadfile %s 0\n" % direccion)
self.entrada.flush()
if self.Actualizador:
gobject.source_remove(self.Actualizador)
self.Actualizador = False
self.Actualizador = gobject.timeout_add(UPDATE_TIME, self.update_progress)
self.pista_actual = direccion
self.set_estado("playing Audio_Video")
self.emit("mplayer_info", self.pista_actual)
def play_radio(self, direccion):
ejecutable_cache_pantalla = "%s -cache %i" % (self.ejecutable, 32)
estructura= "%s -slave -idle -nolirc -quiet -rtc -nomouseinput -noconsolecontrols -nojoystick" % (ejecutable_cache_pantalla)
self.mplayer= subprocess.Popen(estructura, shell=True, stdin=subprocess.PIPE,
stdout=open(STDOUT,"w+b"), stderr=open(STDOUT,"r+b"), universal_newlines=True)
self.entrada= self.mplayer.stdin
self.salida= open(STDOUT,"r")
self.entrada.write("loadfile %s 0\n" % direccion)
self.entrada.flush()
if self.Actualizador:
gobject.source_remove(self.Actualizador)
self.Actualizador = False
#self.Actualizador= gobject.timeout_add(UPDATE_TIME, self.update_progress)
self.pista_actual= direccion
self.set_estado("playing Radio")
self.emit("mplayer_info", self.pista_actual)
def play_tv(self, direccion):
ejecutable_cache_pantalla = "%s -cache %i -wid %i" % (self.ejecutable, 1024, self.id_pantalla)
estructura= "%s -slave -idle -nolirc -quiet -rtc -nomouseinput -noconsolecontrols -nojoystick" % (ejecutable_cache_pantalla)
self.mplayer= subprocess.Popen(estructura, shell=True, stdin=subprocess.PIPE,
stdout=open(STDOUT,"w+b"), stderr=open(STDOUT,"r+b"), universal_newlines=True)
self.entrada= self.mplayer.stdin
self.salida= open(STDOUT,"r")
self.entrada.write("loadfile %s 0\n" % direccion)
self.entrada.flush()
if self.Actualizador:
gobject.source_remove(self.Actualizador)
self.Actualizador = False
#self.Actualizador= gobject.timeout_add(UPDATE_TIME, self.update_progress)
self.pista_actual= direccion
self.set_estado("playing TV")
self.emit("mplayer_info", self.pista_actual)
def pause_play(self):
try:
if self.entrada:
if self.get_estado() == "playing Audio_Video": # pausa
self.entrada.write('pause 0\n')
self.entrada.flush()
if self.Actualizador:
gobject.source_remove(self.Actualizador)
self.Actualizador = False
self.set_estado("paused Audio_Video")
self.emit("mplayer_info", _("Reproducción Pausada"))
elif self.get_estado() == "paused Audio_Video":
self.entrada.write('pause 0\n') # hace unpause
self.entrada.flush()
if self.Actualizador:
gobject.source_remove(self.Actualizador)
self.Actualizador = False
self.Actualizador = gobject.timeout_add(UPDATE_TIME, self.update_progress)
self.set_estado("playing Audio_Video")
self.emit("mplayer_info", "%s: %s" % (_("Reproduciendo"), self.pista_actual))
except Exception, e:
print "HA OCURRIDO UN ERROR EN PAUSE_PLAY DEL REPRODUCTOR", e
def quit(self, widget=None):
try:
if self.entrada:
self.entrada.write('%s 0\n' % "quit")
self.entrada.flush()
if self.Actualizador:
gobject.source_remove(self.Actualizador)
self.Actualizador = False
except Exception, e:
print "HA OCURRIDO UN ERROR EN QUIT DEL REPRODUCTOR", e
self.set_progress(0.0)
if os.path.exists(STDOUT): os.unlink(STDOUT)
self.pista_actual = None
self.set_estado(None)
self.emit("mplayer_info", _("Reproducción Detenida"))
    # ------------------------ PLAYBACK -------------------------------------------
| [
"[email protected]"
] | |
abc19a89f586d28d24cd2468c387a49113282b1c | 03520abb58a66aeed9a556d53e3a21006af02dde | /named_storms/migrations/0079_nsempsa_covered_data_snapshot.py | 73c0e3338456b81ae6bf6fe88566507a8f794e9c | [] | no_license | flackdl/cwwed | 23ce1d1a5e48a57ee2cb3229860f1b97ccc81636 | 0a1454897d397cd5e1652643616abe883ccc853b | refs/heads/master | 2023-07-21T20:05:35.093270 | 2023-06-29T15:29:26 | 2023-06-29T15:29:26 | 118,942,664 | 1 | 0 | null | 2023-09-12T21:56:59 | 2018-01-25T17:06:47 | Python | UTF-8 | Python | false | false | 530 | py | # Generated by Django 2.2.6 on 2019-10-17 17:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('named_storms', '0078_auto_20191017_1705'),
]
operations = [
migrations.AddField(
model_name='nsempsa',
name='covered_data_snapshot',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='named_storms.NamedStormCoveredDataSnapshot'),
),
]
| [
"[email protected]"
] | |
a5c3f6f93d1f5122a502c4bff3c84593cf568c96 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_201/2402.py | f7e5ebea73232aadb5a3d4e17099e09452301ea6 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | def increment_by_value(d, l, index, value):
if index in d:
d[index] += value
else:
d[index] = value
l.append(index)
l.sort()
def occupy(s):
num_stalls, num_people = [int(x) for x in s.split(" ")]
free, free_index = dict(), []
free[num_stalls] = 1
free_index.append(num_stalls)
count = 0
while 1:
#print("free ", free)
#print("free index ", free_index)
length = free_index[-1]
num_served = free[length]
free[length] = 0
free_index.remove(length)
#print("serving ", num_served, " people")
if length % 2 == 0:
increment_by_value(free, free_index, length // 2, num_served)
increment_by_value(free, free_index, length // 2 - 1, num_served)
max, min = length // 2, length // 2 - 1
else:
increment_by_value(free, free_index, length // 2, num_served * 2)
#free[length // 2] += 1
max, min = length // 2, length // 2
count += num_served
if count >= num_people:
return max, min
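# --- Editor's note: hypothetical worked example, not part of the original submission ---
# Tracing occupy() on the small Code Jam samples:
#   occupy("4 2") -> (1, 0)  # the gap of 4 splits into 2 and 1, then the gap of 2 into 1 and 0
#   occupy("5 5") -> (0, 0)  # 5 -> two gaps of 2 -> gaps of 1 and 0 -> the last person sees empty sides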
def main():
l = int(input())
for i in range(l):
max, min= occupy(input())
print("Case #{}: {} {}".format(i + 1, max, min))
main() | [
"[email protected]"
] | |
cfbf28112e456f0999b8c8dc64ea310f31fb5227 | ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f | /Sourcem8/pirates/leveleditor/worldData/del_fuego_building_int_tattoo.py | 615b94089f0e48bcfbf591d8fc665740418ee377 | [] | no_license | BrandonAlex/Pirates-Online-Retribution | 7f881a64ec74e595aaf62e78a39375d2d51f4d2e | 980b7448f798e255eecfb6bd2ebb67b299b27dd7 | refs/heads/master | 2020-04-02T14:22:28.626453 | 2018-10-24T15:33:17 | 2018-10-24T15:33:17 | 154,521,816 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,540 | py | from pandac.PandaModules import Point3, VBase3, Vec4
objectStruct = {
'Objects': {
'1153434880.63dzlu0': {
'Type': 'Building Interior',
'Name': '',
'AdditionalData': [
'interior_spanish_store_tattoo'],
'Instanced': True,
'Objects': {
'1201136836.84dxschafe': {
'Type': 'Door Locator Node',
'Name': 'door_locator',
'Hpr': VBase3(-180.0, 0.0, 0.0),
'Pos': Point3(-7.141, -11.488, 0.0060000000000000001),
'Scale': VBase3(1.0, 1.0, 1.0) } },
'Visual': {
'Model': 'models/buildings/interior_spanish_npc' } } },
'Node Links': [],
'Layers': {
'Collisions': [
'1184008208.59kmuller',
'1184016064.62kmuller',
'1184013852.84kmuller',
'1185822696.06kmuller',
'1184006140.32kmuller',
'1184002350.98kmuller',
'1184007573.29kmuller',
'1184021176.59kmuller',
'1184005963.59kmuller',
'1188324241.31akelts',
'1184006537.34kmuller',
'1184006605.81kmuller',
'1187139568.33kmuller',
'1188324186.98akelts',
'1184006730.66kmuller',
'1184007538.51kmuller',
'1184006188.41kmuller',
'1184021084.27kmuller',
'1185824396.94kmuller',
'1185824250.16kmuller',
'1185823630.52kmuller',
'1185823760.23kmuller',
'1185824497.83kmuller',
'1185824751.45kmuller',
'1187739103.34akelts',
'1188323993.34akelts',
'1184016538.29kmuller',
'1185822200.97kmuller',
'1184016225.99kmuller',
'1195241421.34akelts',
'1195242796.08akelts',
'1184020642.13kmuller',
'1195237994.63akelts',
'1184020756.88kmuller',
'1184020833.4kmuller',
'1185820992.97kmuller',
'1185821053.83kmuller',
'1184015068.54kmuller',
'1184014935.82kmuller',
'1185821432.88kmuller',
'1185821701.86kmuller',
'1195240137.55akelts',
'1195241539.38akelts',
'1195238422.3akelts',
'1195238473.22akelts',
'1185821453.17kmuller',
'1184021269.96kmuller',
'1185821310.89kmuller',
'1185821165.59kmuller',
'1185821199.36kmuller',
'1185822035.98kmuller',
'1184015806.59kmuller',
'1185822059.48kmuller',
'1185920461.76kmuller',
'1194984449.66akelts',
'1185824206.22kmuller',
'1184003446.23kmuller',
'1184003254.85kmuller',
'1184003218.74kmuller',
'1184002700.44kmuller',
'1186705073.11kmuller',
'1187658531.86akelts',
'1186705214.3kmuller',
'1185824927.28kmuller',
'1184014204.54kmuller',
'1184014152.84kmuller'] },
'ObjectIds': {
'1153434880.63dzlu0': '["Objects"]["1153434880.63dzlu0"]',
'1201136836.84dxschafe': '["Objects"]["1153434880.63dzlu0"]["Objects"]["1201136836.84dxschafe"]' } }
extraInfo = {
'camPos': Point3(-1202.78, 260.68599999999998, 149.845),
'camHpr': VBase3(-98.880099999999999, -28.781600000000001, 0),
'focalLength': 1.3999999761599999 }
| [
"[email protected]"
] | |
cf7a5c949b145a86634b083e8acd0620cef804a3 | de4817e9c0f16283c4c6f9dcec3a0c36f49adf0f | /pytest_cases/plugin.py | b79d808e778403f49ecb8187fb7308ad67041226 | [
"BSD-3-Clause"
] | permissive | yashtodi94/python-pytest-cases | 3422cd4f399543b5add22d8631980b20bb92d68a | 81bd5b3d2a7b358e8d9f97dae77654f6bc9c7999 | refs/heads/master | 2020-06-27T18:22:06.798892 | 2019-07-31T07:40:56 | 2019-07-31T07:40:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42,492 | py | from collections import OrderedDict, namedtuple
from copy import copy
from distutils.version import LooseVersion
from warnings import warn
from functools import partial
import pytest
from pytest_cases.common import get_pytest_nodeid, get_pytest_function_scopenum, \
is_function_node, get_param_names, get_pytest_scopenum, get_param_argnames_as_list
from pytest_cases.main_fixtures import NOT_USED, is_fixture_union_params, UnionFixtureAlternative, apply_id_style
try: # python 3.3+
from inspect import signature
except ImportError:
from funcsigs import signature
try: # python 3.3+ type hints
from typing import Optional, List, Tuple, Union, Iterable
from _pytest.python import CallSpec2
except ImportError:
pass
_DEBUG = False
# @hookspec(firstresult=True)
# @pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_collection(session):
# override the fixture manager's method
session._fixturemanager.getfixtureclosure = partial(getfixtureclosure, session._fixturemanager)
class FixtureDefsCache(object):
"""
    The object plays the role of a 'cache' for fixture definitions.
"""
__slots__ = 'fm', 'nodeid', 'cached_fix_defs'
def __init__(self, fm, nodeid):
self.fm = fm
self.nodeid = nodeid
self.cached_fix_defs = dict()
def get_fixture_defs(self, fixname):
try:
# try to retrieve it from cache
fixdefs = self.cached_fix_defs[fixname]
except KeyError:
# otherwise get it and store for next time
fixdefs = self.fm.getfixturedefs(fixname, self.nodeid)
self.cached_fix_defs[fixname] = fixdefs
return fixdefs
class FixtureClosureNode(object):
__slots__ = 'parent', 'fixture_defs', \
'split_fixture_name', 'split_fixture_discarded_names', 'children', \
'_as_list', 'all_fixture_defs'
def __init__(self, parent_node=None):
self.parent = parent_node
# these will be set after closure has been built
self.fixture_defs = None
self.split_fixture_name = None
self.split_fixture_discarded_names = []
self.children = OrderedDict()
# this will be created after the first time the object is converted to a list (cache)
self._as_list = None
self.all_fixture_defs = None
# ------ tree
def get_leaves(self):
if self.has_split():
return [n for c in self.children.values() for n in c.get_leaves()]
else:
return [self]
# ------
def to_str(self, indent_nb=0, with_children=True, with_discarded=True):
"""
Provides a string representation, either with all the subtree (default) or without (with_children=False)
You can also remove the "discarded" information for clarity with with_discarded=False
:param indent_nb:
:param with_children:
:param with_discarded:
:return:
"""
indent = " " * indent_nb
if not self.is_closure_built():
str_repr = "<pending, incomplete>"
else:
str_repr = "%s(%s)" % (indent, ",".join([("%s" % f) for f in self.fixture_defs.keys()]))
if with_discarded:
str_repr += " (discarded: %s)" % self.split_fixture_discarded_names
if self.has_split() and with_children:
children_str_prefix = "\n%s - " % indent
children_str = children_str_prefix + children_str_prefix.join([c.to_str(indent_nb=indent_nb + 1)
for c in self.children.values()])
str_repr = str_repr + " split: " + self.split_fixture_name + children_str
return str_repr
def __repr__(self):
return self.to_str()
# ---- list facade
def __iter__(self):
return iter(self.to_list())
def __getitem__(self, item):
return self.to_list()[item]
def __setitem__(self, key, value):
# This is called in Pytest 4+. TODO how should we behave ?
warn("WARNING the new order is not taken into account !!")
pass
def index(self, *args):
return self.to_list().index(*args)
def to_list(self):
"""
Converts self to a list to get all fixture names, and caches the result.
The first time this is called, a non-none arg2fixturedefs object Must be provided to sort the fixture names
according to scope.
TODO maybe this sorting should actually be propagated down the tree so that it is done per branch
:param arg2fixturedefs:
:return:
"""
if self._as_list is None:
# crawl the tree to get the list of unique fixture names
fixturenames_closure = self._to_list()
if LooseVersion(pytest.__version__) >= LooseVersion('3.5.0'):
# sort by scope
def sort_by_scope(arg_name):
try:
fixturedefs = self.get_all_fixture_defs()[arg_name]
except KeyError:
return get_pytest_function_scopenum()
else:
return fixturedefs[-1].scopenum
fixturenames_closure.sort(key=sort_by_scope)
self._as_list = fixturenames_closure
return self._as_list
def _to_list(self):
""" Returns a list of all fixture names used (with no redundancy) """
lst = []
self._append_to(lst)
# eliminate redundancy
unique_lst = _make_unique(lst)
# TODO remove for efficiency
assert set(unique_lst) == set(lst)
return unique_lst
def _append_to(self, lst):
"""Appends all fixture names of this subtree to the given list"""
# first append the fixture names
lst += list(self.fixture_defs.keys())
# then if there is a split at this node
if self.has_split():
# add the split fixture > not needed anymore
# lst.append(self.split_fixture_name)
# add all children
for c in self.children.values():
c._append_to(lst)
# ----
def get_all_fixture_defs(self):
if self.all_fixture_defs is None:
# collect
self.all_fixture_defs = self._get_all_fixture_defs()
return self.all_fixture_defs
def _get_all_fixture_defs(self):
all = OrderedDict()
for k, v in self.fixture_defs.items():
if v is not None:
all[k] = v
for c in self.children.values():
all.update(c.get_all_fixture_defs())
return all
# ---- utils to build the closure
def build_closure(self,
fixture_defs_mgr, # type: FixtureDefsCache
initial_fixture_names # type: Iterable[str]
):
self._build_closure(fixture_defs_mgr, initial_fixture_names)
def is_closure_built(self):
return self.fixture_defs is not None
def already_knows_fixture(self, fixture_name):
""" Return True if this fixture is known by this node or one of its parents """
if fixture_name in self.fixture_defs:
return True
elif self.parent is None:
return False
else:
return self.parent.already_knows_fixture(fixture_name)
def _build_closure(self,
fixture_defs_mgr, # type: FixtureDefsCache
initial_fixture_names # type: Iterable[str]
):
"""
:param arg2fixturedefs: set of fixtures already known by the parent node
:return: nothing (the input arg2fixturedefs is modified)
"""
# Grab all dependencies of all fixtures present at this node and add them to either this or to nodes below.
# -- first switch this object from 'pending' to 'under construction' if needed
# (indeed we now authorize and use the possibility to call this twice. see split() )
if self.fixture_defs is None:
self.fixture_defs = OrderedDict()
# -- then for all pending, add them with their dependencies
pending_fixture_names = list(initial_fixture_names)
while len(pending_fixture_names) > 0:
fixname = pending_fixture_names.pop(0)
# if the fixture is already known in this node or above, do not care
if self.already_knows_fixture(fixname):
continue
# else grab the fixture definition(s) for this fixture name for this test node id
fixturedefs = fixture_defs_mgr.get_fixture_defs(fixname)
if not fixturedefs:
# fixture without definition: add it
self.add_required_fixture(fixname, None)
else:
# the actual definition is the last one
_fixdef = fixturedefs[-1]
_params = _fixdef.params
if _params is not None and is_fixture_union_params(_params):
# create an UNION fixture
# transform the _params into a list of names
alternative_f_names = UnionFixtureAlternative.to_list_of_fixture_names(_params)
# if there are direct dependencies that are not the union members, add them to pending
non_member_dependencies = [f for f in _fixdef.argnames if f not in alternative_f_names]
pending_fixture_names += non_member_dependencies
# propagate WITH the pending
self.split_and_build(fixture_defs_mgr, fixname, fixturedefs, alternative_f_names,
pending_fixture_names)
# empty the pending
pending_fixture_names = []
else:
# normal fixture
self.add_required_fixture(fixname, fixturedefs)
# add all dependencies in the to do list
dependencies = _fixdef.argnames
# - append: was pytest default
# pending_fixture_names += dependencies
# - prepend: makes much more sense
pending_fixture_names = list(dependencies) + pending_fixture_names
# ------ tools to add new fixture names during closure construction
def add_required_fixture(self, new_fixture_name, new_fixture_defs):
""" Adds some required fixture names to this node. Returns True if new fixtures were added here (not in child)"""
if self.already_knows_fixture(new_fixture_name):
return
elif not self.has_split():
# add_required_fixture locally
if new_fixture_name not in self.fixture_defs:
self.fixture_defs[new_fixture_name] = new_fixture_defs
else:
# add_required_fixture in each child
for c in self.children.values():
c.add_required_fixture(new_fixture_name, new_fixture_defs)
def split_and_build(self,
fixture_defs_mgr, # type: FixtureDefsCache
split_fixture_name, # type: str
split_fixture_defs, # type: Tuple[FixtureDefinition]
alternative_fixture_names, # type: List[str]
pending_fixtures_list #
):
""" Declares that this node contains a union with alternatives (child nodes=subtrees) """
if self.has_split():
raise ValueError("This should not happen anymore")
# # propagate the split on the children: split each of them
# for n in self.children.values():
# n.split_and_build(fm, nodeid, split_fixture_name, split_fixture_defs, alternative_fixture_names)
else:
# add the split (union) name to known fixtures
self.add_required_fixture(split_fixture_name, split_fixture_defs)
# remember it
self.split_fixture_name = split_fixture_name
# create the child nodes
for f in alternative_fixture_names:
# create the child node
new_c = FixtureClosureNode(self)
self.children[f] = new_c
# set the discarded fixture names
new_c.split_fixture_discarded_names = [g for g in alternative_fixture_names if g != f]
# perform the propagation:
# create a copy of the pending fixtures list and prepend the fixture used
pending_for_child = copy(pending_fixtures_list)
# (a) first propagate all child's dependencies
new_c._build_closure(fixture_defs_mgr, [f])
# (b) then the ones required by parent
new_c._build_closure(fixture_defs_mgr, pending_for_child)
def has_split(self):
return self.split_fixture_name is not None
def get_not_always_used(self):
"""Returns the list of fixtures used by this subtree, that are not always used"""
results_list = []
# initial list is made of fixtures that are in the children
initial_list = self.gather_all_required(include_parents=False)
for c in self.get_leaves():
j = 0
for i in range(len(initial_list)):
fixture_name = initial_list[j]
if fixture_name not in c.gather_all_required():
del initial_list[j]
results_list.append(fixture_name)
else:
j += 1
return results_list
def gather_all_required(self, include_children=True, include_parents=True):
"""
Returns a list of all fixtures required by the subtree at this node
:param include_children:
:return:
"""
# first the fixtures required by this node
required = list(self.fixture_defs.keys())
# then the ones required by the parents
if include_parents and self.parent is not None:
required = required + self.parent.gather_all_required(include_children=False)
# then the ones from all the children
if include_children:
for child in self.children.values():
required = required + child.gather_all_required(include_parents=False)
return required
def requires(self, fixturename):
"""
Returns True if the fixture with this name is required by the subtree at this node
:param fixturename:
:return:
"""
return fixturename in self.gather_all_required()
def gather_all_discarded(self):
"""
Returns a list of all fixture names discarded during splits from the parent node down to this node.
Note: this does not include the split done at this node if any, nor all of its subtree.
:return:
"""
discarded = list(self.split_fixture_discarded_names)
if self.parent is not None:
discarded = discarded + self.parent.gather_all_discarded()
return discarded
# ------ tools to see the tree as a list of alternatives
def print_alternatives(self):
return FixtureClosureNode.print_alternatives_list(*self.get_alternatives())
@staticmethod
def print_alternatives_list(filters_list, fixtures_list):
for f, p in zip(filters_list, fixtures_list):
print(f, p)
def get_alternatives(self):
"""
Returns the alternatives
- a list of dictionaries union_fixture_name: value representing the filters on this alternative
- a list of tuples of fixture names used by each alternative
- a list of tuples of discarded fixture names in each alternative
:return:
"""
if self.has_split():
partitions_list = []
filters_list = []
discarded_list = []
for k, c in self.children.items():
child_filters_dct, child_partitions, child_discarded = c.get_alternatives()
for f_dct, p, d in zip(child_filters_dct, child_partitions, child_discarded):
# append a partition for this child:
# - filter
_f_dct = f_dct.copy()
_f_dct[self.split_fixture_name] = k
filters_list.append(_f_dct)
# - fixtures used
partitions_list.append(_make_unique(list(self.fixture_defs.keys()) + p))
# - fixtures not used.
discarded_list.append(_make_unique(self.split_fixture_discarded_names
+ [df for df in d if df not in self.fixture_defs.keys()]))
return filters_list, partitions_list, discarded_list
else:
# return a single partition containing all fixture names
return [dict()], [list(self.fixture_defs.keys())], [list(self.split_fixture_discarded_names)]
def merge(new_items, into_list):
"""
Appends items from `new_items` into `into_list`, only if they are not already there.
:param new_items:
:param into_list:
:return:
"""
at_least_one_added = False
for l in new_items:
if l not in into_list:
into_list.append(l)
at_least_one_added = True
return at_least_one_added
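# Editor's note (hypothetical illustration, not in the original source):
# merge([1, 2], [2, 3]) mutates the second list in place to [2, 3, 1] and returns True,
# since at least one new item was appended.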
def getfixtureclosure(fm, fixturenames, parentnode, ignore_args=()):
# first retrieve the normal pytest output for comparison
kwargs = dict()
if LooseVersion(pytest.__version__) >= LooseVersion('4.6.0'):
# new argument "ignore_args" in 4.6+
kwargs['ignore_args'] = ignore_args
if LooseVersion(pytest.__version__) >= LooseVersion('3.7.0'):
# three outputs
initial_names, ref_fixturenames, ref_arg2fixturedefs = \
fm.__class__.getfixtureclosure(fm, fixturenames, parentnode, **kwargs)
else:
# two outputs
ref_fixturenames, ref_arg2fixturedefs = fm.__class__.getfixtureclosure(fm, fixturenames, parentnode)
# now let's do it by ourselves.
parentid = parentnode.nodeid
# Create closure
# -- auto-use fixtures
_init_fixnames = fm._getautousenames(parentid)
# -- required fixtures/params.
# ********* fix the order of initial fixtures: indeed this order may not be the right one ************
# this only works when pytest version is > 3.4, otherwise the parent node is a Module
if is_function_node(parentnode):
# grab all the parametrization on that node and fix the order.
# Note: on pytest >= 4 the list of param_names is probably the same than the `ignore_args` input
param_names = get_param_names(parentnode)
sorted_fixturenames = sort_according_to_ref_list(fixturenames, param_names)
# **********
merge(sorted_fixturenames, _init_fixnames)
else:
# we cannot sort yet
merge(fixturenames, _init_fixnames)
# Finally create the closure tree
if _DEBUG:
print("Creating closure for %s:" % parentid)
fixture_defs_mger = FixtureDefsCache(fm, parentid)
fixturenames_closure_node = FixtureClosureNode()
fixturenames_closure_node.build_closure(fixture_defs_mger, _init_fixnames)
if _DEBUG:
print("Closure for %s completed:" % parentid)
print(fixturenames_closure_node)
# sort the fixture names (note: only in recent pytest)
fixturenames_closure_node.to_list()
# FINALLY compare with the previous behaviour TODO remove when in 'production' ?
if len(ignore_args) == 0:
assert fixturenames_closure_node.get_all_fixture_defs() == ref_arg2fixturedefs
# if fixturenames_closure_node.has_split():
# # order might be changed
# assert set((str(f) for f in fixturenames_closure_node)) == set(ref_fixturenames)
# else:
# # same order
# if len(p_markers) < 2:
# assert list(fixturenames_closure_node) == ref_fixturenames
# else:
# NOW different order happens all the time because of the "prepend" strategy in the closure building
# which makes much more sense/intuition.
assert set((str(f) for f in fixturenames_closure_node)) == set(ref_fixturenames)
# and store our closure in the node
# note as an alternative we could return a custom object in place of the ref_fixturenames
# store_union_closure_in_node(fixturenames_closure_node, parentnode)
if LooseVersion(pytest.__version__) >= LooseVersion('3.7.0'):
our_initial_names = sorted_fixturenames # initial_names
return our_initial_names, fixturenames_closure_node, ref_arg2fixturedefs
else:
return fixturenames_closure_node, ref_arg2fixturedefs
# ------------ hack to store and retrieve our custom "closure" object
# def store_union_closure_in_node(fixturenames_closure_node, parentnode):
# parentnode.advanced_fixture_closure = fixturenames_closure_node
def retrieve_union_closure_from_metafunc(metafunc):
return metafunc.fixturenames
# ---------------------------------------
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_generate_tests(metafunc):
"""
    We use this hook to replace the 'parametrize' method of `metafunc` with our own partial function below, before it is called by pytest
:param metafunc:
:return:
"""
# override the parametrize method.
# Note we could do it in a static way in pytest_sessionstart or plugin init hook, but we would need to save the
metafunc.parametrize = partial(parametrize, metafunc)
# now let pytest parametrize the call as usual
_ = yield
class UnionParamz(namedtuple('UnionParamz', ['union_fixture_name', 'alternative_names', 'ids', 'scope', 'kwargs'])):
""" Represents some parametrization to be applied, for a union fixture """
__slots__ = ()
def __str__(self):
return "[UNION] %s=[%s], ids=%s, scope=%s, kwargs=%s" \
"" % (self.union_fixture_name, ','.join([str(a) for a in self.alternative_names]),
self.ids, self.scope, self.kwargs)
class NormalParamz(namedtuple('NormalParamz', ['argnames', 'argvalues', 'indirect', 'ids', 'scope', 'kwargs'])):
""" Represents some parametrization to be applied """
__slots__ = ()
def __str__(self):
return "[NORMAL] %s=[%s], indirect=%s, ids=%s, scope=%s, kwargs=%s" \
"" % (self.argnames, self.argvalues, self.indirect, self.ids, self.scope, self.kwargs)
def parametrize(metafunc, argnames, argvalues, indirect=False, ids=None, scope=None, **kwargs):
"""
This alternate implementation of metafunc.parametrize creates a list of calls that is not just the cartesian
product of all parameters (like the pytest behaviour).
    Instead, it offers an alternate list of calls taking into account all union fixtures.
For this, it replaces the `metafunc._calls` attribute with a `CallsReactor` instance, and feeds it with all parameters
and parametrized fixtures independently (not doing any cross-product).
The resulting `CallsReactor` instance is then able to dynamically behave like the correct list of calls, lazy-creating
that list when it is used.
"""
# create our special container object if needed
if not isinstance(metafunc._calls, CallsReactor):
# first call: should be an empty list
if len(metafunc._calls) > 0:
raise ValueError("This should not happen - please file an issue")
metafunc._calls = CallsReactor(metafunc)
# grab it
calls_reactor = metafunc._calls
# detect union fixtures
if is_fixture_union_params(argvalues):
if ',' in argnames or not isinstance(argnames, str):
raise ValueError("Union fixtures can not be parametrized")
union_fixture_name = argnames
union_fixture_alternatives = argvalues
if indirect is False or len(kwargs) > 0:
raise ValueError("indirect cannot be set on a union fixture, as well as unknown kwargs")
# add a union parametrization in the queue (but do not apply it now)
calls_reactor.append(UnionParamz(union_fixture_name, union_fixture_alternatives, ids, scope, kwargs))
else:
# add a normal parametrization in the queue (but do not apply it now)
calls_reactor.append(NormalParamz(argnames, argvalues, indirect, ids, scope, kwargs))
# put our object back in place - not needed anymore
# metafunc._calls = calls_reactor
class CallsReactor:
"""
This object replaces the list of calls that was in `metafunc._calls`.
It behaves like a list, but it actually builds that list dynamically based on all parametrizations collected
from the custom `metafunc.parametrize` above.
There are therefore three steps:
    - when `metafunc.parametrize` is called, this object gets called through `append`. A parametrization
      order gets stored in `self._pending`
    - when this object is first read as a list, all parametrization orders in `self._pending` are applied against
      the fixture closure tree to build the call list, and `self._pending` is discarded. This is done in
      `create_call_list_from_pending_parametrizations`.
    - finally, the cached list in `self._call_list` is returned. This will also be the case in
      subsequent usages of this object.
"""
__slots__ = 'metafunc', '_pending', '_call_list'
def __init__(self, metafunc):
self.metafunc = metafunc
self._pending = []
self._call_list = None
    # -- methods for providing parametrization orders without executing them --
def append(self,
parametrization # type: Union[UnionParamz, NormalParamz]
):
self._pending.append(parametrization)
def print_parametrization_list(self):
"""Helper method to print all pending parametrizations in this reactor """
print("\n".join([str(p) for p in self._pending]))
# -- list facade --
def __iter__(self):
return iter(self.calls_list)
def __getitem__(self, item):
return self.calls_list[item]
@property
def calls_list(self):
"""
        Returns the list of calls. This property relies on `self._call_list`, which is lazily created on first
        access from the pending parametrizations (see `create_call_list_from_pending_parametrizations`).
:return:
"""
if self._call_list is None:
# create the definitive tree.
self.create_call_list_from_pending_parametrizations()
return self._call_list
# --- tree creation (executed once the first time this object is used as a list)
def create_call_list_from_pending_parametrizations(self):
"""
Takes all parametrization operations that are pending in `self._pending`,
and creates a parametrization tree out of them.
self._pending is set to None afterwards
:return:
"""
# temporarily remove self from the _calls field, we'll need to change it
bak_calls = self.metafunc._calls
assert bak_calls is self
# grab the fixtures closure tree created previously (see getfixtureclosure above)
fix_closure_tree = retrieve_union_closure_from_metafunc(self.metafunc)
# ------ parametrize the calls --------
# create a dictionary of pending things to parametrize, and only keep the first parameter in case of several
pending_items = [(get_param_argnames_as_list(p[0])[0], p) for p in self._pending]
pending = OrderedDict(pending_items)
if _DEBUG:
print()
print("---- pending parametrization ----")
self.print_parametrization_list()
print("---------------------------------")
print()
print("Applying all of them in the closure tree nodes:")
calls, nodes = self._process_node(fix_closure_tree, pending.copy(), [])
self._cleanup_calls_list(fix_closure_tree, calls, nodes, pending)
if _DEBUG:
print("\n".join(["%s[%s]: funcargs=%s, params=%s" % (get_pytest_nodeid(self.metafunc),
c.id, c.funcargs, c.params)
for c in calls]))
print()
self._call_list = calls
# put back self as the _calls facade
self.metafunc._calls = bak_calls
# forget about all parametrizations now - this wont happen again
self._pending = None
def _cleanup_calls_list(self, fix_closure_tree, calls, nodes, pending):
"""
Cleans the calls list so that all calls contain a value for all parameters. This is basically
about adding "NOT_USED" parametrization everywhere relevant.
:param calls:
:param nodes:
:param pending:
:return:
"""
nb_calls = len(calls)
if nb_calls != len(nodes):
raise ValueError("This should not happen !")
# function_scope_num = get_pytest_function_scopenum()
for i in range(nb_calls):
c, n = calls[i], nodes[i]
# A/ set to "not used" all parametrized fixtures that were not used in some branches
for fixture, p_to_apply in pending.items():
if fixture not in c.params and fixture not in c.funcargs:
# parametrize with a single "not used" value and discard the id
if isinstance(p_to_apply, UnionParamz):
c_with_dummy = self._parametrize_calls([c], p_to_apply.union_fixture_name, [NOT_USED],
indirect=True, discard_id=True,
scope=p_to_apply.scope, **p_to_apply.kwargs)
else:
_nb_argnames = len(get_param_argnames_as_list(p_to_apply.argnames))
if _nb_argnames > 1:
_vals = [(NOT_USED,) * _nb_argnames]
else:
_vals = [NOT_USED]
c_with_dummy = self._parametrize_calls([c], p_to_apply.argnames, _vals,
indirect=p_to_apply.indirect, discard_id=True,
scope=p_to_apply.scope, **p_to_apply.kwargs)
assert len(c_with_dummy) == 1
calls[i] = c_with_dummy[0]
c = calls[i]
# B/ some non-parametrized fixtures may also need to be explicitly deactivated in some callspecs
# otherwise they will be setup/teardown.
#
# For this we use a dirty hack: we add a parameter with they name in the callspec, it seems to be propagated
# in the `request`. TODO is there a better way?
# for fixture in list(fix_closure_tree):
# for fixture_name, fixdef in self.metafunc._arg2fixturedefs.items():
for fixture_name in fix_closure_tree.get_not_always_used():
fixdef = self.metafunc._arg2fixturedefs[fixture_name]
if fixture_name not in c.params and fixture_name not in c.funcargs:
if not n.requires(fixture_name):
# explicitly add it as discarded by creating a parameter value for it.
c.params[fixture_name] = NOT_USED
c.indices[fixture_name] = 1
c._arg2scopenum[fixture_name] = get_pytest_scopenum(fixdef[-1].scope)
else:
# explicitly add it as active
c.params[fixture_name] = 'used'
c.indices[fixture_name] = 0
c._arg2scopenum[fixture_name] = get_pytest_scopenum(fixdef[-1].scope)
def _parametrize_calls(self, init_calls, argnames, argvalues, discard_id=False, indirect=False, ids=None,
scope=None, **kwargs):
"""Parametrizes the initial `calls` with the provided information and returns the resulting new calls"""
# make a backup so that we can restore the metafunc at the end
bak = self.metafunc._calls
# place the initial calls on the metafunc
self.metafunc._calls = init_calls if init_calls is not None else []
# parametrize the metafunc. Since we replaced the `parametrize` method on `metafunc` we have to call super
self.metafunc.__class__.parametrize(self.metafunc, argnames, argvalues, indirect=indirect, ids=ids,
scope=scope, **kwargs)
# extract the result
new_calls = self.metafunc._calls
# If the user wants to discard the newly created id, remove the last id in all these callspecs in this node
if discard_id:
for callspec in new_calls:
callspec._idlist.pop(-1)
# restore the metafunc and return the new calls
self.metafunc._calls = bak
return new_calls
def _process_node(self, current_node, pending, calls):
"""
Routine to apply all the parametrization orders in `pending` that are relevant to `current_node`,
to the `calls` (a list of pytest CallSpec2).
It returns a tuple containing a list of calls and a list of same length containing which leaf node each one
corresponds to.
:param current_node: the closure tree node we're focusing on
:param pending: a list of parametrization orders to apply
:param calls:
:return: a tuple (calls, nodes) of two lists of the same length. So that for each CallSpec calls[i], you can see
the corresponding leaf node in nodes[i]
"""
# (1) first apply all non-split fixtures at this node
fixtures_at_this_node = [f for f in current_node.fixture_defs.keys()
if f is not current_node.split_fixture_name]
# dirty hack if we want to preserve pytest legacy order when there are no children
# if current_node.parent is None and not current_node.has_split():
# # legacy compatibility: use pytest parametrization order even if it is wrong
# # see https://github.com/pytest-dev/pytest/issues/5054
#
# else:
# # rather trust the order we computed from the closure
# fixtures_to_process = fixtures_at_this_node
for fixturename in fixtures_at_this_node:
try:
# pop it from pending - do not rely the order in pending but rather the order in the closure node
p_to_apply = pending.pop(fixturename)
except KeyError:
# not a parametrized fixture
continue
else:
if isinstance(p_to_apply, UnionParamz):
raise ValueError("This should not happen !")
elif isinstance(p_to_apply, NormalParamz):
# ******** Normal parametrization **********
if _DEBUG:
print("[Node %s] Applying parametrization for NORMAL %s"
"" % (current_node.to_str(with_children=False, with_discarded=False),
p_to_apply.argnames))
calls = self._parametrize_calls(calls, p_to_apply.argnames, p_to_apply.argvalues,
indirect=p_to_apply.indirect, ids=p_to_apply.ids,
scope=p_to_apply.scope, **p_to_apply.kwargs)
else:
raise TypeError("Invalid parametrization type: %s" % p_to_apply.__class__)
# (2) then if there is a split apply it, otherwise return
if not current_node.has_split():
nodes = [current_node] * len(calls)
return calls, nodes
else:
try:
# pop it from pending - do not trust the order in pending.
p_to_apply = pending.pop(current_node.split_fixture_name)
except KeyError:
# not a parametrized fixture
raise ValueError("Error: fixture union parametrization not present")
else:
if isinstance(p_to_apply, NormalParamz):
raise ValueError("This should not happen !")
elif isinstance(p_to_apply, UnionParamz):
# ******** Union parametrization **********
if _DEBUG:
print("[Node %s] Applying parametrization for UNION %s"
"" % (current_node.to_str(with_children=False, with_discarded=False),
p_to_apply.union_fixture_name))
# always use 'indirect' since that's a fixture.
calls = self._parametrize_calls(calls, p_to_apply.union_fixture_name,
p_to_apply.alternative_names, indirect=True,
ids=p_to_apply.ids,
scope=p_to_apply.scope, **p_to_apply.kwargs)
# Change the ids
for callspec in calls:
callspec._idlist[-1] = apply_id_style(callspec._idlist[-1],
p_to_apply.union_fixture_name,
p_to_apply.alternative_names[0].idstyle)
# now move to the children
nodes_children = [None] * len(calls)
for i in range(len(calls)):
active_alternative = calls[i].params[p_to_apply.union_fixture_name]
child_node = current_node.children[active_alternative.fixture_name]
child_pending = pending.copy()
# place the childs parameter in the first position if it is in the list
# not needed anymore - already automatic
# try:
# child_pending.move_to_end(child_alternative, last=False)
# except KeyError:
# # not in the list: the child alternative is a non-parametrized fixture
# pass
calls[i], nodes_children[i] = self._process_node(child_node, child_pending, [calls[i]])
# finally flatten the list if needed
calls = flatten_list(calls)
nodes_children = flatten_list(nodes_children)
return calls, nodes_children
def _make_unique(lst):
_set = set()
def _first_time_met(v):
if v not in _set:
_set.add(v)
return True
else:
return False
return [v for v in lst if _first_time_met(v)]
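# Editor's note (hypothetical illustration): _make_unique(['b', 'a', 'b'])
# returns ['b', 'a'] -- duplicates are dropped while first-seen order is kept.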
def flatten_list(lst):
return [v for nested_list in lst for v in nested_list]
def sort_according_to_ref_list(fixturenames, param_names):
"""
Sorts items in the first list, according to their position in the second.
Items that are not in the second list stay in the same position, the others are just swapped.
A new list is returned.
:param fixturenames:
:param param_names:
:return:
"""
cur_indices = []
for pname in param_names:
cur_indices.append(fixturenames.index(pname))
target_indices = sorted(cur_indices)
sorted_fixturenames = list(fixturenames)
for old_i, new_i in zip(cur_indices, target_indices):
sorted_fixturenames[new_i] = fixturenames[old_i]
return sorted_fixturenames
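# Editor's note (hypothetical illustration): with fixturenames=['a', 'b', 'c'] and
# param_names=['c', 'a'], the positions of 'c' and 'a' (indices 2 and 0) are re-sorted
# to (0, 2), so the function returns ['c', 'b', 'a'] -- the parametrized names follow
# their order in param_names while 'b' keeps its place.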
_OPTION_NAME = 'with_reorder'
_SKIP = 'skip'
_NORMAL = 'normal'
_OPTIONS = {
_NORMAL: """(default) the usual reordering done by pytest to optimize setup/teardown of session- / module-
/ class- fixtures, as well as all the modifications made by other plugins (e.g. pytest-reorder)""",
_SKIP: """skips *all* reordering, even the one done by pytest itself or installed plugins
(e.g. pytest-reorder)"""
}
# @hookspec(historic=True)
def pytest_addoption(parser):
group = parser.getgroup('pytest-cases ordering', 'pytest-cases reordering options', after='general')
help_str = """String specifying one of the reordering alternatives to use. Should be one of :
- %s""" % ("\n - ".join(["%s: %s" % (k, v) for k, v in _OPTIONS.items()]))
group.addoption(
'--%s' % _OPTION_NAME.replace('_', '-'), type=str, default='normal', help=help_str
)
# @hookspec(historic=True)
def pytest_configure(config):
# validate the config
allowed_values = ('normal', 'skip')
reordering_choice = config.getoption(_OPTION_NAME)
if reordering_choice not in allowed_values:
raise ValueError("[pytest-cases] Wrong --%s option: %s. Allowed values: %s"
"" % (_OPTION_NAME, reordering_choice, allowed_values))
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_collection_modifyitems(session, config, items):
"""
An alternative to the `reorder_items` function in fixtures.py
(https://github.com/pytest-dev/pytest/blob/master/src/_pytest/fixtures.py#L209)
We basically set back the previous order once the pytest ordering routine has completed.
TODO we should set back an optimal ordering, but current PR https://github.com/pytest-dev/pytest/pull/3551
will probably not be relevant to handle our "union" fixtures > need to integrate the NOT_USED markers in the method
:param session:
:param config:
:param items:
:return:
"""
ordering_choice = config.getoption(_OPTION_NAME)
if ordering_choice == _SKIP:
# remember initial order
initial_order = copy(items)
yield
# put back the initial order but keep the filter
to_return = [None] * len(items)
i=0
for item in initial_order:
if item in items:
to_return[i] = item
i += 1
assert i == len(items)
items[:] = to_return
else:
# do nothing
yield
| [
"[email protected]"
] | |
cf38d83b92adeb028ec0a5e36ef6ed766d954ac0 | 0e4d09b2a1b93aaa6d623d16905854d993a934ae | /Python/Django/surprise_me/surprise_me/settings.py | f424efc942cbefeba7f250e60cd038b1cc08d43a | [] | no_license | freefaller69/DojoAssignments | ee7f6308b02041be3244f795422e0e044d4a41b2 | f40426ac448026c1172048665f36024ad22f0d81 | refs/heads/master | 2021-01-17T10:23:39.419514 | 2017-07-25T00:50:41 | 2017-07-25T00:50:41 | 84,012,790 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,253 | py | """
Django settings for surprise_me project.
Generated by 'django-admin startproject' using Django 1.11.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%*yk4s@mw!nm_8^^jkde_l^vdqldj2=v@dzqj&h6%z9l$t2b$='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apps.surprise',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'surprise_me.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'surprise_me.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
a8ba2d5c8302ea20dac6cf2c653d709d5b012a3b | c35d5713b9991efeb0f8a2665c91c74127138594 | /bufferbloat.py | 4e0431721722d6c7a93a6b3fc70e29396676f8fa | [] | no_license | vs9390/bufferbloat | 70849c13f24e0f7744a7852e8ed838a6235dbd0f | cc5341b5f0c0f835e6ec2e3d536abd2d80a5b096 | refs/heads/master | 2020-04-21T14:16:58.228215 | 2019-02-07T19:30:32 | 2019-02-07T19:30:32 | 169,629,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,038 | py | #!/usr/bin/python
"CS144 In-class exercise: Buffer Bloat"
from mininet.topo import Topo
from mininet.node import CPULimitedHost
from mininet.link import TCLink
from mininet.net import Mininet
from mininet.node import RemoteController
from mininet.log import lg, info
from mininet.util import dumpNodeConnections
from mininet.cli import CLI
from monitor import monitor_qlen
from subprocess import Popen, PIPE
from time import sleep, time
from multiprocessing import Process
from argparse import ArgumentParser
import sys
import os
# Parse arguments
parser = ArgumentParser(description="BufferBloat tests")
parser.add_argument('--bw-host', '-B',
dest="bw_host",
type=float,
action="store",
help="Bandwidth of host links",
required=True)
parser.add_argument('--bw-net', '-b',
dest="bw_net",
type=float,
action="store",
help="Bandwidth of network link",
required=True)
parser.add_argument('--delay',
dest="delay",
type=float,
help="Delay in milliseconds of host links",
default=10)
parser.add_argument('--dir', '-d',
dest="dir",
action="store",
help="Directory to store outputs",
default="results",
required=True)
parser.add_argument('-n',
dest="n",
type=int,
action="store",
help="Number of nodes in star.",
required=True)
parser.add_argument('--nflows',
dest="nflows",
action="store",
type=int,
help="Number of flows per host (for TCP)",
required=True)
parser.add_argument('--maxq',
dest="maxq",
action="store",
help="Max buffer size of network interface in packets",
default=500)
parser.add_argument('--cong',
dest="cong",
help="Congestion control algorithm to use",
default="reno")
parser.add_argument('--diff',
help="Enabled differential service",
action='store_true',
dest="diff",
default=False)
# Expt parameters
args = parser.parse_args()
class StarTopo(Topo):
"Star topology for Buffer Bloat experiment"
def __init__(self, n=2, cpu=None, bw_host=1000, bw_net=1.5,
delay=10, maxq=None, diff=False):
# Add default members to class.
super(StarTopo, self ).__init__()
# Create switch and host nodes
for i in xrange(n):
self.addHost( 'h%d' % (i+1), cpu=cpu )
self.addSwitch('s0', fail_mode='open')
self.addLink('h1', 's0', bw=bw_host,
max_queue_size=int(maxq) )
for i in xrange(1, n):
self.addLink('h%d' % (i+1), 's0', bw=bw_host)
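# Editor's note (hypothetical illustration): StarTopo(n=3) attaches h1, h2 and h3 to the
# single switch s0; only h1's access link is created with the bounded queue
# (max_queue_size=maxq) whose buffering behaviour the experiment examines.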
def ping_latency(net):
"(Incomplete) verify link latency"
h1 = net.getNodeByName('h1')
h1.sendCmd('ping -c 2 10.0.0.2')
result = h1.waitOutput()
print "Ping result:"
print result.strip()
def bbnet():
"Create network and run Buffer Bloat experiment"
print "starting mininet ...."
# Seconds to run iperf; keep this very high
seconds = 3600
start = time()
# Reset to known state
topo = StarTopo(n=args.n, bw_host=args.bw_host,
delay='%sms' % args.delay,
bw_net=args.bw_net, maxq=args.maxq, diff=args.diff)
net = Mininet(topo=topo, host=CPULimitedHost, link=TCLink,
autoPinCpus=True, controller=lambda name: RemoteController("c0",
ip="0.0.0.0",
port=6653))
# c0 = net.addController('c0', controller=RemoteController, ip="127.0.0.1", port=6653)
net.start()
dumpNodeConnections(net.hosts)
net.pingAll()
print args.diff
if args.diff:
print "Differentiate Traffic Between iperf and wget"
os.system("bash tc_cmd_diff.sh")
else:
print "exec tc_cmd.sh"
os.system("bash tc_cmd.sh %s" % args.maxq)
sleep(2)
ping_latency(net)
print "Initially, the delay between two hosts is around %dms" % (int(args.delay)*2)
h2 = net.getNodeByName('h2')
h1 = net.getNodeByName('h1')
h1.cmd('cd ./http/; nohup python2.7 ./webserver.py &')
h1.cmd('cd ../')
h2.cmd('iperf -s -w 16m -p 5001 -i 1 > iperf-recv.txt &')
CLI( net )
h1.cmd("sudo pkill -9 -f webserver.py")
h2.cmd("rm -f index.html*")
Popen("killall -9 cat", shell=True).wait()
if __name__ == '__main__':
bbnet()
| [
"None"
] | None |
781728cd41d7b2d6039a59dec118afaea02aea57 | df3e3e937e85ae03bc6714bf9aa487d9338d44fd | /mpmp/exceptions.py | feb76e4c5975c4adf3db8b9f293ccc2c91ce9877 | [
"BSD-3-Clause"
] | permissive | mayala1925/mpmp | 9a6b4be43f9bc29874e9c0cdfa0866d70b61263c | 7bd4d49e4acd745447dc0018ac121d1a45e8bfbc | refs/heads/master | 2023-08-16T13:23:08.019630 | 2021-10-13T23:09:07 | 2021-10-13T23:09:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,675 | py | """
Exceptions specific to pan-cancer prediction experiments
"""
class ResultsFileExistsError(Exception):
"""
Custom exception to raise when the results file already exists for the
given gene and cancer type.
This allows calling scripts to choose how to handle this case (e.g. to
print an error message and continue, or to abort execution).
"""
pass
class NoTrainSamplesError(Exception):
"""
Custom exception to raise when there are no train samples in a
cross-validation fold for a given cancer type.
This allows calling scripts to choose how to handle this case (e.g. to
print an error message and continue, or to abort execution).
"""
pass
class NoTestSamplesError(Exception):
"""
Custom exception to raise when there are no test samples in a
cross-validation fold for a given cancer type.
This allows calling scripts to choose how to handle this case (e.g. to
print an error message and continue, or to abort execution).
"""
pass
class OneClassError(Exception):
"""
Custom exception to raise when there is only one class present in the
test set for the given cancer type.
This allows calling scripts to choose how to handle this case (e.g. to
print an error message and continue, or to abort execution).
"""
pass
class GenesNotFoundError(Exception):
"""
Custom exception to raise when genes provided for classification are not
part of existing datasets with oncogene/TSG info.
This allows calling scripts to choose how to handle this case (e.g. to
print an error message and continue, or to abort execution).
"""
pass
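# --- Editor's note: hypothetical usage sketch; `run_experiment`, `gene` and
# `cancer_type` are illustrative names, not part of this package ---
#
#     try:
#         run_experiment(gene, cancer_type)
#     except ResultsFileExistsError:
#         print('results already exist for this gene/cancer type, skipping')
#     except (NoTrainSamplesError, NoTestSamplesError, OneClassError):
#         print('skipping fold that cannot be scored for this cancer type')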
| [
"[email protected]"
] | |
2bc1432323a455395c7e8d97b4f3896a33278eb9 | c1c00ced90d47b9425fa11b6e0e5148a26a70085 | /tests/test_cli.py | d3438f75559b5b4993b1f8da97e7d6b0531eb024 | [
"MIT"
] | permissive | destos/Patterner | a8e90e30f0f2ca9411beb39e4cb8ef9e25fedc23 | 3e32468e843ec817b94da9df543c891ca69927fc | refs/heads/master | 2020-04-25T14:44:23.872391 | 2019-02-27T05:50:01 | 2019-02-27T05:50:01 | 172,852,064 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | """Sample integration test module using pytest-describe and expecter."""
# pylint: disable=redefined-outer-name,unused-variable,expression-not-assigned
import pytest
from click.testing import CliRunner
from expecter import expect
from patterner.cli import main
@pytest.fixture
def runner():
return CliRunner()
def describe_cli():
def describe_conversion():
def when_integer(runner):
result = runner.invoke(main, ['42'])
expect(result.exit_code) == 0
expect(result.output) == "12.80165\n"
def when_invalid(runner):
result = runner.invoke(main, ['foobar'])
expect(result.exit_code) == 0
expect(result.output) == ""
| [
"[email protected]"
] | |
7581ff553d9d2380b9a3fa8d04bc19aa2433dd6d | 664c3ced94ab0e9a5bac547028db59a3ca1f2074 | /10. Use classes to create active objects /EG10-07 Time Tracker with exception handler.py | 6ab5ddc9eaa23b6a53954e815d434c237f056033 | [
"MIT"
] | permissive | nikcbg/Begin-to-Code-with-Python | 2b1283a7818e26d3471677b51d1832cde52c4ddc | a72fdf18ca15f564be895c6394a91afc75fc3e2c | refs/heads/master | 2021-06-23T23:09:36.009442 | 2021-06-23T11:17:24 | 2021-06-23T11:17:24 | 209,285,197 | 0 | 0 | MIT | 2021-03-17T07:48:09 | 2019-09-18T10:50:51 | Python | UTF-8 | Python | false | false | 6,121 | py | # EG10-07 Time Tracker with exception handler
import pickle
from BTCInput import *
# Create the contact class
class Contact:
min_session_length = 0.5
max_session_length = 3.5
@staticmethod
def validate_session_length(session_length):
'''
Validates a session length and returns
True if the session is valid or False if not
'''
if session_length < Contact.min_session_length:
return False
if session_length > Contact.max_session_length:
return False
return True
def __init__(self, name, address, telephone):
self.name = name
self.address = address
self.telephone = telephone
self.hours_worked = 0
def get_hours_worked(self):
'''
Gets the hours worked for this contact
'''
return self.hours_worked
def add_session(self, session_length):
'''
Adds the value of the parameter
onto the hours spent with this contact
Raises an exception if the session length is invalid
'''
if not Contact.validate_session_length(session_length):
raise Exception('Invalid session length')
self.hours_worked = self.hours_worked + session_length
return
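# Brief illustration (not part of the original program) of how Contact sessions
# behave: lengths outside [0.5, 3.5] hours raise an exception, valid ones accumulate.
#
#     c = Contact(name='Rob', address='18 Pussycat Mews', telephone='1234 56789')
#     c.add_session(2.0)   # hours_worked is now 2.0
#     c.add_session(5.0)   # raises Exception('Invalid session length')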
def new_contact():
'''
Reads in a new contact and stores it
'''
print('Create new contact')
# add the data attributes
name=read_text('Enter the contact name: ')
address=read_text('Enter the contact address: ')
telephone=read_text('Enter the contact phone: ')
# create a new instance
new_contact=Contact(name=name,address=address,telephone=telephone)
# add the new contact to the contact list
contacts.append(new_contact)
def find_contact(search_name):
'''
Finds the contact with the matching name
Returns a contact instance or None if there is
no contact with the given name
'''
# remove any whitespace from around the search name
search_name = search_name.strip()
# convert the search name to lower case
search_name = search_name.lower()
for contact in contacts:
# get the name out of the contact
name=contact.name
# remove any whitespace from around the name
name=name.strip()
# convert the name to lower case
name = name.lower()
# see if the names match
if name.startswith(search_name):
# return the contact that was found
return contact
# if we get here no contact was found
# with the given name
return None
def display_contact():
'''
Reads in a name to search for and then displays
the content information for that name or a
message indicating that the name was not found
'''
print('Find contact')
search_name = read_text('Enter the contact name: ')
contact=find_contact(search_name)
if contact!=None:
# Found a contact
print('Name:', contact.name)
print('Address:', contact.address)
print('Telephone:', contact.telephone)
print('Hours on the case:', contact.get_hours_worked())
else:
print('This name was not found.')
def edit_contact():
'''
Reads in a name to search for and then allows
the user to edit the details of that contact
If there is no contact the funciton displays a
message indicating that the name was not found
'''
print('Edit contact')
search_name=read_text('Enter the contact name: ')
contact=find_contact(search_name)
if contact!=None:
# Found a contact
print('Name: ',contact.name)
new_name=read_text('Enter new name or . to leave unchanged: ')
if new_name!='.':
contact.name=new_name
new_address=read_text('Enter new address or . to leave unchanged: ')
if new_address!='.':
contact.address=new_address
new_phone=read_text('Enter new telephone or . to leave unchanged: ')
if new_phone!='.':
contact.telephone=new_phone
else:
print('This name was not found.')
def add_session_to_contact():
'''
Reads in a name to search for and then allows
the user to add a session spent working for
that contact
'''
print('add session')
search_name=read_text('Enter the contact name: ')
contact=find_contact(search_name)
if contact!=None:
# Found a contact
print('Name: ',contact.name)
print('Previous hours worked:',contact.get_hours_worked())
session_length=read_float(prompt='Session length: ')
try:
contact.add_session(session_length)
print('Updated hours worked:', contact.get_hours_worked())
except Exception as e:
print('Add hours failed:',e)
else:
print('This name was not found.')
def save_contacts(file_name):
'''
Saves the contacts to the given filename
Contacts are stored in binary as pickled file
Exceptions will be raised if the save fails
'''
print('save contacts')
with open(file_name,'wb') as out_file:
pickle.dump(contacts,out_file)
def load_contacts(file_name):
'''
Loads the contacts from the given filename
Contacts are stored in binary as pickled file
Exceptions will be raised if the load fails
'''
global contacts
print('Load contacts')
with open(file_name,'rb') as input_file:
contacts=pickle.load(input_file)
menu='''Time Tracker
1. New Contact
2. Find Contact
3. Edit Contact
4. Add Session
5. Exit Program
Enter your command: '''
filename='contacts.pickle'
try:
load_contacts(filename)
except:
print('Contacts file not found')
contacts=[]
while True:
command=read_int_ranged(prompt=menu,min_value=1,max_value=5)
if command==1:
new_contact()
elif command==2:
display_contact()
elif command==3:
edit_contact()
elif command==4:
add_session_to_contact()
elif command==5:
save_contacts(filename)
break
| [
"[email protected]"
] | |
8fda8333924bdd0b3d4d4a1fc03469652dc5986d | df823d33423d37251c49b4be12ee022170138071 | /python/mycurses.py | 198e2605f0b0559ffdd2ed3200c896e81e5f5c89 | [] | no_license | von/sandbox | ca2a87870f0f5e3153cb33fd940f1b4cb9da7985 | 5e47e93c32bc85f986f39b1d4df8a384c7ff0019 | refs/heads/main | 2023-04-30T02:14:36.466490 | 2023-04-18T14:11:54 | 2023-04-18T14:11:54 | 331,739 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 722 | py | #!/usr/bin/env python3
import curses
import os
def main(stdscr):
win1 = curses.newwin(3, 30, 2,0)
win1.border()
win2 = curses.newwin(10, 30, 10,0)
win2.border()
stdscr.addstr(0,0, "Testing...")
win1.addstr(0,0, "Foobar")
win2.addstr(0,0, "I win")
stdscr.refresh()
win1.refresh()
win2.refresh()
stdscr.getch()
win2.clear()
win2.addstr(0,0, "2..3..")
win2.refresh()
stdscr.getch()
ls = os.popen("ls")
for i,line in enumerate(ls):
try:
win2.addstr(i, 0, line.encode("utf-8"))
except curses.error:
# Assume we've hit the end of the window
break
win2.refresh()
stdscr.getch()
curses.wrapper(main)
| [
"[email protected]"
] | |
4d97fad9266a037d603b5a43d20dff72f6a5cdfc | ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f | /PORMain/pirates/shipparts/WheelInteractive.py | ac3fc5be3f7f39bac48476e6aa630f9acf8c2189 | [] | no_license | BrandonAlex/Pirates-Online-Retribution | 7f881a64ec74e595aaf62e78a39375d2d51f4d2e | 980b7448f798e255eecfb6bd2ebb67b299b27dd7 | refs/heads/master | 2020-04-02T14:22:28.626453 | 2018-10-24T15:33:17 | 2018-10-24T15:33:17 | 154,521,816 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | from pirates.interact.SimpleInteractive import SimpleInteractive
from pirates.piratesbase import PLocalizer
class WheelInteractive(SimpleInteractive):
def __init__(self, ship):
self.ship = ship
wheel = ship.model.locators.find('**/location_wheel')
if not wheel:
wheel = ship.model.root.attachNewNode('dummyWheel')
SimpleInteractive.__init__(self, wheel, 'wheel-%s' % ship.doId, PLocalizer.InteractWheel)
def interactionAllowed(self, avId):
return self.ship.canTakeWheel(avId)
def requestInteraction(self, avId):
self.ship.requestPilot(avId)
| [
"[email protected]"
] | |
97149269400558d93a4ef6ec0d73377a66d2b056 | 4bd555bc662b8182a2e7644976bfdb00ed5e1ebe | /PythonistaAppTemplate/PythonistaKit.framework/pylib/encodings/base64_codec.py | 731c567665b73aafb55074d0d5ffc115ec9270e3 | [] | no_license | fhelmli/homeNOWG2 | a103df1ef97194dec9501dbda87ec1f7c111fb4a | e794fd87b296544542fd9dc7ac94c981c6312419 | refs/heads/master | 2020-04-04T13:40:20.417769 | 2019-01-30T21:41:04 | 2019-01-30T21:41:04 | 155,970,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,227 | py | #\input texinfo
""" Python 'base64_codec' Codec - base64 content transfer encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Written by Marc-Andre Lemburg ([email protected]).
"""
import codecs, base64
### Codec APIs
def base64_encode(input,errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = base64.encodestring(input)
return (output, len(input))
def base64_decode(input,errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = base64.decodestring(input)
return (output, len(input))
class Codec(codecs.Codec):
def encode(self, input,errors='strict'):
return base64_encode(input,errors)
def decode(self, input,errors='strict'):
return base64_decode(input,errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
assert self.errors == 'strict'
return base64.encodestring(input)
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
assert self.errors == 'strict'
return base64.decodestring(input)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='base64',
encode=base64_encode,
decode=base64_decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
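# Usage sketch (Python 2, not part of the original module): once this module is on
# the encodings search path, the codec is available through the normal string API:
#
#     >>> 'hello world'.encode('base64')
#     'aGVsbG8gd29ybGQ=\n'
#     >>> 'aGVsbG8gd29ybGQ=\n'.decode('base64')
#     'hello world'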
| [
"[email protected]"
] | |
184b601a9277e7e6f8aa27a0c38c389b529ad172 | 59b3dce3c770e70b2406cc1dd623a2b1f68b8394 | /python_3/lessons/Timing_Computations/src/ count_thirtyone_days.py | 682d87d1e84181941930cc296f2428ddc1f00032 | [] | no_license | patrickbeeson/python-classes | 04ed7b54fc4e1152a191eeb35d42adc214b08e39 | b5041e71badd1ca2c013828e3b2910fb02e9728f | refs/heads/master | 2020-05-20T07:17:36.693960 | 2015-01-23T14:41:46 | 2015-01-23T14:41:46 | 29,736,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | from datetime import datetime, timedelta
now = datetime.now()
delta = timedelta(31)
delivery = now + delta
print("Today: %s" % now.strftime("%d"))
print("Delivery: %s" % delivery.strftime("%d"))
| [
"[email protected]"
] | |
b9c8c3198ea3b91ca79b7666122aeba124f8d46b | 8be217fe977aa0bcd9e375c75b0fb522f5bf0101 | /mergetwosortedlists21.py | 0065e807191a8350423dd2e81ae12019e30106ab | [] | no_license | blueones/LeetcodePractices | c63a5e773bebea17e988e8bb4962e012d7d402ba | 194375ba0c07e420f420aafec98aede2f9f5d8fa | refs/heads/master | 2021-07-14T14:21:55.389334 | 2021-01-24T22:13:21 | 2021-01-24T22:13:21 | 230,814,709 | 0 | 1 | null | 2020-02-25T02:58:04 | 2019-12-29T23:18:25 | Python | UTF-8 | Python | false | false | 947 | py | # Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
beforeN = ListNode(0)
dummyN = beforeN
if l1 == None and l2 == None:
return None
while l1 or l2:
#print("here?")
if l1 and l2:
if l1.val >= l2.val:
beforeN.next = l2
l2 = l2.next
else:
beforeN.next = l1
l1 = l1.next
elif l1 == None:
beforeN.next = l2
break
elif l2 == None:
beforeN.next = l1
break
beforeN = beforeN.next
return dummyN.next
sunnyNode = ListNode(1)
sunnyNode2 = None
print(Solution().mergeTwoLists(sunnyNode,sunnyNode2))
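# Added illustration (not part of the original solution): the print above shows the
# ListNode object itself, so a small traversal helper makes the merged values visible.
def print_list(node):
    """Print the node values of a singly linked list, or 'empty' for None."""
    values = []
    while node:
        values.append(str(node.val))
        node = node.next
    print('-> '.join(values) if values else 'empty')

print_list(Solution().mergeTwoLists(ListNode(1), ListNode(2)))  # expected: 1-> 2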
| [
"[email protected]"
] | |
d8d742854ec7842465b985ad93830852b7b6d3a1 | 8d14d526969d8e970254f08563ff2c6e6583dd35 | /Python/2019/Hafta20191122/venv/Scripts/easy_install-script.py | 530572ec9997ac842f173dfe02dcf73848586a38 | [] | no_license | osmanraifgunes/MedipolCodes | c29db62896162c4b1a2c8c274877fff63149f826 | 943b014269e9a7b529e74741ce14447dbd7d5df5 | refs/heads/master | 2023-01-09T10:31:02.907945 | 2020-06-09T18:05:04 | 2020-06-09T18:05:04 | 218,612,787 | 6 | 13 | null | 2023-01-07T18:58:55 | 2019-10-30T19:59:16 | Python | UTF-8 | Python | false | false | 453 | py | #!C:\code\MedipolCodes\Python\Hafta20191122\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| [
"[email protected]"
] | |
699c75b97c7a8afdb70e4ce79f91ad7f94158668 | 95a2bb2ef56ca80ad7cb51d67a42242bf18fa337 | /jump/models.py | b17ca1bff68351cf244316d1a03fec9d36836f23 | [] | no_license | zhangxianbo/soms | ac2183c0a285fe56456461101ecc78ca314c3929 | 0ba1802b0e2e9111e0f1855480723be8e2941bcd | refs/heads/master | 2021-04-09T15:46:14.086425 | 2016-07-14T08:15:21 | 2016-07-14T08:15:21 | 62,615,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,671 | py | #coding=utf-8
from django.db import models
from datetime import datetime
# Create your models here.
class Host(models.Model):
hostid = models.AutoField(primary_key=True)
idc = models.CharField('机房',max_length=50)
addr = models.CharField('机架等标识',max_length=50)
sn = models.CharField('序列号',max_length=30,blank=True)
ip = models.GenericIPAddressField('ip地址')
port = models.IntegerField()
online = models.CharField('在线状态',max_length=10)
use = models.CharField('用途',max_length=50,blank=True)
switch = models.CharField('交换机',max_length=50,blank=True)
comment = models.CharField('备注',max_length=100, blank=True, null=True)
def __unicode__(self):
return u' %s' % (self.ip)
class User(models.Model):
userid = models.AutoField(primary_key=True)
username = models.CharField('用户名',max_length=20)
password = models.CharField('密码',max_length=100,blank=True)
#ip = models.ManyToManyField(Host)
name = models.CharField('姓名',max_length=50,blank=True)
email = models.EmailField('邮箱',max_length=50,blank=True)
update_time = models.DateTimeField('更新时间',default=datetime.now)
def __unicode__(self):
return u'%s' % (self.username)
class Meta:
ordering = ['username']
class Userhost(models.Model):
#uid = models.OneToOneField(User)
#hid = models.ManyToManyField(Host)
uid = models.ForeignKey(User)
hid = models.ForeignKey(Host)
permcode = models.CharField('权限位',max_length=10,blank=True)
def __unicode__(self):
return u'%s %s %s' % (self.uid,self.hid,self.permcode)
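# Usage sketch (not part of the original module): Userhost is the link table between
# User and Host, so a user's hosts and permission codes can be queried through it.
# The username 'alice' is only an illustration.
#
#     hosts = Host.objects.filter(userhost__uid__username='alice')
#     perms = Userhost.objects.filter(uid__username='alice').values('hid__ip', 'permcode')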
| [
"[email protected]"
] | |
cffdbd0ab90b9b59ef7a69aff564ea1323fbe6b4 | 3181efe062a6745fc2e5d182260b8e94ce6c5701 | /0MyProject_Quant/海龟反转策略/4_2.方向过滤参数自动选择及策略回测_并行.py | 2c92a341e8f5ec2c95609c7db032528948d0fb42 | [] | no_license | LibreChou/PythonLearning | e240fddc559dc8614d4db95e79d047b18cc1be52 | 562ded21e84b68f43c539c65b91aed3a880162ce | refs/heads/master | 2023-03-12T12:18:33.501881 | 2021-03-04T11:33:42 | 2021-03-04T11:33:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,317 | py | # Author:Zhang Yuan
import warnings
warnings.filterwarnings('ignore')
from MyPackage import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import seaborn as sns
import statsmodels.api as sm
from scipy import stats
#------------------------------------------------------------
__mypath__ = MyPath.MyClass_Path("")  # path utilities
mylogging = MyDefault.MyClass_Default_Logging(activate=True, filename=__mypath__.get_desktop_path()+"\\方向过滤策略回测.log")  # logging class; must be set up before the others
myfile = MyFile.MyClass_File()  # file operations
myword = MyFile.MyClass_Word()  # Word document generation
myexcel = MyFile.MyClass_Excel()  # Excel generation
mytime = MyTime.MyClass_Time()  # time utilities
myplt = MyPlot.MyClass_Plot()  # direct plotting (single figure window)
mypltpro = MyPlot.MyClass_PlotPro()  # advanced plot series
myfig = MyPlot.MyClass_Figure(AddFigure=False)  # object-style plotting (multiple figure windows)
myfigpro = MyPlot.MyClass_FigurePro(AddFigure=False)  # advanced figure series
mynp = MyArray.MyClass_NumPy()  # multi-dimensional array class (wraps NumPy)
mypd = MyArray.MyClass_Pandas()  # matrix/array class (wraps Pandas)
mypdpro = MyArray.MyClass_PandasPro()  # advanced matrix/array class
myDA = MyDataAnalysis.MyClass_DataAnalysis()  # data analysis
myDefault = MyDefault.MyClass_Default_Matplotlib()  # restore matplotlib default settings
# myMql = MyMql.MyClass_MqlBackups()  # MQL backups
# myBaidu = MyWebCrawler.MyClass_BaiduPan()  # Baidu Pan interaction
# myImage = MyImage.MyClass_ImageProcess()  # image processing
myBT = MyBackTest.MyClass_BackTestEvent()  # event-driven backtesting
myBTV = MyBackTest.MyClass_BackTestVector()  # vectorized backtesting
myML = MyMachineLearning.MyClass_MachineLearning()  # machine learning toolkit
mySQL = MyDataBase.MyClass_MySQL(connect=False)  # MySQL class
mySQLAPP = MyDataBase.MyClass_SQL_APPIntegration()  # database application integration
myWebQD = MyWebCrawler.MyClass_QuotesDownload(tushare=False)  # financial quotes download
myWebR = MyWebCrawler.MyClass_Requests()  # Requests-based crawler
myWebS = MyWebCrawler.MyClass_Selenium(openChrome=False)  # Selenium browser automation
myWebAPP = MyWebCrawler.MyClass_Web_APPIntegration()  # crawler application integration
myEmail = MyWebCrawler.MyClass_Email()  # email interaction
myReportA = MyQuant.MyClass_ReportAnalysis()  # research report analysis
myFactorD = MyQuant.MyClass_Factor_Detection()  # factor detection
myKeras = MyDeepLearning.MyClass_tfKeras()  # tf.keras toolkit
myTensor = MyDeepLearning.MyClass_TensorFlow()  # TensorFlow toolkit
myMT5 = MyMql.MyClass_ConnectMT5(connect=False)  # Python connection to the MetaTrader 5 terminal
myMT5Pro = MyMql.MyClass_ConnectMT5Pro(connect=False)  # advanced MT5 connection class
myMT5Indi = MyMql.MyClass_MT5Indicator()  # MT5 indicators in Python
myDefault.set_backend_default("Pycharm")  # under PyCharm, plt.show() is required to display figures
#------------------------------------------------------------
'''
# 1. Parse the file names of the documents produced by the earlier signal-profit filter tests, read the parameters, and pick the extreme values.
# 2. Each specific set of strategy parameters gets its own directory, which holds the results of all indicators under it.
# 3. Extremes are detected automatically for indicators with different names, and plots are written out. Finally a spreadsheet is produced that collects these extremes.
# 4. Since this is not a heavy computation, everything is parallelized at once.
# 5. In parallel runs, watch memory release and do not compute everything in one go, otherwise memory can blow up. Parallelize in groups.
'''
'''
# Notes
# The strategy backtest here assumes that parameter selection for the indicator range filter has already been done.
# For each concrete strategy, the indicator-filter step has produced the extreme value of each indicator's filtering effect; we backtest with the indicator values corresponding to those extremes.
# The plots show, respectively: the training-set price and indicator before filtering, the training-set strategy before filtering, the full-sample price and indicator after filtering, and the full-sample strategy after filtering together with the training-set strategy.
# The direction filter is applied to the whole sample.
# Parallelization is per symbol, grouped by timeframe.
# Because there are many indicators, parallel runs take a long time, so a log is written in case of errors.
'''
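#%% Illustrative sketch (not part of the original script): the grouped parallelism
# described in the notes above can be expressed with joblib by splitting the symbol
# list into chunks and running one batch at a time, freeing memory between batches.
# The callable name `run_one_symbol` is hypothetical.
def _grouped_parallel_sketch(symbols, run_one_symbol, n_jobs=-1, group_size=4):
    from joblib import Parallel, delayed
    import gc
    for i in range(0, len(symbols), group_size):
        batch = symbols[i:i + group_size]
        Parallel(n_jobs=n_jobs)(delayed(run_one_symbol)(s) for s in batch)
        gc.collect()  # release memory before the next group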
#%%
from MyPackage.MyProjects.向量化策略测试.Direct_Filter import Auto_Choose_DFilter_Param
choo_para = Auto_Choose_DFilter_Param()
myDefault.set_backend_default("agg")
#%% ****** needs manual modification ******
choo_para.symbol_list = myMT5Pro.get_main_symbol_name_list()
choo_para.total_folder = "F:\\工作---策略研究\\公开的海龟策略\\_海龟反转研究"
choo_para.core_num = -1
#%%
from MyPackage.MyProjects.向量化策略测试.Direct_Filter import Direct_Filter_BackTest
rf_bt = Direct_Filter_BackTest()
myplt.set_backend("agg") # 'agg' writes figures in the background and does not consume PyCharm memory
#%%
rf_bt.symbol_list = choo_para.symbol_list
rf_bt.total_folder = choo_para.total_folder
rf_bt.core_num = -1
#%% ****** modify this function ******
# Current-bar strategy signal (no shifting needed): para_list holds the strategy parameters; by convention index -1 is lag_trade and -2 is holding.
def stratgy_signal(dataframe, para_list=list or tuple):
return myBTV.stra.turtle_reverse(dataframe, para_list[0], price_arug= ["High", "Low", "Close"])
rf_bt.stratgy_signal = stratgy_signal
#%%
# --- multiprocessing must be launched from here
if __name__ == '__main__':
# ---
print("开始方向过滤参数自动选择:")
choo_para.main_func()
print("开始方向过滤策略回测:")
rf_bt.main_func()
| [
"[email protected]"
] | |
7c5b13fc736557163c95d289141ff4870117e2e0 | b5a9469cb779031936bb613719397d7b3c279626 | /backend/apps/privacy/sitemaps.py | 2bc9aa6eae410f25322fcf965d670fd616158b73 | [] | no_license | arsavit/Cidsfg_copy | a34858d63749db0e821cb2f26b1eb31c4565c0f9 | 0145e9f1a397899b03a8d767fb96f1d238ec21f9 | refs/heads/main | 2023-07-08T11:18:10.042595 | 2021-08-11T08:09:27 | 2021-08-11T08:09:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | from django.contrib.sitemaps import Sitemap
from .models import Privacy
class PrivacySitemap(Sitemap):
""" Получение страницы ПОЛИТИКА КОНФИДЕНЦИАЛЬНОСТИ """
changefreq = 'weekly'
priority = 0.9
location = '/privacy/'
def items(self):
return Privacy.objects.all().order_by('-id')[:1]
def lastmod(self, obj):
return obj.updated
| [
"[email protected]"
] | |
8da58df298c3f417894362409649e16ba045b26b | 11806ceb316950e41725f8acb2d7e5ecea6036a1 | /biomass/core.py | fb61327fbf5acde1d04b4780683f21c215b7631e | [
"Apache-2.0"
] | permissive | TrendingTechnology/biomass | 82cb65892c467cc236ce212caa9ff21cc9812e22 | 2e2b262f6d99834d2d1b44a1304fcf5395b566ef | refs/heads/master | 2023-05-28T03:50:38.016708 | 2021-06-01T04:40:23 | 2021-06-01T04:40:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,116 | py | """BioMASS core functions"""
import multiprocessing
import os
from dataclasses import dataclass
from importlib import import_module
from pathlib import Path
from typing import Any, Optional
from .analysis import InitialConditionSensitivity, ParameterSensitivity, ReactionSensitivity
from .dynamics import SignalingSystems
from .estimation import GeneticAlgorithmContinue, GeneticAlgorithmInit
from .exec_model import ModelObject
__all__ = ["Model", "optimize", "optimize_continue", "run_simulation", "run_analysis"]
@dataclass
class Model(object):
"""
The BioMASS model object.
Attributes
----------
pkg_name: str
Path (dot-sepalated) to a biomass model directory.
Use '__package__'.
"""
pkg_name: str
def _load_model(self) -> Any:
try:
biomass_model = import_module(self.pkg_name)
return biomass_model
except ImportError:
p = Path(self.pkg_name.replace(".", os.sep))
print(f"cannot import '{p.name}' from '{p.parent}'.")
def create(self, show_info: bool = False) -> ModelObject:
"""
Build a biomass model.
Parameters
----------
show_info : bool (default: False)
Set to 'True' to print the information related to model size.
Examples
--------
>>> from biomass import Model
>>> import your_model
>>> model = Model(your_model.__package__).create()
"""
model = ModelObject(self.pkg_name.replace(".", os.sep), self._load_model())
if model.sim.normalization:
for obs_name in model.obs:
if (
isinstance(model.sim.normalization[obs_name]["timepoint"], int)
and not model.sim.t[0]
<= model.sim.normalization[obs_name]["timepoint"]
<= model.sim.t[-1]
):
raise ValueError("Normalization timepoint must lie within sim.t.")
if not model.sim.normalization[obs_name]["condition"]:
model.sim.normalization[obs_name]["condition"] = model.sim.conditions
else:
for c in model.sim.normalization[obs_name]["condition"]:
if c not in model.sim.conditions:
raise ValueError(
f"Normalization condition '{c}' is not defined in sim.conditions."
)
if show_info:
model_name = Path(model.path).name
print(
f"{model_name} information\n" + ("-" * len(model_name)) + "------------\n"
f"{len(model.species):d} species\n"
f"{len(model.parameters):d} parameters, "
f"of which {len(model.sp.idx_params):d} to be estimated"
)
return model
def _check_optional_arguments(
end: Optional[int],
options: Optional[dict],
) -> None:
if options is None:
pass
elif isinstance(options, dict):
if options["local_search_method"].lower() not in ["mutation", "powell", "de"]:
raise ValueError(
f"'{options['local_search_method']}': "
"Invalid local_search_method. Should be one of ['mutation', 'Powell', 'DE']"
)
elif (
isinstance(end, int)
and options["local_search_method"].lower() == "de"
and options["workers"] != 1
):
raise AssertionError(
"daemonic processes are not allowed to have children. Set options['workers'] to 1."
)
else:
raise TypeError("options must be dict or None.")
def optimize(
model: ModelObject,
start: int,
end: Optional[int] = None,
options: Optional[dict] = None,
) -> None:
"""
Estimate model parameters from experimental data.
Parameters
----------
model : ModelObject
Model for parameter estimation.
start : int
Index of parameter set to estimate.
end : int, optional
When `end` is specified, parameter sets from `start` to `end` will be estimated.
options : dict, optional
* popsize : int (default: 5)
A multiplier for setting the total population size.
The population has popsize * len(search_param) individuals.
* max_generation : int (default: 10000)
Stop optimization if Generation > max_generation.
* initial_threshold : float (default: 1e12)
Threshold on objective function value used to generate initial population.
Default value is 1e12 (numerically solvable).
* allowable_error : float (default: 0.0)
Stop optimization if Best Fitness <= allowable_error.
* local_search_method : str (default: 'mutation')
Method used in local search. Should be one of
* 'mutation' : NDM/MGG
* 'Powell' : Modified Powell method
* 'DE' : Differential Evolution (strategy: best2bin)
* n_children : int (default: 200)
(method='mutation') The number of children generated in NDM/MGG.
* maxiter : int (default: 10)
(method='Powell' or 'DE') The maximum number of iterations
over which the entire population is evolved.
* workers : int (default: -1 if `end` is None else 1)
(method='DE') The population is subdivided into workers sections and
evaluated in parallel (uses multiprocessing.Pool). Supply -1 to use
all available CPU cores. Set workers to 1 when searching multiple
parameter sets simultaneously.
* overwrite : bool (default: False)
If True, the out/n folder will be overwritten.
Examples
--------
>>> from biomass.models import Nakakuki_Cell_2010
>>> from biomass import Model, optimize
>>> model = Model(Nakakuki_Cell_2010.__package__).create()
>>> optimize(
... model=model, start=1, end=10,
... options={
... 'max_generation': 10000,
... 'allowable_error': 0.5
... }
... )
"""
if options is None:
options = {}
options.setdefault("popsize", 5)
options.setdefault("max_generation", 10000)
options.setdefault("initial_threshold", 1e12)
options.setdefault("allowable_error", 0.0)
options.setdefault("local_search_method", "mutation")
options.setdefault("n_children", 200)
options.setdefault("maxiter", 10)
options.setdefault("workers", -1 if end is None else 1)
options.setdefault("overwrite", False)
_check_optional_arguments(end, options)
ga_init = GeneticAlgorithmInit(model, **options)
if end is None:
ga_init.run(int(start))
else:
n_proc = max(1, multiprocessing.cpu_count() - 1)
with multiprocessing.Pool(processes=n_proc) as p:
for _ in p.imap_unordered(
ga_init.run,
range(int(start), int(end) + 1),
):
pass
def optimize_continue(
model: ModelObject,
start: int,
end: Optional[int] = None,
options: Optional[dict] = None,
) -> None:
"""
Continue running optimization from where you stopped in the last parameter search.
Parameters
----------
model : ModelObject
Model for parameter estimation.
start : int
Index of parameter set to estimate.
end : int, optional
When `end` is specified, parameter sets from `start` to `end` will be estimated.
options : dict, optional
* popsize : int (default: 5)
A multiplier for setting the total population size.
The population has popsize * len(search_param) individuals.
* max_generation : int (default: 15000)
Stop optimization if Generation > max_generation.
* initial_threshold : float (default: 1e12)
Threshold on objective function value used to generate initial population.
Default value is 1e12 (numerically solvable).
* allowable_error : float (default: 0.0)
Stop optimization if Best Fitness <= allowable_error.
* local_search_method : str (default: 'mutation')
Method used in local search. Should be one of
* 'mutation' : NDM/MGG
* 'Powell' : Modified Powell method
* 'DE' : Differential Evolution (strategy: best2bin)
* n_children : int (default: 200)
(method='mutation') The number of children generated in NDM/MGG.
* maxiter : int (default: 10)
(method='Powell' or 'DE') The maximum number of iterations
over which the entire population is evolved.
* workers : int (default: -1 if `end` is None else 1)
(method='DE') The population is subdivided into workers sections and
evaluated in parallel (uses multiprocessing.Pool). Supply -1 to use
all available CPU cores. Set workers to 1 when searching multiple
parameter sets simultaneously.
* p0_bounds : list of floats (default: [0.1, 10.0])
Generate initial population using best parameter values in the last
parameter search.
- lower_bound = po_bounds[0] * best_parameter_value
- upper_bound = p0_bounds[1] * best_parameter_value
Examples
--------
>>> from biomass.models import Nakakuki_Cell_2010
>>> from biomass import Model, optimize_continue
>>> model = Model(Nakakuki_Cell_2010.__package__).create()
>>> optimize_continue(
... model=model, start=1, end=10,
... options={
... 'max_generation': 20000,
... 'allowable_error': 0.5
... }
... )
"""
if options is None:
options = {}
options.setdefault("popsize", 5)
options.setdefault("max_generation", 15000)
options.setdefault("initial_threshold", 1e12)
options.setdefault("allowable_error", 0.0)
options.setdefault("local_search_method", "mutation")
options.setdefault("n_children", 200)
options.setdefault("maxiter", 10)
options.setdefault("workers", -1 if end is None else 1)
options.setdefault("p0_bounds", [0.1, 10.0])
_check_optional_arguments(end, options)
ga_continue = GeneticAlgorithmContinue(model, **options)
if end is None:
ga_continue.run(int(start))
else:
n_proc = max(1, multiprocessing.cpu_count() - 1)
with multiprocessing.Pool(processes=n_proc) as p:
for _ in p.imap_unordered(
ga_continue.run,
range(int(start), int(end) + 1),
):
pass
def run_simulation(
model: ModelObject,
*,
viz_type: str = "original",
show_all: bool = False,
stdev: bool = False,
save_format: str = "pdf",
param_range: Optional[dict] = None,
) -> None:
"""
Simulate ODE model with estimated parameter values.
Parameters
----------
model : ModelObject
Model for simulation.
viz_type : str
* 'average':
The average of simulation results with parameter sets in "out/".
* 'best':
The best simulation result in "out/", simulation with
"best_fit_param".
* 'original':
Simulation with the default parameters and initial values
defined in "set_model.py".
* 'n(=1,2,...)':
Use the parameter set in "out/n/".
* 'experiment'
Draw the experimental data written in observable.py without
simulation results.
show_all : bool
Whether to show all simulation results.
stdev : bool
If True, the standard deviation of simulated values will be shown
(only available for 'average' visualization type).
save_format : str (default: "pdf")
Either "png" or "pdf", indicating whether to save figures
as png or pdf format.
param_range : dict, optional
* orientation : str (default: 'portrait')
Either 'portrait' or 'landscape'.
* distribution : str (default: 'boxenplot')
Either 'boxplot' or 'boxenplot'.
* scatter : bool (default: False)
If True, draw a stripplot.
Examples
--------
>>> from biomass.models import Nakakuki_Cell_2010
>>> from biomass import Model, run_simulation
>>> model = Model(Nakakuki_Cell_2010.__package__).create()
>>> run_simulation(
... model,
... viz_type='average',
... show_all=False,
... stdev=True,
... save_format="png",
... )
"""
if viz_type not in ["best", "average", "original", "experiment"] and not viz_type.isdecimal():
raise ValueError(
"Available viz_type are: 'best','average','original','experiment','n(=1, 2, ...)'"
)
if save_format not in ["pdf", "png"]:
raise ValueError("save_format must be either 'pdf' or 'png'.")
if param_range is None:
param_range = {}
param_range.setdefault("orientation", "portrait")
param_range.setdefault("distribution", "boxenplot")
param_range.setdefault("scatter", False)
if param_range["orientation"] not in ["portrait", "landscape"]:
raise ValueError("Available param_range['orientation'] are: 'portrait' or 'landscape'.")
if param_range["distribution"] not in ["boxplot", "boxenplot"]:
raise ValueError("Available param_range['distribution'] are: 'boxplot' or 'boxenplot'.")
if not isinstance(param_range["scatter"], bool):
raise TypeError("param_range['scatter'] must be a boolean.")
SignalingSystems(model).simulate_all(
viz_type=viz_type,
show_all=show_all,
stdev=stdev,
save_format=save_format,
param_range=param_range,
)
def run_analysis(
model: ModelObject,
*,
target: str,
metric: str = "integral",
style: str = "barplot",
save_format: str = "pdf",
options: Optional[dict] = None,
) -> None:
"""
    Employ sensitivity analysis to identify critical parameters, species, or
    reactions in a complex biological network.
    The sensitivity S(y, x) is calculated as S(y, x) = d ln(y_i) / d ln(x_j),
    where y_i is the signaling metric and x_j is each nonzero species,
    parameter value, or reaction rate.
Parameters
---------
model : ModelObject
Model for sensitivity analysis.
target : str
* 'reaction'
* 'initial_condition'
* 'parameter'
metric : str (default: 'integral')
* 'maximum' : The maximum value.
* 'minimum' : The minimum value.
* 'argmax' : The time to reach the maximum value.
* 'argmin' : The time to reach the minimum value.
* 'timepoint' : The simulated value at the time point set via options['timepoint'].
* 'duration' : The time it takes to decline below the threshold set via options['duration'].
* 'integral' : The integral of concentration over the observation time.
style : str (default: 'barplot')
* 'barplot'
* 'heatmap'
save_format : str (default: "pdf")
Either "png" or "pdf", indicating whether to save figures
as png or pdf format.
options : dict, optional
* show_indices : bool (default: True)
(target == 'reaction') Set to True to put reaction index on each bar.
* excluded_params : list of strings
(target == 'parameter') List of parameters which are not used for analysis.
* excluded_initials : list of strings
(target == 'initial_condition') List of species which are not used for analysis.
* timepoint : int (default: model.sim.t[-1])
(metric=='timepoint') Which timepoint to use.
* duration : float (default: 0.5)
(metric=='duration') 0.1 for 10% of its maximum.
Examples
--------
>>> from biomass.models import Nakakuki_Cell_2010
>>> from biomass import Model, run_analysis
>>> model = Model(Nakakuki_Cell_2010.__package__).create()
>>> # Parameters
>>> run_analysis(
... model,
... target='parameter',
... options = {
... 'excluded_params': [
... 'a', 'Vn', 'Vc', 'Ligand', 'EGF', 'HRG', 'no_ligand'
... ]
... }
... )
>>> # Initial condition
>>> run_analysis(model, target='initial_condition')
>>> # Reaction
>>> run_analysis(model, target='reaction')
"""
if save_format not in ["pdf", "png"]:
raise ValueError("save_format must be either 'pdf' or 'png'.")
if options is None:
options = {}
options.setdefault("show_indices", True)
options.setdefault("excluded_params", [])
options.setdefault("excluded_initials", [])
options.setdefault("timepoint", model.sim.t[-1])
options.setdefault("duration", 0.5)
if not model.sim.t[0] <= options["timepoint"] <= model.sim.t[-1]:
raise ValueError("options['timepooint'] must lie within sim.t.")
if not 0.0 < options["duration"] < 1.0:
raise ValueError("options['duration'] must lie within (0, 1).")
if target == "reaction":
ReactionSensitivity(model).analyze(
metric=metric,
style=style,
save_format=save_format,
options=options,
)
elif target == "parameter":
ParameterSensitivity(model).analyze(
metric=metric,
style=style,
save_format=save_format,
options=options,
)
elif target == "initial_condition":
InitialConditionSensitivity(model).analyze(
metric=metric,
style=style,
save_format=save_format,
options=options,
)
else:
raise ValueError(
"Available targets are: '{}".format(
"', '".join(["reaction", "parameter", "initial_condition"]) + "'."
)
)
| [
"[email protected]"
] | |
9d9a806f2ec508f3d202103ff17d592e98259b7b | 26f23588e80acc2b28d4cc70a8fbcf78c5b33a20 | /PythonSkills/decorator/basic02.py | ac7856a1b7b8a973b0e4280108fd34948670b37e | [] | no_license | Timehsw/PythonCouldbeEverything | aa31b3e32bf68b49fe8e96b971637353a8ef644f | 85d4f1a2c93c7b1edc34ceb9e8bb3c8d7beb30e9 | refs/heads/master | 2021-01-01T15:38:25.253094 | 2018-01-22T06:49:05 | 2018-01-22T06:49:05 | 97,661,530 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | # -*- coding: utf-8 -*-
'''
Created by hushiwei on 2018/1/7.
学习装饰器
闭包
函数里面可以定义函数
函数可以被传递赋值
函数可以被返回
那么装饰器就是,在函数之前额外做些事情
'''
'''
装饰器
有参函数
'''
def a_new_decorator(a_func):
def wrapTheFunction(*args,**kwargs):
print "I am doing some boring work before execution a_func()"
a_func(*args,**kwargs)
print "I am doing some boring work after execution a_func()"
return wrapTheFunction
@a_new_decorator
def a_function_requiring_decoration(name="hushiwei"):
print "I am %s"%name
a_function_requiring_decoration("Mike")
| [
"[email protected]"
] | |
627bcc579421c9e68946a4001c3726b2fc02b966 | e7b665624c1134f7a6b3ab7c043cfa5ec83227bb | /CycleGAN/__init__.py | c73deef2fd0626ca2afa3252d2b902d7958b1f51 | [] | no_license | zhijie-ai/GAN | 46f896909d1f5caedb7725cf44d328e24f4ad699 | 5e64b416209058721c582c3b71a1e9ca25cf169d | refs/heads/master | 2022-10-26T10:28:08.279901 | 2019-08-26T14:09:15 | 2019-08-26T14:09:15 | 204,423,289 | 1 | 3 | null | 2022-10-07T00:52:36 | 2019-08-26T07:45:08 | Python | UTF-8 | Python | false | false | 622 | py | #----------------------------------------------
# -*- encoding=utf-8 -*- #
# __author__:'xiaojie' #
# CreateTime: #
# 2019/7/5 22:13 #
# #
# 天下风云出我辈, #
# 一入江湖岁月催。 #
# 皇图霸业谈笑中, #
# 不胜人生一场醉。 #
#----------------------------------------------
# Two CycleGAN implementations, one in Keras and one in TensorFlow
"[email protected]"
] | |
dcb95199ae8b2d00c2e425403a3da419cc0d1c69 | c8a41e7b2caa015903dc5aff2d8e34a5cbd34b8d | /python/itertools/compress-the-string.py | 0208eec6cbcbd5eed48a9fa26c1a73150292dc0a | [] | no_license | mauricioabreu/hacker-rank | bad197fec3582979df148a8212d330097191c2b7 | 3d2aaae53f438e4ef8d9382cc0c22003248c6787 | refs/heads/master | 2021-01-10T07:25:23.869714 | 2018-06-16T23:17:51 | 2018-06-16T23:17:51 | 46,177,986 | 6 | 1 | null | 2016-08-27T16:18:36 | 2015-11-14T14:38:13 | Python | UTF-8 | Python | false | false | 212 | py | from itertools import groupby
chars = raw_input().strip()
groups = []
for key, value in groupby(chars):
groups.append((len(list(value)), int(key)))
print ' '.join(['(%s, %s)' % (k, v) for k, v in groups])
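# Example (not part of the original submission): for the HackerRank sample input
# '1222311' the groups are printed as: (1, 1) (3, 2) (1, 3) (2, 1)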
| [
"[email protected]"
] | |
0db8f21a975d0dc646839485352d0b6cf4df4064 | 4d66852253aaff5ee93ab73f41a531dd4a0b615d | /baseline_midsf.py | e37424d670ee6649c54e0d648617910e620d2ad2 | [] | no_license | zsc19/CaBERT-SLU | ed030ed01809f0ca6f93505d483a9d6750a48442 | ee1a46cf7c69ab5662a47ce9e65735cf877b1ea9 | refs/heads/main | 2023-08-10T19:30:21.793823 | 2021-09-17T14:18:25 | 2021-09-17T14:18:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,880 | py | """For model training and inference
Data input should be a single sentence.
"""
import random
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.optim import Adam, RMSprop
from transformers import BertTokenizer, BertModel, BertConfig
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
import pickle
import copy
import numpy as np
import collections
from tqdm import tqdm
from collections import Counter, defaultdict
from model import MULTI
from all_data_slot import get_dataloader
from config import opt
from utils import *
def train(**kwargs):
# attributes
for k, v in kwargs.items():
setattr(opt, k, v)
np.random.seed(0)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
torch.backends.cudnn.enabled = False
print('Dataset to use: ', opt.train_path)
print('Dictionary to use: ', opt.dic_path_with_tokens)
print('Data Type: ', opt.datatype)
print('Use pretrained weights: ', opt.retrain)
# dataset
with open(opt.dic_path_with_tokens, 'rb') as f:
dic = pickle.load(f)
with open(opt.slot_path, 'rb') as f:
slot_dic = pickle.load(f)
    with open(opt.train_path, 'rb') as f:
        train_data = pickle.load(f)
    # Load the held-out split as well when a test path is configured; the
    # MixATIS / MixSNIPS branch below unpacks `test_data`.
    if opt.test_path:
        with open(opt.test_path, 'rb') as f:
            test_data = pickle.load(f)
if opt.datatype == "mixatis" or opt.datatype == "mixsnips":
        # MixATIS / MixSNIPS datasets
X_train, y_train, _ = zip(*train_data)
X_test, y_test, _ = zip(*test_data)
elif opt.datatype == "semantic":
# Semantic parsing Dataset
X, y = zip(*train_data)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
elif opt.datatype == "e2e" or opt.datatype == "sgd":
# Microsoft Dialogue Dataset / SGD Dataset
all_data = []
dialogue_id = {}
dialogue_counter = 0
counter = 0
for data in train_data:
for instance in data:
all_data.append(instance)
dialogue_id[counter] = dialogue_counter
counter += 1
dialogue_counter += 1
indices = np.random.permutation(len(all_data))
train = np.array(all_data)[indices[:int(len(all_data)*0.7)]]#[:10000]
test = np.array(all_data)[indices[int(len(all_data)*0.7):]]#[:100]
train_loader = get_dataloader(train, len(dic), len(slot_dic), opt)
val_loader = get_dataloader(test, len(dic), len(slot_dic), opt)
# model
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = MULTI(opt, len(dic), len(slot_dic))
if opt.model_path:
model.load_state_dict(torch.load(opt.model_path))
print("Pretrained model has been loaded.\n")
else:
print("Train from scratch...")
model = model.to(device)
# optimizer, criterion
# param_optimizer = list(model.named_parameters())
# no_decay = ['bias', 'gamma', 'beta']
# optimizer_grouped_parameters = [
# {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
# 'weight_decay_rate': 0.01},
# {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
# 'weight_decay_rate': 0.0}
# ]
# optimizer = BertAdam(optimizer_grouped_parameters,lr=opt.learning_rate_bert, warmup=.1)
optimizer = Adam(model.parameters(), weight_decay=0.01, lr=opt.learning_rate_classifier)
if opt.data_mode == 'single':
criterion = nn.CrossEntropyLoss().to(device)
else:
criterion = nn.BCEWithLogitsLoss(reduction='sum').to(device)
criterion2 = nn.CrossEntropyLoss(reduction='sum').to(device)
best_loss = 100
best_accuracy = 0
best_f1 = 0
# Start training
for epoch in range(opt.epochs):
print("====== epoch %d / %d: ======"% (epoch+1, opt.epochs))
# Training Phase
total_train_loss = 0
total_P = 0
total_R = 0
total_F1 = 0
total_acc = 0
model.train()
ccounter = 0
for (captions_t, masks, labels, slot_labels) in tqdm(train_loader):
captions_t = captions_t.to(device)
masks = masks.to(device)
labels = labels.to(device)
slot_labels = slot_labels.to(device)
slot_labels = slot_labels.reshape(-1)
optimizer.zero_grad()
encoder_logits, decoder_logits, slot_logits = model(captions_t)
train_loss = criterion(encoder_logits, labels)
decoder_logits = decoder_logits.view(-1, len(dic))
slabels = labels.unsqueeze(1)
slabels = slabels.repeat(1, opt.maxlen, 1)
slabels = slabels.view(-1, len(dic))
train_loss += criterion(decoder_logits, slabels)
train_loss += criterion2(slot_logits, slot_labels)
train_loss.backward()
optimizer.step()
total_train_loss += train_loss
P, R, F1, acc = f1_score_intents(encoder_logits, labels)
total_P += P
total_R += R
total_F1 += F1
total_acc += acc
ccounter += 1
print('Average train loss: {:.4f} '.format(total_train_loss / train_loader.dataset.num_data))
precision = total_P / ccounter
recall = total_R / ccounter
f1 = total_F1 / ccounter
print(f'P = {precision:.4f}, R = {recall:.4f}, F1 = {f1:.4f}')
print('Accuracy: ', total_acc/train_loader.dataset.num_data)
# Validation Phase
total_val_loss = 0
total_P = 0
total_R = 0
total_F1 = 0
total_acc = 0
model.eval()
ccounter = 0
stats = defaultdict(Counter)
for (captions_t, masks, labels, slot_labels) in val_loader:
captions_t = captions_t.to(device)
masks = masks.to(device)
labels = labels.to(device)
slot_labels = slot_labels.to(device)
slot_labels = slot_labels.reshape(-1)
with torch.no_grad():
encoder_logits, decoder_logits, slot_logits = model(captions_t)
val_loss = criterion(encoder_logits, labels)
decoder_logits = decoder_logits.view(-1, len(dic))
slabels = labels.unsqueeze(1)
slabels = slabels.repeat(1, opt.maxlen, 1)
slabels = slabels.view(-1, len(dic))
val_loss += criterion(decoder_logits, slabels)
total_val_loss += val_loss
P, R, F1, acc = f1_score_intents(encoder_logits, labels)
total_P += P
total_R += R
total_F1 += F1
total_acc += acc
ccounter += 1
_, index = torch.topk(slot_logits, k=1, dim=-1)
evaluate_iob(index, slot_labels, slot_dic, stats)
print('========= Validation =========')
print('Average val loss: {:.4f} '.format(total_val_loss / val_loader.dataset.num_data))
precision = total_P / ccounter
recall = total_R / ccounter
f1 = total_F1 / ccounter
print(f'P = {precision:.4f}, R = {recall:.4f}, F1 = {f1:.4f}')
print('Accuracy: ', total_acc/val_loader.dataset.num_data)
val_acc = total_acc/val_loader.dataset.num_data
# print slot stats
p_slot, r_slot, f1_slot = prf(stats['total'])
print('========= Slot =========')
print(f'Slot Score: P = {p_slot:.4f}, R = {r_slot:.4f}, F1 = {f1_slot:.4f}')
# for label in stats:
# if label != 'total':
# p, r, f1 = prf(stats[label])
# print(f'{label:4s}: P = {p:.4f}, R = {r:.4f}, F1 = {f1:.4f}')
if f1 > best_f1:
print('saving with loss of {}'.format(total_val_loss),
'improved over previous {}'.format(best_loss))
best_loss = total_val_loss
best_accuracy = val_acc
best_f1 = f1
best_stats = copy.deepcopy(stats)
torch.save(model.state_dict(), 'checkpoints/best_{}_{}_baseline.pth'.format(opt.datatype, opt.data_mode))
print()
print('Best total val loss: {:.4f}'.format(total_val_loss))
print('Best Test Accuracy: {:.4f}'.format(best_accuracy))
print('Best F1 Score: {:.4f}'.format(best_f1))
p_slot, r_slot, f1_slot = prf(best_stats['total'])
print('Final evaluation on slot filling of the validation set:')
print(f'Overall: P = {p_slot:.4f}, R = {r_slot:.4f}, F1 = {f1_slot:.4f}')
#####################################################################
def test(**kwargs):
# attributes
for k, v in kwargs.items():
setattr(opt, k, v)
np.random.seed(0)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
torch.backends.cudnn.enabled = False
print('Dataset to use: ', opt.train_path)
print('Dictionary to use: ', opt.dic_path)
# dataset
with open(opt.dic_path, 'rb') as f:
dic = pickle.load(f)
reverse_dic = {v: k for k,v in dic.items()}
with open(opt.slot_path, 'rb') as f:
slot_dic = pickle.load(f)
with open(opt.train_path, 'rb') as f:
train_data = pickle.load(f)
if opt.test_path:
with open(opt.test_path, 'rb') as f:
test_data = pickle.load(f)
if opt.datatype == "atis":
# ATIS Dataset
X_train, y_train, _ = zip(*train_data)
X_test, y_test, _ = zip(*test_data)
elif opt.datatype == "semantic":
# Semantic parsing Dataset
X, y = zip(*train_data)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
elif opt.datatype == "e2e" or opt.datatype == "sgd":
# Microsoft Dialogue Dataset / SGD Dataset
all_data = []
dialogue_id = {}
dialogue_counter = 0
counter = 0
for data in train_data:
for instance in data:
all_data.append(instance)
dialogue_id[counter] = dialogue_counter
counter += 1
dialogue_counter += 1
indices = np.random.permutation(len(all_data))
X_train = np.array(all_data)[indices[:int(len(all_data)*0.7)]]#[:10000]
X_test = np.array(all_data)[indices[int(len(all_data)*0.7):]]#[:100]
X_train, mask_train = load_data(X_train)
X_test, mask_test = load_data(X_test)
# model
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = MULTI(opt, len(dic), len(slot_dic))
if opt.model_path:
model.load_state_dict(torch.load(opt.model_path))
print("Pretrained model has been loaded.\n")
model = model.to(device)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
# Store embeddings
if opt.test_mode == "embedding":
train_loader = get_dataloader(X_train, y_train, mask_train, opt)
results = collections.defaultdict(list)
model.eval()
for i, (captions_t, labels, masks) in enumerate(train_loader):
captions_t = captions_t.to(device)
labels = labels.to(device)
masks = masks.to(device)
with torch.no_grad():
hidden_states, pooled_output, outputs = model(captions_t, masks)
print("Saving Data: %d" % i)
for ii in range(len(labels)):
key = labels[ii].data.cpu().item()
embedding = pooled_output[ii].data.cpu().numpy().reshape(-1)
word_embeddings = hidden_states[-1][ii].data.cpu().numpy()
tokens = tokenizer.convert_ids_to_tokens(captions_t[ii].data.cpu().numpy())
tokens = [token for token in tokens if token != "[CLS]" and token != "[SEP]" and token != "[PAD]"]
original_sentence = " ".join(tokens)
results[key].append((original_sentence, embedding, word_embeddings))
torch.save(results, embedding_path)
# Run test classification
elif opt.test_mode == "data":
# Single instance
# index = np.random.randint(0, len(X_test), 1)[0]
# input_ids = X_test[index]
# attention_masks = mask_test[index]
# print(" ".join(tokenizer.convert_ids_to_tokens(input_ids)))
# captions_t = torch.LongTensor(input_ids).unsqueeze(0).to(device)
# mask = torch.LongTensor(attention_masks).unsqueeze(0).to(device)
# with torch.no_grad():
# pooled_output, outputs = model(captions_t, mask)
# print("Predicted label: ", reverse_dic[torch.max(outputs, 1)[1].item()])
# print("Real label: ", reverse_dic[y_test[index]])
# Validation Phase
test_loader = get_dataloader(X_test, y_test, mask_test, len(dic), opt)
error_ids = []
pred_labels = []
real_labels = []
test_corrects = 0
totals = 0
model.eval()
for i, (captions_t, labels, masks) in enumerate(test_loader):
print('predict batches: ', i)
captions_t = captions_t.to(device)
labels = labels.to(device)
masks = masks.to(device)
with torch.no_grad():
_, pooled_output, outputs = model(captions_t, masks)
co, to = calc_score(outputs, labels)
test_corrects += co
totals += to
if opt.data_mode == 'single':
idx = torch.max(outputs, 1)[1] != labels
wrong_ids = [tokenizer.convert_ids_to_tokens(caption, skip_special_tokens=True) for caption in captions_t[idx]]
error_ids += wrong_ids
pred_labels += [reverse_dic[label.item()] for label in torch.max(outputs, 1)[1][idx]]
real_labels += [reverse_dic[label.item()] for label in labels[idx]]
else:
for i, logits in enumerate(outputs):
log = torch.sigmoid(logits)
correct = (labels[i][torch.where(log>0.5)[0]]).sum()
total = len(torch.where(labels[i]==1)[0])
if correct != total:
wrong_caption = tokenizer.convert_ids_to_tokens(captions_t[i], skip_special_tokens=True)
error_ids.append(wrong_caption)
pred_ls = [reverse_dic[p] for p in torch.where(log>0.5)[0].detach().cpu().numpy()]
real_ls = [reverse_dic[i] for i, r in enumerate(labels[i].detach().cpu().numpy()) if r == 1]
pred_labels.append(pred_ls)
real_labels.append(real_ls)
with open('error_analysis/{}_{}.txt'.format(opt.datatype, opt.data_mode), 'w') as f:
f.write('----------- Wrong Examples ------------\n')
for i, (caption, pred, real) in enumerate(zip(error_ids, pred_labels, real_labels)):
f.write(str(i)+'\n')
f.write(' '.join(caption)+'\n')
f.write('Predicted label: {}\n'.format(pred))
f.write('Real label: {}\n'.format(real))
f.write('------\n')
test_acc = test_corrects.double() / test_loader.dataset.num_data if opt.data_mode == 'single' else test_corrects.double() / totals
print('Test accuracy: {:.4f}'.format(test_acc))
# User defined
elif opt.test_mode == "user":
while True:
print("Please input the sentence: ")
text = input()
print("\n======== Predicted Results ========")
print(text)
text = "[CLS] " + text + " [SEP]"
tokenized_text = tokenizer.tokenize(text)
tokenized_ids = np.array(tokenizer.convert_tokens_to_ids(tokenized_text))[np.newaxis,:]
input_ids = pad_sequences(tokenized_ids, maxlen=opt.maxlen, dtype="long", truncating="post", padding="post").squeeze(0)
attention_masks = [float(i>0) for i in input_ids]
captions_t = torch.LongTensor(input_ids).unsqueeze(0).to(device)
mask = torch.LongTensor(attention_masks).unsqueeze(0).to(device)
with torch.no_grad():
                _, pooled_output, outputs = model(captions_t, mask)
print("Predicted label: ", reverse_dic[torch.max(outputs, 1)[1].item()])
print("=================================")
if __name__ == '__main__':
import fire
fire.Fire()
| [
"[email protected]"
] | |
ec3c3e9a1609b3241c9287dcf01219c6d607eeb7 | d12b53101c289a1d752862e20ffc079e3ab4e057 | /2.0/overturn.py | a15642ab7a89395e9a8230990f94277a71dc0b9f | [] | no_license | UCAS007/adavanced-aritificial-intelligence | 13708985b65fe0d27ed1fe93e05eb54ddef9949d | d88fcc8f5a59f290a866a04db6bcbe133bdc3ba3 | refs/heads/master | 2021-01-10T15:21:07.819354 | 2016-05-03T14:03:00 | 2016-05-03T14:03:00 | 45,598,387 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,038 | py | """
Fmax=0.995512 Perceptron
"""
import pickle
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier,Perceptron
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import metrics
import jieba
import mydataset
import os
import csv,codecs
import preprocessing as pp
import time
def train():
trainFileName='train.pkl'
testFileName='test.pkl'
pipelineFileName='pipeline.pkl'
if(os.path.exists(trainFileName)):
fin=open(trainFileName,'r')
trainData=pickle.load(fin)
trainClass=pickle.load(fin)
fin.close()
else:
trainText=mydataset.getAllTrainTextList()
i=0;
N=trainText.__len__()
trainData=[]
trainClass=[]
for (tag,text) in trainText:
i=i+1
if(i%5000==0):
print('i=%08d finished %5.5f%% using jieba to cut the text\n'%(i,i*100.0/N))
trainData.append(text)
trainClass.append(tag)
fout=open(trainFileName,'w')
pickle.dump(trainData,fout)
pickle.dump(trainClass,fout)
fout.close()
#if(os.path.exists(pipelineFileName)):
if(False):
fin=open(pipelineFileName,'r')
pipeline=pickle.load(fin)
fin.close()
else:
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', Perceptron()),
])
#pipeline.set_params(vect__max_df=0.6,clf__alpha=1e-07,clf__penalty='l2',tfidf__norm='l1',tfidf__use_idf=True,vect__ngram_range=(1,2))
pipeline.set_params(vect__max_df=0.6,tfidf__norm='l1',tfidf__use_idf=True,vect__ngram_range=(1,2))
trainNum=trainData.__len__()
pipeline.fit(trainData[0:trainNum],trainClass[0:trainNum])
fout=open(pipelineFileName,'w')
pickle.dump(pipeline,fout)
fout.close()
#################################### output train result
trainNum=trainData.__len__()
#print 'train result '+"#"*30
prec=pipeline.predict(trainData[0:trainNum])
expected=trainClass[0:trainNum]
#print("Classification report for classifier:\n%s\n"
#% (metrics.classification_report(expected, prec)))
TP=0.0
TN=0.0
FP=0.0
FN=0.0
N=trainData.__len__()
for i in range(0,trainNum):
if(prec[i]==expected[i]):
if(prec[i]==u'1'):
TP=TP+1
else:
TN=TN+1
else:
if(prec[i]==u'1'):
FP=FP+1
else:
FN=FN+1
P=TP/(TP+FP)
R=TP/(TP+FN)
F=2*P*R/(P+R)
#print('train result: P=%f,R=%f,F=%f\n'%(P,R,F))
return F,pipeline
############################################# output test result
if __name__ == '__main__' :
trainFileName='train.pkl'
testFileName='test.pkl'
pipelineFileName='pipeline.pkl'
bestPipelineFileName='bestPipeline.pkl'
Fmax=0
for i in range(1,10):
print ('i=%d \n'%(i))
t1=time.time()
F,pipeline=train()
t2=time.time()
print (t2-t1).__str__()+'s'
if(F>Fmax):
Fmax=F
bestPipeline=pipeline
print('Fmax=%f \n'%(Fmax))
fout=open(bestPipelineFileName,'w')
pickle.dump(bestPipeline,fout)
fout.close()
print('Fmax=%f \n' % (Fmax))
if(os.path.exists(testFileName)):
fin=open(testFileName,'r')
testText=pickle.load(fin)
fin.close()
else:
testText=mydataset.getTestTextList()
fout=open(testFileName,'w')
pickle.dump(testText,fout)
fout.close()
outputFileName='../output/upload.csv'
fileOutput=codecs.open(outputFileName,'w','utf-8')
prec=pipeline.predict(testText)
N=800001
for i in prec:
fileOutput.write(N.__str__()+','+i+'\n')
N=N+1
fileOutput.close()
os.system("mplayer ~/music/alert.mp3") | [
"[email protected]"
] | |
069240fe041da4600557e9ba6ab166a4c5a27da8 | 0c6bd6305cbd128fe7426f66ec9bf4d01fb9b40c | /backend_apps_web_based/flask/RESTful_api_part3/test.py | fe6c099a291d13fe3bef662c606846c898e25092 | [] | no_license | yennanliu/web_development | 08dbffc549214952f7b02dc29837474b0ad6e980 | 8dc6b224040b3953999d5e8d4a7f26d8e92ca931 | refs/heads/master | 2021-05-24T03:12:24.344381 | 2020-12-30T09:31:15 | 2020-12-30T09:31:15 | 72,651,027 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,281 | py | import sys, os, json, requests
import pytest, unittest
from flask_sqlalchemy import SQLAlchemy
# main flask app
from app import app
db = SQLAlchemy(app)
def TestHelloworld():
response = requests.get('http://0.0.0.0:5000/')
assert response.status_code == 200
def TestApi():
response = requests.get('http://0.0.0.0:5000/product/api/v1.0/products')
print (response)
assert response.status_code == 200
class TestDB(unittest.TestCase):
# setup and tear down
# executed prior to each test
def setUp(self):
app.config['TESTING'] = True
app.config['WTF_CSRF_ENABLED'] = False
app.config['DEBUG'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + 'database.db'
self.app = app.test_client()
db.drop_all()
db.create_all()
self.assertEqual(app.debug, False)
# executed after each test
def tearDown(self):
db.session.remove()
db.drop_all()
# tests
def test_main_page(self):
response = self.app.get('/', follow_redirects=True)
self.assertEqual(response.status_code, 200)
# test models
# TODO, will update this when creating DB via db model
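    # Illustrative sketch only (not from the original project): once a
    # SQLAlchemy model such as a hypothetical `Product` exists, a round-trip
    # test could look like the commented example below.
    # def test_product_model(self):
    #     product = Product(name='sample')
    #     db.session.add(product)
    #     db.session.commit()
    #     self.assertEqual(Product.query.count(), 1)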
if __name__ == "__main__":
TestHelloworld()
TestApi()
unittest.main() | [
"[email protected]"
] | |
e35602b4e63050a98f36b2620a2b840278beb790 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02972/s165580953.py | 33b7efe88d0ed61cd2af5b945b54fc7bd16ee28d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 590 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Created: Jul, 13, 2020 08:25:55 by Nobody
# $Author$
# $Date$
# $URL$
__giturl__ = "$URL$"
from sys import stdin
input = stdin.readline
def main():
N = int(input())
A = [-1]+list(map(int, input().split()))
D = [-1]*(N+1)
for i in range(N, 0, -1):
if i > int(N/2):
D[i] = A[i]
else:
temp_sum = 0
for j in range(N//i, 1, -1):
temp_sum += D[i*j]
D[i] = (temp_sum % 2) ^ A[i]
print(sum(D[1:]))
for i in range(1, N+1):
if D[i]:
print(i)
if(__name__ == '__main__'):
main()
| [
"[email protected]"
] | |
57ba84aabc962427d8bb568812dcabaa61ca840a | e705de3a44a7cc922e93c76c3aa6e6108222e538 | /problems/0128_longest_consecutive_sequence.py | 5e3a863543e3dfb214407f3bf1547862272121e1 | [] | no_license | sokazaki/leetcode_solutions | 34d4877dc7d13dc80ef067211a316c48c6269eca | 42cf52eeef537806c9e3ec7a6e5113c53d0f18a3 | refs/heads/master | 2021-06-21T22:23:25.403545 | 2021-02-21T16:47:19 | 2021-02-21T16:47:19 | 193,951,202 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 826 | py | # O(N) Solution with Hashmap
import unittest
def longestConsecutive(nums):
nums = set(nums)
res = 0
while nums:
first = last = nums.pop()
while first-1 in nums:
first -= 1
nums.remove(first)
while last+1 in nums:
last += 1
nums.remove(last)
res = max(res, last-first+1)
return res
class Test(unittest.TestCase):
def test_longestConsecutive(self):
self.assertEqual(longestConsecutive([100,4,200,1,3,2]), 4)
self.assertEqual(longestConsecutive([100,4,200,1,33,2]), 2)
self.assertEqual(longestConsecutive([1,44,200,1,3,2]), 3)
self.assertEqual(longestConsecutive([]), 0)
self.assertEqual(longestConsecutive([100,44,200,11,33,2]), 1)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
4dc955342c28aac7a9bf9c2f0272ce450110998e | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/formatter/indentInComprehensions.py | 7e88b6b3d3bd6218119837995f02a78b214acc0d | [
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 31 | py | attrs = [e.attr for e in
items] | [
"[email protected]"
] | |
06a59e43096e806dd20c21c29f851772da55e59a | e2a0d262b5a3c26a30ed02c78cb905363df9241c | /com/11_class2.py | 2d657a02b7bdf2f9b0f1bb5a9fc78a3329a1a38c | [] | no_license | Kyeongrok/python_selinium | 75b158f0c46aa5d2b7b627dd4a6775c3c6ab66ef | 233c90b3294949813cc910a8b0b2f5fed7df80a9 | refs/heads/master | 2020-04-01T03:35:06.925347 | 2018-10-27T05:03:51 | 2018-10-27T05:03:51 | 152,827,717 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | class people():
name = "kyeongrok"
def sayHello(self):
print("hello")
def leftHand(self):
print("i'm left hand")
def rightHand(self):
print("i'm right hand")
def setName(self, name):
self.name = name
kyeongrok = people()
kyeongrok.setName("iu")
print(kyeongrok.name)
kyeongrok.sayHello()
kyeongrok.leftHand()
kyeongrok.rightHand() | [
"[email protected]"
] | |
90dfd10d500b640ee62fc6fd0666c4b2d4804062 | 9fb51c3fc9e3d2b3a4267bc1ecfc826109d0b60d | /test/LaneDetectionTest.py | 319478eb6ad1c3e17bce3cff37e2b897ccec2494 | [] | no_license | g41903/LaneDetection | 6b929dcac664d003851cf4b60eaa124fffe5908d | 1cfba5940ff25274147252f1169e737954ad29f4 | refs/heads/master | 2021-01-19T06:36:54.047242 | 2016-07-27T16:52:51 | 2016-07-27T16:52:51 | 64,326,062 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 20,631 | py | # ipm.py
import numpy as np
import cv2
import cv
# # focal length
# fu = 0.0
# fv = 0.0
#
# # optical center
# center_u = 0.0
# center_v = 0.0
#
# # extrinsic parameters
# pitch = 0.0
# yaw = 0.0
# # height of the camera in mm
# h = 0.0
#
# # ROI (region of interest)
# ROILeft = 0
# ROIRight = 0
# ROITop = 0
# ROIBottom = 0
#
# # ipm size
# ipm_width = 0
# ipm_height = 0
#
# # intermediate variables
# # sin and cos use radians, not degrees
# c1 = 0.0
# c2 = 0.0
# s1 = 0.0
# s2 = 0.0
#
# # distances (in the world frame) - to - pixels ratio
# ratio_x = 0
# ratio_y = 0
# focal length
fu = 750.0
fv = 720.0
# optical center
center_u = 658
center_v = 372
# extrinsic parameters
pitch = 9.0
yaw = 0
# height of the camera in mm
h = 790
# ROI (region of interest)
ROILeft = 0
ROIRight = 1100
ROITop = 500
ROIBottom = 719
# ipm size
ipm_width = 600
ipm_height = 800
# intermediate variables
# sin and cos use radians, not degrees
c1 = 1.0
c2 = 1.0
s1 = 1.0
s2 = 1.0
# distances (in the world frame) - to - pixels ratio
ratio_x = 10
ratio_y = 10
# transformation of a point from image frame [u v] to world frame [x y]
offset_x=0.0
offset_y=0.0
def image2ground(uv):
dummy_data = np.array([
-c2 / fu, s1 * s2 / fv, center_u * c2 /
fu - center_v * s1 * s2 / fv - c1 * s2,
s2 / fu, s1 * c2 / fv, -center_u *
s2 / fu - center_v * s1 * c2 / fv - c1 * c2,
0, c1 / fv, -center_v * c1 / fv + s1,
0, -c1 / h / fv, center_v * c1 / h / fv - s1 / h
])
# static cv::Mat transformation_image2ground = cv::Mat(4, 3, CV_32F, dummy_data);
# Mat object was needed because C/C++ lacked a standard/native implementation of matrices.
# However, numpy's array is a perfect replacement for that functionality.
# Hence, the cv2 module accepts numpy.arrays wherever a matrix is
# indicated in the docs.
transformation_image2ground = dummy_data.reshape((4, 3))
transformation_image2ground=np.asmatrix(transformation_image2ground)
# Construct the image frame coordinates
# dummy_data2 = [uv.x, uv.y, 1]
image_coordinate=np.matrix([[uv[0]],[uv[1]],[1]])
# Find the world frame coordinates
world_coordinate = transformation_image2ground * image_coordinate
# Normalize the vector
# the indexing of matrix elements starts from 0
#?? world_coordinate.at<float>(3, 0);
# print(world_coordinate)
world_coordinate = world_coordinate / (world_coordinate[3][0])
return (world_coordinate[0][0], world_coordinate[1][0])
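# Illustrative usage (not part of the original module): image2ground maps an
# image pixel (u, v) to ground-plane coordinates using the calibration globals
# above. The pixel below is an arbitrary point inside the configured ROI, and
# the returned x/y are expressed in the same unit as the camera height h (mm).
#   x_ground, y_ground = image2ground((640, 600))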
# transformation of a point from world frame [x y] to image frame [u v]
def ground2image(xy):
    dummy_data = np.array([
        c2 * fu + center_u * c1 * s2, center_u * c1 * c2 - s2 * fu, -center_u * s1,
        s2 * (center_v * c1 - fv * s1), c2 *
        (center_v * c1 - fv * s1), -fv * c1 - center_v * s1,
        c1 * s2, c1 * c2, -s1,
        c1 * s2, c1 * c2, -s1
    ])
    transformation_ground2image = np.asmatrix(dummy_data.reshape(4, 3))
    # Construct the world frame coordinates (the ground plane lies at z = -h)
    world_coordinate = np.matrix([[xy[0]], [xy[1]], [-h]])
    # Find the image frame coordinates
    image_coordinate = transformation_ground2image * world_coordinate
    # Normalize the vector
    # the indexing of matrix elements starts from 0
    image_coordinate = image_coordinate / image_coordinate[3, 0]
    return (image_coordinate[0, 0], image_coordinate[1, 0])
def ipm2image(uv):
    u, v = uv
    x_world = offset_x + u * ratio_x
    y_world = offset_y + (ipm_height - v) * ratio_y
    return ground2image((x_world, y_world))
def getIPM(input, ipm_width, ipm_height):
    # Input Quadrilateral or Image plane coordinates
imageQuad = np.empty([4, 2])
# World plane coordinates
groundQuad = np.empty([4, 2])
    # Output Quadrilateral
ipmQuad = np.empty([4, 2])
# Lambda Matrix
lambda_mat = np.empty([3, 3])
    # The 4 points that select the quadrilateral on the input, from top-left in clockwise order
# These four pts are the sides of the rect box used as input
imageQuad=np.array([[ROILeft,ROITop],[ROIRight,ROITop],[ROIRight,ROIBottom],[ROILeft,ROIBottom]],np.float32)
# The world coordinates of the 4 points
for i in range(0, 4):
groundQuad[i] = image2ground(imageQuad[i])
offset_x = groundQuad[0][0]
offset_y = groundQuad[3][1]
# float ground_width = (groundQuad[1][0]-groundQuad[0][0]) //top-right.x - top-left.x
# float ground_length = (groundQuad[0][1]-groundQuad[4][1]) //top-left.y - bottom-left.y
ratio_x = (groundQuad[1][0] - groundQuad[0][0]) / ipm_width
ratio_y = (groundQuad[0][1] - groundQuad[3][1]) / ipm_height
# Compute coordinates of the bottom two points in the ipm image frame
x_bottom_left = (groundQuad[3][0] - groundQuad[0][0]) / ratio_x
    x_bottom_right = (groundQuad[2][0] - groundQuad[0][0]) / ratio_x
    # The 4 points where the mapping is to be done, from top-left in
# clockwise order
ipmQuad=np.array([[0,0],[ipm_width-1,0],[x_bottom_right,ipm_height-1],[x_bottom_left,ipm_height-1]],np.float32)
# Get the Perspective Transform Matrix i.e. lambda
lambda_mat = cv2.getPerspectiveTransform(imageQuad, ipmQuad)
# Apply the Perspective Transform just found to the src image
ipm = cv2.warpPerspective(input, lambda_mat, (ipm_width, ipm_height))
return ipm
# misc.py
# // parameters for white pixel extraction
hueMinValue = 0
hueMaxValue = 255
satMinValue = 0
satMaxValue = 15
volMinValue = 240
volMaxValue = 255
lightMinValue = 190
lightMaxValue = 255
# // extraction of white pixels
thres_white_init = 0.5
thres_exposure_max = 1500
thres_exposure_min = 1200
# // This function takes an angle in the range [-3*pi, 3*pi] and
# // wraps it to the range [-pi, pi].
def wrapTheta(theta):
if theta > np.pi:
return theta - 2 * np.pi
elif theta < -np.pi:
return theta + 2 * np.pi
return theta
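# Worked example (illustrative): 4.0 rad is greater than pi, so wrapTheta(4.0)
# returns 4.0 - 2*pi ~= -2.283; symmetrically, wrapTheta(-4.0) ~= 2.283.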
# // Construct a new image using only one single channel of the input image
# // if color_image is set to 1, create a color image; otherwise a single-channel image is returned.
# // 0 - B; 1 - G; 2 - R
def getSingleChannel(input, channel, color_image):
spl = cv2.split(input)
if color_image == 0:
return spl[channel]
# emptyMat = thresholded
channels = np.empty(input.shape)
# Only show color blue channel
if channel == 0:
input[:, :, 1] = 0
input[:, :, 2] = 0
channels = input
elif channel == 1:
# Only show color green channel
input[:, :, 0] = 0
input[:, :, 2] = 0
channels = input
else:
# Only show colorred channel
input[:, :, 0] = 0
input[:, :, 1] = 0
channels = input
print "channels:", channels.shape
output = channels
return output
# // Show different channels of an image
def showChannels(input):
# cv2.imshow("B",getSingleChannel(input,0,True)) #b
# cv2.imshow("G",getSingleChannel(input,1,True)) #g
# cv2.imshow("R",getSingleChannel(input,2,True)) #r
# greyMat = thresholded
# greyMat=cv2.cvtColor(input,cv2.COLOR_BGR2GRAY)
# cv2.imshow("GREY",greyMat) #grey-scale
print "Hello"
def edgeDetection(rgb_frame, detect_method, debug_mode):
singleChannel = getSingleChannel(rgb_frame, 0, False)
# // First apply Gaussian filtering
blurred_ipm = cv2.GaussianBlur(singleChannel, (5, 5), 0)
cv2.imshow("blurred_ipm:",blurred_ipm)
if debug_mode:
cv2.imshow('Blurred ipm:', blurred_ipm)
# // Edge detection
# // adaptive thresholding outperforms canny and other filtering methods
max_value = 255
adaptiveMethod = cv2.ADAPTIVE_THRESH_GAUSSIAN_C
thresholdType = cv2.THRESH_BINARY_INV
blockSize = 11
C = 5
detection_edge=cv2.adaptiveThreshold(blurred_ipm,max_value,adaptiveMethod,thresholdType,blockSize,C)
# detection_edge = np.zeros(blurred_ipm.shape, np.uint8)
# detection_edge = cv2.adaptiveThreshold(
# blurred_ipm, max_value, adaptiveMethod, thresholdType, blockSize, C)
if debug_mode:
cv2.imshow('Detection edge:', detection_edge)
return detection_edge
def testShowChannels():
img = cv2.imread(
'/Users/g41903/Desktop/MIT/Media Lab/LaneDetection/LaneView.jpg', cv2.CV_8UC1)
edgeDetection(img, 0, 0)
cv2.waitKey(0)
cv2.destroyAllWindows()
# showChannels(img)
# cv2.waitKey(0)
# cv2.imshow("image",img)
# testShowChannels()
# // Extract white pixels from a RGB image#
def extractWhitePixel(rgb_frame,extract_method,debug_mode):
if extract_method=='HSV':
# Convert BGR to HSV
hsv_frame=cv2.cvtColor(rgb_frame,cv2.COLOR_BGR2HSV)
# define range of color in HSV
min_color=np.array([hueMinValue,satMinValue,volMinValue])
max_color=np.array([hueMaxValue,satMaxValue,volMaxValue])
# Threshold the HSV image to get only specific color
threshold_frame = cv2.inRange(hsv_frame, min_color, max_color)
return threshold_frame
# cv2.imshow('frame',rgb_frame)
# cv2.imshow('mask',mask)
# cv2.imshow('res',res)
elif extract_method=='HLS':
# Convert BGR to HLS
hls_frame=cv2.cvtColor(rgb_frame,cv2.COLOR_BGR2HLS)
# define range of color in HSV
min_color=np.array([hueMinValue,satMinValue,volMinValue])
max_color=np.array([hueMaxValue,satMaxValue,volMaxValue])
# Threshold the HSV image to get only specific color
mask = cv2.inRange(hls_frame, min_color, max_color)
# Bitwise-AND mask and original image
res = cv2.bitwise_and(rgb_frame,rgb_frame, mask= mask)
cv2.imshow('frame',rgb_frame)
cv2.imshow('mask',mask)
cv2.imshow('res',res)
return res
elif extract_method=='ADAPTIVE':
# Get a single channel
singleChannel=getSingleChannel(rgb_frame,0,False)
# Extraction of white pixels
minVal,maxVal,minLoc,maxLoc=cv2.minMaxLoc(singleChannel)
# min,max=minMaxLoc(singleChannel)
thresholded=np.zeros(singleChannel.shape, np.uint8)
#adaptive thresholding
maxValue=255
thres_count=0
thres_adaptive=thres_white_init
thres_upper_bound=1
thres_lower_bound=0
type=cv2.THRESH_BINARY
        while thres_count<10:
            thres_count+=1
            thresh=minVal+(maxVal-minVal)*thres_adaptive
            _,thresholded=cv2.threshold(singleChannel,thresh,maxValue,type)
            # count the number of white pixels after thresholding
            s=np.sum(thresholded)/255
if s>thres_exposure_max:
thres_lower_bound=thres_adaptive
thres_adaptive=(thres_upper_bound+thres_lower_bound)/2
elif s<thres_exposure_min:
thres_upper_bound=thres_adaptive
thres_adaptive=(thres_upper_bound+thres_lower_bound)/2
else:
break
return thresholded
def testExtractWhitePixel():
# hueMinValue=110
# satMinValue=50
# volMinValue=50
# hueMaxValue=130
# satMaxValue=255
# volMaxValue=255
cap = cv2.VideoCapture(0)
while(1):
# Take each frame
_, frame = cap.read()
extractWhitePixel(frame,'HSV',True)
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
cv2.destroyAllWindows()
# testExtractWhitePixel()
# /** @function Dilation */
def Dilation(src,dilation_elem,dilation_size,debug_mode,title):
dilation_type=0
if dilation_elem==0:
dilation_type=cv2.MORPH_RECT
elif dilation_elem==1:
dilation_type=cv2.MORPH_CROSS
elif dilation_elem==2:
dilation_type=cv2.MORPH_ELLIPSE
element=cv2.getStructuringElement(dilation_type,(int(2*dilation_size+1),int(2*dilation_size+1)))
dilation_dst=0
dilation_dst=cv2.dilate(src,element,iterations=1)
if debug_mode:
cv2.imshow(title,dilation_dst)
return dilation_dst
# // TODO: curve fitting
# void fitCurve(cv::Mat lane_boundaries, cv::Mat ipm_rgb)
# {
# std::vector<Point> filtered_points;
# int count = 0;
# for (int y = 0; y < lane_boundaries.rows; y++)
# {
# for (int x = 0; x < lane_boundaries.cols; x++)
# {
# // cout << lane_boundaries.at<uchar>(y, x) << " ";
# // In OpenCV, x and y are inverted when trying to access an element
# if (lane_boundaries.at<uchar>(y, x) == 255)
# {
# Point pt(x, y);
# filtered_points.push_back(pt);
# }
# }
# }
# def fitCurve(lane_boundaries,ipm_rgb):
# for y in range(0,lane_boundaries.rows):
# for x in range(0,lane_boundaries.cols):
# if point
# std::vector<double> coefficients = polyFit(filtered_points);
# double c1 = coefficients[0];
# double c2 = coefficients[1];
# double c3 = coefficients[2];
# cout << "c1 = " << c1 << "\tc2 = " << c2 << "\tc3 = " << c3 << endl;
# // cout << "filtered_points.size() = " << filtered_points.size() << "\tapproxCurve.size() = " << approxCurve.size() << endl;
# std::vector<Point> testCurve;
# for (int x=0; x<lane_boundaries.cols; x++)
# {
# int appro_y = c1 + c2 * x + c3 * pow(x, 2);
# Point pt(x, appro_y);
# // cout << "appro_y = " << appro_y << endl;
# testCurve.push_back(pt);
# }
# Scalar color = Scalar( 255, 0, 0 );
# polylines(ipm_rgb, testCurve, false, color);
# imshow("Curve detection", ipm_rgb);
# // polylines(ipm_rgb, filtered_points, false, color);
# // imshow("check input to curve detection", ipm_rgb);
# imshow("lane_boundaries", lane_boundaries);
# }
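# A minimal, untested Python sketch of the quadratic fit outlined in the
# commented-out C++ above, using np.polyfit in place of the C++ polyFit helper.
# It assumes lane_boundaries is a binary image (255 = lane pixel) and ipm_rgb a
# colour image of the same size; the function name itself is illustrative.
def fitCurveSketch(lane_boundaries, ipm_rgb):
    # collect the coordinates of the filtered lane pixels
    ys, xs = np.where(lane_boundaries == 255)
    if len(xs) < 3:
        return ipm_rgb
    # np.polyfit returns the highest-order coefficient first: y = c3*x^2 + c2*x + c1
    c3, c2, c1 = np.polyfit(xs, ys, 2)
    curve = np.array([[x, int(c1 + c2 * x + c3 * x ** 2)]
                      for x in range(lane_boundaries.shape[1])], np.int32)
    cv2.polylines(ipm_rgb, [curve], False, (255, 0, 0))
    return ipm_rgb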
# LaneDetectTest.py
# // input image size
image_width = 1280
image_height = 720
# // Hough transform
thres_num_points = 200
# // clustering of lines
thres_cluster_delta_angle = 10
thres_cluster_delta_rho = 20
# // if two lanes are parallel and of certain distance, then left and right lanes are both detected. Pick the left one
thres_parallel_delta_angle = 3
thres_parallel_delta_rho =150
# // if two lanes are converging. Pick the right one
thres_converge_delta_angle = 10
thres_converge_delta_rho = 60
# // method for edge detection
detect_method = 10
dilation_white_size = 3
# // method for white pixel extraction
extract_method = 'HSV'
dilation_element = 0.5
dilation_edge_size = 0.5
# /*
# This is the main function for lane detection. It takes an image as input and returns a vector of lines.
# Each element in the returned vector contains rho and theta of the detected lane in the ground plane.
# theta - the angle between the detected lane and the heading of the robot (i.e., the camera).
# rho - the distance from the origin (bottom left of the ground plane) to the detected lane.
# */
def getLanes(input, isDebug):
clusters=np.empty(shape=(2,2),dtype=float)
if input.size == 0:
print "Error: Input image is empty.Function getLanes(input) aborts."
return clusters
# Verify size of input images.
rows = input.shape[0]
cols = input.shape[1]
    if rows != image_height or cols != image_width:
        print "Warning: forced resizing of input images"
        input = cv2.resize(input, (image_width, image_height))
# Get inverse projection mapping
ipm_rgb = getIPM(input, ipm_width, ipm_height)
# Edge detection
detection_edge = edgeDetection(ipm_rgb, detect_method, False)
cv2.imshow('Final Detection Edge: ',detection_edge)
dilated_edges=Dilation(detection_edge,dilation_element,dilation_edge_size,isDebug,"Dilated Edges")
cv2.imshow('Final dilated edges: ', dilated_edges)
# Get white pixels
white_pixels = extractWhitePixel(ipm_rgb, extract_method, False)
cv2.imshow('Final white pixels: ',white_pixels)
# Dilation of the white pixels
dilated_white_pixels = Dilation(
white_pixels, dilation_element, dilation_white_size, isDebug, "Dilated White Pixels")
cv2.imshow('Final dilated white pixels: ', dilated_white_pixels)
# dilated_white_pixels=0
# cv2.dilate(white_pixels, dilated_white_pixels, dilation_white_size, isDebug, "Dilated White Pixels")
# combine edge detection and white pixel extraction
lane_boundaries = cv2.bitwise_and(dilated_white_pixels, dilated_edges)
cv2.imshow('Final lane_boundaries: ', lane_boundaries)
if isDebug:
cv2.imshow("Bitwise and", lane_boundaries)
# HoughLines: First parameter, Input image should be a binary image, so
# apply threshold or use canny edge detection before finding applying
# hough transform. Second and third parameters are \rho and \theta
# accuracies respectively. Fourth argument is the threshold, which means
# minimum vote it should get for it to be considered as a line.
lines = cv2.HoughLines(lane_boundaries, 1, np.pi / 180, thres_num_points)
    # Result cleaning: make sure the distance rho is always positive.
# rho_theta_pairs are list of [rho,theta] generated from the picture
rho_theta_pairs = lines[0]
for i in range(0, len(rho_theta_pairs)):
# if rho in the ith [rho,theta] pairs is smaller than 0
if rho_theta_pairs[i][0] < 0:
rho_theta_pairs[i][0] = -rho_theta_pairs[i][0]
rho_theta_pairs[i][1] = np.pi + rho_theta_pairs[i][1]
        # Keep theta within [-pi, pi]: if theta is over pi it is reduced by 2*pi,
        # and if it is below -pi it is increased by 2*pi.
        rho_theta_pairs[i][1] = wrapTheta(rho_theta_pairs[i][1])
# Show results before clustering
    if True:
        ipm_duplicate = ipm_rgb.copy()
        for i in range(0, len(rho_theta_pairs)):
            rho = rho_theta_pairs[i][0]
            theta = rho_theta_pairs[i][1]
            a = np.cos(theta)
            b = np.sin(theta)
            x0 = a * rho
            y0 = b * rho
            pt1 = (cv.Round(x0 + 1000 * (-b)), cv.Round(y0 + 1000 * (a)))
            pt2 = (cv.Round(x0 - 1000 * (-b)), cv.Round(y0 - 1000 * (a)))
            cv2.line(ipm_duplicate, pt1, pt2, (0, 255, 0), 3)
        cv2.imshow('Show Lines:', ipm_duplicate)
# print len(lines[0])
# // cluster lines into groups and take averages, in order to remove duplicate segments of the same line
# // TODO: need a robust way of distinguishing the left and right lanes
num_of_lines = 0
# for i in range(0, len(rho_theta_pairs)):
# rho = rho_theta_pairs[i][0]
# theta = rho_theta_pairs[i][1]
# if isDebug:
# print "Now it's debugging"
# a = np.cos(theta)
# b = np.sin(theta)
# custer_found = False
#
# # Match this line with existing clusters
# for j in range(0, len(clusters)):
# avg_line = clusters[j] / num_of_lines[j]
# avg_rho = avg_line[0]
# avg_theta = avg_line[1]
#
# if abs(rho - avg_rho) < thres_cluster_delta_rho and abs(theta - avg_theta) / np.pi * 180 < thres_cluster_delta_angle:
# clusters[j] += lines[i]
# num_of_lines[j] += 1
# clusters_found = True
# break
# if cluster_found:
# pass
# else:
# #?? not sure how does clusters look like and how push_back applied to clusters
# # clusters.push_back(lines[i])
# # num_of_lines.push_back(1);
# clusters = lines[i]
# num_of_lines = 1
# for i in range(0, len(clusters)):
# clusters[i] = clusters[i] / num_of_lines[i]
# rho = clusters[i][0]
# theta = clusters[i][1]
# a = np.cos(theta)
# b = np.sin(theta)
# x0 = a * rho
# y0 = b * rho
# pt1 = (cv.Round(x0 + 1000 * (-b)), cv.Round(y0 + 1000 * (a)))
# pt2 = (cv.Round(x0 - 1000 * (-b)), cv.Round(y0 - 1000 * (a)))
# ipm_rgb = cv2.line(pt1, pt2(0, 255, 0), 3)
#
# if isDebug:
# cv2.imshow("Hough Line Transform After Clustering", ipm_rgb)
# print len(clusters), "clusters found."
# return clusters
def testShowChannels():
img = cv2.imread(
'./LaneView3.jpg', 1)
edgeDetection(img, 0, 0)
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ == '__main__':
input = cv2.imread('../data/LaneView3.jpg', 1)
cv2.imshow('Origin input:', input)
getLanes(input, True)
print("Finished")
cv2.waitKey(0)
cv2.destroyAllWindows()
# testShowChannels()
| [
"[email protected]"
] | |
042eb90c4d3065ab75fc8c59d35336f3d37f6d12 | cc08f8eb47ef92839ba1cc0d04a7f6be6c06bd45 | /Personal/JaipurCity/smart_city/models.py | 4edef2ae9219cc2bf12785fae1060189c82a680f | [] | no_license | ProsenjitKumar/PycharmProjects | d90d0e7c2f4adc84e861c12a3fcb9174f15cde17 | 285692394581441ce7b706afa3b7af9e995f1c55 | refs/heads/master | 2022-12-13T01:09:55.408985 | 2019-05-08T02:21:47 | 2019-05-08T02:21:47 | 181,052,978 | 1 | 1 | null | 2022-12-08T02:31:17 | 2019-04-12T17:21:59 | null | UTF-8 | Python | false | false | 2,081 | py | from django.contrib.gis.db import models
from django.contrib.gis.geos import Point
class SmartRestaurant(models.Model):
restaurant = models.CharField(max_length=254)
rating = models.FloatField()
type = models.CharField(max_length=254)
cuisines = models.CharField(max_length=254)
cost = models.CharField(max_length=254)
address = models.CharField(max_length=254)
features = models.CharField(max_length=254)
latitude = models.FloatField()
longitude = models.FloatField()
point = models.PointField()
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.restaurant
class Fort(models.Model):
title = models.CharField(max_length=254)
rating = models.FloatField()
category = models.CharField(max_length=254)
descriptio = models.CharField(max_length=254)
latitude = models.FloatField()
longitude = models.FloatField()
point = models.PointField()
def __str__(self):
return self.title
class Hospital55(models.Model):
hospital_n = models.CharField(max_length=255)
hospital_r = models.FloatField()
contact_nu = models.CharField(max_length=255)
address = models.CharField(max_length=255)
latitude = models.FloatField()
longitude = models.FloatField()
point = models.PointField()
def __str__(self):
return self.hospital_n
class Market(models.Model):
market_nam = models.CharField(max_length=255)
rating = models.FloatField()
location = models.CharField(max_length=255)
latitude = models.FloatField()
longitude = models.FloatField()
point = models.PointField()
def __str__(self):
return self.market_nam
class PoliceStation(models.Model):
police_sta = models.CharField(max_length=255)
rating = models.FloatField()
contact_nu = models.CharField(max_length=255)
address = models.CharField(max_length=255)
latitude = models.FloatField()
longitude = models.FloatField()
point = models.PointField()
def __str__(self):
return self.police_sta | [
"[email protected]"
] | |
4b208c47d8b238082b1f0e0926b8ca03994e7acb | 50a20e25c1cb7ac05b0d7eb05bf174973a866a4b | /Day20/Day20.py | a80a573319bf5001f8c8df8dc3bb1248856c81d2 | [] | no_license | bakkerjangert/AoC_2016 | 1733b15bbb762d9fff0c986e33d404c5b7148591 | 3ccafab3f6d8b8efb4bf7de0549e22a4bd4de527 | refs/heads/master | 2023-02-04T06:09:32.862621 | 2020-12-18T14:39:00 | 2020-12-18T14:39:00 | 322,620,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 956 | py | import numpy as np
import pylab as plt
with open('input.txt') as f:
lines = f.read().splitlines()
start = []
end = []
for line in lines:
start.append(int(line.split('-')[0]))
end.append(int(line.split('-')[1]))
# for i in range(len(start)):
# print(start[i], '-', end[i])
ips = np.array([start, end])
ips = ips.transpose()
ips = np.sort(ips, axis=0)
# print(ips)
# print(len(ips[:,0]))
i = 0
while i + 1 < len(ips[:, 0]):
print(f'{ips[i + 1, 0]} < {ips[i, 1]} < {ips[i + 1, 1]}')
if ips[i + 1, 0] <= ips[i, 1] <= ips[i + 1, 1]:
ips[i, 1] = ips[i + 1, 1]
ips = np.delete(ips, i + 1, 0)
elif ips[i + 1, 1] <= ips[i, 1]:
ips = np.delete(ips, i + 1, 0)
else:
i += 1
print(ips)
print(len(ips[:,0]))
print(f'the answer to part 1 is {ips[0, 1] + 1}')
#part b
count = 0
for i in range(len(ips[:,0]) - 1):
count += (ips[i + 1, 0] - ips[i, 1] - 1)
print(f'the answer to part 2 is {count}') | [
"[email protected]"
] | |
316c6f696121e8eb21ad87bd9966d1689f929134 | 37cfcdfa3b8f1499f5899d2dfa2a48504a690abd | /test/functional/p2p_disconnect_ban.py | 1886e64fb2499ff15b887e636597f96dd7018069 | [
"MIT"
] | permissive | CJwon-98/Pyeongtaekcoin | 28acc53280be34b69c986198021724181eeb7d4d | 45a81933a98a7487f11e57e6e9315efe740a297e | refs/heads/master | 2023-08-17T11:18:24.401724 | 2021-10-14T04:32:55 | 2021-10-14T04:32:55 | 411,525,736 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,354 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Pyeongtaekcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node disconnect and ban behavior"""
import time
from test_framework.test_framework import PyeongtaekcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes_bi,
wait_until,
)
class DisconnectBanTest(PyeongtaekcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
def run_test(self):
self.log.info("Test setban and listbanned RPCs")
self.log.info("setban: successfully ban single IP address")
assert_equal(len(self.nodes[1].getpeerinfo()), 2) # node1 should have 2 connections to node0 at this point
self.nodes[1].setban(subnet="127.0.0.1", command="add")
wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0, timeout=10)
assert_equal(len(self.nodes[1].getpeerinfo()), 0) # all nodes must be disconnected at this point
assert_equal(len(self.nodes[1].listbanned()), 1)
self.log.info("clearbanned: successfully clear ban list")
self.nodes[1].clearbanned()
assert_equal(len(self.nodes[1].listbanned()), 0)
self.nodes[1].setban("127.0.0.0/24", "add")
self.log.info("setban: fail to ban an already banned subnet")
assert_equal(len(self.nodes[1].listbanned()), 1)
assert_raises_rpc_error(-23, "IP/Subnet already banned", self.nodes[1].setban, "127.0.0.1", "add")
self.log.info("setban: fail to ban an invalid subnet")
assert_raises_rpc_error(-30, "Error: Invalid IP/Subnet", self.nodes[1].setban, "127.0.0.1/42", "add")
assert_equal(len(self.nodes[1].listbanned()), 1) # still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24
self.log.info("setban remove: fail to unban a non-banned subnet")
assert_raises_rpc_error(-30, "Error: Unban failed", self.nodes[1].setban, "127.0.0.1", "remove")
assert_equal(len(self.nodes[1].listbanned()), 1)
self.log.info("setban remove: successfully unban subnet")
self.nodes[1].setban("127.0.0.0/24", "remove")
assert_equal(len(self.nodes[1].listbanned()), 0)
self.nodes[1].clearbanned()
assert_equal(len(self.nodes[1].listbanned()), 0)
self.log.info("setban: test persistence across node restart")
self.nodes[1].setban("127.0.0.0/32", "add")
self.nodes[1].setban("127.0.0.0/24", "add")
# Set the mocktime so we can control when bans expire
old_time = int(time.time())
self.nodes[1].setmocktime(old_time)
self.nodes[1].setban("192.168.0.1", "add", 1) # ban for 1 seconds
self.nodes[1].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000) # ban for 1000 seconds
listBeforeShutdown = self.nodes[1].listbanned()
assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address'])
# Move time forward by 3 seconds so the third ban has expired
self.nodes[1].setmocktime(old_time + 3)
assert_equal(len(self.nodes[1].listbanned()), 3)
self.stop_node(1)
self.start_node(1)
listAfterShutdown = self.nodes[1].listbanned()
assert_equal("127.0.0.0/24", listAfterShutdown[0]['address'])
assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
assert_equal("/19" in listAfterShutdown[2]['address'], True)
# Clear ban lists
self.nodes[1].clearbanned()
connect_nodes_bi(self.nodes, 0, 1)
self.log.info("Test disconnectnode RPCs")
self.log.info("disconnectnode: fail to disconnect when calling with address and nodeid")
address1 = self.nodes[0].getpeerinfo()[0]['addr']
node1 = self.nodes[0].getpeerinfo()[0]['addr']
assert_raises_rpc_error(-32602, "Only one of address and nodeid should be provided.", self.nodes[0].disconnectnode, address=address1, nodeid=node1)
self.log.info("disconnectnode: fail to disconnect when calling with junk address")
assert_raises_rpc_error(-29, "Node not found in connected nodes", self.nodes[0].disconnectnode, address="221B Baker Street")
self.log.info("disconnectnode: successfully disconnect node by address")
address1 = self.nodes[0].getpeerinfo()[0]['addr']
self.nodes[0].disconnectnode(address=address1)
wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
assert not [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]
self.log.info("disconnectnode: successfully reconnect node")
connect_nodes_bi(self.nodes, 0, 1) # reconnect the node
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
assert [node for node in self.nodes[0].getpeerinfo() if node['addr'] == address1]
self.log.info("disconnectnode: successfully disconnect node by node id")
id1 = self.nodes[0].getpeerinfo()[0]['id']
self.nodes[0].disconnectnode(nodeid=id1)
wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1, timeout=10)
assert not [node for node in self.nodes[0].getpeerinfo() if node['id'] == id1]
if __name__ == '__main__':
DisconnectBanTest().main()
| [
"[email protected]"
] | |
6f136cc6daa5a8670845de0e72b5aa253d75137b | de5dc978e0a5b9fc4ecbbdd00c1cebe57c465775 | /wso2_apim_storeclient/models/__init__.py | 5c4b27271173e651942c4c622035cbca741cb8fe | [] | no_license | junetigerlee/python-wso2-apim-storeclient | 8c3502dfd039eca0093c218cb6ac1183c050edb5 | 60c84988a2417a0104aaa53ed082902012d6247d | refs/heads/master | 2021-01-01T16:12:12.197633 | 2017-07-25T06:21:21 | 2017-07-25T06:21:21 | 97,787,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,500 | py | # coding: utf-8
"""
WSO2 API Manager - Store
This specifies a **RESTful API** for WSO2 **API Manager** - Store. Please see [full swagger definition](https://raw.githubusercontent.com/wso2/carbon-apimgt/v6.0.4/components/apimgt/org.wso2.carbon.apimgt.rest.api.store/src/main/resources/store-api.yaml) of the API which is written using [swagger 2.0](http://swagger.io/) specification.
OpenAPI spec version: 0.11.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into model package
from .api import API
from .api_info import APIInfo
from .api_info_object_with_basic_api_details_ import APIInfoObjectWithBasicAPIDetails_
from .api_list import APIList
from .api_object import APIObject
from .api_object_business_information import APIObjectBusinessInformation
from .api_object_endpoint_ur_ls import APIObjectEndpointURLs
from .api_object_environment_ur_ls import APIObjectEnvironmentURLs
from .application import Application
from .application_1 import Application1
from .application_2 import Application2
from .application_3 import Application3
from .application_info import ApplicationInfo
from .application_info_object_with_basic_application_details import ApplicationInfoObjectWithBasicApplicationDetails
from .application_key import ApplicationKey
from .application_key_details import ApplicationKeyDetails
from .application_key_generate_request import ApplicationKeyGenerateRequest
from .application_key_generation_request_object import ApplicationKeyGenerationRequestObject
from .application_list import ApplicationList
from .description_of_individual_errors_that_may_have_occurred_during_a_request_ import DescriptionOfIndividualErrorsThatMayHaveOccurredDuringARequest_
from .document import Document
from .document_1 import Document1
from .document_list import DocumentList
from .error import Error
from .error_list_item import ErrorListItem
from .error_object_returned_with_4_xx_http_status import ErrorObjectReturnedWith4XXHTTPStatus
from .subscription import Subscription
from .subscription_1 import Subscription1
from .subscription_2 import Subscription2
from .subscription_list import SubscriptionList
from .tag import Tag
from .tag_1 import Tag1
from .tag_list import TagList
from .tier import Tier
from .tier_1 import Tier1
from .tier_list import TierList
from .token import Token
from .token_details_for_invoking_ap_is import TokenDetailsForInvokingAPIs
| [
"[email protected]"
] |