Dataset schema (column, dtype, and observed min/max or class count):

| column | dtype | min | max |
|---|---|---|---|
| blob_id | string (length) | 40 | 40 |
| directory_id | string (length) | 40 | 40 |
| path | string (length) | 3 | 616 |
| content_id | string (length) | 40 | 40 |
| detected_licenses | list (length) | 0 | 112 |
| license_type | string (2 classes) | | |
| repo_name | string (length) | 5 | 115 |
| snapshot_id | string (length) | 40 | 40 |
| revision_id | string (length) | 40 | 40 |
| branch_name | string (777 classes) | | |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 | 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 | 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 | 2023-09-06 01:08:06 |
| github_id | int64 (nullable) | 4.92k | 681M |
| star_events_count | int64 | 0 | 209k |
| fork_events_count | int64 | 0 | 110k |
| gha_license_id | string (22 classes) | | |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-04 01:52:49 | 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-22 07:58:19 | 2023-08-21 12:35:19 |
| gha_language | string (149 classes) | | |
| src_encoding | string (26 classes) | | |
| language | string (1 class) | | |
| is_vendor | bool (2 classes) | | |
| is_generated | bool (2 classes) | | |
| length_bytes | int64 | 3 | 10.2M |
| extension | string (188 classes) | | |
| content | string (length) | 3 | 10.2M |
| authors | list (length) | 1 | 1 |
| author_id | string (length) | 1 | 132 |
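The sample rows below follow this schema: one `|`-separated record per file, with the file content and author list inlined. As a minimal sketch of how such a dump is typically consumed, assuming a Hugging Face `datasets`-style corpus ("user/dataset" is a placeholder id, not the real dataset path):

import itertools
from datasets import load_dataset

# Stream the corpus so that the multi-megabyte content blobs are not all loaded at once.
ds = load_dataset("user/dataset", split="train", streaming=True)  # placeholder id
for row in itertools.islice(ds, 2):
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    print(row["content"][:120])  # first 120 characters of the stored file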
37ea7ab064e4996dca287d814428daa3078abc0a | aa4b80cf7e7ac0028d0c7f67ade982d9b740a38b | /python/touple/touple_t.py | 9137b51f9f319510cd7c35b7e666637c5b9bd7b2 | [] | no_license | ratularora/python_code | 9ac82492b8dc2e0bc2d96ba6df6fdc9f8752d322 | ddce847ba338a41b0b2fea8a36d49a61aa0a5b13 | refs/heads/master | 2021-01-19T04:34:22.038909 | 2017-09-27T08:14:45 | 2017-09-27T08:14:45 | 84,435,244 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py |
aList = [123, 'xyz', 'zara', 'abc']
aTuple = tuple(aList)
print "Tuple elements : ", aTuple
# Tuples are immutable, so aTuple.append('effd') raises AttributeError;
# concatenate to build a new tuple instead.
aTuple = aTuple + ('effd',)
print aTuple
| ["[email protected]"] | |
0498e3544e187420ab7cacac12ad7fd5a5fb2a9c | f908adce7e25824f7daaffddfaacb2a18b3e721b | /feder/letters/logs/migrations/0002_auto_20170820_1447.py | 4a5853834afff5e659b5fc69e8d1aaf1bbcfd885 | ["MIT"] | permissive | miklobit/feder | 7c0cfdbcb0796f8eb66fd67fa4dabddb99370a7c | 14a59e181a18af5b625ccdcbd892c3b886a8d97e | refs/heads/master | 2023-01-13T23:03:51.266990 | 2020-11-12T14:31:52 | 2020-11-12T15:47:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,032 | py |
# Generated by Django 1.11.4 on 2017-08-20 14:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("logs", "0001_initial")]
operations = [
migrations.AddField(
model_name="emaillog",
name="status",
field=models.CharField(
choices=[
(b"open", "Open"),
(b"ok", "Open"),
(b"spambounce", "Open"),
(b"softbounce", "Open"),
(b"hardbounce", "Open"),
(b"dropped", "Open"),
(b"deferred", "Deferred"),
(b"unknown", "Unknown"),
],
default=b"unknown",
max_length=20,
),
),
migrations.AddField(
model_name="emaillog",
name="to",
field=models.CharField(default="", max_length=255, verbose_name="To"),
preserve_default=False,
),
]
| ["[email protected]"] | |
16cea51f52ae930ff0c9d3a383497d26d391b856 | 80a3d98eae1d755d6914b5cbde63fd10f5cc2046 | /autox/autox_video/mmaction2/mmaction/datasets/rawframe_dataset.py | 9359e117b7f52bc234b0e389de0b731e96c9e8db | ["Apache-2.0"] | permissive | 4paradigm/AutoX | efda57b51b586209e1d58e1dab7d0797083aadc5 | 7eab9f4744329a225ff01bb5ec360c4662e1e52e | refs/heads/master | 2023-05-24T00:53:37.109036 | 2023-02-14T14:21:50 | 2023-02-14T14:21:50 | 388,068,949 | 752 | 162 | Apache-2.0 | 2022-07-12T08:28:09 | 2021-07-21T09:45:41 | Jupyter Notebook | UTF-8 | Python | false | false | 7,927 | py |
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import torch
from mmaction.datasets.pipelines import Resize
from .base import BaseDataset
from .builder import DATASETS
@DATASETS.register_module()
class RawframeDataset(BaseDataset):
"""Rawframe dataset for action recognition.
    The dataset loads raw frames and applies the specified transforms to return a
dict containing the frame tensors and other information.
    The ann_file is a text file with multiple lines, and each line indicates
    the directory to frames of a video, the total frames of the video and
    the label of the video, separated by whitespace.
    Example of an annotation file:
.. code-block:: txt
some/directory-1 163 1
some/directory-2 122 1
some/directory-3 258 2
some/directory-4 234 2
some/directory-5 295 3
some/directory-6 121 3
Example of a multi-class annotation file:
.. code-block:: txt
some/directory-1 163 1 3 5
some/directory-2 122 1 2
some/directory-3 258 2
some/directory-4 234 2 4 6 8
some/directory-5 295 3
some/directory-6 121 3
    Example of a with_offset annotation file (clips from long videos). Each
    line indicates the directory to frames of a video, the index of the start
    frame, the total frames of the video clip and the label of the clip,
    separated by whitespace.
.. code-block:: txt
some/directory-1 12 163 3
some/directory-2 213 122 4
some/directory-3 100 258 5
some/directory-4 98 234 2
some/directory-5 0 295 3
some/directory-6 50 121 3
Args:
ann_file (str): Path to the annotation file.
pipeline (list[dict | callable]): A sequence of data transforms.
data_prefix (str | None): Path to a directory where videos are held.
Default: None.
test_mode (bool): Store True when building test or validation dataset.
Default: False.
filename_tmpl (str): Template for each filename.
Default: 'img_{:05}.jpg'.
with_offset (bool): Determines whether the offset information is in
ann_file. Default: False.
multi_class (bool): Determines whether it is a multi-class
recognition dataset. Default: False.
        num_classes (int | None): Number of classes in the dataset.
            Default: None.
        start_index (int): Start index of frame filenames, to accommodate
            different filename templates. Default: 1.
modality (str): Modality of data. Support 'RGB', 'Flow'.
Default: 'RGB'.
sample_by_class (bool): Sampling by class, should be set `True` when
performing inter-class data balancing. Only compatible with
`multi_class == False`. Only applies for training. Default: False.
power (float): We support sampling data with the probability
proportional to the power of its label frequency (freq ^ power)
when sampling data. `power == 1` indicates uniformly sampling all
data; `power == 0` indicates uniformly sampling all classes.
Default: 0.
dynamic_length (bool): If the dataset length is dynamic (used by
ClassSpecificDistributedSampler). Default: False.
"""
def __init__(self,
ann_file,
pipeline,
data_prefix=None,
test_mode=False,
filename_tmpl='img_{:05}.jpg',
with_offset=False,
multi_class=False,
num_classes=None,
start_index=1,
modality='RGB',
sample_by_class=False,
power=0.,
dynamic_length=False,
**kwargs):
self.filename_tmpl = filename_tmpl
self.with_offset = with_offset
super().__init__(
ann_file,
pipeline,
data_prefix,
test_mode,
multi_class,
num_classes,
start_index,
modality,
sample_by_class=sample_by_class,
power=power,
dynamic_length=dynamic_length)
self.short_cycle_factors = kwargs.get('short_cycle_factors',
[0.5, 0.7071])
self.default_s = kwargs.get('default_s', (224, 224))
def load_annotations(self):
"""Load annotation file to get video information."""
if self.ann_file.endswith('.json'):
return self.load_json_annotations()
video_infos = []
with open(self.ann_file, 'r') as fin:
for line in fin:
line_split = line.strip().split()
video_info = {}
idx = 0
# idx for frame_dir
frame_dir = line_split[idx]
if self.data_prefix is not None:
frame_dir = osp.join(self.data_prefix, frame_dir)
video_info['frame_dir'] = frame_dir
idx += 1
if self.with_offset:
# idx for offset and total_frames
video_info['offset'] = int(line_split[idx])
video_info['total_frames'] = int(line_split[idx + 1])
idx += 2
else:
# idx for total_frames
video_info['total_frames'] = int(line_split[idx])
idx += 1
# idx for label[s]
label = [int(x) for x in line_split[idx:]]
assert label, f'missing label in line: {line}'
if self.multi_class:
assert self.num_classes is not None
video_info['label'] = label
else:
assert len(label) == 1
video_info['label'] = label[0]
video_infos.append(video_info)
return video_infos
def prepare_train_frames(self, idx):
"""Prepare the frames for training given the index."""
def pipeline_for_a_sample(idx):
results = copy.deepcopy(self.video_infos[idx])
results['filename_tmpl'] = self.filename_tmpl
results['modality'] = self.modality
results['start_index'] = self.start_index
# prepare tensor in getitem
if self.multi_class:
onehot = torch.zeros(self.num_classes)
onehot[results['label']] = 1.
results['label'] = onehot
return self.pipeline(results)
if isinstance(idx, tuple):
index, short_cycle_idx = idx
last_resize = None
for trans in self.pipeline.transforms:
if isinstance(trans, Resize):
last_resize = trans
origin_scale = self.default_s
long_cycle_scale = last_resize.scale
if short_cycle_idx in [0, 1]:
                # indices 0 and 1 are hard-coded, following PySlowFast
scale_ratio = self.short_cycle_factors[short_cycle_idx]
target_scale = tuple(
[int(round(scale_ratio * s)) for s in origin_scale])
last_resize.scale = target_scale
res = pipeline_for_a_sample(index)
last_resize.scale = long_cycle_scale
return res
else:
return pipeline_for_a_sample(idx)
def prepare_test_frames(self, idx):
"""Prepare the frames for testing given the index."""
results = copy.deepcopy(self.video_infos[idx])
results['filename_tmpl'] = self.filename_tmpl
results['modality'] = self.modality
results['start_index'] = self.start_index
# prepare tensor in getitem
if self.multi_class:
onehot = torch.zeros(self.num_classes)
onehot[results['label']] = 1.
results['label'] = onehot
return self.pipeline(results)
| ["[email protected]"] | |
1bc8f879513747c4fcd355558feb0b1ee673f864 | c11cd1d6a99eafa740c3aa6d9a9e90d622af9630 | /examples/ConvolutionalPoseMachines/load-cpm.py | b8999c203ea53bde85e9ba5e4158f11b1413ecf9 | ["Apache-2.0"] | permissive | bzhong2/tensorpack | 0c06e45ed2357cedd0d459511a2c85a07b522d2c | 0202038159fda7aa4baa2e249903b929949e0976 | refs/heads/master | 2021-07-02T19:05:10.948197 | 2017-09-24T09:47:13 | 2017-09-24T09:47:13 | 105,573,277 | 1 | 0 | null | 2017-10-02T19:02:49 | 2017-10-02T19:02:48 | null | UTF-8 | Python | false | false | 4,599 | py |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# File: load-cpm.py
# Author: Yuxin Wu <[email protected]>
import cv2
import tensorflow as tf
import numpy as np
import argparse
from tensorpack import *
from tensorpack.utils import viz
from tensorpack.utils.argtools import memoized
"""
15 channels:
0-1 head, neck
2-4 right shoulder, right elbow, right wrist
5-7 left shoulder, left elbow, left wrist
8-10 right hip, right knee, right ankle
11-13 left hip, left knee, left ankle
14: background
"""
def colorize(img, heatmap):
""" img: bgr, [0,255]
heatmap: [0,1]
"""
heatmap = viz.intensity_to_rgb(heatmap, cmap='jet')[:, :, ::-1]
return img * 0.5 + heatmap * 0.5
@memoized
def get_gaussian_map():
sigma = 21
gaussian_map = np.zeros((368, 368), dtype='float32')
for x_p in range(368):
for y_p in range(368):
dist_sq = (x_p - 368 / 2) * (x_p - 368 / 2) + \
(y_p - 368 / 2) * (y_p - 368 / 2)
            exponent = dist_sq / 2.0 / sigma**2  # same value as 21**2, but uses the sigma defined above
gaussian_map[y_p, x_p] = np.exp(-exponent)
return gaussian_map.reshape((1, 368, 368, 1))
class Model(ModelDesc):
def _get_inputs(self):
return [InputDesc(tf.float32, (None, 368, 368, 3), 'input'),
InputDesc(tf.float32, (None, 368, 368, 15), 'label'),
]
def _build_graph(self, inputs):
image, label = inputs
image = image / 256.0 - 0.5
gmap = tf.constant(get_gaussian_map())
gmap = tf.pad(gmap, [[0, 0], [0, 1], [0, 1], [0, 0]])
pool_center = AvgPooling('mappool', gmap, 9, stride=8, padding='VALID')
with argscope(Conv2D, kernel_shape=3, nl=tf.nn.relu,
W_init=tf.random_normal_initializer(stddev=0.01)):
shared = (LinearWrap(image)
.Conv2D('conv1_1', 64)
.Conv2D('conv1_2', 64)
.MaxPooling('pool1', 2)
# 184
.Conv2D('conv2_1', 128)
.Conv2D('conv2_2', 128)
.MaxPooling('pool2', 2)
# 92
.Conv2D('conv3_1', 256)
.Conv2D('conv3_2', 256)
.Conv2D('conv3_3', 256)
.Conv2D('conv3_4', 256)
.MaxPooling('pool3', 2)
# 46
.Conv2D('conv4_1', 512)
.Conv2D('conv4_2', 512)
.Conv2D('conv4_3_CPM', 256)
.Conv2D('conv4_4_CPM', 256)
.Conv2D('conv4_5_CPM', 256)
.Conv2D('conv4_6_CPM', 256)
.Conv2D('conv4_7_CPM', 128)())
def add_stage(stage, l):
l = tf.concat([l, shared, pool_center], 3,
name='concat_stage{}'.format(stage))
for i in range(1, 6):
l = Conv2D('Mconv{}_stage{}'.format(i, stage), l, 128)
l = Conv2D('Mconv6_stage{}'.format(stage), l, 128, kernel_shape=1)
l = Conv2D('Mconv7_stage{}'.format(stage),
l, 15, kernel_shape=1, nl=tf.identity)
return l
with argscope(Conv2D, kernel_shape=7, nl=tf.nn.relu):
out1 = (LinearWrap(shared)
.Conv2D('conv5_1_CPM', 512, kernel_shape=1)
.Conv2D('conv5_2_CPM', 15, kernel_shape=1, nl=tf.identity)())
out2 = add_stage(2, out1)
out3 = add_stage(3, out2)
out4 = add_stage(4, out3)
out5 = add_stage(5, out4)
            out6 = add_stage(6, out5)  # stage 6 refines stage 5; the original passed out4, leaving out5 unused
resized_map = tf.image.resize_bilinear(out6,
[368, 368], name='resized_map')
def run_test(model_path, img_file):
param_dict = np.load(model_path, encoding='latin1').item()
predict_func = OfflinePredictor(PredictConfig(
model=Model(),
session_init=DictRestore(param_dict),
input_names=['input'],
output_names=['resized_map']
))
im = cv2.imread(img_file, cv2.IMREAD_COLOR).astype('float32')
im = cv2.resize(im, (368, 368))
out = predict_func([[im]])[0][0]
hm = out[:, :, :14].sum(axis=2)
viz = colorize(im, hm)
cv2.imwrite("output.jpg", viz)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--load', required=True, help='.npy model file')
parser.add_argument('--input', required=True, help='input image')
args = parser.parse_args()
run_test(args.load, args.input)
| ["[email protected]"] | |
14f0f6764cb65cfbd369c21f69c8b01bedf0dc40 | 3247d399cee22f988baa83de6c03e97c0e40ad89 | /kbengine/assets/scripts/base/GameWorld.py | 2237c2da8d78017ac1f2904e9770820cfa1fe028 | [] | no_license | ruifly2012/PaoDeKuai | 3f85af8fda1a53c9a61c35d95188e503734a6bee | b671faa60ca6e9458a863379a9c1c64ccb1fbc43 | refs/heads/master | 2020-05-14T13:18:23.005557 | 2019-04-16T08:31:27 | 2019-04-16T08:31:27 | 181,809,954 | 0 | 1 | null | 2019-04-17T03:23:51 | 2019-04-17T03:23:51 | null | UTF-8 | Python | false | false | 9,422 | py |
# -*- coding: utf-8 -*-
import KBEngine
from KBEDebug import *
import Functor
import const
import math
import random
import time
import h1global
import switch
from urllib.parse import unquote
from LoggerManager import LoggerManager
from interfaces.GameObject import GameObject
from worldmembers.iFriends import iFriends
from worldmembers.iMail import iMail
from worldmembers.iGameManager import iGameManager
from worldmembers.iGroupManager import iGroupManager
from worldmembers.iRoomManager import iRoomManager
from worldmembers.iRoomRecord import iRoomRecord
from worldmembers.iPay import iPay
from worldmembers.iCache import iCache
from worldmembers.iBot import iBot
from BaseEntity import BaseEntity
import x42
import table_name
BROADCAST_NUM = 100
INTERVAL_TIME = 60 * 60
class GameWorld(GameObject,
iBot,
iCache,
iGameManager,
iGroupManager,
iRoomManager,
iRoomRecord,
iPay,
BaseEntity,
):
"""
    A script-layer wrapper around space management.
    A KBEngine "space" is an abstract concept: the script layer may treat a
    space as a game scene, a game room, or even an entire universe.
"""
def __init__(self):
iBot.__init__(self)
iGameManager.__init__(self)
iCache.__init__(self)
iGroupManager.__init__(self)
iRoomManager.__init__(self)
iRoomRecord.__init__(self)
BaseEntity.__init__(self)
self.dbid = self.databaseID
self.avatars = {}
self.logger = LoggerManager()
KBEngine.Base.__init__(self)
GameObject.__init__(self)
self.broadcastQueue = []
if self.serverStartTime == 0:
self.serverStartTime = time.time()
self.world_notice = '#'
        # Switch that disables room-card charging; a free-money faucet, enable with caution
self.free_play = False
        # Cumulative room-card consumption since the server started
self.total_cards = 0
self.rankCount = 0
x42.GW = self
self.initGameWorld()
return
def initGameWorld(self):
self.initGroupManager()
ttime = time.time()
tlocaltime = time.localtime()
DEBUG_MSG("initGameWorld 1 = {0},{1}".format(ttime,self.lastWeekResetTime))
if not h1global.isSameDay2(ttime, self.lastWeekResetTime):
DEBUG_MSG("initGameWorld 2 = {0},{1}".format(ttime,self.lastWeekResetTime))
self.refreshOnResetDay()
self.hourlyTimer = None
self.setTimerByHour()
return
def getServerStartTime(self):
return self.serverStartTime
    # Timer: fires once at the top of every hour
def setTimerByHour(self):
offset = 0
ctime = time.time()
ctime_s = list(time.localtime())
if ctime_s[4] != 0 or ctime_s[5] != 0:
ctime_s[4] = 0
ctime_s[5] = 0
atime = time.mktime(time.struct_time(ctime_s))
offset = 60*60 - (ctime - atime)
if self.hourlyTimer is not None:
self.cancel_timer(self.hourlyTimer)
self.hourlyTimer = None
self.hourlyTimer = self.add_repeat_timer(math.ceil(offset), 60 * 60, self.refreshOnResetDay)
return
def refreshOnResetDay(self):
ttime = time.time()
tlocaltime = time.localtime()
ctime_s = list(tlocaltime)
DEBUG_MSG("refreshOnResetDay 0 = {0}".format(ctime_s))
server_refresh = const.SERVER_REFRESH_TIME
if ctime_s[6] == server_refresh[0]:
DEBUG_MSG("refreshOnResetDay 1 = {0},{1}".format(ttime,self.lastWeekResetTime))
if not h1global.isSameDay2(ttime, self.lastWeekResetTime):
DEBUG_MSG("refreshOnResetDay 1 = {0},{1}".format(ttime,self.lastWeekResetTime))
self.lastWeekResetTime = ttime
self.rankingInfos = []
if ctime_s[3] == 12:
self.rankCount = 0
self.genGlobalRankBotData()
return
def loginToSpace(self, avatarEntity):
"""
defined method.
        A player requests to log in to a space.
"""
self.avatars[avatarEntity.userId] = avatarEntity
def logoutSpace(self, avatarID):
"""
defined method.
        A player requests to log out of this space.
"""
if avatarID in self.avatars:
del self.avatars[avatarID]
def runFuncOnAllPlayers(self, num, funcs, *args):
alist = list(self.avatars.keys())
bn = 0
en = BROADCAST_NUM if len(alist) > BROADCAST_NUM else len(alist)
self.broadcastQueue.append(Functor.Functor(self.runFuncOnSubPlayers, bn, en, alist, num, funcs, *args))
self.add_timer(0, self.broadcastFunc)
def broadcastFunc(self):
if self.broadcastQueue:
func = self.broadcastQueue.pop()
func()
if len(self.broadcastQueue) > 0:
self.add_timer(0.1, self.broadcastFunc)
return
def runFuncOnSubPlayers(self, bn, en, alist, num, funcs, *args):
def getFuncInAvatar(avatar, num, funcs):
if avatar is None:
ERROR_MSG("GameWorld[%i].runFuncOnAllPlayers:avatar is None" % (self.id))
return
curFunc = avatar
for count in range(num):
curFunc = getattr(curFunc, funcs[count])
if curFunc is None:
ERROR_MSG("GameWorld[%i].runFuncOnAllPlayers: %s, %s is None" % (self.id, str(funcs), funcs[count]))
return None
return curFunc
for i in range(bn, en):
if alist[i] not in self.avatars or alist[i] in self.bots:
continue
avatarMailbox = self.avatars[alist[i]]
curFunc = getFuncInAvatar(avatarMailbox, num, funcs)
if curFunc is not None:
curFunc(*args)
if en >= len(alist):
return
bn = en
en = (bn + BROADCAST_NUM) if len(alist) > (bn + BROADCAST_NUM) else len(alist)
self.broadcastQueue.append(Functor.Functor(self.runFuncOnSubPlayers, bn, en, alist, num, funcs, *args))
return
def genGlobalBornData(self, accountMailbox):
if switch.DEBUG_BASE:
            # Test environment: player userId
self.userCount = self.userCount + 1
bornData = {
"userId": self.userCount + 1134701,
}
else:
            # Tournament bot userId
self.botCount += 1
bornData = {
"userId" : self.botCount + 2134701,
}
accountMailbox.reqCreateAvatar(bornData)
def callMethodOnAllAvatar(self, method_name, *args):
for mb in self.avatars.values():
func = getattr(mb, method_name, None)
if func and callable(func):
self.broadcastQueue.append(lambda avt_mb=mb: getattr(avt_mb, method_name)(*args))
self.add_timer(0.1, self.broadcastFunc)
def externalDataDispatcher(self, dataStr):
DEBUG_MSG("externalDataDispatcher data = {}".format(dataStr))
try:
dataStr = unquote(dataStr)
(dataStr, op_code) = dataStr.split("&9op=")
op = int(op_code)
if (op == 1):
(dataStr, free) = dataStr.split("free=")
free = int(free)
self.free_play = (free == 1)
DEBUG_MSG("set free_play = {0}".format(free))
elif (op == 2):
(dataStr, content) = dataStr.split("&2content=")
(dataStr, count) = dataStr.split("1count=")
count = int(count)
if content != '#':
DEBUG_MSG("call recvWorldNotice content = {0}, count = {1}".format(content, count))
self.callMethodOnAllAvatar("recvWorldNotice", content, count)
elif (op == 3):
(dataStr, proxy) = dataStr.split("&3proxy=")
(dataStr, uid) = dataStr.split("&2uid=")
uid = int(uid)
(dataStr, cards) = dataStr.split("1cards=")
cards = int(cards)
self.userPaySuccess([proxy, uid, cards])
else:
DEBUG_MSG("Error: externalDataDispatcher, no this op={}".format(op_code))
except:
DEBUG_MSG("Error: externalDataDispatcher exception {}".format(dataStr))
def updateRankingInfo(self,rankingInfo):
flag = 0
for i, rank in enumerate(self.rankingInfos):
DEBUG_MSG("userid{0},{1},{2}".format(self.rankingInfos[i]["userid"],rank["userid"],rankingInfo["userid"]))
if rank["userid"] == rankingInfo["userid"]:
self.rankingInfos[i]["integral"] = rankingInfo["integral"]
flag = 1
break
if len(self.rankingInfos) == 0 or (flag != 1 and len(self.rankingInfos) < 15) \
or (flag != 1 and len(self.rankingInfos) >= 15 and self.rankingInfos[-1]["integral"] < rankingInfo["integral"]):
self.rankingInfos.append(rankingInfo)
self.rankingInfos=sorted(self.rankingInfos,key=lambda x:x["integral"],reverse = True)
length = len(self.rankingInfos)
if length > 15:
new_h = []
for s in self.rankingInfos:
new_h.append(s)
self.rankingInfos = new_h[0:15]
def reqRankingInfos(self,avt_mb):
DEBUG_MSG("reqRankingInfos World {}".format(self.rankingInfos))
avt_mb.rebckRankingInfos(self.rankingInfos)
#self.client and self.client.pushRankingInfos(self.rankingInfos)
def reqSignInfo(self,avt_mb):
avt_mb.pushSignInfo(self.signInfo)
def initSignInfo(self):
self.signInfo ={
'day1': 1,
'day2': 2,
'day3': 3,
'day4': 4,
'day5': 5,
'day6': 6,
'day7': 7
}
def getRankMaxAndMinScore(self):
length = len(self.rankingInfos)
if length <= 0:
minScore = 0
maxScore = 100
else:
maxScore = self.rankingInfos[0]["integral"]
minScore = self.rankingInfos[-1]["integral"]
return minScore,maxScore
def genGlobalRankBotData(self):
minScore,maxScore = self.getRankMaxAndMinScore()
num = const.RANK_BOT_NUM
while num > 0:
            # Leaderboard bot userId
self.rankCount = self.rankCount + 1
userId = self.rankCount + 8134701
lastname = table_name.lastnameTbl[random.randint(0, table_name.lastnameNum - 1)]
firstname = table_name.firstnameTbl[random.randint(0, table_name.firstnameNum - 1)]
name = lastname + firstname
iconUrl = "http://mypdk.game918918.com/portraits/" + str(random.randint(1, 50)) + ".png"
integral = random.randint(minScore,maxScore + 200)
rankingInfo = {
'userid': userId,
'uuid': KBEngine.genUUID64(),
'head_icon': iconUrl,
'name': name,
'integral': integral
}
DEBUG_MSG("genGlobalRankBotData00000000{}".format(rankingInfo))
self.updateRankingInfo(rankingInfo)
num = num - 1
| ["[email protected]"] | |
2b199881e94f19fe4b040e1da9ae0108a52c857f | 292d23019c18d0b724aed88f04a0f20b5b616bb9 | /Python1/Crawler/douban_movie1.py | b845a75327d8ec2485e72a9f2f10dabbb124ec4b | [] | no_license | RedAnanas/macbendi | 6f1f6fd41ed1fe8b71408dffa0b964464bd00aa8 | 8d5aa39d9389937f0e0c3f7a7d6532537f33cda8 | refs/heads/master | 2023-06-11T18:57:31.061009 | 2021-06-29T15:04:56 | 2021-06-29T15:04:56 | 380,759,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,912 | py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/2/7 17:41
# @Software: PyCharm
import requests
from bs4 import BeautifulSoup
base_url = 'https://movie.douban.com/top250?start=%d&filter='
for page in range(0, 25, 25):
    allurl = base_url % int(page)
    resp = requests.get(allurl)
soup = BeautifulSoup(resp.text, 'lxml')
all_names = soup.find_all('span', class_='title')
names = [a.get_text() for a in all_names]
all_names1 = soup.find_all('span', class_='other')
names1 = [a1.get_text() for a1 in all_names1]
all_grade = soup.find_all('span', class_='rating_num')
grade = [a.get_text() for a in all_grade]
all_director = soup.find_all('p', class_='')
director = [a.get_text() for a in all_director]
all_intro = soup.find_all('span', class_='inq')
intro = [a.get_text() for a in all_intro]
    for names, names1, grade, director, intro in zip(all_names, all_names1, all_grade, all_director, all_intro):
name = '影名:' + str(names.text) + '\n'
author = '别名:' + str(names1.text) + '\n'
grade = '评分:' + str(grade.text) + '\n'
# str.replace(u'\xa0', u' ')
score = '导演:' + str(director.text).replace(' ','') + '\n'
# score = '导演:' + str(director.text) + '\n'
sum = '简介:' + str(intro.text) + '\n'
data = name + author + grade + score + sum
# print(data)
        # output file name
filename = '豆瓣电影Top250.txt'
        # save to file
with open(filename, 'a', encoding='utf-8') as f:
            # write the record
f.writelines(data + '=======================' + '\n')
print('保存成功')
# print(names)
# print(names1)
# print(director)
# print(intro)
# all_author = soup.find_all('p', class_='pl')
# author = [b.text for b in all_author]
# # print(author)
#
# all_grade = soup.find_all('span',class_='rating_nums')
# grade = [c.text for c in all_grade]
# # print(grade)
#
# all_intro = soup.find_all('span',class_='inq')
# intro = [d.text for d in all_intro]
# # print(intro)
#
# for name, author, score, sum in zip(names, all_author, all_grade, all_intro):
# name = '书名:' + str(name) + '\n'
# author = '作者:' + str(author.text) + '\n'
# score = '评分:' + str(score.text) + '\n'
# sum = '简介:' + str(sum.text) + '\n'
# data = name + author + score + sum
# # print(data)
#
# # output file name
# filename = '豆瓣图书Top250.txt'
# # save to file
# with open(filename, 'a', encoding='utf-8') as f:
# # write the record
# f.writelines(data + '=======================' + '\n')
# print('保存成功')
| ["[email protected]"] | |
13700a8e3b257b54c718ee11ebc82eb267a92b87 | af8f0d50bb11279c9ff0b81fae97f754df98c350 | /src/tests/account/registration.py | 8717aa6a49451c1b33cfc8c6aa4bdab639888e5e | ["Apache-2.0"] | permissive | DmytroKaminskiy/ltt | 592ed061efe3cae169a4e01f21d2e112e58714a1 | d08df4d102e678651cd42928e2343733c3308d71 | refs/heads/master | 2022-12-18T09:56:36.077545 | 2020-09-20T15:57:35 | 2020-09-20T15:57:35 | 292,520,616 | 0 | 0 | Apache-2.0 | 2020-09-20T15:49:58 | 2020-09-03T09:09:26 | HTML | UTF-8 | Python | false | false | 4,191 | py |
from urllib.parse import urlparse
from account.models import User
from django.conf import settings
from django.core import mail
from django.urls import reverse, reverse_lazy
from tests.const import URLS_PATTERN
URL = reverse_lazy('account:django_registration_register')
def test_registration_get(client):
response = client.get(URL)
assert response.status_code == 200
assert 'form' in response.context_data
def test_registration_create_empty_data(client):
user_count = User.objects.count()
response = client.post(URL, data={})
assert response.status_code == 200
assert response.context_data['form'].errors == {
'email': ['This field is required.'],
'password1': ['This field is required.'],
'password2': ['This field is required.'],
}
assert len(mail.outbox) == 0
assert User.objects.count() == user_count
def test_registration_create_different_password(client, fake):
user_count = User.objects.count()
data = {
'email': fake.email(),
'password1': fake.password(),
'password2': fake.password(),
}
response = client.post(URL, data=data)
assert response.status_code == 200
assert response.context_data['form'].errors == {
'password2': ["The two password fields didn't match."]
}
assert len(mail.outbox) == 0
assert User.objects.count() == user_count
def test_registration_create_same_password(client, fake):
user_count = User.objects.count()
data = {
'email': fake.email(),
'password1': fake.password(),
}
data['password2'] = data['password1']
response = client.post(URL, data=data)
assert response.status_code == 302
assert response['Location'] == reverse('django_registration_complete')
assert User.objects.count() == user_count + 1
user = User.objects.last()
assert user.email == data['email']
assert user.is_active is False
assert len(mail.outbox) == 1
email = mail.outbox[0]
assert email.to == [data['email']]
assert email.cc == []
assert email.bcc == []
assert email.reply_to == []
assert email.from_email == settings.DEFAULT_FROM_EMAIL
assert email.subject == 'Activate your Account'
assert 'Thanks for signing up!' in email.body
url = urlparse(URLS_PATTERN.findall(email.body)[-1])
response = client.get(url.path)
assert response.status_code == 302
assert response['Location'] == reverse('django_registration_activation_complete')
user.refresh_from_db()
assert user.is_active is True
# post same data again
response = client.post(URL, data=data)
assert response.status_code == 200
assert response.context_data['form'].errors == {
'email': ['This email address is already in use. Please supply a different email address.'],
}
assert User.objects.count() == user_count + 1
assert len(mail.outbox) == 1
assert response.wsgi_request.user.is_authenticated is False
# test login wrong password
response = client.post(
reverse('login'),
data={'username': data['email'], 'password': 'wrong-password'},
)
assert response.status_code == 200
assert response.context_data['form'].errors == {
'__all__': ['Please enter a correct email address and password. Note that both fields may be case-sensitive.']
}
assert response.wsgi_request.user.is_authenticated is False
# test login wrong email
response = client.post(
reverse('login'),
data={'username': fake.email(), 'password': data['password1']},
)
assert response.status_code == 200
assert response.context_data['form'].errors == {
'__all__': ['Please enter a correct email address and password. Note that both fields may be case-sensitive.']
}
assert response.wsgi_request.user.is_authenticated is False
# test login correct
assert response.wsgi_request.user.is_authenticated is False
response = client.post(
reverse('login'),
data={'username': data['email'], 'password': data['password1']},
)
assert response.status_code == 302
assert response.wsgi_request.user.is_authenticated is True
| ["[email protected]"] | |
1c6a1215b0db21e8519fe8f44c4fd556a89e12d7 | 78c3808342711fe04e662cfea3d394e34841f2fb | /docs/rg/rgkod11.py | 05478f163c5d469994dbf4f3844fdac574ad4c29 | [] | no_license | astefaniuk/linetc | cd0f8aa1bb2858e971caddaf6e6396363ca50a47 | b23b3b4747dded19f7030862bf486a9e0f65b4e0 | refs/heads/master | 2021-01-22T13:08:15.266332 | 2015-06-12T21:37:59 | 2015-06-12T21:37:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py |
# go safely for the nearest enemy
# set-based version
if wrogowie:
najblizszy_wrog = mindist(wrogowie,self.location)
else:
najblizszy_wrog = rg.CENTER_POINT
| ["[email protected]"] | |
3510224b9ff10ba629557b67a1f2a7494d96ed42 | 4e353bf7035eec30e5ad861e119b03c5cafc762d | /QtCore/QElapsedTimer.py | 907ac3c36dc910112b15b9bf46aa8486cbb0d152 | [] | no_license | daym/PyQt4-Stubs | fb79f54d5c9a7fdb42e5f2506d11aa1181f3b7d5 | 57d880c0d453641e31e1e846be4087865fe793a9 | refs/heads/master | 2022-02-11T16:47:31.128023 | 2017-10-06T15:32:21 | 2017-10-06T15:32:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,312 | py |
# encoding: utf-8
# module PyQt4.QtCore
# from C:\Python27\lib\site-packages\PyQt4\QtCore.pyd
# by generator 1.145
# no doc
# imports
import sip as __sip
class QElapsedTimer(): # skipped bases: <type 'sip.simplewrapper'>
"""
QElapsedTimer()
QElapsedTimer(QElapsedTimer)
"""
def clockType(self): # real signature unknown; restored from __doc__
""" QElapsedTimer.clockType() -> QElapsedTimer.ClockType """
pass
def elapsed(self): # real signature unknown; restored from __doc__
""" QElapsedTimer.elapsed() -> int """
return 0
def hasExpired(self, p_int): # real signature unknown; restored from __doc__
""" QElapsedTimer.hasExpired(int) -> bool """
return False
def invalidate(self): # real signature unknown; restored from __doc__
""" QElapsedTimer.invalidate() """
pass
def isMonotonic(self): # real signature unknown; restored from __doc__
""" QElapsedTimer.isMonotonic() -> bool """
return False
def isValid(self): # real signature unknown; restored from __doc__
""" QElapsedTimer.isValid() -> bool """
return False
def msecsSinceReference(self): # real signature unknown; restored from __doc__
""" QElapsedTimer.msecsSinceReference() -> int """
return 0
def msecsTo(self, QElapsedTimer): # real signature unknown; restored from __doc__
""" QElapsedTimer.msecsTo(QElapsedTimer) -> int """
return 0
def nsecsElapsed(self): # real signature unknown; restored from __doc__
""" QElapsedTimer.nsecsElapsed() -> int """
return 0
def restart(self): # real signature unknown; restored from __doc__
""" QElapsedTimer.restart() -> int """
return 0
def secsTo(self, QElapsedTimer): # real signature unknown; restored from __doc__
""" QElapsedTimer.secsTo(QElapsedTimer) -> int """
return 0
def start(self): # real signature unknown; restored from __doc__
""" QElapsedTimer.start() """
pass
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __init__(self, QElapsedTimer=None): # real signature unknown; restored from __doc__ with multiple overloads
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
MachAbsoluteTime = 3
MonotonicClock = 1
PerformanceCounter = 4
SystemTime = 0
TickCounter = 2
| ["[email protected]"] | |
fa7f36f70571120cbb262878199b1a168357ff47 | d9ecb105ed56979691f7776238301a3d0564665e | /ParameterUI/__init__.py | ac2bbe925bef7623a4a85333d4e5c6e16cb5d4d1 | [] | no_license | muyr/hapi_test | 4dcc5eb8e5aea4a18556002aec3d68301cb09024 | 910ca037d9afc8fd112ff6dc4fc8686f7f188eb0 | refs/heads/main | 2023-03-28T19:37:25.518719 | 2021-03-30T12:03:20 | 2021-03-30T12:03:20 | 309,288,978 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py |
from MParameterLabel import MParameterLabel
from MParameterString import MParameterString
from MParameterStringChoice import MParameterStringChoice
from MParameterInteger import MParameterInteger
from MParameterIntegerChoice import MParameterIntegerChoice
from MParameterFloat import MParameterFloat
from MParameterFolder import MParameterFolder
from MParameterFolderList import MParameterFolderList
from MParameterToggle import MParameterToggle
from MParameterMulti import MParameterMulti
from MParameterMultiInstance import MParameterMultiInstance
from MParameterButton import MParameterButton
from MParameterColor import MParameterColor
from MParameterSeparator import MParameterSeparator
from MParameterPathFile import MParameterPathFile
from MParameterNode import MParameterNode
| ["[email protected]"] | |
ad94f6c7e328945e686c5c49d7071033fa26365a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03549/s009493844.py | b43be6ebe855df366e38343d97e9fb47993b6a8d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 608 | py |
import sys
from collections import defaultdict, deque, Counter
import math
# import copy
from bisect import bisect_left, bisect_right
# import heapq
# sys.setrecursionlimit(1000000)
# input aliases
input = sys.stdin.readline
getS = lambda: input().strip()
getN = lambda: int(input())
getList = lambda: list(map(int, input().split()))
getZList = lambda: [int(x) - 1 for x in input().split()]
INF = 10 ** 20
MOD = 10**9 + 7
divide = lambda x: pow(x, MOD-2, MOD)
def main():
    n, m = getList()
one = 1900 * m + 100 * (n - m)
print(one * (2 ** m))
if __name__ == "__main__":
main()
| ["[email protected]"] | |
c6c4e0a698e2185e960aa28af3da3501e4305561 | a2cbd654d7126c21442111fb315454561790b579 | /backend/dating/api/v1/urls.py | f32a7d281cd91a40f3f177ab0d3f660a362883a6 | [] | no_license | crowdbotics-apps/breakify-23632 | d43c9cfb23bf185c10499301f6e14ec441181907 | 14bc9010e101062f22a98837b5ac7e10de0511bf | refs/heads/master | 2023-02-09T11:07:18.680204 | 2020-12-31T16:20:19 | 2020-12-31T16:20:19 | 325,820,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py |
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .viewsets import (
SettingViewSet,
ProfileViewSet,
InboxViewSet,
DislikeViewSet,
MatchViewSet,
UserPhotoViewSet,
LikeViewSet,
)
router = DefaultRouter()
router.register("inbox", InboxViewSet)
router.register("profile", ProfileViewSet)
router.register("setting", SettingViewSet)
router.register("dislike", DislikeViewSet)
router.register("like", LikeViewSet)
router.register("match", MatchViewSet)
router.register("userphoto", UserPhotoViewSet)
urlpatterns = [
path("", include(router.urls)),
]
| ["[email protected]"] | |
7bcc3b56f063f47763a83bf930c3fc789819f161 | 0ffd4524067a737faf34bb60c4041a23258ac5cd | /assignment1/q1_softmax.py | 9fc47b3c18a9c55b546f6e0848605fbe5bffcbe2 | [] | no_license | gjwei/cs224n | 6dc410ab2efc8dfc665711daac5dd1e396ae7c8f | 1ebdd31d5f3943547dc1654c756387ae5d7ef9f3 | refs/heads/master | 2021-08-23T02:38:47.120738 | 2017-12-02T16:11:32 | 2017-12-02T16:11:32 | 112,850,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,168 | py |
import numpy as np
def softmax(x):
"""
Compute the softmax function for each row of the input x.
It is crucial that this function is optimized for speed because
it will be used frequently in later code.
You might find numpy functions np.exp, np.sum, np.reshape,
np.max, and numpy broadcasting useful for this task. (numpy
broadcasting documentation:
http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
You should also make sure that your code works for one
dimensional inputs (treat the vector as a row), you might find
it helpful for your later problems.
You must implement the optimization in problem 1(a) of the
written assignment!
"""
### YOUR CODE HERE
if x.ndim > 1:
x -= np.max(x, axis=1, keepdims=True)
x = np.exp(x)
x /= np.sum(x, axis=1, keepdims=True)
else:
x -= np.max(x)
x = np.exp(x)
x /= np.sum(x)
### END YOUR CODE
return x
def test_softmax_basic():
"""
Some simple tests to get you started.
Warning: these are not exhaustive.
"""
print "Running basic tests..."
test1 = softmax(np.array([1, 2]))
print test1
assert np.amax(np.fabs(test1 - np.array(
[0.26894142, 0.73105858]))) <= 1e-6
test2 = softmax(np.array([[1001, 1002], [3, 4]]))
print test2
assert np.amax(np.fabs(test2 - np.array(
[[0.26894142, 0.73105858], [0.26894142, 0.73105858]]))) <= 1e-6
test3 = softmax(np.array([[-1001, -1002]]))
print test3
assert np.amax(np.fabs(test3 - np.array(
[0.73105858, 0.26894142]))) <= 1e-6
print "You should verify these results!\n"
def test_softmax():
"""
Use this space to test your softmax implementation by running:
python q1_softmax.py
This function will not be called by the autograder, nor will
your tests be graded.
"""
print "Running your tests..."
### YOUR CODE HERE
a = np.random.random(size=(100, 49)) * 10
result = softmax(a)
### END YOUR CODE
if __name__ == "__main__":
test_softmax_basic()
test_softmax()
| ["[email protected]"] | |
efeea6d5486e83703709910a52331973707ea48f | e47875e83c19f8e7ec56fb1cf2ae7e67e650f15b | /kRPC/OrbitalLaunch/old/LaunchIntoOrbit_2.py | 4237e0b917cd2ff8ff4d5bc79f569b6e41f796bc | [] | no_license | crashtack/KSP | a69b031ca942adb9fd798de034605b2b2c229b8d | 2549319c116a4687639a0ebb59adafd8b6ce1ad9 | refs/heads/master | 2021-01-19T04:26:20.143710 | 2017-12-07T05:17:50 | 2017-12-07T05:17:50 | 63,728,682 | 0 | 0 | null | 2017-12-07T05:17:51 | 2016-07-19T21:17:52 | null | UTF-8 | Python | false | false | 2,517 | py |
import krpc, time, math
turn_start_altitude = 250
turn_end_altitude = 90000
target_altitude = 200000
conn = krpc.connect(name='Launch Science Station to Orbit')
vessel = conn.space_center.active_vessel
# Set up streams for telemetry
ut = conn.add_stream(getattr, conn.space_center, 'ut')
altitude = conn.add_stream(getattr, vessel.flight(), 'mean_altitude')
apoapsis = conn.add_stream(getattr, vessel.orbit, 'apoapsis_altitude')
periapsis = conn.add_stream(getattr, vessel.orbit, 'periapsis_altitude')
eccentricity = conn.add_stream(getattr, vessel.orbit, 'eccentricity')
stage_2_resources = vessel.resources_in_decouple_stage(stage=2, cumulative=False)
stage_3_resources = vessel.resources_in_decouple_stage(stage=3, cumulative=False)
srb_fuel = conn.add_stream(stage_3_resources.amount, 'SolidFuel')
launcher_fuel = conn.add_stream(stage_2_resources.amount, 'LiquidFuel')
# Pre-launch setup
vessel.control.sas = False
vessel.control.rcs = False
vessel.control.throttle = 1
# Countdown...
print('3...'); time.sleep(1)
print('2...'); time.sleep(1)
print('1...'); time.sleep(1)
print('Launch!')
# Activate the first stage
vessel.control.activate_next_stage()
vessel.auto_pilot.engage()
vessel.auto_pilot.target_pitch_and_heading(90, 90)
# Main ascent loop
srbs_separated = False
turn_angle = 0
while True:
time.sleep(.05)
print("altitude: %.2f" % altitude())
# Gravity turn
if altitude() > turn_start_altitude and altitude() < turn_end_altitude:
frac = (altitude() - turn_start_altitude) / (turn_end_altitude - turn_start_altitude)
new_turn_angle = frac * 90
if abs(new_turn_angle - turn_angle) > 0.5:
turn_angle = new_turn_angle
vessel.auto_pilot.target_pitch_and_heading(90-turn_angle, 90)
# Separate SRBs when finished
if not srbs_separated:
#print("srb fuel: %f" % srb_fuel())
if srb_fuel() < .1:
time.sleep(.5)
vessel.control.activate_next_stage()
srbs_separated = True
print('SRBs separated')
# Decrease throttle when approaching target apoapsis
if apoapsis() > target_altitude*0.9:
print('Approaching target apoapsis')
break
# Disable engines when target apoapsis is reached
vessel.control.throttle = 0.25
while apoapsis() < target_altitude:
pass
print('Target apoapsis reached')
vessel.control.throttle = 0
# Wait until out of atmosphere
print('Coasting out of atmosphere')
while altitude() < 70500:
pass
print('Launch complete')
| ["[email protected]"] | |
d67d3eaeff1fdd029f4ca5a75a83df6c79287ba1 | 7bc54bae28eec4b735c05ac7bc40b1a8711bb381 | /src/models/keras_model/AverageEmbedding.py | 3a6238d5bcbbe898432e3adcac1b93bdc85d2781 | [] | no_license | clover3/Chair | 755efd4abbd5f3f2fb59e9b1bc6e7bc070b8d05e | a2102ebf826a58efbc479181f1ebb5de21d1e49f | refs/heads/master | 2023-07-20T17:29:42.414170 | 2023-07-18T21:12:46 | 2023-07-18T21:12:46 | 157,024,916 | 0 | 0 | null | 2023-02-16T05:20:37 | 2018-11-10T21:55:29 | Python | UTF-8 | Python | false | false | 828 | py |
import keras
import tensorflow as tf
class WeightedSum(keras.layers.Layer):
def __init__(self):
super(WeightedSum, self).__init__()
def call(self, args):
x = args[0]
m = args[1]
        s = tf.reduce_sum(x, axis=1)
        # NOTE: tf.equal(m, 0) counts *padding* positions; an average over real
        # tokens would use tf.not_equal(m, 0). Kept as in the original source.
        d = tf.reduce_sum(tf.cast(tf.equal(m, 0), tf.float32), axis=-1)
        s = s / tf.expand_dims(d, 1)
        return s
def make_embedding_layer(params, name: str = 'embedding') -> keras.layers.Layer:
    # keras.layers.Embedding ("Embeddings" in the original was a typo).
    return keras.layers.Embedding(
        params['embedding_input_dim'],
        params['embedding_output_dim'],
        trainable=params['embedding_trainable'],
        name=name,
    )
def build_model(word_index, embedding_matrix, embedding_dim, max_seq_length):
    # The original body referenced undefined names (params, query, doc, out);
    # this is a hedged reconstruction, not the author's verified wiring.
    params = {'embedding_input_dim': len(word_index) + 1,
              'embedding_output_dim': embedding_dim,
              'embedding_trainable': False}  # trainable flag is an assumption
    embedding = make_embedding_layer(params)
    query = keras.Input(shape=(max_seq_length,), name='query')
    doc = keras.Input(shape=(max_seq_length,), name='doc')
    out = WeightedSum()([embedding(query), query])  # masked average over query tokens
    model = keras.Model(inputs=[query, doc], outputs=[out])
    return model
| ["[email protected]"] | |
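A quick numeric check of the masked-average idea behind the `WeightedSum` layer above, sketched in plain NumPy under the assumption that `x` holds embedded tokens of shape [batch, seq, dim] and `m` holds token ids with 0 as padding (the sketch divides by the count of non-zero ids, the usual intent, whereas the layer as committed counts the zero entries):

import numpy as np

x = np.array([[[1.0, 2.0], [3.0, 4.0], [0.0, 0.0]]])  # one sequence, 3 positions
m = np.array([[5, 7, 0]])                             # last position is padding
s = x.sum(axis=1)                                     # sum over token positions -> [[4. 6.]]
d = (m != 0).sum(axis=-1, keepdims=True)              # real-token count -> [[2]]
print(s / d)                                          # [[2. 3.]]: mean of the two real embeddings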
3d720c47a2290b71c9e834a808bf58802fdc1e16 | ecc1638f75a6ccd814923cb980e69d770c2525b7 | /Workspace for Python/studying file/class/Greeter.py | cac9ab6ba069fec9304a0df0bdd989b97385857d | ["MIT"] | permissive | ArchibaldChain/python-workspace | 5570e1df01f29f9916129e12d7fb1fb0608255d7 | 71890f296c376155e374b2096ac3d8f1d286b7d2 | refs/heads/master | 2022-12-01T03:00:37.224908 | 2020-08-04T10:04:47 | 2020-08-04T10:04:47 | 174,573,744 | 0 | 1 | MIT | 2022-11-22T04:02:07 | 2019-03-08T16:45:09 | Jupyter Notebook | UTF-8 | Python | false | false | 342 | py |
class Greeter(object):
# constructor
def __init__(self, name):
self.name = name
# instance method
def greet(self, loud=False):
if loud:
print('HELLO, %s' % self.name.upper())
else:
print("hello, %s" % self.name)
g = Greeter('Freed')
g.greet()
g.greet(True)
| ["[email protected]"] | |
bd4068cefe9147518981b5aba901ffe4a4d36eee | 56a0762c741bcac3ab1172eb6114a9e59a48a5df | /domotica/apps.py | fab422866156e3faf00870f59a37cbf90056dadd | ["MIT"] | permissive | jjmartinr01/gauss3 | 54af1735a035a566f237d8e0fd9a6fe4447845a2 | 41a23d35c763890d8f729c9d63ac073673689400 | refs/heads/master | 2023-08-23T06:40:51.033857 | 2023-08-08T11:50:50 | 2023-08-08T11:50:50 | 171,710,013 | 1 | 0 | MIT | 2023-02-15T18:43:56 | 2019-02-20T16:35:03 | HTML | UTF-8 | Python | false | false | 91 | py |
from django.apps import AppConfig
class DomoticaConfig(AppConfig):
name = 'domotica'
| ["[email protected]"] | |
8c9d773cf834776b2cef2c5b7df3300f7601ecc1 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/bgp/bdevi.py | ced83174740c95c1b16930b1c648c607d05579b5 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,393 | py |
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class BDEvi(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = ClassMeta("cobra.model.bgp.BDEvi")
meta.moClassName = "bgpBDEvi"
meta.rnFormat = "bdevi-[%(encap)s]"
meta.category = MoCategory.REGULAR
meta.label = "Bridge Domain Ethernet VPN Instance"
meta.writeAccessMask = 0x8008020040001
meta.readAccessMask = 0x8008020040001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.bgp.EVpnImetRoute")
meta.childClasses.add("cobra.model.bgp.EVpnMacIpRoute")
meta.childClasses.add("cobra.model.bgp.EVpnPfxRoute")
meta.childClasses.add("cobra.model.bgp.CktEpEvi")
meta.childClasses.add("cobra.model.bgp.RttP")
meta.childNamesAndRnPrefix.append(("cobra.model.bgp.EVpnMacIpRoute", "evpnmaciprt-"))
meta.childNamesAndRnPrefix.append(("cobra.model.bgp.EVpnImetRoute", "evpnimetrt-"))
meta.childNamesAndRnPrefix.append(("cobra.model.bgp.EVpnPfxRoute", "evpnpfxrt-"))
meta.childNamesAndRnPrefix.append(("cobra.model.bgp.CktEpEvi", "cktepevi-"))
meta.childNamesAndRnPrefix.append(("cobra.model.bgp.RttP", "rttp-"))
meta.parentClasses.add("cobra.model.bgp.Dom")
meta.superClasses.add("cobra.model.nw.Conn")
meta.superClasses.add("cobra.model.bgp.EviBase")
meta.superClasses.add("cobra.model.nw.Item")
meta.superClasses.add("cobra.model.nw.CpDom")
meta.superClasses.add("cobra.model.nw.GEp")
meta.superClasses.add("cobra.model.bgp.EncapEviBase")
meta.rnPrefixes = [
('bdevi-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "encap", "encap", 20695, PropCategory.REGULAR)
prop.label = "Encapsulation"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("encap", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "name", "name", 16434, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(1, 128)]
meta.props.add("name", prop)
prop = PropMeta("str", "rd", "rd", 20636, PropCategory.REGULAR)
prop.label = "Route Distinguisher"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("rd", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "encap"))
getattr(meta.props, "encap").needDelimiter = True
def __init__(self, parentMoOrDn, encap, markDirty=True, **creationProps):
namingVals = [encap]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| ["[email protected]"] | |
08a482edc4791f5a7c670fbd4fb08bbe58dbb95f | 6ee8765a4d98472d32b1aa22f9a885f4ab54adae | /select_with_filter.py | 8c55222aa57348a1abbd9c23bbe17e2ba7b5dcec | [] | no_license | rohitaswchoudhary/mysql_python | 31e9b55f30fdd87a7c7eb7d2b24e75f8d8cf58ce | 9160b7e374472ccfafadc39d6692bc7a798d99c0 | refs/heads/main | 2023-06-03T23:25:59.713109 | 2021-02-26T15:01:12 | 2021-02-26T15:01:12 | 377,754,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,207 | py |
import mysql.connector
mydb = mysql.connector.connect(
host='localhost',
user='root',
password='Rohitasw@2002',
database = "mydatabase"
)
mycursor = mydb.cursor()
# Select With a Filter
# When selecting records from a table, you can filter the selection by using the "WHERE" statement:
sql = "SELECT * FROM customers WHERE address ='Park Lane 38'"
mycursor.execute(sql)
myresult = mycursor.fetchall()
for x in myresult:
print(x)
# Wildcard Characters
# You can also select records that start with, include, or end with a given letter or phrase.
# Use the % character to represent wildcards:
sql = "SELECT * FROM customers WHERE address LIKE '%way%'"
mycursor.execute(sql)
myresult = mycursor.fetchall()
for x in myresult:
print(x)
# Prevent SQL Injection
# When query values are provided by the user, you should escape the values.
# This is to prevent SQL injections, which is a common web hacking technique to destroy or misuse your database.
# The mysql.connector module has methods to escape query values:
sql = "SELECT * FROM customers WHERE address = %s"
adr = ("Yellow Garden 2", )
mycursor.execute(sql, adr)
myresult = mycursor.fetchall()
for x in myresult:
print(x)
| ["[email protected]"] | |
a343f6cb593f4ac770460ec9fed2e071f2bc7a98 | e0f133b49f9f0f416f14da70a2cadb7011c0cb7b | /new_spider/downloader_sx/sx_render_local_downloader_phantomJS.py | 8cdd9dc021fba1d1b144c03a5b6055d148945024 | [] | no_license | cash2one/python_frame | ac52d052fd3698303f1f4fa022f3b35a56e07533 | 2dbda155780a19cf42d5376104879d0667fbbf75 | refs/heads/master | 2021-06-18T13:28:40.356527 | 2017-06-28T02:51:35 | 2017-06-28T02:51:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,789 | py |
# -*- coding: utf8 -*-
import sys
import traceback
import urllib2
from downloader.downloader import Downloader
from downloader_sx.list_pulldown_picture.sx_picture_phantomjs import FindPicture
reload(sys)
sys.setdefaultencoding('utf8')
# from downloader.picture_phantomjs
class HtmlLocalDownloader(Downloader):
"""
    HTML downloader
"""
def __init__(self, set_mode='db', get_mode='db'):
super(HtmlLocalDownloader, self).__init__(set_mode, get_mode)
def set(self, request):
try:
results = dict()
# param = request.downloader_get_param('http')
param = request.downloader_set_param('http')
for url in param['urls']:
# print url
results[url["url"]] = 1
return results
except Exception:
print(traceback.format_exc())
return 0
@staticmethod
def encoding(data):
types = ['utf-8', 'gb2312', 'gbk', 'gb18030', 'iso-8859-1']
for t in types:
try:
return data.decode(t)
except Exception, e:
pass
return None
def get(self, request):
param = request.downloader_set_param('http')
if param is None:
return 0
urls = param['urls']
if len(urls) > 0:
try:
results = dict()
for url in urls:
task = {"url": url["url"], "type": 4, "store_type": 1, "status": "3", "result": ""}
result = {"url": url["url"], "status": "3", "result": "", "header": ""}
for i in range(0, 2):
try:
import datetime
starttime = datetime.datetime.now()
print "开始截图"
# render = WebRender(task)
# sx_result = render.result
# sx_result = ""
sx = FindPicture()
sx_result = sx.picture_screenshot_html(url["url"])
endtime = datetime.datetime.now()
print (endtime - starttime).seconds
if sx_result:
result['status'] = 2
result['result'] = sx_result
break
except Exception as e:
print e
print('抓取失败:第%d次' % (i + 1))
results[url['md5']] = result
return results
except Exception, e:
print sx_result
print e
print 'get:'+(traceback.format_exc())
return 0
def get_result(self, opener, request, result):
for i in range(0, 2):
try:
response = opener.open(request, timeout=10)
                # In which cases is the response a tuple?
if isinstance(response, tuple):
result["redirect_url"] = response[0]
result["code"] = response[1]
headers = {}
if "User-agent" in request.headers.keys():
headers = {"User-agent": request.headers.get("User-agent")}
request = urllib2.Request(result["redirect_url"], headers=headers)
self.get_result(opener, request, result)
else:
header = response.info()
body = response.read()
if ('Content-Encoding' in header and header['Content-Encoding']) or \
('content-encoding' in header and header['content-encoding']):
import gzip
import StringIO
d = StringIO.StringIO(body)
gz = gzip.GzipFile(fileobj=d)
body = gz.read()
gz.close()
body = self.encoding(body)
if body is not None:
# result["result"] = body
# base64.b64encode()
result["result"] = body
result["status"] = "2"
if str(result["type"]) == "2":
result["header"] = str(header)
break
except urllib2.HTTPError, e:
print e.code
result["code"] = e.code
break
except Exception, e:
                # 404 pages (or a dropped connection) can also end up here
# print e
pass
class UnRedirectHandler(urllib2.HTTPRedirectHandler):
def __init__(self):
pass
def http_error_302(self, req, fp, code, msg, headers):
# if 'location' in headers:
# newurl = headers.getheaders('location')[0]
# print 'header location:'+newurl
# return newurl
print headers
if 'location' in headers:
newurl = headers.getheaders('location')[0]
return newurl, code
pass
if __name__ == "__main__":
sx = FindPicture()
sx_result = sx.picture_screenshot_html("https://www.baidu.com/s?wd=%E6%B7%AE%E5%AE%89%E4%BA%BA%E6%89%8D%E7%BD%91%E6%9C%80%E6%96%B0%E6%8B%9B%E8%81%98%E4%BF%A1%E6%81%AF&rsv_spt=1&rsv_iqid=0x9d684d0e0000cecd&issp=1&f=8&rsv_bp=1&rsv_idx=2&ie=utf-8&rqlang=cn&tn=78040160_5_pg&rsv_enter=0&oq=%E6%8B%9B%E8%81%98&rsv_t=7e39msJWAhkatRpmx%2F691Ir2BU1904ljWxb%2B3gy7cl5pNJsIfLHDNBbY7prEA2Kv9ez9OQ&rsv_pq=dd1bb49d0003954a&inputT=135689006&rsv_n=2&rsv_sug3=1298&bs=%E6%8B%9B%E8%81%98")
print sx_result
| ["[email protected]"] | |
a8da85d257cc62052c4f35689f975fc7464e7619 | 09efb7c148e82c22ce6cc7a17b5140aa03aa6e55 | /env/lib/python3.6/site-packages/plotly/validators/scatterternary/__init__.py | bf59e22ccd4da2a9a4c7aa7e40b78651b9390213 | ["MIT"] | permissive | harryturr/harryturr_garmin_dashboard | 53071a23b267116e1945ae93d36e2a978c411261 | 734e04f8257f9f84f2553efeb7e73920e35aadc9 | refs/heads/master | 2023-01-19T22:10:57.374029 | 2020-01-29T10:47:56 | 2020-01-29T10:47:56 | 235,609,069 | 4 | 0 | MIT | 2023-01-05T05:51:27 | 2020-01-22T16:00:13 | Python | UTF-8 | Python | false | false | 34,573 | py |
import _plotly_utils.basevalidators
class VisibleValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="visible", parent_name="scatterternary", **kwargs):
super(VisibleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", [True, False, "legendonly"]),
**kwargs
)
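# A brief usage sketch (assuming plotly's BaseValidator.validate_coerce API):
# the validator checks and coerces a user-supplied value for the `visible` property.
#
#   v = VisibleValidator()
#   v.validate_coerce("legendonly")   # -> "legendonly"
#   v.validate_coerce("nope")         # raises ValueError listing the valid values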
import _plotly_utils.basevalidators
class UnselectedValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name="unselected", parent_name="scatterternary", **kwargs
):
super(UnselectedValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Unselected"),
data_docs=kwargs.pop(
"data_docs",
"""
marker
plotly.graph_objects.scatterternary.unselected.
Marker instance or dict with compatible
properties
textfont
plotly.graph_objects.scatterternary.unselected.
Textfont instance or dict with compatible
properties
""",
),
**kwargs
)
import _plotly_utils.basevalidators
class UirevisionValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(
self, plotly_name="uirevision", parent_name="scatterternary", **kwargs
):
super(UirevisionValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class UidValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="uid", parent_name="scatterternary", **kwargs):
super(UidValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class TexttemplatesrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="texttemplatesrc", parent_name="scatterternary", **kwargs
):
super(TexttemplatesrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class TexttemplateValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="texttemplate", parent_name="scatterternary", **kwargs
):
super(TexttemplateValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class TextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="textsrc", parent_name="scatterternary", **kwargs):
super(TextsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class TextpositionsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="textpositionsrc", parent_name="scatterternary", **kwargs
):
super(TextpositionsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class TextpositionValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="textposition", parent_name="scatterternary", **kwargs
):
super(TextpositionValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
values=kwargs.pop(
"values",
[
"top left",
"top center",
"top right",
"middle left",
"middle center",
"middle right",
"bottom left",
"bottom center",
"bottom right",
],
),
**kwargs
)
import _plotly_utils.basevalidators
class TextfontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="textfont", parent_name="scatterternary", **kwargs):
super(TextfontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Textfont"),
data_docs=kwargs.pop(
"data_docs",
"""
color
colorsrc
Sets the source reference on plot.ly for color
.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for
family .
size
sizesrc
Sets the source reference on plot.ly for size
.
""",
),
**kwargs
)
import _plotly_utils.basevalidators
class TextValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="text", parent_name="scatterternary", **kwargs):
super(TextValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class SumValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="sum", parent_name="scatterternary", **kwargs):
super(SumValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class SubplotValidator(_plotly_utils.basevalidators.SubplotidValidator):
def __init__(self, plotly_name="subplot", parent_name="scatterternary", **kwargs):
super(SubplotValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
dflt=kwargs.pop("dflt", "ternary"),
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class StreamValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="stream", parent_name="scatterternary", **kwargs):
super(StreamValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Stream"),
data_docs=kwargs.pop(
"data_docs",
"""
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See
https://plot.ly/settings for more details.
""",
),
**kwargs
)
import _plotly_utils.basevalidators
class ShowlegendValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name="showlegend", parent_name="scatterternary", **kwargs
):
super(ShowlegendValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class SelectedpointsValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(
self, plotly_name="selectedpoints", parent_name="scatterternary", **kwargs
):
super(SelectedpointsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class SelectedValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="selected", parent_name="scatterternary", **kwargs):
super(SelectedValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Selected"),
data_docs=kwargs.pop(
"data_docs",
"""
marker
plotly.graph_objects.scatterternary.selected.Ma
rker instance or dict with compatible
properties
textfont
plotly.graph_objects.scatterternary.selected.Te
xtfont instance or dict with compatible
properties
""",
),
**kwargs
)
import _plotly_utils.basevalidators
class OpacityValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="opacity", parent_name="scatterternary", **kwargs):
super(OpacityValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
max=kwargs.pop("max", 1),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class NameValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="name", parent_name="scatterternary", **kwargs):
super(NameValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class ModeValidator(_plotly_utils.basevalidators.FlaglistValidator):
def __init__(self, plotly_name="mode", parent_name="scatterternary", **kwargs):
super(ModeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
extras=kwargs.pop("extras", ["none"]),
flags=kwargs.pop("flags", ["lines", "markers", "text"]),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class MetasrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="metasrc", parent_name="scatterternary", **kwargs):
super(MetasrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class MetaValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name="meta", parent_name="scatterternary", **kwargs):
super(MetaValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class MarkerValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="marker", parent_name="scatterternary", **kwargs):
super(MarkerValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Marker"),
data_docs=kwargs.pop(
"data_docs",
"""
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.colorscale`. Has an
effect only if in `marker.color`is set to a
numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the
default palette will be chosen according to
whether numbers in the `color` array are all
positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `marker.color`) or the bounds set in
`marker.cmin` and `marker.cmax` Has an effect
only if in `marker.color`is set to a numerical
array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has
an effect only if in `marker.color`is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `marker.cmin` and/or `marker.cmax` to
be equidistant to this point. Has an effect
only if in `marker.color`is set to a numerical
array. Value should have the same units as in
`marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has
an effect only if in `marker.color`is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmax` must be set as well.
color
            Sets the marker color. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorbar
plotly.graph_objects.scatterternary.marker.Colo
rBar instance or dict with compatible
properties
colorscale
Sets the colorscale. Has an effect only if in
`marker.color`is set to a numerical array. The
colorscale must be an array containing arrays
mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in
color space, use`marker.cmin` and
`marker.cmax`. Alternatively, `colorscale` may
be a palette name string of the following list:
Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Bl
ues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,E
arth,Electric,Viridis,Cividis.
colorsrc
Sets the source reference on plot.ly for color
.
gradient
plotly.graph_objects.scatterternary.marker.Grad
ient instance or dict with compatible
properties
line
plotly.graph_objects.scatterternary.marker.Line
instance or dict with compatible properties
maxdisplayed
Sets a maximum number of points to be drawn on
the graph. 0 corresponds to no limit.
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on plot.ly for
opacity .
reversescale
Reverses the color mapping if true. Has an
effect only if in `marker.color`is set to a
numerical array. If true, `marker.cmin` will
correspond to the last color in the array and
`marker.cmax` will correspond to the first
color.
showscale
Determines whether or not a colorbar is
displayed for this trace. Has an effect only if
in `marker.color`is set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px)
of the rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the
data in `size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points.
Use with `sizemin` and `sizemode`.
sizesrc
Sets the source reference on plot.ly for size
.
symbol
Sets the marker symbol type. Adding 100 is
equivalent to appending "-open" to a symbol
name. Adding 200 is equivalent to appending
"-dot" to a symbol name. Adding 300 is
equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
symbolsrc
Sets the source reference on plot.ly for
symbol .
""",
),
**kwargs
)
import _plotly_utils.basevalidators
class LineValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="line", parent_name="scatterternary", **kwargs):
super(LineValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Line"),
data_docs=kwargs.pop(
"data_docs",
"""
color
Sets the line color.
dash
Sets the dash style of lines. Set to a dash
type string ("solid", "dot", "dash",
"longdash", "dashdot", or "longdashdot") or a
dash length list in px (eg "5px,10px,2px,2px").
shape
Determines the line shape. With "spline" the
lines are drawn using spline interpolation. The
other available values correspond to step-wise
line shapes.
smoothing
Has an effect only if `shape` is set to
"spline" Sets the amount of smoothing. 0
corresponds to no smoothing (equivalent to a
"linear" shape).
width
Sets the line width (in px).
""",
),
**kwargs
)
import _plotly_utils.basevalidators
class LegendgroupValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="legendgroup", parent_name="scatterternary", **kwargs
):
super(LegendgroupValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class IdssrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="idssrc", parent_name="scatterternary", **kwargs):
super(IdssrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class IdsValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="ids", parent_name="scatterternary", **kwargs):
super(IdsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "data"),
**kwargs
)
import _plotly_utils.basevalidators
class HovertextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="hovertextsrc", parent_name="scatterternary", **kwargs
):
super(HovertextsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class HovertextValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="hovertext", parent_name="scatterternary", **kwargs):
super(HovertextValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "style"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class HovertemplatesrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="hovertemplatesrc", parent_name="scatterternary", **kwargs
):
super(HovertemplatesrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class HovertemplateValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="hovertemplate", parent_name="scatterternary", **kwargs
):
super(HovertemplateValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class HoveronValidator(_plotly_utils.basevalidators.FlaglistValidator):
def __init__(self, plotly_name="hoveron", parent_name="scatterternary", **kwargs):
super(HoveronValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
flags=kwargs.pop("flags", ["points", "fills"]),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class HoverlabelValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name="hoverlabel", parent_name="scatterternary", **kwargs
):
super(HoverlabelValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Hoverlabel"),
data_docs=kwargs.pop(
"data_docs",
"""
align
Sets the horizontal alignment of the text
content within hover label box. Has an effect
            only if the hover label text spans two or
more lines
alignsrc
Sets the source reference on plot.ly for align
.
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on plot.ly for
bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on plot.ly for
bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on plot.ly for
namelength .
""",
),
**kwargs
)
import _plotly_utils.basevalidators
class HoverinfosrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="hoverinfosrc", parent_name="scatterternary", **kwargs
):
super(HoverinfosrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class HoverinfoValidator(_plotly_utils.basevalidators.FlaglistValidator):
def __init__(self, plotly_name="hoverinfo", parent_name="scatterternary", **kwargs):
super(HoverinfoValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
extras=kwargs.pop("extras", ["all", "none", "skip"]),
flags=kwargs.pop("flags", ["a", "b", "c", "text", "name"]),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class FillcolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(self, plotly_name="fillcolor", parent_name="scatterternary", **kwargs):
super(FillcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
role=kwargs.pop("role", "style"),
**kwargs
)
import _plotly_utils.basevalidators
class FillValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="fill", parent_name="scatterternary", **kwargs):
super(FillValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["none", "toself", "tonext"]),
**kwargs
)
import _plotly_utils.basevalidators
class CustomdatasrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="customdatasrc", parent_name="scatterternary", **kwargs
):
super(CustomdatasrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class CustomdataValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(
self, plotly_name="customdata", parent_name="scatterternary", **kwargs
):
super(CustomdataValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "data"),
**kwargs
)
import _plotly_utils.basevalidators
class CsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="csrc", parent_name="scatterternary", **kwargs):
super(CsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class ConnectgapsValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name="connectgaps", parent_name="scatterternary", **kwargs
):
super(ConnectgapsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class CliponaxisValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name="cliponaxis", parent_name="scatterternary", **kwargs
):
super(CliponaxisValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class CValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="c", parent_name="scatterternary", **kwargs):
super(CValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "data"),
**kwargs
)
import _plotly_utils.basevalidators
class BsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="bsrc", parent_name="scatterternary", **kwargs):
super(BsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class BValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="b", parent_name="scatterternary", **kwargs):
super(BValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "data"),
**kwargs
)
import _plotly_utils.basevalidators
class AsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="asrc", parent_name="scatterternary", **kwargs):
super(AsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class AValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="a", parent_name="scatterternary", **kwargs):
super(AValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "data"),
**kwargs
)
|
[
"[email protected]"
] | |
a547eea325ba1119f9fbf2b2bb4e7fcf323eb6cb
|
9e30a239886210dc57e6c7cb9a71ad95a840712e
|
/views/posts_with_more_postive_recations/tests/__init__.py
|
5bdbd747b63d59acf9e27cc1f37f3940355ac2b3
|
[] |
no_license
|
sridhar562345/fb_post_v2
|
0a26d661a3f335d9a9cf129c24265d7674b3fb22
|
dfd150ab5521f05291f66944d7a8686a00477547
|
refs/heads/master
| 2022-11-08T00:32:35.752419 | 2020-06-23T15:32:02 | 2020-06-23T15:32:02 | 274,440,245 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 339 |
py
|
# pylint: disable=wrong-import-position
APP_NAME = "fb_post_v2"
OPERATION_NAME = "posts_with_more_postive_recations"
REQUEST_METHOD = "get"
URL_SUFFIX = "more/postive/reactions/posts/v1/"
from .test_case_01 import TestCase01PostsWithMorePostiveRecationsAPITestCase
__all__ = [
"TestCase01PostsWithMorePostiveRecationsAPITestCase"
]
|
[
"="
] |
=
|
5a4a390b1acf15d380fc6a36f240cf181b7614db
|
f6078890ba792d5734d289d7a0b1d429d945a03a
|
/hw2/submission/babbejames/babbejames_11046_1275480_119_hw_2_jtb/119_hw2.2.py
|
68334c12c326ab34413cf2530584fbe2321c2faa
|
[] |
no_license
|
huazhige/EART119_Lab
|
1c3d0b986a0f59727ee4ce11ded1bc7a87f5b7c0
|
47931d6f6a2c7bc053cd15cef662eb2f2027712c
|
refs/heads/master
| 2020-05-04T23:40:53.709217 | 2019-06-11T18:30:45 | 2019-06-11T18:30:45 | 179,552,067 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,943 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: jtbabbe
"""
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.basemap import Basemap
#------------my modules-----------------------
import seis_utils
#--------------------------0---------------------------------------------
# params, dirs, files, equations
#------------------------------------------------------------------------
file_eq = 'seism_OK.txt'
file_well = 'injWell_OK.txt'
dPar = { 'nClicks' : 10,
'tmin' : 2010,
'areaOK' : 181*1e3,#in km
# -----basemap params----------------------
'xmin' : -101, 'xmax' : -94,
'ymin' : 33.5, 'ymax' : 37.1,
'projection' : 'aea',# or 'cea' 'aea' for equal area projections
}
# Decimal year equation
def DecYear( YR, MO, DY, HR, MN, SC):
    return (YR + (MO-1)/12. + (DY-1)/365.25 + HR/(365.25*24)
            + MN/(365.25*24*60) + SC/(365.25*24*3600))
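# Quick sanity check (hypothetical values): noon on July 2 is near mid-year,
#   DecYear(2020, 7, 2, 12, 0, 0)  ->  ~2020.504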
#--------------------------1---------------------------------------------
# load data
#------------------------------------------------------------------------
# load seismicity and well data using loadtxt
mSeis = np.loadtxt( file_eq).T
# unpack date/time columns (mSeis has to be loaded before this)
YR, MO, DY, HR, MN, SC = mSeis[1], mSeis[2], mSeis[3], mSeis[4], mSeis[5], mSeis[6]
# time to decimal years
aTime = DecYear( YR, MO, DY, HR, MN, SC)
mSeis = np.array( [aTime, mSeis[7], mSeis[8], mSeis[-1]])
# sort events chronologically
sort_id = aTime.argsort()
mSeis = mSeis.T[sort_id].T
mWells = np.loadtxt( file_well).T
#--------------------------2---------------------------------------------
# map view, select boundaries of seismicity
#------------------------------------------------------------------------
# time bins for the map snapshots; 'dt_map' is not defined in dPar above,
# so a 2-year bin width is assumed here
at_bin = np.arange( dPar['tmin'], 2018, dPar.get('dt_map', 2))
for i in range( at_bin.shape[0]-1):
    t1, t2 = at_bin[i], at_bin[i+1]
#select earthquakes after tmin (compare with the number, not the string 'tmin')
sel_eq = mSeis[0] >= dPar['tmin']
#select wells with start dates after tmin
sel_well = mWells[1] >= dPar['tmin']
# create basemap object
plt.figure(2)
plt.cla()
ax2 = plt.subplot(111)
lon_0, lat_0 = .5*( dPar['xmin']+dPar['xmax']), .5*( dPar['ymin']+dPar['ymax'])
# project into equal area system
m = Basemap(llcrnrlon = dPar['xmin'], urcrnrlon=dPar['xmax'],
llcrnrlat = dPar['ymin'], urcrnrlat=dPar['ymax'],
projection=dPar['projection'], lon_0 = lon_0, lat_0 = lat_0)
#draw state boundaries
m.drawstates(color = 'aqua')
#convert spherical to 2D coordinate system using basemap
xpt_Seis, ypt_Seis = m(mSeis[-4][sel_eq], mSeis[-3][sel_eq])
xpt_Well, ypt_Well = m(mWells[3][sel_well], mWells[4][sel_well])
#plot seismicity and well locations
plt.plot(xpt_Seis, ypt_Seis, 'ro', ms = 6, mew = 1.5, mfc = 'none', label = 'seismicity')
plt.plot(xpt_Well, ypt_Well, 'bo', ms = 6, mew = 1.5, mfc = 'none', label = 'wells')
# x and y labels
m.drawparallels( np.arange( 33, 38, 1), fmt='%i',labels=[1,0,0,0])
m.drawmeridians( np.arange(-100, -92, 2), fmt='%i',labels=[0,0,0,1])
print("Please click %i times"%( dPar['nClicks']))
tCoord = plt.ginput( dPar['nClicks'])
print("clicked", tCoord)
plt.show()
aLon = np.array( tCoord).T[0]
aLat = np.array( tCoord).T[1]
#--------------------------3---------------------------------------------
# compute affected area
#------------------------------------------------------------------------
#TODO: compute area using seis_utils.area_poly
def area_poly( aX, aY):
    # Shoelace formula: area of a simple polygon from its vertex coordinates
    sumVert1 = np.dot( aX[0:-1], aY[1::]) + aX[-1]*aY[0]
    sumVert2 = np.dot( aY[0:-1], aX[1::]) + aY[-1]*aX[0]
    return 0.5*abs( sumVert1 - sumVert2)
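# Quick check with a unit square (hypothetical input):
#   area_poly(np.array([0, 1, 1, 0]), np.array([0, 0, 1, 1]))  ->  1.0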
A_seis = area_poly(aLon, aLat)
print('total area affected by seismicity: ', A_seis)
print( 'fraction of area of OK', A_seis/(dPar['areaOK'])) # about 1/3
|
[
"[email protected]"
] | |
c248ec65670cf22224af860090d811ffa837db1f
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/4t6YAJS8dtT7RQjta_4.py
|
8feb0bb3b7cebedc11c07c31453a8da5dcb0fe43
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 55 |
py
|
def num_layers(n):
    # Thickness of a 0.5 mm sheet folded n times, returned as a string in metres.
    return str(0.0005 * 2 ** n) + 'm'
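# Example (hypothetical call): num_layers(21) -> '1048.576m'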
|
[
"[email protected]"
] | |
f06a5cdc1389ec164b3277400d4616727b4d0b93
|
4f57124af46dd2a73166239df9c53af561d5f5d6
|
/venv/lib/python2.7/site-packages/openstack/tests/unit/orchestration/v1/test_proxy.py
|
0193781afe351d66a8143d8ea5423be658a9f3f8
|
[] |
no_license
|
briankoco/cc-scripts
|
5db6e8c498d8ff103cde6c7e4914620cc5bb2c52
|
51e78f88e96c51cc5d4c8fe6debae45ab1953724
|
refs/heads/master
| 2018-09-17T15:07:12.800875 | 2018-06-05T20:30:35 | 2018-06-05T20:30:35 | 120,139,270 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,003 |
py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from openstack import exceptions
from openstack.orchestration.v1 import _proxy
from openstack.orchestration.v1 import resource
from openstack.orchestration.v1 import software_config as sc
from openstack.orchestration.v1 import software_deployment as sd
from openstack.orchestration.v1 import stack
from openstack.orchestration.v1 import stack_environment
from openstack.orchestration.v1 import stack_files
from openstack.orchestration.v1 import stack_template
from openstack.orchestration.v1 import template
from openstack.tests.unit import test_proxy_base
class TestOrchestrationProxy(test_proxy_base.TestProxyBase):
def setUp(self):
super(TestOrchestrationProxy, self).setUp()
self.proxy = _proxy.Proxy(self.session)
def test_create_stack(self):
self.verify_create(self.proxy.create_stack, stack.Stack)
def test_create_stack_preview(self):
method_kwargs = {"preview": True, "x": 1, "y": 2, "z": 3}
self.verify_create(self.proxy.create_stack, stack.StackPreview,
method_kwargs=method_kwargs)
def test_find_stack(self):
self.verify_find(self.proxy.find_stack, stack.Stack)
def test_stacks(self):
self.verify_list(self.proxy.stacks, stack.Stack, paginated=False)
def test_get_stack(self):
self.verify_get(self.proxy.get_stack, stack.Stack)
def test_update_stack(self):
self.verify_update(self.proxy.update_stack, stack.Stack)
def test_delete_stack(self):
self.verify_delete(self.proxy.delete_stack, stack.Stack, False)
def test_delete_stack_ignore(self):
self.verify_delete(self.proxy.delete_stack, stack.Stack, True)
@mock.patch.object(stack.Stack, 'check')
def test_check_stack_with_stack_object(self, mock_check):
stk = stack.Stack(id='FAKE_ID')
res = self.proxy.check_stack(stk)
self.assertIsNone(res)
mock_check.assert_called_once_with(self.proxy)
@mock.patch.object(stack.Stack, 'existing')
def test_check_stack_with_stack_ID(self, mock_stack):
stk = mock.Mock()
mock_stack.return_value = stk
res = self.proxy.check_stack('FAKE_ID')
self.assertIsNone(res)
mock_stack.assert_called_once_with(id='FAKE_ID')
stk.check.assert_called_once_with(self.proxy)
@mock.patch.object(stack.Stack, 'find')
def test_get_stack_environment_with_stack_identity(self, mock_find):
stack_id = '1234'
stack_name = 'test_stack'
stk = stack.Stack(id=stack_id, name=stack_name)
mock_find.return_value = stk
self._verify2('openstack.proxy.BaseProxy._get',
self.proxy.get_stack_environment,
method_args=['IDENTITY'],
expected_args=[stack_environment.StackEnvironment],
expected_kwargs={'requires_id': False,
'stack_name': stack_name,
'stack_id': stack_id})
mock_find.assert_called_once_with(mock.ANY, 'IDENTITY',
ignore_missing=False)
def test_get_stack_environment_with_stack_object(self):
stack_id = '1234'
stack_name = 'test_stack'
stk = stack.Stack(id=stack_id, name=stack_name)
self._verify2('openstack.proxy.BaseProxy._get',
self.proxy.get_stack_environment,
method_args=[stk],
expected_args=[stack_environment.StackEnvironment],
expected_kwargs={'requires_id': False,
'stack_name': stack_name,
'stack_id': stack_id})
@mock.patch.object(stack_files.StackFiles, 'get')
@mock.patch.object(stack.Stack, 'find')
def test_get_stack_files_with_stack_identity(self, mock_find, mock_get):
stack_id = '1234'
stack_name = 'test_stack'
stk = stack.Stack(id=stack_id, name=stack_name)
mock_find.return_value = stk
mock_get.return_value = {'file': 'content'}
res = self.proxy.get_stack_files('IDENTITY')
self.assertEqual({'file': 'content'}, res)
mock_find.assert_called_once_with(mock.ANY, 'IDENTITY',
ignore_missing=False)
mock_get.assert_called_once_with(self.proxy)
@mock.patch.object(stack_files.StackFiles, 'get')
def test_get_stack_files_with_stack_object(self, mock_get):
stack_id = '1234'
stack_name = 'test_stack'
stk = stack.Stack(id=stack_id, name=stack_name)
mock_get.return_value = {'file': 'content'}
res = self.proxy.get_stack_files(stk)
self.assertEqual({'file': 'content'}, res)
mock_get.assert_called_once_with(self.proxy)
@mock.patch.object(stack.Stack, 'find')
def test_get_stack_template_with_stack_identity(self, mock_find):
stack_id = '1234'
stack_name = 'test_stack'
stk = stack.Stack(id=stack_id, name=stack_name)
mock_find.return_value = stk
self._verify2('openstack.proxy.BaseProxy._get',
self.proxy.get_stack_template,
method_args=['IDENTITY'],
expected_args=[stack_template.StackTemplate],
expected_kwargs={'requires_id': False,
'stack_name': stack_name,
'stack_id': stack_id})
mock_find.assert_called_once_with(mock.ANY, 'IDENTITY',
ignore_missing=False)
def test_get_stack_template_with_stack_object(self):
stack_id = '1234'
stack_name = 'test_stack'
stk = stack.Stack(id=stack_id, name=stack_name)
self._verify2('openstack.proxy.BaseProxy._get',
self.proxy.get_stack_template,
method_args=[stk],
expected_args=[stack_template.StackTemplate],
expected_kwargs={'requires_id': False,
'stack_name': stack_name,
'stack_id': stack_id})
@mock.patch.object(stack.Stack, 'find')
def test_resources_with_stack_object(self, mock_find):
stack_id = '1234'
stack_name = 'test_stack'
stk = stack.Stack(id=stack_id, name=stack_name)
self.verify_list(self.proxy.resources, resource.Resource,
paginated=False, method_args=[stk],
expected_kwargs={'stack_name': stack_name,
'stack_id': stack_id})
self.assertEqual(0, mock_find.call_count)
@mock.patch.object(stack.Stack, 'find')
def test_resources_with_stack_name(self, mock_find):
stack_id = '1234'
stack_name = 'test_stack'
stk = stack.Stack(id=stack_id, name=stack_name)
mock_find.return_value = stk
self.verify_list(self.proxy.resources, resource.Resource,
paginated=False, method_args=[stack_id],
expected_kwargs={'stack_name': stack_name,
'stack_id': stack_id})
mock_find.assert_called_once_with(mock.ANY, stack_id,
ignore_missing=False)
@mock.patch.object(stack.Stack, 'find')
@mock.patch.object(resource.Resource, 'list')
def test_resources_stack_not_found(self, mock_list, mock_find):
stack_name = 'test_stack'
mock_find.side_effect = exceptions.ResourceNotFound(
'No stack found for test_stack')
ex = self.assertRaises(exceptions.ResourceNotFound,
self.proxy.resources, stack_name)
self.assertEqual('No stack found for test_stack', six.text_type(ex))
def test_create_software_config(self):
self.verify_create(self.proxy.create_software_config,
sc.SoftwareConfig)
def test_software_configs(self):
self.verify_list(self.proxy.software_configs, sc.SoftwareConfig,
paginated=True)
def test_get_software_config(self):
self.verify_get(self.proxy.get_software_config, sc.SoftwareConfig)
def test_delete_software_config(self):
self.verify_delete(self.proxy.delete_software_config,
sc.SoftwareConfig, True)
self.verify_delete(self.proxy.delete_software_config,
sc.SoftwareConfig, False)
def test_create_software_deployment(self):
self.verify_create(self.proxy.create_software_deployment,
sd.SoftwareDeployment)
def test_software_deployments(self):
self.verify_list(self.proxy.software_deployments,
sd.SoftwareDeployment, paginated=False)
def test_get_software_deployment(self):
self.verify_get(self.proxy.get_software_deployment,
sd.SoftwareDeployment)
def test_update_software_deployment(self):
self.verify_update(self.proxy.update_software_deployment,
sd.SoftwareDeployment)
def test_delete_software_deployment(self):
self.verify_delete(self.proxy.delete_software_deployment,
sd.SoftwareDeployment, True)
self.verify_delete(self.proxy.delete_software_deployment,
sd.SoftwareDeployment, False)
@mock.patch.object(template.Template, 'validate')
def test_validate_template(self, mock_validate):
tmpl = mock.Mock()
env = mock.Mock()
tmpl_url = 'A_URI'
ignore_errors = 'a_string'
res = self.proxy.validate_template(tmpl, env, tmpl_url, ignore_errors)
mock_validate.assert_called_once_with(
self.proxy, tmpl, environment=env, template_url=tmpl_url,
ignore_errors=ignore_errors)
self.assertEqual(mock_validate.return_value, res)
def test_validate_template_invalid_request(self):
err = self.assertRaises(exceptions.InvalidRequest,
self.proxy.validate_template,
None, template_url=None)
self.assertEqual("'template_url' must be specified when template is "
"None", six.text_type(err))
|
[
"[email protected]"
] | |
6c2600913c51f10bf6c0e7f362a0a11f1888479f
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02422/s883793535.py
|
8a63bbefd3dc6a9170a3efaa6447ac90fe2600e7
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 506 |
py
|
#! python3
# transformation.py
sent = input()
def print_(arr):
global sent
print(sent[int(arr[0]):int(arr[1])+1])
def reverse_(arr):
global sent
sent = sent[:int(arr[0])] + sent[int(arr[0]):int(arr[1])+1][::-1] + sent[int(arr[1])+1:]
def replace_(arr):
global sent
sent = sent[:int(arr[0])] + arr[2] + sent[int(arr[1])+1:]
ops = {'print': print_, 'reverse': reverse_, 'replace': replace_}
q = int(input())
for i in range(q):
arr = input().split()
ops[arr[0]](arr[1:])
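# Example session (hypothetical stdin):
#   abcde               initial string
#   3                   number of commands
#   replace 1 3 xyz     -> sent becomes 'axyze'
#   reverse 0 2         -> sent becomes 'yxaze'
#   print 1 4           -> prints 'xaze'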
|
[
"[email protected]"
] | |
7c00b913a6745d7e2678de5fe3526def5f55a2ef
|
cfd5892a220ec7702d5c416aa1821d2429480ede
|
/neodroidagent/common/architectures/distributional/__init__.py
|
ce4e44cdf60a3308c53090f5e034df2228bd23f1
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
pything/agent
|
611eac80348127b951a72ca76d2ab1f5a7732775
|
71c3bc072a8f3b7c3c1d873757cf5a1dafc3302d
|
refs/heads/master
| 2022-04-28T08:13:27.705296 | 2020-11-17T16:58:46 | 2020-11-17T16:58:46 | 150,600,674 | 0 | 0 |
Apache-2.0
| 2019-01-10T11:01:17 | 2018-09-27T14:31:45 |
Python
|
UTF-8
|
Python
| false | false | 151 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Christian Heider Nielsen"
__doc__ = ""
from .categorical import *
from .normal import *
|
[
"[email protected]"
] | |
975d95103dc51fff26bbb0543f1d8172c841ff9b
|
ced81611f03554989bd338ac32b08fd393ac424a
|
/src/mlog_kafka.py
|
c274ec8cdc5a6f462590f13eed0f9a27fa910e32
|
[] |
no_license
|
jack139/mlog
|
c20a8777c326014e7e319e4f80e9408a57ed9190
|
fcedd1d22bd4043e614d19b49735d83d0ca538cc
|
refs/heads/master
| 2023-03-17T09:46:49.884896 | 2020-05-12T10:21:15 | 2020-05-12T10:21:15 | 341,840,875 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,709 |
py
|
# -*- coding: utf-8 -*-
# Read log data in from Kafka:
#
#
import sys, os
from kafka import KafkaConsumer
import predict
out_dir = 'logs/rt'
# Derive a time-bucket label from a log line's date/time
# Reference format: 2020/04/17 07:09:27 [error] 26119#0: ...
def get_nginx_error_label(log, minute=60): # bucket into `minute`-minute intervals, 60 by default
    log_split = log.split()
    if len(log_split)<2: # possibly an empty line
        return None
    date = log_split[0]
    time = log_split[1].split(':')
    q = int(time[1])//minute
    return '%s_%s_%d'%(date, time[0], q) # e.g. 2020/04/17_07_0, one label per interval
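# Example (hypothetical line), with the default 60-minute bucket:
#   get_nginx_error_label("2020/04/17 07:09:27 [error] 26119#0: ...")
#   -> "2020/04/17_07_0"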
# Run anomaly prediction on a batch of log lines
def predict_it(log_lines, label):
    struct_log, unknown_templates = predict.parse_log(log_lines)
    y_test = predict.predict_IM(struct_log)
    print(y_test)
    if y_test[0]==1: # anomaly detected: save the log batch
        filepath = os.path.join(out_dir, 'anomaly_'+label+'.log')
        with open(filepath, 'w') as f:
            f.write(''.join(log_lines))
            if len(unknown_templates)>0:
                f.write('\nUNKNOWN templates:\n')
                f.write(''.join(unknown_templates))
        print('-------------------->>>> ANOMALY detected:', filepath)
        sys.stdout.flush()
if __name__ == '__main__':
    if len(sys.argv)>1:
        out_dir = sys.argv[1] # first CLI argument: directory where anomaly logs are saved
    current_label = None
    log_lines = []
    last_dt = ''
    consumer = KafkaConsumer('mlog', bootstrap_servers=['localhost:9092'])
    for message in consumer:
        line = message.value.decode('utf-8')
        if len(line)==0:
            continue
        if (len(line.split('\n\r'))>1): # check for multiple lines in one message; should not happen
            print('WARNING: more than one line!')
            print(line.split('\n\r'))
        # Pre-process the Java logs coming from wechat-manager
        if line[0] == '\t': # body of a Java stack trace, ignore
            continue
        if line[0].isdigit(): # normally formatted log line
            l2 = line.split()
            last_dt = l2[0]+' '+l2[1]
        else: # first line of a Java exception
            if last_dt=='': # no timestamp seen yet, skip
                continue
            else:
                line = last_dt + ' [-] ERROR ' + line
        label = get_nginx_error_label(line, predict.period)
        if label is None:
            continue
        if label != current_label:
            # a complete batch of log lines has accumulated; run the prediction
            if len(log_lines)>0:
                predict_it(log_lines, current_label)
            current_label = label
            log_lines = []
            print(current_label)
        log_lines.append(line)
    # flush the final batch
    if len(log_lines)>0:
        predict_it(log_lines, current_label)
|
[
"[email protected]"
] | |
b7452f3daa64a23ec2b6a144ea7b499a2a56416e
|
ab7c6042f69a921095ac036bd6919a81255847b7
|
/pipeline/article_processing/controller.py
|
c568918eb7861d4d2c38454cd50b4698cb7dc070
|
[] |
no_license
|
EST-Team-Adam/TheReadingMachine
|
49eb768d08ec0e266d076f113933a04b4bf66674
|
25c217602b3909410d9a2dc6189e3de584146a1b
|
refs/heads/master
| 2022-12-12T20:09:46.945526 | 2019-10-21T20:27:25 | 2019-10-21T20:27:25 | 70,123,521 | 1 | 2 | null | 2022-11-21T21:26:36 | 2016-10-06T04:46:20 |
HTML
|
UTF-8
|
Python
| false | false | 7,906 |
py
|
from __future__ import division
import itertools
import pandas as pd
import string
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from nltk.stem import SnowballStemmer
from nltk import pos_tag
from datetime import datetime
# Manual invalid title and link
maintenance_title = ['Reduced service at Agrimoney.com',
'Apology to Agrimoney.com subscribers']
irrelevant_link = ['https://www.euractiv.com/topics/news/?type_filter=video',
'http://www.euractiv.com/topics/news/?type_filter=video',
'http://www.euractiv.com/topics/news/?type_filter=news',
'http://www.euractiv.com/topics/news/?type_filter=all',
'https://www.euractiv.com/topics/news/?type_filter=all',
'https://www.euractiv.com/topics/news/',
'https://www.euractiv.com/topics/news/?type_filter=news',
'http://www.euractiv.com/topics/news/',
'https://www.euractiv.com/news/',
'http://www.euractiv.com/news/']
def scraper_post_processing(raw_articles, model_start_date, id_col='id',
article_col='article', title_col='title',
link_col='link', date_col='date'):
'''Perform post processing of articles scrapped by the scrapper.
There have been a few issues identified regarding the
scraper. Certain issues are either impossible or difficult to
eliminate with the scrapy implementation. Thus, we post process
the data to resolve these known issues.
'''
# If an ID has already been created, then we drop it.
if id_col in raw_articles.columns:
raw_articles = raw_articles.drop(id_col, 1)
    # Drop duplicates based on article content
processed_articles = (raw_articles
.drop_duplicates(subset=article_col))
    # Remove entries that are associated with maintenance or service.
processed_articles = processed_articles[~processed_articles[title_col].isin(
maintenance_title)]
    # Remove links that are not associated with news articles.
processed_articles = processed_articles[~processed_articles[link_col].isin(
irrelevant_link)]
# Subset the data only after the model_start_date
processed_articles = processed_articles[processed_articles[date_col]
> model_start_date]
return processed_articles
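# A minimal usage sketch (hypothetical data; column names follow the defaults above):
#   import pandas as pd
#   raw = pd.DataFrame({'article': ['wheat prices rose', 'wheat prices rose'],
#                       'title': ['Wheat up', 'Wheat up'],
#                       'link': ['http://example.com/a', 'http://example.com/a'],
#                       'date': ['2017-01-02', '2017-01-02']})
#   clean = scraper_post_processing(raw, model_start_date='2016-01-01')
#   # -> one row: the duplicate article is dropped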
def text_processor(text, remove_captalisation=True, remove_noun=False,
remove_numerical=True, remove_punctuation=True,
stem=False, tokenizer=None):
'''The function process the texts with the intention for topic
modelling.
The following steps are performed:
1. Tokenise
2. Prune words
3. Removal of stopwords
Details:
    The regular expression tokeniser is used as we are interested only
    in the key words; punctuation is irrelevant. Numerical and
captalisation removal can be specified as a parameter. Stop words
and certain manually coded phrases are also removed.
NOTE(Michael): The remove_noun is currently inactive. Further
investigation is required for the implementation.
'''
# Tokenize
if tokenizer is None:
tokenizer = RegexpTokenizer(r'\w+')
tokenized_text = tokenizer.tokenize(text)
else:
tokenized_text = tokenizer(text)
if remove_punctuation:
punct = string.punctuation
tokenized_text = [t for t in tokenized_text if t not in punct]
# This step is extremely computational expensive. The benchmark
# shows it would increase the total time by 12 times.
if remove_noun:
noun_set = set(['NNP', 'NNPS'])
tokenized_text = [w for w, t in pos_tag(tokenized_text)
if t not in noun_set]
# Stemming
if stem:
stemmer = SnowballStemmer('english')
tokenized_text = [stemmer.stem(word) for word in tokenized_text]
# This option is available as certain capital word has intrinsic
# meaning. e.g. Apple vs apple.
if remove_captalisation:
tokenized_text = [word.lower() for word in tokenized_text]
if remove_numerical:
tokenized_text = [word for word in tokenized_text
if not word.isdigit()]
    # Remove stopwords and the manual exclusion set
meaningless_words = ['euractiv', 'com',
'bloomberg', 'reuters', 'jpg', 'png']
exclusion_words = stopwords.words('english') + meaningless_words
nonstopword_text = [word
for word in tokenized_text
if word.lower() not in exclusion_words]
return nonstopword_text
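# Example (illustrative output; exact tokens depend on the NLTK stopword list):
#   text_processor("Wheat prices rose 5% in 2020 according to Reuters")
#   -> ['wheat', 'prices', 'rose', 'according']
#   # stopwords, digits and the excluded source name 'reuters' are removed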
def article_summariser(article_list):
'''Function to summarise the processing of the article text.
The purpose of this summary is to identify any significant changes
to the text extraction and processing.
'''
article_count = len(article_list)
vocab_size = len(set(itertools.chain.from_iterable(article_list)))
article_length = [len(t) for t in article_list]
article_vocab_size = [len(set(t)) for t in article_list]
lexical_diversity = [vs / l if l != 0 else 0
for l, vs in zip(article_length, article_vocab_size)]
average_lexical_diversity = sum(lexical_diversity) / len(lexical_diversity)
average_article_length = sum(article_length) / len(article_length)
# TODO (Michael): Should also save the data sources.
summary = {'createTime': datetime.utcnow(),
'article_count': article_count,
'vocab_size': vocab_size,
'average_lexical_diversity': average_lexical_diversity,
'average_article_length': average_article_length}
return pd.DataFrame(summary, index=[0])
def text_preprocessing(article_df, article_col, min_length,
remove_captalisation=True, remove_noun=False,
remove_numerical=True, remove_punctuation=True,
stem=False, date_col='date'):
'''Process the text extracted from the scrapper.
In addition, articles with tokens less than the min_length
specified will be dropped. This is because certain articles were
extracted incorrectly or contains insufficient information, thus
they are removed to avoid contamination of the output.
'''
# Tokenise and process the text
tokenised_text = [text_processor(a,
remove_captalisation=remove_captalisation,
remove_noun=remove_noun,
remove_numerical=remove_numerical,
remove_punctuation=remove_punctuation,
stem=stem)
for a in article_df[article_col]]
# Find the index of entries where the article length is less than
# the specified length. The entries are then removed from the
# article and the original data frame.
min_length_ind = [i for i, t in enumerate(tokenised_text)
if len(t) > min_length]
min_length_tokens = [tokenised_text[i] for i in min_length_ind]
exclude_min_length_df = article_df.iloc[min_length_ind, ].copy()
# Create the summary
summary = article_summariser(min_length_tokens)
# Concatenate the text together. This step is to enable the result
# to be saved in to a standard database.
exclude_min_length_df[article_col] = [' '.join(tt)
for tt in min_length_tokens]
# Recreate the index
exclude_min_length_df.sort_values([date_col], ascending=[1], inplace=True)
exclude_min_length_df['id'] = range(1, exclude_min_length_df.shape[0] + 1)
return exclude_min_length_df, summary
|
[
"[email protected]"
] | |
89f25f38fdef1463139ed5b10736c753d7dc5d5a
|
59636b143a2ab189145b17a7ea9a38de5af1f7a5
|
/All/selenium_all/guanjiaPlus/zonghe_test_object.py
|
d8dfaeeffee6ccfdb47fb72792cb45fdb42eb84d
|
[] |
no_license
|
woshichenya/hezi
|
880a70c34cc61b6b6bcf1ccb65fa54989595fb71
|
4211ff8ef78f5d15d8fc8065247f916dfe9d305d
|
refs/heads/master
| 2020-04-28T21:46:02.664025 | 2019-05-14T08:47:33 | 2019-05-14T08:47:33 | 175,593,966 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,673 |
py
|
import guanjiaPlus.xiaochengxuguanliyemian
import traceback
import time
from selenium.webdriver.support.ui import Select
'''Invoke the script that enters the mini-program management page'''
go=guanjiaPlus.xiaochengxuguanliyemian.go
Go=guanjiaPlus.xiaochengxuguanliyemian.Go
GO=guanjiaPlus.xiaochengxuguanliyemian.GO
bug_num=0
def dianpu():
    # The Chinese string literals are the on-screen element texts and log
    # messages that the Ctext locator helpers match against.
    go.Ctext("店铺","店铺超链接","进入店铺超链接","Bug--无法进入店铺超链接")
    # Slideshow operations
go.Ctext("幻灯片","幻灯片超链接","进入幻灯片超链接","Bug--无法进入幻灯片超链接")
go.Ctext("添加幻灯片", "添加幻灯片超链接", "进入添加幻灯片超链接", "Bug--无法进入添加幻灯片超链接")
go.Ctext("返回列表","返回列表按钮","点击返回列表按钮","Bug--无法点击返回列表按钮")
    # Navigation icons
go.Ctext("导航图标", "导航图标超链接", "进入导航图标超链接", "Bug--无法进入导航图标超链接")
go.Ctext("添加首页导航", "添加首页导航超链接", "点击添加首页导航超链接", "Bug--无法点击添加首页导航超链接")
go.CTag_name_zidingyi("input","value","返回列表","返回列表按钮", "点击返回列表按钮", "Bug--无法点击返回列表按钮")
    # Ads
go.Ctext("广告", "广告超链接", "进入广告超链接", "Bug--无法进入广告超链接")
go.Ctext("添加广告", "添加广告超链接", "点击添加广告超链接", "Bug--无法点击添加广告超链接")
go.CTag_name_zidingyi("input", "value", "返回列表", "返回列表按钮", "点击返回列表按钮", "Bug--无法点击返回列表按钮")
    # Magic-cube (grid) recommendations
go.Ctext("魔方推荐", "魔方推荐超链接", "点击魔方推荐超链接", "Bug--无法点击魔方推荐超链接")
go.Cxpath("/html/body/div[6]/div[2]/form/table/tfoot/tr/td/button","添加魔方按钮", "点击添加魔方按钮", "Bug--无法点击添加魔方按钮")
    # Product recommendations
go.Ctext("商品推荐","商品推荐超链接","点击商品超链接按钮","Bug--无法点击商品按钮")
    # Layout settings
go.Ctext("排版设置", "排版设置超链接", "点击排版设置按钮", "Bug--无法点击排版设置按钮")
    # Mall
go.C_class_text("商城超链接","div",'menu-header ',"商城", "点击商城按钮", "Bug--无法点击商城按钮")
go.Ctext("公告管理", "公告管理超链接", "点击公告管理按钮", "Bug--无法点击公告管理按钮")
go.Ctext("添加公告", "添加公告超链接", "点击添加公告超链接", "Bug--无法点击添加公告超链接")
go.CTag_name_zidingyi("input", "value", "返回列表", "返回列表按钮", "点击返回列表按钮", "Bug--无法点击返回列表按钮")
go.Ctext("评价管理", "评价管理超链接", "点击评价管理按钮", "Bug--无法点击评价管理按钮")
go.Ctext("添加虚拟评论", "添加虚拟评论超链接", "点击添加虚拟评论超链接", "Bug--无法点击添加虚拟评论超链接")
go.CTag_name_zidingyi("input", "value", "返回列表", "返回列表按钮", "点击返回列表按钮", "Bug--无法点击返回列表按钮")
go.Ctext("退货地址", "退货地址超链接", "点击退货地址按钮", "Bug--无法点击退货地址按钮")
go.Ctext("添加退货地址", "添加退货地址超链接", "点击添加退货地址超链接", "Bug--无法点击添加退货地址超链接")
go.CTag_name_zidingyi("input", "value", "返回列表", "返回列表按钮", "点击返回列表按钮", "Bug--无法点击返回列表按钮")
    # Delivery methods
#go.Ctext("配送方式", "配送方式超链接", "点击配送方式按钮", "Bug--无法点击配送方式按钮")
go.C_class_text("配送方式超链接", "div", 'menu-header ', "配送方式", "点击配送方式按钮", "Bug--无法点击配送方式按钮")
go.Ctext("普通快递", "普通快递超链接", "点击普通快递按钮", "Bug--无法点击普通快递按钮")
go.Ctext("添加配送方式", "添加配送方式超链接", "点击添加配送方式超链接", "Bug--无法点击添加配送方式超链接")
go.CTag_name_zidingyi("input", "value", "返回列表", "返回列表按钮", "点击返回列表按钮", "Bug--无法点击返回列表按钮")
    # Shop decoration
go.Ctext("店铺装修", "店铺装修超链接", "点击店铺装修按钮", "Bug--无法点击店铺装修按钮")
try:
    dianpu()
    print("***************************************************tests passed")
except:
    print("***************************************************bugs occurred during the member tests")
    bug_num += 1
|
[
"[email protected]"
] | |
1e12cfdc813dbe8b0998b82ee3627f8c4a7712ae
|
0adb68bbf576340c8ba1d9d3c07320ab3bfdb95e
|
/regexlib/2021-5-15/python_re2_test_file/regexlib_3439.py
|
343711792907104d76d4f43c1009d2d954ef90b2
|
[
"MIT"
] |
permissive
|
agentjacker/ReDoS-Benchmarks
|
c7d6633a3b77d9e29e0ee2db98d5dfb60cde91c6
|
f5b5094d835649e957bf3fec6b8bd4f6efdb35fc
|
refs/heads/main
| 2023-05-10T13:57:48.491045 | 2021-05-21T11:19:39 | 2021-05-21T11:19:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 533 |
py
|
# 3439
# [\w*|\W*]*<[[\w*|\W*]*|/[\w*|\W*]]>[\w*|\W*]*
# POLYNOMIAL
# nums:5
# POLYNOMIAL AttackString:""+"<"*5000+"@1 _SLQ_2"
import re2 as re
from time import perf_counter
regex = """[\w*|\W*]*<[[\w*|\W*]*|/[\w*|\W*]]>[\w*|\W*]*"""
REGEX = re.compile(regex)
for i in range(0, 150000):
ATTACK = "" + "<" * i * 10000 + "@1 _SLQ_2"
LEN = len(ATTACK)
BEGIN = perf_counter()
m = REGEX.search(ATTACK)
# m = REGEX.match(ATTACK)
DURATION = perf_counter() - BEGIN
print(f"{i *10000}: took {DURATION} seconds!")
|
[
"[email protected]"
] | |
718a8d60b16760ec352731a29ee43b53d90b448c
|
1dd72195bc08460df7e5bb82d3b7bac7a6673f49
|
/api/app/wildfire_one/query_builders.py
|
8a69476dd8130f4f63a78b313c9e2d522773494d
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
bcgov/wps
|
c4347c39cadfad6711502d47776abc8d03895593
|
0ba707b0eddc280240964efa481988df92046e6a
|
refs/heads/main
| 2023-08-19T00:56:39.286460 | 2023-08-16T18:03:06 | 2023-08-16T18:03:06 | 235,861,506 | 35 | 9 |
Apache-2.0
| 2023-09-11T21:35:07 | 2020-01-23T18:42:10 |
Python
|
UTF-8
|
Python
| false | false | 5,089 |
py
|
""" Query builder classes for making requests to WFWX API """
from typing import List, Tuple
from abc import abstractmethod, ABC
from app import config
class BuildQuery(ABC):
""" Base class for building query urls and params """
def __init__(self):
""" Initialize object """
self.max_page_size = config.get('WFWX_MAX_PAGE_SIZE', 1000)
self.base_url = config.get('WFWX_BASE_URL')
@abstractmethod
def query(self, page) -> Tuple[str, dict]:
""" Return query url and params """
class BuildQueryStations(BuildQuery):
""" Class for building a url and RSQL params to request all active stations. """
def __init__(self):
""" Prepare filtering on active, test and project stations. """
super().__init__()
self.param_query = None
# In conversation with Dana Hicks, on Apr 20, 2021 - Dana said to show active, test and project.
for status in ('ACTIVE', 'TEST', 'PROJECT'):
if self.param_query:
self.param_query += f',stationStatus.id=="{status}"'
else:
self.param_query = f'stationStatus.id=="{status}"'
def query(self, page) -> Tuple[str, dict]:
""" Return query url and params with rsql query for all weather stations marked active. """
params = {'size': self.max_page_size, 'sort': 'displayLabel',
'page': page, 'query': self.param_query}
url = f'{self.base_url}/v1/stations'
return url, params
class BuildQueryByStationCode(BuildQuery):
""" Class for building a url and params to request a list of stations by code """
def __init__(self, station_codes: List[int]):
""" Initialize object """
super().__init__()
self.querystring = ''
for code in station_codes:
if len(self.querystring) > 0:
self.querystring += ' or '
self.querystring += f'stationCode=={code}'
def query(self, page) -> Tuple[str, dict]:
""" Return query url and params for a list of stations """
params = {'size': self.max_page_size,
'sort': 'displayLabel', 'page': page, 'query': self.querystring}
url = f'{self.base_url}/v1/stations/rsql'
return url, params
class BuildQueryAllHourliesByRange(BuildQuery):
""" Builds query for requesting all hourlies in a time range"""
def __init__(self, start_timestamp: int, end_timestamp: int):
""" Initialize object """
super().__init__()
self.querystring: str = "weatherTimestamp >=" + \
str(start_timestamp) + ";" + "weatherTimestamp <" + str(end_timestamp)
def query(self, page) -> Tuple[str, dict]:
""" Return query url for hourlies between start_timestamp, end_timestamp"""
params = {'size': self.max_page_size, 'page': page, 'query': self.querystring}
url = f'{self.base_url}/v1/hourlies/rsql'
return url, params
class BuildQueryAllForecastsByAfterStart(BuildQuery):
""" Builds query for requesting all dailies in a time range"""
def __init__(self, start_timestamp: int):
""" Initialize object """
super().__init__()
self.querystring = f"weatherTimestamp >={start_timestamp};recordType.id == 'FORECAST'"
def query(self, page) -> Tuple[str, dict]:
""" Return query url for dailies between start_timestamp, end_timestamp"""
params = {'size': self.max_page_size, 'page': page, 'query': self.querystring}
url = f'{self.base_url}/v1/dailies/rsql'
return url, params
class BuildQueryDailiesByStationCode(BuildQuery):
""" Builds query for requesting dailies in a time range for the station codes"""
def __init__(self, start_timestamp: int, end_timestamp: int, station_ids: List[str]):
""" Initialize object """
super().__init__()
self.start_timestamp = start_timestamp
self.end_timestamp = end_timestamp
self.station_ids = station_ids
def query(self, page) -> Tuple[str, dict]:
""" Return query url for dailies between start_timestamp, end_timestamp"""
params = {'size': self.max_page_size,
'page': page,
'startingTimestamp': self.start_timestamp,
'endingTimestamp': self.end_timestamp,
'stationIds': self.station_ids}
url = (f'{self.base_url}/v1/dailies/search/findDailiesByStationIdIsInAndWeather' +
'TimestampBetweenOrderByStationIdAscWeatherTimestampAsc')
return url, params
class BuildQueryStationGroups(BuildQuery):
""" Builds a query for requesting all station groups """
def __init__(self):
""" Initilize object. """
super().__init__()
self.param_query = None
def query(self, page) -> Tuple[str, dict]:
""" Return query url and params with query for all weather stations groups. """
params = {'size': self.max_page_size, 'page': page, 'sort': 'groupOwnerUserId,asc'}
url = f'{self.base_url}/v1/stationGroups'
return url, params
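# Minimal usage sketch (values hypothetical; assumes config supplies
# WFWX_BASE_URL and WFWX_MAX_PAGE_SIZE): each builder pairs a WFWX endpoint
# URL with the query params for one page of results, ready for an HTTP client.
#
#   url, params = BuildQueryStations().query(page=0)
#   # url roughly f'{WFWX_BASE_URL}/v1/stations'; params includes
#   # 'size', 'sort', 'page' and the RSQL 'query' built in __init__.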
|
[
"[email protected]"
] | |
cda579e2676fc52ddc8593410c78e43762839efd
|
53d0b80b64d201def809ef11acbeca38da2c1574
|
/hw_proyecciones/migrations/0003_drilldown.py
|
80145377e2f3e6f8f620aebac2f51441c52f7df9
|
[] |
no_license
|
NOKIA-NI/niproyecciones
|
068558b27afd26bc2eb6ab9c32f98a37742817ce
|
90c5829250643443f90ae4cbb9b234464a2fcaef
|
refs/heads/master
| 2022-12-11T18:22:32.928214 | 2018-10-25T14:52:52 | 2018-10-25T14:52:52 | 127,053,712 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,609 |
py
|
# Generated by Django 2.0.9 on 2018-10-25 14:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hw_proyecciones', '0002_hwcontrolrfe_hwsitelist'),
]
operations = [
migrations.CreateModel(
name='DrillDown',
fields=[
('id_drill_down_d1', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Site_Name', models.CharField(blank=True, max_length=255, null=True)),
('Implemented', models.CharField(blank=True, max_length=255, null=True)),
('TSS', models.CharField(blank=True, max_length=255, null=True)),
('RFIC', models.IntegerField(blank=True, null=True)),
('FC_RFIC', models.IntegerField(blank=True, null=True)),
('CPO_Status1', models.CharField(blank=True, max_length=255, null=True)),
('CPO_Status2', models.CharField(blank=True, max_length=255, null=True)),
('HW_Status', models.CharField(blank=True, max_length=255, null=True)),
('FC_HW', models.IntegerField(blank=True, null=True)),
('Status_Despachos', models.CharField(blank=True, max_length=255, null=True)),
('FC_Antenas', models.IntegerField(blank=True, null=True)),
],
options={
'verbose_name': 'DrillDown',
'verbose_name_plural': 'DrillDown',
'db_table': 'DrillDown_D1',
'managed': False,
},
),
]
|
[
"[email protected]"
] | |
da137d50e78fd365f0c5dc7bb54e588af907cded
|
c48570083578f2ad5b10d397889c452a69091582
|
/sge-python/practica05/ej8.py
|
ecaad2fbf012935615a6b80628281ab05a3fbc1b
|
[] |
no_license
|
RamonMR95/sge-python
|
c9471fcf3e33eeda540982a8f947971c94e8254c
|
895e3a7012f62518c4c1f61717f18f9d493f2ad0
|
refs/heads/master
| 2022-04-09T07:13:26.633241 | 2020-03-01T11:18:44 | 2020-03-01T11:18:44 | 234,789,451 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,845 |
py
|
#!/usr/bin/env python3
# 8.- Suppose we maintain two lists with the same number of elements. One of them,
# called alumnos, contains a series of names and the other, called notas, a series of floats between 0.0 and 10.0.
# In notas we store the grade obtained by the students whose names are in alumnos. The grade notas[i]
# corresponds to the student alumnos[i]. A possible configuration of the lists would be this:
# alumnos = ['Ana_Pi','Pau_Lopez', 'Luis_Sol', 'Mar_Vega', 'Paz_Mir']
# notas = [10, 5.5, 2.0, 8.5, 7.0]
# According to it, the student Pau López, for example, was graded with a 5.5. We are asked to design a procedure
# that receives the two lists and a string with a student's name. If the student belongs to the
# class, the procedure will print their name and grade on screen. If they are not a student included in the list,
# a warning message will be printed.
# Implement the following functions:
# 1) Design a function that receives the two lists and returns the names of all the students
# who passed the exam
# 2) Design a function that receives the list of grades and returns the number of passes
# 3) Design a function that receives the two lists and returns the names of all the students who
# obtained the highest grade.
# 4) Design a function that receives the two lists and returns the names of all the students whose
# grade is equal to or above the average grade.
# 5) Design a function that receives the two lists and a name (a string); if the name is in the
# list of students, it will return their grade; if not, it will return None.
# Using the functions above, and designing new functions if necessary, build a
# program that presents the following menu and allows executing the actions for each option:
# 1) Add student and grade
# 2) Show the list of students with their grades
# 3) Show passing students
# 4) Number of passes
# 5) Students with the highest grade
# 6) Students with a grade greater than or equal to the average
# 7) Student's grade
# 8) End program execution
__author__ = "Ramón Moñino Rubio"
__email__ = "[email protected]"
__version__ = "1.0.0"
alumnos = ['Ana_Pi', 'Pau_Lopez', 'Luis_Sol', 'Mar_Vega', 'Paz_Mir']
notas = [10, 5.5, 2.0, 8.5, 7.0]
def mostrar_aprobados(alums, nots):
aprobados = []
    for i in range(len(nots)):
        if nots[i] >= 5:
            aprobados.append(alums[i])
return aprobados
def mostrar_numero_aprobados(nots):
n_aprobados = 0
for nota in nots:
if nota >= 5:
n_aprobados += 1
return n_aprobados
def mostrar_alumnos_max_nota(alums, nots):
alumnos_max_nota = []
max_nota = max(nots)
for i in range(len(nots)):
if nots[i] == max_nota:
            alumnos_max_nota.append(alums[i])
return alumnos_max_nota
def mostrar_alumnos_nota_sup_media(alums, nots):
alumnos_media = []
media = sum(nots) / len(nots)
for i in range(len(nots)):
if nots[i] >= media:
            alumnos_media.append(alums[i])
return alumnos_media
def is_in_alumnos(alums, nots, nombre):
for i in range(len(alums)):
        if alums[i] == nombre:
return nots[i]
return None
def mostrar_alumnos_calificaciones(alums, nots):
for i in range(len(alums)):
print(f"Alumno: {alums[i]}: {nots[i]}")
menu = f"1) Añadir estudiante y calificación\n" \
f"2) Mostrar lista de estudiantes con sus calificaciones\n" \
f"3) Mostrar estudiantes aprobados\n" \
f"4) Número de aprobados\n" \
f"5) Estudiantes con máxima nota\n" \
f"6) Estudiantes con nota mayor o igual a la media\n" \
f"7) Nota estudiante\n" \
f"8) Finalizar ejecución del programa\n"
opcion = int(input(menu))
while opcion != 8:
if opcion == 1:
print("Añadir estudiante: ")
nombre = input("Introduce nombre del estudiante: ")
calificacion = float(input("Introduce la nota del estudiante: "))
alumnos.append(nombre)
notas.append(calificacion)
elif opcion == 2:
mostrar_alumnos_calificaciones(alumnos, notas)
elif opcion == 3:
print(mostrar_aprobados(alumnos, notas))
elif opcion == 4:
print(mostrar_numero_aprobados(notas))
elif opcion == 5:
print(mostrar_alumnos_max_nota(alumnos, notas))
elif opcion == 6:
print(mostrar_alumnos_nota_sup_media(alumnos, notas))
elif opcion == 7:
nombre = input("Introduce el nombre del estudiante: ")
nota = is_in_alumnos(alumnos, notas, nombre)
        if nota is not None:
            print(f" Student: {nombre}, Grade: {nota}")
        else:
            print("The student does not exist in the list")
opcion = int(input(menu))
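# Non-interactive sketch of the helper functions using the initial lists above:
#   mostrar_aprobados(alumnos, notas)          # ['Ana_Pi', 'Pau_Lopez', 'Mar_Vega', 'Paz_Mir']
#   mostrar_numero_aprobados(notas)            # 4
#   is_in_alumnos(alumnos, notas, 'Luis_Sol')  # 2.0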
|
[
"[email protected]"
] | |
47c7839a7f260182143c3fbbc27d47a4c6133d74
|
d02abf740dd326f12b48357692af41282616a271
|
/dataset-analysis/pyplot/simple_bar.py
|
2b87ed287b5210a8c63a83d86b0d214fb17486bb
|
[] |
no_license
|
jacowk/python-projects
|
a762c5542b7dab6cd7915b367b472a1a20d63a0a
|
2e5f39868d0cdf292b4b1a792e946d169de61780
|
refs/heads/master
| 2021-01-05T05:18:35.550518 | 2020-09-27T08:42:11 | 2020-09-27T08:42:11 | 240,889,795 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 367 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 12 18:15:55 2020
@author: jaco
"""
from matplotlib import pyplot as plt
DayOfWeekOfCall = [1,2,3]
DispatchesOnThisWeekday = [77, 32, 42]
LABELS = ["Monday", "Tuesday", "Wednesday"]
plt.bar(DayOfWeekOfCall, DispatchesOnThisWeekday, align='center')
plt.xticks(DayOfWeekOfCall, LABELS)
plt.show()
|
[
"[email protected]"
] | |
a3ae4cd9c8a7f48e4de904e34f3da61fec637a6d
|
f7cc8d3f04d34b7d7e64e1b54ba458e4b39bce49
|
/PythonLibraries/kiwisolver/1.0.1/package.py
|
187167ad925ae027309d3867621154cc3d81d323
|
[
"MIT"
] |
permissive
|
cashmerepipeline/CashmereRez
|
80a53af61ddb8506bb111cd16450538c3b405689
|
13a73931d715ffac27c337abcd6df97b5c47534b
|
refs/heads/master
| 2020-05-09T12:59:28.106229 | 2019-04-17T16:39:46 | 2019-04-17T16:39:46 | 181,132,180 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 419 |
py
|
# -*- coding: utf-8 -*-
name = u'kiwisolver'
version = '1.0.1'
description = \
"""
kiwisolver library
"""
requires = []
variants = []
def commands():
import os
kiwisolver_libs_path = os.path.join(getenv("PYTHON_LIBS_PATH"), "kiwisolver", "%s" % version)
# env.PATH.append(os.path.join(kiwisolver_libs_path, 'lib'))
env.PYTHONPATH.append(os.path.join(kiwisolver_libs_path, 'lib'))
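# Usage sketch (assumption: a standard rez setup): resolving this package,
# e.g. with `rez env kiwisolver-1.0.1`, executes commands(), which appends
# the versioned lib directory under PYTHON_LIBS_PATH to PYTHONPATH so that
# `import kiwisolver` works inside the resolved environment.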
|
[
"[email protected]"
] | |
a64453ac95c425e6155551e5d74b66ee20009f84
|
f63db957cb63b3a37642d138d3092f8f897d6a53
|
/roundup_getnodes/roundup/backends/back_anydbm.py
|
94f7047902880a7917b096c53c787820f6fc6b0c
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"ZPL-2.1",
"ZPL-2.0"
] |
permissive
|
fillarikanava/old-fillarikanava
|
c6fd819f95e675e6eddc674e71528c798b391967
|
8dbb89ea34c2aa98450e403ca2d7f17179edff8d
|
refs/heads/master
| 2021-01-13T02:30:01.501771 | 2013-10-03T16:26:13 | 2013-10-03T16:26:13 | 13,201,013 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 83,763 |
py
|
#
# Copyright (c) 2001 Bizar Software Pty Ltd (http://www.bizarsoftware.com.au/)
# This module is free software, and you may redistribute it and/or modify
# under the same terms as Python, so long as this copyright message and
# disclaimer are retained in their original form.
#
# IN NO EVENT SHALL BIZAR SOFTWARE PTY LTD BE LIABLE TO ANY PARTY FOR
# DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING
# OUT OF THE USE OF THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# BIZAR SOFTWARE PTY LTD SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS"
# BASIS, AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
#
"""This module defines a backend that saves the hyperdatabase in a
database chosen by anydbm. It is guaranteed to always be available in python
versions >2.1.1 (the dumbdbm fallback in 2.1.1 and earlier has several
serious bugs, and is not available)
"""
__docformat__ = 'restructuredtext'
try:
import anydbm, sys
# dumbdbm only works in python 2.1.2+
if sys.version_info < (2,1,2):
import dumbdbm
assert anydbm._defaultmod != dumbdbm
del dumbdbm
except AssertionError:
print "WARNING: you should upgrade to python 2.1.3"
import whichdb, os, marshal, re, weakref, string, copy, time, shutil, logging
from roundup import hyperdb, date, password, roundupdb, security, support
from roundup.support import reversed
from roundup.backends import locking
from roundup.i18n import _
from blobfiles import FileStorage
from sessions_dbm import Sessions, OneTimeKeys
try:
from indexer_xapian import Indexer
except ImportError:
from indexer_dbm import Indexer
def db_exists(config):
# check for the user db
for db in 'nodes.user nodes.user.db'.split():
if os.path.exists(os.path.join(config.DATABASE, db)):
return 1
return 0
def db_nuke(config):
shutil.rmtree(config.DATABASE)
#
# Now the database
#
class Database(FileStorage, hyperdb.Database, roundupdb.Database):
"""A database for storing records containing flexible data types.
Transaction stuff TODO:
- check the timestamp of the class file and nuke the cache if it's
modified. Do some sort of conflict checking on the dirty stuff.
- perhaps detect write collisions (related to above)?
"""
def __init__(self, config, journaltag=None):
"""Open a hyperdatabase given a specifier to some storage.
The 'storagelocator' is obtained from config.DATABASE.
The meaning of 'storagelocator' depends on the particular
implementation of the hyperdatabase. It could be a file name,
a directory path, a socket descriptor for a connection to a
database over the network, etc.
The 'journaltag' is a token that will be attached to the journal
entries for any edits done on the database. If 'journaltag' is
None, the database is opened in read-only mode: the Class.create(),
Class.set(), Class.retire(), and Class.restore() methods are
disabled.
"""
FileStorage.__init__(self, config.UMASK)
self.config, self.journaltag = config, journaltag
self.dir = config.DATABASE
self.classes = {}
self.cache = {} # cache of nodes loaded or created
self.stats = {'cache_hits': 0, 'cache_misses': 0, 'get_items': 0,
'filtering': 0}
self.dirtynodes = {} # keep track of the dirty nodes by class
self.newnodes = {} # keep track of the new nodes by class
self.destroyednodes = {}# keep track of the destroyed nodes by class
self.transactions = []
self.indexer = Indexer(self)
self.security = security.Security(self)
os.umask(config.UMASK)
# lock it
lockfilenm = os.path.join(self.dir, 'lock')
self.lockfile = locking.acquire_lock(lockfilenm)
self.lockfile.write(str(os.getpid()))
self.lockfile.flush()
def post_init(self):
"""Called once the schema initialisation has finished.
"""
# reindex the db if necessary
if self.indexer.should_reindex():
self.reindex()
def refresh_database(self):
"""Rebuild the database
"""
self.reindex()
def getSessionManager(self):
return Sessions(self)
def getOTKManager(self):
return OneTimeKeys(self)
def reindex(self, classname=None, show_progress=False):
if classname:
classes = [self.getclass(classname)]
else:
classes = self.classes.values()
for klass in classes:
if show_progress:
for nodeid in support.Progress('Reindex %s'%klass.classname,
klass.list()):
klass.index(nodeid)
else:
for nodeid in klass.list():
klass.index(nodeid)
self.indexer.save_index()
def __repr__(self):
return '<back_anydbm instance at %x>'%id(self)
#
# Classes
#
def __getattr__(self, classname):
"""A convenient way of calling self.getclass(classname)."""
if self.classes.has_key(classname):
return self.classes[classname]
raise AttributeError, classname
def addclass(self, cl):
cn = cl.classname
if self.classes.has_key(cn):
raise ValueError, cn
self.classes[cn] = cl
# add default Edit and View permissions
self.security.addPermission(name="Create", klass=cn,
description="User is allowed to create "+cn)
self.security.addPermission(name="Edit", klass=cn,
description="User is allowed to edit "+cn)
self.security.addPermission(name="View", klass=cn,
description="User is allowed to access "+cn)
def getclasses(self):
"""Return a list of the names of all existing classes."""
l = self.classes.keys()
l.sort()
return l
def getclass(self, classname):
"""Get the Class object representing a particular class.
If 'classname' is not a valid class name, a KeyError is raised.
"""
try:
return self.classes[classname]
except KeyError:
raise KeyError, 'There is no class called "%s"'%classname
#
# Class DBs
#
def clear(self):
"""Delete all database contents
"""
logging.getLogger('hyperdb').info('clear')
for cn in self.classes.keys():
            for prefix in 'nodes', 'journals':
                path = os.path.join(self.dir, '%s.%s'%(prefix, cn))
if os.path.exists(path):
os.remove(path)
elif os.path.exists(path+'.db'): # dbm appends .db
os.remove(path+'.db')
# reset id sequences
path = os.path.join(os.getcwd(), self.dir, '_ids')
if os.path.exists(path):
os.remove(path)
elif os.path.exists(path+'.db'): # dbm appends .db
os.remove(path+'.db')
def getclassdb(self, classname, mode='r'):
""" grab a connection to the class db that will be used for
multiple actions
"""
return self.opendb('nodes.%s'%classname, mode)
def determine_db_type(self, path):
""" determine which DB wrote the class file
"""
db_type = ''
if os.path.exists(path):
db_type = whichdb.whichdb(path)
if not db_type:
raise hyperdb.DatabaseError, \
_("Couldn't identify database type")
elif os.path.exists(path+'.db'):
# if the path ends in '.db', it's a dbm database, whether
# anydbm says it's dbhash or not!
db_type = 'dbm'
return db_type
def opendb(self, name, mode):
"""Low-level database opener that gets around anydbm/dbm
eccentricities.
"""
# figure the class db type
path = os.path.join(os.getcwd(), self.dir, name)
db_type = self.determine_db_type(path)
# new database? let anydbm pick the best dbm
if not db_type:
if __debug__:
logging.getLogger('hyperdb').debug("opendb anydbm.open(%r, 'c')"%path)
return anydbm.open(path, 'c')
# open the database with the correct module
try:
dbm = __import__(db_type)
except ImportError:
raise hyperdb.DatabaseError, \
_("Couldn't open database - the required module '%s'"\
" is not available")%db_type
if __debug__:
logging.getLogger('hyperdb').debug("opendb %r.open(%r, %r)"%(db_type, path,
mode))
return dbm.open(path, mode)
#
# Node IDs
#
def newid(self, classname):
""" Generate a new id for the given class
"""
        # open the ids DB - create it if it doesn't exist
db = self.opendb('_ids', 'c')
if db.has_key(classname):
newid = db[classname] = str(int(db[classname]) + 1)
else:
# the count() bit is transitional - older dbs won't start at 1
newid = str(self.getclass(classname).count()+1)
db[classname] = newid
db.close()
return newid
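    # e.g. the first id handed out for a class on a fresh database is
    # str(self.getclass(classname).count() + 1) == '1'; later calls bump the
    # stored counter to '2', '3', and so on.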
def setid(self, classname, setid):
""" Set the id counter: used during import of database
"""
        # open the ids DB - create it if it doesn't exist
db = self.opendb('_ids', 'c')
db[classname] = str(setid)
db.close()
#
# Nodes
#
def addnode(self, classname, nodeid, node):
""" add the specified node to its class's db
"""
# we'll be supplied these props if we're doing an import
if not node.has_key('creator'):
# add in the "calculated" properties (dupe so we don't affect
# calling code's node assumptions)
node = node.copy()
node['creator'] = self.getuid()
node['actor'] = self.getuid()
node['creation'] = node['activity'] = date.Date()
self.newnodes.setdefault(classname, {})[nodeid] = 1
self.cache.setdefault(classname, {})[nodeid] = node
self.savenode(classname, nodeid, node)
def setnode(self, classname, nodeid, node):
""" change the specified node
"""
self.dirtynodes.setdefault(classname, {})[nodeid] = 1
# can't set without having already loaded the node
self.cache[classname][nodeid] = node
self.savenode(classname, nodeid, node)
def savenode(self, classname, nodeid, node):
""" perform the saving of data specified by the set/addnode
"""
if __debug__:
logging.getLogger('hyperdb').debug('save %s%s %r'%(classname, nodeid, node))
self.transactions.append((self.doSaveNode, (classname, nodeid, node)))
def getnode(self, classname, nodeid, db=None, cache=1):
""" get a node from the database
Note the "cache" parameter is not used, and exists purely for
backward compatibility!
"""
# try the cache
cache_dict = self.cache.setdefault(classname, {})
if cache_dict.has_key(nodeid):
if __debug__:
logging.getLogger('hyperdb').debug('get %s%s cached'%(classname, nodeid))
self.stats['cache_hits'] += 1
return cache_dict[nodeid]
if __debug__:
self.stats['cache_misses'] += 1
start_t = time.time()
logging.getLogger('hyperdb').debug('get %s%s'%(classname, nodeid))
# get from the database and save in the cache
if db is None:
db = self.getclassdb(classname)
if not db.has_key(nodeid):
raise IndexError, "no such %s %s"%(classname, nodeid)
# check the uncommitted, destroyed nodes
if (self.destroyednodes.has_key(classname) and
self.destroyednodes[classname].has_key(nodeid)):
raise IndexError, "no such %s %s"%(classname, nodeid)
# decode
res = marshal.loads(db[nodeid])
# reverse the serialisation
res = self.unserialise(classname, res)
# store off in the cache dict
if cache:
cache_dict[nodeid] = res
if __debug__:
self.stats['get_items'] += (time.time() - start_t)
return res
def destroynode(self, classname, nodeid):
"""Remove a node from the database. Called exclusively by the
destroy() method on Class.
"""
logging.getLogger('hyperdb').info('destroy %s%s'%(classname, nodeid))
# remove from cache and newnodes if it's there
if (self.cache.has_key(classname) and
self.cache[classname].has_key(nodeid)):
del self.cache[classname][nodeid]
if (self.newnodes.has_key(classname) and
self.newnodes[classname].has_key(nodeid)):
del self.newnodes[classname][nodeid]
# see if there's any obvious commit actions that we should get rid of
for entry in self.transactions[:]:
if entry[1][:2] == (classname, nodeid):
self.transactions.remove(entry)
# add to the destroyednodes map
self.destroyednodes.setdefault(classname, {})[nodeid] = 1
# add the destroy commit action
self.transactions.append((self.doDestroyNode, (classname, nodeid)))
self.transactions.append((FileStorage.destroy, (self, classname, nodeid)))
def serialise(self, classname, node):
"""Copy the node contents, converting non-marshallable data into
marshallable data.
"""
properties = self.getclass(classname).getprops()
d = {}
for k, v in node.items():
if k == self.RETIRED_FLAG:
d[k] = v
continue
# if the property doesn't exist then we really don't care
if not properties.has_key(k):
continue
# get the property spec
prop = properties[k]
if isinstance(prop, hyperdb.Password) and v is not None:
d[k] = str(v)
elif isinstance(prop, hyperdb.Date) and v is not None:
d[k] = v.serialise()
elif isinstance(prop, hyperdb.Interval) and v is not None:
d[k] = v.serialise()
else:
d[k] = v
return d
def unserialise(self, classname, node):
"""Decode the marshalled node data
"""
properties = self.getclass(classname).getprops()
d = {}
for k, v in node.items():
# if the property doesn't exist, or is the "retired" flag then
# it won't be in the properties dict
if not properties.has_key(k):
d[k] = v
continue
# get the property spec
prop = properties[k]
if isinstance(prop, hyperdb.Date) and v is not None:
d[k] = date.Date(v)
elif isinstance(prop, hyperdb.Interval) and v is not None:
d[k] = date.Interval(v)
elif isinstance(prop, hyperdb.Password) and v is not None:
p = password.Password()
p.unpack(v)
d[k] = p
else:
d[k] = v
return d
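    # Round-trip sketch: serialise() stores a date.Date value as the string
    # from its .serialise() method (and a Password as str(value)), while
    # unserialise() rebuilds them with date.Date(stored) /
    # password.Password().unpack(stored), so marshal only ever sees plain
    # builtin types.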
def hasnode(self, classname, nodeid, db=None):
""" determine if the database has a given node
"""
# try the cache
cache = self.cache.setdefault(classname, {})
if cache.has_key(nodeid):
return 1
# not in the cache - check the database
if db is None:
db = self.getclassdb(classname)
res = db.has_key(nodeid)
return res
def countnodes(self, classname, db=None):
count = 0
# include the uncommitted nodes
if self.newnodes.has_key(classname):
count += len(self.newnodes[classname])
if self.destroyednodes.has_key(classname):
count -= len(self.destroyednodes[classname])
# and count those in the DB
if db is None:
db = self.getclassdb(classname)
count = count + len(db.keys())
return count
#
# Files - special node properties
# inherited from FileStorage
#
# Journal
#
def addjournal(self, classname, nodeid, action, params, creator=None,
creation=None):
""" Journal the Action
'action' may be:
'create' or 'set' -- 'params' is a dictionary of property values
'link' or 'unlink' -- 'params' is (classname, nodeid, propname)
'retire' -- 'params' is None
'creator' -- the user performing the action, which defaults to
the current user.
"""
if __debug__:
logging.getLogger('hyperdb').debug('addjournal %s%s %s %r %s %r'%(classname,
nodeid, action, params, creator, creation))
if creator is None:
creator = self.getuid()
self.transactions.append((self.doSaveJournal, (classname, nodeid,
action, params, creator, creation)))
def setjournal(self, classname, nodeid, journal):
"""Set the journal to the "journal" list."""
if __debug__:
logging.getLogger('hyperdb').debug('setjournal %s%s %r'%(classname,
nodeid, journal))
self.transactions.append((self.doSetJournal, (classname, nodeid,
journal)))
def getjournal(self, classname, nodeid):
""" get the journal for id
Raise IndexError if the node doesn't exist (as per history()'s
API)
"""
# our journal result
res = []
# add any journal entries for transactions not committed to the
# database
for method, args in self.transactions:
if method != self.doSaveJournal:
continue
(cache_classname, cache_nodeid, cache_action, cache_params,
cache_creator, cache_creation) = args
if cache_classname == classname and cache_nodeid == nodeid:
if not cache_creator:
cache_creator = self.getuid()
if not cache_creation:
cache_creation = date.Date()
res.append((cache_nodeid, cache_creation, cache_creator,
cache_action, cache_params))
# attempt to open the journal - in some rare cases, the journal may
# not exist
try:
db = self.opendb('journals.%s'%classname, 'r')
except anydbm.error, error:
if str(error) == "need 'c' or 'n' flag to open new db":
raise IndexError, 'no such %s %s'%(classname, nodeid)
elif error.args[0] != 2:
# this isn't a "not found" error, be alarmed!
raise
if res:
# we have unsaved journal entries, return them
return res
raise IndexError, 'no such %s %s'%(classname, nodeid)
try:
journal = marshal.loads(db[nodeid])
except KeyError:
db.close()
if res:
# we have some unsaved journal entries, be happy!
return res
raise IndexError, 'no such %s %s'%(classname, nodeid)
db.close()
# add all the saved journal entries for this node
for nodeid, date_stamp, user, action, params in journal:
res.append((nodeid, date.Date(date_stamp), user, action, params))
return res
def pack(self, pack_before):
""" Delete all journal entries except "create" before 'pack_before'.
"""
pack_before = pack_before.serialise()
for classname in self.getclasses():
packed = 0
# get the journal db
db_name = 'journals.%s'%classname
path = os.path.join(os.getcwd(), self.dir, classname)
db_type = self.determine_db_type(path)
db = self.opendb(db_name, 'w')
for key in db.keys():
# get the journal for this db entry
journal = marshal.loads(db[key])
l = []
last_set_entry = None
for entry in journal:
# unpack the entry
                    (nodeid, date_stamp, journaltag, action,
                        params) = entry
# if the entry is after the pack date, _or_ the initial
# create entry, then it stays
if date_stamp > pack_before or action == 'create':
l.append(entry)
else:
packed += 1
db[key] = marshal.dumps(l)
logging.getLogger('hyperdb').info('packed %d %s items'%(packed,
classname))
if db_type == 'gdbm':
db.reorganize()
db.close()
#
# Basic transaction support
#
def commit(self, fail_ok=False):
""" Commit the current transactions.
Save all data changed since the database was opened or since the
last commit() or rollback().
fail_ok indicates that the commit is allowed to fail. This is used
in the web interface when committing cleaning of the session
database. We don't care if there's a concurrency issue there.
The only backend this seems to affect is postgres.
"""
logging.getLogger('hyperdb').info('commit %s transactions'%(
len(self.transactions)))
# keep a handle to all the database files opened
self.databases = {}
try:
# now, do all the transactions
reindex = {}
for method, args in self.transactions:
reindex[method(*args)] = 1
finally:
# make sure we close all the database files
for db in self.databases.values():
db.close()
del self.databases
# clear the transactions list now so the blobfile implementation
# doesn't think there's still pending file commits when it tries
# to access the file data
self.transactions = []
# reindex the nodes that request it
for classname, nodeid in filter(None, reindex.keys()):
self.getclass(classname).index(nodeid)
# save the indexer state
self.indexer.save_index()
self.clearCache()
def clearCache(self):
# all transactions committed, back to normal
self.cache = {}
self.dirtynodes = {}
self.newnodes = {}
self.destroyednodes = {}
self.transactions = []
def getCachedClassDB(self, classname):
""" get the class db, looking in our cache of databases for commit
"""
# get the database handle
db_name = 'nodes.%s'%classname
if not self.databases.has_key(db_name):
self.databases[db_name] = self.getclassdb(classname, 'c')
return self.databases[db_name]
def doSaveNode(self, classname, nodeid, node):
db = self.getCachedClassDB(classname)
# now save the marshalled data
db[nodeid] = marshal.dumps(self.serialise(classname, node))
# return the classname, nodeid so we reindex this content
return (classname, nodeid)
def getCachedJournalDB(self, classname):
""" get the journal db, looking in our cache of databases for commit
"""
# get the database handle
db_name = 'journals.%s'%classname
if not self.databases.has_key(db_name):
self.databases[db_name] = self.opendb(db_name, 'c')
return self.databases[db_name]
def doSaveJournal(self, classname, nodeid, action, params, creator,
creation):
# serialise the parameters now if necessary
if isinstance(params, type({})):
if action in ('set', 'create'):
params = self.serialise(classname, params)
# handle supply of the special journalling parameters (usually
# supplied on importing an existing database)
journaltag = creator
if creation:
journaldate = creation.serialise()
else:
journaldate = date.Date().serialise()
# create the journal entry
entry = (nodeid, journaldate, journaltag, action, params)
db = self.getCachedJournalDB(classname)
# now insert the journal entry
if db.has_key(nodeid):
# append to existing
s = db[nodeid]
l = marshal.loads(s)
l.append(entry)
else:
l = [entry]
db[nodeid] = marshal.dumps(l)
def doSetJournal(self, classname, nodeid, journal):
l = []
for nodeid, journaldate, journaltag, action, params in journal:
# serialise the parameters now if necessary
if isinstance(params, type({})):
if action in ('set', 'create'):
params = self.serialise(classname, params)
journaldate = journaldate.serialise()
l.append((nodeid, journaldate, journaltag, action, params))
db = self.getCachedJournalDB(classname)
db[nodeid] = marshal.dumps(l)
def doDestroyNode(self, classname, nodeid):
# delete from the class database
db = self.getCachedClassDB(classname)
if db.has_key(nodeid):
del db[nodeid]
# delete from the database
db = self.getCachedJournalDB(classname)
if db.has_key(nodeid):
del db[nodeid]
def rollback(self):
""" Reverse all actions from the current transaction.
"""
logging.getLogger('hyperdb').info('rollback %s transactions'%(
len(self.transactions)))
for method, args in self.transactions:
# delete temporary files
if method == self.doStoreFile:
self.rollbackStoreFile(*args)
self.cache = {}
self.dirtynodes = {}
self.newnodes = {}
self.destroyednodes = {}
self.transactions = []
def close(self):
""" Nothing to do
"""
if self.lockfile is not None:
locking.release_lock(self.lockfile)
self.lockfile.close()
self.lockfile = None
_marker = []
class Class(hyperdb.Class):
"""The handle to a particular class of nodes in a hyperdatabase."""
def enableJournalling(self):
"""Turn journalling on for this class
"""
self.do_journal = 1
def disableJournalling(self):
"""Turn journalling off for this class
"""
self.do_journal = 0
# Editing nodes:
def create(self, **propvalues):
"""Create a new node of this class and return its id.
The keyword arguments in 'propvalues' map property names to values.
The values of arguments must be acceptable for the types of their
corresponding properties or a TypeError is raised.
If this class has a key property, it must be present and its value
must not collide with other key strings or a ValueError is raised.
Any other properties on this class that are missing from the
'propvalues' dictionary are set to None.
If an id in a link or multilink property does not refer to a valid
node, an IndexError is raised.
These operations trigger detectors and can be vetoed. Attempts
to modify the "creation" or "activity" properties cause a KeyError.
"""
self.fireAuditors('create', None, propvalues)
newid = self.create_inner(**propvalues)
self.fireReactors('create', newid, None)
return newid
def create_inner(self, **propvalues):
""" Called by create, in-between the audit and react calls.
"""
if propvalues.has_key('id'):
raise KeyError, '"id" is reserved'
if self.db.journaltag is None:
raise hyperdb.DatabaseError, _('Database open read-only')
if propvalues.has_key('creation') or propvalues.has_key('activity'):
raise KeyError, '"creation" and "activity" are reserved'
# new node's id
newid = self.db.newid(self.classname)
# validate propvalues
num_re = re.compile('^\d+$')
for key, value in propvalues.items():
if key == self.key:
try:
self.lookup(value)
except KeyError:
pass
else:
raise ValueError, 'node with key "%s" exists'%value
# try to handle this property
try:
prop = self.properties[key]
except KeyError:
raise KeyError, '"%s" has no property "%s"'%(self.classname,
key)
if value is not None and isinstance(prop, hyperdb.Link):
if type(value) != type(''):
raise ValueError, 'link value must be String'
link_class = self.properties[key].classname
# if it isn't a number, it's a key
if not num_re.match(value):
try:
value = self.db.classes[link_class].lookup(value)
except (TypeError, KeyError):
raise IndexError, 'new property "%s": %s not a %s'%(
key, value, link_class)
elif not self.db.getclass(link_class).hasnode(value):
raise IndexError, '%s has no node %s'%(link_class, value)
# save off the value
propvalues[key] = value
# register the link with the newly linked node
if self.do_journal and self.properties[key].do_journal:
self.db.addjournal(link_class, value, 'link',
(self.classname, newid, key))
elif isinstance(prop, hyperdb.Multilink):
if value is None:
value = []
if not hasattr(value, '__iter__'):
raise TypeError, 'new property "%s" not an iterable of ids'%key
# clean up and validate the list of links
link_class = self.properties[key].classname
l = []
for entry in value:
if type(entry) != type(''):
raise ValueError, '"%s" multilink value (%r) '\
'must contain Strings'%(key, value)
# if it isn't a number, it's a key
if not num_re.match(entry):
try:
entry = self.db.classes[link_class].lookup(entry)
except (TypeError, KeyError):
raise IndexError, 'new property "%s": %s not a %s'%(
key, entry, self.properties[key].classname)
l.append(entry)
value = l
propvalues[key] = value
# handle additions
for nodeid in value:
if not self.db.getclass(link_class).hasnode(nodeid):
raise IndexError, '%s has no node %s'%(link_class,
nodeid)
# register the link with the newly linked node
if self.do_journal and self.properties[key].do_journal:
self.db.addjournal(link_class, nodeid, 'link',
(self.classname, newid, key))
elif isinstance(prop, hyperdb.String):
if type(value) != type('') and type(value) != type(u''):
raise TypeError, 'new property "%s" not a string'%key
if prop.indexme:
self.db.indexer.add_text((self.classname, newid, key),
value)
elif isinstance(prop, hyperdb.Password):
if not isinstance(value, password.Password):
raise TypeError, 'new property "%s" not a Password'%key
elif isinstance(prop, hyperdb.Date):
if value is not None and not isinstance(value, date.Date):
raise TypeError, 'new property "%s" not a Date'%key
elif isinstance(prop, hyperdb.Interval):
if value is not None and not isinstance(value, date.Interval):
raise TypeError, 'new property "%s" not an Interval'%key
elif value is not None and isinstance(prop, hyperdb.Number):
try:
float(value)
except ValueError:
raise TypeError, 'new property "%s" not numeric'%key
elif value is not None and isinstance(prop, hyperdb.Boolean):
try:
int(value)
except ValueError:
raise TypeError, 'new property "%s" not boolean'%key
# make sure there's data where there needs to be
for key, prop in self.properties.items():
if propvalues.has_key(key):
continue
if key == self.key:
raise ValueError, 'key property "%s" is required'%key
if isinstance(prop, hyperdb.Multilink):
propvalues[key] = []
# done
self.db.addnode(self.classname, newid, propvalues)
if self.do_journal:
self.db.addjournal(self.classname, newid, 'create', {})
return newid
def get(self, nodeid, propname, default=_marker, cache=1):
"""Get the value of a property on an existing node of this class.
'nodeid' must be the id of an existing node of this class or an
IndexError is raised. 'propname' must be the name of a property
of this class or a KeyError is raised.
'cache' exists for backward compatibility, and is not used.
Attempts to get the "creation" or "activity" properties should
do the right thing.
"""
if propname == 'id':
return nodeid
# get the node's dict
d = self.db.getnode(self.classname, nodeid)
# check for one of the special props
if propname == 'creation':
if d.has_key('creation'):
return d['creation']
if not self.do_journal:
raise ValueError, 'Journalling is disabled for this class'
journal = self.db.getjournal(self.classname, nodeid)
if journal:
                return journal[0][1]
else:
# on the strange chance that there's no journal
return date.Date()
if propname == 'activity':
if d.has_key('activity'):
return d['activity']
if not self.do_journal:
raise ValueError, 'Journalling is disabled for this class'
journal = self.db.getjournal(self.classname, nodeid)
if journal:
                return journal[-1][1]
else:
# on the strange chance that there's no journal
return date.Date()
if propname == 'creator':
if d.has_key('creator'):
return d['creator']
if not self.do_journal:
raise ValueError, 'Journalling is disabled for this class'
journal = self.db.getjournal(self.classname, nodeid)
if journal:
num_re = re.compile('^\d+$')
value = journal[0][2]
if num_re.match(value):
return value
else:
# old-style "username" journal tag
try:
return self.db.user.lookup(value)
except KeyError:
# user's been retired, return admin
return '1'
else:
return self.db.getuid()
if propname == 'actor':
if d.has_key('actor'):
return d['actor']
if not self.do_journal:
raise ValueError, 'Journalling is disabled for this class'
journal = self.db.getjournal(self.classname, nodeid)
if journal:
num_re = re.compile('^\d+$')
value = journal[-1][2]
if num_re.match(value):
return value
else:
# old-style "username" journal tag
try:
return self.db.user.lookup(value)
except KeyError:
# user's been retired, return admin
return '1'
else:
return self.db.getuid()
        # get the property (raises KeyError if invalid)
prop = self.properties[propname]
if not d.has_key(propname):
if default is _marker:
if isinstance(prop, hyperdb.Multilink):
return []
else:
return None
else:
return default
# return a dupe of the list so code doesn't get confused
if isinstance(prop, hyperdb.Multilink):
return d[propname][:]
return d[propname]
def set(self, nodeid, **propvalues):
"""Modify a property on an existing node of this class.
'nodeid' must be the id of an existing node of this class or an
IndexError is raised.
Each key in 'propvalues' must be the name of a property of this
class or a KeyError is raised.
All values in 'propvalues' must be acceptable types for their
corresponding properties or a TypeError is raised.
If the value of the key property is set, it must not collide with
other key strings or a ValueError is raised.
If the value of a Link or Multilink property contains an invalid
node id, a ValueError is raised.
These operations trigger detectors and can be vetoed. Attempts
to modify the "creation" or "activity" properties cause a KeyError.
"""
self.fireAuditors('set', nodeid, propvalues)
oldvalues = copy.deepcopy(self.db.getnode(self.classname, nodeid))
for name,prop in self.getprops(protected=0).items():
if oldvalues.has_key(name):
continue
if isinstance(prop, hyperdb.Multilink):
oldvalues[name] = []
else:
oldvalues[name] = None
propvalues = self.set_inner(nodeid, **propvalues)
self.fireReactors('set', nodeid, oldvalues)
return propvalues
def set_inner(self, nodeid, **propvalues):
""" Called by set, in-between the audit and react calls.
"""
if not propvalues:
return propvalues
if propvalues.has_key('creation') or propvalues.has_key('activity'):
raise KeyError, '"creation" and "activity" are reserved'
if propvalues.has_key('id'):
raise KeyError, '"id" is reserved'
if self.db.journaltag is None:
raise hyperdb.DatabaseError, _('Database open read-only')
node = self.db.getnode(self.classname, nodeid)
if node.has_key(self.db.RETIRED_FLAG):
            raise IndexError, 'cannot modify retired %s %s'%(self.classname,
                nodeid)
num_re = re.compile('^\d+$')
# if the journal value is to be different, store it in here
journalvalues = {}
for propname, value in propvalues.items():
# check to make sure we're not duplicating an existing key
if propname == self.key and node[propname] != value:
try:
self.lookup(value)
except KeyError:
pass
else:
raise ValueError, 'node with key "%s" exists'%value
# this will raise the KeyError if the property isn't valid
# ... we don't use getprops() here because we only care about
# the writeable properties.
try:
prop = self.properties[propname]
except KeyError:
raise KeyError, '"%s" has no property named "%s"'%(
self.classname, propname)
# if the value's the same as the existing value, no sense in
# doing anything
current = node.get(propname, None)
if value == current:
del propvalues[propname]
continue
journalvalues[propname] = current
# do stuff based on the prop type
if isinstance(prop, hyperdb.Link):
link_class = prop.classname
# if it isn't a number, it's a key
if value is not None and not isinstance(value, type('')):
                    raise ValueError, 'property "%s" link value must be a string'%(
propname)
if isinstance(value, type('')) and not num_re.match(value):
try:
value = self.db.classes[link_class].lookup(value)
except (TypeError, KeyError):
raise IndexError, 'new property "%s": %s not a %s'%(
propname, value, prop.classname)
if (value is not None and
not self.db.getclass(link_class).hasnode(value)):
raise IndexError, '%s has no node %s'%(link_class, value)
if self.do_journal and prop.do_journal:
# register the unlink with the old linked node
if node.has_key(propname) and node[propname] is not None:
self.db.addjournal(link_class, node[propname], 'unlink',
(self.classname, nodeid, propname))
# register the link with the newly linked node
if value is not None:
self.db.addjournal(link_class, value, 'link',
(self.classname, nodeid, propname))
elif isinstance(prop, hyperdb.Multilink):
if value is None:
value = []
if not hasattr(value, '__iter__'):
raise TypeError, 'new property "%s" not an iterable of'\
' ids'%propname
link_class = self.properties[propname].classname
l = []
for entry in value:
# if it isn't a number, it's a key
if type(entry) != type(''):
raise ValueError, 'new property "%s" link value ' \
'must be a string'%propname
if not num_re.match(entry):
try:
entry = self.db.classes[link_class].lookup(entry)
except (TypeError, KeyError):
raise IndexError, 'new property "%s": %s not a %s'%(
propname, entry,
self.properties[propname].classname)
l.append(entry)
value = l
propvalues[propname] = value
# figure the journal entry for this property
add = []
remove = []
# handle removals
if node.has_key(propname):
l = node[propname]
else:
l = []
for id in l[:]:
if id in value:
continue
# register the unlink with the old linked node
if self.do_journal and self.properties[propname].do_journal:
self.db.addjournal(link_class, id, 'unlink',
(self.classname, nodeid, propname))
l.remove(id)
remove.append(id)
# handle additions
for id in value:
if not self.db.getclass(link_class).hasnode(id):
raise IndexError, '%s has no node %s'%(link_class, id)
if id in l:
continue
# register the link with the newly linked node
if self.do_journal and self.properties[propname].do_journal:
self.db.addjournal(link_class, id, 'link',
(self.classname, nodeid, propname))
l.append(id)
add.append(id)
# figure the journal entry
l = []
if add:
l.append(('+', add))
if remove:
l.append(('-', remove))
if l:
journalvalues[propname] = tuple(l)
elif isinstance(prop, hyperdb.String):
if value is not None and type(value) != type('') and type(value) != type(u''):
raise TypeError, 'new property "%s" not a string'%propname
if prop.indexme:
self.db.indexer.add_text((self.classname, nodeid, propname),
value)
elif isinstance(prop, hyperdb.Password):
if not isinstance(value, password.Password):
raise TypeError, 'new property "%s" not a Password'%propname
propvalues[propname] = value
elif value is not None and isinstance(prop, hyperdb.Date):
if not isinstance(value, date.Date):
raise TypeError, 'new property "%s" not a Date'% propname
propvalues[propname] = value
elif value is not None and isinstance(prop, hyperdb.Interval):
if not isinstance(value, date.Interval):
raise TypeError, 'new property "%s" not an '\
'Interval'%propname
propvalues[propname] = value
elif value is not None and isinstance(prop, hyperdb.Number):
try:
float(value)
except ValueError:
raise TypeError, 'new property "%s" not numeric'%propname
elif value is not None and isinstance(prop, hyperdb.Boolean):
try:
int(value)
except ValueError:
raise TypeError, 'new property "%s" not boolean'%propname
node[propname] = value
# nothing to do?
if not propvalues:
return propvalues
# update the activity time
node['activity'] = date.Date()
node['actor'] = self.db.getuid()
# do the set, and journal it
self.db.setnode(self.classname, nodeid, node)
if self.do_journal:
self.db.addjournal(self.classname, nodeid, 'set', journalvalues)
return propvalues
def retire(self, nodeid):
"""Retire a node.
The properties on the node remain available from the get() method,
and the node's id is never reused.
Retired nodes are not returned by the find(), list(), or lookup()
methods, and other nodes may reuse the values of their key properties.
These operations trigger detectors and can be vetoed. Attempts
to modify the "creation" or "activity" properties cause a KeyError.
"""
if self.db.journaltag is None:
raise hyperdb.DatabaseError, _('Database open read-only')
self.fireAuditors('retire', nodeid, None)
node = self.db.getnode(self.classname, nodeid)
node[self.db.RETIRED_FLAG] = 1
self.db.setnode(self.classname, nodeid, node)
if self.do_journal:
self.db.addjournal(self.classname, nodeid, 'retired', None)
self.fireReactors('retire', nodeid, None)
def restore(self, nodeid):
"""Restpre a retired node.
Make node available for all operations like it was before retirement.
"""
if self.db.journaltag is None:
raise hyperdb.DatabaseError, _('Database open read-only')
node = self.db.getnode(self.classname, nodeid)
        # check if the key property value clashes with an existing node
key = self.getkey()
try:
id = self.lookup(node[key])
except KeyError:
pass
else:
raise KeyError, "Key property (%s) of retired node clashes with \
existing one (%s)" % (key, node[key])
# Now we can safely restore node
self.fireAuditors('restore', nodeid, None)
del node[self.db.RETIRED_FLAG]
self.db.setnode(self.classname, nodeid, node)
if self.do_journal:
self.db.addjournal(self.classname, nodeid, 'restored', None)
self.fireReactors('restore', nodeid, None)
def is_retired(self, nodeid, cldb=None):
"""Return true if the node is retired.
"""
node = self.db.getnode(self.classname, nodeid, cldb)
if node.has_key(self.db.RETIRED_FLAG):
return 1
return 0
def destroy(self, nodeid):
"""Destroy a node.
WARNING: this method should never be used except in extremely rare
situations where there could never be links to the node being
deleted
WARNING: use retire() instead
WARNING: the properties of this node will not be available ever again
WARNING: really, use retire() instead
Well, I think that's enough warnings. This method exists mostly to
support the session storage of the cgi interface.
"""
if self.db.journaltag is None:
raise hyperdb.DatabaseError, _('Database open read-only')
self.db.destroynode(self.classname, nodeid)
def history(self, nodeid):
"""Retrieve the journal of edits on a particular node.
'nodeid' must be the id of an existing node of this class or an
IndexError is raised.
The returned list contains tuples of the form
(nodeid, date, tag, action, params)
'date' is a Timestamp object specifying the time of the change and
'tag' is the journaltag specified when the database was opened.
"""
if not self.do_journal:
raise ValueError, 'Journalling is disabled for this class'
return self.db.getjournal(self.classname, nodeid)
# Locating nodes:
def hasnode(self, nodeid):
"""Determine if the given nodeid actually exists
"""
return self.db.hasnode(self.classname, nodeid)
def setkey(self, propname):
"""Select a String property of this class to be the key property.
'propname' must be the name of a String property of this class or
None, or a TypeError is raised. The values of the key property on
all existing nodes must be unique or a ValueError is raised. If the
property doesn't exist, KeyError is raised.
"""
prop = self.getprops()[propname]
if not isinstance(prop, hyperdb.String):
raise TypeError, 'key properties must be String'
self.key = propname
def getkey(self):
"""Return the name of the key property for this class or None."""
return self.key
# TODO: set up a separate index db file for this? profile?
def lookup(self, keyvalue):
"""Locate a particular node by its key property and return its id.
If this class has no key property, a TypeError is raised. If the
'keyvalue' matches one of the values for the key property among
the nodes in this class, the matching node's id is returned;
otherwise a KeyError is raised.
"""
if not self.key:
raise TypeError, 'No key property set for class %s'%self.classname
cldb = self.db.getclassdb(self.classname)
try:
for nodeid in self.getnodeids(cldb):
node = self.db.getnode(self.classname, nodeid, cldb)
if node.has_key(self.db.RETIRED_FLAG):
continue
if not node.has_key(self.key):
continue
if node[self.key] == keyvalue:
return nodeid
finally:
cldb.close()
raise KeyError, 'No key (%s) value "%s" for "%s"'%(self.key,
keyvalue, self.classname)
# change from spec - allows multiple props to match
def find(self, **propspec):
"""Get the ids of nodes in this class which link to the given nodes.
'propspec' consists of keyword args propname=nodeid or
propname={nodeid:1, }
'propname' must be the name of a property in this class, or a
KeyError is raised. That property must be a Link or
Multilink property, or a TypeError is raised.
Any node in this class whose 'propname' property links to any of
the nodeids will be returned. Examples::
db.issue.find(messages='1')
db.issue.find(messages={'1':1,'3':1}, files={'7':1})
"""
propspec = propspec.items()
for propname, itemids in propspec:
# check the prop is OK
prop = self.properties[propname]
if not isinstance(prop, hyperdb.Link) and not isinstance(prop, hyperdb.Multilink):
raise TypeError, "'%s' not a Link/Multilink property"%propname
# ok, now do the find
cldb = self.db.getclassdb(self.classname)
l = []
try:
for id in self.getnodeids(db=cldb):
item = self.db.getnode(self.classname, id, db=cldb)
if item.has_key(self.db.RETIRED_FLAG):
continue
for propname, itemids in propspec:
if type(itemids) is not type({}):
itemids = {itemids:1}
# special case if the item doesn't have this property
if not item.has_key(propname):
if itemids.has_key(None):
l.append(id)
break
continue
# grab the property definition and its value on this item
prop = self.properties[propname]
value = item[propname]
if isinstance(prop, hyperdb.Link) and itemids.has_key(value):
l.append(id)
break
elif isinstance(prop, hyperdb.Multilink):
hit = 0
for v in value:
if itemids.has_key(v):
l.append(id)
hit = 1
break
if hit:
break
finally:
cldb.close()
return l
def stringFind(self, **requirements):
"""Locate a particular node by matching a set of its String
properties in a caseless search.
If the property is not a String property, a TypeError is raised.
The return is a list of the id of all nodes that match.
"""
for propname in requirements.keys():
prop = self.properties[propname]
if not isinstance(prop, hyperdb.String):
raise TypeError, "'%s' not a String property"%propname
requirements[propname] = requirements[propname].lower()
l = []
cldb = self.db.getclassdb(self.classname)
try:
for nodeid in self.getnodeids(cldb):
node = self.db.getnode(self.classname, nodeid, cldb)
if node.has_key(self.db.RETIRED_FLAG):
continue
for key, value in requirements.items():
if not node.has_key(key):
break
if node[key] is None or node[key].lower() != value:
break
else:
l.append(nodeid)
finally:
cldb.close()
return l
def list(self):
""" Return a list of the ids of the active nodes in this class.
"""
l = []
cn = self.classname
cldb = self.db.getclassdb(cn)
try:
for nodeid in self.getnodeids(cldb):
node = self.db.getnode(cn, nodeid, cldb)
if node.has_key(self.db.RETIRED_FLAG):
continue
l.append(nodeid)
finally:
cldb.close()
l.sort()
return l
def getnodeids(self, db=None, retired=None):
""" Return a list of ALL nodeids
Set retired=None to get all nodes. Otherwise it'll get all the
retired or non-retired nodes, depending on the flag.
"""
res = []
# start off with the new nodes
if self.db.newnodes.has_key(self.classname):
res += self.db.newnodes[self.classname].keys()
must_close = False
if db is None:
db = self.db.getclassdb(self.classname)
must_close = True
try:
res = res + db.keys()
# remove the uncommitted, destroyed nodes
if self.db.destroyednodes.has_key(self.classname):
for nodeid in self.db.destroyednodes[self.classname].keys():
if db.has_key(nodeid):
res.remove(nodeid)
# check retired flag
if retired is False or retired is True:
l = []
for nodeid in res:
node = self.db.getnode(self.classname, nodeid, db)
is_ret = node.has_key(self.db.RETIRED_FLAG)
if retired == is_ret:
l.append(nodeid)
res = l
finally:
if must_close:
db.close()
return res
def _filter(self, search_matches, filterspec, proptree,
num_re = re.compile('^\d+$')):
"""Return a list of the ids of the active nodes in this class that
match the 'filter' spec, sorted by the group spec and then the
sort spec.
"filterspec" is {propname: value(s)}
"sort" and "group" are (dir, prop) where dir is '+', '-' or None
and prop is a prop name or None
"search_matches" is a sequence type or None
        The filter must match all properties specified. If the property
value to match is a list:
1. String properties must match all elements in the list, and
2. Other properties must match any of the elements in the list.
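        Example (illustrative 'issue' class; proptree may be None)::
            db.issue._filter(None, {'status': '1', 'title': 'crash*'}, None)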
"""
if __debug__:
start_t = time.time()
cn = self.classname
# optimise filterspec
l = []
props = self.getprops()
LINK = 'spec:link'
MULTILINK = 'spec:multilink'
STRING = 'spec:string'
DATE = 'spec:date'
INTERVAL = 'spec:interval'
OTHER = 'spec:other'
for k, v in filterspec.items():
propclass = props[k]
if isinstance(propclass, hyperdb.Link):
if type(v) is not type([]):
v = [v]
u = []
for entry in v:
# the value -1 is a special "not set" sentinel
if entry == '-1':
entry = None
u.append(entry)
l.append((LINK, k, u))
elif isinstance(propclass, hyperdb.Multilink):
# the value -1 is a special "not set" sentinel
if v in ('-1', ['-1']):
v = []
elif type(v) is not type([]):
v = [v]
l.append((MULTILINK, k, v))
elif isinstance(propclass, hyperdb.String) and k != 'id':
if type(v) is not type([]):
v = [v]
for v in v:
# simple glob searching
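                    # e.g. 'crash*' is escaped and rewritten to the regexp
                    # 'crash.*?', matched case-insensitively below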
v = re.sub(r'([\|\{\}\\\.\+\[\]\(\)])', r'\\\1', v)
v = v.replace('?', '.')
v = v.replace('*', '.*?')
l.append((STRING, k, re.compile(v, re.I)))
elif isinstance(propclass, hyperdb.Date):
try:
date_rng = propclass.range_from_raw(v, self.db)
l.append((DATE, k, date_rng))
except ValueError:
# If range creation fails - ignore that search parameter
pass
elif isinstance(propclass, hyperdb.Interval):
try:
intv_rng = date.Range(v, date.Interval)
l.append((INTERVAL, k, intv_rng))
except ValueError:
# If range creation fails - ignore that search parameter
pass
elif isinstance(propclass, hyperdb.Boolean):
if type(v) != type([]):
v = v.split(',')
bv = []
for val in v:
if type(val) is type(''):
bv.append(val.lower() in ('yes', 'true', 'on', '1'))
else:
bv.append(val)
l.append((OTHER, k, bv))
elif k == 'id':
if type(v) != type([]):
v = v.split(',')
l.append((OTHER, k, [str(int(val)) for val in v]))
elif isinstance(propclass, hyperdb.Number):
if type(v) != type([]):
v = v.split(',')
l.append((OTHER, k, [float(val) for val in v]))
filterspec = l
# now, find all the nodes that are active and pass filtering
matches = []
cldb = self.db.getclassdb(cn)
t = 0
try:
# TODO: only full-scan once (use items())
for nodeid in self.getnodeids(cldb):
node = self.db.getnode(cn, nodeid, cldb)
if node.has_key(self.db.RETIRED_FLAG):
continue
# apply filter
for t, k, v in filterspec:
# handle the id prop
if k == 'id':
if nodeid not in v:
break
continue
# get the node value
nv = node.get(k, None)
match = 0
# now apply the property filter
if t == LINK:
# link - if this node's property doesn't appear in the
# filterspec's nodeid list, skip it
match = nv in v
elif t == MULTILINK:
# multilink - if any of the nodeids required by the
# filterspec aren't in this node's property, then skip
# it
nv = node.get(k, [])
# check for matching the absence of multilink values
if not v:
match = not nv
else:
                            # otherwise, make sure this node has each of the
# required values
for want in v:
if want in nv:
match = 1
break
elif t == STRING:
if nv is None:
nv = ''
# RE search
match = v.search(nv)
elif t == DATE or t == INTERVAL:
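                        # range match: a closed range must bracket the value,
                        # an open-ended one only checks from_value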
if nv is None:
match = v is None
else:
if v.to_value:
if v.from_value <= nv and v.to_value >= nv:
match = 1
else:
if v.from_value <= nv:
match = 1
elif t == OTHER:
# straight value comparison for the other types
match = nv in v
if not match:
break
else:
matches.append([nodeid, node])
# filter based on full text search
if search_matches is not None:
k = []
for v in matches:
if v[0] in search_matches:
k.append(v)
matches = k
# add sorting information to the proptree
JPROPS = {'actor':1, 'activity':1, 'creator':1, 'creation':1}
children = []
if proptree:
children = proptree.sortable_children()
for pt in children:
dir = pt.sort_direction
prop = pt.name
assert (dir and prop)
propclass = props[prop]
pt.sort_ids = []
is_pointer = isinstance(propclass,(hyperdb.Link,
hyperdb.Multilink))
if not is_pointer:
pt.sort_result = []
try:
# cache the opened link class db, if needed.
lcldb = None
# cache the linked class items too
lcache = {}
for entry in matches:
itemid = entry[-2]
item = entry[-1]
# handle the properties that might be "faked"
# also, handle possible missing properties
try:
v = item[prop]
except KeyError:
if JPROPS.has_key(prop):
# force lookup of the special journal prop
v = self.get(itemid, prop)
else:
# the node doesn't have a value for this
# property
v = None
if isinstance(propclass, hyperdb.Multilink):
v = []
if prop == 'id':
v = int (itemid)
pt.sort_ids.append(v)
if not is_pointer:
pt.sort_result.append(v)
continue
# missing (None) values are always sorted first
if v is None:
pt.sort_ids.append(v)
if not is_pointer:
pt.sort_result.append(v)
continue
if isinstance(propclass, hyperdb.Link):
lcn = propclass.classname
link = self.db.classes[lcn]
key = link.orderprop()
child = pt.propdict[key]
if key!='id':
if not lcache.has_key(v):
# open the link class db if it's not already
if lcldb is None:
lcldb = self.db.getclassdb(lcn)
lcache[v] = self.db.getnode(lcn, v, lcldb)
r = lcache[v][key]
child.propdict[key].sort_ids.append(r)
else:
child.propdict[key].sort_ids.append(v)
pt.sort_ids.append(v)
if not is_pointer:
r = propclass.sort_repr(pt.parent.cls, v, pt.name)
pt.sort_result.append(r)
finally:
# if we opened the link class db, close it now
if lcldb is not None:
lcldb.close()
del lcache
finally:
cldb.close()
# pull the id out of the individual entries
matches = [entry[-2] for entry in matches]
if __debug__:
self.db.stats['filtering'] += (time.time() - start_t)
return matches
def count(self):
"""Get the number of nodes in this class.
If the returned integer is 'numnodes', the ids of all the nodes
in this class run from 1 to numnodes, and numnodes+1 will be the
id of the next node to be created in this class.
"""
return self.db.countnodes(self.classname)
# Manipulating properties:
def getprops(self, protected=1):
"""Return a dictionary mapping property names to property objects.
If the "protected" flag is true, we include protected properties -
those which may not be modified.
In addition to the actual properties on the node, these
methods provide the "creation" and "activity" properties. If the
"protected" flag is true, we include protected properties - those
which may not be modified.
"""
d = self.properties.copy()
if protected:
d['id'] = hyperdb.String()
d['creation'] = hyperdb.Date()
d['activity'] = hyperdb.Date()
d['creator'] = hyperdb.Link('user')
d['actor'] = hyperdb.Link('user')
return d
def addprop(self, **properties):
"""Add properties to this class.
The keyword arguments in 'properties' must map names to property
objects, or a TypeError is raised. None of the keys in 'properties'
may collide with the names of existing properties, or a ValueError
is raised before any properties have been added.
"""
for key in properties.keys():
if self.properties.has_key(key):
raise ValueError, key
self.properties.update(properties)
def index(self, nodeid):
""" Add (or refresh) the node to search indexes """
# find all the String properties that have indexme
for prop, propclass in self.getprops().items():
if isinstance(propclass, hyperdb.String) and propclass.indexme:
# index them under (classname, nodeid, property)
try:
value = str(self.get(nodeid, prop))
except IndexError:
# node has been destroyed
continue
self.db.indexer.add_text((self.classname, nodeid, prop), value)
#
# import / export support
#
def export_list(self, propnames, nodeid):
""" Export a node - generate a list of CSV-able data in the order
specified by propnames for the given node.
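        Example (hypothetical node): for propnames ['title'] this may
        yield ["'first issue'", '0'] - each value is repr()'ed so that
        import_list() can eval() it back; the retired flag is appended.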
"""
properties = self.getprops()
l = []
for prop in propnames:
proptype = properties[prop]
value = self.get(nodeid, prop)
# "marshal" data where needed
if value is None:
pass
elif isinstance(proptype, hyperdb.Date):
value = value.get_tuple()
elif isinstance(proptype, hyperdb.Interval):
value = value.get_tuple()
elif isinstance(proptype, hyperdb.Password):
value = str(value)
l.append(repr(value))
# append retired flag
l.append(repr(self.is_retired(nodeid)))
return l
def import_list(self, propnames, proplist):
""" Import a node - all information including "id" is present and
should not be sanity checked. Triggers are not triggered. The
journal should be initialised using the "creator" and "created"
information.
Return the nodeid of the node imported.
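        Example (hypothetical row, illustrative 'issue' class)::
            db.issue.import_list(['title', 'id'], ['"first issue"', "'1'"])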
"""
if self.db.journaltag is None:
raise hyperdb.DatabaseError, _('Database open read-only')
properties = self.getprops()
# make the new node's property map
d = {}
newid = None
for i in range(len(propnames)):
# Figure the property for this column
propname = propnames[i]
# Use eval to reverse the repr() used to output the CSV
value = eval(proplist[i])
# "unmarshal" where necessary
if propname == 'id':
newid = value
continue
elif propname == 'is retired':
# is the item retired?
if int(value):
d[self.db.RETIRED_FLAG] = 1
continue
elif value is None:
d[propname] = None
continue
prop = properties[propname]
if isinstance(prop, hyperdb.Date):
value = date.Date(value)
elif isinstance(prop, hyperdb.Interval):
value = date.Interval(value)
elif isinstance(prop, hyperdb.Password):
pwd = password.Password()
pwd.unpack(value)
value = pwd
d[propname] = value
# get a new id if necessary
if newid is None:
newid = self.db.newid(self.classname)
# add the node and journal
self.db.addnode(self.classname, newid, d)
return newid
def export_journals(self):
"""Export a class's journal - generate a list of lists of
CSV-able data:
nodeid, date, user, action, params
No heading here - the columns are fixed.
"""
properties = self.getprops()
r = []
for nodeid in self.getnodeids():
            # use 'jdate' to avoid shadowing the date module
            for nodeid, jdate, user, action, params in self.history(nodeid):
                jdate = jdate.get_tuple()
if action == 'set':
export_data = {}
for propname, value in params.items():
if not properties.has_key(propname):
# property no longer in the schema
continue
prop = properties[propname]
# make sure the params are eval()'able
if value is None:
pass
elif isinstance(prop, hyperdb.Date):
# this is a hack - some dates are stored as strings
if not isinstance(value, type('')):
value = value.get_tuple()
elif isinstance(prop, hyperdb.Interval):
# hack too - some intervals are stored as strings
if not isinstance(value, type('')):
value = value.get_tuple()
elif isinstance(prop, hyperdb.Password):
value = str(value)
export_data[propname] = value
params = export_data
                l = [nodeid, jdate, user, action, params]
r.append(map(repr, l))
return r
def import_journals(self, entries):
"""Import a class's journal.
Uses setjournal() to set the journal for each item."""
properties = self.getprops()
d = {}
for l in entries:
l = map(eval, l)
nodeid, jdate, user, action, params = l
r = d.setdefault(nodeid, [])
if action == 'set':
for propname, value in params.items():
prop = properties[propname]
if value is None:
pass
elif isinstance(prop, hyperdb.Date):
if type(value) == type(()):
print _('WARNING: invalid date tuple %r')%(value,)
value = date.Date( "2000-1-1" )
value = date.Date(value)
elif isinstance(prop, hyperdb.Interval):
value = date.Interval(value)
elif isinstance(prop, hyperdb.Password):
pwd = password.Password()
pwd.unpack(value)
value = pwd
params[propname] = value
r.append((nodeid, date.Date(jdate), user, action, params))
for nodeid, l in d.items():
self.db.setjournal(self.classname, nodeid, l)
class FileClass(hyperdb.FileClass, Class):
"""This class defines a large chunk of data. To support this, it has a
mandatory String property "content" which is typically saved off
externally to the hyperdb.
The default MIME type of this data is defined by the
"default_mime_type" class attribute, which may be overridden by each
node if the class defines a "type" String property.
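    Example (illustrative 'file' class)::
        newid = db.file.create(content='hello world', type='text/plain')
        data = db.file.get(newid, 'content')  # read back from file storage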
"""
def __init__(self, db, classname, **properties):
"""The newly-created class automatically includes the "content"
and "type" properties.
"""
if not properties.has_key('content'):
properties['content'] = hyperdb.String(indexme='yes')
if not properties.has_key('type'):
properties['type'] = hyperdb.String()
Class.__init__(self, db, classname, **properties)
def create(self, **propvalues):
""" Snarf the "content" propvalue and store in a file
"""
# we need to fire the auditors now, or the content property won't
# be in propvalues for the auditors to play with
self.fireAuditors('create', None, propvalues)
# now remove the content property so it's not stored in the db
content = propvalues['content']
del propvalues['content']
# make sure we have a MIME type
mime_type = propvalues.get('type', self.default_mime_type)
# do the database create
newid = self.create_inner(**propvalues)
# store off the content as a file
self.db.storefile(self.classname, newid, None, content)
# fire reactors
self.fireReactors('create', newid, None)
return newid
def get(self, nodeid, propname, default=_marker, cache=1):
""" Trap the content propname and get it from the file
'cache' exists for backwards compatibility, and is not used.
"""
poss_msg = 'Possibly an access right configuration problem.'
if propname == 'content':
try:
return self.db.getfile(self.classname, nodeid, None)
except IOError, (strerror):
# XXX by catching this we don't see an error in the log.
return 'ERROR reading file: %s%s\n%s\n%s'%(
self.classname, nodeid, poss_msg, strerror)
if default is not _marker:
return Class.get(self, nodeid, propname, default)
else:
return Class.get(self, nodeid, propname)
def set(self, itemid, **propvalues):
""" Snarf the "content" propvalue and update it in a file
"""
self.fireAuditors('set', itemid, propvalues)
# create the oldvalues dict - fill in any missing values
oldvalues = copy.deepcopy(self.db.getnode(self.classname, itemid))
for name,prop in self.getprops(protected=0).items():
if oldvalues.has_key(name):
continue
if isinstance(prop, hyperdb.Multilink):
oldvalues[name] = []
else:
oldvalues[name] = None
# now remove the content property so it's not stored in the db
content = None
if propvalues.has_key('content'):
content = propvalues['content']
del propvalues['content']
# do the database update
propvalues = self.set_inner(itemid, **propvalues)
# do content?
if content:
# store and possibly index
self.db.storefile(self.classname, itemid, None, content)
if self.properties['content'].indexme:
mime_type = self.get(itemid, 'type', self.default_mime_type)
self.db.indexer.add_text((self.classname, itemid, 'content'),
content, mime_type)
propvalues['content'] = content
# fire reactors
self.fireReactors('set', itemid, oldvalues)
return propvalues
def index(self, nodeid):
""" Add (or refresh) the node to search indexes.
Use the content-type property for the content property.
"""
# find all the String properties that have indexme
for prop, propclass in self.getprops().items():
if prop == 'content' and propclass.indexme:
mime_type = self.get(nodeid, 'type', self.default_mime_type)
self.db.indexer.add_text((self.classname, nodeid, 'content'),
str(self.get(nodeid, 'content')), mime_type)
elif isinstance(propclass, hyperdb.String) and propclass.indexme:
# index them under (classname, nodeid, property)
try:
value = str(self.get(nodeid, prop))
except IndexError:
# node has been destroyed
continue
self.db.indexer.add_text((self.classname, nodeid, prop), value)
# deviation from spec - was called ItemClass
class IssueClass(Class, roundupdb.IssueClass):
# Overridden methods:
def __init__(self, db, classname, **properties):
"""The newly-created class automatically includes the "messages",
"files", "nosy", and "superseder" properties. If the 'properties'
dictionary attempts to specify any of these properties or a
"creation" or "activity" property, a ValueError is raised.
"""
if not properties.has_key('title'):
properties['title'] = hyperdb.String(indexme='yes')
if not properties.has_key('messages'):
properties['messages'] = hyperdb.Multilink("msg")
if not properties.has_key('files'):
properties['files'] = hyperdb.Multilink("file")
if not properties.has_key('nosy'):
# note: journalling is turned off as it really just wastes
# space. this behaviour may be overridden in an instance
properties['nosy'] = hyperdb.Multilink("user", do_journal="no")
if not properties.has_key('superseder'):
properties['superseder'] = hyperdb.Multilink(classname)
Class.__init__(self, db, classname, **properties)
# vim: set et sts=4 sw=4 :
|
[
"[email protected]"
] | |
3f082db88de075c9ca3e973e5512ab9093ea3e0c
|
c317f6a390de255540c2fb6a2e637c20bec03762
|
/final/rev-kyrkkodning/obfuscate.py
|
5bb035cc6b5f9c3b84c76c559e079fe4659e3869
|
[] |
no_license
|
Kodsport/sakerhetssm-2021-solutions
|
a7329ef22862bcfc4c970d43ac210bbe951cf3a8
|
85bc2aa619d55139acf7c91483259088329c15e2
|
refs/heads/master
| 2023-05-12T00:54:24.546337 | 2021-06-07T14:12:32 | 2021-06-07T14:12:32 | 353,975,490 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 290 |
py
|
from pathlib import Path
import re
source = Path("challeasy.py").read_text()
names = [row.split(" = ")[0] for row in source.split("\n") if " = " in row]
for i,name in enumerate(names):
source = re.sub("\\b"+name+"\\b",chr(ord("A")+i),source)
Path("challhard.py").write_text(source)
|
[
"[email protected]"
] | |
1fbf502a09cc2ddd28c1e76023f44e6a52f422d4
|
65dce36be9eb2078def7434455bdb41e4fc37394
|
/Two Sum - Closest to target.py
|
83946cbedd32144499f639c865b05e651eadea0f
|
[] |
no_license
|
EvianTan/Lintcode-Leetcode
|
9cf2d2f6a85c0a494382b9c347bcdb4ee0b5d21a
|
d12dd31e98c2bf24acc20c5634adfa950e68bd97
|
refs/heads/master
| 2021-01-22T08:13:55.758825 | 2017-10-20T21:46:23 | 2017-10-20T21:46:23 | 92,607,185 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,070 |
py
|
'''
Given an array nums of n integers, find two integers in nums such that the sum is closest to a given number, target.
Return the difference between the sum of the two integers and the target.
Have you met this question in a real interview? Yes
Example
Given array nums = [-1, 2, 1, -4], and target = 4.
The minimum difference is 1. (4 - (2 + 1) = 1).
Challenge
Do it in O(nlogn) time complexity.
'''
import sys  # module level, so sys.maxint is visible inside the method
class Solution:
    """
    @param: nums: an integer array
    @param: target: An integer
    @return: the difference between the sum and the target
    """
    def twoSumClosest(self, nums, target):
# write your code here
nums.sort()
i = 0
j = len(nums)-1
diff = sys.maxint
while i < j:
if nums[i]+nums[j] < target:
diff = min(diff, target-nums[i]-nums[j])
i += 1
elif nums[i]+nums[j] > target:
diff = min(diff, nums[i] + nums[j] - target)
j -= 1
else:
return 0
return diff
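# Quick check against the example above:
#   Solution().twoSumClosest([-1, 2, 1, -4], 4)  # -> 1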
|
[
"[email protected]"
] | |
3d37ccbbc8f7b579d69b9a4f8e074fbabb34d92d
|
26bd175ffb3bd204db5bcb70eec2e3dfd55fbe9f
|
/exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/net_tools/test_hetzner_firewall.py
|
439ac8d08f12a6e166045ed0235f1abe7c8ded57
|
[
"GPL-3.0-only",
"MIT",
"CC0-1.0",
"GPL-1.0-or-later"
] |
permissive
|
tr3ck3r/linklight
|
37814ed19173d893cdff161355d70a1cf538239b
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
refs/heads/master
| 2021-04-11T04:33:02.727318 | 2020-03-25T17:38:41 | 2020-03-25T17:38:41 | 248,992,437 | 0 | 0 |
MIT
| 2020-03-21T14:26:25 | 2020-03-21T14:26:25 | null |
UTF-8
|
Python
| false | false | 47,090 |
py
|
# (c) 2019 Felix Fontein <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from ansible_collections.community.general.plugins.module_utils.hetzner import BASE_URL
from ansible_collections.community.general.plugins.modules.net_tools import hetzner_firewall
# ##########################################################
# ## General test framework
import json
from mock import MagicMock
from ansible_collections.community.general.tests.unit.modules.utils import set_module_args
from ansible.module_utils.six.moves.urllib.parse import parse_qs
class FetchUrlCall:
def __init__(self, method, status):
assert method == method.upper(), \
'HTTP method names are case-sensitive and should be upper-case (RFCs 7230 and 7231)'
self.method = method
self.status = status
self.body = None
self.headers = {}
self.error_data = {}
self.expected_url = None
self.expected_headers = {}
self.form_parse = False
self.form_present = set()
self.form_values = {}
self.form_values_one = {}
def result(self, body):
self.body = body
assert self.error_data.get('body') is None, 'Error body must not be given'
return self
def result_str(self, str_body):
return self.result(str_body.encode('utf-8'))
def result_json(self, json_body):
return self.result(json.dumps(json_body).encode('utf-8'))
def result_error(self, msg, body=None):
self.error_data['msg'] = msg
if body is not None:
self.error_data['body'] = body
assert self.body is None, 'Result must not be given if error body is provided'
return self
def expect_url(self, url):
self.expected_url = url
return self
def return_header(self, name, value):
assert value is not None
self.headers[name] = value
return self
def expect_header(self, name, value):
self.expected_headers[name] = value
return self
def expect_header_unset(self, name):
self.expected_headers[name] = None
return self
def expect_form_present(self, key):
self.form_parse = True
        self.form_present.add(key)  # form_present is a set
return self
def expect_form_value(self, key, value):
self.form_parse = True
self.form_values[key] = [value]
return self
def expect_form_value_absent(self, key):
self.form_parse = True
self.form_values[key] = []
return self
def expect_form_value_one_of(self, key, value):
self.form_parse = True
        if key not in self.form_values_one:
            self.form_values_one[key] = set()
        self.form_values_one[key].add(value)
return self
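# Expectations are composed fluently; a typical call in the tests below is
#   FetchUrlCall('GET', 200).result_json({...}).expect_url(url)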
class FetchUrlProxy:
def __init__(self, calls):
self.calls = calls
self.index = 0
def _validate_form(self, call, data):
form = {}
if data is not None:
form = parse_qs(data, keep_blank_values=True)
for k in call.form_present:
assert k in form
for k, v in call.form_values.items():
if len(v) == 0:
assert k not in form
else:
assert form[k] == v
for k, v in call.form_values_one.items():
assert v <= set(form[k])
def _validate_headers(self, call, headers):
given_headers = {}
if headers is not None:
for k, v in headers.items():
given_headers[k.lower()] = v
        for k, v in call.expected_headers.items():
if v is None:
assert k.lower() not in given_headers, \
'Header "{0}" specified for fetch_url call, but should not be'.format(k)
else:
assert given_headers.get(k.lower()) == v, \
'Header "{0}" specified for fetch_url call, but with wrong value'.format(k)
def __call__(self, module, url, data=None, headers=None, method=None,
use_proxy=True, force=False, last_mod_time=None, timeout=10,
use_gssapi=False, unix_socket=None, ca_path=None, cookies=None):
assert self.index < len(self.calls), 'Got more fetch_url calls than expected'
call = self.calls[self.index]
self.index += 1
# Validate call
assert method == call.method
if call.expected_url is not None:
assert url == call.expected_url, \
'Exepected URL does not match for fetch_url call'
if call.expected_headers:
self._validate_headers(call, headers)
if call.form_parse:
self._validate_form(call, data)
# Compose result
info = dict(status=call.status)
for k, v in call.headers.items():
info[k.lower()] = v
info.update(call.error_data)
res = object()
if call.body is not None:
res = MagicMock()
res.read = MagicMock(return_value=call.body)
return (res, info)
def assert_is_done(self):
assert self.index == len(self.calls), 'Got less fetch_url calls than expected'
class ModuleExitException(Exception):
def __init__(self, kwargs):
self.kwargs = kwargs
class ModuleFailException(Exception):
def __init__(self, kwargs):
self.kwargs = kwargs
def run_module(mocker, module, arguments, fetch_url):
def exit_json(module, **kwargs):
module._return_formatted(kwargs)
raise ModuleExitException(kwargs)
def fail_json(module, **kwargs):
module._return_formatted(kwargs)
raise ModuleFailException(kwargs)
mocker.patch('ansible_collections.community.general.plugins.module_utils.hetzner.fetch_url', fetch_url)
mocker.patch('ansible_collections.community.general.plugins.module_utils.hetzner.time.sleep', lambda duration: None)
mocker.patch('ansible_collections.community.general.plugins.modules.net_tools.hetzner_firewall.AnsibleModule.exit_json', exit_json)
mocker.patch('ansible_collections.community.general.plugins.modules.net_tools.hetzner_firewall.AnsibleModule.fail_json', fail_json)
set_module_args(arguments)
module.main()
def run_module_success(mocker, module, arguments, fetch_url_calls):
fetch_url = FetchUrlProxy(fetch_url_calls or [])
with pytest.raises(ModuleExitException) as e:
run_module(mocker, module, arguments, fetch_url)
fetch_url.assert_is_done()
return e.value.kwargs
def run_module_failed(mocker, module, arguments, fetch_url_calls):
fetch_url = FetchUrlProxy(fetch_url_calls or [])
with pytest.raises(ModuleFailException) as e:
run_module(mocker, module, arguments, fetch_url)
fetch_url.assert_is_done()
return e.value.kwargs
# ##########################################################
# ## Hetzner firewall tests
# Tests for state (absent and present)
def test_absent_idempotency(mocker):
result = run_module_success(mocker, hetzner_firewall, {
'hetzner_user': '',
'hetzner_password': '',
'server_ip': '1.2.3.4',
'state': 'absent',
}, [
FetchUrlCall('GET', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'disabled',
'whitelist_hos': False,
'port': 'main',
'rules': {
'input': [],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
])
assert result['changed'] is False
assert result['diff']['before']['status'] == 'disabled'
assert result['diff']['after']['status'] == 'disabled'
assert result['firewall']['status'] == 'disabled'
assert result['firewall']['server_ip'] == '1.2.3.4'
assert result['firewall']['server_number'] == 1
def test_absent_changed(mocker):
result = run_module_success(mocker, hetzner_firewall, {
'hetzner_user': '',
'hetzner_password': '',
'server_ip': '1.2.3.4',
'state': 'absent',
}, [
FetchUrlCall('GET', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'active',
'whitelist_hos': True,
'port': 'main',
'rules': {
'input': [],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
FetchUrlCall('POST', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'disabled',
'whitelist_hos': False,
'port': 'main',
'rules': {
'input': [],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL))
.expect_form_value('status', 'disabled'),
])
assert result['changed'] is True
assert result['diff']['before']['status'] == 'active'
assert result['diff']['after']['status'] == 'disabled'
assert result['firewall']['status'] == 'disabled'
assert result['firewall']['server_ip'] == '1.2.3.4'
assert result['firewall']['server_number'] == 1
def test_present_idempotency(mocker):
result = run_module_success(mocker, hetzner_firewall, {
'hetzner_user': '',
'hetzner_password': '',
'server_ip': '1.2.3.4',
'state': 'present',
}, [
FetchUrlCall('GET', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'active',
'whitelist_hos': False,
'port': 'main',
'rules': {
'input': [],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
])
assert result['changed'] is False
assert result['diff']['before']['status'] == 'active'
assert result['diff']['after']['status'] == 'active'
assert result['firewall']['status'] == 'active'
assert result['firewall']['server_ip'] == '1.2.3.4'
assert result['firewall']['server_number'] == 1
def test_present_changed(mocker):
result = run_module_success(mocker, hetzner_firewall, {
'hetzner_user': '',
'hetzner_password': '',
'server_ip': '1.2.3.4',
'state': 'present',
}, [
FetchUrlCall('GET', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'disabled',
'whitelist_hos': True,
'port': 'main',
'rules': {
'input': [],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
FetchUrlCall('POST', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'active',
'whitelist_hos': False,
'port': 'main',
'rules': {
'input': [],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL))
.expect_form_value('status', 'active'),
])
assert result['changed'] is True
assert result['diff']['before']['status'] == 'disabled'
assert result['diff']['after']['status'] == 'active'
assert result['firewall']['status'] == 'active'
assert result['firewall']['server_ip'] == '1.2.3.4'
assert result['firewall']['server_number'] == 1
# Tests for state (absent and present) with check mode
def test_absent_idempotency_check(mocker):
result = run_module_success(mocker, hetzner_firewall, {
'hetzner_user': '',
'hetzner_password': '',
'server_ip': '1.2.3.4',
'state': 'absent',
'_ansible_check_mode': True,
}, [
FetchUrlCall('GET', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'disabled',
'whitelist_hos': False,
'port': 'main',
'rules': {
'input': [],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
])
assert result['changed'] is False
assert result['diff']['before']['status'] == 'disabled'
assert result['diff']['after']['status'] == 'disabled'
assert result['firewall']['status'] == 'disabled'
assert result['firewall']['server_ip'] == '1.2.3.4'
assert result['firewall']['server_number'] == 1
def test_absent_changed_check(mocker):
result = run_module_success(mocker, hetzner_firewall, {
'hetzner_user': '',
'hetzner_password': '',
'server_ip': '1.2.3.4',
'state': 'absent',
'_ansible_check_mode': True,
}, [
FetchUrlCall('GET', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'active',
'whitelist_hos': True,
'port': 'main',
'rules': {
'input': [],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
])
assert result['changed'] is True
assert result['diff']['before']['status'] == 'active'
assert result['diff']['after']['status'] == 'disabled'
assert result['firewall']['status'] == 'disabled'
assert result['firewall']['server_ip'] == '1.2.3.4'
assert result['firewall']['server_number'] == 1
def test_present_idempotency_check(mocker):
result = run_module_success(mocker, hetzner_firewall, {
'hetzner_user': '',
'hetzner_password': '',
'server_ip': '1.2.3.4',
'state': 'present',
'_ansible_check_mode': True,
}, [
FetchUrlCall('GET', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'active',
'whitelist_hos': False,
'port': 'main',
'rules': {
'input': [],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
])
assert result['changed'] is False
assert result['diff']['before']['status'] == 'active'
assert result['diff']['after']['status'] == 'active'
assert result['firewall']['status'] == 'active'
assert result['firewall']['server_ip'] == '1.2.3.4'
assert result['firewall']['server_number'] == 1
def test_present_changed_check(mocker):
result = run_module_success(mocker, hetzner_firewall, {
'hetzner_user': '',
'hetzner_password': '',
'server_ip': '1.2.3.4',
'state': 'present',
'_ansible_check_mode': True,
}, [
FetchUrlCall('GET', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'disabled',
'whitelist_hos': True,
'port': 'main',
'rules': {
'input': [],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
])
assert result['changed'] is True
assert result['diff']['before']['status'] == 'disabled'
assert result['diff']['after']['status'] == 'active'
assert result['firewall']['status'] == 'active'
assert result['firewall']['server_ip'] == '1.2.3.4'
assert result['firewall']['server_number'] == 1
# Tests for port
def test_port_idempotency(mocker):
result = run_module_success(mocker, hetzner_firewall, {
'hetzner_user': '',
'hetzner_password': '',
'server_ip': '1.2.3.4',
'state': 'present',
'port': 'main',
}, [
FetchUrlCall('GET', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'active',
'whitelist_hos': False,
'port': 'main',
'rules': {
'input': [],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
])
assert result['changed'] is False
assert result['diff']['before']['port'] == 'main'
assert result['diff']['after']['port'] == 'main'
assert result['firewall']['status'] == 'active'
assert result['firewall']['server_ip'] == '1.2.3.4'
assert result['firewall']['server_number'] == 1
assert result['firewall']['port'] == 'main'
def test_port_changed(mocker):
result = run_module_success(mocker, hetzner_firewall, {
'hetzner_user': '',
'hetzner_password': '',
'server_ip': '1.2.3.4',
'state': 'present',
'port': 'main',
}, [
FetchUrlCall('GET', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'disabled',
'whitelist_hos': True,
'port': 'kvm',
'rules': {
'input': [],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
FetchUrlCall('POST', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'active',
'whitelist_hos': False,
'port': 'main',
'rules': {
'input': [],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL))
.expect_form_value('port', 'main'),
])
assert result['changed'] is True
assert result['diff']['before']['port'] == 'kvm'
assert result['diff']['after']['port'] == 'main'
assert result['firewall']['status'] == 'active'
assert result['firewall']['server_ip'] == '1.2.3.4'
assert result['firewall']['server_number'] == 1
assert result['firewall']['port'] == 'main'
# Tests for whitelist_hos
def test_whitelist_hos_idempotency(mocker):
result = run_module_success(mocker, hetzner_firewall, {
'hetzner_user': '',
'hetzner_password': '',
'server_ip': '1.2.3.4',
'state': 'present',
'whitelist_hos': True,
}, [
FetchUrlCall('GET', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'active',
'whitelist_hos': True,
'port': 'main',
'rules': {
'input': [],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
])
assert result['changed'] is False
assert result['diff']['before']['whitelist_hos'] is True
assert result['diff']['after']['whitelist_hos'] is True
assert result['firewall']['status'] == 'active'
assert result['firewall']['server_ip'] == '1.2.3.4'
assert result['firewall']['server_number'] == 1
assert result['firewall']['whitelist_hos'] is True
def test_whitelist_hos_changed(mocker):
result = run_module_success(mocker, hetzner_firewall, {
'hetzner_user': '',
'hetzner_password': '',
'server_ip': '1.2.3.4',
'state': 'present',
'whitelist_hos': True,
}, [
FetchUrlCall('GET', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'disabled',
'whitelist_hos': False,
'port': 'main',
'rules': {
'input': [],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
FetchUrlCall('POST', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'active',
'whitelist_hos': True,
'port': 'main',
'rules': {
'input': [],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL))
.expect_form_value('whitelist_hos', 'true'),
])
assert result['changed'] is True
assert result['diff']['before']['whitelist_hos'] is False
assert result['diff']['after']['whitelist_hos'] is True
assert result['firewall']['status'] == 'active'
assert result['firewall']['server_ip'] == '1.2.3.4'
assert result['firewall']['server_number'] == 1
assert result['firewall']['whitelist_hos'] is True
# Tests for wait_for_configured in getting status
def test_wait_get(mocker):
result = run_module_success(mocker, hetzner_firewall, {
'hetzner_user': '',
'hetzner_password': '',
'server_ip': '1.2.3.4',
'state': 'present',
'wait_for_configured': True,
}, [
FetchUrlCall('GET', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'in process',
'whitelist_hos': False,
'port': 'main',
'rules': {
'input': [],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
FetchUrlCall('GET', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'active',
'whitelist_hos': False,
'port': 'main',
'rules': {
'input': [],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
])
assert result['changed'] is False
assert result['diff']['before']['status'] == 'active'
assert result['diff']['after']['status'] == 'active'
assert result['firewall']['status'] == 'active'
assert result['firewall']['server_ip'] == '1.2.3.4'
assert result['firewall']['server_number'] == 1
def test_wait_get_timeout(mocker):
result = run_module_failed(mocker, hetzner_firewall, {
'hetzner_user': '',
'hetzner_password': '',
'server_ip': '1.2.3.4',
'state': 'present',
'wait_for_configured': True,
'timeout': 0,
}, [
FetchUrlCall('GET', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'in process',
'whitelist_hos': False,
'port': 'main',
'rules': {
'input': [],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
FetchUrlCall('GET', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'in process',
'whitelist_hos': False,
'port': 'main',
'rules': {
'input': [],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
])
assert result['msg'] == 'Timeout while waiting for firewall to be configured.'
def test_nowait_get(mocker):
result = run_module_failed(mocker, hetzner_firewall, {
'hetzner_user': '',
'hetzner_password': '',
'server_ip': '1.2.3.4',
'state': 'present',
'wait_for_configured': False,
}, [
FetchUrlCall('GET', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'in process',
'whitelist_hos': False,
'port': 'main',
'rules': {
'input': [],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
])
assert result['msg'] == 'Firewall configuration cannot be read as it is not configured.'
# Tests for wait_for_configured in setting status
def test_wait_update(mocker):
result = run_module_success(mocker, hetzner_firewall, {
'hetzner_user': '',
'hetzner_password': '',
'server_ip': '1.2.3.4',
'wait_for_configured': True,
'state': 'present',
}, [
FetchUrlCall('GET', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'disabled',
'whitelist_hos': False,
'port': 'main',
'rules': {
'input': [],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
FetchUrlCall('POST', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'in process',
'whitelist_hos': False,
'port': 'main',
'rules': {
'input': [],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
FetchUrlCall('GET', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'active',
'whitelist_hos': False,
'port': 'main',
'rules': {
'input': [],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
])
assert result['changed'] is True
assert result['diff']['before']['status'] == 'disabled'
assert result['diff']['after']['status'] == 'active'
assert result['firewall']['status'] == 'active'
assert result['firewall']['server_ip'] == '1.2.3.4'
assert result['firewall']['server_number'] == 1
def test_wait_update_timeout(mocker):
result = run_module_success(mocker, hetzner_firewall, {
'hetzner_user': '',
'hetzner_password': '',
'server_ip': '1.2.3.4',
'state': 'present',
'wait_for_configured': True,
'timeout': 0,
}, [
FetchUrlCall('GET', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'disabled',
'whitelist_hos': False,
'port': 'main',
'rules': {
'input': [],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
FetchUrlCall('POST', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'in process',
'whitelist_hos': False,
'port': 'main',
'rules': {
'input': [],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
FetchUrlCall('GET', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'in process',
'whitelist_hos': False,
'port': 'main',
'rules': {
'input': [],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
])
assert result['changed'] is True
assert result['diff']['before']['status'] == 'disabled'
assert result['diff']['after']['status'] == 'active'
assert result['firewall']['status'] == 'in process'
assert result['firewall']['server_ip'] == '1.2.3.4'
assert result['firewall']['server_number'] == 1
assert 'Timeout while waiting for firewall to be configured.' in result['warnings']
def test_nowait_update(mocker):
result = run_module_success(mocker, hetzner_firewall, {
'hetzner_user': '',
'hetzner_password': '',
'server_ip': '1.2.3.4',
'state': 'present',
'wait_for_configured': False,
}, [
FetchUrlCall('GET', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'disabled',
'whitelist_hos': False,
'port': 'main',
'rules': {
'input': [],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
FetchUrlCall('POST', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'in process',
'whitelist_hos': False,
'port': 'main',
'rules': {
'input': [],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
])
assert result['changed'] is True
assert result['diff']['before']['status'] == 'disabled'
assert result['diff']['after']['status'] == 'active'
assert result['firewall']['status'] == 'in process'
assert result['firewall']['server_ip'] == '1.2.3.4'
assert result['firewall']['server_number'] == 1
# Idempotency checks: different amount of input rules
def test_input_rule_len_change_0_1(mocker):
result = run_module_success(mocker, hetzner_firewall, {
'hetzner_user': '',
'hetzner_password': '',
'server_ip': '1.2.3.4',
'state': 'present',
'rules': {
'input': [
{
'ip_version': 'ipv4',
'action': 'discard',
},
],
},
}, [
FetchUrlCall('GET', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'active',
'whitelist_hos': True,
'port': 'main',
'rules': {
'input': [],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
FetchUrlCall('POST', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'active',
'whitelist_hos': False,
'port': 'main',
'rules': {
'input': [
{
'name': None,
'ip_version': 'ipv4',
'dst_ip': None,
'dst_port': None,
'src_ip': None,
'src_port': None,
'protocol': None,
'tcp_flags': None,
'action': 'discard',
},
],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL))
.expect_form_value('status', 'active')
.expect_form_value_absent('rules[input][0][name]')
.expect_form_value('rules[input][0][ip_version]', 'ipv4')
.expect_form_value_absent('rules[input][0][dst_ip]')
.expect_form_value_absent('rules[input][0][dst_port]')
.expect_form_value_absent('rules[input][0][src_ip]')
.expect_form_value_absent('rules[input][0][src_port]')
.expect_form_value_absent('rules[input][0][protocol]')
.expect_form_value_absent('rules[input][0][tcp_flags]')
.expect_form_value('rules[input][0][action]', 'discard')
.expect_form_value_absent('rules[input][1][action]'),
])
assert result['changed'] is True
assert result['diff']['before']['status'] == 'active'
assert result['diff']['after']['status'] == 'active'
assert len(result['diff']['before']['rules']['input']) == 0
assert len(result['diff']['after']['rules']['input']) == 1
assert result['firewall']['status'] == 'active'
assert len(result['firewall']['rules']['input']) == 1
def test_input_rule_len_change_1_0(mocker):
result = run_module_success(mocker, hetzner_firewall, {
'hetzner_user': '',
'hetzner_password': '',
'server_ip': '1.2.3.4',
'state': 'present',
'rules': {
},
}, [
FetchUrlCall('GET', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'active',
'whitelist_hos': True,
'port': 'main',
'rules': {
'input': [
{
'name': None,
'ip_version': 'ipv4',
'dst_ip': None,
'dst_port': None,
'src_ip': None,
'src_port': None,
'protocol': None,
'tcp_flags': None,
'action': 'discard',
},
],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
FetchUrlCall('POST', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'active',
'whitelist_hos': False,
'port': 'main',
'rules': {
'input': [],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL))
.expect_form_value('status', 'active')
.expect_form_value_absent('rules[input][0][action]'),
])
assert result['changed'] is True
assert result['diff']['before']['status'] == 'active'
assert result['diff']['after']['status'] == 'active'
assert len(result['diff']['before']['rules']['input']) == 1
assert len(result['diff']['after']['rules']['input']) == 0
assert result['firewall']['status'] == 'active'
assert len(result['firewall']['rules']['input']) == 0
def test_input_rule_len_change_1_2(mocker):
result = run_module_success(mocker, hetzner_firewall, {
'hetzner_user': '',
'hetzner_password': '',
'server_ip': '1.2.3.4',
'state': 'present',
'rules': {
'input': [
{
'ip_version': 'ipv4',
'dst_port': 80,
'protocol': 'tcp',
'action': 'accept',
},
{
'ip_version': 'ipv4',
'action': 'discard',
},
],
},
}, [
FetchUrlCall('GET', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'active',
'whitelist_hos': True,
'port': 'main',
'rules': {
'input': [
{
'name': None,
'ip_version': 'ipv4',
'dst_ip': None,
'dst_port': None,
'src_ip': None,
'src_port': None,
'protocol': None,
'tcp_flags': None,
'action': 'discard',
},
],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
FetchUrlCall('POST', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'active',
'whitelist_hos': False,
'port': 'main',
'rules': {
'input': [
{
'name': None,
'ip_version': 'ipv4',
'dst_ip': None,
'dst_port': '80',
'src_ip': None,
'src_port': None,
'protocol': 'tcp',
'tcp_flags': None,
'action': 'accept',
},
{
'name': None,
'ip_version': 'ipv4',
'dst_ip': None,
'dst_port': None,
'src_ip': None,
'src_port': None,
'protocol': None,
'tcp_flags': None,
'action': 'discard',
},
],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL))
.expect_form_value('status', 'active')
.expect_form_value('rules[input][0][action]', 'accept')
.expect_form_value('rules[input][1][action]', 'discard')
.expect_form_value_absent('rules[input][2][action]'),
])
assert result['changed'] is True
assert result['diff']['before']['status'] == 'active'
assert result['diff']['after']['status'] == 'active'
assert len(result['diff']['before']['rules']['input']) == 1
assert len(result['diff']['after']['rules']['input']) == 2
assert result['firewall']['status'] == 'active'
assert len(result['firewall']['rules']['input']) == 2
# Idempotency checks: change one value
def create_params(parameter, *values):
assert len(values) > 1
result = []
for i in range(1, len(values)):
result.append((parameter, values[i - 1], values[i]))
return result
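# e.g. create_params('name', None, '', 'Test') yields
#   [('name', None, ''), ('name', '', 'Test')]
# i.e. one (parameter, before, after) transition per adjacent value pair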
def flatten(list_of_lists):
result = []
for l in list_of_lists:
result.extend(l)
return result
@pytest.mark.parametrize("parameter, before, after", flatten([
create_params('name', None, '', 'Test', 'Test', 'foo', '', None),
create_params('ip_version', 'ipv4', 'ipv4', 'ipv6', 'ipv6'),
create_params('dst_ip', None, '1.2.3.4/24', '1.2.3.4/32', '1.2.3.4/32', None),
create_params('dst_port', None, '80', '80-443', '80-443', None),
create_params('src_ip', None, '1.2.3.4/24', '1.2.3.4/32', '1.2.3.4/32', None),
create_params('src_port', None, '80', '80-443', '80-443', None),
create_params('protocol', None, 'tcp', 'tcp', 'udp', 'udp', None),
create_params('tcp_flags', None, 'syn', 'syn|fin', 'syn|fin', 'syn&fin', '', None),
create_params('action', 'accept', 'accept', 'discard', 'discard'),
]))
def test_input_rule_value_change(mocker, parameter, before, after):
input_call = {
'ip_version': 'ipv4',
'action': 'discard',
}
input_before = {
'name': None,
'ip_version': 'ipv4',
'dst_ip': None,
'dst_port': None,
'src_ip': None,
'src_port': None,
'protocol': None,
'tcp_flags': None,
'action': 'discard',
}
input_after = {
'name': None,
'ip_version': 'ipv4',
'dst_ip': None,
'dst_port': None,
'src_ip': None,
'src_port': None,
'protocol': None,
'tcp_flags': None,
'action': 'discard',
}
if after is not None:
input_call[parameter] = after
input_before[parameter] = before
input_after[parameter] = after
calls = [
FetchUrlCall('GET', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'active',
'whitelist_hos': True,
'port': 'main',
'rules': {
'input': [input_before],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
]
changed = (before != after)
if changed:
after_call = (
FetchUrlCall('POST', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'active',
'whitelist_hos': False,
'port': 'main',
'rules': {
'input': [input_after],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL))
.expect_form_value('status', 'active')
.expect_form_value_absent('rules[input][1][action]')
)
if parameter != 'ip_version':
after_call.expect_form_value('rules[input][0][ip_version]', 'ipv4')
if parameter != 'action':
after_call.expect_form_value('rules[input][0][action]', 'discard')
if after is not None:
after_call.expect_form_value('rules[input][0][{0}]'.format(parameter), after)
else:
after_call.expect_form_value_absent('rules[input][0][{0}]'.format(parameter))
calls.append(after_call)
result = run_module_success(mocker, hetzner_firewall, {
'hetzner_user': '',
'hetzner_password': '',
'server_ip': '1.2.3.4',
'state': 'present',
'rules': {
'input': [input_call],
},
}, calls)
assert result['changed'] == changed
assert result['diff']['before']['status'] == 'active'
assert result['diff']['after']['status'] == 'active'
assert len(result['diff']['before']['rules']['input']) == 1
assert len(result['diff']['after']['rules']['input']) == 1
assert result['diff']['before']['rules']['input'][0][parameter] == before
assert result['diff']['after']['rules']['input'][0][parameter] == after
assert result['firewall']['status'] == 'active'
assert len(result['firewall']['rules']['input']) == 1
assert result['firewall']['rules']['input'][0][parameter] == after
# Idempotency checks: IP address normalization
@pytest.mark.parametrize("ip_version, parameter, before_normalized, after_normalized, after", [
('ipv4', 'src_ip', '1.2.3.4/32', '1.2.3.4/32', '1.2.3.4'),
('ipv6', 'src_ip', '1:2:3::4/128', '1:2:3::4/128', '1:2:3::4'),
('ipv6', 'dst_ip', '1:2:3::4/128', '1:2:3::4/128', '1:2:3:0::4'),
('ipv6', 'dst_ip', '::/0', '::/0', '0:0::0/0'),
])
def test_input_rule_ip_normalization(mocker, ip_version, parameter, before_normalized, after_normalized, after):
assert ip_version in ('ipv4', 'ipv6')
assert parameter in ('src_ip', 'dst_ip')
input_call = {
'ip_version': ip_version,
'action': 'discard',
}
input_before = {
'name': None,
'ip_version': ip_version,
'dst_ip': None,
'dst_port': None,
'src_ip': None,
'src_port': None,
'protocol': None,
'tcp_flags': None,
'action': 'discard',
}
input_after = {
'name': None,
'ip_version': ip_version,
'dst_ip': None,
'dst_port': None,
'src_ip': None,
'src_port': None,
'protocol': None,
'tcp_flags': None,
'action': 'discard',
}
if after is not None:
input_call[parameter] = after
input_before[parameter] = before_normalized
input_after[parameter] = after_normalized
calls = [
FetchUrlCall('GET', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'active',
'whitelist_hos': True,
'port': 'main',
'rules': {
'input': [input_before],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL)),
]
changed = (before_normalized != after_normalized)
if changed:
after_call = (
FetchUrlCall('POST', 200)
.result_json({
'firewall': {
'server_ip': '1.2.3.4',
'server_number': 1,
'status': 'active',
'whitelist_hos': False,
'port': 'main',
'rules': {
'input': [input_after],
},
},
})
.expect_url('{0}/firewall/1.2.3.4'.format(BASE_URL))
.expect_form_value('status', 'active')
.expect_form_value_absent('rules[input][1][action]')
)
after_call.expect_form_value('rules[input][0][ip_version]', ip_version)
after_call.expect_form_value('rules[input][0][action]', 'discard')
after_call.expect_form_value('rules[input][0][{0}]'.format(parameter), after_normalized)
calls.append(after_call)
result = run_module_success(mocker, hetzner_firewall, {
'hetzner_user': '',
'hetzner_password': '',
'server_ip': '1.2.3.4',
'state': 'present',
'rules': {
'input': [input_call],
},
}, calls)
assert result['changed'] == changed
assert result['diff']['before']['status'] == 'active'
assert result['diff']['after']['status'] == 'active'
assert len(result['diff']['before']['rules']['input']) == 1
assert len(result['diff']['after']['rules']['input']) == 1
assert result['diff']['before']['rules']['input'][0][parameter] == before_normalized
assert result['diff']['after']['rules']['input'][0][parameter] == after_normalized
assert result['firewall']['status'] == 'active'
assert len(result['firewall']['rules']['input']) == 1
assert result['firewall']['rules']['input'][0][parameter] == after_normalized
|
[
"[email protected]"
] | |
16176f545abb87396a77a21ab271f39fd5f52f3b
|
0347ed077da6f98d2740809d8582928485afc4e6
|
/wechatutility/wechatReceiveMsg.py
|
2640346a39717961923d2e95c0907c0cbfc19776
|
[] |
no_license
|
AhMay/wechatpublic_practice
|
140c45f2ca4c7423bea15dc5b57d26d032a8a03b
|
b38683c5e8e6a1db078c164342fead10af78818a
|
refs/heads/master
| 2020-11-27T23:57:20.358331 | 2019-12-23T01:15:27 | 2019-12-23T01:15:27 | 229,653,225 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,240 |
py
|
'''Message types a WeChat Official Account receives from its users.
https://developers.weixin.qq.com/doc/offiaccount/Message_Management/Receiving_standard_messages.html
'''
import xml.etree.ElementTree as ET
def parse_xml(web_data):
if len(web_data) == 0:
return None
xmlData = ET.fromstring(web_data)
msg_type = xmlData.find('MsgType').text
if msg_type == 'text':
return ReceiveTextMsg(xmlData)
elif msg_type == 'image':
return ReceiveImageMsg(xmlData)
elif msg_type == 'voice':
return ReceiveVoiceMsg(xmlData)
elif msg_type in ('video','shortvideo'):
return ReceiveVideoMsg(xmlData)
elif msg_type == 'location':
return ReceiveLocationMsg(xmlData)
elif msg_type == 'link':
return ReceiveLinkMsg(xmlData)
elif msg_type == 'event':
recEventObj = ReceiveEventMsg(xmlData)
if recEventObj.Event == 'LOCATION':
return ReveiveLocationEventMsg(xmlData)
return recEventObj
else:
        print('Unrecognized message type: ' + msg_type)
return None
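# Minimal usage sketch (web_data is the raw XML body POSTed by the WeChat
# server to the callback URL; reply_with() is hypothetical):
#   msg = parse_xml(web_data)
#   if isinstance(msg, ReceiveTextMsg):
#       reply_with(msg.Content)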
class ReceiveMsg(object):
    '''Base class for all received messages'''
def __init__(self,xmlData):
self.ToUserName = xmlData.find('ToUserName').text
self.FromUserName = xmlData.find('FromUserName').text
self.CreateTime = xmlData.find('CreateTime').text
self.MsgType = xmlData.find('MsgType').text
self.MsgId =''
if xmlData.find('MsgId') is not None:
self.MsgId = xmlData.find('MsgId').text
class ReceiveTextMsg(ReceiveMsg):
    '''Text message'''
def __init__(self,xmlData):
super(ReceiveTextMsg,self).__init__(xmlData)
self.Content = xmlData.find('Content').text
class ReceiveImageMsg(ReceiveMsg):
    '''Image message'''
def __init__(self,xmlData):
super(ReceiveImageMsg,self).__init__(xmlData)
self.PicUrl = xmlData.find('PicUrl').text
self.MediaId = xmlData.find('MediaId').text
class ReceiveVoiceMsg(ReceiveMsg):
    '''Voice message'''
def __init__(self,xmlData):
super(ReceiveVoiceMsg,self).__init__(xmlData)
self.Format = xmlData.find('Format').text
self.MediaId = xmlData.find('MediaId').text
        self.Recognition = ''
        if xmlData.find('Recognition') is not None:
            self.Recognition = xmlData.find('Recognition').text or ''
class ReceiveVideoMsg(ReceiveMsg):
    '''Video and short-video messages'''
def __init__(self,xmlData):
super(ReceiveVideoMsg,self).__init__(xmlData)
self.ThumbMediaId = xmlData.find('ThumbMediaId').text
self.MediaId = xmlData.find('MediaId').text
class ReceiveLocationMsg(ReceiveMsg):
    '''Location message'''
def __init__(self,xmlData):
super(ReceiveLocationMsg,self).__init__(xmlData)
self.Location_X = xmlData.find('Location_X').text
self.Location_Y = xmlData.find('Location_Y').text
self.Scale = xmlData.find('Scale').text
self.Label = xmlData.find('Label').text
class ReceiveLinkMsg(ReceiveMsg):
    '''Link message'''
def __init__(self,xmlData):
super(ReceiveLinkMsg,self).__init__(xmlData)
self.Title = xmlData.find('Title').text
self.Description = xmlData.find('Description').text
self.Url = xmlData.find('Url').text
class ReceiveEventMsg(ReceiveMsg):
    '''Generic event message'''
def __init__(self, xmlData):
super(ReceiveEventMsg,self).__init__(xmlData)
self.Event = xmlData.find('Event').text
        self.EventKey = (False, '')
        if xmlData.find('EventKey') is not None:
            self.EventKey = (True, xmlData.find('EventKey').text or '')
        self.Ticket = (False, '')
        if xmlData.find('Ticket') is not None:
            self.Ticket = (True, xmlData.find('Ticket').text or '')
class ReceiveLocationEventMsg(ReceiveEventMsg):
    '''Location-report (LOCATION) event'''
    def __init__(self, xmlData):
        super(ReceiveLocationEventMsg, self).__init__(xmlData)
        self.Latitude = xmlData.find('Latitude').text
        self.Longitude = xmlData.find('Longitude').text
        self.Precision = xmlData.find('Precision').text
class ReceiveViewEventMsg(ReceiveEventMsg):
    '''view and mini-program (view_miniprogram) menu events'''
def __init__(self, xmlData):
super(ReceiveViewEventMsg, self).__init__(xmlData)
self.MenuId = xmlData.find('MenuId').text
class ReceiveScanCodeEventMsg(ReceiveEventMsg):
    '''scancode_push and scancode_waitmsg menu events'''
    def __init__(self, xmlData):
        super(ReceiveScanCodeEventMsg, self).__init__(xmlData)
        # ScanType and ScanResult are nested inside the ScanCodeInfo element,
        # so a direct find('ScanResult') on the root would return None
        self.ScanType = xmlData.find('ScanCodeInfo/ScanType').text
        self.ScanResult = xmlData.find('ScanCodeInfo/ScanResult').text
class ReceivePicEventMsg(ReceiveEventMsg):
    '''pic_sysphoto, pic_photo_or_album and pic_weixin menu events'''
    def __init__(self, xmlData):
        super(ReceivePicEventMsg, self).__init__(xmlData)
        # collect the MD5 sums of the pictures attached to the event;
        # the ScanCodeInfo lookups copied from the scancode class above
        # were removed because those elements do not exist for pic events
        picItems = xmlData.iter('PicMd5Sum')
        self.SendPicsInfo = [x.text for x in picItems]
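if __name__ == '__main__':
    # minimal smoke test with a hand-built text-message payload; the field
    # layout follows the official docs linked at the top of this file
    sample = '''<xml>
    <ToUserName><![CDATA[gh_account]]></ToUserName>
    <FromUserName><![CDATA[user_openid]]></FromUserName>
    <CreateTime>1482000000</CreateTime>
    <MsgType><![CDATA[text]]></MsgType>
    <Content><![CDATA[hello]]></Content>
    <MsgId>1234567890123456</MsgId>
    </xml>'''
    msg = parse_xml(sample)
    print(msg.MsgType + ': ' + msg.Content)  # -> text: hello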
|
[
"[email protected]"
] | |
74701596a433057380ef80ccb963ff63b98ec52e
|
f9b1157ac3486709f2655810b196086dc181adc2
|
/backend/test_upgrade_plan_de_1173/wsgi.py
|
0def1be27bb2fd809c50a6995188f5110839d443
|
[] |
no_license
|
crowdbotics-apps/test-upgrade-plan-de-1173
|
21364bb141fefbab45e1ee53283f9f298482f1f9
|
fc5e4584fc8ddb571b2f76b52f4c7c9c3ee3bd25
|
refs/heads/master
| 2023-02-14T20:46:31.539334 | 2019-12-11T18:29:30 | 2019-12-11T18:29:30 | 227,433,239 | 0 | 0 | null | 2023-01-24T00:56:00 | 2019-12-11T18:28:03 |
JavaScript
|
UTF-8
|
Python
| false | false | 427 |
py
|
"""
WSGI config for test_upgrade_plan_de_1173 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_upgrade_plan_de_1173.settings')
application = get_wsgi_application()
|
[
"[email protected]"
] | |
023e8cfd0b3aeb66d815ceac84e680bcb62c4bca
|
9137e1ccf070b3f9d92d8635662c569639910ae5
|
/apps/modules/setting/apis/session_set.py
|
07590e1711af07d77f4c7d0d5f050da905e11017
|
[
"BSD-2-Clause"
] |
permissive
|
zhangning123798/osroom
|
08e87a4c32e9d92807a66109e7074723279179cc
|
21859b77b8980ccb8a5392f02c76bd552b2bf653
|
refs/heads/master
| 2020-05-26T10:00:38.658768 | 2019-05-23T08:48:03 | 2019-05-23T08:48:03 | 188,195,911 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 571 |
py
|
# -*-coding:utf-8-*-
from flask import request
from apps.core.blueprint import api
from apps.core.flask.permission import permission_required
from apps.core.flask.response import response_format
from apps.modules.setting.process.session_set import language_set
__author__ = "Allen Woo"
@api.route('/session/language-set', methods=['PUT'])
@permission_required(use_default=False)
def api_language_set():
"""
    PUT:
    Set the current session language.
    language: <str>, e.g. en_US, zh_CN
:return:
"""
data = language_set()
return response_format(data)
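# Example client call (a sketch; the actual URL prefix depends on how the
# `api` blueprint is registered, and a logged-in session is required by the
# permission check above):
#
#   curl -X PUT -d "language=en_US" http://localhost:5000/api/session/language-set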
|
[
"[email protected]"
] | |
49eaa8cbc43819fdcce800289fb77bb25c70faaa
|
bf11991193cd09d5d95e2706ed4168c36221f582
|
/HW1/q2.py
|
e8aa57704cc17595e677bc46cfc4b7a1d61f2ed9
|
[] |
no_license
|
NightKirie/COMPUTER-VISION-AND-DEEP-LEARNING_2020
|
f3569bf06cb824aace5fa3c3b01c26909b7b1a68
|
8b770c12bdc1e149c12c424d0637ab8f35df0370
|
refs/heads/main
| 2023-02-05T07:45:03.399847 | 2020-12-18T12:51:10 | 2020-12-18T12:51:10 | 304,540,542 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,464 |
py
|
import cv2
import numpy as np
WIDTH = 11
HEIGHT = 8
def draw(img, corners, imgpts):
img = cv2.line(img, tuple(imgpts[0].ravel()), tuple(imgpts[1].ravel()), (0,0,255), 5)
img = cv2.line(img, tuple(imgpts[0].ravel()), tuple(imgpts[2].ravel()), (0,0,255), 5)
img = cv2.line(img, tuple(imgpts[0].ravel()), tuple(imgpts[3].ravel()), (0,0,255), 5)
img = cv2.line(img, tuple(imgpts[1].ravel()), tuple(imgpts[2].ravel()), (0,0,255), 5)
img = cv2.line(img, tuple(imgpts[1].ravel()), tuple(imgpts[3].ravel()), (0,0,255), 5)
img = cv2.line(img, tuple(imgpts[2].ravel()), tuple(imgpts[3].ravel()), (0,0,255), 5)
return img
def augmentedReality():
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
obj_point_list = []
img_point_list = []
img_list = []
objp = np.zeros((WIDTH*HEIGHT,3), np.float32)
objp[:,:2] = np.mgrid[0:WIDTH,0:HEIGHT].T.reshape(-1,2)
axis = np.float32([[3, 3, -3], [1, 1, 0], [3, 5, 0], [5, 1, 0]]).reshape(-1,3)
for i in range(1, 6):
img = cv2.imread(f"Q1_image/{i}.bmp")
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray_img, (WIDTH, HEIGHT), None)
if ret == True:
obj_point_list.append(objp)
img_point_list.append(corners)
ret, mtx_p, dist, rvecs, tvecs = cv2.calibrateCamera(obj_point_list, img_point_list, gray_img.shape[::-1], None, None)
for i in range(1, 6):
img = cv2.imread(f"Q2_image/{i}.bmp")
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray_img, (WIDTH, HEIGHT), None)
if ret == True:
corners2 = cv2.cornerSubPix(gray_img,corners,(11,11),(-1,-1),criteria)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera([objp], [corners], gray_img.shape[::-1], None, None)
# Find the rotation and translation vectors.
ret, rvecs, tvecs = cv2.solvePnP(objp, corners2, mtx_p, dist)
# project 3D points to image plane
imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs, mtx_p, dist)
img = draw(img,corners2,imgpts)
img = cv2.resize(img, (720, 720))
img_list.append(img)
for i in range(0, 5):
cv2.imshow('img',img_list[i])
cv2.waitKey(500)
if i == 4:
cv2.destroyAllWindows()
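if __name__ == '__main__':
    # run the demo directly (assumes the Q1_image/ and Q2_image/ folders
    # referenced above sit next to this script)
    augmentedReality()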
|
[
"[email protected]"
] | |
da9b60fd9c51d7ee28d89f0b14483faf7101b364
|
423670088b9795d645cacc760dc9d9b0df0a2b34
|
/vlttng/conf_template.py
|
fa86571506a859086343dda2d80b64c24c00f653
|
[
"MIT"
] |
permissive
|
eepp/vlttng
|
4711ecadec7249795190566809a51f589b051bcd
|
27c775db01ba13f27cb77bd5e6eca022c0db42bc
|
refs/heads/master
| 2022-10-06T02:31:23.733577 | 2022-09-14T18:59:32 | 2022-09-14T18:59:32 | 59,193,437 | 9 | 4 |
MIT
| 2021-05-13T16:33:16 | 2016-05-19T09:38:00 |
Python
|
UTF-8
|
Python
| false | false | 2,563 |
py
|
# The MIT License (MIT)
#
# Copyright (c) 2016 Philippe Proulx <eepp.ca>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
conf_template = '''#!/usr/bin/env bash
# The MIT License (MIT)
#
# Copyright (c) 2016 Philippe Proulx <eepp.ca>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Run this file from your shell to configure the {name} project
# within this virtual environment.
# Make sure we're in the right current working directory
cd {src_path}
# Set the original build-time environment
{exports}
# Configure {name}
{conf_lines}
'''
|
[
"[email protected]"
] | |
7df1adc8cd446b6046a1e4172ae5851a82b39653
|
ec77edd3a7db89f8b12202fe6ecc21ce2897bce0
|
/examples/digits_basic.py
|
06b871b0b71a2b192d0751a0f558f4853ad66f3e
|
[] |
no_license
|
lantunes/wehd
|
db2a3ea9edd4b9b012f1bb8e56d6f7d331f386e6
|
c13f9fb3b417d800fab09ab6f724c350c2f4d8cc
|
refs/heads/main
| 2023-07-12T06:07:00.410568 | 2021-08-16T14:34:05 | 2021-08-16T14:34:05 | 395,657,147 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,764 |
py
|
import matplotlib
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import mode
from sklearn import datasets
from sklearn.manifold import TSNE
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.metrics.cluster import rand_score
from sklearn.preprocessing import MinMaxScaler
from sklearn_extra.cluster import KMedoids
import warnings
warnings.filterwarnings("ignore")
if __name__ == '__main__':
digits = datasets.load_digits()
X = digits.data
y = digits.target
scaler = MinMaxScaler().fit(X)
X = scaler.transform(X)
kmedoids = KMedoids(n_clusters=10, metric="euclidean").fit(X)
labels = kmedoids.labels_
print("Rand Index of K-medoids classifier: %s" % rand_score(y, labels))
# re-map the cluster labels so that they match class labels
labels_remapped = np.zeros_like(labels)
for i in range(10):
mask = (labels == i)
labels_remapped[mask] = mode(y[mask])[0]
print("accuracy score: %s" % accuracy_score(y, labels_remapped))
print("confusion matrix: \n%s" % confusion_matrix(y, labels_remapped))
tsne = TSNE(n_components=2, verbose=1, perplexity=50, n_iter=500, learning_rate=10, metric="euclidean")
result = tsne.fit_transform(X)
norm = matplotlib.colors.Normalize(vmin=0, vmax=10, clip=True)
mapper = cm.ScalarMappable(norm=norm, cmap=cm.Accent)
colors = [mapper.to_rgba(label) for label in labels]
fig = plt.figure()
plt.scatter(result[:, 0], result[:, 1], c=colors, marker="o", edgecolors="black", picker=True)
def onpick(event):
ind = event.ind
print()
for i in ind:
print(y[i])
fig.canvas.mpl_connect("pick_event", onpick)
plt.show()
|
[
"[email protected]"
] | |
f97b906b4a697556ced6a92198111804e4fcb722
|
1e53216c58f3c7843031721305590b83dbaed3f2
|
/week_five/log_and_reg/log_and_reg/settings.py
|
c6158a025afa487c688f6c615e4526c97c9271a4
|
[] |
no_license
|
MTaylorfullStack/python_july_20
|
991852ba12d6f06d6b93b8efc60b66ee311b5cb3
|
bdfb0d9a74300f2d6743ac2d108571692ca43ad9
|
refs/heads/master
| 2022-12-12T18:03:00.886048 | 2020-08-27T23:53:31 | 2020-08-27T23:53:31 | 277,956,745 | 2 | 2 | null | 2023-06-30T20:06:11 | 2020-07-08T01:09:34 |
Python
|
UTF-8
|
Python
| false | false | 3,118 |
py
|
"""
Django settings for log_and_reg project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'o7qi48o7r49tj(i7mj245h==i2=@cv_n^0wyck!&f9*1nu&*z7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'logregapp',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'log_and_reg.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'log_and_reg.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
bb938cd5a049722984a0234f69971204ca9c7af4
|
4310871d6d0ad59332360a0d6e980e0a41d25fdd
|
/comment/serializers.py
|
a79978c15226c0e45d99ee2139bc53813bc6fc26
|
[] |
no_license
|
jacegem/drf-test
|
21bf7eaa47aeb045c31d2ba8239e587804c27a86
|
00105b6ecd38b68c58f2e5b498e0fb7ad2de099d
|
refs/heads/master
| 2022-12-12T13:18:02.168914 | 2020-08-04T09:08:10 | 2020-08-04T09:08:10 | 146,073,595 | 0 | 0 | null | 2022-12-08T09:34:16 | 2018-08-25T07:01:35 |
Python
|
UTF-8
|
Python
| false | false | 201 |
py
|
from rest_framework import serializers
from .models import Comment
class CommentSerializer(serializers.ModelSerializer):
class Meta:
model = Comment
fields = '__all__'
|
[
"[email protected]"
] | |
36801ff15a033f2b405073e33139f9994ac55639
|
98879590858368d5c32c389db31b761e479a0ab8
|
/python-features/iterators.py
|
fc8bf99d47289b0d870921c1830127ecb4664be9
|
[] |
no_license
|
zhiruchen/get_hands_dirty
|
0bbf3719113dcf474baae571ecd55e5c234072a3
|
af98a11bbeb8183428fe41cb7c9fa9a2354983e9
|
refs/heads/master
| 2020-04-17T12:00:44.275247 | 2017-06-24T16:28:43 | 2017-06-24T16:28:43 | 66,988,195 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,966 |
py
|
# -*- encoding: utf-8 -*-
"""
Iterators
http://anandology.com/python-practice-book/iterators.html
http://nvie.com/posts/iterators-vs-generators/
An iterator is always iterable, but an iterable need not be an iterator.
"""
from collections import Iterable, Iterator
class yrange(object):
"""迭代器,yrange的对象既是可迭代的也是迭代器"""
def __init__(self, n):
self.n = n
self.i = 0
def __iter__(self):
return self
def next(self):
if self.i < self.n:
i = self.i
self.i += 1
return i
else:
raise StopIteration()
class zrange(object):
"""zrange的实例仅是可迭代的"""
def __init__(self, n):
self.n = n
def __iter__(self):
return zrange_iter(self.n)
class zrange_iter(object):
def __init__(self, n):
self.n = n
self.i = 0
def __iter__(self):
return self
def next(self):
if self.i < self.n:
i = self.i
self.i += 1
return i
else:
raise StopIteration()
class reverse_iter(object):
"""反向迭代器"""
def __init__(self, lst):
self.lst = lst
self.start = -1
self.end = 0 - len(lst)
def __iter__(self):
return self
def next(self):
if self.start >= self.end:
index = self.start
self.start -= 1
return self.lst[index]
raise StopIteration()
def test_yrange():
assert list(yrange(4)) == [0, 1, 2, 3]
assert sum(yrange(4)) == 6
assert isinstance(yrange(3), Iterable) is True
assert isinstance(yrange(3), Iterator) is True
def test_zrange():
z = zrange(4)
z_list1 = list(z)
z_list2 = list(z)
assert z_list1 == [0, 1, 2, 3]
assert z_list2 == [0, 1, 2, 3]
assert isinstance(z, Iterable) is True
assert isinstance(z, Iterator) is False
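def test_reverse_iter():
    # added for symmetry with the tests above; reverse_iter was otherwise
    # untested
    assert list(reverse_iter([1, 2, 3, 4])) == [4, 3, 2, 1]
    assert isinstance(reverse_iter([1, 2]), Iterator) is True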
if __name__ == '__main__':
    test_yrange()
    test_zrange()
    test_reverse_iter()
|
[
"[email protected]"
] | |
ba4660a4acd3ec99cc6333f14b87878bb163b698
|
d54ef1dee58c239d3a5bb74cbf8d8a717dcdcb33
|
/paytest/test_paytest.py
|
6cf586627df8c94c2155c66542e5871c2a50535e
|
[
"BSD-3-Clause"
] |
permissive
|
bhoovd/lightningd-plugins
|
88cef7c51b5291ef14b32761a1b4ceeb26ee5784
|
a7a0007dcee1fcf3cc401ca83663a66b678e07d3
|
refs/heads/master
| 2023-03-14T20:50:46.115145 | 2021-02-16T10:15:54 | 2021-02-26T10:28:07 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,873 |
py
|
from pyln.testing.fixtures import * # noqa: F401,F403
from pyln.testing.utils import DEVELOPER
from pyln.client import RpcError
import os
import unittest
import pytest
from pprint import pprint
pluginopt = {'plugin': os.path.join(os.path.dirname(__file__), "paytest.py")}
EXPERIMENTAL_FEATURES = int(os.environ.get("EXPERIMENTAL_FEATURES", "0"))
def test_start(node_factory):
node_factory.get_node(options=pluginopt)
def test_invoice(node_factory):
l1 = node_factory.get_node(options=pluginopt)
inv = l1.rpc.testinvoice('03'*33)
details = l1.rpc.decodepay(inv['invoice'])
pprint(details)
def test_simple_pay(node_factory):
""" l1 generates and pays an invoice on behalf of l2.
"""
l1, l2 = node_factory.line_graph(2, opts=pluginopt, wait_for_announce=True)
inv = l1.rpc.testinvoice(destination=l2.info['id'], amount=1)['invoice']
details = l1.rpc.decodepay(inv)
pprint(details)
# Paying the invoice without the reinterpretation from paytest
# will cause an unknown payment details directly.
with pytest.raises(RpcError, match=r'WIRE_INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS'):
l1.rpc.pay(inv)
def test_mpp_pay(node_factory):
""" l1 send a payment that is going to be split.
"""
l1, l2 = node_factory.line_graph(2, opts=pluginopt, wait_for_announce=True)
res = l1.rpc.paytest(l2.info['id'], 10**8)
l2.daemon.wait_for_log(r'Received 100000000/100000000 with [0-9]+ parts')
parts = res['status']['attempts']
assert len(parts) > 2 # Initial split + >1 part
failures = [p['failure']['data'] for p in parts if 'failure' in p and 'data' in p['failure']]
pprint(failures)
outcomes = [f['failcode'] for f in failures]
is16399 = [p == 16399 for p in outcomes]
assert all(is16399)
assert len(is16399) >= 1
|
[
"[email protected]"
] | |
2a7781ca167806d15563fda92dec8c31c733744d
|
df44affab179c2546fb3e0d1dc29eebcfdf51c1c
|
/toughradius/txradius/authorize.py
|
fedf4305359d7f9665ba6b081ea9725c186fcd57
|
[] |
no_license
|
sailorhdx/taurusradius
|
121c508e7faffaddcd5326d2b6d3710eaf0ed08e
|
92d30820611a0c9102ae41713ea3c35437a3c6ee
|
refs/heads/master
| 2021-01-22T02:28:31.543338 | 2017-06-17T02:15:33 | 2017-06-17T02:15:33 | 92,362,551 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,662 |
py
|
#!/usr/bin/env python
# coding=utf-8
import os
import six
from twisted.python import log
from twisted.internet import protocol
from twisted.internet import reactor, defer
from toughradius.txradius.radius import packet
from toughradius.txradius.ext import ikuai
from toughradius.txradius import message
from toughradius.txradius.radius import dictionary
from toughradius import txradius
RADIUS_DICT = dictionary.Dictionary(os.path.join(os.path.dirname(txradius.__file__), 'dictionary/dictionary'))
def get_dm_packet(vendor_id, nas_secret, nas_addr, coa_port = 3799, **kwargs):
coa_request = message.CoAMessage(code=packet.DisconnectRequest, dict=RADIUS_DICT, secret=six.b(str(nas_secret)), **kwargs)
username = coa_request['User-Name'][0]
if int(vendor_id) == ikuai.VENDOR_ID:
pkg = ikuai.create_dm_pkg(six.b(str(nas_secret)), username)
return (pkg, nas_addr, coa_port)
else:
return (coa_request.RequestPacket(), nas_addr, coa_port)
class CoAClient(protocol.DatagramProtocol):
def __init__(self, vendor_id, dictionary, nas_secret, nas_addr, coa_port = 3799, debug = False):
self.dictionary = dictionary
self.secret = six.b(str(nas_secret))
self.addr = nas_addr
self.port = int(coa_port)
self.vendor_id = int(vendor_id)
self.debug = debug
self.uport = reactor.listenUDP(0, self)
def close(self):
if self.transport is not None:
self.transport.stopListening()
self.transport = None
return
def onError(self, err):
log.err('Packet process error: %s' % str(err))
reactor.callLater(0.01, self.close)
return err
def onResult(self, resp):
reactor.callLater(0.01, self.close)
return resp
    def onTimeout(self):
        if not self.deferred.called:
            defer.timeout(self.deferred)
def sendDisconnect(self, **kwargs):
timeout_sec = kwargs.pop('timeout', 5)
coa_req = message.CoAMessage(code=packet.DisconnectRequest, dict=self.dictionary, secret=self.secret, **kwargs)
username = coa_req['User-Name'][0]
if self.vendor_id == ikuai.VENDOR_ID:
pkg = ikuai.create_dm_pkg(self.secret, username)
if self.debug:
log.msg('send ikuai radius Coa Request to (%s:%s) [username:%s]: %s' % (self.addr,
self.port,
username,
repr(pkg)))
self.transport.write(pkg, (self.addr, self.port))
else:
if self.debug:
log.msg('send radius Coa Request to (%s:%s) [username:%s] : %s' % (self.addr,
self.port,
username,
coa_req))
self.transport.write(coa_req.RequestPacket(), (self.addr, self.port))
        self.deferred = defer.Deferred()
        self.deferred.addCallbacks(self.onResult, self.onError)
        reactor.callLater(timeout_sec, self.onTimeout)
        return self.deferred
    def datagramReceived(self, datagram, addr):
        # Python 2 tuple parameters are invalid in Python 3; unpack manually
        host, port = addr
        try:
            response = packet.Packet(packet=datagram)
            if self.debug:
                log.msg('Received Radius Response from (%s:%s): %s' % (host, port, repr(response)))
            self.deferred.callback(response.code)
        except Exception as err:
            log.err('Invalid Response packet from %s: %s' % ((host, port), str(err)))
            self.deferred.errback(err)
def disconnect(vendor_id, dictionary, nas_secret, nas_addr, coa_port = 3799, debug = False, **kwargs):
return CoAClient(vendor_id, dictionary, nas_secret, nas_addr, coa_port, debug).sendDisconnect(**kwargs)
|
[
"[email protected]"
] | |
14455f7f45b76263d76c758b85a92c16f61f1e47
|
b38739b790ec087251280a352d38188bfc4ce9fa
|
/src/pretix/control/signals.py
|
25b9d8c53f1ce99cf5df5685d779dd8a2bc86c1a
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
marcelosantos/pretix
|
c4ab9f01d5b4adadb7a7257c9d0a5494c7211da4
|
badad709843a64d370608ab7ead7b796d9e49950
|
refs/heads/master
| 2020-03-11T10:13:12.615651 | 2018-04-17T08:20:26 | 2018-04-17T08:20:26 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,478 |
py
|
from django.dispatch import Signal
from pretix.base.signals import DeprecatedSignal, EventPluginSignal
restriction_formset = EventPluginSignal(
providing_args=["item"]
)
"""
This signal is sent out to build configuration forms for all restriction formsets
(see plugin API documentation for details).
As with all plugin signals, the ``sender`` keyword argument will contain the event.
"""
html_head = EventPluginSignal(
providing_args=["request"]
)
"""
This signal allows you to put code inside the HTML ``<head>`` tag
of every page in the backend. You will get the request as the keyword argument
``request`` and are expected to return plain HTML.
As with all plugin signals, the ``sender`` keyword argument will contain the event.
"""
nav_event = EventPluginSignal(
providing_args=["request"]
)
"""
This signal allows you to add additional views to the admin panel
navigation. You will get the request as a keyword argument ``request``.
Receivers are expected to return a list of dictionaries. The dictionaries
should contain at least the keys ``label`` and ``url``. You can also return
a fontawesome icon name with the key ``icon``, it will be respected depending
on the type of navigation. You should also return an ``active`` key with a boolean
set to ``True``, when this item should be marked as active. The ``request`` object
will have an attribute ``event``.
If you use this, you should read the documentation on :ref:`how to deal with URLs <urlconf>`
in pretix.
As with all plugin signals, the ``sender`` keyword argument will contain the event.
"""
nav_topbar = Signal(
providing_args=["request"]
)
"""
This signal allows you to add additional views to the top navigation bar.
You will get the request as a keyword argument ``request``.
Receivers are expected to return a list of dictionaries. The dictionaries
should contain at least the keys ``label`` and ``url``. You can also return
a fontawesome icon name with the key ``icon``, it will be respected depending
on the type of navigation. If set, on desktops only the ``icon`` will be shown.
The ``title`` property can be used to set the alternative text.
If you use this, you should read the documentation on :ref:`how to deal with URLs <urlconf>`
in pretix.
This is no ``EventPluginSignal``, so you do not get the event in the ``sender`` argument
and you may get the signal regardless of whether your plugin is active.
"""
nav_global = Signal(
providing_args=["request"]
)
"""
This signal allows you to add additional views to the navigation bar when no event is
selected. You will get the request as a keyword argument ``request``.
Receivers are expected to return a list of dictionaries. The dictionaries
should contain at least the keys ``label`` and ``url``. You can also return
a fontawesome icon name with the key ``icon``, it will be respected depending
on the type of navigation. You should also return an ``active`` key with a boolean
set to ``True``, when this item should be marked as active.
If you use this, you should read the documentation on :ref:`how to deal with URLs <urlconf>`
in pretix.
This is no ``EventPluginSignal``, so you do not get the event in the ``sender`` argument
and you may get the signal regardless of whether your plugin is active.
"""
event_dashboard_widgets = EventPluginSignal(
providing_args=[]
)
"""
This signal is sent out to include widgets in the event dashboard. Receivers
should return a list of dictionaries, where each dictionary can have the keys:
* content (str, containing HTML)
* display_size (str, one of "full" (whole row), "big" (half a row) or "small"
(quarter of a row). May be ignored on small displays, default is "small")
* priority (int, used for ordering, higher comes first, default is 1)
* url (str, optional, if the full widget should be a link)
As with all plugin signals, the ``sender`` keyword argument will contain the event.
An additional keyword argument ``subevent`` *can* contain a sub-event.
"""
user_dashboard_widgets = Signal(
providing_args=['user']
)
"""
This signal is sent out to include widgets in the personal user dashboard. Receivers
should return a list of dictionaries, where each dictionary can have the keys:
* content (str, containing HTML)
* display_size (str, one of "full" (whole row), "big" (half a row) or "small"
(quarter of a row). May be ignored on small displays, default is "small")
* priority (int, used for ordering, higher comes first, default is 1)
* url (str, optional, if the full widget should be a link)
This is a regular django signal (no pretix event signal).
"""
voucher_form_html = EventPluginSignal(
providing_args=['form']
)
"""
This signal allows you to add additional HTML to the form that is used for modifying vouchers.
You receive the form object in the ``form`` keyword argument.
As with all plugin signals, the ``sender`` keyword argument will contain the event.
"""
voucher_form_class = EventPluginSignal(
providing_args=['cls']
)
"""
This signal allows you to replace the form class that is used for modifying vouchers.
You will receive the default form class (or the class set by a previous plugin) in the
``cls`` argument so that you can inherit from it.
As with all plugin signals, the ``sender`` keyword argument will contain the event.
"""
voucher_form_validation = EventPluginSignal(
providing_args=['form']
)
"""
This signal allows you to add additional validation to the form that is used for
creating and modifying vouchers. You will receive the form instance in the ``form``
argument and the current data state in the ``data`` argument.
As with all plugin signals, the ``sender`` keyword argument will contain the event.
"""
quota_detail_html = EventPluginSignal(
providing_args=['quota']
)
"""
This signal allows you to append HTML to a Quota's detail view. You receive the
quota as argument in the ``quota`` keyword argument.
As with all plugin signals, the ``sender`` keyword argument will contain the event.
"""
organizer_edit_tabs = DeprecatedSignal(
providing_args=['organizer', 'request']
)
"""
Deprecated signal, no longer works. We just keep the definition so old plugins don't
break the installation.
"""
nav_organizer = Signal(
providing_args=['organizer', 'request']
)
"""
This signal is sent out to include tab links on the detail page of an organizer.
Receivers are expected to return a list of dictionaries. The dictionaries
should contain at least the keys ``label`` and ``url``. You should also return
an ``active`` key with a boolean set to ``True``, when this item should be marked
as active.
If your linked view should stay in the tab-like context of this page, we recommend
that you use ``pretix.control.views.organizer.OrganizerDetailViewMixin`` for your view
and your template inherits from ``pretixcontrol/organizers/base.html``.
This is a regular django signal (no pretix event signal). Receivers will be passed
the keyword arguments ``organizer`` and ``request``.
"""
order_info = EventPluginSignal(
providing_args=["order", "request"]
)
"""
This signal is sent out to display additional information on the order detail page
As with all plugin signals, the ``sender`` keyword argument will contain the event.
Additionally, the argument ``order`` and ``request`` are available.
"""
nav_event_settings = EventPluginSignal(
providing_args=['request']
)
"""
This signal is sent out to include tab links on the settings page of an event.
Receivers are expected to return a list of dictionaries. The dictionaries
should contain at least the keys ``label`` and ``url``. You should also return
an ``active`` key with a boolean set to ``True``, when this item should be marked
as active.
If your linked view should stay in the tab-like context of this page, we recommend
that you use ``pretix.control.views.event.EventSettingsViewMixin`` for your view
and your template inherits from ``pretixcontrol/event/settings_base.html``.
As with all plugin signals, the ``sender`` keyword argument will contain the event.
A second keyword argument ``request`` will contain the request object.
"""
event_settings_widget = EventPluginSignal(
providing_args=['request']
)
"""
This signal is sent out to include template snippets on the settings page of an event
that allows generating a pretix Widget code.
As with all plugin signals, the ``sender`` keyword argument will contain the event.
A second keyword argument ``request`` will contain the request object.
"""
|
[
"[email protected]"
] | |
25bb83c6f782b5a36d42a6c483e404476b3c2f1e
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/yfTyMb3SSumPQeuhm_5.py
|
80ed33b96fc160972e0d5109dcaa669d35356e56
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 617 |
py
|
"""
Write a function that efficiently calculates Fibonacci terms.
### Examples
fibonacci(1) ➞ 1
fibonacci(2) ➞ 1
fibonacci(4) ➞ 3
fibonacci(64) ➞ 10610209857723
### Notes
The input will always be a power of two.
"""
def fibonacci(n):
def fib_inner(n):
if n == 0:
return 0, 1
u, v = fib_inner(n >> 1)
q = (n & 2) - 1
u *= u
v *= v
if (n & 1):
return u + v, 3*v - 2*(u - q)
return 2*(v + q) - 3*u, u + v
u, v = fib_inner(n >> 1)
l = 2*v - u
if (n & 1):
q = (n & 2) - 1
return v * l + q
return u * l
|
[
"[email protected]"
] | |
b954d06a1d1d14afe11039a99c3494d4ccb1ac89
|
77327e4c6de68fab4061c5acdb569e95cb8a9fae
|
/TestEnv/PrisonerDilemma.py
|
92ce9bbf4e611e7790df41d38d79049698cca47e
|
[] |
no_license
|
jiankangren/RARL-1
|
577148384bd065708c868c854ff53bc8d16fac99
|
a7b28d9b56cfc7e5f832bb839d2c8f5db4d5b5ab
|
refs/heads/master
| 2020-04-08T06:33:53.706725 | 2018-08-27T22:45:48 | 2018-08-27T22:45:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 813 |
py
|
from rllab.envs.base import Env
from rllab.spaces import Box, Discrete
from rllab.envs.base import Step
class PrisonerDilemma(Env):
def __init__(self):
self.num_action = 2
def render(self):
pass
@property
def observation_space(self):
return Discrete(1)
@property
def action_space(self):
return Discrete(self.num_action)
def close(self):
pass
def reset(self):
return 0
def step(self, Action):
obs = 0
done = True
action = Action['action']
a1 = action[0]
a2 = action[1]
policy_num = Action['policy_num']
r = -100
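        # standard prisoner's dilemma payoffs (action 0 = cooperate,
        # 1 = defect): mutual cooperation -1, mutual defection -2,
        # sucker -3, temptation 0; for mixed outcomes the reward is
        # taken from the side selected by `policy_num`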
if a1 == 0:
if a2 == 0:
r = -1
else:
if policy_num == 1:
r = -3
else:
r = 0
else:
if a2 == 0:
if policy_num == 1:
r = 0
else:
r = -3
else:
r = -2
return Step(observation=obs, reward=r, done=done)
|
[
"[email protected]"
] | |
fd0348e317dfb0cc806d13e3c1787e7508e61c8a
|
f8d043a7941cb311d9ea8e991b7c5be3c461675f
|
/mac/shop/urls.py
|
a1db41ac4bb8b25b8045d600df7ea205fe297640
|
[] |
no_license
|
shreyakapadia10/Django-Project
|
acf5c56f95ea552b589dfc6f8819b92339b3a377
|
4e9f5c2697ef1ed07209f50b1d8ed8a1bd953b87
|
refs/heads/master
| 2023-05-02T18:47:08.892747 | 2021-05-25T11:11:54 | 2021-05-25T11:11:54 | 370,583,864 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 524 |
py
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='ShopHome'),
path('about/', views.about, name='AboutUs'),
path('contact/', views.contact, name='ContactUs'),
path('tracker/', views.tracker, name='TrackingStatus'),
path('search/', views.search, name='Search'),
path('product/<int:vid>', views.viewproduct, name='ViewProduct'),
path('checkout/', views.checkout, name='Checkout'),
path('handlerequest/', views.handlerequest, name='HandleRequest'),
]
|
[
"[email protected]"
] | |
fe3be7f503140e17f885a9751de4ac6ce8bd2a4e
|
234f0a885f6f6bffdfe21dcb4882ed9bc611029d
|
/fullcyclepy/helpers/taskschedule.py
|
ed6c9f97ea07397779e5441fbc22dddcd94fee81
|
[
"MIT"
] |
permissive
|
gitter-badger/fullcycle
|
4a0fe84d92f93a333094de76706c6aeb7c4b9402
|
1f21fb5bfacdaa7005f506bd6327689368b421d4
|
refs/heads/master
| 2020-03-28T07:04:31.463488 | 2018-09-06T23:15:00 | 2018-09-06T23:15:00 | 147,879,359 | 0 | 0 | null | 2018-09-07T22:08:08 | 2018-09-07T22:08:08 | null |
UTF-8
|
Python
| false | false | 1,188 |
py
|
import datetime
class TaskSchedule(object):
lastrun = None
start = None
pending_run = False
    # a default of 0 seconds disables the schedule
    # interval is in seconds
interval = 0
q = None
def __init__(self, run_on_init=False):
self.pending_run = run_on_init
def is_disabled(self):
return self.interval <= 0
def is_time_to_run(self):
        if self.is_disabled():
            return False
now = datetime.datetime.now()
if self.pending_run:
self.pending_run = False
return self.kick_it_off(True)
if self.lastrun is None and self.start is None:
#never run before, run now
return self.kick_it_off(True)
elif self.start is not None and self.lastrun is None:
#never run before and after start time
return self.kick_it_off(now > self.start)
else:
sincelast = now - self.lastrun
if sincelast.total_seconds() > self.interval:
return self.kick_it_off(True)
return False
def kick_it_off(self, dorun=False):
if dorun:
self.lastrun = datetime.datetime.now()
return dorun
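# A minimal usage sketch (hypothetical polling loop; `do_work` stands in
# for the actual task):
#
#   import time
#   schedule = TaskSchedule(run_on_init=True)
#   schedule.interval = 30            # run every 30 seconds
#   while True:
#       if schedule.is_time_to_run():
#           do_work()
#       time.sleep(1)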
|
[
"[email protected]"
] | |
91a59fef16036229feabbf59c9aa7c02b86b5d38
|
3f7fd5abd0fe3a516d620a6948f9079bc34c1f5e
|
/glooey/__init__.py
|
6790c321f675d52a9caeabef925ecb67ace45d5b
|
[
"MIT"
] |
permissive
|
wkentaro/glooey
|
f8cb4723e266a29941da41d5ab81f8d2b809d2f2
|
4eacfdc7c14b5903f1bc3d5d4fa2b355f5fc5ee1
|
refs/heads/master
| 2020-05-17T19:42:38.554766 | 2019-04-28T05:41:43 | 2019-04-28T05:41:43 | 183,922,793 | 1 | 0 |
MIT
| 2019-04-28T15:02:41 | 2019-04-28T15:02:40 | null |
UTF-8
|
Python
| false | false | 319 |
py
|
#!/usr/bin/env python3
__version__ = '0.1.2'
from .widget import *
from .root import *
from .containers import *
from .text import *
from .images import *
from .buttons import *
from .dialogs import *
from .scrolling import *
from .misc import *
from . import drawing
from .drawing import Color
from . import themes
|
[
"[email protected]"
] | |
0f1b8f58a2fabca481414c2c84477d370d059f5d
|
be4d32d35fd4af3cf4ecf3736c8e879d50b8ae37
|
/Python/Django/djangoform/djangoform/wsgi.py
|
145141d25d9c0de325f197f0995939cb185d6510
|
[] |
no_license
|
yangluo0901/CodingDojo_Assignment
|
f09bbec26f87b5b276fd6ef3c77f27d13518937e
|
44ccb5158b12c1656793bac76f1a7a707b147982
|
refs/heads/master
| 2021-10-11T21:12:14.889189 | 2019-01-29T19:52:56 | 2019-01-29T19:52:56 | 105,716,841 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 398 |
py
|
"""
WSGI config for djangoform project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djangoform.settings")
application = get_wsgi_application()
|
[
"[email protected]"
] | |
8ba2e00236e70179ab87d41710598309635d289b
|
c4dacd0f5e397422018460c268ec8375aebe6419
|
/pyRMSD/benchmark/alias/test/TestCondensedMatrix.py
|
1fe99c0cac1171d98093825018e25272436a89ee
|
[
"MIT"
] |
permissive
|
asford/pyRMSD
|
032c9e03094392a957dfc5650b28b6e70bcdf17a
|
8f1149fb631bfffebeb595c5b164d6945f7444fa
|
refs/heads/master
| 2021-01-15T20:04:10.524616 | 2013-08-01T12:56:04 | 2013-08-01T12:56:04 | 12,771,855 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,491 |
py
|
'''
Created on 30/01/2012
@author: victor
'''
import unittest
import scipy.spatial.distance as distance
import cStringIO
import random
from pyproclust.matrix.condensedMatrix import CondensedDistanceMatrix, load_condensed_matrix, calc_number_of_rows,complete_to_condensed,zero_condensed
from pyproclust.matrix.completeMatrix import CompleteDistanceMatrix
import numpy as np
class Test(unittest.TestCase):
def test_equal(self):
cm1 = CondensedDistanceMatrix([1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9])
cm2 = CondensedDistanceMatrix([1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9])
cm3 = CondensedDistanceMatrix([6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4])
cm4 = CondensedDistanceMatrix([6,7,8,9,0,1,2,3])
self.assertEqual(cm1 == cm2, True)
self.assertEqual(cm1 == cm3, False)
self.assertEqual(cm1 == cm4, False)
self.assertEqual(cm2 == cm3, False)
self.assertEqual(cm2 == cm4, False)
self.assertEqual(cm3 == cm4, False)
def test_compare_condensed_matrixes(self):
cm1 = CondensedDistanceMatrix([1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9])
cm2 = CondensedDistanceMatrix([6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4])
cm3 = CondensedDistanceMatrix([1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
cm4 = CondensedDistanceMatrix([0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5])
result_1 = cm1.compare_with(cm2)
result_2 = cm1.compare_with(cm3)
result_3 = cm3.compare_with(cm4,1.,2.)
result_4 = cm3.compare_with(cm4,1.,1.)
self.assertEqual(result_1, (5.0, 0.0))
self.assertEqual(result_2, (3.8421052631578947, 2.6008734948643863))
self.assertEqual(result_3, (0., 0.))
self.assertEqual(result_4, (0.5, 0.))
def test_get_number_of_rows(self):
random.seed()
for i in range(100): #@UnusedVariable
rows = random.randint(1,1000)
number_of_elements = (rows *(rows-1)) / 2
calculated_rows = calc_number_of_rows(number_of_elements)
self.assertEqual(rows,calculated_rows)
def test_normalize_condensed_matrix(self):
condensed = CondensedDistanceMatrix([ 1., 4.5, 8.5, 7.2, 4.5, 7.8, 6.7, 3.6,2.2, 2.])
expected = CondensedDistanceMatrix([0.0, 0.47, 1.0, 0.83, 0.47, 0.91, 0.76, 0.35, 0.16, 0.13])
minmax = condensed.get_minimum_and_maximum()
condensed.normalize(minmax[0], minmax[1])
for i in range(len(condensed.get_data())):
self.assertAlmostEqual(condensed.get_data()[i],expected.get_data()[i],2)
def test_data_sharing(self):
mylist = [ 1., 4.5, 8.5, 7.2, 4.5, 7.8, 6.7, 3.6,2.2, 2.]
myarray = np.array([ 1., 4.5, 8.5, 7.2, 4.5, 7.8, 6.7, 3.6,2.2, 2.])
mylistaarray = np.array(mylist)
condensed1 = CondensedDistanceMatrix(mylist)
condensed2 = CondensedDistanceMatrix(myarray)
condensed3 = CondensedDistanceMatrix(mylistaarray)
mylist[5] = 0.
self.assertEqual(False, mylist[5] == condensed1.get_data()[5])
myarray[5] = 0.
self.assertEqual(False, myarray[5] == condensed2.get_data()[5])
mylistaarray[5] = 0.
self.assertEqual(False, mylistaarray[5] == condensed3.get_data()[5])
mycontents = condensed3.get_data()
mycontents[5] = 0.
self.assertEqual(True, mycontents[5] == condensed3.get_data()[5] and\
condensed3.get_data()[5] == 0.)
def test_gen_condensed_matrix(self):
obs = [(1,1),(2,1),(4,5),(7,7),(5,7)]
## distance matrix
distance_matrix = CompleteDistanceMatrix(distance.cdist(obs,obs))
## lower distance matrix (wo diagonal)
expected_distance_condensed = CondensedDistanceMatrix(distance.pdist(obs))
distance_condensed = complete_to_condensed(distance_matrix)
self.assertEqual(True,distance_condensed == expected_distance_condensed)
def test_validate_dimensions(self):
condensed_matrix_1 = CondensedDistanceMatrix([ 1., 4.5, 8.5, 7.2, 4.5, 7.8, 6.7, 3.6,2.2, 2.])
self.assertEqual(True,condensed_matrix_1._CondensedDistanceMatrix__validate_dimensions())
condensed_matrix_2 = CondensedDistanceMatrix([ 1., 4.5, 8.5, 7.2, 4.5, 7.8, 6.7, 3.6])
self.assertEqual(False,condensed_matrix_2._CondensedDistanceMatrix__validate_dimensions())
def test_minmax_condensed(self):
condensed_matrix = CondensedDistanceMatrix([ 1.,
4.5, 8.5,
7.2, 4.5, 7.8,
6.7, 3.6,2.2, 2.0])
expected = (1,8.5)
self.assertEqual(condensed_matrix.get_minimum_and_maximum(),expected)
def test_save_condensed_matrix(self):
# with final spaces!
expected_matrix_string = """1.0 4.5 7.2 6.7
8.5 4.5 3.6
7.8 2.2
2.0
"""
condensed_matrix = CondensedDistanceMatrix([1.0, 4.5, 7.2, 6.7,
8.5, 4.5, 3.6,
7.8, 2.2,
2.0])
output = cStringIO.StringIO()
condensed_matrix.save(output)
self.assertEqual(expected_matrix_string,output.getvalue())
def test_load_condensed_matrix(self):
matrix_string = """1.0
4.5 8.5
7.2 4.5 7.8
6.7 3.6 2.2 2.0
"""
expected_matrix = CondensedDistanceMatrix([ 1., 4.5, 8.5, 7.2, 4.5, 7.8, 6.7, 3.6,2.2, 2.])
input = cStringIO.StringIO(matrix_string)
loaded_matrix = load_condensed_matrix(input)
for i in range(len(expected_matrix.get_data())):
self.assertAlmostEqual(expected_matrix.get_data()[i],\
loaded_matrix.get_data()[i],3)
def test_item_access(self):
condensed_matrix_1 = CondensedDistanceMatrix([1.0, 4.5,7.2,
8.5, 4.5,
7.8])
condensed_matrix_2 = CondensedDistanceMatrix([.0]*6)
complete_matrix = [[0.0, 1.0, 4.5, 7.2],
[1.0, 0.0, 8.5, 4.5],
[4.5, 8.5, 0.0, 7.8],
[7.2, 4.5, 7.8, 0.0]]
row_len = condensed_matrix_1.row_length
for i in range(row_len):
for j in range(row_len):
condensed_matrix_2[i,j] = complete_matrix[i][j]
## The access for a complete and a condensed matrix is exactly the same
for i in range(row_len):
for j in range(row_len):
self.assertEquals(condensed_matrix_1[i,j],complete_matrix[i][j])
## And we can build a condensed matrix as a complete matrix
self.assertItemsEqual(condensed_matrix_1.get_data(), condensed_matrix_2.get_data())
def test_zero_condensed(self):
row_len = 5
zeroed_condensed = zero_condensed(row_len)
self.assertEqual(row_len,zeroed_condensed.row_length)
for i in range(row_len):
for j in range(row_len):
self.assertEquals(zeroed_condensed[i,j],0.)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
[
"[email protected]"
] | |
e68071c5d957adac8446ee734833fc462a73c82f
|
ac6922fbaa51c3068883c3d60b407350f13213f9
|
/src/einsteinpy/tests/test_metric/test_schwarzschild.py
|
42b0c43107a4dc2b105cb234aeecafd5dac58e47
|
[
"MIT",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
divya144/einsteinpy
|
719c961e3c4698c1d7df77a78a3f9b51f234d077
|
e5f7c15fb80f8fef8d1e8ca41188d9ac7ee668ec
|
refs/heads/master
| 2020-05-03T09:26:27.003181 | 2019-04-09T13:33:35 | 2019-04-10T10:32:36 | 178,553,798 | 2 | 1 |
MIT
| 2019-03-30T12:08:14 | 2019-03-30T12:08:14 | null |
UTF-8
|
Python
| false | false | 7,272 |
py
|
import warnings
import numpy as np
import pytest
from astropy import units as u
from numpy.testing import assert_allclose
from einsteinpy import constant
from einsteinpy.metric import Schwarzschild
from einsteinpy.utils import schwarzschild_radius
_c = constant.c.value
@pytest.mark.parametrize(
"pos_vec, vel_vec, time, M, start_lambda, end_lambda, OdeMethodKwargs",
[
(
[306 * u.m, np.pi / 2 * u.rad, np.pi / 2 * u.rad],
[0 * u.m / u.s, 0 * u.rad / u.s, 951.0 * u.rad / u.s],
0 * u.s,
4e24 * u.kg,
0.0,
0.002,
{"stepsize": 0.5e-6},
),
(
[1 * u.km, 0.15 * u.rad, np.pi / 2 * u.rad],
[
0.1 * _c * u.m / u.s,
0.5e-5 * _c * u.rad / u.s,
0.5e-4 * _c * u.rad / u.s,
],
0 * u.s,
5.972e24 * u.kg,
0.0,
0.0001,
{"stepsize": 0.5e-6},
),
(
[50 * u.km, np.pi / 2 * u.rad, np.pi / 2 * u.rad],
[0.1 * _c * u.m / u.s, 2e-7 * _c * u.rad / u.s, 1e-5 * u.rad / u.s],
0 * u.s,
5.972e24 * u.g,
0.0,
0.001,
{"stepsize": 5e-6},
),
],
)
def test_calculate_trajectory(
pos_vec, vel_vec, time, M, start_lambda, end_lambda, OdeMethodKwargs
):
cl = Schwarzschild.from_spherical(pos_vec, vel_vec, time, M)
ans = cl.calculate_trajectory(
start_lambda=start_lambda,
end_lambda=end_lambda,
OdeMethodKwargs=OdeMethodKwargs,
)
_c, _scr = constant.c.value, schwarzschild_radius(M).value
ans = ans[1]
testarray = (
(1 - (_scr / ans[:, 1])) * np.square(ans[:, 4])
- (np.square(ans[:, 5])) / ((1 - (_scr / ans[:, 1])) * (_c ** 2))
- np.square(ans[:, 1] / _c)
* (np.square(ans[:, 6]) + np.square(np.sin(ans[:, 2])) * np.square(ans[:, 7]))
)
comparearray = np.ones(shape=ans[:, 4].shape, dtype=float)
assert_allclose(testarray, comparearray, 1e-4)
def test_calculate_trajectory2():
    # based on the revolution of the Earth around the Sun
# data from https://en.wikipedia.org/wiki/Earth%27s_orbit
M = 1.989e30 * u.kg
distance_at_perihelion = 147.10e6 * u.km
speed_at_perihelion = 30.29 * u.km / u.s
angular_vel = (speed_at_perihelion / distance_at_perihelion) * u.rad
pos_vec = [distance_at_perihelion, np.pi / 2 * u.rad, 0 * u.rad]
vel_vec = [0 * u.km / u.s, 0 * u.rad / u.s, angular_vel]
end_lambda = ((1 * u.year).to(u.s)).value
cl = Schwarzschild.from_spherical(pos_vec, vel_vec, 0 * u.s, M)
ans = cl.calculate_trajectory(
start_lambda=0.0,
end_lambda=end_lambda,
OdeMethodKwargs={"stepsize": end_lambda / 2e3},
)[1]
    # speed should be 29.29 km/s at aphelion (where r is max)
    i = np.argmax(ans[:, 1])  # index where radial distance is max
    v_aphelion = (((ans[i][1] * ans[i][7]) * (u.m / u.s)).to(u.km / u.s)).value
    assert_allclose(v_aphelion, 29.29, rtol=0.01)
def test_calculate_trajectory3():
    # same test as test_calculate_trajectory2(),
    # but initialized with cartesian coordinates
    # and the function returning cartesian coordinates
M = 1.989e30 * u.kg
distance_at_perihelion = 147.10e6 * u.km
speed_at_perihelion = 30.29 * u.km / u.s
pos_vec = [
distance_at_perihelion / np.sqrt(2),
distance_at_perihelion / np.sqrt(2),
0 * u.km,
]
vel_vec = [
-1 * speed_at_perihelion / np.sqrt(2),
speed_at_perihelion / np.sqrt(2),
0 * u.km / u.h,
]
end_lambda = ((1 * u.year).to(u.s)).value
cl = Schwarzschild.from_cartesian(pos_vec, vel_vec, 0 * u.min, M)
ans = cl.calculate_trajectory(
start_lambda=0.0,
end_lambda=end_lambda,
return_cartesian=True,
OdeMethodKwargs={"stepsize": end_lambda / 2e3},
)[1]
    # speed should be 29.29 km/s at aphelion (where r is max)
    R = np.sqrt(ans[:, 1] ** 2 + ans[:, 2] ** 2 + ans[:, 3] ** 2)
    i = np.argmax(R)  # index where radial distance is max
    v_aphelion = (
        (np.sqrt(ans[i, 5] ** 2 + ans[i, 6] ** 2 + ans[i, 7] ** 2) * (u.m / u.s)).to(
            u.km / u.s
        )
    ).value
    assert_allclose(v_aphelion, 29.29, rtol=0.01)
@pytest.mark.parametrize(
"pos_vec, vel_vec, time, M, start_lambda, end_lambda, OdeMethodKwargs, return_cartesian",
[
(
[306 * u.m, np.pi / 2 * u.rad, np.pi / 2 * u.rad],
[0 * u.m / u.s, 0.1 * u.rad / u.s, 951.0 * u.rad / u.s],
0 * u.s,
4e24 * u.kg,
0.0,
0.0003,
{"stepsize": 0.3e-6},
True,
),
(
[1 * u.km, 0.15 * u.rad, np.pi / 2 * u.rad],
[_c * u.m / u.s, 0.5e-5 * _c * u.rad / u.s, 1e-4 * _c * u.rad / u.s],
0 * u.s,
5.972e24 * u.kg,
0.0,
0.0004,
{"stepsize": 0.5e-6},
False,
),
],
)
def test_calculate_trajectory_iterator(
pos_vec,
vel_vec,
time,
M,
start_lambda,
end_lambda,
OdeMethodKwargs,
return_cartesian,
):
cl1 = Schwarzschild.from_spherical(pos_vec, vel_vec, time, M)
arr1 = cl1.calculate_trajectory(
start_lambda=start_lambda,
end_lambda=end_lambda,
OdeMethodKwargs=OdeMethodKwargs,
return_cartesian=return_cartesian,
)[1]
cl2 = Schwarzschild.from_spherical(pos_vec, vel_vec, time, M)
it = cl2.calculate_trajectory_iterator(
start_lambda=start_lambda,
OdeMethodKwargs=OdeMethodKwargs,
return_cartesian=return_cartesian,
)
arr2_list = list()
for _, val in zip(range(100), it):
arr2_list.append(val[1])
arr2 = np.array(arr2_list)
assert_allclose(arr1[:100, :], arr2, rtol=1e-10)
def test_calculate_trajectory_iterator_RuntimeWarning():
pos_vec = [306 * u.m, np.pi / 2 * u.rad, np.pi / 2 * u.rad]
vel_vec = [0 * u.m / u.s, 0.01 * u.rad / u.s, 10 * u.rad / u.s]
time = 0 * u.s
M = 1e25 * u.kg
start_lambda = 0.0
OdeMethodKwargs = {"stepsize": 0.4e-6}
cl = Schwarzschild.from_spherical(pos_vec, vel_vec, time, M)
with warnings.catch_warnings(record=True) as w:
it = cl.calculate_trajectory_iterator(
start_lambda=start_lambda,
OdeMethodKwargs=OdeMethodKwargs,
stop_on_singularity=True,
)
for _, _ in zip(range(1000), it):
pass
assert len(w) >= 1
def test_calculate_trajectory_iterator_RuntimeWarning2():
pos_vec = [306 * u.m, np.pi / 2 * u.rad, np.pi / 3 * u.rad]
vel_vec = [0 * u.m / u.s, 0.01 * u.rad / u.s, 10 * u.rad / u.s]
time = 0 * u.s
M = 1e25 * u.kg
start_lambda = 0.0
OdeMethodKwargs = {"stepsize": 0.4e-6}
cl = Schwarzschild.from_spherical(pos_vec, vel_vec, time, M)
with warnings.catch_warnings(record=True) as w:
it = cl.calculate_trajectory_iterator(
start_lambda=start_lambda,
OdeMethodKwargs=OdeMethodKwargs,
stop_on_singularity=False,
)
for _, _ in zip(range(1000), it):
pass
assert len(w) >= 1
|
[
"[email protected]"
] | |
a29f11c438bd1ae1ff2edaed687eb601ffaa6d46
|
8fc2ab3d29a30e603e19b30bb9517928de529167
|
/CoinChange_5.py
|
9879dd2cfd48bef094087426f31b8a20a3a81491
|
[] |
no_license
|
rushilchugh/Practise
|
35a9861bec6786580dc0a440eb25d78e43cb7bc9
|
98fd593b95dad641bef1d519c6c6ed1daaae630f
|
refs/heads/master
| 2020-03-13T21:14:14.013604 | 2018-04-27T12:23:50 | 2018-04-27T12:23:50 | 131,291,684 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 400 |
py
|
import math
def coinChange(cList, S):
    # minVals[i] holds the minimum number of coins from cList needed to
    # make amount i (math.inf if i is unreachable)
    minVals = [math.inf for _ in range(S + 1)]
    minVals[0] = 0
    for i in range(1, S + 1):
        for currCoin in cList:
            # use currCoin if it fits and improves the best known count
            if currCoin <= i and minVals[i - currCoin] + 1 < minVals[i]:
                minVals[i] = minVals[i - currCoin] + 1
    return minVals
print(coinChange([1, 5, 7], 8))
|
[
"[email protected]"
] | |
aa697365a29cb901be7dae97bff8fa8573350419
|
a2e638cd0c124254e67963bda62c21351881ee75
|
/Extensions/Settlement/FPythonCode/FSettlementCommitterFunctions.py
|
bbb921bda4c49e2e4f2cd0efe447daea3a2bc283
|
[] |
no_license
|
webclinic017/fa-absa-py3
|
1ffa98f2bd72d541166fdaac421d3c84147a4e01
|
5e7cc7de3495145501ca53deb9efee2233ab7e1c
|
refs/heads/main
| 2023-04-19T10:41:21.273030 | 2021-05-10T08:50:05 | 2021-05-10T08:50:05 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,110 |
py
|
""" Compiled: 2020-09-18 10:38:49 """
#__src_file__ = "extensions/settlement/etc/FSettlementCommitterFunctions.py"
import acm
#-------------------------------------------------------------------------
def CommitCommitters(committerList, logger):
commitSuccessful = True
acm.BeginTransaction()
try:
for committer in committerList:
settlement = committer.GetSettlement()
RunSTPAndUpdateStateChart(settlement)
committer.Commit()
acm.CommitTransaction()
except Exception as e:
acm.AbortTransaction()
commitSuccessful = False
logger.LP_Log("Exception occurred while committing settlements: {}".format(str(e)))
logger.LP_Flush()
return commitSuccessful
#-------------------------------------------------------------------------
def RunSTPAndUpdateStateChart(settlement):
if settlement.IsValidForSTP():
settlement.STP()
stateChart = acm.Operations.GetMappedSettlementProcessStateChart(settlement)
settlement.StateChart(stateChart)
if settlement.IsValidForSTP():
settlement.STP()
|
[
"[email protected]"
] | |
1db686d84707ba2314d219fbd59ee026669dd20e
|
14de6d507e471d582a7e7f5cba898f72f6ba186d
|
/python/Linked-List-Cycle/hashmap.py
|
2ac2a62d573e60b6ccf32acf0e8d80d75b77303c
|
[
"MIT"
] |
permissive
|
yutong-xie/Leetcode-Solution
|
a7d9c1f73a0fecd9de1d04dbd4c06393959dd95a
|
6578f288a757bf76213030b73ec3319a7baa2661
|
refs/heads/master
| 2023-03-30T01:59:58.554650 | 2021-03-27T21:09:46 | 2021-03-27T21:09:46 | 290,101,434 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 643 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright 2020, Yutong Xie, UIUC.
Using hashmap to test whether a cycle exists in the linked list
'''
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def hasCycle(self, head):
"""
:type head: ListNode
:rtype: bool
"""
hashmap = {}
while head:
if head in hashmap:
return True
else:
hashmap[head] = 0
head = head.next
return False
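# Quick sanity check (assumes the ListNode class sketched above):
#
#   a, b, c = ListNode(1), ListNode(2), ListNode(3)
#   a.next, b.next, c.next = b, c, b   # cycle: b -> c -> b
#   Solution().hasCycle(a)             # -> True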
|
[
"[email protected]"
] | |
0f41900cd8ccc0a02af26e8e6ac41f8c02048d26
|
7caa438706a423dd9779a81f8345fcf1ec11e921
|
/NXT-Python/pyglet-1.2.4/examples/opengl_3.py
|
0298cc248eb88ac3b54cfbdfafdf472714967189
|
[
"BSD-3-Clause"
] |
permissive
|
tamarinvs19/python-learning
|
5dd2582f5dc504e19a53e9176677adc5170778b0
|
1e514ad7ca8f3d2e2f785b11b0be4d57696dc1e9
|
refs/heads/master
| 2021-07-15T13:23:24.238594 | 2021-07-08T07:07:21 | 2021-07-08T07:07:21 | 120,604,826 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,616 |
py
|
#!/usr/bin/python
# $Id:$
'''In order to use the new features of OpenGL 3, you must explicitly create
an OpenGL 3 context. You can do this by supplying the `major_version` and
`minor_version` attributes for a GL Config.
This example creates an OpenGL 3 context, prints the version string to stdout,
and exits.
At time of writing, only the beta nvidia driver on Windows and Linux support
OpenGL 3, and requires an 8-series or higher.
On Windows, OpenGL 3 API must be explicitly enabled using the nvemulate tool
[1]. Additionally, at time of writing the latest driver did not yet support
forward compatible or debug contexts.
On Linux, the only driver that currently exposes the required GLX extensions
is 177.61.02 -- later drivers (177.67, 177.68, 177.7*, 177.8*, 180.06) seem to
be missing the extensions.
[1] http://developer.nvidia.com/object/nvemulate.html
'''
import pyglet
# Specify the OpenGL version explicitly to request 3.0 features, including
# GLSL 1.3.
#
# Some other attributes relevant to OpenGL 3:
# forward_compatible = True To request a context without deprecated
# functionality
# debug = True To request a debug context
config = pyglet.gl.Config(major_version=3, minor_version=0)
# Create a context matching the above configuration. Will fail if
# OpenGL 3 is not supported by the driver.
window = pyglet.window.Window(config=config, visible=False)
# Print the version of the context created.
print 'OpenGL version:', window.context.get_info().get_version()
window.close()
|
[
"[email protected]"
] | |
cbffe481b8c2609d89a052878599e76a03d759bc
|
95697a9f8fed6d45cb8ae9ae2525873c99cc7cfb
|
/Project File/02. Second Project - notice board/server/View/V1/API/service/ShowUser.py
|
33e58e2227326af8328954721c03870669d60b25
|
[] |
no_license
|
parkjinhong03/Python-Flask
|
d30c7447c70eb0cbda0454bfd2f2168347209adb
|
5732f17c594c1fc213940b214c0beafd4448bc14
|
refs/heads/master
| 2022-02-24T21:04:35.868873 | 2019-09-04T02:13:49 | 2019-09-04T02:13:49 | 198,320,040 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 474 |
py
|
from View.V1.Function import LoginCheck
import os
# User list router
def showuser():
if LoginCheck.logincheck() == False:
return 'Please Login First!'
count = 0
Userlist = ''
for root, dirs, files in os.walk('./Data/List'):
for dir in dirs:
count = count + 1
Userlist = Userlist + str(count) + '. '
Userlist = Userlist + dir + '\n'
return 'You can choose this user\'s list:\n' + Userlist
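# Example response (illustrative), assuming ./Data/List contains the
# directories "alice" and "bob":
#   You can choose this user's list:
#   1. alice
#   2. bob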
|
[
"[email protected]"
] | |
978d109a9d79229a04673c409ce440e1dc8754e1
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/Introduction_to_numerical_programming_using_Python_and_CPP_Beu/Ch09/Python/P09-LinFit.py
|
ad49008506665cc9671c465c648ff7ed9fc70fb7
|
[] |
no_license
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134 | 2023-08-14T10:05:37 | 2023-08-14T10:05:37 | 72,460,321 | 223 | 174 | null | 2022-10-24T12:15:06 | 2016-10-31T17:24:00 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 2,416 |
py
|
# Linear fit of a model to observed data points
from modfunc import *
from graphlib import *
# main
nn = [0]*4 # end-indexes of the 3 plots
col = [""]*4 # colors of plots
sty = [0]*4 # styles of plots
n = 5 # number of observed data
nfit = 2 # number of points plotted from model
n1 = n + nfit; n2 = n + 2*nfit # end indexes
x = [0]*(n2+1); y = [0]*(n2+1) # observed data plus appended model points
sigmy = [0]*(n+1) # standard deviations of observed data
x[1] = 1e0; y[1] = 0.8e0 # data points
x[2] = 2e0; y[2] = 2.1e0
x[3] = 3e0; y[3] = 2.8e0
x[4] = 4e0; y[4] = 4.0e0
x[5] = 5e0; y[5] = 4.4e0
iopt = 0 # least squares fit: equal errors sigmy
(a, b, sigma, sigmb, chi2) = LinFit(x,y,sigmy,n,iopt)
print("Least squares fit:")
print("a = {0:8.4f} +/- {1:8.4f}".format(a,sigma))
print("b = {0:8.4f} +/- {1:8.4f}".format(b,sigmb))
print("Chi^2 = {0:8.4f}".format(chi2))
h = (x[n]-x[1])/(nfit-1)
for i in range(1,nfit+1): # append model points
x[n+i] = x[1] + (i-1)*h
y[n+i] = a*x[n+i] + b # regression line
for i in range(1,n+1): sigmy[i] = 0.15*y[i] # generate standard deviations
iopt = 1 # Chi-square fit: different errors sigmy
(a, b, sigma, sigmb, chi2) = LinFit(x,y,sigmy,n,iopt)
print("\nChi-square fit:")
print("a = {0:8.4f} +/- {1:8.4f}".format(a,sigma))
print("b = {0:8.4f} +/- {1:8.4f}".format(b,sigmb))
print("Chi^2 = {0:8.4f}".format(chi2))
for i in range(1,nfit+1): # append model points
x[n1+i] = x[n+i]
y[n1+i] = a*x[n+i] + b # Chi-square regression line
GraphInit(800,600)
nn[1] = n ; col[1] = "black"; sty[1] = 4 # data points
nn[2] = n1; col[2] = "red" ; sty[2] = -1 # least squares fit
nn[3] = n2; col[3] = "blue" ; sty[3] = 1 # Chi-square fit
MultiPlot(x,y,sigmy,nn,col,sty,3,10,0.5e0,5.5e0,1,0e0,0e0,0,
0.15,0.95,0.15,0.85,"x","y","Linear fit")
MainLoop()
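# For reference, the closed-form solution the least-squares fit evaluates for
# y = a*x + b (standard textbook result; LinFit's actual internals live in
# modfunc):
#   a = (n*sum(x*y) - sum(x)*sum(y)) / (n*sum(x^2) - sum(x)^2)
#   b = (sum(y) - a*sum(x)) / n
# The chi-square variant uses the same expressions with each point weighted
# by 1/sigmy[i]^2.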
|
[
"[email protected]"
] | |
6176f6f471d91cdedce7764c2d165f4fab302ff0
|
9d89530e784922173aa1c032dcfaf772a26cf99e
|
/vulnerabilities/tests/test_suse.py
|
a4695d8265175e094b60103b31d56577c44d9269
|
[
"Python-2.0",
"Apache-2.0"
] |
permissive
|
nileshprasad137/vulnerablecode
|
7da3d9c15e436919bedb29d3bfeb574a233f3a5b
|
4677f70c654a15da529a80d19d7de1ca013ef8eb
|
refs/heads/main
| 2023-08-22T23:51:30.806190 | 2021-10-08T13:13:32 | 2021-10-08T13:13:32 | 418,471,773 | 1 | 0 |
Apache-2.0
| 2021-10-18T11:33:24 | 2021-10-18T11:33:24 | null |
UTF-8
|
Python
| false | false | 7,207 |
py
|
import os
import unittest
import xml.etree.ElementTree as ET
from vulnerabilities.oval_parser import OvalParser
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
TEST_DATA = os.path.join(BASE_DIR, "test_data/")
class TestSUSEOvalParser(unittest.TestCase):
@classmethod
def setUpClass(cls):
xml_doc = ET.parse(os.path.join(TEST_DATA, "suse_oval_data.xml"))
translator = {"less than": "<"}
        # All elements that require "equals" are ignored (because they are not useful)
cls.parsed_oval = OvalParser(translator, xml_doc)
def setUp(self):
self.definition_1 = self.parsed_oval.all_definitions[0]
self.definition_2 = self.parsed_oval.all_definitions[1]
def test_get_definitions(self):
assert len(self.parsed_oval.all_definitions) == 2
assert (
self.parsed_oval.all_definitions[0].getId() == "oval:org.opensuse.security:def:20094112"
)
assert (
self.parsed_oval.all_definitions[1].getId() == "oval:org.opensuse.security:def:20112767"
)
def test_get_tests_of_definition(self):
definition_1_test_ids = {
"oval:org.opensuse.security:tst:2009281999",
"oval:org.opensuse.security:tst:2009282000",
}
definition_2_test_ids = {
"oval:org.opensuse.security:tst:2009271113",
"oval:org.opensuse.security:tst:2009271114",
}
assert definition_1_test_ids == {
i.getId() for i in self.parsed_oval.get_tests_of_definition(self.definition_1)
}
assert definition_2_test_ids == {
i.getId() for i in self.parsed_oval.get_tests_of_definition(self.definition_2)
}
def test_get_vuln_id_from_definition(self):
vuln_id_1 = "CVE-2009-4112"
vuln_id_2 = "CVE-2011-2767"
assert vuln_id_1 == self.parsed_oval.get_vuln_id_from_definition(self.definition_1)
assert vuln_id_2 == self.parsed_oval.get_vuln_id_from_definition(self.definition_2)
def test_get_object_state_of_test(self):
# This method is inherited as it is from UbuntuOvalParser
# this test ensures that the method works with suse OVAL documents
assert len(self.parsed_oval.oval_document.getTests()) == 9
test_1 = self.parsed_oval.oval_document.getTests()[0]
test_2 = self.parsed_oval.oval_document.getTests()[1]
obj_t1, state_t1 = self.parsed_oval.get_object_state_of_test(test_1)
obj_t2, state_t2 = self.parsed_oval.get_object_state_of_test(test_2)
assert state_t1.getId() == "oval:org.opensuse.security:ste:2009068342"
assert state_t2.getId() == "oval:org.opensuse.security:ste:2009072069"
assert obj_t2.getId() == "oval:org.opensuse.security:obj:2009031297"
assert obj_t1.getId() == "oval:org.opensuse.security:obj:2009031246"
def test_get_pkgs_from_obj(self):
assert len(self.parsed_oval.oval_document.getObjects()) == 5
obj_t1 = self.parsed_oval.oval_document.getObjects()[0]
obj_t2 = self.parsed_oval.oval_document.getObjects()[1]
pkg_set1 = set(self.parsed_oval.get_pkgs_from_obj(obj_t1))
pkg_set2 = set(self.parsed_oval.get_pkgs_from_obj(obj_t2))
assert pkg_set1 == {"openSUSE-release"}
        # In a full run we won't get pkg_set1 because we won't obtain
        # its object, due to the filters that avoid such tests in the first place
assert pkg_set2 == {"cacti"}
def test_get_version_range_from_state(self):
assert len(self.parsed_oval.oval_document.getStates()) == 4
state_1 = self.parsed_oval.oval_document.getStates()[0]
state_2 = self.parsed_oval.oval_document.getStates()[1]
exp_range_1 = None
exp_range_2 = "<1.2.11-lp151.3.6"
        # In a full run we won't get exp_range_1 because we won't obtain
        # its state, due to the filters that avoid such tests in the first place
assert self.parsed_oval.get_version_range_from_state(state_1) == exp_range_1
assert self.parsed_oval.get_version_range_from_state(state_2) == exp_range_2
def test_get_urls_from_definition(self):
def1_urls = {
"http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2009-4112",
"https://www.suse.com/security/cve/CVE-2009-4112.html",
"https://bugzilla.suse.com/1122535",
"https://bugzilla.suse.com/558664",
}
assert def1_urls == self.parsed_oval.get_urls_from_definition(self.definition_1)
def2_urls = {
"http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2011-2767",
"https://bugzilla.suse.com/1156944",
"https://www.suse.com/security/cve/CVE-2011-2767.html",
}
assert def2_urls == self.parsed_oval.get_urls_from_definition(self.definition_2)
def test_get_data(self):
expected_data = [
{
"test_data": [
{
"package_list": ["cacti"],
"version_ranges": "<1.2.11-lp151.3.6",
},
{
"package_list": ["cacti-spine"],
"version_ranges": "<1.2.11-lp151.3.6",
},
],
"description": '\n Cacti 0.8.7e and earlier allows remote authenticated administrators to gain privileges by modifying the "Data Input Method" for the "Linux - Get Memory Usage" setting to contain arbitrary commands.\n ',
"vuln_id": "CVE-2009-4112",
"reference_urls": {
"https://bugzilla.suse.com/1122535",
"https://bugzilla.suse.com/558664",
"http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2009-4112",
"https://www.suse.com/security/cve/CVE-2009-4112.html",
},
},
{
"test_data": [
{
"package_list": ["apache2-mod_perl"],
"version_ranges": "<2.0.11-lp151.3.3",
},
{
"package_list": ["apache2-mod_perl-devel"],
"version_ranges": "<2.0.11-lp151.3.3",
},
],
"description": "\n mod_perl 2.0 through 2.0.10 allows attackers to execute arbitrary Perl code by placing it in a user-owned .htaccess file, because (contrary to the documentation) there is no configuration option that permits Perl code for the administrator's control of HTTP request processing without also permitting unprivileged users to run Perl code in the context of the user account that runs Apache HTTP Server processes.\n ",
"vuln_id": "CVE-2011-2767",
"reference_urls": {
"https://bugzilla.suse.com/1156944",
"https://www.suse.com/security/cve/CVE-2011-2767.html",
"http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2011-2767",
},
},
]
assert expected_data == self.parsed_oval.get_data()
|
[
"[email protected]"
] | |
1be8a4ba8920c53045cf4597fe0649599d965eeb
|
cd9f819b968def4f9b57448bdd926dc5ffa06671
|
/B_Python程式設計大數據資料分析_蔡明志_碁峰_2018/ch12/shape.py
|
6abc3387132728cee32128b7b8749b663d682949
|
[] |
no_license
|
AaronCHH/jb_pyoop
|
06c67f3c17e722cf18147be4ae0fac81726e4cbc
|
356baf0963cf216db5db7e11fb67234ff9b31b68
|
refs/heads/main
| 2023-04-02T05:55:27.477763 | 2021-04-07T01:48:04 | 2021-04-07T01:48:13 | 344,676,005 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 421 |
py
|
import math
class Shape:
def __init__(self, xPoint = 0, yPoint = 0):
self.__xPoint = xPoint
self.__yPoint = yPoint
def getPoint(self):
return self.__xPoint, self.__yPoint
def setPoint(self, xPoint, yPoint):
self.__xPoint = xPoint
self.__yPoint = yPoint
    def __str__(self):
        # __str__ must return a string; printing here would make str(obj)
        # raise TypeError ("__str__ returned non-string")
        return 'xPoint = %d, yPoint = %d' % (self.__xPoint, self.__yPoint)
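# Minimal usage sketch (assumed demo block, not part of the original listing):
if __name__ == '__main__':
    s = Shape(3, 4)
    print(s)             # xPoint = 3, yPoint = 4
    s.setPoint(5, 12)
    print(s.getPoint())  # (5, 12)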
|
[
"[email protected]"
] | |
a84a792cbc0b3a11189f3108b5bb171958a14f1f
|
0ced6dc4f7c30cd58475bc5a13a7a8ad00081bab
|
/AndroidCase/test_023_mydingdan.py
|
a69a7eb2a0f03ec5b805b1325343d92ee87a1967
|
[] |
no_license
|
duanbibo/app-autotes-python
|
1ef7bc635a7dcd9e82a61441ac7a567bba1c1e25
|
951fcb2a138a75e7722a7714cde62990f33b0f3e
|
refs/heads/master
| 2022-04-25T03:39:20.675426 | 2020-04-27T15:08:47 | 2020-04-27T15:08:47 | 259,368,112 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,798 |
py
|
import unittest
from time import sleep
from util.driver import Driver
from util.isElement import iselement
class Login(unittest.TestCase):
    # "My orders" (我的订单) test
def setUp(self):
self.driver = Driver.DRIVER
def test_mydingdan(self):
driver = self.driver
sleep(2)
driver.find_element_by_xpath(
"//*[@resource-id='cn.xinzhili.core:id/tl_home_tabs']/android.widget.LinearLayout[4]").click()
sleep(2)
driver.find_element_by_xpath("//*[@text='我的订单']").click()
sleep(3)
self.assertIn("订单编号", driver.page_source)
driver.find_element_by_xpath("//*[@text='未完成']").click()
sleep(2)
if "进行中" in driver.page_source:
self.assertIn("进行中", driver.page_source)
elif "待支付" in driver.page_source:
self.assertIn("待支付", driver.page_source)
        else:
            self.assertEqual(1, 2)  # neither "in progress" (进行中) nor "pending payment" (待支付) found; force a failure
sleep(3)
driver.find_element_by_xpath("//*[@text='已完成']").click()
sleep(3)
self.assertIn("已取消", driver.page_source)
sleep(2)
driver.find_element_by_xpath("//*[@text='退款']").click()
sleep(2)
self.assertIn("退款成功", driver.page_source)
def tearDown(self):
self.driver = Driver.DRIVER
exit = iselement()
back = exit.findelementid("cn.xinzhili.core:id/iv_title_back")
back2 = exit.findelementid("cn.xinzhili.core:id/iv_title_left")
if back is True:
self.driver.find_element_by_id("cn.xinzhili.core:id/iv_title_back").click()
else:
sleep(2)
if back2 is True:
self.driver.find_element_by_id("cn.xinzhili.core:id/iv_title_left").click()
else:
sleep(2)
|
[
"[email protected]"
] | |
4b32656a28a2c68bceb7c7b718403512b88994f1
|
6bfda75657070e177fa620a43c917096cbd3c550
|
/kubernetes/test/test_autoscaling_v2alpha1_api.py
|
6f7571908e7305b29558bf7d6d8a9a4487042303
|
[
"Apache-2.0"
] |
permissive
|
don41382/client-python
|
8e7e747a62f9f4fc0402eea1a877eab1bb80ab36
|
e69d4fe204b98f7d7ee3ada3996b4f5fbceae5fe
|
refs/heads/master
| 2021-01-19T23:15:50.172933 | 2017-04-18T18:00:48 | 2017-04-18T18:00:48 | 88,943,866 | 0 | 0 | null | 2017-04-21T05:19:52 | 2017-04-21T05:19:52 | null |
UTF-8
|
Python
| false | false | 2,946 |
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.apis.autoscaling_v2alpha1_api import AutoscalingV2alpha1Api
class TestAutoscalingV2alpha1Api(unittest.TestCase):
""" AutoscalingV2alpha1Api unit test stubs """
def setUp(self):
self.api = kubernetes.client.apis.autoscaling_v2alpha1_api.AutoscalingV2alpha1Api()
def tearDown(self):
pass
def test_create_namespaced_horizontal_pod_autoscaler(self):
"""
Test case for create_namespaced_horizontal_pod_autoscaler
"""
pass
def test_delete_collection_namespaced_horizontal_pod_autoscaler(self):
"""
Test case for delete_collection_namespaced_horizontal_pod_autoscaler
"""
pass
def test_delete_namespaced_horizontal_pod_autoscaler(self):
"""
Test case for delete_namespaced_horizontal_pod_autoscaler
"""
pass
def test_get_api_resources(self):
"""
Test case for get_api_resources
"""
pass
def test_list_horizontal_pod_autoscaler_for_all_namespaces(self):
"""
Test case for list_horizontal_pod_autoscaler_for_all_namespaces
"""
pass
def test_list_namespaced_horizontal_pod_autoscaler(self):
"""
Test case for list_namespaced_horizontal_pod_autoscaler
"""
pass
def test_patch_namespaced_horizontal_pod_autoscaler(self):
"""
Test case for patch_namespaced_horizontal_pod_autoscaler
"""
pass
def test_patch_namespaced_horizontal_pod_autoscaler_status(self):
"""
Test case for patch_namespaced_horizontal_pod_autoscaler_status
"""
pass
def test_read_namespaced_horizontal_pod_autoscaler(self):
"""
Test case for read_namespaced_horizontal_pod_autoscaler
"""
pass
def test_read_namespaced_horizontal_pod_autoscaler_status(self):
"""
Test case for read_namespaced_horizontal_pod_autoscaler_status
"""
pass
def test_replace_namespaced_horizontal_pod_autoscaler(self):
"""
Test case for replace_namespaced_horizontal_pod_autoscaler
"""
pass
def test_replace_namespaced_horizontal_pod_autoscaler_status(self):
"""
Test case for replace_namespaced_horizontal_pod_autoscaler_status
"""
pass
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
e1a3999957bf6d49d0e8a8693b74c61ff9a7ae7d
|
4de2bfe570af0ae03db661223dc36524642a4016
|
/libermatic_customization/libermatic_customization/doctype/libermatic_settings/libermatic_settings.py
|
ea23f8e293f4e03572f53c1e63b16d67b32d51f8
|
[
"MIT"
] |
permissive
|
libermatic/libermatic_customization
|
6b645c6424987cee39653499bfa7b2683da65f42
|
38ac7abd691ee289ee85cb3926cafb3989af24e1
|
refs/heads/master
| 2020-03-31T18:29:28.152141 | 2019-08-27T11:16:14 | 2019-08-27T11:16:14 | 152,460,666 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 255 |
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Libermatic and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
from frappe.model.document import Document
class LibermaticSettings(Document):
pass
|
[
"[email protected]"
] | |
b331e8d4738cf4364bf4bb4dba8cab7ab907871e
|
057d2d1e2a78fc89851154e87b0b229e1e1f003b
|
/venv/Lib/site-packages/oslo_utils/tests/test_reflection.py
|
34384f7920a16de56a31f1e5bb0ed015841a979b
|
[
"Apache-2.0"
] |
permissive
|
prasoon-uta/IBM-Cloud-Secure-File-Storage
|
276dcbd143bd50b71121a73bc01c8e04fe3f76b0
|
82a6876316715efbd0b492d0d467dde0ab26a56b
|
refs/heads/master
| 2022-12-13T00:03:31.363281 | 2018-02-22T02:24:11 | 2018-02-22T02:24:11 | 122,420,622 | 0 | 2 |
Apache-2.0
| 2022-12-08T05:15:19 | 2018-02-22T02:26:48 |
Python
|
UTF-8
|
Python
| false | false | 11,178 |
py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslotest import base as test_base
import six
import testtools
from oslo_utils import reflection
if six.PY3:
RUNTIME_ERROR_CLASSES = ['RuntimeError', 'Exception',
'BaseException', 'object']
else:
RUNTIME_ERROR_CLASSES = ['RuntimeError', 'StandardError', 'Exception',
'BaseException', 'object']
def dummy_decorator(f):
@six.wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs)
return wrapper
def mere_function(a, b):
pass
def function_with_defs(a, b, optional=None):
pass
def function_with_kwargs(a, b, **kwargs):
pass
class TestObject(object):
def _hello(self):
pass
def hi(self):
pass
class Class(object):
def method(self, c, d):
pass
@staticmethod
def static_method(e, f):
pass
@classmethod
def class_method(cls, g, h):
pass
class BadClass(object):
def do_something(self):
pass
def __nonzero__(self):
return False
class CallableClass(object):
def __call__(self, i, j):
pass
class ClassWithInit(object):
def __init__(self, k, l):
pass
class MemberGetTest(test_base.BaseTestCase):
def test_get_members_exclude_hidden(self):
obj = TestObject()
members = list(reflection.get_members(obj, exclude_hidden=True))
self.assertEqual(1, len(members))
def test_get_members_no_exclude_hidden(self):
obj = TestObject()
members = list(reflection.get_members(obj, exclude_hidden=False))
self.assertGreater(len(members), 1)
def test_get_members_names_exclude_hidden(self):
obj = TestObject()
members = list(reflection.get_member_names(obj, exclude_hidden=True))
self.assertEqual(["hi"], members)
def test_get_members_names_no_exclude_hidden(self):
obj = TestObject()
members = list(reflection.get_member_names(obj, exclude_hidden=False))
members = [member for member in members if not member.startswith("__")]
self.assertEqual(["_hello", "hi"], sorted(members))
class CallbackEqualityTest(test_base.BaseTestCase):
def test_different_simple_callbacks(self):
def a():
pass
def b():
pass
self.assertFalse(reflection.is_same_callback(a, b))
def test_static_instance_callbacks(self):
class A(object):
@staticmethod
def b(a, b, c):
pass
a = A()
b = A()
self.assertTrue(reflection.is_same_callback(a.b, b.b))
def test_different_instance_callbacks(self):
class A(object):
def b(self):
pass
def __eq__(self, other):
return True
def __ne__(self, other):
return not self.__eq__(other)
b = A()
c = A()
self.assertFalse(reflection.is_same_callback(b.b, c.b))
self.assertTrue(reflection.is_same_callback(b.b, c.b, strict=False))
class BoundMethodTest(test_base.BaseTestCase):
def test_baddy(self):
b = BadClass()
self.assertTrue(reflection.is_bound_method(b.do_something))
def test_static_method(self):
self.assertFalse(reflection.is_bound_method(Class.static_method))
class GetCallableNameTest(test_base.BaseTestCase):
def test_mere_function(self):
name = reflection.get_callable_name(mere_function)
self.assertEqual('.'.join((__name__, 'mere_function')), name)
def test_method(self):
name = reflection.get_callable_name(Class.method)
self.assertEqual('.'.join((__name__, 'Class', 'method')), name)
def test_instance_method(self):
name = reflection.get_callable_name(Class().method)
self.assertEqual('.'.join((__name__, 'Class', 'method')), name)
def test_static_method(self):
name = reflection.get_callable_name(Class.static_method)
if six.PY3:
self.assertEqual('.'.join((__name__, 'Class', 'static_method')),
name)
else:
# NOTE(imelnikov): static method are just functions, class name
# is not recorded anywhere in them.
self.assertEqual('.'.join((__name__, 'static_method')), name)
def test_class_method(self):
name = reflection.get_callable_name(Class.class_method)
self.assertEqual('.'.join((__name__, 'Class', 'class_method')), name)
def test_constructor(self):
name = reflection.get_callable_name(Class)
self.assertEqual('.'.join((__name__, 'Class')), name)
def test_callable_class(self):
name = reflection.get_callable_name(CallableClass())
self.assertEqual('.'.join((__name__, 'CallableClass')), name)
def test_callable_class_call(self):
name = reflection.get_callable_name(CallableClass().__call__)
self.assertEqual('.'.join((__name__, 'CallableClass',
'__call__')), name)
# These extended/special case tests only work on python 3, due to python 2
# being broken/incorrect with regard to these special cases...
@testtools.skipIf(not six.PY3, 'python 3.x is not currently available')
class GetCallableNameTestExtended(test_base.BaseTestCase):
# Tests items in http://legacy.python.org/dev/peps/pep-3155/
class InnerCallableClass(object):
def __call__(self):
pass
def test_inner_callable_class(self):
obj = self.InnerCallableClass()
name = reflection.get_callable_name(obj.__call__)
expected_name = '.'.join((__name__, 'GetCallableNameTestExtended',
'InnerCallableClass', '__call__'))
self.assertEqual(expected_name, name)
def test_inner_callable_function(self):
def a():
def b():
pass
return b
name = reflection.get_callable_name(a())
expected_name = '.'.join((__name__, 'GetCallableNameTestExtended',
'test_inner_callable_function', '<locals>',
'a', '<locals>', 'b'))
self.assertEqual(expected_name, name)
def test_inner_class(self):
obj = self.InnerCallableClass()
name = reflection.get_callable_name(obj)
expected_name = '.'.join((__name__,
'GetCallableNameTestExtended',
'InnerCallableClass'))
self.assertEqual(expected_name, name)
class GetCallableArgsTest(test_base.BaseTestCase):
def test_mere_function(self):
result = reflection.get_callable_args(mere_function)
self.assertEqual(['a', 'b'], result)
def test_function_with_defaults(self):
result = reflection.get_callable_args(function_with_defs)
self.assertEqual(['a', 'b', 'optional'], result)
def test_required_only(self):
result = reflection.get_callable_args(function_with_defs,
required_only=True)
self.assertEqual(['a', 'b'], result)
def test_method(self):
result = reflection.get_callable_args(Class.method)
self.assertEqual(['self', 'c', 'd'], result)
def test_instance_method(self):
result = reflection.get_callable_args(Class().method)
self.assertEqual(['c', 'd'], result)
def test_class_method(self):
result = reflection.get_callable_args(Class.class_method)
self.assertEqual(['g', 'h'], result)
def test_class_constructor(self):
result = reflection.get_callable_args(ClassWithInit)
self.assertEqual(['k', 'l'], result)
def test_class_with_call(self):
result = reflection.get_callable_args(CallableClass())
self.assertEqual(['i', 'j'], result)
def test_decorators_work(self):
@dummy_decorator
def special_fun(x, y):
pass
result = reflection.get_callable_args(special_fun)
self.assertEqual(['x', 'y'], result)
class AcceptsKwargsTest(test_base.BaseTestCase):
def test_no_kwargs(self):
self.assertEqual(False, reflection.accepts_kwargs(mere_function))
def test_with_kwargs(self):
self.assertEqual(True, reflection.accepts_kwargs(function_with_kwargs))
class GetClassNameTest(test_base.BaseTestCase):
def test_std_exception(self):
name = reflection.get_class_name(RuntimeError)
self.assertEqual('RuntimeError', name)
def test_class(self):
name = reflection.get_class_name(Class)
self.assertEqual('.'.join((__name__, 'Class')), name)
def test_qualified_class(self):
class QualifiedClass(object):
pass
name = reflection.get_class_name(QualifiedClass)
self.assertEqual('.'.join((__name__, 'QualifiedClass')), name)
def test_instance(self):
name = reflection.get_class_name(Class())
self.assertEqual('.'.join((__name__, 'Class')), name)
def test_int(self):
name = reflection.get_class_name(42)
self.assertEqual('int', name)
def test_class_method(self):
name = reflection.get_class_name(Class.class_method)
self.assertEqual('%s.Class' % __name__, name)
# test with fully_qualified=False
name = reflection.get_class_name(Class.class_method,
fully_qualified=False)
self.assertEqual('Class', name)
def test_static_method(self):
self.assertRaises(TypeError, reflection.get_class_name,
Class.static_method)
def test_unbound_method(self):
self.assertRaises(TypeError, reflection.get_class_name,
mere_function)
def test_bound_method(self):
c = Class()
name = reflection.get_class_name(c.method)
self.assertEqual('%s.Class' % __name__, name)
# test with fully_qualified=False
name = reflection.get_class_name(c.method, fully_qualified=False)
self.assertEqual('Class', name)
class GetAllClassNamesTest(test_base.BaseTestCase):
def test_std_class(self):
names = list(reflection.get_all_class_names(RuntimeError))
self.assertEqual(RUNTIME_ERROR_CLASSES, names)
def test_std_class_up_to(self):
names = list(reflection.get_all_class_names(RuntimeError,
up_to=Exception))
self.assertEqual(RUNTIME_ERROR_CLASSES[:-2], names)
|
[
"[email protected]"
] | |
129e7bf0bd902cefe5098246834c9b2b435b12ac
|
67117fb75f765d3426b8d0b06567b9a0d446e25b
|
/src/gtk/toga_gtk/libs/gtk.py
|
95678bd2d52085e07397acbccbe29539cb31503f
|
[
"BSD-3-Clause"
] |
permissive
|
maks232/toga
|
2e61723aea004c97f97f6ac4e8f2e0e6de193b8f
|
47b8961ded119bc147961c0a7054d354e3f3222f
|
refs/heads/master
| 2022-12-10T14:09:25.963436 | 2020-09-03T11:53:53 | 2020-09-03T11:53:53 | 292,660,054 | 0 | 0 |
NOASSERTION
| 2020-09-03T19:20:57 | 2020-09-03T19:20:56 | null |
UTF-8
|
Python
| false | false | 771 |
py
|
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk, GdkPixbuf, Gio, GLib # noqa: F401, E402
# The following import will fail if WebKit or its API wrappers aren't
# installed; handle failure gracefully
# (see https://github.com/beeware/toga/issues/26)
# Try WebKit2 API version 4.0 first, falling back to 3.0
WebKit2 = None
for version in ['4.0', '3.0']:
try:
gi.require_version('WebKit2', version)
from gi.repository import WebKit2 # noqa: F401, E402
break
except (ImportError, ValueError):
pass
try:
gi.require_version("Pango", "1.0")
from gi.repository import Pango # noqa: F401, E402
except ImportError:
Pango = None
try:
import cairo # noqa: F401, E402
except ImportError:
cairo = None
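# Downstream usage sketch (hypothetical helper, not part of this module):
# callers are expected to guard on the optional bindings resolved above, e.g.
#
#   def require_webkit():
#       if WebKit2 is None:
#           raise RuntimeError(
#               "WebView requires WebKit2 GTK bindings; see toga issue #26")
#       return WebKit2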
|
[
"[email protected]"
] | |
fc738564a77ede09246ec3055abf893fcb7cccc8
|
e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f
|
/indices/nnaft.py
|
c99a4f51bfc46638106f2eb133a0128b90268820
|
[] |
no_license
|
psdh/WhatsintheVector
|
e8aabacc054a88b4cb25303548980af9a10c12a8
|
a24168d068d9c69dc7a0fd13f606c080ae82e2a6
|
refs/heads/master
| 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 |
Python
|
UTF-8
|
Python
| false | false | 584 |
py
|
ii = [('MarrFDI.py', 6), ('FerrSDO3.py', 1), ('RennJIT.py', 1), ('ProuWCM.py', 1), ('LeakWTI2.py', 1), ('LeakWTI3.py', 2), ('AdamWEP.py', 3), ('FitzRNS3.py', 1), ('ClarGE2.py', 1), ('CarlTFR.py', 2), ('SeniNSP.py', 1), ('LyelCPG.py', 1), ('GilmCRS.py', 2), ('FerrSDO2.py', 1), ('KirbWPW2.py', 2), ('SoutRD2.py', 1), ('BackGNE.py', 19), ('LeakWTI.py', 1), ('MedwTAI2.py', 1), ('BachARE.py', 1), ('WheeJPT.py', 1), ('FitzRNS.py', 1), ('FerrSDO.py', 1), ('RoscTTI.py', 1), ('LewiMJW.py', 1), ('BrewDTO.py', 1), ('ClarGE3.py', 1), ('FitzRNS2.py', 4), ('MartHSI.py', 1), ('DwigTHH.py', 1)]
|
[
"[email protected]"
] | |
539d0113207a5c499e7c828ef9766dbe5e11d854
|
48832d27da16256ee62c364add45f21b968ee669
|
/res/scripts/client/gui/shared/utils/methodsrules.py
|
660a7b0a547bb320ff1113ef3462da88efbd7c12
|
[] |
no_license
|
webiumsk/WOT-0.9.15.1
|
0752d5bbd7c6fafdd7f714af939ae7bcf654faf7
|
17ca3550fef25e430534d079876a14fbbcccb9b4
|
refs/heads/master
| 2021-01-20T18:24:10.349144 | 2016-08-04T18:08:34 | 2016-08-04T18:08:34 | 64,955,694 | 0 | 0 | null | null | null | null |
WINDOWS-1250
|
Python
| false | false | 3,585 |
py
|
# 2016.08.04 19:53:25 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/shared/utils/MethodsRules.py
from collections import defaultdict
from types import MethodType
from debug_utils import LOG_DEBUG
class MethodsRules(object):
__slots__ = ('__listenersToSkip', '__notificationToDelay', '__delayersProcessed')
class skipable(object):
def __init__(self, func):
self.__listerner = func
        def __call__(self, *args, **kwargs):
            instance = args[0]
            if not isinstance(instance, MethodsRules):
                raise AssertionError('Wrong inheritance.')
            # Restore the control flow flattened by the decompiler: skip the
            # notification once if it was flagged, otherwise deliver it.
            if instance.skip(self.__listerner):
                LOG_DEBUG('Notification skipped: ', instance, self.__listerner)
                return
            self.__listerner(*args, **kwargs)
def __get__(self, obj, objtype = None):
return MethodType(self, obj, objtype)
class delayable(object):
def __init__(self, delayerName = None):
self.__delayerName = delayerName
def __call__(self, listener):
            def wrapper(*args, **kwargs):
                instance = args[0]
                if not isinstance(instance, MethodsRules):
                    raise AssertionError('Wrong inheritance.')
                # Restore the control flow flattened by the decompiler: queue
                # the notification while its delayer is still unprocessed.
                if instance.delay(self.__delayerName, listener, *args, **kwargs):
                    LOG_DEBUG('Notification delayed: ', listener, *args, **kwargs)
                    return
                result = listener(*args, **kwargs)
                instance.processDelayer(listener.__name__)
                return result
return wrapper
def __get__(self, obj, objtype = None):
return MethodType(self, obj, objtype)
def __init__(self):
super(MethodsRules, self).__init__()
self.__listenersToSkip = []
self.__notificationToDelay = defaultdict(list)
self.__delayersProcessed = set()
def clear(self):
self.__listenersToSkip = []
self.__notificationToDelay.clear()
self.__delayersProcessed.clear()
def skipListenerNotification(self, wrapper):
self.__listenersToSkip.append(wrapper.listener)
def isSkipable(self, listener):
return listener in self.__listenersToSkip
def isDelayerProcessed(self, delayerName):
return delayerName in self.__delayersProcessed
def skip(self, listener):
if self.isSkipable(listener):
self.__listenersToSkip.remove(listener)
return True
return False
def delay(self, delayerName, notification, *args, **kwargs):
if delayerName is not None and not self.isDelayerProcessed(delayerName):
self.__notificationToDelay[delayerName].append((notification, args, kwargs))
return True
else:
return False
def processDelayer(self, delayerName):
LOG_DEBUG('Delayer processed: ', delayerName)
self.__delayersProcessed.add(delayerName)
pending = self.__notificationToDelay.pop(delayerName, ())
delayers = set()
for notification, args, kwargs in pending:
LOG_DEBUG('Notification processed: ', notification, args, kwargs)
notification(*args, **kwargs)
delayers.add(notification.__name__)
for delayerName in delayers:
self.processDelayer(delayerName)
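# Usage sketch (illustrative; not part of the decompiled source). A consumer
# inherits MethodsRules (and calls its __init__) so that notifications
# decorated with delayable('<delayer>') are queued until the named delayer
# method has run:
#
#     class Listener(MethodsRules):
#         @MethodsRules.delayable('onSynced')
#         def onUpdated(self, data):
#             LOG_DEBUG('updated', data)
#
#         @MethodsRules.delayable()
#         def onSynced(self):
#             LOG_DEBUG('synced')
#
# Calling onUpdated(...) before onSynced() queues the call; onSynced() then
# flushes it via processDelayer('onSynced').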
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\shared\utils\methodsrules.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.08.04 19:53:26 Střední Evropa (letní čas)
|
[
"[email protected]"
] | |
4a651167e8552a4ce397906eb3f85051e3281757
|
1065ec75d9ee668ffd7aafc6a8de912d7c2cee6f
|
/addons/script.icechannel.extn.extra.uk/plugins/livetv_uk/sky_news_ltvi.py
|
0533ee893da846b0f640a240561d8b1b06333b69
|
[] |
no_license
|
bopopescu/kodiprofile
|
64c067ee766e8a40e5c148b8e8ea367b4879ffc7
|
7e78640a569a7f212a771aab6a4a4d9cb0eecfbe
|
refs/heads/master
| 2021-06-11T17:16:15.498281 | 2016-04-03T06:37:30 | 2016-04-03T06:37:30 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,004 |
py
|
'''
Ice Channel
'''
from entertainment.plugnplay.interfaces import LiveTVIndexer
from entertainment.plugnplay import Plugin
from entertainment import common
class sky_news(LiveTVIndexer):
implements = [LiveTVIndexer]
display_name = "Sky News"
name = "sky_news"
other_names = "sky_news,Sky News"
import xbmcaddon
import os
addon_id = 'script.icechannel.extn.extra.uk'
addon = xbmcaddon.Addon(addon_id)
img = os.path.join( addon.getAddonInfo('path'), 'resources', 'images', name + '.png' )
regions = [
{
'name':'United Kingdom',
'img':addon.getAddonInfo('icon'),
'fanart':addon.getAddonInfo('fanart')
},
]
languages = [
{'name':'English', 'img':'', 'fanart':''},
]
genres = [
{'name':'News', 'img':'', 'fanart':''}
]
addon = None
|
[
"[email protected]"
] | |
b4ee0e774735f7e22ec95c1c42dc59f30949f18c
|
e2b5f9c5ccc51be2f3c0b55f580b882f2adb4875
|
/docs/conf.py
|
37ab66664e95edd05101d56e93214e099d7deeac
|
[
"MIT"
] |
permissive
|
bfontaine/firapria
|
5e0930db20689dd5d5a5bfa4511f6781b1521c21
|
a2eeeab6f6d1db50337950cfbd6f835272306ff0
|
refs/heads/master
| 2021-01-25T06:36:39.101391 | 2014-09-30T22:09:38 | 2014-09-30T22:09:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,173 |
py
|
# -*- coding: utf-8 -*-
#
# Firapria documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 28 23:02:11 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Firapria'
copyright = u'2014, Baptiste Fontaine'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Firapriadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Firapria.tex', u'Firapria Documentation',
u'Baptiste Fontaine', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'firapria', u'Firapria Documentation',
[u'Baptiste Fontaine'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Firapria', u'Firapria Documentation',
u'Baptiste Fontaine', 'Firapria', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
[
"[email protected]"
] | |
07c23570e03becd7f5bb4afa422a232f230fa5d1
|
f60eb7d15ce3ca06e2db1dc0af8b3b87bed08c37
|
/home/migrations/0012_auto_20170609_2252.py
|
848d2b5ca42484916880c1fe1c4dcfbfde12ea15
|
[] |
no_license
|
wlminimal/epc
|
96136f0c5f2b4ddc04fbc7e7b76d6a41c631ea26
|
2127a4e273a69a3ca0d5711fd1452c1bc5ab7590
|
refs/heads/master
| 2022-12-12T11:33:57.711869 | 2019-04-12T16:33:58 | 2019-04-12T16:33:58 | 92,700,181 | 0 | 0 | null | 2022-12-07T23:58:05 | 2017-05-29T02:20:33 |
Python
|
UTF-8
|
Python
| false | false | 879 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-06-09 22:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('home', '0011_delete_sermonday'),
]
operations = [
migrations.CreateModel(
name='SermonDay',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sermon_day', models.CharField(default="Lord's Day", max_length=50)),
],
),
migrations.AddField(
model_name='sermonvideo',
name='sermon_day',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='home.SermonDay'),
),
]
|
[
"[email protected]"
] | |
2095e3bfdb5024a6fd017a4c63e5e06b6416e81f
|
19fb0eb26f5a6d2180a323cf242ce00f5e4e1c6d
|
/contrib/seeds/makeseeds.py
|
39db93d0b44c8c2565919b9c1b35031aba660f40
|
[
"MIT"
] |
permissive
|
j00v/NestEGG
|
bd4c9555f6473cc655e203531c6ab4d0dc795b61
|
8c507974a5d49f5ffa7000fa8b864a528dcb9c3e
|
refs/heads/master
| 2022-12-03T09:16:14.732378 | 2020-08-12T15:25:31 | 2020-08-12T15:25:31 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,517 |
py
|
#!/usr/bin/env python3
# Copyright (c) 2013-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 615801
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {
""
}
import re
import sys
import dns.resolver
import collections
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/NESTEGGCore:2.2.(0|1|99)/)$")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
    # Skip bad results (fields are strings after split(), so compare numerically).
    if int(sline[1]) == 0:
        return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
if len(sline) > 11:
agent = sline[11][1:] + sline[12][:-1]
else:
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def filtermultiport(ips):
    '''Filter out hosts with more than one node per IP'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
# Sift out ips by type
ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv4 by ASN
result = []
asn_count = {}
for ip in ips_ipv4:
if len(result) == max_total:
break
try:
asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
# TODO: filter IPv6 by ASN
# Add back non-IPv4
result.extend(ips_ipv6)
result.extend(ips_onion)
return result
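# The Team Cymru TXT answer parsed above looks like this (illustrative):
#   "13335 | 104.16.0.0/12 | US | arin | 2014-03-28"
# and the leading field (the origin ASN) is what the split extracts.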
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
    # Skip entries without a valid address.
ips = [ip for ip in ips if ip is not None]
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(re.sub(' ', '-', ip['agent']))]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple bitcoin ports, these are likely abusive
ips = filtermultiport(ips)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
4850a93389426ba1af6d9b80f1441d2e15b33c02
|
a88a07f83024f6781bf0f297a7585686a97030e1
|
/src/learning/theano/concatennate.py
|
35bef22d31217cc2097e13b618070d02eb1adef4
|
[] |
no_license
|
muqiann/NamedEntityRecognition
|
0b89c79a46dc6be6b61a5fe020d003724d04a971
|
3c3a979950f3f172a61fd7c9ff5d3563877810a9
|
refs/heads/master
| 2020-04-27T17:53:21.485758 | 2018-06-26T01:26:08 | 2018-06-26T01:26:09 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 574 |
py
|
# -*- coding: utf-8 -*-
import theano
import numpy as np
import theano.tensor as T
ones = theano.shared(np.float32([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
twos = theano.shared(np.float32([[10, 11, 12], [13, 14, 15]]))
print(ones.get_value())
result = T.concatenate([ones, ones], axis=0)  # concatenate along axis 0 (stack vertically: 6x3)
print(result.eval())
result = T.concatenate([ones, ones], axis=1)  # concatenate along axis 1 (join side by side: 3x6)
print(result.eval())
# Wrong: all input array dimensions except the concatenation axis must match
# exactly (ones is 3x3 while twos is 2x3), so this raises an error.
result = T.concatenate([ones, twos], axis=1)
print(result.eval())
|
[
"[email protected]"
] | |
740e7c5bd00047252a123bf3aef1db3897986788
|
e41e614249db33edfd62831ae30e08596e32dde6
|
/filter_data.py
|
839b19a473b33cbb8d42b50d12d96b238eef8d2a
|
[] |
no_license
|
newtein/pollution_science
|
237fd2385c870db2fdb3cc97bbc5a9d864b4e5f8
|
6779aa729f412ffe0901c069c8ef63b3a83c4ce4
|
refs/heads/master
| 2023-02-01T08:14:26.612619 | 2020-12-13T06:07:38 | 2020-12-13T06:07:38 | 275,144,805 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,741 |
py
|
from read_data import ReadData
from combine_data import CombineData
from copy import copy
class FilterData:
def __init__(self, pollutant, year=None, observation_type="daily", start_date="01-12-2019",
end_date="15-06-2020", index_col = None, fixed_where_payload = {}):
self.pollutant = pollutant
self.start_date = start_date
self.end_date = end_date
self.observation_type = observation_type
self.index_col = index_col
if year:
self.df = ReadData(self.pollutant, observation_type=observation_type, year=year).get_pandas_obj()
else:
self.df = CombineData(self.pollutant, start_date=self.start_date, end_date=self.end_date,
observation_type=observation_type).get_pandas_obj()
if self.index_col:
self.df = self.df.set_index(self.index_col)
if fixed_where_payload:
for col_name, col_value in fixed_where_payload.items():
                # index_col may be None; guard it before the membership test
                if self.index_col and col_name in self.index_col:
self.df = self.df[self.df.index == col_value]
else:
self.df = self.df[self.df[col_name] == col_value]
def filter_df(self, select_columns, where_payload):
df = copy(self.df)
for col_name, col_value in where_payload.items():
            if self.index_col and col_name in self.index_col:
df = df[df.index == col_value]
else:
df = df[df[col_name] == col_value]
df = df[select_columns] if select_columns else df
return df
if __name__ == "__main__":
    obj = FilterData('PM2', year='2020')  # pollutant first, then the year
print(obj.filter_df(['County Name', '1st Max Value'], {'State Name': 'California'}))
|
[
"[email protected]"
] | |
6934696f4d50fcab91b608637cf2ad56e25e07b2
|
f3b233e5053e28fa95c549017bd75a30456eb50c
|
/CDK2_input/L31/31-28_wat_20Abox/set_5.py
|
0b41608e37632fc6b7eb344e7e03bd50ed7ca3cd
|
[] |
no_license
|
AnguseZhang/Input_TI
|
ddf2ed40ff1c0aa24eea3275b83d4d405b50b820
|
50ada0833890be9e261c967d00948f998313cb60
|
refs/heads/master
| 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 738 |
py
|
import os
dir = '/mnt/scratch/songlin3/run/CDK2/L31/wat_20Abox/ti_one-step/31_28/'
filesdir = dir + 'files/'
temp_prodin = filesdir + 'temp_prod_5.in'
temp_pbs = filesdir + 'temp_5.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.chdir("%6.5f" %(j))
workdir = dir + "%6.5f" %(j) + '/'
#prodin
prodin = workdir + "%6.5f_prod_5.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
#PBS
pbs = workdir + "%6.5f_5.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#submit pbs
#os.system("qsub %s" %(pbs))
os.chdir(dir)
|
[
"[email protected]"
] | |
7b1108fcacc70c764138daa4e2ec874037df6884
|
5135adcb2924ab78bb2f5ae492aca57768bd9565
|
/sneaker_map_23022/wsgi.py
|
fa427aebab7479f1786df9cbfd48e4123136c7dd
|
[] |
no_license
|
crowdbotics-apps/sneaker-map-23022
|
658eb82858b7c5f56a83870145e8aa6fc5620012
|
747c458d1e0a66e9b3df733196706f360b0bab0b
|
refs/heads/master
| 2023-01-19T15:36:21.267419 | 2020-11-27T23:15:08 | 2020-11-27T23:15:08 | 316,617,955 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 411 |
py
|
"""
WSGI config for sneaker_map_23022 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sneaker_map_23022.settings')
application = get_wsgi_application()
|
[
"[email protected]"
] | |
19dcebc74ce7412e7a09a61d775cc92d429daf42
|
531c47c15b97cbcb263ec86821d7f258c81c0aaf
|
/sdk/resources/azure-mgmt-resource/azure/mgmt/resource/deploymentscripts/v2019_10_preview/models/_models_py3.py
|
eb8330f2ab9119967c431a9943bf9e45fb981a55
|
[
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] |
permissive
|
YijunXieMS/azure-sdk-for-python
|
be364d3b88204fd3c7d223df23756386ff7a3361
|
f779de8e53dbec033f98f976284e6d9491fd60b3
|
refs/heads/master
| 2021-07-15T18:06:28.748507 | 2020-09-04T15:48:52 | 2020-09-04T15:48:52 | 205,457,088 | 1 | 2 |
MIT
| 2020-06-16T16:38:15 | 2019-08-30T21:08:55 |
Python
|
UTF-8
|
Python
| false | false | 56,100 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._deployment_scripts_client_enums import *
class AzureResourceBase(msrest.serialization.Model):
"""Common properties for all Azure resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: String Id used to locate any resource on Azure.
:vartype id: str
:ivar name: Name of this resource.
:vartype name: str
:ivar type: Type of this resource.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AzureResourceBase, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
class DeploymentScript(AzureResourceBase):
"""Deployment script object.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AzureCliScript, AzurePowerShellScript.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: String Id used to locate any resource on Azure.
:vartype id: str
:ivar name: Name of this resource.
:vartype name: str
:ivar type: Type of this resource.
:vartype type: str
:param identity: Required. Managed identity to be used for this deployment script. Currently,
only user-assigned MSI is supported.
:type identity:
~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.ManagedServiceIdentity
:param location: Required. The location of the ACI and the storage account for the deployment
script.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param kind: Required. Type of the script.Constant filled by server. Possible values include:
"AzurePowerShell", "AzureCLI".
:type kind: str or ~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.ScriptType
:ivar system_data: The system metadata related to this resource.
:vartype system_data: ~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.SystemData
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'identity': {'required': True},
'location': {'required': True},
'kind': {'required': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ManagedServiceIdentity'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'kind': {'key': 'kind', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
}
_subtype_map = {
'kind': {'AzureCLI': 'AzureCliScript', 'AzurePowerShell': 'AzurePowerShellScript'}
}
def __init__(
self,
*,
identity: "ManagedServiceIdentity",
location: str,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(DeploymentScript, self).__init__(**kwargs)
self.identity = identity
self.location = location
self.tags = tags
self.kind: str = 'DeploymentScript'
self.system_data = None
class AzureCliScript(DeploymentScript):
"""Object model for the Azure CLI script.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: String Id used to locate any resource on Azure.
:vartype id: str
:ivar name: Name of this resource.
:vartype name: str
:ivar type: Type of this resource.
:vartype type: str
:param identity: Required. Managed identity to be used for this deployment script. Currently,
only user-assigned MSI is supported.
:type identity:
~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.ManagedServiceIdentity
:param location: Required. The location of the ACI and the storage account for the deployment
script.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
    :param kind: Required. Type of the script. Constant filled by server. Possible values include:
"AzurePowerShell", "AzureCLI".
:type kind: str or ~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.ScriptType
:ivar system_data: The system metadata related to this resource.
:vartype system_data: ~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.SystemData
:param container_settings: Container settings.
:type container_settings:
~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.ContainerConfiguration
:param storage_account_settings: Storage Account settings.
:type storage_account_settings:
~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.StorageAccountConfiguration
    :param cleanup_preference: The cleanup preference once the script execution reaches a terminal
     state. Default setting is 'Always'. Possible values include: "Always", "OnSuccess",
"OnExpiration".
:type cleanup_preference: str or
~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.CleanupOptions
:ivar provisioning_state: State of the script execution. This only appears in the response.
Possible values include: "Creating", "ProvisioningResources", "Running", "Succeeded", "Failed",
"Canceled".
:vartype provisioning_state: str or
~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.ScriptProvisioningState
:ivar status: Contains the results of script execution.
:vartype status: ~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.ScriptStatus
:ivar outputs: List of script outputs.
:vartype outputs: dict[str, object]
:param primary_script_uri: Uri for the script. This is the entry point for the external script.
:type primary_script_uri: str
:param supporting_script_uris: Supporting files for the external script.
:type supporting_script_uris: list[str]
:param script_content: Script body.
:type script_content: str
:param arguments: Command line arguments to pass to the script. Arguments are separated by
spaces. ex: -Name blue* -Location 'West US 2'.
:type arguments: str
:param environment_variables: The environment variables to pass over to the script.
:type environment_variables:
list[~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.EnvironmentVariable]
:param force_update_tag: Gets or sets how the deployment script should be forced to execute
even if the script resource has not changed. Can be current time stamp or a GUID.
:type force_update_tag: str
:param retention_interval: Required. Interval for which the service retains the script resource
after it reaches a terminal state. Resource will be deleted when this duration expires.
Duration is based on ISO 8601 pattern (for example P7D means one week).
:type retention_interval: ~datetime.timedelta
:param timeout: Maximum allowed script execution time specified in ISO 8601 format. Default
value is PT1H.
:type timeout: ~datetime.timedelta
:param az_cli_version: Required. Azure CLI module version to be used.
:type az_cli_version: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'identity': {'required': True},
'location': {'required': True},
'kind': {'required': True},
'system_data': {'readonly': True},
'provisioning_state': {'readonly': True},
'status': {'readonly': True},
'outputs': {'readonly': True},
'script_content': {'max_length': 32000, 'min_length': 0},
'retention_interval': {'required': True},
'az_cli_version': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ManagedServiceIdentity'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'kind': {'key': 'kind', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'container_settings': {'key': 'properties.containerSettings', 'type': 'ContainerConfiguration'},
'storage_account_settings': {'key': 'properties.storageAccountSettings', 'type': 'StorageAccountConfiguration'},
'cleanup_preference': {'key': 'properties.cleanupPreference', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'ScriptStatus'},
'outputs': {'key': 'properties.outputs', 'type': '{object}'},
'primary_script_uri': {'key': 'properties.primaryScriptUri', 'type': 'str'},
'supporting_script_uris': {'key': 'properties.supportingScriptUris', 'type': '[str]'},
'script_content': {'key': 'properties.scriptContent', 'type': 'str'},
'arguments': {'key': 'properties.arguments', 'type': 'str'},
'environment_variables': {'key': 'properties.environmentVariables', 'type': '[EnvironmentVariable]'},
'force_update_tag': {'key': 'properties.forceUpdateTag', 'type': 'str'},
'retention_interval': {'key': 'properties.retentionInterval', 'type': 'duration'},
'timeout': {'key': 'properties.timeout', 'type': 'duration'},
'az_cli_version': {'key': 'properties.azCliVersion', 'type': 'str'},
}
def __init__(
self,
*,
identity: "ManagedServiceIdentity",
location: str,
retention_interval: datetime.timedelta,
az_cli_version: str,
tags: Optional[Dict[str, str]] = None,
container_settings: Optional["ContainerConfiguration"] = None,
storage_account_settings: Optional["StorageAccountConfiguration"] = None,
cleanup_preference: Optional[Union[str, "CleanupOptions"]] = None,
primary_script_uri: Optional[str] = None,
supporting_script_uris: Optional[List[str]] = None,
script_content: Optional[str] = None,
arguments: Optional[str] = None,
environment_variables: Optional[List["EnvironmentVariable"]] = None,
force_update_tag: Optional[str] = None,
timeout: Optional[datetime.timedelta] = None,
**kwargs
):
super(AzureCliScript, self).__init__(identity=identity, location=location, tags=tags, **kwargs)
self.kind: str = 'AzureCLI'
self.container_settings = container_settings
self.storage_account_settings = storage_account_settings
self.cleanup_preference = cleanup_preference
self.provisioning_state = None
self.status = None
self.outputs = None
self.primary_script_uri = primary_script_uri
self.supporting_script_uris = supporting_script_uris
self.script_content = script_content
self.arguments = arguments
self.environment_variables = environment_variables
self.force_update_tag = force_update_tag
self.retention_interval = retention_interval
self.timeout = timeout
self.az_cli_version = az_cli_version
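# Example (sketch, not part of the generated SDK): constructing an AzureCliScript
# with its required fields. The resource ID, region, and versions below are
# illustrative placeholders, not values taken from this module.
#
#   identity = ManagedServiceIdentity(user_assigned_identities={
#       "/subscriptions/<sub>/resourceGroups/<rg>/providers/"
#       "Microsoft.ManagedIdentity/userAssignedIdentities/<name>": UserAssignedIdentity()})
#   script = AzureCliScript(
#       identity=identity,
#       location="westus2",
#       retention_interval=datetime.timedelta(days=1),  # serialized as ISO 8601 "P1D"
#       az_cli_version="2.9.1",
#       script_content="echo hello")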
class ScriptConfigurationBase(msrest.serialization.Model):
"""Common configuration settings for both Azure PowerShell and Azure CLI scripts.
All required parameters must be populated in order to send to Azure.
:param primary_script_uri: Uri for the script. This is the entry point for the external script.
:type primary_script_uri: str
:param supporting_script_uris: Supporting files for the external script.
:type supporting_script_uris: list[str]
:param script_content: Script body.
:type script_content: str
:param arguments: Command line arguments to pass to the script. Arguments are separated by
spaces. ex: -Name blue* -Location 'West US 2'.
:type arguments: str
:param environment_variables: The environment variables to pass over to the script.
:type environment_variables:
list[~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.EnvironmentVariable]
:param force_update_tag: Gets or sets how the deployment script should be forced to execute
even if the script resource has not changed. Can be current time stamp or a GUID.
:type force_update_tag: str
:param retention_interval: Required. Interval for which the service retains the script resource
after it reaches a terminal state. Resource will be deleted when this duration expires.
Duration is based on ISO 8601 pattern (for example P7D means one week).
:type retention_interval: ~datetime.timedelta
:param timeout: Maximum allowed script execution time specified in ISO 8601 format. Default
value is PT1H.
:type timeout: ~datetime.timedelta
"""
_validation = {
'script_content': {'max_length': 32000, 'min_length': 0},
'retention_interval': {'required': True},
}
_attribute_map = {
'primary_script_uri': {'key': 'primaryScriptUri', 'type': 'str'},
'supporting_script_uris': {'key': 'supportingScriptUris', 'type': '[str]'},
'script_content': {'key': 'scriptContent', 'type': 'str'},
'arguments': {'key': 'arguments', 'type': 'str'},
'environment_variables': {'key': 'environmentVariables', 'type': '[EnvironmentVariable]'},
'force_update_tag': {'key': 'forceUpdateTag', 'type': 'str'},
'retention_interval': {'key': 'retentionInterval', 'type': 'duration'},
'timeout': {'key': 'timeout', 'type': 'duration'},
}
def __init__(
self,
*,
retention_interval: datetime.timedelta,
primary_script_uri: Optional[str] = None,
supporting_script_uris: Optional[List[str]] = None,
script_content: Optional[str] = None,
arguments: Optional[str] = None,
environment_variables: Optional[List["EnvironmentVariable"]] = None,
force_update_tag: Optional[str] = None,
timeout: Optional[datetime.timedelta] = None,
**kwargs
):
super(ScriptConfigurationBase, self).__init__(**kwargs)
self.primary_script_uri = primary_script_uri
self.supporting_script_uris = supporting_script_uris
self.script_content = script_content
self.arguments = arguments
self.environment_variables = environment_variables
self.force_update_tag = force_update_tag
self.retention_interval = retention_interval
self.timeout = timeout
class DeploymentScriptPropertiesBase(msrest.serialization.Model):
"""Common properties for the deployment script.
Variables are only populated by the server, and will be ignored when sending a request.
:param container_settings: Container settings.
:type container_settings:
~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.ContainerConfiguration
:param storage_account_settings: Storage Account settings.
:type storage_account_settings:
~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.StorageAccountConfiguration
    :param cleanup_preference: The cleanup preference once the script execution reaches a terminal
     state. Default setting is 'Always'. Possible values include: "Always", "OnSuccess",
"OnExpiration".
:type cleanup_preference: str or
~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.CleanupOptions
:ivar provisioning_state: State of the script execution. This only appears in the response.
Possible values include: "Creating", "ProvisioningResources", "Running", "Succeeded", "Failed",
"Canceled".
:vartype provisioning_state: str or
~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.ScriptProvisioningState
:ivar status: Contains the results of script execution.
:vartype status: ~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.ScriptStatus
:ivar outputs: List of script outputs.
:vartype outputs: dict[str, object]
"""
_validation = {
'provisioning_state': {'readonly': True},
'status': {'readonly': True},
'outputs': {'readonly': True},
}
_attribute_map = {
'container_settings': {'key': 'containerSettings', 'type': 'ContainerConfiguration'},
'storage_account_settings': {'key': 'storageAccountSettings', 'type': 'StorageAccountConfiguration'},
'cleanup_preference': {'key': 'cleanupPreference', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'status': {'key': 'status', 'type': 'ScriptStatus'},
'outputs': {'key': 'outputs', 'type': '{object}'},
}
def __init__(
self,
*,
container_settings: Optional["ContainerConfiguration"] = None,
storage_account_settings: Optional["StorageAccountConfiguration"] = None,
cleanup_preference: Optional[Union[str, "CleanupOptions"]] = None,
**kwargs
):
super(DeploymentScriptPropertiesBase, self).__init__(**kwargs)
self.container_settings = container_settings
self.storage_account_settings = storage_account_settings
self.cleanup_preference = cleanup_preference
self.provisioning_state = None
self.status = None
self.outputs = None
class AzureCliScriptProperties(DeploymentScriptPropertiesBase, ScriptConfigurationBase):
"""Properties of the Azure CLI script object.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param primary_script_uri: Uri for the script. This is the entry point for the external script.
:type primary_script_uri: str
:param supporting_script_uris: Supporting files for the external script.
:type supporting_script_uris: list[str]
:param script_content: Script body.
:type script_content: str
:param arguments: Command line arguments to pass to the script. Arguments are separated by
spaces. ex: -Name blue* -Location 'West US 2'.
:type arguments: str
:param environment_variables: The environment variables to pass over to the script.
:type environment_variables:
list[~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.EnvironmentVariable]
:param force_update_tag: Gets or sets how the deployment script should be forced to execute
even if the script resource has not changed. Can be current time stamp or a GUID.
:type force_update_tag: str
:param retention_interval: Required. Interval for which the service retains the script resource
after it reaches a terminal state. Resource will be deleted when this duration expires.
Duration is based on ISO 8601 pattern (for example P7D means one week).
:type retention_interval: ~datetime.timedelta
:param timeout: Maximum allowed script execution time specified in ISO 8601 format. Default
value is PT1H.
:type timeout: ~datetime.timedelta
:param container_settings: Container settings.
:type container_settings:
~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.ContainerConfiguration
:param storage_account_settings: Storage Account settings.
:type storage_account_settings:
~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.StorageAccountConfiguration
    :param cleanup_preference: The cleanup preference once the script execution reaches a terminal
     state. Default setting is 'Always'. Possible values include: "Always", "OnSuccess",
"OnExpiration".
:type cleanup_preference: str or
~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.CleanupOptions
:ivar provisioning_state: State of the script execution. This only appears in the response.
Possible values include: "Creating", "ProvisioningResources", "Running", "Succeeded", "Failed",
"Canceled".
:vartype provisioning_state: str or
~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.ScriptProvisioningState
:ivar status: Contains the results of script execution.
:vartype status: ~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.ScriptStatus
:ivar outputs: List of script outputs.
:vartype outputs: dict[str, object]
:param az_cli_version: Required. Azure CLI module version to be used.
:type az_cli_version: str
"""
_validation = {
'script_content': {'max_length': 32000, 'min_length': 0},
'retention_interval': {'required': True},
'provisioning_state': {'readonly': True},
'status': {'readonly': True},
'outputs': {'readonly': True},
'az_cli_version': {'required': True},
}
_attribute_map = {
'primary_script_uri': {'key': 'primaryScriptUri', 'type': 'str'},
'supporting_script_uris': {'key': 'supportingScriptUris', 'type': '[str]'},
'script_content': {'key': 'scriptContent', 'type': 'str'},
'arguments': {'key': 'arguments', 'type': 'str'},
'environment_variables': {'key': 'environmentVariables', 'type': '[EnvironmentVariable]'},
'force_update_tag': {'key': 'forceUpdateTag', 'type': 'str'},
'retention_interval': {'key': 'retentionInterval', 'type': 'duration'},
'timeout': {'key': 'timeout', 'type': 'duration'},
'container_settings': {'key': 'containerSettings', 'type': 'ContainerConfiguration'},
'storage_account_settings': {'key': 'storageAccountSettings', 'type': 'StorageAccountConfiguration'},
'cleanup_preference': {'key': 'cleanupPreference', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'status': {'key': 'status', 'type': 'ScriptStatus'},
'outputs': {'key': 'outputs', 'type': '{object}'},
'az_cli_version': {'key': 'azCliVersion', 'type': 'str'},
}
def __init__(
self,
*,
retention_interval: datetime.timedelta,
az_cli_version: str,
primary_script_uri: Optional[str] = None,
supporting_script_uris: Optional[List[str]] = None,
script_content: Optional[str] = None,
arguments: Optional[str] = None,
environment_variables: Optional[List["EnvironmentVariable"]] = None,
force_update_tag: Optional[str] = None,
timeout: Optional[datetime.timedelta] = None,
container_settings: Optional["ContainerConfiguration"] = None,
storage_account_settings: Optional["StorageAccountConfiguration"] = None,
cleanup_preference: Optional[Union[str, "CleanupOptions"]] = None,
**kwargs
):
super(AzureCliScriptProperties, self).__init__(container_settings=container_settings, storage_account_settings=storage_account_settings, cleanup_preference=cleanup_preference, primary_script_uri=primary_script_uri, supporting_script_uris=supporting_script_uris, script_content=script_content, arguments=arguments, environment_variables=environment_variables, force_update_tag=force_update_tag, retention_interval=retention_interval, timeout=timeout, **kwargs)
self.primary_script_uri = primary_script_uri
self.supporting_script_uris = supporting_script_uris
self.script_content = script_content
self.arguments = arguments
self.environment_variables = environment_variables
self.force_update_tag = force_update_tag
self.retention_interval = retention_interval
self.timeout = timeout
self.az_cli_version = az_cli_version
self.container_settings = container_settings
self.storage_account_settings = storage_account_settings
self.cleanup_preference = cleanup_preference
self.provisioning_state = None
self.status = None
self.outputs = None
self.az_cli_version = az_cli_version
class AzurePowerShellScript(DeploymentScript):
"""Object model for the Azure PowerShell script.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: String Id used to locate any resource on Azure.
:vartype id: str
:ivar name: Name of this resource.
:vartype name: str
:ivar type: Type of this resource.
:vartype type: str
:param identity: Required. Managed identity to be used for this deployment script. Currently,
only user-assigned MSI is supported.
:type identity:
~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.ManagedServiceIdentity
:param location: Required. The location of the ACI and the storage account for the deployment
script.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
    :param kind: Required. Type of the script. Constant filled by server. Possible values include:
"AzurePowerShell", "AzureCLI".
:type kind: str or ~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.ScriptType
:ivar system_data: The system metadata related to this resource.
:vartype system_data: ~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.SystemData
:param container_settings: Container settings.
:type container_settings:
~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.ContainerConfiguration
:param storage_account_settings: Storage Account settings.
:type storage_account_settings:
~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.StorageAccountConfiguration
    :param cleanup_preference: The cleanup preference once the script execution reaches a terminal
     state. Default setting is 'Always'. Possible values include: "Always", "OnSuccess",
"OnExpiration".
:type cleanup_preference: str or
~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.CleanupOptions
:ivar provisioning_state: State of the script execution. This only appears in the response.
Possible values include: "Creating", "ProvisioningResources", "Running", "Succeeded", "Failed",
"Canceled".
:vartype provisioning_state: str or
~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.ScriptProvisioningState
:ivar status: Contains the results of script execution.
:vartype status: ~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.ScriptStatus
:ivar outputs: List of script outputs.
:vartype outputs: dict[str, object]
:param primary_script_uri: Uri for the script. This is the entry point for the external script.
:type primary_script_uri: str
:param supporting_script_uris: Supporting files for the external script.
:type supporting_script_uris: list[str]
:param script_content: Script body.
:type script_content: str
:param arguments: Command line arguments to pass to the script. Arguments are separated by
spaces. ex: -Name blue* -Location 'West US 2'.
:type arguments: str
:param environment_variables: The environment variables to pass over to the script.
:type environment_variables:
list[~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.EnvironmentVariable]
:param force_update_tag: Gets or sets how the deployment script should be forced to execute
even if the script resource has not changed. Can be current time stamp or a GUID.
:type force_update_tag: str
:param retention_interval: Required. Interval for which the service retains the script resource
after it reaches a terminal state. Resource will be deleted when this duration expires.
Duration is based on ISO 8601 pattern (for example P7D means one week).
:type retention_interval: ~datetime.timedelta
:param timeout: Maximum allowed script execution time specified in ISO 8601 format. Default
value is PT1H.
:type timeout: ~datetime.timedelta
:param az_power_shell_version: Required. Azure PowerShell module version to be used.
:type az_power_shell_version: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'identity': {'required': True},
'location': {'required': True},
'kind': {'required': True},
'system_data': {'readonly': True},
'provisioning_state': {'readonly': True},
'status': {'readonly': True},
'outputs': {'readonly': True},
'script_content': {'max_length': 32000, 'min_length': 0},
'retention_interval': {'required': True},
'az_power_shell_version': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ManagedServiceIdentity'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'kind': {'key': 'kind', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'container_settings': {'key': 'properties.containerSettings', 'type': 'ContainerConfiguration'},
'storage_account_settings': {'key': 'properties.storageAccountSettings', 'type': 'StorageAccountConfiguration'},
'cleanup_preference': {'key': 'properties.cleanupPreference', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'status': {'key': 'properties.status', 'type': 'ScriptStatus'},
'outputs': {'key': 'properties.outputs', 'type': '{object}'},
'primary_script_uri': {'key': 'properties.primaryScriptUri', 'type': 'str'},
'supporting_script_uris': {'key': 'properties.supportingScriptUris', 'type': '[str]'},
'script_content': {'key': 'properties.scriptContent', 'type': 'str'},
'arguments': {'key': 'properties.arguments', 'type': 'str'},
'environment_variables': {'key': 'properties.environmentVariables', 'type': '[EnvironmentVariable]'},
'force_update_tag': {'key': 'properties.forceUpdateTag', 'type': 'str'},
'retention_interval': {'key': 'properties.retentionInterval', 'type': 'duration'},
'timeout': {'key': 'properties.timeout', 'type': 'duration'},
'az_power_shell_version': {'key': 'properties.azPowerShellVersion', 'type': 'str'},
}
def __init__(
self,
*,
identity: "ManagedServiceIdentity",
location: str,
retention_interval: datetime.timedelta,
az_power_shell_version: str,
tags: Optional[Dict[str, str]] = None,
container_settings: Optional["ContainerConfiguration"] = None,
storage_account_settings: Optional["StorageAccountConfiguration"] = None,
cleanup_preference: Optional[Union[str, "CleanupOptions"]] = None,
primary_script_uri: Optional[str] = None,
supporting_script_uris: Optional[List[str]] = None,
script_content: Optional[str] = None,
arguments: Optional[str] = None,
environment_variables: Optional[List["EnvironmentVariable"]] = None,
force_update_tag: Optional[str] = None,
timeout: Optional[datetime.timedelta] = None,
**kwargs
):
super(AzurePowerShellScript, self).__init__(identity=identity, location=location, tags=tags, **kwargs)
self.kind: str = 'AzurePowerShell'
self.container_settings = container_settings
self.storage_account_settings = storage_account_settings
self.cleanup_preference = cleanup_preference
self.provisioning_state = None
self.status = None
self.outputs = None
self.primary_script_uri = primary_script_uri
self.supporting_script_uris = supporting_script_uris
self.script_content = script_content
self.arguments = arguments
self.environment_variables = environment_variables
self.force_update_tag = force_update_tag
self.retention_interval = retention_interval
self.timeout = timeout
self.az_power_shell_version = az_power_shell_version
class AzurePowerShellScriptProperties(DeploymentScriptPropertiesBase, ScriptConfigurationBase):
"""Properties of the Azure PowerShell script object.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param primary_script_uri: Uri for the script. This is the entry point for the external script.
:type primary_script_uri: str
:param supporting_script_uris: Supporting files for the external script.
:type supporting_script_uris: list[str]
:param script_content: Script body.
:type script_content: str
:param arguments: Command line arguments to pass to the script. Arguments are separated by
spaces. ex: -Name blue* -Location 'West US 2'.
:type arguments: str
:param environment_variables: The environment variables to pass over to the script.
:type environment_variables:
list[~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.EnvironmentVariable]
:param force_update_tag: Gets or sets how the deployment script should be forced to execute
even if the script resource has not changed. Can be current time stamp or a GUID.
:type force_update_tag: str
:param retention_interval: Required. Interval for which the service retains the script resource
after it reaches a terminal state. Resource will be deleted when this duration expires.
Duration is based on ISO 8601 pattern (for example P7D means one week).
:type retention_interval: ~datetime.timedelta
:param timeout: Maximum allowed script execution time specified in ISO 8601 format. Default
value is PT1H.
:type timeout: ~datetime.timedelta
:param container_settings: Container settings.
:type container_settings:
~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.ContainerConfiguration
:param storage_account_settings: Storage Account settings.
:type storage_account_settings:
~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.StorageAccountConfiguration
    :param cleanup_preference: The cleanup preference once the script execution reaches a terminal
     state. Default setting is 'Always'. Possible values include: "Always", "OnSuccess",
"OnExpiration".
:type cleanup_preference: str or
~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.CleanupOptions
:ivar provisioning_state: State of the script execution. This only appears in the response.
Possible values include: "Creating", "ProvisioningResources", "Running", "Succeeded", "Failed",
"Canceled".
:vartype provisioning_state: str or
~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.ScriptProvisioningState
:ivar status: Contains the results of script execution.
:vartype status: ~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.ScriptStatus
:ivar outputs: List of script outputs.
:vartype outputs: dict[str, object]
:param az_power_shell_version: Required. Azure PowerShell module version to be used.
:type az_power_shell_version: str
"""
_validation = {
'script_content': {'max_length': 32000, 'min_length': 0},
'retention_interval': {'required': True},
'provisioning_state': {'readonly': True},
'status': {'readonly': True},
'outputs': {'readonly': True},
'az_power_shell_version': {'required': True},
}
_attribute_map = {
'primary_script_uri': {'key': 'primaryScriptUri', 'type': 'str'},
'supporting_script_uris': {'key': 'supportingScriptUris', 'type': '[str]'},
'script_content': {'key': 'scriptContent', 'type': 'str'},
'arguments': {'key': 'arguments', 'type': 'str'},
'environment_variables': {'key': 'environmentVariables', 'type': '[EnvironmentVariable]'},
'force_update_tag': {'key': 'forceUpdateTag', 'type': 'str'},
'retention_interval': {'key': 'retentionInterval', 'type': 'duration'},
'timeout': {'key': 'timeout', 'type': 'duration'},
'container_settings': {'key': 'containerSettings', 'type': 'ContainerConfiguration'},
'storage_account_settings': {'key': 'storageAccountSettings', 'type': 'StorageAccountConfiguration'},
'cleanup_preference': {'key': 'cleanupPreference', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'status': {'key': 'status', 'type': 'ScriptStatus'},
'outputs': {'key': 'outputs', 'type': '{object}'},
'az_power_shell_version': {'key': 'azPowerShellVersion', 'type': 'str'},
}
def __init__(
self,
*,
retention_interval: datetime.timedelta,
az_power_shell_version: str,
primary_script_uri: Optional[str] = None,
supporting_script_uris: Optional[List[str]] = None,
script_content: Optional[str] = None,
arguments: Optional[str] = None,
environment_variables: Optional[List["EnvironmentVariable"]] = None,
force_update_tag: Optional[str] = None,
timeout: Optional[datetime.timedelta] = None,
container_settings: Optional["ContainerConfiguration"] = None,
storage_account_settings: Optional["StorageAccountConfiguration"] = None,
cleanup_preference: Optional[Union[str, "CleanupOptions"]] = None,
**kwargs
):
super(AzurePowerShellScriptProperties, self).__init__(container_settings=container_settings, storage_account_settings=storage_account_settings, cleanup_preference=cleanup_preference, primary_script_uri=primary_script_uri, supporting_script_uris=supporting_script_uris, script_content=script_content, arguments=arguments, environment_variables=environment_variables, force_update_tag=force_update_tag, retention_interval=retention_interval, timeout=timeout, **kwargs)
self.primary_script_uri = primary_script_uri
self.supporting_script_uris = supporting_script_uris
self.script_content = script_content
self.arguments = arguments
self.environment_variables = environment_variables
self.force_update_tag = force_update_tag
self.retention_interval = retention_interval
self.timeout = timeout
self.az_power_shell_version = az_power_shell_version
self.container_settings = container_settings
self.storage_account_settings = storage_account_settings
self.cleanup_preference = cleanup_preference
self.provisioning_state = None
self.status = None
self.outputs = None
self.az_power_shell_version = az_power_shell_version
class ContainerConfiguration(msrest.serialization.Model):
"""Settings to customize ACI container instance.
    :param container_group_name: Container group name; if not specified, the name is
     auto-generated. Not specifying a 'containerGroupName' instructs the system to generate a
     unique name, which might end up flagging an Azure Policy as non-compliant. Use
     'containerGroupName' when you have an Azure Policy that expects a specific naming convention
     or when you want to fully control the name. The 'containerGroupName' property must be between
     1 and 63 characters long, must contain only lowercase letters, numbers, and dashes, and it
     cannot start or end with a dash; consecutive dashes are not allowed. To specify a
     'containerGroupName', add the following object to properties: { "containerSettings": {
     "containerGroupName": "contoso-container" } }. If you do not want to specify a
     'containerGroupName', do not add the 'containerSettings' property.
:type container_group_name: str
"""
_validation = {
'container_group_name': {'max_length': 63, 'min_length': 1},
}
_attribute_map = {
'container_group_name': {'key': 'containerGroupName', 'type': 'str'},
}
def __init__(
self,
*,
container_group_name: Optional[str] = None,
**kwargs
):
super(ContainerConfiguration, self).__init__(**kwargs)
self.container_group_name = container_group_name
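# Sketch translating the docstring's JSON example into this model (the group
# name "contoso-container" is the docstring's own placeholder):
#
#   cs = ContainerConfiguration(container_group_name="contoso-container")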
class DeploymentScriptListResult(msrest.serialization.Model):
"""List of deployment scripts.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: An array of deployment scripts.
:type value:
list[~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.DeploymentScript]
:ivar next_link: The URL to use for getting the next set of results.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DeploymentScript]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["DeploymentScript"]] = None,
**kwargs
):
super(DeploymentScriptListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
class DeploymentScriptsError(msrest.serialization.Model):
"""Deployment scripts error response.
:param error: The resource management error response.
:type error: ~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.ErrorResponse
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorResponse'},
}
def __init__(
self,
*,
error: Optional["ErrorResponse"] = None,
**kwargs
):
super(DeploymentScriptsError, self).__init__(**kwargs)
self.error = error
class DeploymentScriptUpdateParameter(AzureResourceBase):
"""Deployment script parameters to be updated.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: String Id used to locate any resource on Azure.
:vartype id: str
:ivar name: Name of this resource.
:vartype name: str
:ivar type: Type of this resource.
:vartype type: str
:param tags: A set of tags. Resource tags to be updated.
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(DeploymentScriptUpdateParameter, self).__init__(**kwargs)
self.tags = tags
class EnvironmentVariable(msrest.serialization.Model):
"""The environment variable to pass to the script in the container instance.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the environment variable.
:type name: str
:param value: The value of the environment variable.
:type value: str
:param secure_value: The value of the secure environment variable.
:type secure_value: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
'secure_value': {'key': 'secureValue', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
value: Optional[str] = None,
secure_value: Optional[str] = None,
**kwargs
):
super(EnvironmentVariable, self).__init__(**kwargs)
self.name = name
self.value = value
self.secure_value = secure_value
class ErrorAdditionalInfo(msrest.serialization.Model):
"""The resource management error additional info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The additional info type.
:vartype type: str
:ivar info: The additional info.
:vartype info: object
"""
_validation = {
'type': {'readonly': True},
'info': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'info': {'key': 'info', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
super(ErrorAdditionalInfo, self).__init__(**kwargs)
self.type = None
self.info = None
class ErrorResponse(msrest.serialization.Model):
"""The resource management error response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar target: The error target.
:vartype target: str
:ivar details: The error details.
:vartype details:
list[~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.ErrorResponse]
:ivar additional_info: The error additional info.
:vartype additional_info:
list[~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.ErrorAdditionalInfo]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'target': {'readonly': True},
'details': {'readonly': True},
'additional_info': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[ErrorResponse]'},
'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'},
}
def __init__(
self,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.code = None
self.message = None
self.target = None
self.details = None
self.additional_info = None
class ManagedServiceIdentity(msrest.serialization.Model):
"""Managed identity generic object.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: Type of the managed identity. Default value: "UserAssigned".
:vartype type: str
:param user_assigned_identities: The list of user-assigned managed identities associated with
the resource. Key is the Azure resource Id of the managed identity.
:type user_assigned_identities: dict[str,
~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.UserAssignedIdentity]
"""
_validation = {
'type': {'constant': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{UserAssignedIdentity}'},
}
type = "UserAssigned"
def __init__(
self,
*,
user_assigned_identities: Optional[Dict[str, "UserAssignedIdentity"]] = None,
**kwargs
):
super(ManagedServiceIdentity, self).__init__(**kwargs)
self.user_assigned_identities = user_assigned_identities
class ScriptLog(AzureResourceBase):
"""Script execution log object.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: String Id used to locate any resource on Azure.
:vartype id: str
:ivar name: Name of this resource.
:vartype name: str
:ivar type: Type of this resource.
:vartype type: str
:ivar log: Script execution logs in text format.
:vartype log: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'log': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'log': {'key': 'properties.log', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ScriptLog, self).__init__(**kwargs)
self.log = None
class ScriptLogsList(msrest.serialization.Model):
"""Deployment script execution logs.
:param value: Deployment scripts logs.
:type value: list[~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.ScriptLog]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ScriptLog]'},
}
def __init__(
self,
*,
value: Optional[List["ScriptLog"]] = None,
**kwargs
):
super(ScriptLogsList, self).__init__(**kwargs)
self.value = value
class ScriptStatus(msrest.serialization.Model):
"""Generic object modeling results of script execution.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar container_instance_id: ACI resource Id.
:vartype container_instance_id: str
:ivar storage_account_id: Storage account resource Id.
:vartype storage_account_id: str
:ivar start_time: Start time of the script execution.
:vartype start_time: ~datetime.datetime
:ivar end_time: End time of the script execution.
:vartype end_time: ~datetime.datetime
:ivar expiration_time: Time the deployment script resource will expire.
:vartype expiration_time: ~datetime.datetime
:param error: Error that is relayed from the script execution.
:type error: ~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.ErrorResponse
"""
_validation = {
'container_instance_id': {'readonly': True},
'storage_account_id': {'readonly': True},
'start_time': {'readonly': True},
'end_time': {'readonly': True},
'expiration_time': {'readonly': True},
}
_attribute_map = {
'container_instance_id': {'key': 'containerInstanceId', 'type': 'str'},
'storage_account_id': {'key': 'storageAccountId', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'expiration_time': {'key': 'expirationTime', 'type': 'iso-8601'},
'error': {'key': 'error', 'type': 'ErrorResponse'},
}
def __init__(
self,
*,
error: Optional["ErrorResponse"] = None,
**kwargs
):
super(ScriptStatus, self).__init__(**kwargs)
self.container_instance_id = None
self.storage_account_id = None
self.start_time = None
self.end_time = None
self.expiration_time = None
self.error = error
class StorageAccountConfiguration(msrest.serialization.Model):
"""Settings to use an existing storage account. Valid storage account kinds are: Storage, StorageV2 and FileStorage.
:param storage_account_name: The storage account name.
:type storage_account_name: str
:param storage_account_key: The storage account access key.
:type storage_account_key: str
"""
_attribute_map = {
'storage_account_name': {'key': 'storageAccountName', 'type': 'str'},
'storage_account_key': {'key': 'storageAccountKey', 'type': 'str'},
}
def __init__(
self,
*,
storage_account_name: Optional[str] = None,
storage_account_key: Optional[str] = None,
**kwargs
):
super(StorageAccountConfiguration, self).__init__(**kwargs)
self.storage_account_name = storage_account_name
self.storage_account_key = storage_account_key
class SystemData(msrest.serialization.Model):
"""Metadata pertaining to creation and last modification of the resource.
:param created_by: The identity that created the resource.
:type created_by: str
:param created_by_type: The type of identity that created the resource. Possible values
include: "User", "Application", "ManagedIdentity", "Key".
:type created_by_type: str or
~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.CreatedByType
:param created_at: The timestamp of resource creation (UTC).
:type created_at: ~datetime.datetime
:param last_modified_by: The identity that last modified the resource.
:type last_modified_by: str
:param last_modified_by_type: The type of identity that last modified the resource. Possible
values include: "User", "Application", "ManagedIdentity", "Key".
:type last_modified_by_type: str or
~azure.mgmt.resource.deploymentscripts.v2019_10_preview.models.CreatedByType
    :param last_modified_at: The timestamp of the resource's last modification (UTC).
:type last_modified_at: ~datetime.datetime
"""
_attribute_map = {
'created_by': {'key': 'createdBy', 'type': 'str'},
'created_by_type': {'key': 'createdByType', 'type': 'str'},
'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
}
def __init__(
self,
*,
created_by: Optional[str] = None,
created_by_type: Optional[Union[str, "CreatedByType"]] = None,
created_at: Optional[datetime.datetime] = None,
last_modified_by: Optional[str] = None,
last_modified_by_type: Optional[Union[str, "CreatedByType"]] = None,
last_modified_at: Optional[datetime.datetime] = None,
**kwargs
):
super(SystemData, self).__init__(**kwargs)
self.created_by = created_by
self.created_by_type = created_by_type
self.created_at = created_at
self.last_modified_by = last_modified_by
self.last_modified_by_type = last_modified_by_type
self.last_modified_at = last_modified_at
class UserAssignedIdentity(msrest.serialization.Model):
"""User-assigned managed identity.
:param principal_id: Azure Active Directory principal ID associated with this identity.
:type principal_id: str
:param client_id: Client App Id associated with this identity.
:type client_id: str
"""
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'client_id': {'key': 'clientId', 'type': 'str'},
}
def __init__(
self,
*,
principal_id: Optional[str] = None,
client_id: Optional[str] = None,
**kwargs
):
super(UserAssignedIdentity, self).__init__(**kwargs)
self.principal_id = principal_id
self.client_id = client_id
# ---- file: Django/project03/app/migrations/0001_initial.py | repo: MunSeoHee/Likelion_Gachon_2020 | license: none ----
# Generated by Django 3.0.4 on 2020-07-29 06:54
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Blog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField()),
('title', models.CharField(max_length=100)),
('contents', models.TextField()),
],
),
]
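# For reference, this initial migration corresponds to a model roughly like the
# following in the app's models.py (reconstructed here, not part of the original file):
#
#   class Blog(models.Model):
#       date = models.DateTimeField()
#       title = models.CharField(max_length=100)
#       contents = models.TextField()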
# ---- file: xai/brain/wordbase/nouns/_tuberculosis.py | repo: cash2one/xai | license: MIT ----
# class header
class _TUBERCULOSIS():
    def __init__(self):
        self.name = "TUBERCULOSIS"
        self.definitions = [u"a serious infectious disease that can attack many parts of a person's body, especially their lungs"]
        self.parents = []
        self.children = []
        self.properties = []
        self.jsondata = {}
        self.specie = 'nouns'
    def run(self, obj1 = [], obj2 = []):
        return self.jsondata
# ---- file: core/__init__.py | repo: eduarde/PentaGroup | license: none ----
default_app_config = 'core.apps.CoreConfig'
# import logging
# from django.conf import settings
# fmt = getattr(settings, 'LOG_FORMAT', None)
# lvl = getattr(settings, 'LOG_LEVEL', logging.DEBUG)
# logging.basicConfig(format=fmt, level=lvl)
# logging.debug("Logging started on %s for %s" % (logging.root.name, logging.getLevelName(lvl)))
# ---- file: AutonomousSourceCode/data/raw/squareroot/5e828e59-aad1-47ca-bae8-ed5d41514fe3__FairAndSquare.py | repo: erickmiller/AutomatousSourceCode | license: none ----
'''
Solving the third code jam problem
'''
def isPalindrome(number):
    original = number
    reverse = 0
    while number != 0:
        reverse = reverse * 10 + number % 10
        number //= 10  # floor division keeps number an int under Python 3
    return reverse == original
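# Worked example: isPalindrome(121) builds reverse = 1, 12, 121 and returns True;
# isPalindrome(123) ends with reverse = 321 != 123 and returns False.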
# main program
import os, math
full_path = os.path.realpath(__file__)
path, fname = os.path.split(full_path)
f = open(os.path.join(path, 'C-small-attempt0.in'))  # portable path join instead of '\\'
numOfTests = int(f.readline())
results = []
for test in range(numOfTests):
a, b = [int(x) for x in f.readline().split()] # read a line
counter = 0
root = int(math.sqrt(a))
    square = root**2
    # in case the integer square root was truncated down
if square < a:
root += 1
square = root**2
while square <= b:
if isPalindrome(root) and isPalindrome(square):
counter += 1
square += root * 2 + 1 # (x+1)^2 = x^2 + 2x + 1
root += 1
results.append(counter)
outFile = open(os.path.join(path, 'output.txt'), 'w')
for i in range(0, len(results)):
caseNumber = i+1
outFile.write("Case #%d: %d\n" % (caseNumber, results[i] ))
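# Design note: the inner while-loop enumerates the perfect squares in [a, b]
# incrementally via (x+1)^2 = x^2 + 2x + 1, so no sqrt or repeated
# multiplication is needed once the starting root is found.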
# ---- file: Jaehwan-Hong/baekjoon/beginner_100th/14928.py | repo: mintheon/Practice-Algorithm | license: none ----
# Baekjoon 14928: big number (BIG)
from decimal import Decimal, getcontext
getcontext().prec = 10 ** 6
n = input()
print(Decimal(n) % 20000303)
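# Note: CPython integers are arbitrary-precision, so `print(int(n) % 20000303)`
# would work as well; the Decimal approach with prec = 10**6 presumably
# anticipates inputs of up to about a million digits for this problem.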
# ---- file: Pyrado/pyrado/environments/one_step/catapult.py | repo: arlene-kuehn/SimuRLacra | license: BSD-2-Clause / BSD-3-Clause ----
# Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from init_args_serializer.serializable import Serializable
import pyrado
from pyrado.environments.sim_base import SimEnv
from pyrado.spaces.box import BoxSpace
from pyrado.spaces.singular import SingularStateSpace
from pyrado.tasks.desired_state import DesStateTask
from pyrado.tasks.reward_functions import AbsErrRewFcn
from pyrado.utils.data_types import RenderMode
class CatapultSim(SimEnv, Serializable):
"""
    In this special environment, the action is equal to the policy parameter. Therefore, it only makes sense
    to use it in combination with a linear policy that has only one constant feature.
"""
name: str = 'cata'
def __init__(self, max_steps: int, example_config: bool):
"""
Constructor
:param max_steps: maximum number of simulation steps
:param example_config: configuration for the 'illustrative example' in the journal
"""
Serializable._init(self, locals())
super().__init__(dt=None, max_steps=max_steps)
self.example_config = example_config
self._planet = -1
# Initialize the domain parameters (Earth)
self._g = 9.81 # gravity constant [m/s**2]
self._k = 2e3 # catapult spring's stiffness constant [N/m]
self._x = 1. # catapult spring's pre-elongation [m]
# Domain independent parameter
self._m = 70. # victim's mass [kg]
        # Set the bounds for the system's states and actions
max_state = np.array([1000.]) # [m], arbitrary but >> self._x
max_act = max_state
self._curr_act = np.zeros_like(max_act) # just for usage in render function
self._state_space = BoxSpace(-max_state, max_state, labels=['h'])
self._init_space = SingularStateSpace(np.zeros(self._state_space.shape), labels=['h_0'])
self._act_space = BoxSpace(-max_act, max_act, labels=['theta'])
# Define the task including the reward function
self._task = self._create_task(task_args=dict())
@property
def state_space(self):
return self._state_space
@property
def obs_space(self):
return self._state_space
@property
def init_space(self):
return self._init_space
@property
def act_space(self):
return self._act_space
def _create_task(self, task_args: dict) -> DesStateTask:
# Define the task including the reward function
state_des = task_args.get('state_des', None)
if state_des is None:
state_des = np.zeros(self._state_space.shape)
return DesStateTask(self.spec, state_des, rew_fcn=AbsErrRewFcn(q=np.array([1.]), r=np.array([0.])))
@property
def task(self):
return self._task
@property
def domain_param(self):
if self.example_config:
return dict(planet=self._planet)
else:
return dict(g=self._g,
k=self._k,
x=self._x)
@domain_param.setter
def domain_param(self, param: dict):
assert isinstance(param, dict)
# Set the new domain params if given, else the default value
if self.example_config:
if param['planet'] == 0:
# Mars
self._g = 3.71
self._k = 1e3
self._x = 0.5
elif param['planet'] == 1:
# Venus
self._g = 8.87
self._k = 3e3
self._x = 1.5
elif param['planet'] == -1:
# Default value which should make the computation invalid
self._g = None
self._k = None
self._x = None
else:
raise ValueError("Domain parameter planet was {}, but must be either 0 or 1!".format(
param['planet']))
        else:
            self._g = param.get('g', self._g)
            self._k = param.get('k', self._k)
            self._x = param.get('x', self._x)
            assert self._g > 0 and self._k > 0 and self._x > 0  # validate the updated values
@classmethod
def get_nominal_domain_param(cls) -> dict:
        return dict(g=9.81, k=2e3, x=1.)  # consistent with the Earth defaults set in the constructor
def reset(self, init_state: np.ndarray = None, domain_param: dict = None) -> np.ndarray:
# Reset time
self._curr_step = 0
# Reset the domain parameters
if domain_param is not None:
self.domain_param = domain_param
# Reset the state
if init_state is None:
self.state = self._init_space.sample_uniform() # zero
else:
if not init_state.shape == self.obs_space.shape:
raise pyrado.ShapeErr(given=init_state, expected_match=self.obs_space)
if isinstance(init_state, np.ndarray):
self.state = init_state.copy()
else:
try:
self.state = np.array(init_state)
except Exception:
raise pyrado.TypeErr(given=init_state, expected_type=[np.ndarray, list])
# Reset the task
self._task.reset(env_spec=self.spec)
# Return perfect observation
return self.observe(self.state)
def step(self, act):
# Apply actuator limits
act = self.limit_act(act) # dummy for CatapultSim
self._curr_act = act # just for the render function
# Calculate the maximum height of the flight trajectory ("one step dynamics")
self.state = self._k/(2.*self._m*self._g)*(act - self._x)**2 # h(theta, xi)
# Current reward depending on the state after the step (since there is only one step) and the (unlimited) action
self._curr_rew = self.task.step_rew(self.state, act, self._curr_step)
self._curr_step += 1
# Check if the task or the environment is done
done = self._task.is_done(self.state)
if self._curr_step >= self._max_steps:
done = True
if done:
# Add final reward if done
remaining_steps = self._max_steps - (self._curr_step + 1) if self._max_steps is not pyrado.inf else 0
self._curr_rew += self._task.final_rew(self.state, remaining_steps)
return self.observe(self.state), self._curr_rew, done, {}
def render(self, mode: RenderMode, render_step: int = 1):
# Call base class
super().render(mode)
# Print to console
if mode.text:
if self._curr_step%render_step == 0 and self._curr_step > 0: # skip the render before the first step
print("step: {:3} | r_t: {: 1.3f} | a_t: {}\t | s_t+1: {}".format(
self._curr_step,
self._curr_rew,
self._curr_act,
self.state))
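# Hedged usage sketch (assumes this module's imports above, e.g. numpy as np):
# a one-step rollout of the catapult environment with an arbitrary action.
#
#     env = CatapultSim(max_steps=1, example_config=False)
#     env.reset(domain_param=dict(g=9.81, k=2e3, x=1.))
#     obs, rew, done, _ = env.step(np.array([1.5]))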
class CatapultExample:
"""
For calculating the quantities of the 'illustrative example' in [1]
.. seealso::
[1] F. Muratore, M. Gienger, J. Peters, "Assessing Transferability from Simulation to Reality for Reinforcement
Learning", PAMI, 2019
"""
def __init__(self, m, g_M, k_M, x_M, g_V, k_V, x_V):
"""
Constructor
"""
# Store parameters
self.m = m
self.g_M, self.k_M, self.x_M = g_M, k_M, x_M
self.g_V, self.k_V, self.x_V = g_V, k_V, x_V
def opt_policy_param(self, n_M, n_V):
"""
Compute the optimal policy parameter.
:param n_M: number of Mars samples
:param n_V: number of Venus samples
:return: optimal policy parameter
"""
# Calculate (mixed-domain) constants
c_M = n_M*self.k_M*self.g_V
c_V = n_V*self.k_V*self.g_M
# Calculate optimal policy parameter
th_opt = (self.x_M*c_M + self.x_V*c_V)/(c_M + c_V)
return th_opt
def opt_est_expec_return(self, n_M, n_V):
"""
Calculate the optimal objective function value.
:param n_M: number of Mars samples
:param n_V: number of Venus samples
:return: optimal value of the estimated expected return
"""
c_M = n_M*self.k_M*self.g_V
c_V = n_V*self.k_V*self.g_M
c = c_M + c_V
n = n_M + n_V
M_part = -n_M*self.k_M/(2*n*self.m*self.g_M)*((self.x_V*c_V - self.x_M*c_V)/c)**2
V_part = -n_V*self.k_V/(2*n*self.m*self.g_V)*((self.x_M*c_M - self.x_V*c_M)/c)**2
Jhat_n_opt = M_part + V_part
# Check and return
assert Jhat_n_opt <= 1e-8, "Jhat_th_n_opt should be <= 0, but was {}!".format(Jhat_n_opt)
return Jhat_n_opt
def est_expec_return(self, th, n_M, n_V):
"""
        Calculate the estimated expected return for a given policy parameter.
:param th: policy parameter
:param n_M: number of Mars samples
:param n_V: number of Venus samples
:return: value of the estimated expected return
"""
n = n_M + n_V
M_part = -n_M/n*self.k_M/(2*self.m*self.g_M)*(th - self.x_M)**2
V_part = -n_V/n*self.k_V/(2*self.m*self.g_V)*(th - self.x_V)**2
Jhat_n = M_part + V_part
# Check and return
assert Jhat_n <= 0
return Jhat_n
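if __name__ == '__main__':
    # Hedged self-check (not taken from [1]): the Mars/Venus values mirror the
    # domain_param setter of CatapultSim above; the sample counts are arbitrary.
    ex = CatapultExample(m=70., g_M=3.71, k_M=1e3, x_M=0.5, g_V=8.87, k_V=3e3, x_V=1.5)
    th_opt = ex.opt_policy_param(n_M=10, n_V=10)
    # At th_opt, the estimated expected return attains its optimal value
    assert abs(ex.est_expec_return(th_opt, 10, 10) - ex.opt_est_expec_return(10, 10)) < 1e-6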
|
[
"[email protected]"
] | |
8b0248ae612554e90e944519298e07b1bdeb10ae
|
0a89cad9f98e5c014b4c6970c6a63d29c89bacbf
|
/ilisa/antennameta/delays.py
|
7a368c97f1533e4be73314a83c1f4b556de7b0ec
|
[
"ISC"
] |
permissive
|
mpozoga/iLiSA
|
5fe516972d010f04695a5990f68c1b4b6f092889
|
164c198a7569413a12d52338738aaa24c763890d
|
refs/heads/master
| 2020-08-27T07:40:41.099906 | 2019-10-12T12:37:16 | 2019-10-12T12:37:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,663 |
py
|
#!/usr/bin/env python
"""Module for handling the LOFAR antenna delay files.
"""
import sys
import os
import numpy as np
#import pkg_resources
#my_data = pkg_resources.resource_filename(__name__, "share/StaticMetaData")
STATICMETADATA = os.path.join(os.path.dirname(__file__),'share/StaticMetaData/')
CABLEDELAYDIR = STATICMETADATA
TABCOLUMNS = ('RCU','LBL_len','LBL_delay','LBH_len','LBH_delay','HBA_len','HBA_delay')
TABFORMATS = ('i4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4')
def _load_stn_cabledelay_file(f):
lengthsNdelays = np.loadtxt(f, dtype={
'names': TABCOLUMNS,
'formats': TABFORMATS})
return lengthsNdelays
def _stnid2filename(stnid):
filename = stnid+'-'+'CableDelays.conf'
return os.path.join(CABLEDELAYDIR,filename)
def _get_units(quantitystr):
arr, qtype = quantitystr.split('_')
if qtype == 'len':
unit = 'm'
elif qtype == 'delay':
unit = 'ns'
else:
raise ValueError, "Unknown quantity type"
return unit
def get_stn_cabledelays(stnid):
f = _stnid2filename(stnid)
lengthsNdelays = _load_stn_cabledelay_file(f)
return lengthsNdelays
if __name__ == '__main__':
stnid = sys.argv[1]
quantity = sys.argv[2]
if quantity not in TABCOLUMNS:
raise ValueError, "Choose one of the following quantities: {}".format(TABCOLUMNS[1:])
unit = _get_units(quantity)
lengthsNdelays = get_stn_cabledelays(stnid)
print("RCU [#] {} [{}]".format(quantity,unit))
for row, rcu in enumerate(lengthsNdelays['RCU']):
print("{} {}".format(rcu, lengthsNdelays[quantity][row]))
|
[
"[email protected]"
] | |
18915148d4eae7ae3f755afbef18c840ec5dae52
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/e27b4c2ae40681b2c2ec196cd7d853614b265abd-<main>-bug.py
|
b7e22bbd6cbf18bf9fd295b34e4f4275576b906b
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,373 |
py
|
def main():
state_map = dict(present='install', absent='uninstall -y', latest='install -U', forcereinstall='install -U --force-reinstall')
module = AnsibleModule(argument_spec=dict(state=dict(default='present', choices=state_map.keys()), name=dict(type='list'), version=dict(type='str'), requirements=dict(), virtualenv=dict(type='path'), virtualenv_site_packages=dict(default=False, type='bool'), virtualenv_command=dict(default='virtualenv', type='path'), virtualenv_python=dict(type='str'), use_mirrors=dict(default=True, type='bool'), extra_args=dict(), editable=dict(default=True, type='bool'), chdir=dict(type='path'), executable=dict(), umask=dict()), required_one_of=[['name', 'requirements']], mutually_exclusive=[['name', 'requirements'], ['executable', 'virtualenv']], supports_check_mode=True)
state = module.params['state']
name = module.params['name']
version = module.params['version']
requirements = module.params['requirements']
extra_args = module.params['extra_args']
virtualenv_python = module.params['virtualenv_python']
chdir = module.params['chdir']
umask = module.params['umask']
if (umask and (not isinstance(umask, int))):
try:
umask = int(umask, 8)
except Exception:
module.fail_json(msg='umask must be an octal integer', details=to_native(sys.exc_info()[1]))
old_umask = None
if (umask is not None):
old_umask = os.umask(umask)
try:
if ((state == 'latest') and (version is not None)):
module.fail_json(msg='version is incompatible with state=latest')
if (chdir is None):
chdir = tempfile.gettempdir()
err = ''
out = ''
env = module.params['virtualenv']
if env:
if (not os.path.exists(os.path.join(env, 'bin', 'activate'))):
if module.check_mode:
module.exit_json(changed=True)
cmd = module.params['virtualenv_command']
if (os.path.basename(cmd) == cmd):
cmd = module.get_bin_path(cmd, True)
if module.params['virtualenv_site_packages']:
cmd += ' --system-site-packages'
else:
cmd_opts = _get_cmd_options(module, cmd)
if ('--no-site-packages' in cmd_opts):
cmd += ' --no-site-packages'
if virtualenv_python:
cmd += (' -p%s' % virtualenv_python)
elif PY3:
cmd += (' -p%s' % sys.executable)
cmd = ('%s %s' % (cmd, env))
(rc, out_venv, err_venv) = module.run_command(cmd, cwd=chdir)
out += out_venv
err += err_venv
if (rc != 0):
_fail(module, cmd, out, err)
pip = _get_pip(module, env, module.params['executable'])
cmd = ('%s %s' % (pip, state_map[state]))
path_prefix = None
if env:
path_prefix = '/'.join(pip.split('/')[:(- 1)])
has_vcs = False
if name:
for pkg in name:
if bool((pkg and re.match('(svn|git|hg|bzr)\\+', pkg))):
has_vcs = True
break
if (has_vcs and module.params['editable']):
args_list = []
if extra_args:
args_list = extra_args.split(' ')
if ('-e' not in args_list):
args_list.append('-e')
extra_args = ' '.join(args_list)
if extra_args:
cmd += (' %s' % extra_args)
if name:
for pkg in name:
cmd += (' %s' % _get_full_name(pkg, version))
elif requirements:
cmd += (' -r %s' % requirements)
if module.check_mode:
if (extra_args or requirements or (state == 'latest') or (not name)):
module.exit_json(changed=True)
elif has_vcs:
module.exit_json(changed=True)
(pkg_cmd, out_pip, err_pip) = _get_packages(module, pip, chdir)
out += out_pip
err += err_pip
changed = False
if name:
pkg_list = [p for p in out.split('\n') if ((not p.startswith('You are using')) and (not p.startswith('You should consider')) and p)]
if (pkg_cmd.endswith(' freeze') and (('pip' in name) or ('setuptools' in name))):
for pkg in ('setuptools', 'pip'):
if (pkg in name):
formatted_dep = _get_package_info(module, pkg, env)
if (formatted_dep is not None):
pkg_list.append(formatted_dep)
out += ('%s\n' % formatted_dep)
for pkg in name:
is_present = _is_present(pkg, version, pkg_list, pkg_cmd)
if (((state == 'present') and (not is_present)) or ((state == 'absent') and is_present)):
changed = True
break
module.exit_json(changed=changed, cmd=pkg_cmd, stdout=out, stderr=err)
if (requirements or has_vcs):
(_, out_freeze_before, _) = _get_packages(module, pip, chdir)
else:
out_freeze_before = None
(rc, out_pip, err_pip) = module.run_command(cmd, path_prefix=path_prefix, cwd=chdir)
out += out_pip
err += err_pip
if ((rc == 1) and (state == 'absent') and (('not installed' in out_pip) or ('not installed' in err_pip))):
pass
elif (rc != 0):
_fail(module, cmd, out, err)
if (state == 'absent'):
changed = ('Successfully uninstalled' in out_pip)
elif (out_freeze_before is None):
changed = ('Successfully installed' in out_pip)
else:
(_, out_freeze_after, _) = _get_packages(module, pip, chdir)
changed = (out_freeze_before != out_freeze_after)
module.exit_json(changed=changed, cmd=cmd, name=name, version=version, state=state, requirements=requirements, virtualenv=env, stdout=out, stderr=err)
finally:
if (old_umask is not None):
os.umask(old_umask)
|
[
"[email protected]"
] | |
e5c2442247427cec1a3951aa995099d60185b687
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/380/usersdata/308/93222/submittedfiles/principal.py
|
2e2b9bd7c56e2b637fb56a6e0c2b10a929045553
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 286 |
py
|
# -*- coding: utf-8 -*-
from minha_bib import *
from random import randint
notas = []
for i in range(0, 5):
x = randint(0, 100)/10.0
notas.append(x)
print('Valor sorteado: %.1f' % x)
print (notas)
for i in range (len(notas), 0, -1):
print(notas[i-1])
|
[
"[email protected]"
] | |
40a782cbd11ac6b9d420ca5d39aea916b32437c7
|
0b01cb61a4ae4ae236a354cbfa23064e9057e434
|
/alipay/aop/api/domain/AlipayTradePageMergePayModel.py
|
070e26792a14dd740369010833dd12f5d7db348d
|
[
"Apache-2.0"
] |
permissive
|
hipacloud/alipay-sdk-python-all
|
e4aec2869bf1ea6f7c6fb97ac7cc724be44ecd13
|
bdbffbc6d5c7a0a3dd9db69c99443f98aecf907d
|
refs/heads/master
| 2022-11-14T11:12:24.441822 | 2020-07-14T03:12:15 | 2020-07-14T03:12:15 | 277,970,730 | 0 | 0 |
Apache-2.0
| 2020-07-08T02:33:15 | 2020-07-08T02:33:14 | null |
UTF-8
|
Python
| false | false | 2,684 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.OrderDetail import OrderDetail
class AlipayTradePageMergePayModel(object):
def __init__(self):
self._order_details = None
self._out_merge_no = None
self._timeout_express = None
@property
def order_details(self):
return self._order_details
@order_details.setter
def order_details(self, value):
if isinstance(value, list):
self._order_details = list()
for i in value:
if isinstance(i, OrderDetail):
self._order_details.append(i)
else:
self._order_details.append(OrderDetail.from_alipay_dict(i))
@property
def out_merge_no(self):
return self._out_merge_no
@out_merge_no.setter
def out_merge_no(self, value):
self._out_merge_no = value
@property
def timeout_express(self):
return self._timeout_express
@timeout_express.setter
def timeout_express(self, value):
self._timeout_express = value
def to_alipay_dict(self):
params = dict()
if self.order_details:
if isinstance(self.order_details, list):
for i in range(0, len(self.order_details)):
element = self.order_details[i]
if hasattr(element, 'to_alipay_dict'):
self.order_details[i] = element.to_alipay_dict()
if hasattr(self.order_details, 'to_alipay_dict'):
params['order_details'] = self.order_details.to_alipay_dict()
else:
params['order_details'] = self.order_details
if self.out_merge_no:
if hasattr(self.out_merge_no, 'to_alipay_dict'):
params['out_merge_no'] = self.out_merge_no.to_alipay_dict()
else:
params['out_merge_no'] = self.out_merge_no
if self.timeout_express:
if hasattr(self.timeout_express, 'to_alipay_dict'):
params['timeout_express'] = self.timeout_express.to_alipay_dict()
else:
params['timeout_express'] = self.timeout_express
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayTradePageMergePayModel()
if 'order_details' in d:
o.order_details = d['order_details']
if 'out_merge_no' in d:
o.out_merge_no = d['out_merge_no']
if 'timeout_express' in d:
o.timeout_express = d['timeout_express']
return o
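if __name__ == '__main__':
    # Hedged usage sketch: the merge number and timeout below are placeholder
    # values, not real trade data.
    model = AlipayTradePageMergePayModel()
    model.out_merge_no = '20200101000000001'
    model.timeout_express = '90m'
    print(json.dumps(model.to_alipay_dict()))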
|
[
"[email protected]"
] | |
a458a4a886e0d030318c0fd387a072c062b86fc0
|
bd010b944cb658531fe1e3a4fda9e8dd2d4e2a15
|
/runners/beginner/features/signal_to_noise_limit.py
|
4ac17e0544fedb41e5bca2399961cb2294abc81d
|
[] |
no_license
|
harshitjindal/autolens_workspace
|
f32132a51eff888c3b25098df09f514be2dd6422
|
47f85e6b7c2f5871055b9b88520c30d39fd91e2a
|
refs/heads/cern-submission
| 2021-03-20T19:53:12.153296 | 2020-04-24T15:25:25 | 2020-04-24T15:25:25 | 247,228,818 | 0 | 0 | null | 2020-04-24T15:26:45 | 2020-03-14T07:04:59 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,779 |
py
|
import os
# This pipeline runner demonstrates how to use signal-to-noise limits in pipelines. Check out the pipeline
# 'autolens_workspace/pipelines/beginner/features/signal_to_noise_limit.py' for a description of this feature.
# I'll assume that you are familiar with how the beginner runners work, so if any code doesn't make sense familiarize
# yourself with those first!
### AUTOFIT + CONFIG SETUP ###
import autofit as af
workspace_path = "{}/../../../".format(os.path.dirname(os.path.realpath(__file__)))
config_path = workspace_path + "config"
af.conf.instance = af.conf.Config(
config_path=workspace_path + "config", output_path=workspace_path + "output"
)
dataset_label = "imaging"
dataset_name = "lens_sie__source_sersic"
pixel_scales = 0.1
### AUTOLENS + DATA SETUP ###
import autolens as al
import autolens.plot as aplt
dataset_path = af.path_util.make_and_return_path_from_path_and_folder_names(
path=workspace_path, folder_names=["dataset", dataset_label, dataset_name]
)
imaging = al.imaging.from_fits(
image_path=dataset_path + "image.fits",
psf_path=dataset_path + "psf.fits",
noise_map_path=dataset_path + "noise_map.fits",
pixel_scales=pixel_scales,
)
mask = al.mask.circular(
shape_2d=imaging.shape_2d, pixel_scales=imaging.pixel_scales, radius=3.0
)
aplt.imaging.subplot_imaging(imaging=imaging, mask=mask)
# We simply import the signal-to-noise limit pipeline and pass the signal-to-noise limit we want as an input parameter
# (which for the pipeline below, is only used in phase 1).
from pipelines.beginner.features import signal_to_noise_limit
pipeline = signal_to_noise_limit.make_pipeline(
phase_folders=[dataset_label, dataset_name], signal_to_noise_limit=20.0
)
pipeline.run(dataset=imaging, mask=mask)
|
[
"[email protected]"
] | |
88e2b2e2b6a4072ebb10fc9315ec7f85230da990
|
e2c0b31cf4e1611631658ac2bc2dd22e8d3607b0
|
/webapp/common/logger.py
|
6718fab11a845f77104cde6b48b96fa087f817bf
|
[
"MIT"
] |
permissive
|
binary-butterfly/open-booking-connect
|
2aef9ed443ed8096e4876a923cfb02e535494d99
|
ed153dd191c75810cbd2d9b74aee2962380a54d0
|
refs/heads/master
| 2023-08-19T22:12:47.150414 | 2021-10-10T18:53:35 | 2021-10-10T18:53:35 | 355,625,001 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,422 |
py
|
# encoding: utf-8
"""
open booking connect
Copyright (c) 2021, binary butterfly GmbH
Use of this source code is governed by an MIT-style license that can be found in the LICENSE file.
"""
import os
import logging
from logging.handlers import WatchedFileHandler
from ..config import Config
class Logger:
registered_logs = {}
def get_log(self, log_name):
if log_name in self.registered_logs:
return self.registered_logs[log_name]
logger = logging.getLogger(log_name)
logger.handlers.clear()
logger.setLevel(logging.INFO)
# Init File Handler
file_name = os.path.join(Config.LOG_DIR, '%s.log' % log_name)
file_handler = WatchedFileHandler(file_name)
file_handler.setLevel(logging.INFO)
        file_handler.setFormatter(logging.Formatter(
            '%(asctime)s %(levelname)s: %(message)s'
        ))
logger.addHandler(file_handler)
file_name = os.path.join(Config.LOG_DIR, '%s.err' % log_name)
file_handler = WatchedFileHandler(file_name)
file_handler.setLevel(logging.ERROR)
        file_handler.setFormatter(logging.Formatter(
            '%(asctime)s %(levelname)s: %(message)s'
        ))
logger.addHandler(file_handler)
if Config.DEBUG:
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_format = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
console_handler.setFormatter(console_format)
logger.addHandler(console_handler)
self.registered_logs[log_name] = logger
return logger
def debug(self, log_name, message):
self.get_log(log_name).debug(message)
def info(self, log_name, message):
self.get_log(log_name).info(message)
def warn(self, log_name, message):
self.get_log(log_name).warning(message)
def error(self, log_name, message, details=None):
self.get_log(log_name).error(message + (("\n" + details) if details else ""))
def exception(self, log_name, message, details=None):
self.get_log(log_name).exception(message + (("\n" + details) if details else ""))
def critical(self, log_name, message, details=None):
self.get_log(log_name).critical(message + (("\n" + details) if details else ""))
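if __name__ == '__main__':
    # Hedged usage sketch: the log name and messages are placeholders;
    # Config.LOG_DIR must point to an existing, writable directory.
    logger = Logger()
    logger.info('example', 'connector started')
    logger.error('example', 'sync failed', details='timeout after 30s')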
|
[
"[email protected]"
] | |
919f93e4e6bf935186a408a749c680b7cfb98e10
|
d54b49fb7a899fa7c1b0bd5be02c9af43cb9cae0
|
/accesspanel/extensions/lock_input.py
|
e551b8ba5094a45ef28355b950c2c7a16667ba3e
|
[
"BSD-3-Clause"
] |
permissive
|
vincent-lg/accesspanel
|
923a03f852aa96804abafe4c51833ded6e091427
|
42d27b7f12e9c3f9b5467a8ba4e973e2e9735796
|
refs/heads/master
| 2021-06-16T03:11:33.937714 | 2017-05-19T19:19:20 | 2017-05-19T19:19:20 | 71,724,440 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,986 |
py
|
# Copyright (c) 2016, LE GOFF Vincent
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of ytranslate nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module containing the LockInput extension."""
import wx
from accesspanel.extensions.base import BaseExtension
class LockInput(BaseExtension):
"""Implement a lock-in-input mode.
In this mode, the user cannot use tab or shift-tab to leave the
input field. The cases in which the lock is applied can be changed
through the extension's settings.
Behavior of the extension can be altered through attributes:
empty: the lock will be active unless the input is empty
>>> import wx
>>> from accesspanel import AccessPanel
>>> class MyAccessPanel(AccessPanel):
... def __init__(self, parent):
... AccessPanel.__init__(self, parent, lock_input=True)
... # Configure the lock
... lock = self.extensions["lock_input"]
... lock.empty = True
    Default values:
        empty: False
    If you wish to modify these default values, see the example above.
"""
def __init__(self, panel):
BaseExtension.__init__(self, panel)
# Features that can be set in the AccessPanel
self.empty = False
def OnKeyDown(self, modifiers, key):
"""Prevent changing focus with tab/shift-tab."""
skip = True
if modifiers in (wx.MOD_NONE, wx.MOD_SHIFT) and key == wx.WXK_TAB:
if not self.empty:
skip = False
elif self.panel.input:
skip = False
return skip
|
[
"[email protected]"
] | |
bf34a703de23cc4f65a0f4660199b7563d9f3c42
|
9cd180fc7594eb018c41f0bf0b54548741fd33ba
|
/sdk/python/pulumi_azure_nextgen/peering/v20200101preview/registered_asn.py
|
1b858c98471a749f3f06d915f00f04ec6173b3ae
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
MisinformedDNA/pulumi-azure-nextgen
|
c71971359450d03f13a53645171f621e200fe82d
|
f0022686b655c2b0744a9f47915aadaa183eed3b
|
refs/heads/master
| 2022-12-17T22:27:37.916546 | 2020-09-28T16:03:59 | 2020-09-28T16:03:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,592 |
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['RegisteredAsn']
class RegisteredAsn(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
asn: Optional[pulumi.Input[int]] = None,
peering_name: Optional[pulumi.Input[str]] = None,
registered_asn_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
The customer's ASN that is registered by the peering service provider.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] asn: The customer's ASN from which traffic originates.
:param pulumi.Input[str] peering_name: The name of the peering.
:param pulumi.Input[str] registered_asn_name: The name of the ASN.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['asn'] = asn
if peering_name is None:
raise TypeError("Missing required property 'peering_name'")
__props__['peering_name'] = peering_name
if registered_asn_name is None:
raise TypeError("Missing required property 'registered_asn_name'")
__props__['registered_asn_name'] = registered_asn_name
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['name'] = None
__props__['peering_service_prefix_key'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:peering/latest:RegisteredAsn"), pulumi.Alias(type_="azure-nextgen:peering/v20200401:RegisteredAsn")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(RegisteredAsn, __self__).__init__(
'azure-nextgen:peering/v20200101preview:RegisteredAsn',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'RegisteredAsn':
"""
Get an existing RegisteredAsn resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return RegisteredAsn(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def asn(self) -> pulumi.Output[Optional[int]]:
"""
The customer's ASN from which traffic originates.
"""
return pulumi.get(self, "asn")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="peeringServicePrefixKey")
def peering_service_prefix_key(self) -> pulumi.Output[str]:
"""
The peering service prefix key that is to be shared with the customer.
"""
return pulumi.get(self, "peering_service_prefix_key")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
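# Hedged usage sketch (inside a Pulumi program; all names and values below are
# placeholders, not real Azure resources):
#
#     registered_asn = RegisteredAsn(
#         "exampleRegisteredAsn",
#         asn=65000,
#         peering_name="examplePeering",
#         registered_asn_name="exampleAsn",
#         resource_group_name="exampleResourceGroup")
#     pulumi.export("prefix_key", registered_asn.peering_service_prefix_key)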
|
[
"[email protected]"
] | |
379c4b9eceb06d3d37321197658f376170da04d7
|
dde951c8bcfb79cdead3449de42d9ed3e6f24fbe
|
/LearnPythontheHradWay/ex48/ex48/parser.py
|
85e2fad7d534fc6e3c0c28fe74bf27d1a0b2d23c
|
[] |
no_license
|
wolfeyuanwei/study-python
|
c764353cbf75b0ccd79dc562fe11eebee712510b
|
be1a9ec93cd29d9fe6b69ad4f9c059fb9dd308de
|
refs/heads/master
| 2021-05-11T22:57:51.541684 | 2018-02-08T05:03:10 | 2018-02-08T05:03:10 | 117,504,326 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,955 |
py
|
#!/usr/bin/python
#filename:parser.py
#-*-coding:utf-8 -*-
class ParseError(Exception):
pass
class Sentence(object):
def __init__(self, subject, verb, obj):
self.subject = subject[1]
self.verb = verb[1]
self.object = obj[1]
def peek(word_list):
if word_list:
word = word_list[0]
return word[0]
else:
return None
def match(word_list, expecting):
if word_list:
word = word_list.pop(0)
if word[0] == expecting:
return word
else:
return None
else:
return None
def skip(word_list, word_type):
while peek(word_list) == word_type:
match(word_list, word_type)
def parse_verb(word_list):
skip(word_list, 'stop')
if peek(word_list) == 'verb':
return match(word_list, 'verb')
else:
        raise ParseError("Expected a verb next.")
def parse_object(word_list):
skip(word_list, 'stop')
next_word = peek(word_list)
if next_word == 'noun':
return match(word_list, 'noun')
elif next_word == 'direction':
return match(word_list, 'direction')
else:
raise ParseError("Expected a noun or direction next.")
def parse_subject(word_list):
skip(word_list, 'stop')
next_word = peek(word_list)
if next_word == 'noun':
return match(word_list, 'noun')
elif next_word == 'verb':
return ('noun', 'player')
else:
raise ParseError("Expected a verb next.")
def parse_sentence(word_list):
subj = parse_subject(word_list)
verb = parse_verb(word_list)
obj = parse_object(word_list)
return Sentence(subj, verb, obj)
if __name__ == '__main__':
x = parse_sentence([('verb', 'run'), ('direction', 'north')])
print x.subject
print x.verb
print x.object
x = parse_sentence([('noun','bear'),('verb', 'eat'), ('stop', 'the'),('noun', 'honey')])
print x.subject
print x.verb
print x.object
|
[
"[email protected]"
] | |
d4c65b9241fb8af0e9e3ce0f0f5a8c6653b57571
|
fc365e7d2a558bf819b8062fb5a452e8c4ad3ca8
|
/library/Codon.py
|
8898e8ca7433963a80232402bd67fbb3deb8b0a8
|
[] |
no_license
|
ajrichards/phylogenetic-models
|
9e47f27ff46ce95dc365d45fcd11949be3b506cb
|
783f9a6b6cea816d255fa23f2e62423d98059ad9
|
refs/heads/master
| 2021-01-01T19:39:41.550390 | 2015-08-19T13:59:58 | 2015-08-19T13:59:58 | 25,293,648 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,871 |
py
|
#!/usr/bin/env python
"""
TODO
change aa2pp to aa2ppc
and use http://www.geneinfinity.org/sp/sp_aaprops.html
"""
__author__ = "Adam Richards"
class Codon(object):
"""
A class to handle codon related functions
Amino acids are handled using the single character id, with the exception of STOP.
"""
def __init__(self):
"""
initialize dictionaries
"""
## dictionaries to convert between amino acids and codons
self.aa2codon = {'C': ['TGT','TGC'],\
'D': ['GAT','GAC'],\
'S': ['TCT', 'TCG', 'TCA', 'TCC', 'AGC', 'AGT'],\
'Q': ['CAA', 'CAG'],\
'M': ['ATG'],\
'N': ['AAC', 'AAT'],\
'P': ['CCT', 'CCG', 'CCA', 'CCC'],\
'K': ['AAG', 'AAA'],\
'STOP': ['TAG', 'TGA', 'TAA'],\
'T': ['ACC', 'ACA', 'ACG', 'ACT'],\
'F': ['TTT', 'TTC'],\
'A': ['GCA', 'GCC', 'GCG', 'GCT'],\
'G': ['GGT', 'GGG', 'GGA', 'GGC'],\
'I': ['ATC', 'ATA', 'ATT'],\
'L': ['TTA', 'TTG', 'CTC', 'CTT', 'CTG', 'CTA'],\
'H': ['CAT', 'CAC'],\
'R': ['CGA', 'CGC', 'CGG', 'CGT', 'AGG', 'AGA'],\
'W': ['TGG'],\
'V': ['GTA', 'GTC', 'GTG', 'GTT'],\
'E': ['GAG', 'GAA'],\
'Y': ['TAT', 'TAC']}
self.codon2aa = {}
for key,val in self.aa2codon.iteritems():
for c in val:
self.codon2aa[c] = key
## dictionaries to convert between amino acids physical property class
self.pp2aa = {"neg":["D","E"],\
"pos":["K","R","H"],\
"pnc":["S","T","C","M","N","Q"],\
"aro":["F","Y","W"],\
"npa":["G","A","V","L","I","P"]}
self.aa2pp = {}
for key,val in self.pp2aa.iteritems():
for c in val:
self.aa2pp[c] = key
## dictionaries to convert between short and long versions of amino acids
self.long2short = {"ALA":"A","ARG":"R","ASN":"N","ASP":"D",\
"CYS":"C","GLU":"E","GLN":"Q","GLY":"G",\
"HIS":"H","ILE":"I","LEU":"L","LYS":"K",\
"MET":"M","PHE":"F","PRO":"P","SER":"S",\
"THR":"T","TRP":"W","TYR":"Y","VAL":"V"}
self.short2long = dict([(value,key) for key,value in self.long2short.iteritems()])
if __name__ == "__main__":
print "Running..."
cd = Codon()
if cd.aa2pp["F"] != "aro":
raise Exception("Failed aa to pp test")
|
[
"[email protected]"
] |