Dataset schema (one row per Python source file; the content column holds the raw file text):

- hexsha: string (length 40)
- size: int64 (5 to 2.06M)
- ext: string (10 classes)
- lang: string (1 value)
- max_stars_repo_path / max_issues_repo_path / max_forks_repo_path: string (length 3 to 248)
- max_stars_repo_name / max_issues_repo_name / max_forks_repo_name: string (length 5 to 125)
- max_stars_repo_head_hexsha / max_issues_repo_head_hexsha / max_forks_repo_head_hexsha: string (length 40 to 78)
- max_stars_repo_licenses / max_issues_repo_licenses / max_forks_repo_licenses: list (1 to 10 entries)
- max_stars_count: int64 (1 to 191k, nullable)
- max_issues_count: int64 (1 to 67k, nullable)
- max_forks_count: int64 (1 to 105k, nullable)
- max_stars_repo_stars_event_min/max_datetime, max_issues_repo_issues_event_min/max_datetime, max_forks_repo_forks_event_min/max_datetime: string (length 24, nullable)
- content: string (length 5 to 2.06M)
- avg_line_length: float64 (1 to 1.02M)
- max_line_length: int64 (3 to 1.03M)
- alphanum_fraction: float64 (0 to 1)
- count_classes: int64 (0 to 1.6M), score_classes: float64 (0 to 1)
- count_generators: int64 (0 to 651k), score_generators: float64 (0 to 1)
- count_decorators: int64 (0 to 990k), score_decorators: float64 (0 to 1)
- count_async_functions: int64 (0 to 235k), score_async_functions: float64 (0 to 1)
- count_documentation: int64 (0 to 1.04M), score_documentation: float64 (0 to 1)
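Each row below follows this schema: the content column carries the raw Python source and the trailing numeric columns carry per-file statistics. A rough sketch of consuming such a dump with the Hugging Face datasets library follows; the file name python_files.jsonl is a placeholder, not the real location of this data.

# Minimal sketch: stream rows of a dump with this schema and pull out the code.
from datasets import load_dataset

ds = load_dataset("json", data_files="python_files.jsonl", split="train", streaming=True)

for row in ds:
    print(row["max_stars_repo_path"], row["size"], row["max_stars_repo_licenses"])
    source_code = row["content"]              # the raw Python file
    # e.g. filter on the precomputed quality scores
    if row["score_documentation"] > 0.5:
        pass  # keep well-documented files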
efd3f9d1de68654dbc76d3fbfef70bcad64b263b | size 585 | py | Python | main/methods/analysis.py | repo hannxiao/autotrade2 @ 8e6f3d463334b6ea8a18074de58e25c0dab93f39 | license MIT | stars: null | issues: 6 (2020-06-06T01:05:02.000Z to 2021-12-13T20:42:16.000Z) | forks: null (forks repo listed as hannxiao/autotrade)

from . import toolFuncs
def DefineTrend(data, K):
'''
Filter all the trend whose range less than K%
'''
pairs = list(zip(data['Date'], data['Close']))
is_extreme = toolFuncs.extreme_point(data['Close'], K, recognition_method='height')
output = [pairs[i] for i in range(len(is_extreme)) if is_extreme[i]]
return {'DefineTrend': {'name': 'Trend', 'data': output, 'position': 'main', 'type': 'line',
'lineStyle': {'normal': {'width': 3}, 'showSymbol':False}
}
}
| 32.5 | 98 | 0.529915 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 188 | 0.321368 |
efd40da6f7f764459934c721ccc5ec880311c2e3 | size 607 | py | Python | FaceClassify/losses/TripletMarginLoss.py | repo CharlesPikachu/CharlesFace @ 90bfe38c58068228d0069dce43b55b2570acaa16 | license MIT | stars: 13 (2018-05-23T07:07:28.000Z to 2021-05-28T07:37:30.000Z) | issues: null | forks: null

# Author:
# Charles
# Function:
# Triplet loss function.
import torch
from torch.autograd import Function
import sys
sys.path.append('../')
from utils.utils import *
class TripletMarginLoss(Function):
def __init__(self, margin):
super(TripletMarginLoss, self).__init__()
self.margin = margin
# norm 2
self.pdist = PairwiseDistance(2)
def forward(self, anchor, positive, negative):
dis_apos = self.pdist.forward(anchor, positive)
dis_aneg = self.pdist.forward(anchor, negative)
dist_hinge = torch.clamp(self.margin+dis_apos-dis_aneg, min=0.0)
loss = torch.mean(dist_hinge)
        return loss
 | 26.391304 | 66 | 0.744646 | 437 | 0.719934 | 0 | 0 | 0 | 0 | 0 | 0 | 68 | 0.112026 |
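A rough usage sketch for the loss above; it assumes the repo's PairwiseDistance behaves like torch.nn.PairwiseDistance, and the random embeddings stand in for the output of an embedding network.

import torch

anchor = torch.randn(32, 128, requires_grad=True)     # embeddings for the anchor images
positive = torch.randn(32, 128, requires_grad=True)   # same identity as anchor
negative = torch.randn(32, 128, requires_grad=True)   # different identity

criterion = TripletMarginLoss(margin=0.5)
loss = criterion.forward(anchor, positive, negative)  # scalar hinge loss
loss.backward()                                       # gradients flow back to the embeddings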
efd461c9230c324e2c8e6e92be4631dc26caa578 | size 768 | py | Python | DailyProgrammer/20120316A.py | repo DayGitH/Python-Challenges @ bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf | license MIT | stars: 2 (2020-12-23T18:59:22.000Z to 2021-04-14T13:16:09.000Z) | issues: null | forks: null

"""
you have a string "ddaaiillyypprrooggrraammeerr". We want to remove all the consecutive duplicates and put them in a
separate string, which yields two separate instances of the string "dailyprogramer".
use this list for testing:
input: "balloons"
expected output: "balons" "lo"
input: "ddaaiillyypprrooggrraammeerr"
expected output: "dailyprogramer" "dailyprogramer"
input: "aabbccddeded"
expected output: "abcdeded" "abcd"
input: "flabby aapples"
expected output: "flaby aples" "bap"
"""
inp = "ddaaiillyypprrooggrraammeerr"
org = ""
extra = ""
hold = ""
for a in range(len(inp)):
if hold == inp[a]:
extra += inp[a]
else:
org += inp[a]
hold = inp[a]
print("original:\t", inp)
print("first:\t\t", org)
print("repeats:\t", extra) | 25.6 | 116 | 0.69401 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 566 | 0.736979 |
efd60ec0f5dfed774930cf3e30f7572bed405c2b | size 6,485 | py | Python | src/preppipe/enginesupport/enginesupport.py | repo PrepPipe/preppipe-python @ 6fc547a539737ec37a7528eb97ce92e56d4f404a | license Apache-2.0 | stars: 1 (2022-02-28T03:34:57.000Z) | issues: null | forks: null

#!/usr/bin/env python3
import typing
import PIL.Image
from enum import Enum
import re
import preppipe.commontypes
from preppipe.vnmodel import *
class EngineSupport:
"""All engine support classes inherit this class, so that we can use reflection to query all supported engines"""
pass
# we define an MIR infrastructure for backend... Engine Model (EM)
class EMInstruction:
# abstract opcode data
opcode : typing.Any
# list of operands
operand_list : typing.List[typing.Any] = []
def __init__(self, opcode, operand_list : typing.List[typing.Any] = []) -> None :
self.opcode = opcode
if len(operand_list) == 0:
self.operand_list = []
else:
self.operand_list = operand_list
def set_operand_list(self, operand_list : typing.List[typing.Any]) -> None:
self.operand_list = operand_list
def add_operand(self, operand : typing.Any) -> None:
self.operand_list.append(operand)
def get_num_operands(self):
return len(self.operand_list)
def get_operand(self, index : int) -> typing.Any:
return self.operand_list[index]
def get_opcode(self) -> typing.Any:
return self.opcode
def get_operand_dict(self, arglist : typing.List[str]) -> typing.Dict[str, typing.Any]:
assert(len(arglist) == len(self.operand_list))
result : typing.Dict[str, typing.Any] = {}
for i in range(0, len(self.operand_list)):
result[arglist[i]] = self.operand_list[i]
return result
class EMBasicBlock:
label : str = ""
instr_list : typing.List[EMInstruction] = []
def __init__(self, label : str = "") -> None :
self.label = label
self.instr_list = []
def add_instruction(self, instr : EMInstruction) -> EMInstruction:
self.instr_list.append(instr)
return instr
def get_instruction_list(self) -> typing.List[EMInstruction]:
return self.instr_list
def get_label(self) -> str:
return self.label
class EMFunction:
"""It is fine if left unused; not all engines support functions"""
basicblock_list : typing.List[typing.Any] = []
def __init__(self) -> None :
self.basicblock_list = []
def add_basicblock(self, bb : typing.Any):
self.basicblock_list.append(bb)
return bb
# helper functions
def _get_label_name(name : str, type_prefix : str, scope_prefix: str, name_dict : typing.Dict[str, typing.Any], prefix : str = "") -> str:
# get the base name
base_label = re.sub(r'[^a-zA-Z0-9_]', '', name.replace(" ", "_"))
# ensure the name does not start with number or underscore, or is not empty
if len(base_label) > 0:
frontchar = base_label[0]
if frontchar == '_' or frontchar.isnumeric():
base_label = type_prefix + "_" + base_label
else:
    # we have no alphanumeric characters
base_label = type_prefix + "_anon"
# make sure it is unique
# we may have duplicates
# try to add scope prefix to resolve this
if prefix + base_label in name_dict and len(scope_prefix) > 0:
base_label = scope_prefix + "_" + base_label
# now add the prefix; we no longer add prefix to base label
if len(prefix) > 0:
base_label = prefix + base_label
# if not working, add a numeric suffix
numeric_suffix = 0
result = base_label
while result in name_dict:
numeric_suffix += 1
result = base_label + '_' + str(numeric_suffix)
# done
return result
def label_branch_targets(model : VNModel, reserved_set : typing.Set[str] = [], include_basicblock : bool = True) -> typing.Dict[VNValue, str]:
"""Assign all functions (and optionally basic blocks) with a label that is:
1. alphanumeric, non-empty
2. does not start with underscore '_'
3. unique across all functions and basic blocks
  We may need this labeling even when functions already have no duplicated labels, to avoid sanitization issues or reserved keywords
"""
name_dict = {} # label -> element (used internally)
elem_dict = {} # element -> label (for returning)
# add all reserved keywords to name_dict
for reserved in reserved_set:
assert isinstance(reserved, str)
name_dict[reserved] = None
# actual work
for func in model.get_function_list():
func_label = _get_label_name(func.get_name(), "control_label", "", name_dict)
name_dict[func_label] = func
elem_dict[func] = func_label
if include_basicblock:
for bb in func.get_basicblock_list():
bbname = bb.get_name()
if len(bbname) == 0 and bb is func.get_entry_block():
bbname = "entry"
bb_label = _get_label_name(bbname, "control_label", func_label, name_dict)
name_dict[bb_label] = bb
elem_dict[bb] = bb_label
return elem_dict
def label_basicblocks(func : VNFunction, reserved_set : typing.Set[str] = []) -> typing.Dict[VNBasicBlock, str]:
"""Assign labels to basic blocks with the same criteria as label_branch_targets:
1. alphanumeric, non-empty
2. does not start with underscore '_'
3. unique
"""
name_dict = {} # label -> element (used internally)
elem_dict = {} # element -> label (for returning)
# add all reserved keywords to name_dict
for reserved in reserved_set:
assert isinstance(reserved, str)
name_dict[reserved]= None
for bb in func.get_basicblock_list():
bbname = bb.get_name()
if len(bbname) == 0 and bb is func.get_entry_block():
bbname = "entry"
bb_label = _get_label_name(bbname, "label", "", name_dict, ".")
name_dict[bb_label] = bb
elem_dict[bb] = bb_label
return elem_dict
def label_sayer_identity(model : VNModel, reserved_set : typing.Set[str] = []) -> typing.Dict[str, str]:
"""make sure all characters and sayers have (alphanumeric) labels"""
name_dict = {}
elem_dict = {}
for reserved in reserved_set:
assert isinstance(reserved, str)
name_dict[reserved] = None
for character in model.get_character_list():
name = _get_label_name(character.get_name(), "character", "", name_dict)
name_dict[name] = character
elem_dict[character] = name
for sayer in model.get_sayer_list():
character = sayer.get_identity()
character_label = elem_dict[character]
name = _get_label_name(character_label + sayer.get_name(), "sayer", "", name_dict)
name_dict[name] = sayer
elem_dict[sayer] = name
return elem_dict
| 32.918782 | 143 | 0.665998 | 2,062 | 0.317965 | 0 | 0 | 0 | 0 | 0 | 0 | 1,611 | 0.248419 |
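A small sketch of how the EM containers above compose into a program; the opcode names used here ("say", "jump") are placeholders rather than a real engine's instruction set.

func = EMFunction()
entry = func.add_basicblock(EMBasicBlock("entry"))
entry.add_instruction(EMInstruction("say", ["narrator", "Hello, world"]))
entry.add_instruction(EMInstruction("jump", ["next_scene"]))

for bb in func.basicblock_list:
    for instr in bb.get_instruction_list():
        print(bb.get_label(), instr.get_opcode(), instr.operand_list)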
efd85393ed4e8b07da224123311c11a7291f7173 | size 190 | py | Python | openprocurement/tender/limited/__init__.py | repo Leits/openprocurement.tender.limited @ c216e5b96dc850036d94fdf21883845afee34252 | license Apache-2.0 | stars: null | issues: 2 (2021-03-26T00:30:52.000Z to 2022-03-21T22:22:09.000Z) | forks: null

from openprocurement.tender.limited.models import Tender
def includeme(config):
config.add_tender_procurementMethodType(Tender)
config.scan("openprocurement.tender.limited.views")
| 27.142857 | 56 | 0.815789 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 38 | 0.2 |
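Since includeme is the standard Pyramid plugin hook, wiring this package into an application is a single include call; the sketch below assumes the application's Configurator has already been set up by openprocurement.api, which provides the add_tender_procurementMethodType directive used above.

# Sketch: on the application's existing Pyramid configurator, enable this package with:
config.include('openprocurement.tender.limited')   # runs includeme(config) above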
efd8cec6101a750931dee27419124950274496b7 | size 3,422 | py | Python | upload.py | repo woodlords/nftmaker-pro-scripts @ 86e1eef0d297bf9589d56272b1edea9bb3e18612 | license Apache-2.0 | stars: 2 (2022-02-09T17:48:33.000Z to 2022-02-12T08:18:42.000Z) | issues: null | forks: null

from pprint import pprint
import requests
import base64
import json
import argparse
import sys
p = argparse.ArgumentParser(description="New")
p.add_argument('-f','--folder-name', required=True, help='Folder name of the images/metadata files')
p.add_argument('-s','--start', required=False, help='Start ID to upload')
p.add_argument('-e','--end', required=False, help='End number for IDs to upload')
p.add_argument('--ids', nargs="+", required=False, help='List of local IDs to upload')
if len(sys.argv)==1:
p.print_help(sys.stderr)
sys.exit(1)
args = p.parse_args()
# Some variables you will need
api_key = "api_key_from_nftmakerpro"
nft_project_id = "12345"
upload_url = f'https://api.nft-maker.io/UploadNft/{api_key}/{nft_project_id}'
prefixName="WoodCastleProject"
prefixDispalyName="Wood Castle: Wood Lords S1 " # Leave a space at the end as we will add the #number of token at the end.
projectDescription="Wood Castle Studios Presents Woods Lords: Season One"
# Lord details
folder_name = args.folder_name
ids_list = args.ids
def convert_image_to_base64(image_file):
with open(image_file, 'rb') as binary_file:
binary_file_data = binary_file.read()
base64_encoded_data = base64.b64encode(binary_file_data)
base64_message = base64_encoded_data.decode('utf-8')
return base64_message
# See example Metadata file to use for adding metadata
def gen_api_metadata(metadata_json_file):
api_metadata = 'api_' + metadata_json_file
with open(metadata_json_file, 'r') as fd:
myjson = json.load(fd)
data = []
for k,v in myjson.items():
d = { }
d['name'] = k
d['value'] = v
data.append(d)
return data
def gen_metadata(assetName):
metadata_file = "images/" + folder_name + '/' + assetName + '.json'
image_file = "images/" + folder_name + '/' + assetName + '.jpg'
base64_message = convert_image_to_base64(image_file)
api_metadata = gen_api_metadata(metadata_file)
params = {
"assetName": prefixName+assetName, # If you set up a prefix in your project, you omit the prefix here, if not add prefix as well
"previewImageNft": {
"mimetype": "image/jpeg",
"displayname": prefixDispalyName + "#" + assetName,
"fileFromBase64": base64_message,
"description": projectDescription,
"metadataPlaceholder": api_metadata
}
}
return params
def upload_image(data):
try:
r = requests.post(upload_url, json=data)
print(r.json())
    except Exception:
        print(data['assetName'] + ' : FAILED!')
def upload_set(startCount, endCount):
# Names of the images/metadata files
for i in range(startCount, endCount+1):
if(i < 10):
assetName = '000' + str(i)
elif(i < 100):
assetName = '00' + str(i)
elif(i < 1000):
assetName = '0' + str(i)
else:
assetName = str(i)
print(f'INFO: Working on asset {prefixName+assetName}')
data = gen_metadata(assetName)
upload_image(data)
def main():
# Iterate through list of IDs and upload them
if args.ids:
for i in args.ids:
startCount = int(i)
endCount = int(i)
upload_set(startCount,endCount)
else:
startCount = int(args.start)
endCount = int(args.end)
upload_set(startCount,endCount)
main()
| 30.553571 | 136 | 0.648159 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 982 | 0.286967 |
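Driven by the argparse setup above, the script runs from the command line; typical invocations look like this (the folder name and IDs are examples):

# Upload assets 0001 through 0025 from images/season1/
python upload.py --folder-name season1 --start 1 --end 25

# Re-upload a few specific assets after fixing their metadata
python upload.py -f season1 --ids 17 42 108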
efdbc298676774176ab064bb9bca24e8e6416478 | size 1,613 | py | Python | runner/runner.py | repo lorne-luo/quicksilver @ 79f2b66de9ab7aa63a35f56cac800b64c202f70c | license MIT | stars: null | issues: null | forks: null

import logging
from queue import Queue
from redis_queue.queue import RedisQueue
from runner.base import BaseRunner
logger = logging.getLogger(__name__)
class MemoryQueueRunner(BaseRunner):
"""Memory queue runner"""
def create_queue(self, queue_name):
return Queue(maxsize=2000)
class ReisQueueRunner(BaseRunner):
"""Redis queue runner"""
def create_queue(self, queue_name):
return RedisQueue(queue_name)
class TestRedisRunner(ReisQueueRunner):
"""Test runner with redis"""
def loop_handlers(self, event):
print(f'Process event, {event.__dict__}')
super(TestRedisRunner, self).loop_handlers(event)
def put_event(self, event):
print(f'Put event = {event.__dict__}')
super(TestRedisRunner, self).put_event(event)
class StreamRunnerBase(ReisQueueRunner):
broker = ''
account = None
def __init__(self, queue_name, accounts, strategies, handlers, *args, **kwargs):
super(StreamRunnerBase, self).__init__(queue_name, accounts, strategies, handlers)
self.pairs = kwargs.get('pairs')
self.prices = self._set_up_prices_dict()
def _set_up_prices_dict(self):
prices_dict = dict(
(k, v) for k, v in [
(p, {"bid": None, "ask": None, "time": None, "spread": None}) for p in self.pairs
]
)
return prices_dict
if __name__ == '__main__':
# python -m event.runner
from handler import *
r = TestRedisRunner('test_runner', [], [],
[HeartBeatHandler(), TimeFramePublisher(timezone=0)])
r.run()
| 25.603175 | 97 | 0.650341 | 1,225 | 0.759454 | 0 | 0 | 0 | 0 | 0 | 0 | 222 | 0.137632 |
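The __main__ block above shows the wiring for the Redis-backed test runner; the same handlers run against the in-memory queue backend by swapping the class. A sketch, assuming BaseRunner.run drives the loop the same way as in the Redis case:

from handler import HeartBeatHandler, TimeFramePublisher

runner = MemoryQueueRunner('memory_runner', [], [],
                           [HeartBeatHandler(), TimeFramePublisher(timezone=0)])
runner.run()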
efdeda6cab101f5fe55ec27079a0f853fcc20c7e | size 6,790 | py | Python | ultron/sentry/Analysis/TechnicalAnalysis/__init__.py | repo wangjiehui11235/ultron @ ade46fdcff7eaf01187cdf9b9fb1d6a04ae972b7 | license Apache-2.0 | stars: 4 (2019-06-06T09:38:49.000Z to 2022-01-29T00:02:11.000Z) | issues: 1 (2022-02-11T03:43:10.000Z) | forks: 8 (2019-06-02T13:11:00.000Z to 2021-11-11T01:06:22.000Z)

# -*- coding: utf-8 -*-
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecuritySignValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityAverageValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityXAverageValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityMACDValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityExpValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityLogValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecuritySqrtValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityPowValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityAbsValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityAcosValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityAcoshValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityAsinValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityAsinhValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityNormInvValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityCeilValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityFloorValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityRoundValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityDiffValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityRoundValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecuritySigmoidValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityTanhValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecuritySimpleReturnValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityLogReturnValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityMaximumValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityMinimumValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingAverage
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingDecay
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingMax
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingArgMax
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingMin
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingArgMin
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingRank
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingQuantile
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingAllTrue
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingAnyTrue
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingSum
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingVariance
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingStandardDeviation
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingCountedPositive
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingPositiveAverage
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingCountedNegative
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingNegativeAverage
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingPositiveDifferenceAverage
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingNegativeDifferenceAverage
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingRSI
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingLogReturn
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingCorrelation
__all__ = ['SecuritySignValueHolder',
'SecurityAverageValueHolder',
'SecurityXAverageValueHolder',
'SecurityMACDValueHolder',
'SecurityExpValueHolder',
'SecurityLogValueHolder',
'SecuritySqrtValueHolder',
'SecurityPowValueHolder',
'SecurityAbsValueHolder',
'SecurityAcosValueHolder',
'SecurityAcoshValueHolder',
'SecurityAsinValueHolder',
'SecurityAsinhValueHolder',
'SecurityNormInvValueHolder',
'SecurityCeilValueHolder',
'SecurityFloorValueHolder',
'SecurityRoundValueHolder',
'SecurityDiffValueHolder',
'SecurityTanhValueHolder',
'SecuritySigmoidValueHolder',
'SecuritySimpleReturnValueHolder',
'SecurityLogReturnValueHolder',
'SecurityMaximumValueHolder',
'SecurityMinimumValueHolder',
'SecurityMovingAverage',
'SecurityMovingDecay',
'SecurityMovingMax',
'SecurityMovingArgMax',
'SecurityMovingMin',
'SecurityMovingArgMin',
'SecurityMovingRank',
'SecurityMovingQuantile',
'SecurityMovingAllTrue',
'SecurityMovingAnyTrue',
'SecurityMovingSum',
'SecurityMovingVariance',
'SecurityMovingStandardDeviation',
'SecurityMovingCountedPositive',
'SecurityMovingPositiveAverage',
'SecurityMovingCountedNegative',
'SecurityMovingNegativeAverage',
'SecurityMovingPositiveDifferenceAverage',
'SecurityMovingNegativeDifferenceAverage',
'SecurityMovingRSI',
'SecurityMovingLogReturn',
           'SecurityMovingCorrelation']
 | 70 | 119 | 0.841679 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,224 | 0.180265 |
efdf05259aeb476a54f281ec506c8577fe42f662 | size 17,015 | py | Python | app/common/helper.py | repo lguobin/KB_API @ f7180cf430cb8de2eac8fa78e3937666da950c7a | license Apache-2.0 | stars: null | issues: null | forks: null

# from app.common.utils import *
from sqlalchemy import desc
from settings import Config
from app.models import *
from app.extensions import db
from app.models.base import _BaseModel
from app.common.message import DBError
# Fetch paginated data
def Pages(_request, _TABLE, _filter=None):
page = get_page_value(_request)
per_page = get_per_page_value(_request, Config.PER_PAGE, Config.MAX_PER_PAGE)
paging = get_query_data(_request, "paging", 1)
filter_params = _filter
if bool(int(paging)):
pagination = get_models_filter_with_pagination(_TABLE, "", page, per_page, desc, *filter_params)
total = pagination['total']
models = pagination['models']
data = [model.get_json() for model in models]
return {
'status': 'ok',
'total': total,
'page': page,
'pages': get_pages(total, per_page),
'per_page': per_page,
'results': data
}
def input_files(pid, *row):
    # Bulk-import interface test cases; create the interface first if it does not exist
_interss = get_model_by(Interfaces, name=row[0])
if _interss != None:
_input = {
"name": row[1],
"description": "导入用例__" + str(row[2]),
"pid": pid,
"Iid": _interss.object_id,
"route": row[5],
"headers": row[6],
"requestMethod": row[7],
"requestBody": row[8],
"parameterType": row[9],
"setGlobalVars": eval(row[10]),
"checkoptions": None,
"checkSpendSeconds": row[12],
"checkResponseBody": eval(row[13]),
"checkResponseCode": row[14],
"uid": row[-1],
}
if row[11] == "Y" or row[11] == "True":
_input["checkoptions"] = True
else:
_input["checkoptions"] = False
create_model(TestCase, **_input)
else:
require_items = {
"pid": pid,
"uid": row[-1],
"name": row[0],
"route": row[5],
"headers": row[6],
"requestMethod": row[7],
"i_type": "HTTP",
"description": "导入用例__" + str(row[2])
}
_model = create_model(Interfaces, **require_items)
_input = {
"name": row[1],
"description": "导入用例__" + str(row[2]),
"pid": pid,
"Iid": _model.object_id,
"route": row[5],
"headers": row[6],
"requestMethod": row[7],
"requestBody": row[8],
"parameterType": row[9],
"setGlobalVars": row[10],
"checkoptions": None,
"checkSpendSeconds": row[12],
"checkResponseBody": row[13],
"checkResponseCode": row[14],
"uid": row[-1],
}
if row[11] == "Y" or row[11] == "True":
_input["checkoptions"] = True
else:
_input["checkoptions"] = False
create_model(TestCase, **_input)
return True
def get_Env(test_env_id):
    # Look up the environment configuration info
Temp_env_list = get_model(EnvConfig, test_env_id)
if Temp_env_list != None:
if Temp_env_list.domain == "" or Temp_env_list.domain == None:
return {'status': 'failed', 'data': '环境配置存在异常, 请前往环境设置检查'}
_env_list = [
Temp_env_list.object_id,
Temp_env_list.name,
Temp_env_list.domain,
Temp_env_list.redis,
Temp_env_list.mysql,
]
return _env_list
else:
return None
def composeCaseWorkshop(EnvId, ProjectId=None, Interface=None, Tcase=None):
if EnvId != None:
_EnvList = get_Env(EnvId)
if _EnvList == None:
return None
_CASE = []
if ProjectId != None:
_Pro_object_id = get_models(Project, object_id=ProjectId)
if _Pro_object_id != []:
__case = get_models(TestCase, pid=_Pro_object_id[0].object_id)
for x in range(len(__case)):
if __case[x].route:
reqs = {
"EnvId": _EnvList[0],
"EnvName": _EnvList[1],
"route": _EnvList[2] + __case[x].route,
"redis": _EnvList[3],
"mysql": _EnvList[4],
"name": __case[x].name,
"Project_id": __case[x].pid,
"Interface_id": __case[x].Iid,
"object_id": __case[x].object_id,
"Method": __case[x].requestMethod,
"Body": __case[x].requestBody,
"Headers": __case[x].headers,
"parameterType": __case[x].parameterType,
"filePath": __case[x].filePath,
"setGlobalVars": __case[x].setGlobalVars,
"checkoptions": __case[x].checkoptions,
"checkSpendSeconds": __case[x].checkSpendSeconds,
"checkResponseCode": __case[x].checkResponseCode,
"checkResponseBody": __case[x].checkResponseBody,
"checkResponseNumber": __case[x].checkResponseNumber,
}
_CASE.append(reqs)
else:
return None
elif Interface != None and Interface != []:
for index in range(len(Interface)):
_Inter_object_id = get_models(Interfaces, object_id=Interface[index])
if _Inter_object_id != []:
__case = get_models(TestCase, Iid=_Inter_object_id[0].object_id)
for x in range(len(__case)):
if __case[x].route:
reqs = {
"EnvId": _EnvList[0],
"EnvName": _EnvList[1],
"route": _EnvList[2] + __case[x].route,
"redis": _EnvList[3],
"mysql": _EnvList[4],
"name": __case[x].name,
"Project_id": __case[x].pid,
"Interface_id": __case[x].Iid,
"object_id": __case[x].object_id,
"Method": __case[x].requestMethod,
"Body": __case[x].requestBody,
"Headers": __case[x].headers,
"parameterType": __case[x].parameterType,
"filePath": __case[x].filePath,
"setGlobalVars": __case[x].setGlobalVars,
"checkoptions": __case[x].checkoptions,
"checkSpendSeconds": __case[x].checkSpendSeconds,
"checkResponseCode": __case[x].checkResponseCode,
"checkResponseBody": __case[x].checkResponseBody,
"checkResponseNumber": __case[x].checkResponseNumber,
}
_CASE.append(reqs)
else:
return None
elif Tcase != None and Tcase != []:
id_list = []
for case in Tcase:
_obj_id = case
if _obj_id in id_list:
Tcase.remove(case)
else:
                # Check whether the Id is valid
_temp = get_model(TestCase, object_id=_obj_id)
if _temp != None:
reqs = {
"EnvId": _EnvList[0],
"EnvName": _EnvList[1],
"route": _EnvList[2] + _temp.route,
"redis": _EnvList[3],
"mysql": _EnvList[4],
"name": _temp.name,
"Project_id": _temp.pid,
"Interface_id": _temp.Iid,
"object_id": _temp.object_id,
"Method": _temp.requestMethod,
"Body": _temp.requestBody,
"Headers": _temp.headers,
"parameterType": _temp.parameterType,
"filePath": _temp.filePath,
"setGlobalVars": _temp.setGlobalVars,
"checkoptions": _temp.checkoptions,
"checkSpendSeconds": _temp.checkSpendSeconds,
"checkResponseCode": _temp.checkResponseCode,
"checkResponseBody": _temp.checkResponseBody,
"checkResponseNumber": _temp.checkResponseNumber,
}
_CASE.append(reqs)
else:
pass
# print(7777777777777777777777777777)
# print(_CASE)
return _CASE
else:
return None
def single_Save_response(_response, object_id):
from app import app
with app.app_context():
_model = get_model(TestCase, object_id)
_model.responseBody = str(_response)
update_models(_model)
print("异步保存数据")
def save_TestReport(_response):
from app import app
with app.app_context():
_model = create_model(TestReport, **_response)
return {"object_id": _model.object_id}
def get_TestReport(_model):
from app.models.tools import get_username
if _model != None:
return {
"status": "ok",
"object_id": _model.object_id,
"uid": _model.uid,
"uid_name": get_username("UID", _model.uid),
"Project_id_name": get_username("PID", _model.Project_id),
"EnvId":_model.EnvId,
"EnvName":_model.EnvName,
"executionMode":_model.executionMode ,
"mission_name":_model.cronJobId,
# "cronJobId":_model.cronJobId,
"Project_id":_model.Project_id,
"StartTime":_model.StartTime,
"interfaces_Suites_CaseDetail":_model.interfaces_Suites_CaseDetail,
"totalCount":_model.totalCount,
"passCount":_model.passCount,
"failCount":_model.failCount,
"errorCount":_model.errorCount,
"spendTimeInSec":_model.spendTimeInSec,
"create_at": _model.created_at,
"updated_at": _model.updated_at,
}
else:
return {"status": "failed", "data": "报告不存在或已被删除!"}
# ------------------------------
# ------------------------------
# ------------------------------
def get_task_Job(table_class, **params):
_moble = db.session.query(table_class).filter_by(**params).first()
return _moble.object_id
def get_first_one_model(table_class):
return db.session.query(table_class).order_by(table_class.updated_at.desc()).first()
def get_like(table_class, params, _user=None):
if params != None and _user == None:
return db.session.query(table_class).filter(table_class.name.like("%"+params+"%")).all()
else:
_uid = db.session.query(Users).filter(Users.user==_user).first()
if _uid != None:
return db.session.query(table_class).filter(
table_class.name.like("%"+params+"%"),
table_class.uid==_uid.object_id).all()
# table_class.uid==_uid.user).all()
else:
return []
def safe_check(value):
return True
def get_query_data(request, key, default=None, throwable=False):
value = request.args.get(key, None)
if value is not None and safe_check(value):
return value
value = request.headers.get(key, None)
if value is not None and safe_check(value):
return value
if not throwable:
return default
def get_name(table_class, object_id):
try:
return get_model(table_class, object_id)
except BaseException:
return get_model(table_class, object_id)
def get_model(table_class, object_id):
return db.session.query(table_class).get(object_id)
def get_models(table_class, **params):
if params is not None and len(params) > 0:
return db.session.query(table_class).filter_by(**params, state=0).all()
else:
return db.session.query(table_class).all()
def get_post_data(request, key, throwable=False):
try:
value = request.form.get(key, None)
if value is not None:
return value
json = request.get_json(force=True)
if json is not None:
value = json.get(key, None)
if value is not None and safe_check(value):
return value
if not throwable:
return None
print("[ 缺少提交的参数 ] -> ", key)
except BaseException:
raise DBError("Error: post value no contains {0}".format(key))
def get_post_items(request, item_names, throwable=False):
items = {}
for name in item_names:
data = get_post_data(request, name, throwable)
if data is not None:
items[name] = data
return items
from sqlalchemy.exc import IntegrityError
def create_model(table_class, **items):
model = table_class()
for key, value in items.items():
setattr(model, key, value)
try:
model.update()
db.session.add(model)
db.session.commit()
return model
except IntegrityError as ie:
db.session.rollback()
raise DBError
except Exception as e:
db.session.rollback()
raise DBError
def update_models(*models, auto_commit=True):
try:
for model in models:
model.update()
db.session.add(model)
if auto_commit:
db.session.commit()
except IntegrityError as ie:
db.session.rollback()
raise DBError
except Exception as e:
db.session.rollback()
raise DBError
def get_models_timestamp(table_class, *params):
try:
return db.session.query(table_class).filter(_BaseModel.created_at <= params).all()
except Exception as e:
raise DBError
def get_models_filter(table_class, *params):
try:
return db.session.query(table_class).filter(*params).all()
except Exception as e:
raise DBError(e)
def get_page_value(request):
page = int(get_query_data(request, 'page', 1))
if page <= 0:
return 1
return page
def get_pages(total, per_page):
pages = (total + per_page - 1) // per_page
if pages <= 0:
pages = 1
return pages
def get_per_page_value(request, default, max_value):
per_page = int(get_query_data(request, 'per_page', default))
if per_page > max_value or per_page <= 0:
return max_value
return per_page
def params_filter(table_class, _name=None, _uid=None):
if _name != None and _uid != None:
        return [table_class.state == table_class.STATE_NORMAL, table_class.name.like("%"+_name+"%"), table_class.uid.like("%"+_uid+"%")]
elif _name == None and _uid != None:
return [table_class.state == table_class.STATE_NORMAL, table_class.uid.like("%"+_uid+"%")]
elif _uid == None and _name != None:
return [table_class.state == table_class.STATE_NORMAL, table_class.name.like("%"+_name+"%")]
else:
return [table_class.state == table_class.STATE_NORMAL]
def get_models_filter_with_pagination(table_class, order_name, page, per_page, order_func, *params):
    # order_name is temporarily deprecated / unused
try:
offset = (page - 1) * per_page
query = table_class.query.filter(*params)
total = query.count()
models = query.order_by(order_func(_BaseModel.updated_at)).offset(offset).limit(per_page).all()
return {
'total': total,
'models': models
}
except Exception as e:
raise DBError(e)
def get_model_by(table_class, **params):
try:
return db.session.query(table_class).filter_by(**params).first()
except Exception as e:
raise DBError(e)
def delete_model(table_class, object_id, real_delete=False, auto_commit=True):
try:
model = db.session.query(table_class).get(object_id)
delete_model_with_model(model, real_delete, auto_commit=auto_commit)
except Exception as e:
raise DBError(e)
def delete_model_with_model(model, real_delete=False, state=_BaseModel.STATE_DELETE, auto_commit=True):
try:
if real_delete:
db.session.delete(model)
else:
model.update()
model.state = state
db.session.add(model)
if auto_commit:
db.session.commit()
except Exception as e:
if auto_commit:
db.session.rollback()
        raise DBError(e)
 | 33.759921 | 132 | 0.53253 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,274 | 0.132202 |
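A sketch of how Pages and params_filter combine inside a Flask view; the route and query-string names are illustrative, "app" stands in for the real blueprint, and TestCase is one of the models imported at the top of this file.

from flask import request, jsonify

@app.route('/api/testcases', methods=['GET'])         # "app" is a placeholder for the blueprint
def list_testcases():
    name = get_query_data(request, 'name')             # optional ?name= filter, defaults to None
    filters = params_filter(TestCase, _name=name)      # state == NORMAL plus optional name LIKE
    return jsonify(Pages(request, TestCase, filters))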
efe02360bc1283274b4bc2434f2af992e192e9a4 | size 7,403 | py | Python | package/tests/test_cp/test_azure/test_domain/test_services/test_vm_credentials_service.py | repo tim-spiglanin/Azure-Shell @ 58c52994f0d6cfd798c5dca33737419ec18363d4 | license Apache-2.0 | stars: 5 (2016-09-08T08:33:47.000Z to 2020-02-10T12:31:15.000Z) | issues: 505 (2016-08-09T07:41:03.000Z to 2021-02-08T20:26:46.000Z) | forks: 5 (2016-12-21T12:52:55.000Z to 2021-07-08T09:50:42.000Z)

from unittest import TestCase
import mock
from cloudshell.cp.azure.domain.services.vm_credentials_service import VMCredentialsService
from cloudshell.cp.azure.models.vm_credentials import VMCredentials
class TestVMCredentialsService(TestCase):
def setUp(self):
self.test_username = "test_username"
self.test_password = "testPassword123"
self.test_group_name = "test_username"
self.test_storage_name = "test_storage_name"
self.test_storage_service = mock.MagicMock()
self.test_key_pair_service = mock.MagicMock()
self.test_storage_client = mock.MagicMock()
self.vm_credentials = VMCredentialsService()
def test_generate_password(self):
"""Check that method will generate password with given length and with digit and uppercase letter"""
# Act
password = self.vm_credentials._generate_password(19)
# Verify
self.assertEqual(len(password), 19)
self.assertTrue(any(char.isdigit() for char in password),
msg="Generated password must contain at least one digit character")
self.assertTrue(any(char.isupper() for char in password),
msg="Generated password must contain at least one uppercase character")
@mock.patch("cloudshell.cp.azure.domain.services.vm_credentials_service.AuthorizedKey")
def test_get_ssh_key(self, authorized_key_class):
"""Check that method will return cloudshell.cp.azure.models.authorized_key.AuthorizedKey instance"""
authorized_key_class.return_value = authorized_key = mock.MagicMock()
ssh_key = self.vm_credentials._get_ssh_key(
username=self.test_username,
storage_service=self.test_storage_service,
key_pair_service=self.test_key_pair_service,
storage_client=self.test_storage_client,
group_name=self.test_group_name,
storage_name=self.test_storage_name)
self.assertIs(ssh_key, authorized_key)
@mock.patch("cloudshell.cp.azure.domain.services.vm_credentials_service.OperatingSystemTypes")
def test_prepare_credentials_with_windows_os_type(self, os_types):
"""Check that method will call _prepare_windows_credentials and return VMCredentials model instance"""
self.vm_credentials._prepare_windows_credentials = mock.MagicMock(return_value=(self.test_username,
self.test_password))
vm_creds = self.vm_credentials.prepare_credentials(
os_type=os_types.windows,
username=self.test_username,
password=self.test_password,
storage_service=self.test_storage_service,
key_pair_service=self.test_key_pair_service,
storage_client=self.test_storage_client,
group_name=self.test_group_name,
storage_name=self.test_storage_name)
self.vm_credentials._prepare_windows_credentials.assert_called_once_with(self.test_username, self.test_password)
self.assertIsInstance(vm_creds, VMCredentials)
@mock.patch("cloudshell.cp.azure.domain.services.vm_credentials_service.OperatingSystemTypes")
def test_prepare_credentials_with_linux_os_type(self, os_types):
"""Check that method will call _prepare_linux_credentials and return VMCredentials model instance"""
# from azure.mgmt.compute.models import OperatingSystemTypes
self.vm_credentials._prepare_linux_credentials = mock.MagicMock(return_value=(self.test_username,
self.test_password,
mock.MagicMock()))
vm_creds = self.vm_credentials.prepare_credentials(
os_type=os_types.linux,
username=self.test_username,
password=self.test_password,
storage_service=self.test_storage_service,
key_pair_service=self.test_key_pair_service,
storage_client=self.test_storage_client,
group_name=self.test_group_name,
storage_name=self.test_storage_name)
self.vm_credentials._prepare_linux_credentials.assert_called_once_with(
username=self.test_username,
password=self.test_password,
storage_service=self.test_storage_service,
key_pair_service=self.test_key_pair_service,
storage_client=self.test_storage_client,
group_name=self.test_group_name,
storage_name=self.test_storage_name)
self.assertIsInstance(vm_creds, VMCredentials)
def test_prepare_windows_credentials(self):
"""Check that method will return same credentials if username and password were provided"""
username, password = self.vm_credentials._prepare_windows_credentials(self.test_username, self.test_password)
self.assertEqual(username, self.test_username)
self.assertEqual(password, self.test_password)
def test_prepare_windows_credentials_without_user_and_password(self):
"""Check that method will return default username and generate password if credentials weren't provided"""
generated_pass = mock.MagicMock()
self.vm_credentials._generate_password = mock.MagicMock(return_value=generated_pass)
username, password = self.vm_credentials._prepare_windows_credentials("", "")
self.assertEqual(username, self.vm_credentials.DEFAULT_WINDOWS_USERNAME)
self.assertEqual(password, generated_pass)
def test_prepare_linux_credentials(self):
"""Check that method will return same credentials if username and password were provided"""
username, password, ssh_key = self.vm_credentials._prepare_linux_credentials(
username=self.test_username,
password=self.test_password,
storage_service=self.test_storage_service,
key_pair_service=self.test_key_pair_service,
storage_client=self.test_storage_client,
group_name=self.test_group_name,
storage_name=self.test_storage_name)
self.assertEqual(username, self.test_username)
self.assertEqual(password, self.test_password)
self.assertIsNone(ssh_key)
def test_prepare_linux_credentials_without_user_and_password(self):
"""Check that method will return default username and ssh_key if credentials weren't provided"""
returned_ssh_key = mock.MagicMock()
self.vm_credentials._get_ssh_key = mock.MagicMock(return_value=returned_ssh_key)
username, password, ssh_key = self.vm_credentials._prepare_linux_credentials(
username="",
password="",
storage_service=self.test_storage_service,
key_pair_service=self.test_key_pair_service,
storage_client=self.test_storage_client,
group_name=self.test_group_name,
storage_name=self.test_storage_name)
self.assertEqual(username, self.vm_credentials.DEFAULT_LINUX_USERNAME)
self.assertEqual(password, "")
self.assertEqual(ssh_key, returned_ssh_key)
| 51.409722 | 120 | 0.685533 | 7,196 | 0.972038 | 0 | 0 | 3,528 | 0.476564 | 0 | 0 | 1,299 | 0.175469 |
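The tests above pin down the call shape of prepare_credentials; in application code the same call looks roughly like this, where the storage/key-pair/client objects are assumed to come from the Azure wiring elsewhere in the shell and the group/storage names are illustrative.

from azure.mgmt.compute.models import OperatingSystemTypes

vm_credentials_service = VMCredentialsService()
creds = vm_credentials_service.prepare_credentials(
    os_type=OperatingSystemTypes.linux,
    username="",                          # "" falls back to the default Linux user plus an SSH key
    password="",
    storage_service=storage_service,      # assumed Azure wiring objects from elsewhere in the shell
    key_pair_service=key_pair_service,
    storage_client=storage_client,
    group_name="my-resource-group",
    storage_name="mystorageaccount")
# creds is a VMCredentials model wrapping the resolved username, password and optional SSH key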
efe17a7b9267d6d10ac42dd61070b721d1c277ec | size 751 | py | Python | src/handler/quit.py | repo junhg0211/Kreylin @ aae5e1e5ba5cfaadfab6708cb0bf26a75c6dcb7a | license Apache-2.0 | stars: 1 (2019-09-11T12:02:53.000Z) | issues: 8 (2019-09-11T12:06:54.000Z to 2020-02-09T04:42:13.000Z) | forks: 1 (2021-05-24T12:43:07.000Z)

from sys import platform
import pygame
from handler.handler import Handler
class Quit(Handler):
def __init__(self, keyboard_manager, shutdown):
self.keyboard_manager = keyboard_manager
self.shutdown = shutdown
def tick(self):
if self.keyboard_manager.is_start(pygame.K_F4):
if platform == 'win32':
if self.keyboard_manager.is_pressed(pygame.K_LALT) or self.keyboard_manager.is_pressed(pygame.K_RALT):
self.shutdown()
if self.keyboard_manager.is_start(pygame.K_q):
if platform == 'darwin':
if self.keyboard_manager.is_pressed(pygame.K_LMETA) or self.keyboard_manager.is_pressed(pygame.K_RMETA):
self.shutdown()
| 34.136364 | 120 | 0.663116 | 671 | 0.893475 | 0 | 0 | 0 | 0 | 0 | 0 | 15 | 0.019973 |
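A sketch of wiring the handler into a main loop; the keyboard manager's is_start/is_pressed interface is taken from the tick() calls above, while the KeyboardManager constructor and its update() call are assumptions about the rest of this project.

keyboard_manager = KeyboardManager()   # hypothetical constructor from this project
running = True

def shutdown():
    global running
    running = False

quit_handler = Quit(keyboard_manager, shutdown)

while running:
    keyboard_manager.update()   # assumed: pull fresh key states from the pygame event queue
    quit_handler.tick()         # Alt+F4 on Windows / Cmd+Q on macOS ends the loop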
efe1e27548d4a791c0325857f9e7735c777989c1 | size 2,635 | py | Python | decisive/__init__.py | repo decisive/api-demo-python @ 58cd14e9e1f6373a3cd927536fd29f5f286940a0 | license MIT | stars: null | issues: null | forks: null

import requests
import requests.exceptions
import datetime
import ujson as json
import logging
class DecisiveApiClient(object):
HOST = 'https://ads.decisive.is'.strip('/')
def __init__(self, api_key, host=None):
self.session = requests.Session()
self.session.auth = (api_key,'')
self.host = host or DecisiveApiClient.HOST
def to_uri(self, *paths, **get_args):
path = '/'.join(p.strip('/') for p in map(unicode, paths))
args = '&'.join('{}={}'.format(*i) for i in self.flatten_getargs(get_args))
return '{}/{}?{}'.format(self.host, path, args)
def flatten_getargs(self, get_args):
# NOTE: support multiple value arg values, e.g. select=bids&select=spend
for key,value in get_args.items():
            value_list = value if hasattr(value, '__iter__') else [value]
            for list_value in value_list:
                yield key, list_value
def get(self, *paths, **get_args):
uri = self.to_uri(*paths, **get_args)
response = self.session.get(uri)
return self.examine_response(response)
def put(self, updated_ad): # NOTE: only /ads supports PUT method at the moment
uri = self.to_uri('ads',updated_ad['ad_id'])
response = self.session.put(uri, data=json.dumps(updated_ad))
return self.examine_response(response, False)
def post(self, data, *paths):
uri = self.to_uri(*paths)
response = self.session.post(uri, data=json.dumps(data))
return self.examine_response(response)
def delete(self, *paths):
uri = self.to_uri(*paths)
response = self.session.delete(uri)
return self.examine_response(response, False)
def get_report(self, ad, type_, attribute, start_datehour, end_datehour, **options):
return self.get('ads', ad['ad_id'], 'reports',
type_, attribute,
start_datehour.date().isoformat(),
start_datehour.hour,
end_datehour.date().isoformat(),
end_datehour.hour,
**options)
def examine_response(self, response, return_json=True):
try:
response.raise_for_status()
except requests.exceptions.HTTPError as error:
body = response.json() or {}
            message = body.get('reason') or error.message
logging.warning('HTTPError', response.status_code, message)
logging.info('Did you know?', body.get('did_you_know'))
return False
return True if not return_json else response.json()
| 38.188406 | 88 | 0.603036 | 2,535 | 0.962049 | 301 | 0.114231 | 0 | 0 | 0 | 0 | 270 | 0.102467 |
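Typical use of the client above; the API key and ad id are placeholders, and the report type_/attribute strings are illustrative rather than values documented here.

import datetime

client = DecisiveApiClient('my-api-key')              # placeholder key

ads = client.get('ads')                               # GET https://ads.decisive.is/ads?
ad = client.get('ads', 'some-ad-id')                  # fetch one ad as a dict

report = client.get_report(
    ad, 'overall', 'impressions',                     # illustrative type_/attribute values
    datetime.datetime(2016, 1, 1, 0),
    datetime.datetime(2016, 1, 7, 23))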
efe41b6dc8f659359b1e12cb86ef509b2e8e51a8 | size 38,284 | py | Python | app/main/views/service_settings.py | repo karlchillmaid/notifications-admin @ 9ef6da4ef9e2fa97b7debb4b573cb035a5cb8880 | license MIT | stars: null | issues: null | forks: null

from flask import (
abort,
current_app,
flash,
redirect,
render_template,
request,
session,
url_for,
)
from flask_login import current_user, login_required
from notifications_python_client.errors import HTTPError
from notifications_utils.field import Field
from notifications_utils.formatters import formatted_list
from app import (
billing_api_client,
current_service,
email_branding_client,
inbound_number_client,
organisations_client,
service_api_client,
user_api_client,
zendesk_client,
)
from app.main import main
from app.main.forms import (
BrandingOptionsEmail,
ConfirmPasswordForm,
FreeSMSAllowance,
InternationalSMSForm,
LetterBranding,
LinkOrganisationsForm,
OrganisationTypeForm,
RenameServiceForm,
RequestToGoLiveForm,
ServiceBasicViewForm,
ServiceContactLinkForm,
ServiceEditInboundNumberForm,
ServiceInboundNumberForm,
ServiceLetterContactBlockForm,
ServiceReplyToEmailForm,
ServiceSetBranding,
ServiceSmsSenderForm,
ServiceSwitchLettersForm,
SMSPrefixForm,
branding_options_dict,
)
from app.utils import (
AgreementInfo,
email_safe,
get_cdn_domain,
user_has_permissions,
user_is_platform_admin,
)
@main.route("/services/<service_id>/service-settings")
@login_required
@user_has_permissions('manage_service', 'manage_api_keys')
def service_settings(service_id):
letter_branding_organisations = email_branding_client.get_letter_email_branding()
organisation = organisations_client.get_service_organisation(service_id).get('name', None)
if current_service['email_branding']:
email_branding = email_branding_client.get_email_branding(current_service['email_branding'])['email_branding']
else:
email_branding = None
inbound_number = inbound_number_client.get_inbound_sms_number_for_service(service_id)
disp_inbound_number = inbound_number['data'].get('number', '')
reply_to_email_addresses = service_api_client.get_reply_to_email_addresses(service_id)
reply_to_email_address_count = len(reply_to_email_addresses)
default_reply_to_email_address = next(
(x['email_address'] for x in reply_to_email_addresses if x['is_default']), "Not set"
)
letter_contact_details = service_api_client.get_letter_contacts(service_id)
letter_contact_details_count = len(letter_contact_details)
default_letter_contact_block = next(
(Field(x['contact_block'], html='escape') for x in letter_contact_details if x['is_default']), "Not set"
)
sms_senders = service_api_client.get_sms_senders(service_id)
sms_sender_count = len(sms_senders)
default_sms_sender = next(
(Field(x['sms_sender'], html='escape') for x in sms_senders if x['is_default']), "None"
)
free_sms_fragment_limit = billing_api_client.get_free_sms_fragment_limit_for_year(service_id)
return render_template(
'views/service-settings.html',
email_branding=email_branding,
letter_branding=letter_branding_organisations.get(
current_service.get('dvla_organisation', '001')
),
can_receive_inbound=('inbound_sms' in current_service['permissions']),
inbound_number=disp_inbound_number,
default_reply_to_email_address=default_reply_to_email_address,
reply_to_email_address_count=reply_to_email_address_count,
default_letter_contact_block=default_letter_contact_block,
letter_contact_details_count=letter_contact_details_count,
default_sms_sender=default_sms_sender,
sms_sender_count=sms_sender_count,
free_sms_fragment_limit=free_sms_fragment_limit,
prefix_sms=current_service['prefix_sms'],
organisation=organisation,
)
@main.route("/services/<service_id>/service-settings/name", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_name_change(service_id):
form = RenameServiceForm()
if request.method == 'GET':
form.name.data = current_service['name']
if form.validate_on_submit():
if form.name.data == current_service['name']:
return redirect(url_for('.service_settings', service_id=service_id))
unique_name = service_api_client.is_service_name_unique(service_id, form.name.data, email_safe(form.name.data))
if not unique_name:
form.name.errors.append("This service name is already in use")
return render_template('views/service-settings/name.html', form=form)
session['service_name_change'] = form.name.data
return redirect(url_for('.service_name_change_confirm', service_id=service_id))
return render_template(
'views/service-settings/name.html',
form=form,
)
@main.route("/services/<service_id>/service-settings/name/confirm", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_name_change_confirm(service_id):
# Validate password for form
def _check_password(pwd):
return user_api_client.verify_password(current_user.id, pwd)
form = ConfirmPasswordForm(_check_password)
if form.validate_on_submit():
try:
service_api_client.update_service(
current_service['id'],
name=session['service_name_change'],
email_from=email_safe(session['service_name_change'])
)
except HTTPError as e:
error_msg = "Duplicate service name '{}'".format(session['service_name_change'])
if e.status_code == 400 and error_msg in e.message['name']:
# Redirect the user back to the change service name screen
flash('This service name is already in use', 'error')
return redirect(url_for('main.service_name_change', service_id=service_id))
else:
raise e
else:
session.pop('service_name_change')
return redirect(url_for('.service_settings', service_id=service_id))
return render_template(
'views/service-settings/confirm.html',
heading='Change your service name',
form=form)
@main.route("/services/<service_id>/service-settings/request-to-go-live")
@login_required
@user_has_permissions('manage_service')
def request_to_go_live(service_id):
return render_template(
'views/service-settings/request-to-go-live.html',
has_team_members=(
user_api_client.get_count_of_users_with_permission(
service_id, 'manage_service'
) > 1
),
has_templates=(
service_api_client.count_service_templates(service_id) > 0
),
has_email_templates=(
service_api_client.count_service_templates(service_id, template_type='email') > 0
),
has_email_reply_to_address=bool(
service_api_client.get_reply_to_email_addresses(service_id)
)
)
@main.route("/services/<service_id>/service-settings/submit-request-to-go-live", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def submit_request_to_go_live(service_id):
form = RequestToGoLiveForm()
if form.validate_on_submit():
zendesk_client.create_ticket(
subject='Request to go live - {}'.format(current_service['name']),
message=(
'Service: {}\n'
'{}\n'
'\n---'
'\nOrganisation type: {}'
'\nAgreement signed: {}'
'\nChannel: {}\nStart date: {}\nStart volume: {}'
'\nPeak volume: {}'
'\nFeatures: {}'
).format(
current_service['name'],
url_for('main.service_dashboard', service_id=current_service['id'], _external=True),
current_service['organisation_type'],
AgreementInfo.from_current_user().as_human_readable,
formatted_list(filter(None, (
'email' if form.channel_email.data else None,
'text messages' if form.channel_sms.data else None,
'letters' if form.channel_letter.data else None,
)), before_each='', after_each=''),
form.start_date.data,
form.start_volume.data,
form.peak_volume.data,
formatted_list(filter(None, (
'one off' if form.method_one_off.data else None,
'file upload' if form.method_upload.data else None,
'API' if form.method_api.data else None,
)), before_each='', after_each='')
),
ticket_type=zendesk_client.TYPE_QUESTION,
user_email=current_user.email_address,
user_name=current_user.name
)
flash('Thanks for your request to go live. We’ll get back to you within one working day.', 'default')
return redirect(url_for('.service_settings', service_id=service_id))
return render_template('views/service-settings/submit-request-to-go-live.html', form=form)
@main.route("/services/<service_id>/service-settings/switch-live")
@login_required
@user_is_platform_admin
def service_switch_live(service_id):
service_api_client.update_service(
current_service['id'],
        # TODO This limit should be set depending on the agreement signed with Notify.
message_limit=250000 if current_service['restricted'] else 50,
restricted=(not current_service['restricted'])
)
return redirect(url_for('.service_settings', service_id=service_id))
@main.route("/services/<service_id>/service-settings/research-mode")
@login_required
@user_is_platform_admin
def service_switch_research_mode(service_id):
service_api_client.update_service_with_properties(
service_id,
{"research_mode": not current_service['research_mode']}
)
return redirect(url_for('.service_settings', service_id=service_id))
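# The three helpers below manage service permission toggling:
# switch_service_permissions flips a permission relative to its current state,
# force_service_permission turns it explicitly on or off, and
# update_service_permissions persists the resulting set via the service API.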
def switch_service_permissions(service_id, permission, sms_sender=None):
force_service_permission(
service_id,
permission,
on=permission not in current_service['permissions'],
sms_sender=sms_sender
)
def force_service_permission(service_id, permission, on=False, sms_sender=None):
permissions, permission = set(current_service['permissions']), {permission}
update_service_permissions(
service_id,
permissions | permission if on else permissions - permission,
sms_sender=sms_sender
)
def update_service_permissions(service_id, permissions, sms_sender=None):
current_service['permissions'] = list(permissions)
data = {'permissions': current_service['permissions']}
if sms_sender:
data['sms_sender'] = sms_sender
service_api_client.update_service_with_properties(service_id, data)
@main.route("/services/<service_id>/service-settings/can-send-email")
@login_required
@user_is_platform_admin
def service_switch_can_send_email(service_id):
switch_service_permissions(service_id, 'email')
return redirect(url_for('.service_settings', service_id=service_id))
@main.route("/services/<service_id>/service-settings/can-send-sms")
@login_required
@user_is_platform_admin
def service_switch_can_send_sms(service_id):
switch_service_permissions(service_id, 'sms')
return redirect(url_for('.service_settings', service_id=service_id))
@main.route("/services/<service_id>/service-settings/email-auth")
@login_required
@user_is_platform_admin
def service_switch_email_auth(service_id):
switch_service_permissions(service_id, 'email_auth')
return redirect(url_for('.service_settings', service_id=service_id))
@main.route("/services/<service_id>/service-settings/can-send-precompiled-letter")
@login_required
@user_is_platform_admin
def service_switch_can_send_precompiled_letter(service_id):
switch_service_permissions(service_id, 'precompiled_letter')
return redirect(url_for('.service_settings', service_id=service_id))
@main.route("/services/<service_id>/service-settings/can-upload-document", methods=['GET', 'POST'])
@login_required
@user_is_platform_admin
def service_switch_can_upload_document(service_id):
form = ServiceContactLinkForm()
# If turning the permission off, or turning it on and the service already has a contact_link,
# don't show the form to add the link
if 'upload_document' in current_service['permissions'] or current_service.get('contact_link'):
switch_service_permissions(service_id, 'upload_document')
return redirect(url_for('.service_settings', service_id=service_id))
if form.validate_on_submit():
service_api_client.update_service(
current_service['id'],
contact_link=form.url.data
)
switch_service_permissions(service_id, 'upload_document')
return redirect(url_for('.service_settings', service_id=service_id))
return render_template('views/service-settings/contact_link.html', form=form)
@main.route("/services/<service_id>/service-settings/archive", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def archive_service(service_id):
if request.method == 'POST':
service_api_client.archive_service(service_id)
return redirect(url_for('.service_settings', service_id=service_id))
else:
flash('There\'s no way to reverse this! Are you sure you want to archive this service?', 'delete')
return service_settings(service_id)
@main.route("/services/<service_id>/service-settings/suspend", methods=["GET", "POST"])
@login_required
@user_has_permissions('manage_service')
def suspend_service(service_id):
if request.method == 'POST':
service_api_client.suspend_service(service_id)
return redirect(url_for('.service_settings', service_id=service_id))
else:
flash("This will suspend the service and revoke all api keys. Are you sure you want to suspend this service?",
'suspend')
return service_settings(service_id)
@main.route("/services/<service_id>/service-settings/resume", methods=["GET", "POST"])
@login_required
@user_has_permissions('manage_service')
def resume_service(service_id):
if request.method == 'POST':
service_api_client.resume_service(service_id)
return redirect(url_for('.service_settings', service_id=service_id))
else:
flash("This will resume the service. New api key are required for this service to use the API.", 'resume')
return service_settings(service_id)
@main.route("/services/<service_id>/service-settings/contact-link", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_set_contact_link(service_id):
form = ServiceContactLinkForm()
if request.method == 'GET':
form.url.data = current_service.get('contact_link')
if form.validate_on_submit():
service_api_client.update_service(
current_service['id'],
contact_link=form.url.data
)
return redirect(url_for('.service_settings', service_id=current_service['id']))
return render_template('views/service-settings/contact_link.html', form=form)
@main.route("/services/<service_id>/service-settings/set-email", methods=['GET'])
@login_required
@user_has_permissions('manage_service')
def service_set_email(service_id):
return render_template(
'views/service-settings/set-email.html',
)
@main.route("/services/<service_id>/service-settings/set-reply-to-email", methods=['GET'])
@login_required
@user_has_permissions('manage_service')
def service_set_reply_to_email(service_id):
return redirect(url_for('.service_email_reply_to', service_id=service_id))
@main.route("/services/<service_id>/service-settings/email-reply-to", methods=['GET'])
@login_required
@user_has_permissions('manage_service', 'manage_api_keys')
def service_email_reply_to(service_id):
reply_to_email_addresses = service_api_client.get_reply_to_email_addresses(service_id)
return render_template(
'views/service-settings/email_reply_to.html',
reply_to_email_addresses=reply_to_email_addresses)
@main.route("/services/<service_id>/service-settings/email-reply-to/add", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_add_email_reply_to(service_id):
form = ServiceReplyToEmailForm()
reply_to_email_address_count = len(service_api_client.get_reply_to_email_addresses(service_id))
first_email_address = reply_to_email_address_count == 0
if form.validate_on_submit():
service_api_client.add_reply_to_email_address(
current_service['id'],
email_address=form.email_address.data,
is_default=first_email_address if first_email_address else form.is_default.data
)
return redirect(url_for('.service_email_reply_to', service_id=service_id))
return render_template(
'views/service-settings/email-reply-to/add.html',
form=form,
first_email_address=first_email_address)
@main.route(
"/services/<service_id>/service-settings/email-reply-to/<reply_to_email_id>/edit",
methods=['GET', 'POST'],
endpoint="service_edit_email_reply_to"
)
@main.route(
"/services/<service_id>/service-settings/email-reply-to/<reply_to_email_id>/delete",
methods=['GET'],
endpoint="service_confirm_delete_email_reply_to"
)
@login_required
@user_has_permissions('manage_service')
def service_edit_email_reply_to(service_id, reply_to_email_id):
form = ServiceReplyToEmailForm()
reply_to_email_address = service_api_client.get_reply_to_email_address(service_id, reply_to_email_id)
if request.method == 'GET':
form.email_address.data = reply_to_email_address['email_address']
form.is_default.data = reply_to_email_address['is_default']
if form.validate_on_submit():
service_api_client.update_reply_to_email_address(
current_service['id'],
reply_to_email_id=reply_to_email_id,
email_address=form.email_address.data,
is_default=True if reply_to_email_address['is_default'] else form.is_default.data
)
return redirect(url_for('.service_email_reply_to', service_id=service_id))
return render_template(
'views/service-settings/email-reply-to/edit.html',
form=form,
reply_to_email_address_id=reply_to_email_id,
confirm_delete=(request.endpoint == "main.service_confirm_delete_email_reply_to"),
)
@main.route("/services/<service_id>/service-settings/email-reply-to/<reply_to_email_id>/delete", methods=['POST'])
@login_required
@user_has_permissions('manage_service')
def service_delete_email_reply_to(service_id, reply_to_email_id):
service_api_client.delete_reply_to_email_address(
service_id=current_service['id'],
reply_to_email_id=reply_to_email_id,
)
return redirect(url_for('.service_email_reply_to', service_id=service_id))
@main.route("/services/<service_id>/service-settings/set-inbound-number", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_set_inbound_number(service_id):
available_inbound_numbers = inbound_number_client.get_available_inbound_sms_numbers()
service_has_inbound_number = inbound_number_client.get_inbound_sms_number_for_service(service_id)['data'] != {}
inbound_numbers_value_and_label = [
(number['id'], number['number']) for number in available_inbound_numbers['data']
]
no_available_numbers = available_inbound_numbers['data'] == []
form = ServiceInboundNumberForm(
inbound_number_choices=inbound_numbers_value_and_label
)
if form.validate_on_submit():
service_api_client.add_sms_sender(
current_service['id'],
sms_sender=form.inbound_number.data,
is_default=True,
inbound_number_id=form.inbound_number.data
)
switch_service_permissions(current_service['id'], 'inbound_sms')
return redirect(url_for('.service_settings', service_id=service_id))
return render_template(
'views/service-settings/set-inbound-number.html',
form=form,
no_available_numbers=no_available_numbers,
service_has_inbound_number=service_has_inbound_number
)
@main.route("/services/<service_id>/service-settings/set-sms", methods=['GET'])
@login_required
@user_has_permissions('manage_service')
def service_set_sms(service_id):
return render_template(
'views/service-settings/set-sms.html',
)
@main.route("/services/<service_id>/service-settings/sms-prefix", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_set_sms_prefix(service_id):
form = SMSPrefixForm(enabled=(
'on' if current_service['prefix_sms'] else 'off'
))
form.enabled.label.text = 'Start all text messages with ‘{}:’'.format(current_service['name'])
if form.validate_on_submit():
service_api_client.update_service(
current_service['id'],
prefix_sms=(form.enabled.data == 'on')
)
return redirect(url_for('.service_settings', service_id=service_id))
return render_template(
'views/service-settings/sms-prefix.html',
form=form
)
@main.route("/services/<service_id>/service-settings/set-international-sms", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_set_international_sms(service_id):
form = InternationalSMSForm(
enabled='on' if 'international_sms' in current_service['permissions'] else 'off'
)
if form.validate_on_submit():
force_service_permission(
service_id,
'international_sms',
on=(form.enabled.data == 'on'),
)
return redirect(
url_for(".service_settings", service_id=service_id)
)
return render_template(
'views/service-settings/set-international-sms.html',
form=form,
)
@main.route("/services/<service_id>/service-settings/set-inbound-sms", methods=['GET'])
@login_required
@user_has_permissions('manage_service')
def service_set_inbound_sms(service_id):
number = inbound_number_client.get_inbound_sms_number_for_service(service_id)['data'].get('number', '')
return render_template(
'views/service-settings/set-inbound-sms.html',
inbound_number=number,
)
@main.route("/services/<service_id>/service-settings/set-letters", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_set_letters(service_id):
form = ServiceSwitchLettersForm(
enabled='on' if 'letter' in current_service['permissions'] else 'off'
)
if form.validate_on_submit():
force_service_permission(
service_id,
'letter',
on=(form.enabled.data == 'on'),
)
return redirect(
url_for(".service_settings", service_id=service_id)
)
return render_template(
'views/service-settings/set-letters.html',
form=form,
)
@main.route("/services/<service_id>/service-settings/set-auth-type", methods=['GET'])
@login_required
@user_has_permissions('manage_service')
def service_set_auth_type(service_id):
return render_template(
'views/service-settings/set-auth-type.html',
)
@main.route("/services/<service_id>/service-settings/set-basic-view", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service', 'send_messages')
def service_set_basic_view(service_id):
if current_user.previewing_basic_view:
session.pop('basic', None)
if not current_user.has_permissions('manage_service'):
abort(403)
form = ServiceBasicViewForm(
enabled='caseworking' in current_service['permissions']
)
if form.validate_on_submit():
force_service_permission(
service_id,
'caseworking',
on=(form.enabled.data == 'on'),
)
return redirect(
url_for('.service_settings', service_id=service_id)
)
return render_template(
'views/service-settings/set-basic-view.html',
form=form,
)
@main.route("/services/<service_id>/preview-basic-view")
@login_required
@user_has_permissions('manage_service')
def preview_basic_view(service_id):
session['basic'] = True
return redirect(url_for('.service_dashboard', service_id=service_id))
@main.route("/services/<service_id>/service-settings/letter-contacts", methods=['GET'])
@login_required
@user_has_permissions('manage_service', 'manage_api_keys')
def service_letter_contact_details(service_id):
letter_contact_details = service_api_client.get_letter_contacts(service_id)
return render_template(
'views/service-settings/letter-contact-details.html',
letter_contact_details=letter_contact_details)
@main.route("/services/<service_id>/service-settings/letter-contact/add", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_add_letter_contact(service_id):
form = ServiceLetterContactBlockForm()
letter_contact_blocks_count = len(service_api_client.get_letter_contacts(service_id))
first_contact_block = letter_contact_blocks_count == 0
if form.validate_on_submit():
service_api_client.add_letter_contact(
current_service['id'],
contact_block=form.letter_contact_block.data.replace('\r', '') or None,
is_default=first_contact_block if first_contact_block else form.is_default.data
)
if request.args.get('from_template'):
return redirect(
url_for('.set_template_sender', service_id=service_id, template_id=request.args.get('from_template'))
)
return redirect(url_for('.service_letter_contact_details', service_id=service_id))
return render_template(
'views/service-settings/letter-contact/add.html',
form=form,
first_contact_block=first_contact_block)
@main.route("/services/<service_id>/service-settings/letter-contact/<letter_contact_id>/edit", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_edit_letter_contact(service_id, letter_contact_id):
letter_contact_block = service_api_client.get_letter_contact(service_id, letter_contact_id)
form = ServiceLetterContactBlockForm(letter_contact_block=letter_contact_block['contact_block'])
if request.method == 'GET':
form.is_default.data = letter_contact_block['is_default']
if form.validate_on_submit():
service_api_client.update_letter_contact(
current_service['id'],
letter_contact_id=letter_contact_id,
contact_block=form.letter_contact_block.data.replace('\r', '') or None,
is_default=True if letter_contact_block['is_default'] else form.is_default.data
)
return redirect(url_for('.service_letter_contact_details', service_id=service_id))
return render_template(
'views/service-settings/letter-contact/edit.html',
form=form,
letter_contact_id=letter_contact_block['id'])
@main.route("/services/<service_id>/service-settings/sms-sender", methods=['GET'])
@login_required
@user_has_permissions('manage_service', 'manage_api_keys')
def service_sms_senders(service_id):
def attach_hint(sender):
hints = []
if sender['is_default']:
hints += ["default"]
if sender['inbound_number_id']:
hints += ["receives replies"]
if hints:
sender['hint'] = "(" + " and ".join(hints) + ")"
sms_senders = service_api_client.get_sms_senders(service_id)
for sender in sms_senders:
attach_hint(sender)
return render_template(
'views/service-settings/sms-senders.html',
sms_senders=sms_senders
)
@main.route("/services/<service_id>/service-settings/sms-sender/add", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_add_sms_sender(service_id):
form = ServiceSmsSenderForm()
sms_sender_count = len(service_api_client.get_sms_senders(service_id))
first_sms_sender = sms_sender_count == 0
if form.validate_on_submit():
service_api_client.add_sms_sender(
current_service['id'],
sms_sender=form.sms_sender.data.replace('\r', '') or None,
is_default=first_sms_sender if first_sms_sender else form.is_default.data
)
return redirect(url_for('.service_sms_senders', service_id=service_id))
return render_template(
'views/service-settings/sms-sender/add.html',
form=form,
first_sms_sender=first_sms_sender)
@main.route(
"/services/<service_id>/service-settings/sms-sender/<sms_sender_id>/edit",
methods=['GET', 'POST'],
endpoint="service_edit_sms_sender"
)
@main.route(
"/services/<service_id>/service-settings/sms-sender/<sms_sender_id>/delete",
methods=['GET'],
endpoint="service_confirm_delete_sms_sender"
)
@login_required
@user_has_permissions('manage_service')
def service_edit_sms_sender(service_id, sms_sender_id):
sms_sender = service_api_client.get_sms_sender(service_id, sms_sender_id)
is_inbound_number = sms_sender['inbound_number_id']
if is_inbound_number:
form = ServiceEditInboundNumberForm(is_default=sms_sender['is_default'])
else:
form = ServiceSmsSenderForm(**sms_sender)
if form.validate_on_submit():
service_api_client.update_sms_sender(
current_service['id'],
sms_sender_id=sms_sender_id,
sms_sender=sms_sender['sms_sender'] if is_inbound_number else form.sms_sender.data.replace('\r', ''),
is_default=True if sms_sender['is_default'] else form.is_default.data
)
return redirect(url_for('.service_sms_senders', service_id=service_id))
form.is_default.data = sms_sender['is_default']
return render_template(
'views/service-settings/sms-sender/edit.html',
form=form,
sms_sender=sms_sender,
inbound_number=is_inbound_number,
sms_sender_id=sms_sender_id,
confirm_delete=(request.endpoint == "main.service_confirm_delete_sms_sender")
)
@main.route(
"/services/<service_id>/service-settings/sms-sender/<sms_sender_id>/delete",
methods=['POST'],
)
@login_required
@user_has_permissions('manage_service')
def service_delete_sms_sender(service_id, sms_sender_id):
service_api_client.delete_sms_sender(
service_id=current_service['id'],
sms_sender_id=sms_sender_id,
)
return redirect(url_for('.service_sms_senders', service_id=service_id))
@main.route("/services/<service_id>/service-settings/set-letter-contact-block", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def service_set_letter_contact_block(service_id):
if 'letter' not in current_service['permissions']:
abort(403)
form = ServiceLetterContactBlockForm(letter_contact_block=current_service['letter_contact_block'])
if form.validate_on_submit():
service_api_client.update_service(
current_service['id'],
letter_contact_block=form.letter_contact_block.data.replace('\r', '') or None
)
if request.args.get('from_template'):
return redirect(
url_for('.view_template', service_id=service_id, template_id=request.args.get('from_template'))
)
return redirect(url_for('.service_settings', service_id=service_id))
return render_template(
'views/service-settings/set-letter-contact-block.html',
form=form
)
@main.route("/services/<service_id>/service-settings/set-organisation-type", methods=['GET', 'POST'])
@login_required
@user_is_platform_admin
def set_organisation_type(service_id):
form = OrganisationTypeForm(organisation_type=current_service.get('organisation_type'))
if form.validate_on_submit():
free_sms_fragment_limit = current_app.config['DEFAULT_FREE_SMS_FRAGMENT_LIMITS'].get(
form.organisation_type.data)
service_api_client.update_service(
service_id,
organisation_type=form.organisation_type.data,
)
billing_api_client.create_or_update_free_sms_fragment_limit(service_id, free_sms_fragment_limit)
return redirect(url_for('.service_settings', service_id=service_id))
return render_template(
'views/service-settings/set-organisation-type.html',
form=form,
)
@main.route("/services/<service_id>/service-settings/set-free-sms-allowance", methods=['GET', 'POST'])
@login_required
@user_is_platform_admin
def set_free_sms_allowance(service_id):
form = FreeSMSAllowance(free_sms_allowance=billing_api_client.get_free_sms_fragment_limit_for_year(service_id))
if form.validate_on_submit():
billing_api_client.create_or_update_free_sms_fragment_limit(service_id, form.free_sms_allowance.data)
return redirect(url_for('.service_settings', service_id=service_id))
return render_template(
'views/service-settings/set-free-sms-allowance.html',
form=form,
)
@main.route("/services/<service_id>/service-settings/set-email-branding", methods=['GET', 'POST'])
@login_required
@user_is_platform_admin
def service_set_email_branding(service_id):
email_branding = email_branding_client.get_all_email_branding()
form = ServiceSetBranding(branding_type=current_service.get('branding'))
# dynamically create org choices, including the null option
form.branding_style.choices = [('None', 'None')] + get_branding_as_value_and_label(email_branding)
if form.validate_on_submit():
branding_style = None if form.branding_style.data == 'None' else form.branding_style.data
service_api_client.update_service(
service_id,
branding=form.branding_type.data,
email_branding=branding_style
)
return redirect(url_for('.service_settings', service_id=service_id))
form.branding_style.data = current_service['email_branding'] or 'None'
return render_template(
'views/service-settings/set-email-branding.html',
form=form,
branding_dict=get_branding_as_dict(email_branding)
)
@main.route("/services/<service_id>/service-settings/set-letter-branding", methods=['GET', 'POST'])
@login_required
@user_is_platform_admin
def set_letter_branding(service_id):
form = LetterBranding(choices=email_branding_client.get_letter_email_branding().items())
if form.validate_on_submit():
service_api_client.update_service(
service_id,
dvla_organisation=form.dvla_org_id.data
)
return redirect(url_for('.service_settings', service_id=service_id))
form.dvla_org_id.data = current_service.get('dvla_organisation', '001')
return render_template(
'views/service-settings/set-letter-branding.html',
form=form,
)
@main.route("/services/<service_id>/service-settings/link-service-to-organisation", methods=['GET', 'POST'])
@login_required
@user_is_platform_admin
def link_service_to_organisation(service_id):
organisations = organisations_client.get_organisations()
current_organisation = organisations_client.get_service_organisation(service_id).get('id', None)
form = LinkOrganisationsForm(
choices=convert_dictionary_to_wtforms_choices_format(organisations, 'id', 'name'),
organisations=current_organisation
)
if form.validate_on_submit():
if form.organisations.data != current_organisation:
organisations_client.update_service_organisation(
service_id,
form.organisations.data
)
return redirect(url_for('.service_settings', service_id=service_id))
return render_template(
'views/service-settings/link-service-to-organisation.html',
has_organisations=organisations,
form=form,
)
@main.route("/services/<service_id>/branding-request/email", methods=['GET', 'POST'])
@login_required
@user_has_permissions('manage_service')
def branding_request(service_id):
form = BrandingOptionsEmail(
options=current_service['branding']
)
if form.validate_on_submit():
zendesk_client.create_ticket(
subject='Email branding request - {}'.format(current_service['name']),
message=(
'Organisation: {}\n'
'Service: {}\n'
'{}\n'
'\n---'
'\nBranding requested: {}'
).format(
AgreementInfo.from_current_user().as_info_for_branding_request,
current_service['name'],
url_for('main.service_dashboard', service_id=current_service['id'], _external=True),
branding_options_dict[form.options.data],
),
ticket_type=zendesk_client.TYPE_QUESTION,
user_email=current_user.email_address,
user_name=current_user.name,
)
flash((
'Thanks for your branding request. We’ll get back to you '
'within one working day.'
), 'default')
return redirect(url_for('.service_settings', service_id=service_id))
return render_template(
'views/service-settings/branding/email-options.html',
form=form,
)
def get_branding_as_value_and_label(email_branding):
return [
(branding['id'], branding['name'])
for branding in email_branding
]
def get_branding_as_dict(email_branding):
return {
branding['id']: {
'logo': 'https://{}/{}'.format(get_cdn_domain(), branding['logo']),
'colour': branding['colour']
} for branding in email_branding
}
def convert_dictionary_to_wtforms_choices_format(dictionary, value, label):
return [
(item[value], item[label]) for item in dictionary
]
| 37.132881 | 119 | 0.709252 | 0 | 0 | 0 | 0 | 35,426 | 0.925154 | 0 | 0 | 9,391 | 0.245247 |
efe457cbb3f9ed9d770c24aeb1ca7014a5e1296d | 3,094 | py | Python | doctools/spelling.py | Sketch98/oil | 2d5c51432b9699e48178236da2e5b3bf1a33d79f | [
"Apache-2.0"
]
| null | null | null | doctools/spelling.py | Sketch98/oil | 2d5c51432b9699e48178236da2e5b3bf1a33d79f | [
"Apache-2.0"
]
| null | null | null | doctools/spelling.py | Sketch98/oil | 2d5c51432b9699e48178236da2e5b3bf1a33d79f | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python2
"""
spelling.py
Filter the output of 'lynx -dump' into a list of words to spell check.
"""
from __future__ import print_function
from collections import Counter
import optparse
import re
import sys
def log(msg, *args):
if args:
msg = msg % args
print(msg, file=sys.stderr)
def SplitWords(contents):
# Remove URLs so path components don't show up as words
contents = re.sub(r'(http|https|file)://\S+', '', contents)
# Take into account contractions with apostrophes
#
# - doesn't
# - can't
WORD_RE = re.compile(r'''
[a-zA-Z]+
(?:\'t\b)? # optional contraction
''', re.VERBOSE)
words = WORD_RE.findall(contents)
for w in words:
yield w
def WordList(f):
for line in f:
# no special characters allowed
yield line.strip().lower()
def Options():
"""Returns an option parser instance."""
p = optparse.OptionParser()
p.add_option(
'--known-words', dest='known_words',
help='List of words like /usr/share/dict/words')
p.add_option(
'--more-than-bash', dest='more_than_bash', type=int, default=0,
help='Expected number of cases where OSH starts more processes than bash')
return p
def main(argv):
o = Options()
opts, argv = o.parse_args(argv[1:])
action = argv[0]
if action == 'word-split':
contents = sys.stdin.read()
for w in SplitWords(contents):
print(w)
elif action == 'check':
word_files = argv[1:]
d = Counter()
for path in word_files:
with open(path) as f:
for word in WordList(f):
d[word] += 1
print('')
print('Most common words')
print('')
for word, count in d.most_common()[:20]:
print('%10d %s' % (count, word))
print('')
print('Least common words')
print('')
for word, count in d.most_common()[-20:]:
print('%10d %s' % (count, word))
log('%d word files', len(word_files))
log('%d unique words', len(d))
known_words = {}
with open(opts.known_words) as f:
for w in WordList(f):
known_words[w] = True
print('')
print('Potential Misspellings')
print('')
for path in word_files:
print()
print('\t%s' % path)
print()
with open(path) as f:
unknown = {}
for w in WordList(f):
#if d.get(word) == 1:
# print(word)
if w not in known_words:
unknown[w] = True
if unknown:
for u in sorted(unknown):
# only occurs once
if d.get(u) == 1:
print(u)
log('\t%d unknown words in %s', len(unknown), path)
# Checking algorithms:
#
# - Does it appear in the dictionary? Problem: most computer terms
# - Does it appear only once or twice in the whole corpus?
    # - Is the edit distance very close to a dictionary word?
# - e.g. subsitutions is a typo
else:
raise RuntimeError('Invalid action %r' % action)
if __name__ == '__main__':
try:
main(sys.argv)
except RuntimeError as e:
print('FATAL: %s' % e, file=sys.stderr)
sys.exit(1)
| 21.636364 | 80 | 0.591791 | 0 | 0 | 493 | 0.159341 | 0 | 0 | 0 | 0 | 1,075 | 0.347447 |
efe4b76066b7fc615a3d5cb419d39e72b57d7593 | 20,659 | py | Python | train_deep_ls.py | Kamysek/DeepLocalShapes | 24ee92889381d40acbb5ad1c7c8abb512a8c26b5 | [
"MIT"
]
| 4 | 2021-09-23T11:36:30.000Z | 2022-02-23T20:10:46.000Z | train_deep_ls.py | Kamysek/DeepLocalShapes | 24ee92889381d40acbb5ad1c7c8abb512a8c26b5 | [
"MIT"
]
| null | null | null | train_deep_ls.py | Kamysek/DeepLocalShapes | 24ee92889381d40acbb5ad1c7c8abb512a8c26b5 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
# Based on: https://github.com/facebookresearch/DeepSDF using MIT LICENSE (https://github.com/facebookresearch/DeepSDF/blob/master/LICENSE)
# Copyright 2021-present Philipp Friedrich, Josef Kamysek. All Rights Reserved.
import functools
import json
import logging
import math
import os
import signal
import sys
import time
import warnings
import deep_ls
import deep_ls.workspace as ws
import torch
import torch.multiprocessing as mp
import torch.utils.data as data_utils
from scipy.spatial import cKDTree
import numpy as np
if not sys.warnoptions:
warnings.simplefilter("ignore")
class LearningRateSchedule:
def get_learning_rate(self, epoch):
pass
class ConstantLearningRateSchedule(LearningRateSchedule):
def __init__(self, value):
self.value = value
def get_learning_rate(self, epoch):
return self.value
class StepLearningRateSchedule(LearningRateSchedule):
def __init__(self, initial, interval, factor):
self.initial = initial
self.interval = interval
self.factor = factor
def get_learning_rate(self, epoch):
return self.initial * (self.factor ** (epoch // self.interval))
class WarmupLearningRateSchedule(LearningRateSchedule):
def __init__(self, initial, warmed_up, length):
self.initial = initial
self.warmed_up = warmed_up
self.length = length
def get_learning_rate(self, epoch):
if epoch > self.length:
return self.warmed_up
return self.initial + (self.warmed_up - self.initial) * epoch / self.length
def get_learning_rate_schedules(specs):
    schedule_specs_list = specs["LearningRateSchedule"]
    schedules = []
    for schedule_specs in schedule_specs_list:
if schedule_specs["Type"] == "Step":
schedules.append(
StepLearningRateSchedule(
schedule_specs["Initial"],
schedule_specs["Interval"],
schedule_specs["Factor"],
)
)
elif schedule_specs["Type"] == "Warmup":
schedules.append(
WarmupLearningRateSchedule(
schedule_specs["Initial"],
schedule_specs["Final"],
schedule_specs["Length"],
)
)
elif schedule_specs["Type"] == "Constant":
schedules.append(ConstantLearningRateSchedule(schedule_specs["Value"]))
else:
raise Exception(
'no known learning rate schedule of type "{}"'.format(
schedule_specs["Type"]
)
)
return schedules
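# A "LearningRateSchedule" specs fragment this parser accepts might look like
# the following (hypothetical values, following the DeepSDF-style specs.json
# layout assumed by this script):
#   "LearningRateSchedule": [
#     {"Type": "Step", "Initial": 0.0005, "Interval": 500, "Factor": 0.5},
#     {"Type": "Constant", "Value": 0.001}
#   ]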
def save_model(experiment_directory, filename, decoder, epoch):
model_params_dir = ws.get_model_params_dir(experiment_directory, True)
torch.save(
{"epoch": epoch, "model_state_dict": decoder.state_dict()},
os.path.join(model_params_dir, filename),
)
def save_optimizer(experiment_directory, filename, optimizer, epoch):
optimizer_params_dir = ws.get_optimizer_params_dir(experiment_directory, True)
torch.save(
{"epoch": epoch, "optimizer_state_dict": optimizer.state_dict()},
os.path.join(optimizer_params_dir, filename),
)
def load_optimizer(experiment_directory, filename, optimizer):
full_filename = os.path.join(
ws.get_optimizer_params_dir(experiment_directory), filename
)
if not os.path.isfile(full_filename):
raise Exception(
'optimizer state dict "{}" does not exist'.format(full_filename)
)
data = torch.load(full_filename)
optimizer.load_state_dict(data["optimizer_state_dict"])
return data["epoch"]
def save_latent_vectors(experiment_directory, filename, latent_vec, epoch):
latent_codes_dir = ws.get_latent_codes_dir(experiment_directory, True)
all_latents = latent_vec.state_dict()
torch.save(
{"epoch": epoch, "latent_codes": all_latents},
os.path.join(latent_codes_dir, filename),
)
# TODO: duplicated in workspace
def load_latent_vectors(experiment_directory, filename, lat_vecs):
full_filename = os.path.join(
ws.get_latent_codes_dir(experiment_directory), filename
)
if not os.path.isfile(full_filename):
raise Exception('latent state file "{}" does not exist'.format(full_filename))
data = torch.load(full_filename)
if isinstance(data["latent_codes"], torch.Tensor):
# for backwards compatibility
if not lat_vecs.num_embeddings == data["latent_codes"].size()[0]:
raise Exception(
"num latent codes mismatched: {} vs {}".format(
lat_vecs.num_embeddings, data["latent_codes"].size()[0]
)
)
if not lat_vecs.embedding_dim == data["latent_codes"].size()[2]:
raise Exception("latent code dimensionality mismatch")
for i, lat_vec in enumerate(data["latent_codes"]):
lat_vecs.weight.data[i, :] = lat_vec
else:
lat_vecs.load_state_dict(data["latent_codes"])
return data["epoch"]
def save_logs(
experiment_directory,
loss_log,
lr_log,
timing_log,
lat_mag_log,
param_mag_log,
epoch,
):
torch.save(
{
"epoch": epoch,
"loss": loss_log,
"learning_rate": lr_log,
"timing": timing_log,
"latent_magnitude": lat_mag_log,
"param_magnitude": param_mag_log,
},
os.path.join(experiment_directory, ws.logs_filename),
)
def load_logs(experiment_directory):
full_filename = os.path.join(experiment_directory, ws.logs_filename)
if not os.path.isfile(full_filename):
raise Exception('log file "{}" does not exist'.format(full_filename))
data = torch.load(full_filename)
return (
data["loss"],
data["learning_rate"],
data["timing"],
data["latent_magnitude"],
data["param_magnitude"],
data["epoch"],
)
def clip_logs(loss_log, lr_log, timing_log, lat_mag_log, param_mag_log, epoch):
iters_per_epoch = len(loss_log) // len(lr_log)
loss_log = loss_log[: (iters_per_epoch * epoch)]
lr_log = lr_log[:epoch]
timing_log = timing_log[:epoch]
lat_mag_log = lat_mag_log[:epoch]
for n in param_mag_log:
param_mag_log[n] = param_mag_log[n][:epoch]
return loss_log, lr_log, timing_log, lat_mag_log, param_mag_log
def get_spec_with_default(specs, key, default):
try:
return specs[key]
except KeyError:
return default
def get_mean_latent_vector_magnitude(latent_vectors):
return torch.mean(torch.norm(latent_vectors.weight.data.detach(), dim=1))
def append_parameter_magnitudes(param_mag_log, model):
for name, param in model.named_parameters():
if len(name) > 7 and name[:7] == "module.":
name = name[7:]
if name not in param_mag_log.keys():
param_mag_log[name] = []
param_mag_log[name].append(param.data.norm().item())
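# trainer() is the per-voxel work item handed to the multiprocessing pool in
# main_function: for one grid-cell center it gathers the SDF samples inside the
# cell's support radius, decodes them with that cell's local latent code, and
# accumulates the L1 loss (plus optional code regularization) into outer_sum.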
def trainer(center_point, sdf_tree, sdf_grid_radius, lat_vecs, sdf_data, indices, cube_size, outer_sum, outer_lock, decoder, loss_l1, do_code_regularization, code_reg_lambda, epoch):
inner_sum = 0.0
# Get all indices of the samples that are within the L-radius around the cell center.
near_sample_indices = sdf_tree.query_ball_point(x=[center_point[1]], r=sdf_grid_radius, p=np.inf)
# Get number of samples located within the L-radius around the cell center
num_sdf_samples = len(near_sample_indices[0])
if num_sdf_samples < 1:
return
# Extract code from lat_vecs
code = lat_vecs((center_point[0] + indices[0].cuda() * (cube_size**3)).long()).cuda()
# Get groundtruth sdf value
sdf_gt = sdf_data[near_sample_indices[0], 3].unsqueeze(1)
sdf_gt = torch.tanh(sdf_gt)
transformed_sample = sdf_data[near_sample_indices[0], :3] - center_point[1]
transformed_sample.requires_grad = False
code = code.expand(1, 125)
code = code.repeat(transformed_sample.shape[0], 1)
decoder_input = torch.cat([code, transformed_sample.cuda()], dim=1).float().cuda()
# Get network prediction of current sample
pred_sdf = decoder(decoder_input)
# f_theta - s_j
inner_sum = loss_l1(pred_sdf.squeeze(0), sdf_gt.cuda()) / num_sdf_samples
# Right most part of formula (4) in DeepLS -> + 1/sigma^2 L2(z_i)
if do_code_regularization and num_sdf_samples != 0:
l2_size_loss = torch.sum(torch.norm(code, dim=0))
reg_loss = (code_reg_lambda * min(1.0, epoch / 100) * l2_size_loss) / num_sdf_samples
inner_sum = inner_sum.cuda() + reg_loss.cuda()
inner_sum.backward()
with outer_lock:
outer_sum.value += inner_sum.item()
return
def main_function(experiment_directory, continue_from, batch_split):
logging.debug("running " + experiment_directory)
specs = ws.load_experiment_specifications(experiment_directory)
logging.info("Experiment description: \n" + str(specs["Description"]))
data_source = specs["DataSource"]
train_split_file = specs["TrainSplit"]
arch = __import__("networks." + specs["NetworkArch"], fromlist=["Decoder"])
logging.debug(specs["NetworkSpecs"])
latent_size = specs["CodeLength"]
checkpoints = list(
range(
specs["SnapshotFrequency"],
specs["NumEpochs"] + 1,
specs["SnapshotFrequency"],
)
)
for checkpoint in specs["AdditionalSnapshots"]:
checkpoints.append(checkpoint)
checkpoints.sort()
lr_schedules = get_learning_rate_schedules(specs)
grad_clip = get_spec_with_default(specs, "GradientClipNorm", None)
if grad_clip is not None:
logging.debug("clipping gradients to max norm {}".format(grad_clip))
def save_latest(epoch):
save_model(experiment_directory, "latest.pth", decoder, epoch)
save_optimizer(experiment_directory, "latest.pth", optimizer_all, epoch)
save_latent_vectors(experiment_directory, "latest.pth", lat_vecs, epoch)
def save_checkpoints(epoch):
save_model(experiment_directory, str(epoch) + ".pth", decoder, epoch)
save_optimizer(experiment_directory, str(epoch) + ".pth", optimizer_all, epoch)
save_latent_vectors(experiment_directory, str(epoch) + ".pth", lat_vecs, epoch)
def signal_handler(sig, frame):
logging.info("Stopping early...")
sys.exit(0)
def adjust_learning_rate(lr_schedules, optimizer, epoch):
for i, param_group in enumerate(optimizer.param_groups):
param_group["lr"] = lr_schedules[i].get_learning_rate(epoch)
signal.signal(signal.SIGINT, signal_handler)
num_samp_per_scene = specs["SamplesPerScene"]
scene_per_batch = specs["ScenesPerBatch"]
do_code_regularization = get_spec_with_default(specs, "CodeRegularization", True)
code_reg_lambda = get_spec_with_default(specs, "CodeRegularizationLambda", 1e-4)
code_bound = get_spec_with_default(specs, "CodeBound", None)
cube_size = get_spec_with_default(specs, "CubeSize", 50)
box_size = get_spec_with_default(specs, "BoxSize", 2)
voxel_radius = get_spec_with_default(specs, "VoxelRadius", 1.5)
decoder = arch.Decoder(latent_size, **specs["NetworkSpecs"]).cuda()
logging.info("training with {} GPU(s)".format(torch.cuda.device_count()))
if torch.cuda.device_count() > 1:
decoder = torch.nn.DataParallel(decoder)
num_epochs = specs["NumEpochs"]
log_frequency = get_spec_with_default(specs, "LogFrequency", 10)
with open(train_split_file, "r") as f:
train_split = json.load(f)
sdf_dataset = deep_ls.data.SDFSamples(
data_source, train_split, num_samp_per_scene, load_ram=False
)
num_data_loader_threads = get_spec_with_default(specs, "DataLoaderThreads", 1)
logging.debug("loading data with {} threads".format(num_data_loader_threads))
sdf_loader = data_utils.DataLoader(
sdf_dataset,
batch_size=scene_per_batch,
shuffle=True,
num_workers=num_data_loader_threads,
drop_last=True,
)
sdf_grid_indices = deep_ls.data.generate_grid_center_indices(cube_size=cube_size, box_size=box_size)
# voxel_radius is defined as 1.5 times the voxel side length (see DeepLS sec. 4.1) since that value provides
    # a good trade-off between accuracy and efficiency
sdf_grid_radius = voxel_radius * ((box_size * 2) / cube_size)
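    # e.g. with the defaults box_size=2 and cube_size=50 each voxel has side
    # (2 * 2) / 50 = 0.08, so the support radius is 1.5 * 0.08 = 0.12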
logging.debug("torch num_threads: {}".format(torch.get_num_threads()))
num_scenes = len(sdf_dataset)
logging.info("There are {} scenes".format(num_scenes))
logging.debug(decoder)
# TODO check if there is something better than Embedding to store codes.
# TODO Not sure if max_norm=code_bound is necessary
# lat_vecs_size is num_scences times the grid (cube_size^3)
lat_vec_size = num_scenes * (cube_size**3)
lat_vecs = torch.nn.Embedding(lat_vec_size, latent_size, max_norm=code_bound).cuda()
torch.nn.init.normal_(
lat_vecs.weight.data,
0.0,
get_spec_with_default(specs, "CodeInitStdDev", 1.0) / math.sqrt(latent_size),
)
logging.debug(
"initialized with mean magnitude {}".format(
get_mean_latent_vector_magnitude(lat_vecs)
)
)
loss_l1 = torch.nn.L1Loss(reduction="sum").cuda()
optimizer_all = torch.optim.Adam(
[
{
"params": decoder.parameters(),
"lr": lr_schedules[0].get_learning_rate(0),
},
{
"params": lat_vecs.parameters(),
"lr": lr_schedules[1].get_learning_rate(0),
},
]
)
loss_log = []
lr_log = []
lat_mag_log = []
timing_log = []
param_mag_log = {}
start_epoch = 1
if continue_from is not None:
logging.info('continuing from "{}"'.format(continue_from))
lat_epoch = load_latent_vectors(
experiment_directory, continue_from + ".pth", lat_vecs
)
model_epoch = ws.load_model_parameters(
experiment_directory, continue_from, decoder
)
optimizer_epoch = load_optimizer(
experiment_directory, continue_from + ".pth", optimizer_all
)
loss_log, lr_log, timing_log, lat_mag_log, param_mag_log, log_epoch = load_logs(
experiment_directory
)
if not log_epoch == model_epoch:
loss_log, lr_log, timing_log, lat_mag_log, param_mag_log = clip_logs(
loss_log, lr_log, timing_log, lat_mag_log, param_mag_log, model_epoch
)
if not (model_epoch == optimizer_epoch and model_epoch == lat_epoch):
raise RuntimeError(
"epoch mismatch: {} vs {} vs {} vs {}".format(
model_epoch, optimizer_epoch, lat_epoch, log_epoch
)
)
start_epoch = model_epoch + 1
logging.debug("loaded")
logging.info("starting from epoch {}".format(start_epoch))
logging.info(
"Number of decoder parameters: {}".format(
sum(p.data.nelement() for p in decoder.parameters())
)
)
logging.info(
"Number of shape code parameters: {} (# codes {}, code dim {})".format(
lat_vecs.num_embeddings * lat_vecs.embedding_dim,
lat_vecs.num_embeddings,
lat_vecs.embedding_dim,
)
)
for epoch in range(start_epoch, num_epochs + 1):
start = time.time()
logging.info("epoch {}...".format(epoch))
decoder.train()
adjust_learning_rate(lr_schedules, optimizer_all, epoch)
current_scene = 0
scene_avg_loss = 0.0
len_data_loader = len(sdf_loader)
for sdf_data, indices in sdf_loader:
current_scene += 1
#logging.info("Scene: {}/{}".format(current_scene, len_data_loader))
# sdf_data contains the KDTree of the current scene and all the points in that scene
# indices is the index of the npz file -> the scene.
sdf_data = sdf_data.reshape(-1, 4)
sdf_data.requires_grad = False
xyz = sdf_data[:,:3]
num_sdf_samples_total = sdf_data.shape[0]
# TODO check leaf_size impact on speed. default = 40
# Default metric of kdtree is L2 norm, Paper uses L infinity -> chebyshev
sdf_tree = cKDTree(xyz)
outer_sum = 0.0
optimizer_all.zero_grad()
if __name__ == '__main__':
# Shared value counter and lock
mp.set_start_method('spawn', force=True)
manager = mp.Manager()
outer_sum = manager.Value('f', 0)
outer_lock = manager.Lock()
# Create Pool for multiprocessing
start = time.time()
pool = mp.Pool()
# Apply map on array of center points
res = pool.map(functools.partial(trainer,
sdf_tree = sdf_tree,
sdf_grid_radius = sdf_grid_radius,
lat_vecs = lat_vecs,
sdf_data = sdf_data,
indices = indices,
cube_size = cube_size,
outer_sum = outer_sum,
outer_lock = outer_lock,
decoder = decoder,
loss_l1 = loss_l1,
do_code_regularization = do_code_regularization,
code_reg_lambda = code_reg_lambda,
epoch = epoch),
enumerate(sdf_grid_indices))
pool.close()
pool.join()
logging.info("Multiprocessing Time {}".format(time.time() - start))
scene_avg_loss += outer_sum.value
logging.info("Scene {} loss = {}".format(current_scene, outer_sum))
loss_log.append(outer_sum.value)
optimizer_all.step()
logging.info("Epoch scene average loss: {}".format((scene_avg_loss/current_scene)))
end = time.time()
seconds_elapsed = end - start
timing_log.append(seconds_elapsed)
lr_log.append([schedule.get_learning_rate(epoch) for schedule in lr_schedules])
# TODO check what other functions do with lat_vecs and adapt if needed.
lat_mag_log.append(get_mean_latent_vector_magnitude(lat_vecs))
append_parameter_magnitudes(param_mag_log, decoder)
if epoch in checkpoints:
save_checkpoints(epoch)
if epoch % log_frequency == 0:
save_latest(epoch)
save_logs(
experiment_directory,
loss_log,
lr_log,
timing_log,
lat_mag_log,
param_mag_log,
epoch,
)
if __name__ == "__main__":
import argparse
arg_parser = argparse.ArgumentParser(description="Train a DeepLS autodecoder")
arg_parser.add_argument(
"--experiment",
"-e",
dest="experiment_directory",
required=True,
help="The experiment directory. This directory should include "
+ "experiment specifications in 'specs.json', and logging will be "
+ "done in this directory as well.",
)
arg_parser.add_argument(
"--continue",
"-c",
dest="continue_from",
help="A snapshot to continue from. This can be 'latest' to continue"
+ "from the latest running snapshot, or an integer corresponding to "
+ "an epochal snapshot.",
)
arg_parser.add_argument(
"--batch_split",
dest="batch_split",
default=1,
help="This splits the batch into separate subbatches which are "
+ "processed separately, with gradients accumulated across all "
+ "subbatches. This allows for training with large effective batch "
+ "sizes in memory constrained environments.",
)
deep_ls.add_common_args(arg_parser)
args = arg_parser.parse_args()
deep_ls.configure_logging(args)
main_function(args.experiment_directory, args.continue_from, int(args.batch_split))
| 32.330203 | 182 | 0.628588 | 965 | 0.046711 | 0 | 0 | 0 | 0 | 0 | 0 | 3,873 | 0.187473 |
efe7d81ac7833b8ba25967361da1b664addd861c | 498 | py | Python | setup.py | nicosandller/python-ethereumrpc | e826f99bbb34dc3d8009ac9392677e9ae2c9fa36 | [
"MIT"
]
| 1 | 2019-03-28T19:16:21.000Z | 2019-03-28T19:16:21.000Z | setup.py | nicosandller/python-ethereumrpc | e826f99bbb34dc3d8009ac9392677e9ae2c9fa36 | [
"MIT"
]
| null | null | null | setup.py | nicosandller/python-ethereumrpc | e826f99bbb34dc3d8009ac9392677e9ae2c9fa36 | [
"MIT"
]
| null | null | null | from distutils.core import setup
setup(
name = 'python-ethereumrpc',
packages = ['python-ethereumrpc'],
version = '0.1',
description = 'A python interface for ethereum JSON-RPC service.',
author = 'Nicolas Sandller',
author_email = '[email protected]',
url = 'https://github.com/nicosandller/python-ethereumrpc',
download_url = 'https://github.com/nicosandller/python-ethereumrpc/tarball/0.1',
keywords = ['ethereum', 'rpc', 'api', 'JSON', 'JSON-RPC'],
classifiers = [],
)
| 35.571429 | 82 | 0.696787 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 290 | 0.582329 |
efe8537711357e13e0aa907bd882c404ad86cc4e | 988 | py | Python | interface.py | robotafm/motor | 1c0838db12514304b930aec976d7adcbc51b7c92 | [
"MIT"
]
| null | null | null | interface.py | robotafm/motor | 1c0838db12514304b930aec976d7adcbc51b7c92 | [
"MIT"
]
| null | null | null | interface.py | robotafm/motor | 1c0838db12514304b930aec976d7adcbc51b7c92 | [
"MIT"
]
| null | null | null | # /robotafm/motor/interface.py
# Main web interface, contains basic
# information display
# imports:
import xml.dom.minidom
from flask import Flask, render_template
# constants:
LANG = "./lang/rus.xml"
# XML: load text strings from language file
dom = xml.dom.minidom.parse(LANG)
main_title = dom.getElementsByTagName("main_title")[0].childNodes[0].nodeValue
language = dom.getElementsByTagName("language")[0].childNodes[0].nodeValue
greeting = dom.getElementsByTagName("greeting")[0].childNodes[0].nodeValue
invitation = dom.getElementsByTagName("invitation")[0].childNodes[0].nodeValue
main_page_text = dom.getElementsByTagName("main_page_text")[0].childNodes[0].nodeValue
# Flask init:
app = Flask(__name__)
# Main site page:
@app.route('/')
def index():
return render_template(
'index.html',
main_title=main_title,
greeting=greeting,
invitation=invitation,
main_page_text = main_page_text
)
| 29.058824 | 87 | 0.709514 | 0 | 0 | 0 | 0 | 227 | 0.229757 | 0 | 0 | 281 | 0.284413 |
efe95ae7664ab58458aa225b5cb6251325f40d6d | 317 | py | Python | src/eval_codalab_offline.py | bjj9/EVE_SCPT | c91b13f8bbfe8ea29a0e9f1df0dc016a258c904f | [
"MIT"
]
| 21 | 2021-06-20T02:35:08.000Z | 2022-03-16T06:57:03.000Z | src/eval_codalab_offline.py | bjj9/EVE_SCPT | c91b13f8bbfe8ea29a0e9f1df0dc016a258c904f | [
"MIT"
]
| 2 | 2021-06-20T15:43:28.000Z | 2021-08-02T08:37:02.000Z | src/eval_codalab_offline.py | bjj9/EVE_SCPT | c91b13f8bbfe8ea29a0e9f1df0dc016a258c904f | [
"MIT"
]
| 1 | 2021-07-19T11:07:13.000Z | 2021-07-19T11:07:13.000Z | from eval_codalab_basic import eval_codalab_basic
if __name__ == '__main__':
# 1. run first round to prepare full memory
eval_codalab_basic(output_suffix='online', skip_first_round_if_memory_is_ready=True)
# 2. do offline evaluation when memory is ready
eval_codalab_basic(output_suffix='offline')
| 31.7 | 88 | 0.782334 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 117 | 0.369085 |
efed594b93f7036fd9e0fbb23d74fff628cd47d4 | 922 | py | Python | CountingValleys/ValleyCounter.py | monemonesi/TDD_Katas_Python | f21a4f3516b75d7618dcd044453e25be015b4251 | [
"MIT"
]
| null | null | null | CountingValleys/ValleyCounter.py | monemonesi/TDD_Katas_Python | f21a4f3516b75d7618dcd044453e25be015b4251 | [
"MIT"
]
| null | null | null | CountingValleys/ValleyCounter.py | monemonesi/TDD_Katas_Python | f21a4f3516b75d7618dcd044453e25be015b4251 | [
"MIT"
]
| null | null | null | UP = "U"
DOWN = "D"
ALLOWED_PATH_I = [UP, DOWN]
def update_high_for_step(high: int, step: str) -> int:
"""Update the current high given a step"""
if step == UP:
high += 1
elif step == DOWN:
high -= 1
return high
def update_valley_count(valleys_count: int, high: int, previous_high: int) -> int:
if high == 0 and previous_high < 0:
valleys_count += 1
return valleys_count
def count_valley(steps: int, path: str) -> int:
"""Function which returns the number of valley encountered in a given path"""
if len(path) != steps:
raise Exception("Steps should match length of path")
valleys = 0
high = 0
previous_high = 0
for i in range(steps):
previous_high = high
high = update_high_for_step(high, path[i])
valleys = update_valley_count(valleys, high, previous_high)
return valleys
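# Worked example (illustrative, not part of the original kata file):
#   count_valley(8, "UDDDUDUU") == 1
# Only one stretch below sea level is completed (the walk returns to height 0
# exactly once from below), so one valley is counted.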
| 27.117647 | 83 | 0.611714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 160 | 0.173536 |
efee0f491f5feefbc9f83692582c209722451e90 | 84 | py | Python | examples/coordinates3.py | r0the/gpanel | 34cb31ef5abf08b139330fce6b301d920b22cea4 | [
"MIT"
]
| 1 | 2021-03-22T06:31:38.000Z | 2021-03-22T06:31:38.000Z | examples/coordinates3.py | r0the/gpanel | 34cb31ef5abf08b139330fce6b301d920b22cea4 | [
"MIT"
]
| 8 | 2021-03-10T09:50:04.000Z | 2021-03-22T06:33:18.000Z | examples/coordinates3.py | r0the/gpanel | 34cb31ef5abf08b139330fce6b301d920b22cea4 | [
"MIT"
]
| null | null | null | from gpanel import *
coordinates(-3, -3, 11, 11)
line(0, 0, 8, 8)
line(8, 0, 0, 8)
| 14 | 27 | 0.583333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
efee15be03037d97374bea9c4059f5490403f268 | 682 | py | Python | Tree/Leetcode 226. Invert Binary Tree.py | kaizhengny/LeetCode | 67d64536ab80f4966699fe7460d165f2a98d6a82 | [
"MIT"
]
| 31 | 2020-06-23T00:40:04.000Z | 2022-01-08T11:06:24.000Z | Tree/Leetcode 226. Invert Binary Tree.py | kaizhengny/LeetCode | 67d64536ab80f4966699fe7460d165f2a98d6a82 | [
"MIT"
]
| null | null | null | Tree/Leetcode 226. Invert Binary Tree.py | kaizhengny/LeetCode | 67d64536ab80f4966699fe7460d165f2a98d6a82 | [
"MIT"
]
| 7 | 2020-04-30T08:46:03.000Z | 2021-08-28T16:25:54.000Z | class Solution:
def invertTree(self, root: TreeNode) -> TreeNode:
if not root: return root
root.left, root.right = root.right, root.left
self.invertTree(root.left)
self.invertTree(root.right)
return root
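# Alternative iterative version: the same inversion done with a breadth-first
# traversal using a queue instead of recursion.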
class Solution:
def invertTree(self, root: TreeNode) -> TreeNode:
if not root:
return None
q = collections.deque()
q.append(root)
while q:
node = q.popleft()
node.left, node.right =node.right, node.left
if node.left:
q.append(node.left)
if node.right:
q.append(node.right)
return root | 29.652174 | 56 | 0.541056 | 680 | 0.997067 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
efee470e855ae2a217e0a35720dd990d8a0f3c8b | 333 | py | Python | Ex044.py | JeanPauloGarcia/Python-Exercicios | faff4670806c423680ee00a88d3c4c49b437e72e | [
"MIT"
]
| null | null | null | Ex044.py | JeanPauloGarcia/Python-Exercicios | faff4670806c423680ee00a88d3c4c49b437e72e | [
"MIT"
]
| null | null | null | Ex044.py | JeanPauloGarcia/Python-Exercicios | faff4670806c423680ee00a88d3c4c49b437e72e | [
"MIT"
]
| null | null | null | preço = float(input('Preço: '))
print('''Preencha a forma de pagamento com:
1 - p/ À VISTA
2 - p/ CARTÃO 1x
3 - p/ CARTÃO 2x
4 - p/ CARTÃO 3x ou mais
''')
pagto = str(input('Pagamento: ')).strip()
if pagto == '1':
preço = preço*0.9
elif pagto == '2':
preço = preço*0.95
elif pagto == '4':
preço = preço*1.2
print(preço)
| 19.588235 | 43 | 0.597598 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 151 | 0.436416 |
eff27556e4f9b47dbc9ed41d42898d35ce432f5c | 1,264 | py | Python | scorebee/main.py | mikeboers/ScoreBee | e8c3476b6401808a61b495b9c42e8cbe752906b4 | [
"BSD-3-Clause"
]
| null | null | null | scorebee/main.py | mikeboers/ScoreBee | e8c3476b6401808a61b495b9c42e8cbe752906b4 | [
"BSD-3-Clause"
]
| null | null | null | scorebee/main.py | mikeboers/ScoreBee | e8c3476b6401808a61b495b9c42e8cbe752906b4 | [
"BSD-3-Clause"
]
| null | null | null |
import logging
import sys
from .application import Application
from .document import Document, Track, Event
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
app = Application(sys.argv)
if '--debug' in sys.argv:
# # Load a document.
# # We absolutely MUST have the document constructed fully BEFORE
# # setting it here. There are side effects to setting it.
# # HACK: This is just a hack for now.
# # doc = Document()
doc = Document('/Users/mikeboers/Desktop/example.MOV')
# doc = Document('/Users/mikeboers/Desktop/C00000S00A20091231112932302.avi')
doc.add_track(Track(
name='A behaviour',
key='q',
group='top two',
# events=[
# Event(10, 15), Event(50, 65), Event(500, 600)
# ]
))
doc.add_track(Track(
name='Nothin here',
key='w',
group='top two',
# events=[]
))
doc.add_track(Track(
name='Better one',
key='e',
# events=[
# Event(25, 26), Event(70, 71), Event(700, 701)
# ]
))
app.doc = doc
app.run() | 28.088889 | 84 | 0.511076 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 538 | 0.425633 |
eff28154f7d481027598302c0ee3f1c65be8e270 | 45,609 | py | Python | ceci/stage.py | eacharles/ceci | e52e956c9e373c9a632ad0c312770f32ceab0c8b | [
"BSD-3-Clause"
]
| null | null | null | ceci/stage.py | eacharles/ceci | e52e956c9e373c9a632ad0c312770f32ceab0c8b | [
"BSD-3-Clause"
]
| 1 | 2022-01-05T22:04:57.000Z | 2022-01-05T22:04:57.000Z | ceci/stage.py | eacharles/ceci | e52e956c9e373c9a632ad0c312770f32ceab0c8b | [
"BSD-3-Clause"
]
| null | null | null | """Module with core functionality for a single pipeline stage """
import pathlib
import os
import sys
from textwrap import dedent
import shutil
import cProfile
from abc import abstractmethod
from . import errors
from .monitor import MemoryMonitor
from .config import StageConfig, cast_to_streamable
SERIAL = "serial"
MPI_PARALLEL = "mpi"
DASK_PARALLEL = "dask"
IN_PROGRESS_PREFIX = "inprogress_"
class PipelineStage:
"""A PipelineStage implements a single calculation step within a wider pipeline.
Each different type of analysis stage is represented by a subclass of this
base class. The base class handles the connection between different pipeline
stages, and the execution of the stages within a workflow system (parsl),
potentially in parallel (MPI).
An instance of one of these classes represents an actual run of the stage,
with the required inputs, outputs, and configuration specified.
See documentation pages for more details.
"""
parallel = True
dask_parallel = False
config_options = {}
doc = ""
def __init__(self, args, comm=None):
"""Construct a pipeline stage, specifying the inputs, outputs, and configuration for it.
The constructor needs a dict or namespace. It should include:
- input paths (required)
- config path (required)
- output paths (optional but usual)
- additional configuration (required if not specified elsewhere)
Input and output paths should map tags to paths.
Tags are strings, and the first elements in each item in the subclass's
"inputs" and "output" attributes.
e.g. for a subclass with:
inputs = [('eggs', TextFile)]
outputs = [('spam', TextFile)]
the args could contain:
{'eggs': 'inputs/eggs.txt',
'spam': 'outputs/spam.txt' }
If spam is not specified it will default to "./spam.txt"
}
The config should map "config" to a path where a YAML config file
is located, e.g. {'config':'/path/to/config.yml'}
Any config variables that are specified in the class's config attribute
will be searched for first in args, then in the config file, and then
by looking at any default value they have been given.
If they have no default value (and just a type, like int, is listed), then
it's an error if they are not specified somewhere.
        The execute method can instantiate and run the class in one go, with added
        bonuses like profiling and debugging tools.
Parameters
----------
args: dict or namespace
Specification of input and output paths and any missing config options
comm: MPI communicator
(default is None) An MPI comm object to use in preference to COMM_WORLD
"""
self._configs = StageConfig(**self.config_options)
self._inputs = None
self._outputs = None
self._parallel = SERIAL
self._comm = None
self._size = 1
self._rank = 0
self.dask_client = None
self.load_configs(args)
if comm is not None:
self.setup_mpi(comm)
def get_aliases(self):
""" Returns the dictionary of aliases used to remap inputs and outputs
        in the case that we want to have multiple instances of this class in the pipeline """
return self.config.get('aliases', None)
def get_aliased_tag(self, tag):
""" Returns the possibly remapped value for an input or output tag
        Parameters
        ----------
tag : `str`
The input or output tag we are checking
Returns
-------
aliased_tag : `str`
            The aliased version of the tag
"""
aliases = self.get_aliases()
if aliases is None:
return tag
return aliases.get(tag, tag)
@abstractmethod
def run(self): #pragma: no cover
"""Run the stage and return the execution status"""
raise NotImplementedError('run')
def load_configs(self, args):
"""
        Load the configuration
Parameters
----------
args: dict or namespace
Specification of input and output paths and any missing config options
"""
if not isinstance(args, dict):
args = vars(args)
# First, we extract configuration information from a combination of
# command line arguments and optional 'config' file
self._inputs = dict(config=args["config"])
self.read_config(args)
# We first check for missing input files, that's a show stopper
missing_inputs = []
for x in self.input_tags():
val = args.get(x)
aliased_tag = self.get_aliased_tag(x)
if val is None:
val = args.get(aliased_tag)
if val is None: #pragma: no cover
missing_inputs.append(f"--{x}")
else:
self._inputs[aliased_tag] = val
if missing_inputs: #pragma: no cover
missing_inputs = " ".join(missing_inputs)
raise ValueError(
f"""
{self.instance_name} Missing these names on the command line:
Input names: {missing_inputs}"""
)
        # We always assume the config arg exists, whether it is in input_tags or not
if 'config' not in args: #pragma: no cover
raise ValueError("The argument --config was missing on the command line.")
# We prefer to receive explicit filenames for the outputs but will
# tolerate missing output filenames and will default to tag name in
# current folder (this is for CWL compliance)
self._outputs = {}
for i, x in enumerate(self.output_tags()):
if args.get(x) is None:
ftype = self.outputs[i][1] #pylint: disable=no-member
self._outputs[self.get_aliased_tag(x)] = ftype.make_name(x)
else:
self._outputs[self.get_aliased_tag(x)] = args[x]
def setup_mpi(self, comm=None):
"""
Setup the MPI interface
Parameters
----------
comm: MPI communicator
(default is None) An MPI comm object to use in preference to COMM_WORLD
"""
mpi = self.config.get('mpi', False)
if mpi: #pragma: no cover
try:
# This isn't a ceci dependency, so give a sensible error message if not installed.
import mpi4py.MPI
except ImportError:
print("ERROR: Using --mpi option requires mpi4py to be installed.")
raise
# For scripting and testing we allow an MPI communicator or anything
# with the same API to be passed in directly, overriding the --mpi
# flag.
if comm is not None:
self._parallel = MPI_PARALLEL
self._comm = comm
self._size = self._comm.Get_size()
self._rank = self._comm.Get_rank()
elif mpi: #pragma: no cover
self._parallel = MPI_PARALLEL
self._comm = mpi4py.MPI.COMM_WORLD
self._size = self._comm.Get_size()
self._rank = self._comm.Get_rank()
else:
self._parallel = SERIAL
self._comm = None
self._size = 1
self._rank = 0
# If we are running under MPI but this subclass has enabled dask
# then we note that here. It stops various MPI-specific things happening
# later
if (self._parallel == MPI_PARALLEL) and self.dask_parallel:
self._parallel = DASK_PARALLEL
pipeline_stages = {}
incomplete_pipeline_stages = {}
def __init_subclass__(cls, **kwargs):
"""
Python 3.6+ provides a facility to automatically
call a method (this one) whenever a new subclass
is defined. In this case we use that feature to keep
track of all available pipeline stages, each of which is
defined by a class.
"""
super().__init_subclass__(**kwargs)
# This is a hacky way of finding the file
# where our stage was defined
filename = sys.modules[cls.__module__].__file__
stage_is_complete = (
hasattr(cls, 'inputs') and hasattr(cls, 'outputs') and not getattr(cls.run, '__isabstractmethod__', False)
)
# If there isn't an explicit name already then set it here.
# by default use the class name.
if not hasattr(cls, "name"): #pragma: no cover
cls.name = cls.__name__
if cls.name is None:
cls.name = cls.__name__
if stage_is_complete:
# Deal with duplicated class names
if cls.name in cls.pipeline_stages:
other = cls.pipeline_stages[cls.name][1]
raise errors.DuplicateStageName(
"You created two pipeline stages with the"
f"name {cls.name}.\nOne was in {filename}\nand the "
f"other in {other}\nYou can either change the class "
"name or explicitly put a variable 'name' in the top"
"level of the class."
)
# Check for "config" in the inputs list - this is implicit
for name, _ in cls.inputs:
if name == "config":
raise errors.ReservedNameError(
"An input called 'config' is implicit in each pipeline "
"stage and should not be added explicitly. Please update "
f"your pipeline stage called {cls.name} to remove/rename "
"the input called 'config'."
)
# Check if user has over-written the config variable.
# Quite a common error I make myself.
if not isinstance(cls.config, property):
raise errors.ReservedNameError(
"You have a class variable called 'config', which "
"is reserved in ceci for its own configuration. "
"You may have meant to specify config_options?"
)
# Find the absolute path to the class defining the file
path = pathlib.Path(filename).resolve()
# Register the class
if stage_is_complete:
cls.pipeline_stages[cls.name] = (cls, path)
else:
cls.incomplete_pipeline_stages[cls.__name__] = (cls, path)
#############################################
# Life cycle-related methods and properties.
#############################################
@classmethod
def get_stage(cls, name):
"""
Return the PipelineStage subclass with the given name.
This is used so that we do not need a new entry point __main__ function
for each new stage - instead we can just use a single one which can query
which class it should be using based on the name.
Returns
-------
cls: class
The corresponding subclass
"""
stage = cls.pipeline_stages.get(name)
# If not found, then check for incomplete stages
if stage is None:
if name in cls.incomplete_pipeline_stages:
raise errors.IncompleteStage(
f"The stage {name} is not completely written. "
"Stages must specify 'inputs', 'outputs' as class variables "
f"and a 'run' method.\n{name} might be unfinished, or it might "
"be intended as a base for other classes and not to be run."
)
raise errors.StageNotFound(f"Unknown stage '{name}'")
return stage[0]
@classmethod
def get_module(cls):
"""
Return the path to the python package containing the current sub-class
If we have a PipelineStage subclass defined in a module called "bar", in
a package called "foo" e.g.:
/path/to/foo/bar.py <-- contains subclass "Baz"
Then calling Baz.get_module() will return "foo.bar".
We use this later to construct command lines like "python -m foo Baz"
Returns
-------
module: str
The module containing this class.
"""
return cls.pipeline_stages[cls.name][0].__module__
@classmethod
def usage(cls): #pragma: no cover
"""
Print a usage message.
"""
stage_names = "\n- ".join(cls.pipeline_stages.keys())
try:
module = cls.get_module().split(".")[0]
except: #pylint: disable=bare-except
module = "<module_name>"
sys.stderr.write(
f"""
Usage: python -m {module} <stage_name> <stage_arguments>
If no stage_arguments are given then usage information
for the chosen stage will be given.
I currently know about these stages:
- {stage_names}
"""
)
@classmethod
def main(cls):
"""
Create an instance of this stage and execute it with
inputs and outputs taken from the command line
"""
try:
stage_name = sys.argv[1]
except IndexError: #pragma: no cover
cls.usage()
return 1
if stage_name in ["--help", "-h"] and len(sys.argv) == 2: #pragma: no cover
cls.usage()
return 1
stage = cls.get_stage(stage_name)
args = stage.parse_command_line()
stage.execute(args)
return 0
@classmethod
    def parse_command_line(cls, cmd=None):
        """Set up an argument parser and parse the command line
Parameters
----------
cmd : str or None
            The command line to parse (if None this will use the system arguments)
Returns
-------
args : Namespace
            The resulting Namespace mapping arguments to values
"""
import argparse
parser = argparse.ArgumentParser(description=f"Run pipeline stage {cls.name}")
parser.add_argument("stage_name")
for conf, def_val in cls.config_options.items():
opt_type = def_val if isinstance(def_val, type) else type(def_val)
if opt_type == bool:
parser.add_argument(f"--{conf}", action="store_const", const=True)
parser.add_argument(f"--no-{conf}", dest=conf, action="store_const", const=False)
elif opt_type == list:
out_type = def_val[0] if isinstance(def_val[0], type) else type(def_val[0])
if out_type is str: #pragma: no cover
parser.add_argument(
f"--{conf}", type=lambda string: string.split(",")
)
elif out_type is int: #pragma: no cover
parser.add_argument(
f"--{conf}",
type=lambda string: [int(i) for i in string.split(",")],
)
elif out_type is float:
parser.add_argument(
f"--{conf}",
type=lambda string: [float(i) for i in string.split(",")],
)
else: #pragma: no cover
raise NotImplementedError(
"Only handles str, int and float list arguments"
)
else: #pragma: no cover
parser.add_argument(f"--{conf}", type=opt_type)
for inp in cls.input_tags():
parser.add_argument(f"--{inp}")
for out in cls.output_tags():
parser.add_argument(f"--{out}")
parser.add_argument("--config")
if cls.parallel:
parser.add_argument(
"--mpi", action="store_true", help="Set up MPI parallelism"
)
parser.add_argument(
"--pdb", action="store_true", help="Run under the python debugger"
)
parser.add_argument(
"--cprofile",
action="store",
default="",
type=str,
help="Profile the stage using the python cProfile tool",
)
parser.add_argument(
"--memmon",
type=int,
default=0,
help="Report memory use. Argument gives interval in seconds between reports",
)
if cmd is None:
args = parser.parse_args()
else:
args = parser.parse_args(cmd)
return args
@classmethod
def execute(cls, args, comm=None):
"""
Create an instance of this stage and run it
with the specified inputs and outputs.
        This is called by the main method.
Parameters
----------
args: namespace
The argparse namespace for this subclass.
"""
import pdb
# Create the stage instance. Running under dask this only
# actually needs to happen for one process, but it's not a major
# overhead and lets us do a whole bunch of other setup above
stage = cls(args)
stage.setup_mpi(comm)
# This happens before dask is initialized
if stage.rank == 0:
print(f"Executing stage: {cls.name}")
if stage.is_dask():
is_client = stage.start_dask()
# worker and scheduler stages do not execute the
# run method under dask
if not is_client:
return
if args.cprofile: #pragma: no cover
profile = cProfile.Profile()
profile.enable()
if args.memmon: #pragma: no cover
monitor = MemoryMonitor.start_in_thread(interval=args.memmon)
try:
stage.run()
except Exception as error: #pragma: no cover
if args.pdb:
print(
"There was an exception - starting python debugger because you ran with --pdb"
)
print(error)
pdb.post_mortem()
else:
raise
finally:
if args.memmon: #pragma: no cover
monitor.stop()
if stage.is_dask():
stage.stop_dask()
# The default finalization renames any output files to their
# final location, but subclasses can override to do other things too
try:
stage.finalize()
except Exception as error: #pragma: no cover
if args.pdb:
print(
"There was an exception in the finalization - starting python debugger because you ran with --pdb"
)
print(error)
pdb.post_mortem()
else:
raise
if args.cprofile: #pragma: no cover
profile.disable()
profile.dump_stats(args.cprofile)
profile.print_stats("cumtime")
        # Under dask
# the root process has gone off to become the scheduler,
# and process 1 becomes the client which runs this code
# and gets to this point
if stage.rank == 0 or stage.is_dask():
print(f"Stage complete: {cls.name}")
def finalize(self):
"""Finalize the stage, moving all its outputs to their final locations."""
# Synchronize files so that everything is closed
if self.is_mpi(): #pragma: no cover
self.comm.Barrier()
# Move files to their final path
# Only the root process moves things, except under dask it is
# process 1, which is the only process that reaches this point
# (as noted above)
if (self.rank == 0) or self.is_dask():
for tag in self.output_tags():
# find the old and new names
temp_name = self.get_output(tag)
final_name = self.get_output(tag, final_name=True)
# it's not an error here if the path does not exist,
# because that will be handled later.
if pathlib.Path(temp_name).exists():
# replace directories, rather than nesting more results
if pathlib.Path(final_name).is_dir(): #pragma: no cover
shutil.rmtree(final_name)
shutil.move(temp_name, final_name)
else: #pragma: no cover
sys.stderr.write(
f"NOTE/WARNING: Expected output file {final_name} was not generated.\n"
)
#############################################
# Parallelism-related methods and properties.
#############################################
@property
def rank(self):
"""The rank of this process under MPI (0 if not running under MPI)"""
return self._rank
@property
def size(self):
"""The number or processes under MPI (1 if not running under MPI)"""
return self._size
@property
def comm(self):
"""The MPI communicator object (None if not running under MPI)"""
return self._comm
def is_parallel(self):
"""
Returns True if the code is being run in parallel.
Right now is_parallel() will return the same value as is_mpi(),
but that may change in future if we implement other forms of
parallelization.
"""
return self._parallel != SERIAL
def is_mpi(self):
"""
Returns True if the stage is being run under MPI.
"""
return self._parallel == MPI_PARALLEL
def is_dask(self):
"""
Returns True if the stage is being run in parallel with Dask.
"""
return self._parallel == DASK_PARALLEL
def start_dask(self):
"""
Prepare dask to run under MPI. After calling this method
        only a single process, MPI rank 1, will continue to execute code
"""
# using the programmatic dask configuration system
# does not seem to work. Presumably the loggers have already
# been created by the time we modify the config. Doing it with
# env vars seems to work. If the user has already set this then
# we use that value. Otherwise we only want error logs
key = "DASK_LOGGING__DISTRIBUTED"
os.environ[key] = os.environ.get(key, "error")
try:
import dask
import dask_mpi
import dask.distributed
except ImportError: #pragma: no cover
print(
"ERROR: Using --mpi option on stages that use dask requires "
"dask[distributed] and dask_mpi to be installed."
)
raise
if self.size < 3: #pragma: no cover
raise ValueError(
"Dask requires at least three processes. One becomes a scheduler "
"process, one is a client that runs the code, and more are required "
"as worker processes."
)
# This requires my fork until/unless they merge the PR, to allow
# us to pass in these two arguments. In vanilla dask-mpi sys.exit
# is called at the end of the event loop without returning to us.
# After this point only a single process, MPI rank 1,
        # should continue to execute code. The others enter an event
# loop and return with is_client=False, which we return here
# to tell the caller that they should not run everything.
is_client = dask_mpi.initialize(comm=self.comm, exit=False)
if is_client:
# Connect this local process to remote workers.
self.dask_client = dask.distributed.Client()
# I don't yet know how to see this dashboard link at nersc
print(f"Started dask. Diagnostics at {self.dask_client.dashboard_link}")
return is_client
@staticmethod
def stop_dask():
"""
End the dask event loop
"""
from dask_mpi import send_close_signal
send_close_signal()
    def split_tasks_by_rank(self, tasks):
        """Iterate through a list of items, yielding the ones this process is responsible for.
Tasks are allocated in a round-robin way.
Parameters
----------
tasks: iterable
Tasks to split up
"""
for i, task in enumerate(tasks):
if i % self.size == self.rank:
yield task
def data_ranges_by_rank(self, n_rows, chunk_rows, parallel=True):
"""Split a number of rows by process.
Given a total number of rows to read and a chunk size, yield
the ranges within them that this process should handle.
Parameters
----------
n_rows: int
Total number of rows to split up
chunk_rows: int
Size of each chunk to be read.
        parallel: bool
Whether to split data by rank or just give all procs all data.
Default=True
"""
n_chunks = n_rows // chunk_rows
if n_chunks * chunk_rows < n_rows: #pragma: no cover
n_chunks += 1
if parallel:
it = self.split_tasks_by_rank(range(n_chunks))
else:
it = range(n_chunks)
for i in it:
start = i * chunk_rows
end = min((i + 1) * chunk_rows, n_rows)
yield start, end
##################################################
# Input and output-related methods and properties.
##################################################
def get_input(self, tag):
"""Return the path of an input file with the given tag"""
return self._inputs[tag]
def get_output(self, tag, final_name=False):
"""Return the path of an output file with the given tag
If final_name is False then use a temporary name - file will
be moved to its final name at the end
"""
path = self._outputs[tag]
# If not the final version, add a tag at the start of the filename
if not final_name:
p = pathlib.Path(path)
p = p.parent / (IN_PROGRESS_PREFIX + p.name)
path = str(p)
return path
def open_input(self, tag, wrapper=False, **kwargs):
"""
Find and open an input file with the given tag, in read-only mode.
For general files this will simply return a standard
python file object.
For specialized file types like FITS or HDF5 it will return
a more specific object - see the types.py file for more info.
"""
path = self.get_input(tag)
input_class = self.get_input_type(tag)
obj = input_class(path, "r", **kwargs)
if wrapper: #pragma: no cover
return obj
return obj.file
def open_output(self, tag, wrapper=False, final_name=False, **kwargs): #pragma: no cover
"""
Find and open an output file with the given tag, in write mode.
If final_name is True then they will be opened using their final
target output name. Otherwise we will prepend "inprogress_" to their
file name. This means we know that if the final file exists then it
is completed.
If wrapper is True this will return an instance of the class
of the file as specified in the cls.outputs. Otherwise it will
return an open file object (standard python one or something more
specialized).
Parameters
----------
tag: str
Tag as listed in self.outputs
wrapper: bool
Default=False. Whether to return a wrapped file
final_name: bool
            Default=False. Whether to open the output under its final name rather than the in-progress name
**kwargs:
Extra args are passed on to the file's class constructor.
"""
path = self.get_output(tag, final_name=final_name)
output_class = self.get_output_type(tag)
# HDF files can be opened for parallel writing
# under MPI. This checks if:
# - we have been told to open in parallel
# - we are actually running under MPI
# and adds the flags required if all these are true
run_parallel = kwargs.pop("parallel", False) and self.is_mpi()
if run_parallel:
kwargs["driver"] = "mpio"
kwargs["comm"] = self.comm
# XXX: This is also not a dependency, but it should be.
# Or even better would be to make it a dependency of descformats where it
# is actually used.
import h5py
if not h5py.get_config().mpi:
print(
dedent(
"""\
Your h5py installation is not MPI-enabled.
Options include:
1) Set nprocess to 1 for all stages
2) Upgrade h5py to use mpi. See instructions here:
http://docs.h5py.org/en/latest/build.html#custom-installation
Note: If using conda, the most straightforward way is to enable it is
conda install -c spectraldns h5py-parallel
"""
)
)
raise RuntimeError("h5py module is not MPI-enabled.")
# Return an opened object representing the file
obj = output_class(path, "w", **kwargs)
if wrapper:
return obj
return obj.file
@classmethod
def inputs_(cls):
"""
        Return the list of input (tag, type) pairs
"""
return cls.inputs #pylint: disable=no-member
@classmethod
def outputs_(cls):
"""
        Return the list of output (tag, type) pairs
"""
return cls.outputs #pylint: disable=no-member
@classmethod
def output_tags(cls):
"""
Return the list of output tags required by this stage
"""
return [tag for tag, _ in cls.outputs_()]
@classmethod
def input_tags(cls):
"""
Return the list of input tags required by this stage
"""
return [tag for tag, _ in cls.inputs_()]
def get_input_type(self, tag):
"""Return the file type class of an input file with the given tag."""
for t, dt in self.inputs_():
if t == tag:
return dt
raise ValueError(f"Tag {tag} is not a known input") #pragma: no cover
def get_output_type(self, tag):
"""Return the file type class of an output file with the given tag."""
for t, dt in self.outputs_():
if t == tag:
return dt
raise ValueError(f"Tag {tag} is not a known output") #pragma: no cover
##################################################
# Configuration-related methods and properties.
##################################################
@property
def instance_name(self):
"""Return the name associated to this particular instance of this stage"""
return self._configs.get('name', self.name)
@property
def config(self):
"""
Returns the configuration dictionary for this stage, aggregating command
line options and optional configuration file.
"""
return self._configs
def read_config(self, args):
"""
This function looks for the arguments of the pipeline stage using a
combination of default values, command line options and separate
configuration file.
The order for resolving config options is first looking for a default
        value, then looking for a value in the configuration file, and finally for one on the command line.
In case a mandatory argument (argument with no default) is missing,
an exception is raised.
Note that we recognize arguments with no default as the ones where
self.config_options holds a type instead of a value.
"""
# Try to load configuration file if provided
import yaml
config_file = self.get_input("config")
# This is all the config information in the file, including
# things for other stages
if config_file is not None:
with open(config_file) as _config_file:
overall_config = yaml.safe_load(_config_file)
else:
overall_config = {}
# The user can define global options that are inherited by
# all the other sections if not already specified there.
input_config = overall_config.get("global", {})
# This is just the config info in the file for this stage.
# It may be incomplete - there may be things specified on the
# command line instead, or just using their default values
stage_config = overall_config.get(self.instance_name, {})
input_config.update(stage_config)
self._configs.set_config(input_config, args)
def get_config_dict(self, ignore=None, reduce_config=False):
"""Write the current configuration to a dict
Parameters
----------
ignore : dict or None
Global parameters not to write
reduce_config : bool
If true, reduce the configuration by parsing out the inputs, outputs and global params
Returns
-------
out_dict : dict
The configuration
"""
out_dict = {}
if reduce_config:
ignore_keys = self.input_tags() + self.output_tags() + ['config']
else:
ignore_keys = []
ignore = ignore or {}
for key, val in self.config.items():
if reduce_config:
if key in ignore:
if ignore[key] == val:
continue
if key in ignore_keys:
continue
out_dict[key] = cast_to_streamable(val)
return out_dict
    def find_inputs(self, pipeline_files):
        """Find and return all the inputs associated with this stage in the FileManager
These are returned as a dictionary of tag : path pairs
"""
ret_dict = {}
for tag, _ in self.inputs_():
aliased_tag = self.get_aliased_tag(tag)
ret_dict[aliased_tag] = pipeline_files[aliased_tag]
return ret_dict
    def find_outputs(self, outdir):
        """Find and return all the outputs associated with this stage
These are returned as a dictionary of tag : path pairs
"""
ret_dict = {}
for tag, ftype in self.outputs_():
aliased_tag = self.get_aliased_tag(tag)
ret_dict[aliased_tag] = f"{outdir}/{ftype.make_name(aliased_tag)}"
return ret_dict
def print_io(self, stream=sys.stdout):
"""Print out the tags, paths and types for all the inputs and outputs of this stage"""
stream.write("Inputs--------\n")
for tag, ftype in self.inputs_():
aliased_tag = self.get_aliased_tag(tag)
stream.write(f"{tag:20} : {aliased_tag:20} :{str(ftype):20} : {self._inputs[tag]}\n")
stream.write("Outputs--------\n")
for tag, ftype in self.outputs_():
aliased_tag = self.get_aliased_tag(tag)
stream.write(f"{tag:20} : {aliased_tag:20} :{str(ftype):20} : {self._outputs[aliased_tag]}\n")
    def should_skip(self, run_config):
        """Return True if we should skip a stage because its outputs already exist and we are in resume mode"""
outputs = self.find_outputs(run_config["output_dir"]).values()
already_run_stage = all(os.path.exists(output) for output in outputs)
return already_run_stage and run_config["resume"]
def already_finished(self):
"""Print a warning that a stage is being skipped"""
print(f"Skipping stage {self.instance_name} because its outputs exist already")
def iterate_fits(self, tag, hdunum, cols, chunk_rows, parallel=True): #pragma: no cover
"""
Loop through chunks of the input data from a FITS file with the given tag
        TODO: add ceci tests of this function
Parameters
----------
tag: str
The tag from the inputs list to use
hdunum: int
The extension number to read
cols: list
The columns to read
chunk_rows: int
            Number of rows to read and return at once
parallel: bool
Whether to split up data among processes (parallel=True) or give
all processes all data (parallel=False). Default = True.
Returns
-------
it: iterator
Iterator yielding (int, int, array) tuples of (start, end, data)
data is a structured array.
"""
fits = self.open_input(tag)
ext = fits[hdunum]
n = ext.get_nrows()
for start, end in self.data_ranges_by_rank(n, chunk_rows, parallel=parallel):
data = ext.read_columns(cols, rows=range(start, end))
yield start, end, data
def iterate_hdf(
self, tag, group_name, cols, chunk_rows, parallel=True, longest=False
):
"""
Loop through chunks of the input data from an HDF5 file with the given tag.
All the selected columns must have the same length.
Parameters
----------
tag: str
The tag from the inputs list to use
        group_name: str
The group within the HDF5 file to use, looked up as
file[group]
cols: list
The columns to read
chunk_rows: int
            Number of rows to read and return at once
parallel: bool
Whether to split up data among processes (parallel=True) or give
all processes all data (parallel=False). Default = True.
longest: bool
Whether to allow mixed length arrays and keep going until the longest
array is completed, returning empty arrays for shorter ones
Returns
-------
it: iterator
Iterator yielding (int, int, dict) tuples of (start, end, data)
"""
import numpy as np
hdf = self.open_input(tag)
group = hdf[group_name]
# Check all the columns are the same length
N = [len(group[col]) for col in cols]
n = max(N)
if not longest:
if not np.equal(N, n).all():
                raise ValueError(
                    f"Different columns among {cols} in file {tag} group {group_name} "
                    "are different sizes - if this is acceptable set longest=True"
)
# Iterate through the data providing chunks
for start, end in self.data_ranges_by_rank(n, chunk_rows, parallel=parallel):
data = {col: group[col][start:end] for col in cols}
yield start, end, data
################################
# Pipeline-related methods
################################
@classmethod
def generate_command(cls, inputs, config, outputs, aliases=None):
"""
Generate a command line that will run the stage
"""
module = cls.get_module()
module = module.split(".")[0]
flags = [cls.name]
aliases = aliases or {}
for tag, _ in cls.inputs_():
aliased_tag = aliases.get(tag, tag)
try:
fpath = inputs[aliased_tag]
except KeyError as msg: #pragma: no cover
raise ValueError(f"Missing input location {aliased_tag} {str(inputs)}") from msg
flags.append(f"--{tag}={fpath}")
flags.append(f"--config={config}")
for tag, _ in cls.outputs_():
aliased_tag = aliases.get(tag, tag)
try:
fpath = outputs[aliased_tag]
except KeyError as msg: #pragma: no cover
raise ValueError(f"Missing output location {aliased_tag} {str(outputs)}") from msg
flags.append(f"--{tag}={fpath}")
flags = " ".join(flags)
# We just return this, instead of wrapping it in a
# parsl job
cmd = f"python3 -m {module} {flags}"
return cmd
@classmethod
def generate_cwl(cls, log_dir=None):
"""
Produces a CWL App object which can then be exported to yaml
"""
import cwlgen
module = cls.get_module()
module = module.split(".")[0]
# Basic definition of the tool
cwl_tool = cwlgen.CommandLineTool(
tool_id=cls.name,
label=cls.name,
base_command="python3",
cwl_version="v1.0",
doc=cls.__doc__,
)
if log_dir is not None:
cwl_tool.stdout = f"{cls.name}.out"
cwl_tool.stderr = f"{cls.name}.err"
# Adds the first input binding with the name of the module and pipeline stage
input_arg = cwlgen.CommandLineBinding(position=-1, value_from=f"-m{module}")
cwl_tool.arguments.append(input_arg)
input_arg = cwlgen.CommandLineBinding(position=0, value_from=f"{cls.name}")
cwl_tool.arguments.append(input_arg)
type_dict = {int: "int", float: "float", str: "string", bool: "boolean"}
# Adds the parameters of the tool
for opt, def_val in cls.config_options.items():
# Handles special case of lists:
if isinstance(def_val, list):
v = def_val[0]
param_type = {
"type": "array",
"items": type_dict[v] if isinstance(v, type) else type_dict[type(v)],
}
default = def_val if not isinstance(v, type) else None
input_binding = cwlgen.CommandLineBinding(
prefix=f"--{opt}=", item_separator=",", separate=False
)
else:
param_type = (
type_dict[def_val]
if isinstance(def_val, type)
else type_dict[type(def_val)]
)
default = def_val if not isinstance(def_val, type) else None
if param_type == "boolean":
input_binding = cwlgen.CommandLineBinding(prefix=f"--{opt}")
else:
input_binding = cwlgen.CommandLineBinding(
prefix=f"--{opt}=", separate=False
)
input_param = cwlgen.CommandInputParameter(
opt,
label=opt,
param_type=param_type,
input_binding=input_binding,
default=default,
doc="Some documentation about this parameter",
)
# We are bypassing the cwlgen builtin type check for the special case
# of arrays until that gets added to the standard
if isinstance(def_val, list):
input_param.type = param_type
cwl_tool.inputs.append(input_param)
# Add the inputs of the tool
for i, inp in enumerate(cls.input_tags()):
input_binding = cwlgen.CommandLineBinding(prefix=f"--{inp}")
input_param = cwlgen.CommandInputParameter(
inp,
label=inp,
param_type="File",
param_format=cls.inputs[i][1].format, #pylint: disable=no-member
input_binding=input_binding,
doc="Some documentation about the input",
)
cwl_tool.inputs.append(input_param)
# Adds the overall configuration file
input_binding = cwlgen.CommandLineBinding(prefix="--config")
input_param = cwlgen.CommandInputParameter(
"config",
label="config",
param_type="File",
param_format="http://edamontology.org/format_3750",
input_binding=input_binding,
doc="Configuration file",
)
cwl_tool.inputs.append(input_param)
# Add the definition of the outputs
for i, out in enumerate(cls.output_tags()):
output_name = cls.outputs[i][1].make_name(out) #pylint: disable=no-member
output_binding = cwlgen.CommandOutputBinding(glob=output_name)
output = cwlgen.CommandOutputParameter(
out,
label=out,
param_type="File",
output_binding=output_binding,
param_format=cls.outputs[i][1].format, #pylint: disable=no-member
doc="Some results produced by the pipeline element",
)
cwl_tool.outputs.append(output)
if log_dir is not None:
output = cwlgen.CommandOutputParameter(
f"{cls.name}@stdout",
label="stdout",
param_type="stdout",
doc="Pipeline elements standard output",
)
cwl_tool.outputs.append(output)
error = cwlgen.CommandOutputParameter(
f"{cls.name}@stderr",
label="stderr",
param_type="stderr",
                doc="Pipeline elements standard error",
)
cwl_tool.outputs.append(error)
# Potentially add more metadata
# This requires a schema however...
# metadata = {'name': cls.name,
# 'about': 'Some additional info',
# 'publication': [{'id': 'one_doi'}, {'id': 'another_doi'}],
# 'license': ['MIT']}
# cwl_tool.metadata = cwlgen.Metadata(**metadata)
return cwl_tool
| 35.912598 | 118 | 0.568967 | 45,206 | 0.991164 | 4,335 | 0.095047 | 16,911 | 0.370782 | 0 | 0 | 22,892 | 0.501918 |
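A minimal concrete stage makes the lifecycle described in the docstrings above easier to follow. The sketch below is illustrative only: the TextFile stand-in, the tag names, the file paths, and the exact way the args dict is interpreted are assumptions drawn from the docstrings, not code shipped with ceci.

# Sketch only; assumes ceci's PipelineStage is importable as below.
from ceci.stage import PipelineStage

class TextFile:
    """Minimal stand-in for a ceci file type; only make_name() is used here."""
    suffix = "txt"

    @classmethod
    def make_name(cls, tag):
        return f"{tag}.{cls.suffix}"

class Spam(PipelineStage):
    """Hypothetical stage that copies 'eggs' to 'spam' a configurable number of times."""
    name = "Spam"
    inputs = [("eggs", TextFile)]
    outputs = [("spam", TextFile)]
    config_options = {"repeat": 1}

    def run(self):
        with open(self.get_input("eggs")) as f:
            text = f.read()
        with open(self.get_output("spam"), "w") as f:
            f.write(text * self.config["repeat"])

# Scripted use: no YAML file, so 'config' is None; finalize() renames the
# in-progress output file to its final path.
stage = Spam({"config": None, "eggs": "eggs.txt", "spam": "spam.txt", "repeat": 2})
stage.run()
stage.finalize()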
eff725f55234c2a2a095069fa9d26ab47ed278d3 | 8,400 | py | Python | TrinaPointAndClick/src/TrinaPointAndClick/scripts/Marker_List_Node.py | mjclements/TRINA-WPI-2.0 | aa060819522ed9010d20e9db0cf45b19f6b083af | [
"MIT"
]
| null | null | null | TrinaPointAndClick/src/TrinaPointAndClick/scripts/Marker_List_Node.py | mjclements/TRINA-WPI-2.0 | aa060819522ed9010d20e9db0cf45b19f6b083af | [
"MIT"
]
| null | null | null | TrinaPointAndClick/src/TrinaPointAndClick/scripts/Marker_List_Node.py | mjclements/TRINA-WPI-2.0 | aa060819522ed9010d20e9db0cf45b19f6b083af | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
import rospy
import numpy as np
import math
from geometry_msgs.msg import PoseStamped, Transform
from TrinaPointAndClick.msg import Marker, MarkerArray
class Marker_Node():
"""
This node listens to marker tracker data and keeps track of the pose of all markers seen during run-time. This node also computes the transform from the robot frame to the marker.
"""
def __init__(self):
"""
Initialise the list of markers and the subscribers and publishers.
Parameters:
None
Returns:
None
"""
rospy.loginfo("Marker_node started.")
#initialize list of markers
self.marker_list = []
#set up publishers and subscribers
self.sub_marker_tracker = rospy.Subscriber('/MarkerPose', PoseStamped, self.callback_tracker)
self.pub_marker_list = rospy.Publisher('/MarkerArray', MarkerArray, latch=True, queue_size=1)
#create a ROS Timer for publishing data
rospy.Timer(rospy.Duration(1.0/60.0), self.callback_publish_list)
#keep the node alive while it is waiting for data
rospy.loginfo("rospy.spin()")
rospy.spin()
def callback_tracker(self, data):
"""
Takes a pose from the camera to a marker and adds or updates its values in the list.
Parameters:
data - containing a stamped pose from the camera to a marker as well as the marker id in the header
Returns:
None
"""
#convert incoming stamped pose data to Marker message
initial_data = self.convert_transform(data)
#check if marker is not in the known marker set
if initial_data.id_number <= 8 or initial_data.id_number == 999:
#loops through the list to see if the marker id provided is in the list
for i in xrange(len(self.marker_list)):
#compare provided number with the i'th marker's ID number
if self.marker_list[i].id_number == initial_data.id_number:
self.marker_list[i].workspace = initial_data.workspace
self.marker_list[i].visible = initial_data.visible
self.marker_list[i].time = initial_data.time
self.marker_list[i].transform = initial_data.transform
self.marker_list[i].distance = initial_data.distance
#return if the marker is found in the list
return None
#If marker is not found in the list, add marker to list
self.marker_list.append(initial_data)
#sorts the list by the marker id
self.marker_list=sorted(self.marker_list, key=lambda marker: marker.id_number, reverse=True)
else:
return None
def calculate_transform(self, data):
"""
Takes a marker pose ROS message and returns a robot-base-frame-to-marker transform ROS message.
Parameters:
data - pose ROS message of marker relative to camera
Returns:
Transform ROS message of robot base frame to marker
"""
#calculate the transform
in_x = data.position.x
in_y = data.position.y
in_z = data.position.z
input_translation = [in_x, in_y, in_z]
multiplier = np.array([[ -0.02025737, -0.31392, 0.04627322],
[-0.38235706, 0.04113464, 0.03979437],
[-0.03673691, -0.27182984, -0.36413172 ]], dtype=np.float)
offset = np.array([0.45368236, -0.14424458, 0.8933589], dtype=np.float)
output_translation = np.matmul(multiplier, input_translation)+ offset
#build the transform
output_transform = Transform()
output_transform.translation.x = output_translation[0]
output_transform.translation.y = output_translation[1]
output_transform.translation.z = output_translation[2]
#TODO: Check that the rotation transform is correct.
output_transform.rotation = data.orientation
return output_transform
def calculate_distance(self, data):
"""
Takes a transform and calculates the Euclidean distance to the marker from the robot base frame.
Parameters:
data - Transform ROS message of robot base frame to marker
Returns:
Euclidean distance to the marker from the robot base frame
"""
#read in the data
trans_x = data.translation.x
trans_y = data.translation.y
trans_z = data.translation.z
#calculate the Euclidean distance
distance = math.sqrt((0-trans_x)**2+(0-trans_y)**2+(0-trans_z)**2)
return distance
def convert_transform(self, data):
"""
Takes a header and transform ROS message and converts it to a Marker message. The transform from the camera to the marker is modified to be the transform from the robot base frame to the marker.
Parameters:
data - Transform ROS message with header containing marker id
Returns:
Marker ROS message
"""
#build the Marker message
marker_message = Marker()
marker_message.header.stamp = data.header.stamp
marker_message.header.frame_id = "camera"
marker_message.child_frame_id = "marker " + data.header.frame_id
marker_message.id_number = int(data.header.frame_id)
marker_message.workspace = self.classify_list(marker_message.id_number)
marker_message.visible = True
marker_message.time = rospy.get_time()
marker_message.transform = self.calculate_transform(data.pose)
marker_message.distance = self.calculate_distance(marker_message.transform)
return marker_message
def check_visible(self, duration):
"""
Checks the list to see what markers should be marked as having not been visible for at given duration.
Paramerters:
duration - maximum time that a marker can not be detected before it is marked as not visible
Returns:
None
"""
#current time
now = rospy.get_time()
#loop through list to find markers that are not visible
for i in xrange(len(self.marker_list)):
#last seen time
last_seen = self.marker_list[i].time
#check to see difference between time now and last time the marker was seen
if (now - last_seen > duration):
self.marker_list[i].visible = False
def classify_list(self, marker_id):
"""
Labels the provided marker if it is in the list of marker id numbers that are known to be the workspace markers.
Parameters:
marker_id - a Marker id number
Returns:
Marker ROS message
"""
#hard coded list of workspace marker id numbers:
list_of_marker_id = [0,1,2]
#loops through list of workspace marker id numbers to check if the given number is in the list
for i in xrange(len(list_of_marker_id)):
if marker_id == list_of_marker_id[i]:
return True
return False
def callback_publish_list(self, timer):
"""
Publishes the list at a fixed rate (60Hz), which is the frame rate of the camera.
Parameters:
None
Returns:
None
"""
#check list for markers that are no longer visible
self.check_visible(0.5)
#build the message to be published
pub_list = MarkerArray()
pub_list.header.stamp = rospy.Time.now()
pub_list.header.frame_id = "base"
pub_list.markers = self.marker_list
#publish the marker list
self.pub_marker_list.publish(pub_list)
if __name__ == '__main__':
"""
Initializes node and names it
Parameters:
None
Returns:
None
"""
print "Initializing Marker_Node..."
rospy.init_node('Marker_Node')
try:
Marker_Node = Marker_Node()
except rospy.ROSInterruptException:
        rospy.logerr("Failed to start server node.")
pass
| 38.181818 | 202 | 0.616548 | 7,822 | 0.93119 | 0 | 0 | 0 | 0 | 0 | 0 | 3,671 | 0.437024 |
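The camera-to-robot-base conversion in calculate_transform above is a fixed affine map (matrix multiply plus offset). A standalone check of the same arithmetic outside ROS might look like this; the input point is made up:

import numpy as np

M = np.array([[-0.02025737, -0.31392,     0.04627322],
              [-0.38235706,  0.04113464,  0.03979437],
              [-0.03673691, -0.27182984, -0.36413172]])
offset = np.array([0.45368236, -0.14424458, 0.8933589])

camera_xyz = np.array([0.10, -0.20, 0.90])   # hypothetical marker position in the camera frame
base_xyz = M @ camera_xyz + offset           # same formula as calculate_transform
print(base_xyz)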
eff99e10986bd9b8e0f53017db77d82913562ddf | 1,102 | py | Python | topology.py | Patatone/ryu-static-load-balancing | 7f3508ff8b135736150ad5c38b544d6e6ba90509 | ["Apache-2.0"] | null | null | null | topology.py | Patatone/ryu-static-load-balancing | 7f3508ff8b135736150ad5c38b544d6e6ba90509 | ["Apache-2.0"] | null | null | null | topology.py | Patatone/ryu-static-load-balancing | 7f3508ff8b135736150ad5c38b544d6e6ba90509 | ["Apache-2.0"] | null | null | null |
from mininet.topo import Topo
from mininet.link import TCLink
class Topology(Topo):
def build(self):
# Hosts and switches
host1 = self.addHost('H1')
host2 = self.addHost('H2')
host3 = self.addHost('H3')
host4 = self.addHost('H4')
host5 = self.addHost('H5')
server1 = self.addHost('SRV1', ip='10.0.1.1/8', mac="00:00:00:00:01:01")
server2 = self.addHost('SRV2', ip='10.0.1.2/8', mac="00:00:00:00:01:02")
switch1 = self.addSwitch('SW1')
# Links
self.addLink(server1, switch1, port2=1, cls=TCLink, bw=1000, delay='1ms')
self.addLink(server2, switch1, port2=2, cls=TCLink, bw=1000, delay='1ms')
self.addLink(host1, switch1, cls=TCLink, bw=1000, delay='5ms')
self.addLink(host2, switch1, cls=TCLink, bw=1000, delay='5ms')
self.addLink(host3, switch1, cls=TCLink, bw=1000, delay='5ms')
self.addLink(host4, switch1, cls=TCLink, bw=1000, delay='5ms')
self.addLink(host5, switch1, cls=TCLink, bw=1000, delay='5ms')
topos = { 'topology': ( lambda: Topology() ) }
| 39.357143 | 81 | 0.607985 | 989 | 0.897459 | 0 | 0 | 0 | 0 | 0 | 0 | 171 | 0.155172 |
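One way to bring this topology up against a remote controller (for example the Ryu load-balancing app this repository is about) is sketched below; the controller address and port are assumptions. The same can be done from the shell with: sudo mn --custom topology.py --topo topology --controller remote

from mininet.net import Mininet
from mininet.node import RemoteController
from mininet.link import TCLink
from topology import Topology

net = Mininet(topo=Topology(), link=TCLink, controller=None)
net.addController('c0', controller=RemoteController, ip='127.0.0.1', port=6633)
net.start()
net.pingAll()
net.stop()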
eff9cec3835ce08f6cdd64396a53993ba845ce23 | 5,155 | py | Python | JFJB.py | stevevai/JFJB-crawler | 182c8930e5e979ea9176452764e9494a17574b1f | ["Apache-2.0"] | 1 | 2019-04-14T16:28:28.000Z | 2019-04-14T16:28:28.000Z | JFJB.py | stevevai/JFJB-crawler | 182c8930e5e979ea9176452764e9494a17574b1f | ["Apache-2.0"] | null | null | null | JFJB.py | stevevai/JFJB-crawler | 182c8930e5e979ea9176452764e9494a17574b1f | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 12 23:00:28 2018
@author: wangshuai
"""
import urllib
import urllib.request as urllib2
import http.cookiejar as cookielib
import io
import re
import gzip
from selenium import webdriver
import datetime
def get_Time():
begin = datetime.date(2016,1,1)
end = datetime.date(2018,4,23)
time_list = []
for i in range((end - begin).days+1):
day = begin + datetime.timedelta(days=i)
time_list.append(day.strftime("%Y-%m/%d"))
return time_list
class Config:
def __init__(self):
self.config = {}
self.config["headers"] = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36"
self.config["outputPath"] = "./"
self.config["keywords"] = ["习近平","习主席","中央军委主席","中共中央总书记","国家主席"]
self.config["base_url"] = "http://www.81.cn/jfjbmap/content/"
def get(self, key, parent=None):
if key and key in self.config.keys():
return self.config[key]
def get_Html(url, js = False, time = 0):
config = Config()
if js:
try:
driver = webdriver.PhantomJS()
driver.get(url)
except Exception as err:
print (err)
            print("=== Network unstable, retrying connection ...")
if time==0:
return -1
time -= 1
return get_Html(url, js=True, time=time)
html = driver.page_source
driver.close()
return html
else:
try:
cj = cookielib.CookieJar()
proxy = urllib2.ProxyHandler({'https': '127.0.0.1:1080'})
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
opener.addheaders = [("User-agent", config.get("headers"))]
urllib2.install_opener(opener)
req=urllib2.Request(url)
con=urllib2.urlopen(req)
html=con.read()
if con.getheader('Content-Encoding') == "gzip":
buf = io.BytesIO(html)
gf = gzip.GzipFile(fileobj=buf)
html = gf.read()
html = html.decode('utf-8')
except Exception as err:
print (err)
            print("=== Network unstable, retrying connection ...")
if time==0:
return -1
time -= 1
return get_Html(url, js=False, time=time)
return html
def save(info, handler):
for i in range(len(info["time"])):
for ss in ["time","title"]:
txt = info[ss][i].strip(" ")
if ss=="time":
txt+="->"
handler.write(txt)
handler.write("\r\n")
class GetArticle:
def __init__(self, config, handler = None):
self.config = config
self.url = self.config.get("base_url")
self.handler = handler
self.article={}
self.article["url"] = []
self.article["title"] = []
self.article["detail"] = []
self.article["time"] = []
def index_detail(self):
pattern_index = re.compile('<li><a href="(.*?)">(.*?)</a></li>')
pattern_detail = re.compile('<P>(.*?)</P>')
time_list = get_Time()
# ifile = open("detail_info.txt","w",encoding='utf-8')
for i in range(len(time_list)):
url_loop = self.url+time_list[i]+"/node_2.htm"
try:
index = pattern_index.findall(get_Html(url_loop,js=False,time=3))
url = urllib.parse.urljoin(url_loop,index[0][0])
title = index[0][1]
# detail_list = pattern_detail.findall(get_Html(url,js=False,time=3))
# detail = ""
# for j in range(len(detail_list)):
# detail += detail_list[j]
key_flag = 0
for key in self.config.get("keywords"):
if key in title:
key_flag = 1
if key_flag:
self.article["time"].append(time_list[i])
self.article["title"].append(title)
self.article["url"].append(url)
# self.article["detail"].append(detail)
# ifile.write(time_list[i]+": "+title+"\r\n"+url+"\r\n"+detail+"\r\n")
if i%30 == 0:
print(str(i)+"->"+time_list[i]+": "+title)
print(url)
else:
continue
except Exception as err:
print(err)
                print("...URL: "+url_loop+" fetch|parse error...")
continue
# ifile.close()
save(self.article, self.handler)
if __name__ == '__main__':
config = Config()
ifile = open(config.get("outputPath")+"rough_info.txt","w",encoding='utf-8')
getArticle = GetArticle(config, handler = ifile)
getArticle.index_detail()
ifile.close()
| 31.625767 | 156 | 0.49098 | 2,610 | 0.496292 | 0 | 0 | 0 | 0 | 0 | 0 | 1,231 | 0.234075 |
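The per-day index pages above are parsed with the pattern_index regular expression, and the relative article link is resolved against the page URL. A self-contained check of that step (the HTML snippet and the article filename are made up):

import re
from urllib.parse import urljoin

pattern_index = re.compile(r'<li><a href="(.*?)">(.*?)</a></li>')
html = '<li><a href="content_123456.htm">Example headline</a></li>'
href, title = pattern_index.findall(html)[0]
page_url = 'http://www.81.cn/jfjbmap/content/2018-04/01/node_2.htm'
print(urljoin(page_url, href), title)   # resolves relative to the same day's directory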
effa145f7d27636ce9979e5fe2ebebe04e1345c3 | 392 | py | Python | tensorprob/samplers/base.py | ibab/tensorfit | 53bbb324520f34335a272dc057c3ae6e9d2c575e | ["MIT"] | 95 | 2016-02-29T08:25:07.000Z | 2021-06-02T15:33:01.000Z | tensorprob/samplers/base.py | ibab/tensorprob | 79efa5678f984a2bb92573fb25c17b9475baef23 | ["MIT"] | 48 | 2016-02-19T00:56:05.000Z | 2016-02-28T23:12:12.000Z | tensorprob/samplers/base.py | ibab/tensorfit | 53bbb324520f34335a272dc057c3ae6e9d2c575e | ["MIT"] | 19 | 2016-02-29T00:14:34.000Z | 2020-06-18T06:07:39.000Z |
import tensorflow as tf
class BaseSampler(object):
def __init__(self, session=None):
self._session = session or tf.get_default_session()
def sample(self, variables, cost, gradient=None):
raise NotImplementedError
@property
def session(self):
return self._session
@session.setter
def session(self, session):
self._session = session
| 21.777778 | 59 | 0.67602 | 366 | 0.933673 | 0 | 0 | 140 | 0.357143 | 0 | 0 | 0 | 0 |
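BaseSampler above only fixes the constructor and the session property; concrete samplers are expected to override sample(). A do-nothing subclass showing that contract (the class name and its behaviour are invented for illustration, not part of tensorprob):

class EvalSampler(BaseSampler):
    def sample(self, variables, cost, gradient=None):
        # A real sampler would propose new values for `variables` guided by `cost`
        # (and optionally `gradient`); here we just evaluate the cost once.
        return self.session.run(cost)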
effaf46adea62c6d7c4589ee3471fc9f1f1bc8dc | 3,327 | py | Python | scripts/ocgis_subset.py | Zeitsperre/flyingpigeon | 678370bf428af7ffe11ee79be3b8a89c73215e5e | ["Apache-2.0"] | 1 | 2016-12-04T18:01:49.000Z | 2016-12-04T18:01:49.000Z | scripts/ocgis_subset.py | Zeitsperre/flyingpigeon | 678370bf428af7ffe11ee79be3b8a89c73215e5e | ["Apache-2.0"] | 13 | 2017-03-16T15:44:21.000Z | 2019-08-19T16:56:04.000Z | scripts/ocgis_subset.py | Zeitsperre/flyingpigeon | 678370bf428af7ffe11ee79be3b8a89c73215e5e | ["Apache-2.0"] | null | null | null |
from os import path, listdir
import ocgis
from flyingpigeon import subset
from flyingpigeon import utils
from flyingpigeon.ocgis_module import call
def get_prediction(gam_model, ncs_indices): # mask=None
"""
predict the probabillity based on the gam_model and the given climate index datasets
:param gam_model: fitted gam (output from sdm.get_gam)
:pram nsc_indices: list of netCDF files containing climate indices of one dataset
:param mask: 2D array of True/False to exclude areas (e.g ocean) for prediction
:return array: 3D array with prediction values
"""
from netCDF4 import Dataset
from os.path import basename
from numpy import squeeze, ravel, array, reshape # , zeros, broadcast_arrays, nan
from flyingpigeon.utils import get_variable
from rpy2.robjects.packages import importr
import rpy2.robjects as ro
import rpy2.robjects.numpy2ri
rpy2.robjects.numpy2ri.activate()
mgcv = importr("mgcv")
stats = importr("stats")
ncs_indices.sort()
data = {}
for i, nc in enumerate(ncs_indices):
var = get_variable(nc)
agg = basename(nc).split('_')[-2]
ds = Dataset(nc)
vals = squeeze(ds.variables[var])
if i == 0:
dims = vals.shape
# if mask != None:
# mask = broadcast_arrays(vals, mask)[1]
# vals[mask==False] = nan
indice = '%s_%s' % (var, agg)
data[str(indice)] = ro.FloatVector(ravel(vals))
dataf = ro.DataFrame(data)
predict_gam = mgcv.predict_gam(gam_model, newdata=dataf,
type="response", progress="text",
newdata_guaranteed=True, na_action=stats.na_pass)
prediction = array(predict_gam).reshape(dims)
return prediction
p = "/home/nils/data/AFR-44/tas/"
ncs = [path.join(p, nc) for nc in listdir(p)]
ncd = utils.sort_by_filename(ncs)
geom = subset.get_geom('CMR')
ugid = subset.get_ugid('CMR', geom=geom)
# from ocgis import RequestDataset, OcgOperations
keys = ncd.keys()
print(len(keys))
ocgis.env.OVERWRITE = True
dmap = ocgis.DimensionMap()
dmap.set_variable('x', 'lon', dimension='rlon')
dmap.set_variable('y', 'lat', dimension='rlat')
dmap.set_variable('time', 'time', dimension='time')
#
# print dmap
# rd = ocgis.RequestDataset(ncd[keys[0]][0], crs=ocgis.crs.Spherical(), )
# geos = ocgis.OcgOperations(rd, geom=geom, select_ugid=ugid, output_format='nc', prefix='one_file').execute()
# geos
for key in ncd.keys():
# rd = ocgis.RequestDataset(ncd[key], crs=ocgis.crs.Spherical(), dimension_map=dmap)
# geos = ocgis.OcgOperations(rd,
# geom=geom, select_ugid=ugid,
# output_format='nc',
# prefix=key,
# add_auxiliary_files=False).execute()
geos = call(ncd[key], geom=geom, select_ugid=ugid, output_format='nc', prefix=key,
variable='tas', crs=ocgis.crs.Spherical(), dimension_map=dmap)
    print(geos)
#
# rd = RequestDataset(ncd[keys[0]][0])
# geos = OcgOperations(rd, geom=geom, select_ugid=ugid, output_format='nc').execute()
#
# ncd[keys[0]]
#
# rd = RequestDataset(ncd[keys[0]])
#
# geos = OcgOperations(rd, geom=geom, select_ugid=ugid, output_format='nc').execute()
| 31.386792 | 110 | 0.644725 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,479 | 0.444545 |
effb3d0f203ab8c4e4ea27554b71aa4fcc456877 | 746 | py | Python | jacc/migrations/0019_entrytype_identifier.py | bachvtuan/django-jacc | 37cdd54d8602d25e43a433bd66ccbed61f45a112 | ["MIT"] | 10 | 2019-02-25T23:30:33.000Z | 2021-05-02T18:02:48.000Z | jacc/migrations/0019_entrytype_identifier.py | bachvtuan/django-jacc | 37cdd54d8602d25e43a433bd66ccbed61f45a112 | ["MIT"] | null | null | null | jacc/migrations/0019_entrytype_identifier.py | bachvtuan/django-jacc | 37cdd54d8602d25e43a433bd66ccbed61f45a112 | ["MIT"] | 4 | 2019-09-09T09:33:55.000Z | 2022-01-01T09:28:13.000Z |
# Generated by Django 2.1.2 on 2018-10-18 15:36
from django.db import migrations, models
from django.db.models import F
def migr_code_to_identifier_0019_entrytype_identifier(apps, schema):
EntryType = apps.get_model("jacc", "EntryType")
EntryType.objects.all().update(identifier=F("code"))
class Migration(migrations.Migration):
dependencies = [
("jacc", "0018_auto_20181008_2322"),
]
operations = [
migrations.AddField(
model_name="entrytype",
name="identifier",
field=models.CharField(blank=True, db_index=True, default="", max_length=40, verbose_name="identifier"),
),
migrations.RunPython(migr_code_to_identifier_0019_entrytype_identifier),
]
| 28.692308 | 116 | 0.687668 | 442 | 0.592493 | 0 | 0 | 0 | 0 | 0 | 0 | 138 | 0.184987 |
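As written, the RunPython step above has no reverse function, so migrating backwards past this migration raises IrreversibleError. A common fix (sketch) is to pass RunPython.noop as the backwards callable:

migrations.RunPython(migr_code_to_identifier_0019_entrytype_identifier, migrations.RunPython.noop)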
effc0cb6fddb743089c7bdb462500e13e334b104 | 342 | py | Python | tests/test_invalid_login.py | joshmgrant/Python-Pytest-Nerodia | 55e8d92cd21e3093e6eb434e4ab7b126c974c6f0 | ["MIT"] | 1 | 2019-03-19T08:29:02.000Z | 2019-03-19T08:29:02.000Z | tests/test_invalid_login.py | joshmgrant/Python-Pytest-Nerodia | 55e8d92cd21e3093e6eb434e4ab7b126c974c6f0 | ["MIT"] | null | null | null | tests/test_invalid_login.py | joshmgrant/Python-Pytest-Nerodia | 55e8d92cd21e3093e6eb434e4ab7b126c974c6f0 | ["MIT"] | 2 | 2015-03-10T06:22:31.000Z | 2018-06-18T18:20:59.000Z |
import pytest
def test_locked_out_user(browser):
browser.goto('http://www.saucedemo.com')
browser.text_field(data_test='username').value = 'locked_out_user'
browser.text_field(data_test='password').value ='secret_sauce'
browser.button(type='submit').click()
assert browser.button(class_name='error-button').exists
| 28.5 | 70 | 0.733918 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 99 | 0.289474 |
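The test above relies on a `browser` fixture that is not defined in this file. One way it could be provided in a conftest.py is sketched below; the choice of Chrome and the teardown behaviour are assumptions, not taken from the repository.

import pytest
from nerodia.browser import Browser

@pytest.fixture
def browser():
    b = Browser(browser='chrome')
    yield b
    b.close()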
effc7b61a293ddc828780cd36ebddcbb6d17256b | 403 | py | Python | splicemachine/mlflow_support/flavors/mlflow_onnx.py | myles-novick/pysplice | 96a848d4adda0a937002798865d32939f059f4d1 | ["Apache-2.0"] | null | null | null | splicemachine/mlflow_support/flavors/mlflow_onnx.py | myles-novick/pysplice | 96a848d4adda0a937002798865d32939f059f4d1 | ["Apache-2.0"] | null | null | null | splicemachine/mlflow_support/flavors/mlflow_onnx.py | myles-novick/pysplice | 96a848d4adda0a937002798865d32939f059f4d1 | ["Apache-2.0"] | null | null | null |
from splicemachine.mlflow_support import *
from splicemachine.mlflow_support.mlflow_support import _GORILLA_SETTINGS
import gorilla
import mlflow.onnx
def _log_model(model, name='onnx_model', **flavor_options):
mlflow.log_model(model, name=name, model_lib='onnx', **flavor_options)
gorilla.apply(gorilla.Patch(mlflow.onnx, _log_model.__name__.lstrip('_'), _log_model, settings=_GORILLA_SETTINGS))
| 40.3 | 114 | 0.816377 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.052109 |
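gorilla applies the patch above at import time, installing _log_model under the underscore-stripped name log_model on mlflow.onnx. Typical use afterwards might look like the sketch below; the import path assumes the package layout matches the file path, and the model object is assumed to exist.

import mlflow.onnx
from splicemachine.mlflow_support.flavors import mlflow_onnx  # importing applies the patch

def save_onnx(model):
    # `model` is assumed to be an already-built ONNX model
    mlflow.onnx.log_model(model, name='onnx_model')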
effc868ba3985263b54f27c9ba1dafa032b3a960 | 351 | py | Python | services/shortto.py | joshthecoder/shorty-python | 35687d010683944d75e3f0dce7799903296172c5 | ["MIT"] | 11 | 2015-05-29T04:58:28.000Z | 2020-05-31T17:07:52.000Z | services/shortto.py | joshthecoder/shorty-python | 35687d010683944d75e3f0dce7799903296172c5 | ["MIT"] | null | null | null | services/shortto.py | joshthecoder/shorty-python | 35687d010683944d75e3f0dce7799903296172c5 | ["MIT"] | 2 | 2015-03-10T06:22:31.000Z | 2018-06-18T18:20:59.000Z |
## Shorty
## Copyright 2009 Joshua Roesslein
## See LICENSE
## @url short.to
class Shortto(Service):
def shrink(self, bigurl):
resp = request('http://short.to/s.txt', {'url': bigurl})
return resp.read()
def expand(self, tinyurl):
resp = request('http://long.to/do.txt', {'url': tinyurl})
return resp.read()
| 21.9375 | 65 | 0.60114 | 271 | 0.77208 | 0 | 0 | 0 | 0 | 0 | 0 | 129 | 0.367521 |
effded4514a6e107993718820a8e681baef231bd | 4,743 | py | Python | spinup/examples/pg_math/1_simple_pg.py | MengTianjian/spinningup-pytorch | 6b9b87ed7a8140a52f3c86cc88f61428a9fd1176 | ["MIT"] | 1 | 2019-04-23T04:32:35.000Z | 2019-04-23T04:32:35.000Z | spinup/examples/pg_math/1_simple_pg.py | MengTianjian/spinningup-pytorch | 6b9b87ed7a8140a52f3c86cc88f61428a9fd1176 | ["MIT"] | null | null | null | spinup/examples/pg_math/1_simple_pg.py | MengTianjian/spinningup-pytorch | 6b9b87ed7a8140a52f3c86cc88f61428a9fd1176 | ["MIT"] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
import numpy as np
import gym
from gym.spaces import Discrete, Box
class MLP(nn.Module):
def __init__(self, obs_dim, sizes, activation=nn.Tanh, output_activation=None):
super(MLP, self).__init__()
sizes = [obs_dim] + sizes
layers = nn.ModuleList()
for i in range(len(sizes)-2):
layers.append(nn.Linear(sizes[i], sizes[i+1]))
if activation is not None:
layers.append(activation())
layers.append(nn.Linear(sizes[-2], sizes[-1]))
if output_activation is not None:
layers.append(output_activation())
self.mlp = nn.Sequential(*layers)
def forward(self, x):
out = self.mlp(x)
return out
def train(env_name='CartPole-v0', hidden_sizes=[32], lr=1e-2,
epochs=50, batch_size=5000, render=False):
# make environment, check spaces, get obs / act dims
env = gym.make(env_name)
assert isinstance(env.observation_space, Box), \
"This example only works for envs with continuous state spaces."
assert isinstance(env.action_space, Discrete), \
"This example only works for envs with discrete action spaces."
obs_dim = env.observation_space.shape[0]
n_acts = env.action_space.n
# make core of policy network
policy_network = MLP(obs_dim, sizes=hidden_sizes+[n_acts])
# make train optimizer
optimizer = torch.optim.Adam(policy_network.parameters(), lr=lr)
# for training policy
def train_one_epoch():
# make some empty lists for logging.
batch_obs = [] # for observations
batch_log_probs = [] # for log probabilities
batch_acts = [] # for actions
batch_weights = [] # for R(tau) weighting in policy gradient
batch_rets = [] # for measuring episode returns
batch_lens = [] # for measuring episode lengths
# reset episode-specific variables
obs = env.reset() # first obs comes from starting distribution
done = False # signal from environment that episode is over
ep_rews = [] # list for rewards accrued throughout ep
# render first episode of each epoch
finished_rendering_this_epoch = False
# collect experience by acting in the environment with current policy
while True:
# rendering
if (not finished_rendering_this_epoch) and render:
env.render()
# save obs
batch_obs.append(obs.copy())
# act in the environment
logits = policy_network(torch.tensor(obs).view(1,-1).float())
m = Categorical(logits=logits)
act = m.sample()
batch_log_probs.append(m.log_prob(act))
obs, rew, done, _ = env.step(act.item())
# save action, reward
batch_acts.append(act)
ep_rews.append(rew)
if done:
# if episode is over, record info about episode
ep_ret, ep_len = sum(ep_rews), len(ep_rews)
batch_rets.append(ep_ret)
batch_lens.append(ep_len)
# the weight for each logprob(a|s) is R(tau)
batch_weights += [ep_ret] * ep_len
# reset episode-specific variables
obs, done, ep_rews = env.reset(), False, []
# won't render again this epoch
finished_rendering_this_epoch = True
# end experience loop if we have enough of it
if len(batch_obs) > batch_size:
break
# take a single policy gradient update step
optimizer.zero_grad()
batch_loss = torch.cat(batch_log_probs).mul(torch.tensor(batch_weights))
loss = -batch_loss.mean()
loss.backward()
optimizer.step()
return loss.detach(), batch_rets, batch_lens
# training loop
for i in range(epochs):
batch_loss, batch_rets, batch_lens = train_one_epoch()
print('epoch: %3d \t loss: %.3f \t return: %.3f \t ep_len: %.3f'%
(i, batch_loss, np.mean(batch_rets), np.mean(batch_lens)))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--env_name', '--env', type=str, default='CartPole-v0')
parser.add_argument('--render', action='store_true')
parser.add_argument('--lr', type=float, default=1e-2)
args = parser.parse_args()
print('\nUsing simplest formulation of policy gradient.\n')
train(env_name=args.env_name, render=args.render, lr=args.lr)
| 37.346457 | 83 | 0.609741 | 646 | 0.136201 | 0 | 0 | 0 | 0 | 0 | 0 | 1,231 | 0.25954 |
560191c793a93a302d95f1bc0f3bed7552833bd0 | 2,334 | py | Python | examples/protobuf/protobuftools.py | sunjinopensource/asynmsg | 9c1d14f859cc6702446c3bb30b9916280429bd1d | [
"MIT"
]
| 3 | 2015-05-10T16:10:35.000Z | 2019-02-08T12:22:27.000Z | examples/protobuf/protobuftools.py | sunjinopensource/asynmsg | 9c1d14f859cc6702446c3bb30b9916280429bd1d | [
"MIT"
]
| null | null | null | examples/protobuf/protobuftools.py | sunjinopensource/asynmsg | 9c1d14f859cc6702446c3bb30b9916280429bd1d | [
"MIT"
]
| null | null | null | import asynmsg
import struct
import google.protobuf.message
def protobuf_handler_config(msg_id, msg_cls=None):
def wrapper(func):
@asynmsg.message_handler_config(msg_id)
def wrapper2(self, msg_id, msg_data):
if msg_cls is None:
proto_data = msg_data
else:
proto_data = msg_cls()
if issubclass(msg_cls, google.protobuf.message.Message):
proto_data.ParseFromString(msg_data)
if self.has_netlog:
self.log_info('%s[RECV %04d] %s=%s' % (self.get_low_level_desc(), msg_id,
'' if proto_data is None else proto_data.__class__.__name__,
'' if proto_data is None else str(proto_data)))
return func(self, msg_id, proto_data)
return wrapper2
return wrapper
class MessagePacker(asynmsg.MessagePacker):
def __init__(self):
super(MessagePacker, self).__init__()
def pack(self, msg_id, msg_data):
bytes = struct.pack('H', msg_id)
if msg_data is not None:
bytes += msg_data
return bytes
def unpack(self, bytes_):
msg_id = struct.unpack_from('H', bytes_[:struct.calcsize('H')])[0]
return (msg_id, bytes(bytes_[struct.calcsize('H'):]))
def _send_message(cls, self, msg_id, msg_data):
if self.has_netlog:
self.log_info('%s[SEND %04d] %s=%s' % (self.get_low_level_desc(), msg_id,
'' if msg_data is None else msg_data.__class__.__name__,
'' if msg_data is None else str(msg_data)))
if msg_data is not None:
if isinstance(msg_data, google.protobuf.message.Message):
msg_data = msg_data.SerializeToString()
return super(cls, self).send_message(msg_id, msg_data)
class SessionS(asynmsg.SessionS):
message_packer = MessagePacker()
has_netlog = True
def send_message(self, msg_id, msg_data):
return _send_message(SessionS, self, msg_id, msg_data)
class SessionC(asynmsg.SessionC):
message_packer = MessagePacker()
has_netlog = True
def send_message(self, msg_id, msg_data):
return _send_message(SessionC, self, msg_id, msg_data)
| 33.826087 | 115 | 0.602828 | 849 | 0.363753 | 0 | 0 | 735 | 0.31491 | 0 | 0 | 62 | 0.026564 |
56027f5cae2f8100bbcabdb3f59b412acf2181e4 | 6,402 | py | Python | client/python/thegame/entity.py | afq984/thegame | 3769fffa281b7d5e8d1336d57e73c8e8d4d2289a | [
"MIT"
]
| 3 | 2017-08-18T00:32:54.000Z | 2017-11-18T02:25:51.000Z | client/python/thegame/entity.py | afq984/thegame | 3769fffa281b7d5e8d1336d57e73c8e8d4d2289a | [
"MIT"
]
| 3 | 2017-08-15T09:59:25.000Z | 2018-08-22T17:28:13.000Z | client/python/thegame/entity.py | afq984/thegame | 3769fffa281b7d5e8d1336d57e73c8e8d4d2289a | [
"MIT"
]
| 1 | 2018-08-07T12:38:48.000Z | 2018-08-07T12:38:48.000Z | import collections
from thegame.abilities import Ability
Vector = collections.namedtuple('Vector', ('x', 'y'))
Vector.__doc__ = '''
A 2D vector.
Used to represent a point and velocity in thegame
'''
class _EntityAttribute:
def __init__(self, doc=None):
self.__doc__ = doc
def __set_name__(self, klass, name):
self.name = name
def __get__(self, instance, klass=None):
if instance is None:
return self
return getattr(instance.data.entity, self.name)
def __set__(self, obj, value):
raise AttributeError(f'read-only attribute {self.name!r}')
class _DataAttribute:
def __init__(self, doc=None):
self.__doc__ = doc
def __set_name__(self, klass, name):
self.name = name
def __get__(self, instance, klass=None):
if instance is None:
return self
return getattr(instance.data, self.name)
def __set__(self, obj, value):
raise AttributeError(f'read-only attribute {self.name!r}')
class Entity:
def __init__(self, data):
self.data = data
def __repr__(self):
return (
f'<{self.__class__.__name__}#{self.id} '
f'BD={self.body_damage} '
f'HP={self.health}/{self.max_health} '
f'@({self.position.x:.0f},{self.position.y:.0f})>'
)
id = _EntityAttribute('The id of the entity')
@property
def position(self):
'''
The position of the entity in a 2-tuple (x, y).
'''
p = self.data.entity.position
return Vector(p.x, p.y)
@property
def velocity(self):
'''
The velocity of the entity in a 2-tuple (x, y).
'''
v = self.data.entity.velocity
return Vector(v.x, v.y)
radius = _EntityAttribute('The radius of the entity')
health = _EntityAttribute(
'''
The health of the entity in a non-negative integer.
When a entity's health is less than or equal to zero it dies.
And the one dealing the killing blow is rewarded with
``rewarding_experience``.
'''
)
body_damage = _EntityAttribute(
'''
The body damage of the entity.
When two entities collide, they reduce each other's health
with their body damage.
'''
)
rewarding_experience = _EntityAttribute(
'''
How much experience you will get if you kill this entity.
'''
)
max_health = _EntityAttribute(
'''
The maximum health of this entity.
'''
)
class Polygon(Entity):
'''
The netural polygons.
'''
@property
def edges(self):
'''
How many edges does the polygon have
'''
return self.data.edges
class Bullet(Entity):
'''
The bullet. Shot from a Hero.
'''
@property
def owner_id(self):
'''
The id of the hero owning the bullet
'''
return self.data.owner
HeroAbility = collections.namedtuple(
'HeroAbility',
['level', 'value']
)
HeroAbilityList = collections.namedtuple(
'HeroAbilityList',
[ab.as_camel for ab in Ability]
)
class _HeroAbilityShortcut:
def __init__(self, ability):
self.ability = ability
self.__doc__ = \
f'shortcut to ``hero.abilities.{ability.as_camel}.value``'
def __get__(self, instance, klass=None):
if instance is None:
return self
return instance.abilities[self.ability].value
def __set__(self, obj, value):
        raise AttributeError(f'read-only attribute {self.ability.as_camel!r}')
class _HeroAbilityLevelShortcut:
def __init__(self, ability):
self.ability = ability
self.__doc__ = \
f'shortcut to ``hero.abilities.{ability.as_camel}.level``'
def __get__(self, instance, klass=None):
if instance is None:
return self
return instance.abilities[self.ability].level
def __set__(self, obj, value):
        raise AttributeError(f'read-only attribute {self.ability.as_camel!r}')
class _HeroMeta(type):
@classmethod
def __prepare__(mcs, name, bases, **kwds):
return {
**{
ab.as_camel: _HeroAbilityShortcut(ab)
for ab in Ability
},
**{
ab.as_camel + '_level': _HeroAbilityLevelShortcut(ab)
for ab in Ability
}
}
class Hero(Entity, metaclass=_HeroMeta):
'''
A Hero is a player in thegame.
'''
def __init__(self, data):
super().__init__(data)
# we're doing this so it will not be modified accidently
# maybe not a good way, though.
self.__dict__['abilities'] = HeroAbilityList(
*[HeroAbility(*x) for x in zip(
self.data.ability_levels, self.data.ability_values)]
)
@property
def abilities(self):
'''
returns a tuple of abilities.
Example::
hero.abilities[MaxHealth].value # get the hero's max health
hero.abilities.max_health.value # the same thing
hero.abilities[MaxHealth].level # get the ability level
hero.abilities.max_health.level # the same thing again
'''
return self.__dict__['abilities']
orientation = _DataAttribute(
'''
The orientation of the hero; the direction the barrel is facing at,
in radians.
'''
)
level = _DataAttribute('The level of the hero')
score = _DataAttribute('The score of the hero')
experience = _DataAttribute('The experience the hero has')
experience_to_level_up = _DataAttribute(
'The experience required for the hero to level up')
skill_points = _DataAttribute(
'Number of skill points available to level up abilities'
)
cooldown = _DataAttribute(
'''
How many ticks until a bullet is ready.
Increase the *reload* ability to reduce the cooldown.
``shoot`` and ``shoot_at`` can still be called when on cooldown, but
nothing will happen instead.
'''
)
health_regen_cooldown = _DataAttribute(
'''
How many ticks until the hero can start to regenerate health
'''
)
name = _DataAttribute(
'''
The name of the hero. Not guranteed to be unique
'''
)
| 25.710843 | 76 | 0.592002 | 5,984 | 0.934708 | 0 | 0 | 1,399 | 0.218525 | 0 | 0 | 2,666 | 0.416432 |
4bc69e662f7af10d0c2438ee8ea0f1bb00d372e9 | 3,456 | py | Python | services/web/project/__init__.py | shekharRavi/croationa_topic_api | a68bc69a69c5a6898b74ee0f3adf83b23d29b40b | [
"MIT"
]
| null | null | null | services/web/project/__init__.py | shekharRavi/croationa_topic_api | a68bc69a69c5a6898b74ee0f3adf83b23d29b40b | [
"MIT"
]
| null | null | null | services/web/project/__init__.py | shekharRavi/croationa_topic_api | a68bc69a69c5a6898b74ee0f3adf83b23d29b40b | [
"MIT"
]
| null | null | null | import os
import json
# import wget
from flask import (
Flask,
jsonify,
send_from_directory,
request,
redirect,
url_for
)
from flask_sqlalchemy import SQLAlchemy
import werkzeug
werkzeug.cached_property = werkzeug.utils.cached_property
from werkzeug.utils import secure_filename
from werkzeug.middleware.proxy_fix import ProxyFix
from flask_restx import Api, Resource, fields, abort, reqparse
from celery import Celery
import celery.states as states
from . import api_functions
from . import topic_model_classifier
# global variables
CELERY_BROKER_URL = os.environ.get('CELERY_BROKER_URL')
CELERY_RESULT_BACKEND = os.environ.get('CELERY_RESULT_BACKEND')
celery = Celery('tasks', broker=CELERY_BROKER_URL, backend=CELERY_RESULT_BACKEND)
app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app)
app.config.from_object("project.config.Config")
db = SQLAlchemy(app)
api = Api(app, version='1.0',
title='UGC API services',
description='REST APIs for processing user-generated content')
ns = api.namespace('comments_api', description='REST services API for news comments')
# input and output definitions
topic_model_single_input = api.model('TopicModelSingleInput', {
'text': fields.String(required=True, description='input text for topic')
})
topic_model_single_output = api.model('TopicModelSingleOutput', {
'suggested_label': fields.List(fields.String(), required=True, description='suggested label for topics'),
'description': fields.List(fields.String(), required=True, description='description of suggested label'),
'topic_words': fields.List(fields.String(), required=True, description='topic words')
})
topic_model_list_input = api.model('TopicModelListInput', {
'texts': fields.List(fields.String, required=True, description='input list of texts for topic')
})
topic_model_list_output = api.model('TopicModelListOutput', {
'suggested_label': fields.List(fields.String(), required=True, description='suggested label for topics'),
'description': fields.List(fields.String(), required=True, description='description of suggested label'),
'topic_words': fields.List(fields.String(), required=True, description='topic words')
})
@ns.route('/topic_model/')
class TopicModelClassifier(Resource):
@ns.doc('predict topic from single text')
@ns.expect(topic_model_single_input, validate=True)
@ns.marshal_with(topic_model_single_output)
def post(self):
topics = topic_model_classifier.predict([api.payload['text']])
return {'suggested_label':topics['suggested_label'],
'description':topics['description'],
'topic_words':topics['topic_words'] }
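# A minimal client-side sketch for the resource above, kept in comments so it
# never executes when this module is imported. The host/port and the sample
# sentence are assumptions; only the route ('/comments_api/topic_model/') and
# the payload/response shapes come from the namespace and models defined above.
#
#     import requests
#     resp = requests.post(
#         'http://localhost:5000/comments_api/topic_model/',
#         json={'text': 'Sample news comment to analyse.'},
#     )
#     resp.json()  # -> {'suggested_label': [...], 'description': [...], 'topic_words': [...]}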
@ns.route('/topic_model_list/')
class TopicModelListClassifier(Resource):
@ns.doc('predict topic from list of texts')
@ns.expect(topic_model_list_input, validate=True)
@ns.marshal_with(topic_model_list_output)
def post(self):
topics = topic_model_classifier.predict(api.payload['texts'])
return {'suggested_label': topics['suggested_label'],
'description': topics['description'],
'topic_words': topics['topic_words']}
@app.route("/health/")
#@app.doc('get information about the health of this API')
def health():
return api_functions.health()
@app.route("/documentation/")
#@app.doc('get Swagger documentation about this API')
def documentation():
return api_functions.documentation()
| 35.628866 | 109 | 0.739005 | 895 | 0.25897 | 0 | 0 | 1,227 | 0.355035 | 0 | 0 | 1,067 | 0.308738 |
4bc780e7bf91dc67b2e9b3c85f1b9477066d6c29 | 87 | py | Python | opensanctions/helpers/gender.py | fastbone/opensanctions | dea7f7d073083eece26241bcade697a2b959a09e | [
"MIT"
]
| null | null | null | opensanctions/helpers/gender.py | fastbone/opensanctions | dea7f7d073083eece26241bcade697a2b959a09e | [
"MIT"
]
| null | null | null | opensanctions/helpers/gender.py | fastbone/opensanctions | dea7f7d073083eece26241bcade697a2b959a09e | [
"MIT"
]
| null | null | null | # Welcome to the wonderful world of police databases:
MALE = "male"
FEMALE = "female"
| 17.4 | 53 | 0.724138 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 67 | 0.770115 |
4bc81ada6770b9d230169abfe03aa04a2271356b | 472 | py | Python | backups_operator/servers/views.py | jetchirag/backops | 777e8d3b3b89afdc0482f71f1ecc499036c62968 | [
"MIT"
]
| null | null | null | backups_operator/servers/views.py | jetchirag/backops | 777e8d3b3b89afdc0482f71f1ecc499036c62968 | [
"MIT"
]
| null | null | null | backups_operator/servers/views.py | jetchirag/backops | 777e8d3b3b89afdc0482f71f1ecc499036c62968 | [
"MIT"
]
| null | null | null | from django.shortcuts import render
from django.http import HttpResponse
from backups_operator.servers.models import Server
# Create your views here.
def home(request):
servers = Server.objects.all()
data = {
'servers': servers
}
return render(request, 'sources/home.html', data)
def test(request):
return HttpResponse('test')
def manage(request, id):
return HttpResponse('test')
def authHome(request):
return HttpResponse('test') | 21.454545 | 53 | 0.711864 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 71 | 0.150424 |
4bc9a28e7931530bacfb9f635e9e8859c38140a3 | 1,460 | py | Python | scripts/inspect_docker.py | lijing1996/DockerMonitor | b1105e120d9079a0d24a90ef401221dfceeed7b6 | [
"Apache-2.0"
]
| 1 | 2021-04-12T09:35:08.000Z | 2021-04-12T09:35:08.000Z | scripts/inspect_docker.py | lijing1996/DockerMonitor | b1105e120d9079a0d24a90ef401221dfceeed7b6 | [
"Apache-2.0"
]
| null | null | null | scripts/inspect_docker.py | lijing1996/DockerMonitor | b1105e120d9079a0d24a90ef401221dfceeed7b6 | [
"Apache-2.0"
]
| null | null | null | import argparse
import sys
import subprocess
import psutil
def insepect_process(pid):
"""Determine
1. is the process running in the container
2. if it's true, ourput the container id and the user
:return:
"""
assert psutil.pid_exists(pid), "The process doesn't exist"
try:
result = subprocess.check_output(f'cat /proc/{pid}/cgroup', shell=True)
# print(result)
except subprocess.CalledProcessError as e:
return_code = e.returncode
print(f"Inspect Wrong Error Code{return_code}")
sys.exit(1)
line = result.decode('utf-8').split('\n')[0].strip()
is_in_container = 'docker' in line
container_id = ''
user_name = ''
if is_in_container:
container_id = line.split('/')[-1][:12] #Only save first 12 char of container id
container_info = subprocess.check_output(f'docker ps -a|grep {container_id}', shell=True).decode('utf-8')
user_name = container_info.strip().split()[-1]
return is_in_container, container_id, user_name
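# Illustrative note (an assumption about typical cgroup v1 output, not taken
# from this repository): for a containerised process, the first line of
# /proc/<pid>/cgroup usually looks like
#
#     12:memory:/docker/0123456789abcdef0123456789abcdef0123456789abcdef01234567
#
# so insepect_process() keeps only the first 12 characters of the last path
# component as the container id, then matches it against `docker ps -a`.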
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Inspector for docker")
parser.add_argument("-p", type=int, help="the pid")
args = parser.parse_args()
is_in_container, container_id, user_name = insepect_process(args.p)
print(f"Is the process running in the container :{is_in_container}")
print(f"The container id {container_id}")
print(f"The user name {user_name}") | 33.181818 | 113 | 0.678767 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 522 | 0.357534 |
4bc9dc6d068e1225034cf817b0d3efa5bdeee220 | 128 | py | Python | Aula10.py | rsmelocunha/Python-projects | 1740d1cbafb0aebfffeb0bfdb4ccccf0dbd14093 | [
"MIT"
]
| null | null | null | Aula10.py | rsmelocunha/Python-projects | 1740d1cbafb0aebfffeb0bfdb4ccccf0dbd14093 | [
"MIT"
]
| null | null | null | Aula10.py | rsmelocunha/Python-projects | 1740d1cbafb0aebfffeb0bfdb4ccccf0dbd14093 | [
"MIT"
]
| null | null | null | ano = int(input('Digite o ano do seu carro: '))
idadecarro = 2022 - ano
print('Carro novo' if idadecarro <=3 else 'Carro Velho') | 42.666667 | 56 | 0.695313 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 54 | 0.421875 |
4bca99ba9eda853683218d8ee0882faa531e6181 | 3,531 | py | Python | companion_app/live_log_retrieval.py | MorganJamesSmith/uni-project-ideas | 6c48d0edb526908ed95192e97ab47df1257b6036 | [
"BSD-3-Clause"
]
| 1 | 2020-09-15T15:33:33.000Z | 2020-09-15T15:33:33.000Z | companion_app/live_log_retrieval.py | MorganJamesSmith/uni-project | 6c48d0edb526908ed95192e97ab47df1257b6036 | [
"BSD-3-Clause"
]
| null | null | null | companion_app/live_log_retrieval.py | MorganJamesSmith/uni-project | 6c48d0edb526908ed95192e97ab47df1257b6036 | [
"BSD-3-Clause"
]
| null | null | null | """
implements a wrapper for loading live data from the serial connection and passing it to plotting
"""
import serial
import time
import struct
import plotly.express as px
try:
from . import log_parser
except ImportError:
import log_parser
# TODO: clean up CLI code
class LiveLogFile():
def __init__(self, serial_device_name: str="/dev/ttyACM0", initial_file_offset = -1,
callback_to_call_right_before_grabbing_new_data=lambda:None,
callback_to_call_when_caught_up_with_data=lambda:time.sleep(1)):
self.serial_device_name = serial_device_name
self.internal_buffer: bytes = b""
self.log_file_offset: int = initial_file_offset
self.sleep_callback = callback_to_call_when_caught_up_with_data
self.before_serial_hook = callback_to_call_right_before_grabbing_new_data
def read(self, nbytes=1):
if len(self.internal_buffer) < nbytes:
new_data = self.read_from_device()
self.internal_buffer = self.internal_buffer + new_data
if len(self.internal_buffer) < nbytes:
import warnings
warnings.warn("reading data from device didn't produce enough content to keep going.")
togive = self.internal_buffer[:nbytes]
self.internal_buffer = self.internal_buffer[nbytes:]
return togive
def read_from_device(self):
self.before_serial_hook()
with serial.Serial(self.serial_device_name) as conn:
if self.log_file_offset == -1:
self.set_offset_to_last_reset(conn)
print("READING FROM DEVICE")
command_to_send = f"hcat P21 {self.log_file_offset}\n\r".encode()
hex_data = self.interact_command(conn, command_to_send)
# if len(hex_data) < 20:
# print(f"small data: {hex_data!r}")
if hex_data == "" or hex_data.isspace(): # only \n\r
# we have caught up with live data, need to sleep for a bit
self.sleep_callback()
hex_data = self.interact_command(conn, command_to_send)
result = bytes.fromhex(hex_data)
self.log_file_offset += len(result)
return result
def set_offset_to_last_reset(self, conn):
"""sets the current tracking offset to the last reset found"""
data = bytes.fromhex(self.interact_command(conn,b"hcat P21_OFF\n\r"))
# last reset is just the last 4 bytes
assert len(data)%4 == 0, "length of P21_OFF is not a multiple of 32 bits"
[last_reset_offset] = struct.unpack("I",data[-4:])
self.log_file_offset = last_reset_offset
@staticmethod
def interact_command(conn, command):
conn.write(command)
data_we_just_sent_and_want_to_ignore = conn.read_until(b"\n\r")
if command != data_we_just_sent_and_want_to_ignore:
import warnings; warnings.warn(f"sent: {command!r} but saw back {data_we_just_sent_and_want_to_ignore!r}")
hex_data = conn.read_until(b"> ")
return hex_data.decode().rpartition("> ")[0]
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("input_file", nargs="?",default="/dev/ttyACM0")
ns = parser.parse_args()
for [type, *fields] in log_parser.parse_data(log_parser.parse_raw_entries(LiveLogFile(ns.input_file))):
if type != 4:
continue # ignore all but IMU data
print(*map("{:>8}".format, fields), sep=",")
| 46.460526 | 118 | 0.664118 | 2,818 | 0.798074 | 0 | 0 | 428 | 0.121212 | 0 | 0 | 745 | 0.210988 |
4bcbbf9c4a02cc75f67572b9d3e876126fc65c10 | 313 | py | Python | bin/bucrm.py | aelzenaar/bucephalus | 49cc084a5444ffbde2f850fc1f7b230d3bb8dfbc | [
"MIT"
]
| null | null | null | bin/bucrm.py | aelzenaar/bucephalus | 49cc084a5444ffbde2f850fc1f7b230d3bb8dfbc | [
"MIT"
]
| 12 | 2018-11-09T03:00:28.000Z | 2019-01-02T05:39:55.000Z | bin/bucrm.py | aelzenaar/bucephalus | 49cc084a5444ffbde2f850fc1f7b230d3bb8dfbc | [
"MIT"
]
| null | null | null | import sys
import dbops
from pathlib import Path
if len(sys.argv) < 2:
print("Bucephalus Remove File Script")
print("Usage: " + sys.argv[0] + " <identifier>")
sys.exit()
sys.argv.pop(0)
ident = sys.argv.pop(0)
if dbops.remove_record_by_id(ident) == None:
print("*** Error: failed to remove record.")
| 18.411765 | 50 | 0.677316 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 92 | 0.29393 |
4bcbcc55408d8cf46761e62d961a3d39291ace5d | 440 | py | Python | tests/test_get_current_os_name.py | c-pher/PyWinOS | a16a16a24abaa53a06b9365b2535c8ab31a7fdfb | [
"MIT"
]
| 4 | 2020-04-17T15:54:43.000Z | 2020-11-08T06:39:05.000Z | tests/test_get_current_os_name.py | c-pher/PyWinOS | a16a16a24abaa53a06b9365b2535c8ab31a7fdfb | [
"MIT"
]
| 65 | 2020-01-05T21:45:17.000Z | 2022-03-31T16:50:20.000Z | tests/test_get_current_os_name.py | c-pher/PyWinOS | a16a16a24abaa53a06b9365b2535c8ab31a7fdfb | [
"MIT"
]
| null | null | null | import os
import platform
def test_get_current_os_name(client_local):
    response = client_local.get_current_os_name_local()
    print(response)
    print(os.name)
    # os.name is 'nt' on Windows but 'posix' on both Linux and macOS, so
    # platform.system() is needed to tell Linux and macOS apart.
    if platform.system() == 'Windows':
        assert 'Windows' in response, 'Current OS name is not Windows'
    elif platform.system() == 'Linux':
        assert 'Linux' in response, 'Current OS name is not Linux'
    elif platform.system() == 'Darwin':
        assert response == 'Darwin', 'Current OS name is not MacOS'
| 29.333333 | 70 | 0.661364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 134 | 0.304545 |
4bcc388c3974bdfcd63888beb8ed71bb0fa61380 | 5,133 | py | Python | GUI/GUI_windows/TranslationLanguageWindow.py | Chenger1/stellaris-trpack | 5d85bbbc7374975b5da729899b5691ea77c16ea2 | [
"MIT"
]
| 3 | 2020-07-23T00:32:06.000Z | 2020-10-09T18:05:56.000Z | GUI/GUI_windows/TranslationLanguageWindow.py | Chenger1/stellaris-trpack | 5d85bbbc7374975b5da729899b5691ea77c16ea2 | [
"MIT"
]
| 105 | 2020-07-16T12:23:57.000Z | 2021-01-18T18:11:40.000Z | GUI/GUI_windows/TranslationLanguageWindow.py | Letiso/Stellaris-True-Machine-Translation-Tool | b80431c1c9b49c2482cb9aefa02eb0de62d7cc56 | [
"MIT"
]
| 1 | 2020-07-15T13:30:57.000Z | 2020-07-15T13:30:57.000Z | """
↓ Data initialization ↓
"""
from PyQt5 import QtWidgets, QtCore
from GUI.GUI_windows_source import TranslationLanguage
from json import load, dump
from functools import partial
import copy
from scripts.stylesheets import choosen_lang_style, not_chosen_lang_style
class TranslationLanguageWindow(QtWidgets.QDialog, TranslationLanguage.Ui_Dialog):
def __init__(self, parent):
super().__init__(parent)
self.setupUi(self)
self.setWindowFlags(QtCore.Qt.Window | QtCore.Qt.FramelessWindowHint)
self.setModal(True)
self.parent = parent
self.oldPos = self.pos()
self.buttons_data = {
'RussianButton': 'ru', 'UkrainianButton': 'uk', 'PolishButton': 'pl',
'ChineseButton': 'zh-cn', 'ArabicButton': 'ar', 'BelarusianButton': 'be',
'BulgarianButton': 'bg', 'CroatianButton': 'hr', 'CzechButton': 'cs',
'DanishButton': 'da', 'DutchButton': 'nl', 'EstonianButton': 'et',
'FinnishButton': 'fi', 'FrenchButton': 'fr', 'GermanButton': 'de',
'GreekButton': 'el', 'HungarianButton': 'hu', 'ItalianButton': 'it',
'JapaneseButton': 'ja', 'KoreanButton': 'ko', 'LithuanianButton': 'lt',
'NorwegianButton': 'no', 'PortugueseButton': 'pt', 'SlovakButton': 'sk',
'SpanishButton': 'es', 'SwedishButton': 'sv', 'TurkishButton': 'tr'
}
self.string = self.LanguagesList.text().split()
self.buttons = self.prep_buttons()
self.init_handlers()
self.gridLayout.setColumnMinimumWidth(1, 50)
self.generator = copy.copy(self.buttons)
self.row_index = 0
self.column_index = -1
self.paint_elements()
def init_handlers(self):
self.WindowMoveButton.installEventFilter(self)
self.ExitButton.clicked.connect(self.close)
self.SearchLine.textChanged.connect(self.search_init)
self.ReferenceButton.clicked.connect(lambda: self.parent.reference_window('QLabel_5_TargetLanguage'))
def prep_buttons(self):
buttons = {}
index = 0
for button, lang in self.buttons_data.items():
buttons[button] = QtWidgets.QPushButton(self.string[index])
buttons[button].setObjectName(button)
buttons[button].clicked.connect(partial(self.set_target_language, target_language=lang))
index += 1
return buttons
def search_init(self, text):
self.clean()
self.search(text)
self.choose_lang()
def eventFilter(self, source, event):
"""
        This function tracks the window position and lets the window be
        moved by clicking and dragging its header.
"""
if source == self.WindowMoveButton:
if event.type() == QtCore.QEvent.MouseButtonPress:
self.oldPos = event.pos()
elif event.type() == QtCore.QEvent.MouseMove and self.oldPos is not None:
self.move(self.pos() - self.oldPos + event.pos())
return True
elif event.type() == QtCore.QEvent.MouseButtonRelease:
self.oldPos = None
return super().eventFilter(source, event)
"""
    ↓ Rendering ↓
"""
def clean(self):
for i in reversed(range(self.gridLayout.count())):
self.gridLayout.itemAt(i).widget().setParent(None)
def search(self, text):
with open('Properties.json', 'r', encoding='utf-8') as prop:
properties = load(prop)
self.column_index = -1
self.generator = copy.copy(self.buttons)
for object_name, button in self.buttons.items():
if text not in button.text().lower():
if properties["target_language"] not in self.buttons_data[object_name]:
del self.generator[object_name]
self.paint_elements()
def paint_elements(self):
for object_name, button in self.generator.items():
if self.column_index < 2:
self.column_index += 1
else:
self.column_index = 0
self.row_index += 1
self.gridLayout.addWidget(button, self.row_index, self.column_index)
self.choose_lang()
"""
    ↓ Selecting the language the files will be translated into ↓
"""
def choose_lang(self):
with open("Properties.json", 'r', encoding='utf-8') as prop:
properties = load(prop)
for object_name, button in self.buttons.items():
if self.buttons_data[object_name] == properties["target_language"]:
choosen_lang_style(button)
else:
not_chosen_lang_style(button)
def set_target_language(self, target_language=None):
with open("Properties.json", 'r', encoding='utf-8') as prop:
properties = load(prop)
properties["target_language"] = target_language
with open("Properties.json", 'w', encoding='utf-8') as prop:
dump(properties, prop)
self.choose_lang()
| 37.742647 | 109 | 0.601403 | 4,966 | 0.938575 | 0 | 0 | 0 | 0 | 0 | 0 | 1,223 | 0.231147 |
4bcc5632c54ea11fd3756fc709d789ae83392c50 | 55 | py | Python | dearpygui_map/__init__.py | mkouhia/dearpygui_map | 8db86e6917b88c118aff94a22e383ef517c40620 | [
"MIT"
]
| null | null | null | dearpygui_map/__init__.py | mkouhia/dearpygui_map | 8db86e6917b88c118aff94a22e383ef517c40620 | [
"MIT"
]
| 21 | 2022-02-21T08:31:03.000Z | 2022-03-08T19:27:33.000Z | dearpygui_map/__init__.py | mkouhia/dearpygui_map | 8db86e6917b88c118aff94a22e383ef517c40620 | [
"MIT"
]
| null | null | null | """Map widget for Dear PyGui"""
__version__ = "0.0.1"
| 13.75 | 31 | 0.636364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 38 | 0.690909 |
4bcdc5c2dfab2675a93de75f43fee73049b1f7fb | 1,347 | py | Python | demosauruswebapp/demosaurus/subject_headings.py | KBNLresearch/Demosaurus | 9235e315d9eef9d8d64f94a90ab4fc8220670ef2 | [
"Apache-2.0"
]
| 1 | 2020-06-25T16:39:35.000Z | 2020-06-25T16:39:35.000Z | demosauruswebapp/demosaurus/subject_headings.py | KBNLresearch/Demosaurus | 9235e315d9eef9d8d64f94a90ab4fc8220670ef2 | [
"Apache-2.0"
]
| 6 | 2020-03-06T12:31:38.000Z | 2021-09-20T15:08:17.000Z | demosauruswebapp/demosaurus/subject_headings.py | KBNLresearch/Demosaurus | 9235e315d9eef9d8d64f94a90ab4fc8220670ef2 | [
"Apache-2.0"
]
| null | null | null | from flask import (
Blueprint, request)#, flash, g, redirect, render_template, get_template_attribute, url_for, jsonify
# )
# from werkzeug.exceptions import abort
import requests
# from demosaurus.db import get_db
# import pandas as pd
# from nltk.metrics import distance
# import re
# import numpy as np
bp = Blueprint('subject_headings', __name__)
annif_url = 'https://kbresearch.nl/annif/v1/'
@bp.route('/annif-projects/')
def annif_projects():
response = requests.get(annif_url+'projects')
if response.status_code == 200:
return response.json()
else:
print('Unable to obtain Annif projects from', response.url)
@bp.route('/annif-suggestions/')
def annif_suggestions():
params = dict(request.args) # turn into a mutable dictionary
project = params.pop('project')
project_options = [proj['project_id'] for proj in annif_projects()['projects']]
print(project_options)
if project not in project_options:
print("Annif was called with non-existing project parameter:", project)
url = annif_url + "projects/" + project + "/suggest"
response = requests.post(url, data = params)
if response.status_code == 200:
return response.json()
else:
print('Unable to obtain Annif suggestions from', response.url)
print(response.status_code) | 32.071429 | 104 | 0.697105 | 0 | 0 | 0 | 0 | 938 | 0.696362 | 0 | 0 | 561 | 0.416481 |
4bcf6cd36f0a7b205b865ac0d3e32dffbb450890 | 1,114 | py | Python | Modules/ZoneAnalysis_FULL.py | amaurijp/BioSPA | fd10b58c5a6aa444f34690a98cd939dd5111c4d5 | [
"MIT"
]
| 3 | 2019-10-29T17:26:24.000Z | 2021-01-08T22:15:17.000Z | Modules/ZoneAnalysis_FULL.py | amaurijp/BioSPA | fd10b58c5a6aa444f34690a98cd939dd5111c4d5 | [
"MIT"
]
| null | null | null | Modules/ZoneAnalysis_FULL.py | amaurijp/BioSPA | fd10b58c5a6aa444f34690a98cd939dd5111c4d5 | [
"MIT"
]
| null | null | null |
def ZoneAnalysis_FULL(SettingsDic):
import MODZoneAnalysis
import os
diretorio=os.getcwd()
    # Analysis of the regions containing bacteria
MODZoneAnalysis.DetVolume(diretorio,
importstackRootName=SettingsDic['FolderName'],
FirstStack=1,LastStack=SettingsDic['timePoints'],
FirstSlice=1,LastSlice=SettingsDic['SliceNumber'],
TXTExportDir='/ExportedData/VolumeValues',
importformat=SettingsDic['imageFormat'],
RegionAnalysis=False)
'''
    # Analysis of the regions containing EPS
MODZoneAnalysis.DetVolume(diretorio,
importstackRootName='/BinarizedCorr/EPS_THR4',
FirstStack=1,LastStack=18,
FirstSlice=1,LastSlice=123,
TXTExportDir='/VolumeValues/EPS_THR4',
importformat='.png',
RegionAnalysis=False)
''' | 37.133333 | 80 | 0.502693 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 572 | 0.512545 |
4bcf87fdcdb2f4bd16f622a1e7e79b1aeb825b7c | 3,448 | py | Python | server/Kusa/views.py | meshellchoo/senior-design-project-kusa | 829575259c31a620c895a0f2d5654ea099298eb6 | [
"MIT"
]
| 1 | 2022-03-28T23:20:09.000Z | 2022-03-28T23:20:09.000Z | server/Kusa/views.py | meshellchoo/senior-design-project-kusa | 829575259c31a620c895a0f2d5654ea099298eb6 | [
"MIT"
]
| null | null | null | server/Kusa/views.py | meshellchoo/senior-design-project-kusa | 829575259c31a620c895a0f2d5654ea099298eb6 | [
"MIT"
]
| 2 | 2022-03-24T07:17:27.000Z | 2022-03-28T23:20:18.000Z | from django.http import HttpResponse
from django.http.response import JsonResponse
from django.shortcuts import render
from rest_framework.serializers import Serializer
from admin import settings
import requests
from rest_framework import viewsets
from time import gmtime, strftime
from Kusa.models import SteamUser
from django.views.decorators.csrf import csrf_exempt
from bson import ObjectId
import json
from smtplib import SMTPException
from django.http import BadHeaderError
from django.http.response import JsonResponse
from django.shortcuts import redirect
from admin import settings
from admin.settings import FRONTEND_URL
from Kusa.authentication import get_token
from Kusa.authentication import validate_token
from collections import OrderedDict # keep this line for get_user_daily_hours
from datetime import datetime
from django.core.mail import send_mail
from Kusa.data_collection import get_steam_user
JWT_SECRET_KEY = settings.JWT_SECRET_KEY
conf = settings.CONF
@csrf_exempt
def add_post(request):
friendList = request.POST.get("FriendList").split(",")
friendRequest = request.POST.get("FriendRequest").split(",")
dummy=SteamUser(Name=request.POST.get("Name"),SteamID = request.POST.get("SteamID"),FriendList=friendList,FriendRequest=friendRequest)
dummy.save()
return HttpResponse("Inserted")
def close_view(request):
response = redirect(FRONTEND_URL + '/steamauth')
token = get_token(request)
response.set_cookie('token', (token), max_age=1000)
return response
def get_user_daily_hours(request):
"""
will return an array of the user's daily hours
Parameters: request
Returns: returns a list of json obj -> [{"date" : date1, "hours" : num_hours1},{"date" : date2, "hours" : num_hours2}]
"""
response = validate_token(request)
if "steamid" in response:
user = get_steam_user(response["steamid"])
daily_hours = user['daily_hours']
list_of_json = [dict(day) for day in eval(daily_hours)]
return JsonResponse(list_of_json, safe=False)
else:
return response
def get_user_achievements(request):
"""
Returns: returns a list of json obj -> [{id" : 1, "progress" : 0, "date_achieved" : "N/A"},...,{id" : 10, "progress" : 20, "date_achieved" : "03/10/2022"}]
"""
response = validate_token(request)
if "steamid" in response:
user = get_steam_user(response["steamid"])
achievements = user['achievements']
list_of_json = [dict(a) for a in eval(achievements)]
return JsonResponse(list_of_json , safe=False)
else:
return response
def send_user_email(steam_id):
success = False
user = get_steam_user(steam_id)
daily_hours = user['daily_hours']
goal = user['goal']
list_of_json = [dict(day) for day in eval(daily_hours)]
sum = 0
for value in list_of_json:
if(datetime.today().isocalendar()[1] == datetime.strptime(value['date'], "%m/%d/%Y").isocalendar()[1]):
sum += value['hours']
if(sum > goal):
try:
send_mail("Kusa Playtime Exceeded", 'You exceeded your goal for this week! Better luck next time. Remember, you can change your goal in the Kusa app.', settings.EMAIL_HOST_USER, [user['email']], fail_silently=False)
success = True
except SMTPException as e:
print(e)
return JsonResponse({'success': success}, safe=False)
| 36.680851 | 231 | 0.706787 | 0 | 0 | 0 | 0 | 351 | 0.101798 | 0 | 0 | 758 | 0.219838 |
4bd7a0c2448cc617b69365e5bcaa51dd7caf5ceb | 478 | py | Python | webapp/config.py | sebastien6/simple-project | 2f662c74695a7f566172330dcb7140efd6c71723 | [
"MIT"
]
| null | null | null | webapp/config.py | sebastien6/simple-project | 2f662c74695a7f566172330dcb7140efd6c71723 | [
"MIT"
]
| null | null | null | webapp/config.py | sebastien6/simple-project | 2f662c74695a7f566172330dcb7140efd6c71723 | [
"MIT"
]
| null | null | null | import os
from redis import StrictRedis
class Config(object):
SECRET_KEY = os.environ.get('FLASK_SECRET')
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
SQLALCHEMY_TRACK_MODIFICATIONS = False
SESSION_TYPE = 'redis'
SESSION_REDIS = StrictRedis(host=os.environ.get('REDIS_HOST'), db=0)
PERMANENT_SESSION_LIFETIME = 600
CACHE_REDIS = StrictRedis(host=os.environ.get('REDIS_HOST'), db=1)
CACHE_REDIS_DEFAULT_TIMEOUT = 3600
| 31.866667 | 73 | 0.728033 | 428 | 0.895397 | 0 | 0 | 0 | 0 | 0 | 0 | 59 | 0.123431 |
4bd7fb5f5d36389c2c5a61d083613ef4ed377538 | 15,928 | py | Python | moleculegen/estimation/model.py | sanjaradylov/moleculegen-ml | 4acb77244909cf8cfe4fb75461d4bed9b77f29f1 | [
"BSD-3-Clause"
]
| 3 | 2021-11-18T11:41:21.000Z | 2022-02-08T22:01:20.000Z | moleculegen/estimation/model.py | sanjaradylov/moleculegen-ml | 4acb77244909cf8cfe4fb75461d4bed9b77f29f1 | [
"BSD-3-Clause"
]
| 20 | 2019-12-12T11:47:32.000Z | 2021-06-02T07:55:18.000Z | moleculegen/estimation/model.py | sanjaradylov/moleculegen-ml | 4acb77244909cf8cfe4fb75461d4bed9b77f29f1 | [
"BSD-3-Clause"
]
| 2 | 2019-12-23T08:17:01.000Z | 2022-02-08T22:01:21.000Z | """
Generative language models.
Classes
-------
SMILESEncoderDecoder
A generative recurrent neural network to encode-decode SMILES strings.
SMILESEncoderDecoderFineTuner
The fine-tuner of SMILESEncoderDecoder model.
"""
__all__ = (
'SMILESEncoderDecoder',
'SMILESEncoderDecoderFineTuner',
)
import json
import warnings
from typing import Optional, Union
import mxnet as mx
from mxnet import gluon
from . import _gluon_common
from .base import SMILESEncoderDecoderABC
from ..description.common import OneHotEncoder
class SMILESEncoderDecoder(SMILESEncoderDecoderABC):
"""A generative recurrent neural network to encode-decode SMILES strings.
Parameters
----------
vocab_size : int
The vocabulary dimension, which will indicate the number of output
neurons of a decoder.
initialize : bool, default True
Whether to initialize model parameters.
When one decides to load parameters from a file, deferred
initialization is needless.
use_one_hot : bool, default False
Whether to use one-hot-encoding or an embedding layer.
embedding_dim : int, default 4
The output dimension of an embedding layer.
embedding_init : str or mxnet.init.Initializer,
        default mxnet.init.Uniform()
The parameter initializer of an embedding layer.
embedding_prefix : str, default 'embedding_'
The prefix of an embedding block.
rnn : {'vanilla', 'lstm', 'gru'}, default 'lstm'
A recurrent layer.
n_rnn_layers : int, default 1
The number of layers of a (deep) recurrent layer.
n_rnn_units : int, default 64
The number of neurons in an RNN.
rnn_dropout : float, default 0.0
The dropout rate of a recurrent layer.
rnn_init : str or mxnet.init.Initializer,
default mxnet.init.Orthogonal()
The parameter initializer of a recurrent layer.
rnn_prefix : str, default 'encoder_'
The prefix of an encoder block.
n_dense_layers : int, default 1
The number of dense layers.
n_dense_units : int, default 128
The number of neurons in each dense layer.
dense_activation : str, default 'relu'
The activation function in a dense layer.
dense_dropout : float, default 0.0
The dropout rate of a dense layer.
dense_init : str or mxnet.init.Initializer,
default mxnet.init.Xavier()
The parameter initializer of a dense layer.
dense_prefix : str, default 'decoder_'
The prefix of a decoder block.
tie_weights : bool, default False
Whether to share the embedding block parameters w/ a decoder block.
dtype : str, default 'float32'
Data type.
ctx : mxnet.context.Context, default mxnet.context.cpu()
CPU or GPU.
prefix : str, default None
params : mxnet.gluon.ParameterDict, default None
Attributes
----------
ctx : mxnet.context.Context
The model's context.
embedding : OneHotEncoder or mxnet.gluon.nn.Embedding
An embedding layer.
encoder : mxnet.gluon.rnn.RNN or mxnet.gluon.rnn.LSTM
or mxnet.gluon.rnn.GRU
An RNN encoder.
decoder : mxnet.gluon.nn.Dense or mxnet.gluon.nn.Sequential
A Feed-Forward NN decoder.
"""
def __init__(
self,
vocab_size: int,
initialize: bool = True,
use_one_hot: bool = False,
embedding_dim: int = 4,
embedding_dropout: float = 0.,
embedding_init: Optional[
Union[str, mx.init.Initializer]] = mx.init.Uniform(),
embedding_prefix: str = 'embedding_',
rnn: str = 'lstm',
n_rnn_layers: int = 1,
n_rnn_units: int = 64,
rnn_dropout: float = 0.,
rnn_init: Optional[Union[str, mx.init.Initializer]] = mx.init.Orthogonal(),
rnn_prefix: str = 'encoder_',
n_dense_layers: int = 1,
n_dense_units: int = 128,
dense_activation: str = 'relu',
dense_dropout: float = 0.,
dense_init: Optional[Union[str, mx.init.Initializer]] = mx.init.Xavier(),
dense_prefix: str = 'decoder_',
tie_weights: bool = False,
dtype: Optional[str] = 'float32',
*,
ctx: mx.context.Context = mx.context.cpu(),
prefix: Optional[str] = None,
params: Optional[gluon.ParameterDict] = None,
):
warnings.warn(
message=(
f'{self.__class__.__name__} is deprecated; '
                f'will be removed in 1.1.0; '
                f'consider `moleculegen.estimation.SMILESRNN` instead.'
),
category=DeprecationWarning,
)
# Validate the formal parameters that are not explicitly sent into and
# validated in mxnet.gluon objects.
if not isinstance(use_one_hot, bool):
raise TypeError(
'`use_one_hot` must be either True for OneHotEncoder layer '
'or False for Embedding layer.'
)
if not isinstance(initialize, bool):
raise TypeError(
'`initialize` must be either True for deferred '
'initialization or False for no initialization.'
)
if rnn not in _gluon_common.RNN_MAP:
raise ValueError(
f'The recurrent layer must be one of '
f'{list(_gluon_common.RNN_MAP.keys())}.'
)
if n_dense_layers < 1:
raise ValueError(
'The number of dense layers must be positive non-zero.'
)
if (
tie_weights
and (
embedding_dim != n_rnn_units
or
n_dense_layers > 1
and embedding_dim != n_dense_units
)
):
raise ValueError(
f'When sharing weights, the number of hidden units must be equal to '
f'the embedding dimension.'
)
# Initialize mxnet.gluon.Block parameters.
super().__init__(ctx=ctx, prefix=prefix, params=params)
with self.name_scope():
# Define (and initialize) an embedding layer.
if use_one_hot:
self._embedding = OneHotEncoder(vocab_size)
else:
embedding_block = gluon.nn.Embedding(
input_dim=vocab_size,
output_dim=embedding_dim,
dtype=dtype,
prefix=embedding_prefix,
)
if embedding_dropout > 1e-3:
seq_prefix = f'{embedding_prefix.rstrip("_")}seq_'
self._embedding = gluon.nn.HybridSequential(prefix=seq_prefix)
self._embedding.add(embedding_block)
self._embedding.add(gluon.nn.Dropout(embedding_dropout))
shared_params = self._embedding[0].params if tie_weights else None
else:
self._embedding = embedding_block
shared_params = self._embedding.params if tie_weights else None
if initialize:
self._embedding.initialize(init=embedding_init, ctx=ctx)
# Select and initialize a recurrent block.
self._encoder = _gluon_common.RNN_MAP[rnn](
hidden_size=n_rnn_units,
num_layers=n_rnn_layers,
dropout=rnn_dropout,
dtype=dtype,
prefix=rnn_prefix,
)
if initialize:
self._encoder.initialize(init=rnn_init, ctx=ctx)
# Define and initialize a dense layer(s).
self._decoder = _gluon_common.mlp(
n_layers=n_dense_layers,
n_units=n_dense_units,
activation=dense_activation,
output_dim=vocab_size,
dtype=dtype,
dropout=dense_dropout,
prefix=dense_prefix,
params=shared_params,
)
if initialize:
self._decoder.initialize(init=dense_init, ctx=ctx)
@property
def embedding(self) -> Union[OneHotEncoder, gluon.nn.Embedding]:
"""Return the embedding layer.
"""
return self._embedding
@property
def encoder(self) -> Union[gluon.rnn.RNN, gluon.rnn.LSTM, gluon.rnn.GRU]:
"""Return the RNN encoder.
"""
return self._encoder
@property
def decoder(self) -> Union[gluon.nn.Dense, gluon.nn.Sequential]:
"""Return the Feed-Forward NN decoder.
"""
return self._decoder
@classmethod
def from_config(cls, config_file: str) -> 'SMILESEncoderDecoder':
"""Instantiate a model loading formal parameters from a JSON file `config_file`.
config_file : str
A JSON file to load formal parameters from.
model : SMILESEncoderDecoder
"""
with open(config_file) as fh:
raw_data = json.load(fh)
return cls(
vocab_size=raw_data['vocab_size'],
initialize=raw_data['initialize'],
tie_weights=raw_data['tie_weights'],
dtype=raw_data['dtype'],
ctx=_gluon_common.get_ctx(raw_data['ctx'].lower()),
prefix=raw_data['prefix'],
use_one_hot=raw_data['embedding']['use_one_hot'],
embedding_dim=raw_data['embedding']['dim'],
embedding_dropout=raw_data['embedding']['dropout'],
embedding_init=_gluon_common.INIT_MAP[raw_data['embedding']['init'].lower()],
embedding_prefix=raw_data['embedding']['prefix'],
rnn=raw_data['encoder']['rnn'],
n_rnn_layers=raw_data['encoder']['n_layers'],
n_rnn_units=raw_data['encoder']['n_units'],
rnn_dropout=raw_data['encoder']['dropout'],
rnn_init=_gluon_common.INIT_MAP[raw_data['encoder']['init'].lower()],
rnn_prefix=raw_data['encoder']['prefix'],
n_dense_layers=raw_data['decoder']['n_layers'],
n_dense_units=raw_data['decoder']['n_units'],
dense_activation=raw_data['decoder']['activation'],
dense_dropout=raw_data['decoder']['dropout'],
dense_init=_gluon_common.INIT_MAP[raw_data['decoder']['init'].lower()],
dense_prefix=raw_data['decoder']['prefix'],
)
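    # Sketch of the JSON layout `from_config` expects; the concrete values are
    # illustrative placeholders, and the exact strings accepted for the "init"
    # and "ctx" fields depend on the maps in `_gluon_common`:
    #
    #     {"vocab_size": 40, "initialize": true, "tie_weights": false,
    #      "dtype": "float32", "ctx": "cpu", "prefix": null,
    #      "embedding": {"use_one_hot": false, "dim": 4, "dropout": 0.0,
    #                    "init": "uniform", "prefix": "embedding_"},
    #      "encoder": {"rnn": "lstm", "n_layers": 1, "n_units": 64,
    #                  "dropout": 0.0, "init": "orthogonal", "prefix": "encoder_"},
    #      "decoder": {"n_layers": 1, "n_units": 128, "activation": "relu",
    #                  "dropout": 0.0, "init": "xavier", "prefix": "decoder_"}}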
@classmethod
def load_fine_tuner(
cls,
path: str,
update_features: bool = True,
decoder_init: Optional[Union[str, mx.init.Initializer]] = mx.init.Xavier(),
) -> 'SMILESEncoderDecoder':
"""Create a new fine-tuner model: load model configuration and parameters, and
initialize decoder weights.
Parameters
----------
path : str
The path to the directory of model configuration and parameters.
path/config.json - the formal parameters of a model;
path/weights.params - the parameters of a model.
update_features : bool, default True
Whether to update embedding and encoder parameters during training.
decoder_init : str or mxnet.init.Initializer, default None
A decoder initializer.
Returns
-------
model : SMILESEncoderDecoder
"""
model = cls.from_config(f'{path}/config.json')
model.load_parameters(f'{path}/weights.params', ctx=model.ctx)
if not update_features:
model.embedding.collect_params().setattr('grad_req', 'null')
model.encoder.collect_params().setattr('grad_req', 'null')
model.decoder.initialize(init=decoder_init, force_reinit=True, ctx=model.ctx)
return model
class SMILESEncoderDecoderFineTuner(SMILESEncoderDecoderABC):
"""The fine-tuner of SMILESEncoderDecoder model. Loads embedding and encoder blocks,
and trains a new decoder block.
Parameters
----------
model : SMILESEncoderDecoder
An encoder-decoder model to fine-tune.
output_dim : int
The number of output neurons.
initialize : bool, default True
Whether to initialize decoder's parameters.
update_features : bool, default True
Whether to update embedding and encoder parameters during training.
n_dense_layers : int, default 1
The number of dense layers.
n_dense_units : int, default 128
The number of neurons in each dense layer.
dense_activation : str, default 'relu'
The activation function in a dense layer.
dense_dropout : float, default 0.0
The dropout rate of a dense layer.
dense_init : str or mxnet.init.Initializer,
default mxnet.init.Xavier()
The parameter initializer of a dense layer.
dense_prefix : str, default 'decoder_'
The prefix of a decoder block.
dtype : str, default 'float32'
Data type.
ctx : mxnet.context.Context, default mxnet.context.cpu()
CPU or GPU.
prefix : str, default None
params : mxnet.gluon.ParameterDict, default None
Attributes
----------
ctx : mxnet.context.Context
The model's context.
embedding : OneHotEncoder or mxnet.gluon.nn.Embedding
An embedding layer.
encoder : mxnet.gluon.rnn.RNN or mxnet.gluon.rnn.LSTM
or mxnet.gluon.rnn.GRU
An RNN encoder.
decoder : mxnet.gluon.nn.Dense or mxnet.gluon.nn.Sequential
A Feed-Forward NN decoder.
"""
def __init__(
self,
model: SMILESEncoderDecoder,
output_dim: int,
initialize: bool = True,
update_features: bool = True,
n_dense_layers: int = 1,
n_dense_units: int = 128,
dense_activation: str = 'relu',
dense_dropout: float = 0.,
dense_init: Optional[Union[str, mx.init.Initializer]] = mx.init.Xavier(),
dense_prefix: str = 'fine_tuner_decoder_',
dtype: Optional[str] = 'float32',
*,
ctx: mx.context.Context = mx.context.cpu(),
prefix: Optional[str] = None,
params: Optional[gluon.ParameterDict] = None,
):
warnings.warn(
message=(
f'{self.__class__.__name__} is deprecated; '
                f'will be removed in 1.1.0; '
                f'consider `moleculegen.estimation.SMILESRNN.load_fine_tuner` instead.'
),
category=DeprecationWarning,
)
super().__init__(ctx=ctx, prefix=prefix, params=params)
model.ctx = self.ctx
self._embedding = model.embedding
self._encoder = model.encoder
if not update_features:
self._embedding.collect_params().setattr('grad_req', 'null')
self._encoder.collect_params().setattr('grad_req', 'null')
self._decoder = _gluon_common.mlp(
n_layers=n_dense_layers,
n_units=n_dense_units,
activation=dense_activation,
output_dim=output_dim,
dtype=dtype,
dropout=dense_dropout,
prefix=dense_prefix,
params=None,
)
if initialize:
self._decoder.initialize(init=dense_init, ctx=self.ctx)
@property
def embedding(self) -> Union[OneHotEncoder, gluon.nn.Embedding]:
"""Return the embedding layer.
"""
return self._embedding
@property
def encoder(self) -> Union[gluon.rnn.RNN, gluon.rnn.LSTM, gluon.rnn.GRU]:
"""Return the RNN encoder.
"""
return self._encoder
@property
def decoder(self) -> Union[gluon.nn.Dense, gluon.nn.Sequential]:
"""Return the Feed-Forward NN decoder.
"""
return self._decoder
| 36.28246 | 89 | 0.597878 | 15,387 | 0.966035 | 0 | 0 | 4,083 | 0.256341 | 0 | 0 | 7,411 | 0.465281 |
4bdd1cbdd04848eac2016e69df46179145d19903 | 2,351 | py | Python | projects/radish_paper/run_tpch_radish_sym_gbp.py | bmyerz/log2slqite | edb6bcba061132caa545b5e46c98b86547c68b48 | [
"MIT"
]
| null | null | null | projects/radish_paper/run_tpch_radish_sym_gbp.py | bmyerz/log2slqite | edb6bcba061132caa545b5e46c98b86547c68b48 | [
"MIT"
]
| 1 | 2015-07-15T00:00:19.000Z | 2015-07-15T00:06:33.000Z | projects/radish_paper/run_tpch_radish_sym_gbp.py | bmyerz/log2slqite | edb6bcba061132caa545b5e46c98b86547c68b48 | [
"MIT"
]
| null | null | null | from grappa import GrappaExperiment, MPIRunGrappaExperiment
tpch_bigdatann = MPIRunGrappaExperiment({
'trial': range(1, 3 + 1),
#'qn': [x for x in range(8, 20 + 1) if x!=7 and x!=9 and x!=8 and x!=10 and x!=11], # exclude 7 that runs forever
#'qn': [x for x in range(1, 20 + 1) if x!=7 and x!=10 and x!=11 and x!=20], # exclude 7 that runs forever
'qn': [x for x in range(1, 20 + 1) if x!=7], # exclude 7 that runs forever
'exe': lambda qn: "grappa_tpc_q{0}_sym_gbp.exe".format(qn),
'sf': 10,
'ppn': 16,
'nnode': 16,
'np': lambda ppn, nnode: ppn*nnode,
'query': lambda qn: 'q{0}'.format(qn),
'vtag': 'v99-noalign',
'machine': 'bigdata',
'system': 'radish-sym-gbp-noalign'
},
{
'shared_pool_memory_fraction': 0.5
})
tpch_sampa = GrappaExperiment({
'trial': range(1, 3 + 1),
#'qn': [x for x in range(8, 20 + 1) if x!=7 and x!=9 and x!=8 and x!=10 and x!=11], # exclude 7 that runs forever
'qn': [x for x in range(1, 20)], #if x!=7 and x!=10 and x!=11 and x!=20], # exclude 7 that runs forever
'exe': lambda qn: "grappa_tpc_q{0}_sym_gbp.exe".format(qn),
'sf': 10,
'ppn': 12,
'nnode': 16,
'np': lambda ppn, nnode: ppn*nnode,
'query': lambda qn: 'q{0}'.format(qn),
'vtag': 'align-fix',
'machine': 'sampa',
'system': 'radish-sym-gbp'
},
{
'shared_pool_memory_fraction': 0.5
})
#tpch_bigdatann.run()
tpch_sampa.run()
| 54.674419 | 146 | 0.35772 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 795 | 0.338154 |
4bdf2c801d395b3543ef88d753e14f32dd4a9b4a | 362 | py | Python | Activation Function/Softmax/softmax_cpp/test.py | kaka-lin/ML-Notes | 047b88d59346b2ec719b1b3e2fcd605e1ccfaf91 | [
"MIT"
]
| null | null | null | Activation Function/Softmax/softmax_cpp/test.py | kaka-lin/ML-Notes | 047b88d59346b2ec719b1b3e2fcd605e1ccfaf91 | [
"MIT"
]
| null | null | null | Activation Function/Softmax/softmax_cpp/test.py | kaka-lin/ML-Notes | 047b88d59346b2ec719b1b3e2fcd605e1ccfaf91 | [
"MIT"
]
| null | null | null | import numpy as np
from scipy.special import softmax
np.set_printoptions(precision=6)
def k_softmax(x):
exp = np.exp(x)
return exp / np.sum(exp, axis=1)
if __name__ == "__main__":
x = np.array([[1, 4.2, 0.6, 1.23, 4.3, 1.2, 2.5]])
print("Input Array: ", x)
print("Softmax Array: ", k_softmax(x))
print("Softmax Array: ", softmax(x))
| 21.294118 | 54 | 0.618785 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 59 | 0.162983 |
4be3297fddc6fb6fba4bd8355331638ba8b66d70 | 707 | py | Python | models/zeros.py | DawyD/illumination-preserving-rotations | 4fb69dc2526579a7677c27e75eae3a0b0000b5de | [
"MIT"
]
| null | null | null | models/zeros.py | DawyD/illumination-preserving-rotations | 4fb69dc2526579a7677c27e75eae3a0b0000b5de | [
"MIT"
]
| null | null | null | models/zeros.py | DawyD/illumination-preserving-rotations | 4fb69dc2526579a7677c27e75eae3a0b0000b5de | [
"MIT"
]
| null | null | null | import tensorflow as tf
from tensorflow.keras.layers import Conv2D, ReLU, SeparableConv2D, Input, SpatialDropout2D, MaxPool2D, Concatenate, Conv2DTranspose, BatchNormalization
from tensorflow.keras.regularizers import l1, l2
from models.net import Net
from layers.kerasGroupNorm import GroupNormalization
class Zeros(Net):
def __init__(self, output_shape):
        super(Zeros, self).__init__()
self.output_shape = output_shape
def net(self):
model = tf.keras.Sequential()
model.add(Input(self.output_shape, name='output_shape'))
model.add(tf.keras.layers.Lambda(lambda x: tf.zeros_like(x)[..., 0:1]))
return model
def get_name(self):
return "Zeros"
| 35.35 | 151 | 0.721358 | 400 | 0.565771 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.029703 |
4be3c4c8872c7fe3765bcf529106a1cedf839f7c | 7,008 | py | Python | util/post_db.py | ReadMoa/web-service | f47c6cce471d97104074d403ab9ec39a08276213 | [
"MIT"
]
| null | null | null | util/post_db.py | ReadMoa/web-service | f47c6cce471d97104074d403ab9ec39a08276213 | [
"MIT"
]
| 21 | 2020-08-19T05:05:45.000Z | 2021-02-07T23:21:17.000Z | util/post_db.py | ReadMoa/web-service | f47c6cce471d97104074d403ab9ec39a08276213 | [
"MIT"
]
| 1 | 2020-09-05T03:40:45.000Z | 2020-09-05T03:40:45.000Z | """PostDB class definition.
PostDB encapsulates interactions (lookup, scan, insert) with the posts table.
Typical usage example:
from post import Post
from post_db import PostDB
post_db = PostDB(mode = "dev")
post = Post(
post_url = "https://www.example.com/",
title = "Test",
main_image_url = "https://www.example.com/foo.png",
description = "Bar")
post_db.insert(post)
"""
import logging
import sqlalchemy
from util.database import Database
from util.post import Post
# Max post index to return in scan().
MAX_POSTS_TO_START = 1000
logger = logging.getLogger()
class PostDB:
"""PostDB class to interact with the posts table.
PostDB provides lookup, scan, insert operations for posts.
Attributes:
...
"""
def __init__(self, mode="dev"):
self.db_instance = Database.get_instance().connection
self.mode = mode
def lookup(self, key):
"""Looks up a post from posts table with the input key.
Args:
key: A hash of a post URL.
Returns:
A Post instance with retrieved data from posts table or None.
"""
post = None
with self.db_instance.connect() as conn:
# Execute the query and fetch all results
returned_posts = conn.execute("""
SELECT post_url_hash, post_url, title, post_author, post_author_hash,
post_published_date, submission_time,
main_image_url, description, user_display_name,
user_email, user_photo_url, user_id, user_provider_id
FROM {mode}_posts_serving
where post_url_hash = '{key}'
""".format(mode=self.mode, key=key)
).fetchall()
if len(returned_posts) > 0:
row = returned_posts[0]
post = Post(
post_url=row[1], title=row[2], author=row[3],
author_hash=row[4], published_date=row[5],
submission_time=row[6], main_image_url=row[7],
description=row[8], user_display_name=row[9],
user_email=row[10], user_photo_url=row[11],
user_id=row[12], user_provider_id=row[13])
return post
def scan(self, author_key="", start_idx=0, count=10):
"""Scans posts table and resturns a list of Post instances.
Posts of [start_idx, start_idx + count) records will be returned.
Args:
author_key: return posts written by the 'author' if not empty.
start_idx: The start index of the scan.
count: # of posts to return
Returns:
A list of posts.
"""
# pylint: disable=fixme
        # TODO: Can we change 'start' to an absolute position (e.g. a timestamp)
        #       to keep the result consistent even when a new item is added
        #       to the posts_serving db?
posts = []
if start_idx < 0 or start_idx > MAX_POSTS_TO_START:
logger.warning("start_idx is out of range: %d", start_idx)
return posts # Empty list
if count < 0 or count > MAX_POSTS_TO_START:
logger.warning("count is out of range: %d", count)
return posts # Empty list
with self.db_instance.connect() as conn:
where_str = ""
if author_key:
where_str = "where post_author_hash = '" + author_key + "'"
sql_str = """
SELECT post_url_hash, post_url, title, post_author,
post_author_hash, post_published_date, submission_time,
main_image_url, description, user_display_name, user_email,
user_photo_url, user_id, user_provider_id
FROM {mode}_posts_serving
{where_clause}
ORDER BY submission_time DESC LIMIT {limit:d}
""".format(
mode=self.mode, where_clause=where_str,
limit=start_idx + count)
# Execute the query and fetch all results
recent_posts = conn.execute(sql_str).fetchall()
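            # The LIMIT above fetches the first start_idx + count rows by recency; the slice
            # below drops the first start_idx of them, effectively emulating an OFFSET.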
if len(recent_posts) > start_idx:
for row in recent_posts[start_idx:]:
posts.append(
Post(
post_url=row[1], title=row[2], author=row[3],
author_hash=row[4], published_date=row[5],
submission_time=row[6], main_image_url=row[7],
description=row[8], user_display_name=row[9],
user_email=row[10], user_photo_url=row[11],
user_id=row[12], user_provider_id=row[13]
)
)
return posts
def insert(self, post):
"""Insert a post record into posts table.
Args:
post: A Post instance.
"""
if not post.is_valid():
logger.error("Invalid post.")
return
stmt = sqlalchemy.text("""
INSERT INTO {mode}_posts_serving
(post_url_hash, post_url, post_author, post_author_hash,
post_published_date, submission_time, title, main_image_url,
description, user_id, user_display_name, user_email,
user_photo_url, user_provider_id)
VALUES
(:url_hash, :url, :author, :author_hash, :published_date,
:submission_time, :title, :main_image_url, :description,
:user_id, :user_display_name, :user_email, :user_photo_url,
:user_provider_id)
""".format(mode=self.mode)
)
logger.info(stmt)
try:
with self.db_instance.connect() as conn:
conn.execute(
stmt, url_hash=post.post_url_hash, url=post.post_url,
author=post.author, author_hash=post.author_hash,
published_date=post.published_date,
submission_time=post.submission_time,
title=post.title, main_image_url=post.main_image_url,
description=post.description, user_id=post.user_id,
user_display_name=post.user_display_name,
user_email=post.user_email,
user_photo_url=post.user_photo_url,
user_provider_id=post.user_provider_id)
        except sqlalchemy.exc.SQLAlchemyError as ex:
logger.exception(ex)
return
def delete(self, key):
"""Deletes a post from posts table with the input key.
Args:
key: A hash of a post URL.
"""
with self.db_instance.connect() as conn:
conn.execute("""
DELETE FROM {mode}_posts_serving
where post_url_hash = '{key}'
""".format(mode=self.mode, key=key)
)
| 36.884211 | 85 | 0.558219 | 6,398 | 0.912957 | 0 | 0 | 0 | 0 | 0 | 0 | 3,354 | 0.478596 |
4be4aa437d26726d4e8976afdb8dcefd45f45a42 | 9,491 | py | Python | plugins/leading_bot_mention.py | YukiSinonome/guided_bot | 3aff47c4192e9dae4ad4d95c1553a4752ce043cc | [
"MIT"
]
| null | null | null | plugins/leading_bot_mention.py | YukiSinonome/guided_bot | 3aff47c4192e9dae4ad4d95c1553a4752ce043cc | [
"MIT"
]
| null | null | null | plugins/leading_bot_mention.py | YukiSinonome/guided_bot | 3aff47c4192e9dae4ad4d95c1553a4752ce043cc | [
"MIT"
]
| null | null | null | # coding: utf-8
from slackbot.bot import respond_to
from slacker import Slacker
import slackbot_settings
# @respond_to("疲れた")
# @respond_to("つかれた")
# def cheer(message):
# message.reply("ファイト!")
import MeCab
import random
import ChatBotScript
import SentenceGenerator
import datetime
import webbrowser
import time
import sys
try:
import urllib.request as urllib2
except ImportError:
import urllib2
import json
import requests
from requests.exceptions import Timeout
import os
def count(f_count):
f_count += 1
# count_talk = 0
def weather(message, something, number):
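    # Query the Livedoor weather JSON API for the configured city code and reply with
    # tomorrow's forecast (number == 1) or today's forecast otherwise.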
try: citycode = sys.argv[1]
    except: citycode = '130010'  # Tokyo
resp = urllib2.urlopen('http://weather.livedoor.com/forecast/webservice/json/v1?city=%s'%citycode).read().decode('utf-8')
    # Convert the fetched JSON data into a dictionary
resp = json.loads(resp)
    # Tomorrow's weather
if number == 1:
message.reply("私の住んでいるところ" + resp['title'][7:] + "は" + resp['forecasts'][1]['telop'] + "になると思います。")
    # Today's weather
else:
message.reply("私の住んでいるところ" + resp['title'][7:] + "は" + resp['forecasts'][0]['telop'] + "です。")
# Current time
def time_now(message, something):
todaydetail = datetime.datetime.today()
message.reply("現在時刻は" + str(todaydetail.hour) + ":" + str(todaydetail.minute) + "です。")
# Greeting
# def greeting():
# todaydetail = datetime.datetime.today()
# if 4 <= todaydetail.hour <= 10:
# message.reply(ChatBotScript.greeting[0] + symbol[random.randrange(2)])
# elif 11 <= todaydetail.hour <= 17:
# message.reply(ChatBotScript.greeting[1] + symbol[random.randrange(2)])
# else:
# message.reply(ChatBotScript.greeting[2])
# Weather conversation
def weather_talk():
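    # Conversation state is stored as attributes on the function object itself so the
    # nested @respond_to handler below can read and update it across messages.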
    weather_talk.count_weather = 0
    weather_talk.count = 0
    # Input
@respond_to("(.*)")
def sentence(message, something):
global count_talk
sentence = SentenceGenerator.sentence_generator(something)
# \\\\\\\\\\
# message.reply("----------変換後: " + sentence + "--weather--")
        # Pattern matching
if ("天気" in sentence or "晴れ" in sentence or "曇り" in sentence or "雨" in sentence) and ("?" in sentence or "?" in sentence or "何" in sentence) and ("明日" not in sentence):
weather_talk.count_weather = 1
weather(message, something, 0)
elif ("天気" in sentence or "晴れ" in sentence or "曇り" in sentence or "雨" in sentence) and ("?" in sentence or "?" in sentence or "何" in sentence) and ("明日" in sentence):
weather_talk.count_weather = 1
weather(message, something, 1)
elif ("どこに" in sentence and "住んで" in sentence) or ("どこ住み" in sentence):
message.reply("どこかです。")
elif "リセット" in sentence:
count_talk = 0
main_talk()
elif "晴れ" in sentence and "?" not in sentence and "?" not in sentence:
message.reply(random.choice(ChatBotScript.sunny))
elif "曇" in sentence and "?" not in sentence and "?" not in sentence:
message.reply(random.choice(ChatBotScript.cloudy))
elif "雨" in sentence and "?" not in sentence and "?" not in sentence:
message.reply(random.choice(ChatBotScript.rainy))
elif ("風" in sentence and "強い" in sentence) or ("強風" in sentence):
message.reply("吹き飛ばされないように気をつけてくださいね")
elif "台風" in sentence:
message.reply(random.choice(ChatBotScript.typhoon))
elif "元気" in sentence:
message.reply(random.choice(ChatBotScript.physical_condition))
elif "本当" in sentence and ("?" in sentence or "?" in sentence):
message.reply(random.choice(ChatBotScript.response2))
elif "今何時" in sentence:
time_now(message, something)
elif "元気" in sentence or ("本当" in sentence and ("?" in sentence or "?" in sentence)) or "朝食" in sentence or "昼食" in sentence or "晩飯" in sentence or "夜食" in sentence or "食事" in sentence or "ご飯" in sentence or "ランチ" in sentence or "ディナー" in sentence or "かっこいい" in sentence or "かっこ良い" in sentence or "かわいい" in sentence or "高い" in sentence or "安い" in sentence or "難しい" in sentence or "簡単" in sentence or "面白" in sentence or "おもしろ" in sentence or "おいし" in sentence or "美味し" in sentence or (("体重" in sentence or "身長" in sentence or "スリーサイズ" in sentence) and ("?" in sentence or "?" in sentence)):
weather_talk.count = 1
main_talk()
else:
if weather_talk.count_weather == 1:
weather_talk.count_weather += 1
message.reply("今週の天気は安定しそうですか?")
elif weather_talk.count_weather == 3:
if "はい" in sentence or "よろ" in sentence or "お願い" in sentence or "調べて" in sentence:
message.reply("http://weather.yahoo.co.jp/weather/")
weather_talk.count = 1
main_talk()
else:
message.reply("わかりました。何か別の話をしませんか?")
weather_talk.count = 1
talk.count_talk = 2
main_talk()
else:
weather_talk.count_weather = 3
message.reply("天気を調べられるページのリンク載せましょうか?")
def food_talk():
global f_count
    # Input
@respond_to("(.*)")
def sentence(message, something):
global f_count
global count_talk
sentence = SentenceGenerator.sentence_generator(something)
# \\\\\\\\\\
# message.reply("----------変換後: " + sentence + "--food--")
if "ない" in sentence or "いや" in sentence:
message.reply("では、おすすめの食べ物ありますか?")
food_talk()
elif "リセット" in sentence:
count_talk = 0
main_talk()
elif "元気" in sentence or ("本当" in sentence and ("?" in sentence or "?" in sentence)) or "かっこいい" in sentence or "かっこ良い" in sentence or "かわいい" in sentence or "高い" in sentence or "安い" in sentence or "難しい" in sentence or "簡単" in sentence or "面白" in sentence or "おもしろ" in sentence or (("体重" in sentence or "身長" in sentence or "スリーサイズ" in sentence) and ("?" in sentence or "?" in sentence)):
main_talk()
else:
if f_count == 0:
message.reply("では、5つ質問をするので答えてください。答えていただいた条件から当てます。")
message.reply("晩御飯の種類は?(スープ系・どんぶり系・定食系・パン系など)")
f_count = 1
elif f_count == 1:
message.reply("晩御飯の味は?")
f_count = 2
elif f_count == 2:
message.reply("晩御飯の色は?")
f_count = 3
elif f_count == 3:
message.reply("晩御飯は温かいもの?冷たいもの?")
f_count = 4
elif f_count == 4:
message.reply("晩御飯の食感は?")
f_count = 5
elif f_count == 5:
message.reply("予測したメニューを送ります。正解ですか?")
f_count = 0
c_name = "guided_bot_test"
f_path = "food_result.pdf"
slacker = Slacker(slackbot_settings.API_TOKEN)
def upload():
try:
slacker.files.upload(f_path, channels=[c_name], title="晩御飯の予測結果")
except requests.exceptions.Timeout:
print("Timeout occurred")
upload()
upload()
main_talk()
def work_talk():
@respond_to("(.*)")
def sentence(message, something):
global count_talk
sentence = SentenceGenerator.sentence_generator(something)
if "いい" in sentence or "送って" in sentence or "確認" in sentence or "大丈夫" in sentence or "わか" in sentence:
message.reply("ありがとうございます。確認よろしくお願いします。")
c_name = "guided_bot_test"
f_path = "work.pdf"
slacker = Slacker(slackbot_settings.API_TOKEN)
def upload():
try:
slacker.files.upload(f_path, channels=[c_name], title="議事録")
except requests.exceptions.Timeout:
print("Timeout occurred")
upload()
upload()
main_talk()
elif "リセット" in sentence:
count_talk = 0
main_talk()
else:
message.reply("了解しました。別の機会にお願いします。")
main_talk()
def main_talk():
    # Topic selection
@respond_to("(.*)")
def talk(message, something):
global count_talk
if count_talk == 0:
message.reply("何のお話をしましょうか?")
count_talk = 2
elif count_talk == 1:
message.reply("何の話ですか?")
else:
pass
@respond_to("(.*)")
def sentence(message, something):
global count_talk
sentence = SentenceGenerator.sentence_generator(something)
# \\\\\\\\\\
# message.reply("----------変換後: " + sentence + "--main--")
if "天気" in sentence:
message.reply("あなたの地域の今日の天気はどうですか?")
weather_talk()
count_talk = 1
elif "食" in sentence or "飯" in sentence:
message.reply("昨日の晩御飯が何か当てましょうか?")
food_talk()
count_talk = 1
elif "仕事" in sentence or "職場" in sentence:
message.reply("急な連絡ですみません。前回の会議の件で少し気になったことがあったので、今晩確認してもらいたいのですがよろしいでしょうか?よろしければ、気になった部分の資料をすぐに送りますので確認してください。")
work_talk()
count_talk = 1
#--------------
#----- Main -----
#--------------
t_count = 0
f_count = 0
count_talk = 0
# count()
symbol = ["", "!", "?"]
main_talk()
| 38.425101 | 599 | 0.565272 | 0 | 0 | 0 | 0 | 8,832 | 0.800072 | 0 | 0 | 3,645 | 0.330193 |
4be54c7f61feb9501683fa638bd0374bbe09f529 | 13,922 | py | Python | Lib/pagebot/elements/conditions.py | bghryct/PageBot | 394150c0fd399f02faec28f4576046882f4d7d39 | [
"MIT"
]
| 68 | 2018-10-22T22:42:58.000Z | 2022-03-19T11:07:31.000Z | Lib/pagebot/elements/conditions.py | TypeNetwork/PageBot | 394150c0fd399f02faec28f4576046882f4d7d39 | [
"MIT"
]
| 97 | 2017-07-10T23:49:30.000Z | 2018-10-03T08:17:55.000Z | Lib/pagebot/elements/conditions.py | TypeNetwork/PageBot | 394150c0fd399f02faec28f4576046882f4d7d39 | [
"MIT"
]
| 9 | 2017-07-11T09:59:00.000Z | 2018-09-12T11:59:30.000Z | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
#
# P A G E B O T
#
# Copyright (c) 2016+ Buro Petr van Blokland + Claudia Mens
# www.pagebot.io
# Licensed under MIT conditions
#
# Supporting DrawBot, www.drawbot.com
# Supporting Flat, xxyxyz.org/flat
# -----------------------------------------------------------------------------
#
# conditions.py
#
class Conditions:
# C O N D I T I O N S
def isBottomOnBottom(self, tolerance=0):
return abs(self.parent.pb - self.mBottom) <= tolerance
def isBottomOnSideBottom(self, tolerance=0):
return abs(self.mBottom) <= tolerance
def isBottomOnBleedBottom(self, tolerance=0):
return abs(self.mBottom - self.bleedBottom) <= tolerance
def isBottomOnTop(self, tolerance=0):
return abs(self.parent.h - self.parent.pt - self.mBottom) <= tolerance
def isCenterOnCenter(self, tolerance=0):
pl = self.parent.pl # Get parent padding left
center = (self.parent.w - self.parent.pr - pl)/2
return abs(pl + center - self.center) <= tolerance
def isCenterOnCenterSides(self, tolerance=0):
return abs(self.parent.w/2 - self.center) <= tolerance
def isCenterOnLeft(self, tolerance=0):
return abs(self.parent.pl - self.center) <= tolerance
def isCenterOnRight(self, tolerance=0):
return abs(self.parent.w - self.parent.pr - self.center) <= tolerance
def isCenterOnSideRight(self, tolerance=0):
return abs(self.parent.w - self.center) <= tolerance
def isMiddleOnBottom(self, tolerance=0):
return abs(self.parent.pb - self.middle) <= tolerance
def isMiddleOnSideBottom(self, tolerance=0):
return abs(self.middle) <= tolerance
def isMiddleOnTop(self, tolerance=0):
return abs(self.parent.h - self.parent.pt - self.middle) <= tolerance
def isMiddleOnSideTop(self, tolerance=0):
return abs(self.parent.h - self.middle) <= tolerance
def isMiddleOnMiddle(self, tolerance=0):
pt = self.parent.pt # Get parent padding top
pb = self.parent.pb
middle = (self.parent.h - pt - pb)/2
return abs(pb + middle - self.middle) <= tolerance
def isMiddleOnMiddleSides(self, tolerance=0):
return abs(self.parent.h - self.middle) <= tolerance
def isLeftOnCenter(self, tolerance=0):
pl = self.parent.pl # Get parent padding left
center = (self.parent.w - self.parent.pr - pl)/2
return abs(pl + center - self.mLeft) <= tolerance
def isLeftOnCenterSides(self, tolerance=0):
return abs(self.parent.w/2 - self.mLeft) <= tolerance
def isLeftOnLeft(self, tolerance=0):
return abs(self.parent.pl - self.mLeft) <= tolerance
def isLeftOnSideLeft(self, tolerance=0):
return abs(self.mLeft) <= tolerance
def isLeftOnBleedLeft(self, tolerance=0):
return abs(self.mLeft + self.bleedLeft) <= tolerance
def isLeftOnRight(self, tolerance=0):
return abs(self.parent.w - self.parent.pr - self.mLeft) <= tolerance
def isLeftOnSideRight(self, tolerance=0):
return abs(self.parent.w - self.mLeft) <= tolerance
def isCenterOnSideLeft(self, tolerance=0):
return abs(self.parent.mLeft - self.center) <= tolerance
def isTopOnMiddle(self, tolerance=0):
pt = self.parent.pt # Get parent padding top
pb = self.parent.pb
middle = (self.parent.h - pb - pt)/2
return abs(pb + middle - self.mTop) <= tolerance
def isTopOnMiddleSides(self, tolerance=0):
return abs(self.parent.h/2 - self.mTop) <= tolerance
def isOriginOnBottom(self, tolerance=0):
pb = self.parent.pb # Get parent padding left
return abs(pb - self.y) <= tolerance
def isOriginOnSideBottom(self, tolerance=0):
return abs(self.y) <= tolerance
def isOriginOnCenter(self, tolerance=0):
pl = self.parent.pl # Get parent padding left
center = (self.parent.w - self.parent.pr - pl)/2
return abs(pl + center - self.x) <= tolerance
def isOriginOnCenterSides(self, tolerance=0):
return abs(self.parent.w/2 - self.x) <= tolerance
def isOriginOnLeft(self, tolerance=0):
return abs(self.parent.pl - self.x) <= tolerance
def isOriginOnSideLeft(self, tolerance=0):
return abs(self.x) <= tolerance
def isOriginOnRight(self, tolerance=0):
return abs(self.parent.w - self.parent.pr - self.x) <= tolerance
def isOriginOnSideRight(self, tolerance=0):
return abs(self.parent.w - self.x) <= tolerance
def isOriginOnTop(self, tolerance=0):
return abs(self.parent.h - self.parent.pt - self.y) <= tolerance
def isOriginOnSideTop(self, tolerance=0):
"""Answers if the origin of self is on the top side of self.parent.
>>> from pagebot.elements.element import Element
>>> e1 = Element(w=200, h=400)
>>> e2 = Element(w=50, h=50, parent=e1)
>>> #FIX e1.isOriginOnSideTop()
False
>>> #FIX e2.isOriginOnSideTop()
False
>>> e2.y = e1.top
>>> #FIX e2.isOriginOnSideTop(), e2.y, e1.top
(True, 500pt, 500pt)
"""
if self.parent is None:
return False
return abs(self.parent.top - self.y) <= tolerance
def isOriginOnMiddle(self, tolerance=0):
"""Answers if the origin of self is on the top side of self.parent.
>>> from pagebot.elements.element import Element
>>> e1 = Element(w=200, h=400)
>>> e2 = Element(w=50, h=50, parent=e1)
>>> e1.isOriginOnMiddle()
False
>>> #FIX e2.isOriginOnMiddle()
False
>>> e2.y = e1.middle
>>> #FIX e2.isOriginOnMiddle(), e2.y, e1.middle
(True, 500pt, 500pt)
"""
if self.parent is None:
return False
return abs(self.parent.middle - self.y) <= tolerance
def isOriginOnMiddleSides(self, tolerance=0):
return abs(self.parent.h/2 - self.y) <= tolerance
def isRightOnCenter(self, tolerance=0):
"""Answers if the right size of `self` is on the middle of the parent.
>>> from pagebot.elements.element import Element
>>> e1 = Element(x=100, w=200) # e1.right == 300
>>> e2 = Element(w=600, elements=[e1])
"""
return abs(self.parent.pl + self.parent.pw/2 - self.mRight) <= tolerance
def isRightOnCenterSides(self, tolerance=0):
return abs(self.parent.w/2 - self.mRight) <= tolerance
def isRightOnLeft(self, tolerance=0):
return abs(self.parent.pl - self.mRight) <= tolerance
def isRightOnRight(self, tolerance=0):
return abs(self.parent.w - self.parent.pr - self.mRight) <= tolerance
def isRightOnSideRight(self, tolerance=0):
return abs(self.parent.w - self.mRight) <= tolerance
def isRightOnBleedRight(self, tolerance=0):
        return abs(self.parent.w - self.mRight + self.bleedRight) <= tolerance
def isBottomOnMiddle(self, tolerance=0):
pt = self.parent.pt # Get parent padding top
pb = self.parent.pb
middle = (self.parent.h - pb - pt)/2
return abs(pb + middle - self.mBottom) <= tolerance
def isBottomOnMiddleSides(self, tolerance=0):
return abs(self.parent.h/2 - self.mBottom) <= tolerance
def isTopOnBottom(self, tolerance=0):
return abs(self.parent.pb - self.mTop) <= tolerance
def isTopOnTop(self, tolerance=0):
return abs(self.parent.h - self.parent.pt - self.mTop) <= tolerance
def isTopOnSideTop(self, tolerance=0):
return abs(self.parent.h - self.mTop) <= tolerance
def isTopOnBleedTop(self, tolerance=0):
return abs(self.parent.h - self.mTop + self.bleedTop) <= tolerance
# Shrink block conditions
def isSchrunkOnBlockLeft(self, tolerance):
boxX, _, _, _ = self.marginBox
return abs(self.mLeft + self.pl - boxX) <= tolerance
def isShrunkOnBlockRight(self, tolerance):
boxX, _, boxW, _ = self.marginBox
return abs(self.mRight - self.pr - (boxX + boxW)) <= tolerance
def isShrunkOnBlockTop(self, tolerance):
_, boxY, _, boxH = self.marginBox
return self.mTop - self.pt - (boxY + boxH) <= tolerance
def isShrunkOnBlockBottom(self, tolerance):
"""Test if the bottom of self is shrunk to the bottom position of the
block."""
_, boxY, _, boxH = self.marginBox
return abs(self.pb - boxY) <= tolerance
def isShrunkOnBlockSideLeft(self, tolerance):
boxX, _, _, _ = self.box
return abs(self.mLeft - boxX) <= tolerance
def isShrunkOnBlockSideRight(self, tolerance):
boxX, _, boxW, _ = self.mbox
return abs(self.mRight - (boxX + boxW)) <= tolerance
def isShrunkOnBlockSideTop(self, tolerance):
_, boxY, _, boxH = self.box
return self.mTop - (boxY + boxH) <= tolerance
def isShrunkOnBlockSideBottom(self, tolerance):
_, boxY, _, boxH = self.marginBox
return abs(self.mBottom - boxY) <= tolerance
# Unimplemented here for text operations.
def isShrunkOnTextHeight(self, tolerance=0):
"""For non-text elements, this is always True to satisfy the calling
condition."""
return True
def shrink2TextHeight(self, tolerance=0):
"""For non-text elements, this is always True to satisfy the calling
condition."""
return True
def isShrunkOnTextWidth(self, tolerance=0):
"""For non-text elements, this is always True to satisfy the calling
condition."""
return True
def shrink2TextWidth(self, tolerance=0):
"""For non-text elements, this is always True to satisfy the calling
condition."""
return True
# Float conditions to page padding.
def isFloatOnTop(self, tolerance=0):
answer = abs(min(self.getFloatSideTop(), self.parent.h - self.parent.pt) - self.mTop) <= tolerance
return answer
def isFloatOnBottom(self, tolerance=0):
return abs(max(self.getFloatSideBottom(), self.parent.pb) - self.mBottom) <= tolerance
def isFloatOnLeft(self, tolerance=0):
answer = abs(max(self.getFloatSideLeft(), self.parent.pl) - self.mLeft) <= tolerance
return answer
def isFloatOnRight(self, tolerance=0):
return abs(min(self.getFloatSideRight(), self.parent.w - self.parent.pr) - self.mRight) <= tolerance
# Float conditions to page sides
def isFloatOnSideTop(self, tolerance=0):
return abs(self.getFloatSideTop() - self.mTop) <= tolerance
def isFloatOnSideBottom(self, tolerance=0):
return abs(self.getFloatSideBottom() - self.mBottom) <= tolerance
def isFloatOnSideLeft(self, tolerance=0):
return abs(self.getFloatSideLeft() - self.mLeft) <= tolerance
def isFloatOnSideRight(self, tolerance=0):
return abs(self.getFloatSideRight() - self.mRight) <= tolerance
# Column/Row conditions
def isLeftOnCol(self, col, tolerance):
"""Move top of the element to col index position."""
gridColumns = self.getGridColumns()
if col in range(len(gridColumns)):
return abs(self.mLeft - gridColumns[col][0]) <= tolerance
return False # row is not in range of gridColumns
def isRightOnCol(self, col, tolerance):
"""Move top of the element to col index position."""
gridColumns = self.getGridColumns()
if col in range(len(gridColumns)):
return abs(self.mRight - gridColumns[col][0] - self.gw) <= tolerance
return False # row is not in range of gridColumns
def isFitOnColSpan(self, col, colSpan, tolerance):
"""Answers if the self.w is the same as the total of column widths
between col and col+colSpan.
>>> from pagebot.toolbox.units import pt
>>> from pagebot.elements.element import Element
>>> gridX = (pt(100, 10), pt(200, 20), pt(300, 30), pt(400, 40), pt(500, 50))
>>> e1 = Element(padding=30, w=600, gridX=gridX)
>>> e1.getGridColumns()
[(0, 100pt), (110pt, 200pt), (330pt, 300pt), (660pt, 400pt), (1100pt, 500pt)]
>>> e2 = Element(w=100, parent=e1)
>>> e1.getGridColumns()
[(0, 100pt), (110pt, 200pt), (330pt, 300pt), (660pt, 400pt), (1100pt, 500pt)]
>>> e2.isFitOnColSpan(0, 1, 0)
True
>>> e2.w = 310
>>> e2.isFitOnColSpan(0, 2, 0)
True
>>> e2.w = 950
>>> e2.isFitOnColSpan(1, 3, 0)
True
"""
gridColumns = self.getGridColumns()
if col >= 0 and col+colSpan <= len(gridColumns):
c1 = gridColumns[col]
c2 = gridColumns[col + colSpan - 1]
return abs(self.w - (c2[0] - c1[0] + c2[1])) <= tolerance
return False
def isTopOnRow(self, row, tolerance):
"""Move top of the element to row."""
gridRows = self.getGridRows()
if row in range(len(gridRows)):
return abs(self.mTop - gridRows[row][0]) <= tolerance
# row is not in range of gridColumns.
return False
def isBottomOnRow(self, row, tolerance):
"""Move top of the element to row."""
gridRows = self.getGridRows()
if row in range(len(gridRows)):
return abs(self.mBottom - gridRows[row][0]) <= tolerance
# row is not in range of gridColumns.
return False
def isFitOnRowSpan(self, row, rowSpan, tolerance):
gridRows = self.getGridRows()
if row >= 0 and row+rowSpan < len(gridRows):
r1 = gridRows[row]
r2 = gridRows[row + rowSpan - 1]
return abs(self.h - (r2[0] - r1[0] + r2[1])) <= tolerance
return False
if __name__ == '__main__':
import doctest
import sys
sys.exit(doctest.testmod()[0])
| 35.789203 | 108 | 0.617943 | 13,364 | 0.95992 | 0 | 0 | 0 | 0 | 0 | 0 | 3,548 | 0.254848 |
4be5a05c40ee31ef9f187f13c41d25d878a65ca6 | 7,099 | py | Python | Pix2Pix/Streamlit_Pix2Pix_Main.py | NB094/LHL_Final_Project | 5df15d7bbf33d51840ea274629591cd938f58fce | [
"Apache-2.0"
]
| 2 | 2021-10-04T05:53:29.000Z | 2022-01-21T12:53:43.000Z | Pix2Pix/Streamlit_Pix2Pix_Main.py | NB094/LHL_Final_Project | 5df15d7bbf33d51840ea274629591cd938f58fce | [
"Apache-2.0"
]
| null | null | null | Pix2Pix/Streamlit_Pix2Pix_Main.py | NB094/LHL_Final_Project | 5df15d7bbf33d51840ea274629591cd938f58fce | [
"Apache-2.0"
]
| 1 | 2021-10-04T05:53:32.000Z | 2021-10-04T05:53:32.000Z | from PIL import Image
import streamlit as st
from streamlit_drawable_canvas import st_canvas
from Streamlit_Pix2Pix_Generator import Generator
import numpy as np
import urllib.request
from keras.preprocessing.image import load_img
from keras.models import load_model
import requests
# Page intro
st.title('Pix2Pix – See Your Sketches Brought to Life!')
st.text('')
st.markdown('Sketch out an object using the canvas below, and let your computer do the rest of the heavy lifting.')
st.text('')
st.text('')
# Links and FAQ section
st.sidebar.markdown("### [SRGANs Web Page](https://share.streamlit.io/nb094/easy-gans/main/SRGAN/Streamlit_SRGAN_Main.py)")
st.sidebar.markdown("### [NumGen Web Page](https://share.streamlit.io/nb094/easy-gans/main/NumGen/Streamlit_NumGen_Main.py)")
st.sidebar.text('')
expander = st.sidebar.expander("Pix2Pix Frequently-Asked Questions", expanded=True)
expander.write("**What type of machine learning is being used?** \n\n \
The model's architecture is based on solving image-to-image translation with a Conditional Generative Adversarial Network, or cGAN. \n\n   \n\n \
**How do GANs work?** \n\n \
There are two main components to GAN models: a *discriminator* and a *generator*. \n\n \
The purpose of the discriminator is to classify images presented to it as real or fake. \
The purpose of the generator is to create plausible images to fool the discriminator. \n\n \
After many cycles of training, the skill of the generator improves enough to produce some impressive results! \n\n   \n\n \
**What is the difference between a GAN and a cGAN?** \n\n \
The basic idea behind cGANs is the same. The primary difference is way the model improves after each cycle, which is based on \
a *loss* calculation. For cGANs, this calculation optimizes the structure or joint configuration of the output. \n\n   \n\n \
**What are the possible applications of cGANs?** \n\n \
cGANs have been used in self-driving cars, creating maps from satellite images, colorizing black and white photos, and much more. \n\n   \n\n \
**Where can I read more about cGANs?** \n\n \
For more information on cGANs, check out [this paper.](https://arxiv.org/abs/1611.07004) \n\n   \n\n \
**Who developed this web page?** \n\n \
This web page and the underlying models were developed by Niklas Bergen with the help of some additional resources. \
Check out the [GitHub repo](https://github.com/NB094/Easy-GANs) for more information.")
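
# For readers curious what a single cGAN training step looks like in code, here is a
# minimal, illustrative sketch (commented out; it is NOT executed by this app, and the
# names generator, discriminator, gan, sketches, real_imgs, ones, zeros are hypothetical
# placeholders rather than objects defined in this project):
#
#   fake_imgs = generator.predict(sketches)                                   # G proposes images
#   d_loss_real = discriminator.train_on_batch([sketches, real_imgs], ones)   # D sees real pairs
#   d_loss_fake = discriminator.train_on_batch([sketches, fake_imgs], zeros)  # D sees fakes
#   g_loss = gan.train_on_batch(sketches, [ones, real_imgs])                  # G: adversarial + L1
#
# The last line reflects the cGAN/pix2pix twist mentioned above: the generator is optimized
# jointly on fooling the discriminator and on an L1 reconstruction term against the paired target.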
##### CODE FOR Pix2Pix #####
# Define page layout
left_column, right_column = st.columns([2,1])
# Create selection box and logic for various sketch subjects.
subject_selection = left_column.selectbox(label = 'Select what you wish to draw...', options = ['Human', 'Shoe', 'Handbag'], index = 0)
if subject_selection == 'Human':
stroke_color = '#F44F36'
background_color='#000000'
else:
stroke_color = '#F44F36'
background_color='#FFFFFF'
# Initialize a random number in the session state. Used to randomize examples shown.
if 'random_num' not in st.session_state:
st.session_state.random_num = 1
# Change the random example number whenever the radio buttons are changed.
def random_num():
st.session_state.random_num = np.random.randint(1,5+1)
return
# Retrieve a randomly-selected example image
urllib.request.urlretrieve(f'https://github.com/NB094/Easy-GANs/raw/main/Pix2Pix/example_images_streamlit/example_{str.lower(subject_selection)}{st.session_state.random_num}.jpg?raw=true', \
'example_img.jpg')
# Create more options menus
canvas_mode = st.radio(label = 'Select canvas mode...', options = ('Draw on a blank canvas', 'View an example sketch', 'Try tracing an example sketch'), \
index = 1, help='Example sketches are chosen randomly out of 5 options.', on_change=random_num)
drawing_mode = right_column.selectbox(label = "Drawing tool:", options = ("freedraw", "line", "rect", "circle", "polygon", "transform"), index = 0)
# Create the drawing canvas
if canvas_mode == 'View an example sketch':
st.image('example_img.jpg')
else:
canvas_result = st_canvas(
fill_color="rgba(255, 255, 255, 0.0)", # Fill colors from shape objects have full transparency
stroke_width=1,
stroke_color=stroke_color,
background_color=background_color,
background_image=Image.open('example_img.jpg') if canvas_mode == 'Try tracing an example sketch' else None,
height=256,
width=256,
drawing_mode=drawing_mode,
key="canvas")
##### SKETCH PROCESSING #####
if canvas_mode == 'View an example sketch':
drawn_image = load_img('example_img.jpg')
else:
# Store canvas sketch data into a variable
drawn_image = canvas_result.image_data
# Insert try/except loop to prevent website from temporarily throwing error when unchecking the box.
try:
# Convert sketch data into parseable numpy array
drawn_image = np.array(Image.fromarray((drawn_image * 255).astype(np.uint8)).resize((256, 256)).convert('RGB'))
drawn_image = (drawn_image * 255).astype(np.uint8)
# If needed, convert black background to white before passing image to generator.
if subject_selection != 'Human':
drawn_image[drawn_image == 0] = 255
except:
pass
# Download and load model files. Cache due to large file sizes.
@st.cache(suppress_st_warning=True, allow_output_mutation=True)
def cache_all_models():
st.text('Downloading models...')
r = requests.get('https://onedrive.live.com/download?cid=200A679661E47E0E&resid=200A679661E47E0E%211074&authkey=AKxNvSc7K-dVn9k')
with open('humans_fully_trained.h5', 'wb') as f:
f.write(r.content)
r = requests.get('https://onedrive.live.com/download?cid=200A679661E47E0E&resid=200A679661E47E0E%211076&authkey=AOXgLqS3bQIuwbU')
with open('shoes_fully_trained.h5', 'wb') as f:
f.write(r.content)
r = requests.get('https://onedrive.live.com/download?cid=200A679661E47E0E&resid=200A679661E47E0E%211075&authkey=AAtjUZTrsNbE2zk')
with open('handbags_fully_trained.h5', 'wb') as f:
f.write(r.content)
humans_model = load_model('humans_fully_trained.h5', compile=False)
shoes_model = load_model('shoes_fully_trained.h5', compile=False)
handbags_model = load_model('handbags_fully_trained.h5', compile=False)
st.text('Download complete')
return humans_model, shoes_model, handbags_model
humans_model, shoes_model, handbags_model = cache_all_models()
if subject_selection=='Human':
model = humans_model
elif subject_selection=='Shoe':
model = shoes_model
elif subject_selection=='Handbag':
model = handbags_model
# Insert try/except loop to prevent website from temporarily throwing error when unchecking the box.
try:
# Pass numpy array into generator, and predict
gen = Generator(drawn_image, subject_selection)
gen_image = gen.generate_image(model)
# Display prediction
st.image(gen_image)
except:
    pass
| 41.273256 | 190 | 0.720947 | 0 | 0 | 0 | 0 | 1,082 | 0.152373 | 0 | 0 | 4,215 | 0.593578 |
4be8b0689a8d30b24d0eb351d73f642c1be6c5a9 | 4,584 | py | Python | rbs/rbs.py | dexbiobot/SML-Cogs | e8d3d12e5bf1d760196006f86a6c16ed95e3c964 | [
"MIT"
]
| 17 | 2017-05-30T13:21:18.000Z | 2022-03-27T13:08:17.000Z | rbs/rbs.py | dexbiobot/SML-Cogs | e8d3d12e5bf1d760196006f86a6c16ed95e3c964 | [
"MIT"
]
| 16 | 2017-06-11T12:55:06.000Z | 2019-02-20T21:00:59.000Z | rbs/rbs.py | dexbiobot/SML-Cogs | e8d3d12e5bf1d760196006f86a6c16ed95e3c964 | [
"MIT"
]
| 17 | 2017-05-03T16:09:46.000Z | 2020-05-13T21:19:37.000Z | """
The MIT License (MIT)
Copyright (c) 2017 SML
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import asyncio
import os
from __main__ import send_cmd_help
from cogs.utils import checks
from cogs.utils.dataIO import dataIO
from discord.ext import commands
import discord
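
# Seconds to wait between iterations of the background loop task.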
LOOP_INTERVAL = 60
SERVER_DEFAULTS = {
'autorole': {
"role_name": "Guest",
"role_id": None,
"timer": 86400
}
}
PATH = os.path.join('data', 'rbs')
JSON = os.path.join(PATH, 'settings.json')
class RBS:
"""Reddit Band System (RBS) general utility cog.
Functionality:
# Autorole
    Automatically convert users with no role assignments to Guest
"""
def __init__(self, bot):
"""Init."""
self.bot = bot
self.settings = dataIO.load_json(JSON)
self.task = bot.loop.create_task(self.loop_task())
async def loop_task(self):
"""Loop tasks.
- auto-role guests.
"""
await self.bot.wait_until_ready()
        await asyncio.sleep(LOOP_INTERVAL)
        if self is self.bot.get_cog('RBS'):
            self.task = self.bot.loop.create_task(self.loop_task())
@checks.mod_or_permissions()
@commands.group(pass_context=True, no_pm=True)
async def setrbs(self, ctx):
"""Set RBS settings."""
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
@checks.serverowner_or_permissions(manage_server=True)
@setrbs.command(name="initserver", pass_context=True, no_pm=True)
async def setrbs_initserver(self, ctx):
"""Initialize server settings to default values.
Requires confirmation as this is a destructive process.
"""
await self.bot.say(
'This is a destructive operation. '
'Are you sure that you want to continue? '
'Type **I agree** to execute.')
answer = await self.bot.wait_for_message(
timeout=30,
author=ctx.message.author)
        if answer and answer.content == 'I agree':
self.settings = SERVER_DEFAULTS
dataIO.save_json(JSON, self.settings)
await self.bot.say(
'Settings set to server defaults.')
else:
await self.bot.say(
'Operation aborted.')
@setrbs.command(name="autorolename", pass_context=True, no_pm=True)
async def setrbs_autorolename(self, ctx, role_name):
"""Set auto-role’s role name.
This is the role name automatically assigned to
users when they have been on the server for x amount of time.
The exact amount of time to use is also settable.
"""
if 'autorole' not in self.settings:
self.settings = SERVER_DEFAULTS
dataIO.save_json(JSON, self.settings)
server = ctx.message.server
role = discord.utils.get(server.roles, name=role_name)
if role is None:
await self.bot.say(
'{} is not a valid role on this server.'.format(
role_name))
return
self.settings['autorole']['role_name'] = role.name
self.settings['autorole']['role_id'] = role.id
await self.bot.say(
'Auto-role’s role set to {}'.format(
role.name))
dataIO.save_json(JSON, self.settings)
def check_folder():
"""Check folder."""
if not os.path.exists(PATH):
os.makedirs(PATH)
def check_file():
"""Check files."""
if not dataIO.is_valid_json(JSON):
dataIO.save_json(JSON, SERVER_DEFAULTS)
def setup(bot):
"""Setup bot."""
check_folder()
check_file()
n = RBS(bot)
bot.add_cog(n)
| 29.960784 | 75 | 0.648778 | 2,770 | 0.603749 | 0 | 0 | 2,149 | 0.468396 | 2,108 | 0.459459 | 2,122 | 0.462511 |
4beabadec3de979135423c3abb7be1e6a84c41ad | 2,845 | py | Python | tests/nutsflow/test_iterfunction.py | maet3608/nuts-flow | 0d7b8eefc80cb45c079b155ff5062d1d93ff2caf | [
"Apache-2.0"
]
| 21 | 2017-05-01T10:15:41.000Z | 2022-01-25T07:02:44.000Z | tests/nutsflow/test_iterfunction.py | maet3608/nuts-flow | 0d7b8eefc80cb45c079b155ff5062d1d93ff2caf | [
"Apache-2.0"
]
| 7 | 2017-02-09T03:36:37.000Z | 2017-08-22T11:23:03.000Z | tests/nutsflow/test_iterfunction.py | maet3608/nuts-flow | 0d7b8eefc80cb45c079b155ff5062d1d93ff2caf | [
"Apache-2.0"
]
| 5 | 2017-05-30T01:56:31.000Z | 2020-10-05T08:21:43.000Z | """
.. module:: test_iterfunction
:synopsis: Unit tests for iterfunction module
"""
import time
import nutsflow.iterfunction as itf
from six.moves import range
def test_length():
assert itf.length(range(10)) == 10
assert itf.length([]) == 0
def test_interleave():
it1 = [1, 2]
it2 = 'abc'
it = itf.interleave(it1, it2)
assert list(it) == [1, 'a', 2, 'b', 'c']
assert list(itf.interleave([], [])) == []
assert list(itf.interleave('12', [])) == ['1', '2']
def test_take():
it = itf.take(range(10), 3)
assert list(it) == [0, 1, 2]
it = itf.take(range(10), 0)
assert list(it) == []
it = itf.take(range(0), 3)
assert list(it) == []
def test_nth():
assert itf.nth(range(10), 2) == 2
assert itf.nth(range(10), 100) is None
assert itf.nth(range(10), 100, -1) == -1
def test_unique():
assert list(itf.unique([1, 2, 3])) == [1, 2, 3]
assert list(itf.unique([2, 3, 1, 1, 2, 4])) == [2, 3, 1, 4]
assert list(itf.unique([])) == []
data = [(1, 'a'), (2, 'a'), (3, 'b')]
it = itf.unique(data, key=lambda t: t[1])
assert list(it) == [(1, 'a'), (3, 'b')]
def test_chunked():
it = itf.chunked(range(5), 2)
assert list(map(tuple, it)) == [(0, 1), (2, 3), (4,)]
it = itf.chunked(range(6), 3)
assert list(map(tuple, it)) == [(0, 1, 2), (3, 4, 5)]
assert list(itf.chunked([], 2)) == []
def test_consume():
it = iter(range(10))
itf.consume(it)
assert next(it, None) is None
it = iter(range(10))
itf.consume(it, 5)
assert next(it, None) == 5
def test_flatten():
assert list(itf.flatten([])) == []
iterable = [(1, 2), (3, 4, 5)]
assert list(itf.flatten(iterable)) == [1, 2, 3, 4, 5]
def test_flatmap():
f = lambda n: str(n) * n
it = itf.flatmap(f, [1, 2, 3])
assert list(it) == ['1', '2', '2', '3', '3', '3']
it = itf.flatmap(f, [])
assert list(it) == []
def test_partition():
pred = lambda x: x < 6
smaller, larger = itf.partition(range(10), pred)
assert list(smaller) == [0, 1, 2, 3, 4, 5]
assert list(larger) == [6, 7, 8, 9]
def test_prefetch_iterator_speed():
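    # Prefetching overlaps the producer's sleeps with the consumer's, so the prefetched
    # pass measured second should finish faster than the sequential pass measured first.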
def sleep():
time.sleep(0.01)
def number_generator():
for i in range(10):
sleep()
yield i
start = time.time()
for _ in number_generator():
sleep()
duration1 = time.time() - start
start = time.time()
for _ in itf.PrefetchIterator(number_generator()):
sleep()
duration2 = time.time() - start
assert duration2 < duration1
def test_prefetch_iterator_thread_safe():
from multiprocessing.pool import ThreadPool
data = set(range(100))
prefetch_it = itf.PrefetchIterator(data)
pool = ThreadPool()
result = set(pool.map(lambda x: 2 * x - x, prefetch_it))
assert result == data
| 23.319672 | 63 | 0.555712 | 0 | 0 | 450 | 0.158172 | 0 | 0 | 0 | 0 | 143 | 0.050264 |
4beb4afba8d4e82f6ec0587a4a66ce29bdfa1be9 | 6,591 | py | Python | microcosm_flask/tests/conventions/test_upload.py | Sinon/microcosm-flask | c1404ebc94459c8156b04f5e04490a330117524c | [
"Apache-2.0"
]
| 11 | 2017-01-30T21:53:20.000Z | 2020-05-29T22:39:19.000Z | microcosm_flask/tests/conventions/test_upload.py | Sinon/microcosm-flask | c1404ebc94459c8156b04f5e04490a330117524c | [
"Apache-2.0"
]
| 139 | 2016-03-09T19:09:59.000Z | 2021-09-03T17:14:00.000Z | microcosm_flask/tests/conventions/test_upload.py | Sinon/microcosm-flask | c1404ebc94459c8156b04f5e04490a330117524c | [
"Apache-2.0"
]
| 10 | 2016-12-19T22:39:42.000Z | 2021-03-09T19:23:15.000Z | """
Upload convention tests.
"""
from io import BytesIO
from json import loads
from uuid import uuid4
from hamcrest import (
all_of,
anything,
assert_that,
contains,
equal_to,
has_entries,
has_entry,
has_item,
has_key,
is_,
is_not,
)
from marshmallow import Schema, fields
from microcosm.api import create_object_graph
from microcosm_flask.conventions.base import EndpointDefinition
from microcosm_flask.conventions.swagger import configure_swagger
from microcosm_flask.conventions.upload import configure_upload
from microcosm_flask.namespaces import Namespace
from microcosm_flask.operations import Operation
from microcosm_flask.swagger.definitions import build_path
from microcosm_flask.tests.conventions.fixtures import Person
class FileExtraSchema(Schema):
extra = fields.String(missing="something")
class FileResponseSchema(Schema):
id = fields.UUID(required=True)
class FileController:
def __init__(self):
self.calls = []
def upload(self, files, extra):
self.calls.append(
dict(
files=files,
extra=extra,
),
)
def upload_for_person(self, files, extra, person_id):
self.calls.append(
dict(
extra=extra,
files=files,
person_id=person_id,
),
)
return dict(
id=person_id,
)
class TestUpload:
def setup(self):
self.graph = create_object_graph(name="example", testing=True)
self.ns = Namespace(subject="file")
self.relation_ns = Namespace(subject=Person, object_="file")
self.controller = FileController()
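        # Two endpoints are wired up: a bare Upload with no response schema (so it returns
        # 204) and an UploadFor bound to a Person that responds with FileResponseSchema.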
UPLOAD_MAPPINGS = {
Operation.Upload: EndpointDefinition(
func=self.controller.upload,
request_schema=FileExtraSchema(),
),
}
UPLOAD_FOR_MAPPINGS = {
Operation.UploadFor: EndpointDefinition(
func=self.controller.upload_for_person,
request_schema=FileExtraSchema(),
response_schema=FileResponseSchema(),
),
}
configure_upload(self.graph, self.ns, UPLOAD_MAPPINGS)
configure_upload(self.graph, self.relation_ns, UPLOAD_FOR_MAPPINGS)
configure_swagger(self.graph)
self.client = self.graph.flask.test_client()
def test_upload_url_for(self):
with self.graph.app.test_request_context():
url = self.ns.url_for(Operation.Upload)
assert_that(url, is_(equal_to("http://localhost/api/file")))
def test_upload_for_url_for(self):
with self.graph.app.test_request_context():
url = self.relation_ns.url_for(Operation.UploadFor, person_id=1)
assert_that(url, is_(equal_to("http://localhost/api/person/1/file")))
def test_upload_swagger_path(self):
with self.graph.app.test_request_context():
path = build_path(Operation.Upload, self.ns)
assert_that(path, is_(equal_to("/api/file")))
def test_upload_for_swagger_path(self):
with self.graph.app.test_request_context():
path = build_path(Operation.UploadFor, self.relation_ns)
assert_that(path, is_(equal_to("/api/person/{person_id}/file")))
def test_swagger(self):
response = self.client.get("/api/swagger")
assert_that(response.status_code, is_(equal_to(200)))
data = loads(response.data)
upload = data["paths"]["/file"]["post"]
upload_for = data["paths"]["/person/{person_id}/file"]["post"]
# both endpoints return form data
assert_that(
upload["consumes"],
contains("multipart/form-data"),
)
assert_that(
upload_for["consumes"],
contains("multipart/form-data"),
)
# one endpoint gets an extra query string parameter (and the other doesn't)
assert_that(
upload["parameters"],
has_item(
has_entries(name="extra"),
),
)
assert_that(
upload_for["parameters"],
has_item(
is_not(has_entries(name="extra")),
),
)
# one endpoint gets a custom response type (and the other doesn't)
assert_that(
upload["responses"],
all_of(
has_key("204"),
is_not(has_key("200")),
has_entry("204", is_not(has_key("schema"))),
),
)
assert_that(
upload_for["responses"],
all_of(
has_key("200"),
is_not(has_key("204")),
has_entry("200", has_entry("schema", has_entry("$ref", "#/definitions/FileResponse"))),
),
)
def test_upload(self):
response = self.client.post(
"/api/file",
data=dict(
file=(BytesIO(b"Hello World\n"), "hello.txt"),
),
)
assert_that(response.status_code, is_(equal_to(204)))
assert_that(self.controller.calls, contains(
has_entries(
files=contains(contains("file", anything(), "hello.txt")),
extra="something",
),
))
def test_upload_for(self):
person_id = uuid4()
response = self.client.post(
"/api/person/{}/file".format(person_id),
data=dict(
file=(BytesIO(b"Hello World\n"), "hello.txt"),
),
)
assert_that(response.status_code, is_(equal_to(200)))
response_data = loads(response.get_data().decode("utf-8"))
assert_that(response_data, is_(equal_to(dict(
id=str(person_id),
))))
assert_that(self.controller.calls, contains(
has_entries(
files=contains(contains("file", anything(), "hello.txt")),
extra="something",
person_id=person_id,
),
))
def test_upload_multipart(self):
response = self.client.post(
"/api/file",
data=dict(
file=(BytesIO(b"Hello World\n"), "hello.txt"),
extra="special",
),
)
assert_that(response.status_code, is_(equal_to(204)))
assert_that(self.controller.calls, contains(
has_entries(
files=contains(contains("file", anything(), "hello.txt")),
extra="special",
),
))
| 29.823529 | 103 | 0.576847 | 5,801 | 0.88014 | 0 | 0 | 0 | 0 | 0 | 0 | 839 | 0.127295 |
4becdb4fe42c069830f83a3d86842e13caf2edcf | 135 | py | Python | molecool/io/__init__.py | nitrosx/molecool | 58ce78aceb707ff92b26bf6c90b3703714c09786 | [
"BSD-3-Clause"
]
| null | null | null | molecool/io/__init__.py | nitrosx/molecool | 58ce78aceb707ff92b26bf6c90b3703714c09786 | [
"BSD-3-Clause"
]
| null | null | null | molecool/io/__init__.py | nitrosx/molecool | 58ce78aceb707ff92b26bf6c90b3703714c09786 | [
"BSD-3-Clause"
]
| null | null | null | '''
molecool.io package
configure access to subpackage functions
'''
from .pdb import open_pdb
from .xyz import open_xyz, write_xyz
| 13.5 | 40 | 0.77037 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 69 | 0.511111 |
4bf119d7edb9acf18b1f1e428e435fcd728fc1f4 | 866 | py | Python | tests/check-result.py | getupcloud/tiny-controllers | e896b2015a9e29eab421225cb5a5f0d488df9e37 | [
"Apache-2.0"
]
| null | null | null | tests/check-result.py | getupcloud/tiny-controllers | e896b2015a9e29eab421225cb5a5f0d488df9e37 | [
"Apache-2.0"
]
| null | null | null | tests/check-result.py | getupcloud/tiny-controllers | e896b2015a9e29eab421225cb5a5f0d488df9e37 | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python
import sys
import json
from flatten_dict import flatten as _flatten
try:
data = json.load(sys.stdin)['object']
except Exception as ex:
print("Missing or invalid test data:", ex)
sys.exit(1)
try:
results = json.load(open(sys.argv[1], "r"))['results']
except Exception as ex:
print("Missing or invalid test results:", ex)
sys.exit(1)
def flatten(d):
return _flatten(d, reducer='dot', keep_empty_types=(dict,), enumerate_types=(list,))
data = flatten(data)
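# Every flattened key/value pair listed in the expected results must be present in the
# flattened test object with an identical value.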
ok = True
for r in [ flatten(i) for i in results ]:
for k, v in r.items():
if k not in data:
print(f'{k} not found in {data}')
ok = False
elif v != data[k]:
            print(f'{k}={data[k]} does not match {k}={v}')
ok = False
else:
print(f"Match: {r}")
sys.exit(0 if ok else 1)
| 23.405405 | 88 | 0.590069 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 189 | 0.218245 |
4bf224e8c8f4fa354c35d1431a9957707b55eb9b | 331 | py | Python | thriftpy2_httpx_client/__init__.py | hans00/ThriftPy2-HTTPX-Client | e94944218915bcec6b2e0c00200f5d5e6f823053 | [
"MIT"
]
| null | null | null | thriftpy2_httpx_client/__init__.py | hans00/ThriftPy2-HTTPX-Client | e94944218915bcec6b2e0c00200f5d5e6f823053 | [
"MIT"
]
| 5 | 2021-07-13T13:56:17.000Z | 2022-03-02T02:43:46.000Z | thriftpy2_httpx_client/__init__.py | hans00/ThriftPy2-HTTPX-Client | e94944218915bcec6b2e0c00200f5d5e6f823053 | [
"MIT"
]
| 2 | 2021-07-13T06:08:59.000Z | 2022-03-16T22:15:57.000Z | __all__ = [
'make_aio_client',
'make_sync_client',
'TAsyncHTTPXClient',
'THTTPXClient',
]
from .aio import TAsyncHTTPXClient, make_client as make_aio_client
from .sync import THTTPXClient, make_client as make_sync_client
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| 23.642857 | 66 | 0.770393 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 77 | 0.232628 |
4bf41bde14de2173375d4d1e4381757de1699557 | 3,553 | py | Python | kalc/model/kinds/Node.py | KellyGriffin/kalc | 9b78c4177ed9ffccbf1ecfbf9a7946286cd7c583 | [
"Apache-2.0"
]
| null | null | null | kalc/model/kinds/Node.py | KellyGriffin/kalc | 9b78c4177ed9ffccbf1ecfbf9a7946286cd7c583 | [
"Apache-2.0"
]
| null | null | null | kalc/model/kinds/Node.py | KellyGriffin/kalc | 9b78c4177ed9ffccbf1ecfbf9a7946286cd7c583 | [
"Apache-2.0"
]
| null | null | null | import sys
import random
from kalc.model.system.base import ModularKind
from typing import Set
from kalc.model.system.primitives import Label, StatusNode
from kalc.model.system.base import HasLabel
from kalc.misc.util import cpuConvertToAbstractProblem, memConvertToAbstractProblem
from kalc.misc.const import STATUS_NODE
from kalc.model.system.globals import GlobalVar
class Node(ModularKind, HasLabel):
# k8s attributes
metadata_ownerReferences__name: str
metadata_name: str
spec_priorityClassName: str
labels: Set[Label]
# pods: Set[mpod.Pod]
cpuCapacity: int
memCapacity: int
currentFormalCpuConsumption: int
currentFormalMemConsumption: int
currentRealMemConsumption: int
currentRealCpuConsumption: int
AmountOfPodsOverwhelmingMemLimits: int
isNull: bool
status: StatusNode
amountOfActivePods: int
searchable: bool
isSearched: bool
different_than: Set["Node"]
allocatedPodList: Set["Pod"]
allocatedPodList_length: int
directedPodList: Set["Pod"]
directedPodList_length: int
daemonset_podList: Set["Pod"]
daemonset_podList_lenght: int
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.metadata_name = "modelNode"+str(random.randint(100000000, 999999999))
# self.metadata_name = "model-default-name"
self.AmountOfPodsOverwhelmingMemLimits = 0
self.currentFormalCpuConsumption = 0
self.currentFormalMemConsumption = 0
self.currentRealCpuConsumption = 0
self.currentRealMemConsumption = 0
self.cpuCapacity = 0
self.memCapacity = 0
self.isNull = False
self.status = STATUS_NODE["Active"]
self.amountOfActivePods = 0
self.searchable = True
self.isSearched = False
self.allocatedPodList_length = 0
self.directedPodList_length = 0
self.daemonset_podList_lenght = 0
def hook_after_create(self, object_space):
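        # Count this node in the global tally and record pairwise distinctness with every
        # node already present in the object space.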
globalVar = next(filter(lambda x: isinstance(x, GlobalVar), object_space))
globalVar.amountOfNodes += 1
nodes = filter(lambda x: isinstance(x, Node), object_space)
for node in nodes:
if node != self:
self.different_than.add(node)
node.different_than.add(self)
def hook_after_load(self, object_space):
globalVar = next(filter(lambda x: isinstance(x, GlobalVar), object_space))
globalVar.amountOfNodes += 1
nodes = filter(lambda x: isinstance(x, Node), object_space)
for node in nodes:
if node != self:
self.different_than.add(node)
node.different_than.add(self)
@property
def status_allocatable_memory(self):
pass
@status_allocatable_memory.setter
def status_allocatable_memory(self, value):
self.memCapacity = memConvertToAbstractProblem(value)
@property
def status_allocatable_cpu(self):
pass
@status_allocatable_cpu.setter
def status_allocatable_cpu(self, value):
self.cpuCapacity = cpuConvertToAbstractProblem(value)
def __str__(self):
if str(self.metadata_name) == "None":
return "<unnamed node>"
return str(self.metadata_name)
# def __repr__(self):
# return 'Nodename : ' + str(self._get_value())
Node.NODE_NULL = Node("NULL")
Node.NODE_NULL.isNull = True
Node.NODE_NULL.status = STATUS_NODE["Inactive"]
Node.NODE_NULL.metadata_name = "Null-Node"
Node.NODE_NULL.searchable = False
| 33.838095 | 83 | 0.690684 | 2,994 | 0.842668 | 0 | 0 | 403 | 0.113425 | 0 | 0 | 242 | 0.068111 |
4bf46aef0cec7975f957c42ac0e9212705e2eac4 | 6,154 | py | Python | Betsy/Betsy/modules/summarize_fastqc_results.py | jefftc/changlab | 11da8c415afefcba0b0216238387c75aeb3a56ac | [
"MIT"
]
| 9 | 2017-01-13T02:38:41.000Z | 2021-04-08T00:44:39.000Z | Betsy/Betsy/modules/summarize_fastqc_results.py | jefftc/changlab | 11da8c415afefcba0b0216238387c75aeb3a56ac | [
"MIT"
]
| null | null | null | Betsy/Betsy/modules/summarize_fastqc_results.py | jefftc/changlab | 11da8c415afefcba0b0216238387c75aeb3a56ac | [
"MIT"
]
| 4 | 2017-01-05T16:25:25.000Z | 2019-12-12T20:07:38.000Z | from Module import AbstractModule
class Module(AbstractModule):
def __init__(self):
AbstractModule.__init__(self)
def run(
self, network, in_data, out_attributes, user_options, num_cores,
outfile):
import os
from genomicode import filelib
from genomicode import sortlib
from Betsy import module_utils as mlib
# Should be a folder of fastqc results.
fastqc_path = in_data.identifier
# Find all the FASTQC results.
x = filelib.list_files_in_path(fastqc_path, endswith="summary.txt")
x = [os.path.split(x)[0] for x in x]
paths = x
assert paths, "No FASTQC files found."
# Read the results.
all_results = [read_fastqc_results(x) for x in paths]
assert all_results
# Make table where the rows are the samples and the columns
# are the statistics.
sample2results = {}
for x in all_results:
assert x.sample not in sample2results
sample2results[x.sample] = x
all_statistics = all_results[0].statistics_order
all_samples = sortlib.sort_natural(sample2results)
table = []
header = [
"Sample", "Total Sequences", "Filtered Sequences",
"Sequence length", "GC"] + all_statistics
table.append(header)
for sample in all_samples:
results = sample2results[sample]
x1 = [sample]
x2 = [
results.total_sequences, results.filtered_sequences,
results.sequence_length, results.percent_gc]
x3 = [results.statistics[x] for x in all_statistics]
x = x1 + x2 + x3
assert len(x) == len(header)
table.append(x)
# Write out the table as text file.
TXT_FILE = "fastqc_summary.txt"
handle = open(TXT_FILE, 'w')
for x in table:
print >>handle, "\t".join(map(str, x))
handle.close()
x = mlib.get_config("txt2xls", which_assert_file=True, quote=True)
os.system("%s -b %s > %s" % (x, TXT_FILE, outfile))
filelib.assert_exists_nz(outfile)
def name_outfile(self, antecedents, user_options):
return "fastqc_summary.xls"
class FastQCResults:
def __init__(self, sample, total_sequences, filtered_sequences,
sequence_length, percent_gc, statistics, statistics_order):
# statistics is a dictionary of name of statistic -> status
# statistics_order is the order that the statistics were given
# in the fastqc output.
assert sorted(statistics) == sorted(statistics_order)
self.sample = sample
self.total_sequences = total_sequences
self.filtered_sequences = filtered_sequences
self.sequence_length = sequence_length
self.percent_gc = percent_gc
self.statistics = statistics.copy()
self.statistics_order = statistics_order[:]
def read_fastqc_results(fastqc_path):
import os
from genomicode import filelib
summary_file = os.path.join(fastqc_path, "summary.txt")
data_file = os.path.join(fastqc_path, "fastqc_data.txt")
filelib.assert_exists_nz(summary_file)
filelib.assert_exists_nz(data_file)
summary = read_fastqc_summary(summary_file)
data = read_fastqc_data(data_file)
# Figure out the sample names from the filenames.
samples = sorted([x[-1] for x in summary])
assert samples[0] == samples[-1], "%s %s" % (samples[0], samples[-1])
sample = samples[0]
if sample.lower().endswith(".gz"):
sample = sample[:-3]
if sample.lower().endswith(".fq"):
sample = sample[:-3]
if sample.lower().endswith(".fastq"):
sample = sample[:-6]
# Make the statistics dictionary.
statistics = {}
statistics_order = []
for x in summary:
status, statistic, x = x
assert statistic not in statistics
statistics[statistic] = status
statistics_order.append(statistic)
x = FastQCResults(
sample, data["total_sequences"], data["filtered_sequences"],
data["sequence_length"], data["percent_gc"],
statistics, statistics_order)
return x
def read_fastqc_summary(filename):
# Return list of (<status>, <statistic>, <filename>)
import os
from genomicode import filelib
assert os.path.exists(filename)
data = []
for x in filelib.read_cols(filename):
assert len(x) == 3
status, statistic, filename = x
data.append((status, statistic, filename))
return data
def read_fastqc_data(filename):
# Return a dictionary of:
# total_sequences <int>
# filtered_sequences <int>
# sequence_length <str> "205", "15-205"
# percent_gc <float>
from genomicode import parselib
data = {}
for line in open(filename):
# Line seems to end with:
# 'Total Sequences\t1056547\t\n'
# Not enough just to strip \r\n.
#cols = line.rstrip("\r\n").split("\t")
cols = line.rstrip().split("\t")
if line.startswith("Total Sequences"):
assert len(cols) == 2, repr(line)
data["total_sequences"] = int(cols[1])
elif line.startswith("Filtered Sequences"):
assert len(cols) == 2
data["filtered_sequences"] = int(cols[1])
elif line.startswith("Sequences flagged as poor quality"):
# Seems to be alternative to "Filtered Sequences".
assert len(cols) == 2
data["filtered_sequences"] = int(cols[1])
elif line.startswith("Sequence length"):
assert len(cols) == 2
data["sequence_length"] = cols[1]
elif line.startswith("%GC"):
assert len(cols) == 2
data["percent_gc"] = float(cols[1])/100
expected = [
"total_sequences", "filtered_sequences", "sequence_length",
"percent_gc"]
x = [x for x in expected if x not in data]
assert not x, "Missing (%s) from fastqc_data: %s" % (
parselib.pretty_list(x), filename)
return data
| 34.573034 | 76 | 0.614722 | 2,958 | 0.480663 | 0 | 0 | 0 | 0 | 0 | 0 | 1,393 | 0.226357 |
4bf674c2dd9e1aaac9f80a20682c800896278be3 | 792 | py | Python | propnet/models/__init__.py | nile0316/propnet | 3e1f1476c70a878c6eb43587c328d108b0e2a410 | [
"BSD-3-Clause-LBNL"
]
| 57 | 2018-01-09T14:56:20.000Z | 2022-02-24T11:44:42.000Z | propnet/models/__init__.py | ruriboshi/propnet | 770703fb4fc344f785f89c02f26b31ea5733d2bd | [
"BSD-3-Clause-LBNL"
]
| 214 | 2017-09-26T23:31:09.000Z | 2022-03-14T04:50:58.000Z | propnet/models/__init__.py | nile0316/propnet | 3e1f1476c70a878c6eb43587c328d108b0e2a410 | [
"BSD-3-Clause-LBNL"
]
| 26 | 2017-10-29T21:34:22.000Z | 2022-01-12T05:59:12.000Z | # noinspection PyUnresolvedReferences
import propnet.symbols
from propnet.models import serialized, python, composite
from propnet.core.registry import Registry
# This is just to enable importing the model directly from this module for example code generation
def _update_globals():
for name, model in Registry("models").items():
if model.is_builtin:
globals()[name] = model
def add_builtin_models_to_registry(register_symbols=True):
if register_symbols:
propnet.symbols.add_builtin_symbols_to_registry()
serialized.add_builtin_models_to_registry(register_symbols=False)
python.add_builtin_models_to_registry(register_symbols=False)
composite.add_builtin_models_to_registry(register_symbols=False)
_update_globals()
_update_globals()
| 33 | 98 | 0.792929 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 143 | 0.180556 |
4bf6a8cffebce41ae5095ad681541b2d2a477027 | 1,369 | py | Python | python/clean_dataset.py | catarinaacsilva/user_mapping_twitter | 7350ed35b465a7db6747c4035e7b119bff23131d | ["MIT"] | null | null | null | python/clean_dataset.py | catarinaacsilva/user_mapping_twitter | 7350ed35b465a7db6747c4035e7b119bff23131d | ["MIT"] | null | null | null | python/clean_dataset.py | catarinaacsilva/user_mapping_twitter | 7350ed35b465a7db6747c4035e7b119bff23131d | ["MIT"] | null | null | null |
import csv
import re
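# Matches every character that is not an ASCII letter; used to strip digits and punctuation from tokens.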
regex = re.compile('[^a-zA-Z]')
def f7(seq):
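    # Order-preserving de-duplication: keeps only the first occurrence of each item.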
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
def clean_dataset(screen_name, n_tweets=300):
# open CSV file
all_words = []
with open('%s_tweets.csv' % screen_name, 'r') as f:
reader = csv.reader(f)
c = 0
for row in reader:
if len(row) > 0:
c += 1
words = row[0].split()
for w in words:
s = regex.sub('', w.lower()).strip()
if(len(s) > 2 and len(s) < 13):
all_words.append(s)
if c >= n_tweets:
break
# Filter out repetition
    # But since we are building shingles, there is no need to.
# final_words = f7(all_words)
#outtweets = [[word] for word in final_words]
outtweets = [[word] for word in all_words]
#print(final_words)
with open('%s_tweets_words.csv' % screen_name, 'w') as f:
writer = csv.writer(f, lineterminator='\n')
writer.writerows(outtweets)
if __name__ == '__main__':
for user in ['katyperry', 'TheEllenShow', 'YouTube', 'realDonaldTrump', 'BillGates',
'nytimes', 'CNN', 'espn', 'NASA', 'aliciakeys']:
clean_dataset(user)
| 30.422222 | 89 | 0.519357 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 357 | 0.260774 |
4bf72918258e1f5f04c1079f6fc0ade0637b2962 | 4,690 | py | Python | kpext/kp_crfsuite.py | snovd/sdavid-tests | c5f7e60f83ecb2d4cbaec18fff84861907f59c27 | ["MIT"] | null | null | null | kpext/kp_crfsuite.py | snovd/sdavid-tests | c5f7e60f83ecb2d4cbaec18fff84861907f59c27 | ["MIT"] | null | null | null | kpext/kp_crfsuite.py | snovd/sdavid-tests | c5f7e60f83ecb2d4cbaec18fff84861907f59c27 | ["MIT"] | null | null | null |
#!/usr/bin/python
import sys
import os
from nltk.tokenize import TreebankWordTokenizer as Tokenizer
from nltk.tag.perceptron import PerceptronTagger
import operator
from itertools import chain
import nltk
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.preprocessing import LabelBinarizer
import sklearn
import pycrfsuite
import re
import kpcommon as kpc
import mdb_common_lib as mdbcl
if __name__ == "__main__":
try:
debug = True if sys.argv[-1] == "debug" else False
debug_tests = 3
file_count = 0
dir_corpus = sys.argv[1]
dir_output = sys.argv[2]
try:
training_crfsuite = sys.argv[3]
except:
training_crfsuite = 'keyphrase.crfsuite'
tokenizer = Tokenizer()
#pos
tagger = PerceptronTagger()
extra_features = True
qr = mdbcl.QueryResources()
crftagger = pycrfsuite.Tagger()
crftagger.open(training_crfsuite)
#test_sents = []
for (dirname, _, filenames) in os.walk(dir_corpus):
for f in filenames:
ext = f[-4:]
if ext == '.ann':
file_count += 1
if debug and file_count > debug_tests:
break
file_text = os.path.join(dirname, f[:-4] + ".txt")
text_file = open(file_text, "r")
file_kpe = os.path.join(dir_output, f[:-4] + ".ann")
kpe_file = open(file_kpe, "w")
raw_text = unicode(text_file.read(), encoding="utf-8")
tokens = tokenizer.tokenize(raw_text)
tagged_text = [t + ("None",) for t in tagger.tag(tokens)]
text_file.close()
#test_sents.append(tagged_text)
if extra_features:
X_test = kpc.sent2features_extra(tagged_text, qr)
else:
X_test = kpc.sent2features(tagged_text)
is_not_kp = "None"
tmp_label = is_not_kp
new_kp = []
kp_list = []
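                    # Group tokens into keyphrases: each "B-<label>" tag closes the phrase
                    # collected since the previous "B-" tag and starts a new one; every token
                    # up to the next "B-" tag is appended to the current phrase.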
for kp in zip(crftagger.tag(X_test), [tt[0] for tt in tagged_text]):
if debug and False:
print >> sys.stderr, " ---- ", kp
if kp[0][0:2] == "B-":
if new_kp and tmp_label != is_not_kp:
kp_list.append((tmp_label, " ".join(new_kp)))
tmp_label = kp[0][2:]
new_kp = []
new_kp.append(kp[1])
if new_kp:
kp_list.append((tmp_label, " ".join(new_kp)))
if debug and False:
print >> sys.stderr, raw_text
kp_index = 0
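                    # Recover character offsets for each predicted keyphrase by searching the
                    # raw text, and write brat-style "T<n>" annotation lines to the .ann file.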
for kp in kp_list:
print kp
kp_iter_counter = 0
for m in re.finditer("\W?(" + re.escape(kp[1]) + ")\W", raw_text):
kp_iter_counter += 1
kp_index += 1
#print kp_iter_counter, m.groups()
start = m.start(1)
end = m.end(1)
term_string = "T" + str(kp_index) + "\t" + kp[0] + " " + str(start) + " " + str(end) + "\t" + raw_text[start:end]
term_string = term_string.encode("utf-8")
print >> kpe_file, term_string
#tmp_kps_candidates.append((start, end, m.span(1), kp, raw_text[start:end]))
                        if debug and kp_iter_counter == 0:
                            """
                            There is an error here and in the projections.
                            The match is made by tokens.
                            When there is a semi-colon, comma or ( ), an extra space appears.
                            """
#print >> sys.stderr, raw_text
print >> sys.stderr, kp_iter_counter, ": ", kp[1].encode("utf-8")
kpe_file.close()
except:
print >> sys.stderr
print >> sys.stderr, "usage: python", sys.argv[0], "<corpus_dir_path> <output_dir_path>"
print >> sys.stderr, "example:"
print >> sys.stderr, " python", sys.argv[0], "some/path/to/corpus/ some/path/to/output/"
print >> sys.stderr, "Error: ", sys.exc_info()
| 41.875 | 141 | 0.465885 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 754 | 0.160768 |
4bf9bd37e91a5feca68c63420808cdbf5f96022e | 6,736 | py | Python | models/analysis_transform.py | LiuLei95/PyTorch-Learned-Image-Compression-with-GMM-and-Attention | 484aced5bea25fbc1ba1380f4ab81bda9b099c1e | ["Apache-2.0"] | 27 | 2021-07-28T01:33:02.000Z | 2022-03-18T04:01:02.000Z | models/analysis_transform.py | LiuLei95/PyTorch-Learned-Image-Compression-with-GMM-and-Attention | 484aced5bea25fbc1ba1380f4ab81bda9b099c1e | ["Apache-2.0"] | 5 | 2021-11-13T05:58:51.000Z | 2022-02-13T09:07:44.000Z | models/analysis_transform.py | LiuLei95/PyTorch-Learned-Image-Compression-with-GMM-and-Attention | 484aced5bea25fbc1ba1380f4ab81bda9b099c1e | ["Apache-2.0"] | 1 | 2021-08-21T13:14:28.000Z | 2021-08-21T13:14:28.000Z |
#!/Library/Frameworks/Python.framework/Versions/3.5/bin/python3.5
import math
import torch.nn as nn
import torch
from .GDN import GDN
from .attention import Attention
# class Analysis_transform(nn.Module):
# def __init__(self, num_filters=128):
# super(Analysis_transform, self).__init__()
# self.conv_shortcut0 = nn.Conv2d(3, num_filters, 1, stride=2, padding=0)
# self.conv0 = nn.Conv2d(3, num_filters, 3, stride=2, padding=1)
# self.conv1 = nn.Conv2d(num_filters, num_filters, 3, stride=1, padding=1)
# self.leaky_relu1 = nn.LeakyReLU()
# self.conv2 = nn.Conv2d(num_filters, num_filters, 3, stride=1, padding=1)
# self.leaky_relu2 = nn.LeakyReLU()
# self.conv_shortcut = nn.Conv2d(num_filters, num_filters, 1, stride=2, padding=0)
# self.conv3 = nn.Conv2d(num_filters, num_filters, 3, stride=2, padding=1)
# self.leaky_relu3 = nn.LeakyReLU()
# self.conv4 = nn.Conv2d(num_filters, num_filters, 3, stride=1, padding=1)
# self.gdn = GDN(num_filters)
# # self.leaky_relu4 = nn.LeakyReLU()
# self.conv5 = nn.Conv2d(num_filters, num_filters, 3, stride=2, padding=1, bias=False)
# self.attention1 = Attention(num_filters)
# self.attention2 = Attention(num_filters)
#
#
# def forward(self, x):
# for i in range(4):
# if i > 0:
# x2 = self.conv1(x)
# x2 = self.leaky_relu1(x2)
# # print("a 3x3 1")
# # print("%d"%(i), x2.shape)
# x2 = self.conv2(x2)
# x2 = self.leaky_relu2(x2)
# # print("b 3x3 1")
# # print("%d"%(i), x2.shape)
# x = x + x2
# # print("resblock result: ", x.shape)
#
#
# if i == 0:
# shortcut_tensor = self.conv_shortcut0(x)
# x = self.conv0(x)
# x = self.leaky_relu3(x)
# # print("c 3x3 2")
# # print("%d"%(i), x.shape)
# x = self.conv4(x)
# # x = self.leaky_relu4(x)
# x = self.gdn(x)
# # print("d 3x3 1")
# # print("%d"%(i), x.shape)
# x = x + shortcut_tensor
# # print("resblock result: ", x.shape)
# elif i < 3:
# shortcut_tensor = self.conv_shortcut(x)
# x = self.conv3(x)
# x = self.leaky_relu3(x)
# # print("c 3x3 2")
# # print("%d"%(i), x.shape)
# x = self.conv4(x)
# # x = self.leaky_relu4(x)
# x = self.gdn(x)
# # print("d 3x3 1")
# # print("%d"%(i), x.shape)
# x = x + shortcut_tensor
# # print("resblock result: ", x.shape)
# if i == 1:
# # Attenation
# x = self.attention1(x)
#
# else:
# x = self.conv5(x)
# x = self.attention2(x)
#
# return x
class Analysis_transform(nn.Module):
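    """
    Analysis (encoder) transform: four residual stages with stride-2
    downsampling convolutions, GDN nonlinearities and two attention modules,
    mapping an input image to a latent tensor with `num_filters` channels at
    1/16 of the input resolution.
    """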
def __init__(self, num_filters=128):
super(Analysis_transform, self).__init__()
# i = 0
self.b0_shortcut = nn.Conv2d(3, num_filters, 1, stride=2)
self.b0_layer2 = nn.Conv2d(3, num_filters, 3, stride=2, padding=1)
self.b0_layer2_relu = nn.LeakyReLU()
self.b0_layer3 = nn.Conv2d(num_filters, num_filters, 3, stride=1, padding=1)
self.b0_layer3_GDN = GDN(num_filters)
# i = 1
self.b1_layer0 = nn.Conv2d(num_filters, num_filters, 3, stride=1, padding=1)
self.b1_layer0_relu = nn.LeakyReLU()
self.b1_layer1 = nn.Conv2d(num_filters, num_filters, 3, stride=1, padding=1)
self.b1_layer1_relu = nn.LeakyReLU()
self.b1_shortcut = nn.Conv2d(num_filters, num_filters, 1, stride=2)
self.b1_layer2 = nn.Conv2d(num_filters, num_filters, 3, stride=2, padding=1)
self.b1_layer2_relu = nn.LeakyReLU()
self.b1_layer3 = nn.Conv2d(num_filters, num_filters, 3, stride=1, padding=1)
self.b1_layer3_GDN = GDN(num_filters)
self.attention1 = Attention(num_filters)
# i = 2
self.b2_layer0 = nn.Conv2d(num_filters, num_filters, 3, stride=1, padding=1)
self.b2_layer0_relu = nn.LeakyReLU()
self.b2_layer1 = nn.Conv2d(num_filters, num_filters, 3, stride=1, padding=1)
self.b2_layer1_relu = nn.LeakyReLU()
self.b2_shortcut = nn.Conv2d(num_filters, num_filters, 1, stride=2)
self.b2_layer2 = nn.Conv2d(num_filters, num_filters, 3, stride=2, padding=1)
self.b2_layer2_relu = nn.LeakyReLU()
self.b2_layer3 = nn.Conv2d(num_filters, num_filters, 3, stride=1, padding=1)
self.b2_layer3_GDN = GDN(num_filters)
# i = 3
self.b3_layer0 = nn.Conv2d(num_filters, num_filters, 3, stride=1, padding=1)
self.b3_layer0_relu = nn.LeakyReLU()
self.b3_layer1 = nn.Conv2d(num_filters, num_filters, 3, stride=1, padding=1)
self.b3_layer1_relu = nn.LeakyReLU()
self.b3_layer2 = nn.Conv2d(num_filters, num_filters, 3, stride=2, padding=1, bias=False)
self.attention2 = Attention(num_filters)
def forward(self, x):
# i = 0
shortcut0 = self.b0_shortcut(x)
b0 = self.b0_layer2(x)
b0 = self.b0_layer2_relu(b0)
b0 = self.b0_layer3(b0)
b0 = self.b0_layer3_GDN(b0)
b0 += shortcut0
# i = 1
b1 = self.b1_layer0(b0)
b1 = self.b1_layer0_relu(b1)
b1 = self.b1_layer1(b1)
b1 = self.b1_layer1_relu(b1)
b1 += b0
shortcut1 = self.b1_shortcut(b1)
b1 = self.b1_layer2(b1)
b1 = self.b1_layer2_relu(b1)
b1 = self.b1_layer3(b1)
b1 = self.b1_layer3_GDN(b1)
b1 += shortcut1
b1 = self.attention1(b1)
# i = 2
b2 = self.b2_layer0(b1)
b2 = self.b2_layer0_relu(b2)
b2 = self.b2_layer1(b2)
b2 = self.b2_layer1_relu(b2)
b2 += b1
shortcut2 = self.b2_shortcut(b2)
b2 = self.b2_layer2(b2)
b2 = self.b2_layer2_relu(b2)
b2 = self.b2_layer3(b2)
b2 = self.b2_layer3_GDN(b2)
b2 += shortcut2
# i = 3
b3 = self.b3_layer0(b2)
b3 = self.b3_layer0_relu(b3)
b3 = self.b3_layer1(b3)
b3 = self.b3_layer1_relu(b3)
b3 += b2
b3 = self.b3_layer2(b3)
b3 = self.attention2(b3)
return b3
if __name__ == "__main__":
analysis_transform = Analysis_transform()
input_image = torch.zeros([1,3,256,256])
feature = analysis_transform(input_image)
print(feature.shape)
| 38.936416 | 96 | 0.55478 | 3,442 | 0.510986 | 0 | 0 | 0 | 0 | 0 | 0 | 2,995 | 0.444626 |
4bf9cae86ed3b64532d63a132ed50c966d6bd0b4 | 826 | py | Python | app/models.py | Katze2/Flask-template | 99925f6bfbaf92ace9b0fd7c792b989ed90a7e00 | ["MIT"] | null | null | null | app/models.py | Katze2/Flask-template | 99925f6bfbaf92ace9b0fd7c792b989ed90a7e00 | ["MIT"] | null | null | null | app/models.py | Katze2/Flask-template | 99925f6bfbaf92ace9b0fd7c792b989ed90a7e00 | ["MIT"] | null | null | null |
# -*- encoding: utf-8 -*-
from app import db
class ModelExample(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(250))
content = db.Column(db.Text)
date = db.Column(db.DateTime)
class User(db.Model):
id = db.Column(db.Integer, primary_key = True)
user = db.Column(db.String(64), unique = True)
password = db.Column(db.String(500))
name = db.Column(db.String(500))
email = db.Column(db.String(120), unique = True)
# posts = db.relationship('Post', backref = 'author', lazy = 'dynamic')
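    # The following methods implement the user-object protocol expected by Flask-Login.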
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return unicode(self.id)
def __repr__(self):
        return '<User %r>' % (self.user)  # the model has no "nickname" field; use the username column
| 24.294118 | 75 | 0.634383 | 775 | 0.938257 | 0 | 0 | 0 | 0 | 0 | 0 | 107 | 0.12954 |
4bfa32e39dba78be88b0f520adb14e8e58f436e8 | 938 | py | Python | test/hlt/pytest/python/com/huawei/iotplatform/client/dto/DeviceServiceB.py | yuanyi-thu/AIOT- | 27f67d98324593c4c6c66bbd5e2a4aa7b9a4ac1e | ["BSD-3-Clause"] | 128 | 2018-10-29T04:11:47.000Z | 2022-03-07T02:19:14.000Z | test/hlt/pytest/python/com/huawei/iotplatform/client/dto/DeviceServiceB.py | yuanyi-thu/AIOT- | 27f67d98324593c4c6c66bbd5e2a4aa7b9a4ac1e | ["BSD-3-Clause"] | 40 | 2018-11-02T00:40:48.000Z | 2021-12-07T09:33:56.000Z | test/hlt/pytest/python/com/huawei/iotplatform/client/dto/DeviceServiceB.py | yuanyi-thu/AIOT- | 27f67d98324593c4c6c66bbd5e2a4aa7b9a4ac1e | ["BSD-3-Clause"] | 118 | 2018-10-29T08:43:57.000Z | 2022-01-07T06:49:25.000Z |
class DeviceServiceB(object):
def __init__(self):
self.serviceId = None
self.reportedProps = None
self.desiredProps = None
self.eventTime = None
self.serviceType = None
def getServiceId(self):
return self.serviceId
def setServiceId(self, serviceId):
self.serviceId = serviceId
def getReportedProps(self):
return self.reportedProps
def setReportedProps(self, reportedProps):
self.reportedProps = reportedProps
def getDesiredProps(self):
return self.desiredProps
def setDesiredProps(self, desiredProps):
self.desiredProps = desiredProps
def getEventTime(self):
return self.eventTime
def setEventTime(self, eventTime):
self.eventTime = eventTime
def getServiceType(self):
return self.serviceType
def setServiceType(self, serviceType):
self.serviceType = serviceType
| 24.051282 | 46 | 0.66951 | 937 | 0.998934 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
4bfb4d961bec58ff15fe5b25777f51138ea3c5dc | 1,516 | py | Python | tests/dataset_balancer_test.py | MarinkoBa/Hate-Speech-Classification | 72f6bbe93b823daefa138df4f81a3a4df5b34c4c | ["MIT"] | null | null | null | tests/dataset_balancer_test.py | MarinkoBa/Hate-Speech-Classification | 72f6bbe93b823daefa138df4f81a3a4df5b34c4c | ["MIT"] | null | null | null | tests/dataset_balancer_test.py | MarinkoBa/Hate-Speech-Classification | 72f6bbe93b823daefa138df4f81a3a4df5b34c4c | ["MIT"] | 1 | 2020-12-14T13:56:50.000Z | 2020-12-14T13:56:50.000Z |
# -*- coding: utf-8 -*-
from src.utils.get_data import load_data
from src.utils.get_data import get_datasets
from src.utils.get_data import concatenate_datasets
from src.utils.dataset_balancer import balance_data
import os
import pandas as pd
import unittest
class TestDataBalancer(unittest.TestCase):
def setUp(self):
self.df = load_data(os.path.join(os.path.pardir, 'src', 'data', 'tweets.csv'))
self.df2, self.df3 = get_datasets(os.path.join(os.path.pardir, 'src', 'data', 'labeled_data.csv'),
os.path.join(os.path.pardir, 'src', 'data',
'hatespeech_text_label_vote_RESTRICTED_100K.csv'))
self.df_concatenated = concatenate_datasets(os.path.join(os.path.pardir, 'src', 'data', 'tweets.csv'),
self.df2,
self.df3)
def test_balance_data(self):
x_balanced, y_balanced = balance_data(self.df_concatenated[['text']],
self.df_concatenated[['hate_speech']])
self.assertIsInstance(y_balanced,
pd.core.frame.DataFrame)
self.assertIsInstance(x_balanced,
pd.core.frame.DataFrame)
        self.assertEqual(x_balanced.shape, y_balanced.shape)
if __name__ == "__main__":
unittest.main()
| 35.255814 | 110 | 0.550792 | 1,154 | 0.761214 | 0 | 0 | 0 | 0 | 0 | 0 | 186 | 0.122691 |
4bfb89534390da200300df58f33c846fbb2cba39 | 12,695 | py | Python | gptorch/models/sparse_gpr.py | cics-nd/gptorch | 80c62a227c466bb7fa29e11263e94c41f96ff93f | ["MIT"] | 28 | 2018-11-05T03:01:18.000Z | 2021-04-02T18:11:05.000Z | gptorch/models/sparse_gpr.py | cics-nd/gptorch | 80c62a227c466bb7fa29e11263e94c41f96ff93f | ["MIT"] | 7 | 2019-06-04T21:43:40.000Z | 2021-11-04T04:19:26.000Z | gptorch/models/sparse_gpr.py | cics-nd/gptorch | 80c62a227c466bb7fa29e11263e94c41f96ff93f | ["MIT"] | 8 | 2019-04-03T12:28:05.000Z | 2021-12-23T10:15:34.000Z |
#
# Yinhao Zhu, May 01, 2017
#
"""
Sparse GP regression, including variational GP and others.
"""
from __future__ import absolute_import
import torch
import numpy as np
from torch.utils.data import TensorDataset, DataLoader
from torch.distributions.transforms import LowerCholeskyTransform
from ..model import Param
from ..functions import cholesky, trtrs
from ..mean_functions import Zero
from ..likelihoods import Gaussian
from ..util import TensorType, torch_dtype, as_tensor, kmeans_centers
from .gpr import GPR
from .base import GPModel
class _InducingPointsGP(GPModel):
"""
Parent class for GPs with inducing points
"""
def __init__(
self,
x,
y,
kernel,
num_inducing_points=None,
inducing_points=None,
mean_function=None,
likelihood=None,
):
"""
Assume Gaussian likelihood
Args:
observations (np.ndarray): Y, n x p
input (np.ndarray): X, n x q
kernel (gptorch.Kernel):
inducing_points (np.ndarray, optional): Z, m x q
num_inducing (int), optional): number of inducing inputs
Input, observations, and kernel must be specified, if both
``inducing_points`` and ``num_inducing`` are not set, 1/10 th of total
points (up to 100) will be draw randomly from input as the inducing
points.
"""
super().__init__(x, y, kernel, likelihood, mean_function)
if inducing_points is None:
if num_inducing_points is None:
num_inducing_points = np.clip(x.shape[0] // 10, 1, 100)
inducing_points = kmeans_centers(x, num_inducing_points,
perturb_if_fail=True)
# indices = np.random.permutation(len(x))[:num_inducing_points]
# inducing_points = TensorType(x[indices])
# Z stands for inducing input points as standard in the literature
self.Z = Param(as_tensor(inducing_points))
@property
def num_inducing(self) -> int:
"""
Number of inducing points
"""
return self.Z.shape[0]
class FITC(_InducingPointsGP):
"""
Fully Independent Training Conditional approximation for GP
References:
Snelson, Edward, and Zoubin Ghahramani. "Sparse Gaussian processes
using pseudo-inputs." Advances in neural information processing
systems 18 (2006): 1257.
Quinonero-Candela, Joaquin, and Carl Edward Rasmussen. "A unifying
view of sparse approximate Gaussian process regression." Journal of
Machine Learning Research 6.Dec (2005): 1939-1959.
"""
# TODO: add FITC for sparse GP regression
pass
class VFE(_InducingPointsGP):
"""
Variational Free Energy approximation for GP
Reference:
Titsias, Michalis K. "Variational Learning of Inducing Variables
in Sparse Gaussian Processes." AISTATS. Vol. 5. 2009.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
assert isinstance(
self.mean_function, Zero
), "Mean functions not implemented for VFE yet."
def log_likelihood(self, x=None, y=None):
"""
Computes the variational lower bound of the true log marginal likelihood
Eqn (9) in Titsias, Michalis K. "Variational Learning of Inducing Variables
in Sparse Gaussian Processes." AISTATS. Vol. 5. 2009.
"""
x = x if x is not None else self.X
y = y if y is not None else self.Y
if not x.shape[0] == y.shape[0]:
raise ValueError("X and Y must have same # data.")
num_inducing = self.num_inducing
num_data = x.shape[0]
d_out = self.output_dimension
# TODO: add mean_functions
# err = self.Y - self.mean_function(x)
err = self.Y
Kff_diag = self.kernel.Kdiag(x)
Kuf = self.kernel.K(self.Z, x)
# add jitter
Kuu = self.kernel.K(self.Z)
L = cholesky(Kuu)
A = trtrs(Kuf, L)
AAT = A @ A.t() / self.likelihood.variance.transform().expand_as(Kuu)
B = AAT + torch.eye(num_inducing, dtype=torch_dtype).to(AAT.device)
LB = cholesky(B)
# divide variance at the end
c = trtrs(A @ err, LB) / self.likelihood.variance.transform()
# Evidence lower bound
elbo = TensorType([-0.5 * d_out * num_data * np.log(2 * np.pi)]).to(c.device)
elbo -= d_out * LB.diag().log().sum()
elbo -= (
0.5 * d_out * num_data * self.likelihood.variance.transform().log()
)
elbo -= (
0.5
* (err.pow(2).sum() + d_out * Kff_diag.sum())
/ self.likelihood.variance.transform()
)
elbo += 0.5 * c.pow(2).sum()
elbo += 0.5 * d_out * AAT.diag().sum()
return elbo[0]
def _predict(self, x_new: TensorType, diag=True, x=None):
"""
Compute posterior p(f*|y), integrating out induced outputs' posterior.
:return: (mean, var/cov)
"""
x = x if x is not None else self.X
z = self.Z
z.requires_grad_(False)
num_inducing = z.size(0)
# err = self.Y - self.mean_function(x)
err = self.Y
Kuf = self.kernel.K(z, x)
# add jitter
Kuu = self.kernel.K(z)
Kus = self.kernel.K(z, x_new)
L = cholesky(Kuu)
A = trtrs(Kuf, L)
AAT = A @ A.t() / self.likelihood.variance.transform().expand_as(Kuu)
B = AAT + torch.eye(num_inducing, dtype=torch_dtype).to(AAT.device)
LB = cholesky(B)
# divide variance at the end
c = trtrs(A @ err, LB) / self.likelihood.variance.transform()
tmp1 = trtrs(Kus, L)
tmp2 = trtrs(tmp1, LB)
mean = tmp2.t() @ c
if diag:
var = (
self.kernel.Kdiag(x_new)
- tmp1.pow(2).sum(0).squeeze()
+ tmp2.pow(2).sum(0).squeeze()
)[:, None].expand_as(mean)
else:
var = self.kernel.K(x_new) + tmp2.t() @ tmp2 - tmp1.t() @ tmp1
return mean, var
def minibatch(loss_func):
"""
Decorator to use minibatching for a loss function (e.g. SVGP)
"""
def wrapped(obj, x=None, y=None):
if x is not None:
assert y is not None
else:
# Get from model:
if obj.batch_size is not None:
i = np.random.permutation(obj.num_data)[: obj.batch_size]
x, y = obj.X[i, :], obj.Y[i, :]
else:
x, y = obj.X, obj.Y
return loss_func(obj, x, y)
return wrapped
class SVGP(_InducingPointsGP):
"""
Sparse variational Gaussian process.
James Hensman, Nicolo Fusi, and Neil D. Lawrence,
"Gaussian processes for Big Data" (2013)
James Hensman, Alexander Matthews, and Zoubin Ghahramani,
"Scalable variational Gaussian process classification", JMLR (2015).
"""
def __init__(
self,
y,
x,
kernel,
num_inducing_points=None,
inducing_points=None,
mean_function=None,
likelihood=Gaussian(),
batch_size=None,
):
"""
:param batch_size: How many points to process in a minibatch of
training. If None, no minibatches are used.
"""
super().__init__(
y,
x,
kernel,
num_inducing_points=num_inducing_points,
inducing_points=inducing_points,
mean_function=mean_function,
likelihood=likelihood,
)
# assert batch_size is None, "Minibatching not supported yet."
self.batch_size = batch_size
# Parameters for the Gaussian variational posterior over the induced
# outputs.
# Note: induced_output_mean does NOT include the contribution due to the
# mean function.
self.induced_output_mean, self.induced_output_chol_cov = self._init_posterior()
@minibatch
def log_likelihood(self, x, y):
"""
Variational bound.
"""
if not x.shape[0] == y.shape[0]:
raise ValueError("X and Y must have same # data.")
chol_kuu = cholesky(self.kernel.K(self.Z))
# Marginal posterior q(f)'s mean & variance
f_mean, f_var = self._predict(x, diag=True, chol_kuu=chol_kuu)
marginal_log_likelihood = torch.stack(
[
self.likelihood.propagate_log(
torch.distributions.Normal(loc_i, torch.sqrt(v_i)), yi
)
for loc_i, v_i, yi in zip(f_mean.t(), f_var.t(), y.t())
]
).sum()
# Account for size of minibatch relative to the total dataset size:
marginal_log_likelihood *= self.num_data / x.shape[0]
mu_xu = self.mean_function(self.Z) # Prior mean
qu_mean = self.induced_output_mean + mu_xu
qu_lc = self.induced_output_chol_cov.transform()
# Each output dimension has its own Multivariate normal (different
# means, shared covariance); the joint distribution is the product
# across output dimensions.
qus = [
torch.distributions.MultivariateNormal(qu_i, scale_tril=qu_lc)
for qu_i in qu_mean.t()
]
# Each dimension has its own prior as well due to the mean function
# Being potentially different for each output dimension.
pus = [
torch.distributions.MultivariateNormal(mi, scale_tril=chol_kuu)
for mi in mu_xu.t()
]
kl = torch.stack(
[torch.distributions.kl_divergence(qu, pu) for qu, pu in zip(qus, pus)]
).sum()
return marginal_log_likelihood - kl
def _init_posterior(self):
"""
Get an initial guess at the variational posterior over the induced
outputs.
Just build a GP out of a few data and use its posterior.
This could be far worse than expected if the likelihood is non-Gaussian,
but we don't need this to be great--just good enough to get started.
"""
i = np.random.permutation(self.num_data)[0 : min(self.num_data, 100)]
x, y = self.X[i].data.numpy(), self.Y[i].data.numpy()
# Likelihood needs to be Gaussian for exact inference in GPR
likelihood = (
self.likelihood
if isinstance(self.likelihood, Gaussian)
else Gaussian(variance=0.01 * y.var())
)
model = GPR(
x, y, self.kernel, mean_function=self.mean_function, likelihood=likelihood
)
mean, cov = model.predict_f(self.Z, diag=False)
mean -= self.mean_function(self.Z)
chol_cov = cholesky(cov)
return Param(mean), Param(chol_cov, transform=LowerCholeskyTransform())
def _predict(self, x_new: TensorType, diag=True, chol_kuu=None, **kwargs):
"""
SVGP Prediction uses inducing points as sufficient statistics for the
posterior.
Could implement Marginalization of Gaussians (cf. PRML p. 93), but
something specific to (positive-definite) kernel matrices should
perform better.
Shapes of outputs are:
diag: both are [N x dy]
not diag: mean is [N x dy], cov is [N x N]
:param x_new: inputs to predict on.
:param diag: if True, return variance of prediction; False=full cov
:param chol_kuu: The Cholesky of the kernel matrix for the inducing
inputs (to enable reuse when computing the training loss)
:return: (torch.Tensor, torch.Tensor) mean & [co]variance
"""
chol_kuu = cholesky(self.kernel.K(self.Z)) if chol_kuu is None else chol_kuu
kuf = self.kernel.K(self.Z, x_new)
alpha = trtrs(kuf, chol_kuu).t()
# beta @ beta.t() = inv(L) @ S @ inv(L'), S=post cov of induced outs
beta = trtrs(self.induced_output_chol_cov.transform(), chol_kuu)
mu_x = self.mean_function(x_new)
# Remember: induced_output_mean doesn't include mean function, so no
# need to subtract it.
f_mean = alpha @ trtrs(self.induced_output_mean, chol_kuu) + mu_x
# gamma @ gamma.t() = Kfu @ inv(Kuu) @ S @ inv(Kuu) @ Kuf
gamma = alpha @ beta
if diag:
f_cov = (
self.kernel.Kdiag(x_new)
- torch.sum(alpha ** 2, dim=1)
+ torch.sum(gamma ** 2, dim=1)
)[:, None].expand_as(f_mean)
else:
f_cov = self.kernel.K(x_new) - alpha @ alpha.t() + gamma @ gamma.t()
return f_mean, f_cov
| 33.232984 | 87 | 0.59228 | 11,610 | 0.914533 | 0 | 0 | 1,888 | 0.14872 | 0 | 0 | 4,901 | 0.386058 |
4bfe5926292aa222488a49dbf22dd03f8782815e | 1,405 | py | Python | exercises/pt/test_01_11.py | Jette16/spacy-course | 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | ["MIT"] | 2,085 | 2019-04-17T13:10:40.000Z | 2022-03-30T21:51:46.000Z | exercises/pt/test_01_11.py | Jette16/spacy-course | 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | ["MIT"] | 79 | 2019-04-18T14:42:55.000Z | 2022-03-07T08:15:43.000Z | exercises/pt/test_01_11.py | Jette16/spacy-course | 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | ["MIT"] | 361 | 2019-04-17T13:34:32.000Z | 2022-03-28T04:42:45.000Z |
def test():
import spacy.matcher
assert isinstance(
matcher, spacy.matcher.Matcher
), "Você está inicializando o Comparador corretamente?"
assert (
"Matcher(nlp.vocab)" in __solution__
), "Você está inicializando o Comparador corretamente com o vocabulário compartilhado?"
assert (
len(pattern) == 2
), "A expressão deve descrever dois tokens (dois dicionários)."
assert isinstance(pattern[0], dict) and isinstance(
pattern[1], dict
), "Cada item da expressão deve conter um dicionário."
assert (
len(pattern[0]) == 1 and len(pattern[1]) == 1
), "Cada item na expressão deve conter apenas uma chave."
assert any(
pattern[0].get(key) == "iPhone" for key in ["text", "TEXT"]
), "Você está fazendo a comparação com o texto do token?"
assert any(
pattern[1].get(key) == "X" for key in ["text", "TEXT"]
), "Você está fazendo a comparação com o texto do token?"
assert (
'matcher.add("IPHONE_X_PATTERN"' in __solution__
), "Você está adicionando a expressão corretamente?"
assert (
"matches = matcher(doc)" in __solution__
), "Você está chamando o Comparador passando o doc como parâmetro?"
__msg__.good(
"Parabéns! Você identificou uma correspondência com sucesso: dois tokens "
"em doc[1:3] que correspondem a partição 'iPhone X'. "
)
| 39.027778 | 91 | 0.646263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 790 | 0.550907 |
4bfe8f82bf9964afbee833e2a996e71d61b97873 | 1,638 | py | Python | Code/list.py | sunjinshuai/Python | b4d76bc20e9d740108c98cb8d023ca5da3e6c070 | ["MIT"] | null | null | null | Code/list.py | sunjinshuai/Python | b4d76bc20e9d740108c98cb8d023ca5da3e6c070 | ["MIT"] | null | null | null | Code/list.py | sunjinshuai/Python | b4d76bc20e9d740108c98cb8d023ca5da3e6c070 | ["MIT"] | null | null | null |
list1 = ['physics', 'chemistry', 1997, 2000]
list2 = [1, 2, 3, 4, 5 ]
list3 = ["a", "b", "c", "d"]
print list1, list2, list3
# Accessing values in a list
# Use subscript indices to access values in a list; you can also slice with square brackets, as shown below:
print "list1[0]: ", list1[0]
print "list2[1:5]: ", list2[1:5]
# Updating a list
# You can modify or update items in a list; you can also use the append() method to add items, as shown below:
list = [] ## empty list
list.append('Google') ## add an element with append()
list.append('Python')
print list
# Deleting list elements
# Use the del statement to remove elements from a list, for example:
list1 = ['Python', 'iOS', 'Java', 'C++']
print list1
del list1[2]
print "After deleting value at index 2 : "
print list1
# Python list operators
# Lists respond to + and * much like strings: + concatenates lists and * repeats them.
list1 = ['Python', 'iOS', 'Java', 'C++']
print len(list1)
list2 = ['C', 'Ruby', 'Javastript']
print list1 + list2
print ['Python'] * 4
print 'iOS' in list1
for str in list1:
print str
# Python list slicing
list1 = ['Python', 'iOS', 'Java', 'C++']
print list1[2]
print list1[-2]
print list1[1:]
# The cmp() method compares the elements of two lists.
# Syntax:
# cmp(list1, list2)
# If the compared elements are of the same type, their values are compared and the result is returned.
# If the two elements are not of the same type, check whether they are numbers.
# If they are numbers, perform the necessary numeric coercion and then compare.
# If only one side is a number, the other side is "greater" (numbers are "smallest").
# Otherwise, compare by the alphabetical order of the type names.
# If one list is exhausted first, the longer list is "greater".
# If both lists are exhausted and all elements are equal, the result is a tie and 0 is returned.
list1, list2 = [123, 'xyz'], [456, 'abc']
print cmp(list1, list2);
print cmp(list2, list1);
list3 = list2 + [786];
list4 = [123, 'xyz']
print cmp(list2, list3)
print cmp(list1, list4)
# The extend() function appends all the values of another sequence to the end of a list (extending the original list with a new one).
# Syntax:
# list.extend(seq)
# The method has no return value, but it adds the new content to the existing list.
aList = [123, 'xyz', 'zara', 'abc', 123];
bList = [2009, 'manni'];
aList.extend(bList)
print "Extended List : ", aList | 20.222222 | 49 | 0.651404 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,753 | 0.699521 |
ef0025261578f6f3b594dd1953fdfd38e1b064c9 | 10,015 | py | Python | xyw_macro/notify.py | xue0228/keyboard | dcb0def1d87a9197676c0f405b980a67e128ab24 | ["MIT"] | null | null | null | xyw_macro/notify.py | xue0228/keyboard | dcb0def1d87a9197676c0f405b980a67e128ab24 | ["MIT"] | null | null | null | xyw_macro/notify.py | xue0228/keyboard | dcb0def1d87a9197676c0f405b980a67e128ab24 | ["MIT"] | null | null | null |
import tkinter as tk
import tkinter.font as tf
from tkinter import ttk
from tkinter import messagebox
from tkinter.filedialog import askopenfilename, askdirectory
import time
import threading
from functools import wraps
from xyw_macro.utils import SingletonType
from xyw_macro.contants import SLEEP_TIME
class Notification(metaclass=SingletonType):
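    """
    Singleton, always-on-top tkinter overlay used to flash a short status message.
    show()/hide() keep a reference count so that with nested callers the window
    is only hidden once the last caller releases it.
    """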
def __init__(self, text='xyw', fg='white', bg='black'):
self.__text = text
self.__fg = fg
self.__bg = bg
self.__visible = False
self.__vnum = 0
self.__window, self.__label, self.__width = self.__init__window()
self.set_visible(self.__visible)
def show(self):
if self.__vnum == 0:
self.set_visible(True)
self.__vnum = self.__vnum + 1
def hide(self):
self.__vnum = self.__vnum - 1
if self.__vnum == 0:
self.set_visible(False)
def __init__window(self):
window = tk.Tk()
window.wm_attributes('-topmost', True)
screen_width = window.winfo_screenwidth()
screen_height = window.winfo_screenheight()
width = round(screen_width / 10)
height = round(screen_width / 10)
window.geometry('{}x{}+{}+{}'.format(width, height, (screen_width - width) // 2, (screen_height - height) // 2))
window.overrideredirect(True)
window.configure(background=self.__bg)
window.attributes('-alpha', 0.7)
font_size = self.__get_font_size(width)
outer_border_size = round(font_size * 0.08)
inner_border_size = round(font_size * 0.05)
font = tf.Font(size=font_size, weight=tf.BOLD)
label_border = tk.LabelFrame(window, background=self.__fg, relief='flat')
label = tk.Label(label_border, text=self.__text, font=font, bg=self.__bg, fg=self.__fg,
height=height, width=width, justify='center', anchor='center',
borderwidth=0, relief='flat')
label_border.pack(fill='both', expand=True, padx=outer_border_size, pady=outer_border_size)
label.pack(fill='both', expand=True, padx=inner_border_size, pady=inner_border_size)
return window, label, width
def get_text(self):
"""
        Return the label text.
:return:
"""
return self.__text
def __get_font_size(self, width):
        # Split the text on newline characters
texts = self.__text.split('\n')
        # Half-width (ASCII) character set
alnum = r'abcdefghijklmnopqrstuvwxyz0123456789+-*/=`~!@#$%^&*()_\|?><.,'
        # Compute the maximum single-line character length
length = [1]
for item in texts:
tem = 0
for i in item:
if i.lower() in alnum:
                    # A half-width character counts as half a character width
tem = tem + 0.5
else:
                    # Any other character counts as a full character width
tem = tem + 1
length.append(tem)
length = max(length)
        # Scale the font size to the longest line length
font_size = round(width * 0.6 / length)
return font_size
def set_text(self, text):
"""
        Set the label text.
:param text:
:return:
"""
self.__text = text
font_size = self.__get_font_size(self.__width)
        # Update the label text and font
font = tf.Font(size=font_size, weight=tf.BOLD)
self.__label.config(text=self.__text, font=font)
def get_visible(self):
"""
        Return whether the window is visible.
:return:
"""
return self.__visible
def set_visible(self, visible):
"""
        Set the window visibility.
:param visible:
:return:
"""
self.__visible = visible
if self.__visible:
self.__window.update()
self.__window.deiconify()
else:
self.__window.withdraw()
def run(self):
"""
        Start the window main loop.
:return:
"""
self.__window.mainloop()
text = property(get_text, set_text)
visible = property(get_visible, set_visible)
class InputField:
def __init__(self, name, type='entry', default=None, options=None, focus=False):
self.name = name
self.type = type
self.default = default
self.options = options
self.focus = focus
@staticmethod
def select_file(var):
filepath = askopenfilename()
var.set(filepath)
@staticmethod
def select_dir(var):
dirpath = askdirectory()
var.set(dirpath)
def draw_frame(self, window):
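        # Render one labelled input row inside `window` and return the tk.StringVar
        # bound to it; the widget created depends on self.type
        # ('entry', 'file', 'dir' or 'combobox').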
var = tk.StringVar()
frame = tk.Frame(window, takefocus=True)
frame.pack(fill=tk.X, padx=10, pady=2, expand=1)
tk.Label(frame, text=self.name).pack(side=tk.TOP, anchor=tk.W)
if self.type == 'entry':
widget = tk.Entry(frame, show=None, textvariable=var)
widget.pack(fill=tk.X, side=tk.TOP)
if self.default is not None:
var.set(self.default)
elif self.type == 'file':
widget = tk.Entry(frame, show=None, textvariable=var, state=tk.DISABLED)
widget.pack(fill=tk.X, side=tk.LEFT, expand=1)
tk.Button(frame, text='选择文件', command=lambda var=var: self.select_file(var)) \
.pack(fill=tk.X, side=tk.LEFT)
if self.default is not None:
var.set(self.default)
elif self.type == 'dir':
widget = tk.Entry(frame, show=None, textvariable=var, state=tk.DISABLED)
widget.pack(fill=tk.X, side=tk.LEFT, expand=1)
tk.Button(frame, text='选择文件夹', command=lambda var=var: self.select_dir(var)) \
.pack(fill=tk.X, side=tk.LEFT)
if self.default is not None:
var.set(self.default)
elif self.type == 'combobox':
widget = ttk.Combobox(frame, textvariable=var)
widget['values'] = self.options
widget.pack(fill=tk.X, side=tk.TOP)
if self.default is None:
widget.current(0)
else:
widget.current(self.default)
else:
            raise ValueError('no such type; select one of "entry", "file", "dir" or "combobox"')
if self.focus:
widget.focus_set()
return var
class InputBox:
"""
    Parameter input dialog class.
"""
def __init__(self, title='输入框', *args):
"""
        Initialize the instance.
        :param title: dialog title
"""
self.title = title
self.__args = args
self.top = None
self.vars = []
self.values = []
def show(self):
"""
        Show the input dialog.
        :return: list of the entered parameter values
"""
return self.top_window()
def clear_all(self):
for var in self.vars:
var.set('')
def close_window(self, flag=False):
if flag:
self.values = None
else:
self.values = [var.get() for var in self.vars]
self.top.destroy()
def top_window(self):
self.top = tk.Toplevel()
self.top.withdraw()
self.top.update()
self.top.wm_attributes('-topmost', True)
self.top.attributes('-toolwindow', True)
self.top.title(self.title)
self.top.grab_set()
screen_width = self.top.winfo_screenwidth()
screen_height = self.top.winfo_screenheight()
width = 300
height = (len(self.__args) * 2 + 1) * 30
self.top.geometry('{}x{}+{}+{}'
.format(width, height, (screen_width - width) // 2, (screen_height - height) // 2))
for field in self.__args:
if not isinstance(field, InputField):
raise TypeError('args must be <class InputField>')
self.vars.append(field.draw_frame(self.top))
frame = tk.Frame(self.top, takefocus=True)
frame.pack(fill=tk.X, padx=10, pady=2, expand=1)
button1 = tk.Button(frame, text='确定', command=lambda: self.close_window(False))
button1.pack(side=tk.LEFT, fill=tk.X, expand=1)
button2 = tk.Button(frame, text='清空', command=self.clear_all)
button2.pack(side=tk.LEFT, fill=tk.X, expand=1)
self.top.protocol("WM_DELETE_WINDOW", lambda: self.close_window(True))
self.top.bind('<Return>', lambda event: self.close_window(False))
self.top.bind('<Escape>', lambda event: self.close_window(True))
self.top.deiconify()
self.top.focus_force()
self.top.focus_set()
self.top.wait_window()
return self.values
def input_box(*ags, title='输入框'):
"""
    Decorator that shows a parameter input dialog before calling the wrapped function.
    :param title: dialog title
:return:
"""
def decorator(f):
@wraps(f)
def decorated(*args, **kwargs):
time.sleep(SLEEP_TIME)
res = InputBox(title, *ags).show()
if res is not None:
return f(*res)
return decorated
return decorator
def confirm_box(message='确定执行此操作吗?'):
"""
    Decorator that asks for confirmation before running the wrapped function.
    :param message: prompt message
:return:
"""
def decorator(f):
@wraps(f)
def decorated(*args, **kwargs):
time.sleep(SLEEP_TIME)
if messagebox.askokcancel('提示', message):
return f(*args, **kwargs)
return decorated
return decorator
if __name__ == '__main__':
def sub():
time.sleep(2)
notify.text = 'xue'
notify.show()
time.sleep(2)
notify.hide()
# notify = Notification()
# threading.Thread(target=auto_hide).start()
# notify.start()
# thd = threading.Thread(target=sub)
# thd.start()
# def auto_hide():
# time.sleep(2)
# # notify.destroy()
# # flag = False
# notify.hide()
notify = Notification('xyw_macro\n已启动')
threading.Thread(target=sub).start()
notify.run()
# notify.show(0.2)
# print('end')
# time.sleep(2)
# notify.set_text('changed')
# notify.show()
# notify.start()
# print('xue')
# print(type(notify.get_window()))
# notify.start()
# flag = True
# while flag:
# # notify.get_window().update_idletasks()
# notify.get_window().update()
| 30.348485 | 120 | 0.563155 | 8,361 | 0.804174 | 0 | 0 | 576 | 0.055401 | 0 | 0 | 1,984 | 0.190824 |
ef015b72b0d9f9a36582b5d4563b3165aa3bb897 | 1,206 | py | Python | tests/test_utils.py | yiannisha/dbmanage | 9e1e36e2b59e7e369595f4804bef2c2a7ec0ec56 | [
"Apache-2.0"
]
| null | null | null | tests/test_utils.py | yiannisha/dbmanage | 9e1e36e2b59e7e369595f4804bef2c2a7ec0ec56 | [
"Apache-2.0"
]
| 10 | 2021-11-06T18:12:54.000Z | 2021-12-01T18:49:29.000Z | tests/test_utils.py | yiannisha/dbmanage | 9e1e36e2b59e7e369595f4804bef2c2a7ec0ec56 | [
"Apache-2.0"
]
| null | null | null |
""" Utilities for testing """
import os
import json
TESTDATADIR = os.path.join(os.path.dirname(__file__), 'testdata')
def get_pass(pass_name : str) -> str:
""" Returns pass from test_credentials.json """
creds_path = os.path.join(os.path.dirname(__file__), 'test_credentials.json')
with open(creds_path, 'r', encoding='utf-8') as f:
for line in f.readlines():
creds = json.loads(line)
return creds[pass_name]
def read_temp_file(filename: str, delete = True, stdout: str = '', stderr: str = '') -> str:
""" Reads temp file and returns contents """
# wait for file to be generated
print(f'Waiting for {filename} file...')
try:
while(not os.path.exists(filename)):
pass
except KeyboardInterrupt as e:
error_msg = f'Stdout: {stdout}\nStderr: {stderr}\n'
raise Exception(error_msg)
# read file
with open(filename, 'r', encoding='utf-8') as f:
out_str = ''.join([line for line in f.readlines()])
# delete file
if delete and os.path.exists(filename):
try:
os.remove(filename)
except:
print(f'{filename} file already removed')
return out_str
| 28.046512 | 93 | 0.619403 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 340 | 0.281924 |
ef0261d204ca26d250b0a03064510e798b9c7feb | 152 | py | Python | ballistics/collision/dispatch/__init__.py | flupke/ballistics | 844ef7dd9fd55f6f7d0be04df6b564beaa5aaa1a | [
"Zlib"
]
| null | null | null | ballistics/collision/dispatch/__init__.py | flupke/ballistics | 844ef7dd9fd55f6f7d0be04df6b564beaa5aaa1a | [
"Zlib"
]
| null | null | null | ballistics/collision/dispatch/__init__.py | flupke/ballistics | 844ef7dd9fd55f6f7d0be04df6b564beaa5aaa1a | [
"Zlib"
]
| 1 | 2020-04-29T13:52:31.000Z | 2020-04-29T13:52:31.000Z | from ballistics.collision.dispatch.config import DefaultCollisionConfiguration
from ballistics.collision.dispatch.dispatcher import CollisionDispatcher
| 50.666667 | 78 | 0.907895 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
ef032589a15b54709c2cc0f764228c621cd157d2 | 750 | py | Python | venv/lib/python3.7/site-packages/webdriver_manager/chrome.py | wayshon/pylogin | 12ecfddc3ceaf552a42f62608027924541c63254 | [
"Apache-2.0"
]
| null | null | null | venv/lib/python3.7/site-packages/webdriver_manager/chrome.py | wayshon/pylogin | 12ecfddc3ceaf552a42f62608027924541c63254 | [
"Apache-2.0"
]
| 7 | 2019-12-04T23:08:08.000Z | 2022-02-10T12:47:38.000Z | venv/lib/python3.7/site-packages/webdriver_manager/chrome.py | wayshon/pylogin | 12ecfddc3ceaf552a42f62608027924541c63254 | [
"Apache-2.0"
]
| null | null | null | import os
from webdriver_manager.driver import ChromeDriver
from webdriver_manager.manager import DriverManager
from webdriver_manager import utils
class ChromeDriverManager(DriverManager):
def __init__(self, version=None, os_type=utils.os_type()):
# type: (str, str) -> None
super(ChromeDriverManager, self).__init__()
# there is no driver with 64 bit
if os_type == "win64":
os_type = "win32"
self.driver = ChromeDriver(version=version,
os_type=os_type)
def install(self, path=None):
# type: () -> str
bin_file = self._file_manager.download_driver(self.driver, path)
os.chmod(bin_file.path, 0o755)
return bin_file.path
| 34.090909 | 72 | 0.652 | 599 | 0.798667 | 0 | 0 | 0 | 0 | 0 | 0 | 89 | 0.118667 |
ef038b82c703bdd42d7eb00adaf52c73105e5c39 | 321 | py | Python | polling_stations/apps/data_importers/management/commands/import_brent.py | danielgriffin48/UK-Polling-Stations | 0e5273357a4fdc00c2af794c71558b6f8f2a0a49 | [
"BSD-3-Clause"
]
| null | null | null | polling_stations/apps/data_importers/management/commands/import_brent.py | danielgriffin48/UK-Polling-Stations | 0e5273357a4fdc00c2af794c71558b6f8f2a0a49 | [
"BSD-3-Clause"
]
| null | null | null | polling_stations/apps/data_importers/management/commands/import_brent.py | danielgriffin48/UK-Polling-Stations | 0e5273357a4fdc00c2af794c71558b6f8f2a0a49 | [
"BSD-3-Clause"
]
| null | null | null | from data_importers.management.commands import BaseDemocracyCountsCsvImporter
class Command(BaseDemocracyCountsCsvImporter):
council_id = "E09000005"
addresses_name = "europarl.2019-05-23/Version 1/DC PD.csv"
stations_name = "europarl.2019-05-23/Version 1/DC PS.csv"
elections = ["europarl.2019-05-23"]
| 35.666667 | 77 | 0.76947 | 240 | 0.747664 | 0 | 0 | 0 | 0 | 0 | 0 | 114 | 0.35514 |
ef0469d45705f95287d4ed042d4ea25304eabf8c | 3,217 | py | Python | tests/test_data/movies.py | jmolinski/traktpy | e6ff22acaf273b7b45070a4f8938c210fe4d63d7 | [
"MIT"
]
| null | null | null | tests/test_data/movies.py | jmolinski/traktpy | e6ff22acaf273b7b45070a4f8938c210fe4d63d7 | [
"MIT"
]
| 1 | 2019-04-13T10:15:48.000Z | 2019-04-13T10:15:48.000Z | tests/test_data/movies.py | jmolinski/traktpy | e6ff22acaf273b7b45070a4f8938c210fe4d63d7 | [
"MIT"
]
| null | null | null | MOVIE1 = {
"title": "Guardians of the Galaxy",
"year": 2014,
"ids": {
"trakt": 28,
"slug": "guardians-of-the-galaxy-2014",
"imdb": "tt2015381",
"tmdb": 118340,
},
}
MOVIE2 = {
"title": "Guardians of the Galaxy",
"year": 2014,
"ids": {
"trakt": 28,
"slug": "guardians-of-the-galaxy-2014",
"imdb": "tt2015381",
"tmdb": 118340,
},
}
MOVIE_PREMIERES = [
{"released": "2014-08-01", "movie": MOVIE1},
{"released": "2014-08-01", "movie": MOVIE2},
]
MOVIES = [MOVIE1, MOVIE2]
TRENDING_MOVIES = [{"watchers": 21, "movie": MOVIE1}, {"watchers": 17, "movie": MOVIE2}]
PLAYED_MOVIES = [
{
"watcher_count": 66667,
"play_count": 109736,
"collected_count": 27584,
"movie": MOVIE1,
},
{
"watcher_count": 76254,
"play_count": 104242,
"collected_count": 31877,
"movie": MOVIE2,
},
]
ANTICIPATED_MOVIES = [
{"list_count": 5362, "movie": MOVIE1},
{"list_count": 4405, "movie": MOVIE2},
]
BOX_OFFICE = [
{"revenue": 48464322, "movie": MOVIE1},
{"revenue": 17728313, "movie": MOVIE2},
]
UPDATED_MOVIES = [{"updated_at": "2014-09-22T21:56:03.000Z", "movie": MOVIE1}]
EXTENDED_MOVIE = {
"title": "TRON: Legacy",
"year": 2010,
"ids": {
"trakt": 343,
"slug": "tron-legacy-2010",
"imdb": "tt1104001",
"tmdb": 20526,
},
"tagline": "The Game Has Changed.",
"overview": "Sam Flynn, the tech-savvy and daring son of Kevin Flynn, investigates his father's disappearance and is pulled into The Grid. With the help of a mysterious program named Quorra, Sam quests to stop evil dictator Clu from crossing into the real world.",
"released": "2010-12-16",
"runtime": 125,
"country": "us",
"updated_at": "2014-07-23T03:21:46.000Z",
"trailer": None,
"homepage": "http://disney.go.com/tron/",
"rating": 8,
"votes": 111,
"comment_count": 92,
"language": "en",
"available_translations": ["en"],
"genres": ["action"],
"certification": "PG-13",
}
ALIASES = [
{"title": "Batman 1 - Batman Begins", "country": "ca"},
{"title": "Batman 5 Begins", "country": "br"},
]
RELEASES = [
{
"country": "us",
"certification": "PG",
"release_date": "2010-12-16",
"release_type": "theatrical",
"note": None,
},
{
"country": "gb",
"certification": "PG",
"release_date": "2010-12-17",
"release_type": "theatrical",
"note": None,
},
]
TRANSLATIONS = [
{
"title": "Batman Begins",
"overview": "...",
"tagline": "Das Böse fürchtet den Ritter.",
"language": "de",
}
]
RATINGS = {
"rating": 7.33778,
"votes": 7866,
"distribution": {
"1": 298,
"2": 46,
"3": 87,
"4": 178,
"5": 446,
"6": 1167,
"7": 1855,
"8": 1543,
"9": 662,
"10": 1583,
},
}
RELATED_MOVIES = [MOVIE1, MOVIE2]
MOVIE_STATS = {
"watchers": 39204,
"plays": 51033,
"collectors": 27379,
"comments": 36,
"lists": 4561,
"votes": 7866,
}
| 22.496503 | 269 | 0.520361 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,615 | 0.501709 |
ef051797168d89a7cce543aa7efcba75f787978c | 2,689 | py | Python | azext_iot/digitaltwins/common.py | v-andreaco/azure-iot-cli-extension | 18b20b0a6ba9f75556979eb905e6d2271eb27ddd | [
"MIT"
]
| null | null | null | azext_iot/digitaltwins/common.py | v-andreaco/azure-iot-cli-extension | 18b20b0a6ba9f75556979eb905e6d2271eb27ddd | [
"MIT"
]
| null | null | null | azext_iot/digitaltwins/common.py | v-andreaco/azure-iot-cli-extension | 18b20b0a6ba9f75556979eb905e6d2271eb27ddd | [
"MIT"
]
| null | null | null | # coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""
shared: Define shared data types(enums) and constant strings.
"""
from enum import Enum
# Retry constants
MAX_ADT_CREATE_RETRIES = 5
ADT_CREATE_RETRY_AFTER = 60
MAX_ADT_DH_CREATE_RETRIES = 20
# Data History strings
DT_IDENTITY_ERROR = "Digital Twins instance does not have System-Assigned Identity enabled. Please enable and try again."
FINISHED_CHECK_RESOURCE_LOG_MSG = "Finished checking the {0} resource."
ERROR_PREFIX = "Unable to"
FAIL_GENERIC_MSG = ERROR_PREFIX + " assign {0}. Please assign this role manually."
FAIL_RBAC_MSG = ERROR_PREFIX + " assign {0}. Please assign this role manually with the command `az {1}`."
ABORT_MSG = "Command was aborted."
CONT_INPUT_MSG = "Continue with Data History connection creation anyway?"
ADX_ROLE_MSG = "'Database Admin' permission on the Digital Twins instance for the Azure Data Explorer database '{0}'"
RBAC_ROLE_MSG = "'{0}' role on the Digital Twins instance for the scope '{1}'"
# Messages to be used with ADX_ROLE_MSG or RBAC_ROLE_MSG
# Example: "Trying to add the '{0}' role on the Digital Twins instance for the scope '{1}'.
TRY_ADD_ROLE_LOG_MSG = "Trying to add the {0}."
PRESENT_ADD_ROLE_LOG_MSG = "The {0} is already present."
FINISHED_ADD_ROLE_LOG_MSG = "Finished adding the {0}."
ADD_ROLE_INPUT_MSG = "Add the {0}?"
SKIP_ADD_ROLE_MSG = "Skipping addition of the {0}. This may prevent creation of the data history connection."
# Enums
class ADTEndpointType(Enum):
"""
ADT endpoint type.
"""
eventgridtopic = "eventgridtopic"
servicebus = "servicebus"
eventhub = "eventhub"
class ADTEndpointAuthType(Enum):
"""
ADT endpoint auth type.
"""
identitybased = "IdentityBased"
keybased = "KeyBased"
class ADTPrivateConnectionStatusType(Enum):
"""
ADT private endpoint connection status type.
"""
pending = "Pending"
approved = "Approved"
rejected = "Rejected"
disconnected = "Disconnected"
class ADTPublicNetworkAccessType(Enum):
"""
ADT private endpoint connection status type.
"""
enabled = "Enabled"
disabled = "Disabled"
class ProvisioningStateType(Enum):
"""
ARM poller provisioning states
"""
FINISHED = frozenset(['succeeded', 'canceled', 'failed'])
FAILED = frozenset(['canceled', 'failed'])
SUCCEEDED = frozenset(['succeeded'])
| 31.267442 | 121 | 0.670881 | 910 | 0.338416 | 0 | 0 | 0 | 0 | 0 | 0 | 1,735 | 0.645221 |
ef05389e99b6d9f3d5e451c4f3f4a586cd843bd5 | 7,580 | py | Python | lib/FeatureSetUtils/Utils/AveExpressionMatrixBuilder.py | mclark58/FeatureSetUtils | 2b84bc40d6a8f8aec878aa965ca567537c67267e | [
"MIT"
]
| 1 | 2020-01-13T19:38:50.000Z | 2020-01-13T19:38:50.000Z | lib/FeatureSetUtils/Utils/AveExpressionMatrixBuilder.py | mclark58/FeatureSetUtils | 2b84bc40d6a8f8aec878aa965ca567537c67267e | [
"MIT"
]
| 6 | 2017-09-19T17:46:03.000Z | 2020-06-09T04:28:36.000Z | lib/FeatureSetUtils/Utils/AveExpressionMatrixBuilder.py | mclark58/FeatureSetUtils | 2b84bc40d6a8f8aec878aa965ca567537c67267e | [
"MIT"
]
| 9 | 2017-06-30T16:01:48.000Z | 2020-08-13T20:19:42.000Z | import json
import time
import uuid
from installed_clients.DataFileUtilClient import DataFileUtil
from installed_clients.KBaseReportClient import KBaseReport
from installed_clients.WorkspaceClient import Workspace as Workspace
def log(message, prefix_newline=False):
"""Logging function, provides a hook to suppress or redirect log messages."""
print(('\n' if prefix_newline else '') + '{0:.2f}'.format(time.time()) + ': ' + str(message))
class AveExpressionMatrixBuilder:
def _validate_calculate_average_expression_matrix_params(self, params):
"""
_validate_calculate_average_expression_matrix_params:
validates params passed to calculate_average_expression_matrix method
"""
log('start validating calculate_average_expression_matrix params')
# check for required parameters
for p in ['expression_matrix_ref', 'output_suffix', 'workspace_name']:
if p not in params:
raise ValueError('"{}" parameter is required, but missing'.format(p))
def _generate_report(self, expression_matrix_ref, workspace_name):
"""
_generate_report: generate report
"""
objects_created = [{'ref': expression_matrix_ref,
'description': 'Average ExpressionMatrix'}]
report_params = {'message': '',
'workspace_name': workspace_name,
'objects_created': objects_created,
# 'html_links': output_html_files,
# 'direct_html_link_index': 0,
'html_window_height': 366,
'report_object_name': 'kb_ave_expr_matrix_report_' + str(uuid.uuid4())}
kbase_report_client = KBaseReport(self.callback_url, token=self.token)
output = kbase_report_client.create_extended_report(report_params)
report_output = {'report_name': output['name'], 'report_ref': output['ref']}
return report_output
def _save_expression_matrix(self, em_data, em_obj_name, workspace_name):
"""
_save_expression_matrix: saving ExpressionMatrix
"""
try:
log('saving ExpressionMatrix [{}]'.format(em_obj_name))
data_type = 'KBaseFeatureValues.ExpressionMatrix'
obj_info = self.dfu.save_objects({'id': self.dfu.ws_name_to_id(workspace_name),
'objects': [{'type': data_type,
'data': em_data,
'name': em_obj_name}]})[0]
except Exception as e:
log(e)
raise Exception('Failed Saving ExpressionMatrix to Workspace')
expression_matrix_ref = str(obj_info[6]) + '/' + str(obj_info[0]) + '/' + str(obj_info[4])
return expression_matrix_ref
def __init__(self, config):
self.ws_url = config["workspace-url"]
self.callback_url = config['SDK_CALLBACK_URL']
self.token = config['KB_AUTH_TOKEN']
self.shock_url = config['shock-url']
self.ws = Workspace(self.ws_url, token=self.token)
self.dfu = DataFileUtil(self.callback_url)
self.scratch = config['scratch']
def calculate_average_expression_matrix(self, params):
"""
calculate_average_expression_matrix: create an average ExpressionMatrix object
from a ExpressionMatrix object
required params:
expression_matrix_ref: ExpressionMatrix object reference
output_suffix: output average ExpressionMatrix name suffix
workspace_name: the name of the workspace it gets saved to
return:
average_expression_matrix_ref: generated average ExpressionMatrix object reference
report_name: report name generated by KBaseReport
report_ref: report reference generated by KBaseReport
"""
log('--->\nrunning AveExpressionMatrixBuilder.calculate_average_expression_matrix\n' +
'params:\n{}'.format(json.dumps(params, indent=1)))
self._validate_calculate_average_expression_matrix_params(params)
expression_matrix_ref = params.get('expression_matrix_ref')
expression_matrix = self.ws.get_objects2({'objects':
[{'ref':
expression_matrix_ref}]})['data'][0]
expression_matrix_data = expression_matrix['data']
expression_matrix_info = expression_matrix['info']
condition_map = expression_matrix_data['condition_mapping']
ori_data = expression_matrix_data['data']
ori_col_ids = ori_data['col_ids']
ori_row_ids = ori_data['row_ids']
ori_values = ori_data['values']
labels = list(condition_map.keys())
if set(labels) != set(ori_col_ids):
error_msg = 'available labels: {}\n'.format(ori_col_ids)
error_msg += 'labels in condition_mapping: {}'.format(labels)
raise ValueError(error_msg)
condition_pos = {}
for label, condition in condition_map.items():
if condition not in condition_pos:
condition_pos.update({condition: [ori_col_ids.index(label)]})
else:
condition_list = condition_pos[condition]
condition_list.append(ori_col_ids.index(label))
condition_pos.update({condition: condition_list})
conditions = list(condition_pos.keys())
ave_values = []
for ori_value in ori_values:
ave_value = [None] * len(conditions)
for condition, poss in condition_pos.items():
ave_pos = conditions.index(condition)
sum_value = 0.0
for pos in poss:
sum_value += round(float(ori_value[pos]), 3)
average = sum_value / len(poss)
ave_value[ave_pos] = round(average, 2)
ave_values.append(ave_value)
average_data = {}
average_data.update({'row_ids': ori_row_ids})
average_data.update({'col_ids': conditions})
average_data.update({'values': ave_values})
em_data = {}
genome_ref = expression_matrix_data.get('genome_ref')
if genome_ref:
em_data.update({'genome_ref': genome_ref})
em_data.update({'scale': expression_matrix_data.get('scale')})
em_data.update({'type': expression_matrix_data.get('type')})
em_data.update({'feature_mapping': expression_matrix_data.get('feature_mapping')})
em_data.update({'condition_mapping': expression_matrix_data.get('condition_mapping')})
em_data.update({'data': average_data})
expression_matrix_name = expression_matrix_info[1]
ave_expression_matrix_name = expression_matrix_name + params.get('output_suffix')
workspace_name = params.get('workspace_name')
ave_expression_matrix_ref = self._save_expression_matrix(em_data,
ave_expression_matrix_name,
workspace_name)
returnVal = {'average_expression_matrix_ref': ave_expression_matrix_ref}
report_output = self._generate_report(ave_expression_matrix_ref,
workspace_name)
returnVal.update(report_output)
return returnVal
| 41.648352 | 98 | 0.61504 | 7,127 | 0.940237 | 0 | 0 | 0 | 0 | 0 | 0 | 2,160 | 0.28496 |
ef055217f03abbaf7fba6a972f73a617fc132c0f | 838 | py | Python | src/python/modules/TensorflowCommon/utils.py | dsyme/ADBench | 87af0219a568807f8432754688ceb636efac12c6 | [
"MIT"
]
| 58 | 2019-12-30T16:22:01.000Z | 2022-01-23T12:26:51.000Z | src/python/modules/TensorflowCommon/utils.py | dsyme/ADBench | 87af0219a568807f8432754688ceb636efac12c6 | [
"MIT"
]
| 112 | 2019-05-25T07:26:58.000Z | 2019-12-28T13:55:33.000Z | src/python/modules/TensorflowCommon/utils.py | dsyme/ADBench | 87af0219a568807f8432754688ceb636efac12c6 | [
"MIT"
]
| 22 | 2020-03-12T16:37:55.000Z | 2022-02-23T10:14:37.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import tensorflow as tf
def to_tf_tensor(ndarray, dtype = tf.float64):
'''Converts the given multidimensional array to a tensorflow tensor.
Args:
ndarray (ndarray-like): parameter for conversion.
dtype (type, optional): defines a type of tensor elements. Defaults to
tf.float64.
Returns:
tensorflow tensor
'''
return tf.convert_to_tensor(ndarray, dtype = dtype)
def shape(tf_tensor):
    '''Returns the shape of a tensorflow tensor as a list of integers.'''
return tf_tensor.get_shape().as_list()
def flatten(tf_tensor, column_major = False):
    '''Returns the flattened tensor, optionally in column-major order.'''
if column_major:
tf_tensor = tf.transpose(tf_tensor)
return tf.reshape(tf_tensor, [-1]) | 22.648649 | 78 | 0.674224 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 458 | 0.546539 |
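# Illustrative usage sketch (added for clarity; not part of the original module).
# The example array values are made up.
if __name__ == "__main__":
    example = [[1.0, 2.0], [3.0, 4.0]]
    t = to_tf_tensor(example)               # tf.float64 tensor of shape (2, 2)
    print(shape(t))                         # -> [2, 2]
    print(flatten(t))                       # row-major order: [1., 2., 3., 4.]
    print(flatten(t, column_major=True))    # column-major order: [1., 3., 2., 4.]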
ef066c9d7e1e24986e561e37f408aef403cdc52a | 127 | py | Python | learning_sets.py | guppikan/PythonLearning | b1674b7187c783b682da26c2190e2b47938faa16 | [
"MIT"
]
| null | null | null | learning_sets.py | guppikan/PythonLearning | b1674b7187c783b682da26c2190e2b47938faa16 | [
"MIT"
]
| null | null | null | learning_sets.py | guppikan/PythonLearning | b1674b7187c783b682da26c2190e2b47938faa16 | [
"MIT"
]
| null | null | null | # this file describe sets data structures on python
thisSet={"Car","Bike","Truk"}
# Printing sets on terminal
print(thisSet) | 21.166667 | 52 | 0.740157 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 96 | 0.755906 |
ef071178a07b347765b3a959b7f835718f3934a3 | 588 | py | Python | s3bro/pool_map.py | rsavordelli/s3bro | e5b1d41052fd2491c08589b8a2bffeb6aae7cf33 | [
"MIT"
]
| 22 | 2018-03-13T18:46:33.000Z | 2021-11-03T09:41:39.000Z | s3bro/pool_map.py | rsavordelli/s3bro | e5b1d41052fd2491c08589b8a2bffeb6aae7cf33 | [
"MIT"
]
| 5 | 2018-06-26T21:39:06.000Z | 2020-08-03T12:53:10.000Z | s3bro/pool_map.py | rsavordelli/s3bro | e5b1d41052fd2491c08589b8a2bffeb6aae7cf33 | [
"MIT"
]
| 2 | 2019-09-04T06:40:09.000Z | 2020-07-06T01:56:44.000Z | from multiprocessing import Pool
import logging
def multi_process(func, data, workers):
logging.warning('Consuming list with %s workers' % workers)
p = Pool(workers)
try:
# the timeout(.get(9999999) is a workaround for the KeyboardInterrupt. without that it just does not work.
# Seem to be a bug on multiprocessing. Will investigate it later
p.map_async(func, data).get(9999999)
p.close()
except (KeyboardInterrupt, SystemExit):
print("Caught KeyboardInterrupt, terminating workers")
except Exception as e:
print(e)
| 32.666667 | 114 | 0.690476 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 249 | 0.423469 |
ef07256f31589e2d434bffa64e958f93097dc4b3 | 11,290 | py | Python | htmlmth/utils.py | ZwCreatePhoton/htmlmth | 74d23ca2fa53e11b2587251d2f71c8f275548182 | [
"MIT"
]
| null | null | null | htmlmth/utils.py | ZwCreatePhoton/htmlmth | 74d23ca2fa53e11b2587251d2f71c8f275548182 | [
"MIT"
]
| null | null | null | htmlmth/utils.py | ZwCreatePhoton/htmlmth | 74d23ca2fa53e11b2587251d2f71c8f275548182 | [
"MIT"
]
| null | null | null | import os
import yaml
from HTMLScriptExtractor import HTMLScriptExtractor
MIME_TYPE_MAP = {
'.htm': 'text/html',
'.html': 'text/html',
'.js': 'text/javascript',
'.vbs': 'text/vbscript',
'.txt': 'text/plain',
'.jpg': 'image/jpeg',
'.jpeg': 'image/jpeg'
}
# input:
#     "mime_type_function_dict": a dictionary (mime type -> f), where "f" is a function that accepts a TransformFunctionArgument (content string plus MetaData) and returns one, or a list of them
# output:
#     a function "g" that accepts a single argument: a list of TransformFunctionArgument objects
#     for each element of the list, g looks up mime_type_function_dict[element.metadata.mime_type] and, if it is callable, calls it with that element as the argument
def mime_type_based_transform(mime_type_function_dict):
def g(list_of_tfarg):
new_list_of_tfarg = []
for tfarg in list_of_tfarg:
f = mime_type_function_dict.get(tfarg.metadata.mime_type, None)
ret = None
if callable(f):
ret = f(tfarg)
if isinstance(ret, TransformFunctionArgument):
new_list_of_tfarg.append(tfarg)
elif isinstance(ret, list):
new_list_of_tfarg += ret
else:
new_list_of_tfarg.append(tfarg)
return new_list_of_tfarg
return g
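# Illustrative usage sketch (added for clarity; "upper_case_html" is a hypothetical
# handler, not part of this module):
#     def upper_case_html(tfarg):
#         tfarg.content = tfarg.content.upper()
#         return tfarg
#     transform = mime_type_based_transform({'text/html': upper_case_html})
#     transformed = transform([TransformFunctionArgument('<b>hi</b>', 'text/html')])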
# for use with TransformFunctionArgument.content
# function(string) -> function(TransformFunctionArgument)
def string_to_tfarg_function(f):
def g(tfarg):
tfarg.content = f(tfarg.content)
return tfarg
return g
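# Illustrative usage sketch (added for clarity; the lambda is a hypothetical string
# transform lifted to operate on a TransformFunctionArgument's content):
#     strip_spaces = string_to_tfarg_function(lambda s: s.replace(' ', ''))
#     tfarg = strip_spaces(TransformFunctionArgument('< b >hi< /b >', 'text/html'))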
# for use with TransformFunctionArgument.metadata.http.normalized_headers
# function(list of headers) -> function(TransformFunctionArgument)
def normalized_headers_to_tfarg_function(f):
def g(tfarg):
is_list = isinstance(tfarg, list)
tfargs = tfarg if is_list else [tfarg]
for tfa in tfargs:
tfa.metadata.http.normalized_headers = f(tfa.metadata.http.normalized_headers)
if is_list:
return tfargs
else:
return tfarg
return g
# for use with TransformFunctionArgument.metadata.http.payload
# function(bytes) -> function(TransformFunctionArgument)
def http_payload_to_tfarg_function(f):
def g(tfarg):
is_list = isinstance(tfarg, list)
tfargs = tfarg if is_list else [tfarg]
for tfa in tfargs:
tfa.metadata.http.body = f(tfa.metadata.http.body)
if is_list:
return tfargs
else:
return tfarg
return g
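# temporarily replaces every occurrence of "s" in the input with "sub", applies "f",
# then swaps "sub" back to "s" in the result, so occurrences of "s" pass through "f" untouched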
def replace_apply_replace_back(f, s, sub):
def g(input):
output = input.replace(s, sub)
output = f(output)
output = output.replace(sub, s)
return output
return g
class TransformFunction():
def __init__(self, name=None, description=None, *args):
self._name = name
self._description = description
self._functions = args
self.parameters = {}
@property
def name(self):
return self._name
@property
def description(self):
if self._description:
return self._description
else:
return "; ".join(f.description for f in self._functions)
def __call__(self, *args, **kwargs):
ret = args[0]
for func in self._functions:
ret = func(ret)
return ret
def parameterize(self, **kwargs):
        raise NotImplementedError
@staticmethod
    # clean up the descriptions of all TransformFunction objects in "transform_functions" using the name and description properties of TransformFunction objects with an index < "index"
def cleanup_descriptions(transform_functions, index=0):
for j in reversed(range(len(transform_functions))):
test_case = transform_functions[j]
description = test_case.description
pieces = set(description.split("; "))
used_pieces = set()
new_descriptions = []
for i in range(index):
if i == j:
continue
tc = transform_functions[i]
tc_description = tc.description
tc_pieces = set(tc_description.split("; "))
has_all_pieces = all(p in pieces for p in tc_pieces)
if has_all_pieces:
used_pieces.update(tc_pieces)
new_descriptions.append(tc.name)
missing_pieces = pieces - used_pieces
test_case._description = "; ".join(new_descriptions + list(missing_pieces))
class TransformFunctionArgument():
def __init__(self, content=None, content_type=None):
self.content = content
self.metadata = MetaData(data=self, mime_type=content_type)
def __str__(self):
return self.content
def __len__(self):
return len(str(self))
class dotdict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
class MetaData():
def __init__(self, data, mime_type=None):
self.data = data
self.mime_type = mime_type
self.http = HttpMetaData(data, mime_type=mime_type)
class HttpMetaData():
NEWLINE = "\r\n"
def __init__(self, data, type="response", version="1.1", mime_type=None, content_length_header=True, content_type_header=False, server_header=False, connection_header=False):
self._body = None
self.data = data
self.type = type
self.host = ""
self.path = "/"
self.is_launch_path = False
self.version = version
self.status_code = 200
self.status_message = "OK"
self.mime_type = mime_type if mime_type is not None else "text/html"
self._headers = None
self._normalized_headers = None
self.server_header = server_header
self.server_header_value = ""
self.content_type_header = content_type_header
self.connection_header = connection_header
self.connection_header_value = "close"
self.content_length_header = content_length_header
@property
def normalized_headers(self):
if self._normalized_headers is None:
self._normalized_headers = []
if self.server_header:
h = "Server: {}".format(self.server_header_value)
self._normalized_headers.append(h)
if self.content_type_header:
h = "Content-Type: {}".format(self.mime_type)
self._normalized_headers.append(h)
if self.connection_header:
h = "Connection: {}".format(self.connection_header_value)
self._normalized_headers.append(h)
return self._normalized_headers
@normalized_headers.setter
def normalized_headers(self, normalized_headers):
self._normalized_headers = normalized_headers
@property
def headers(self):
if self._headers:
return self._headers
else:
headers_bytes = ""
if self.type == "response":
headers_bytes += "HTTP/{} {} {}".format(self.version, self.status_code, self.status_message) + HttpMetaData.NEWLINE
else:
pass # TODO
# Assumption: the "headers" property will only be called after modifications to the payload are complete
# -> content-length will not be updated after accessing this property for the first time
self.normalized_headers
if self.content_length_header:
h = "Content-Length: {}".format(len(self.body))
self._normalized_headers.append(h)
for h in self._normalized_headers:
headers_bytes += h + HttpMetaData.NEWLINE
headers_bytes += HttpMetaData.NEWLINE
self._headers = headers_bytes
return self._headers
@headers.setter
def headers(self, headers):
self._headers = headers
# normalized body: before chunking, compression, etc.
@property
def payload(self):
return self.data.content
# raw body: after chunking, compression, etc.
@property
def body(self):
if self._body is None:
return self.payload
else:
return self._body
@body.setter
def body(self, value):
self._body = value
@staticmethod
def copy_server_headers(input_hmd, output_hmd):
output_hmd.server_header = input_hmd.server_header
output_hmd.server_header_value = input_hmd.server_header_value
output_hmd.content_type_header = input_hmd.content_type_header
output_hmd.content_length_header = input_hmd.content_length_header
output_hmd.connection_header = input_hmd.connection_header
def IsYaml(filepath):
return os.path.splitext(filepath)[-1].lower() == ".yaml"
# returns list of baseline
# baseline := dictionary of "host", "path", "filepath", "content"
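# illustrative layout of the expected baseline YAML (all values are made up); a file
# either lists other YAML files under "include" or defines "baselines" directly:
#     include:
#       - more_baselines.yaml
# or:
#     baselines:
#       - host: example.test        # optional
#         path: /index.html         # optional; defaults to the filepath
#         filepath: files/index.html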
def ParseBaselineYaml(filepath):
filepath = os.path.normpath(filepath.replace("\\", "/")) # normalize
baselines = []
with open(filepath) as f:
data = yaml.load(f, Loader=yaml.FullLoader)
if "include" in data:
for include_yaml in data["include"]:
baselines.extend(ParseBaselineYaml(os.path.join(os.path.abspath(os.path.dirname(filepath)), include_yaml)))
else:
if data['baselines'] is None:
return baselines
for baseline in data['baselines']:
normalized_filepath = os.path.normpath(baseline["filepath"].replace("\\", "/"))
bl = {
"host": baseline["host"] if "host" in baseline else "",
"path": baseline["path"] if "path" in baseline else normalized_filepath.replace("\\", "/"),
"filepath": normalized_filepath,
"content": open(os.path.join(os.path.abspath(os.path.dirname(filepath)), normalized_filepath), "r").read(),
}
if bl["path"][0] != "/":
bl["path"] = "/" + bl["path"]
baselines.append(bl)
return baselines
# returns list of testcase
# testcase := dictionary of "host", "path", "casename"
def ParseTestcaseYaml(filepath):
filepath = os.path.normpath(filepath.replace("\\", "/")) # normalize
baselines = []
with open(filepath) as f:
data = yaml.load(f, Loader=yaml.FullLoader)
if data is None:
return baselines
if "include" in data:
for include_yaml in data["include"]:
baselines.extend(ParseTestcaseYaml(os.path.join(os.path.abspath(os.path.dirname(filepath)), include_yaml)))
else:
if data['baselines'] is None:
return baselines
for baseline in data['baselines']:
bl = {
"host": baseline["host"] if "host" in baseline else "",
"path": baseline["path"] if "path" in baseline else "",
"casename": baseline["casename"]
}
if bl["path"] and bl["path"][0] != "/":
bl["path"] = "/" + bl["path"]
baselines.append(bl)
return baselines
| 35.84127 | 184 | 0.615766 | 6,054 | 0.536227 | 0 | 0 | 3,884 | 0.344021 | 0 | 0 | 2,115 | 0.187334 |
ef094d452aa651937866c8d859cce7f5a8e866fa | 1,265 | py | Python | examples/235. Lowest Common Ancestor of a Binary Search Tree.py | yehzhang/RapidTest | 2302fc10ddafba1d16ef1d7448d46c66f5a05da2 | [
"MIT"
]
| null | null | null | examples/235. Lowest Common Ancestor of a Binary Search Tree.py | yehzhang/RapidTest | 2302fc10ddafba1d16ef1d7448d46c66f5a05da2 | [
"MIT"
]
| null | null | null | examples/235. Lowest Common Ancestor of a Binary Search Tree.py | yehzhang/RapidTest | 2302fc10ddafba1d16ef1d7448d46c66f5a05da2 | [
"MIT"
]
| null | null | null | from rapidtest import Test, Case, TreeNode
from solutions.lowest_common_ancestor_of_a_binary_search_tree import Solution
with Test(Solution, post_proc=TreeNode.get_val) as test:
root = TreeNode.from_iterable([6, 2, 8, 0, 4, 7, 9, None, None, 3, 5])
Case(root, TreeNode(2), TreeNode(4), result=TreeNode(2))
Case(root, TreeNode(4), TreeNode(2), result=TreeNode(2))
Case(root, TreeNode(2), TreeNode(8), result=TreeNode(6))
Case(root, TreeNode(8), TreeNode(2), result=TreeNode(6))
Case(root, TreeNode(3), TreeNode(7), result=TreeNode(6))
Case(root, TreeNode(0), TreeNode(4), result=TreeNode(2))
Case(root, TreeNode(0), TreeNode(5), result=TreeNode(2))
Case(root, TreeNode(2), TreeNode(6), result=TreeNode(6))
Case(root, TreeNode(6), TreeNode(2), result=TreeNode(6))
Case(root, TreeNode(6), TreeNode(2), result=TreeNode(6))
Case(root, TreeNode(0), TreeNode(0), result=TreeNode(0))
@test
def greater_children(i):
return Case(TreeNode.from_iterable([i, None, i + 1]), TreeNode(i), TreeNode(i + 1),
result=TreeNode(i))
@test
def smaller_children(i):
return Case(TreeNode.from_iterable([i, i - 1]), TreeNode(i), TreeNode(i - 1),
result=TreeNode(i))
| 43.62069 | 91 | 0.660079 | 0 | 0 | 0 | 0 | 326 | 0.257708 | 0 | 0 | 0 | 0 |
ef09cb460708054f80c71807033f5ec91f1f2963 | 12,087 | py | Python | proto/npu_utilization_pb2.py | akaczm/jun-telemetry | 84c7208669f4f1749f8db45f4815dafefdbec083 | [
"MIT"
]
| 4 | 2019-12-02T12:20:47.000Z | 2021-08-25T12:52:26.000Z | proto/npu_utilization_pb2.py | akaczm/jun-telemetry | 84c7208669f4f1749f8db45f4815dafefdbec083 | [
"MIT"
]
| null | null | null | proto/npu_utilization_pb2.py | akaczm/jun-telemetry | 84c7208669f4f1749f8db45f4815dafefdbec083 | [
"MIT"
]
| 1 | 2021-08-25T12:47:44.000Z | 2021-08-25T12:47:44.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: npu_utilization.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import telemetry_top_pb2 as telemetry__top__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='npu_utilization.proto',
package='',
syntax='proto2',
serialized_options=None,
serialized_pb=_b('\n\x15npu_utilization.proto\x1a\x13telemetry_top.proto\"C\n\x1bNetworkProcessorUtilization\x12$\n\x0enpu_util_stats\x18\x01 \x03(\x0b\x32\x0c.Utilization\"q\n\x0bUtilization\x12\x12\n\nidentifier\x18\x01 \x02(\t\x12\x13\n\x0butilization\x18\x02 \x01(\r\x12\x1c\n\x07packets\x18\x03 \x03(\x0b\x32\x0b.PacketLoad\x12\x1b\n\x06memory\x18\x04 \x03(\x0b\x32\x0b.MemoryLoad\"\xba\x01\n\nMemoryLoad\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x61verage_util\x18\x02 \x01(\r\x12\x14\n\x0chighest_util\x18\x03 \x01(\r\x12\x13\n\x0blowest_util\x18\x04 \x01(\r\x12\x1e\n\x16\x61verage_cache_hit_rate\x18\x05 \x01(\r\x12\x1e\n\x16highest_cache_hit_rate\x18\x06 \x01(\r\x12\x1d\n\x15lowest_cache_hit_rate\x18\x07 \x01(\r\"\xa2\x01\n\nPacketLoad\x12\x12\n\nidentifier\x18\x01 \x02(\t\x12\x0c\n\x04rate\x18\x02 \x01(\x04\x12\'\n\x1f\x61verage_instructions_per_packet\x18\x03 \x01(\r\x12&\n\x1e\x61verage_wait_cycles_per_packet\x18\x04 \x01(\r\x12!\n\x19\x61verage_cycles_per_packet\x18\x05 \x01(\r:W\n\x18jnpr_npu_utilization_ext\x12\x17.JuniperNetworksSensors\x18\x0c \x01(\x0b\x32\x1c.NetworkProcessorUtilization')
,
dependencies=[telemetry__top__pb2.DESCRIPTOR,])
JNPR_NPU_UTILIZATION_EXT_FIELD_NUMBER = 12
jnpr_npu_utilization_ext = _descriptor.FieldDescriptor(
name='jnpr_npu_utilization_ext', full_name='jnpr_npu_utilization_ext', index=0,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR)
_NETWORKPROCESSORUTILIZATION = _descriptor.Descriptor(
name='NetworkProcessorUtilization',
full_name='NetworkProcessorUtilization',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='npu_util_stats', full_name='NetworkProcessorUtilization.npu_util_stats', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=46,
serialized_end=113,
)
_UTILIZATION = _descriptor.Descriptor(
name='Utilization',
full_name='Utilization',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='identifier', full_name='Utilization.identifier', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='utilization', full_name='Utilization.utilization', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='packets', full_name='Utilization.packets', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='memory', full_name='Utilization.memory', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=115,
serialized_end=228,
)
_MEMORYLOAD = _descriptor.Descriptor(
name='MemoryLoad',
full_name='MemoryLoad',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='MemoryLoad.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='average_util', full_name='MemoryLoad.average_util', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='highest_util', full_name='MemoryLoad.highest_util', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lowest_util', full_name='MemoryLoad.lowest_util', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='average_cache_hit_rate', full_name='MemoryLoad.average_cache_hit_rate', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='highest_cache_hit_rate', full_name='MemoryLoad.highest_cache_hit_rate', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lowest_cache_hit_rate', full_name='MemoryLoad.lowest_cache_hit_rate', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=231,
serialized_end=417,
)
_PACKETLOAD = _descriptor.Descriptor(
name='PacketLoad',
full_name='PacketLoad',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='identifier', full_name='PacketLoad.identifier', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rate', full_name='PacketLoad.rate', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='average_instructions_per_packet', full_name='PacketLoad.average_instructions_per_packet', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='average_wait_cycles_per_packet', full_name='PacketLoad.average_wait_cycles_per_packet', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='average_cycles_per_packet', full_name='PacketLoad.average_cycles_per_packet', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=420,
serialized_end=582,
)
_NETWORKPROCESSORUTILIZATION.fields_by_name['npu_util_stats'].message_type = _UTILIZATION
_UTILIZATION.fields_by_name['packets'].message_type = _PACKETLOAD
_UTILIZATION.fields_by_name['memory'].message_type = _MEMORYLOAD
DESCRIPTOR.message_types_by_name['NetworkProcessorUtilization'] = _NETWORKPROCESSORUTILIZATION
DESCRIPTOR.message_types_by_name['Utilization'] = _UTILIZATION
DESCRIPTOR.message_types_by_name['MemoryLoad'] = _MEMORYLOAD
DESCRIPTOR.message_types_by_name['PacketLoad'] = _PACKETLOAD
DESCRIPTOR.extensions_by_name['jnpr_npu_utilization_ext'] = jnpr_npu_utilization_ext
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
NetworkProcessorUtilization = _reflection.GeneratedProtocolMessageType('NetworkProcessorUtilization', (_message.Message,), {
'DESCRIPTOR' : _NETWORKPROCESSORUTILIZATION,
'__module__' : 'npu_utilization_pb2'
# @@protoc_insertion_point(class_scope:NetworkProcessorUtilization)
})
_sym_db.RegisterMessage(NetworkProcessorUtilization)
Utilization = _reflection.GeneratedProtocolMessageType('Utilization', (_message.Message,), {
'DESCRIPTOR' : _UTILIZATION,
'__module__' : 'npu_utilization_pb2'
# @@protoc_insertion_point(class_scope:Utilization)
})
_sym_db.RegisterMessage(Utilization)
MemoryLoad = _reflection.GeneratedProtocolMessageType('MemoryLoad', (_message.Message,), {
'DESCRIPTOR' : _MEMORYLOAD,
'__module__' : 'npu_utilization_pb2'
# @@protoc_insertion_point(class_scope:MemoryLoad)
})
_sym_db.RegisterMessage(MemoryLoad)
PacketLoad = _reflection.GeneratedProtocolMessageType('PacketLoad', (_message.Message,), {
'DESCRIPTOR' : _PACKETLOAD,
'__module__' : 'npu_utilization_pb2'
# @@protoc_insertion_point(class_scope:PacketLoad)
})
_sym_db.RegisterMessage(PacketLoad)
jnpr_npu_utilization_ext.message_type = _NETWORKPROCESSORUTILIZATION
telemetry__top__pb2.JuniperNetworksSensors.RegisterExtension(jnpr_npu_utilization_ext)
# @@protoc_insertion_point(module_scope)
| 40.972881 | 1,125 | 0.755522 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,945 | 0.24365 |
ef0a465c711275ee344dd982144bb689f29fa28c | 4,409 | py | Python | tests/test_models.py | rramaa/pynnotate | 7cf983dd16726032d3d53340415a823c9e8bd76c | [
"MIT"
]
| 1 | 2019-07-24T12:56:16.000Z | 2019-07-24T12:56:16.000Z | tests/test_models.py | rramaa/pynnotate | 7cf983dd16726032d3d53340415a823c9e8bd76c | [
"MIT"
]
| 14 | 2019-03-12T08:49:34.000Z | 2019-04-04T09:51:16.000Z | tests/test_models.py | rramaa/pynnotate | 7cf983dd16726032d3d53340415a823c9e8bd76c | [
"MIT"
]
| 2 | 2019-10-13T14:45:11.000Z | 2019-12-24T22:22:46.000Z | from annotatelib.models import (
models, class_from_filename,
table_name_from_filename, _get_column_description_from_object,
_get_indices_description_from_oject
)
import sqlite3
from orator import DatabaseManager
import os
import sys
sys.path.insert(0, os.path.abspath(
os.path.join(os.path.dirname(__file__), '../')))
def test_models():
result = models('tests/fixture_models')
result.sort()
result = list(map(lambda x: os.path.split(x)[1], result))
assert result == ['fixture_model_1.py', 'fixture_model_2.py', 'tasks.py']
def test_class_from_filename():
assert class_from_filename('class_name.py') == 'ClassName'
def test_class_from_filename_multiple():
assert class_from_filename('class_name_sfsaa.py') == 'ClassNameSfsaa'
def test_table_name_from_filename():
assert table_name_from_filename(
'engine_model_names.py') == 'engine_model_names'
def test_get_column_description_from_object():
database = "test.db"
create_database(database)
config = {
'sqlite3': {
'driver': 'sqlite',
'database': database
}
}
db = DatabaseManager(config)
result = _get_column_description_from_object(
db.get_schema_manager(), 'tasks')
assert result == {
'id': {'unsigned': False, 'autoincrement': False, 'length': None, 'default': None,
'pk': 1, 'precision': 10, 'name': 'id', 'extra': {}, 'scale': 0, 'type': 'integer', 'notnull': False, 'fixed': False},
'status_id': {'unsigned': False, 'autoincrement': False, 'length': None, 'default': None,
'pk': 0, 'precision': 10, 'name': 'status_id', 'extra': {}, 'scale': 0, 'type': 'integer', 'notnull': True, 'fixed': False},
'project_id': {'unsigned': False, 'autoincrement': False, 'length': None, 'default': None,
'pk': 0, 'precision': 10, 'name': 'project_id', 'extra': {}, 'scale': 0, 'type': 'integer', 'notnull': True, 'fixed': False},
'name': {'unsigned': False, 'autoincrement': False, 'length': None, 'default': None,
'pk': 0, 'precision': 10, 'name': 'name', 'extra': {}, 'scale': 0, 'type': 'text', 'notnull': True, 'fixed': False},
'end_date': {'unsigned': False, 'autoincrement': False, 'length': None, 'default': None,
'pk': 0, 'precision': 10, 'name': 'end_date', 'extra': {}, 'scale': 0, 'type': 'text', 'notnull': True, 'fixed': False},
'priority': {'unsigned': False, 'autoincrement': False, 'length': None, 'default': None,
'pk': 0, 'precision': 10, 'name': 'priority', 'extra': {}, 'scale': 0, 'type': 'integer', 'notnull': False, 'fixed': False},
'begin_date': {'unsigned': False, 'autoincrement': False, 'length': None, 'default': None,
'pk': 0, 'precision': 10, 'name': 'begin_date', 'extra': {}, 'scale': 0, 'type': 'text', 'notnull': True, 'fixed': False}}
drop_database(database)
def test_get_indices_description_from_object():
database = "test.db"
create_database(database)
config = {
'sqlite3': {
'driver': 'sqlite',
'database': database
}
}
db = DatabaseManager(config)
result = _get_indices_description_from_oject(
db.get_schema_manager(), 'tasks')
assert result == {'primary': {'is_unique?': True,
'is_primary?': True, 'columns': ['id']}}
drop_database(database)
def create_database(database):
sql_create_tasks_table = """CREATE TABLE IF NOT EXISTS tasks (
id integer PRIMARY KEY,
name text NOT NULL,
priority integer,
status_id integer NOT NULL,
project_id integer NOT NULL,
begin_date text NOT NULL,
end_date text NOT NULL,
FOREIGN KEY (project_id) REFERENCES projects (id)
);"""
# create a database connection
conn = sqlite3.connect(database)
# create tasks table
c = conn.cursor()
c.execute(sql_create_tasks_table)
def drop_database(database):
os.remove(database)
def truncate_file(file_path):
with open(file_path, 'r+') as f:
f.truncate(0)
| 41.205607 | 148 | 0.577682 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,827 | 0.41438 |
ef0b1e90a414cd10b99ab947636c1ca2151cab55 | 430 | py | Python | flatlist/__init__.py | dwabece/flatlist | 61b6f7f70bf9db2bf14f8bfdebce2c4f9a95811f | [
"WTFPL"
]
| null | null | null | flatlist/__init__.py | dwabece/flatlist | 61b6f7f70bf9db2bf14f8bfdebce2c4f9a95811f | [
"WTFPL"
]
| null | null | null | flatlist/__init__.py | dwabece/flatlist | 61b6f7f70bf9db2bf14f8bfdebce2c4f9a95811f | [
"WTFPL"
]
| null | null | null | __version__ = '0.0.1'
def flatten_list(input_list):
"""
    Flattens a list that may contain arbitrarily nested lists.
>>> flatten_list([1, [2, [3], [4]]])
[1, 2, 3, 4]
"""
result = []
for item in input_list:
if isinstance(item, list):
result.extend(flatten_list(item))
# yield from flatten_list(item)
else:
result.append(item)
# yield item
return result
| 20.47619 | 45 | 0.532558 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 162 | 0.376744 |
ef0dbb4129bccb5de4e10f51b60990b9ac3393bb | 607 | py | Python | slackcast/token.py | rbdixon/slackcast | ac4ac4591bbcf62d64ec05b5479e6e8315f92a69 | [
"MIT"
]
| null | null | null | slackcast/token.py | rbdixon/slackcast | ac4ac4591bbcf62d64ec05b5479e6e8315f92a69 | [
"MIT"
]
| 1 | 2021-11-15T17:47:27.000Z | 2021-11-15T17:47:27.000Z | slackcast/token.py | rbdixon/slackcast | ac4ac4591bbcf62d64ec05b5479e6e8315f92a69 | [
"MIT"
]
| null | null | null | import os
import keyring
from prompt_toolkit import prompt
KEY = ('slackcast', 'token')
SLACKCAST_INSTALL_URL = os.environ.get(
'SLACKCAST_INSTALL_URL', 'https://slackcast.devtestit.com/install'
)
def get_token():
# For testing
token = os.environ.get('SLACKCAST_TOKEN', None)
if token is None:
token = keyring.get_password(*KEY)
if token is None:
raw_token = prompt(f'Visit {SLACKCAST_INSTALL_URL}, approve, and enter token: ')
if raw_token.startswith('xoxp-'):
token = raw_token
keyring.set_password(*KEY, token)
return token
| 22.481481 | 88 | 0.667216 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 179 | 0.294893 |
ef0f41777334766f27b085f4b278863d8beee416 | 790 | py | Python | baidupan.py | iSteveyang/GraduateDesign-pyqt | ce4e6c8b0de2398081a83c63fb98cc03126bc6d0 | [
"MIT"
]
| null | null | null | baidupan.py | iSteveyang/GraduateDesign-pyqt | ce4e6c8b0de2398081a83c63fb98cc03126bc6d0 | [
"MIT"
]
| null | null | null | baidupan.py | iSteveyang/GraduateDesign-pyqt | ce4e6c8b0de2398081a83c63fb98cc03126bc6d0 | [
"MIT"
]
| null | null | null | import progressbar
from baidupcsapi import PCS
class ProgressBar():
def __init__(self):
self.first_call = True
def __call__(self, *args, **kwargs):
if self.first_call:
self.widgets = [progressbar.Percentage(), ' ', progressbar.Bar(marker=progressbar.RotatingMarker('>')),
' ', progressbar.ETA()]
self.pbar = progressbar.ProgressBar(widgets=self.widgets, maxval=kwargs['size']).start()
self.first_call = False
if kwargs['size'] <= kwargs['progress']:
self.pbar.finish()
else:
self.pbar.update(kwargs['progress'])
# placeholder credentials; replace with a real Baidu account before running
pcs = PCS('username', 'password')
with open('bigfile.pdf', 'rb') as f:
    test_file = f.read()
ret = pcs.upload('/',test_file,'bigfile.pdf',callback=ProgressBar())
| 34.347826 | 115 | 0.611392 | 593 | 0.750633 | 0 | 0 | 0 | 0 | 0 | 0 | 94 | 0.118987 |
ef0f95f25a14e3a1c31217d9a079a1f1c52c743d | 541 | py | Python | pps/message.py | SeungUkLee/preview-pipfile-script | d28d963f1feee9ed1621a04b25c02d34a0919829 | [
"MIT"
]
| null | null | null | pps/message.py | SeungUkLee/preview-pipfile-script | d28d963f1feee9ed1621a04b25c02d34a0919829 | [
"MIT"
]
| null | null | null | pps/message.py | SeungUkLee/preview-pipfile-script | d28d963f1feee9ed1621a04b25c02d34a0919829 | [
"MIT"
]
| null | null | null | """
messages
"""
from .color import ENDC, FAIL, OKBLUE, YELLOW
EXE_SCRIPT_ERR_MSG = '{0}[!]{1} An error occurred while executing script in Pipfile'.format(
FAIL, ENDC
)
KEYWORD_NOT_FOUND_MSG = "{0}[!]{1} {2}Pipfile{1} in {3}[scripts]{1} keyword not found!".format(
FAIL, ENDC, OKBLUE, YELLOW
)
FILE_NOT_FOUND_MSG = "{0}[!]{1} {2}Pipfile{1} not found!".format(
FAIL, ENDC, OKBLUE
)
KEYBOARD_INTERRUPT_MSG = "{0}[!]{1} KeyboardInterrupt".format(FAIL, ENDC)
INQUIRER_MSG = "{0}Select Pipfile script to run{1}".format(YELLOW, ENDC)
| 31.823529 | 95 | 0.685767 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 243 | 0.449168 |
ef1093497c62d32b5e459bb8bfbe26c27ca18a49 | 2,101 | py | Python | lambdafunctions/LogEvent/LogEvent.py | rpetrina/slack-sentiment-bot | 47969d8a8c476aa60939fab88f0af793a24a4acc | [
"MIT"
]
| null | null | null | lambdafunctions/LogEvent/LogEvent.py | rpetrina/slack-sentiment-bot | 47969d8a8c476aa60939fab88f0af793a24a4acc | [
"MIT"
]
| null | null | null | lambdafunctions/LogEvent/LogEvent.py | rpetrina/slack-sentiment-bot | 47969d8a8c476aa60939fab88f0af793a24a4acc | [
"MIT"
]
| null | null | null | import sys
import logging
import pymysql
import json
import os
#rds settings - Lambda role must have RDS access
rds_host = os.environ['RDS_HOST'] # Set in Lambda Dashboard
name = os.environ['DB_USERNAME']
password = os.environ['DB_PW']
db_name = os.environ['DB_NAME']
db_table = os.environ['DB_TABLE']
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def connecttodb():
try:
conn = pymysql.connect(rds_host, user=name,
passwd=password, db=db_name, connect_timeout=5)
    except Exception:
        logger.error(
            "ERROR: Unexpected error: Could not connect to MySQL instance.")
        sys.exit()
    logger.info("SUCCESS: Connection to RDS MySQL instance succeeded")
    return conn
def writemessagetodb(event):
conn = connecttodb()
_eventid = str(event["event_id"])
_userid = str(event["user"])
_msgtext = event["text"]
_timestamp = str(event["event_time"])
insertstatement = 'INSERT INTO `' + db_table + \
r"""` (`eventid`, `userid`, `msgtxt`) VALUES (%s, %s, %s)"""
with conn.cursor() as cur:
cur.execute(insertstatement, (_eventid, _userid, _msgtext))
conn.commit()
print("Message successfully inserted into DB")
def handler(event, context):
"""
This function handles SNS posts from Amazon SNS. Currently it:
1) Inserts the request into an RDS MySQL DB
Current Assumptions:
    1) Messages don't contain special characters - e.g. '
    2) Requests are correctly formatted (contain body and event, and the event contains the expected values)
"""
print("In logevent: ", event)
try:
slackevent = json.loads(event["Records"][0]["Sns"]["Message"])
writemessagetodb(slackevent)
        response = {
"statusCode": 200,
"body": event
}
except Exception as e:
''' Just a stub. Please make this better in real use :) '''
logger.error(f"ERROR: {e}")
response = {
"statusCode": 400,
"body": event
}
return response
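# Illustrative shape of the SNS event this handler expects (all values are made up):
#     {
#         "Records": [{
#             "Sns": {
#                 "Message": "{\"event_id\": \"Ev123\", \"user\": \"U456\", \"text\": \"hello\", \"event_time\": 1553839200}"
#             }
#         }]
#     }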
| 29.180556 | 107 | 0.619229 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 863 | 0.410757 |
ef124d3ce81475f29c8f62fc6238715aeebcf110 | 764 | py | Python | ACCNTS/migrations/0012_auto_20190329_0554.py | domambia/csdigital-gs1kenya-internal-erp | 6736d0e9a3a51653689f8ae921cf811f378d9d8e | [
"MIT"
]
| 12 | 2019-08-02T07:58:16.000Z | 2022-01-31T23:45:08.000Z | ACCNTS/migrations/0012_auto_20190329_0554.py | domambia/csdigital-gs1kenya-internal-erp | 6736d0e9a3a51653689f8ae921cf811f378d9d8e | [
"MIT"
]
| 8 | 2019-08-02T08:06:18.000Z | 2022-03-11T23:45:17.000Z | ACCNTS/migrations/0012_auto_20190329_0554.py | domambia/csdigital-gs1kenya-internal-erp | 6736d0e9a3a51653689f8ae921cf811f378d9d8e | [
"MIT"
]
| 11 | 2019-07-31T16:23:36.000Z | 2022-01-29T08:30:07.000Z | # Generated by Django 2.1.5 on 2019-03-29 05:54
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ACCNTS', '0011_asset_employeetax_income_liability'),
]
operations = [
migrations.AddField(
model_name='asset',
name='dated',
field=models.DateField(default=datetime.datetime.now),
),
migrations.AddField(
model_name='income',
name='dated',
field=models.DateField(default=datetime.datetime.now),
),
migrations.AddField(
model_name='liability',
name='dated',
field=models.DateField(default=datetime.datetime.now),
),
]
| 25.466667 | 66 | 0.590314 | 655 | 0.85733 | 0 | 0 | 0 | 0 | 0 | 0 | 143 | 0.187173 |
ef1252f9351ea7758743cb386119d19cc1470cf1 | 171 | py | Python | doacao/forms.py | CyberDagger/quatropatas | 7fd9b51dd65d6242112ab40c834a66c4cc8c8c73 | [
"MIT"
]
| null | null | null | doacao/forms.py | CyberDagger/quatropatas | 7fd9b51dd65d6242112ab40c834a66c4cc8c8c73 | [
"MIT"
]
| null | null | null | doacao/forms.py | CyberDagger/quatropatas | 7fd9b51dd65d6242112ab40c834a66c4cc8c8c73 | [
"MIT"
]
| 1 | 2019-04-16T19:19:10.000Z | 2019-04-16T19:19:10.000Z | from django import forms
from .models import Doacao
class DoacaoForm(forms.ModelForm):
class Meta:
model = Doacao
fields = ['nib', 'quantia',]
| 21.375 | 37 | 0.631579 | 113 | 0.660819 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.081871 |
ef12df78f36f2adabef28423fa54313ee1270534 | 1,707 | py | Python | data/build_wd_elastic_index.py | flaneuse/reframedb-backend | 863423fb9fad547aa8c2f826dc2d39939fe1b991 | [
"MIT"
]
| null | null | null | data/build_wd_elastic_index.py | flaneuse/reframedb-backend | 863423fb9fad547aa8c2f826dc2d39939fe1b991 | [
"MIT"
]
| null | null | null | data/build_wd_elastic_index.py | flaneuse/reframedb-backend | 863423fb9fad547aa8c2f826dc2d39939fe1b991 | [
"MIT"
]
| null | null | null | import requests
from elasticsearch import Elasticsearch, client
from elasticsearch.exceptions import RequestError
es = Elasticsearch()
# retrieve all QIDs from the populated reframe ES index
body = {
"_source": {
"includes": ["qid"],
},
"query": {
"query_string": {
"query": "Q*",
"fields": ['qid']
}
},
"from": 0, "size": 10000,
}
es.indices.refresh(index="reframe")
r = es.search(index="reframe", body=body)
bd = {
'mapping': {
'total_fields': {
'limit': 30000
}
}
}
c = client.IndicesClient(es)
# check if index exists, otherwise, create
if c.exists(index='wikidata'):
c.put_settings(index='wikidata', body=bd)
else:
c.create(index='wikidata', body=bd)
session = requests.Session()
for count, hit in enumerate(r['hits']['hits']):
qid = hit['_source']['qid']
header = {
'Accept': 'application/json'
}
r = session.get('http://www.wikidata.org/entity/{}'.format(qid), headers=header).json()
# print(r)
obj = r['entities'][qid]
del obj['descriptions']
for claim, value in obj['claims'].items():
# print(claim, value)
for x in value:
if 'references' in x:
del x['references']
if es.exists(index='wikidata', doc_type='compound', id=qid):
# print('this exists!!')
es.update(index='wikidata', id=qid, doc_type='compound', body={'doc': obj})
# pass
else:
try:
res = es.index(index="wikidata", doc_type='compound', id=qid, body=obj)
except RequestError as e:
print(e)
if count % 100 == 0:
print('imported ', count)
| 21.884615 | 91 | 0.565319 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 536 | 0.314001 |
ef135d999c596568c19df6fc41a299bbb48ab07f | 3,049 | py | Python | dj_twitter_clone_app/blog/views.py | ivanprytula/dj_demo_app | 49ca506b22d3d99608e192b28787e185b39d3c24 | [
"MIT"
]
| null | null | null | dj_twitter_clone_app/blog/views.py | ivanprytula/dj_demo_app | 49ca506b22d3d99608e192b28787e185b39d3c24 | [
"MIT"
]
| null | null | null | dj_twitter_clone_app/blog/views.py | ivanprytula/dj_demo_app | 49ca506b22d3d99608e192b28787e185b39d3c24 | [
"MIT"
]
| null | null | null | from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.urls import reverse_lazy
from django.views.generic import (ListView, CreateView, TemplateView, )
from django.views.generic.detail import DetailView
from django.views.generic.edit import (UpdateView, DeleteView, )
from blog.models import Post
class BlogListView(ListView):
"""Blog blog home page view with pagination."""
model = Post
template_name = 'blog/blog_list.html'
context_object_name = 'posts'
paginate_by = 5
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
posts = self.get_queryset()
page = self.request.GET.get('page')
paginator = Paginator(posts, self.paginate_by)
try:
posts = paginator.page(page)
except PageNotAnInteger:
posts = paginator.page(1)
except EmptyPage:
posts = paginator.page(paginator.num_pages)
context['posts'] = posts
return context
class PostCreateView(CreateView):
"""Post create view with all model fields."""
model = Post
template_name = 'blog/post_new.html'
fields = '__all__'
success_url = reverse_lazy('blog_list')
class PostDetailView(DetailView):
"""Post details view accessed by primary key."""
model = Post
template_name = 'blog/post_detail.html'
context_object_name = 'post'
# def post_detail(request, pk):
# post = Post.objects.get(pk=pk)
#
# # We create empty form when user visits a page
# form = CommentForm()
# if request.method == 'POST':
# form = CommentForm(request.POST)
# if form.is_valid():
# comment = Comment(
# author=form.cleaned_data['author'],
# content=form.cleaned_data['content'],
# post=post
# )
# comment.save()
#
# comments = Comment.objects.filter(post=post)
# context = {
# 'post': post,
# 'comments': comments,
# 'form': form,
# }
# return render(request, 'blog/post_detail.html', context)
class PostUpdateView(UpdateView):
model = Post
template_name = 'blog/post_update.html'
context_object_name = 'post'
fields = ('title', 'content', 'categories')
def get_success_url(self):
return reverse_lazy('post_detail', kwargs={'pk': self.object.id})
class PostDeleteView(DeleteView):
model = Post
template_name = 'blog/post_delete.html'
success_url = reverse_lazy('blog_list')
class BlogCategory(TemplateView):
"""It takes a category name as an argument and
query the Post database for all posts that have been assigned
the given category."""
template_name = 'blog/blog_category.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['category'] = kwargs.get('category')
context['posts'] = Post.objects. \
filter(categories__name__contains=context['category'])
return context
| 29.601942 | 73 | 0.648081 | 2,005 | 0.657593 | 0 | 0 | 0 | 0 | 0 | 0 | 1,234 | 0.404723 |