Schema of the records below (one row per source file); each column is listed with its dtype and the observed minimum / maximum (or number of distinct values):

| column | dtype | min / values | max |
|---|---|---|---|
| hexsha | stringlengths | 40 | 40 |
| size | int64 | 5 | 2.06M |
| ext | stringclasses | 10 values | |
| lang | stringclasses | 1 value | |
| max_stars_repo_path | stringlengths | 3 | 248 |
| max_stars_repo_name | stringlengths | 5 | 125 |
| max_stars_repo_head_hexsha | stringlengths | 40 | 78 |
| max_stars_repo_licenses | sequencelengths | 1 | 10 |
| max_stars_count | int64 | 1 | 191k |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 | 24 |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 | 24 |
| max_issues_repo_path | stringlengths | 3 | 248 |
| max_issues_repo_name | stringlengths | 5 | 125 |
| max_issues_repo_head_hexsha | stringlengths | 40 | 78 |
| max_issues_repo_licenses | sequencelengths | 1 | 10 |
| max_issues_count | int64 | 1 | 67k |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 | 24 |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 | 24 |
| max_forks_repo_path | stringlengths | 3 | 248 |
| max_forks_repo_name | stringlengths | 5 | 125 |
| max_forks_repo_head_hexsha | stringlengths | 40 | 78 |
| max_forks_repo_licenses | sequencelengths | 1 | 10 |
| max_forks_count | int64 | 1 | 105k |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 | 24 |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 | 24 |
| content | stringlengths | 5 | 2.06M |
| avg_line_length | float64 | 1 | 1.02M |
| max_line_length | int64 | 3 | 1.03M |
| alphanum_fraction | float64 | 0 | 1 |
| count_classes | int64 | 0 | 1.6M |
| score_classes | float64 | 0 | 1 |
| count_generators | int64 | 0 | 651k |
| score_generators | float64 | 0 | 1 |
| count_decorators | int64 | 0 | 990k |
| score_decorators | float64 | 0 | 1 |
| count_async_functions | int64 | 0 | 235k |
| score_async_functions | float64 | 0 | 1 |
| count_documentation | int64 | 0 | 1.04M |
| score_documentation | float64 | 0 | 1 |
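Each record that follows fills these columns in order: repository metadata for the copies of the file with the most stars, issues, and forks, then the raw file content, then derived statistics (counts and 0-to-1 scores for classes, generators, decorators, async functions, and documentation). As a minimal sketch of how such records might be consumed, assuming they have been exported to a local JSON Lines file; the path rows.jsonl, the 0.5 threshold, and the 10 kB size cap are illustrative assumptions, not part of the dump:

# Minimal sketch: filter records by their documentation score.
# Assumptions: rows exported to "rows.jsonl"; threshold and size cap are arbitrary.
import pandas as pd

df = pd.read_json("rows.jsonl", lines=True)

well_documented = df[(df["score_documentation"] > 0.5) & (df["size"] < 10_000)]
print(well_documented[["max_stars_repo_name", "max_stars_repo_path",
                       "score_documentation", "avg_line_length"]])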
7c5e0af3e6fbbe4ea83ab673bc82739437ec8f74
453
py
Python
python/day5-1.py
Aerdan/adventcode-2020
83120aa8c7fc9d1f2d34780610401e3c6d4f583b
[ "BSD-1-Clause" ]
null
null
null
python/day5-1.py
Aerdan/adventcode-2020
83120aa8c7fc9d1f2d34780610401e3c6d4f583b
[ "BSD-1-Clause" ]
null
null
null
python/day5-1.py
Aerdan/adventcode-2020
83120aa8c7fc9d1f2d34780610401e3c6d4f583b
[ "BSD-1-Clause" ]
null
null
null
#!/usr/bin/env python3

def binary(code, max, bits):
    ret = []
    for i in range(max):
        ret.append(bits[code[i]])
    return int(''.join(ret), base=2)

mid = 0

with open('input5.txt') as f:
    for line in f.readlines():
        line = line[:-1]
        row = binary(line[:7], 7, {'F': '0', 'B': '1'})
        col = binary(line[7:], 3, {'R': '1', 'L': '0'})
        sid = row * 8 + col

        mid = sid if sid > mid else mid

print(mid)
19.695652
55
0.487859
0
0
0
0
0
0
0
0
60
0.13245
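The record above (python/day5-1.py, 453 bytes) reports avg_line_length 19.695652, max_line_length 55, and alphanum_fraction 0.487859. The helper below is one plausible way to recompute those three columns for a file; the exact counting rules used to build the dump are not documented here, so treat it as an approximation.

# Approximate recomputation of three per-file statistics columns.
# Assumption: avg_line_length is total characters (including newlines)
# divided by the number of lines, which is consistent with the record above.
def simple_stats(text: str) -> dict:
    lines = text.splitlines()
    return {
        "avg_line_length": len(text) / len(lines),
        "max_line_length": max(len(line) for line in lines),
        "alphanum_fraction": sum(ch.isalnum() for ch in text) / len(text),
    }

with open("python/day5-1.py") as f:  # path taken from the record above
    print(simple_stats(f.read()))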
7c5e5eb731f86fd4dc537483a440f05753a38fab
600
py
Python
custom_stocks_py/base.py
baramsalem/Custom-stocks-py
5beeb7b6f93755ec7c00c25763accf6a52f8bbaf
[ "Unlicense" ]
null
null
null
custom_stocks_py/base.py
baramsalem/Custom-stocks-py
5beeb7b6f93755ec7c00c25763accf6a52f8bbaf
[ "Unlicense" ]
null
null
null
custom_stocks_py/base.py
baramsalem/Custom-stocks-py
5beeb7b6f93755ec7c00c25763accf6a52f8bbaf
[ "Unlicense" ]
null
null
null
""" custom_stocks_py base module. This is the principal module of the custom_stocks_py project. here you put your main classes and objects. Be creative! do whatever you want! If you want to replace this with a Flask application run: $ make init and then choose `flask` as template. """ class BaseClass: def base_method(self) -> str: """ Base method. """ return "hello from BaseClass" def __call__(self) -> str: return self.base_method() def base_function() -> str: """ Base function. """ return "hello from base function"
18.181818
61
0.646667
199
0.331667
0
0
0
0
0
0
408
0.68
7c5fa2ddc156126b4dccbe0c281c6059666eccf4
501
py
Python
dummy_server.py
dpmkl/heimdall
184f169f0be9f6b6b708364725f5db8b1f249d9c
[ "MIT" ]
null
null
null
dummy_server.py
dpmkl/heimdall
184f169f0be9f6b6b708364725f5db8b1f249d9c
[ "MIT" ]
null
null
null
dummy_server.py
dpmkl/heimdall
184f169f0be9f6b6b708364725f5db8b1f249d9c
[ "MIT" ]
null
null
null
#!/usr/bin/env python
import SimpleHTTPServer
import SocketServer
import logging

PORT = 8000


class GetHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):

    def do_GET(self):
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        self.wfile.write("Hello World ! '{}'".format(self.path))
        return


for i in range(4):
    Handler = GetHandler
    httpd = SocketServer.TCPServer(("", PORT + i), Handler)
    httpd.serve_forever()
25.05
64
0.682635
274
0.546906
0
0
0
0
0
0
68
0.135729
7c5ff08483bb708102f0397eb8e76c57f5a75cff
59
py
Python
cimsparql/__init__.py
nalu-svk/cimsparql
e69b0799a2bbd70027e2c8bb9970574991597ca5
[ "MIT" ]
null
null
null
cimsparql/__init__.py
nalu-svk/cimsparql
e69b0799a2bbd70027e2c8bb9970574991597ca5
[ "MIT" ]
null
null
null
cimsparql/__init__.py
nalu-svk/cimsparql
e69b0799a2bbd70027e2c8bb9970574991597ca5
[ "MIT" ]
null
null
null
"""Library for CIM sparql queries""" __version__ = "1.9.0"
19.666667
36
0.677966
0
0
0
0
0
0
0
0
43
0.728814
7c605332955c4b043be9f4d88d8eb7ca6bb505c8
934
py
Python
scripts/49-cat-logs.py
jmviz/xd
f905e5c61b2835073b19cc3fa0d6917432fa7ece
[ "MIT" ]
179
2016-03-05T03:14:56.000Z
2022-02-12T22:48:55.000Z
scripts/49-cat-logs.py
jmviz/xd
f905e5c61b2835073b19cc3fa0d6917432fa7ece
[ "MIT" ]
24
2016-02-14T07:43:42.000Z
2021-12-14T01:09:54.000Z
scripts/49-cat-logs.py
jmviz/xd
f905e5c61b2835073b19cc3fa0d6917432fa7ece
[ "MIT" ]
25
2016-02-19T20:35:03.000Z
2022-01-31T09:15:44.000Z
#!/usr/bin/env python3

# Usage:
#   $0 -o log.txt products/
#
# concatenates .log files (even those in subdirs or .zip) and combines into a single combined.log

from xdfile.utils import find_files_with_time, open_output, get_args
import boto3
# from boto.s3.connection import S3Connection
import os


def main():
    args = get_args('aggregates all .log files')
    outf = open_output()

    s3 = boto3.resource('s3')
    s3path = "logs/"
    # bucket = conn.get_bucket(s3path)
    bucket = s3.Bucket(os.environ['DOMAIN'])

    for obj in sorted(bucket.objects.all(), key=lambda x: x.last_modified):  # last_modified
        if s3path in obj.key:
            print("Name: %s LastModified:%s" % (obj.key.encode('utf-8'), obj.last_modified))

    for fn, contents, dt in sorted(find_files_with_time(*args.inputs, ext=".log"), key=lambda x: x[2]):  # earliest first
        outf.write_file(fn, contents.decode("utf-8"))


main()
29.1875
121
0.671306
0
0
0
0
0
0
0
0
358
0.383298
7c606dd98dcd0e38522a604061eae8d10c8862e6
1,844
py
Python
manuscript/link_checker.py
wuyang1002431655/tango_with_django_19
42d5878e4a12037daf04d785826357cd4351a16d
[ "Apache-2.0" ]
244
2016-04-12T15:39:47.000Z
2021-09-10T07:43:55.000Z
manuscript/link_checker.py
wuyang1002431655/tango_with_django_19
42d5878e4a12037daf04d785826357cd4351a16d
[ "Apache-2.0" ]
57
2016-03-29T22:12:09.000Z
2019-08-26T07:50:11.000Z
manuscript/link_checker.py
wuyang1002431655/tango_with_django_19
42d5878e4a12037daf04d785826357cd4351a16d
[ "Apache-2.0" ]
311
2016-04-27T04:41:02.000Z
2021-09-19T14:03:35.000Z
# Checks for broken links in the book chapters, printing the status of each link found to stdout.
# The Python package 'requests' must be installed and available for this simple module to work.
# Author: David Maxwell
# Date: 2017-02-14

import re
import requests


def main(chapters_list_filename, hide_success=True):
    """
    hide_success = a boolean switch that determines whether to show URLs that return a HTTP 200.
    If set to true, only URLs that fail will be printed.
    """
    chapters_f = open(chapters_list_filename, 'r')
    pattern = re.compile(r'\[([^]]+)]\(\s*(http[s]?://[^)]+)\s*\)')  # http://stackoverflow.com/a/23395483

    print 'filename\tline_no\ttitle\turl\tstatus_code'

    for filename in chapters_f:
        filename = filename.strip()

        if not filename or filename.startswith('{'):  # Skip non-filename lines
            continue

        chapter_f = open(filename, 'r')
        line_no = 1

        for line in chapter_f:
            line = line.strip()

            for match in re.findall(pattern, line):
                title = match[0]
                url = match[1]

                if '127.0.0.1' in url or 'localhost' in url:  # Don't check localhost URLs
                    continue

                request = None
                status_code = -1

                try:
                    request = requests.get(url)
                    status_code = request.status_code
                except requests.exceptions.ConnectionError:
                    request = None
                    status_code = 'FAILED_TO_CONNECT'

                if hide_success and status_code == 200:
                    continue

                title = title.replace('\t', ' ')
                print '{filename}\t{line_no}\t{title}\t{url}\t{status_code}'.format(filename=filename,
                                                                                    line_no=line_no,
                                                                                    title=title,
                                                                                    url=url,
                                                                                    status_code=status_code)

            line_no = line_no + 1

        chapter_f.close()

    chapters_f.close()


if __name__ == '__main__':
    main('Book.txt', hide_success=False)
28.369231
103
0.645879
0
0
0
0
0
0
0
0
695
0.376898
7c607c9719bd98d3bde94fd9eadb9fd81b05f7b7
116
py
Python
service/__init__.py
2890841438/fast-index.py
fa59f38ed009b4bdf5dbf27d8619d31f8b681118
[ "MIT" ]
4
2020-09-05T03:18:44.000Z
2020-09-15T05:56:54.000Z
utils/__init__.py
2890841438/fast-index.py
fa59f38ed009b4bdf5dbf27d8619d31f8b681118
[ "MIT" ]
null
null
null
utils/__init__.py
2890841438/fast-index.py
fa59f38ed009b4bdf5dbf27d8619d31f8b681118
[ "MIT" ]
null
null
null
# -*- coding = utf-8 -*-
# @Time: 2020/9/4 18:52
# @Author: dimples_yj
# @File: __init__.py.py
# @Software: PyCharm
19.333333
24
0.612069
0
0
0
0
0
0
0
0
111
0.956897
7c62e1ba59e97f238e09a86895f6c890c24d960e
5,819
py
Python
CLIP-ViL-Direct/vqa/pythia_clip_grid_feature.py
HermannLiang/CLIP-ViL
49c28bc5ece1aacfcbfd9c8810db70663ca0516a
[ "MIT" ]
null
null
null
CLIP-ViL-Direct/vqa/pythia_clip_grid_feature.py
HermannLiang/CLIP-ViL
49c28bc5ece1aacfcbfd9c8810db70663ca0516a
[ "MIT" ]
null
null
null
CLIP-ViL-Direct/vqa/pythia_clip_grid_feature.py
HermannLiang/CLIP-ViL
49c28bc5ece1aacfcbfd9c8810db70663ca0516a
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 """ Grid features extraction script. """ import argparse import os import torch import tqdm from fvcore.common.file_io import PathManager from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.engine import default_setup from detectron2.evaluation import inference_context from detectron2.modeling import build_model import numpy as np from clip.clip import load import torch.nn as nn from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize from grid_feats import ( add_attribute_config, build_detection_test_loader_with_attributes, ) # from timm.models.vision_transformer import resize_pos_embed # A simple mapper from object detection dataset to VQA dataset names dataset_to_folder_mapper = {} dataset_to_folder_mapper['coco_2014_train'] = 'train2014' dataset_to_folder_mapper['coco_2014_val'] = 'val2014' #dataset_to_folder_mapper['coco_2014_val'] = 'trainval2014' #dataset_to_folder_mapper['coco_2014_train'] = 'trainval2014' # One may need to change the Detectron2 code to support coco_2015_test # insert "coco_2015_test": ("coco/test2015", "coco/annotations/image_info_test2015.json"), # at: https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/datasets/builtin.py#L36 dataset_to_folder_mapper['coco_2015_test'] = 'test2015' dataset_to_folder_mapper['coco_2015_test-dev'] = 'test-dev2015' def extract_grid_feature_argument_parser(): parser = argparse.ArgumentParser(description="Grid feature extraction") parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file") parser.add_argument("--dataset", help="name of the dataset", default="coco_2014_train", choices=['coco_2014_train', 'coco_2014_val', 'coco_2015_test', 'coco_2015_test-dev']) parser.add_argument('--model_type', default='RN50', type=str, help='RN50, RN101, RN50x4, ViT-B/32, vit_base_patch32_224_in21k') parser.add_argument( "opts", help="Modify config options using the command-line", default=None, nargs=argparse.REMAINDER, ) return parser def extract_grid_feature_on_dataset(model, data_loader, dump_folder): for idx, inputs in enumerate(tqdm.tqdm(data_loader)): with torch.no_grad(): image_id = inputs[0]['image_id'] file_name = '%d.pth' % image_id # compute features images = model.preprocess_image(inputs) features = model.backbone(images.tensor) outputs = model.roi_heads.get_conv5_features(features) # modify the filename file_name = inputs[0]['file_name'].split("/")[-1].replace("jpg", "npy") outputs = outputs.permute(0, 2, 3, 1) exit() with PathManager.open(os.path.join(dump_folder, file_name), "wb") as f: np.save(f, outputs.cpu().numpy()) def do_feature_extraction(cfg, model, dataset_name, args): with inference_context(model): dump_folder = os.path.join(cfg.OUTPUT_DIR, "features", dataset_to_folder_mapper[dataset_name]) PathManager.mkdirs(dump_folder) data_loader = build_detection_test_loader_with_attributes(cfg, dataset_name, model_type='clip') extract_clip_feature_on_dataset(model, data_loader, dump_folder, args) def setup(args): """ Create configs and perform basic setups. 
""" cfg = get_cfg() add_attribute_config(cfg) cfg.merge_from_file(args.config_file) cfg.merge_from_list(args.opts) # force the final residual block to have dilations 1 cfg.MODEL.RESNETS.RES5_DILATION = 1 cfg.freeze() default_setup(cfg, args) return cfg def extract_clip_feature_on_dataset(model, data_loader, dump_folder, args): save_args.model_type = args.model_type.split("-")[0] mean = torch.Tensor([0.48145466, 0.4578275, 0.40821073]).to("cuda").reshape(3, 1, 1) std = torch.Tensor([0.26862954, 0.26130258, 0.27577711]).to("cuda").reshape(3, 1, 1) dump_folder = f"clip/{save_args.model_type}/" + dump_folder.split("/")[-1] if args.model_type == "ViT-B/32": num_patches = 558 #600 * 1000 // 32 // 32 print(num_patches) pos_embed = nn.Parameter(torch.zeros(num_patches + 1, 768, device='cuda'),) pos_embed.weight = resize_pos_embed(model.visual.positional_embedding.unsqueeze(0), pos_embed.unsqueeze(0)) model.visual.positional_embedding = pos_embed print(model.visual.positional_embedding.device) # pass dump_folder.replace( "rscratch", "dnn" ) dump_folder = "/dnn/sheng.s/clip_boi/grid-feats-vqa/" + dump_folder if not os.path.exists(dump_folder): os.makedirs(dump_folder) for idx, inputs in enumerate(tqdm.tqdm(data_loader)): with torch.no_grad(): image_id = inputs[0]['image_id'] file_name = '%d.pth' % image_id # compute features image = inputs[0]['image'].to("cuda").float() / 255.0 image = (image - mean) / std image = image.unsqueeze(0) outputs = model.encode_image(image) if "RN" in args.model_type: outputs = outputs.permute(0, 2, 3, 1) else: outputs = outputs[:, :, :].reshape(1, 13, 43, 768) with PathManager.open(os.path.join(dump_folder, file_name), "wb") as f: # save as CPU tensors torch.save(outputs.cpu(), f) def main(args): cfg = setup(args) model, transform = load(args.model_type, jit=False) do_feature_extraction(cfg, model, args.dataset, args) if __name__ == "__main__": args = extract_grid_feature_argument_parser().parse_args() print("Command Line Args:", args) main(args)
40.978873
131
0.687231
0
0
0
0
0
0
0
0
1,469
0.252449
7c6577b07dcade6abb36fc14d4e83aa262bb9bef
2,514
py
Python
src/node.py
aerendon/blockchain-basics
e3168afd097b26d23a09fd30e74e07b695e577d1
[ "MIT" ]
6
2018-08-09T14:36:35.000Z
2021-03-23T06:53:01.000Z
src/node.py
aerendon/blockchain-basics
e3168afd097b26d23a09fd30e74e07b695e577d1
[ "MIT" ]
null
null
null
src/node.py
aerendon/blockchain-basics
e3168afd097b26d23a09fd30e74e07b695e577d1
[ "MIT" ]
null
null
null
from flask import Flask, request
import time
import requests
import json

from blockchain import Blockchain
from block import Block

app = Flask(__name__)
blockchain = Blockchain()

peers = set()


@app.route('/add_nodes', methods=['POST'])
def register_new_peers():
    nodes = request.get_json()
    if not nodes:
        return "Invalid data", 400
    for node in nodes:
        peers.add(node)

    return "Success", 201


@app.route('/new_transaction', methods=['POST'])
def new_transaction():
    tx_data = request.get_json()
    required_fields = ["author", "content"]

    for field in required_fields:
        if not tx_data.get(field):
            return "Invalid transaction data", 404

    tx_data["timestamp"] = time.time()
    blockchain.add_new_transaction(tx_data)

    return "Success", 201


@app.route('/chain', methods=['GET'])
def get_chain():
    chain_data = []
    for block in blockchain.chain:
        chain_data.append(block.__dict__)
    return json.dumps({
        "length": len(chain_data),
        "chain": chain_data
    })


@app.route('/mine', methods=['GET'])
def mine_unconfirmed_transactions():
    result = blockchain.mine()
    if not result:
        return "No transactions to mine"
    return "Block #{} is mined.".format(result)


@app.route('/pending_tx')
def get_pending_tx():
    return json.dumps(blockchain.unconfirmed_transactions)


def consensus():
    global blockchain

    longest_chain = None
    current_len = len(blockchain)

    for node in peers:
        response = requests.get('http://{}/chain'.format(node))
        length = response.json()['length']
        chain = response.json()['chain']
        if length > current_len and blockchain.check_chain_validity(chain):
            current_len = length
            longest_chain = chain

    if longest_chain:
        blockchain = longest_chain
        return True

    return False


@app.route('/add_block', methods=['POST'])
def validate_and_add_block():
    block_data = request.get_json()
    # Note: brackets fixed so timestamp and previous_hash are passed as separate arguments
    block = Block(block_data["index"], block_data["transactions"],
                  block_data["timestamp"], block_data["previous_hash"])

    proof = block_data['hash']
    added = blockchain.add_block(block, proof)

    if not added:
        return "The block was discarded by the node", 400

    return "Block added to the chain", 201


def announce_new_block(block):
    for peer in peers:
        url = "http://{}/add_block".format(peer)
        requests.post(url, data=json.dumps(block.__dict__, sort_keys=True))


app.run(debug=True, port=8000)
25.917526
120
0.668258
0
0
0
0
1,581
0.628878
0
0
413
0.16428
7c67194eb5ab82333266efd8ffcbf64d199afeff
637
py
Python
Luke 02/02.py
Nilzone-/Knowit-Julekalender-2017
66ef8a651277e0fef7d9278f3f129410b5b98ee0
[ "MIT" ]
null
null
null
Luke 02/02.py
Nilzone-/Knowit-Julekalender-2017
66ef8a651277e0fef7d9278f3f129410b5b98ee0
[ "MIT" ]
null
null
null
Luke 02/02.py
Nilzone-/Knowit-Julekalender-2017
66ef8a651277e0fef7d9278f3f129410b5b98ee0
[ "MIT" ]
null
null
null
import numpy as np

size = 1000


def create_wall(x, y):
    return "{0:b}".format(x**3 + 12*x*y + 5*x*y**2).count("1") & 1


def build_grid():
    return np.array([create_wall(j+1, i+1) for i in range(size) for j in range(size)]).reshape(size, size)


def visit(grid, x=0, y=0):
    if grid[x][y]:
        return

    grid[x][y] = 1

    if x > 0:
        visit(grid, x-1, y)
    if x < size-1:
        visit(grid, x+1, y)
    if y > 0:
        visit(grid, x, y-1)
    if y < size-1:
        visit(grid, x, y+1)


grid = build_grid()

print "Original grid\n"
print grid

visit(grid)

print "\n\nAfter search\n"
print grid

print "\n%d unvisited points in grid" % (size**2 - np.count_nonzero(grid))
20.548387
104
0.620094
0
0
0
0
0
0
0
0
78
0.122449
7c67a7fccb58ad0744513e429cedf4044452005e
311
py
Python
databases/music.py
danielicapui/programa-o-avancada
d0e5b876b951ae04a46ffcda0dc0143e3f7114d9
[ "MIT" ]
null
null
null
databases/music.py
danielicapui/programa-o-avancada
d0e5b876b951ae04a46ffcda0dc0143e3f7114d9
[ "MIT" ]
null
null
null
databases/music.py
danielicapui/programa-o-avancada
d0e5b876b951ae04a46ffcda0dc0143e3f7114d9
[ "MIT" ]
null
null
null
from utills import *

conn,cur=start('music')
criarTabela("tracks","title text,plays integer")
music=[('trunder',20),
       ('my way',15)]
insertInto("tracks","title,plays",music)
#cur.executemany("insert into tracks (title,plays) values (?,?)",music)
buscaTabela("tracks","title")

conn.commit()
conn.close()
25.916667
71
0.691318
0
0
0
0
0
0
0
0
165
0.530547
7c684d5c56bbbdacbeb8612a9b08130a83635f9a
13,250
py
Python
video_analysis/code/scene_postprocess.py
pdxcycling/carv.io
cce0f91a76d3ceed714b3625d415131fd9540899
[ "MIT" ]
null
null
null
video_analysis/code/scene_postprocess.py
pdxcycling/carv.io
cce0f91a76d3ceed714b3625d415131fd9540899
[ "MIT" ]
null
null
null
video_analysis/code/scene_postprocess.py
pdxcycling/carv.io
cce0f91a76d3ceed714b3625d415131fd9540899
[ "MIT" ]
null
null
null
import pandas as pd import numpy as np import re from collections import Counter from flow_preprocess import FlowPreprocess class ScenePostprocess(object): """ Heavy-lifting macro-feature class """ def __init__(self, flow_df, quality_df, remove_transitions=False): """ Default constructor Args: flow_df: Optical flow dataframe quality_df: Image quality dataframe remove_transitions: whether to remove frames around scene transitions Returns: Nothing """ self.flow_df = flow_df.copy() self.quality_df = quality_df.copy() self.remove_transitions = remove_transitions self.is_static = None self.duration = self.get_duration() self.num_frames = quality_df.shape[0] ## Do some rudimentary cleaning of/addding to the flow data self.flow_df['distance'] = FlowPreprocess.flow_distances(self.flow_df) self.flow_df['angle'] = FlowPreprocess.flow_angles(self.flow_df) ## Add scene-centric timestamps ## TODO: This has a few issues with actual start times... scene_time_offset = self.quality_df['time'].min() self.flow_df['time_scene'] = self.flow_df['time'] - scene_time_offset self.quality_df['time_scene'] = self.quality_df['time'] - scene_time_offset self.min_time_scene = self.quality_df['time_scene'].min() self.max_time_scene =self.quality_df['time_scene'].max() self.min_frame_num = self.quality_df['frame_number'].min() self.max_frame_num = self.quality_df['frame_number'].max() def _find_columns_by_name(self, df, name_re): """ Helper function to find binned features by the prefixes in their names Args: df: Dataframe name_re: regular expression for finding colmns Returns: List of columns that have names that match name_re """ output = [] cols = df.columns for c in cols: if re.search(name_re, c): output.append(c) return output def get_duration(self): """ Find scene duration (in seconds) Args: None Returns: Duration of scene in seconds """ min_time = np.min(self.quality_df['time']) max_time = np.max(self.quality_df['time']) return max_time - min_time def get_avg_blur(self): """ Find average blur across entire scene NOTE: The higher the number, the less the blur. Args: None Returns: Average blur as single float value """ avg_blur = np.mean(self.quality_df['blur']) return avg_blur def get_blur_percentage(self, blur_threshold=100): """ Proportion of of frames in scene that are blurry. A frame is "blurry" if its average blur is below blur_threshold Args: blur_threshold: A float value that defines the threshold between blurry and non-blurry Returns: Flow value of the proportion of the scene's frames that are blurry """ blur_pct = 1. * np.sum(self.quality_df['blur'] < blur_threshold)/self.quality_df.shape[0] return blur_pct def get_top_colors(self, num_colors=10): """ Find the dominant colors in all frames across the scene NOTE: This can be sped if only a subset of frames are sampled. Need to run experiments on the optimal sampling rate. TODO: This approach should be changed in v2.0 Args: num_colors: The number of most common colors to return. This is 10 by default. 
Returns: Numpy array containing the most prevalent colors in the scene """ self.num_colors = num_colors max_color_array = np.array(str) cols = self._find_columns_by_name(self.quality_df, "hue") for frame_num in range(self.min_frame_num, self.max_frame_num + 1): frame_color_array = self.quality_df[cols].ix[frame_num].sort_values()[::-1].index.values[:self.num_colors] max_color_array = np.append(max_color_array, frame_color_array) ## Find most common colors color_count = Counter(max_color_array) return map(lambda x: x[0], color_count.most_common(self.num_colors)) def _get_values_from_bin_names(self, cols): """ From a list of columns representing bins, return a list of the values of those bins Args: cols: a list of column names of histogram bins Returns: A list of the value of each bin """ values = [] for c in cols: matches = re.search('_(\d+.\d+)', c) if matches: values.append(float(matches.groups(0)[0])) else: ## This should never happen, but just in case... values.append(None) return values def get_avg_saturation(self): """ Find the average saturation across all frames in the scene Args: None Returns: A float value of average scene saturation """ cols = self._find_columns_by_name(self.quality_df, "sat") vals = self._get_values_from_bin_names(cols) sums = self.quality_df[cols].sum() avg = np.sum((sums * vals).values)/np.sum(sums) return avg def get_avg_value(self): """ Find the average value (from HSV colorspace) across all frames in the scene Args: None Returns: A float value of average scene HSV value """ cols = self._find_columns_by_name(self.quality_df, "val") vals = self._get_values_from_bin_names(cols) sums = self.quality_df[cols].sum() avg = np.sum((sums * vals).values)/np.sum(sums) return avg def get_pixel_pct(self, col_name, frame_size=(480., 360.)): """ Calculates the number of pixels in a scene are in col_name Args: col_name: the name of column of interest frame_size: Returns: Proportion of pixels that are in the column of interest """ frame_pixels = frame_size[0] * frame_size[1] num_frames = self.quality_df.shape[0] total_pixels = frame_pixels * num_frames pixel_cnt = np.sum(self.quality_df[col_name]) return pixel_cnt / total_pixels """ vvv Flow calculations vvv """ def get_flow_percentile(self, percentile=0.5): """ Find the distance traveled by optical flow point, filtered by the specified percentile. Args: percentile: Flow distance percentile to return. Percentile is between 0 and 1. Returns: A float value of the flow distance """ return self.flow_df['distance'].quantile(percentile) def get_avg_flow(self): """ Find the average distance an optical flow point has traveled between frames. Args: None Returns: A float value of the average distance an optical flow point has traveled between frames """ return self.flow_df['distance'].mean() def get_shake(self): """ Return the shakiness of the scene. Shake is calculated by finding the median distance an optical flow point has traveled in each frame, and averaging these values. TODO: vector addition. Args: None. Returns: A float value representing the shakiness of a scene. """ if not self.flow_df.empty: shake = np.mean((self.flow_df.groupby('frame_number').median())['distance']) else: shake = 0 return shake def get_flow_angle(self): """ Find the average angle of travel of the optical flow points in a scene. 
Args: None Returns: A float value of the average optical flow angle """ return self.flow_df['angle'].mean() def get_flow_angle_std_dev(self): """ Find the standard devation of all optical flows in a scene Args: None Returns: A float value of the standard deviation of optical flow angle """ return self.flow_df['angle'].std() def is_static_scene(self, remove_transitions=False): """ Determines whether or not scene is a static scene (vs. action scene) TODO: Ignore some time around scene transitions because of fades. Ensure that scene is long enough. Args: remove_transitions: remove frames at beginning and end of scene Returns: A boolean value of whether a scene is static or not. """ is_static = None motion_threshold = 1 # one pixel of movement total_flow_points = self.flow_df.shape[0] ## number of frames in range thresholded_df = self.flow_df[self.flow_df['distance'] > motion_threshold].copy() if thresholded_df.empty: is_static = True else: ## Due to "artsy" transitions, ignore around beginning/end of scene if remove_transitions: ## Amount of transition time between scenes ## This could be a percentage... transition_time_buffer = 1 # in seconds ## Ensure that scene is long enough to remove buffer from analysis if self.max_time_scene > transition_time_buffer: thresholded_df = thresholded_df[thresholded_df['time_scene'] > transition_time_buffer] thresholded_df = thresholded_df[thresholded_df['time_scene'] < self.max_time_scene - transition_time_buffer] ## Do not remove transitions if scene is too short else: pass if not thresholded_df.empty: ##moving_flow_points = thresholded_df.shape[0] moving_frames = thresholded_df.groupby(by=['frame_number']).mean().shape[0] else: ##moving_flow_points = 0 moving_frames = 0 ##pts_ratio = 1. * moving_flow_points/self.num_frames pts_ratio = 1. * moving_frames/self.num_frames # less than 1 moving frame per 4 frames is_static = pts_ratio < .25 return is_static def num_trackable_points_per_frame(self): """ Find the total number of optical flow points that are trackable per frame. "Trackability" is defined as being able to find a specific optical flow point between frames. Args: None Returns: A dataframe with the number of trackable points, by frame. """ return self.flow_df.groupby('frame_number').size() def avg_num_trackable_points_per_frame(self): """ Find the average number of optical flow points that are trackable, over all frames in the frame. "Trackability" is defined as being able to find a specific optical flow point between frames. Args: None Returns: A float value of the average number of trackable optical flow points in all of the scene's frames """ return 1. 
* len(self.flow_df) / self.num_frames def to_df(self): """ Return a dataframe containing all features TODO: better type checking Args: None Returns: Dataframe with all features """ scene_df = pd.DataFrame(index=[0]) top_colors = self.get_top_colors() for n in range(self.num_colors): scene_df['top_color_' + str(n)] = top_colors[n] scene_df['avg_sat'] = self.get_avg_saturation() scene_df['avg_val'] = self.get_avg_value() scene_df['black_pixel_pct'] = self.get_pixel_pct('num_black_pixels') scene_df['white_pixel_pct'] = self.get_pixel_pct('num_white_pixels') scene_df['flow_percentile_25'] = self.get_flow_percentile(0.25) scene_df['flow_percentile_50'] = self.get_flow_percentile(0.25) scene_df['flow_percentile_75'] = self.get_flow_percentile(0.25) scene_df['flow_avg'] = self.get_avg_flow() scene_df['flow_angle'] = self.get_flow_angle() scene_df['flow_angle_std_dev'] = self.get_flow_angle_std_dev() scene_df['is_static_scene'] = self.is_static_scene() ##scene_df['action_peak_in_scene'] = None # where in scene does no scene_df['shake_coeff'] = self.get_shake() scene_df['avg_flow_pts_per_frame'] = self.avg_num_trackable_points_per_frame() scene_df['blur'] = self.get_avg_blur() scene_df['blur_pct'] = self.get_blur_percentage() scene_df['duration'] = self.get_duration() return scene_df
35.05291
128
0.60234
13,124
0.990491
0
0
0
0
0
0
6,820
0.514717
7c6a104628420af03301492fef43b77ba98e1a64
6,840
py
Python
examples/pytorch/mnist/plot.py
ThomasRot/rational_activations
1fa26d1ee5f3c916eda00c899afa96eccb960143
[ "MIT" ]
null
null
null
examples/pytorch/mnist/plot.py
ThomasRot/rational_activations
1fa26d1ee5f3c916eda00c899afa96eccb960143
[ "MIT" ]
null
null
null
examples/pytorch/mnist/plot.py
ThomasRot/rational_activations
1fa26d1ee5f3c916eda00c899afa96eccb960143
[ "MIT" ]
null
null
null
import torch import numpy as np import pickle torch.manual_seed(17) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False np.random.seed(17) import argparse import torch.nn as nn import torch.nn.functional as F import matplotlib import os from rational.torch import Rational, RecurrentRational, RecurrentRationalModule from torchvision import datasets, transforms from torch.utils.tensorboard import SummaryWriter from mnist import VGG, LeNet5, actfvs from matplotlib import pyplot as plt font = {'family': 'normal', 'weight': 'bold', 'size': 22} matplotlib.rc('font', **font) torch.set_anomaly_enabled(True) def test(args, model, device, test_loader, epoch): model.eval() test_loss = 0 correct = 0 with torch.no_grad(): for data, target in test_loader: data, target = data.to(device), target.to(device) output = model(data) test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability correct += pred.eq(target.view_as(pred)).sum().item() test_loss /= len(test_loader.dataset) acc = 100. * correct / len(test_loader.dataset) print('\nTest set: Epoch: {}, Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(epoch, test_loss, correct, len(test_loader.dataset), acc)) return acc def main(): # Training settings parser = argparse.ArgumentParser(description='PyTorch MNIST Example') parser.add_argument('--batch-size', type=int, default=64, metavar='N', help='input batch size for training (default: 64)') parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training') parser.add_argument('--seed', type=int, default=17, metavar='S', help='random seed (default: 1)') parser.add_argument('--log-interval', type=int, default=10, metavar='N', help='how many batches to wait before logging training status') parser.add_argument('--dataset', type=str, default='mnist', help='dataset to use') parser.add_argument('--arch', type=str, required=True) parser.add_argument('--init', type=str, default="", choices=["", "xavier", "he"]) args = parser.parse_args() networks = dict({ "vgg": VGG, "lenet": LeNet5, }) network = networks[args.arch] # activation_function_keys = [x for x in list(actfvs.keys()) if 'pau' in x] # activation_function_keys = ['pau'] # activation_function_keys = ['recurrent_pau'] activation_function_keys = ['pau', 'recurrent_pau'] optimizer = 'sgd' epochs = ['final'] for activation_function_key in activation_function_keys: for epoch in epochs: print("---" * 42) print("Starting with dataset: {}, activation function: {}".format(args.dataset, activation_function_key)) print("---" * 42) load_path = 'examples/runs/mnist/paper_{}_{}_{}{}_seed{}/'.format(args.dataset, args.arch, optimizer, "_init_{}".format(args.init) if args.init != "" else "", args.seed) + activation_function_key use_cuda = not args.no_cuda and torch.cuda.is_available() torch.manual_seed(args.seed) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False np.random.seed(args.seed) device = torch.device("cuda" if use_cuda else "cpu") kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {} if args.dataset == 'mnist': test_loader = torch.utils.data.DataLoader( datasets.MNIST('../data', train=False, transform=transforms.Compose([ transforms.Resize((32, 32)), transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ])), batch_size=args.batch_size, shuffle=True, **kwargs) lr_scheduler_milestones = [30, 60, 90] # Simple CNN with 3 Conv # 
lr_scheduler_milestones = [40, 80] # VGG elif args.dataset == 'fmnist': test_loader = torch.utils.data.DataLoader( datasets.FashionMNIST('../data', train=False, transform=transforms.Compose([ transforms.Resize((32, 32)), transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ])), batch_size=args.batch_size, shuffle=True, **kwargs) lr_scheduler_milestones = [40, 80] else: raise ValueError('dataset error') model = network(activation_func=activation_function_key).to(device) model.load_state_dict(torch.load(os.path.join(load_path, 'model_{}.pt'.format(epoch)))) paus = list() for name, layer in model.named_modules(): if isinstance(layer, Rational): layer.input_retrieve_mode(max_saves=10) paus.append(('rational', name, layer)) if isinstance(layer, RecurrentRationalModule): layer.input_retrieve_mode(max_saves=10) paus.append(('recurrent_rational', name, layer)) if len(paus) > 0: os.makedirs(os.path.join(load_path, 'plots'), exist_ok=True) # dict(model.named_parameters())["features.3.0.bias"][0] # dict(model.named_parameters())["features.4.2.numerator"][0] print("Starting model eval") acc = test(args, model, device, test_loader, epoch) print("Finished model eval -> Plot") # fig = plt.figure(1, figsize=(6*len(paus),6)) fig_dicts = [] for i, p in enumerate(paus): fig = p[2].show(display=False) print(fig) fig_dicts.append(fig) pickle.dump(fig_dicts, open(f'{args.dataset}_{args.arch}_{activation_function_key}_(acc{acc}%).fig.pkl', "wb")) else: print("No Rational Activations found. Exit without plotting") if __name__ == '__main__': main()
45.90604
134
0.55424
0
0
0
0
0
0
0
0
1,392
0.203509
7c6a234a8099f1c0f0c886e2b520d9f41e36c635
7,093
py
Python
fts/fluxrss.py
AetherBlack/Veille-Informatique
e80451c5eb21f43ac1a9baac3342ad0d4102d18b
[ "Linux-OpenIB" ]
null
null
null
fts/fluxrss.py
AetherBlack/Veille-Informatique
e80451c5eb21f43ac1a9baac3342ad0d4102d18b
[ "Linux-OpenIB" ]
null
null
null
fts/fluxrss.py
AetherBlack/Veille-Informatique
e80451c5eb21f43ac1a9baac3342ad0d4102d18b
[ "Linux-OpenIB" ]
null
null
null
#!/usr/bin/python3 from urllib.parse import urlparse import feedparser import requests import asyncio import discord import hashlib import os from const import CHANNEL_RSS, WAIT_UNTIL_NEW_CHECK, \ SQLITE_FOLDER_NAME, SQLITE_FILE_NAME from fts.database import Database from fts.cleandatabase import CleanDatabase class FluxRSS: """ Class of FluxRSS. Get news of the feedrss url parse in args. """ def __init__(self, bot, cwd): """ Initialize class @param => DiscordBot: `bot`: Discord Bot Instance. @param => str: `cwd`: Current Working Directory of main.py file. """ # Discord self.bot = bot self.bot_username = self.bot.user.name self.rss_channel = self.bot.get_channel(CHANNEL_RSS) # Path self.cwd = cwd # Database self.db_path = os.path.join(self.cwd, SQLITE_FOLDER_NAME) self.database = Database(self.db_path, SQLITE_FILE_NAME) def get_news(self, url): """ Get the news of the rss feed. @param => str: `url`: url of the rss feed. Return dict with an int index key and title, description and link in a list for the value. """ dict_news = dict() # Get the content of the requests content = requests.get(url).text # Parse the content parser = feedparser.parse(content) # Set the root parser = parser["entries"] # Get the number of news news_number = len(parser) # Construct the dict for index in range(news_number): # Get the title title = parser[index]["title"] # Get the description description = parser[index]["description"] # Get the link link = parser[index]["links"][0]["href"] # Set list args = [ title, description, link ] # Add the list to the dict dict_news[str(index)] = args # Return the dict return dict_news def is_new(self, root, name, title, description, link): """ Return True if the news in the feed is new. @param => str: `title`: Title of the news. @param => str: `description`: Description of the news. @param => str: `link`: Link of the rss feed. """ # Hash description hash_description = hashlib.sha256(bytes(description, "utf-8", errors="ignore")).hexdigest() # Return the check of the query return not self.database.isNewsExists(root, name, title, hash_description, link) def embeded_msg(self, root, name, title, content, link, color): """ Create the embeded message and send it to discord. @param => str: `root`: Name of the Website. @param => str: `name`: Name set in const. Categorie of the news @param => str: `title`: Title of the news. @param => str: `content`: Content description of the news. @param => str: `link`: Link of the news. @param => discord.Color: `color`: Color for the left panel. """ # Set the Name, description and color on the left news = discord.Embed(title="{0} - {1}".format(root, name), description="News :", color=(color or 0x00ff00)) #Set bot name and profil picture news.set_author(name=self.bot_username, icon_url=self.bot.user.avatar_url) #Set the description and the link for the main message content = content + "\n" + link news.add_field(name=title, value=content[:1024], inline=False) #Show the bot username in footer news.set_footer(text="Generate by @{0}".format(self.bot_username)) # Return the final Discord embeded message return news async def feedrss(self, json_rss): """ Get the news and send it to the channel. @param => dict: `json_rss`: JSON data of the RSS Flux. 
""" # Show const for the format self.json_rss = json_rss # While the connection is not closed while not self.bot.is_closed(): # For each key for key, sections in self.json_rss.items(): # Get the root name set in const root = key # For each sections for index_section, section in enumerate(sections): # Check customization of the section if "custom" in section.keys(): # Check color if "color" in section["custom"].keys(): color = getattr(discord.Color, section["custom"]["color"])() else: color = False else: color = False # Get the name of the section name = section["name"] # Get the time until the cleaning of the database for the root and name given wait_time = section["clean"] # Check if the cleaning database is already launched if isinstance(wait_time, str): # Launch the function to clean the database Thread = CleanDatabase(root, name, wait_time, self.db_path, SQLITE_FILE_NAME) Thread.start() # Change the variable type of the clean line in json_rss to launch relaunch the requests self.json_rss[root][index_section]["clean"] = True # For each link in the section for link in section["link"]: # Get title, description and link in a dict dict_news = self.get_news(link) # Verify if the news already exists for value in dict_news.values(): # Get title title = value[0] # Get description description = value[1] # Get link link = value[2] # Check if the news is new if self.is_new(root, name, title, description, link): # Hash the description hash_description = hashlib.sha256(bytes(description, "utf-8", errors="ignore")).hexdigest() # write the news into the database self.database.AddNews(root, name, title, hash_description, link) #Create the discord message message = self.embeded_msg(root, name, title, description, link, color) #Send to discord await self.rss_channel.send(embed=message) # Wait until the next verification await asyncio.sleep(WAIT_UNTIL_NEW_CHECK)
36.188776
123
0.537572
6,772
0.954744
0
0
0
0
3,272
0.4613
2,719
0.383336
7c6a29fe050821e14428d8ec0b7f5f5436d84fcb
11,691
py
Python
src/poke_env/player/player_network_interface.py
kiyohiro8/poke-env
7a1a4b155e8a73bd712d44e70c4192f8032d7e6f
[ "MIT" ]
null
null
null
src/poke_env/player/player_network_interface.py
kiyohiro8/poke-env
7a1a4b155e8a73bd712d44e70c4192f8032d7e6f
[ "MIT" ]
null
null
null
src/poke_env/player/player_network_interface.py
kiyohiro8/poke-env
7a1a4b155e8a73bd712d44e70c4192f8032d7e6f
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """This module defines a base class for communicating with showdown servers. """ import json import logging import requests import websockets # pyre-ignore from abc import ABC from abc import abstractmethod from asyncio import CancelledError from asyncio import ensure_future from asyncio import Event from asyncio import Lock from asyncio import sleep from time import perf_counter from typing import List from typing import Optional from aiologger import Logger # pyre-ignore from poke_env.exceptions import ShowdownException from poke_env.player_configuration import PlayerConfiguration from poke_env.server_configuration import ServerConfiguration class PlayerNetwork(ABC): """ Network interface of a player. Responsible for communicating with showdown servers. Also implements some higher level methods for basic tasks, such as changing avatar and low-level message handling. """ def __init__( self, player_configuration: PlayerConfiguration, *, avatar: Optional[int] = None, log_level: Optional[int] = None, server_configuration: ServerConfiguration, start_listening: bool = True, ) -> None: """ :param player_configuration: Player configuration. :type player_configuration: PlayerConfiguration :param avatar: Player avatar id. Optional. :type avatar: int, optional :param log_level: The player's logger level. :type log_level: int. Defaults to logging's default level. :param server_configuration: Server configuration. :type server_configuration: ServerConfiguration :param start_listening: Wheter to start listening to the server. Defaults to True. :type start_listening: bool """ self._authentication_url = server_configuration.authentication_url self._avatar = avatar self._password = player_configuration.password self._username = player_configuration.username self._server_url = server_configuration.server_url self._logged_in: Event = Event() self._sending_lock = Lock() self._websocket: websockets.client.WebSocketClientProtocol # pyre-ignore self._logger: Logger = self._create_player_logger(log_level) # pyre-ignore if start_listening: self._listening_coroutine = ensure_future(self.listen()) async def _accept_challenge(self, username: str) -> None: assert self.logged_in.is_set() await self._set_team() await self._send_message("/accept %s" % username) async def _challenge(self, username: str, format_: str): assert self.logged_in.is_set() await self._set_team() await self._send_message(f"/challenge {username}, {format_}") async def _change_avatar(self, avatar_id: Optional[int]) -> None: """Changes the player's avatar. :param avatar_id: The new avatar id. If None, nothing happens. :type avatar_id: int """ await self._wait_for_login() if avatar_id is not None: await self._send_message(f"/avatar {avatar_id}") def _create_player_logger(self, log_level: Optional[int]) -> Logger: # pyre-ignore """Creates a logger for the player. Returns a Logger displaying asctime and the player's username before messages. :param log_level: The logger's level. :type log_level: int :return: The logger. :rtype: Logger """ logger = logging.getLogger(self._username) stream_handler = logging.StreamHandler() if log_level is not None: logger.setLevel(log_level) formatter = logging.Formatter( "%(asctime)s - %(name)s - %(levelname)s - %(message)s" ) stream_handler.setFormatter(formatter) logger.addHandler(stream_handler) return logger async def _handle_message(self, message: str) -> None: """Handle received messages. :param message: The message to parse. 
:type message: str """ try: self.logger.debug("Received message to handle: %s", message) # Showdown websocket messages are pipe-separated sequences split_message = message.split("|") assert len(split_message) > 1 # The type of message is determined by the first entry in the message # For battles, this is the zero-th entry # Otherwise it is the one-th entry if split_message[1] == "challstr": # Confirms connection to the server: we can login await self._log_in(split_message) elif split_message[1] == "updateuser": if split_message[2] == " " + self._username: # Confirms successful login self.logged_in.set() elif not split_message[2].startswith(" Guest "): self.logger.warning( """Trying to login as %s, showdown returned %s """ """- this might prevent future actions from this agent. """ """Changing the agent's username might solve this problem.""", self.username, split_message[2], ) elif "updatechallenges" in split_message[1]: # Contain information about current challenge await self._update_challenges(split_message) elif split_message[0].startswith(">battle"): # Battle update await self._handle_battle_message(message) elif split_message[1] == "updatesearch": self.logger.debug("Ignored message: %s", message) pass elif split_message[1] == "popup": self.logger.warning("Popup message received: %s", message) elif split_message[1] in ["nametaken"]: self.logger.critical("Error message received: %s", message) raise ShowdownException("Error message received: %s", message) elif split_message[1] == "pm": self.logger.info("Received pm: %s", split_message) else: self.logger.critical("Unhandled message: %s", message) raise NotImplementedError("Unhandled message: %s" % message) except CancelledError as e: self.logger.critical("CancelledError intercepted. %s", e) except Exception as exception: self.logger.exception( "Unhandled exception raised while handling message:\n%s", message ) raise exception async def _log_in(self, split_message: List[str]) -> None: """Log the player with specified username and password. Split message contains information sent by the server. This information is necessary to log in. :param split_message: Message received from the server that triggers logging in. :type split_message: List[str] """ if self._password: log_in_request = requests.post( self._authentication_url, data={ "act": "login", "name": self._username, "pass": self._password, "challstr": split_message[2] + "%7C" + split_message[3], }, ) self.logger.info("Sending authentication request") assertion = json.loads(log_in_request.text[1:])["assertion"] else: self.logger.info("Bypassing authentication request") assertion = "" await self._send_message(f"/trn {self._username},0,{assertion}") await self._change_avatar(self._avatar) async def _search_ladder_game(self, format_): await self._set_team() await self._send_message(f"/search {format_}") async def _send_message( self, message: str, room: str = "", message_2: Optional[str] = None ) -> None: """Sends a message to the specified room. `message_2` can be used to send a sequence of length 2. :param message: The message to send. :type message: str :param room: The room to which the message should be sent. :type room: str :param message_2: Second element of the sequence to be sent. Optional. 
:type message_2: str, optional """ if message_2: to_send = "|".join([room, message, message_2]) else: to_send = "|".join([room, message]) await self._websocket.send(to_send) self.logger.info(">>> %s", to_send) async def _set_team(self): if self._team is not None: await self._send_message("/utm %s" % self._team.yield_team()) async def _wait_for_login( self, checking_interval: float = 0.001, wait_for: int = 5 ) -> None: start = perf_counter() while perf_counter() - start < wait_for: await sleep(checking_interval) if self.logged_in: return assert self.logged_in async def listen(self) -> None: """Listen to a showdown websocket and dispatch messages to be handled.""" self.logger.info("Starting listening to showdown websocket") coroutines = [] try: async with websockets.connect( self.websocket_url, max_queue=None ) as websocket: self._websocket = websocket async for message in websocket: self.logger.info("<<< %s", message) coroutines.append(ensure_future(self._handle_message(message))) except websockets.exceptions.ConnectionClosedOK: self.logger.warning( "Websocket connection with %s closed", self.websocket_url ) except (CancelledError, RuntimeError) as e: self.logger.critical("Listen interrupted by %s", e) except Exception as e: self.logger.exception(e) finally: for coroutine in coroutines: coroutine.cancel() async def stop_listening(self) -> None: if self._listening_coroutine is not None: self._listening_coroutine.cancel() await self._websocket.close() @abstractmethod async def _handle_battle_message(self, message: str) -> None: """Abstract method. Implementation should redirect messages to corresponding battles. """ @abstractmethod async def _update_challenges(self, split_message: List[str]) -> None: """Abstract method. Implementation should keep track of current challenges. """ @property def logged_in(self) -> Event: """Event object associated with user login. :return: The logged-in event :rtype: Event """ return self._logged_in @property def logger(self) -> Logger: # pyre-ignore """Logger associated with the player. :return: The logger. :rtype: Logger """ return self._logger @property def username(self) -> str: """The player's username. :return: The player's username. :rtype: str """ return self._username @property def websocket_url(self) -> str: """The websocket url. It is derived from the server url. :return: The websocket url. :rtype: str """ return f"ws://{self._server_url}/showdown/websocket"
36.307453
88
0.610042
11,007
0.941493
0
0
1,209
0.103413
7,530
0.644085
4,330
0.37037
7c6b5cb13f50ba4f535dc82987b58898ad693a5f
5,966
py
Python
data/external/repositories/42139/KDDCup13Track2-master/blocking.py
Keesiu/meta-kaggle
87de739aba2399fd31072ee81b391f9b7a63f540
[ "MIT" ]
null
null
null
data/external/repositories/42139/KDDCup13Track2-master/blocking.py
Keesiu/meta-kaggle
87de739aba2399fd31072ee81b391f9b7a63f540
[ "MIT" ]
null
null
null
data/external/repositories/42139/KDDCup13Track2-master/blocking.py
Keesiu/meta-kaggle
87de739aba2399fd31072ee81b391f9b7a63f540
[ "MIT" ]
1
2019-12-04T08:23:33.000Z
2019-12-04T08:23:33.000Z
#!/usr/bin/env python from common import * import csv import argparse from unidecode import unidecode from nameparser import constants as npc from collections import defaultdict import cPickle as pickle import re stopwords_custom = set(['document', 'preparation', 'system', 'consortium', 'committee', 'international', 'artificial', 'network', 'distributed', 'based', 'research', 'language', 'technology', 'project', 'design', 'computer', 'control', 'object', 'internet', 'propulsion', 'corp', 'workshop', 'xml', 'world', 'work', 'thesis', 'test', 'tool', 'structure', 'statistical', 'laboratory', 'ltd', 'objects', 'process', 'scheduling', 'september', 'special', 'student', 'programs', 'capacitated', 'balancing', 'assembly', 'aspect', 'model', 'inc', 'psychological', 'psychology', 'mohammed', 'computing', 'software', 'programming', 'new', 'applications', 'jet', 'propulsion', 'classification', 'recommendation']) stopwords = stopwords_custom | npc.TITLES | npc.PREFIXES | npc.SUFFIXES | npc.CONJUNCTIONS def bin_exactsamename(authors): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): bins[a['fullname']].add(id) if (i+1) % 10000 == 0: print_err(i+1) return bins def bin_samename(authors): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): bins[a['fullname_joined']].add(id) if (i+1) % 10000 == 0: print_err(i+1) return bins def bin_fFfL(authors): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): bins[a['fFfL']].add(id) if (i+1) % 10000 == 0: print_err(i+1) return bins def bin_fF3L(authors, max_bin_size=20): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): if ':' not in a['fFiL'] and len(a['name_last']) >= 3 and len(a['fFiL']) > 2: bins[a['fFiL'] + a['name_last'][1:3]].add(id) if (i+1) % 10000 == 0: print_err(i+1) bk = bins.keys() for b in bk: if len(bins[b]) > max_bin_size: del bins[b] return bins def bin_fFiL(authors, max_bin_size=20): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): if len(a['fFiL']) > 2: bins[a['fFiL']].add(id) if (i+1) % 10000 == 0: print_err(i+1) bk = bins.keys() for b in bk: if len(bins[b]) > max_bin_size: del bins[b] return bins def bin_iFfL(authors): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): bins[a['iFfL']].add(id) if (i+1) % 10000 == 0: print_err(i+1) return bins def bin_fullparsedname(authors): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): bins[a['fullname_parsed']].add(id) if (i+1) % 10000 == 0: print_err(i+1) return bins def bin_iFoffbyoneL(authors, max_bin_size=30): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): if ':' not in a['fullname'] and a['name_first'] and a['name_last']: bins[a['name_first'][0] + a['name_last']].add(id) if len(a['name_last']) > 1: bins[a['name_first'][0] + a['name_last'][:-1]].add(id) if (i+1) % 10000 == 0: print_err(i+1) bk = bins.keys() for b in bk: if len(bins[b]) > max_bin_size: del bins[b] return bins def bin_2FoffbyoneL(authors, max_bin_size=30): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): if ':' not in a['fullname'] and len(a['name_first']) >= 2 and a['name_last']: bins[a['name_first'][0:2] + a['name_last']].add(id) if len(a['name_last']) > 1: bins[a['name_first'][0:2] + a['name_last'][:-1]].add(id) if (i+1) % 10000 == 0: print_err(i+1) bk = bins.keys() for b in bk: if len(bins[b]) > max_bin_size: del bins[b] return bins def bin_metaphone(authors): bins = defaultdict(set) for i, (id, a) in 
enumerate(authors.iteritems()): if a['metaphone_fullname']: bins[a['metaphone_fullname']].add(id) if (i+1) % 10000 == 0: print_err(i+1) # bk = bins.keys() # for b in bk: # if len(bins[b]) > max_bin_size: # del bins[b] return bins def bin_offbylastone(authors): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): if ':' not in a['fullname_joined']: bins[a['fullname_joined']].add(id) if len(a['fullname_joined']) > 1: bins[a['fullname_joined'][:-1]].add(id) if (i+1) % 10000 == 0: print_err(i+1) return bins def bin_token(authors, nw=2, max_bin_size=100): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): if ':' not in a['name']: tokens = re.sub("[^\w]", " ", a['name']).split() tokens = [v for v in tokens if len(v) > 2 and v not in stopwords] ngrams = zip(*[tokens[j:] for j in range(nw)]) for p in ngrams: pg = ' '.join(p) if len(pg) > len(p)*2-1: bins[pg].add(id) if (i+1) % 10000 == 0: print_err(i+1) bk = bins.keys() for b in bk: if len(bins[b]) > max_bin_size: del bins[b] return bins def bin_ngrams(authors, n=15, max_bin_size=30): bins = defaultdict(set) for i, (id, a) in enumerate(authors.iteritems()): if ':' not in a['fullname']: lname = a['fullname'] ngrams = zip(*[lname[j:] for j in range(n)]) for p in ngrams: if not any(((s in p) for s in stopwords_custom)): bins[''.join(p)].add(id) if (i+1) % 10000 == 0: print_err(i+1) bk = bins.keys() for b in bk: if len(bins[b]) > max_bin_size: del bins[b] return bins def main(): parser = argparse.ArgumentParser() parser.add_argument('authorprefeat', nargs='?', default='generated/Author_prefeat.pickle') parser.add_argument('type', nargs='?', default='iFfL') args = parser.parse_args() print_err("Loading pickled author pre-features") authors = pickle.load(open(args.authorprefeat, 'rb')) bins = globals()["bin_"+args.type](authors) bins = sorted([(len(bv), blabel, bv) for blabel, bv in bins.iteritems()], reverse=True) for _, binlabel, binv in bins: print binlabel + ';' + ','.join(map(str, sorted(binv))) if __name__ == "__main__": main()
29.979899
703
0.632417
0
0
0
0
0
0
0
0
1,261
0.211364
7c6be61ef0d7cd7c0ad0f76e6b1f86ee30283323
1,180
py
Python
resources/dot_PyCharm/system/python_stubs/-762174762/PySide/QtCore/QAbstractFileEngineIterator.py
basepipe/developer_onboarding
05b6a776f8974c89517868131b201f11c6c2a5ad
[ "MIT" ]
1
2020-04-20T02:27:20.000Z
2020-04-20T02:27:20.000Z
resources/dot_PyCharm/system/python_stubs/cache/16012662ddca113c1f50140f9e0d3bd290a511015767475cf362e5267760f062/PySide/QtCore/QAbstractFileEngineIterator.py
basepipe/developer_onboarding
05b6a776f8974c89517868131b201f11c6c2a5ad
[ "MIT" ]
null
null
null
resources/dot_PyCharm/system/python_stubs/cache/16012662ddca113c1f50140f9e0d3bd290a511015767475cf362e5267760f062/PySide/QtCore/QAbstractFileEngineIterator.py
basepipe/developer_onboarding
05b6a776f8974c89517868131b201f11c6c2a5ad
[ "MIT" ]
null
null
null
# encoding: utf-8
# module PySide.QtCore
# from C:\Python27\lib\site-packages\PySide\QtCore.pyd
# by generator 1.147
# no doc

# imports
import Shiboken as __Shiboken


class QAbstractFileEngineIterator(__Shiboken.Object):
    # no doc
    def currentFileInfo(self, *args, **kwargs): # real signature unknown
        pass

    def currentFileName(self, *args, **kwargs): # real signature unknown
        pass

    def currentFilePath(self, *args, **kwargs): # real signature unknown
        pass

    def filters(self, *args, **kwargs): # real signature unknown
        pass

    def hasNext(self, *args, **kwargs): # real signature unknown
        pass

    def nameFilters(self, *args, **kwargs): # real signature unknown
        pass

    def next(self, *args, **kwargs): # real signature unknown
        pass

    def path(self, *args, **kwargs): # real signature unknown
        pass

    def __init__(self, *args, **kwargs): # real signature unknown
        pass

    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
25.652174
77
0.644915
1,008
0.854237
0
0
203
0.172034
0
0
493
0.417797
7c6cbf4764a2e4e9b78da1978c82aa4f5d7862ce
3,637
py
Python
tests/conftest.py
priyatharsan/beyond
1061b870407d316d43e4d1351a7ec026629685ae
[ "MIT" ]
null
null
null
tests/conftest.py
priyatharsan/beyond
1061b870407d316d43e4d1351a7ec026629685ae
[ "MIT" ]
null
null
null
tests/conftest.py
priyatharsan/beyond
1061b870407d316d43e4d1351a7ec026629685ae
[ "MIT" ]
null
null
null
import numpy as np

from pytest import fixture, mark, skip
from unittest.mock import patch
from pathlib import Path

from beyond.config import config
from beyond.dates.eop import Eop
from beyond.frames.stations import create_station
from beyond.io.tle import Tle
from beyond.propagators.keplernum import KeplerNum
from beyond.dates import Date, timedelta
from beyond.env.solarsystem import get_body


np.set_printoptions(linewidth=200)


@fixture(autouse=True, scope="session")
def config_override():
    """Create a dummy config dict containing basic data
    """
    config.update({
        "eop": {
            "missing_policy": "pass",
        }
    })


@fixture
def common_env():
    with patch('beyond.dates.date.EopDb.get') as m:
        m.return_value = Eop(
            x=-0.00951054166666622,
            y=0.31093590624999734,
            dpsi=-94.19544791666682,
            deps=-10.295645833333051,
            dy=-0.10067361111115315,
            dx=-0.06829513888889051,
            lod=1.6242802083331438,
            ut1_utc=0.01756018472222477,
            tai_utc=36.0
        )
        yield


@fixture
def station(common_env):
    return create_station('Toulouse', (43.604482, 1.443962, 172.))


@fixture
def iss_tle(common_env):
    return Tle("""ISS (ZARYA)
1 25544U 98067A   18124.55610684  .00001524  00000-0  30197-4 0  9997
2 25544  51.6421 236.2139 0003381  47.8509  47.6767 15.54198229111731""")


@fixture
def molniya_tle(common_env):
    return Tle("""MOLNIYA 1-90
1 24960U 97054A   18123.22759647  .00000163  00000-0  24467-3 0  9999
2 24960  62.6812 182.7824 6470982 294.8616  12.8538  3.18684355160009""")


@fixture(params=["tle", "ephem"])
def orbit(request, iss_tle):
    orb = iss_tle.orbit()
    if request.param == "tle":
        return orb
    elif request.param == "ephem":
        start = Date(2018, 4, 5, 16, 50)
        stop = timedelta(hours=6)
        step = timedelta(seconds=15)
        return orb.ephem(start=start, stop=stop, step=step)
    elif request.param == "kepler":
        orb.propagator = KeplerNum(
            timedelta(seconds=60),
            get_body('Earth')
        )
        return orb


@fixture(params=["tle", "ephem"])
def molniya(request, molniya_tle):
    orb = molniya_tle.orbit()
    if request.param == "tle":
        return orb
    elif request.param == "ephem":
        start = Date(2018, 4, 5, 16, 50)
        stop = timedelta(hours=15)
        step = timedelta(minutes=1)
        return orb.ephem(start=start, stop=stop, step=step)


@fixture
def jplfiles():
    config['env'] = {
        'jpl': [
            str(Path(__file__).parent / "data" / "jpl" / "de403_2000-2020.bsp"),
            str(Path(__file__).parent / "data" / "jpl" / "pck00010.tpc"),
            str(Path(__file__).parent / "data" / "jpl" / "gm_de431.tpc"),
        ]
    }


def _skip_if_no_mpl():
    """Specific for dynamically skipping the test if matplotlib is not present
    as it is not a dependency of the library, but merely a convenience
    """
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        return True
    else:
        return False


def pytest_configure(config):
    """Declare the skip_if_no_mpl marker in pytest's '--markers' helper option

    This has no actual effect on the tests
    """
    config.addinivalue_line(
        "markers", "skip_if_no_mpl: skip if matplotlib is not installed"
    )


def pytest_runtest_setup(item):
    """This function is called for each test case.

    It looks if the test case has the skip_if_no_mpl decorator. If so,
    skip the test case
    """
    if _skip_if_no_mpl() and list(item.iter_markers(name="skip_if_no_mpl")):
        skip("matplotlib not installed")
27.141791
89
0.653011
0
0
373
0.102557
2,300
0.632389
0
0
1,130
0.310696
7c6cc14ec8ce3c7dc9875cccdf742d57d079973d
10,181
py
Python
diofant/tests/integrals/test_heurisch.py
Electric-tric/diofant
92c4bf0ef301e5d6f0cfab545b036e1cb7de3c0a
[ "BSD-3-Clause" ]
1
2021-08-22T09:34:15.000Z
2021-08-22T09:34:15.000Z
diofant/tests/integrals/test_heurisch.py
Electric-tric/diofant
92c4bf0ef301e5d6f0cfab545b036e1cb7de3c0a
[ "BSD-3-Clause" ]
null
null
null
diofant/tests/integrals/test_heurisch.py
Electric-tric/diofant
92c4bf0ef301e5d6f0cfab545b036e1cb7de3c0a
[ "BSD-3-Clause" ]
null
null
null
import pytest from diofant import (Add, Derivative, Ei, Eq, Function, I, Integral, LambertW, Piecewise, Rational, Sum, Symbol, acos, asin, asinh, besselj, cos, cosh, diff, erf, exp, li, log, pi, ratsimp, root, simplify, sin, sinh, sqrt, symbols, tan) from diofant.integrals.heurisch import components, heurisch, heurisch_wrapper __all__ = () x, y, z, nu = symbols('x,y,z,nu') f = Function('f') def test_components(): assert components(x*y, x) == {x} assert components(1/(x + y), x) == {x} assert components(sin(x), x) == {sin(x), x} assert components(sin(x)*sqrt(log(x)), x) == \ {log(x), sin(x), sqrt(log(x)), x} assert components(x*sin(exp(x)*y), x) == \ {sin(y*exp(x)), x, exp(x)} assert components(x**Rational(17, 54)/sqrt(sin(x)), x) == \ {sin(x), root(x, 54), sqrt(sin(x)), x} assert components(f(x), x) == \ {x, f(x)} assert components(Derivative(f(x), x), x) == \ {x, f(x), Derivative(f(x), x)} assert components(f(x)*diff(f(x), x), x) == \ {x, f(x), Derivative(f(x), x), Derivative(f(x), x)} def test_heurisch_polynomials(): assert heurisch(1, x) == x assert heurisch(x, x) == x**2/2 assert heurisch(x**17, x) == x**18/18 def test_heurisch_fractions(): assert heurisch(1/x, x) == log(x) assert heurisch(1/(2 + x), x) == log(x + 2) assert heurisch(1/(x + sin(y)), x) == log(x + sin(y)) # Up to a constant, where C = 5*pi*I/12, Mathematica gives identical # result in the first case. The difference is because diofant changes # signs of expressions without any care. # XXX ^ ^ ^ is this still correct? assert heurisch(5*x**5/( 2*x**6 - 5), x) in [5*log(2*x**6 - 5) / 12, 5*log(-2*x**6 + 5) / 12] assert heurisch(5*x**5/(2*x**6 + 5), x) == 5*log(2*x**6 + 5) / 12 assert heurisch(1/x**2, x) == -1/x assert heurisch(-1/x**5, x) == 1/(4*x**4) def test_heurisch_log(): assert heurisch(log(x), x) == x*log(x) - x assert heurisch(log(3*x), x) == -x + x*log(3) + x*log(x) assert heurisch(log(x**2), x) in [x*log(x**2) - 2*x, 2*x*log(x) - 2*x] def test_heurisch_exp(): assert heurisch(exp(x), x) == exp(x) assert heurisch(exp(-x), x) == -exp(-x) assert heurisch(exp(17*x), x) == exp(17*x) / 17 assert heurisch(x*exp(x), x) == x*exp(x) - exp(x) assert heurisch(x*exp(x**2), x) == exp(x**2) / 2 assert heurisch(exp(-x**2), x) is None assert heurisch(2**x, x) == 2**x/log(2) assert heurisch(x*2**x, x) == x*2**x/log(2) - 2**x*log(2)**(-2) assert heurisch(Integral(x**z*y, (y, 1, 2), (z, 2, 3)).function, x) == (x*x**z*y)/(z+1) assert heurisch(Sum(x**z, (z, 1, 2)).function, z) == x**z/log(x) def test_heurisch_trigonometric(): assert heurisch(sin(x), x) == -cos(x) assert heurisch(pi*sin(x) + 1, x) == x - pi*cos(x) assert heurisch(cos(x), x) == sin(x) assert heurisch(tan(x), x) in [ log(1 + tan(x)**2)/2, log(tan(x) + I) + I*x, log(tan(x) - I) - I*x, ] assert heurisch(sin(x)*sin(y), x) == -cos(x)*sin(y) assert heurisch(sin(x)*sin(y), y) == -cos(y)*sin(x) # gives sin(x) in answer when run via setup.py and cos(x) when run via py.test assert heurisch(sin(x)*cos(x), x) in [sin(x)**2 / 2, -cos(x)**2 / 2] assert heurisch(cos(x)/sin(x), x) == log(sin(x)) assert heurisch(x*sin(7*x), x) == sin(7*x) / 49 - x*cos(7*x) / 7 assert heurisch(1/pi/4 * x**2*cos(x), x) == 1/pi/4*(x**2*sin(x) - 2*sin(x) + 2*x*cos(x)) assert heurisch(acos(x/4) * asin(x/4), x) == 2*x - (sqrt(16 - x**2))*asin(x/4) \ + (sqrt(16 - x**2))*acos(x/4) + x*asin(x/4)*acos(x/4) def test_heurisch_hyperbolic(): assert heurisch(sinh(x), x) == cosh(x) assert heurisch(cosh(x), x) == sinh(x) assert heurisch(x*sinh(x), x) == x*cosh(x) - sinh(x) assert heurisch(x*cosh(x), x) == x*sinh(x) - 
cosh(x) assert heurisch( x*asinh(x/2), x) == x**2*asinh(x/2)/2 + asinh(x/2) - x*sqrt(4 + x**2)/4 def test_heurisch_mixed(): assert heurisch(sin(x)*exp(x), x) == exp(x)*sin(x)/2 - exp(x)*cos(x)/2 def test_heurisch_radicals(): assert heurisch(1/sqrt(x), x) == 2*sqrt(x) assert heurisch(1/sqrt(x)**3, x) == -2/sqrt(x) assert heurisch(sqrt(x)**3, x) == 2*sqrt(x)**5/5 assert heurisch(sin(x)*sqrt(cos(x)), x) == -2*sqrt(cos(x))**3/3 y = Symbol('y') assert heurisch(sin(y*sqrt(x)), x) == 2/y**2*sin(y*sqrt(x)) - \ 2*sqrt(x)*cos(y*sqrt(x))/y assert heurisch_wrapper(sin(y*sqrt(x)), x) == Piecewise( (0, Eq(y, 0)), (-2*sqrt(x)*cos(sqrt(x)*y)/y + 2*sin(sqrt(x)*y)/y**2, True)) y = Symbol('y', positive=True) assert heurisch_wrapper(sin(y*sqrt(x)), x) == 2/y**2*sin(y*sqrt(x)) - \ 2*sqrt(x)*cos(y*sqrt(x))/y def test_heurisch_special(): assert heurisch(erf(x), x) == x*erf(x) + exp(-x**2)/sqrt(pi) assert heurisch(exp(-x**2)*erf(x), x) == sqrt(pi)*erf(x)**2 / 4 def test_heurisch_symbolic_coeffs(): assert heurisch(1/(x + y), x) == log(x + y) assert heurisch(1/(x + sqrt(2)), x) == log(x + sqrt(2)) assert simplify(diff(heurisch(log(x + y + z), y), y)) == log(x + y + z) def test_heurisch_symbolic_coeffs_1130(): y = Symbol('y') assert heurisch_wrapper(1/(x**2 + y), x) == Piecewise( (-1/x, Eq(y, 0)), (-I*log(x - I*sqrt(y))/(2*sqrt(y)) + I*log(x + I*sqrt(y))/(2*sqrt(y)), True)) y = Symbol('y', positive=True) assert heurisch_wrapper(1/(x**2 + y), x) in [I/sqrt(y)*log(x + sqrt(-y))/2 - I/sqrt(y)*log(x - sqrt(-y))/2, I*log(x + I*sqrt(y)) / (2*sqrt(y)) - I*log(x - I*sqrt(y))/(2*sqrt(y))] def test_heurisch_hacking(): assert (heurisch(sqrt(1 + 7*x**2), x, hints=[]) == x*sqrt(1 + 7*x**2)/2 + sqrt(7)*asinh(sqrt(7)*x)/14) assert (heurisch(sqrt(1 - 7*x**2), x, hints=[]) == x*sqrt(1 - 7*x**2)/2 + sqrt(7)*asin(sqrt(7)*x)/14) assert (heurisch(1/sqrt(1 + 7*x**2), x, hints=[]) == sqrt(7)*asinh(sqrt(7)*x)/7) assert (heurisch(1/sqrt(1 - 7*x**2), x, hints=[]) == sqrt(7)*asin(sqrt(7)*x)/7) assert (heurisch(exp(-7*x**2), x, hints=[]) == sqrt(7*pi)*erf(sqrt(7)*x)/14) assert heurisch(1/sqrt(9 - 4*x**2), x, hints=[]) == asin(2*x/3)/2 assert heurisch(1/sqrt(9 + 4*x**2), x, hints=[]) == asinh(2*x/3)/2 assert heurisch(li(x), x, hints=[]) == x*li(x) - Ei(2*log(x)) def test_heurisch_function(): assert heurisch(f(x), x) is None def test_heurisch_wrapper(): f = 1/(y + x) assert heurisch_wrapper(f, x) == log(x + y) f = 1/(y - x) assert heurisch_wrapper(f, x) == -log(x - y) f = 1/((y - x)*(y + x)) assert heurisch_wrapper(f, x) == \ Piecewise((1/x, Eq(y, 0)), (log(x + y)/2/y - log(x - y)/2/y, True)) # issue sympy/sympy#6926 f = sqrt(x**2/((y - x)*(y + x))) assert heurisch_wrapper(f, x) == x*sqrt(x**2)*sqrt(1/(-x**2 + y**2)) \ - y**2*sqrt(x**2)*sqrt(1/(-x**2 + y**2))/x def test_sympyissue_3609(): assert heurisch(1/(x * (1 + log(x)**2)), x) == I*log(log(x) + I)/2 - \ I*log(log(x) - I)/2 # These are examples from the Poor Man's Integrator # http://www-sop.inria.fr/cafe/Manuel.Bronstein/pmint/examples/ def test_pmint_rat(): # TODO: heurisch() is off by a constant: -3/4. Possibly different permutation # would give the optimal result? 
def drop_const(expr, x): if expr.is_Add: return Add(*[ arg for arg in expr.args if arg.has(x) ]) else: return expr f = (x**7 - 24*x**4 - 4*x**2 + 8*x - 8)/(x**8 + 6*x**6 + 12*x**4 + 8*x**2) g = (4 + 8*x**2 + 6*x + 3*x**3)/(x**5 + 4*x**3 + 4*x) + log(x) assert drop_const(ratsimp(heurisch(f, x)), x) == g def test_pmint_trig(): f = (x - tan(x)) / tan(x)**2 + tan(x) g = -x**2/2 - x/tan(x) + log(tan(x)**2 + 1)/2 assert heurisch(f, x) == g @pytest.mark.slow # 8 seconds on 3.4 GHz def test_pmint_logexp(): f = (1 + x + x*exp(x))*(x + log(x) + exp(x) - 1)/(x + log(x) + exp(x))**2/x g = log(x**2 + 2*x*exp(x) + 2*x*log(x) + exp(2*x) + 2*exp(x)*log(x) + log(x)**2)/2 + 1/(x + exp(x) + log(x)) # TODO: Optimal solution is g = 1/(x + log(x) + exp(x)) + log(x + log(x) + exp(x)), # but Diofant requires a lot of guidance to properly simplify heurisch() output. assert ratsimp(heurisch(f, x)) == g @pytest.mark.slow # 8 seconds on 3.4 GHz def test_pmint_erf(): f = exp(-x**2)*erf(x)/(erf(x)**3 - erf(x)**2 - erf(x) + 1) g = sqrt(pi)*log(erf(x) - 1)/8 - sqrt(pi)*log(erf(x) + 1)/8 - sqrt(pi)/(4*erf(x) - 4) assert ratsimp(heurisch(f, x)) == g def test_pmint_LambertW(): f = LambertW(x) g = x*LambertW(x) - x + x/LambertW(x) assert heurisch(f, x) == g @pytest.mark.xfail def test_pmint_besselj(): # TODO: in both cases heurisch() gives None. Wrong besselj() derivative? f = besselj(nu + 1, x)/besselj(nu, x) g = nu*log(x) - log(besselj(nu, x)) assert simplify(heurisch(f, x) - g) == 0 f = (nu*besselj(nu, x) - x*besselj(nu + 1, x))/x g = besselj(nu, x) assert simplify(heurisch(f, x) - g) == 0 @pytest.mark.slow def test_pmint_WrightOmega(): def omega(x): return LambertW(exp(x)) f = (1 + omega(x) * (2 + cos(omega(x)) * (x + omega(x))))/(1 + omega(x))/(x + omega(x)) g = log(x + LambertW(exp(x))) + sin(LambertW(exp(x))) assert heurisch(f, x) == g def test_RR(): # Make sure the algorithm does the right thing if the ring is RR. See # issue sympy/sympy#8685. assert heurisch(sqrt(1 + 0.25*x**2), x, hints=[]) == \ 0.5*x*sqrt(0.25*x**2 + 1) + 1.0*asinh(0.5*x) # TODO: convert the rest of PMINT tests: # Airy functions # f = (x - AiryAi(x)*AiryAi(1, x)) / (x**2 - AiryAi(x)**2) # g = Rational(1,2)*ln(x + AiryAi(x)) + Rational(1,2)*ln(x - AiryAi(x)) # f = x**2 * AiryAi(x) # g = -AiryAi(x) + AiryAi(1, x)*x # Whittaker functions # f = WhittakerW(mu + 1, nu, x) / (WhittakerW(mu, nu, x) * x) # g = x/2 - mu*ln(x) - ln(WhittakerW(mu, nu, x))
34.511864
112
0.534722
0
0
0
0
1,384
0.135939
0
0
1,304
0.128082
7c6d185f736a9be6f5e0a171cd9fc68f8a4ce031
12,105
py
Python
kornia/color/adjust.py
carlosb1/kornia
a2b34d497314e7ed65f114401efdd3cc9ba2077c
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
kornia/color/adjust.py
carlosb1/kornia
a2b34d497314e7ed65f114401efdd3cc9ba2077c
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
kornia/color/adjust.py
carlosb1/kornia
a2b34d497314e7ed65f114401efdd3cc9ba2077c
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
from typing import Union import torch import torch.nn as nn from kornia.color.hsv import rgb_to_hsv, hsv_to_rgb from kornia.constants import pi def adjust_saturation_raw(input: torch.Tensor, saturation_factor: Union[float, torch.Tensor]) -> torch.Tensor: r"""Adjust color saturation of an image. Expecting input to be in hsv format already. See :class:`~kornia.color.AdjustSaturation` for details. """ if not torch.is_tensor(input): raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}") if not isinstance(saturation_factor, (float, torch.Tensor,)): raise TypeError(f"The saturation_factor should be a float number or torch.Tensor." f"Got {type(saturation_factor)}") if isinstance(saturation_factor, float): saturation_factor = torch.tensor([saturation_factor]) saturation_factor = saturation_factor.to(input.device).to(input.dtype) if (saturation_factor < 0).any(): raise ValueError(f"Saturation factor must be non-negative. Got {saturation_factor}") for _ in input.shape[1:]: saturation_factor = torch.unsqueeze(saturation_factor, dim=-1) # unpack the hsv values h, s, v = torch.chunk(input, chunks=3, dim=-3) # transform the hue value and appl module s_out: torch.Tensor = torch.clamp(s * saturation_factor, min=0, max=1) # pack back back the corrected hue out: torch.Tensor = torch.cat([h, s_out, v], dim=-3) return out def adjust_saturation(input: torch.Tensor, saturation_factor: Union[float, torch.Tensor]) -> torch.Tensor: r"""Adjust color saturation of an image. See :class:`~kornia.color.AdjustSaturation` for details. """ # convert the rgb image to hsv x_hsv: torch.Tensor = rgb_to_hsv(input) # perform the conversion x_adjusted: torch.Tensor = adjust_saturation_raw(x_hsv, saturation_factor) # convert back to rgb out: torch.Tensor = hsv_to_rgb(x_adjusted) return out def adjust_hue_raw(input: torch.Tensor, hue_factor: Union[float, torch.Tensor]) -> torch.Tensor: r"""Adjust hue of an image. Expecting input to be in hsv format already. See :class:`~kornia.color.AdjustHue` for details. """ if not torch.is_tensor(input): raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}") if not isinstance(hue_factor, (float, torch.Tensor)): raise TypeError(f"The hue_factor should be a float number or torch.Tensor in the range between" f" [-PI, PI]. Got {type(hue_factor)}") if isinstance(hue_factor, float): hue_factor = torch.tensor([hue_factor]) hue_factor = hue_factor.to(input.device).to(input.dtype) if ((hue_factor < -pi) | (hue_factor > pi)).any(): raise ValueError(f"Hue-factor must be in the range [-PI, PI]. Got {hue_factor}") for _ in input.shape[1:]: hue_factor = torch.unsqueeze(hue_factor, dim=-1) # unpack the hsv values h, s, v = torch.chunk(input, chunks=3, dim=-3) # transform the hue value and appl module divisor: float = 2 * pi.item() h_out: torch.Tensor = torch.fmod(h + hue_factor, divisor) # pack back back the corrected hue out: torch.Tensor = torch.cat([h_out, s, v], dim=-3) return out def adjust_hue(input: torch.Tensor, hue_factor: Union[float, torch.Tensor]) -> torch.Tensor: r"""Adjust hue of an image. See :class:`~kornia.color.AdjustHue` for details. """ # convert the rgb image to hsv x_hsv: torch.Tensor = rgb_to_hsv(input) # perform the conversion x_adjusted: torch.Tensor = adjust_hue_raw(x_hsv, hue_factor) # convert back to rgb out: torch.Tensor = hsv_to_rgb(x_adjusted) return out def adjust_gamma(input: torch.Tensor, gamma: Union[float, torch.Tensor], gain: Union[float, torch.Tensor] = 1.) -> torch.Tensor: r"""Perform gamma correction on an image. 
See :class:`~kornia.color.AdjustGamma` for details. """ if not torch.is_tensor(input): raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}") if not isinstance(gamma, (float, torch.Tensor)): raise TypeError(f"The gamma should be a positive float or torch.Tensor. Got {type(gamma)}") if not isinstance(gain, (float, torch.Tensor)): raise TypeError(f"The gain should be a positive float or torch.Tensor. Got {type(gain)}") if isinstance(gamma, float): gamma = torch.tensor([gamma]) if isinstance(gain, float): gain = torch.tensor([gain]) gamma = gamma.to(input.device).to(input.dtype) gain = gain.to(input.device).to(input.dtype) if (gamma < 0.0).any(): raise ValueError(f"Gamma must be non-negative. Got {gamma}") if (gain < 0.0).any(): raise ValueError(f"Gain must be non-negative. Got {gain}") for _ in input.shape[1:]: gamma = torch.unsqueeze(gamma, dim=-1) gain = torch.unsqueeze(gain, dim=-1) # Apply the gamma correction x_adjust: torch.Tensor = gain * torch.pow(input, gamma) # Truncate between pixel values out: torch.Tensor = torch.clamp(x_adjust, 0.0, 1.0) return out def adjust_contrast(input: torch.Tensor, contrast_factor: Union[float, torch.Tensor]) -> torch.Tensor: r"""Adjust Contrast of an image. See :class:`~kornia.color.AdjustContrast` for details. """ if not torch.is_tensor(input): raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}") if not isinstance(contrast_factor, (float, torch.Tensor,)): raise TypeError(f"The factor should be either a float or torch.Tensor. " f"Got {type(contrast_factor)}") if isinstance(contrast_factor, float): contrast_factor = torch.tensor([contrast_factor]) contrast_factor = contrast_factor.to(input.device).to(input.dtype) if (contrast_factor < 0).any(): raise ValueError(f"Contrast factor must be non-negative. Got {contrast_factor}") for _ in input.shape[1:]: contrast_factor = torch.unsqueeze(contrast_factor, dim=-1) # Apply contrast factor to each channel x_adjust: torch.Tensor = input * contrast_factor # Truncate between pixel values out: torch.Tensor = torch.clamp(x_adjust, 0.0, 1.0) return out def adjust_brightness(input: torch.Tensor, brightness_factor: Union[float, torch.Tensor]) -> torch.Tensor: r"""Adjust Brightness of an image. See :class:`~kornia.color.AdjustBrightness` for details. """ if not torch.is_tensor(input): raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}") if not isinstance(brightness_factor, (float, torch.Tensor,)): raise TypeError(f"The factor should be either a float or torch.Tensor. " f"Got {type(brightness_factor)}") if isinstance(brightness_factor, float): brightness_factor = torch.tensor([brightness_factor]) brightness_factor = brightness_factor.to(input.device).to(input.dtype) for _ in input.shape[1:]: brightness_factor = torch.unsqueeze(brightness_factor, dim=-1) # Apply brightness factor to each channel x_adjust: torch.Tensor = input + brightness_factor # Truncate between pixel values out: torch.Tensor = torch.clamp(x_adjust, 0.0, 1.0) return out class AdjustSaturation(nn.Module): r"""Adjust color saturation of an image. The input image is expected to be an RGB image in the range of [0, 1]. Args: input (torch.Tensor): Image/Tensor to be adjusted in the shape of (\*, N). saturation_factor (float): How much to adjust the saturation. 0 will give a black and white image, 1 will give the original image while 2 will enhance the saturation by a factor of 2. Returns: torch.Tensor: Adjusted image. 
""" def __init__(self, saturation_factor: Union[float, torch.Tensor]) -> None: super(AdjustSaturation, self).__init__() self.saturation_factor: Union[float, torch.Tensor] = saturation_factor def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore return adjust_saturation(input, self.saturation_factor) class AdjustHue(nn.Module): r"""Adjust hue of an image. The input image is expected to be an RGB image in the range of [0, 1]. Args: input (torch.Tensor): Image/Tensor to be adjusted in the shape of (\*, N). hue_factor (float): How much to shift the hue channel. Should be in [-PI, PI]. PI and -PI give complete reversal of hue channel in HSV space in positive and negative direction respectively. 0 means no shift. Therefore, both -PI and PI will give an image with complementary colors while 0 gives the original image. Returns: torch.Tensor: Adjusted image. """ def __init__(self, hue_factor: Union[float, torch.Tensor]) -> None: super(AdjustHue, self).__init__() self.hue_factor: Union[float, torch.Tensor] = hue_factor def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore return adjust_hue(input, self.hue_factor) class AdjustGamma(nn.Module): r"""Perform gamma correction on an image. The input image is expected to be in the range of [0, 1]. Args: input (torch.Tensor): Image/Tensor to be adjusted in the shape of (\*, N). gamma (float): Non negative real number, same as γ\gammaγ in the equation. gamma larger than 1 make the shadows darker, while gamma smaller than 1 make dark regions lighter. gain (float, optional): The constant multiplier. Default 1. Returns: torch.Tensor: Adjusted image. """ def __init__(self, gamma: Union[float, torch.Tensor], gain: Union[float, torch.Tensor] = 1.) -> None: super(AdjustGamma, self).__init__() self.gamma: Union[float, torch.Tensor] = gamma self.gain: Union[float, torch.Tensor] = gain def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore return adjust_gamma(input, self.gamma, self.gain) class AdjustContrast(nn.Module): r"""Adjust Contrast of an image. This implementation aligns OpenCV, not PIL. Hence, the output differs from TorchVision. The input image is expected to be in the range of [0, 1]. Args: input (torch.Tensor): Image to be adjusted in the shape of (\*, N). contrast_factor (Union[float, torch.Tensor]): Contrast adjust factor per element in the batch. 0 generates a compleatly black image, 1 does not modify the input image while any other non-negative number modify the brightness by this factor. Returns: torch.Tensor: Adjusted image. """ def __init__(self, contrast_factor: Union[float, torch.Tensor]) -> None: super(AdjustContrast, self).__init__() self.contrast_factor: Union[float, torch.Tensor] = contrast_factor def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore return adjust_contrast(input, self.contrast_factor) class AdjustBrightness(nn.Module): r"""Adjust Brightness of an image. This implementation aligns OpenCV, not PIL. Hence, the output differs from TorchVision. The input image is expected to be in the range of [0, 1]. Args: input (torch.Tensor): Image/Input to be adjusted in the shape of (\*, N). brightness_factor (Union[float, torch.Tensor]): Brightness adjust factor per element in the batch. 0 does not modify the input image while any other number modify the brightness. Returns: torch.Tensor: Adjusted image. 
""" def __init__(self, brightness_factor: Union[float, torch.Tensor]) -> None: super(AdjustBrightness, self).__init__() self.brightness_factor: Union[float, torch.Tensor] = brightness_factor def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore return adjust_brightness(input, self.brightness_factor)
34.884726
110
0.671871
4,697
0.387957
0
0
0
0
0
0
5,273
0.435533
7c6ea33d579371cc05a40f107c83af6d179fcd7a
1,418
py
Python
pommerman/__init__.py
rmccann01/playground
354041cd1d9b70ffe82c18fb5b4035fab721eb92
[ "Apache-2.0" ]
725
2018-02-14T09:48:18.000Z
2022-03-29T03:04:28.000Z
pommerman/__init__.py
rmccann01/playground
354041cd1d9b70ffe82c18fb5b4035fab721eb92
[ "Apache-2.0" ]
214
2018-02-16T22:00:41.000Z
2022-03-11T23:26:20.000Z
pommerman/__init__.py
rmccann01/playground
354041cd1d9b70ffe82c18fb5b4035fab721eb92
[ "Apache-2.0" ]
265
2018-02-15T05:33:46.000Z
2022-03-11T03:04:17.000Z
'''Entry point into the pommerman module'''
import gym
import inspect

from . import agents
from . import configs
from . import constants
from . import forward_model
from . import helpers
from . import utility
from . import network

gym.logger.set_level(40)

REGISTRY = None


def _register():
    global REGISTRY
    REGISTRY = []
    for name, f in inspect.getmembers(configs, inspect.isfunction):
        if not name.endswith('_env'):
            continue
        config = f()
        gym.envs.registration.register(
            id=config['env_id'],
            entry_point=config['env_entry_point'],
            kwargs=config['env_kwargs']
        )
        REGISTRY.append(config['env_id'])


# Register environments with gym
_register()


def make(config_id, agent_list, game_state_file=None, render_mode='human'):
    '''Makes the pommerman env and registers it with gym'''
    assert config_id in REGISTRY, "Unknown configuration '{}'. " \
        "Possible values: {}".format(config_id, REGISTRY)
    env = gym.make(config_id)

    for id_, agent in enumerate(agent_list):
        assert isinstance(agent, agents.BaseAgent)
        # NOTE: This is IMPORTANT so that the agent character is initialized
        agent.init_agent(id_, env.spec._kwargs['game_type'])

    env.set_agents(agent_list)
    env.set_init_game_state(game_state_file)
    env.set_render_mode(render_mode)
    return env


from . import cli
26.754717
76
0.682652
0
0
0
0
0
0
0
0
318
0.22426
7c7069a54d49756f83e36923521eba70ab74f6c7
139
py
Python
demo/demo/accounts/urls.py
caravancoop/rest-auth-toolkit
425bf293987f7128d9538f27a5eca7e47ba84217
[ "MIT" ]
1
2019-12-23T21:51:06.000Z
2019-12-23T21:51:06.000Z
demo/demo/accounts/urls.py
caravancoop/rest-framework-auth-toolkit
425bf293987f7128d9538f27a5eca7e47ba84217
[ "MIT" ]
127
2017-10-27T15:20:01.000Z
2022-03-07T04:09:15.000Z
demo/demo/accounts/urls.py
caravancoop/rest-auth-toolkit
425bf293987f7128d9538f27a5eca7e47ba84217
[ "MIT" ]
2
2018-01-03T16:22:51.000Z
2019-12-23T21:51:54.000Z
from django.urls import path

from .views import ProfileView


urlpatterns = [
    path('', ProfileView.as_view(), name='user-profile'),
]
15.444444
57
0.705036
0
0
0
0
0
0
0
0
16
0.115108
7c70c6e774d6a8ca53417d3cc9999e257be28aad
1,093
py
Python
test/test_pipeline/components/classification/test_passive_aggressive.py
vardaan-raj/auto-sklearn
4597152e3a60cd6f6e32719a3bef26e13951b102
[ "BSD-3-Clause" ]
1
2021-02-21T16:44:44.000Z
2021-02-21T16:44:44.000Z
test/test_pipeline/components/classification/test_passive_aggressive.py
vardaan-raj/auto-sklearn
4597152e3a60cd6f6e32719a3bef26e13951b102
[ "BSD-3-Clause" ]
9
2021-02-12T17:52:34.000Z
2021-06-26T11:37:41.000Z
test/test_pipeline/components/classification/test_passive_aggressive.py
vardaan-raj/auto-sklearn
4597152e3a60cd6f6e32719a3bef26e13951b102
[ "BSD-3-Clause" ]
1
2021-07-06T23:02:42.000Z
2021-07-06T23:02:42.000Z
import sklearn.linear_model

from autosklearn.pipeline.components.classification.passive_aggressive import \
    PassiveAggressive

from .test_base import BaseClassificationComponentTest


class PassiveAggressiveComponentTest(BaseClassificationComponentTest):

    __test__ = True

    res = dict()
    res["default_iris"] = 0.92
    res["iris_n_calls"] = 5
    res["default_iris_iterative"] = 0.92
    res["iris_iterative_n_iter"] = 32
    res["default_iris_proba"] = 0.29271032477461295
    res["default_iris_sparse"] = 0.4
    res["default_digits"] = 0.9156041287188829
    res["digits_n_calls"] = 6
    res["default_digits_iterative"] = 0.9156041287188829
    res["digits_iterative_n_iter"] = 64
    res["default_digits_binary"] = 0.9927140255009107
    res["default_digits_multilabel"] = 0.90997912489192
    res["default_digits_multilabel_proba"] = 1.0
    res['ignore_hps'] = ['max_iter']

    sk_mod = sklearn.linear_model.PassiveAggressiveClassifier
    module = PassiveAggressive

    step_hyperparameter = {
        'name': 'max_iter',
        'value': module.get_max_iter(),
    }
30.361111
79
0.725526
903
0.826167
0
0
0
0
0
0
327
0.299177
7c71eb8f52ad23f62b8d9e0d27dc37cf322f70c3
3,148
py
Python
tensorflow_datasets/structured/dart/dart_test.py
harsh020/datasets
b4ad3617b279ec65356e696c4c860458621976f6
[ "Apache-2.0" ]
1
2020-12-10T06:37:27.000Z
2020-12-10T06:37:27.000Z
tensorflow_datasets/structured/dart/dart_test.py
Jinwook-shim/datasets
815037e87150e3c8a557d91a68b07e8ffb6a2a86
[ "Apache-2.0" ]
null
null
null
tensorflow_datasets/structured/dart/dart_test.py
Jinwook-shim/datasets
815037e87150e3c8a557d91a68b07e8ffb6a2a86
[ "Apache-2.0" ]
null
null
null
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Dart dataset tests."""

import json

import mock
import tensorflow.compat.v2 as tf

import tensorflow_datasets.public_api as tfds
from tensorflow_datasets.structured.dart import dart


class DartTest(tfds.testing.DatasetBuilderTestCase):
  DATASET_CLASS = dart.Dart
  SPLITS = {
      'train': 2,
      'validation': 1,
      'test': 2,
  }

  def test_split_generators(self):
    json_str = """
      [
        {
          "tripleset": [
            [
              "Mars Hill College",
              "JOINED",
              "1973"
            ],
            [
              "Mars Hill College",
              "LOCATION",
              "Mars Hill, North Carolina"
            ]
          ],
          "subtree_was_extended": true,
          "annotations": [
            {
              "source": "WikiSQL_decl_sents",
              "text": "A school from Mars Hill, North Carolina, joined in 1973."
            }
          ]
        }
      ]
    """
    expected_examples = [{
        'input_text': {
            'table': [
                {
                    'column_header': 'subject',
                    'row_number': 0,
                    'content': 'Mars Hill College',
                },
                {
                    'column_header': 'predicate',
                    'row_number': 0,
                    'content': 'JOINED',
                },
                {
                    'column_header': 'object',
                    'row_number': 0,
                    'content': '1973',
                },
                {
                    'column_header': 'subject',
                    'row_number': 1,
                    'content': 'Mars Hill College',
                },
                {
                    'column_header': 'predicate',
                    'row_number': 1,
                    'content': 'LOCATION',
                },
                {
                    'column_header': 'object',
                    'row_number': 1,
                    'content': 'Mars Hill, North Carolina',
                },
            ]
        },
        'target_text': 'A school from Mars Hill, North Carolina, joined in 1973.'
    }]

    dart_dataset = dart.Dart()
    with mock.patch.object(
        json, 'load', return_value=json.loads(json_str)), mock.patch.object(
            tf, 'io'):
      for i, (_, example) in enumerate(dart_dataset._generate_examples('')):
        self.assertCountEqual(example, expected_examples[i])


if __name__ == '__main__':
  tfds.testing.test_main()
28.880734
80
0.493011
2,292
0.728081
0
0
0
0
0
0
1,679
0.533355
7c72f8f31e7cf39a7edd3dbce8585cf8da069b38
9,085
py
Python
exp/exp_informer_dad.py
AdamLohSg/GTA
bf6a745a6e28e365466e76360a15ca10ce61e009
[ "Apache-2.0" ]
8
2022-01-19T20:47:36.000Z
2022-03-20T05:11:04.000Z
exp/exp_informer_dad.py
AdamLohSg/GTA
bf6a745a6e28e365466e76360a15ca10ce61e009
[ "Apache-2.0" ]
2
2022-02-17T06:14:25.000Z
2022-02-17T08:43:57.000Z
exp/exp_informer_dad.py
AdamLohSg/GTA
bf6a745a6e28e365466e76360a15ca10ce61e009
[ "Apache-2.0" ]
5
2022-02-15T04:16:27.000Z
2022-03-29T01:21:41.000Z
from data.data_loader_dad import ( NASA_Anomaly, WADI ) from exp.exp_basic import Exp_Basic from models.model import Informer from utils.tools import EarlyStopping, adjust_learning_rate from utils.metrics import metric from sklearn.metrics import classification_report import numpy as np import torch import torch.nn as nn from torch import optim from torch.utils.data import DataLoader import os import time import warnings warnings.filterwarnings('ignore') class Exp_Informer_DAD(Exp_Basic): def __init__(self, args): super(Exp_Informer_DAD, self).__init__(args) def _build_model(self): model_dict = { 'informer':Informer, } if self.args.model=='informer': model = model_dict[self.args.model]( self.args.enc_in, self.args.dec_in, self.args.c_out, self.args.seq_len, self.args.label_len, self.args.pred_len, self.args.factor, self.args.d_model, self.args.n_heads, self.args.e_layers, self.args.d_layers, self.args.d_ff, self.args.dropout, self.args.attn, self.args.embed, self.args.data[:-1], self.args.activation, self.device ) return model.double() def _get_data(self, flag): args = self.args data_dict = { 'SMAP':NASA_Anomaly, 'MSL':NASA_Anomaly, 'WADI':WADI, } Data = data_dict[self.args.data] if flag == 'test': shuffle_flag = False; drop_last = True; batch_size = args.batch_size else: shuffle_flag = True; drop_last = True; batch_size = args.batch_size data_set = Data( root_path=args.root_path, data_path=args.data_path, flag=flag, size=[args.seq_len, args.label_len, args.pred_len], features=args.features, target=args.target ) print(flag, len(data_set)) data_loader = DataLoader( data_set, batch_size=batch_size, shuffle=shuffle_flag, num_workers=args.num_workers, drop_last=drop_last) return data_set, data_loader def _select_optimizer(self): model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate) return model_optim def _select_criterion(self): criterion = nn.MSELoss() return criterion def vali(self, vali_data, vali_loader, criterion): self.model.eval() total_loss = [] for i, (batch_x,batch_y,batch_x_mark,batch_y_mark,batch_label) in enumerate(vali_loader): batch_x = batch_x.double().to(self.device) batch_y = batch_y.double() batch_x_mark = batch_x_mark.double().to(self.device) batch_y_mark = batch_y_mark.double().to(self.device) # decoder input dec_inp = torch.zeros_like(batch_y[:,-self.args.pred_len:,:]).double() dec_inp = torch.cat([batch_y[:,:self.args.label_len,:], dec_inp], dim=1).double().to(self.device) # encoder - decoder outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark) batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device) pred = outputs.detach().cpu() true = batch_y.detach().cpu() loss = criterion(pred, true) total_loss.append(loss) total_loss = np.average(total_loss) self.model.train() return total_loss def train(self, setting): train_data, train_loader = self._get_data(flag = 'train') vali_data, vali_loader = self._get_data(flag = 'val') test_data, test_loader = self._get_data(flag = 'test') path = './checkpoints/'+setting if not os.path.exists(path): os.makedirs(path) time_now = time.time() train_steps = len(train_loader) early_stopping = EarlyStopping(patience=self.args.patience, verbose=True) model_optim = self._select_optimizer() criterion = self._select_criterion() for epoch in range(self.args.train_epochs): iter_count = 0 train_loss = [] self.model.train() for i, (batch_x,batch_y,batch_x_mark,batch_y_mark) in enumerate(train_loader): iter_count += 1 model_optim.zero_grad() batch_x = batch_x.double().to(self.device) batch_y = batch_y.double() 
batch_x_mark = batch_x_mark.double().to(self.device) batch_y_mark = batch_y_mark.double().to(self.device) # decoder input dec_inp = torch.zeros_like(batch_y[:,-self.args.pred_len:,:]).double() dec_inp = torch.cat([batch_y[:,:self.args.label_len,:], dec_inp], dim=1).double().to(self.device) # encoder - decoder outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark) batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device) loss = criterion(outputs, batch_y) train_loss.append(loss.item()) if (i+1) % 100==0: print("\titers: {0}, epoch: {1} | loss: {2:.7f}".format(i + 1, epoch + 1, loss.item())) speed = (time.time()-time_now)/iter_count left_time = speed*((self.args.train_epochs - epoch)*train_steps - i) print('\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time)) iter_count = 0 time_now = time.time() loss.backward() model_optim.step() train_loss = np.average(train_loss) vali_loss = self.vali(vali_data, vali_loader, criterion) test_loss = self.vali(test_data, test_loader, criterion) print("Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}".format( epoch + 1, train_steps, train_loss, vali_loss, test_loss)) early_stopping(vali_loss, self.model, path) if early_stopping.early_stop: print("Early stopping") break adjust_learning_rate(model_optim, epoch+1, self.args) best_model_path = path+'/'+'checkpoint.pth' self.model.load_state_dict(torch.load(best_model_path)) return self.model def test(self, setting): test_data, test_loader = self._get_data(flag='test') self.model.eval() preds = [] trues = [] labels = [] with torch.no_grad(): for i, (batch_x,batch_y,batch_x_mark,batch_y_mark,batch_label) in enumerate(test_loader): batch_x = batch_x.double().to(self.device) batch_y = batch_y.double() batch_x_mark = batch_x_mark.double().to(self.device) batch_y_mark = batch_y_mark.double().to(self.device) # decoder input dec_inp = torch.zeros_like(batch_y[:,-self.args.pred_len:,:]).double() dec_inp = torch.cat([batch_y[:,:self.args.label_len,:], dec_inp], dim=1).double().to(self.device) # encoder - decoder outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark) batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device) pred = outputs.detach().cpu().numpy()#.squeeze() true = batch_y.detach().cpu().numpy()#.squeeze() batch_label = batch_label.long().detach().numpy() preds.append(pred) trues.append(true) labels.append(batch_label) preds = np.array(preds) trues = np.array(trues) labels = np.array(labels) print('test shape:', preds.shape, trues.shape) preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1]) trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1]) labels = labels.reshape(-1, labels.shape[-1]) print('test shape:', preds.shape, trues.shape) # result save folder_path = './results/' + setting +'/' if not os.path.exists(folder_path): os.makedirs(folder_path) mae, mse, rmse, mape, mspe = metric(preds, trues) print('mse:{}, mae:{}'.format(mse, mae)) np.save(folder_path+'metrics.npy', np.array([mae, mse, rmse, mape, mspe])) np.save(folder_path+'pred.npy', preds) np.save(folder_path+'true.npy', trues) np.save(folder_path+'label.npy', labels) return
36.051587
113
0.557292
8,612
0.947936
0
0
0
0
0
0
533
0.058668
7c73ce1a389f347a8681ff6c30c8fe84612d252e
9,270
py
Python
tests/components/mysensors/conftest.py
liangleslie/core
cc807b4d597daaaadc92df4a93c6e30da4f570c6
[ "Apache-2.0" ]
30,023
2016-04-13T10:17:53.000Z
2020-03-02T12:56:31.000Z
tests/components/mysensors/conftest.py
liangleslie/core
cc807b4d597daaaadc92df4a93c6e30da4f570c6
[ "Apache-2.0" ]
24,710
2016-04-13T08:27:26.000Z
2020-03-02T12:59:13.000Z
tests/components/mysensors/conftest.py
liangleslie/core
cc807b4d597daaaadc92df4a93c6e30da4f570c6
[ "Apache-2.0" ]
11,956
2016-04-13T18:42:31.000Z
2020-03-02T09:32:12.000Z
"""Provide common mysensors fixtures.""" from __future__ import annotations from collections.abc import AsyncGenerator, Callable, Generator import json from typing import Any from unittest.mock import AsyncMock, MagicMock, patch from mysensors import BaseSyncGateway from mysensors.persistence import MySensorsJSONDecoder from mysensors.sensor import Sensor import pytest from homeassistant.components.device_tracker.legacy import Device from homeassistant.components.mqtt import DOMAIN as MQTT_DOMAIN from homeassistant.components.mysensors.config_flow import DEFAULT_BAUD_RATE from homeassistant.components.mysensors.const import ( CONF_BAUD_RATE, CONF_DEVICE, CONF_GATEWAY_TYPE, CONF_GATEWAY_TYPE_SERIAL, CONF_VERSION, DOMAIN, ) from homeassistant.core import HomeAssistant from homeassistant.setup import async_setup_component from tests.common import MockConfigEntry, load_fixture @pytest.fixture(autouse=True) def device_tracker_storage(mock_device_tracker_conf: list[Device]) -> list[Device]: """Mock out device tracker known devices storage.""" devices = mock_device_tracker_conf return devices @pytest.fixture(name="mqtt") def mock_mqtt_fixture(hass: HomeAssistant) -> None: """Mock the MQTT integration.""" hass.config.components.add(MQTT_DOMAIN) @pytest.fixture(name="is_serial_port") def is_serial_port_fixture() -> Generator[MagicMock, None, None]: """Patch the serial port check.""" with patch("homeassistant.components.mysensors.gateway.cv.isdevice") as is_device: is_device.side_effect = lambda device: device yield is_device @pytest.fixture(name="gateway_nodes") def gateway_nodes_fixture() -> dict[int, Sensor]: """Return the gateway nodes dict.""" return {} @pytest.fixture(name="serial_transport") async def serial_transport_fixture( gateway_nodes: dict[int, Sensor], is_serial_port: MagicMock, ) -> AsyncGenerator[dict[int, Sensor], None]: """Mock a serial transport.""" with patch( "mysensors.gateway_serial.AsyncTransport", autospec=True ) as transport_class, patch("mysensors.task.OTAFirmware", autospec=True), patch( "mysensors.task.load_fw", autospec=True ), patch( "mysensors.task.Persistence", autospec=True ) as persistence_class: persistence = persistence_class.return_value mock_gateway_features(persistence, transport_class, gateway_nodes) yield transport_class def mock_gateway_features( persistence: MagicMock, transport_class: MagicMock, nodes: dict[int, Sensor] ) -> None: """Mock the gateway features.""" async def mock_schedule_save_sensors() -> None: """Load nodes from via persistence.""" gateway = transport_class.call_args[0][0] gateway.sensors.update(nodes) persistence.schedule_save_sensors = AsyncMock( side_effect=mock_schedule_save_sensors ) # For some reason autospeccing does not recognize these methods. 
persistence.safe_load_sensors = MagicMock() persistence.save_sensors = MagicMock() async def mock_connect() -> None: """Mock the start method.""" transport.connect_task = MagicMock() gateway = transport_class.call_args[0][0] gateway.on_conn_made(gateway) transport = transport_class.return_value transport.connect_task = None transport.connect.side_effect = mock_connect @pytest.fixture(name="transport") def transport_fixture(serial_transport: MagicMock) -> MagicMock: """Return the default mocked transport.""" return serial_transport @pytest.fixture def transport_write(transport: MagicMock) -> MagicMock: """Return the transport mock that accepts string messages.""" return transport.return_value.send @pytest.fixture(name="serial_entry") async def serial_entry_fixture(hass: HomeAssistant) -> MockConfigEntry: """Create a config entry for a serial gateway.""" entry = MockConfigEntry( domain=DOMAIN, data={ CONF_GATEWAY_TYPE: CONF_GATEWAY_TYPE_SERIAL, CONF_VERSION: "2.3", CONF_DEVICE: "/test/device", CONF_BAUD_RATE: DEFAULT_BAUD_RATE, }, ) return entry @pytest.fixture(name="config_entry") def config_entry_fixture(serial_entry: MockConfigEntry) -> MockConfigEntry: """Provide the config entry used for integration set up.""" return serial_entry @pytest.fixture(name="integration") async def integration_fixture( hass: HomeAssistant, transport: MagicMock, config_entry: MockConfigEntry ) -> AsyncGenerator[MockConfigEntry, None]: """Set up the mysensors integration with a config entry.""" config: dict[str, Any] = {} config_entry.add_to_hass(hass) with patch("homeassistant.components.mysensors.device.UPDATE_DELAY", new=0): await async_setup_component(hass, DOMAIN, config) await hass.async_block_till_done() yield config_entry @pytest.fixture def receive_message( transport: MagicMock, integration: MockConfigEntry ) -> Callable[[str], None]: """Receive a message for the gateway.""" def receive_message_callback(message_string: str) -> None: """Receive a message with the transport. The message_string parameter is a string in the MySensors message format. 
""" gateway = transport.call_args[0][0] # node_id;child_id;command;ack;type;payload\n gateway.logic(message_string) return receive_message_callback @pytest.fixture(name="gateway") def gateway_fixture( transport: MagicMock, integration: MockConfigEntry ) -> BaseSyncGateway: """Return a setup gateway.""" return transport.call_args[0][0] def load_nodes_state(fixture_path: str) -> dict: """Load mysensors nodes fixture.""" return json.loads(load_fixture(fixture_path), cls=MySensorsJSONDecoder) def update_gateway_nodes( gateway_nodes: dict[int, Sensor], nodes: dict[int, Sensor] ) -> dict: """Update the gateway nodes.""" gateway_nodes.update(nodes) return nodes @pytest.fixture(name="gps_sensor_state", scope="session") def gps_sensor_state_fixture() -> dict: """Load the gps sensor state.""" return load_nodes_state("mysensors/gps_sensor_state.json") @pytest.fixture def gps_sensor(gateway_nodes: dict[int, Sensor], gps_sensor_state: dict) -> Sensor: """Load the gps sensor.""" nodes = update_gateway_nodes(gateway_nodes, gps_sensor_state) node = nodes[1] return node @pytest.fixture(name="power_sensor_state", scope="session") def power_sensor_state_fixture() -> dict: """Load the power sensor state.""" return load_nodes_state("mysensors/power_sensor_state.json") @pytest.fixture def power_sensor(gateway_nodes: dict[int, Sensor], power_sensor_state: dict) -> Sensor: """Load the power sensor.""" nodes = update_gateway_nodes(gateway_nodes, power_sensor_state) node = nodes[1] return node @pytest.fixture(name="energy_sensor_state", scope="session") def energy_sensor_state_fixture() -> dict: """Load the energy sensor state.""" return load_nodes_state("mysensors/energy_sensor_state.json") @pytest.fixture def energy_sensor( gateway_nodes: dict[int, Sensor], energy_sensor_state: dict ) -> Sensor: """Load the energy sensor.""" nodes = update_gateway_nodes(gateway_nodes, energy_sensor_state) node = nodes[1] return node @pytest.fixture(name="sound_sensor_state", scope="session") def sound_sensor_state_fixture() -> dict: """Load the sound sensor state.""" return load_nodes_state("mysensors/sound_sensor_state.json") @pytest.fixture def sound_sensor(gateway_nodes: dict[int, Sensor], sound_sensor_state: dict) -> Sensor: """Load the sound sensor.""" nodes = update_gateway_nodes(gateway_nodes, sound_sensor_state) node = nodes[1] return node @pytest.fixture(name="distance_sensor_state", scope="session") def distance_sensor_state_fixture() -> dict: """Load the distance sensor state.""" return load_nodes_state("mysensors/distance_sensor_state.json") @pytest.fixture def distance_sensor( gateway_nodes: dict[int, Sensor], distance_sensor_state: dict ) -> Sensor: """Load the distance sensor.""" nodes = update_gateway_nodes(gateway_nodes, distance_sensor_state) node = nodes[1] return node @pytest.fixture(name="temperature_sensor_state", scope="session") def temperature_sensor_state_fixture() -> dict: """Load the temperature sensor state.""" return load_nodes_state("mysensors/temperature_sensor_state.json") @pytest.fixture def temperature_sensor( gateway_nodes: dict[int, Sensor], temperature_sensor_state: dict ) -> Sensor: """Load the temperature sensor.""" nodes = update_gateway_nodes(gateway_nodes, temperature_sensor_state) node = nodes[1] return node @pytest.fixture(name="text_node_state", scope="session") def text_node_state_fixture() -> dict: """Load the text node state.""" return load_nodes_state("mysensors/text_node_state.json") @pytest.fixture def text_node(gateway_nodes: dict[int, Sensor], text_node_state: dict) -> Sensor: 
"""Load the text child node.""" nodes = update_gateway_nodes(gateway_nodes, text_node_state) node = nodes[1] return node
31.530612
87
0.73247
0
0
1,414
0.152535
6,975
0.752427
1,934
0.20863
2,247
0.242395
7c7633cae0980db6c9c40b9c34972bdb7f5c0282
7,139
py
Python
Detect.py
SymenYang/Vanish-Point-Detect
0e83e2b2a86e9523ed4a86f592f3a8dee594d691
[ "MIT" ]
2
2017-10-17T10:08:25.000Z
2017-10-17T11:17:39.000Z
Detect.py
SymenYang/Vanish-Point-Detect
0e83e2b2a86e9523ed4a86f592f3a8dee594d691
[ "MIT" ]
null
null
null
Detect.py
SymenYang/Vanish-Point-Detect
0e83e2b2a86e9523ed4a86f592f3a8dee594d691
[ "MIT" ]
null
null
null
import cv2 as cv import numpy as np import copy import math import Edges import INTPoint eps = 1e-7 votes = {} Groups = [] VPoints = [] Centers = [] Cluster = [] voters = {} def getEdges(image): #moved to Edges.py return Edges.getEdges(image) def getLines(edges): #moved to Edges.py return Edges.getLines(edges) def checkRound(pos,edges): #moved to Edges.py return Edges.checkRound(pos,edges) def outOfSize(pos,edges): #moved to Edges.py return Edges.outOfSize(pos,edges) def extenLine(line,edges): #moved to Edges.py return Edges.extenLine(line,edges) def extenLines(lines,edges): #moved to Edges.py return Edges.extenLines(lines,edges) def shouldMerge(line1,line2): #moved to Edges.py return Edges.shouldMerge(line1,line2) def mergeLines(lines): #moved to Edges.py return Edges.mergeLines(lines) def getLineABC(line): #moved to Edges.py return Edges.getLineABC(line) def getCirAnch(a,b): #moved to Edges.py return Edges.getCirAnch(a,b) def getCrossPoint(linea,lineb): #moved to INTPoint.py return INTPoint.getIntersectPoint(linea,lineb) def sortLines(lines): #moved to Edges.py return Edges.sortLines(lines) def getVPoints2(lines,arange = 0.2617): #moved to INTPoint.py global VPoints VPoints = INTPoint.getVPoints2(lines,arange) return VPoints def getVPoints(num = 16): #this function is fallen into disuse because of the low speed for i in range(0,num + 1,1): lens = len(Groups[i]) for j in range(0,lens,1): for k in range(j+1,lens,1): VPoints.append(getCrossPoint(Groups[i][j],Groups[i][k])) def removeSame(list): #moved to INTPoint.py return INTPoint.removeSame(list) def getLinesLength(line): #moved to INTPoint.py return INTPoint.getLinesLength(line) def getMidPoint(line): #moved to INTPoint.py return INTPoint.getMidPoint(line) def getArch(line,point): #moved to INTPoint.py return INTPoint.getArch(line,point) def voteForPoint(lines): #moved to INTPoint.py global votes global voters votes,voters = INTPoint.voteForPoint(lines,VPoints) return def getGraPoint(points): count = 1.0 sumx = 0.0 sumy = 0.0 for point in points: w = votes[point] count += w sumx += w * point[0] sumy += w * point[1] return (sumx/count,sumy/count) def devideIntoPoints(Points): global Cluster lens = len(Cluster) for i in range(0,lens,1): Cluster[i] = [] for point in Points: if point[0] == 'p' or point[0] == 'h' or point[0] == 'v': continue if votes[point] == 0: continue minlens = 1e15 minpos = 0 now = -1 for cen in Centers: now += 1 lens = getLinesLength((point[0],point[1],cen[0],cen[1])) if lens < minlens: minlens = lens minpos = now Cluster[minpos].append(point) def KMean(points,K = 3,step = 50): global Cluster global Centers Cluster = [] Centers = [] if K == 1: step = 1 for i in range(0,K,1): Cluster.append([]) Centers.append([0,0]) count = 0 for point in points: if point[0] != 'p' and point[0] != 'v' and point[0] != 'h' and votes[point] != 0: Centers[count][0] = point[0] Centers[count][1] = point[1] count += 1 if count == K: break for i in range(0,step,1): devideIntoPoints(points) for i in range(0,K,1): Centers[i] = getGraPoint(Cluster[i]) def getFinal(points): count = 0.0 num = 0 p1 = 0.0 ret1 = [] p2 = 0.0 ret2 = [] for item in votes: if item[0] == 'p' or item[0] == 'h' or item[0] == 'v': if votes[item] > p1: p2 = p1 ret2 = ret1 p1 = votes[item] ret1 = item else: if votes[item] > p2: p2 = votes[item] ret2 = item else: count += votes[item] num += 1 K = 3 ret = [] count = count / num * 0.1 if p1 > count: K -= 1 ret.append(ret1) if p2 > count: K -= 1 ret.append(ret2) KMean(points,K) for i in range(0,K,1): ret.append(Centers[i]) return 
ret def deal(inputname,outputname): global votes global Groups global VPoints global Centers global Cluster global voters votes = {} Groups = [] VPoints = [] Centers = [] Cluster = [] voters = {} image = cv.imread(inputname) edges = getEdges(image) cv.imwrite(outputname + 'edges.jpg',edges) lines = getLines(edges) lines2 = copy.deepcopy(lines) lines2 = extenLines(lines2,edges) lines2 = mergeLines(lines2) #devideIntoGroups(lines2,3) lines2 = sortLines(lines2) getVPoints2(lines2) VPoints = removeSame(VPoints) voteForPoint(lines2) votes2 = sorted(votes.iteritems(),key=lambda votes:votes[1],reverse=True) lenofvotes = min(len(votes2),max(5,int(len(votes2) * 0.2))) votesFinal = {} VPoints = [] for i in range(0,lenofvotes,1): votesFinal[votes2[i][0]] = votes2[i][1] VPoints.append(votes2[i][0]) for i in range(lenofvotes,len(votes2),1): if votes2[i][0][0] == 'h' or votes2[i][0][0] == 'v' or votes2[i][0][0] == 'p': votesFinal[votes2[i][0]] = votes2[i][1] VPoints.append(votes2[i][0]) votes = votesFinal ans = getFinal(VPoints) print ans edges = cv.cvtColor(edges,cv.COLOR_GRAY2BGR) edges2 = copy.deepcopy(edges) for item in lines: if item[0] == 'N': continue cv.line(edges,(item[0],item[1]),(item[2],item[3]),(0,0,255),2) for item in lines2: cv.line(edges2,(item[0],item[1]),(item[2],item[3]),(0,0,255),2) color = [255,0,0,0] for clu in Cluster: for i in range(0,4,1): if color[i] == 255: color[i+1] = 255 color[i] = 0 break for point in clu: if point[0] > 0 and point[1] > 0: if point[0] < edges.shape[1] and point[1] < edges.shape[0]: if votes[point] == 0: continue cv.line(edges2,(int(point[0]),int(point[1])),(int(point[0]),int(point[1])),(color[1],color[2],color[3]),10) for point in ans: if point[0] > 0 and point[1] > 0: if point[0] < edges.shape[1] and point[1] < edges.shape[0]: cv.line(edges2,(int(point[0]),int(point[1])),(int(point[0]),int(point[1])),(255,255,255),10) cv.imwrite(outputname + 'linedetect.jpg',edges) cv.imwrite(outputname + 'answer.jpg',edges2) fd = open(outputname + 'answer.txt','w') fd.write('(' + str(ans[0][0]) + ',' + str(ans[0][1]) + ')(' + str(ans[1][0]) + ',' + str(ans[1][1]) + ')(' + str(ans[2][0]) + ',' + str(ans[2][1]) + ')') fd.close deal("data/1.jpg",'1')
26.838346
157
0.559462
0
0
0
0
0
0
0
0
564
0.079003
7c7664cf829c84ce53f7c105e72a7861e60af5ad
1,971
py
Python
test/test_files.py
wanasit/labelling-notebook
c9e7f6895cd4672e3b5af603bdddf08246d35094
[ "MIT" ]
null
null
null
test/test_files.py
wanasit/labelling-notebook
c9e7f6895cd4672e3b5af603bdddf08246d35094
[ "MIT" ]
null
null
null
test/test_files.py
wanasit/labelling-notebook
c9e7f6895cd4672e3b5af603bdddf08246d35094
[ "MIT" ]
null
null
null
def test_list_example_directory(client):
    response = client.get("/api/files")
    assert response.status_code == 200

    file_list = response.get_json()
    assert len(file_list) == 5
    assert file_list[0]['key'] == 'image_annotated.jpg'
    assert file_list[1]['key'] == 'image.jpg'
    assert file_list[2]['key'] == 'more_images/'
    assert file_list[3]['key'] == 'more_images/01.jpg'
    assert file_list[4]['key'] == 'more_images/02.png'


def test_list_example_directory_nested(client):
    response = client.get("/api/files?path=more_images")
    assert response.status_code == 200

    file_list = response.get_json()
    assert len(file_list) == 2
    assert file_list[0]['key'] == '01.jpg'
    assert file_list[1]['key'] == '02.png'


def test_get_example_image(client):
    response = client.get("/api/files/image/x.jpg")
    assert response.status_code == 404

    response = client.get("/api/files/image/image.jpg")
    assert response.status_code == 200

    response = client.get("/api/files/image/more_images/01.jpg")
    assert response.status_code == 200


def test_get_example_image_data(client):
    response = client.get("/api/files/image_data/image.jpg")
    assert response.status_code == 404

    response = client.get("/api/files/image_data/image_annotated.jpg")
    assert response.status_code == 200

    data = response.get_json()
    assert 'annotations' in data
    assert 'tags' in data


def test_put_example_image_data(client):
    response = client.get("/api/files/image_data/image.jpg")
    assert response.status_code == 404

    response = client.put("/api/files/image_data/image.jpg", json={
        'annotations': [{'width': 10, 'height': 10, 'x': 0, 'y': 0}],
        'tags': ['a', 'b']
    })
    assert response.status_code == 200

    response = client.get("/api/files/image_data/image.jpg")
    assert response.status_code == 200

    data = response.get_json()
    assert 'annotations' in data
    assert 'tags' in data
31.285714
70
0.67377
0
0
0
0
0
0
0
0
526
0.26687
7c76c121d957b364e4b6f2fa9125b58b9c909aee
4,086
py
Python
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/course_groups/migrations/0001_initial.py
osoco/better-ways-of-thinking-about-software
83e70d23c873509e22362a09a10d3510e10f6992
[ "MIT" ]
3
2021-12-15T04:58:18.000Z
2022-02-06T12:15:37.000Z
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/course_groups/migrations/0001_initial.py
osoco/better-ways-of-thinking-about-software
83e70d23c873509e22362a09a10d3510e10f6992
[ "MIT" ]
null
null
null
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/course_groups/migrations/0001_initial.py
osoco/better-ways-of-thinking-about-software
83e70d23c873509e22362a09a10d3510e10f6992
[ "MIT" ]
1
2019-01-02T14:38:50.000Z
2019-01-02T14:38:50.000Z
from django.db import migrations, models from django.conf import settings from opaque_keys.edx.django.models import CourseKeyField class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='CohortMembership', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('course_id', CourseKeyField(max_length=255)), ], ), migrations.CreateModel( name='CourseCohort', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('assignment_type', models.CharField(default='manual', max_length=20, choices=[('random', 'Random'), ('manual', 'Manual')])), ], ), migrations.CreateModel( name='CourseCohortsSettings', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('is_cohorted', models.BooleanField(default=False)), ('course_id', CourseKeyField(help_text='Which course are these settings associated with?', unique=True, max_length=255, db_index=True)), ('_cohorted_discussions', models.TextField(null=True, db_column='cohorted_discussions', blank=True)), ('always_cohort_inline_discussions', models.BooleanField(default=True)), ], ), migrations.CreateModel( name='CourseUserGroup', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(help_text='What is the name of this group? Must be unique within a course.', max_length=255)), ('course_id', CourseKeyField(help_text='Which course is this group associated with?', max_length=255, db_index=True)), ('group_type', models.CharField(max_length=20, choices=[('cohort', 'Cohort')])), ('users', models.ManyToManyField(help_text='Who is in this group?', related_name='course_groups', to=settings.AUTH_USER_MODEL, db_index=True)), ], ), migrations.CreateModel( name='CourseUserGroupPartitionGroup', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('partition_id', models.IntegerField(help_text='contains the id of a cohorted partition in this course')), ('group_id', models.IntegerField(help_text='contains the id of a specific group within the cohorted partition')), ('created_at', models.DateTimeField(auto_now_add=True)), ('updated_at', models.DateTimeField(auto_now=True)), ('course_user_group', models.OneToOneField(to='course_groups.CourseUserGroup', on_delete=models.CASCADE)), ], ), migrations.AddField( model_name='coursecohort', name='course_user_group', field=models.OneToOneField(related_name='cohort', to='course_groups.CourseUserGroup', on_delete=models.CASCADE), ), migrations.AddField( model_name='cohortmembership', name='course_user_group', field=models.ForeignKey(to='course_groups.CourseUserGroup', on_delete=models.CASCADE), ), migrations.AddField( model_name='cohortmembership', name='user', field=models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE), ), migrations.AlterUniqueTogether( name='courseusergroup', unique_together={('name', 'course_id')}, ), migrations.AlterUniqueTogether( name='cohortmembership', unique_together={('user', 'course_id')}, ), ]
49.829268
159
0.618698
3,952
0.967205
0
0
0
0
0
0
1,019
0.249388
7c76d6a2f8e354238a96f859815250852db8cda1
738
py
Python
kafka-rockset-integration/generate_customers_data.py
farkaskid/recipes
8eef799cda899ea266f2849d485917f9b0d83190
[ "Apache-2.0" ]
21
2019-02-27T22:30:28.000Z
2021-07-18T17:26:56.000Z
kafka-rockset-integration/generate_customers_data.py
farkaskid/recipes
8eef799cda899ea266f2849d485917f9b0d83190
[ "Apache-2.0" ]
16
2019-07-03T22:04:21.000Z
2022-02-26T18:34:05.000Z
kafka-rockset-integration/generate_customers_data.py
farkaskid/recipes
8eef799cda899ea266f2849d485917f9b0d83190
[ "Apache-2.0" ]
11
2019-03-13T08:55:31.000Z
2022-02-07T08:35:16.000Z
"""Generate Customer Data""" import csv import random from config import MIN_CUSTOMER_ID, MAX_CUSTOMER_ID ACQUISITION_SOURCES = [ 'OrganicSearch', 'PaidSearch', 'Email', 'SocialMedia', 'Display', 'Affiliate' 'Referral' ] def main(): with open('customers.csv', 'w') as fout: writer = csv.DictWriter(fout, fieldnames=['CustomerID', 'AcquisitionSource']) writer.writeheader() for customer_id in range(MIN_CUSTOMER_ID, MAX_CUSTOMER_ID + 1): record = { 'CustomerID': int(customer_id), 'AcquisitionSource': random.choices(ACQUISITION_SOURCES).pop() } writer.writerow(record) if __name__ == '__main__': main()
22.363636
85
0.617886
0
0
0
0
0
0
0
0
195
0.264228
7c77100c5bc822f15ee0cc031b607fff7a7b2f70
899
py
Python
parsl/tests/test_error_handling/test_resource_spec.py
MatthewBM/parsl
f11417a0255ed290fd0d78ffa1bc52cfe7a06301
[ "Apache-2.0" ]
null
null
null
parsl/tests/test_error_handling/test_resource_spec.py
MatthewBM/parsl
f11417a0255ed290fd0d78ffa1bc52cfe7a06301
[ "Apache-2.0" ]
null
null
null
parsl/tests/test_error_handling/test_resource_spec.py
MatthewBM/parsl
f11417a0255ed290fd0d78ffa1bc52cfe7a06301
[ "Apache-2.0" ]
null
null
null
import parsl
from parsl.app.app import python_app
from parsl.tests.configs.local_threads import config
from parsl.executors.errors import UnsupportedFeatureError
from parsl.executors import WorkQueueExecutor


@python_app
def double(x, parsl_resource_specification={}):
    return x * 2


def test_resource(n=2):
    spec = {'cores': 2, 'memory': '1GiB'}
    fut = double(n, parsl_resource_specification=spec)
    try:
        fut.result()
    except Exception as e:
        assert isinstance(e, UnsupportedFeatureError)
    else:
        executors = parsl.dfk().executors
        executor = None
        for label in executors:
            if label != 'data_manager':
                executor = executors[label]
                break
        assert isinstance(executor, WorkQueueExecutor)


if __name__ == '__main__':
    local_config = config
    parsl.load(local_config)
    x = test_resource(2)
26.441176
58
0.676307
0
0
0
0
76
0.084538
0
0
45
0.050056
7c78f1b09da753afd4fbe81d818781bc202c7f29
9,565
py
Python
cincan/file_tool.py
cincanproject/cincan-command
b8cde81931b1c8583ac7daa1327520fb9f06856e
[ "MIT" ]
1
2022-03-11T02:37:42.000Z
2022-03-11T02:37:42.000Z
cincan/file_tool.py
cincanproject/cincan-command
b8cde81931b1c8583ac7daa1327520fb9f06856e
[ "MIT" ]
null
null
null
cincan/file_tool.py
cincanproject/cincan-command
b8cde81931b1c8583ac7daa1327520fb9f06856e
[ "MIT" ]
null
null
null
import pathlib import re from typing import List, Optional, Dict, Set, Tuple, Iterable import shlex class FileMatcher: """Match files based on a pattern""" def __init__(self, match_string: str, include: bool): self.match_string = match_string self.exact = '*' not in match_string self.absolute_path = match_string.startswith('/') self.include = include @classmethod def parse(cls, match_strings: List[str]) -> List['FileMatcher']: """Parse pattens from a list""" res = [] for m in match_strings: if m.startswith('^'): res.append(FileMatcher(m[1:], include=False)) else: res.append(FileMatcher(m, include=True)) return res def filter_upload_files(self, files: List[pathlib.Path]) -> List[pathlib.Path]: """Filter uploaded files by this pattern""" return list(filter(lambda f: self.__match(f.as_posix()) == self.include, files)) def filter_download_files(self, files: List[str], work_dir: str) -> List[str]: """Filter downloaded files by this pattern""" if self.absolute_path: # matching absolute files res = [] for file in files: if self.__match(file) == self.include: res.append(file) return res else: # matching files relative to working directory res = [] for file in files: try: rel_file = pathlib.Path(file).relative_to(work_dir).as_posix() except ValueError: if not self.include: res.append(file) continue if self.__match(rel_file) == self.include: res.append(file) return res def __match(self, value: str) -> bool: """Match value with this pattern""" if self.exact: return self.match_string == value split = self.match_string.split("*") i = 0 off = 0 len_v = len(value) s = split[0] len_s = len(s) if len_s > 0: if len_v < i + len_s or value[i:i + len_s] != s: return False off += len_s i += 1 while i < len(split): s = split[i] len_s = len(s) if len_s > 0: off = value.find(s, off) if off < 0: return False i += 1 off += len_s if split[-1] != '' and off != len_v: return False return True class FileResolver: """Resolve files from command line arguments""" def __init__(self, args: List[str], directory: pathlib.Path, output_dirs: List[str] = None, do_resolve: bool = True, input_filters: List[FileMatcher] = None): self.original_args = args self.directory = directory self.host_files: List[pathlib.Path] = [] self.command_args = args.copy() # Additional punctuation chars, whereas we might split command (On top of shlex basic) self.additional_punc_chars = "=," # these are output directories, upload them without contents for dir in output_dirs or []: self.host_files.append(pathlib.Path(dir)) self.output_dirs = set([pathlib.Path(d) for d in (output_dirs or [])]) if do_resolve: # autodetect input files self.__analyze() # exclude files by filters, perhaps? for filth in input_filters or []: self.host_files = filth.filter_upload_files(self.host_files) def __file_exists(self, path: str, already_listed: Set[pathlib.Path], parent_check: bool = True) -> Optional[str]: """ Method for evaluating the possible existence of input files and potential output directories. If there is local match for file/directory, it is marked as uploadable file into container, and path is changed to be relative of working directory of container, when command is passed into container. Special case: when possible argument is coming from first layer (not quoted) of arguments, is valid path and has no whitespace in arguments, we are processing this part later, because we can support special markups such as % and & in here. """ o_file = pathlib.Path(path) # does file/dir exists? No attempt to copy '/', leave it as it is... 
file_exists = o_file.exists() and not all([c == '/' for c in path]) # When filename contains potentially spaces, were are only interested about absolute path # Not checking parents if not file_exists and not parent_check and not " " in path: return None if not file_exists and not o_file.is_absolute() and '..' not in o_file.as_posix(): # the file does not exist, but it is relative path to a file/directory... o_parent = o_file.parent while not file_exists and o_parent and o_parent.as_posix() != '.': if o_parent.is_dir() and o_parent not in self.host_files: file_exists = True # ...and there is existing parent directory, perhaps for output o_parent = o_parent.parent if file_exists: h_file, a_name = self.__archive_name_for(o_file) if h_file not in already_listed: self.host_files.append(h_file) already_listed.add(h_file) # '/' in the end gets eaten away... fix for p in range(len(path) - 1, 0, -1): if path[p] != '/': break a_name += '/' if file_exists and o_file.is_dir() and o_file not in self.output_dirs: # include files in sub directories self.__include_sub_dirs(o_file.iterdir(), already_listed) if file_exists: return a_name else: return None def __analyze(self): """Analyze the command line""" self.command_args = [] already_listed: Set[pathlib.Path] = self.output_dirs.copy() for o_arg in self.original_args: a_name = self.__file_exists(o_arg, already_listed, parent_check=False) # Potential path as argument, not dividing it pieces yet for further analysis if a_name: self.command_args.append(a_name) continue # NOTE: Shlex not Windows compatible! lex = shlex.shlex(o_arg, posix=True, punctuation_chars=self.additional_punc_chars) split = list(lex) modified_paths = [] for part in split: a_name = self.__file_exists(part, already_listed) if a_name: modified_paths.append((part, a_name)) for m_part, m_name in modified_paths: o_arg = o_arg.replace(m_part, m_name) self.command_args.append(o_arg) def __include_sub_dirs(self, files: Iterable[pathlib.Path], file_set: Set[pathlib.Path]): """Include files from sub directories""" for f in files: if f not in file_set: self.host_files.append(f) file_set.add(f) if f.is_dir(): self.__include_sub_dirs(f.iterdir(), file_set) def resolve_upload_files(self, upload_files: Dict[pathlib.Path, str]): """Resolve the files to upload""" for up_file in self.detect_upload_files(): host_file, arc_name = self.__archive_name_for(up_file) upload_files[host_file] = arc_name cmd_args = self.command_args return cmd_args def detect_upload_files(self, files: Optional[Iterable[pathlib.Path]] = None) -> List[pathlib.Path]: """Detect files to upload""" it_files = sorted(self.host_files) if files is None else files res = [] # filter out files which do not exist nor should exists for file in it_files: if file.exists() or file in self.output_dirs: res.append(file) if files is None: # make sure also paths leading to output files are uploaded all_dirs = set() for file in res: all_dirs.add(file) for p in file.parents: all_dirs.add(p) for file in filter(lambda f: not f.exists(), it_files): # file not exists, but marked for upload - must mean some sub directory for output p = file.parent while not p.exists(): p = p.parent if p not in all_dirs: res.append(p) return res @classmethod def __archive_name_for(cls, file: pathlib.Path) -> Tuple[pathlib.Path, str]: """Resolve host file and archive name for uploaded file""" if cls.__use_absolute_path(file): h_file = file.resolve() a_file = file.resolve().as_posix() a_file = a_file[1:] if a_file.startswith('/') else a_file else: h_file = file a_file = 
file.as_posix() return h_file, a_file @classmethod def __use_absolute_path(cls, file: pathlib.Path) -> bool: """Should use absolute path to refer a file path?""" # - use absolute paths, if /../ used (ok, quite weak) return file.is_absolute() or (".." in file.as_posix())
41.228448
119
0.576477
9,460
0.989022
0
0
1,082
0.113121
0
0
2,163
0.226137
7c7958cdc1aac4d3672c25246775beb5da7fc72d
997
py
Python
aws_interface/cloud/auth/set_me.py
hubaimaster/aws-interface
162dd056546d58b6eb29afcae1c3c2d78e4309b2
[ "Apache-2.0" ]
53
2018-10-02T05:58:54.000Z
2020-09-15T08:58:26.000Z
aws_interface/cloud/auth/set_me.py
hubaimaster/aws-interface
162dd056546d58b6eb29afcae1c3c2d78e4309b2
[ "Apache-2.0" ]
52
2018-09-26T05:16:09.000Z
2022-03-11T23:51:14.000Z
aws_interface/cloud/auth/set_me.py
hubaimaster/aws-interface
162dd056546d58b6eb29afcae1c3c2d78e4309b2
[ "Apache-2.0" ]
10
2019-03-11T16:35:14.000Z
2019-10-23T08:03:54.000Z
from cloud.permission import Permission, NeedPermission
from cloud.message import error

# Define the input output format of the function.
# This information is used when creating the *SDK*.
info = {
    'input_format': {
        'session_id': 'str',
        'field': 'str',
        'value?': 'str',
    },
    'output_format': {
        'user_id?': 'str',
    },
    'description': 'Set my information'
}


@NeedPermission(Permission.Run.Auth.set_me)
def do(data, resource):
    body = {}
    params = data['params']
    user = data['user']

    user_id = user['id']
    field = params.get('field')
    value = params.get('value', None)

    user = resource.db_get_item(user_id)

    # For security
    if field in ['id', 'email', 'password_hash', 'salt', 'groups', 'login_method']:
        body['error'] = error.FORBIDDEN_MODIFICATION
        return body
    else:
        user[field] = value
        resource.db_update_item(user_id, user)

    body['user_id'] = user_id
    return body
24.317073
83
0.608826
0
0
0
0
587
0.588766
0
0
335
0.336008
7c79d2fe84aae88ef213fa559ea2499797887d57
959
py
Python
doc/gallery-src/analysis/run_blockMcnpMaterialCard.py
celikten/armi
4e100dd514a59caa9c502bd5a0967fd77fdaf00e
[ "Apache-2.0" ]
1
2021-05-29T16:02:31.000Z
2021-05-29T16:02:31.000Z
doc/gallery-src/analysis/run_blockMcnpMaterialCard.py
celikten/armi
4e100dd514a59caa9c502bd5a0967fd77fdaf00e
[ "Apache-2.0" ]
null
null
null
doc/gallery-src/analysis/run_blockMcnpMaterialCard.py
celikten/armi
4e100dd514a59caa9c502bd5a0967fd77fdaf00e
[ "Apache-2.0" ]
null
null
null
""" Write MCNP Material Cards ========================= Here we load a test reactor and write each component of one fuel block out as MCNP material cards. Normally, code-specific utility code would belong in a code-specific ARMI plugin. But in this case, the need for MCNP materials cards is so pervasive that it made it into the framework. """ from armi.reactor.tests import test_reactors from armi.reactor.flags import Flags from armi.utils.densityTools import formatMaterialCard from armi.nucDirectory import nuclideBases as nb from armi import configure configure(permissive=True) _o, r = test_reactors.loadTestReactor() bFuel = r.core.getBlocks(Flags.FUEL)[0] for ci, component in enumerate(bFuel, start=1): ndens = component.getNumberDensities() # convert nucName (str) keys to nuclideBase keys ndensByBase = {nb.byName[nucName]: dens for nucName, dens in ndens.items()} print("".join(formatMaterialCard(ndensByBase, matNum=ci)))
31.966667
79
0.755996
0
0
0
0
0
0
0
0
396
0.41293
7c79e12b0a22b9ba1c999ecbf405c389b15998f7
6,612
py
Python
life_line_chart/_autogenerate_data.py
mustaqimM/life_line_chart
a9bbbbdeb5568aa0cc3b3b585337a3d655f4b2d6
[ "MIT" ]
null
null
null
life_line_chart/_autogenerate_data.py
mustaqimM/life_line_chart
a9bbbbdeb5568aa0cc3b3b585337a3d655f4b2d6
[ "MIT" ]
null
null
null
life_line_chart/_autogenerate_data.py
mustaqimM/life_line_chart
a9bbbbdeb5568aa0cc3b3b585337a3d655f4b2d6
[ "MIT" ]
null
null
null
import names import os import datetime from random import random def generate_gedcom_file(): """generate some gedcom file""" db = {} db['n_individuals'] = 0 db['max_individuals'] = 8000 db['n_families'] = 0 db['yougest'] = None gedcom_content = """ 0 HEAD 1 SOUR Gramps 2 VERS 3.3.0 2 NAME Gramps 1 DATE {} 2 TIME 15:35:24 1 SUBM @SUBM@ 1 COPR Copyright (c) 2020 Christian Schulze,,,. 1 GEDC 2 VERS 5.5 1 CHAR UTF-8 1 LANG German """.format(datetime.date.today()) def generate_individual(db, birth_year, sex=None, last_name=None): if not sex: sex = 'F' if random() < 0.5 else 'M' first_name = names.get_first_name( gender='male' if sex == 'M' else 'female') if random() < 0.3: first_name += ' ' + \ names.get_first_name(gender='male' if sex == 'M' else 'female') if not last_name: last_name = names.get_last_name() birth_place = 'Paris' if random() < 0.5 else 'Rome' death_place = 'Zorge' if random() < 0.5 else 'Bruegge' db['n_individuals'] += 1 individual_id = '@I{}@'.format(db["n_individuals"]) death_year = birth_year + 40 + int(random()*20) db[individual_id] = { 'birth': birth_year, 'death': death_year, 'sex': sex, 'last_name': last_name } birth_date = '1 JUN {}'.format(birth_year) death_date = '1 JUN {}'.format(birth_year) if not db['yougest']: db['yougest'] = individual_id elif db[db['yougest']]['birth'] < birth_year: db['yougest'] = individual_id db[individual_id]['string'] = """0 {individual_id} INDI 1 NAME {first_name} /{last_name}/ 1 SEX {sex} 1 BIRT 2 DATE {birth_date} 2 PLAC {birth_place} 1 DEAT 2 DATE {death_date} 2 PLAC {death_place} """.format(**locals()) return individual_id def generate_family(db, husband_id, wife_id, children_ids, marriage_year, marriage_place=None): if not marriage_place: marriage_place = 'London' if random() < 0.5 else 'Tokio' db['n_families'] += 1 marriage_date = '1 MAY {}'.format(marriage_year) family_id = "@F{}@".format(db['n_families']) db[family_id] = {'string': """0 {family_id} FAM 1 HUSB {husband_id} 1 WIFE {wife_id} 1 MARR 2 DATE {marriage_date} 2 PLAC {marriage_place} """.format( **locals() )} for child_id in children_ids: db[family_id]['string'] += "1 CHIL {}\n".format(child_id) return family_id def find_by_birth_date(db, from_year, to_year, sex, exclude=[]): ids = [] for individual_id, data in db.items(): if not individual_id.startswith('@I'): continue if 'famc' in data: if data['birth'] > from_year and data['birth'] < to_year: if sex == data['sex']: if individual_id not in exclude: ids.append(individual_id) if ids: return ids[int(random()*len(ids))] return None def generate_recursive_family(db, start_year=1000, generations=2, husband_id=None, wife_id=None, siblings=[], max_children=5): if not husband_id: if random() < 0.2: exclude = siblings.copy() if wife_id: exclude += [wife_id] husband_id = find_by_birth_date( db, start_year, start_year + 10, sex='M', exclude=exclude) if not husband_id: husband_id = generate_individual( db, start_year + int(random()*5), sex='M') else: print('reused {}'.format(husband_id)) if not wife_id: if random() < 10.9: exclude = siblings.copy() + [husband_id] wife_id = find_by_birth_date( db, start_year, start_year + 10, sex='F', exclude=exclude) if not wife_id: wife_id = generate_individual( db, start_year + int(random()*5), sex='F') else: print('reused {}'.format(wife_id)) n_children = int((1+random()*(max_children-1)) * (1 - db['n_individuals'] / db['max_individuals'])) marriage_year = start_year + 20 + int(random()*5) children_ids = [] for i in range(n_children): children_ids.append(generate_individual( db, 
birth_year=marriage_year + 1 + int(random()*10), last_name=db[husband_id]['last_name'])) family_id = generate_family( db, husband_id, wife_id, children_ids, marriage_year) for i in range(n_children): db[children_ids[i]]['string'] += "1 FAMC "+family_id + '\n' db[children_ids[i]]['famc'] = family_id if generations > 0: generate_recursive_family( db, db[children_ids[i]]['birth'], generations - 1, children_ids[i] if db[children_ids[i] ]['sex'] == 'M' else None, children_ids[i] if db[children_ids[i] ]['sex'] == 'F' else None, children_ids) db[husband_id]['string'] += "1 FAMS "+family_id + '\n' db[wife_id]['string'] += "1 FAMS "+family_id + '\n' generate_recursive_family(db, generations=8, max_children=4) for k, v in db.items(): if k.startswith('@I'): gedcom_content += v['string'] for k, v in db.items(): if k.startswith('@F'): gedcom_content += v['string'] gedcom_content += '0 TRLR\n' open(os.path.join(os.path.dirname(__file__), '..', 'tests', 'autogenerated.ged'), 'w').write(gedcom_content) # generate_gedcom_file() def generate_individual_images(): from PIL import Image, ImageDraw, ImageFont def generate_one_image(filename, text, font_size=22, pos=(15, 40), size=(100, 100), color=(160, 160, 160)): img = Image.new('RGB', size, color=color) d = ImageDraw.Draw(img) font = ImageFont.truetype(r'arial.ttf', font_size) d.text(pos, text, fill=(0, 0, 0), font=font) img.save(filename) for i in range(20): generate_one_image( 'tests/images/individual_I6_image_age_{}.png'.format( 1+i*4 ), 'Age {}'.format( 1+i*4, )) generate_individual_images()
35.548387
130
0.545977
0
0
0
0
0
0
0
0
1,216
0.183908
7c79e8c0feadf546c1f7ffb56f2c6aded823808d
4,647
py
Python
arcade/examples/sprite_bullets_enemy_aims.py
LiorAvrahami/arcade
fce254a9eb89629de1f99d57a63759a2953184e9
[ "MIT" ]
1
2020-01-18T04:48:38.000Z
2020-01-18T04:48:38.000Z
arcade/examples/sprite_bullets_enemy_aims.py
LiorAvrahami/arcade
fce254a9eb89629de1f99d57a63759a2953184e9
[ "MIT" ]
1
2019-08-11T18:47:27.000Z
2019-08-12T03:02:11.000Z
arcade/examples/sprite_bullets_enemy_aims.py
LiorAvrahami/arcade
fce254a9eb89629de1f99d57a63759a2953184e9
[ "MIT" ]
null
null
null
""" Show how to have enemies shoot bullets aimed at the player. If Python and Arcade are installed, this example can be run from the command line with: python -m arcade.examples.sprite_bullets_enemy_aims """ import arcade import math import os SCREEN_WIDTH = 800 SCREEN_HEIGHT = 600 SCREEN_TITLE = "Sprites and Bullets Enemy Aims Example" BULLET_SPEED = 4 class MyGame(arcade.Window): """ Main application class """ def __init__(self, width, height, title): super().__init__(width, height, title) # Set the working directory (where we expect to find files) to the same # directory this .py file is in. You can leave this out of your own # code, but it is needed to easily run the examples using "python -m" # as mentioned at the top of this program. file_path = os.path.dirname(os.path.abspath(__file__)) os.chdir(file_path) arcade.set_background_color(arcade.color.BLACK) self.frame_count = 0 self.enemy_list = None self.bullet_list = None self.player_list = None self.player = None def setup(self): self.enemy_list = arcade.SpriteList() self.bullet_list = arcade.SpriteList() self.player_list = arcade.SpriteList() # Add player ship self.player = arcade.Sprite(":resources:images/space_shooter/playerShip1_orange.png", 0.5) self.player_list.append(self.player) # Add top-left enemy ship enemy = arcade.Sprite(":resources:images/space_shooter/playerShip1_green.png", 0.5) enemy.center_x = 120 enemy.center_y = SCREEN_HEIGHT - enemy.height enemy.angle = 180 self.enemy_list.append(enemy) # Add top-right enemy ship enemy = arcade.Sprite(":resources:images/space_shooter/playerShip1_green.png", 0.5) enemy.center_x = SCREEN_WIDTH - 120 enemy.center_y = SCREEN_HEIGHT - enemy.height enemy.angle = 180 self.enemy_list.append(enemy) def on_draw(self): """Render the screen. """ arcade.start_render() self.enemy_list.draw() self.bullet_list.draw() self.player_list.draw() def on_update(self, delta_time): """All the logic to move, and the game logic goes here. """ self.frame_count += 1 # Loop through each enemy that we have for enemy in self.enemy_list: # First, calculate the angle to the player. We could do this # only when the bullet fires, but in this case we will rotate # the enemy to face the player each frame, so we'll do this # each frame. # Position the start at the enemy's current location start_x = enemy.center_x start_y = enemy.center_y # Get the destination location for the bullet dest_x = self.player.center_x dest_y = self.player.center_y # Do math to calculate how to get the bullet to the destination. # Calculation the angle in radians between the start points # and end points. This is the angle the bullet will travel. x_diff = dest_x - start_x y_diff = dest_y - start_y angle = math.atan2(y_diff, x_diff) # Set the enemy to face the player. enemy.angle = math.degrees(angle)-90 # Shoot every 60 frames change of shooting each frame if self.frame_count % 60 == 0: bullet = arcade.Sprite(":resources:images/space_shooter/laserBlue01.png") bullet.center_x = start_x bullet.center_y = start_y # Angle the bullet sprite bullet.angle = math.degrees(angle) # Taking into account the angle, calculate our change_x # and change_y. Velocity is how fast the bullet travels. 
bullet.change_x = math.cos(angle) * BULLET_SPEED bullet.change_y = math.sin(angle) * BULLET_SPEED self.bullet_list.append(bullet) # Get rid of the bullet when it flies off-screen for bullet in self.bullet_list: if bullet.top < 0: bullet.remove_from_sprite_lists() self.bullet_list.update() def on_mouse_motion(self, x, y, delta_x, delta_y): """Called whenever the mouse moves. """ self.player.center_x = x self.player.center_y = y def main(): """ Main method """ window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE) window.setup() arcade.run() if __name__ == "__main__": main()
32.957447
98
0.624919
4,108
0.884011
0
0
0
0
0
0
1,744
0.375296
7c7a936052804b42678eb433f6f64454107e4317
450
py
Python
app1.py
FreakX23/EBook_Training
de445b0a9e56a1f1ffc51ae3c5e10ebe8297e9b6
[ "MIT" ]
null
null
null
app1.py
FreakX23/EBook_Training
de445b0a9e56a1f1ffc51ae3c5e10ebe8297e9b6
[ "MIT" ]
null
null
null
app1.py
FreakX23/EBook_Training
de445b0a9e56a1f1ffc51ae3c5e10ebe8297e9b6
[ "MIT" ]
null
null
null
# This Part will gather Infos and demonstrate the use of Variables.
usrName = input("What is your Name?")
usrAge = int(input("What is your Age?"))
usrGPA = float(input("What is your GPA?"))
print () #cheap way to get a new line
print ("Hello, %s" % (usrName))
print ("Did you know that in two years you will be %d years old? " % (usrAge +2))
print ("Also you need to improve your GPA by %f points to have a perfect score." % (4.0 - usrGPA))
print ()
45
98
0.682222
0
0
0
0
0
0
0
0
296
0.657778
7c7af573be1400de8cf6ff87c171a26f3cda1e1f
96
py
Python
borze.py
AmitHasanShuvo/Programming
f47ecc626e518a0bf5f9f749afd15ce67bbe737b
[ "MIT" ]
8
2019-05-26T19:24:13.000Z
2021-03-24T17:36:14.000Z
borze.py
AmitHasanShuvo/Programming
f47ecc626e518a0bf5f9f749afd15ce67bbe737b
[ "MIT" ]
null
null
null
borze.py
AmitHasanShuvo/Programming
f47ecc626e518a0bf5f9f749afd15ce67bbe737b
[ "MIT" ]
1
2020-04-19T04:59:54.000Z
2020-04-19T04:59:54.000Z
a = input()
a = a.replace('--', '2')
a = a.replace('-.', '1')
a = a.replace('.', '0')
print(a)
16
24
0.4375
0
0
0
0
0
0
0
0
20
0.206186
7c7ce13176c091aaa43308e8a58ace22a4dd604d
684
py
Python
distalg/message.py
charlesemurray/DistributedProgramming
f7b5001a6acb0583cd6b7bb611f27893b830c296
[ "MIT" ]
null
null
null
distalg/message.py
charlesemurray/DistributedProgramming
f7b5001a6acb0583cd6b7bb611f27893b830c296
[ "MIT" ]
null
null
null
distalg/message.py
charlesemurray/DistributedProgramming
f7b5001a6acb0583cd6b7bb611f27893b830c296
[ "MIT" ]
null
null
null
class Message:
    def __init__(self, from_channel=None, **kwargs):
        self._channel = from_channel
        if kwargs is not None:
            for key, value in kwargs.items():
                setattr(self, key, value)

    @property
    def carrier(self):
        return self._channel

    def sender(self):
        return self._channel.sender

    def receiver(self):
        return self._channel.receiver


class CallbackMessage(Message):
    def __init__(self, function):
        super(CallbackMessage, self).__init__(function=function)


if __name__ == "__main__":
    msg = Message(sender="A", receiver="B")
    assert msg.sender is "A"
    assert msg.receiver is "B"
23.586207
64
0.630117
542
0.792398
0
0
61
0.089181
0
0
22
0.032164
7c7d98835e8aa5d863003dad874d15530ea2ef72
7,799
py
Python
myenv/lib/python3.5/site-packages/tests/handlers/logging/logging_tests.py
rupeshparab/techscan
ce2558602ddad31873d7129f25b1cc61895b9939
[ "MIT" ]
1
2019-11-01T11:45:22.000Z
2019-11-01T11:45:22.000Z
myenv/lib/python3.5/site-packages/tests/handlers/logging/logging_tests.py
rupeshparab/techscan
ce2558602ddad31873d7129f25b1cc61895b9939
[ "MIT" ]
3
2020-02-11T23:03:45.000Z
2021-06-10T18:05:11.000Z
myenv/lib/python3.5/site-packages/tests/handlers/logging/logging_tests.py
rupeshparab/techscan
ce2558602ddad31873d7129f25b1cc61895b9939
[ "MIT" ]
1
2019-11-01T11:38:54.000Z
2019-11-01T11:38:54.000Z
import logging from opbeat.handlers.logging import OpbeatHandler from opbeat.utils.stacks import iter_stack_frames from tests.helpers import get_tempstoreclient from tests.utils.compat import TestCase class LoggingIntegrationTest(TestCase): def setUp(self): self.client = get_tempstoreclient(include_paths=['tests', 'opbeat']) self.handler = OpbeatHandler(self.client) self.logger = logging.getLogger(__name__) self.logger.handlers = [] self.logger.addHandler(self.handler) def test_logger_basic(self): self.logger.error('This is a test error') self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['logger'], __name__) self.assertEquals(event['level'], "error") self.assertEquals(event['message'], 'This is a test error') self.assertFalse('stacktrace' in event) self.assertFalse('exception' in event) self.assertTrue('param_message' in event) msg = event['param_message'] self.assertEquals(msg['message'], 'This is a test error') self.assertEquals(msg['params'], ()) def test_logger_warning(self): self.logger.warning('This is a test warning') self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['logger'], __name__) self.assertEquals(event['level'], "warning") self.assertFalse('stacktrace' in event) self.assertFalse('exception' in event) self.assertTrue('param_message' in event) msg = event['param_message'] self.assertEquals(msg['message'], 'This is a test warning') self.assertEquals(msg['params'], ()) def test_logger_extra_data(self): self.logger.info('This is a test info with a url', extra=dict( data=dict( url='http://example.com', ), )) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['extra']['url'], 'http://example.com') self.assertFalse('stacktrace' in event) self.assertFalse('exception' in event) self.assertTrue('param_message' in event) msg = event['param_message'] self.assertEquals(msg['message'], 'This is a test info with a url') self.assertEquals(msg['params'], ()) def test_logger_exc_info(self): try: raise ValueError('This is a test ValueError') except ValueError: self.logger.info('This is a test info with an exception', exc_info=True) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) # self.assertEquals(event['message'], 'This is a test info with an exception') self.assertTrue('stacktrace' in event) self.assertTrue('exception' in event) exc = event['exception'] self.assertEquals(exc['type'], 'ValueError') self.assertEquals(exc['value'], 'This is a test ValueError') self.assertTrue('param_message' in event) msg = event['param_message'] self.assertEquals(msg['message'], 'This is a test info with an exception') self.assertEquals(msg['params'], ()) def test_message_params(self): self.logger.info('This is a test of %s', 'args') self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) # self.assertEquals(event['message'], 'This is a test of args') # print event.keys() self.assertFalse('stacktrace' in event) self.assertFalse('exception' in event) self.assertTrue('param_message' in event) msg = event['param_message'] self.assertEquals(msg['message'], 'This is a test of %s') self.assertEquals(msg['params'], ('args',)) def test_record_stack(self): self.logger.info('This is a test of stacks', extra={'stack': True}) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertTrue('stacktrace' in event) frames = event['stacktrace']['frames'] 
self.assertNotEquals(len(frames), 1) frame = frames[0] self.assertEquals(frame['module'], __name__) self.assertFalse('exception' in event) self.assertTrue('param_message' in event) msg = event['param_message'] self.assertEquals(msg['message'], 'This is a test of stacks') self.assertEquals(msg['params'], ()) self.assertEquals(event['culprit'], 'tests.handlers.logging.logging_tests.test_record_stack') self.assertEquals(event['message'], 'This is a test of stacks') def test_no_record_stack(self): self.logger.info('This is a test of no stacks', extra={'stack': False}) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event.get('culprit'), None) self.assertEquals(event['message'], 'This is a test of no stacks') self.assertFalse('stacktrace' in event) self.assertFalse('exception' in event) self.assertTrue('param_message' in event) msg = event['param_message'] self.assertEquals(msg['message'], 'This is a test of no stacks') self.assertEquals(msg['params'], ()) def test_explicit_stack(self): self.logger.info('This is a test of stacks', extra={'stack': iter_stack_frames()}) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertTrue('culprit' in event, event) self.assertEquals(event['culprit'], 'tests.handlers.logging.logging_tests.test_explicit_stack') self.assertTrue('message' in event, event) self.assertEquals(event['message'], 'This is a test of stacks') self.assertFalse('exception' in event) self.assertTrue('param_message' in event) msg = event['param_message'] self.assertEquals(msg['message'], 'This is a test of stacks') self.assertEquals(msg['params'], ()) self.assertTrue('stacktrace' in event) def test_extra_culprit(self): self.logger.info('This is a test of stacks', extra={'culprit': 'foo.bar'}) self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['culprit'], 'foo.bar') def test_logger_exception(self): try: raise ValueError('This is a test ValueError') except ValueError: self.logger.exception('This is a test with an exception') self.assertEquals(len(self.client.events), 1) event = self.client.events.pop(0) self.assertEquals(event['message'], 'This is a test with an exception') self.assertTrue('stacktrace' in event) self.assertTrue('exception' in event) exc = event['exception'] self.assertEquals(exc['type'], 'ValueError') self.assertEquals(exc['value'], 'This is a test ValueError') self.assertTrue('param_message' in event) msg = event['param_message'] self.assertEquals(msg['message'], 'This is a test with an exception') self.assertEquals(msg['params'], ()) class LoggingHandlerTest(TestCase): def test_client_arg(self): client = get_tempstoreclient(include_paths=['tests']) handler = OpbeatHandler(client) self.assertEquals(handler.client, client) def test_client_kwarg(self): client = get_tempstoreclient(include_paths=['tests']) handler = OpbeatHandler(client=client) self.assertEquals(handler.client, client) def test_invalid_first_arg_type(self): self.assertRaises(ValueError, OpbeatHandler, object)
43.569832
103
0.647391
7,591
0.97333
0
0
0
0
0
0
2,070
0.265419
7c7e4ec9d240f0bbb6bcb11b797135aad6a43254
1,342
py
Python
amnesia/modules/mime/model.py
silenius/amnesia
ba5e3ac79a89da599c22206ad1fd17541855f74c
[ "BSD-2-Clause" ]
4
2015-05-08T10:57:56.000Z
2021-05-17T04:32:11.000Z
amnesia/modules/mime/model.py
silenius/amnesia
ba5e3ac79a89da599c22206ad1fd17541855f74c
[ "BSD-2-Clause" ]
6
2019-12-26T16:43:41.000Z
2022-02-28T11:07:54.000Z
amnesia/modules/mime/model.py
silenius/amnesia
ba5e3ac79a89da599c22206ad1fd17541855f74c
[ "BSD-2-Clause" ]
1
2019-09-23T14:08:11.000Z
2019-09-23T14:08:11.000Z
# -*- coding: utf-8 -*-
# pylint: disable=E1101

from sqlalchemy import sql
from sqlalchemy import orm
from sqlalchemy.orm.exc import NoResultFound

from .. import Base


# http://www.iana.org/assignments/media-types/media-types.xhtml

class MimeMajor(Base):
    """Mime major"""

    def __init__(self, name):
        super().__init__()
        self.name = name


class Mime(Base):

    def __init__(self, name, template, major):
        super().__init__()
        self.name = name
        self.template = template
        self.major = major

    @property
    def full(self):
        return '{0}/{1}'.format(self.major.name, self.name)

    @staticmethod
    def q_major_minor(dbsession, major, minor):
        cond = sql.and_(
            MimeMajor.name == major,
            Mime.name == minor
        )

        result = dbsession.execute(
            sql.select(Mime).join(Mime.major).options(
                orm.contains_eager(Mime.major)
            ).filter(cond)
        ).scalar_one_or_none()

        return result

    ###########
    # Filters #
    ###########

    @classmethod
    def filter_mime(cls, value):
        (major, minor) = value.split('/')
        cond = sql.and_()
        cond.append(MimeMajor.name == major)

        if minor and minor != '*':
            cond.append(Mime.name == minor)

        return cond
21.645161
63
0.568554
1,100
0.819672
0
0
730
0.543964
0
0
173
0.128912
7c7e5ef5e8a7277261b9729c9f251391fd2d29dc
1,415
py
Python
apps/goods/views_base.py
sunwei19910119/DjangoShop
188102dc8ef9f4751f4eeeb7574e95c8cc270484
[ "MIT" ]
3
2018-08-22T02:41:55.000Z
2022-03-03T08:49:38.000Z
apps/goods/views_base.py
sunwei19910119/DjangoShop
188102dc8ef9f4751f4eeeb7574e95c8cc270484
[ "MIT" ]
null
null
null
apps/goods/views_base.py
sunwei19910119/DjangoShop
188102dc8ef9f4751f4eeeb7574e95c8cc270484
[ "MIT" ]
1
2019-10-23T12:24:08.000Z
2019-10-23T12:24:08.000Z
# encoding: utf-8
from goods.models import Goods
from django.views.generic.base import View


class GoodsListView(View):
    def get(self, request):
        """
        Serve the goods list page through a plain Django class-based view
        """
        json_list = []
        goods = Goods.objects.all()[:10]
        # for good in goods:
        #     json_dict = {}
        #     json_dict["name"] = good.name
        #     json_dict["category"] = good.category.name
        #     json_dict["market_price"] = good.market_price
        #     json_dict["add_time"] = good.add_time
        #     json_list.append(json_dict)

        # from django.http import HttpResponse
        # import json
        # return HttpResponse(json.dumps(json_list),content_type="application/json")

        from django.forms.models import model_to_dict
        for good in goods:
            json_dict = model_to_dict(good)
            json_list.append(json_dict)

        import json
        from django.core import serializers
        json_data = serializers.serialize('json', goods)
        json_data = json.loads(json_data)
        from django.http import HttpResponse, JsonResponse
        # JsonResponse only adds the json.dumps call and the content_type header for us
        # return HttpResponse(json.dumps(json_data), content_type="application/json")
        # if the json.loads above is commented out, the statement below also works
        # return HttpResponse(json_data, content_type="application/json")
        return JsonResponse(json_data, safe=False)
32.159091
85
0.633922
1,379
0.933649
0
0
0
0
0
0
707
0.478673
7c7ea1a87be56599bff87dd5b87938ba5b672c0b
14,385
py
Python
launcher/src/main/scripts/bin/launcher.py
iyersathya/airlift
27e981a50cee655ff4e1e13801ba5a55991f93ce
[ "Apache-2.0" ]
null
null
null
launcher/src/main/scripts/bin/launcher.py
iyersathya/airlift
27e981a50cee655ff4e1e13801ba5a55991f93ce
[ "Apache-2.0" ]
35
2019-09-27T23:27:54.000Z
2021-10-06T14:57:28.000Z
launcher/src/main/scripts/bin/launcher.py
iyersathya/airlift
27e981a50cee655ff4e1e13801ba5a55991f93ce
[ "Apache-2.0" ]
21
2019-09-21T06:13:58.000Z
2021-08-10T20:05:09.000Z
#!/usr/bin/env python import errno import os import platform import sys import traceback from fcntl import flock, LOCK_EX, LOCK_NB from optparse import OptionParser from os import O_RDWR, O_CREAT, O_WRONLY, O_APPEND from os.path import basename, dirname, exists, realpath from os.path import join as pathjoin from signal import SIGTERM, SIGKILL from stat import S_ISLNK from time import sleep COMMANDS = ['run', 'start', 'stop', 'restart', 'kill', 'status'] LSB_NOT_RUNNING = 3 LSB_STATUS_UNKNOWN = 4 def find_install_path(f): """Find canonical parent of bin/launcher.py""" if basename(f) != 'launcher.py': raise Exception("Expected file '%s' to be 'launcher.py' not '%s'" % (f, basename(f))) p = realpath(dirname(f)) if basename(p) != 'bin': raise Exception("Expected file '%s' directory to be 'bin' not '%s" % (f, basename(p))) return dirname(p) def makedirs(p): """Create directory and all intermediate ones""" try: os.makedirs(p) except OSError as e: if e.errno != errno.EEXIST: raise def load_properties(f): """Load key/value pairs from a file""" properties = {} for line in load_lines(f): k, v = line.split('=', 1) properties[k.strip()] = v.strip() return properties def load_lines(f): """Load lines from a file, ignoring blank or comment lines""" lines = [] for line in open(f, 'r').readlines(): line = line.strip() if len(line) > 0 and not line.startswith('#'): lines.append(line) return lines def try_lock(f): """Try to open an exclusive lock (inheritable) on a file""" try: flock(f, LOCK_EX | LOCK_NB) return True except (IOError, OSError): # IOError in Python 2, OSError in Python 3. return False def open_read_write(f, mode): """Open file in read/write mode (without truncating it)""" return os.fdopen(os.open(f, O_RDWR | O_CREAT, mode), 'r+') class Process: def __init__(self, path): makedirs(dirname(path)) self.path = path self.pid_file = open_read_write(path, 0o600) self.refresh() def refresh(self): self.locked = try_lock(self.pid_file) def clear_pid(self): assert self.locked, 'pid file not locked by us' self.pid_file.seek(0) self.pid_file.truncate() def write_pid(self, pid): self.clear_pid() self.pid_file.write(str(pid) + '\n') self.pid_file.flush() def alive(self): self.refresh() if self.locked: return False pid = self.read_pid() try: os.kill(pid, 0) return True except OSError as e: raise Exception('Signaling pid %s failed: %s' % (pid, e)) def read_pid(self): assert not self.locked, 'pid file is locked by us' self.pid_file.seek(0) line = self.pid_file.readline().strip() if len(line) == 0: raise Exception("Pid file '%s' is empty" % self.path) try: pid = int(line) except ValueError: raise Exception("Pid file '%s' contains garbage: %s" % (self.path, line)) if pid <= 0: raise Exception("Pid file '%s' contains an invalid pid: %s" % (self.path, pid)) return pid def redirect_stdin_to_devnull(): """Redirect stdin to /dev/null""" fd = os.open(os.devnull, O_RDWR) os.dup2(fd, sys.stdin.fileno()) os.close(fd) def open_append(f): """Open a raw file descriptor in append mode""" # noinspection PyTypeChecker return os.open(f, O_WRONLY | O_APPEND | O_CREAT, 0o644) def redirect_output(fd): """Redirect stdout and stderr to a file descriptor""" os.dup2(fd, sys.stdout.fileno()) os.dup2(fd, sys.stderr.fileno()) def symlink_exists(p): """Check if symlink exists and raise if another type of file exists""" try: st = os.lstat(p) if not S_ISLNK(st.st_mode): raise Exception('Path exists and is not a symlink: %s' % p) return True except OSError as e: if e.errno != errno.ENOENT: raise return False def create_symlink(source, target): 
"""Create a symlink, removing the target first if it is a symlink""" if symlink_exists(target): os.remove(target) if exists(source): os.symlink(source, target) def create_app_symlinks(options): """ Symlink the 'etc' and 'plugin' directory into the data directory. This is needed to support programs that reference 'etc/xyz' from within their config files: log.levels-file=etc/log.properties """ if options.etc_dir != pathjoin(options.data_dir, 'etc'): create_symlink( options.etc_dir, pathjoin(options.data_dir, 'etc')) if options.install_path != options.data_dir: create_symlink( pathjoin(options.install_path, 'plugin'), pathjoin(options.data_dir, 'plugin')) def build_java_execution(options, daemon): if not exists(options.config_path): raise Exception('Config file is missing: %s' % options.config_path) if not exists(options.jvm_config): raise Exception('JVM config file is missing: %s' % options.jvm_config) if not exists(options.launcher_config): raise Exception('Launcher config file is missing: %s' % options.launcher_config) if options.log_levels_set and not exists(options.log_levels): raise Exception('Log levels file is missing: %s' % options.log_levels) properties = options.properties.copy() if exists(options.log_levels): properties['log.levels-file'] = options.log_levels if daemon: properties['log.output-file'] = options.server_log properties['log.enable-console'] = 'false' jvm_properties = load_lines(options.jvm_config) launcher_properties = load_properties(options.launcher_config) try: main_class = launcher_properties['main-class'] except KeyError: raise Exception("Launcher config is missing 'main-class' property") properties['config'] = options.config_path system_properties = ['-D%s=%s' % i for i in properties.items()] classpath = pathjoin(options.install_path, 'lib', '*') command = ['java', '-cp', classpath] command += jvm_properties + system_properties command += [main_class] command += options.arguments if options.verbose: print(command) print("") env = os.environ.copy() # set process name: https://github.com/electrum/procname process_name = launcher_properties.get('process-name', '') if len(process_name) > 0: system = platform.system() + '-' + platform.machine() shim = pathjoin(options.install_path, 'bin', 'procname', system, 'libprocname.so') if exists(shim): env['LD_PRELOAD'] = (env.get('LD_PRELOAD', '') + ':' + shim).strip() env['PROCNAME'] = process_name return command, env def run(process, options): if process.alive(): print('Already running as %s' % process.read_pid()) return create_app_symlinks(options) args, env = build_java_execution(options, False) makedirs(options.data_dir) os.chdir(options.data_dir) process.write_pid(os.getpid()) redirect_stdin_to_devnull() os.execvpe(args[0], args, env) def start(process, options): if process.alive(): print('Already running as %s' % process.read_pid()) return create_app_symlinks(options) args, env = build_java_execution(options, True) makedirs(dirname(options.launcher_log)) log = open_append(options.launcher_log) makedirs(options.data_dir) os.chdir(options.data_dir) pid = os.fork() if pid > 0: process.write_pid(pid) print('Started as %s' % pid) return if hasattr(os, "set_inheritable"): # See https://docs.python.org/3/library/os.html#inheritance-of-file-descriptors # Since Python 3.4 os.set_inheritable(process.pid_file.fileno(), True) os.setsid() redirect_stdin_to_devnull() redirect_output(log) os.close(log) os.execvpe(args[0], args, env) def terminate(process, signal, message): if not process.alive(): print('Not running') return pid = 
process.read_pid() while True: try: os.kill(pid, signal) except OSError as e: if e.errno != errno.ESRCH: raise Exception('Signaling pid %s failed: %s' % (pid, e)) if not process.alive(): process.clear_pid() break sleep(0.1) print('%s %s' % (message, pid)) def stop(process): terminate(process, SIGTERM, 'Stopped') def kill(process): terminate(process, SIGKILL, 'Killed') def status(process): if not process.alive(): print('Not running') sys.exit(LSB_NOT_RUNNING) print('Running as %s' % process.read_pid()) def handle_command(command, options): process = Process(options.pid_file) if command == 'run': run(process, options) elif command == 'start': start(process, options) elif command == 'stop': stop(process) elif command == 'restart': stop(process) start(process, options) elif command == 'kill': kill(process) elif command == 'status': status(process) else: raise AssertionError('Unhandled command: ' + command) def create_parser(): commands = 'Commands: ' + ', '.join(COMMANDS) parser = OptionParser(prog='launcher', usage='usage: %prog [options] command', description=commands) parser.add_option('-v', '--verbose', action='store_true', default=False, help='Run verbosely') parser.add_option('--etc-dir', metavar='DIR', help='Defaults to INSTALL_PATH/etc') parser.add_option('--launcher-config', metavar='FILE', help='Defaults to INSTALL_PATH/bin/launcher.properties') parser.add_option('--node-config', metavar='FILE', help='Defaults to ETC_DIR/node.properties') parser.add_option('--jvm-config', metavar='FILE', help='Defaults to ETC_DIR/jvm.config') parser.add_option('--config', metavar='FILE', help='Defaults to ETC_DIR/config.properties') parser.add_option('--log-levels-file', metavar='FILE', help='Defaults to ETC_DIR/log.properties') parser.add_option('--data-dir', metavar='DIR', help='Defaults to INSTALL_PATH') parser.add_option('--pid-file', metavar='FILE', help='Defaults to DATA_DIR/var/run/launcher.pid') parser.add_option('--arg', action='append', metavar='ARG', dest='arguments', help='Add a program argument of the Java application') parser.add_option('--launcher-log-file', metavar='FILE', help='Defaults to DATA_DIR/var/log/launcher.log (only in daemon mode)') parser.add_option('--server-log-file', metavar='FILE', help='Defaults to DATA_DIR/var/log/server.log (only in daemon mode)') parser.add_option('-D', action='append', metavar='NAME=VALUE', dest='properties', help='Set a Java system property') return parser def parse_properties(parser, args): properties = {} for arg in args: if '=' not in arg: parser.error('property is malformed: %s' % arg) key, value = [i.strip() for i in arg.split('=', 1)] if key == 'config': parser.error('cannot specify config using -D option (use --config)') if key == 'log.output-file': parser.error('cannot specify server log using -D option (use --server-log-file)') if key == 'log.levels-file': parser.error('cannot specify log levels using -D option (use --log-levels-file)') properties[key] = value return properties def print_options(options): if options.verbose: for i in sorted(vars(options)): print("%-15s = %s" % (i, getattr(options, i))) print("") class Options: pass def main(): parser = create_parser() (options, args) = parser.parse_args() if len(args) != 1: if len(args) == 0: parser.error('command name not specified') else: parser.error('too many arguments') command = args[0] if command not in COMMANDS: parser.error('unsupported command: %s' % command) try: install_path = find_install_path(sys.argv[0]) except Exception as e: print('ERROR: %s' % e) 
sys.exit(LSB_STATUS_UNKNOWN) o = Options() o.verbose = options.verbose o.install_path = install_path o.launcher_config = realpath(options.launcher_config or pathjoin(o.install_path, 'bin/launcher.properties')) o.etc_dir = realpath(options.etc_dir or pathjoin(o.install_path, 'etc')) o.node_config = realpath(options.node_config or pathjoin(o.etc_dir, 'node.properties')) o.jvm_config = realpath(options.jvm_config or pathjoin(o.etc_dir, 'jvm.config')) o.config_path = realpath(options.config or pathjoin(o.etc_dir, 'config.properties')) o.log_levels = realpath(options.log_levels_file or pathjoin(o.etc_dir, 'log.properties')) o.log_levels_set = bool(options.log_levels_file) if options.node_config and not exists(o.node_config): parser.error('Node config file is missing: %s' % o.node_config) node_properties = {} if exists(o.node_config): node_properties = load_properties(o.node_config) data_dir = node_properties.get('node.data-dir') o.data_dir = realpath(options.data_dir or data_dir or o.install_path) o.pid_file = realpath(options.pid_file or pathjoin(o.data_dir, 'var/run/launcher.pid')) o.launcher_log = realpath(options.launcher_log_file or pathjoin(o.data_dir, 'var/log/launcher.log')) o.server_log = realpath(options.server_log_file or pathjoin(o.data_dir, 'var/log/server.log')) o.properties = parse_properties(parser, options.properties or {}) for k, v in node_properties.items(): if k not in o.properties: o.properties[k] = v o.arguments = options.arguments or [] if o.verbose: print_options(o) try: handle_command(command, o) except SystemExit: raise except Exception as e: if o.verbose: traceback.print_exc() else: print('ERROR: %s' % e) sys.exit(LSB_STATUS_UNKNOWN) if __name__ == '__main__': main()
31.136364
135
0.639694
1,377
0.095725
0
0
0
0
0
0
3,542
0.246229
7c7f557e50cc992f1ad5414b88efb2c8bf4f59f5
1,213
py
Python
code/sim/test.py
vectorcrumb/Ballbot_IEE2913
5ab54825b2bfadae251e2c6bfaaa7f8fcdae77a0
[ "MIT" ]
null
null
null
code/sim/test.py
vectorcrumb/Ballbot_IEE2913
5ab54825b2bfadae251e2c6bfaaa7f8fcdae77a0
[ "MIT" ]
null
null
null
code/sim/test.py
vectorcrumb/Ballbot_IEE2913
5ab54825b2bfadae251e2c6bfaaa7f8fcdae77a0
[ "MIT" ]
null
null
null
from direct.showbase.ShowBase import ShowBase
from direct.task import Task
from direct.actor.Actor import Actor
import numpy as np


class MyApp(ShowBase):

    def __init__(self):
        ShowBase.__init__(self)
        # Load environment model
        self.scene = self.loader.loadModel("models/environment")
        # Reparent model to render
        self.scene.reparentTo(self.render)
        # Scale and position model
        self.scene.setScale(0.25, 0.25, 0.25)
        self.scene.setPos(-8, 42, 0)
        # Add spinCameraTask to task manager to execute
        self.taskMgr.add(self.spinCameraTask, "SpinCameraTask")
        # Load and transform panda actor
        self.pandaActor = Actor("models/panda-model", {"walk": "models/panda-walk4"})
        self.pandaActor.setScale(0.005, 0.005, 0.005)
        self.pandaActor.reparentTo(self.render)
        # Loop animation
        self.pandaActor.loop("walk")

    def spinCameraTask(self, task):
        angleDegs = task.time * 6.0
        angleRads = angleDegs * (np.pi / 180.0)
        self.camera.setPos(20*np.sin(angleRads), -20.0 * np.cos(angleRads), 3)
        self.camera.setHpr(angleDegs, 0, 0)
        return Task.cont


app = MyApp()
app.run()
32.783784
85
0.649629
1,056
0.870569
0
0
0
0
0
0
259
0.21352
7c80c3cc37ddb266e34cc1676cdc4a68cdabc9ff
32
py
Python
run_locally.py
nationalarchives/tdr-service-unavailable
fcb5930f57459b1e4e6d2d14244ebeecee2f6907
[ "MIT" ]
null
null
null
run_locally.py
nationalarchives/tdr-service-unavailable
fcb5930f57459b1e4e6d2d14244ebeecee2f6907
[ "MIT" ]
null
null
null
run_locally.py
nationalarchives/tdr-service-unavailable
fcb5930f57459b1e4e6d2d14244ebeecee2f6907
[ "MIT" ]
null
null
null
from app import app

app.run()
8
20
0.6875
0
0
0
0
0
0
0
0
0
0
7c80d22f73704982f5f02b4193bf4d13e0699eda
5,914
py
Python
src/pandas_profiling/model/describe.py
briangrahamww/pandas-profiling
62f8e3fd81720d444041069191c4aacd03d79ad5
[ "MIT" ]
null
null
null
src/pandas_profiling/model/describe.py
briangrahamww/pandas-profiling
62f8e3fd81720d444041069191c4aacd03d79ad5
[ "MIT" ]
4
2021-11-01T15:17:07.000Z
2022-01-26T15:22:15.000Z
src/pandas_profiling/model/describe.py
briangrahamww/pandas-profiling
62f8e3fd81720d444041069191c4aacd03d79ad5
[ "MIT" ]
null
null
null
"""Organize the calculation of statistics for each series in this DataFrame.""" import warnings from datetime import datetime from typing import Optional import pandas as pd from tqdm.auto import tqdm from visions import VisionsTypeset from pandas_profiling.config import Settings from pandas_profiling.model.correlations import calculate_correlation from pandas_profiling.model.duplicates import get_duplicates from pandas_profiling.model.sample import Sample, get_sample from pandas_profiling.model.summarizer import BaseSummarizer from pandas_profiling.model.summary import ( get_messages, get_missing_diagrams, get_scatter_matrix, get_series_descriptions, get_table_stats, ) from pandas_profiling.version import __version__ def describe( config: Settings, df: pd.DataFrame, summarizer: BaseSummarizer, typeset: VisionsTypeset, sample: Optional[dict] = None, ) -> dict: """Calculate the statistics for each series in this DataFrame. Args: config: report Settings object df: DataFrame. sample: optional, dict with custom sample Returns: This function returns a dictionary containing: - table: overall statistics. - variables: descriptions per series. - correlations: correlation matrices. - missing: missing value diagrams. - messages: direct special attention to these patterns in your data. - package: package details. """ if df is None: raise ValueError("Can not describe a `lazy` ProfileReport without a DataFrame.") if not isinstance(df, pd.DataFrame): warnings.warn("df is not of type pandas.DataFrame") disable_progress_bar = not config.progress_bar date_start = datetime.utcnow() correlation_names = [ correlation_name for correlation_name in [ "pearson", "spearman", "kendall", "phi_k", "cramers", ] if config.correlations[correlation_name].calculate ] number_of_tasks = 8 + len(df.columns) + len(correlation_names) with tqdm( total=number_of_tasks, desc="Summarize dataset", disable=disable_progress_bar ) as pbar: series_description = get_series_descriptions( config, df, summarizer, typeset, pbar ) pbar.set_postfix_str("Get variable types") variables = { column: description["type"] for column, description in series_description.items() } supported_columns = [ column for column, type_name in variables.items() if type_name != "Unsupported" ] interval_columns = [ column for column, type_name in variables.items() if type_name == "Numeric" ] pbar.update() # Get correlations correlations = {} for correlation_name in correlation_names: pbar.set_postfix_str(f"Calculate {correlation_name} correlation") correlations[correlation_name] = calculate_correlation( config, df, correlation_name, series_description ) pbar.update() # make sure correlations is not None correlations = { key: value for key, value in correlations.items() if value is not None } # Scatter matrix pbar.set_postfix_str("Get scatter matrix") scatter_matrix = get_scatter_matrix(config, df, interval_columns) pbar.update() # Table statistics pbar.set_postfix_str("Get table statistics") table_stats = get_table_stats(config, df, series_description) pbar.update() # missing diagrams pbar.set_postfix_str("Get missing diagrams") missing = get_missing_diagrams(config, df, table_stats) pbar.update() # Sample pbar.set_postfix_str("Take sample") if sample is None: samples = get_sample(config, df) else: if "name" not in sample: sample["name"] = None if "caption" not in sample: sample["caption"] = None samples = [ Sample( id="custom", data=sample["data"], name=sample["name"], caption=sample["caption"], ) ] pbar.update() # Duplicates pbar.set_postfix_str("Locating 
duplicates") metrics, duplicates = get_duplicates(config, df, supported_columns) table_stats.update(metrics) pbar.update() # Messages pbar.set_postfix_str("Get messages/warnings") messages = get_messages(config, table_stats, series_description, correlations) pbar.update() pbar.set_postfix_str("Get reproduction details") package = { "pandas_profiling_version": __version__, "pandas_profiling_config": config.json(), } pbar.update() pbar.set_postfix_str("Completed") date_end = datetime.utcnow() analysis = { "title": config.title, "date_start": date_start, "date_end": date_end, "duration": date_end - date_start, } return { # Analysis metadata "analysis": analysis, # Overall dataset description "table": table_stats, # Per variable descriptions "variables": series_description, # Bivariate relations "scatter": scatter_matrix, # Correlation matrices "correlations": correlations, # Missing values "missing": missing, # Warnings "messages": messages, # Package "package": package, # Sample "sample": samples, # Duplicates "duplicates": duplicates, }
30.328205
88
0.615996
0
0
0
0
0
0
0
0
1,617
0.273419
7c813b2cc84c9caa5444e2c87441c4626db990da
1,114
py
Python
maxOfferNum.py
Ruanxingzhi/King-of-Pigeon
38d6191c93c2d485b2e5cf163f06b9f2a5dacbec
[ "MIT" ]
null
null
null
maxOfferNum.py
Ruanxingzhi/King-of-Pigeon
38d6191c93c2d485b2e5cf163f06b9f2a5dacbec
[ "MIT" ]
null
null
null
maxOfferNum.py
Ruanxingzhi/King-of-Pigeon
38d6191c93c2d485b2e5cf163f06b9f2a5dacbec
[ "MIT" ]
null
null
null
import operator class Std(object): def __init__(self): self.name = '' self.offerNum = 0 self.offers = [] stds = [] stdsDict = {} index = 0 def readStd(name,camper): global stds global stdsDict global index if name not in stdsDict: newStd = Std() newStd.name = name stds.append(newStd) stdsDict[name] = index index += 1 if camper not in stds[stdsDict[name]].offers: stds[stdsDict[name]].offers.append(camper) stds[stdsDict[name]].offerNum += 1 if __name__ == "__main__": campers = ['PKUxk','THUsz_ai','THUsz_cs','THUsz_data','USTC_cs'] for camper in campers: filename = camper + '.txt' with open('data/%s'%(filename), "r") as f: data = f.readlines() for std in data: readStd(std,camper) cmpfun = operator.attrgetter('offerNum','name') stds.sort(key = cmpfun,reverse = True) for std in stds: if std.name[-1] == '\n': std.name = std.name[:-1] print(f'{std.name} got {std.offerNum} offer(s): {std.offers}')
26.52381
68
0.56553
116
0.103571
0
0
0
0
0
0
157
0.140179
7c81a099c1328ddb836ac7f6bc808bcec8ce85e6
5,525
py
Python
tabnine-vim/third_party/ycmd/third_party/python-future/setup.py
MrMonk3y/vimrc
950230fb3fd7991d1234c2ab516ec03245945677
[ "MIT" ]
2
2018-04-16T03:08:42.000Z
2021-01-06T10:21:49.000Z
tabnine-vim/third_party/ycmd/third_party/python-future/setup.py
MrMonk3y/vimrc
950230fb3fd7991d1234c2ab516ec03245945677
[ "MIT" ]
null
null
null
tabnine-vim/third_party/ycmd/third_party/python-future/setup.py
MrMonk3y/vimrc
950230fb3fd7991d1234c2ab516ec03245945677
[ "MIT" ]
null
null
null
#!/usr/bin/env python from __future__ import absolute_import, print_function import os import os.path import sys try: from setuptools import setup except ImportError: from distutils.core import setup if sys.argv[-1] == 'publish': os.system('python setup.py sdist upload') sys.exit() NAME = "future" PACKAGES = ["future", "future.builtins", "future.types", "future.standard_library", "future.backports", "future.backports.email", "future.backports.email.mime", "future.backports.html", "future.backports.http", "future.backports.test", "future.backports.urllib", "future.backports.xmlrpc", "future.moves", "future.moves.dbm", "future.moves.html", "future.moves.http", "future.moves.test", "future.moves.tkinter", "future.moves.urllib", "future.moves.xmlrpc", "future.tests", # for future.tests.base # "future.tests.test_email", "future.utils", "past", "past.builtins", "past.types", "past.utils", # "past.tests", "past.translation", "libfuturize", "libfuturize.fixes", "libpasteurize", "libpasteurize.fixes", ] # PEP 3108 stdlib moves: if sys.version_info[:2] < (3, 0): PACKAGES += [ "builtins", "configparser", "copyreg", "html", "http", "queue", "reprlib", "socketserver", "tkinter", "winreg", "xmlrpc", "_dummy_thread", "_markupbase", "_thread", ] PACKAGE_DATA = {'': [ 'README.rst', 'LICENSE.txt', 'futurize.py', 'pasteurize.py', 'discover_tests.py', 'check_rst.sh', 'TESTING.txt', ], 'tests': ['*.py'], } REQUIRES = [] TEST_REQUIRES = [] if sys.version_info[:2] == (2, 6): REQUIRES += ['importlib', 'argparse'] TEST_REQUIRES += ['unittest2'] import src.future VERSION = src.future.__version__ DESCRIPTION = "Clean single-source support for Python 3 and 2" LONG_DESC = src.future.__doc__ AUTHOR = "Ed Schofield" AUTHOR_EMAIL = "[email protected]" URL="https://python-future.org" LICENSE = "MIT" KEYWORDS = "future past python3 migration futurize backport six 2to3 modernize pasteurize 3to2" CLASSIFIERS = [ "Programming Language :: Python", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "License :: OSI Approved", "License :: OSI Approved :: MIT License", "Development Status :: 4 - Beta", "Intended Audience :: Developers", ] setup_kwds = {} # * Important * # We forcibly remove the build folder to avoid breaking the # user's Py3 installation if they run "python2 setup.py # build" and then "python3 setup.py install". try: # If the user happens to run: # python2 setup.py build # python3 setup.py install # then folders like "configparser" will be in build/lib. # If so, we CANNOT let the user install this, because # this may break his/her Python 3 install, depending on the folder order in # sys.path. (Running "import configparser" etc. may pick up our Py2 # substitute packages, instead of the intended system stdlib modules.) SYSTEM_MODULES = set([ '_dummy_thread', '_markupbase', '_thread', 'builtins', 'configparser', 'copyreg', 'html', 'http', 'queue', 'reprlib', 'socketserver', 'tkinter', 'winreg', 'xmlrpc' ]) if sys.version_info[0] >= 3: # Do any of the above folders exist in build/lib? files = os.listdir(os.path.join('build', 'lib')) if len(set(files) & set(SYSTEM_MODULES)) > 0: print('ERROR: Your build folder is in an inconsistent state for ' 'a Python 3.x install. 
Please remove it manually and run ' 'setup.py again.', file=sys.stderr) sys.exit(1) except OSError: pass setup(name=NAME, version=VERSION, author=AUTHOR, author_email=AUTHOR_EMAIL, url=URL, description=DESCRIPTION, long_description=LONG_DESC, license=LICENSE, keywords=KEYWORDS, entry_points={ 'console_scripts': [ 'futurize = libfuturize.main:main', 'pasteurize = libpasteurize.main:main' ] }, package_dir={'': 'src'}, packages=PACKAGES, package_data=PACKAGE_DATA, include_package_data=True, install_requires=REQUIRES, classifiers=CLASSIFIERS, test_suite = "discover_tests", tests_require=TEST_REQUIRES, **setup_kwds )
29.864865
95
0.523439
0
0
0
0
0
0
0
0
2,589
0.468597
7c81cc51df1ab53c03a469cdc7c5c3c8cd7e2980
508
py
Python
url_shortener/src/__init__.py
Andrelpoj/hire.me
79428e2094a6b56e762a7f958e1b75f395f59cef
[ "Apache-2.0" ]
null
null
null
url_shortener/src/__init__.py
Andrelpoj/hire.me
79428e2094a6b56e762a7f958e1b75f395f59cef
[ "Apache-2.0" ]
null
null
null
url_shortener/src/__init__.py
Andrelpoj/hire.me
79428e2094a6b56e762a7f958e1b75f395f59cef
[ "Apache-2.0" ]
null
null
null
from flask import Flask from .extensions import db from .routes import short from . import config def create_app(): """ Creates Flask App, connect to Database and register Blueprint of routes""" app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = config.DATABASE_CONNECTION_URI app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False app.app_context().push() db.init_app(app) db.create_all() app.register_blueprint(short) return app
28.222222
83
0.690945
0
0
0
0
0
0
0
0
135
0.265748
7c82276d6def1d1d6f137aa1788b787b2da8110f
3,009
py
Python
python-百度翻译调用/Baidu_translate/com/translate/baidu/stackoverflow_question_handler.py
wangchuanli001/Project-experience
b563c5c3afc07c913c2e1fd25dff41c70533f8de
[ "Apache-2.0" ]
12
2019-12-07T01:44:55.000Z
2022-01-27T14:13:30.000Z
python-百度翻译调用/Baidu_translate/com/translate/baidu/stackoverflow_question_handler.py
hujiese/Project-experience
b563c5c3afc07c913c2e1fd25dff41c70533f8de
[ "Apache-2.0" ]
23
2020-05-23T03:56:33.000Z
2022-02-28T07:54:45.000Z
python-百度翻译调用/Baidu_translate/com/translate/baidu/stackoverflow_question_handler.py
hujiese/Project-experience
b563c5c3afc07c913c2e1fd25dff41c70533f8de
[ "Apache-2.0" ]
7
2019-12-20T04:48:56.000Z
2021-11-19T02:23:45.000Z
import requests from bs4 import BeautifulSoup import urllib.request import os import random import time def html(url): user_agents = [ 'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11', 'Opera/9.25 (Windows NT 5.1; U; en)', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)', 'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12', 'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9', "Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7", "Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0 "] user_agent = random.choice(user_agents) headers = { 'User-Agent': user_agent, 'Accept-Encoding': 'gzip'} req = requests.get(url=url, headers=headers) html_doc = req.text soup = BeautifulSoup(html_doc, "html.parser") times = soup.select("time") views = soup.select("p.label-key > b") active_str = str(views[2]) active = active_str[active_str.find("title=\"") + 7:active_str.find("Z")] answers = soup.select("#answers-header > div > h2 >span") question_content = soup.select("div.post-text") tags = soup.select("#question > div.post-layout > div.postcell.post-layout--right > " "div.post-taglist.grid.gs4.gsy.fd-column > div >a") title = soup.select("h1 >a") tags_str = "" item = [] for tag in tags: tags_str += tag.get_text() + "," answer_contetnts = [] for i in range(1, len(question_content)): answer_contetnts.append(question_content[i]) for i in range(len(times)): if len(times[i].get_text()) > 1: asked_time = times[i].get("datetime").replace("T", " ") item.append(title[ 0].get_text()) # title views answersnum asked_time tag_str active_time quest_content_ text answer_content_list item.append(views[1].get_text()) item.append(answers[0].get_text()) item.append(asked_time) item.append(tags_str) item.append(active) item.append(question_content[0]) item.append(answer_contetnts) print(item) # updatetosql(item) def updatetosql(item): ansers_text = "[split]".join(item[7]) updatesql = "UPDATE `t_stackoverflow_question` " \ "SET `tags`='%s', `views`='%s', `answers_num`='%s', `asked_time`='%s', `last_active_time`='%s', `question_content`='%s', `answers_contetnt`='%s' " \ "WHERE (`question_id`='%s') " \ % (item[4], item[1], item[2], item[3], item[5], item[6], ansers_text, item[0],) pass if __name__ == '__main__': html("https://stackoverflow.com/questions/50119673/nginx-fast-cgi-cache-on-error-page-404")
42.985714
164
0.623463
0
0
0
0
0
0
0
0
1,400
0.465271
7c82c0f597ec23a15334ec51934c9484615b1b1f
2,541
py
Python
Research/data_loader.py
ALEXKIRNAS/Kaggle-C-CORE-Iceberg-Classifier-Challenge
d8b06969c9393cfce6d9ac96b58c9d365ff4369d
[ "MIT" ]
null
null
null
Research/data_loader.py
ALEXKIRNAS/Kaggle-C-CORE-Iceberg-Classifier-Challenge
d8b06969c9393cfce6d9ac96b58c9d365ff4369d
[ "MIT" ]
null
null
null
Research/data_loader.py
ALEXKIRNAS/Kaggle-C-CORE-Iceberg-Classifier-Challenge
d8b06969c9393cfce6d9ac96b58c9d365ff4369d
[ "MIT" ]
null
null
null
import os import numpy as np import pandas as pd from keras.utils import to_categorical from sklearn.model_selection import KFold, train_test_split def load_data(path): train = pd.read_json(os.path.join(path, "./train.json")) test = pd.read_json(os.path.join(path, "./test.json")) return (train, test) def preprocess(df, means=(-22.159262, -24.953745, 40.021883465782651), stds=(5.33146, 4.5463958, 4.0815391476694414)): X_band_1 = np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in df["band_1"]]) X_band_2 = np.array([np.array(band).astype(np.float32).reshape(75, 75) for band in df["band_2"]]) angl = df['inc_angle'].map(lambda x: np.cos(x * np.pi / 180) if x != 'na' else means[3]) angl = np.array([np.full(shape=(75, 75), fill_value=angel).astype(np.float32) for angel in angl]) X_band_1 = (X_band_1 - means[0]) / stds[0] X_band_2 = (X_band_2 - means[1]) / stds[1] angl = (angl - means[2]) / stds[2] images = np.concatenate([X_band_1[:, :, :, np.newaxis], X_band_2[:, :, :, np.newaxis], angl[:, :, :, np.newaxis]], axis=-1) return images def prepare_data_cv(path): train, test = load_data(path) X_train, y_train = (preprocess(train), to_categorical(train['is_iceberg'].as_matrix().reshape(-1, 1))) kfold_data = [] kf = KFold(n_splits=5, shuffle=True, random_state=0xCAFFE) for train_indices, val_indices in kf.split(y_train): X_train_cv = X_train[train_indices] y_train_cv = y_train[train_indices] X_val = X_train[val_indices] y_val = y_train[val_indices] kfold_data.append((X_train_cv, y_train_cv, X_val, y_val)) X_test = preprocess(test) return (kfold_data, X_test) def prepare_data(path): train, test = load_data(path) X_train, y_train = (preprocess(train), to_categorical(train['is_iceberg'].as_matrix().reshape(-1, 1))) X_train_cv, X_valid, y_train_cv, y_valid = train_test_split(X_train, y_train, random_state=0xCAFFE, train_size=0.8) X_test = preprocess(test) return ([(X_train_cv, y_train_cv, X_valid, y_valid)], X_test)
34.337838
92
0.562377
0
0
0
0
0
0
0
0
82
0.032271
7c82fafc5019f5e066e5d9af9ec1a1742645a993
27,180
py
Python
polyaxon_cli/cli/experiment.py
tiagopms/polyaxon-cli
eb13e3b8389ccf069a421a4dabc87aaa506ab61c
[ "MIT" ]
null
null
null
polyaxon_cli/cli/experiment.py
tiagopms/polyaxon-cli
eb13e3b8389ccf069a421a4dabc87aaa506ab61c
[ "MIT" ]
null
null
null
polyaxon_cli/cli/experiment.py
tiagopms/polyaxon-cli
eb13e3b8389ccf069a421a4dabc87aaa506ab61c
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function import sys import click import rhea from polyaxon_cli.cli.getters.experiment import ( get_experiment_job_or_local, get_project_experiment_or_local ) from polyaxon_cli.cli.upload import upload from polyaxon_cli.client import PolyaxonClient from polyaxon_cli.client.exceptions import PolyaxonHTTPError, PolyaxonShouldExitError from polyaxon_cli.logger import clean_outputs from polyaxon_cli.managers.experiment import ExperimentManager from polyaxon_cli.managers.experiment_job import ExperimentJobManager from polyaxon_cli.utils import cache from polyaxon_cli.utils.formatting import ( Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate ) from polyaxon_cli.utils.log_handler import get_logs_handler from polyaxon_cli.utils.validation import validate_tags from polyaxon_client.exceptions import PolyaxonClientException def get_experiment_details(experiment): # pylint:disable=redefined-outer-name if experiment.description: Printer.print_header("Experiment description:") click.echo('{}\n'.format(experiment.description)) if experiment.resources: get_resources(experiment.resources.to_dict(), header="Experiment resources:") if experiment.declarations: Printer.print_header("Experiment declarations:") dict_tabulate(experiment.declarations) if experiment.last_metric: Printer.print_header("Experiment last metrics:") dict_tabulate(experiment.last_metric) response = experiment.to_light_dict( humanize_values=True, exclude_attrs=[ 'uuid', 'config', 'project', 'experiments', 'description', 'declarations', 'last_metric', 'resources', 'jobs', 'run_env' ]) Printer.print_header("Experiment info:") dict_tabulate(Printer.add_status_color(response)) @click.group() @click.option('--project', '-p', type=str, help="The project name, e.g. 'mnist' or 'adam/mnist'.") @click.option('--experiment', '-xp', type=int, help="The experiment id number.") @click.pass_context @clean_outputs def experiment(ctx, project, experiment): # pylint:disable=redefined-outer-name """Commands for experiments.""" ctx.obj = ctx.obj or {} ctx.obj['project'] = project ctx.obj['experiment'] = experiment @experiment.command() @click.option('--job', '-j', type=int, help="The job id.") @click.pass_context @clean_outputs def get(ctx, job): """Get experiment or experiment job. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples for getting an experiment: \b ```bash $ polyaxon experiment get # if experiment is cached ``` \b ```bash $ polyaxon experiment --experiment=1 get ``` \b ```bash $ polyaxon experiment -xp 1 --project=cats-vs-dogs get ``` \b ```bash $ polyaxon experiment -xp 1 -p alain/cats-vs-dogs get ``` Examples for getting an experiment job: \b ```bash $ polyaxon experiment get -j 1 # if experiment is cached ``` \b ```bash $ polyaxon experiment --experiment=1 get --job=10 ``` \b ```bash $ polyaxon experiment -xp 1 --project=cats-vs-dogs get -j 2 ``` \b ```bash $ polyaxon experiment -xp 1 -p alain/cats-vs-dogs get -j 2 ``` """ def get_experiment(): try: response = PolyaxonClient().experiment.get_experiment(user, project_name, _experiment) cache.cache(config_manager=ExperimentManager, response=response) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not load experiment `{}` info.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) get_experiment_details(response) def get_experiment_job(): try: response = PolyaxonClient().experiment_job.get_job(user, project_name, _experiment, _job) cache.cache(config_manager=ExperimentJobManager, response=response) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get job `{}`.'.format(_job)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) if response.resources: get_resources(response.resources.to_dict(), header="Job resources:") response = Printer.add_status_color(response.to_light_dict( humanize_values=True, exclude_attrs=['uuid', 'definition', 'experiment', 'unique_name', 'resources'] )) Printer.print_header("Job info:") dict_tabulate(response) user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if job: _job = get_experiment_job_or_local(job) get_experiment_job() else: get_experiment() @experiment.command() @click.pass_context @clean_outputs def delete(ctx): """Delete experiment. Uses [Caching](/references/polyaxon-cli/#caching) Example: \b ```bash $ polyaxon experiment delete ``` """ user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if not click.confirm("Are sure you want to delete experiment `{}`".format(_experiment)): click.echo('Existing without deleting experiment.') sys.exit(1) try: response = PolyaxonClient().experiment.delete_experiment( user, project_name, _experiment) # Purge caching ExperimentManager.purge() except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not delete experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) if response.status_code == 204: Printer.print_success("Experiment `{}` was delete successfully".format(_experiment)) @experiment.command() @click.option('--name', type=str, help='Name of the experiment, must be unique within the project, could be none.') @click.option('--description', type=str, help='Description of the experiment.') @click.option('--tags', type=str, help='Tags of the experiment, comma separated values.') @click.pass_context @clean_outputs def update(ctx, name, description, tags): """Update experiment. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples: \b ```bash $ polyaxon experiment -xp 2 update --description="new description for my experiments" ``` \b ```bash $ polyaxon experiment -xp 2 update --tags="foo, bar" --name="unique-name" ``` """ user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) update_dict = {} if name: update_dict['name'] = name if description: update_dict['description'] = description tags = validate_tags(tags) if tags: update_dict['tags'] = tags if not update_dict: Printer.print_warning('No argument was provided to update the experiment.') sys.exit(0) try: response = PolyaxonClient().experiment.update_experiment( user, project_name, _experiment, update_dict) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not update experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success("Experiment updated.") get_experiment_details(response) @experiment.command() @click.option('--yes', '-y', is_flag=True, default=False, help="Automatic yes to prompts. " "Assume \"yes\" as answer to all prompts and run non-interactively.") @click.pass_context @clean_outputs def stop(ctx, yes): """Stop experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \b ```bash $ polyaxon experiment stop ``` \b ```bash $ polyaxon experiment -xp 2 stop ``` """ user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if not yes and not click.confirm("Are sure you want to stop " "experiment `{}`".format(_experiment)): click.echo('Existing without stopping experiment.') sys.exit(0) try: PolyaxonClient().experiment.stop(user, project_name, _experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not stop experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success("Experiment is being stopped.") @experiment.command() @click.option('--copy', '-c', is_flag=True, default=False, help="To copy the experiment before restarting.") @click.option('--file', '-f', multiple=True, type=click.Path(exists=True), help="The polyaxon files to update with.") @click.option('-u', is_flag=True, default=False, help="To upload the repo before restarting.") @click.pass_context @clean_outputs def restart(ctx, copy, file, u): # pylint:disable=redefined-builtin """Restart experiment. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples: \b ```bash $ polyaxon experiment --experiment=1 restart ``` """ config = None update_code = None if file: config = rhea.read(file) # Check if we need to upload if u: ctx.invoke(upload, sync=False) update_code = True user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: if copy: response = PolyaxonClient().experiment.copy( user, project_name, _experiment, config=config, update_code=update_code) Printer.print_success('Experiment was copied with id {}'.format(response.id)) else: response = PolyaxonClient().experiment.restart( user, project_name, _experiment, config=config, update_code=update_code) Printer.print_success('Experiment was restarted with id {}'.format(response.id)) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not restart experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) @experiment.command() @click.option('--file', '-f', multiple=True, type=click.Path(exists=True), help="The polyaxon files to update with.") @click.option('-u', is_flag=True, default=False, help="To upload the repo before resuming.") @click.pass_context @clean_outputs def resume(ctx, file, u): # pylint:disable=redefined-builtin """Resume experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \b ```bash $ polyaxon experiment --experiment=1 resume ``` """ config = None update_code = None if file: config = rhea.read(file) # Check if we need to upload if u: ctx.invoke(upload, sync=False) update_code = True user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: response = PolyaxonClient().experiment.resume( user, project_name, _experiment, config=config, update_code=update_code) Printer.print_success('Experiment was resumed with id {}'.format(response.id)) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not resume experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) @experiment.command() @click.option('--page', type=int, help="To paginate through the list of jobs.") @click.pass_context @clean_outputs def jobs(ctx, page): """List jobs for experiment. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples: \b ```bash $ polyaxon experiment --experiment=1 jobs ``` """ user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) page = page or 1 try: response = PolyaxonClient().experiment.list_jobs( user, project_name, _experiment, page=page) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get jobs for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) meta = get_meta_response(response) if meta: Printer.print_header('Jobs for experiment `{}`.'.format(_experiment)) Printer.print_header('Navigation:') dict_tabulate(meta) else: Printer.print_header('No jobs found for experiment `{}`.'.format(_experiment)) objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True)) for o in response['results']] objects = list_dicts_to_tabulate(objects) if objects: Printer.print_header("Jobs:") objects.pop('experiment', None) dict_tabulate(objects, is_list_dict=True) @experiment.command() @click.option('--job', '-j', type=int, help="The job id.") @click.option('--page', type=int, help="To paginate through the list of statuses.") @click.pass_context @clean_outputs def statuses(ctx, job, page): """Get experiment or experiment job statuses. Uses [Caching](/references/polyaxon-cli/#caching) Examples getting experiment statuses: \b ```bash $ polyaxon experiment statuses ``` \b ```bash $ polyaxon experiment -xp 1 statuses ``` Examples getting experiment job statuses: \b ```bash $ polyaxon experiment statuses -j 3 ``` \b ```bash $ polyaxon experiment -xp 1 statuses --job 1 ``` """ def get_experiment_statuses(): try: response = PolyaxonClient().experiment.get_statuses( user, project_name, _experiment, page=page) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could get status for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) meta = get_meta_response(response) if meta: Printer.print_header('Statuses for experiment `{}`.'.format(_experiment)) Printer.print_header('Navigation:') dict_tabulate(meta) else: Printer.print_header('No statuses found for experiment `{}`.'.format(_experiment)) objects = list_dicts_to_tabulate( [Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='status') for o in response['results']]) if objects: Printer.print_header("Statuses:") objects.pop('experiment', None) dict_tabulate(objects, is_list_dict=True) def get_experiment_job_statuses(): try: response = PolyaxonClient().experiment_job.get_statuses(user, project_name, _experiment, _job, page=page) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get status for job `{}`.'.format(job)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) meta = get_meta_response(response) if meta: Printer.print_header('Statuses for Job `{}`.'.format(_job)) Printer.print_header('Navigation:') dict_tabulate(meta) else: Printer.print_header('No statuses found for job `{}`.'.format(_job)) objects = list_dicts_to_tabulate( [Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='status') for o in response['results']]) if objects: Printer.print_header("Statuses:") objects.pop('job', None) dict_tabulate(objects, is_list_dict=True) page = page or 1 user, project_name, _experiment = 
get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if job: _job = get_experiment_job_or_local(job) get_experiment_job_statuses() else: get_experiment_statuses() @experiment.command() @click.option('--job', '-j', type=int, help="The job id.") @click.option('--gpu', '-g', is_flag=True, help="List experiment GPU resources.") @click.pass_context @clean_outputs def resources(ctx, job, gpu): """Get experiment or experiment job resources. Uses [Caching](/references/polyaxon-cli/#caching) Examples for getting experiment resources: \b ```bash $ polyaxon experiment -xp 19 resources ``` For GPU resources \b ```bash $ polyaxon experiment -xp 19 resources --gpu ``` Examples for getting experiment job resources: \b ```bash $ polyaxon experiment -xp 19 resources -j 1 ``` For GPU resources \b ```bash $ polyaxon experiment -xp 19 resources -j 1 --gpu ``` """ def get_experiment_resources(): try: message_handler = Printer.gpu_resources if gpu else Printer.resources PolyaxonClient().experiment.resources( user, project_name, _experiment, message_handler=message_handler) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get resources for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) def get_experiment_job_resources(): try: message_handler = Printer.gpu_resources if gpu else Printer.resources PolyaxonClient().experiment_job.resources(user, project_name, _experiment, _job, message_handler=message_handler) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get resources for job `{}`.'.format(_job)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if job: _job = get_experiment_job_or_local(job) get_experiment_job_resources() else: get_experiment_resources() @experiment.command() @click.option('--job', '-j', type=int, help="The job id.") @click.option('--past', '-p', is_flag=True, help="Show the past logs.") @click.option('--follow', '-f', is_flag=True, default=False, help="Stream logs after showing past logs.") @click.option('--hide_time', is_flag=True, default=False, help="Whether or not to hide timestamps from the log stream.") @click.pass_context @clean_outputs def logs(ctx, job, past, follow, hide_time): """Get experiment or experiment job logs. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples for getting experiment logs: \b ```bash $ polyaxon experiment logs ``` \b ```bash $ polyaxon experiment -xp 10 -p mnist logs ``` Examples for getting experiment job logs: \b ```bash $ polyaxon experiment -xp 1 -j 1 logs ``` """ def get_experiment_logs(): if past: try: response = PolyaxonClient().experiment.logs( user, project_name, _experiment, stream=False) get_logs_handler(handle_job_info=True, show_timestamp=not hide_time, stream=False)(response.content.decode().split('\n')) print() if not follow: return except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: if not follow: Printer.print_error( 'Could not get logs for experiment `{}`.'.format(_experiment)) Printer.print_error( 'Error message `{}`.'.format(e)) sys.exit(1) try: PolyaxonClient().experiment.logs( user, project_name, _experiment, message_handler=get_logs_handler(handle_job_info=True, show_timestamp=not hide_time)) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get logs for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) def get_experiment_job_logs(): if past: try: response = PolyaxonClient().experiment_job.logs( user, project_name, _experiment, _job, stream=False) get_logs_handler(handle_job_info=True, show_timestamp=not hide_time, stream=False)(response.content.decode().split('\n')) print() if not follow: return except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: if not follow: Printer.print_error( 'Could not get logs for experiment `{}`.'.format(_experiment)) Printer.print_error( 'Error message `{}`.'.format(e)) sys.exit(1) try: PolyaxonClient().experiment_job.logs( user, project_name, _experiment, _job, message_handler=get_logs_handler(handle_job_info=True, show_timestamp=not hide_time)) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not get logs for job `{}`.'.format(_job)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) if job: _job = get_experiment_job_or_local(job) get_experiment_job_logs() else: get_experiment_logs() @experiment.command() @click.pass_context @clean_outputs def outputs(ctx): """Download outputs for experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \b ```bash $ polyaxon experiment -xp 1 outputs ``` """ user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: PolyaxonClient().experiment.download_outputs(user, project_name, _experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not download outputs for experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success('Files downloaded.') @experiment.command() @click.pass_context @clean_outputs def bookmark(ctx): """Bookmark experiment. 
Uses [Caching](/references/polyaxon-cli/#caching) Examples: \b ```bash $ polyaxon experiment bookmark ``` \b ```bash $ polyaxon experiment -xp 2 bookmark ``` """ user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: PolyaxonClient().experiment.bookmark(user, project_name, _experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not bookmark experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success("Experiment is bookmarked.") @experiment.command() @click.pass_context @clean_outputs def unbookmark(ctx): """Unbookmark experiment. Uses [Caching](/references/polyaxon-cli/#caching) Examples: \b ```bash $ polyaxon experiment unbookmark ``` \b ```bash $ polyaxon experiment -xp 2 unbookmark ``` """ user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'), ctx.obj.get('experiment')) try: PolyaxonClient().experiment.unbookmark(user, project_name, _experiment) except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error('Could not unbookmark experiment `{}`.'.format(_experiment)) Printer.print_error('Error message `{}`.'.format(e)) sys.exit(1) Printer.print_success("Experiment is unbookmarked.")
33.84807
99
0.606659
0
0
0
0
25,205
0.927336
0
0
8,067
0.296799
7c839f4dc74ac86e89c284ecfbdaf987fd07d858
554
py
Python
Problem_09.py
Habbo3/Project-Euler
1a01d67f72b9cfb606d13df91af89159b588216e
[ "MIT" ]
null
null
null
Problem_09.py
Habbo3/Project-Euler
1a01d67f72b9cfb606d13df91af89159b588216e
[ "MIT" ]
null
null
null
Problem_09.py
Habbo3/Project-Euler
1a01d67f72b9cfb606d13df91af89159b588216e
[ "MIT" ]
null
null
null
""" A Pythagorean triplet is a set of three natural numbers, a < b < c, for which, a2 + b2 = c2 For example, 32 + 42 = 9 + 16 = 25 = 52. There exists exactly one Pythagorean triplet for which a + b + c = 1000. Find the product abc. """ solved = False for a in range(1, 1000): for b in range(1, 1000): for c in range(1, 1000): if a < b < c: if a + b + c == 1000: if a**2 + b**2 == c**2: solved = True break if solved: break if solved: break product = a*b*c print("The product of only triplet who exists is : ", product)
24.086957
78
0.601083
0
0
0
0
0
0
0
0
281
0.50722
7c83aa67c0a65ae58c0709d1dc148cd1d75e4a56
2,862
py
Python
fanscribed/apps/transcripts/tests/test_transcripts.py
fanscribed/fanscribed
89b14496459f81a152df38ed5098fba2b087a1d7
[ "MIT" ]
8
2015-01-05T07:04:02.000Z
2016-07-19T17:56:46.000Z
fanscribed/apps/transcripts/tests/test_transcripts.py
fanscribed/fanscribed
89b14496459f81a152df38ed5098fba2b087a1d7
[ "MIT" ]
32
2015-03-18T18:51:00.000Z
2021-06-10T20:37:33.000Z
fanscribed/apps/transcripts/tests/test_transcripts.py
fanscribed/fanscribed
89b14496459f81a152df38ed5098fba2b087a1d7
[ "MIT" ]
5
2015-02-10T21:15:32.000Z
2016-06-02T17:26:14.000Z
from decimal import Decimal import os from django.test import TestCase from unipath import Path from ....utils import refresh from ...media import tests from ..models import Transcript, TranscriptMedia MEDIA_TESTDATA_PATH = Path(tests.__file__).parent.child('testdata') RAW_MEDIA_PATH = MEDIA_TESTDATA_PATH.child('raw').child( 'NA-472-2012-12-23-Final-excerpt.mp3').absolute() class TranscriptsTestCase(TestCase): def test_transcript_starts_out_with_unknown_length(self): transcript = Transcript.objects.create(title='test') self.assertEqual(transcript.length, None) def test_setting_transcript_length_creates_fragments_and_stitches(self): t = Transcript.objects.create(title='test') t.set_length('3.33') f0, = t.fragments.all() self.assertEqual(f0.start, Decimal('0.00')) self.assertEqual(f0.end, Decimal('3.33')) self.assertEqual(t.stitches.count(), 0) t = Transcript.objects.create(title='test') t.set_length('7.77') f0, = t.fragments.all() self.assertEqual(f0.start, Decimal('0.00')) self.assertEqual(f0.end, Decimal('7.77')) self.assertEqual(t.stitches.count(), 0) t = Transcript.objects.create(title='test') t.set_length('17.77') f0, f1, f2 = t.fragments.all() self.assertEqual(f0.start, Decimal('0.00')) self.assertEqual(f0.end, Decimal('5.00')) self.assertEqual(f1.start, Decimal('5.00')) self.assertEqual(f1.end, Decimal('10.00')) self.assertEqual(f2.start, Decimal('10.00')) self.assertEqual(f2.end, Decimal('17.77')) s0, s1 = t.stitches.all() self.assertEqual(s0.left, f0) self.assertEqual(s0.right, f1) self.assertEqual(s0.state, 'notready') self.assertEqual(s1.left, f1) self.assertEqual(s1.right, f2) self.assertEqual(s1.state, 'notready') if os.environ.get('FAST_TEST') != '1': from django.core.files import File class SlowTranscriptsTestCase(TestCase): def test_transcript_with_processed_media_has_length(self): transcript = Transcript.objects.create( title='test transcript', ) raw_media = TranscriptMedia( transcript=transcript, is_processed=False, is_full_length=True, ) with open(RAW_MEDIA_PATH, 'rb') as f: raw_media.file.save('{transcript.id}_raw.mp3'.format(**locals()), File(f)) raw_media.save() # Process raw media. raw_media.create_processed_task() transcript = refresh(transcript) # Check length. expected_length = 5 * 60 # 5 minutes. self.assertAlmostEqual( transcript.length, expected_length, delta=0.2)
33.27907
90
0.628931
2,385
0.833333
0
0
0
0
0
0
285
0.099581
7c83fd89c702ba9d9dcb725c78535f9419ea8d70
2,771
py
Python
buildAncestryFeats.py
BurcinSayin/pf2
bcd362dc0a750b8ee59cd19ecff9cf5be4f34b19
[ "MIT" ]
null
null
null
buildAncestryFeats.py
BurcinSayin/pf2
bcd362dc0a750b8ee59cd19ecff9cf5be4f34b19
[ "MIT" ]
null
null
null
buildAncestryFeats.py
BurcinSayin/pf2
bcd362dc0a750b8ee59cd19ecff9cf5be4f34b19
[ "MIT" ]
null
null
null
from bs4 import BeautifulSoup import requests import json import datetime import codecs import re featHolder = {} featHolder['name'] = 'Pathfinder 2.0 Ancestry feat list' featHolder['date'] = datetime.date.today().strftime("%B %d, %Y") def get_details(link): res = requests.get(link) res.raise_for_status() soup = BeautifulSoup(res.text, 'lxml') feat = soup.find_all("div", {'class':'main'}) detailraw = soup.find("meta", {'name':'description'})['content'] #First we grab the content from the meta tag detailsplit = re.split('<(.*?)>', detailraw) #Now we split it into groups of strings seperated by < >, to pull out any links detail = ''.join(detailsplit[::2]) #Finally, we join every other group together (passing over the link groups) into one string #print(detail) return detail def get_feats(link): feats = [] res = requests.get(link) res.raise_for_status() soup = BeautifulSoup(res.text, 'lxml') table = soup.find(lambda tag: tag.name=='table' and tag.has_attr('id') and tag['id']=="ctl00_MainContent_TableElement") rows = table.findAll(lambda tag: tag.name=='tr') t = 0 for row in rows: t += 1 #print(row) #print("-----------------------------------") feat = {} entries = row.find_all(lambda tag: tag.name=='td') if entries is not None: if len(entries) > 0: name = entries[0].find("a").next_sibling.text #We do next_sibling here because the source puts PFS links first, which we want to skip over. link = entries[0].find("a").next_sibling.a['href'] #for entry in entries: # print(entry) # print("row---------------") level = entries[1].text traits = entries[2].text prereq = entries[3].text source = entries[4].text feat['name'] = name feat['level'] = level feat['traits'] = traits.split(", ") feat['link'] = "https://2e.aonprd.com/" +link feat['prereq'] = prereq feat['benefits'] = source details = get_details(feat['link']) feat['text'] = details feats.append(feat) #if t > 5: #break return feats listOfPages = codecs.open("ancestryFeats.csv", encoding='utf-8') for line in listOfPages: featMD = line.split(",") print("Getting feats for :", featMD[0],"This url:", featMD[2]) featHolder[featMD[1]] = get_feats(featMD[2].strip('\n')) json_data = json.dumps(featHolder, indent=4) #print(json_data) filename = "ancestry-feats-pf2.json" f = open(filename, "w") f.write(json_data) f.close
34.209877
155
0.572717
0
0
0
0
0
0
0
0
857
0.309275
7c84005ad03ff1fb7961f46195db1060fc63cb16
861
py
Python
Random_item_selector_module.py
Jahronimo/public_question_book_framework
812bd11b104de013e930536713b8134d046642d5
[ "MIT" ]
null
null
null
Random_item_selector_module.py
Jahronimo/public_question_book_framework
812bd11b104de013e930536713b8134d046642d5
[ "MIT" ]
null
null
null
Random_item_selector_module.py
Jahronimo/public_question_book_framework
812bd11b104de013e930536713b8134d046642d5
[ "MIT" ]
1
2020-03-07T10:53:30.000Z
2020-03-07T10:53:30.000Z
import random def Randomise(questions_lists): import random import secrets secure_random = secrets.SystemRandom()# creates a secure random object. group_of_items = questions_lists num_qustion_t_select = num_question_to_display list_of_random_items = secure_random.sample(group_of_items, num_qustion_t_select) # randomly selecting from strings within each question list for each_question in range (0, num_qustion_t_select): # I think this is where i need to add in some information but don't understand. #printing some kind of structure with numbers of question and space to answer. print (("Q."),(each_question + 1),((list_of_random_items[each_question]))) print (("A."),(each_question + 1),("_______________________")) print ("\n")
47.833333
86
0.682927
0
0
0
0
0
0
0
0
286
0.332172
7c8421979f69cbc7cf5cd9ec5a87a153ab3efc74
1,228
py
Python
python_scrape/test_functions.py
jose-marquez89/tech-job-landscape
0b509536e7ba22885f50c82da8cf990b65373090
[ "MIT" ]
null
null
null
python_scrape/test_functions.py
jose-marquez89/tech-job-landscape
0b509536e7ba22885f50c82da8cf990b65373090
[ "MIT" ]
null
null
null
python_scrape/test_functions.py
jose-marquez89/tech-job-landscape
0b509536e7ba22885f50c82da8cf990b65373090
[ "MIT" ]
null
null
null
import unittest import scrape class TestScrapeFunctions(unittest.TestCase): def test_build_url(self): url = scrape.build_url("indeed", "/jobs?q=Data+Scientist&l=Texas&start=10", join_next=True) expected = ("https://www.indeed.com/" "jobs?q=Data+Scientist&l=Texas&start=10") url2 = scrape.build_url("indeed", job="Data Scientist", state="Texas") expected2 = ("https://www.indeed.com/" "jobs?q=Data%20Scientist&l=Texas&start=0") self.assertEqual(url, expected) self.assertEqual(url2, expected2) def test_fetch_page(self): fpl = scrape.fetch_page_listings job_data = fpl("indeed", job="Data Scientist", state="Texas") self.assertNotEqual(len(job_data), 0) self.assertIsInstance(job_data, tuple) self.assertIsInstance(job_data[0][0], dict) self.assertIsInstance(job_data[1], str) job_data = fpl("indeed", next_page="/jobs?q=Data+Scientist" "&l=Texas&start=10") if __name__ == '__main__': unittest.main()
34.111111
78
0.556189
1,146
0.933225
0
0
0
0
0
0
303
0.246743
7c84e9b3f92ddbf93482eff72a312c6afff49d17
173
py
Python
Level1_Input_Output/10172.py
jaeheeLee17/BOJ_Algorithms
c14641693d7ef0f5bba0a6637166c7ceadb2a0be
[ "MIT" ]
null
null
null
Level1_Input_Output/10172.py
jaeheeLee17/BOJ_Algorithms
c14641693d7ef0f5bba0a6637166c7ceadb2a0be
[ "MIT" ]
null
null
null
Level1_Input_Output/10172.py
jaeheeLee17/BOJ_Algorithms
c14641693d7ef0f5bba0a6637166c7ceadb2a0be
[ "MIT" ]
null
null
null
def main(): print("|\_/|") print("|q p| /}") print("( 0 )\"\"\"\\") print("|\"^\"` |") print("||_/=\\\\__|") if __name__ == "__main__": main()
17.3
26
0.352601
0
0
0
0
0
0
0
0
72
0.416185
7c85f2097ce6518402e3aa24b38cc365cc5ffeaa
4,981
py
Python
Whats Cooking/KaggleCookingComparison.py
rupakc/Kaggle-Compendium
61634ba742f9a0239f2d1e45973c4bb477ac6306
[ "MIT" ]
17
2018-01-11T05:49:06.000Z
2021-08-22T16:50:10.000Z
Whats Cooking/KaggleCookingComparison.py
Tuanlase02874/Machine-Learning-Kaggle
c31651acd8f2407d8b60774e843a2527ce19b013
[ "MIT" ]
null
null
null
Whats Cooking/KaggleCookingComparison.py
Tuanlase02874/Machine-Learning-Kaggle
c31651acd8f2407d8b60774e843a2527ce19b013
[ "MIT" ]
8
2017-11-27T06:58:50.000Z
2021-08-22T16:50:13.000Z
# -*- coding: utf-8 -*- """ Created on Sat Dec 26 13:20:45 2015 Code for Kaggle What's Cooking Competition It uses the following classifiers with tf-idf,hashvectors and bag_of_words approach 1. Adaboost 2. Extratrees 3. Bagging 4. Random Forests @author: Rupak Chakraborty """ import numpy as np import time import json import ClassificationUtils from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_extraction.text import HashingVectorizer from sklearn.ensemble import AdaBoostClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import ExtraTreesClassifier from sklearn.ensemble import BaggingClassifier from sklearn import metrics # Create the feature extractors bag_of_words = CountVectorizer(stop_words='english') tfidf = TfidfVectorizer(stop_words='english') hashvec = HashingVectorizer(stop_words='english') # Create the Classifier objects adaboost = AdaBoostClassifier() randomforest = RandomForestClassifier() extratrees = ExtraTreesClassifier() bagging = BaggingClassifier() filepath = "train.json" f = open(filepath,"r") content = f.read() jsonData = json.loads(content) cuisine_set = set([]) ingredient_set = set([]) cuisine_map = {} cuisine_numerical_map = {} ingredient_numerical_map = {} ingredient_map = {} ingredient_list = list([]) c = 0 print "Size of the data set : ", len(jsonData) print "Starting Loading of Data Set...." start = time.time() for recipe in jsonData: if "cuisine" in recipe: s = "" if recipe["cuisine"] in cuisine_set: cuisine_map[recipe["cuisine"]] = cuisine_map[recipe["cuisine"]] + 1 else: cuisine_map[recipe["cuisine"]] = 1 cuisine_set.add(recipe["cuisine"]) for ingredient in recipe["ingredients"]: if ingredient in ingredient_set: ingredient_map[ingredient] = ingredient_map[ingredient] + 1 else: ingredient_map[ingredient] = 1 ingredient_set.add(ingredient) s = s + " " + ingredient ingredient_list.append(s) end = time.time() print "Time Taken to Load the Dataset : ",end-start for cuisine in cuisine_set: cuisine_numerical_map[cuisine] = c c = c+1 c = 0 for ingredient in ingredient_set: ingredient_numerical_map[ingredient] = c c = c+1 print "Starting Feature Extracting ......" start = time.time() train_labels = np.zeros(len(ingredient_list)) train_data_tfidf = tfidf.fit_transform(ingredient_list) train_data_hash = hashvec.fit_transform(ingredient_list) train_data_bag = bag_of_words.fit_transform(ingredient_list) c = 0 for recipe in jsonData: if "cuisine" in recipe: train_labels[c] = cuisine_numerical_map[recipe["cuisine"]] c = c+1 end = time.time() print "Time Taken to Train Extract Different Features : ", end-start test_labels = train_labels[1:30000] test_data_tfidf = tfidf.transform(ingredient_list[1:30000]) test_data_hash = hashvec.transform(ingredient_list[1:30000]) test_data_bag = bag_of_words.transform(ingredient_list[1:30000]) print "Starting Training of Models for Hash Vectorizer Feature....." start = time.time() adaboost.fit(train_data_bag,train_labels) randomforest.fit(train_data_bag,train_labels) extratrees.fit(train_data_bag,train_labels) bagging.fit(train_data_bag,train_labels) end=time.time() print "Time Taken to train all Ensemble Models : ", end-start print "Starting Prediction of Test Labels ...." 
start = time.time() ada_predict = adaboost.predict(test_data_bag) rf_predict = randomforest.predict(test_data_bag) extree_predict = extratrees.predict(test_data_bag) bagging_predict = bagging.predict(test_data_bag) end = time.time() print "Time Taken to Test the models : ", end-start print "Accuracy of AdaBoost Algorithm : ", metrics.accuracy_score(test_labels,ada_predict) print "Accuracy of Random Forests : ", metrics.accuracy_score(test_labels,rf_predict) print "Accuracy of Extra Trees : ", metrics.accuracy_score(test_labels,extree_predict) print "Accuracy of Bagging : ", metrics.accuracy_score(test_labels,bagging_predict) # Saving the tf-idf model and classifiers ClassificationUtils.save_classifier("ada_bag_cook.pickle",adaboost) ClassificationUtils.save_classifier("rf_bag_cook.pickle",randomforest) ClassificationUtils.save_classifier("extree_bag_cook.pickle",extratrees) ClassificationUtils.save_classifier("bagging_bag_cook.pickle",bagging) ClassificationUtils.save_classifier("bag_of_words.pickle",tfidf) def printIngredientDistribution(): print "----------- Distribution of the Recipe Ingredients ------------------" for key in ingredient_map.keys(): print key, " : " ,ingredient_map[key] def printCuisineDistribution(): print "----------- Distribution of the Cuisines ------------------" for key in cuisine_map.keys(): print key, " : " ,cuisine_map[key]
32.344156
90
0.739611
0
0
0
0
0
0
0
0
1,244
0.249749
7c85f5102089b2dbe1aa3c33bc6b5354992888f4
466
py
Python
pybook/ch10/DeckOfCards.py
YanhaoXu/python-learning
856687a71635a2ca67dab49d396c238f128e5ec0
[ "MIT" ]
2
2021-12-06T13:29:48.000Z
2022-01-20T11:39:45.000Z
pybook/ch10/DeckOfCards.py
YanhaoXu/python-learning
856687a71635a2ca67dab49d396c238f128e5ec0
[ "MIT" ]
null
null
null
pybook/ch10/DeckOfCards.py
YanhaoXu/python-learning
856687a71635a2ca67dab49d396c238f128e5ec0
[ "MIT" ]
null
null
null
import random # Create a deck of cards deck = [x for x in range(52)] # Create suits and ranks lists suits = ["Spades", "Hearts", "Diamonds", "Clubs"] ranks = ["Ace", "2", "3", "4", "5", "6", "7", "8", "9", "10", "Jack", "Queen", "King"] # Shuffle the cards random.shuffle(deck) # Display the first four cards for i in range(4): suit = suits[deck[i] // 13] rank = ranks[deck[i] % 13] print("Card number", deck[i], "is the", rank, "of", suit)
24.526316
61
0.575107
0
0
0
0
0
0
0
0
213
0.457082
7c8673116b02c8c1dd21b123ad5da8653dbefe4c
3,410
py
Python
nlpgnn/gnn/RGCNConv.py
ojipadeson/NLPGNN
7c43d2f0cb2b16c046c930037fd505c5c4f36db4
[ "MIT" ]
263
2020-05-19T10:40:26.000Z
2022-03-25T05:22:49.000Z
nlpgnn/gnn/RGCNConv.py
Kuan-Louis/NLPGNN
b9ecec2c6df1b3e40a54511366dcb6085cf90c34
[ "MIT" ]
7
2020-05-18T23:02:55.000Z
2021-04-29T18:27:43.000Z
nlpgnn/gnn/RGCNConv.py
Kuan-Louis/NLPGNN
b9ecec2c6df1b3e40a54511366dcb6085cf90c34
[ "MIT" ]
56
2020-05-19T05:59:36.000Z
2022-03-14T06:21:33.000Z
#! usr/bin/env python3 # -*- coding:utf-8 -*- """ @Author:Kaiyin Zhou Usage: node_embeddings = tf.random.normal(shape=(5, 3)) adjacency_lists = [ tf.constant([[0, 1], [2, 4], [2, 4]], dtype=tf.int32), tf.constant([[0, 1], [2, 4], [2, 4]], dtype=tf.int32) ] layer = RGraphConvolution(out_features=12) x = layer(GNNInput(node_embeddings, adjacency_lists)) """ import tensorflow as tf from nlpgnn.gnn.messagepassing import MessagePassing class RGraphConvolution(MessagePassing): def __init__(self, out_features, epsion=1e-7, aggr="sum", normalize=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', use_bias=True, **kwargs): super(RGraphConvolution, self).__init__(aggr, **kwargs) self.kernel_initializer = tf.keras.initializers.get(kernel_initializer) self.bias_initializer = tf.keras.initializers.get(bias_initializer) self.use_bias = use_bias self.normalize = normalize self.out_features = out_features self.epsion = epsion def build(self, input_shapes): node_embedding_shapes = input_shapes.node_embeddings adjacency_list_shapes = input_shapes.adjacency_lists num_edge_type = len(adjacency_list_shapes) in_features = node_embedding_shapes[-1] self._edge_type_weights = [] self._edge_type_bias = [] for i in range(num_edge_type): weight = self.add_weight( shape=(in_features, self.out_features), initializer=self.kernel_initializer, name='wt_{}'.format(i), ) self._edge_type_weights.append(weight) if self.use_bias: self.bias = self.add_weight( shape=(self.out_features), initializer=self.bias_initializer, name='b', ) else: self.bias = None self.weight_o = self.add_weight( shape=(in_features, self.out_features), initializer=self.kernel_initializer, name='wo', ) self.built = True def message_function(self, edge_source_states, edge_target_states, num_incoming_to_node_per_message, num_outing_to_node_per_message, edge_type_idx): """ :param edge_source_states: [M,H] :param edge_target_states: [M,H] :param num_incoming_to_node_per_message:[M] :param edge_type_idx: :param training: :return: """ weight_r = self._edge_type_weights[edge_type_idx] messages = tf.linalg.matmul(edge_source_states, weight_r) if self.normalize: messages = ( tf.expand_dims(1.0 / (tf.cast(num_incoming_to_node_per_message, tf.float32) + self.epsion), axis=-1) * messages ) return messages def call(self, inputs): aggr_out = self.propagate(inputs) # message_passing + update aggr_out += tf.linalg.matmul(inputs.node_embeddings, self.weight_o) if self.bias is not None: aggr_out += self.bias return aggr_out
35.894737
97
0.575367
2,913
0.854252
0
0
0
0
0
0
702
0.205865
7c872854a67dcbee173ef18681a5116e43865d52
53,677
py
Python
automl/google/cloud/automl_v1beta1/gapic/auto_ml_client.py
erikwebb/google-cloud-python
288a878e9a07239015c78a193eca1cc15e926127
[ "Apache-2.0" ]
1
2019-04-16T08:13:06.000Z
2019-04-16T08:13:06.000Z
automl/google/cloud/automl_v1beta1/gapic/auto_ml_client.py
erikwebb/google-cloud-python
288a878e9a07239015c78a193eca1cc15e926127
[ "Apache-2.0" ]
null
null
null
automl/google/cloud/automl_v1beta1/gapic/auto_ml_client.py
erikwebb/google-cloud-python
288a878e9a07239015c78a193eca1cc15e926127
[ "Apache-2.0" ]
1
2020-11-15T11:44:36.000Z
2020-11-15T11:44:36.000Z
# -*- coding: utf-8 -*- # # Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Accesses the google.cloud.automl.v1beta1 AutoMl API.""" import functools import pkg_resources import warnings from google.oauth2 import service_account import google.api_core.gapic_v1.client_info import google.api_core.gapic_v1.config import google.api_core.gapic_v1.method import google.api_core.grpc_helpers import google.api_core.operation import google.api_core.operations_v1 import google.api_core.page_iterator import google.api_core.path_template import grpc from google.cloud.automl_v1beta1.gapic import auto_ml_client_config from google.cloud.automl_v1beta1.gapic import enums from google.cloud.automl_v1beta1.gapic.transports import auto_ml_grpc_transport from google.cloud.automl_v1beta1.proto import data_items_pb2 from google.cloud.automl_v1beta1.proto import dataset_pb2 from google.cloud.automl_v1beta1.proto import io_pb2 from google.cloud.automl_v1beta1.proto import model_evaluation_pb2 from google.cloud.automl_v1beta1.proto import model_pb2 from google.cloud.automl_v1beta1.proto import operations_pb2 as proto_operations_pb2 from google.cloud.automl_v1beta1.proto import prediction_service_pb2 from google.cloud.automl_v1beta1.proto import prediction_service_pb2_grpc from google.cloud.automl_v1beta1.proto import service_pb2 from google.cloud.automl_v1beta1.proto import service_pb2_grpc from google.longrunning import operations_pb2 as longrunning_operations_pb2 from google.protobuf import empty_pb2 _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-automl").version class AutoMlClient(object): """ AutoML Server API. The resource names are assigned by the server. The server never reuses names that it has created after the resources with those names are deleted. An ID of a resource is the last element of the item's resource name. For ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``, then the id for the item is ``{dataset_id}``. """ SERVICE_ADDRESS = "automl.googleapis.com:443" """The default address of the service.""" # The name of the interface for this client. This is the key used to # find the method configuration in the client_config dictionary. _INTERFACE_NAME = "google.cloud.automl.v1beta1.AutoMl" @classmethod def from_service_account_file(cls, filename, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: AutoMlClient: The constructed client. 
""" credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @classmethod def location_path(cls, project, location): """Return a fully-qualified location string.""" return google.api_core.path_template.expand( "projects/{project}/locations/{location}", project=project, location=location, ) @classmethod def dataset_path(cls, project, location, dataset): """Return a fully-qualified dataset string.""" return google.api_core.path_template.expand( "projects/{project}/locations/{location}/datasets/{dataset}", project=project, location=location, dataset=dataset, ) @classmethod def model_path(cls, project, location, model): """Return a fully-qualified model string.""" return google.api_core.path_template.expand( "projects/{project}/locations/{location}/models/{model}", project=project, location=location, model=model, ) @classmethod def model_evaluation_path(cls, project, location, model, model_evaluation): """Return a fully-qualified model_evaluation string.""" return google.api_core.path_template.expand( "projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}", project=project, location=location, model=model, model_evaluation=model_evaluation, ) def __init__( self, transport=None, channel=None, credentials=None, client_config=None, client_info=None, ): """Constructor. Args: transport (Union[~.AutoMlGrpcTransport, Callable[[~.Credentials, type], ~.AutoMlGrpcTransport]): A transport instance, responsible for actually making the API calls. The default transport uses the gRPC protocol. This argument may also be a callable which returns a transport instance. Callables will be sent the credentials as the first argument and the default transport class as the second argument. channel (grpc.Channel): DEPRECATED. A ``Channel`` instance through which to make calls. This argument is mutually exclusive with ``credentials``; providing both will raise an exception. credentials (google.auth.credentials.Credentials): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. This argument is mutually exclusive with providing a transport instance to ``transport``; doing so will raise an exception. client_config (dict): DEPRECATED. A dictionary of call options for each method. If not specified, the default configuration is used. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. """ # Raise deprecation warnings for things we want to go away. if client_config is not None: warnings.warn( "The `client_config` argument is deprecated.", PendingDeprecationWarning, stacklevel=2, ) else: client_config = auto_ml_client_config.config if channel: warnings.warn( "The `channel` argument is deprecated; use " "`transport` instead.", PendingDeprecationWarning, stacklevel=2, ) # Instantiate the transport. # The transport is responsible for handling serialization and # deserialization and actually sending data to the service. 
if transport: if callable(transport): self.transport = transport( credentials=credentials, default_class=auto_ml_grpc_transport.AutoMlGrpcTransport, ) else: if credentials: raise ValueError( "Received both a transport instance and " "credentials; these are mutually exclusive." ) self.transport = transport else: self.transport = auto_ml_grpc_transport.AutoMlGrpcTransport( address=self.SERVICE_ADDRESS, channel=channel, credentials=credentials ) if client_info is None: client_info = google.api_core.gapic_v1.client_info.ClientInfo( gapic_version=_GAPIC_LIBRARY_VERSION ) else: client_info.gapic_version = _GAPIC_LIBRARY_VERSION self._client_info = client_info # Parse out the default settings for retry and timeout for each RPC # from the client configuration. # (Ordinarily, these are the defaults specified in the `*_config.py` # file next to this one.) self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( client_config["interfaces"][self._INTERFACE_NAME] ) # Save a dictionary of cached API call functions. # These are the actual callables which invoke the proper # transport methods, wrapped with `wrap_method` to add retry, # timeout, and the like. self._inner_api_calls = {} # Service calls def create_dataset( self, parent, dataset, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Creates a dataset. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> parent = client.location_path('[PROJECT]', '[LOCATION]') >>> >>> # TODO: Initialize `dataset`: >>> dataset = {} >>> >>> response = client.create_dataset(parent, dataset) Args: parent (str): The resource name of the project to create the dataset for. dataset (Union[dict, ~google.cloud.automl_v1beta1.types.Dataset]): The dataset to create. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.Dataset` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "create_dataset" not in self._inner_api_calls: self._inner_api_calls[ "create_dataset" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_dataset, default_retry=self._method_configs["CreateDataset"].retry, default_timeout=self._method_configs["CreateDataset"].timeout, client_info=self._client_info, ) request = service_pb2.CreateDatasetRequest(parent=parent, dataset=dataset) return self._inner_api_calls["create_dataset"]( request, retry=retry, timeout=timeout, metadata=metadata ) def get_dataset( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Gets a dataset. 
Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> >>> response = client.get_dataset(name) Args: name (str): The resource name of the dataset to retrieve. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "get_dataset" not in self._inner_api_calls: self._inner_api_calls[ "get_dataset" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_dataset, default_retry=self._method_configs["GetDataset"].retry, default_timeout=self._method_configs["GetDataset"].timeout, client_info=self._client_info, ) request = service_pb2.GetDatasetRequest(name=name) return self._inner_api_calls["get_dataset"]( request, retry=retry, timeout=timeout, metadata=metadata ) def list_datasets( self, parent, filter_=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Lists datasets in a project. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> parent = client.location_path('[PROJECT]', '[LOCATION]') >>> >>> # Iterate over all results >>> for element in client.list_datasets(parent): ... # process element ... pass >>> >>> >>> # Alternatively: >>> >>> # Iterate over results one page at a time >>> for page in client.list_datasets(parent).pages: ... for element in page: ... # process element ... pass Args: parent (str): The resource name of the project from which to list datasets. filter_ (str): An expression for filtering the results of the request. - ``dataset_metadata`` - for existence of the case. An example of using the filter is: - ``translation_dataset_metadata:*`` --> The dataset has translation\_dataset\_metadata. page_size (int): The maximum number of resources contained in the underlying API response. If page streaming is performed per- resource, this parameter does not affect the return value. If page streaming is performed per-page, this determines the maximum number of resources in a page. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.gax.PageIterator` instance. By default, this is an iterable of :class:`~google.cloud.automl_v1beta1.types.Dataset` instances. This object can also be configured to iterate over the pages of the response through the `options` parameter. 
Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "list_datasets" not in self._inner_api_calls: self._inner_api_calls[ "list_datasets" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_datasets, default_retry=self._method_configs["ListDatasets"].retry, default_timeout=self._method_configs["ListDatasets"].timeout, client_info=self._client_info, ) request = service_pb2.ListDatasetsRequest( parent=parent, filter=filter_, page_size=page_size ) iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls["list_datasets"], retry=retry, timeout=timeout, metadata=metadata, ), request=request, items_field="datasets", request_token_field="page_token", response_token_field="next_page_token", ) return iterator def delete_dataset( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Deletes a dataset and all of its contents. Returns empty response in the ``response`` field when it completes, and ``delete_details`` in the ``metadata`` field. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> >>> response = client.delete_dataset(name) >>> >>> def callback(operation_future): ... # Handle result. ... result = operation_future.result() >>> >>> response.add_done_callback(callback) >>> >>> # Handle metadata. >>> metadata = response.metadata() Args: name (str): The resource name of the dataset to delete. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "delete_dataset" not in self._inner_api_calls: self._inner_api_calls[ "delete_dataset" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_dataset, default_retry=self._method_configs["DeleteDataset"].retry, default_timeout=self._method_configs["DeleteDataset"].timeout, client_info=self._client_info, ) request = service_pb2.DeleteDatasetRequest(name=name) operation = self._inner_api_calls["delete_dataset"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) def import_data( self, name, input_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Imports data into a dataset. 
Returns an empty response in the ``response`` field when it completes. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> >>> # TODO: Initialize `input_config`: >>> input_config = {} >>> >>> response = client.import_data(name, input_config) >>> >>> def callback(operation_future): ... # Handle result. ... result = operation_future.result() >>> >>> response.add_done_callback(callback) >>> >>> # Handle metadata. >>> metadata = response.metadata() Args: name (str): Required. Dataset name. Dataset must already exist. All imported annotations and examples will be added. input_config (Union[dict, ~google.cloud.automl_v1beta1.types.InputConfig]): Required. The desired input location. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.InputConfig` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "import_data" not in self._inner_api_calls: self._inner_api_calls[ "import_data" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.import_data, default_retry=self._method_configs["ImportData"].retry, default_timeout=self._method_configs["ImportData"].timeout, client_info=self._client_info, ) request = service_pb2.ImportDataRequest(name=name, input_config=input_config) operation = self._inner_api_calls["import_data"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) def export_data( self, name, output_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Exports dataset's data to a Google Cloud Storage bucket. Returns an empty response in the ``response`` field when it completes. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> >>> # TODO: Initialize `output_config`: >>> output_config = {} >>> >>> response = client.export_data(name, output_config) >>> >>> def callback(operation_future): ... # Handle result. ... result = operation_future.result() >>> >>> response.add_done_callback(callback) >>> >>> # Handle metadata. >>> metadata = response.metadata() Args: name (str): Required. The resource name of the dataset. output_config (Union[dict, ~google.cloud.automl_v1beta1.types.OutputConfig]): Required. The desired output location. 
If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.OutputConfig` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "export_data" not in self._inner_api_calls: self._inner_api_calls[ "export_data" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.export_data, default_retry=self._method_configs["ExportData"].retry, default_timeout=self._method_configs["ExportData"].timeout, client_info=self._client_info, ) request = service_pb2.ExportDataRequest(name=name, output_config=output_config) operation = self._inner_api_calls["export_data"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) def create_model( self, parent, model, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Creates a model. Returns a Model in the ``response`` field when it completes. When you create a model, several model evaluations are created for it: a global evaluation, and one evaluation for each annotation spec. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> parent = client.location_path('[PROJECT]', '[LOCATION]') >>> >>> # TODO: Initialize `model`: >>> model = {} >>> >>> response = client.create_model(parent, model) >>> >>> def callback(operation_future): ... # Handle result. ... result = operation_future.result() >>> >>> response.add_done_callback(callback) >>> >>> # Handle metadata. >>> metadata = response.metadata() Args: parent (str): Resource name of the parent project where the model is being created. model (Union[dict, ~google.cloud.automl_v1beta1.types.Model]): The model to create. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.Model` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. 
""" # Wrap the transport method to add retry and timeout logic. if "create_model" not in self._inner_api_calls: self._inner_api_calls[ "create_model" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_model, default_retry=self._method_configs["CreateModel"].retry, default_timeout=self._method_configs["CreateModel"].timeout, client_info=self._client_info, ) request = service_pb2.CreateModelRequest(parent=parent, model=model) operation = self._inner_api_calls["create_model"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, model_pb2.Model, metadata_type=proto_operations_pb2.OperationMetadata, ) def get_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Gets a model. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> >>> response = client.get_model(name) Args: name (str): Resource name of the model. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types.Model` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "get_model" not in self._inner_api_calls: self._inner_api_calls[ "get_model" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_model, default_retry=self._method_configs["GetModel"].retry, default_timeout=self._method_configs["GetModel"].timeout, client_info=self._client_info, ) request = service_pb2.GetModelRequest(name=name) return self._inner_api_calls["get_model"]( request, retry=retry, timeout=timeout, metadata=metadata ) def list_models( self, parent, filter_=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Lists models. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> parent = client.location_path('[PROJECT]', '[LOCATION]') >>> >>> # Iterate over all results >>> for element in client.list_models(parent): ... # process element ... pass >>> >>> >>> # Alternatively: >>> >>> # Iterate over results one page at a time >>> for page in client.list_models(parent).pages: ... for element in page: ... # process element ... pass Args: parent (str): Resource name of the project, from which to list the models. filter_ (str): An expression for filtering the results of the request. - ``model_metadata`` - for existence of the case. - ``dataset_id`` - for = or !=. Some examples of using the filter are: - ``image_classification_model_metadata:*`` --> The model has image\_classification\_model\_metadata. - ``dataset_id=5`` --> The model was created from a sibling dataset with ID 5. 
page_size (int): The maximum number of resources contained in the underlying API response. If page streaming is performed per- resource, this parameter does not affect the return value. If page streaming is performed per-page, this determines the maximum number of resources in a page. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.gax.PageIterator` instance. By default, this is an iterable of :class:`~google.cloud.automl_v1beta1.types.Model` instances. This object can also be configured to iterate over the pages of the response through the `options` parameter. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "list_models" not in self._inner_api_calls: self._inner_api_calls[ "list_models" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_models, default_retry=self._method_configs["ListModels"].retry, default_timeout=self._method_configs["ListModels"].timeout, client_info=self._client_info, ) request = service_pb2.ListModelsRequest( parent=parent, filter=filter_, page_size=page_size ) iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls["list_models"], retry=retry, timeout=timeout, metadata=metadata, ), request=request, items_field="model", request_token_field="page_token", response_token_field="next_page_token", ) return iterator def delete_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Deletes a model. If a model is already deployed, this only deletes the model in AutoML BE, and does not change the status of the deployed model in the production environment. Returns ``google.protobuf.Empty`` in the ``response`` field when it completes, and ``delete_details`` in the ``metadata`` field. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> >>> response = client.delete_model(name) >>> >>> def callback(operation_future): ... # Handle result. ... result = operation_future.result() >>> >>> response.add_done_callback(callback) >>> >>> # Handle metadata. >>> metadata = response.metadata() Args: name (str): Resource name of the model being deleted. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. 
google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "delete_model" not in self._inner_api_calls: self._inner_api_calls[ "delete_model" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_model, default_retry=self._method_configs["DeleteModel"].retry, default_timeout=self._method_configs["DeleteModel"].timeout, client_info=self._client_info, ) request = service_pb2.DeleteModelRequest(name=name) operation = self._inner_api_calls["delete_model"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, empty_pb2.Empty, metadata_type=proto_operations_pb2.OperationMetadata, ) def deploy_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Deploys model. Returns a ``DeployModelResponse`` in the ``response`` field when it completes. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> >>> response = client.deploy_model(name) Args: name (str): Resource name of the model to deploy. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types.Operation` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "deploy_model" not in self._inner_api_calls: self._inner_api_calls[ "deploy_model" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.deploy_model, default_retry=self._method_configs["DeployModel"].retry, default_timeout=self._method_configs["DeployModel"].timeout, client_info=self._client_info, ) request = service_pb2.DeployModelRequest(name=name) return self._inner_api_calls["deploy_model"]( request, retry=retry, timeout=timeout, metadata=metadata ) def undeploy_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Undeploys model. Returns an ``UndeployModelResponse`` in the ``response`` field when it completes. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> >>> response = client.undeploy_model(name) Args: name (str): Resource name of the model to undeploy. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. 
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types.Operation` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "undeploy_model" not in self._inner_api_calls: self._inner_api_calls[ "undeploy_model" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.undeploy_model, default_retry=self._method_configs["UndeployModel"].retry, default_timeout=self._method_configs["UndeployModel"].timeout, client_info=self._client_info, ) request = service_pb2.UndeployModelRequest(name=name) return self._inner_api_calls["undeploy_model"]( request, retry=retry, timeout=timeout, metadata=metadata ) def get_model_evaluation( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Gets a model evaluation. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> name = client.model_evaluation_path('[PROJECT]', '[LOCATION]', '[MODEL]', '[MODEL_EVALUATION]') >>> >>> response = client.get_model_evaluation(name) Args: name (str): Resource name for the model evaluation. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "get_model_evaluation" not in self._inner_api_calls: self._inner_api_calls[ "get_model_evaluation" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_model_evaluation, default_retry=self._method_configs["GetModelEvaluation"].retry, default_timeout=self._method_configs["GetModelEvaluation"].timeout, client_info=self._client_info, ) request = service_pb2.GetModelEvaluationRequest(name=name) return self._inner_api_calls["get_model_evaluation"]( request, retry=retry, timeout=timeout, metadata=metadata ) def list_model_evaluations( self, parent, filter_=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Lists model evaluations. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> parent = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> >>> # Iterate over all results >>> for element in client.list_model_evaluations(parent): ... # process element ... pass >>> >>> >>> # Alternatively: >>> >>> # Iterate over results one page at a time >>> for page in client.list_model_evaluations(parent).pages: ... for element in page: ... # process element ... 
pass Args: parent (str): Resource name of the model to list the model evaluations for. If modelId is set as "-", this will list model evaluations from across all models of the parent location. filter_ (str): An expression for filtering the results of the request. - ``annotation_spec_id`` - for =, != or existence. See example below for the last. Some examples of using the filter are: - ``annotation_spec_id!=4`` --> The model evaluation was done for annotation spec with ID different than 4. - ``NOT annotation_spec_id:*`` --> The model evaluation was done for aggregate of all annotation specs. page_size (int): The maximum number of resources contained in the underlying API response. If page streaming is performed per- resource, this parameter does not affect the return value. If page streaming is performed per-page, this determines the maximum number of resources in a page. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.gax.PageIterator` instance. By default, this is an iterable of :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instances. This object can also be configured to iterate over the pages of the response through the `options` parameter. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "list_model_evaluations" not in self._inner_api_calls: self._inner_api_calls[ "list_model_evaluations" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_model_evaluations, default_retry=self._method_configs["ListModelEvaluations"].retry, default_timeout=self._method_configs["ListModelEvaluations"].timeout, client_info=self._client_info, ) request = service_pb2.ListModelEvaluationsRequest( parent=parent, filter=filter_, page_size=page_size ) iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls["list_model_evaluations"], retry=retry, timeout=timeout, metadata=metadata, ), request=request, items_field="model_evaluation", request_token_field="page_token", response_token_field="next_page_token", ) return iterator
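# ---------------------------------------------------------------------------
# Illustrative usage sketch: a minimal example of calling the AutoMlClient
# defined above, not part of the original generated file. Only methods that
# the client above defines are used (from_service_account_file, location_path,
# dataset_path, list_datasets, delete_dataset); the key-file path, project ID,
# location and dataset ID are hypothetical placeholders.
# ---------------------------------------------------------------------------
from google.cloud import automl_v1beta1

# Build a client from a service-account key file (hypothetical path).
client = automl_v1beta1.AutoMlClient.from_service_account_file("service-account.json")

# Compose the parent resource name with the class path helper.
parent = client.location_path("my-project", "us-central1")

# list_datasets returns a page iterator; iterating yields Dataset messages.
for dataset in client.list_datasets(parent):
    print(dataset.name)

# Mutating calls such as delete_dataset return a long-running operation
# future; result() blocks until the backend reports completion.
name = client.dataset_path("my-project", "us-central1", "my-dataset-id")
operation = client.delete_dataset(name)
operation.result()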
42.198899
128
0.596512
51555
0.960467
0
0
2098
0.039086
0
0
35469
0.660786
7c87af0c38dbd1633d14f5192f2da57d1ebe0d89
73923
py
Python
addons/project/models/project.py
SHIVJITH/Odoo_Machine_Test
310497a9872db7844b521e6dab5f7a9f61d365a4
[ "Apache-2.0" ]
null
null
null
addons/project/models/project.py
SHIVJITH/Odoo_Machine_Test
310497a9872db7844b521e6dab5f7a9f61d365a4
[ "Apache-2.0" ]
null
null
null
addons/project/models/project.py
SHIVJITH/Odoo_Machine_Test
310497a9872db7844b521e6dab5f7a9f61d365a4
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. import ast from datetime import timedelta, datetime from random import randint from odoo import api, fields, models, tools, SUPERUSER_ID, _ from odoo.exceptions import UserError, AccessError, ValidationError, RedirectWarning from odoo.tools.misc import format_date, get_lang from odoo.osv.expression import OR from .project_task_recurrence import DAYS, WEEKS class ProjectTaskType(models.Model): _name = 'project.task.type' _description = 'Task Stage' _order = 'sequence, id' def _get_default_project_ids(self): default_project_id = self.env.context.get('default_project_id') return [default_project_id] if default_project_id else None active = fields.Boolean('Active', default=True) name = fields.Char(string='Stage Name', required=True, translate=True) description = fields.Text(translate=True) sequence = fields.Integer(default=1) project_ids = fields.Many2many('project.project', 'project_task_type_rel', 'type_id', 'project_id', string='Projects', default=_get_default_project_ids) legend_blocked = fields.Char( 'Red Kanban Label', default=lambda s: _('Blocked'), translate=True, required=True, help='Override the default value displayed for the blocked state for kanban selection, when the task or issue is in that stage.') legend_done = fields.Char( 'Green Kanban Label', default=lambda s: _('Ready'), translate=True, required=True, help='Override the default value displayed for the done state for kanban selection, when the task or issue is in that stage.') legend_normal = fields.Char( 'Grey Kanban Label', default=lambda s: _('In Progress'), translate=True, required=True, help='Override the default value displayed for the normal state for kanban selection, when the task or issue is in that stage.') mail_template_id = fields.Many2one( 'mail.template', string='Email Template', domain=[('model', '=', 'project.task')], help="If set an email will be sent to the customer when the task or issue reaches this step.") fold = fields.Boolean(string='Folded in Kanban', help='This stage is folded in the kanban view when there are no records in that stage to display.') rating_template_id = fields.Many2one( 'mail.template', string='Rating Email Template', domain=[('model', '=', 'project.task')], help="If set and if the project's rating configuration is 'Rating when changing stage', then an email will be sent to the customer when the task reaches this step.") auto_validation_kanban_state = fields.Boolean('Automatic kanban status', default=False, help="Automatically modify the kanban state when the customer replies to the feedback for this stage.\n" " * A good feedback from the customer will update the kanban state to 'ready for the new stage' (green bullet).\n" " * A medium or a bad feedback will set the kanban state to 'blocked' (red bullet).\n") is_closed = fields.Boolean('Closing Stage', help="Tasks in this stage are considered as closed.") disabled_rating_warning = fields.Text(compute='_compute_disabled_rating_warning') def unlink_wizard(self, stage_view=False): self = self.with_context(active_test=False) # retrieves all the projects with a least 1 task in that stage # a task can be in a stage even if the project is not assigned to the stage readgroup = self.with_context(active_test=False).env['project.task'].read_group([('stage_id', 'in', self.ids)], ['project_id'], ['project_id']) project_ids = list(set([project['project_id'][0] for project in readgroup] + self.project_ids.ids)) wizard = 
self.with_context(project_ids=project_ids).env['project.task.type.delete.wizard'].create({ 'project_ids': project_ids, 'stage_ids': self.ids }) context = dict(self.env.context) context['stage_view'] = stage_view return { 'name': _('Delete Stage'), 'view_mode': 'form', 'res_model': 'project.task.type.delete.wizard', 'views': [(self.env.ref('project.view_project_task_type_delete_wizard').id, 'form')], 'type': 'ir.actions.act_window', 'res_id': wizard.id, 'target': 'new', 'context': context, } def write(self, vals): if 'active' in vals and not vals['active']: self.env['project.task'].search([('stage_id', 'in', self.ids)]).write({'active': False}) return super(ProjectTaskType, self).write(vals) @api.depends('project_ids', 'project_ids.rating_active') def _compute_disabled_rating_warning(self): for stage in self: disabled_projects = stage.project_ids.filtered(lambda p: not p.rating_active) if disabled_projects: stage.disabled_rating_warning = '\n'.join('- %s' % p.name for p in disabled_projects) else: stage.disabled_rating_warning = False class Project(models.Model): _name = "project.project" _description = "Project" _inherit = ['portal.mixin', 'mail.alias.mixin', 'mail.thread', 'rating.parent.mixin'] _order = "sequence, name, id" _rating_satisfaction_days = False # takes all existing ratings _check_company_auto = True def _compute_attached_docs_count(self): Attachment = self.env['ir.attachment'] for project in self: project.doc_count = Attachment.search_count([ '|', '&', ('res_model', '=', 'project.project'), ('res_id', '=', project.id), '&', ('res_model', '=', 'project.task'), ('res_id', 'in', project.task_ids.ids) ]) def _compute_task_count(self): task_data = self.env['project.task'].read_group([('project_id', 'in', self.ids), '|', '&', ('stage_id.is_closed', '=', False), ('stage_id.fold', '=', False), ('stage_id', '=', False)], ['project_id'], ['project_id']) result = dict((data['project_id'][0], data['project_id_count']) for data in task_data) for project in self: project.task_count = result.get(project.id, 0) def attachment_tree_view(self): action = self.env['ir.actions.act_window']._for_xml_id('base.action_attachment') action['domain'] = str([ '|', '&', ('res_model', '=', 'project.project'), ('res_id', 'in', self.ids), '&', ('res_model', '=', 'project.task'), ('res_id', 'in', self.task_ids.ids) ]) action['context'] = "{'default_res_model': '%s','default_res_id': %d}" % (self._name, self.id) return action def _compute_is_favorite(self): for project in self: project.is_favorite = self.env.user in project.favorite_user_ids def _inverse_is_favorite(self): favorite_projects = not_fav_projects = self.env['project.project'].sudo() for project in self: if self.env.user in project.favorite_user_ids: favorite_projects |= project else: not_fav_projects |= project # Project User has no write access for project. 
not_fav_projects.write({'favorite_user_ids': [(4, self.env.uid)]}) favorite_projects.write({'favorite_user_ids': [(3, self.env.uid)]}) def _get_default_favorite_user_ids(self): return [(6, 0, [self.env.uid])] name = fields.Char("Name", index=True, required=True, tracking=True) description = fields.Html() active = fields.Boolean(default=True, help="If the active field is set to False, it will allow you to hide the project without removing it.") sequence = fields.Integer(default=10, help="Gives the sequence order when displaying a list of Projects.") partner_id = fields.Many2one('res.partner', string='Customer', auto_join=True, tracking=True, domain="['|', ('company_id', '=', False), ('company_id', '=', company_id)]") partner_email = fields.Char( compute='_compute_partner_email', inverse='_inverse_partner_email', string='Email', readonly=False, store=True, copy=False) partner_phone = fields.Char( compute='_compute_partner_phone', inverse='_inverse_partner_phone', string="Phone", readonly=False, store=True, copy=False) company_id = fields.Many2one('res.company', string='Company', required=True, default=lambda self: self.env.company) currency_id = fields.Many2one('res.currency', related="company_id.currency_id", string="Currency", readonly=True) analytic_account_id = fields.Many2one('account.analytic.account', string="Analytic Account", copy=False, ondelete='set null', domain="['|', ('company_id', '=', False), ('company_id', '=', company_id)]", check_company=True, help="Analytic account to which this project is linked for financial management. " "Use an analytic account to record cost and revenue on your project.") favorite_user_ids = fields.Many2many( 'res.users', 'project_favorite_user_rel', 'project_id', 'user_id', default=_get_default_favorite_user_ids, string='Members') is_favorite = fields.Boolean(compute='_compute_is_favorite', inverse='_inverse_is_favorite', string='Show Project on dashboard', help="Whether this project should be displayed on your dashboard.") label_tasks = fields.Char(string='Use Tasks as', default='Tasks', help="Label used for the tasks of the project.", translate=True) tasks = fields.One2many('project.task', 'project_id', string="Task Activities") resource_calendar_id = fields.Many2one( 'resource.calendar', string='Working Time', related='company_id.resource_calendar_id') type_ids = fields.Many2many('project.task.type', 'project_task_type_rel', 'project_id', 'type_id', string='Tasks Stages') task_count = fields.Integer(compute='_compute_task_count', string="Task Count") task_ids = fields.One2many('project.task', 'project_id', string='Tasks', domain=['|', ('stage_id.fold', '=', False), ('stage_id', '=', False)]) color = fields.Integer(string='Color Index') user_id = fields.Many2one('res.users', string='Project Manager', default=lambda self: self.env.user, tracking=True) alias_enabled = fields.Boolean(string='Use email alias', compute='_compute_alias_enabled', readonly=False) alias_id = fields.Many2one('mail.alias', string='Alias', ondelete="restrict", required=True, help="Internal email associated with this project. 
Incoming emails are automatically synchronized " "with Tasks (or optionally Issues if the Issue Tracker module is installed).") privacy_visibility = fields.Selection([ ('followers', 'Invited internal users'), ('employees', 'All internal users'), ('portal', 'Invited portal users and all internal users'), ], string='Visibility', required=True, default='portal', help="Defines the visibility of the tasks of the project:\n" "- Invited internal users: employees may only see the followed project and tasks.\n" "- All internal users: employees may see all project and tasks.\n" "- Invited portal and all internal users: employees may see everything." " Portal users may see project and tasks followed by\n" " them or by someone of their company.") allowed_user_ids = fields.Many2many('res.users', compute='_compute_allowed_users', inverse='_inverse_allowed_user') allowed_internal_user_ids = fields.Many2many('res.users', 'project_allowed_internal_users_rel', string="Allowed Internal Users", default=lambda self: self.env.user, domain=[('share', '=', False)]) allowed_portal_user_ids = fields.Many2many('res.users', 'project_allowed_portal_users_rel', string="Allowed Portal Users", domain=[('share', '=', True)]) doc_count = fields.Integer(compute='_compute_attached_docs_count', string="Number of documents attached") date_start = fields.Date(string='Start Date') date = fields.Date(string='Expiration Date', index=True, tracking=True) subtask_project_id = fields.Many2one('project.project', string='Sub-task Project', ondelete="restrict", help="Project in which sub-tasks of the current project will be created. It can be the current project itself.") allow_subtasks = fields.Boolean('Sub-tasks', default=lambda self: self.env.user.has_group('project.group_subtask_project')) allow_recurring_tasks = fields.Boolean('Recurring Tasks', default=lambda self: self.env.user.has_group('project.group_project_recurring_tasks')) # rating fields rating_request_deadline = fields.Datetime(compute='_compute_rating_request_deadline', store=True) rating_active = fields.Boolean('Customer Ratings', default=lambda self: self.env.user.has_group('project.group_project_rating')) rating_status = fields.Selection( [('stage', 'Rating when changing stage'), ('periodic', 'Periodical Rating') ], 'Customer Ratings Status', default="stage", required=True, help="How to get customer feedback?\n" "- Rating when changing stage: an email will be sent when a task is pulled in another stage.\n" "- Periodical Rating: email will be sent periodically.\n\n" "Don't forget to set up the mail templates on the stages for which you want to get the customer's feedbacks.") rating_status_period = fields.Selection([ ('daily', 'Daily'), ('weekly', 'Weekly'), ('bimonthly', 'Twice a Month'), ('monthly', 'Once a Month'), ('quarterly', 'Quarterly'), ('yearly', 'Yearly')], 'Rating Frequency', required=True, default='monthly') _sql_constraints = [ ('project_date_greater', 'check(date >= date_start)', 'Error! 
project start-date must be lower than project end-date.') ] @api.depends('partner_id.email') def _compute_partner_email(self): for project in self: if project.partner_id and project.partner_id.email != project.partner_email: project.partner_email = project.partner_id.email def _inverse_partner_email(self): for project in self: if project.partner_id and project.partner_email != project.partner_id.email: project.partner_id.email = project.partner_email @api.depends('partner_id.phone') def _compute_partner_phone(self): for project in self: if project.partner_id and project.partner_phone != project.partner_id.phone: project.partner_phone = project.partner_id.phone def _inverse_partner_phone(self): for project in self: if project.partner_id and project.partner_phone != project.partner_id.phone: project.partner_id.phone = project.partner_phone @api.onchange('alias_enabled') def _onchange_alias_name(self): if not self.alias_enabled: self.alias_name = False def _compute_alias_enabled(self): for project in self: project.alias_enabled = project.alias_domain and project.alias_id.alias_name @api.depends('allowed_internal_user_ids', 'allowed_portal_user_ids') def _compute_allowed_users(self): for project in self: users = project.allowed_internal_user_ids | project.allowed_portal_user_ids project.allowed_user_ids = users def _inverse_allowed_user(self): for project in self: allowed_users = project.allowed_user_ids project.allowed_portal_user_ids = allowed_users.filtered('share') project.allowed_internal_user_ids = allowed_users - project.allowed_portal_user_ids def _compute_access_url(self): super(Project, self)._compute_access_url() for project in self: project.access_url = '/my/project/%s' % project.id def _compute_access_warning(self): super(Project, self)._compute_access_warning() for project in self.filtered(lambda x: x.privacy_visibility != 'portal'): project.access_warning = _( "The project cannot be shared with the recipient(s) because the privacy of the project is too restricted. 
Set the privacy to 'Visible by following customers' in order to make it accessible by the recipient(s).") @api.depends('rating_status', 'rating_status_period') def _compute_rating_request_deadline(self): periods = {'daily': 1, 'weekly': 7, 'bimonthly': 15, 'monthly': 30, 'quarterly': 90, 'yearly': 365} for project in self: project.rating_request_deadline = fields.datetime.now() + timedelta(days=periods.get(project.rating_status_period, 0)) @api.model def _map_tasks_default_valeus(self, task, project): """ get the default value for the copied task on project duplication """ return { 'stage_id': task.stage_id.id, 'name': task.name, 'company_id': project.company_id.id, } def map_tasks(self, new_project_id): """ copy and map tasks from old to new project """ project = self.browse(new_project_id) tasks = self.env['project.task'] # We want to copy archived task, but do not propagate an active_test context key task_ids = self.env['project.task'].with_context(active_test=False).search([('project_id', '=', self.id)], order='parent_id').ids old_to_new_tasks = {} for task in self.env['project.task'].browse(task_ids): # preserve task name and stage, normally altered during copy defaults = self._map_tasks_default_valeus(task, project) if task.parent_id: # set the parent to the duplicated task defaults['parent_id'] = old_to_new_tasks.get(task.parent_id.id, False) new_task = task.copy(defaults) old_to_new_tasks[task.id] = new_task.id tasks += new_task return project.write({'tasks': [(6, 0, tasks.ids)]}) @api.returns('self', lambda value: value.id) def copy(self, default=None): if default is None: default = {} if not default.get('name'): default['name'] = _("%s (copy)") % (self.name) project = super(Project, self).copy(default) if self.subtask_project_id == self: project.subtask_project_id = project for follower in self.message_follower_ids: project.message_subscribe(partner_ids=follower.partner_id.ids, subtype_ids=follower.subtype_ids.ids) if 'tasks' not in default: self.map_tasks(project.id) return project @api.model def create(self, vals): # Prevent double project creation self = self.with_context(mail_create_nosubscribe=True) project = super(Project, self).create(vals) if not vals.get('subtask_project_id'): project.subtask_project_id = project.id if project.privacy_visibility == 'portal' and project.partner_id.user_ids: project.allowed_user_ids |= project.partner_id.user_ids return project def write(self, vals): allowed_users_changed = 'allowed_portal_user_ids' in vals or 'allowed_internal_user_ids' in vals if allowed_users_changed: allowed_users = {project: project.allowed_user_ids for project in self} # directly compute is_favorite to dodge allow write access right if 'is_favorite' in vals: vals.pop('is_favorite') self._fields['is_favorite'].determine_inverse(self) res = super(Project, self).write(vals) if vals else True if allowed_users_changed: for project in self: permission_removed = allowed_users.get(project) - project.allowed_user_ids allowed_portal_users_removed = permission_removed.filtered('share') project.message_unsubscribe(allowed_portal_users_removed.partner_id.commercial_partner_id.ids) for task in project.task_ids: task.allowed_user_ids -= permission_removed if 'allow_recurring_tasks' in vals and not vals.get('allow_recurring_tasks'): self.env['project.task'].search([('project_id', 'in', self.ids), ('recurring_task', '=', True)]).write({'recurring_task': False}) if 'active' in vals: # archiving/unarchiving a project does it on its tasks, too 
self.with_context(active_test=False).mapped('tasks').write({'active': vals['active']}) if vals.get('partner_id') or vals.get('privacy_visibility'): for project in self.filtered(lambda project: project.privacy_visibility == 'portal'): project.allowed_user_ids |= project.partner_id.user_ids return res def action_unlink(self): wizard = self.env['project.delete.wizard'].create({ 'project_ids': self.ids }) return { 'name': _('Confirmation'), 'view_mode': 'form', 'res_model': 'project.delete.wizard', 'views': [(self.env.ref('project.project_delete_wizard_form').id, 'form')], 'type': 'ir.actions.act_window', 'res_id': wizard.id, 'target': 'new', 'context': self.env.context, } def unlink(self): # Check project is empty for project in self.with_context(active_test=False): if project.tasks: raise UserError(_('You cannot delete a project containing tasks. You can either archive it or first delete all of its tasks.')) # Delete the empty related analytic account analytic_accounts_to_delete = self.env['account.analytic.account'] for project in self: if project.analytic_account_id and not project.analytic_account_id.line_ids: analytic_accounts_to_delete |= project.analytic_account_id result = super(Project, self).unlink() analytic_accounts_to_delete.unlink() return result def message_subscribe(self, partner_ids=None, channel_ids=None, subtype_ids=None): """ Subscribe to all existing active tasks when subscribing to a project And add the portal user subscribed to allowed portal users """ res = super(Project, self).message_subscribe(partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=subtype_ids) project_subtypes = self.env['mail.message.subtype'].browse(subtype_ids) if subtype_ids else None task_subtypes = (project_subtypes.mapped('parent_id') | project_subtypes.filtered(lambda sub: sub.internal or sub.default)).ids if project_subtypes else None if not subtype_ids or task_subtypes: self.mapped('tasks').message_subscribe( partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=task_subtypes) if partner_ids: all_users = self.env['res.partner'].browse(partner_ids).user_ids portal_users = all_users.filtered('share') internal_users = all_users - portal_users self.allowed_portal_user_ids |= portal_users self.allowed_internal_user_ids |= internal_users return res def message_unsubscribe(self, partner_ids=None, channel_ids=None): """ Unsubscribe from all tasks when unsubscribing from a project """ self.mapped('tasks').message_unsubscribe(partner_ids=partner_ids, channel_ids=channel_ids) return super(Project, self).message_unsubscribe(partner_ids=partner_ids, channel_ids=channel_ids) def _alias_get_creation_values(self): values = super(Project, self)._alias_get_creation_values() values['alias_model_id'] = self.env['ir.model']._get('project.task').id if self.id: values['alias_defaults'] = defaults = ast.literal_eval(self.alias_defaults or "{}") defaults['project_id'] = self.id return values # --------------------------------------------------- # Actions # --------------------------------------------------- def toggle_favorite(self): favorite_projects = not_fav_projects = self.env['project.project'].sudo() for project in self: if self.env.user in project.favorite_user_ids: favorite_projects |= project else: not_fav_projects |= project # Project User has no write access for project. 
not_fav_projects.write({'favorite_user_ids': [(4, self.env.uid)]}) favorite_projects.write({'favorite_user_ids': [(3, self.env.uid)]}) def action_view_tasks(self): action = self.with_context(active_id=self.id, active_ids=self.ids) \ .env.ref('project.act_project_project_2_project_task_all') \ .sudo().read()[0] action['display_name'] = self.name return action def action_view_account_analytic_line(self): """ return the action to see all the analytic lines of the project's analytic account """ action = self.env["ir.actions.actions"]._for_xml_id("analytic.account_analytic_line_action") action['context'] = {'default_account_id': self.analytic_account_id.id} action['domain'] = [('account_id', '=', self.analytic_account_id.id)] return action def action_view_all_rating(self): """ return the action to see all the rating of the project and activate default filters""" action = self.env['ir.actions.act_window']._for_xml_id('project.rating_rating_action_view_project_rating') action['name'] = _('Ratings of %s') % (self.name,) action_context = ast.literal_eval(action['context']) if action['context'] else {} action_context.update(self._context) action_context['search_default_parent_res_name'] = self.name action_context.pop('group_by', None) return dict(action, context=action_context) # --------------------------------------------------- # Business Methods # --------------------------------------------------- @api.model def _create_analytic_account_from_values(self, values): analytic_account = self.env['account.analytic.account'].create({ 'name': values.get('name', _('Unknown Analytic Account')), 'company_id': values.get('company_id') or self.env.company.id, 'partner_id': values.get('partner_id'), 'active': True, }) return analytic_account def _create_analytic_account(self): for project in self: analytic_account = self.env['account.analytic.account'].create({ 'name': project.name, 'company_id': project.company_id.id, 'partner_id': project.partner_id.id, 'active': True, }) project.write({'analytic_account_id': analytic_account.id}) # --------------------------------------------------- # Rating business # --------------------------------------------------- # This method should be called once a day by the scheduler @api.model def _send_rating_all(self): projects = self.search([ ('rating_active', '=', True), ('rating_status', '=', 'periodic'), ('rating_request_deadline', '<=', fields.Datetime.now()) ]) for project in projects: project.task_ids._send_task_rating_mail() project._compute_rating_request_deadline() self.env.cr.commit() class Task(models.Model): _name = "project.task" _description = "Task" _date_name = "date_assign" _inherit = ['portal.mixin', 'mail.thread.cc', 'mail.activity.mixin', 'rating.mixin'] _mail_post_access = 'read' _order = "priority desc, sequence, id desc" _check_company_auto = True def _get_default_stage_id(self): """ Gives default stage_id """ project_id = self.env.context.get('default_project_id') if not project_id: return False return self.stage_find(project_id, [('fold', '=', False), ('is_closed', '=', False)]) @api.model def _default_company_id(self): if self._context.get('default_project_id'): return self.env['project.project'].browse(self._context['default_project_id']).company_id return self.env.company @api.model def _read_group_stage_ids(self, stages, domain, order): search_domain = [('id', 'in', stages.ids)] if 'default_project_id' in self.env.context: search_domain = ['|', ('project_ids', '=', self.env.context['default_project_id'])] + search_domain stage_ids = 
stages._search(search_domain, order=order, access_rights_uid=SUPERUSER_ID) return stages.browse(stage_ids) active = fields.Boolean(default=True) name = fields.Char(string='Title', tracking=True, required=True, index=True) description = fields.Html(string='Description') priority = fields.Selection([ ('0', 'Normal'), ('1', 'Important'), ], default='0', index=True, string="Priority") sequence = fields.Integer(string='Sequence', index=True, default=10, help="Gives the sequence order when displaying a list of tasks.") stage_id = fields.Many2one('project.task.type', string='Stage', compute='_compute_stage_id', store=True, readonly=False, ondelete='restrict', tracking=True, index=True, default=_get_default_stage_id, group_expand='_read_group_stage_ids', domain="[('project_ids', '=', project_id)]", copy=False) tag_ids = fields.Many2many('project.tags', string='Tags') kanban_state = fields.Selection([ ('normal', 'In Progress'), ('done', 'Ready'), ('blocked', 'Blocked')], string='Kanban State', copy=False, default='normal', required=True) kanban_state_label = fields.Char(compute='_compute_kanban_state_label', string='Kanban State Label', tracking=True) create_date = fields.Datetime("Created On", readonly=True, index=True) write_date = fields.Datetime("Last Updated On", readonly=True, index=True) date_end = fields.Datetime(string='Ending Date', index=True, copy=False) date_assign = fields.Datetime(string='Assigning Date', index=True, copy=False, readonly=True) date_deadline = fields.Date(string='Deadline', index=True, copy=False, tracking=True) date_last_stage_update = fields.Datetime(string='Last Stage Update', index=True, copy=False, readonly=True) project_id = fields.Many2one('project.project', string='Project', compute='_compute_project_id', store=True, readonly=False, index=True, tracking=True, check_company=True, change_default=True) planned_hours = fields.Float("Initially Planned Hours", help='Time planned to achieve this task (including its sub-tasks).', tracking=True) subtask_planned_hours = fields.Float("Sub-tasks Planned Hours", compute='_compute_subtask_planned_hours', help="Sum of the time planned of all the sub-tasks linked to this task. 
Usually less or equal to the initially time planned of this task.") user_id = fields.Many2one('res.users', string='Assigned to', default=lambda self: self.env.uid, index=True, tracking=True) partner_id = fields.Many2one('res.partner', string='Customer', compute='_compute_partner_id', store=True, readonly=False, domain="['|', ('company_id', '=', False), ('company_id', '=', company_id)]") partner_is_company = fields.Boolean(related='partner_id.is_company', readonly=True) commercial_partner_id = fields.Many2one(related='partner_id.commercial_partner_id') partner_email = fields.Char( compute='_compute_partner_email', inverse='_inverse_partner_email', string='Email', readonly=False, store=True, copy=False) partner_phone = fields.Char( compute='_compute_partner_phone', inverse='_inverse_partner_phone', string="Phone", readonly=False, store=True, copy=False) ribbon_message = fields.Char('Ribbon message', compute='_compute_ribbon_message') partner_city = fields.Char(related='partner_id.city', readonly=False) manager_id = fields.Many2one('res.users', string='Project Manager', related='project_id.user_id', readonly=True) company_id = fields.Many2one( 'res.company', string='Company', compute='_compute_company_id', store=True, readonly=False, required=True, copy=True, default=_default_company_id) color = fields.Integer(string='Color Index') user_email = fields.Char(related='user_id.email', string='User Email', readonly=True, related_sudo=False) attachment_ids = fields.One2many('ir.attachment', compute='_compute_attachment_ids', string="Main Attachments", help="Attachment that don't come from message.") # In the domain of displayed_image_id, we couln't use attachment_ids because a one2many is represented as a list of commands so we used res_model & res_id displayed_image_id = fields.Many2one('ir.attachment', domain="[('res_model', '=', 'project.task'), ('res_id', '=', id), ('mimetype', 'ilike', 'image')]", string='Cover Image') legend_blocked = fields.Char(related='stage_id.legend_blocked', string='Kanban Blocked Explanation', readonly=True, related_sudo=False) legend_done = fields.Char(related='stage_id.legend_done', string='Kanban Valid Explanation', readonly=True, related_sudo=False) legend_normal = fields.Char(related='stage_id.legend_normal', string='Kanban Ongoing Explanation', readonly=True, related_sudo=False) is_closed = fields.Boolean(related="stage_id.is_closed", string="Closing Stage", readonly=True, related_sudo=False) parent_id = fields.Many2one('project.task', string='Parent Task', index=True) child_ids = fields.One2many('project.task', 'parent_id', string="Sub-tasks", context={'active_test': False}) subtask_project_id = fields.Many2one('project.project', related="project_id.subtask_project_id", string='Sub-task Project', readonly=True) allow_subtasks = fields.Boolean(string="Allow Sub-tasks", related="project_id.allow_subtasks", readonly=True) subtask_count = fields.Integer("Sub-task count", compute='_compute_subtask_count') email_from = fields.Char(string='Email From', help="These people will receive email.", index=True, compute='_compute_email_from', store="True", readonly=False) allowed_user_ids = fields.Many2many('res.users', string="Visible to", groups='project.group_project_manager', compute='_compute_allowed_user_ids', store=True, readonly=False, copy=False) project_privacy_visibility = fields.Selection(related='project_id.privacy_visibility', string="Project Visibility") # Computed field about working time elapsed between record creation and assignation/closing. 
working_hours_open = fields.Float(compute='_compute_elapsed', string='Working hours to assign', store=True, group_operator="avg") working_hours_close = fields.Float(compute='_compute_elapsed', string='Working hours to close', store=True, group_operator="avg") working_days_open = fields.Float(compute='_compute_elapsed', string='Working days to assign', store=True, group_operator="avg") working_days_close = fields.Float(compute='_compute_elapsed', string='Working days to close', store=True, group_operator="avg") # customer portal: include comment and incoming emails in communication history website_message_ids = fields.One2many(domain=lambda self: [('model', '=', self._name), ('message_type', 'in', ['email', 'comment'])]) # recurrence fields allow_recurring_tasks = fields.Boolean(related='project_id.allow_recurring_tasks') recurring_task = fields.Boolean(string="Recurrent") recurring_count = fields.Integer(string="Tasks in Recurrence", compute='_compute_recurring_count') recurrence_id = fields.Many2one('project.task.recurrence', copy=False) recurrence_update = fields.Selection([ ('this', 'This task'), ('subsequent', 'This and following tasks'), ('all', 'All tasks'), ], default='this', store=False) recurrence_message = fields.Char(string='Next Recurrencies', compute='_compute_recurrence_message') repeat_interval = fields.Integer(string='Repeat Every', default=1, compute='_compute_repeat', readonly=False) repeat_unit = fields.Selection([ ('day', 'Days'), ('week', 'Weeks'), ('month', 'Months'), ('year', 'Years'), ], default='week', compute='_compute_repeat', readonly=False) repeat_type = fields.Selection([ ('forever', 'Forever'), ('until', 'End Date'), ('after', 'Number of Repetitions'), ], default="forever", string="Until", compute='_compute_repeat', readonly=False) repeat_until = fields.Date(string="End Date", compute='_compute_repeat', readonly=False) repeat_number = fields.Integer(string="Repetitions", default=1, compute='_compute_repeat', readonly=False) repeat_on_month = fields.Selection([ ('date', 'Date of the Month'), ('day', 'Day of the Month'), ], default='date', compute='_compute_repeat', readonly=False) repeat_on_year = fields.Selection([ ('date', 'Date of the Year'), ('day', 'Day of the Year'), ], default='date', compute='_compute_repeat', readonly=False) mon = fields.Boolean(string="Mon", compute='_compute_repeat', readonly=False) tue = fields.Boolean(string="Tue", compute='_compute_repeat', readonly=False) wed = fields.Boolean(string="Wed", compute='_compute_repeat', readonly=False) thu = fields.Boolean(string="Thu", compute='_compute_repeat', readonly=False) fri = fields.Boolean(string="Fri", compute='_compute_repeat', readonly=False) sat = fields.Boolean(string="Sat", compute='_compute_repeat', readonly=False) sun = fields.Boolean(string="Sun", compute='_compute_repeat', readonly=False) repeat_day = fields.Selection([ (str(i), str(i)) for i in range(1, 32) ], compute='_compute_repeat', readonly=False) repeat_week = fields.Selection([ ('first', 'First'), ('second', 'Second'), ('third', 'Third'), ('last', 'Last'), ], default='first', compute='_compute_repeat', readonly=False) repeat_weekday = fields.Selection([ ('mon', 'Monday'), ('tue', 'Tuesday'), ('wed', 'Wednesday'), ('thu', 'Thursday'), ('fri', 'Friday'), ('sat', 'Saturday'), ('sun', 'Sunday'), ], string='Day Of The Week', compute='_compute_repeat', readonly=False) repeat_month = fields.Selection([ ('january', 'January'), ('february', 'February'), ('march', 'March'), ('april', 'April'), ('may', 'May'), ('june', 'June'), 
('july', 'July'), ('august', 'August'), ('september', 'September'), ('october', 'October'), ('november', 'November'), ('december', 'December'), ], compute='_compute_repeat', readonly=False) repeat_show_dow = fields.Boolean(compute='_compute_repeat_visibility') repeat_show_day = fields.Boolean(compute='_compute_repeat_visibility') repeat_show_week = fields.Boolean(compute='_compute_repeat_visibility') repeat_show_month = fields.Boolean(compute='_compute_repeat_visibility') @api.model def _get_recurrence_fields(self): return ['repeat_interval', 'repeat_unit', 'repeat_type', 'repeat_until', 'repeat_number', 'repeat_on_month', 'repeat_on_year', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun', 'repeat_day', 'repeat_week', 'repeat_month', 'repeat_weekday'] @api.depends('recurring_task', 'repeat_unit', 'repeat_on_month', 'repeat_on_year') def _compute_repeat_visibility(self): for task in self: task.repeat_show_day = task.recurring_task and (task.repeat_unit == 'month' and task.repeat_on_month == 'date') or (task.repeat_unit == 'year' and task.repeat_on_year == 'date') task.repeat_show_week = task.recurring_task and (task.repeat_unit == 'month' and task.repeat_on_month == 'day') or (task.repeat_unit == 'year' and task.repeat_on_year == 'day') task.repeat_show_dow = task.recurring_task and task.repeat_unit == 'week' task.repeat_show_month = task.recurring_task and task.repeat_unit == 'year' @api.depends('recurring_task') def _compute_repeat(self): rec_fields = self._get_recurrence_fields() defaults = self.default_get(rec_fields) for task in self: for f in rec_fields: if task.recurrence_id: task[f] = task.recurrence_id[f] else: if task.recurring_task: task[f] = defaults.get(f) else: task[f] = False def _get_weekdays(self, n=1): self.ensure_one() if self.repeat_unit == 'week': return [fn(n) for day, fn in DAYS.items() if self[day]] return [DAYS.get(self.repeat_weekday)(n)] @api.depends( 'recurring_task', 'repeat_interval', 'repeat_unit', 'repeat_type', 'repeat_until', 'repeat_number', 'repeat_on_month', 'repeat_on_year', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun', 'repeat_day', 'repeat_week', 'repeat_month', 'repeat_weekday') def _compute_recurrence_message(self): self.recurrence_message = False for task in self.filtered(lambda t: t.recurring_task and t._is_recurrence_valid()): date = fields.Date.today() number_occurrences = min(5, task.repeat_number if task.repeat_type == 'after' else 5) delta = task.repeat_interval if task.repeat_unit == 'day' else 1 recurring_dates = self.env['project.task.recurrence']._get_next_recurring_dates( date + timedelta(days=delta), task.repeat_interval, task.repeat_unit, task.repeat_type, task.repeat_until, task.repeat_on_month, task.repeat_on_year, task._get_weekdays(WEEKS.get(task.repeat_week)), task.repeat_day, task.repeat_week, task.repeat_month, count=number_occurrences) date_format = self.env['res.lang']._lang_get(self.env.user.lang).date_format task.recurrence_message = '<ul>' for date in recurring_dates[:5]: task.recurrence_message += '<li>%s</li>' % date.strftime(date_format) if task.repeat_type == 'after' and task.repeat_number > 5 or task.repeat_type == 'forever' or len(recurring_dates) > 5: task.recurrence_message += '<li>...</li>' task.recurrence_message += '</ul>' if task.repeat_type == 'until': task.recurrence_message += _('<p><em>Number of tasks: %(tasks_count)s</em></p>') % {'tasks_count': len(recurring_dates)} def _is_recurrence_valid(self): self.ensure_one() return self.repeat_interval > 0 and\ (not self.repeat_show_dow or self._get_weekdays()) 
and\ (self.repeat_type != 'after' or self.repeat_number) and\ (self.repeat_type != 'until' or self.repeat_until and self.repeat_until > fields.Date.today()) @api.depends('recurrence_id') def _compute_recurring_count(self): self.recurring_count = 0 recurring_tasks = self.filtered(lambda l: l.recurrence_id) count = self.env['project.task'].read_group([('recurrence_id', 'in', recurring_tasks.recurrence_id.ids)], ['id'], 'recurrence_id') tasks_count = {c.get('recurrence_id')[0]: c.get('recurrence_id_count') for c in count} for task in recurring_tasks: task.recurring_count = tasks_count.get(task.recurrence_id.id, 0) @api.depends('partner_id.email') def _compute_partner_email(self): for task in self: if task.partner_id and task.partner_id.email != task.partner_email: task.partner_email = task.partner_id.email def _inverse_partner_email(self): for task in self: if task.partner_id and task.partner_email != task.partner_id.email: task.partner_id.email = task.partner_email @api.depends('partner_id.phone') def _compute_partner_phone(self): for task in self: if task.partner_id and task.partner_phone != task.partner_id.phone: task.partner_phone = task.partner_id.phone def _inverse_partner_phone(self): for task in self: if task.partner_id and task.partner_phone != task.partner_id.phone: task.partner_id.phone = task.partner_phone @api.depends('partner_email', 'partner_phone', 'partner_id') def _compute_ribbon_message(self): for task in self: will_write_email = task.partner_id and task.partner_email != task.partner_id.email will_write_phone = task.partner_id and task.partner_phone != task.partner_id.phone if will_write_email and will_write_phone: task.ribbon_message = _('By saving this change, the customer email and phone number will also be updated.') elif will_write_email: task.ribbon_message = _('By saving this change, the customer email will also be updated.') elif will_write_phone: task.ribbon_message = _('By saving this change, the customer phone number will also be updated.') else: task.ribbon_message = False @api.constrains('parent_id') def _check_parent_id(self): if not self._check_recursion(): raise ValidationError(_('Error! You cannot create recursive hierarchy of tasks.')) @api.constrains('allowed_user_ids') def _check_no_portal_allowed(self): for task in self.filtered(lambda t: t.project_id.privacy_visibility != 'portal'): portal_users = task.allowed_user_ids.filtered('share') if portal_users: user_names = ', '.join(portal_users[:10].mapped('name')) raise ValidationError(_("The project visibility setting doesn't allow portal users to see the project's tasks. 
(%s)", user_names)) def _compute_attachment_ids(self): for task in self: attachment_ids = self.env['ir.attachment'].search([('res_id', '=', task.id), ('res_model', '=', 'project.task')]).ids message_attachment_ids = task.mapped('message_ids.attachment_ids').ids # from mail_thread task.attachment_ids = [(6, 0, list(set(attachment_ids) - set(message_attachment_ids)))] @api.depends('project_id.allowed_user_ids', 'project_id.privacy_visibility') def _compute_allowed_user_ids(self): for task in self: portal_users = task.allowed_user_ids.filtered('share') internal_users = task.allowed_user_ids - portal_users if task.project_id.privacy_visibility == 'followers': task.allowed_user_ids |= task.project_id.allowed_internal_user_ids task.allowed_user_ids -= portal_users elif task.project_id.privacy_visibility == 'portal': task.allowed_user_ids |= task.project_id.allowed_portal_user_ids if task.project_id.privacy_visibility != 'portal': task.allowed_user_ids -= portal_users elif task.project_id.privacy_visibility != 'followers': task.allowed_user_ids -= internal_users @api.depends('create_date', 'date_end', 'date_assign') def _compute_elapsed(self): task_linked_to_calendar = self.filtered( lambda task: task.project_id.resource_calendar_id and task.create_date ) for task in task_linked_to_calendar: dt_create_date = fields.Datetime.from_string(task.create_date) if task.date_assign: dt_date_assign = fields.Datetime.from_string(task.date_assign) duration_data = task.project_id.resource_calendar_id.get_work_duration_data(dt_create_date, dt_date_assign, compute_leaves=True) task.working_hours_open = duration_data['hours'] task.working_days_open = duration_data['days'] else: task.working_hours_open = 0.0 task.working_days_open = 0.0 if task.date_end: dt_date_end = fields.Datetime.from_string(task.date_end) duration_data = task.project_id.resource_calendar_id.get_work_duration_data(dt_create_date, dt_date_end, compute_leaves=True) task.working_hours_close = duration_data['hours'] task.working_days_close = duration_data['days'] else: task.working_hours_close = 0.0 task.working_days_close = 0.0 (self - task_linked_to_calendar).update(dict.fromkeys( ['working_hours_open', 'working_hours_close', 'working_days_open', 'working_days_close'], 0.0)) @api.depends('stage_id', 'kanban_state') def _compute_kanban_state_label(self): for task in self: if task.kanban_state == 'normal': task.kanban_state_label = task.legend_normal elif task.kanban_state == 'blocked': task.kanban_state_label = task.legend_blocked else: task.kanban_state_label = task.legend_done def _compute_access_url(self): super(Task, self)._compute_access_url() for task in self: task.access_url = '/my/task/%s' % task.id def _compute_access_warning(self): super(Task, self)._compute_access_warning() for task in self.filtered(lambda x: x.project_id.privacy_visibility != 'portal'): task.access_warning = _( "The task cannot be shared with the recipient(s) because the privacy of the project is too restricted. 
Set the privacy of the project to 'Visible by following customers' in order to make it accessible by the recipient(s).") @api.depends('child_ids.planned_hours') def _compute_subtask_planned_hours(self): for task in self: task.subtask_planned_hours = sum(child_task.planned_hours + child_task.subtask_planned_hours for child_task in task.child_ids) @api.depends('child_ids') def _compute_subtask_count(self): for task in self: task.subtask_count = len(task._get_all_subtasks()) @api.onchange('company_id') def _onchange_task_company(self): if self.project_id.company_id != self.company_id: self.project_id = False @api.depends('project_id.company_id') def _compute_company_id(self): for task in self.filtered(lambda task: task.project_id): task.company_id = task.project_id.company_id @api.depends('project_id') def _compute_stage_id(self): for task in self: if task.project_id: if task.project_id not in task.stage_id.project_ids: task.stage_id = task.stage_find(task.project_id.id, [ ('fold', '=', False), ('is_closed', '=', False)]) else: task.stage_id = False @api.returns('self', lambda value: value.id) def copy(self, default=None): if default is None: default = {} if not default.get('name'): default['name'] = _("%s (copy)", self.name) if self.recurrence_id: default['recurrence_id'] = self.recurrence_id.copy().id return super(Task, self).copy(default) @api.constrains('parent_id') def _check_parent_id(self): for task in self: if not task._check_recursion(): raise ValidationError(_('Error! You cannot create recursive hierarchy of task(s).')) @api.model def get_empty_list_help(self, help): tname = _("task") project_id = self.env.context.get('default_project_id', False) if project_id: name = self.env['project.project'].browse(project_id).label_tasks if name: tname = name.lower() self = self.with_context( empty_list_help_id=self.env.context.get('default_project_id'), empty_list_help_model='project.project', empty_list_help_document_name=tname, ) return super(Task, self).get_empty_list_help(help) def message_subscribe(self, partner_ids=None, channel_ids=None, subtype_ids=None): """ Add the users subscribed to allowed portal users """ res = super(Task, self).message_subscribe(partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=subtype_ids) if partner_ids: new_allowed_users = self.env['res.partner'].browse(partner_ids).user_ids.filtered('share') tasks = self.filtered(lambda task: task.project_id.privacy_visibility == 'portal') tasks.sudo().write({'allowed_user_ids': [(4, user.id) for user in new_allowed_users]}) return res # ---------------------------------------- # Case management # ---------------------------------------- def stage_find(self, section_id, domain=[], order='sequence'): """ Override of the base.stage method Parameter of the stage search taken from the lead: - section_id: if set, stages must belong to this section or be a default stage; if not set, stages must be default stages """ # collect all section_ids section_ids = [] if section_id: section_ids.append(section_id) section_ids.extend(self.mapped('project_id').ids) search_domain = [] if section_ids: search_domain = [('|')] * (len(section_ids) - 1) for section_id in section_ids: search_domain.append(('project_ids', '=', section_id)) search_domain += list(domain) # perform search, return the first found return self.env['project.task.type'].search(search_domain, order=order, limit=1).id # ------------------------------------------------ # CRUD overrides # ------------------------------------------------ @api.model def 
default_get(self, default_fields): vals = super(Task, self).default_get(default_fields) days = list(DAYS.keys()) week_start = fields.Datetime.today().weekday() if all(d in default_fields for d in days): vals[days[week_start]] = True if 'repeat_day' in default_fields: vals['repeat_day'] = str(fields.Datetime.today().day) if 'repeat_month' in default_fields: vals['repeat_month'] = self._fields.get('repeat_month').selection[fields.Datetime.today().month - 1][0] if 'repeat_until' in default_fields: vals['repeat_until'] = fields.Date.today() + timedelta(days=7) if 'repeat_weekday' in default_fields: vals['repeat_weekday'] = self._fields.get('repeat_weekday').selection[week_start][0] return vals @api.model_create_multi def create(self, vals_list): default_stage = dict() for vals in vals_list: project_id = vals.get('project_id') or self.env.context.get('default_project_id') if project_id and not "company_id" in vals: vals["company_id"] = self.env["project.project"].browse( project_id ).company_id.id or self.env.company.id if project_id and "stage_id" not in vals: # 1) Allows keeping the batch creation of tasks # 2) Ensure the defaults are correct (and computed once by project), # by using default get (instead of _get_default_stage_id or _stage_find), if project_id not in default_stage: default_stage[project_id] = self.with_context( default_project_id=project_id ).default_get(['stage_id']).get('stage_id') vals["stage_id"] = default_stage[project_id] # user_id change: update date_assign if vals.get('user_id'): vals['date_assign'] = fields.Datetime.now() # Stage change: Update date_end if folded stage and date_last_stage_update if vals.get('stage_id'): vals.update(self.update_date_end(vals['stage_id'])) vals['date_last_stage_update'] = fields.Datetime.now() # recurrence rec_fields = vals.keys() & self._get_recurrence_fields() if rec_fields and vals.get('recurring_task') is True: rec_values = {rec_field: vals[rec_field] for rec_field in rec_fields} rec_values['next_recurrence_date'] = fields.Datetime.today() recurrence = self.env['project.task.recurrence'].create(rec_values) vals['recurrence_id'] = recurrence.id tasks = super().create(vals_list) for task in tasks: if task.project_id.privacy_visibility == 'portal': task._portal_ensure_token() return tasks def write(self, vals): now = fields.Datetime.now() if 'parent_id' in vals and vals['parent_id'] in self.ids: raise UserError(_("Sorry. You can't set a task as its parent task.")) if 'active' in vals and not vals.get('active') and any(self.mapped('recurrence_id')): # TODO: show a dialog to stop the recurrence raise UserError(_('You cannot archive recurring tasks. 
Please, disable the recurrence first.')) # stage change: update date_last_stage_update if 'stage_id' in vals: vals.update(self.update_date_end(vals['stage_id'])) vals['date_last_stage_update'] = now # reset kanban state when changing stage if 'kanban_state' not in vals: vals['kanban_state'] = 'normal' # user_id change: update date_assign if vals.get('user_id') and 'date_assign' not in vals: vals['date_assign'] = now # recurrence fields rec_fields = vals.keys() & self._get_recurrence_fields() if rec_fields: rec_values = {rec_field: vals[rec_field] for rec_field in rec_fields} for task in self: if task.recurrence_id: task.recurrence_id.write(rec_values) elif vals.get('recurring_task'): rec_values['next_recurrence_date'] = fields.Datetime.today() recurrence = self.env['project.task.recurrence'].create(rec_values) task.recurrence_id = recurrence.id if 'recurring_task' in vals and not vals.get('recurring_task'): self.recurrence_id.unlink() tasks = self recurrence_update = vals.pop('recurrence_update', 'this') if recurrence_update != 'this': recurrence_domain = [] if recurrence_update == 'subsequent': for task in self: recurrence_domain = OR([recurrence_domain, ['&', ('recurrence_id', '=', task.recurrence_id.id), ('create_date', '>=', task.create_date)]]) else: recurrence_domain = [('recurrence_id', 'in', self.recurrence_id.ids)] tasks |= self.env['project.task'].search(recurrence_domain) result = super(Task, tasks).write(vals) # rating on stage if 'stage_id' in vals and vals.get('stage_id'): self.filtered(lambda x: x.project_id.rating_active and x.project_id.rating_status == 'stage')._send_task_rating_mail(force_send=True) return result def update_date_end(self, stage_id): project_task_type = self.env['project.task.type'].browse(stage_id) if project_task_type.fold or project_task_type.is_closed: return {'date_end': fields.Datetime.now()} return {'date_end': False} def unlink(self): if any(self.mapped('recurrence_id')): # TODO: show a dialog to stop the recurrence raise UserError(_('You cannot delete recurring tasks. Please, disable the recurrence first.')) return super().unlink() # --------------------------------------------------- # Subtasks # --------------------------------------------------- @api.depends('parent_id.partner_id', 'project_id.partner_id') def _compute_partner_id(self): """ If a task has no partner_id, use the project partner_id if any, or else the parent task partner_id. Once the task partner_id has been set: 1) if the project partner_id changes, the task partner_id is automatically changed also. 2) if the parent task partner_id changes, the task partner_id remains the same. 
""" for task in self: if task.partner_id: if task.project_id.partner_id: task.partner_id = task.project_id.partner_id else: task.partner_id = task.project_id.partner_id or task.parent_id.partner_id @api.depends('partner_id.email', 'parent_id.email_from') def _compute_email_from(self): for task in self: task.email_from = task.partner_id.email or ((task.partner_id or task.parent_id) and task.email_from) or task.parent_id.email_from @api.depends('parent_id.project_id.subtask_project_id') def _compute_project_id(self): for task in self: if not task.project_id: task.project_id = task.parent_id.project_id.subtask_project_id # --------------------------------------------------- # Mail gateway # --------------------------------------------------- def _track_template(self, changes): res = super(Task, self)._track_template(changes) test_task = self[0] if 'stage_id' in changes and test_task.stage_id.mail_template_id: res['stage_id'] = (test_task.stage_id.mail_template_id, { 'auto_delete_message': True, 'subtype_id': self.env['ir.model.data'].xmlid_to_res_id('mail.mt_note'), 'email_layout_xmlid': 'mail.mail_notification_light' }) return res def _creation_subtype(self): return self.env.ref('project.mt_task_new') def _track_subtype(self, init_values): self.ensure_one() if 'kanban_state_label' in init_values and self.kanban_state == 'blocked': return self.env.ref('project.mt_task_blocked') elif 'kanban_state_label' in init_values and self.kanban_state == 'done': return self.env.ref('project.mt_task_ready') elif 'stage_id' in init_values: return self.env.ref('project.mt_task_stage') return super(Task, self)._track_subtype(init_values) def _notify_get_groups(self, msg_vals=None): """ Handle project users and managers recipients that can assign tasks and create new one directly from notification emails. Also give access button to portal users and portal customers. If they are notified they should probably have access to the document. 
""" groups = super(Task, self)._notify_get_groups(msg_vals=msg_vals) local_msg_vals = dict(msg_vals or {}) self.ensure_one() project_user_group_id = self.env.ref('project.group_project_user').id group_func = lambda pdata: pdata['type'] == 'user' and project_user_group_id in pdata['groups'] if self.project_id.privacy_visibility == 'followers': allowed_user_ids = self.project_id.allowed_internal_user_ids.partner_id.ids group_func = lambda pdata: pdata['type'] == 'user' and project_user_group_id in pdata['groups'] and pdata['id'] in allowed_user_ids new_group = ('group_project_user', group_func, {}) if not self.user_id and not self.stage_id.fold: take_action = self._notify_get_action_link('assign', **local_msg_vals) project_actions = [{'url': take_action, 'title': _('I take it')}] new_group[2]['actions'] = project_actions groups = [new_group] + groups if self.project_id.privacy_visibility == 'portal': allowed_user_ids = self.project_id.allowed_portal_user_ids.partner_id.ids groups.insert(0, ( 'allowed_portal_users', lambda pdata: pdata['type'] == 'portal' and pdata['id'] in allowed_user_ids, {} )) portal_privacy = self.project_id.privacy_visibility == 'portal' for group_name, group_method, group_data in groups: if group_name in ('customer', 'user') or group_name == 'portal_customer' and not portal_privacy: group_data['has_button_access'] = False elif group_name == 'portal_customer' and portal_privacy: group_data['has_button_access'] = True return groups def _notify_get_reply_to(self, default=None, records=None, company=None, doc_names=None): """ Override to set alias of tasks to their project if any. """ aliases = self.sudo().mapped('project_id')._notify_get_reply_to(default=default, records=None, company=company, doc_names=None) res = {task.id: aliases.get(task.project_id.id) for task in self} leftover = self.filtered(lambda rec: not rec.project_id) if leftover: res.update(super(Task, leftover)._notify_get_reply_to(default=default, records=None, company=company, doc_names=doc_names)) return res def email_split(self, msg): email_list = tools.email_split((msg.get('to') or '') + ',' + (msg.get('cc') or '')) # check left-part is not already an alias aliases = self.mapped('project_id.alias_name') return [x for x in email_list if x.split('@')[0] not in aliases] @api.model def message_new(self, msg, custom_values=None): """ Overrides mail_thread message_new that is called by the mailgateway through message_process. This override updates the document according to the email. """ # remove default author when going through the mail gateway. Indeed we # do not want to explicitly set user_id to False; however we do not # want the gateway user to be responsible if no other responsible is # found. create_context = dict(self.env.context or {}) create_context['default_user_id'] = False if custom_values is None: custom_values = {} defaults = { 'name': msg.get('subject') or _("No Subject"), 'email_from': msg.get('from'), 'planned_hours': 0.0, 'partner_id': msg.get('author_id') } defaults.update(custom_values) task = super(Task, self.with_context(create_context)).message_new(msg, custom_values=defaults) email_list = task.email_split(msg) partner_ids = [p.id for p in self.env['mail.thread']._mail_find_partner_from_emails(email_list, records=task, force_create=False) if p] task.message_subscribe(partner_ids) return task def message_update(self, msg, update_vals=None): """ Override to update the task according to the email. 
""" email_list = self.email_split(msg) partner_ids = [p.id for p in self.env['mail.thread']._mail_find_partner_from_emails(email_list, records=self, force_create=False) if p] self.message_subscribe(partner_ids) return super(Task, self).message_update(msg, update_vals=update_vals) def _message_get_suggested_recipients(self): recipients = super(Task, self)._message_get_suggested_recipients() for task in self: if task.partner_id: reason = _('Customer Email') if task.partner_id.email else _('Customer') task._message_add_suggested_recipient(recipients, partner=task.partner_id, reason=reason) elif task.email_from: task._message_add_suggested_recipient(recipients, email=task.email_from, reason=_('Customer Email')) return recipients def _notify_email_header_dict(self): headers = super(Task, self)._notify_email_header_dict() if self.project_id: current_objects = [h for h in headers.get('X-Odoo-Objects', '').split(',') if h] current_objects.insert(0, 'project.project-%s, ' % self.project_id.id) headers['X-Odoo-Objects'] = ','.join(current_objects) if self.tag_ids: headers['X-Odoo-Tags'] = ','.join(self.tag_ids.mapped('name')) return headers def _message_post_after_hook(self, message, msg_vals): if message.attachment_ids and not self.displayed_image_id: image_attachments = message.attachment_ids.filtered(lambda a: a.mimetype == 'image') if image_attachments: self.displayed_image_id = image_attachments[0] if self.email_from and not self.partner_id: # we consider that posting a message with a specified recipient (not a follower, a specific one) # on a document without customer means that it was created through the chatter using # suggested recipients. This heuristic allows to avoid ugly hacks in JS. new_partner = message.partner_ids.filtered(lambda partner: partner.email == self.email_from) if new_partner: self.search([ ('partner_id', '=', False), ('email_from', '=', new_partner.email), ('stage_id.fold', '=', False)]).write({'partner_id': new_partner.id}) return super(Task, self)._message_post_after_hook(message, msg_vals) def action_assign_to_me(self): self.write({'user_id': self.env.user.id}) # If depth == 1, return only direct children # If depth == 3, return children to third generation # If depth <= 0, return all children without depth limit def _get_all_subtasks(self, depth=0): children = self.mapped('child_ids').filtered(lambda children: children.active) if not children: return self.env['project.task'] if depth == 1: return children return children + children._get_all_subtasks(depth - 1) def action_open_parent_task(self): return { 'name': _('Parent Task'), 'view_mode': 'form', 'res_model': 'project.task', 'res_id': self.parent_id.id, 'type': 'ir.actions.act_window', 'context': dict(self._context, create=False) } def action_subtask(self): action = self.env["ir.actions.actions"]._for_xml_id("project.project_task_action_sub_task") # display all subtasks of current task action['domain'] = [('id', 'child_of', self.id), ('id', '!=', self.id)] # update context, with all default values as 'quick_create' does not contains all field in its view if self._context.get('default_project_id'): default_project = self.env['project.project'].browse(self.env.context['default_project_id']) else: default_project = self.project_id.subtask_project_id or self.project_id ctx = dict(self.env.context) ctx = {k: v for k, v in ctx.items() if not k.startswith('search_default_')} ctx.update({ 'default_name': self.env.context.get('name', self.name) + ':', 'default_parent_id': self.id, # will give default subtask field in 
`default_get` 'default_company_id': default_project.company_id.id if default_project else self.env.company.id, }) action['context'] = ctx return action def action_recurring_tasks(self): return { 'name': 'Tasks in Recurrence', 'type': 'ir.actions.act_window', 'res_model': 'project.task', 'view_mode': 'tree,form', 'domain': [('recurrence_id', 'in', self.recurrence_id.ids)], } # --------------------------------------------------- # Rating business # --------------------------------------------------- def _send_task_rating_mail(self, force_send=False): for task in self: rating_template = task.stage_id.rating_template_id if rating_template: task.rating_send_request(rating_template, lang=task.partner_id.lang, force_send=force_send) def rating_get_partner_id(self): res = super(Task, self).rating_get_partner_id() if not res and self.project_id.partner_id: return self.project_id.partner_id return res def rating_apply(self, rate, token=None, feedback=None, subtype_xmlid=None): return super(Task, self).rating_apply(rate, token=token, feedback=feedback, subtype_xmlid="project.mt_task_rating") def _rating_get_parent_field_name(self): return 'project_id' class ProjectTags(models.Model): """ Tags of project's tasks """ _name = "project.tags" _description = "Project Tags" def _get_default_color(self): return randint(1, 11) name = fields.Char('Name', required=True) color = fields.Integer(string='Color', default=_get_default_color) _sql_constraints = [ ('name_uniq', 'unique (name)', "Tag name already exists!"), ]
51.550209
249
0.645577
73,451
0.993615
0
0
21,094
0.285351
0
0
22,570
0.305318
7c8849369fcbb1dad3eb48e7b50645532c6e90e9
1,670
py
Python
app/config.py
Maethorin/pivocram
f1709f5ee76d0280601efa87f3af8e89c2968f43
[ "MIT" ]
5
2016-04-02T15:07:03.000Z
2021-06-25T14:48:55.000Z
app/config.py
Maethorin/pivocram
f1709f5ee76d0280601efa87f3af8e89c2968f43
[ "MIT" ]
2
2016-04-28T20:14:04.000Z
2016-05-01T18:37:05.000Z
app/config.py
Maethorin/pivocram
f1709f5ee76d0280601efa87f3af8e89c2968f43
[ "MIT" ]
1
2018-07-27T10:52:04.000Z
2018-07-27T10:52:04.000Z
# -*- coding: utf-8 -*-

"""
Config File for environment variables
"""

import os
from importlib import import_module


class Config(object):
    """
    Base class for all config variables
    """
    DEBUG = False
    TESTING = False
    DEVELOPMENT = False
    CSRF_ENABLED = True
    SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
    SECRET_KEY = os.environ['SECRET_KEY']


class ProductionConfig(Config):
    """
    Production Config... this is the real thing
    """
    DEBUG = False


class StagingConfig(Config):
    """
    Staging Config is for... staging things
    """
    DEBUG = True


class DevelopmentConfig(Config):
    """
    Development Config... this is your home developer!
    """
    DEVELOPMENT = True
    DEBUG = True


class TestingConfig(Config):
    """
    Test Config... You should be testing right now instead of reading docs!!!
    """
    TESTING = True
    KEY_ON_TEST = 'KEY ON TEST'


class ConfigClassNotFound(Exception):
    """
    Raised when the APP_SETTINGS environment variable has a value which does not point to an instantiable config class.
    """
    pass


def get_config():
    """
    Get the Config Class instance defined in the APP_SETTINGS environment variable
    :return: The config class instance
    :rtype: Config
    """
    config_imports = os.environ['APP_SETTINGS'].split('.')
    config_class_name = config_imports[-1]
    config_module = import_module('.'.join(config_imports[:-1]))
    config_class = getattr(config_module, config_class_name, None)
    if not config_class:
        raise ConfigClassNotFound('Unable to find a config class in {}'.format(os.environ['APP_SETTINGS']))
    return config_class()
23.521127
115
0.671856
971
0.581437
0
0
0
0
0
0
769
0.460479
7c88b8dca0946deb62b53070c85ee8a8bd47974e
845
py
Python
initial_load.py
hongyuanChrisLi/RealEstateDBConvert
0fd04f5213ff3fd3548db3f322828bd80cf41791
[ "Apache-2.0" ]
null
null
null
initial_load.py
hongyuanChrisLi/RealEstateDBConvert
0fd04f5213ff3fd3548db3f322828bd80cf41791
[ "Apache-2.0" ]
null
null
null
initial_load.py
hongyuanChrisLi/RealEstateDBConvert
0fd04f5213ff3fd3548db3f322828bd80cf41791
[ "Apache-2.0" ]
null
null
null
from mysql_dao.select_dao import SelectDao as MysqlSelectDao
from postgres_dao.ddl_dao import DdlDao
from postgres_dao.dml_dao import DmlDao as PsqlDmlDao

psql_ddl_dao = DdlDao()
mysql_select_dao = MysqlSelectDao()
psql_dml_dao = PsqlDmlDao()

psql_ddl_dao.create_tables()

county_data = mysql_select_dao.select_all_counties()
psql_dml_dao.insert_county(county_data)

city_data = mysql_select_dao.select_all_cities()
psql_dml_dao.insert_city(city_data)

zipcode_data = mysql_select_dao.select_all_zipcodes()
psql_dml_dao.insert_zipcode(zipcode_data)

data = mysql_select_dao.select_full_addr_month_rpt()
psql_dml_dao.trunc_addr_month_rpt()
psql_dml_dao.insert_addr_month_rpt(data)

data = mysql_select_dao.select_full_mls_daily_rpt()
psql_dml_dao.trunc_mls_rpt()
psql_dml_dao.insert_mls_rpt(data)

mysql_select_dao.close()
psql_dml_dao.close()
28.166667
60
0.857988
0
0
0
0
0
0
0
0
0
0
7c88cdba00ccf459ff19909681f6bd97e0741c61
6,306
py
Python
pytests/docs/docs.py
ramalingam-cb/testrunner
81cea7a5a493cf0c67fca7f97c667cd3c6ad2142
[ "Apache-2.0" ]
null
null
null
pytests/docs/docs.py
ramalingam-cb/testrunner
81cea7a5a493cf0c67fca7f97c667cd3c6ad2142
[ "Apache-2.0" ]
null
null
null
pytests/docs/docs.py
ramalingam-cb/testrunner
81cea7a5a493cf0c67fca7f97c667cd3c6ad2142
[ "Apache-2.0" ]
null
null
null
import time import logger from basetestcase import BaseTestCase from couchbase_helper.documentgenerator import DocumentGenerator from membase.api.rest_client import RestConnection from couchbase_helper.documentgenerator import BlobGenerator class DocsTests(BaseTestCase): def setUp(self): super(DocsTests, self).setUp() def tearDown(self): super(DocsTests, self).tearDown() def test_docs_int_big_values(self): degree = self.input.param("degree", 53) error = self.input.param("error", False) number = 2**degree first = ['james', 'sharon'] template = '{{ "number": {0}, "first_name": "{1}" }}' gen_load = DocumentGenerator('test_docs', template, [number,], first, start=0, end=self.num_items) self.log.info("create %s documents..." % (self.num_items)) try: self._load_all_buckets(self.master, gen_load, "create", 0) self._verify_stats_all_buckets([self.master]) except Exception as e: if error: self.log.info("Unable to create documents as expected: %s" % str(e)) else: raise e else: if error: self.fail("Able to create documents with value: %s" % str(number)) #docs.docs.DocsTests.test_load_memory,nodes_init=3,standard_buckets=3,memcached_buckets=1,replicas=2,quota_percent=75 """ 1) Configure a cluster with 4 Couchbase Buckets and 1 Memcached Buckets. 2) Total memory quota allocated for Couchbase should be approx. 75% (12G) of total RAM. 3) Load initial data on all buckets upto 60% of each memory quota 4) Pick one bucket and do the following (5) to (8) 5) Insert new items upto high_wat_mark (75% of memory quota) 6) Expire/Delete/update random items (ratio of expiration vs delete ~= 8:2) 7) Repeat (6) until "ep_total_del_items" is ~= (3 X # of items being loaded in (3)) 8) Expire 90% of remaining items 9) Insert new items or update existing items across buckets 10) See if we can run into "Hard out of Memory" error (UI) """ def test_load_memory(self): num_items = self.quota * 1024 * 0.6 / self.value_size num_items = num_items / len(self.buckets) self.log.info("Load initial data on all buckets upto 60% of each memory quota") gen_load = BlobGenerator('mike', 'mike-', self.value_size, start=0, end=num_items) self._load_all_buckets(self.master, gen_load, "create", 0) self.log.info("Insert new items upto high_wat_mark (75% of memory quota)") for bucket in self.buckets: if bucket.type != 'memcached': bucket_to_load = bucket break new_num_items = self.quota * 1024 * 0.15 / self.value_size gen_load = BlobGenerator('mike', 'mike-', self.value_size, start=num_items, end=new_num_items + num_items) load = self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_load, bucket_to_load.kvs[1], 'create', compression=self.sdk_compression) load.result() end_time = time.time() + 60*60*3 while time.time() < end_time: self.log.info("check memUsed") rest = RestConnection(self.master) for bucket in rest.get_buckets(): self.log.info("*****************************\ bucket %s: memUsed %s\ ****************************" % (bucket.name, bucket.stats.memUsed)) self.log.info("Expire/Delete/update random items (ratio \ of expiration vs delete ~= 8:2)") current_num = 0 wait_task = self.cluster.async_wait_for_stats(self.servers[:self.nodes_init], bucket_to_load, 'all', 'ep_total_del_items', '==', num_items * 3) while wait_task.state != "FINISHED": gen_update = BlobGenerator('mike', 'mike-', self.value_size, start=current_num, end=current_num + 5000) gen_expire = BlobGenerator('mike', 'mike-', self.value_size, start=current_num + 5000, end=current_num + 6600) gen_delete = BlobGenerator('mike', 'mike-', self.value_size, 
start=current_num + 6600, end=current_num + 7000) tasks = [] tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_update, bucket_to_load.kvs[1], 'update', compression=self.sdk_compression)) tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_expire, bucket_to_load.kvs[1], 'update', exp=1, compression=self.sdk_compression)) tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_delete, bucket_to_load.kvs[1], 'delete', compression=self.sdk_compression)) for task in tasks: task.result() current_num += 7000 self.log.info("Expire 90% of remaining items") remain_keys, _ = bucket_to_load.kvs[1].key_set() last_key_to_expire = remain_keys[0.9 * len(remain_keys)][4:] gen_expire = BlobGenerator('mike', 'mike-', self.value_size, start=0, end=last_key_to_expire) load = self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_expire, bucket_to_load.kvs[1], 'update', exp=1, compression=self.sdk_compression) load.result() self.log.info("Insert new items or update existing items across buckets") gen_load = BlobGenerator('mike', 'mike-', self.value_size, start=new_num_items + num_items, end=new_num_items * 2 + num_items) self._load_all_buckets(self.master, gen_load, "create", 0)
55.80531
121
0.579607
6,063
0.961465
0
0
0
0
0
0
1,695
0.268792
7c898d721c85859465a77ce43f10791adda1d063
1,890
py
Python
lichthi.py
truongaxin123/lichthidtu
77ba75974769ab1fdd1281b6088a1734dc0a3a83
[ "MIT" ]
null
null
null
lichthi.py
truongaxin123/lichthidtu
77ba75974769ab1fdd1281b6088a1734dc0a3a83
[ "MIT" ]
null
null
null
lichthi.py
truongaxin123/lichthidtu
77ba75974769ab1fdd1281b6088a1734dc0a3a83
[ "MIT" ]
null
null
null
from bs4 import BeautifulSoup
import requests
from urllib.request import urlretrieve

ROOT = 'http://pdaotao.duytan.edu.vn'


def get_url_sub(sub, id_, page):
    all_td_tag = []
    for i in range(1, page + 1):
        print('http://pdaotao.duytan.edu.vn/EXAM_LIST/?page={}&lang=VN'.format(i))
        r = requests.get('http://pdaotao.duytan.edu.vn/EXAM_LIST/?page={}&lang=VN'.format(i))
        soup = BeautifulSoup(r.text, 'lxml')
        list_td_tag = soup.find_all('td', attrs={'style': 'padding-top:10px'})
        all_td_tag = all_td_tag + list_td_tag
    for td_tag in all_td_tag:
        if (((sub + id_) in str(td_tag.a.contents[0])) or
                ((sub + ' ' + id_) in str(td_tag.a.contents[0])) or
                ((sub + '_' + id_) in str(td_tag.a.contents[0]))):
            print('\nComplete!!!')
            print(' '.join(str(td_tag.a.string).split()))
            print(str(td_tag.a['href']).replace('..', ROOT))
            return str(td_tag.a['href']).replace('..', ROOT)


def get_excel_url(url):
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'lxml')
    list_span_tags = soup.find_all('span', class_='txt_l4')
    excel_url = list_span_tags[1].a['href'].replace('..', ROOT)
    return excel_url

# a = get_excel_url('http://pdaotao.duytan.edu.vn/EXAM_LIST_Detail/?ID=52289&lang=VN')


def main():
    sub = input('Enter subject name: ')
    id_ = input('Enter subject id: ')
    url = get_url_sub(sub, id_, 4)
    if url is None:
        print('No subject matching ({} {}) was found :('.format(sub, id_))
        return
    else:
        print('get excel URL!!!')
        excel_url = get_excel_url(url)
        excel_url = excel_url.replace(' ', '%20')
        print('Download excel file!!!')
        save_at = 'C:/Users/truon/Desktop/'
        filename = save_at + excel_url.split('/')[-1].replace('%20', ' ')
        urlretrieve(excel_url, filename)
        print('Done!')


if __name__ == '__main__':
    main()
35
93
0.595767
0
0
0
0
0
0
0
0
509
0.269312
7c8a2cc8e8cd0ae17cdb81c0889eb3b2e10339c2
10,998
py
Python
appengine/uploader/main.py
isabella232/feedloader
c0417480804d406a83d1aedcb7e7d719058fdbfd
[ "Apache-2.0" ]
5
2021-02-15T12:49:12.000Z
2022-01-12T06:28:41.000Z
appengine/uploader/main.py
google/feedloader
f6a25569bc3d7d4ee326961fd3b01e45fc3858e4
[ "Apache-2.0" ]
1
2021-06-18T15:30:16.000Z
2021-06-18T15:30:16.000Z
appengine/uploader/main.py
isabella232/feedloader
c0417480804d406a83d1aedcb7e7d719058fdbfd
[ "Apache-2.0" ]
4
2021-02-16T17:28:00.000Z
2021-06-18T15:27:52.000Z
# coding=utf-8 # Copyright 2021 Google LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Uploader module that handles batch jobs sent from Task Queue. This module receives batch jobs from TaskQueue. For each job, the module loads data from BigQuery and sends it to Merchant Center. """ import http import json import logging import socket from typing import List, Tuple import flask from google.cloud import bigquery from google.cloud import logging as cloud_logging from googleapiclient import errors import batch_creator import bigquery_client import constants import content_api_client import result_recorder import shoptimizer_client from models import failure from models import process_result from models import upload_task app = flask.Flask(__name__) _logging_client = cloud_logging.Client() _logging_client.setup_logging(log_level=logging.DEBUG) _SHOPTIMIZER_CONFIG_FILE_PATH = 'config/shoptimizer_config.json' OPERATION_TO_METHOD = { constants.Operation.UPSERT: constants.Method.INSERT, constants.Operation.DELETE: constants.Method.DELETE, constants.Operation.PREVENT_EXPIRING: constants.Method.INSERT } # Used to check if this is the last retry for alerting purposes. # Should match task_retry_limit in appengine/initiator/queue.yaml. TASK_RETRY_LIMIT = 5 @app.route('/insert_items', methods=['POST']) def run_insert_process() -> Tuple[str, http.HTTPStatus]: """Handles uploading tasks pushed from Task Queue.""" return _run_process(constants.Operation.UPSERT) @app.route('/delete_items', methods=['POST']) def run_delete_process() -> Tuple[str, http.HTTPStatus]: """Handles deleting tasks pushed from Task Queue.""" return _run_process(constants.Operation.DELETE) @app.route('/prevent_expiring_items', methods=['POST']) def run_prevent_expiring_process() -> Tuple[str, http.HTTPStatus]: """Handles prevent expiring tasks pushed from Task Queue.""" return _run_process(constants.Operation.PREVENT_EXPIRING) def _run_process(operation: constants.Operation) -> Tuple[str, http.HTTPStatus]: """Handles tasks pushed from Task Queue. When tasks are enqueued to Task Queue by initiator, this method will be called. It extracts necessary information from a Task Queue message. The following processes are executed in this function: - Loading items to process from BigQuery. - Converts items into a batch that can be sent to Content API for Shopping. - Sending items to Content API for Shopping (Merchant Center). - Records the results of the Content API for Shopping call. Args: operation: Type of operation to perform on the items. Returns: The result of HTTP request. """ request_body = json.loads(flask.request.data.decode('utf-8')) task = upload_task.UploadTask.from_json(request_body) if task.batch_size == 0: return 'OK', http.HTTPStatus.OK batch_number = int(task.start_index / task.batch_size) + 1 logging.info( '%s started. 
Batch #%d info: start_index: %d, batch_size: %d,' 'initiation timestamp: %s', operation.value, batch_number, task.start_index, task.batch_size, task.timestamp) try: items = _load_items_from_bigquery(operation, task) except errors.HttpError: return 'Error loading items from BigQuery', http.HTTPStatus.INTERNAL_SERVER_ERROR result = process_result.ProcessResult([], [], []) try: if not items: logging.error( 'Batch #%d, operation %s: 0 items loaded from BigQuery so batch not sent to Content API. Start_index: %d, batch_size: %d,' 'initiation timestamp: %s', batch_number, operation.value, task.start_index, task.batch_size, task.timestamp) return 'No items to process', http.HTTPStatus.OK method = OPERATION_TO_METHOD.get(operation) # Creates batch from items loaded from BigQuery original_batch, skipped_item_ids, batch_id_to_item_id = batch_creator.create_batch( batch_number, items, method) # Optimizes batch via Shoptimizer for upsert/prevent_expiring operations if operation != constants.Operation.DELETE and constants.SHOPTIMIZER_API_INTEGRATION_ON: batch_to_send_to_content_api = _create_optimized_batch( original_batch, batch_number, operation) else: batch_to_send_to_content_api = original_batch # Sends batch of items to Content API for Shopping api_client = content_api_client.ContentApiClient() successful_item_ids, item_failures = api_client.process_items( batch_to_send_to_content_api, batch_number, batch_id_to_item_id, method) result = process_result.ProcessResult( successfully_processed_item_ids=successful_item_ids, content_api_failures=item_failures, skipped_item_ids=skipped_item_ids) except errors.HttpError as http_error: error_status_code = http_error.resp.status error_reason = http_error.resp.reason result = _handle_content_api_error(error_status_code, error_reason, batch_number, http_error, items, operation, task) return error_reason, error_status_code except socket.timeout as timeout_error: error_status_code = http.HTTPStatus.REQUEST_TIMEOUT error_reason = 'Socket timeout' result = _handle_content_api_error(error_status_code, error_reason, batch_number, timeout_error, items, operation, task) return error_reason, error_status_code else: logging.info( 'Batch #%d with operation %s and initiation timestamp %s successfully processed %s items, failed to process %s items and skipped %s items.', batch_number, operation.value, task.timestamp, result.get_success_count(), result.get_failure_count(), result.get_skipped_count()) finally: recorder = result_recorder.ResultRecorder.from_service_account_json( constants.GCP_SERVICE_ACCOUNT_PATH, constants.DATASET_ID_FOR_MONITORING, constants.TABLE_ID_FOR_RESULT_COUNTS_MONITORING, constants.TABLE_ID_FOR_ITEM_RESULTS_MONITORING) recorder.insert_result(operation.value, result, task.timestamp, batch_number) return 'OK', http.HTTPStatus.OK def _load_items_from_bigquery( operation: constants.Operation, task: upload_task.UploadTask) -> List[bigquery.Row]: """Loads items from BigQuery. Args: operation: The operation to be performed on this batch of items. task: The Cloud Task object that initiated this request. Returns: The list of items loaded from BigQuery. """ table_id = f'process_items_to_{operation.value}_{task.timestamp}' bq_client = bigquery_client.BigQueryClient.from_service_account_json( constants.GCP_SERVICE_ACCOUNT_PATH, constants.DATASET_ID_FOR_PROCESSING, table_id) try: items_iterator = bq_client.load_items(task.start_index, task.batch_size) except errors.HttpError as http_error: logging.exception( 'Error loading items from %s.%s. HTTP status: %s. 
Error: %s', constants.DATASET_ID_FOR_PROCESSING, table_id, http_error.resp.status, http_error.resp.reason) raise return list(items_iterator) def _create_optimized_batch(batch: constants.Batch, batch_number: int, operation: constants.Operation) -> constants.Batch: """Creates an optimized batch by calling the Shoptimizer API. Args: batch: The batch of product data to be optimized. batch_number: The number that identifies this batch. operation: The operation to be performed on this batch (upsert, delete, prevent_expiring). Returns: The batch returned from the Shoptimizer API Client. """ try: optimization_client = shoptimizer_client.ShoptimizerClient( batch_number, operation) except (OSError, ValueError): return batch return optimization_client.shoptimize(batch) def _handle_content_api_error( error_status_code: int, error_reason: str, batch_num: int, error: Exception, item_rows: List[bigquery.Row], operation: constants.Operation, task: upload_task.UploadTask) -> process_result.ProcessResult: """Logs network related errors returned from Content API and returns a list of item failures. Args: error_status_code: HTTP status code from Content API. error_reason: The reason for the error. batch_num: The batch number. error: The error thrown by Content API. item_rows: The items being processed in this batch. operation: The operation to be performed on this batch of items. task: The Cloud Task object that initiated this request. Returns: The list of items that failed due to the error, wrapped in a process_result. """ logging.warning( 'Batch #%d with operation %s and initiation timestamp %s failed. HTTP status: %s. Error: %s', batch_num, operation.value, task.timestamp, error_status_code, error_reason) # If the batch API call received an HttpError, mark every id as failed. item_failures = [ failure.Failure(str(item_row.get('item_id', 'Missing ID')), error_reason) for item_row in item_rows ] api_result = process_result.ProcessResult([], item_failures, []) if content_api_client.suggest_retry( error_status_code) and _get_execution_attempt() < TASK_RETRY_LIMIT: logging.warning( 'Batch #%d with operation %s and initiation timestamp %s will be requeued for retry', batch_num, operation.value, task.timestamp) else: logging.error( 'Batch #%d with operation %s and initiation timestamp %s failed and will not be retried. Error: %s', batch_num, operation.value, task.timestamp, error) return api_result def _get_execution_attempt() -> int: """Returns the number of times this task has previously been executed. If the execution count header does not exist, it means the request did not come from Cloud Tasks. In this case, there will be no retry, so set execution attempt to the retry limit. Returns: int, the number of times this task has previously been executed. """ execution_attempt = flask.request.headers.get( 'X-AppEngine-TaskExecutionCount', '') if execution_attempt: return int(execution_attempt) else: return TASK_RETRY_LIMIT if __name__ == '__main__': # This is used when running locally. Gunicorn is used to run the # application on Google App Engine. See entrypoint in app.yaml. app.run(host='127.0.0.1', port=8080, debug=True)
38.055363
148
0.738498
0
0
0
0
660
0.060011
0
0
4,582
0.416621
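The handler above decides whether to requeue a failed batch by reading Cloud Tasks' execution-count header; here is a minimal standalone sketch of that retry pattern (the route name and limit are made up, only the header name comes from the module above):

import http
import flask

app = flask.Flask(__name__)
TASK_RETRY_LIMIT = 5  # should mirror task_retry_limit in the queue config

@app.route('/retry_demo', methods=['POST'])
def retry_demo():
    # Cloud Tasks / App Engine adds this header on every delivery attempt.
    attempt = int(
        flask.request.headers.get('X-AppEngine-TaskExecutionCount', TASK_RETRY_LIMIT))
    if attempt < TASK_RETRY_LIMIT:
        # A non-2xx response asks the queue to redeliver the task later.
        return 'transient failure, please retry', http.HTTPStatus.INTERNAL_SERVER_ERROR
    return 'final attempt reached, giving up', http.HTTPStatus.OK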
7c8a6aee7b7a77f1d1c85df07a12dedc044587d5
17,730
py
Python
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/transforms.py
BadDevCode/lumberyard
3d688932f919dbf5821f0cb8a210ce24abe39e9e
[ "AML" ]
1,738
2017-09-21T10:59:12.000Z
2022-03-31T21:05:46.000Z
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/transforms.py
olivier-be/lumberyard
3d688932f919dbf5821f0cb8a210ce24abe39e9e
[ "AML" ]
427
2017-09-29T22:54:36.000Z
2022-02-15T19:26:50.000Z
dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/transforms.py
olivier-be/lumberyard
3d688932f919dbf5821f0cb8a210ce24abe39e9e
[ "AML" ]
671
2017-09-21T08:04:01.000Z
2022-03-29T14:30:07.000Z
""" Implement transformation on Numba IR """ from __future__ import absolute_import, print_function from collections import namedtuple, defaultdict import logging from numba.analysis import compute_cfg_from_blocks, find_top_level_loops from numba import ir, errors, ir_utils from numba.analysis import compute_use_defs _logger = logging.getLogger(__name__) def _extract_loop_lifting_candidates(cfg, blocks): """ Returns a list of loops that are candidate for loop lifting """ # check well-formed-ness of the loop def same_exit_point(loop): "all exits must point to the same location" outedges = set() for k in loop.exits: succs = set(x for x, _ in cfg.successors(k)) if not succs: # If the exit point has no successor, it contains an return # statement, which is not handled by the looplifting code. # Thus, this loop is not a candidate. _logger.debug("return-statement in loop.") return False outedges |= succs ok = len(outedges) == 1 _logger.debug("same_exit_point=%s (%s)", ok, outedges) return ok def one_entry(loop): "there is one entry" ok = len(loop.entries) == 1 _logger.debug("one_entry=%s", ok) return ok def cannot_yield(loop): "cannot have yield inside the loop" insiders = set(loop.body) | set(loop.entries) | set(loop.exits) for blk in map(blocks.__getitem__, insiders): for inst in blk.body: if isinstance(inst, ir.Assign): if isinstance(inst.value, ir.Yield): _logger.debug("has yield") return False _logger.debug("no yield") return True _logger.info('finding looplift candidates') # the check for cfg.entry_point in the loop.entries is to prevent a bad # rewrite where a prelude for a lifted loop would get written into block -1 # if a loop entry were in block 0 candidates = [] for loop in find_top_level_loops(cfg): _logger.debug("top-level loop: %s", loop) if (same_exit_point(loop) and one_entry(loop) and cannot_yield(loop) and cfg.entry_point() not in loop.entries): candidates.append(loop) _logger.debug("add candidate: %s", loop) return candidates def find_region_inout_vars(blocks, livemap, callfrom, returnto, body_block_ids): """Find input and output variables to a block region. """ inputs = livemap[callfrom] outputs = livemap[returnto] # ensure live variables are actually used in the blocks, else remove, # saves having to create something valid to run through postproc # to achieve similar loopblocks = {} for k in body_block_ids: loopblocks[k] = blocks[k] used_vars = set() def_vars = set() defs = compute_use_defs(loopblocks) for vs in defs.usemap.values(): used_vars |= vs for vs in defs.defmap.values(): def_vars |= vs used_or_defined = used_vars | def_vars # note: sorted for stable ordering inputs = sorted(set(inputs) & used_or_defined) outputs = sorted(set(outputs) & used_or_defined & def_vars) return inputs, outputs _loop_lift_info = namedtuple('loop_lift_info', 'loop,inputs,outputs,callfrom,returnto') def _loop_lift_get_candidate_infos(cfg, blocks, livemap): """ Returns information on looplifting candidates. 
""" loops = _extract_loop_lifting_candidates(cfg, blocks) loopinfos = [] for loop in loops: [callfrom] = loop.entries # requirement checked earlier an_exit = next(iter(loop.exits)) # anyone of the exit block if len(loop.exits) > 1: # Pre-Py3.8 may have multiple exits [(returnto, _)] = cfg.successors(an_exit) # requirement checked earlier else: # Post-Py3.8 DO NOT have multiple exits returnto = an_exit local_block_ids = set(loop.body) | set(loop.entries) inputs, outputs = find_region_inout_vars( blocks=blocks, livemap=livemap, callfrom=callfrom, returnto=returnto, body_block_ids=local_block_ids, ) lli = _loop_lift_info(loop=loop, inputs=inputs, outputs=outputs, callfrom=callfrom, returnto=returnto) loopinfos.append(lli) return loopinfos def _loop_lift_modify_call_block(liftedloop, block, inputs, outputs, returnto): """ Transform calling block from top-level function to call the lifted loop. """ scope = block.scope loc = block.loc blk = ir.Block(scope=scope, loc=loc) ir_utils.fill_block_with_call( newblock=blk, callee=liftedloop, label_next=returnto, inputs=inputs, outputs=outputs, ) return blk def _loop_lift_prepare_loop_func(loopinfo, blocks): """ Inplace transform loop blocks for use as lifted loop. """ entry_block = blocks[loopinfo.callfrom] scope = entry_block.scope loc = entry_block.loc # Lowering assumes the first block to be the one with the smallest offset firstblk = min(blocks) - 1 blocks[firstblk] = ir_utils.fill_callee_prologue( block=ir.Block(scope=scope, loc=loc), inputs=loopinfo.inputs, label_next=loopinfo.callfrom, ) blocks[loopinfo.returnto] = ir_utils.fill_callee_epilogue( block=ir.Block(scope=scope, loc=loc), outputs=loopinfo.outputs, ) def _loop_lift_modify_blocks(func_ir, loopinfo, blocks, typingctx, targetctx, flags, locals): """ Modify the block inplace to call to the lifted-loop. Returns a dictionary of blocks of the lifted-loop. """ from numba.dispatcher import LiftedLoop # Copy loop blocks loop = loopinfo.loop loopblockkeys = set(loop.body) | set(loop.entries) if len(loop.exits) > 1: # Pre-Py3.8 may have multiple exits loopblockkeys |= loop.exits loopblocks = dict((k, blocks[k].copy()) for k in loopblockkeys) # Modify the loop blocks _loop_lift_prepare_loop_func(loopinfo, loopblocks) # Create a new IR for the lifted loop lifted_ir = func_ir.derive(blocks=loopblocks, arg_names=tuple(loopinfo.inputs), arg_count=len(loopinfo.inputs), force_non_generator=True) liftedloop = LiftedLoop(lifted_ir, typingctx, targetctx, flags, locals) # modify for calling into liftedloop callblock = _loop_lift_modify_call_block(liftedloop, blocks[loopinfo.callfrom], loopinfo.inputs, loopinfo.outputs, loopinfo.returnto) # remove blocks for k in loopblockkeys: del blocks[k] # update main interpreter callsite into the liftedloop blocks[loopinfo.callfrom] = callblock return liftedloop def loop_lifting(func_ir, typingctx, targetctx, flags, locals): """ Loop lifting transformation. 
Given a interpreter `func_ir` returns a 2 tuple of `(toplevel_interp, [loop0_interp, loop1_interp, ....])` """ blocks = func_ir.blocks.copy() cfg = compute_cfg_from_blocks(blocks) loopinfos = _loop_lift_get_candidate_infos(cfg, blocks, func_ir.variable_lifetime.livemap) loops = [] if loopinfos: _logger.debug('loop lifting this IR with %d candidates:\n%s', len(loopinfos), func_ir.dump_to_string()) for loopinfo in loopinfos: lifted = _loop_lift_modify_blocks(func_ir, loopinfo, blocks, typingctx, targetctx, flags, locals) loops.append(lifted) # Make main IR main = func_ir.derive(blocks=blocks) return main, loops def canonicalize_cfg_single_backedge(blocks): """ Rewrite loops that have multiple backedges. """ cfg = compute_cfg_from_blocks(blocks) newblocks = blocks.copy() def new_block_id(): return max(newblocks.keys()) + 1 def has_multiple_backedges(loop): count = 0 for k in loop.body: blk = blocks[k] edges = blk.terminator.get_targets() # is a backedge? if loop.header in edges: count += 1 if count > 1: # early exit return True return False def yield_loops_with_multiple_backedges(): for lp in cfg.loops().values(): if has_multiple_backedges(lp): yield lp def replace_target(term, src, dst): def replace(target): return (dst if target == src else target) if isinstance(term, ir.Branch): return ir.Branch(cond=term.cond, truebr=replace(term.truebr), falsebr=replace(term.falsebr), loc=term.loc) elif isinstance(term, ir.Jump): return ir.Jump(target=replace(term.target), loc=term.loc) else: assert not term.get_targets() return term def rewrite_single_backedge(loop): """ Add new tail block that gathers all the backedges """ header = loop.header tailkey = new_block_id() for blkkey in loop.body: blk = newblocks[blkkey] if header in blk.terminator.get_targets(): newblk = blk.copy() # rewrite backedge into jumps to new tail block newblk.body[-1] = replace_target(blk.terminator, header, tailkey) newblocks[blkkey] = newblk # create new tail block entryblk = newblocks[header] tailblk = ir.Block(scope=entryblk.scope, loc=entryblk.loc) # add backedge tailblk.append(ir.Jump(target=header, loc=tailblk.loc)) newblocks[tailkey] = tailblk for loop in yield_loops_with_multiple_backedges(): rewrite_single_backedge(loop) return newblocks def canonicalize_cfg(blocks): """ Rewrite the given blocks to canonicalize the CFG. Returns a new dictionary of blocks. """ return canonicalize_cfg_single_backedge(blocks) def with_lifting(func_ir, typingctx, targetctx, flags, locals): """With-lifting transformation Rewrite the IR to extract all withs. Only the top-level withs are extracted. 
Returns the (the_new_ir, the_lifted_with_ir) """ from numba import postproc def dispatcher_factory(func_ir, objectmode=False, **kwargs): from numba.dispatcher import LiftedWith, ObjModeLiftedWith myflags = flags.copy() if objectmode: # Lifted with-block cannot looplift myflags.enable_looplift = False # Lifted with-block uses object mode myflags.enable_pyobject = True myflags.force_pyobject = True myflags.no_cpython_wrapper = False cls = ObjModeLiftedWith else: cls = LiftedWith return cls(func_ir, typingctx, targetctx, myflags, locals, **kwargs) postproc.PostProcessor(func_ir).run() # ensure we have variable lifetime assert func_ir.variable_lifetime vlt = func_ir.variable_lifetime blocks = func_ir.blocks.copy() # find where with-contexts regions are withs = find_setupwiths(blocks) cfg = vlt.cfg _legalize_withs_cfg(withs, cfg, blocks) # For each with-regions, mutate them according to # the kind of contextmanager sub_irs = [] for (blk_start, blk_end) in withs: body_blocks = [] for node in _cfg_nodes_in_region(cfg, blk_start, blk_end): body_blocks.append(node) _legalize_with_head(blocks[blk_start]) # Find the contextmanager cmkind, extra = _get_with_contextmanager(func_ir, blocks, blk_start) # Mutate the body and get new IR sub = cmkind.mutate_with_body(func_ir, blocks, blk_start, blk_end, body_blocks, dispatcher_factory, extra) sub_irs.append(sub) if not sub_irs: # Unchanged new_ir = func_ir else: new_ir = func_ir.derive(blocks) return new_ir, sub_irs def _get_with_contextmanager(func_ir, blocks, blk_start): """Get the global object used for the context manager """ _illegal_cm_msg = "Illegal use of context-manager." def get_var_dfn(var): """Get the definition given a variable""" return func_ir.get_definition(var) def get_ctxmgr_obj(var_ref): """Return the context-manager object and extra info. The extra contains the arguments if the context-manager is used as a call. """ # If the contextmanager used as a Call dfn = func_ir.get_definition(var_ref) if isinstance(dfn, ir.Expr) and dfn.op == 'call': args = [get_var_dfn(x) for x in dfn.args] kws = {k: get_var_dfn(v) for k, v in dfn.kws} extra = {'args': args, 'kwargs': kws} var_ref = dfn.func else: extra = None ctxobj = ir_utils.guard(ir_utils.find_global_value, func_ir, var_ref) # check the contextmanager object if ctxobj is ir.UNDEFINED: raise errors.CompilerError( "Undefined variable used as context manager", loc=blocks[blk_start].loc, ) if ctxobj is None: raise errors.CompilerError(_illegal_cm_msg, loc=dfn.loc) return ctxobj, extra # Scan the start of the with-region for the contextmanager for stmt in blocks[blk_start].body: if isinstance(stmt, ir.EnterWith): var_ref = stmt.contextmanager ctxobj, extra = get_ctxmgr_obj(var_ref) if not hasattr(ctxobj, 'mutate_with_body'): raise errors.CompilerError( "Unsupported context manager in use", loc=blocks[blk_start].loc, ) return ctxobj, extra # No contextmanager found? raise errors.CompilerError( "malformed with-context usage", loc=blocks[blk_start].loc, ) def _legalize_with_head(blk): """Given *blk*, the head block of the with-context, check that it doesn't do anything else. 
""" counters = defaultdict(int) for stmt in blk.body: counters[type(stmt)] += 1 if counters.pop(ir.EnterWith) != 1: raise errors.CompilerError( "with's head-block must have exactly 1 ENTER_WITH", loc=blk.loc, ) if counters.pop(ir.Jump) != 1: raise errors.CompilerError( "with's head-block must have exactly 1 JUMP", loc=blk.loc, ) # Can have any number of del counters.pop(ir.Del, None) # There MUST NOT be any other statements if counters: raise errors.CompilerError( "illegal statements in with's head-block", loc=blk.loc, ) def _cfg_nodes_in_region(cfg, region_begin, region_end): """Find the set of CFG nodes that are in the given region """ region_nodes = set() stack = [region_begin] while stack: tos = stack.pop() succs, _ = zip(*cfg.successors(tos)) nodes = set([node for node in succs if node not in region_nodes and node != region_end]) stack.extend(nodes) region_nodes |= nodes return region_nodes def _legalize_withs_cfg(withs, cfg, blocks): """Verify the CFG of the with-context(s). """ doms = cfg.dominators() postdoms = cfg.post_dominators() # Verify that the with-context has no side-exits for s, e in withs: loc = blocks[s].loc if s not in doms[e]: # Not sure what condition can trigger this error. msg = "Entry of with-context not dominating the exit." raise errors.CompilerError(msg, loc=loc) if e not in postdoms[s]: msg = ( "Does not support with-context that contain branches " "(i.e. break/return/raise) that can leave the with-context. " "Details: exit of with-context not post-dominating the entry. " ) raise errors.CompilerError(msg, loc=loc) def find_setupwiths(blocks): """Find all top-level with. Returns a list of ranges for the with-regions. """ def find_ranges(blocks): for blk in blocks.values(): for ew in blk.find_insts(ir.EnterWith): yield ew.begin, ew.end def previously_occurred(start, known_ranges): for a, b in known_ranges: if s >= a and s < b: return True return False known_ranges = [] for s, e in sorted(find_ranges(blocks)): if not previously_occurred(s, known_ranges): if e not in blocks: # this's possible if there's an exit path in the with-block raise errors.CompilerError( 'unsupported controlflow due to return/raise ' 'statements inside with block' ) assert s in blocks, 'starting offset is not a label' known_ranges.append((s, e)) return known_ranges
33.579545
84
0.60846
0
0
3,279
0.184941
0
0
0
0
4,457
0.251382
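For orientation, a small user-level sketch of the loop-lifting behaviour this module implements (assuming a Numba installation; the function body is illustrative only): compiling in object mode with looplift=True lets the numeric loop be extracted and compiled separately in nopython mode.

import numpy as np
from numba import jit

@jit(forceobj=True, looplift=True)  # object mode, but loops may be lifted
def mixed_mode_sum(values):
    label = "sum of {} values".format(values.size)  # object-mode-only work
    total = 0.0
    for i in range(values.size):  # candidate loop for lifting into nopython mode
        total += values[i]
    return label, total

print(mixed_mode_sum(np.arange(10.0)))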
7c8a815c2ee01b343fc690c138951a4c479fece7
6,453
py
Python
tests/test_masked_inference_wsi_dataset.py
HabibMrad/MONAI
1314701c15623422574b0153d746666dc6004454
[ "Apache-2.0" ]
1
2022-01-04T21:38:23.000Z
2022-01-04T21:38:23.000Z
tests/test_masked_inference_wsi_dataset.py
HabibMrad/MONAI
1314701c15623422574b0153d746666dc6004454
[ "Apache-2.0" ]
null
null
null
tests/test_masked_inference_wsi_dataset.py
HabibMrad/MONAI
1314701c15623422574b0153d746666dc6004454
[ "Apache-2.0" ]
null
null
null
import os import unittest from unittest import skipUnless import numpy as np from numpy.testing import assert_array_equal from parameterized import parameterized from monai.apps.pathology.datasets import MaskedInferenceWSIDataset from monai.apps.utils import download_url from monai.utils import optional_import from tests.utils import skip_if_quick _, has_cim = optional_import("cucim") _, has_osl = optional_import("openslide") FILE_URL = "http://openslide.cs.cmu.edu/download/openslide-testdata/Generic-TIFF/CMU-1.tiff" FILE_PATH = os.path.join(os.path.dirname(__file__), "testing_data", os.path.basename(FILE_URL)) MASK1 = os.path.join(os.path.dirname(__file__), "testing_data", "tissue_mask1.npy") MASK2 = os.path.join(os.path.dirname(__file__), "testing_data", "tissue_mask2.npy") MASK4 = os.path.join(os.path.dirname(__file__), "testing_data", "tissue_mask4.npy") HEIGHT = 32914 WIDTH = 46000 def prepare_data(): mask = np.zeros((WIDTH // 2, HEIGHT // 2)) mask[100, 100] = 1 np.save(MASK1, mask) mask[100, 100:102] = 1 np.save(MASK2, mask) mask[100:102, 100:102] = 1 np.save(MASK4, mask) TEST_CASE_0 = [ { "data": [ {"image": FILE_PATH, "mask": MASK1}, ], "patch_size": 1, "image_reader_name": "cuCIM", }, [ { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [100, 100], }, ], ] TEST_CASE_1 = [ { "data": [{"image": FILE_PATH, "mask": MASK2}], "patch_size": 1, "image_reader_name": "cuCIM", }, [ { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [100, 100], }, { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [101, 100], }, ], ] TEST_CASE_2 = [ { "data": [{"image": FILE_PATH, "mask": MASK4}], "patch_size": 1, "image_reader_name": "cuCIM", }, [ { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [100, 100], }, { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [100, 101], }, { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [101, 100], }, { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [101, 101], }, ], ] TEST_CASE_3 = [ { "data": [ {"image": FILE_PATH, "mask": MASK1}, ], "patch_size": 2, "image_reader_name": "cuCIM", }, [ { "image": np.array( [ [[243, 243], [243, 243]], [[243, 243], [243, 243]], [[243, 243], [243, 243]], ], dtype=np.uint8, ), "name": "CMU-1", "mask_location": [100, 100], }, ], ] TEST_CASE_4 = [ { "data": [ {"image": FILE_PATH, "mask": MASK1}, {"image": FILE_PATH, "mask": MASK2}, ], "patch_size": 1, "image_reader_name": "cuCIM", }, [ { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [100, 100], }, { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [100, 100], }, { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [101, 100], }, ], ] TEST_CASE_OPENSLIDE_0 = [ { "data": [ {"image": FILE_PATH, "mask": MASK1}, ], "patch_size": 1, "image_reader_name": "OpenSlide", }, [ { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [100, 100], }, ], ] TEST_CASE_OPENSLIDE_1 = [ { "data": [{"image": FILE_PATH, "mask": MASK2}], "patch_size": 1, "image_reader_name": "OpenSlide", }, [ { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", 
"mask_location": [100, 100], }, { "image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8), "name": "CMU-1", "mask_location": [101, 100], }, ], ] class TestMaskedInferenceWSIDataset(unittest.TestCase): def setUp(self): prepare_data() download_url(FILE_URL, FILE_PATH, "5a3cfd4fd725c50578ddb80b517b759f") @parameterized.expand( [ TEST_CASE_0, TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, ] ) @skipUnless(has_cim, "Requires CuCIM") @skip_if_quick def test_read_patches_cucim(self, input_parameters, expected): dataset = MaskedInferenceWSIDataset(**input_parameters) self.compare_samples_expected(dataset, expected) @parameterized.expand( [ TEST_CASE_OPENSLIDE_0, TEST_CASE_OPENSLIDE_1, ] ) @skipUnless(has_osl, "Requires OpenSlide") @skip_if_quick def test_read_patches_openslide(self, input_parameters, expected): dataset = MaskedInferenceWSIDataset(**input_parameters) self.compare_samples_expected(dataset, expected) def compare_samples_expected(self, dataset, expected): for i in range(len(dataset)): self.assertTupleEqual(dataset[i][0]["image"].shape, expected[i]["image"].shape) self.assertIsNone(assert_array_equal(dataset[i][0]["image"], expected[i]["image"])) self.assertEqual(dataset[i][0]["name"], expected[i]["name"]) self.assertListEqual(dataset[i][0]["mask_location"], expected[i]["mask_location"]) if __name__ == "__main__": unittest.main()
27
95
0.501937
1,442
0.223462
0
0
799
0.123818
0
0
1,269
0.196653
7c8d38953001878c9a523157e3f09b0df0983623
913
py
Python
manga_py/providers/doujins_com.py
paulolimac/manga-py
3d180846750a4e770b5024eb8cd15629362875b1
[ "MIT" ]
1
2020-11-19T00:40:49.000Z
2020-11-19T00:40:49.000Z
manga_py/providers/doujins_com.py
paulolimac/manga-py
3d180846750a4e770b5024eb8cd15629362875b1
[ "MIT" ]
null
null
null
manga_py/providers/doujins_com.py
paulolimac/manga-py
3d180846750a4e770b5024eb8cd15629362875b1
[ "MIT" ]
null
null
null
from manga_py.provider import Provider from .helpers.std import Std class DoujinsCom(Provider, Std): img_selector = '#image-container img.doujin' def get_archive_name(self) -> str: return 'archive' def get_chapter_index(self) -> str: return '0' def get_main_content(self): return self._get_content('{}/gallery/{}') def get_manga_name(self) -> str: return self._get_name('/gallery/([^/]+)') def get_chapters(self): return [b''] def get_files(self): items = self.document_fromstring(self.content, self.img_selector) return [i.get('data-file').replace('&amp;', '&') for i in items] def get_cover(self) -> str: return self._cover_from_content(self.img_selector) def book_meta(self) -> dict: # todo meta pass def chapter_for_json(self): return self.get_url() main = DoujinsCom
23.410256
73
0.634173
822
0.900329
0
0
0
0
0
0
109
0.119387
7c8e9965cc893f149c68d0938c7cdd288fb5e3a7
980
py
Python
src/urh/ui/delegates/CheckBoxDelegate.py
awesome-archive/urh
c8c3aabc9d637ca660d8c72c3d8372055e0f3ec7
[ "Apache-2.0" ]
1
2017-06-21T02:37:16.000Z
2017-06-21T02:37:16.000Z
src/urh/ui/delegates/CheckBoxDelegate.py
dspmandavid/urh
30643c1a68634b1c97eb9989485a4e96a3b038ae
[ "Apache-2.0" ]
null
null
null
src/urh/ui/delegates/CheckBoxDelegate.py
dspmandavid/urh
30643c1a68634b1c97eb9989485a4e96a3b038ae
[ "Apache-2.0" ]
null
null
null
from PyQt5.QtCore import QModelIndex, QAbstractItemModel, Qt, pyqtSlot from PyQt5.QtWidgets import QItemDelegate, QWidget, QStyleOptionViewItem, QCheckBox class CheckBoxDelegate(QItemDelegate): def __init__(self, parent=None): super().__init__(parent) self.enabled = True def createEditor(self, parent: QWidget, option: QStyleOptionViewItem, index: QModelIndex): editor = QCheckBox(parent) editor.stateChanged.connect(self.stateChanged) return editor def setEditorData(self, editor: QCheckBox, index: QModelIndex): editor.blockSignals(True) editor.setChecked(index.model().data(index)) self.enabled = editor.isChecked() editor.blockSignals(False) def setModelData(self, editor: QCheckBox, model: QAbstractItemModel, index: QModelIndex): model.setData(index, editor.isChecked(), Qt.EditRole) @pyqtSlot() def stateChanged(self): self.commitData.emit(self.sender())
37.692308
94
0.715306
823
0.839796
0
0
83
0.084694
0
0
0
0
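A minimal wiring sketch for the delegate above (the import path is hypothetical; the Qt calls are standard PyQt5 API): attach it to one column of a QTableView backed by a boolean-valued model.

import sys
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QStandardItem, QStandardItemModel
from PyQt5.QtWidgets import QApplication, QTableView
from checkbox_delegate import CheckBoxDelegate  # hypothetical module holding the class above

app = QApplication(sys.argv)

model = QStandardItemModel(3, 1)
for row in range(3):
    item = QStandardItem()
    item.setData(row % 2 == 0, Qt.EditRole)  # boolean payload the delegate will toggle
    model.setItem(row, 0, item)

view = QTableView()
view.setModel(model)
view.setItemDelegateForColumn(0, CheckBoxDelegate(view))  # checkbox editor for column 0
view.show()
sys.exit(app.exec_())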
7c8eb61b685c469f781463c9f7be05e90e8308c7
1,408
py
Python
neural_network/backup_casestudy/denbigh/tf_RNN.py
acceleratedmaterials/AMDworkshop_demo
e7c2b931e023fc00ff7494b8acb2181f5c75bc4e
[ "MIT" ]
5
2019-04-02T03:20:43.000Z
2021-07-13T18:23:26.000Z
neural_network/backup_casestudy/denbigh/tf_RNN.py
NUS-SSE/AMDworkshop_demo
edbd6c60957dd0d83c3ef43c7e9e28ef1fef3bd9
[ "MIT" ]
null
null
null
neural_network/backup_casestudy/denbigh/tf_RNN.py
NUS-SSE/AMDworkshop_demo
edbd6c60957dd0d83c3ef43c7e9e28ef1fef3bd9
[ "MIT" ]
5
2019-05-12T17:41:58.000Z
2021-06-08T04:38:35.000Z
# -*- coding: utf-8 -*-
'''
Framework: Tensorflow
Training samples: 1600
Validation samples: 400
RNN with 128 units
Optimizer: Adam
Epochs: 100
Loss: Cross Entropy
Activation function: Relu for network and Soft-max for regression
Regularization: Drop-out, keep_prob = 0.5
Accuracy of Validation set: 95%
'''
from __future__ import division, print_function, absolute_import
import tflearn
from tflearn.data_utils import to_categorical, pad_sequences
from data_denbigh import *
X, Y = getDenbighData()
#Hyperparams
neurons_num = 128 # Number of neurons in the RNN layer
keep_prob = 0.5 # Keep probability for the drop-out regularization
learning_rate = 0.001 # Learning rate for the Adam optimizer
batch_size = 32 # Batch size
n_epoch = 100 # Number of epochs
#Data preprocessing: pad inputs to fixed-length vectors and one-hot encode the labels
X = pad_sequences(X, maxlen=5, value=0.)
Y = to_categorical(Y, 2)
#Build the network
net = tflearn.input_data([None, 5])
net = tflearn.embedding(net, input_dim=10000, output_dim=128)
net = tflearn.simple_rnn(net, neurons_num, dropout=keep_prob)
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate,
                         loss='categorical_crossentropy')
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(X, Y, validation_set=0.2, show_metric=True,
          batch_size=batch_size, n_epoch=n_epoch)
model.save('./model.tfl')
37.052632
76
0.769176
0
0
0
0
0
0
0
0
593
0.421165
7c8eba30a07960e7e0f748300f8823eed9acd88c
5,569
py
Python
code/tests/test_tile_tf.py
Nocty-chan/cs224n-squad
0c0b342621e038aba8e20ff411da13dfa173351d
[ "Apache-2.0" ]
2
2018-04-15T06:13:41.000Z
2019-07-25T20:22:34.000Z
code/tests/test_tile_tf.py
Nocty-chan/cs224n-squad
0c0b342621e038aba8e20ff411da13dfa173351d
[ "Apache-2.0" ]
1
2020-11-10T04:51:36.000Z
2020-11-10T04:51:36.000Z
code/tests/test_tile_tf.py
Nocty-chan/cs224n-squad
0c0b342621e038aba8e20ff411da13dfa173351d
[ "Apache-2.0" ]
3
2018-08-08T08:48:04.000Z
2020-02-10T09:52:41.000Z
import numpy as np import tensorflow as tf H = 2 N = 2 M = 3 BS = 10 def my_softmax(arr): max_elements = np.reshape(np.max(arr, axis = 2), (BS, N, 1)) arr = arr - max_elements exp_array = np.exp(arr) print (exp_array) sum_array = np.reshape(np.sum(exp_array, axis=2), (BS, N, 1)) return exp_array /sum_array def masked_softmax(logits, mask, dim): """ Takes masked softmax over given dimension of logits. Inputs: logits: Numpy array. We want to take softmax over dimension dim. mask: Numpy array of same shape as logits. Has 1s where there's real data in logits, 0 where there's padding dim: int. dimension over which to take softmax Returns: masked_logits: Numpy array same shape as logits. This is the same as logits, but with 1e30 subtracted (i.e. very large negative number) in the padding locations. prob_dist: Numpy array same shape as logits. The result of taking softmax over masked_logits in given dimension. Should be 0 in padding locations. Should sum to 1 over given dimension. """ exp_mask = (1 - tf.cast(mask, 'float64')) * (-1e30) # -large where there's padding, 0 elsewhere print (exp_mask) masked_logits = tf.add(logits, exp_mask) # where there's padding, set logits to -large prob_dist = tf.nn.softmax(masked_logits, dim) return masked_logits, prob_dist def test_build_similarity(contexts, questions): w_sim_1 = tf.get_variable('w_sim_1', initializer=w_1) # 2 * H w_sim_2 = tf.get_variable('w_sim_2', initializer=w_2) # 2 * self.hidden_size w_sim_3 = tf.get_variable('w_sim_3', initializer=w_3) # 2 * self.hidden_size q_tile = tf.tile(tf.expand_dims(questions, 0), [N, 1, 1, 1]) # N x BS x M x 2H q_tile = tf.transpose(q_tile, (1, 0, 3, 2)) # BS x N x 2H x M contexts = tf.expand_dims(contexts, -1) # BS x N x 2H x 1 result = (contexts * q_tile) # BS x N x 2H x M tf.assert_equal(tf.shape(result), [BS, N, 2 * H, M]) result = tf.transpose(result, (0, 1, 3, 2)) # BS x N x M x 2H result = tf.reshape(result, (-1, N * M, 2 * H)) # BS x (NxM) x 2H tf.assert_equal(tf.shape(result), [BS, N*M, 2*H]) # w_sim_1 = tf.tile(tf.expand_dims(w_sim_1, 0), [BS, 1]) # w_sim_2 = tf.tile(tf.expand_dims(w_sim_2, 0), [BS, 1]) # w_sim_3 = tf.tile(tf.expand_dims(w_sim_3, 0), [BS, 1]) term1 = tf.matmul(tf.reshape(contexts, (BS * N, 2*H)), tf.expand_dims(w_sim_1, -1)) # BS x N term1 = tf.reshape(term1, (-1, N)) term2 = tf.matmul(tf.reshape(questions, (BS * M, 2*H)), tf.expand_dims(w_sim_2, -1)) # BS x M term2 = tf.reshape(term2, (-1, M)) term3 = tf.matmul(tf.reshape(result, (BS * N * M, 2* H)), tf.expand_dims(w_sim_3, -1)) term3 = tf.reshape(term3, (-1, N, M)) # BS x N x M S = tf.reshape(term1,(-1, N, 1)) + term3 + tf.reshape(term2, (-1, 1, M)) return S def test_build_sim_mask(): context_mask = np.array([True, True]) # BS x N question_mask = np.array([True, True, False]) # BS x M context_mask = np.tile(context_mask, [BS, 1]) question_mask = np.tile(question_mask, [BS, 1]) context_mask = tf.get_variable('context_mask', initializer=context_mask) question_mask = tf.get_variable('question_mask', initializer=question_mask) context_mask = tf.expand_dims(context_mask, -1) # BS x N x 1 question_mask = tf.expand_dims(question_mask, -1) # BS x M x 1 question_mask = tf.transpose(question_mask, (0, 2, 1)) # BS x 1 x M sim_mask = tf.matmul(tf.cast(context_mask, dtype=tf.int32), tf.cast(question_mask, dtype=tf.int32)) # BS x N x M return sim_mask def test_build_c2q(S, S_mask, questions): _, alpha = masked_softmax(S, mask, 2) # BS x N x M return tf.matmul(alpha, questions) def test_build_q2c(S, S_mask, contexts): # S = BS x N x M # contexts = BS 
x N x 2H m = tf.reduce_max(S * tf.cast(S_mask, dtype=tf.float64), axis=2) # BS x N beta = tf.expand_dims(tf.nn.softmax(m), -1) # BS x N x 1 beta = tf.transpose(beta, (0, 2, 1)) q2c = tf.matmul(beta, contexts) return m, beta, q2c def test_concatenation(c2q, q2c): q2c = tf.tile(q2c, (1, N, 1)) output = tf.concat([c2q, q2c], axis=2) tf.assert_equal(tf.shape(output), [BS, N, 4*H]) return output if __name__== "__main__": w_1 = np.array([1., 2., 3., 4.]) w_2 = np.array([5., 6., 7., 8.]) w_3 = np.array([13., 12., 11., 10.]) c = np.array([[[1., 2., 3., 4.], [5., 6., 7., 8.]]]) # BS x N x 2H q = np.array([[[1., 2., 3., 0.], [5., 6., 7., 4.], [8., 9. , 10., 11.]]]) # BS x M x 2H c = np.tile(c, [BS, 1, 1]) q = np.tile(q, [BS, 1, 1]) questions = tf.get_variable('questions', initializer=q) contexts = tf.get_variable('contexts', initializer=c) S = test_build_similarity(contexts, questions) mask = test_build_sim_mask() c2q = test_build_c2q(S, mask, questions) m, beta, q2c = test_build_q2c(S, mask, contexts) output = test_concatenation(c2q, q2c) init = tf.global_variables_initializer() with tf.Session() as sess: sess.run(init) S_result, mask_result, c2q_r = sess.run([S, mask, c2q]) actual_result = np.tile(np.array([[228, 772, 1372], [548, 1828, 3140]]), [BS, 1, 1]) assert np.array_equal(actual_result, S_result), 'Arrays are not equal' print ("Building similarity matrix is successful!") print ("Context 2 Question attention") m_r, beta_r, q2c_r = sess.run([m, beta, q2c]) output_r = sess.run(output)
41.87218
99
0.625606
0
0
0
0
0
0
0
0
1,532
0.275094
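The masked_softmax docstring above is easy to check with a plain NumPy sketch (self-contained, no TensorFlow): padding positions receive a very large negative logit, so their probability collapses to ~0 while the real positions still sum to 1.

import numpy as np

def np_masked_softmax(logits, mask, axis):
    masked = logits + (1 - mask) * (-1e30)       # -large where mask == 0
    shifted = masked - masked.max(axis=axis, keepdims=True)
    exp = np.exp(shifted)
    return exp / exp.sum(axis=axis, keepdims=True)

logits = np.array([[1.0, 2.0, 3.0]])
mask = np.array([[1.0, 1.0, 0.0]])               # last position is padding
print(np_masked_softmax(logits, mask, axis=1))   # third entry ~0, first two sum to ~1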
7c8edd5a1cedfd0895ce2bb9c6148ce0241c7af7
7,174
py
Python
specutils/tests/test_smoothing.py
hamogu/specutils
b873f2ac9b3c207c9e670246d102f46a9606d6ed
[ "BSD-3-Clause" ]
null
null
null
specutils/tests/test_smoothing.py
hamogu/specutils
b873f2ac9b3c207c9e670246d102f46a9606d6ed
[ "BSD-3-Clause" ]
null
null
null
specutils/tests/test_smoothing.py
hamogu/specutils
b873f2ac9b3c207c9e670246d102f46a9606d6ed
[ "BSD-3-Clause" ]
null
null
null
import numpy as np import pytest from astropy import convolution from scipy.signal import medfilt import astropy.units as u from ..spectra.spectrum1d import Spectrum1D from ..tests.spectral_examples import simulated_spectra from ..manipulation.smoothing import (convolution_smooth, box_smooth, gaussian_smooth, trapezoid_smooth, median_smooth) def compare_flux(flux_smooth1, flux_smooth2, flux_original, rtol=0.01): """ There are two things to compare for each set of smoothing: 1. Compare the smoothed flux from the astropy machinery vs the smoothed flux from specutils. This is done by comparing flux_smooth1 and flux_smooth2. 2. Next we want to compare the smoothed flux to the original flux. This is a little more difficult as smoothing will make a difference for median filter, but less so for convolution based smoothing if the kernel is normalized (area under the kernel = 1). In this second case the rtol (relative tolerance) is used judiciously. """ # Compare, element by element, the two smoothed fluxes. assert np.allclose(flux_smooth1, flux_smooth2) # Compare the total spectral flux of the smoothed to the original. assert np.allclose(sum(flux_smooth1), sum(flux_original), rtol=rtol) def test_smooth_custom_kernel(simulated_spectra): """ Test CustomKernel smoothing with correct parmaeters. """ # Create the original spectrum spec1 = simulated_spectra.s1_um_mJy_e1 flux_original = spec1.flux # Create a custom kernel (some weird asymmetric-ness) numpy_kernel = np.array([0.5, 1, 2, 0.5, 0.2]) numpy_kernel = numpy_kernel / np.sum(numpy_kernel) custom_kernel = convolution.CustomKernel(numpy_kernel) flux_smoothed_astropy = convolution.convolve(flux_original, custom_kernel) # Calculate the custom smoothed spec1_smoothed = convolution_smooth(spec1, custom_kernel) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value) @pytest.mark.parametrize("width", [1, 2.3]) def test_smooth_box_good(simulated_spectra, width): """ Test Box1DKernel smoothing with correct parmaeters. Width values need to be a number greater than 0. """ # Create the original spectrum spec1 = simulated_spectra.s1_um_mJy_e1 flux_original = spec1.flux # Calculate the smoothed flux using Astropy box_kernel = convolution.Box1DKernel(width) flux_smoothed_astropy = convolution.convolve(flux_original, box_kernel) # Calculate the box smoothed spec1_smoothed = box_smooth(spec1, width) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value) # Check the input and output units assert spec1.wavelength.unit == spec1_smoothed.wavelength.unit assert spec1.flux.unit == spec1_smoothed.flux.unit @pytest.mark.parametrize("width", [-1, 0, 'a']) def test_smooth_box_bad(simulated_spectra, width): """ Test Box1DKernel smoothing with incorrect parmaeters. Width values need to be a number greater than 0. """ # Create the spectrum spec1 = simulated_spectra.s1_um_mJy_e1 # Test bad input parameters with pytest.raises(ValueError): box_smooth(spec1, width) @pytest.mark.parametrize("stddev", [1, 2.3]) def test_smooth_gaussian_good(simulated_spectra, stddev): """ Test Gaussian1DKernel smoothing with correct parmaeters. Standard deviation values need to be a number greater than 0. 
""" # Create the spectrum spec1 = simulated_spectra.s1_um_mJy_e1 flux_original = spec1.flux # Calculate the smoothed flux using Astropy gaussian_kernel = convolution.Gaussian1DKernel(stddev) flux_smoothed_astropy = convolution.convolve(flux_original, gaussian_kernel) # Test gaussian smoothing spec1_smoothed = gaussian_smooth(spec1, stddev) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value, rtol=0.02) # Check the input and output units assert spec1.wavelength.unit == spec1_smoothed.wavelength.unit assert spec1.flux.unit == spec1_smoothed.flux.unit @pytest.mark.parametrize("stddev", [-1, 0, 'a']) def test_smooth_gaussian_bad(simulated_spectra, stddev): """ Test MexicanHat1DKernel smoothing with incorrect parmaeters. Standard deviation values need to be a number greater than 0. """ # Create the spectrum spec1 = simulated_spectra.s1_um_mJy_e1 # Test bad input paramters with pytest.raises(ValueError): gaussian_smooth(spec1, stddev) @pytest.mark.parametrize("stddev", [1, 2.3]) def test_smooth_trapezoid_good(simulated_spectra, stddev): """ Test Trapezoid1DKernel smoothing with correct parmaeters. Standard deviation values need to be a number greater than 0. """ # Create the spectrum spec1 = simulated_spectra.s1_um_mJy_e1 flux_original = spec1.flux # Create the flux_smoothed which is what we want to compare to trapezoid_kernel = convolution.Trapezoid1DKernel(stddev) flux_smoothed_astropy = convolution.convolve(flux_original, trapezoid_kernel) # Test trapezoid smoothing spec1_smoothed = trapezoid_smooth(spec1, stddev) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value) # Check the input and output units assert spec1.wavelength.unit == spec1_smoothed.wavelength.unit assert spec1.flux.unit == spec1_smoothed.flux.unit @pytest.mark.parametrize("stddev", [-1, 0, 'a']) def test_smooth_trapezoid_bad(simulated_spectra, stddev): """ Test Trapezoid1DKernel smoothing with incorrect parmaeters. Standard deviation values need to be a number greater than 0. """ # Create the spectrum spec1 = simulated_spectra.s1_um_mJy_e1 # Test bad parameters with pytest.raises(ValueError): trapezoid_smooth(spec1, stddev) @pytest.mark.parametrize("width", [1, 3, 9]) def test_smooth_median_good(simulated_spectra, width): """ Test Median smoothing with correct parmaeters. Width values need to be a number greater than 0. """ # Create the spectrum spec1 = simulated_spectra.s1_um_mJy_e1 flux_original = spec1.flux # Create the flux_smoothed which is what we want to compare to flux_smoothed_astropy = medfilt(flux_original, width) # Test median smoothing spec1_smoothed = median_smooth(spec1, width) compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value, rtol=0.15) # Check the input and output units assert spec1.wavelength.unit == spec1_smoothed.wavelength.unit assert spec1.flux.unit == spec1_smoothed.flux.unit @pytest.mark.parametrize("width", [-1, 0, 'a']) def test_smooth_median_bad(simulated_spectra, width): """ Test Median smoothing with incorrect parmaeters. Width values need to be a number greater than 0. """ # Create the spectrum spec1 = simulated_spectra.s1_um_mJy_e1 # Test bad parameters with pytest.raises(ValueError): median_smooth(spec1, width)
32.609091
98
0.730694
0
0
0
0
5,049
0.703791
0
0
2,764
0.38528
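A short usage sketch of the smoothing functions exercised by these tests (synthetic spectrum; the imports assume a reasonably recent specutils and astropy):

import numpy as np
import astropy.units as u
from specutils import Spectrum1D
from specutils.manipulation import box_smooth, gaussian_smooth

wave = np.linspace(1.0, 2.0, 100) * u.um
flux = (np.sin(20 * wave.value) + np.random.normal(0.0, 0.2, wave.size)) * u.mJy
spec = Spectrum1D(spectral_axis=wave, flux=flux)

smoothed_box = box_smooth(spec, width=5)       # boxcar kernel of width 5
smoothed_gauss = gaussian_smooth(spec, stddev=2)
print(smoothed_box.flux[:5], smoothed_gauss.flux[:5])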
7c909452f19de7c50d60c569038b33d1b55f15c0
909
py
Python
modules/interpolator.py
buulikduong/1d_sgl_solver
03ce0b362d45acbbd3bb35e7b604ba97982eea92
[ "BSD-2-Clause" ]
null
null
null
modules/interpolator.py
buulikduong/1d_sgl_solver
03ce0b362d45acbbd3bb35e7b604ba97982eea92
[ "BSD-2-Clause" ]
null
null
null
modules/interpolator.py
buulikduong/1d_sgl_solver
03ce0b362d45acbbd3bb35e7b604ba97982eea92
[ "BSD-2-Clause" ]
2
2020-09-01T13:02:49.000Z
2021-08-15T09:10:17.000Z
"""Module interpolating mathematical functions out of support points""" from scipy.interpolate import interp1d, lagrange, CubicSpline def interpolator(x_sup, y_sup, method): """Interpolates a mathematical function from a given set of points using either linear, polynomial or cubic spline for the interpolation. Args: x_sup (list): x-coordinates of the function y_sup (list): y-coordinates of the function method (string): name of the interpolation method to be used Returns: intfunc: interpolated function """ if method == "linear": intfunc = interp1d(x_sup, y_sup, kind="linear") return intfunc elif method == "polynomial": intfunc = lagrange(x_sup, y_sup) return intfunc elif method == "cspline": intfunc = CubicSpline(x_sup, y_sup, bc_type="natural") return intfunc return None
29.322581
71
0.672167
0
0
0
0
0
0
0
0
507
0.557756
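A usage sketch for the helper above (the import path follows the repository layout modules/interpolator.py; the sample points are made up):

import numpy as np
from modules.interpolator import interpolator

x_sup = np.linspace(0.0, np.pi, 8)
y_sup = np.sin(x_sup)

f_lin = interpolator(x_sup, y_sup, "linear")     # piecewise-linear interpolant
f_csp = interpolator(x_sup, y_sup, "cspline")    # natural cubic spline
print(f_lin(1.0), f_csp(1.0), np.sin(1.0))       # the spline should be closest to sin(1)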
7c9109fd0312f441ea7db6be13582d7563d361c0
196
py
Python
frappe/patches/v13_0/remove_web_view.py
chentaoz/frappe
ee3c4943bf6177ad3b410cdb0d802af486751a65
[ "MIT" ]
3,755
2015-01-06T07:47:43.000Z
2022-03-31T20:54:23.000Z
frappe/patches/v13_0/remove_web_view.py
chentaoz/frappe
ee3c4943bf6177ad3b410cdb0d802af486751a65
[ "MIT" ]
7,369
2015-01-01T19:59:41.000Z
2022-03-31T23:02:05.000Z
frappe/patches/v13_0/remove_web_view.py
chentaoz/frappe
ee3c4943bf6177ad3b410cdb0d802af486751a65
[ "MIT" ]
2,685
2015-01-07T17:51:03.000Z
2022-03-31T23:16:24.000Z
import frappe def execute(): frappe.delete_doc_if_exists("DocType", "Web View") frappe.delete_doc_if_exists("DocType", "Web View Component") frappe.delete_doc_if_exists("DocType", "CSS Class")
32.666667
61
0.77551
0
0
0
0
0
0
0
0
68
0.346939
7c9227a3cbdbdfda32f8e1f7af19e23d5f84fca1
946
py
Python
games.py
cpratim/DSA-Research-Paper
ebb856ef62f8a04aa72380e39afdde958eed529a
[ "MIT" ]
null
null
null
games.py
cpratim/DSA-Research-Paper
ebb856ef62f8a04aa72380e39afdde958eed529a
[ "MIT" ]
null
null
null
games.py
cpratim/DSA-Research-Paper
ebb856ef62f8a04aa72380e39afdde958eed529a
[ "MIT" ]
null
null
null
import json import matplotlib.pyplot as plt from pprint import pprint import numpy as np from scipy.stats import linregress from util.stats import * with open('data/game_stats.json', 'r') as f: df = json.load(f) X, y = [], [] for match, stats in df.items(): home, away = stats['home'], stats['away'] if home['mp'] != away['mp'] != '240': continue try: ft_dif = float(home['fta']) - float(away['fta']) pt_dif = float(home['pts']) - float(away['pts']) if abs(pt_dif) > 10: continue except: continue X.append(ft_dif) y.append(pt_dif) c = 0 for f, p in zip(X, y): if f * p > 0: c += 1 print(c / len(X)) slope, intercept, r, p, std = linregress(X, y) f = lambda x: x*slope + intercept fit_y = [f(min(X)), f(max(X))] plt.xlabel('Free Throw Attempts') plt.ylabel('Point Differential') plt.title('FTA vs Point Differential') print(correlation(X, y)) plt.plot([min(X), max(X)], fit_y, color = 'red') plt.scatter(X, y) plt.show()
22
51
0.64482
0
0
0
0
0
0
0
0
143
0.151163
7c924b0af1eb750ce0d3f38bab21b79619b4ba48
6,255
py
Python
src/generate_data.py
gycggd/leaf-classification
b37dd4a6a262562c454038218c1472329e54128b
[ "MIT" ]
null
null
null
src/generate_data.py
gycggd/leaf-classification
b37dd4a6a262562c454038218c1472329e54128b
[ "MIT" ]
null
null
null
src/generate_data.py
gycggd/leaf-classification
b37dd4a6a262562c454038218c1472329e54128b
[ "MIT" ]
null
null
null
import os import numpy as np import pandas as pd import tensorflow as tf from keras.preprocessing.image import ImageDataGenerator from keras.preprocessing.image import img_to_array, load_img from keras.utils.np_utils import to_categorical from sklearn.model_selection import StratifiedShuffleSplit from sklearn.preprocessing import LabelEncoder, StandardScaler def load_numeric_training(standardize=True): data = pd.read_csv('../train.csv') ID = data.pop('id') y = data.pop('species') y = LabelEncoder().fit(y).transform(y) X = StandardScaler().fit(data).transform(data) if standardize else data.values return ID.values, X, y def load_numeric_test(standardize=True): data = pd.read_csv('../test.csv') ID = data.pop('id') test = StandardScaler().fit(data).transform(data) if standardize else data.values return ID.values, test def resize_img(img, max_dim=96): max_axis = np.argmax(img.size) scale = max_dim / img.size[max_axis] return img.resize((int(img.size[0] * scale), int(img.size[1] * scale))) def load_img_data(ids, max_dim=96, center=True): X = np.empty((len(ids), max_dim, max_dim, 1)) for i, id in enumerate(ids): img = load_img('../images/{}.jpg'.format(id), grayscale=True) img = resize_img(img, max_dim=max_dim) x = img_to_array(img) h, w = x.shape[:2] if center: h1 = (max_dim - h) >> 1 h2 = h1 + h w1 = (max_dim - w) >> 1 w2 = w1 + w else: h1, h2, w1, w2 = 0, h, 0, w X[i][h1:h2, w1:w2][:] = x return np.around(X / 255) def load_train_data(split=0.9, random_state=7): ID, X_num_train, y = load_numeric_training() X_img_train = load_img_data(ID) sss = StratifiedShuffleSplit(n_splits=1, train_size=split, test_size=1 - split, random_state=random_state) train_idx, val_idx = next(sss.split(X_num_train, y)) ID_tr, X_num_tr, X_img_tr, y_tr = ID[train_idx], X_num_train[train_idx], X_img_train[train_idx], y[train_idx] ID_val, X_num_val, X_img_val, y_val = ID[val_idx], X_num_train[val_idx], X_img_train[val_idx], y[val_idx] return (ID_tr, X_num_tr, X_img_tr, y_tr), (ID_val, X_num_val, X_img_val, y_val) def load_test_data(): ID, X_num_test = load_numeric_test() X_img_test = load_img_data(ID) return ID, X_num_test, X_img_test print('Loading train data ...') (ID_train, X_num_tr, X_img_tr, y_tr), (ID_val, X_num_val, X_img_val, y_val) = load_train_data() # Prepare ID-to-label and ID-to-numerical dictionary ID_y_dic, ID_num_dic = {}, {} for i in range(len(ID_train)): ID_y_dic[ID_train[i]] = y_tr[i] ID_num_dic[ID_train[i]] = X_num_tr[i, :] print('Loading test data ...') ID_test, X_num_test, X_img_test = load_test_data() # Convert label to categorical/one-hot ID_train, y_tr, y_val = to_categorical(ID_train), to_categorical(y_tr), to_categorical((y_val)) def _bytes_feature(value): return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def _int64_feature(value): return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) def _float32_feature(value): return tf.train.Feature(float_list=tf.train.FloatList(value=value)) def write_val_data(): val_data_path = '../tfrecords/val_data_1.tfrecords' if os.path.exists(val_data_path): print('Warning: old file exists, removed.') os.remove(val_data_path) val_image, val_num, val_label = X_img_val.astype(np.bool), X_num_val.astype(np.float64), y_val.astype(np.bool) print(val_image.shape, val_num.shape, val_label.shape) val_writer = tf.python_io.TFRecordWriter(val_data_path) print('Writing data into tfrecord ...') for i in range(len(val_image)): image, num, label = val_image[i], val_num[i], val_label[i] feature = {'image': _bytes_feature(image.tostring()), 'num': 
_bytes_feature(num.tostring()), 'label': _bytes_feature(label.tostring())} example = tf.train.Example(features=tf.train.Features(feature=feature)) val_writer.write(example.SerializeToString()) print('Done!') def write_train_data(): imgen = ImageDataGenerator(rotation_range=20, zoom_range=0.2, horizontal_flip=True, vertical_flip=True, fill_mode='nearest') imgen_train = imgen.flow(X_img_tr, ID_train, batch_size=32, seed=7) print('Generating augmented images') all_images = [] all_ID = [] p = True for i in range(28 * 200): print('Generating augmented images for epoch {}, batch {}'.format(i // 28, i % 28)) X, ID = imgen_train.next() all_images.append(X) all_ID.append(np.argmax(ID, axis=1)) all_images = np.concatenate(all_images).astype(np.bool) all_ID = np.concatenate(all_ID) all_y = np.zeros(all_ID.shape) all_nums = np.zeros((all_ID.shape[0], X_num_tr.shape[1])) for i in range(len(all_ID)): all_nums[i, :] = ID_num_dic[all_ID[i]] all_y[i] = ID_y_dic[all_ID[i]] all_y = to_categorical(all_y).astype(np.bool) print('Data shapes:') print('Image:', all_images.shape) print('Label:', all_y.shape) print('Numerical:', all_nums.shape) train_data_path = '../tfrecords/train_data_1.tfrecords' if os.path.exists(train_data_path): print('Warning: old file exists, removed.') os.remove(train_data_path) # compression = tf.python_io.TFRecordCompressionType.GZIP # train_writer = tf.python_io.TFRecordWriter(train_data_path, options=tf.python_io.TFRecordOptions(compression)) train_writer = tf.python_io.TFRecordWriter(train_data_path) print('Writing data into tfrecord ...') for i in range(len(all_images)): if i % 891 == 0: print('Writing {} th epoch data ...'.format(i // 891)) image, num, label = all_images[i], all_nums[i], all_y[i] feature = {'image': _bytes_feature(image.tostring()), 'num': _bytes_feature(num.tostring()), 'label': _bytes_feature(label.tostring())} example = tf.train.Example(features=tf.train.Features(feature=feature)) train_writer.write(example.SerializeToString()) print('Done!') write_val_data()
36.794118
116
0.672422
0
0
0
0
0
0
0
0
790
0.126299
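For completeness, a sketch of how the records written above could be read back with the matching TF1-style API (dtypes follow the writer: boolean images and labels stored as raw bytes, float64 numerical features):

import tensorflow as tf

def parse_example(serialized):
    features = tf.parse_single_example(
        serialized,
        features={
            'image': tf.FixedLenFeature([], tf.string),
            'num': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.string),
        })
    image = tf.decode_raw(features['image'], tf.uint8)   # np.bool stored as 1-byte values
    num = tf.decode_raw(features['num'], tf.float64)
    label = tf.decode_raw(features['label'], tf.uint8)
    return image, num, label

dataset = tf.data.TFRecordDataset('../tfrecords/val_data_1.tfrecords').map(parse_example)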
7c9293b09122efb5181f7494471359a909feb339
201
py
Python
2650-construindo-muralhas.py
ErickSimoes/URI-Online-Judge
7e6f141db2647b1d0d69951b064bd95b0ce4ba1a
[ "MIT" ]
null
null
null
2650-construindo-muralhas.py
ErickSimoes/URI-Online-Judge
7e6f141db2647b1d0d69951b064bd95b0ce4ba1a
[ "MIT" ]
null
null
null
2650-construindo-muralhas.py
ErickSimoes/URI-Online-Judge
7e6f141db2647b1d0d69951b064bd95b0ce4ba1a
[ "MIT" ]
1
2019-10-29T16:51:29.000Z
2019-10-29T16:51:29.000Z
# -*- coding: utf-8 -*- n, w = map(int, input().split()) for _ in range(n): entrada = input() last_space = entrada.rfind(' ') if int(entrada[last_space:]) > w: print(entrada[:last_space])
20.1
36
0.59204
0
0
0
0
0
0
0
0
26
0.129353
7c938029fd9d5d4852f7e0ef36d2f9a92b855733
2,962
py
Python
tests/assemblers/test_ensemble.py
yarix/m2cgen
f1aa01e4c70a6d1a8893e27bfbe3c36fcb1e8546
[ "MIT" ]
1
2021-05-28T06:59:21.000Z
2021-05-28T06:59:21.000Z
tests/assemblers/test_ensemble.py
yarix/m2cgen
f1aa01e4c70a6d1a8893e27bfbe3c36fcb1e8546
[ "MIT" ]
null
null
null
tests/assemblers/test_ensemble.py
yarix/m2cgen
f1aa01e4c70a6d1a8893e27bfbe3c36fcb1e8546
[ "MIT" ]
null
null
null
from sklearn import ensemble from m2cgen import assemblers, ast from tests import utils def test_single_condition(): estimator = ensemble.RandomForestRegressor(n_estimators=2, random_state=1) estimator.fit([[1], [2]], [1, 2]) assembler = assemblers.RandomForestModelAssembler(estimator) actual = assembler.assemble() expected = ast.BinNumExpr( ast.BinNumExpr( ast.NumVal(1.0), ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(1.5), ast.CompOpType.LTE), ast.NumVal(1.0), ast.NumVal(2.0)), ast.BinNumOpType.ADD), ast.NumVal(0.5), ast.BinNumOpType.MUL) assert utils.cmp_exprs(actual, expected) def test_two_conditions(): estimator = ensemble.RandomForestRegressor(n_estimators=2, random_state=13) estimator.fit([[1], [2], [3]], [1, 2, 3]) assembler = assemblers.RandomForestModelAssembler(estimator) actual = assembler.assemble() expected = ast.BinNumExpr( ast.BinNumExpr( ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(1.5), ast.CompOpType.LTE), ast.NumVal(1.0), ast.NumVal(2.0)), ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(2.5), ast.CompOpType.LTE), ast.NumVal(2.0), ast.NumVal(3.0)), ast.BinNumOpType.ADD), ast.NumVal(0.5), ast.BinNumOpType.MUL) assert utils.cmp_exprs(actual, expected) def test_multi_class(): estimator = ensemble.RandomForestClassifier( n_estimators=2, random_state=13) estimator.fit([[1], [2], [3]], [1, -1, 1]) assembler = assemblers.RandomForestModelAssembler(estimator) actual = assembler.assemble() expected = ast.BinVectorNumExpr( ast.BinVectorExpr( ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(1.5), ast.CompOpType.LTE), ast.VectorVal([ ast.NumVal(0.0), ast.NumVal(1.0)]), ast.VectorVal([ ast.NumVal(1.0), ast.NumVal(0.0)])), ast.IfExpr( ast.CompExpr( ast.FeatureRef(0), ast.NumVal(2.5), ast.CompOpType.LTE), ast.VectorVal([ ast.NumVal(1.0), ast.NumVal(0.0)]), ast.VectorVal([ ast.NumVal(0.0), ast.NumVal(1.0)])), ast.BinNumOpType.ADD), ast.NumVal(0.5), ast.BinNumOpType.MUL) assert utils.cmp_exprs(actual, expected)
29.326733
79
0.502701
0
0
0
0
0
0
0
0
0
0
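These assembler tests target m2cgen's internal AST; for orientation, a short sketch of the public API built on top of it (plain scikit-learn plus m2cgen):

import m2cgen as m2c
from sklearn import ensemble

estimator = ensemble.RandomForestRegressor(n_estimators=2, random_state=1)
estimator.fit([[1], [2], [3]], [1, 2, 3])

generated_code = m2c.export_to_python(estimator)
print(generated_code)  # a plain-Python scoring function produced from the same AST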
7c93f115e357ee6abe4ee6a425a0e90b87246382
1,834
py
Python
setup.py
Parquery/pynumenc
f14abab40b7d08c55824bf1da5b2a7026c0a7282
[ "MIT" ]
1
2018-11-09T16:16:08.000Z
2018-11-09T16:16:08.000Z
setup.py
Parquery/numenc-py
f14abab40b7d08c55824bf1da5b2a7026c0a7282
[ "MIT" ]
2
2018-11-09T12:51:40.000Z
2018-11-09T12:53:55.000Z
setup.py
Parquery/pynumenc
f14abab40b7d08c55824bf1da5b2a7026c0a7282
[ "MIT" ]
2
2019-02-26T12:40:11.000Z
2019-06-17T07:42:35.000Z
"""A setuptools based setup module. See: https://packaging.python.org/en/latest/distributing.html https://github.com/pypa/sampleproject """ import os from setuptools import setup, find_packages, Extension import pynumenc_meta # pylint: disable=redefined-builtin here = os.path.abspath(os.path.dirname(__file__)) # pylint: disable=invalid-name with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f: long_description = f.read() # pylint: disable=invalid-name setup( name=pynumenc_meta.__title__, version=pynumenc_meta.__version__, description=pynumenc_meta.__description__, long_description=long_description, url=pynumenc_meta.__url__, author=pynumenc_meta.__author__, author_email=pynumenc_meta.__author_email__, classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: End Users/Desktop', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6' ], license='License :: OSI Approved :: MIT License', keywords='C++ encode decode bytes encoding decoding sorted', packages=find_packages(exclude=['docs', 'tests']), install_requires=[], extras_require={ 'dev': [ # yapf: disable, 'docutils>=0.14,<1', 'mypy==0.641', 'hypothesis==3.82.1', 'pygments>=2.2.0,<3', 'pydocstyle>=3.0.0,<4', 'pylint==2.1.1', 'yapf==0.24.0' # yapf: enable ] }, ext_modules=[ Extension('numenc', sources=['numenc-cpp/encoder_decoder.cpp']) ], scripts=['bin/pynumenc'], py_modules=['pynumenc_meta'], package_data={'pynumenc': ['py.typed']}, data_files=[('.', ['LICENSE.txt', 'README.rst'])])
31.084746
81
0.630316
0
0
0
0
0
0
0
0
835
0.455289
7c95786ebe742f8164fbbe85994a95220ade7338
3,074
py
Python
Models/License-Plate-Recognition-Nigerian-vehicles-master/License-Plate-Recognition-Nigerian-vehicles-master/ocr.py
nipunjain099/AutoGuard
8217cd03af7927590ef3a160ecb7d9bc9f50d101
[ "MIT" ]
147
2018-12-23T09:44:36.000Z
2022-03-03T15:38:33.000Z
Models/License-Plate-Recognition-Nigerian-vehicles-master/License-Plate-Recognition-Nigerian-vehicles-master/ocr.py
nipunjain099/AutoGuard
8217cd03af7927590ef3a160ecb7d9bc9f50d101
[ "MIT" ]
17
2018-12-25T16:04:34.000Z
2022-01-13T00:44:21.000Z
Models/License-Plate-Recognition-Nigerian-vehicles-master/License-Plate-Recognition-Nigerian-vehicles-master/ocr.py
nipunjain099/AutoGuard
8217cd03af7927590ef3a160ecb7d9bc9f50d101
[ "MIT" ]
77
2018-12-19T03:03:14.000Z
2022-03-13T17:00:38.000Z
import numpy as np from skimage.transform import resize from skimage import measure from skimage.measure import regionprops class OCROnObjects(): def __init__(self, license_plate): character_objects = self.identify_boundary_objects(license_plate) self.get_regions(character_objects, license_plate) def identify_boundary_objects(self, a_license_plate): labelImage = measure.label(a_license_plate) character_dimensions = (0.4*a_license_plate.shape[0], 0.85*a_license_plate.shape[0], 0.04*a_license_plate.shape[1], 0.15*a_license_plate.shape[1]) minHeight, maxHeight, minWidth, maxWidth = character_dimensions regionLists = regionprops(labelImage) return regionLists def get_regions(self, character_objects, a_license_plate): """ used to map out regions where the license plate charcters are the principle of connected component analysis and labelling were used Parameters: ----------- a_license_plate: 2D numpy binary image of the license plate Returns: -------- a dictionary containing the index fullscale: 3D array containig 2D array of each character columnsVal: 1D array the starting column of each character coordinates: """ cord = [] counter=0 column_list = [] character_dimensions = (0.35*a_license_plate.shape[0], 0.60*a_license_plate.shape[0], 0.05*a_license_plate.shape[1], 0.15*a_license_plate.shape[1]) minHeight, maxHeight, minWidth, maxWidth = character_dimensions for regions in character_objects: minimumRow, minimumCol, maximumRow, maximumCol = regions.bbox character_height = maximumRow - minimumRow character_width = maximumCol - minimumCol roi = a_license_plate[minimumRow:maximumRow, minimumCol:maximumCol] if character_height > minHeight and character_height < maxHeight and character_width > minWidth and character_width < maxWidth: if counter == 0: samples = resize(roi, (20,20)) cord.append(regions.bbox) counter += 1 elif counter == 1: roismall = resize(roi, (20,20)) samples = np.concatenate((samples[None,:,:], roismall[None,:,:]), axis=0) cord.append(regions.bbox) counter+=1 else: roismall = resize(roi, (20,20)) samples = np.concatenate((samples[:,:,:], roismall[None,:,:]), axis=0) cord.append(regions.bbox) column_list.append(minimumCol) if len(column_list) == 0: self.candidates = {} else: self.candidates = { 'fullscale': samples, 'coordinates': np.array(cord), 'columnsVal': column_list } return self.candidates
43.914286
155
0.59987
2,949
0.959336
0
0
0
0
0
0
548
0.178269
7c9666a6d0704c6c5a1d15ed10e9ce79d7670676
3,215
py
Python
project/server/models.py
mvlima/flask-jwt-auth
6cb210b50888b1e9a41ea9e63a80eafcbe436560
[ "MIT" ]
null
null
null
project/server/models.py
mvlima/flask-jwt-auth
6cb210b50888b1e9a41ea9e63a80eafcbe436560
[ "MIT" ]
null
null
null
project/server/models.py
mvlima/flask-jwt-auth
6cb210b50888b1e9a41ea9e63a80eafcbe436560
[ "MIT" ]
null
null
null
# project/server/models.py

import jwt
import datetime

from project.server import app, db, bcrypt


class User(db.Model):
    """ User Model for storing user related details """
    __tablename__ = "users"

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    username = db.Column(db.String(255), unique=True, nullable=False)
    email = db.Column(db.String(255), unique=True, nullable=False)
    password = db.Column(db.String(255), nullable=False)
    name = db.Column(db.String(255), nullable=False)
    age = db.Column(db.Integer, nullable=False)
    address = db.Column(db.String(255), nullable=False)
    registered_on = db.Column(db.DateTime, nullable=False)
    admin = db.Column(db.Boolean, nullable=False, default=False)

    def __init__(self, email, username, password, name, age, address, admin=False):
        self.email = email
        self.username = username
        self.password = bcrypt.generate_password_hash(
            password, app.config.get('BCRYPT_LOG_ROUNDS')
        ).decode()
        self.name = name
        self.age = age
        self.address = address
        self.registered_on = datetime.datetime.now()
        self.admin = admin

    def encode_auth_token(self, user_id):
        """
        Generates the Auth Token
        :return: string
        """
        try:
            payload = {
                'exp': datetime.datetime.utcnow() + datetime.timedelta(days=0, seconds=5),
                'iat': datetime.datetime.utcnow(),
                'sub': user_id
            }
            return jwt.encode(
                payload,
                app.config.get('SECRET_KEY'),
                algorithm='HS256'
            )
        except Exception as e:
            return e

    @staticmethod
    def decode_auth_token(auth_token):
        """
        Validates the auth token
        :param auth_token:
        :return: integer|string
        """
        try:
            payload = jwt.decode(auth_token, app.config.get('SECRET_KEY'))
            is_blacklisted_token = BlacklistToken.check_blacklist(auth_token)
            if is_blacklisted_token:
                return 'Token blacklisted. Please log in again.'
            else:
                return payload['sub']
        except jwt.ExpiredSignatureError:
            return 'Signature expired. Please log in again.'
        except jwt.InvalidTokenError:
            return 'Invalid token. Please log in again.'


class BlacklistToken(db.Model):
    """
    Token Model for storing JWT tokens
    """
    __tablename__ = 'blacklist_tokens'

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    token = db.Column(db.String(500), unique=True, nullable=False)
    blacklisted_on = db.Column(db.DateTime, nullable=False)

    def __init__(self, token):
        self.token = token
        self.blacklisted_on = datetime.datetime.now()

    def __repr__(self):
        return '<id: token: {}'.format(self.token)

    @staticmethod
    def check_blacklist(auth_token):
        # Check whether auth token has been blacklisted
        res = BlacklistToken.query.filter_by(token=str(auth_token)).first()
        if res:
            return True
        else:
            return False
32.806122
90
0.612753
3,111
0.967652
0
0
951
0.295801
0
0
583
0.181337
7c974ea9b476fd86b7ac61a4ae4dbd0512a02f64
1,711
py
Python
letsencrypt/setup.py
ccppuu/certbot
9fead41aaf93dde0d36d4aef6fded8dd306c1ddc
[ "Apache-2.0" ]
1
2017-12-20T20:06:11.000Z
2017-12-20T20:06:11.000Z
letsencrypt/setup.py
cpu/certbot
9fead41aaf93dde0d36d4aef6fded8dd306c1ddc
[ "Apache-2.0" ]
null
null
null
letsencrypt/setup.py
cpu/certbot
9fead41aaf93dde0d36d4aef6fded8dd306c1ddc
[ "Apache-2.0" ]
null
null
null
import codecs
import os
import sys

from setuptools import setup
from setuptools import find_packages


def read_file(filename, encoding='utf8'):
    """Read unicode from given file."""
    with codecs.open(filename, encoding=encoding) as fd:
        return fd.read()


here = os.path.abspath(os.path.dirname(__file__))
readme = read_file(os.path.join(here, 'README.rst'))

# This package is a simple shim around certbot
install_requires = ['certbot']

version = '0.7.0.dev0'

setup(
    name='letsencrypt',
    version=version,
    description="ACME client",
    long_description=readme,
    url='https://github.com/letsencrypt/letsencrypt',
    author="Certbot Project",
    author_email='[email protected]',
    license='Apache License 2.0',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Console',
        'Environment :: Console :: Curses',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Security',
        'Topic :: System :: Installation/Setup',
        'Topic :: System :: Networking',
        'Topic :: System :: Systems Administration',
        'Topic :: Utilities',
    ],
    packages=find_packages(),
    include_package_data=True,
    install_requires=install_requires,
    entry_points={
        'console_scripts': [
            'letsencrypt = certbot.main:main',
        ],
    },
)
27.15873
61
0.630625
0
0
0
0
0
0
0
0
858
0.501461
7c98495a22a6d3d8755497c989624d8a5c427192
60,943
py
Python
elastalert/alerts.py
dekhrekh/elastalert
0c1ce30302c575bd0be404582cd452f38c01c774
[ "Apache-2.0" ]
null
null
null
elastalert/alerts.py
dekhrekh/elastalert
0c1ce30302c575bd0be404582cd452f38c01c774
[ "Apache-2.0" ]
null
null
null
elastalert/alerts.py
dekhrekh/elastalert
0c1ce30302c575bd0be404582cd452f38c01c774
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- import copy import datetime import json import logging import subprocess import sys import warnings from email.mime.text import MIMEText from email.utils import formatdate from smtplib import SMTP from smtplib import SMTP_SSL from smtplib import SMTPAuthenticationError from smtplib import SMTPException from socket import error import boto3 import requests import stomp from exotel import Exotel from jira.client import JIRA from jira.exceptions import JIRAError from requests.exceptions import RequestException from staticconf.loader import yaml_loader from texttable import Texttable from twilio.base.exceptions import TwilioRestException from twilio.rest import Client as TwilioClient from util import EAException from util import elastalert_logger from util import lookup_es_key from util import pretty_ts from util import ts_now from util import ts_to_dt class DateTimeEncoder(json.JSONEncoder): def default(self, obj): if hasattr(obj, 'isoformat'): return obj.isoformat() else: return json.JSONEncoder.default(self, obj) class BasicMatchString(object): """ Creates a string containing fields in match for the given rule. """ def __init__(self, rule, match): self.rule = rule self.match = match def _ensure_new_line(self): while self.text[-2:] != '\n\n': self.text += '\n' def _add_custom_alert_text(self): missing = '<MISSING VALUE>' alert_text = unicode(self.rule.get('alert_text', '')) if 'alert_text_args' in self.rule: alert_text_args = self.rule.get('alert_text_args') alert_text_values = [lookup_es_key(self.match, arg) for arg in alert_text_args] # Support referencing other top-level rule properties # This technically may not work if there is a top-level rule property with the same name # as an es result key, since it would have been matched in the lookup_es_key call above for i in xrange(len(alert_text_values)): if alert_text_values[i] is None: alert_value = self.rule.get(alert_text_args[i]) if alert_value: alert_text_values[i] = alert_value alert_text_values = [missing if val is None else val for val in alert_text_values] alert_text = alert_text.format(*alert_text_values) elif 'alert_text_kw' in self.rule: kw = {} for name, kw_name in self.rule.get('alert_text_kw').items(): val = lookup_es_key(self.match, name) # Support referencing other top-level rule properties # This technically may not work if there is a top-level rule property with the same name # as an es result key, since it would have been matched in the lookup_es_key call above if val is None: val = self.rule.get(name) kw[kw_name] = missing if val is None else val alert_text = alert_text.format(**kw) self.text += alert_text def _add_rule_text(self): self.text += self.rule['type'].get_match_str(self.match) def _add_top_counts(self): for key, counts in self.match.items(): if key.startswith('top_events_'): self.text += '%s:\n' % (key[11:]) top_events = counts.items() if not top_events: self.text += 'No events found.\n' else: top_events.sort(key=lambda x: x[1], reverse=True) for term, count in top_events: self.text += '%s: %s\n' % (term, count) self.text += '\n' def _add_match_items(self): match_items = self.match.items() match_items.sort(key=lambda x: x[0]) for key, value in match_items: if key.startswith('top_events_'): continue value_str = unicode(value) value_str.replace('\\n', '\n') if type(value) in [list, dict]: try: value_str = self._pretty_print_as_json(value) except TypeError: # Non serializable object, fallback to str pass self.text += '%s: %s\n' % (key, value_str) def _pretty_print_as_json(self, blob): try: 
return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, ensure_ascii=False) except UnicodeDecodeError: # This blob contains non-unicode, so lets pretend it's Latin-1 to show something return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, encoding='Latin-1', ensure_ascii=False) def __str__(self): self.text = '' if 'alert_text' not in self.rule: self.text += self.rule['name'] + '\n\n' self._add_custom_alert_text() self._ensure_new_line() if self.rule.get('alert_text_type') != 'alert_text_only': self._add_rule_text() self._ensure_new_line() if self.rule.get('top_count_keys'): self._add_top_counts() if self.rule.get('alert_text_type') != 'exclude_fields': self._add_match_items() return self.text class JiraFormattedMatchString(BasicMatchString): def _add_match_items(self): match_items = dict([(x, y) for x, y in self.match.items() if not x.startswith('top_events_')]) json_blob = self._pretty_print_as_json(match_items) preformatted_text = u'{{code:json}}{0}{{code}}'.format(json_blob) self.text += preformatted_text class Alerter(object): """ Base class for types of alerts. :param rule: The rule configuration. """ required_options = frozenset([]) def __init__(self, rule): self.rule = rule # pipeline object is created by ElastAlerter.send_alert() # and attached to each alerters used by a rule before calling alert() self.pipeline = None self.resolve_rule_references(self.rule) def resolve_rule_references(self, root): # Support referencing other top-level rule properties to avoid redundant copy/paste if type(root) == list: # Make a copy since we may be modifying the contents of the structure we're walking for i, item in enumerate(copy.copy(root)): if type(item) == dict or type(item) == list: self.resolve_rule_references(root[i]) else: root[i] = self.resolve_rule_reference(item) elif type(root) == dict: # Make a copy since we may be modifying the contents of the structure we're walking for key, value in root.copy().iteritems(): if type(value) == dict or type(value) == list: self.resolve_rule_references(root[key]) else: root[key] = self.resolve_rule_reference(value) def resolve_rule_reference(self, value): strValue = unicode(value) if strValue.startswith('$') and strValue.endswith('$') and strValue[1:-1] in self.rule: if type(value) == int: return int(self.rule[strValue[1:-1]]) else: return self.rule[strValue[1:-1]] else: return value def alert(self, match): """ Send an alert. Match is a dictionary of information about the alert. :param match: A dictionary of relevant information to the alert. """ raise NotImplementedError() def get_info(self): """ Returns a dictionary of data related to this alert. At minimum, this should contain a field type corresponding to the type of Alerter. """ return {'type': 'Unknown'} def create_title(self, matches): """ Creates custom alert title to be used, e.g. as an e-mail subject or JIRA issue summary. :param matches: A list of dictionaries of relevant information to the alert. 
""" if 'alert_subject' in self.rule: return self.create_custom_title(matches) return self.create_default_title(matches) def create_custom_title(self, matches): alert_subject = unicode(self.rule['alert_subject']) if 'alert_subject_args' in self.rule: alert_subject_args = self.rule['alert_subject_args'] alert_subject_values = [lookup_es_key(matches[0], arg) for arg in alert_subject_args] # Support referencing other top-level rule properties # This technically may not work if there is a top-level rule property with the same name # as an es result key, since it would have been matched in the lookup_es_key call above for i in xrange(len(alert_subject_values)): if alert_subject_values[i] is None: alert_value = self.rule.get(alert_subject_args[i]) if alert_value: alert_subject_values[i] = alert_value alert_subject_values = ['<MISSING VALUE>' if val is None else val for val in alert_subject_values] return alert_subject.format(*alert_subject_values) return alert_subject def create_alert_body(self, matches): body = self.get_aggregation_summary_text(matches) for match in matches: body += unicode(BasicMatchString(self.rule, match)) # Separate text of aggregated alerts with dashes if len(matches) > 1: body += '\n----------------------------------------\n' return body def get_aggregation_summary_text(self, matches): text = '' if 'aggregation' in self.rule and 'summary_table_fields' in self.rule: summary_table_fields = self.rule['summary_table_fields'] if not isinstance(summary_table_fields, list): summary_table_fields = [summary_table_fields] # Include a count aggregation so that we can see at a glance how many of each aggregation_key were encountered summary_table_fields_with_count = summary_table_fields + ['count'] text += "Aggregation resulted in the following data for summary_table_fields ==> {0}:\n\n".format( summary_table_fields_with_count ) text_table = Texttable() text_table.header(summary_table_fields_with_count) match_aggregation = {} # Maintain an aggregate count for each unique key encountered in the aggregation period for match in matches: key_tuple = tuple([unicode(lookup_es_key(match, key)) for key in summary_table_fields]) if key_tuple not in match_aggregation: match_aggregation[key_tuple] = 1 else: match_aggregation[key_tuple] = match_aggregation[key_tuple] + 1 for keys, count in match_aggregation.iteritems(): text_table.add_row([key for key in keys] + [count]) text += text_table.draw() + '\n\n' return unicode(text) def create_default_title(self, matches): return self.rule['name'] def get_account(self, account_file): """ Gets the username and password from an account file. :param account_file: Name of the file which contains user and password information. """ account_conf = yaml_loader(account_file) if 'user' not in account_conf or 'password' not in account_conf: raise EAException('Account file must have user and password fields') self.user = account_conf['user'] self.password = account_conf['password'] class StompAlerter(Alerter): """ The stomp alerter publishes alerts via stomp to a broker. 
""" required_options = frozenset(['stomp_hostname', 'stomp_hostport', 'stomp_login', 'stomp_password']) def alert(self, matches): alerts = [] qk = self.rule.get('query_key', None) fullmessage = {} for match in matches: if qk in match: elastalert_logger.info( 'Alert for %s, %s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field']))) alerts.append( '1)Alert for %s, %s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field'])) ) fullmessage['match'] = match[qk] else: elastalert_logger.info('Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field']))) alerts.append( '2)Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field'])) ) fullmessage['match'] = lookup_es_key(match, self.rule['timestamp_field']) elastalert_logger.info(unicode(BasicMatchString(self.rule, match))) fullmessage['alerts'] = alerts fullmessage['rule'] = self.rule['name'] fullmessage['matching'] = unicode(BasicMatchString(self.rule, match)) fullmessage['alertDate'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") fullmessage['body'] = self.create_alert_body(matches) self.stomp_hostname = self.rule.get('stomp_hostname', 'localhost') self.stomp_hostport = self.rule.get('stomp_hostport', '61613') self.stomp_login = self.rule.get('stomp_login', 'admin') self.stomp_password = self.rule.get('stomp_password', 'admin') self.stomp_destination = self.rule.get('stomp_destination', '/queue/ALERT') conn = stomp.Connection([(self.stomp_hostname, self.stomp_hostport)]) conn.start() conn.connect(self.stomp_login, self.stomp_password) conn.send(self.stomp_destination, json.dumps(fullmessage)) conn.disconnect() def get_info(self): return {'type': 'stomp'} class DebugAlerter(Alerter): """ The debug alerter uses a Python logger (by default, alerting to terminal). 
""" def alert(self, matches): qk = self.rule.get('query_key', None) for match in matches: if qk in match: elastalert_logger.info( 'Alert for %s, %s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field']))) else: elastalert_logger.info('Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field']))) elastalert_logger.info(unicode(BasicMatchString(self.rule, match))) def get_info(self): return {'type': 'debug'} class EmailAlerter(Alerter): """ Sends an email alert """ required_options = frozenset(['email']) def __init__(self, *args): super(EmailAlerter, self).__init__(*args) self.smtp_host = self.rule.get('smtp_host', 'localhost') self.smtp_ssl = self.rule.get('smtp_ssl', False) self.from_addr = self.rule.get('from_addr', 'ElastAlert') self.smtp_port = self.rule.get('smtp_port') if self.rule.get('smtp_auth_file'): self.get_account(self.rule['smtp_auth_file']) self.smtp_key_file = self.rule.get('smtp_key_file') self.smtp_cert_file = self.rule.get('smtp_cert_file') # Convert email to a list if it isn't already if isinstance(self.rule['email'], basestring): self.rule['email'] = [self.rule['email']] # If there is a cc then also convert it a list if it isn't cc = self.rule.get('cc') if cc and isinstance(cc, basestring): self.rule['cc'] = [self.rule['cc']] # If there is a bcc then also convert it to a list if it isn't bcc = self.rule.get('bcc') if bcc and isinstance(bcc, basestring): self.rule['bcc'] = [self.rule['bcc']] add_suffix = self.rule.get('email_add_domain') if add_suffix and not add_suffix.startswith('@'): self.rule['email_add_domain'] = '@' + add_suffix def alert(self, matches): body = self.create_alert_body(matches) # Add JIRA ticket if it exists if self.pipeline is not None and 'jira_ticket' in self.pipeline: url = '%s/browse/%s' % (self.pipeline['jira_server'], self.pipeline['jira_ticket']) body += '\nJIRA ticket: %s' % (url) to_addr = self.rule['email'] if 'email_from_field' in self.rule: recipient = lookup_es_key(matches[0], self.rule['email_from_field']) if isinstance(recipient, basestring): if '@' in recipient: to_addr = [recipient] elif 'email_add_domain' in self.rule: to_addr = [recipient + self.rule['email_add_domain']] elif isinstance(recipient, list): to_addr = recipient if 'email_add_domain' in self.rule: to_addr = [name + self.rule['email_add_domain'] for name in to_addr] email_msg = MIMEText(body.encode('UTF-8'), _charset='UTF-8') email_msg['Subject'] = self.create_title(matches) email_msg['To'] = ', '.join(to_addr) email_msg['From'] = self.from_addr email_msg['Reply-To'] = self.rule.get('email_reply_to', email_msg['To']) email_msg['Date'] = formatdate() if self.rule.get('cc'): email_msg['CC'] = ','.join(self.rule['cc']) to_addr = to_addr + self.rule['cc'] if self.rule.get('bcc'): to_addr = to_addr + self.rule['bcc'] try: if self.smtp_ssl: if self.smtp_port: self.smtp = SMTP_SSL(self.smtp_host, self.smtp_port, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) else: self.smtp = SMTP_SSL(self.smtp_host, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) else: if self.smtp_port: self.smtp = SMTP(self.smtp_host, self.smtp_port) else: self.smtp = SMTP(self.smtp_host) self.smtp.ehlo() if self.smtp.has_extn('STARTTLS'): self.smtp.starttls(keyfile=self.smtp_key_file, certfile=self.smtp_cert_file) if 'smtp_auth_file' in self.rule: self.smtp.login(self.user, self.password) except (SMTPException, error) as e: raise EAException("Error connecting to SMTP host: %s" % (e)) except SMTPAuthenticationError 
as e: raise EAException("SMTP username/password rejected: %s" % (e)) self.smtp.sendmail(self.from_addr, to_addr, email_msg.as_string()) self.smtp.close() elastalert_logger.info("Sent email to %s" % (to_addr)) def create_default_title(self, matches): subject = 'ElastAlert: %s' % (self.rule['name']) # If the rule has a query_key, add that value plus timestamp to subject if 'query_key' in self.rule: qk = matches[0].get(self.rule['query_key']) if qk: subject += ' - %s' % (qk) return subject def get_info(self): return {'type': 'email', 'recipients': self.rule['email']} class JiraAlerter(Alerter): """ Creates a Jira ticket for each alert """ required_options = frozenset(['jira_server', 'jira_account_file', 'jira_project', 'jira_issuetype']) # Maintain a static set of built-in fields that we explicitly know how to set # For anything else, we will do best-effort and try to set a string value known_field_list = [ 'jira_account_file', 'jira_assignee', 'jira_bump_after_inactivity', 'jira_bump_in_statuses', 'jira_bump_not_in_statuses', 'jira_bump_tickets', 'jira_component', 'jira_components', 'jira_description', 'jira_ignore_in_title', 'jira_issuetype', 'jira_label', 'jira_labels', 'jira_max_age', 'jira_priority', 'jira_project', 'jira_server', 'jira_watchers', ] # Some built-in jira types that can be used as custom fields require special handling # Here is a sample of one of them: # {"id":"customfield_12807","name":"My Custom Field","custom":true,"orderable":true,"navigable":true,"searchable":true, # "clauseNames":["cf[12807]","My Custom Field"],"schema":{"type":"array","items":"string", # "custom":"com.atlassian.jira.plugin.system.customfieldtypes:multiselect","customId":12807}} # There are likely others that will need to be updated on a case-by-case basis custom_string_types_with_special_handling = [ 'com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes', 'com.atlassian.jira.plugin.system.customfieldtypes:multiselect', 'com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons', ] def __init__(self, rule): super(JiraAlerter, self).__init__(rule) self.server = self.rule['jira_server'] self.get_account(self.rule['jira_account_file']) self.project = self.rule['jira_project'] self.issue_type = self.rule['jira_issuetype'] # We used to support only a single component. This allows us to maintain backwards compatibility # while also giving the user-facing API a more representative name self.components = self.rule.get('jira_components', self.rule.get('jira_component')) # We used to support only a single label. This allows us to maintain backwards compatibility # while also giving the user-facing API a more representative name self.labels = self.rule.get('jira_labels', self.rule.get('jira_label')) self.description = self.rule.get('jira_description', '') self.assignee = self.rule.get('jira_assignee') self.max_age = self.rule.get('jira_max_age', 30) self.priority = self.rule.get('jira_priority') self.bump_tickets = self.rule.get('jira_bump_tickets', False) self.bump_not_in_statuses = self.rule.get('jira_bump_not_in_statuses') self.bump_in_statuses = self.rule.get('jira_bump_in_statuses') self.bump_after_inactivity = self.rule.get('jira_bump_after_inactivity', self.max_age) self.watchers = self.rule.get('jira_watchers') if self.bump_in_statuses and self.bump_not_in_statuses: msg = 'Both jira_bump_in_statuses (%s) and jira_bump_not_in_statuses (%s) are set.' 
% \ (','.join(self.bump_in_statuses), ','.join(self.bump_not_in_statuses)) intersection = list(set(self.bump_in_statuses) & set(self.bump_in_statuses)) if intersection: msg = '%s Both have common statuses of (%s). As such, no tickets will ever be found.' % ( msg, ','.join(intersection)) msg += ' This should be simplified to use only one or the other.' logging.warning(msg) self.jira_args = {'project': {'key': self.project}, 'issuetype': {'name': self.issue_type}} if self.components: # Support single component or list if type(self.components) != list: self.jira_args['components'] = [{'name': self.components}] else: self.jira_args['components'] = [{'name': component} for component in self.components] if self.labels: # Support single label or list if type(self.labels) != list: self.labels = [self.labels] self.jira_args['labels'] = self.labels if self.watchers: # Support single watcher or list if type(self.watchers) != list: self.watchers = [self.watchers] if self.assignee: self.jira_args['assignee'] = {'name': self.assignee} try: self.client = JIRA(self.server, basic_auth=(self.user, self.password)) self.get_priorities() self.get_arbitrary_fields() except JIRAError as e: # JIRAError may contain HTML, pass along only first 1024 chars raise EAException("Error connecting to JIRA: %s" % (str(e)[:1024])) try: if self.priority is not None: self.jira_args['priority'] = {'id': self.priority_ids[self.priority]} except KeyError: logging.error("Priority %s not found. Valid priorities are %s" % (self.priority, self.priority_ids.keys())) def get_arbitrary_fields(self): # This API returns metadata about all the fields defined on the jira server (built-ins and custom ones) fields = self.client.fields() for jira_field, value in self.rule.iteritems(): # If we find a field that is not covered by the set that we are aware of, it means it is either: # 1. A built-in supported field in JIRA that we don't have on our radar # 2. A custom field that a JIRA admin has configured if jira_field.startswith('jira_') and jira_field not in self.known_field_list: # Remove the jira_ part. Convert underscores to spaces normalized_jira_field = jira_field[5:].replace('_', ' ').lower() # All jira fields should be found in the 'id' or the 'name' field. Therefore, try both just in case for identifier in ['name', 'id']: field = next((f for f in fields if normalized_jira_field == f[identifier].replace('_', ' ').lower()), None) if field: break if not field: # Log a warning to ElastAlert saying that we couldn't find that type? # OR raise and fail to load the alert entirely? Probably the latter... raise Exception("Could not find a definition for the jira field '{0}'".format(normalized_jira_field)) arg_name = field['id'] # Check the schema information to decide how to set the value correctly # If the schema information is not available, raise an exception since we don't know how to set it # Note this is only the case for two built-in types, id: issuekey and id: thumbnail if not ('schema' in field or 'type' in field['schema']): raise Exception("Could not determine schema information for the jira field '{0}'".format(normalized_jira_field)) arg_type = field['schema']['type'] # Handle arrays of simple types like strings or numbers if arg_type == 'array': # As a convenience, support the scenario wherein the user only provides # a single value for a multi-value field e.g. 
jira_labels: Only_One_Label if type(value) != list: value = [value] array_items = field['schema']['items'] # Simple string types if array_items in ['string', 'date', 'datetime']: # Special case for multi-select custom types (the JIRA metadata says that these are strings, but # in reality, they are required to be provided as an object. if 'custom' in field['schema'] and field['schema']['custom'] in self.custom_string_types_with_special_handling: self.jira_args[arg_name] = [{'value': v} for v in value] else: self.jira_args[arg_name] = value elif array_items == 'number': self.jira_args[arg_name] = [int(v) for v in value] # Also attempt to handle arrays of complex types that have to be passed as objects with an identifier 'key' elif array_items == 'option': self.jira_args[arg_name] = [{'value': v} for v in value] else: # Try setting it as an object, using 'name' as the key # This may not work, as the key might actually be 'key', 'id', 'value', or something else # If it works, great! If not, it will manifest itself as an API error that will bubble up self.jira_args[arg_name] = [{'name': v} for v in value] # Handle non-array types else: # Simple string types if arg_type in ['string', 'date', 'datetime']: # Special case for custom types (the JIRA metadata says that these are strings, but # in reality, they are required to be provided as an object. if 'custom' in field['schema'] and field['schema']['custom'] in self.custom_string_types_with_special_handling: self.jira_args[arg_name] = {'value': value} else: self.jira_args[arg_name] = value # Number type elif arg_type == 'number': self.jira_args[arg_name] = int(value) elif arg_type == 'option': self.jira_args[arg_name] = {'value': value} # Complex type else: self.jira_args[arg_name] = {'name': value} def get_priorities(self): """ Creates a mapping of priority index to id. """ priorities = self.client.priorities() self.priority_ids = {} for x in range(len(priorities)): self.priority_ids[x] = priorities[x].id def set_assignee(self, assignee): self.assignee = assignee if assignee: self.jira_args['assignee'] = {'name': assignee} elif 'assignee' in self.jira_args: self.jira_args.pop('assignee') def find_existing_ticket(self, matches): # Default title, get stripped search version if 'alert_subject' not in self.rule: title = self.create_default_title(matches, True) else: title = self.create_title(matches) if 'jira_ignore_in_title' in self.rule: title = title.replace(matches[0].get(self.rule['jira_ignore_in_title'], ''), '') # This is necessary for search to work. 
Other special characters and dashes # directly adjacent to words appear to be ok title = title.replace(' - ', ' ') title = title.replace('\\', '\\\\') date = (datetime.datetime.now() - datetime.timedelta(days=self.max_age)).strftime('%Y-%m-%d') jql = 'project=%s AND summary~"%s" and created >= "%s"' % (self.project, title, date) if self.bump_in_statuses: jql = '%s and status in (%s)' % (jql, ','.join(self.bump_in_statuses)) if self.bump_not_in_statuses: jql = '%s and status not in (%s)' % (jql, ','.join(self.bump_not_in_statuses)) try: issues = self.client.search_issues(jql) except JIRAError as e: logging.exception("Error while searching for JIRA ticket using jql '%s': %s" % (jql, e)) return None if len(issues): return issues[0] def comment_on_ticket(self, ticket, match): text = unicode(JiraFormattedMatchString(self.rule, match)) timestamp = pretty_ts(lookup_es_key(match, self.rule['timestamp_field'])) comment = "This alert was triggered again at %s\n%s" % (timestamp, text) self.client.add_comment(ticket, comment) def alert(self, matches): title = self.create_title(matches) if self.bump_tickets: ticket = self.find_existing_ticket(matches) if ticket: inactivity_datetime = ts_now() - datetime.timedelta(days=self.bump_after_inactivity) if ts_to_dt(ticket.fields.updated) >= inactivity_datetime: if self.pipeline is not None: self.pipeline['jira_ticket'] = None self.pipeline['jira_server'] = self.server return None elastalert_logger.info('Commenting on existing ticket %s' % (ticket.key)) for match in matches: try: self.comment_on_ticket(ticket, match) except JIRAError as e: logging.exception("Error while commenting on ticket %s: %s" % (ticket, e)) if self.pipeline is not None: self.pipeline['jira_ticket'] = ticket self.pipeline['jira_server'] = self.server return None self.jira_args['summary'] = title self.jira_args['description'] = self.create_alert_body(matches) try: self.issue = self.client.create_issue(**self.jira_args) # You can not add watchers on initial creation. Only as a follow-up action if self.watchers: for watcher in self.watchers: try: self.client.add_watcher(self.issue.key, watcher) except Exception as ex: # Re-raise the exception, preserve the stack-trace, and give some # context as to which watcher failed to be added raise Exception( "Exception encountered when trying to add '{0}' as a watcher. 
Does the user exist?\n{1}" .format( watcher, ex )), None, sys.exc_info()[2] except JIRAError as e: raise EAException("Error creating JIRA ticket using jira_args (%s): %s" % (self.jira_args, e)) elastalert_logger.info("Opened Jira ticket: %s" % (self.issue)) if self.pipeline is not None: self.pipeline['jira_ticket'] = self.issue self.pipeline['jira_server'] = self.server def create_alert_body(self, matches): body = self.description + '\n' body += self.get_aggregation_summary_text(matches) for match in matches: body += unicode(JiraFormattedMatchString(self.rule, match)) if len(matches) > 1: body += '\n----------------------------------------\n' return body def get_aggregation_summary_text(self, matches): text = super(JiraAlerter, self).get_aggregation_summary_text(matches) if text: text = u'{{noformat}}{0}{{noformat}}'.format(text) return text def create_default_title(self, matches, for_search=False): # If there is a query_key, use that in the title if 'query_key' in self.rule and lookup_es_key(matches[0], self.rule['query_key']): title = 'ElastAlert: %s matched %s' % (lookup_es_key(matches[0], self.rule['query_key']), self.rule['name']) else: title = 'ElastAlert: %s' % (self.rule['name']) if for_search: return title title += ' - %s' % (pretty_ts(matches[0][self.rule['timestamp_field']], self.rule.get('use_local_time'))) # Add count for spikes count = matches[0].get('spike_count') if count: title += ' - %s+ events' % (count) return title def get_info(self): return {'type': 'jira'} class CommandAlerter(Alerter): required_options = set(['command']) def __init__(self, *args): super(CommandAlerter, self).__init__(*args) self.last_command = [] self.shell = False if isinstance(self.rule['command'], basestring): self.shell = True if '%' in self.rule['command']: logging.warning('Warning! 
You could be vulnerable to shell injection!') self.rule['command'] = [self.rule['command']] self.new_style_string_format = False if 'new_style_string_format' in self.rule and self.rule['new_style_string_format']: self.new_style_string_format = True def alert(self, matches): # Format the command and arguments try: if self.new_style_string_format: command = [command_arg.format(match=matches[0]) for command_arg in self.rule['command']] else: command = [command_arg % matches[0] for command_arg in self.rule['command']] self.last_command = command except KeyError as e: raise EAException("Error formatting command: %s" % (e)) # Run command and pipe data try: subp = subprocess.Popen(command, stdin=subprocess.PIPE, shell=self.shell) if self.rule.get('pipe_match_json'): match_json = json.dumps(matches, cls=DateTimeEncoder) + '\n' stdout, stderr = subp.communicate(input=match_json) if self.rule.get("fail_on_non_zero_exit", False) and subp.wait(): raise EAException("Non-zero exit code while running command %s" % (' '.join(command))) except OSError as e: raise EAException("Error while running command %s: %s" % (' '.join(command), e)) def get_info(self): return {'type': 'command', 'command': ' '.join(self.last_command)} class SnsAlerter(Alerter): """ Send alert using AWS SNS service """ required_options = frozenset(['sns_topic_arn']) def __init__(self, *args): super(SnsAlerter, self).__init__(*args) self.sns_topic_arn = self.rule.get('sns_topic_arn', '') self.aws_access_key_id = self.rule.get('aws_access_key_id') self.aws_secret_access_key = self.rule.get('aws_secret_access_key') self.aws_region = self.rule.get('aws_region', 'us-east-1') self.profile = self.rule.get('boto_profile', None) # Deprecated self.profile = self.rule.get('aws_profile', None) def create_default_title(self, matches): subject = 'ElastAlert: %s' % (self.rule['name']) return subject def alert(self, matches): body = self.create_alert_body(matches) session = boto3.Session( aws_access_key_id=self.aws_access_key_id, aws_secret_access_key=self.aws_secret_access_key, region_name=self.aws_region, profile_name=self.profile ) sns_client = session.client('sns') sns_client.publish( TopicArn=self.sns_topic_arn, Message=body, Subject=self.create_title(matches) ) elastalert_logger.info("Sent sns notification to %s" % (self.sns_topic_arn)) class HipChatAlerter(Alerter): """ Creates a HipChat room notification for each alert """ required_options = frozenset(['hipchat_auth_token', 'hipchat_room_id']) def __init__(self, rule): super(HipChatAlerter, self).__init__(rule) self.hipchat_msg_color = self.rule.get('hipchat_msg_color', 'red') self.hipchat_message_format = self.rule.get('hipchat_message_format', 'html') self.hipchat_auth_token = self.rule['hipchat_auth_token'] self.hipchat_room_id = self.rule['hipchat_room_id'] self.hipchat_domain = self.rule.get('hipchat_domain', 'api.hipchat.com') self.hipchat_ignore_ssl_errors = self.rule.get('hipchat_ignore_ssl_errors', False) self.hipchat_notify = self.rule.get('hipchat_notify', True) self.hipchat_from = self.rule.get('hipchat_from', '') self.url = 'https://%s/v2/room/%s/notification?auth_token=%s' % ( self.hipchat_domain, self.hipchat_room_id, self.hipchat_auth_token) self.hipchat_proxy = self.rule.get('hipchat_proxy', None) def alert(self, matches): body = self.create_alert_body(matches) # HipChat sends 400 bad request on messages longer than 10000 characters if (len(body) > 9999): body = body[:9980] + '..(truncated)' # Use appropriate line ending for text/html if self.hipchat_message_format == 'html': 
body = body.replace('\n', '<br />') # Post to HipChat headers = {'content-type': 'application/json'} # set https proxy, if it was provided proxies = {'https': self.hipchat_proxy} if self.hipchat_proxy else None payload = { 'color': self.hipchat_msg_color, 'message': body, 'message_format': self.hipchat_message_format, 'notify': self.hipchat_notify, 'from': self.hipchat_from } try: if self.hipchat_ignore_ssl_errors: requests.packages.urllib3.disable_warnings() response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, verify=not self.hipchat_ignore_ssl_errors, proxies=proxies) warnings.resetwarnings() response.raise_for_status() except RequestException as e: raise EAException("Error posting to HipChat: %s" % e) elastalert_logger.info("Alert sent to HipChat room %s" % self.hipchat_room_id) def get_info(self): return {'type': 'hipchat', 'hipchat_room_id': self.hipchat_room_id} class MsTeamsAlerter(Alerter): """ Creates a Microsoft Teams Conversation Message for each alert """ required_options = frozenset(['ms_teams_webhook_url', 'ms_teams_alert_summary']) def __init__(self, rule): super(MsTeamsAlerter, self).__init__(rule) self.ms_teams_webhook_url = self.rule['ms_teams_webhook_url'] if isinstance(self.ms_teams_webhook_url, basestring): self.ms_teams_webhook_url = [self.ms_teams_webhook_url] self.ms_teams_proxy = self.rule.get('ms_teams_proxy', None) self.ms_teams_alert_summary = self.rule.get('ms_teams_alert_summary', 'ElastAlert Message') self.ms_teams_alert_fixed_width = self.rule.get('ms_teams_alert_fixed_width', False) self.ms_teams_theme_color = self.rule.get('ms_teams_theme_color', '') def format_body(self, body): body = body.encode('UTF-8') if self.ms_teams_alert_fixed_width: body = body.replace('`', "'") body = "```{0}```".format('```\n\n```'.join(x for x in body.split('\n'))).replace('\n``````', '') return body def alert(self, matches): body = self.create_alert_body(matches) body = self.format_body(body) # post to Teams headers = {'content-type': 'application/json'} # set https proxy, if it was provided proxies = {'https': self.ms_teams_proxy} if self.ms_teams_proxy else None payload = { '@type': 'MessageCard', '@context': 'http://schema.org/extensions', 'summary': self.ms_teams_alert_summary, 'title': self.create_title(matches), 'text': body } if self.ms_teams_theme_color != '': payload['themeColor'] = self.ms_teams_theme_color for url in self.ms_teams_webhook_url: try: response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status() except RequestException as e: raise EAException("Error posting to ms teams: %s" % e) elastalert_logger.info("Alert sent to MS Teams") def get_info(self): return {'type': 'ms_teams', 'ms_teams_webhook_url': self.ms_teams_webhook_url} class SlackAlerter(Alerter): """ Creates a Slack room message for each alert """ required_options = frozenset(['slack_webhook_url']) def __init__(self, rule): super(SlackAlerter, self).__init__(rule) self.slack_webhook_url = self.rule['slack_webhook_url'] if isinstance(self.slack_webhook_url, basestring): self.slack_webhook_url = [self.slack_webhook_url] self.slack_proxy = self.rule.get('slack_proxy', None) self.slack_username_override = self.rule.get('slack_username_override', 'elastalert') self.slack_channel_override = self.rule.get('slack_channel_override', '') self.slack_emoji_override = self.rule.get('slack_emoji_override', ':ghost:') self.slack_icon_url_override = self.rule.get('slack_icon_url_override', 
'') self.slack_msg_color = self.rule.get('slack_msg_color', 'danger') self.slack_parse_override = self.rule.get('slack_parse_override', 'none') self.slack_text_string = self.rule.get('slack_text_string', '') def format_body(self, body): # https://api.slack.com/docs/formatting body = body.encode('UTF-8') body = body.replace('&', '&amp;') body = body.replace('<', '&lt;') body = body.replace('>', '&gt;') return body def alert(self, matches): body = self.create_alert_body(matches) body = self.format_body(body) # post to slack headers = {'content-type': 'application/json'} # set https proxy, if it was provided proxies = {'https': self.slack_proxy} if self.slack_proxy else None payload = { 'username': self.slack_username_override, 'channel': self.slack_channel_override, 'parse': self.slack_parse_override, 'text': self.slack_text_string, 'attachments': [ { 'color': self.slack_msg_color, 'title': self.create_title(matches), 'text': body, 'mrkdwn_in': ['text', 'pretext'], 'fields': [] } ] } if self.slack_icon_url_override != '': payload['icon_url'] = self.slack_icon_url_override else: payload['icon_emoji'] = self.slack_emoji_override for url in self.slack_webhook_url: try: response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status() except RequestException as e: raise EAException("Error posting to slack: %s" % e) elastalert_logger.info("Alert sent to Slack") def get_info(self): return {'type': 'slack', 'slack_username_override': self.slack_username_override, 'slack_webhook_url': self.slack_webhook_url} class PagerDutyAlerter(Alerter): """ Create an incident on PagerDuty for each alert """ required_options = frozenset(['pagerduty_service_key', 'pagerduty_client_name']) def __init__(self, rule): super(PagerDutyAlerter, self).__init__(rule) self.pagerduty_service_key = self.rule['pagerduty_service_key'] self.pagerduty_client_name = self.rule['pagerduty_client_name'] self.pagerduty_incident_key = self.rule.get('pagerduty_incident_key', '') self.pagerduty_incident_key_args = self.rule.get('pagerduty_incident_key_args', None) self.pagerduty_proxy = self.rule.get('pagerduty_proxy', None) self.url = 'https://events.pagerduty.com/generic/2010-04-15/create_event.json' def alert(self, matches): body = self.create_alert_body(matches) # post to pagerduty headers = {'content-type': 'application/json'} payload = { 'service_key': self.pagerduty_service_key, 'description': self.create_title(matches), 'event_type': 'trigger', 'incident_key': self.get_incident_key(matches), 'client': self.pagerduty_client_name, 'details': { "information": body.encode('UTF-8'), }, } # set https proxy, if it was provided proxies = {'https': self.pagerduty_proxy} if self.pagerduty_proxy else None try: response = requests.post( self.url, data=json.dumps(payload, cls=DateTimeEncoder, ensure_ascii=False), headers=headers, proxies=proxies ) response.raise_for_status() except RequestException as e: raise EAException("Error posting to pagerduty: %s" % e) elastalert_logger.info("Trigger sent to PagerDuty") def get_incident_key(self, matches): if self.pagerduty_incident_key_args: incident_key_values = [lookup_es_key(matches[0], arg) for arg in self.pagerduty_incident_key_args] # Populate values with rule level properties too for i in range(len(incident_key_values)): if incident_key_values[i] is None: key_value = self.rule.get(self.pagerduty_incident_key_args[i]) if key_value: incident_key_values[i] = key_value incident_key_values = ['<MISSING VALUE>' if val is None 
else val for val in incident_key_values] return self.pagerduty_incident_key.format(*incident_key_values) else: return self.pagerduty_incident_key def get_info(self): return {'type': 'pagerduty', 'pagerduty_client_name': self.pagerduty_client_name} class ExotelAlerter(Alerter): required_options = frozenset(['exotel_account_sid', 'exotel_auth_token', 'exotel_to_number', 'exotel_from_number']) def __init__(self, rule): super(ExotelAlerter, self).__init__(rule) self.exotel_account_sid = self.rule['exotel_account_sid'] self.exotel_auth_token = self.rule['exotel_auth_token'] self.exotel_to_number = self.rule['exotel_to_number'] self.exotel_from_number = self.rule['exotel_from_number'] self.sms_body = self.rule.get('exotel_message_body', '') def alert(self, matches): client = Exotel(self.exotel_account_sid, self.exotel_auth_token) try: message_body = self.rule['name'] + self.sms_body response = client.sms(self.rule['exotel_from_number'], self.rule['exotel_to_number'], message_body) if response != 200: raise EAException("Error posting to Exotel, response code is %s" % response) except: raise EAException("Error posting to Exotel"), None, sys.exc_info()[2] elastalert_logger.info("Trigger sent to Exotel") def get_info(self): return {'type': 'exotel', 'exotel_account': self.exotel_account_sid} class TwilioAlerter(Alerter): required_options = frozenset(['twilio_account_sid', 'twilio_auth_token', 'twilio_to_number', 'twilio_from_number']) def __init__(self, rule): super(TwilioAlerter, self).__init__(rule) self.twilio_account_sid = self.rule['twilio_account_sid'] self.twilio_auth_token = self.rule['twilio_auth_token'] self.twilio_to_number = self.rule['twilio_to_number'] self.twilio_from_number = self.rule['twilio_from_number'] def alert(self, matches): client = TwilioClient(self.twilio_account_sid, self.twilio_auth_token) try: client.messages.create(body=self.rule['name'], to=self.twilio_to_number, from_=self.twilio_from_number) except TwilioRestException as e: raise EAException("Error posting to twilio: %s" % e) elastalert_logger.info("Trigger sent to Twilio") def get_info(self): return {'type': 'twilio', 'twilio_client_name': self.twilio_from_number} class VictorOpsAlerter(Alerter): """ Creates a VictorOps Incident for each alert """ required_options = frozenset(['victorops_api_key', 'victorops_routing_key', 'victorops_message_type']) def __init__(self, rule): super(VictorOpsAlerter, self).__init__(rule) self.victorops_api_key = self.rule['victorops_api_key'] self.victorops_routing_key = self.rule['victorops_routing_key'] self.victorops_message_type = self.rule['victorops_message_type'] self.victorops_entity_display_name = self.rule.get('victorops_entity_display_name', 'no entity display name') self.url = 'https://alert.victorops.com/integrations/generic/20131114/alert/%s/%s' % ( self.victorops_api_key, self.victorops_routing_key) self.victorops_proxy = self.rule.get('victorops_proxy', None) def alert(self, matches): body = self.create_alert_body(matches) # post to victorops headers = {'content-type': 'application/json'} # set https proxy, if it was provided proxies = {'https': self.victorops_proxy} if self.victorops_proxy else None payload = { "message_type": self.victorops_message_type, "entity_display_name": self.victorops_entity_display_name, "monitoring_tool": "ElastAlert", "state_message": body } try: response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status() except RequestException as e: raise 
EAException("Error posting to VictorOps: %s" % e) elastalert_logger.info("Trigger sent to VictorOps") def get_info(self): return {'type': 'victorops', 'victorops_routing_key': self.victorops_routing_key} class TelegramAlerter(Alerter): """ Send a Telegram message via bot api for each alert """ required_options = frozenset(['telegram_bot_token', 'telegram_room_id']) def __init__(self, rule): super(TelegramAlerter, self).__init__(rule) self.telegram_bot_token = self.rule['telegram_bot_token'] self.telegram_room_id = self.rule['telegram_room_id'] self.telegram_api_url = self.rule.get('telegram_api_url', 'api.telegram.org') self.url = 'https://%s/bot%s/%s' % (self.telegram_api_url, self.telegram_bot_token, "sendMessage") self.telegram_proxy = self.rule.get('telegram_proxy', None) def alert(self, matches): body = u'⚠ *%s* ⚠ ```\n' % (self.create_title(matches)) for match in matches: body += unicode(BasicMatchString(self.rule, match)) # Separate text of aggregated alerts with dashes if len(matches) > 1: body += '\n----------------------------------------\n' body += u' ```' headers = {'content-type': 'application/json'} # set https proxy, if it was provided proxies = {'https': self.telegram_proxy} if self.telegram_proxy else None payload = { 'chat_id': self.telegram_room_id, 'text': body, 'parse_mode': 'markdown', 'disable_web_page_preview': True } try: response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) warnings.resetwarnings() response.raise_for_status() except RequestException as e: raise EAException("Error posting to Telegram: %s" % e) elastalert_logger.info( "Alert sent to Telegram room %s" % self.telegram_room_id) def get_info(self): return {'type': 'telegram', 'telegram_room_id': self.telegram_room_id} class GitterAlerter(Alerter): """ Creates a Gitter activity message for each alert """ required_options = frozenset(['gitter_webhook_url']) def __init__(self, rule): super(GitterAlerter, self).__init__(rule) self.gitter_webhook_url = self.rule['gitter_webhook_url'] self.gitter_proxy = self.rule.get('gitter_proxy', None) self.gitter_msg_level = self.rule.get('gitter_msg_level', 'error') def alert(self, matches): body = self.create_alert_body(matches) # post to Gitter headers = {'content-type': 'application/json'} # set https proxy, if it was provided proxies = {'https': self.gitter_proxy} if self.gitter_proxy else None payload = { 'message': body, 'level': self.gitter_msg_level } try: response = requests.post(self.gitter_webhook_url, json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status() except RequestException as e: raise EAException("Error posting to Gitter: %s" % e) elastalert_logger.info("Alert sent to Gitter") def get_info(self): return {'type': 'gitter', 'gitter_webhook_url': self.gitter_webhook_url} class ServiceNowAlerter(Alerter): """ Creates a ServiceNow alert """ required_options = set([ 'username', 'password', 'servicenow_rest_url', 'short_description', 'comments', 'assignment_group', 'category', 'subcategory', 'cmdb_ci', 'caller_id' ]) def __init__(self, rule): super(ServiceNowAlerter, self).__init__(rule) self.servicenow_rest_url = self.rule['servicenow_rest_url'] self.servicenow_proxy = self.rule.get('servicenow_proxy', None) def alert(self, matches): for match in matches: # Parse everything into description. 
description = str(BasicMatchString(self.rule, match)) # Set proper headers headers = { "Content-Type": "application/json", "Accept": "application/json;charset=utf-8" } proxies = {'https': self.servicenow_proxy} if self.servicenow_proxy else None payload = { "description": description, "short_description": self.rule['short_description'], "comments": self.rule['comments'], "assignment_group": self.rule['assignment_group'], "category": self.rule['category'], "subcategory": self.rule['subcategory'], "cmdb_ci": self.rule['cmdb_ci'], "caller_id": self.rule["caller_id"] } try: response = requests.post( self.servicenow_rest_url, auth=(self.rule['username'], self.rule['password']), headers=headers, data=json.dumps(payload, cls=DateTimeEncoder), proxies=proxies ) response.raise_for_status() except RequestException as e: raise EAException("Error posting to ServiceNow: %s" % e) elastalert_logger.info("Alert sent to ServiceNow") def get_info(self): return {'type': 'ServiceNow', 'self.servicenow_rest_url': self.servicenow_rest_url} class HTTPPostAlerter(Alerter): """ Requested elasticsearch indices are sent by HTTP POST. Encoded with JSON. """ def __init__(self, rule): super(HTTPPostAlerter, self).__init__(rule) post_url = self.rule.get('http_post_url') if isinstance(post_url, basestring): post_url = [post_url] self.post_url = post_url self.post_proxy = self.rule.get('http_post_proxy') self.post_payload = self.rule.get('http_post_payload', {}) self.post_static_payload = self.rule.get('http_post_static_payload', {}) self.post_all_values = self.rule.get('http_post_all_values', not self.post_payload) def alert(self, matches): """ Each match will trigger a POST to the specified endpoint(s). """ for match in matches: payload = match if self.post_all_values else {} payload.update(self.post_static_payload) for post_key, es_key in self.post_payload.items(): payload[post_key] = lookup_es_key(match, es_key) headers = { "Content-Type": "application/json", "Accept": "application/json;charset=utf-8" } proxies = {'https': self.post_proxy} if self.post_proxy else None for url in self.post_url: try: response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies) response.raise_for_status() except RequestException as e: raise EAException("Error posting HTTP Post alert: %s" % e) elastalert_logger.info("HTTP Post alert sent.") def get_info(self): return {'type': 'http_post', 'http_post_webhook_url': self.post_url}
44.289971
137
0.607814
59,998
0.984429
0
0
0
0
0
0
17,304
0.283919
7c9ab847564a9551bd26274412cd272cd155cf72
69,601
py
Python
tests/unit/python/fledge/services/core/scheduler/test_scheduler.py
DDC-NDRS/fledge-iot_fledge
27a5e66a55daaab1aca14ce6e66f9f1e6efaef51
[ "Apache-2.0" ]
69
2019-12-03T17:54:33.000Z
2022-03-13T07:05:23.000Z
tests/unit/python/fledge/services/core/scheduler/test_scheduler.py
DDC-NDRS/fledge-iot_fledge
27a5e66a55daaab1aca14ce6e66f9f1e6efaef51
[ "Apache-2.0" ]
125
2020-02-13T15:11:28.000Z
2022-03-29T14:42:36.000Z
tests/unit/python/fledge/services/core/scheduler/test_scheduler.py
DDC-NDRS/fledge-iot_fledge
27a5e66a55daaab1aca14ce6e66f9f1e6efaef51
[ "Apache-2.0" ]
24
2019-12-27T07:48:45.000Z
2022-03-13T07:05:28.000Z
# -*- coding: utf-8 -*- # FLEDGE_BEGIN # See: http://fledge-iot.readthedocs.io/ # FLEDGE_END import asyncio import datetime import uuid import time import json from unittest.mock import MagicMock, call import sys import copy import pytest from fledge.services.core.scheduler.scheduler import Scheduler, AuditLogger, ConfigurationManager from fledge.services.core.scheduler.entities import * from fledge.services.core.scheduler.exceptions import * from fledge.common.storage_client.storage_client import StorageClientAsync __author__ = "Amarendra K Sinha" __copyright__ = "Copyright (c) 2017 OSIsoft, LLC" __license__ = "Apache 2.0" __version__ = "${VERSION}" async def mock_task(): return "" async def mock_process(): m = MagicMock() m.pid = 9999 m.terminate = lambda: True return m @pytest.allure.feature("unit") @pytest.allure.story("scheduler") class TestScheduler: async def scheduler_fixture(self, mocker): # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. if sys.version_info.major == 3 and sys.version_info.minor >= 8: _rv = await mock_process() else: _rv = asyncio.ensure_future(mock_process()) scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_paused', False) mocker.patch.object(scheduler, '_process_scripts', return_value="North Readings to PI") mocker.patch.object(scheduler, '_wait_for_task_completion', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_terminate_child_processes') mocker.patch.object(asyncio, 'create_subprocess_exec', return_value=_rv) await scheduler._get_schedules() schedule = scheduler._ScheduleRow( id=uuid.UUID("2b614d26-760f-11e7-b5a5-be2e44b06b34"), process_name="North Readings to PI", name="OMF to PI north", type=Schedule.Type.INTERVAL, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, time=None, day=None, exclusive=True, enabled=True) log_exception = mocker.patch.object(scheduler._logger, "exception") log_error = mocker.patch.object(scheduler._logger, "error") log_debug = mocker.patch.object(scheduler._logger, "debug") log_info = mocker.patch.object(scheduler._logger, "info") return scheduler, schedule, log_info, log_exception, log_error, log_debug @pytest.mark.asyncio async def test__resume_check_schedules(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) # WHEN # Check IF part mocker.patch.object(scheduler, '_scheduler_loop_sleep_task', asyncio.Task(asyncio.sleep(5))) scheduler._resume_check_schedules() # THEN assert scheduler._check_processes_pending is False # WHEN # Check ELSE part mocker.patch.object(scheduler, '_scheduler_loop_sleep_task', None) scheduler._resume_check_schedules() # THEN assert scheduler._check_processes_pending is True @pytest.mark.asyncio async def test__wait_for_task_completion(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_info = mocker.patch.object(scheduler._logger, "info") mock_schedules = dict() 
mock_schedule = scheduler._ScheduleRow( id=uuid.UUID("2b614d26-760f-11e7-b5a5-be2e44b06b34"), process_name="North Readings to PI", name="OMF to PI north", type=Schedule.Type.INTERVAL, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, time=None, day=None, exclusive=True, enabled=True) mock_schedules[mock_schedule.id] = mock_schedule mock_task_process = scheduler._TaskProcess() mock_task_processes = dict() mock_task_process.process = await asyncio.create_subprocess_exec("sleep", ".1") mock_task_process.schedule = mock_schedule mock_task_id = uuid.uuid4() mock_task_process.task_id = mock_task_id mock_task_processes[mock_task_process.task_id] = mock_task_process mock_schedule_executions = dict() mock_schedule_execution = scheduler._ScheduleExecution() mock_schedule_executions[mock_schedule.id] = mock_schedule_execution mock_schedule_executions[mock_schedule.id].task_processes[mock_task_id] = mock_task_process mocker.patch.object(scheduler, '_resume_check_schedules') mocker.patch.object(scheduler, '_schedule_next_task') mocker.patch.multiple(scheduler, _schedules=mock_schedules, _task_processes=mock_task_processes, _schedule_executions=mock_schedule_executions) mocker.patch.object(scheduler, '_process_scripts', return_value="North Readings to PI") # WHEN await scheduler._wait_for_task_completion(mock_task_process) # THEN # After task completion, sleep above, no task processes should be left pending assert 0 == len(scheduler._task_processes) assert 0 == len(scheduler._schedule_executions[mock_schedule.id].task_processes) args, kwargs = log_info.call_args_list[0] assert 'OMF to PI north' in args assert 'North Readings to PI' in args @pytest.mark.asyncio async def test__start_task(self, mocker): # TODO: Mandatory - Add negative tests for full code coverage # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_info = mocker.patch.object(scheduler._logger, "info") mocker.patch.object(scheduler, '_schedule_first_task') await scheduler._get_schedules() schedule = scheduler._ScheduleRow( id=uuid.UUID("2b614d26-760f-11e7-b5a5-be2e44b06b34"), process_name="North Readings to PI", name="OMF to PI north", type=Schedule.Type.INTERVAL, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, time=None, day=None, exclusive=True, enabled=True) mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_resume_check_schedules') # Assert that there is no task queued for mock_schedule with pytest.raises(KeyError) as excinfo: assert scheduler._schedule_executions[schedule.id] is True # Now queue task and assert that the task has been queued await scheduler.queue_task(schedule.id) assert isinstance(scheduler._schedule_executions[schedule.id], scheduler._ScheduleExecution) # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. 
if sys.version_info.major == 3 and sys.version_info.minor >= 8: _rv = await mock_process() else: _rv = asyncio.ensure_future(mock_process()) mocker.patch.object(asyncio, 'create_subprocess_exec', return_value=_rv) mocker.patch.object(asyncio, 'ensure_future', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_resume_check_schedules') mocker.patch.object(scheduler, '_process_scripts', return_value="North Readings to PI") mocker.patch.object(scheduler, '_wait_for_task_completion') # Confirm that task has not started yet assert 0 == len(scheduler._schedule_executions[schedule.id].task_processes) # WHEN await scheduler._start_task(schedule) # THEN # Confirm that task has started assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes) assert 1 == log_info.call_count # assert call("Queued schedule '%s' for execution", 'OMF to PI north') == log_info.call_args_list[0] args, kwargs = log_info.call_args_list[0] assert "Process started: Schedule '%s' process '%s' task %s pid %s, %s running tasks\n%s" in args assert 'OMF to PI north' in args assert 'North Readings to PI' in args @pytest.mark.asyncio async def test_purge_tasks(self, mocker): # TODO: Mandatory - Add negative tests for full code coverage # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.multiple(scheduler, _ready=True, _paused=False) mocker.patch.object(scheduler, '_max_completed_task_age', datetime.datetime.now()) # WHEN await scheduler.purge_tasks() # THEN assert scheduler._purge_tasks_task is None assert scheduler._last_task_purge_time is not None @pytest.mark.asyncio async def test__check_purge_tasks(self, mocker): # TODO: Mandatory - Add negative tests for full code coverage # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.multiple(scheduler, _purge_tasks_task=None, _last_task_purge_time=None) mocker.patch.object(scheduler, 'purge_tasks', return_value=asyncio.ensure_future(mock_task())) # WHEN scheduler._check_purge_tasks() # THEN assert scheduler._purge_tasks_task is not None @pytest.mark.asyncio async def test__check_schedules(self, mocker): # TODO: Mandatory - Add negative tests for full code coverage # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_info = mocker.patch.object(scheduler._logger, "info") current_time = time.time() mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time) await scheduler._get_schedules() mocker.patch.object(scheduler, '_start_task', return_value=asyncio.ensure_future(mock_task())) # WHEN earliest_start_time = await scheduler._check_schedules() # THEN assert earliest_start_time is not None assert 3 == log_info.call_count args0, kwargs0 = log_info.call_args_list[0] args1, kwargs1 = log_info.call_args_list[1] args2, kwargs2 = log_info.call_args_list[2] assert 'stats collection' in args0 assert 'COAP listener south' in args1 assert 'OMF to PI north' in args2 @pytest.mark.asyncio @pytest.mark.skip("_scheduler_loop() not suitable for unit testing. 
Will be tested during System tests.") async def test__scheduler_loop(self, mocker): pass @pytest.mark.asyncio async def test__schedule_next_timed_task(self, mocker): # TODO: Mandatory - Add negative tests for full code coverage # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_info = mocker.patch.object(scheduler._logger, "info") current_time = time.time() mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time) await scheduler._get_schedules() sch_id = uuid.UUID("2176eb68-7303-11e7-8cf7-a6006ad3dba0") # stat collector sch = scheduler._schedules[sch_id] sch_execution = scheduler._schedule_executions[sch_id] time_before_call = sch_execution.next_start_time # WHEN next_dt = datetime.datetime.fromtimestamp(sch_execution.next_start_time) next_dt += datetime.timedelta(seconds=sch.repeat_seconds) scheduler._schedule_next_timed_task(sch, sch_execution, next_dt) time_after_call = sch_execution.next_start_time # THEN assert time_after_call > time_before_call assert 3 == log_info.call_count args0, kwargs0 = log_info.call_args_list[0] args1, kwargs1 = log_info.call_args_list[1] args2, kwargs2 = log_info.call_args_list[2] assert 'stats collection' in args0 assert 'COAP listener south' in args1 assert 'OMF to PI north' in args2 @pytest.mark.asyncio async def test__schedule_next_task(self, mocker): # TODO: Mandatory - Add negative tests for full code coverage # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_info = mocker.patch.object(scheduler._logger, "info") current_time = time.time() mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time-3600) await scheduler._get_schedules() sch_id = uuid.UUID("2176eb68-7303-11e7-8cf7-a6006ad3dba0") # stat collector sch = scheduler._schedules[sch_id] sch_execution = scheduler._schedule_executions[sch_id] time_before_call = sch_execution.next_start_time # WHEN scheduler._schedule_next_task(sch) time_after_call = sch_execution.next_start_time # THEN assert time_after_call > time_before_call assert 4 == log_info.call_count args0, kwargs0 = log_info.call_args_list[0] args1, kwargs1 = log_info.call_args_list[1] args2, kwargs2 = log_info.call_args_list[2] args3, kwargs3 = log_info.call_args_list[3] assert 'stats collection' in args0 assert 'COAP listener south' in args1 assert 'OMF to PI north' in args2 # As part of scheduler._get_schedules(), scheduler._schedule_first_task() also gets executed, hence # "stat collector" appears twice in this list. 
assert 'stats collection' in args3 @pytest.mark.asyncio async def test__schedule_first_task(self, mocker): # TODO: Mandatory - Add negative tests for full code coverage # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_info = mocker.patch.object(scheduler._logger, "info") current_time = time.time() curr_time = datetime.datetime.fromtimestamp(current_time) mocker.patch.multiple(scheduler, _max_running_tasks=10, _start_time=current_time) await scheduler._get_schedules() sch_id = uuid.UUID("2176eb68-7303-11e7-8cf7-a6006ad3dba0") # stat collector sch = scheduler._schedules[sch_id] sch_execution = scheduler._schedule_executions[sch_id] # WHEN scheduler._schedule_first_task(sch, current_time) time_after_call = sch_execution.next_start_time # THEN assert time_after_call > time.mktime(curr_time.timetuple()) assert 4 == log_info.call_count args0, kwargs0 = log_info.call_args_list[0] args1, kwargs1 = log_info.call_args_list[1] args2, kwargs2 = log_info.call_args_list[2] args3, kwargs3 = log_info.call_args_list[3] assert 'stats collection' in args0 assert 'COAP listener south' in args1 assert 'OMF to PI north' in args2 # As part of scheduler._get_schedules(), scheduler._schedule_first_task() also gets executed, hence # "stat collector" appears twice in this list. assert 'stats collection' in args3 @pytest.mark.asyncio async def test__get_process_scripts(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) # WHEN await scheduler._get_process_scripts() # THEN assert len(scheduler._storage_async.scheduled_processes) == len(scheduler._process_scripts) @pytest.mark.asyncio async def test__get_process_scripts_exception(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_debug = mocker.patch.object(scheduler._logger, "debug", side_effect=Exception()) log_exception = mocker.patch.object(scheduler._logger, "exception") # WHEN # THEN with pytest.raises(Exception): await scheduler._get_process_scripts() log_args = 'Query failed: %s', 'scheduled_processes' log_exception.assert_called_once_with(*log_args) @pytest.mark.asyncio @pytest.mark.parametrize("test_interval, is_exception", [ ('"Blah" 0 days', True), ('12:30:11', False), ('0 day 12:30:11', False), ('1 day 12:40:11', False), ('2 days', True), ('2 days 00:00:59', False), ('00:25:61', True) ]) async def test__get_schedules(self, test_interval, is_exception, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') log_exception = mocker.patch.object(scheduler._logger, "exception") new_schedules = copy.deepcopy(MockStorageAsync.schedules) new_schedules[5]['schedule_interval'] = test_interval mocker.patch.object(MockStorageAsync, 'schedules', new_schedules) # WHEN # THEN if is_exception is True: with pytest.raises(Exception): await scheduler._get_schedules() assert 1 == log_exception.call_count else: await 
scheduler._get_schedules() assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) @pytest.mark.asyncio async def test__get_schedules_exception(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_debug = mocker.patch.object(scheduler._logger, "debug", side_effect=Exception()) log_exception = mocker.patch.object(scheduler._logger, "exception") mocker.patch.object(scheduler, '_schedule_first_task', side_effect=Exception()) # WHEN # THEN with pytest.raises(Exception): await scheduler._get_schedules() log_args = 'Query failed: %s', 'schedules' log_exception.assert_called_once_with(*log_args) @pytest.mark.asyncio async def test__read_storage(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') # WHEN await scheduler._read_storage() # THEN assert len(scheduler._storage_async.scheduled_processes) == len(scheduler._process_scripts) assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) @pytest.mark.asyncio @pytest.mark.skip("_mark_tasks_interrupted() not implemented in main Scheduler class.") async def test__mark_tasks_interrupted(self, mocker): pass @pytest.mark.asyncio async def test__read_config(self, mocker): async def get_cat(): return { "max_running_tasks": { "description": "The maximum number of tasks that can be running at any given time", "type": "integer", "default": str(Scheduler._DEFAULT_MAX_RUNNING_TASKS), "value": str(Scheduler._DEFAULT_MAX_RUNNING_TASKS) }, "max_completed_task_age_days": { "description": "The maximum age, in days (based on the start time), for a rows " "in the tasks table that do not have a status of running", "type": "integer", "default": str(Scheduler._DEFAULT_MAX_COMPLETED_TASK_AGE_DAYS), "value": str(Scheduler._DEFAULT_MAX_COMPLETED_TASK_AGE_DAYS) }, } # Changed in version 3.8: patch() now returns an AsyncMock if the target is an async function. 
if sys.version_info.major == 3 and sys.version_info.minor >= 8: _rv = await get_cat() else: _rv = asyncio.ensure_future(get_cat()) # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) cr_cat = mocker.patch.object(ConfigurationManager, "create_category", return_value=asyncio.ensure_future(mock_task())) get_cat = mocker.patch.object(ConfigurationManager, "get_category_all_items", return_value=_rv) # WHEN assert scheduler._max_running_tasks is None assert scheduler._max_completed_task_age is None await scheduler._read_config() # THEN assert 1 == cr_cat.call_count assert 1 == get_cat.call_count assert scheduler._max_running_tasks is not None assert scheduler._max_completed_task_age is not None @pytest.mark.asyncio async def test_start(self, mocker): # TODO: Mandatory - Add negative tests for full code coverage # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_debug = mocker.patch.object(scheduler._logger, "debug") log_info = mocker.patch.object(scheduler._logger, "info") current_time = time.time() mocker.patch.object(scheduler, '_schedule_first_task') mocker.patch.object(scheduler, '_scheduler_loop', return_value=asyncio.ensure_future(mock_task())) mocker.patch.multiple(scheduler, _core_management_port=9999, _core_management_host="0.0.0.0", current_time=current_time - 3600) # TODO: Remove after implementation of above test test__read_config() mocker.patch.object(scheduler, '_read_config', return_value=asyncio.ensure_future(mock_task())) assert scheduler._ready is False # WHEN await scheduler.start() # THEN assert scheduler._ready is True assert len(scheduler._storage_async.scheduled_processes) == len(scheduler._process_scripts) assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) calls = [call('Starting'), call('Starting Scheduler: Management port received is %d', 9999)] log_info.assert_has_calls(calls, any_order=True) calls = [call('Database command: %s', 'scheduled_processes'), call('Database command: %s', 'schedules')] log_debug.assert_has_calls(calls, any_order=True) @pytest.mark.asyncio async def test_stop(self, mocker): # TODO: Mandatory - Add negative tests for full code coverage # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) log_info = mocker.patch.object(scheduler._logger, "info") log_exception = mocker.patch.object(scheduler._logger, "exception") mocker.patch.object(scheduler, '_scheduler_loop', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_resume_check_schedules', return_value=asyncio.ensure_future(mock_task())) mocker.patch.object(scheduler, '_purge_tasks_task', return_value=asyncio.ensure_future(asyncio.sleep(.1))) mocker.patch.object(scheduler, '_scheduler_loop_task', return_value=asyncio.ensure_future(asyncio.sleep(.1))) current_time = time.time() mocker.patch.multiple(scheduler, _core_management_port=9999, _core_management_host="0.0.0.0", _start_time=current_time - 3600, _paused=False, _task_processes={}) # WHEN retval = await scheduler.stop() # THEN assert retval is True assert scheduler._schedule_executions is None assert 
scheduler._task_processes is None assert scheduler._schedules is None assert scheduler._process_scripts is None assert scheduler._ready is False assert scheduler._paused is False assert scheduler._start_time is None calls = [call('Processing stop request'), call('Stopped')] log_info.assert_has_calls(calls, any_order=True) # TODO: Find why these exceptions are being raised despite mocking _purge_tasks_task, _scheduler_loop_task calls = [call('An exception was raised by Scheduler._purge_tasks %s', "object MagicMock can't be used in 'await' expression"), call('An exception was raised by Scheduler._scheduler_loop %s', "object MagicMock can't be used in 'await' expression")] log_exception.assert_has_calls(calls) @pytest.mark.asyncio async def test_get_scheduled_processes(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) await scheduler._get_process_scripts() mocker.patch.object(scheduler, '_ready', True) # WHEN processes = await scheduler.get_scheduled_processes() # THEN assert len(scheduler._storage_async.scheduled_processes) == len(processes) @pytest.mark.asyncio async def test_schedule_row_to_schedule(self, mocker): # GIVEN scheduler = Scheduler() schedule_id = uuid.uuid4() schedule_row = scheduler._ScheduleRow( id=schedule_id, name='Test Schedule', type=Schedule.Type.INTERVAL, day=0, time=0, repeat=10, repeat_seconds=10, exclusive=False, enabled=True, process_name='TestProcess') # WHEN schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) # THEN assert isinstance(schedule, Schedule) assert schedule.schedule_id == schedule_row[0] assert schedule.name == schedule_row[1] assert schedule.schedule_type == schedule_row[2] assert schedule_row[3] is 0 # 0 for Interval Schedule assert schedule_row[4] is 0 # 0 for Interval Schedule assert schedule.repeat == schedule_row[5] assert schedule.exclusive == schedule_row[7] assert schedule.enabled == schedule_row[8] assert schedule.process_name == schedule_row[9] @pytest.mark.asyncio async def test_get_schedules(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) # WHEN schedules = await scheduler.get_schedules() # THEN assert len(scheduler._storage_async.schedules) == len(schedules) @pytest.mark.asyncio async def test_get_schedule(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) schedule_id = uuid.UUID("cea17db8-6ccc-11e7-907b-a6006ad3dba0") # purge schedule # WHEN schedule = await scheduler.get_schedule(schedule_id) # THEN assert isinstance(schedule, Schedule) assert schedule.schedule_id == schedule_id assert schedule.name == "purge" assert schedule.schedule_type == Schedule.Type.MANUAL assert schedule.repeat == datetime.timedelta(0, 3600) assert schedule.exclusive is True assert schedule.enabled is True assert schedule.process_name == "purge" @pytest.mark.asyncio async def test_get_schedule_exception(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) schedule_id = uuid.uuid4() # WHEN # THEN with pytest.raises(ScheduleNotFoundError): schedule = await scheduler.get_schedule(schedule_id) @pytest.mark.asyncio async def test_save_schedule_new(self, mocker): @asyncio.coroutine def mock_coro(): return "" # GIVEN scheduler, schedule, 
log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) first_task = mocker.patch.object(scheduler, '_schedule_first_task') resume_sch = mocker.patch.object(scheduler, '_resume_check_schedules') log_info = mocker.patch.object(scheduler._logger, "info") enable_schedule = mocker.patch.object(scheduler, "enable_schedule", return_value=mock_coro()) disable_schedule = mocker.patch.object(scheduler, "disable_schedule", return_value=mock_coro()) schedule_id = uuid.uuid4() schedule_row = scheduler._ScheduleRow( id=schedule_id, name='Test Schedule', type=Schedule.Type.INTERVAL, day=0, time=0, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, exclusive=False, enabled=True, process_name='TestProcess') schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) # WHEN await scheduler.save_schedule(schedule) # THEN assert len(scheduler._storage_async.schedules) + 1 == len(scheduler._schedules) assert 1 == audit_logger.call_count calls =[call('SCHAD', {'schedule': {'name': 'Test Schedule', 'processName': 'TestProcess', 'type': Schedule.Type.INTERVAL, 'repeat': 30.0, 'enabled': True, 'exclusive': False}})] audit_logger.assert_has_calls(calls, any_order=True) assert 1 == first_task.call_count assert 1 == resume_sch.call_count assert 0 == enable_schedule.call_count assert 0 == disable_schedule.call_count @pytest.mark.asyncio async def test_save_schedule_new_with_enable_modified(self, mocker): @asyncio.coroutine def mock_coro(): return "" # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) first_task = mocker.patch.object(scheduler, '_schedule_first_task') resume_sch = mocker.patch.object(scheduler, '_resume_check_schedules') log_info = mocker.patch.object(scheduler._logger, "info") enable_schedule = mocker.patch.object(scheduler, "enable_schedule", return_value=mock_coro()) disable_schedule = mocker.patch.object(scheduler, "disable_schedule", return_value=mock_coro()) schedule_id = uuid.uuid4() schedule_row = scheduler._ScheduleRow( id=schedule_id, name='Test Schedule', type=Schedule.Type.INTERVAL, day=0, time=0, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, exclusive=False, enabled=True, process_name='TestProcess') schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) # WHEN await scheduler.save_schedule(schedule, is_enabled_modified=True) # THEN assert len(scheduler._storage_async.schedules) + 1 == len(scheduler._schedules) assert 1 == audit_logger.call_count calls =[call('SCHAD', {'schedule': {'name': 'Test Schedule', 'processName': 'TestProcess', 'type': Schedule.Type.INTERVAL, 'repeat': 30.0, 'enabled': True, 'exclusive': False}})] audit_logger.assert_has_calls(calls, any_order=True) assert 1 == first_task.call_count assert 1 == resume_sch.call_count assert 1 == enable_schedule.call_count assert 0 == disable_schedule.call_count # WHEN await scheduler.save_schedule(schedule, is_enabled_modified=False) # THEN assert 1 == disable_schedule.call_count @pytest.mark.asyncio async def test_save_schedule_update(self, mocker): @asyncio.coroutine def mock_coro(): return "" # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) audit_logger = mocker.patch.object(AuditLogger, 'information', 
return_value=asyncio.ensure_future(mock_task())) first_task = mocker.patch.object(scheduler, '_schedule_first_task') resume_sch = mocker.patch.object(scheduler, '_resume_check_schedules') log_info = mocker.patch.object(scheduler._logger, "info") schedule_id = uuid.UUID("2b614d26-760f-11e7-b5a5-be2e44b06b34") # OMF to PI North schedule_row = scheduler._ScheduleRow( id=schedule_id, name='Test Schedule', type=Schedule.Type.TIMED, day=1, time=datetime.time(), repeat=datetime.timedelta(seconds=30), repeat_seconds=30, exclusive=False, enabled=True, process_name='TestProcess') schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) enable_schedule = mocker.patch.object(scheduler, "enable_schedule", return_value=mock_coro()) disable_schedule = mocker.patch.object(scheduler, "disable_schedule", return_value=mock_coro()) # WHEN await scheduler.save_schedule(schedule) # THEN assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) assert 1 == audit_logger.call_count calls = [call('SCHCH', {'schedule': {'name': 'Test Schedule', 'enabled': True, 'repeat': 30.0, 'exclusive': False, 'day': 1, 'time': '0:0:0', 'processName': 'TestProcess', 'type': Schedule.Type.TIMED}})] audit_logger.assert_has_calls(calls, any_order=True) assert 1 == first_task.call_count assert 1 == resume_sch.call_count assert 0 == enable_schedule.call_count assert 0 == disable_schedule.call_count @pytest.mark.asyncio async def test_save_schedule_update_with_enable_modified(self, mocker): @asyncio.coroutine def mock_coro(): return "" # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) first_task = mocker.patch.object(scheduler, '_schedule_first_task') resume_sch = mocker.patch.object(scheduler, '_resume_check_schedules') log_info = mocker.patch.object(scheduler._logger, "info") schedule_id = uuid.UUID("2b614d26-760f-11e7-b5a5-be2e44b06b34") # OMF to PI North schedule_row = scheduler._ScheduleRow( id=schedule_id, name='Test Schedule', type=Schedule.Type.TIMED, day=1, time=datetime.time(), repeat=datetime.timedelta(seconds=30), repeat_seconds=30, exclusive=False, enabled=True, process_name='TestProcess') schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) enable_schedule = mocker.patch.object(scheduler, "enable_schedule", return_value=mock_coro()) disable_schedule = mocker.patch.object(scheduler, "disable_schedule", return_value=mock_coro()) # WHEN await scheduler.save_schedule(schedule, is_enabled_modified=True) # THEN assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) assert 1 == audit_logger.call_count calls = [call('SCHCH', {'schedule': {'name': 'Test Schedule', 'enabled': True, 'repeat': 30.0, 'exclusive': False, 'day': 1, 'time': '0:0:0', 'processName': 'TestProcess', 'type': Schedule.Type.TIMED}})] audit_logger.assert_has_calls(calls, any_order=True) assert 1 == first_task.call_count assert 1 == resume_sch.call_count assert 1 == enable_schedule.call_count assert 0 == disable_schedule.call_count # WHEN await scheduler.save_schedule(schedule, is_enabled_modified=False) # THEN assert 1 == disable_schedule.call_count @pytest.mark.asyncio async def test_save_schedule_exception(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) schedule_id = uuid.uuid4() schedule_row = scheduler._ScheduleRow( 
id=schedule_id, name='Test Schedule', type=Schedule.Type.TIMED, day=0, time=0, repeat=datetime.timedelta(seconds=30), repeat_seconds=30, exclusive=False, enabled=True, process_name='TestProcess') # WHEN # THEN with pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.name = None await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith("name can not be empty") with pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.name = "" await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith("name can not be empty") with pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.repeat = 1234 await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith('repeat must be of type datetime.timedelta') with pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.exclusive = None await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith('exclusive can not be None') with pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.time = 1234 await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith('time must be of type datetime.time') with pytest.raises(ValueError) as ex: temp_schedule = scheduler._schedule_row_to_schedule(schedule_id, schedule_row) temp_schedule.day = 0 temp_schedule.time = datetime.time() await scheduler.save_schedule(temp_schedule) del temp_schedule assert str(ex).endswith('day must be between 1 and 7') @pytest.mark.asyncio @pytest.mark.skip(reason="To be done") async def test_remove_service_from_task_processes(self): pass @pytest.mark.asyncio async def test_disable_schedule(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') await scheduler._get_schedules() mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_task_processes') audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) log_info = mocker.patch.object(scheduler._logger, "info") sch_id = uuid.UUID("2b614d26-760f-11e7-b5a5-be2e44b06b34") # OMF to PI North # WHEN status, message = await scheduler.disable_schedule(sch_id) # THEN assert status is True assert message == "Schedule successfully disabled" assert (scheduler._schedules[sch_id]).id == sch_id assert (scheduler._schedules[sch_id]).enabled is False assert 2 == log_info.call_count calls = [call('No Task running for Schedule %s', '2b614d26-760f-11e7-b5a5-be2e44b06b34'), call("Disabled Schedule '%s/%s' process '%s'\n", 'OMF to PI north', '2b614d26-760f-11e7-b5a5-be2e44b06b34', 'North Readings to PI')] log_info.assert_has_calls(calls) assert 1 == audit_logger.call_count calls = [call('SCHCH', {'schedule': {'name': 'OMF to PI north', 'repeat': 30.0, 'enabled': False, 'type': Schedule.Type.INTERVAL, 'exclusive': True, 'processName': 'North Readings to PI'}})] audit_logger.assert_has_calls(calls, any_order=True) @pytest.mark.asyncio async def 
test_disable_schedule_wrong_schedule_id(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') await scheduler._get_schedules() mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_task_processes') log_exception = mocker.patch.object(scheduler._logger, "exception") random_schedule_id = uuid.uuid4() # WHEN await scheduler.disable_schedule(random_schedule_id) # THEN log_params = "No such Schedule %s", str(random_schedule_id) log_exception.assert_called_with(*log_params) @pytest.mark.asyncio async def test_disable_schedule_already_disabled(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') await scheduler._get_schedules() mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_task_processes') log_info = mocker.patch.object(scheduler._logger, "info") sch_id = uuid.UUID("d1631422-9ec6-11e7-abc4-cec278b6b50a") # backup # WHEN status, message = await scheduler.disable_schedule(sch_id) # THEN assert status is True assert message == "Schedule {} already disabled".format(str(sch_id)) assert (scheduler._schedules[sch_id]).id == sch_id assert (scheduler._schedules[sch_id]).enabled is False log_params = "Schedule %s already disabled", str(sch_id) log_info.assert_called_with(*log_params) @pytest.mark.asyncio async def test_enable_schedule(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) sch_id = uuid.UUID("d1631422-9ec6-11e7-abc4-cec278b6b50a") # backup queue_task = mocker.patch.object(scheduler, 'queue_task', return_value=asyncio.ensure_future(mock_task())) audit_logger = mocker.patch.object(AuditLogger, 'information', return_value=asyncio.ensure_future(mock_task())) # WHEN status, message = await scheduler.enable_schedule(sch_id) # THEN assert status is True assert message == "Schedule successfully enabled" assert (scheduler._schedules[sch_id]).id == sch_id assert (scheduler._schedules[sch_id]).enabled is True assert 1 == queue_task.call_count calls = [call("Enabled Schedule '%s/%s' process '%s'\n", 'backup hourly', 'd1631422-9ec6-11e7-abc4-cec278b6b50a', 'backup')] log_info.assert_has_calls(calls, any_order=True) assert 1 == audit_logger.call_count calls = [call('SCHCH', {'schedule': {'name': 'backup hourly', 'type': Schedule.Type.INTERVAL, 'processName': 'backup', 'exclusive': True, 'repeat': 3600.0, 'enabled': True}})] audit_logger.assert_has_calls(calls, any_order=True) @pytest.mark.asyncio async def test_enable_schedule_already_enabled(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) sch_id = uuid.UUID("ada12840-68d3-11e7-907b-a6006ad3dba0") #Coap mocker.patch.object(scheduler, 'queue_task', return_value=asyncio.ensure_future(mock_task())) # WHEN status, message = await scheduler.enable_schedule(sch_id) # THEN assert status is True assert message == "Schedule is already enabled" assert (scheduler._schedules[sch_id]).id == sch_id assert (scheduler._schedules[sch_id]).enabled is True log_params = "Schedule %s already enabled", 
str(sch_id) log_info.assert_called_with(*log_params) @pytest.mark.asyncio async def test_enable_schedule_wrong_schedule_id(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) random_schedule_id = uuid.uuid4() # WHEN await scheduler.enable_schedule(random_schedule_id) # THEN log_params = "No such Schedule %s", str(random_schedule_id) log_exception.assert_called_with(*log_params) @pytest.mark.asyncio async def test_queue_task(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') # log_info = mocker.patch.object(scheduler._logger, "info") await scheduler._get_schedules() sch_id = uuid.UUID("cea17db8-6ccc-11e7-907b-a6006ad3dba0") # backup mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_resume_check_schedules') # Assert that there is no task queued for this schedule at first with pytest.raises(KeyError) as excinfo: assert scheduler._schedule_executions[sch_id] is True # WHEN await scheduler.queue_task(sch_id) # THEN assert isinstance(scheduler._schedule_executions[sch_id], scheduler._ScheduleExecution) # log_params = "Queued schedule '%s' for execution", 'purge' # log_info.assert_called_with(*log_params) @pytest.mark.asyncio async def test_queue_task_schedule_not_found(self, mocker): # GIVEN scheduler = Scheduler() scheduler._storage = MockStorage(core_management_host=None, core_management_port=None) scheduler._storage_async = MockStorageAsync(core_management_host=None, core_management_port=None) mocker.patch.object(scheduler, '_schedule_first_task') mocker.patch.object(scheduler, '_ready', True) mocker.patch.object(scheduler, '_resume_check_schedules') # WHEN # THEN with pytest.raises(ScheduleNotFoundError) as excinfo: await scheduler.queue_task(uuid.uuid4()) @pytest.mark.asyncio async def test_delete_schedule(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) sch_id = uuid.UUID("d1631422-9ec6-11e7-abc4-cec278b6b50a") # backup await scheduler._get_schedules() # Confirm no. of schedules assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) mocker.patch.object(scheduler, '_ready', True) # WHEN # Now delete schedule await scheduler.delete_schedule(sch_id) # THEN # Now confirm there is one schedule less assert len(scheduler._storage_async.schedules) - 1 == len(scheduler._schedules) @pytest.mark.asyncio async def test_delete_schedule_enabled_schedule(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) sch_id = uuid.UUID("ada12840-68d3-11e7-907b-a6006ad3dba0") #Coap await scheduler._get_schedules() mocker.patch.object(scheduler, '_ready', True) # Confirm there are 14 schedules assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) # WHEN # Now delete schedule with pytest.raises(RuntimeWarning): await scheduler.delete_schedule(sch_id) # THEN # Now confirm no schedule is deleted assert len(scheduler._storage_async.schedules) == len(scheduler._schedules) assert 1 == log_exception.call_count log_params = 'Attempt to delete an enabled Schedule %s. 
Not deleted.', str(sch_id) log_exception.assert_called_with(*log_params) @pytest.mark.asyncio async def test_delete_schedule_exception(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) log_debug = mocker.patch.object(scheduler._logger, 'debug', side_effect=Exception()) sch_id = uuid.UUID("d1631422-9ec6-11e7-abc4-cec278b6b50a") # backup # WHEN # THEN with pytest.raises(ScheduleNotFoundError) as excinfo: await scheduler.delete_schedule(uuid.uuid4()) @pytest.mark.asyncio async def test_delete_schedule_not_found(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) # WHEN # THEN with pytest.raises(ScheduleNotFoundError) as excinfo: await scheduler.delete_schedule(uuid.uuid4()) @pytest.mark.asyncio async def test_get_running_tasks(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) # Assert that there is no task queued for schedule with pytest.raises(KeyError) as excinfo: assert scheduler._schedule_executions[schedule.id] is True # Now queue task and assert that the task has been queued await scheduler.queue_task(schedule.id) assert isinstance(scheduler._schedule_executions[schedule.id], scheduler._ScheduleExecution) # Confirm that no task has started yet assert 0 == len(scheduler._schedule_executions[schedule.id].task_processes) await scheduler._start_task(schedule) # Confirm that task has started assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes) # WHEN tasks = await scheduler.get_running_tasks() # THEN assert 1 == len(tasks) assert schedule.process_name == tasks[0].process_name assert tasks[0].reason is None assert tasks[0].state == Task.State.RUNNING assert tasks[0].cancel_requested is None assert tasks[0].start_time is not None assert tasks[0].end_time is None assert tasks[0].exit_code is None @pytest.mark.asyncio async def test_get_task(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) # Assert that there is no North task queued for schedule with pytest.raises(KeyError) as excinfo: assert scheduler._schedule_executions[schedule.id] is True # Now queue task and assert that the North task has been queued await scheduler.queue_task(schedule.id) assert isinstance(scheduler._schedule_executions[schedule.id], scheduler._ScheduleExecution) # Confirm that no task has started yet assert 0 == len(scheduler._schedule_executions[schedule.id].task_processes) await scheduler._start_task(schedule) # Confirm that task has started assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes) task_id = list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0] # WHEN task = await scheduler.get_task(task_id) # THEN assert schedule.process_name == task.process_name assert task.reason is '' assert task.state is not None assert task.cancel_requested is None assert task.start_time is not None assert task.end_time is not None assert task.exit_code is '0' @pytest.mark.skip("Need a suitable fixture") @pytest.mark.asyncio async def test_get_task_not_found(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) # WHEN # THEN with pytest.raises(TaskNotFoundError) as excinfo: tasks = await scheduler.get_task(uuid.uuid4()) @pytest.mark.asyncio async def test_get_task_exception(self, 
mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) log_debug = mocker.patch.object(scheduler._logger, 'debug', side_effect=Exception()) # WHEN # THEN task_id = uuid.uuid4() with pytest.raises(Exception) as excinfo: await scheduler.get_task(task_id) # THEN payload = {"return": ["id", "process_name", "schedule_name", "state", {"alias": "start_time", "format": "YYYY-MM-DD HH24:MI:SS.MS", "column": "start_time"}, {"alias": "end_time", "format": "YYYY-MM-DD HH24:MI:SS.MS", "column": "end_time"}, "reason", "exit_code"], "where": {"column": "id", "condition": "=", "value": str(task_id)}} args, kwargs = log_exception.call_args assert 'Query failed: %s' == args[0] p = json.loads(args[1]) assert payload == p @pytest.mark.asyncio async def test_get_tasks(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) # Assert that there is no North task queued for schedule with pytest.raises(KeyError) as excinfo: assert scheduler._schedule_executions[schedule.id] is True # Now queue task and assert that the North task has been queued await scheduler.queue_task(schedule.id) assert isinstance(scheduler._schedule_executions[schedule.id], scheduler._ScheduleExecution) # Confirm that no task has started yet assert 0 == len(scheduler._schedule_executions[schedule.id].task_processes) await scheduler._start_task(schedule) # Confirm that task has started assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes) task_id = list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0] # WHEN tasks = await scheduler.get_tasks() # THEN assert schedule.process_name == tasks[0].process_name assert tasks[0].reason is '' assert tasks[0].state is not None assert tasks[0].cancel_requested is None assert tasks[0].start_time is not None assert tasks[0].end_time is not None assert tasks[0].exit_code is '0' @pytest.mark.asyncio async def test_get_tasks_exception(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) log_debug = mocker.patch.object(scheduler._logger, 'debug', side_effect=Exception()) # WHEN with pytest.raises(Exception) as excinfo: tasks = await scheduler.get_tasks() # THEN payload = {"return": ["id", "process_name", "schedule_name", "state", {"alias": "start_time", "column": "start_time", "format": "YYYY-MM-DD HH24:MI:SS.MS"}, {"alias": "end_time", "column": "end_time", "format": "YYYY-MM-DD HH24:MI:SS.MS"}, "reason", "exit_code"], "limit": 100} args, kwargs = log_exception.call_args assert 'Query failed: %s' == args[0] p = json.loads(args[1]) assert payload == p @pytest.mark.asyncio async def test_cancel_task_all_ok(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) # Assert that there is no task queued for schedule with pytest.raises(KeyError) as excinfo: assert scheduler._schedule_executions[schedule.id] is True # Now queue task and assert that the task has been queued await scheduler.queue_task(schedule.id) assert isinstance(scheduler._schedule_executions[schedule.id], scheduler._ScheduleExecution) # Confirm that no task has started yet assert 0 == len(scheduler._schedule_executions[schedule.id].task_processes) await scheduler._start_task(schedule) # Confirm that task has started assert 1 == len(scheduler._schedule_executions[schedule.id].task_processes) task_id = 
list(scheduler._schedule_executions[schedule.id].task_processes.keys())[0] # Confirm that cancel request has not been made assert scheduler._schedule_executions[schedule.id].task_processes[task_id].cancel_requested is None # WHEN await scheduler.cancel_task(task_id) # THEN assert scheduler._schedule_executions[schedule.id].task_processes[task_id].cancel_requested is not None assert 2 == log_info.call_count # args, kwargs = log_info.call_args_list[0] # assert ("Queued schedule '%s' for execution", 'OMF to PI north') == args args, kwargs = log_info.call_args_list[0] assert "Process started: Schedule '%s' process '%s' task %s pid %s, %s running tasks\n%s" in args assert 'OMF to PI north' in args assert 'North Readings to PI' in args args, kwargs = log_info.call_args_list[1] assert "Stopping process: Schedule '%s' process '%s' task %s pid %s\n%s" in args assert 'OMF to PI north' in args assert 'North Readings to PI' in args @pytest.mark.asyncio async def test_cancel_task_exception(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) # WHEN # THEN with pytest.raises(TaskNotRunningError) as excinfo: await scheduler.cancel_task(uuid.uuid4()) @pytest.mark.asyncio async def test_not_ready_and_paused(self, mocker): # GIVEN scheduler, schedule, log_info, log_exception, log_error, log_debug = await self.scheduler_fixture(mocker) mocker.patch.object(scheduler, '_ready', False) mocker.patch.object(scheduler, '_paused', True) # WHEN # THEN with pytest.raises(NotReadyError) as excinfo: await scheduler.start() with pytest.raises(NotReadyError) as excinfo: await scheduler.get_scheduled_processes() with pytest.raises(NotReadyError) as excinfo: await scheduler.get_schedules() with pytest.raises(NotReadyError) as excinfo: await scheduler.get_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await scheduler.save_schedule(Schedule(Schedule.Type.INTERVAL)) with pytest.raises(NotReadyError) as excinfo: await scheduler.disable_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await scheduler.enable_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await scheduler.queue_task(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await scheduler.delete_schedule(uuid.uuid4()) with pytest.raises(NotReadyError) as excinfo: await scheduler.get_running_tasks() with pytest.raises(NotReadyError) as excinfo: await scheduler.cancel_task(uuid.uuid4()) @pytest.mark.skip("_terminate_child_processes() not fit for unit test.") @pytest.mark.asyncio async def test__terminate_child_processes(self, mocker): pass class MockStorage(StorageClientAsync): def __init__(self, core_management_host=None, core_management_port=None): super().__init__(core_management_host, core_management_port) def _get_storage_service(self, host, port): return { "id": uuid.uuid4(), "name": "Fledge Storage", "type": "Storage", "service_port": 9999, "management_port": 9999, "address": "0.0.0.0", "protocol": "http" } class MockStorageAsync(StorageClientAsync): schedules = [ { "id": "cea17db8-6ccc-11e7-907b-a6006ad3dba0", "process_name": "purge", "schedule_name": "purge", "schedule_type": 4, "schedule_interval": "01:00:00", "schedule_time": "", "schedule_day": 0, "exclusive": "t", "enabled": "t" }, { "id": "2176eb68-7303-11e7-8cf7-a6006ad3dba0", "process_name": "stats collector", "schedule_name": "stats collection", "schedule_type": 2, "schedule_interval": "00:00:15", "schedule_time": "00:00:15", "schedule_day": 3, "exclusive": 
"f", "enabled": "t" }, { "id": "d1631422-9ec6-11e7-abc4-cec278b6b50a", "process_name": "backup", "schedule_name": "backup hourly", "schedule_type": 3, "schedule_interval": "01:00:00", "schedule_time": "", "schedule_day": 0, "exclusive": "t", "enabled": "f" }, { "id": "ada12840-68d3-11e7-907b-a6006ad3dba0", "process_name": "COAP", "schedule_name": "COAP listener south", "schedule_type": 1, "schedule_interval": "00:00:00", "schedule_time": "", "schedule_day": 0, "exclusive": "t", "enabled": "t" }, { "id": "2b614d26-760f-11e7-b5a5-be2e44b06b34", "process_name": "North Readings to PI", "schedule_name": "OMF to PI north", "schedule_type": 3, "schedule_interval": "00:00:30", "schedule_time": "", "schedule_day": 0, "exclusive": "t", "enabled": "t" }, { "id": "5d7fed92-fb9a-11e7-8c3f-9a214cf093ae", "process_name": "North Readings to OCS", "schedule_name": "OMF to OCS north", "schedule_type": 3, "schedule_interval": "1 day 00:00:40", "schedule_time": "", "schedule_day": 0, "exclusive": "t", "enabled": "f" }, ] scheduled_processes = [ { "name": "purge", "script": [ "tasks/purge" ] }, { "name": "stats collector", "script": [ "tasks/statistics" ] }, { "name": "backup", "script": [ "tasks/backup_postgres" ] }, { "name": "COAP", "script": [ "services/south" ] }, { "name": "North Readings to PI", "script": [ "tasks/north", "--stream_id", "1", "--debug_level", "1" ] }, { "name": "North Readings to OCS", "script": [ "tasks/north", "--stream_id", "4", "--debug_level", "1" ] }, ] tasks = [ { "id": "259b8570-65c1-4b92-8c62-e9642631a600", "process_name": "North Readings to PI", "state": 1, "start_time": "2018-02-06 13:28:14.477868", "end_time": "2018-02-06 13:28:14.856375", "exit_code": "0", "reason": "" } ] def __init__(self, core_management_host=None, core_management_port=None): super().__init__(core_management_host, core_management_port) def _get_storage_service(self, host, port): return { "id": uuid.uuid4(), "name": "Fledge Storage", "type": "Storage", "service_port": 9999, "management_port": 9999, "address": "0.0.0.0", "protocol": "http" } @classmethod async def insert_into_tbl(cls, table_name, payload): pass @classmethod async def update_tbl(cls, table_name, payload): # Only valid for test_save_schedule_update if table_name == "schedules": return {"count": 1} @classmethod async def delete_from_tbl(cls, table_name, condition=None): pass @classmethod async def query_tbl_with_payload(cls, table_name, query_payload): if table_name == 'tasks': return { "count": len(MockStorageAsync.tasks), "rows": MockStorageAsync.tasks } @classmethod async def query_tbl(cls, table_name, query=None): if table_name == 'schedules': return { "count": len(MockStorageAsync.schedules), "rows": MockStorageAsync.schedules } if table_name == 'scheduled_processes': return { "count": len(MockStorageAsync.scheduled_processes), "rows": MockStorageAsync.scheduled_processes }
42.621555
339
0.652936
68,719
0.987328
0
0
64,097
0.920921
61,791
0.887789
12,842
0.184509
7c9af51ba1243be5af3bd0e724c771174bb964d2
1,007
py
Python
problem_solving/python/algorithms/greedy/marcs_cakewalk.py
kcc3/hackerrank-solutions
f862b44b840bd447d99dc148f6bb5e2f5bfb8a86
[ "MIT" ]
null
null
null
problem_solving/python/algorithms/greedy/marcs_cakewalk.py
kcc3/hackerrank-solutions
f862b44b840bd447d99dc148f6bb5e2f5bfb8a86
[ "MIT" ]
null
null
null
problem_solving/python/algorithms/greedy/marcs_cakewalk.py
kcc3/hackerrank-solutions
f862b44b840bd447d99dc148f6bb5e2f5bfb8a86
[ "MIT" ]
1
2020-06-04T09:23:19.000Z
2020-06-04T09:23:19.000Z
def marcs_cakewalk(calorie):
    """Hackerrank Problem: https://www.hackerrank.com/challenges/marcs-cakewalk/problem

    Marc loves cupcakes, but he also likes to stay fit. Each cupcake has a calorie count, and Marc can walk a distance
    to expend those calories. If Marc has eaten j cupcakes so far, after eating a cupcake with c calories he must
    walk at least 2**j x c miles to maintain his weight.

    Solve: To calculate the minimum miles, you solve based on the highest calorie to lowest calorie cupcake

    Args:
        calorie (list): List of integers denoting the calories for each cupcake

    Returns:
        int: The minimum number of miles Marc must walk to maintain his weight
    """
    calories = 0
    for i, c in enumerate(sorted(calorie, reverse=True)):
        calories += (2 ** i * c)
    return calories


if __name__ == "__main__":
    assert marcs_cakewalk([5, 10, 7]) == 44
    assert marcs_cakewalk([1, 3, 2]) == 11
    assert marcs_cakewalk([7, 4, 9, 6]) == 79
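A quick arithmetic check of the greedy rule above (an editorial illustration, not part of the original file): sorting [5, 10, 7] in descending order gives [10, 7, 5], so the minimum miles are 2**0 * 10 + 2**1 * 7 + 2**2 * 5 = 10 + 14 + 20 = 44, which matches the first assert.

calorie = [5, 10, 7]
miles = sum((2 ** i) * c for i, c in enumerate(sorted(calorie, reverse=True)))
assert miles == 44  # 10 + 14 + 20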
37.296296
118
0.683217
0
0
0
0
0
0
0
0
693
0.688183
7c9bc57e7e9891072399e9288ee87401c640bfb4
1,583
py
Python
coronaindiatracker/coronatracker/views.py
ankitgoswami23/CoronaIndiaTracker
b2e116a595b3c69ccefa93b60833c09aa07b5eed
[ "Unlicense" ]
2
2020-07-26T05:57:27.000Z
2020-07-26T07:12:15.000Z
coronaindiatracker/coronatracker/views.py
ankee23/CoronaIndiaTracker
b2e116a595b3c69ccefa93b60833c09aa07b5eed
[ "Unlicense" ]
null
null
null
coronaindiatracker/coronatracker/views.py
ankee23/CoronaIndiaTracker
b2e116a595b3c69ccefa93b60833c09aa07b5eed
[ "Unlicense" ]
1
2020-11-26T08:52:11.000Z
2020-11-26T08:52:11.000Z
from django.shortcuts import render
import requests
from bs4 import BeautifulSoup


def corona_data(request):
    "Testaaaa"
    corona_html = requests.get("https://www.mygov.in/covid-19")
    soup = BeautifulSoup(corona_html.content, 'html.parser')
    state_wise_data = soup.find_all('div', class_='views-row')
    information = soup.find('div', class_='information_row')
    info = {
        'update_data': information.find('div', class_='info_title').find('span').string,
        'active_case': information.find('div', class_='active-case').find('span', class_='icount').string,
        'discharge': information.find('div', class_='discharge').find('span', class_='icount').string,
        'death': information.find('div', class_='death_case').find('span', class_='icount').string
    }
    corona_info = [
        {
            "state_name": state.find_all('span', class_='st_name')[0].string,
            "confirm_case": state.find_all('div', class_='tick-confirmed')[0].find_all('small')[0].string,
            "active_case": state.find_all('div', class_='tick-active')[0].find_all('small')[0].string,
            "discharge": state.find_all('div', class_='tick-discharged')[0].find_all('small')[0].string,
            "death": state.find_all('div', class_='tick-death')[0].find_all('small')[0].string
        } for state in state_wise_data
    ]
    context = {
        'corona_info': info,
        'data': sorted(corona_info, key=lambda i: int(''.join(i['confirm_case'].replace(',', ''))), reverse=True)
    }

    return render(request, 'coronainfo/index.html', context)
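The sort key in the context dictionary above strips thousands separators before converting to int because the scraped counts arrive as strings such as '12,345'. A small self-contained sketch of that ordering, using made-up rows in place of the parsed page:

# Hypothetical rows standing in for the BeautifulSoup output above.
corona_info = [
    {"state_name": "A", "confirm_case": "9,876"},
    {"state_name": "B", "confirm_case": "12,345"},
    {"state_name": "C", "confirm_case": "850"},
]
ranked = sorted(corona_info, key=lambda i: int(i["confirm_case"].replace(",", "")), reverse=True)
assert [r["state_name"] for r in ranked] == ["B", "A", "C"]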
45.228571
113
0.642451
0
0
0
0
0
0
0
0
493
0.311434
7c9c1524555fded271e617bca48b5b1e6a1e9ace
6,082
py
Python
compare.py
geohackweek/ghw2019_wiggles
9b636db8d97986e038a301e36b808e820ccc525f
[ "BSD-3-Clause" ]
3
2019-10-09T19:42:12.000Z
2021-05-28T00:10:54.000Z
compare.py
geohackweek/ghw2019_wiggles
9b636db8d97986e038a301e36b808e820ccc525f
[ "BSD-3-Clause" ]
1
2019-09-11T16:37:59.000Z
2019-09-11T16:37:59.000Z
compare.py
geohackweek/ghw2019_wiggles
9b636db8d97986e038a301e36b808e820ccc525f
[ "BSD-3-Clause" ]
3
2019-09-10T20:41:59.000Z
2019-09-10T20:42:57.000Z
# Script tests GPD model using UW truth data # Test outputs: # - type of event tested [EQS, EQP, SUS, SUP, THS, THP, SNS, SNP, PXS, PXP] # - phase [P, S, N] Note: N - not detected # - model time offset (t_truth - t_model_pick) import numpy import math import string import datetime import sys import os import csv from datetime import datetime from datetime import timedelta # params padding_time = 10 fudge_factor = timedelta(seconds=27) time_diff = timedelta(seconds=10) # file dirs parsed_arrivals = [] model_in = [] model_out = [] comp_out = [] for etype in ['EQS','EQP','SUS','SUP','THS','THP','SNS','SNP','PXS','PXP']: arrival = "parsed_arrivals/" + etype + ".arrivals.txt" infile = "input_files/GPD." + etype + ".in" outfile = "output_files/GPD." + etype + ".out" parsed_arrivals.append(arrival) model_in.append(infile) model_out.append(outfile) comp_out.append("comparison_out/comp." + etype + ".out") # ------------------ # read in UW arrival times as an array def read_arrivals_to_arr(filename): model_list = [] with open(filename) as f: for ln in f: row = ln.split() line = [] line.extend([row[0].strip(), row[1].strip(), row[2].strip()]) formatted_time = datetime.strptime(row[3], "%Y-%m-%dT%H:%M:%S.%f") - fudge_factor line.extend([formatted_time, row[4].strip(), row[5].strip()]) model_list.append(line) return model_list def arrivals_to_dictionary(arrivals): picks = {} for arr in arrivals: key = datetime.strftime(arr[3], "%Y-%m-%dT%H:%M:%S.%f") key = key[0:-7] picks[key] = arr return picks def model_in_to_array(file): timestamps = [] with open(file) as f: for ln in f: entry = ln.split() entry = entry[0].strip() entry = entry[len(entry)-20:len(entry)-6] entry = entry[0:4] + "-" + entry[4:6] + "-" + entry[6:8] + "T" + entry[8:10] + ":" + entry[10:12] + ":" + entry[12:14] # ------------- TIME STAMP ISSUES -------------------- # case 1: run if .mseed files have correct timestamps """ time = datetime.strptime(entry, "%Y-%m-%dT%H:%M:%S") - fudge_factor # + time_diff (might need to add this) time = datetime.strftime(time, "%Y-%m-%dT%H:%M:%S") """ # case 2: run if .mseed files have buggy minutes in the timestamps time = datetime.strptime(entry, "%Y-%m-%dT%H:%M:%S") if time.second >=37 and time.second <=51: time = time + timedelta(seconds=23) + time_diff time = datetime.strftime(time, "%Y-%m-%dT%H:%M:%S") else: sec_int = time.second + 23 if sec_int > 59: sec_int = sec_int - 60 sec_int = str(sec_int).zfill(2) time = datetime.strftime(time, "%Y-%m-%dT%H:%M:%S") time = time[:-2] + sec_int time = datetime.strptime(time, "%Y-%m-%dT%H:%M:%S") + time_diff time = datetime.strftime(time, "%Y-%m-%dT%H:%M:%S") # ----------------------------------------------------- timestamps.append(time) return timestamps def filter_times(arrivals, model_in): filtered = [] for key in model_in: if key in arrivals: filtered.append(arrivals[key]) return filtered # read in Caltech model output and create a dictionary def read_output_to_dict(filename): model_dict = {} with open(filename) as f: for line in f: tmp = line.split() key = tmp[0] + "-" + tmp[1] + "-" + tmp[2] try: # fails if date is missing floating point numbers formatted_time = datetime.strptime(tmp[3], "%Y-%m-%dT%H:%M:%S.%f") if key not in model_dict: model_dict[key] = [] model_dict[key].append(formatted_time) except: pass return model_dict # lookup time in the dictionary def key_lookup(event, phase, model_dict): key = event[0] + "-" + event[1] + "-" + phase times = [] if key in model_dict.keys(): times = model_dict[key] times = time_lookup(event[3], times) return times # 
search for arrivals within the padding time window def time_lookup(t, time_arr): t_lower = t - timedelta(seconds=padding_time) t_upper = t + timedelta(seconds=padding_time) offsets = [] for time in time_arr: if time > t_lower and time < t_upper: offset = t - time # or format time to absolute value: abs(t - time) offset = offset.total_seconds() offsets.append('{:.6f}'.format(offset)) return offsets def execute_script(arrival, inf, outf, comp_out): # write outputs to file outp_file = open(comp_out, 'w') truth_arr = read_arrivals_to_arr(arrival) # read in the arrival times to a list truth_dict = arrivals_to_dictionary(truth_arr) # convert arrivals to a dictionary (key=truncated timestamp) model_in = model_in_to_array(inf) # read in model .in file as a list truth_arr = filter_times(truth_dict, model_in) # filter arrivals to picks that were passed to the model (.in file) model_dict = read_output_to_dict(outf) # read output file for event in truth_arr: phase = event[2] times = key_lookup(event, phase, model_dict) if len(times) == 0: if phase == 'P': phase = 'S' else: phase = 'P' times = key_lookup(event, phase, model_dict) if len(times) == 0: phase = 'N' times = ['nan'] outp_file.write(str(event[5]) + " " + phase) for offset in times: outp_file.write(" " + str(offset)) outp_file.write('\n') outp_file.close() for i in range(len(model_out)): execute_script(parsed_arrivals[i], model_in[i], model_out[i], comp_out[i])
37.312883
132
0.561822
0
0
0
0
0
0
0
0
1,603
0.263565
7c9c7b65355934d322e4085f42e442dbe2ee0d7d
7,012
py
Python
ultitrackerapi/ultitrackerapi/extract_and_upload_video.py
atheheath/ultitracker-api
5d7ea7ae97c53faf02416f17baf11ed09fd55276
[ "MIT" ]
null
null
null
ultitrackerapi/ultitrackerapi/extract_and_upload_video.py
atheheath/ultitracker-api
5d7ea7ae97c53faf02416f17baf11ed09fd55276
[ "MIT" ]
7
2020-03-27T03:33:52.000Z
2020-03-30T02:33:04.000Z
ultitrackerapi/ultitrackerapi/extract_and_upload_video.py
atheheath/ultitracker-api
5d7ea7ae97c53faf02416f17baf11ed09fd55276
[ "MIT" ]
null
null
null
import argparse import boto3 import datetime import json import os import posixpath import re import shutil import tempfile import uuid from concurrent import futures from multiprocessing import Pool from ultitrackerapi import get_backend, get_logger, get_s3Client, video backend_instance = get_backend() logger = get_logger(__name__, level="DEBUG") s3Client = get_s3Client() def update_game_video_length(game_id, video_length): command = """ UPDATE ultitracker.game_metadata SET data = jsonb_set(data, '{{length}}', '"{video_length}"', true) WHERE game_id = '{game_id}' """.format( video_length=video_length, game_id=game_id ) backend_instance.client.execute(command) def get_frame_number(key, chunk_multiplier=60): frame_number = int(posixpath.splitext(posixpath.basename(key))[0].split("_")[1]) chunk_number = int(posixpath.basename(posixpath.dirname(key)).split("_")[1]) return chunk_number * chunk_multiplier + frame_number def insert_images( img_raw_paths, img_types, img_metadatas, game_id, frame_numbers ): command = """ INSERT INTO ultitracker.img_location (img_id, img_raw_path, img_type, img_metadata, game_id, frame_number) VALUES """ for i, (img_raw_path, img_type, img_metadata, frame_number) in enumerate(zip(img_raw_paths, img_types, img_metadatas, frame_numbers)): command += """('{img_id}', '{img_raw_path}', '{img_type}', '{img_metadata}', '{game_id}', {frame_number}){include_comma} """.format( img_id=uuid.uuid4(), img_raw_path=img_raw_path, img_type=img_type, img_metadata=json.dumps(img_metadata), game_id=game_id, frame_number=frame_number, include_comma="," if i < (len(img_raw_paths) - 1) else "" ) backend_instance.client.execute(command) def extract_and_upload_video( bucket, video_filename, thumbnail_filename, video_key, thumbnail_key, game_id ): logger.debug("extract_and_upload_video: Getting video length") video_length_seconds = int(video.get_video_duration(video_filename)) video_length = str(datetime.timedelta(seconds=video_length_seconds)) logger.debug("extract_and_upload_video: Finished getting video length") logger.debug("extract_and_upload_video: Getting video height and width") video_height_width = video.get_video_height_width(video_filename) logger.debug("extract_and_upload_video: Finished getting height and width") logger.debug("extract_and_upload_video: Updating length in db") update_game_video_length(game_id, video_length) logger.debug("extract_and_upload_video: Finished updating length in db") logger.debug("extract_and_upload_video: Extracting thumbnail") video.get_thumbnail(video_filename, thumbnail_filename, time=video_length_seconds // 2) logger.debug("extract_and_upload_video: Finished extracting thumbnail") logger.debug("extract_and_upload_video: Uploading thumbnail") s3Client.upload_file( thumbnail_filename, bucket, thumbnail_key ) logger.debug("extract_and_upload_video: Finished uploading thumbnail") logger.debug("extract_and_upload_video: Uploading video to S3") s3Client.upload_file( video_filename, bucket, video_key ) logger.debug("extract_and_upload_video: Finished uploading video to S3") logger.debug("extract_and_upload_video: Chunking video") chunked_video_dir = tempfile.mkdtemp() video.chunk_video(video_filename, chunked_video_dir, chunk_size=60) logger.debug("extract_and_upload_video: Finished chunking video") logger.debug("extract_and_upload_video: Uploading video chunks") with futures.ThreadPoolExecutor(8) as ex: for vid in os.listdir(chunked_video_dir): ex.submit( s3Client.upload_file, os.path.join(chunked_video_dir, vid), bucket, posixpath.join( 
posixpath.dirname(video_key), "chunks", vid ) ) logger.debug("extract_and_upload_video: Finished uploading video chunks") logger.debug("extract_and_upload_video: Submitting lambda frame extraction") aws_lambda_payloads = [ json.dumps({ "s3_bucket_path": bucket, "s3_video_path": posixpath.join(posixpath.dirname(video_key), "chunks", basename), "s3_output_frames_path": posixpath.join(posixpath.dirname(video_key), "frames", posixpath.splitext(basename)[0]), "video_metadata": video_height_width }).encode() for basename in os.listdir(chunked_video_dir) ] client = boto3.client('lambda') aws_lambda_responses = [] with futures.ThreadPoolExecutor(max_workers=16) as ex: result_futures = [] for payload in aws_lambda_payloads: result_futures.append(ex.submit( client.invoke, FunctionName="extractFrames", # InvocationType="Event", Payload=payload )) logger.debug("extract_and_upload_video: Submitted lambda frame extraction") for result_future in futures.as_completed(result_futures): aws_lambda_response = json.loads(result_future.result()["Payload"].read().decode("utf-8")) aws_lambda_responses.append(aws_lambda_response) raw_paths = ["s3://" + posixpath.join(frame["bucket"], frame["key"]) for frame in aws_lambda_response["frames"]] img_types = ["png" for frame in aws_lambda_response["frames"]] metadatas = [ {"bucket": bucket} for frame in aws_lambda_response["frames"] ] frame_numbers = [-1 for frame in aws_lambda_response["frames"]] insert_images( raw_paths, img_types, metadatas, game_id, frame_numbers ) logger.debug("extract_and_upload_video: Received all lambda responses") logger.debug("extract_and_upload_video: Finished inserting image metadata") os.remove(video_filename) os.remove(thumbnail_filename) shutil.rmtree(chunked_video_dir) def main(): parser = argparse.ArgumentParser() parser.add_argument("bucket") parser.add_argument("video_filename") parser.add_argument("thumbnail_filename") parser.add_argument("video_key") parser.add_argument("thumbnail_key") parser.add_argument("game_id") args = parser.parse_args() extract_and_upload_video( bucket=args.bucket, video_filename=args.video_filename, thumbnail_filename=args.thumbnail_filename, video_key=args.video_key, thumbnail_key=args.thumbnail_key, game_id=args.game_id ) if __name__ == "__main__": main()
33.711538
138
0.673987
0
0
0
0
0
0
0
0
1,820
0.259555
7c9c87d15e24804e84e87528b8a9f5ba5b08422f
3,265
py
Python
Chapter03/scikit_soft_voting_2knn.py
PacktPublishing/Hands-On-Ensemble-Learning-with-Python
db9b90189dbebbc6ab5ebba0e2e173ba80197c35
[ "MIT" ]
31
2019-07-21T00:36:52.000Z
2022-02-25T15:38:21.000Z
Chapter03/scikit_soft_voting_2knn.py
tokiran/Hands-On-Ensemble-Learning-with-Python
739ecda33fb75dc1df1366abf4a79c34cc0c2026
[ "MIT" ]
null
null
null
Chapter03/scikit_soft_voting_2knn.py
tokiran/Hands-On-Ensemble-Learning-with-Python
739ecda33fb75dc1df1366abf4a79c34cc0c2026
[ "MIT" ]
30
2019-07-06T00:22:44.000Z
2022-02-04T02:44:17.000Z
# --- SECTION 1 ---
# Import the required libraries
from sklearn import datasets, naive_bayes, svm, neighbors
from sklearn.ensemble import VotingClassifier
from sklearn.metrics import accuracy_score

# Load the dataset
breast_cancer = datasets.load_breast_cancer()
x, y = breast_cancer.data, breast_cancer.target

# Split the train and test samples
test_samples = 100
x_train, y_train = x[:-test_samples], y[:-test_samples]
x_test, y_test = x[-test_samples:], y[-test_samples:]

# --- SECTION 2 ---
# Instantiate the learners (classifiers)
learner_1 = neighbors.KNeighborsClassifier(n_neighbors=5)
learner_2 = naive_bayes.GaussianNB()
learner_3 = neighbors.KNeighborsClassifier(n_neighbors=50)

# --- SECTION 3 ---
# Instantiate the voting classifier
voting = VotingClassifier([('5NN', learner_1),
                           ('NB', learner_2),
                           ('50NN', learner_3)],
                          voting='soft')

# --- SECTION 4 ---
# Fit classifier with the training data
voting.fit(x_train, y_train)
learner_1.fit(x_train, y_train)
learner_2.fit(x_train, y_train)
learner_3.fit(x_train, y_train)

# --- SECTION 5 ---
# Predict the most probable class
hard_predictions = voting.predict(x_test)

# --- SECTION 6 ---
# Get the base learner predictions
predictions_1 = learner_1.predict(x_test)
predictions_2 = learner_2.predict(x_test)
predictions_3 = learner_3.predict(x_test)

# --- SECTION 7 ---
# Accuracies of base learners
print('L1:', accuracy_score(y_test, predictions_1))
print('L2:', accuracy_score(y_test, predictions_2))
print('L3:', accuracy_score(y_test, predictions_3))
# Accuracy of hard voting
print('-'*30)
print('Hard Voting:', accuracy_score(y_test, hard_predictions))

# --- SECTION 1 ---
# Import the required libraries
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.style.use('seaborn-paper')

# --- SECTION 2 ---
# Get the wrongly predicted instances
# and the predicted probabilities for the whole test set
errors = y_test-hard_predictions

probabilities_1 = learner_1.predict_proba(x_test)
probabilities_2 = learner_2.predict_proba(x_test)
probabilities_3 = learner_3.predict_proba(x_test)

# --- SECTION 2 ---
# Store the predicted probability for
# each wrongly predicted instance, for each base learner
# as well as the average predicted probability
#
x=[]
y_1=[]
y_2=[]
y_3=[]
y_avg=[]

for i in range(len(errors)):
    if not errors[i] == 0:
        x.append(i)
        y_1.append(probabilities_1[i][0])
        y_2.append(probabilities_2[i][0])
        y_3.append(probabilities_3[i][0])
        y_avg.append((probabilities_1[i][0]+probabilities_2[i][0]+probabilities_3[i][0])/3)

# --- SECTION 3 ---
# Plot the predicted probability of each base learner as
# a bar and the average probability as an X
plt.bar(x, y_1, 3, label='5NN')
plt.bar(x, y_2, 2, label='NB')
plt.bar(x, y_3, 1, label='50NN')
plt.scatter(x, y_avg, marker='x', c='k', s=150, label='Average Positive', zorder=10)
y = [0.5 for x in range(len(errors))]
plt.plot(y, c='k', linestyle='--')
plt.title('Positive Probability')
plt.xlabel('Test sample')
plt.ylabel('probability')
plt.legend()
28.640351
92
0.67902
0
0
0
0
0
0
0
0
1,093
0.334763
7c9e60fb8b9a1847e8db908d6cfa14b5a53e1aaf
623
py
Python
API/migrations/0005_alter_news_date_time_alter_news_headline.py
kgarchie/ReSTful-Django-API
851c76eb75747042ceac0a6c164266409ca935d4
[ "MIT" ]
null
null
null
API/migrations/0005_alter_news_date_time_alter_news_headline.py
kgarchie/ReSTful-Django-API
851c76eb75747042ceac0a6c164266409ca935d4
[ "MIT" ]
null
null
null
API/migrations/0005_alter_news_date_time_alter_news_headline.py
kgarchie/ReSTful-Django-API
851c76eb75747042ceac0a6c164266409ca935d4
[ "MIT" ]
null
null
null
# Generated by Django 4.0.3 on 2022-03-23 14:31

import datetime
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('API', '0004_alter_news_date_time_alter_news_headline'),
    ]

    operations = [
        migrations.AlterField(
            model_name='news',
            name='date_time',
            field=models.DateTimeField(default=datetime.datetime(2022, 3, 23, 17, 31, 17, 27766)),
        ),
        migrations.AlterField(
            model_name='news',
            name='headline',
            field=models.CharField(max_length=100),
        ),
    ]
24.92
98
0.603531
514
0.82504
0
0
0
0
0
0
132
0.211878
7ca170e48f979878209316e327d77080c8c15058
2,662
py
Python
qiskit/ml/datasets/iris.py
stefan-woerner/aqua
12e1b867e254977d9c5992612a7919d8fe016cb4
[ "Apache-2.0" ]
504
2018-12-15T16:34:03.000Z
2022-03-26T11:24:53.000Z
qiskit/ml/datasets/iris.py
stefan-woerner/aqua
12e1b867e254977d9c5992612a7919d8fe016cb4
[ "Apache-2.0" ]
746
2018-12-16T16:44:42.000Z
2021-07-10T16:59:43.000Z
qiskit/ml/datasets/iris.py
stefan-woerner/aqua
12e1b867e254977d9c5992612a7919d8fe016cb4
[ "Apache-2.0" ]
421
2018-12-22T14:49:00.000Z
2022-03-04T09:47:07.000Z
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.

""" iris dataset """

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA

from qiskit.aqua import MissingOptionalLibraryError


def iris(training_size, test_size, n, plot_data=False):
    """ returns iris dataset """
    class_labels = [r'A', r'B', r'C']

    data, target = datasets.load_iris(return_X_y=True)
    sample_train, sample_test, label_train, label_test = \
        train_test_split(data, target, test_size=1, random_state=42)

    # Now we standardize for gaussian around 0 with unit variance
    std_scale = StandardScaler().fit(sample_train)
    sample_train = std_scale.transform(sample_train)
    sample_test = std_scale.transform(sample_test)

    # Now reduce number of features to number of qubits
    pca = PCA(n_components=n).fit(sample_train)
    sample_train = pca.transform(sample_train)
    sample_test = pca.transform(sample_test)

    # Scale to the range (-1,+1)
    samples = np.append(sample_train, sample_test, axis=0)
    minmax_scale = MinMaxScaler((-1, 1)).fit(samples)
    sample_train = minmax_scale.transform(sample_train)
    sample_test = minmax_scale.transform(sample_test)

    # Pick training size number of samples from each distro
    training_input = {key: (sample_train[label_train == k, :])[:training_size]
                      for k, key in enumerate(class_labels)}
    test_input = {key: (sample_test[label_test == k, :])[:test_size]
                  for k, key in enumerate(class_labels)}

    if plot_data:
        try:
            import matplotlib.pyplot as plt
        except ImportError as ex:
            raise MissingOptionalLibraryError(
                libname='Matplotlib',
                name='iris',
                pip_install='pip install matplotlib') from ex

        for k in range(0, 3):
            plt.scatter(sample_train[label_train == k, 0][:training_size],
                        sample_train[label_train == k, 1][:training_size])

        plt.title("Iris dataset")
        plt.show()

    return sample_train, training_input, test_input, class_labels
38.028571
78
0.696093
0
0
0
0
0
0
0
0
783
0.29414
7ca18b95086348a6dec0e89454f15ffded086574
16,864
py
Python
tests/h/views/api_auth_test.py
discodavey/h
7bff8478b3a5b936de82ac9fcd89b355f4afd3aa
[ "MIT" ]
null
null
null
tests/h/views/api_auth_test.py
discodavey/h
7bff8478b3a5b936de82ac9fcd89b355f4afd3aa
[ "MIT" ]
5
2017-12-26T14:22:20.000Z
2018-04-02T02:56:38.000Z
tests/h/views/api_auth_test.py
discodavey/h
7bff8478b3a5b936de82ac9fcd89b355f4afd3aa
[ "MIT" ]
1
2021-03-12T09:45:04.000Z
2021-03-12T09:45:04.000Z
# -*- coding: utf-8 -*- from __future__ import unicode_literals import datetime import json import mock import pytest from oauthlib.oauth2 import InvalidRequestFatalError from oauthlib.common import Request as OAuthRequest from pyramid import httpexceptions from h._compat import urlparse from h.exceptions import OAuthTokenError from h.models.auth_client import ResponseType from h.services.auth_token import auth_token_service_factory from h.services.oauth_provider import OAuthProviderService from h.services.oauth_validator import DEFAULT_SCOPES from h.services.user import user_service_factory from h.util.datetime import utc_iso8601 from h.views import api_auth as views @pytest.mark.usefixtures('routes', 'oauth_provider', 'user_svc') class TestOAuthAuthorizeController(object): @pytest.mark.usefixtures('authenticated_user') @pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def test_get_validates_request(self, controller, pyramid_request, view_name): view = getattr(controller, view_name) view() controller.oauth.validate_authorization_request.assert_called_once_with( pyramid_request.url) @pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def test_get_raises_for_invalid_request(self, controller, view_name): controller.oauth.validate_authorization_request.side_effect = InvalidRequestFatalError('boom!') with pytest.raises(InvalidRequestFatalError) as exc: view = getattr(controller, view_name) view() assert exc.value.description == 'boom!' @pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def test_get_redirects_to_login_when_not_authenticated(self, controller, pyramid_request, view_name): with pytest.raises(httpexceptions.HTTPFound) as exc: view = getattr(controller, view_name) view() parsed_url = urlparse.urlparse(exc.value.location) assert parsed_url.path == '/login' assert urlparse.parse_qs(parsed_url.query) == {'next': [pyramid_request.url], 'for_oauth': ['True']} @pytest.mark.parametrize('response_mode,view_name', [ (None, 'get'), ('web_message', 'get_web_message'), ]) def test_get_returns_expected_context(self, controller, auth_client, authenticated_user, oauth_request, response_mode, view_name): oauth_request.response_mode = response_mode view = getattr(controller, view_name) assert view() == { 'client_id': auth_client.id, 'client_name': auth_client.name, 'response_mode': response_mode, 'response_type': auth_client.response_type.value, 'state': 'foobar', 'username': authenticated_user.username, } @pytest.mark.parametrize('view_name', ['get', 'get_web_message']) def test_get_creates_authorization_response_for_trusted_clients(self, controller, auth_client, authenticated_user, pyramid_request, view_name): auth_client.trusted = True view = getattr(controller, view_name) view() controller.oauth.create_authorization_response.assert_called_once_with( pyramid_request.url, credentials={'user': authenticated_user}, scopes=DEFAULT_SCOPES) def test_get_returns_redirect_immediately_for_trusted_clients(self, controller, auth_client, authenticated_user, pyramid_request): auth_client.trusted = True response = controller.get() expected = '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri) assert response.location == expected @pytest.mark.usefixtures('authenticated_user') def test_get_web_message_renders_template_for_trusted_clients(self, controller, auth_client): auth_client.trusted = True assert controller.request.override_renderer is None controller.get_web_message() assert controller.request.override_renderer == 
'h:templates/oauth/authorize_web_message.html.jinja2' @pytest.mark.usefixtures('authenticated_user') def test_get_web_message_returns_context_for_trusted_clients(self, controller, auth_client): auth_client.trusted = True response = controller.get_web_message() assert response == { 'code': 'abcdef123456', 'origin': 'http://client.com', 'state': 'foobar', } @pytest.mark.usefixtures('authenticated_user') def test_get_web_message_allows_empty_state_in_context_for_trusted_clients(self, controller, auth_client, oauth_provider): auth_client.trusted = True headers = {'Location': '{}?code=abcdef123456'.format(auth_client.redirect_uri)} oauth_provider.create_authorization_response.return_value = (headers, None, 302) response = controller.get_web_message() assert response['state'] is None @pytest.mark.parametrize('view_name', ['post', 'post_web_message']) def test_post_creates_authorization_response(self, controller, pyramid_request, authenticated_user, view_name): pyramid_request.url = 'http://example.com/auth?client_id=the-client-id' + \ '&response_type=code' + \ '&state=foobar' + \ '&scope=exploit' view = getattr(controller, view_name) view() controller.oauth.create_authorization_response.assert_called_once_with( pyramid_request.url, credentials={'user': authenticated_user}, scopes=DEFAULT_SCOPES) @pytest.mark.usefixtures('authenticated_user') @pytest.mark.parametrize('view_name', ['post', 'post_web_message']) def test_post_raises_for_invalid_request(self, controller, view_name): controller.oauth.create_authorization_response.side_effect = InvalidRequestFatalError('boom!') with pytest.raises(InvalidRequestFatalError) as exc: view = getattr(controller, view_name) view() assert exc.value.description == 'boom!' def test_post_redirects_to_client(self, controller, auth_client): response = controller.post() expected = '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri) assert response.location == expected def test_post_web_message_returns_expected_context(self, controller, auth_client): response = controller.post_web_message() assert response == { 'code': 'abcdef123456', 'origin': 'http://client.com', 'state': 'foobar', } def test_post_web_message_allows_empty_state_in_context(self, controller, auth_client, oauth_provider): auth_client.trusted = True headers = {'Location': '{}?code=abcdef123456'.format(auth_client.redirect_uri)} oauth_provider.create_authorization_response.return_value = (headers, None, 302) response = controller.post_web_message() assert response['state'] is None @pytest.fixture def controller(self, pyramid_request): pyramid_request.override_renderer = None return views.OAuthAuthorizeController(None, pyramid_request) @pytest.fixture def oauth_request(self): return OAuthRequest('/') @pytest.fixture def oauth_provider(self, pyramid_config, auth_client, oauth_request): svc = mock.create_autospec(OAuthProviderService, instance=True) scopes = ['annotation:read', 'annotation:write'] credentials = {'client_id': auth_client.id, 'state': 'foobar', 'request': oauth_request} svc.validate_authorization_request.return_value = (scopes, credentials) headers = {'Location': '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri)} body = None status = 302 svc.create_authorization_response.return_value = (headers, body, status) pyramid_config.register_service(svc, name='oauth_provider') return svc @pytest.fixture def auth_client(self, factories): return factories.AuthClient(name='Test Client', redirect_uri='http://client.com/auth/callback', response_type=ResponseType.code) 
@pytest.fixture def user_svc(self, pyramid_config, pyramid_request): svc = mock.Mock(spec_set=user_service_factory(None, pyramid_request)) pyramid_config.register_service(svc, name='user') return svc @pytest.fixture def pyramid_request(self, pyramid_request): pyramid_request.url = 'http://example.com/auth?client_id=the-client-id&response_type=code&state=foobar' return pyramid_request @pytest.fixture def authenticated_user(self, factories, pyramid_config, user_svc): user = factories.User.build() pyramid_config.testing_securitypolicy(user.userid) def fake_fetch(userid): if userid == user.userid: return user user_svc.fetch.side_effect = fake_fetch return user @pytest.fixture def routes(self, pyramid_config): pyramid_config.add_route('login', '/login') @pytest.mark.usefixtures('oauth_provider') class TestOAuthAccessTokenController(object): def test_it_creates_token_response(self, pyramid_request, controller, oauth_provider): controller.post() oauth_provider.create_token_response.assert_called_once_with( pyramid_request.url, pyramid_request.method, pyramid_request.POST, pyramid_request.headers) def test_it_returns_correct_response_on_success(self, controller, oauth_provider): body = json.dumps({'access_token': 'the-access-token'}) oauth_provider.create_token_response.return_value = ({}, body, 200) assert controller.post() == {'access_token': 'the-access-token'} def test_it_raises_when_error(self, controller, oauth_provider): body = json.dumps({'error': 'invalid_request'}) oauth_provider.create_token_response.return_value = ({}, body, 400) with pytest.raises(httpexceptions.HTTPBadRequest) as exc: controller.post() assert exc.value.body == body @pytest.fixture def controller(self, pyramid_request): pyramid_request.method = 'POST' pyramid_request.POST['grant_type'] = 'authorization_code' pyramid_request.POST['code'] = 'the-authz-code' pyramid_request.headers = {'X-Test-ID': '1234'} return views.OAuthAccessTokenController(pyramid_request) @pytest.fixture def oauth_provider(self, pyramid_config): svc = mock.Mock(spec_set=['create_token_response']) svc.create_token_response.return_value = ({}, '{}', 200) pyramid_config.register_service(svc, name='oauth_provider') return svc @pytest.mark.usefixtures('oauth_provider') class TestOAuthRevocationController(object): def test_it_creates_revocation_response(self, pyramid_request, controller, oauth_provider): controller.post() oauth_provider.create_revocation_response.assert_called_once_with( pyramid_request.url, pyramid_request.method, pyramid_request.POST, pyramid_request.headers) def test_it_returns_empty_response_on_success(self, controller): response = controller.post() assert response == {} def test_it_raises_when_error(self, controller, oauth_provider): body = json.dumps({'error': 'invalid_request'}) oauth_provider.create_revocation_response.return_value = ({}, body, 400) with pytest.raises(httpexceptions.HTTPBadRequest) as exc: controller.post() assert exc.value.body == body @pytest.fixture def controller(self, pyramid_request): pyramid_request.method = 'POST' pyramid_request.POST['token'] = 'the-token' pyramid_request.headers = {'X-Test-ID': '1234'} return views.OAuthRevocationController(pyramid_request) @pytest.fixture def oauth_provider(self, pyramid_config): svc = mock.Mock(spec_set=['create_revocation_response']) svc.create_revocation_response.return_value = ({}, '{}', 200) pyramid_config.register_service(svc, name='oauth_provider') return svc class TestDebugToken(object): def test_it_raises_error_when_token_is_missing(self, pyramid_request): 
pyramid_request.auth_token = None with pytest.raises(OAuthTokenError) as exc: views.debug_token(pyramid_request) assert exc.value.type == 'missing_token' assert 'Bearer token is missing' in exc.value.message def test_it_raises_error_when_token_is_empty(self, pyramid_request): pyramid_request.auth_token = '' with pytest.raises(OAuthTokenError) as exc: views.debug_token(pyramid_request) assert exc.value.type == 'missing_token' assert 'Bearer token is missing' in exc.value.message def test_it_validates_token(self, pyramid_request, token_service): pyramid_request.auth_token = 'the-access-token' views.debug_token(pyramid_request) token_service.validate.assert_called_once_with('the-access-token') def test_it_raises_error_when_token_is_invalid(self, pyramid_request, token_service): pyramid_request.auth_token = 'the-token' token_service.validate.return_value = None with pytest.raises(OAuthTokenError) as exc: views.debug_token(pyramid_request) assert exc.value.type == 'missing_token' assert 'Bearer token does not exist or is expired' in exc.value.message def test_returns_debug_data_for_oauth_token(self, pyramid_request, token_service, oauth_token): pyramid_request.auth_token = oauth_token.value token_service.fetch.return_value = oauth_token result = views.debug_token(pyramid_request) assert result == {'userid': oauth_token.userid, 'client': {'id': oauth_token.authclient.id, 'name': oauth_token.authclient.name}, 'issued_at': utc_iso8601(oauth_token.created), 'expires_at': utc_iso8601(oauth_token.expires), 'expired': oauth_token.expired} def test_returns_debug_data_for_developer_token(self, pyramid_request, token_service, developer_token): pyramid_request.auth_token = developer_token.value token_service.fetch.return_value = developer_token result = views.debug_token(pyramid_request) assert result == {'userid': developer_token.userid, 'issued_at': utc_iso8601(developer_token.created), 'expires_at': None, 'expired': False} @pytest.fixture def token_service(self, pyramid_config, pyramid_request): svc = mock.Mock(spec_set=auth_token_service_factory(None, pyramid_request)) pyramid_config.register_service(svc, name='auth_token') return svc @pytest.fixture def oauth_token(self, factories): authclient = factories.AuthClient(name='Example Client') expires = datetime.datetime.utcnow() + datetime.timedelta(minutes=10) return factories.DeveloperToken(authclient=authclient, expires=expires) @pytest.fixture def developer_token(self, factories): return factories.DeveloperToken() class TestAPITokenError(object): def test_it_sets_the_response_status_code(self, pyramid_request): context = OAuthTokenError('the error message', 'error_type', status_code=403) views.api_token_error(context, pyramid_request) assert pyramid_request.response.status_code == 403 def test_it_returns_the_error(self, pyramid_request): context = OAuthTokenError('', 'error_type') result = views.api_token_error(context, pyramid_request) assert result['error'] == 'error_type' def test_it_returns_error_description(self, pyramid_request): context = OAuthTokenError('error description', 'error_type') result = views.api_token_error(context, pyramid_request) assert result['error_description'] == 'error description' def test_it_skips_description_when_missing(self, pyramid_request): context = OAuthTokenError(None, 'invalid_request') result = views.api_token_error(context, pyramid_request) assert 'error_description' not in result def test_it_skips_description_when_empty(self, pyramid_request): context = OAuthTokenError('', 'invalid_request') result = 
views.api_token_error(context, pyramid_request) assert 'error_description' not in result
41.131707
147
0.694141
16,016
0.949715
0
0
12,338
0.731618
0
0
2,150
0.127491
7ca1d5b32a32a25d088eb63410921b9a5e64742f
1,306
py
Python
tools/build/v2/test/conditionals.py
juslee/boost-svn
6d5a03c1f5ed3e2b23bd0f3ad98d13ff33d4dcbb
[ "BSL-1.0" ]
1
2018-12-15T19:55:56.000Z
2018-12-15T19:55:56.000Z
tools/build/v2/test/conditionals.py
smart-make/boost
46509a094f8a844eefd5bb8a0030b739a04d79e1
[ "BSL-1.0" ]
null
null
null
tools/build/v2/test/conditionals.py
smart-make/boost
46509a094f8a844eefd5bb8a0030b739a04d79e1
[ "BSL-1.0" ]
null
null
null
#!/usr/bin/python

# Copyright 2003 Dave Abrahams
# Copyright 2002, 2003, 2004 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)

# Test conditional properties.

import BoostBuild

t = BoostBuild.Tester()

# Arrange a project which will build only if 'a.cpp' is compiled with "STATIC"
# define.
t.write("a.cpp", """\
#ifdef STATIC
int main() {}
#endif
""")

# Test conditionals in target requirements.
t.write("jamroot.jam", "exe a : a.cpp : <link>static:<define>STATIC ;")
t.run_build_system(["link=static"])
t.expect_addition("bin/$toolset/debug/link-static/a.exe")
t.rm("bin")

# Test conditionals in project requirements.
t.write("jamroot.jam", """
project : requirements <link>static:<define>STATIC ;
exe a : a.cpp ;
""")
t.run_build_system(["link=static"])
t.expect_addition("bin/$toolset/debug/link-static/a.exe")
t.rm("bin")

# Regression test for a bug found by Ali Azarbayejani. Conditionals inside
# usage requirement were not being evaluated.
t.write("jamroot.jam", """
lib l : l.cpp : : : <link>static:<define>STATIC ;
exe a : a.cpp l ;
""")
t.write("l.cpp", "int i;")
t.run_build_system(["link=static"])
t.expect_addition("bin/$toolset/debug/link-static/a.exe")

t.cleanup()
26.653061
78
0.712864
0
0
0
0
0
0
0
0
1,027
0.786371
7ca33bba047d555eff412922059b6da8837f7980
270
py
Python
examples/setuptools-rust-starter/tests/test_setuptools_rust_starter.py
FriendRat/pyo3
5446fe2062cb3bf11bf61bd4a2c58a7ed8b408d2
[ "Apache-2.0" ]
1
2021-06-18T16:27:31.000Z
2021-06-18T16:27:31.000Z
examples/setuptools-rust-starter/tests/test_setuptools_rust_starter.py
FriendRat/pyo3
5446fe2062cb3bf11bf61bd4a2c58a7ed8b408d2
[ "Apache-2.0" ]
5
2021-11-08T22:05:41.000Z
2022-03-28T22:07:04.000Z
examples/setuptools-rust-starter/tests/test_setuptools_rust_starter.py
FriendRat/pyo3
5446fe2062cb3bf11bf61bd4a2c58a7ed8b408d2
[ "Apache-2.0" ]
null
null
null
from setuptools_rust_starter import PythonClass, ExampleClass


def test_python_class() -> None:
    py_class = PythonClass(value=10)
    assert py_class.value == 10


def test_example_class() -> None:
    example = ExampleClass(value=11)
    assert example.value == 11
22.5
61
0.733333
0
0
0
0
0
0
0
0
0
0
7ca44058ba24c0424d8558e54e0f3abd230491fa
12,813
py
Python
spiders/juejin_spider.py
sunhailin-Leo/TeamLeoX_BlogsCrawler
389ff31e02bdff415c8bc470a3a48da1acb14c4c
[ "MIT" ]
null
null
null
spiders/juejin_spider.py
sunhailin-Leo/TeamLeoX_BlogsCrawler
389ff31e02bdff415c8bc470a3a48da1acb14c4c
[ "MIT" ]
null
null
null
spiders/juejin_spider.py
sunhailin-Leo/TeamLeoX_BlogsCrawler
389ff31e02bdff415c8bc470a3a48da1acb14c4c
[ "MIT" ]
null
null
null
import time from typing import Dict, List, Tuple, Optional from utils.logger_utils import LogManager from utils.str_utils import check_is_json from config import LOG_LEVEL, PROCESS_STATUS_FAIL from utils.time_utils import datetime_str_change_fmt from utils.exception_utils import LoginException, ParseDataException from spiders import BaseSpider, BaseSpiderParseMethodType, CookieUtils from utils.str_utils import check_is_phone_number, check_is_email_address logger = LogManager(__name__).get_logger_and_add_handlers( formatter_template=5, log_level_int=LOG_LEVEL ) class JuejinSpider(BaseSpider): def __init__(self, task_id: str, username: str, password: str): self._main_url = "https://juejin.im/auth/type" self._blogs_url = "https://timeline-merger-ms.juejin.im/v1/get_entry_by_self" self._like_blogs_url = "https://user-like-wrapper-ms.juejin.im/v1/user" self._task_id = task_id self._login_username = username self._login_password = password self._spider_name: str = f"juejin:{self._login_username}" self._login_cookies: Optional[str] = None self._login_token: Optional[str] = None self._login_uid: Optional[str] = None self._login_client_id: Optional[str] = None self._response_data = None self._blogs_data: List = [] self._like_blogs_data: List = [] self._like_blogs_total_page: int = 0 super().__init__() self._login_cookies = self.get_cookies(spider_name=self._spider_name) def _check_username(self) -> Optional[Tuple[str, Dict]]: """ 解析用户名 :return: 结果 """ phone_login = check_is_phone_number(data=self._login_username) email_login = check_is_email_address(data=self._login_username) login_data: Dict = {"password": self._login_password} if phone_login is None and email_login is None: raise ValueError("Your login username is illegal!") if phone_login is not None: login_data.update(phoneNumber=self._login_username) return f"{self._main_url}/phoneNumber", login_data if email_login is not None: login_data.update(email=self._login_username) return f"{self._main_url}/email", login_data return None def parse_data_with_method(self, method: str): if method == BaseSpiderParseMethodType.LoginResult: self._parse_login_data() elif method == BaseSpiderParseMethodType.PersonalBlogs: self._parse_personal_blogs() self._parse_personal_like_blogs() elif method == BaseSpiderParseMethodType.Finish: self.send_data() def login(self): if self._login_cookies is None: login_url, login_data = self._check_username() response = self.make_request( url=login_url, headers=self._common_headers, method="POST", json=login_data, ) if response.content.decode() != "": logger.info("登录成功!") self._response_data = response.json() self._login_cookies = CookieUtils( cookie_list=response.cookies.items() ).to_str() logger.debug(self._login_cookies) self.set_cookies( spider_name=self._spider_name, cookies=self._login_cookies ) self.parse_data_with_method( method=BaseSpiderParseMethodType.LoginResult ) else: logger.error("登录失败!") raise LoginException() else: get_result: str = self.get_data(spider_name=f"{self._spider_name}:params") if get_result is None: self.parse_data_with_method( method=BaseSpiderParseMethodType.LoginResult ) else: try: login_params = get_result.split("&")[1:-1] self._login_uid = [d for d in login_params if "uid" in d][ 0 ].replace("uid=", "") self._login_token = [d for d in login_params if "token" in d][ 0 ].replace("token=", "") self._login_client_id = [ d for d in login_params if "device_id" in d ][0].replace("device_id=", "") self.parse_data_with_method( method=BaseSpiderParseMethodType.PersonalBlogs ) except Exception as err: 
logger.error(f"解析 Redis 返回数据失败! 错误原因: {err}") self.parse_data_with_method( method=BaseSpiderParseMethodType.LoginResult ) def _parse_login_data(self): # 公共参数 self._login_token = self._response_data["token"] self._login_uid = self._response_data["userId"] self._login_client_id = self._response_data["clientId"] # 重要参数持久化 params: str = f"?src=web&uid={self._login_uid}" f"&token={self._login_token}" f"&device_id={self._login_client_id}" f"&current_uid={self._login_uid}" self.set_data(spider_name=f"{self._spider_name}:params", data=params) # 个人数据 username = self._response_data["user"]["username"] description = self._response_data["user"]["selfDescription"] avatar_img = self._response_data["user"]["avatarLarge"] followee = self._response_data["user"]["followeesCount"] follower = self._response_data["user"]["followersCount"] like_blogs = self._response_data["user"]["collectedEntriesCount"] personal_data: Dict = { "username": username, "description": description, "avatarImg": avatar_img, "followee": followee, "follower": follower, "likeBlogs": like_blogs, } logger.debug(personal_data) self.data_model.set_personal_data(data=personal_data) self.parse_data_with_method(method=BaseSpiderParseMethodType.PersonalBlogs) def _parse_personal_blogs(self, next_params: Optional[str] = None): req_data: dict = { "src": "web", "uid": self._login_uid, "device_id": self._login_client_id, "token": self._login_token, "targetUid": self._login_uid, "type": "post", "limit": "20", "order": "createdAt", } if next_params is not None: req_data.update(before=next_params) url_params: str = "" for index, data in enumerate(req_data.items()): if index == 0: url_params += f"?{data[0]}={data[1]}" else: url_params += f"&{data[0]}={data[1]}" blogs_url: str = f"{self._blogs_url}{url_params}" response = self.make_request(url=blogs_url, headers=self._common_headers) if response.content.decode() != "": self._response_data = response.json() if self._response_data is not None and self._response_data["m"] == "ok": next_page_variable = None entry_list = self._response_data["d"]["entrylist"] if len(entry_list) > 0: for personal_blog in entry_list: blog_create_time = datetime_str_change_fmt( time_str=personal_blog["createdAt"], prev_fmt="%Y-%m-%dT%H:%M:%S.%fZ", ) blog_data: Dict = { "blogId": personal_blog["objectId"], "blogTitle": personal_blog["title"], "blogHref": personal_blog["originalUrl"], "blogViewers": personal_blog["viewsCount"], "blogCreateTime": blog_create_time, } self._blogs_data.append(blog_data) next_page_variable = personal_blog["verifyCreatedAt"] if self._response_data["d"]["total"] > 20: time.sleep(0.5) self._parse_personal_blogs(next_params=next_page_variable) else: logger.debug(self._blogs_data) self.data_model.set_personal_blogs_data(data=self._blogs_data) logger.info("获取个人博客数据成功!") else: logger.error("查询个人博客失败!") self.update_task_status( task_id=self._task_id, data=str(PROCESS_STATUS_FAIL) ) raise LoginException() def _parse_personal_like_blogs(self, page_no: int = 0): like_blogs_url: str = f"{self._like_blogs_url}/{self._login_uid}/like/entry?page={page_no}&pageSize=20" self._common_headers.update( { "X-Juejin-Client": str(self._login_client_id), "X-Juejin-Src": "web", "X-Juejin-Token": self._login_token, "X-Juejin-Uid": self._login_uid, } ) response = self.make_request(url=like_blogs_url, headers=self._common_headers) if response.content.decode() != "": self._response_data = response.json() if ( self._response_data is not None and self._response_data["m"] == "success" ): logger.info(f"当前正在获取第{page_no + 1}页的数据!") if 
page_no == 0: total_count = self._response_data["d"]["total"] total_pages = total_count // 20 rest_count = total_count % 20 if rest_count != 0: total_pages += 1 self._like_blogs_total_page = total_pages entry_list = self._response_data["d"]["entryList"] if len(entry_list) > 0: for entry_data in entry_list: if entry_data is None: continue blog_data: Dict = { "blogId": entry_data["objectId"], "blogTitle": entry_data["title"], "blogHref": entry_data["originalUrl"], "blogViewers": entry_data["viewsCount"], "blogCreateTime": datetime_str_change_fmt( time_str=entry_data["createdAt"], prev_fmt="%Y-%m-%dT%H:%M:%S.%fZ", ), } self._like_blogs_data.append(blog_data) page_no += 1 if page_no <= self._like_blogs_total_page: # TODO 后面考虑多线程进行任务拆分,并发获取数据 time.sleep(0.5) self._parse_personal_like_blogs(page_no=page_no) else: # logger.debug(self._like_blogs_data) logger.debug(f"获取到 {len(self._like_blogs_data)} 条个人点赞博客") self.data_model.set_personal_like_blogs_data( data=self._like_blogs_data ) logger.info("获取个人点赞博客成功!") # 任务末尾 self.parse_data_with_method(method=BaseSpiderParseMethodType.Finish) else: logger.error("查询个人点赞博客失败!") self.update_task_status( task_id=self._task_id, data=str(PROCESS_STATUS_FAIL) ) raise ParseDataException() def _test_cookies(self, cookies: Optional[str] = None) -> bool: params = self.get_data(spider_name=f"{self._spider_name}:params") if params is None: return False test_user_url: str = f"https://user-storage-api-ms.juejin.im/v1/getUserInfo{params}" test_request_headers: Dict = self.get_default_headers() test_response = self.make_request( url=test_user_url, headers=test_request_headers ) if ( test_response.status_code != 200 or check_is_json(test_response.content.decode()) is not True ): logger.error(f"当前掘金账号登录状态: 已退出!") self._async_task.remove_async_scheduler(job_id=self._spider_name) return False test_json_response = test_response.json() if test_json_response["s"] == 1: logger.info(f"当前掘金账号为: {self._login_username}, 状态: 已登录") return True else: logger.error(f"当前掘金账号登录状态: 已退出!") return False
42.287129
157
0.558807
12,563
0.956161
0
0
0
0
0
0
2,220
0.168963
7ca486af10b1cca3904ea233b441a3077ec0bb6b
3,653
py
Python
NAS/PaddleSlim/train_supernet.py
naviocean/SimpleCVReproduction
61b43e3583977f42e6f91ef176ec5e1701e98d33
[ "Apache-2.0" ]
923
2020-01-11T06:36:53.000Z
2022-03-31T00:26:57.000Z
NAS/PaddleSlim/train_supernet.py
Twenty3hree/SimpleCVReproduction
9939f8340c54dbd69b0017cecad875dccf428f26
[ "Apache-2.0" ]
25
2020-02-27T08:35:46.000Z
2022-01-25T08:54:19.000Z
NAS/PaddleSlim/train_supernet.py
Twenty3hree/SimpleCVReproduction
9939f8340c54dbd69b0017cecad875dccf428f26
[ "Apache-2.0" ]
262
2020-01-02T02:19:40.000Z
2022-03-23T04:56:16.000Z
from paddle.vision.transforms import (
    ToTensor, RandomHorizontalFlip, RandomResizedCrop, SaturationTransform,
    Compose, HueTransform, BrightnessTransform, ContrastTransform,
    RandomCrop, Normalize, RandomRotation
)
from paddle.vision.datasets import Cifar100
from paddle.io import DataLoader
from paddle.optimizer.lr import CosineAnnealingDecay, MultiStepDecay, LinearWarmup
import random
from resnet20 import *
import paddle

# Supernet training, built on the PaddleSlim model compression toolkit.
# https://github.com/PaddlePaddle/PaddleSlim -- stars are welcome.
from paddleslim.nas.ofa.convert_super import Convert, supernet
from paddleslim.nas.ofa import OFA, RunConfig, DistillConfig
from paddleslim.nas.ofa.utils import utils

channel_list = []
for i in range(1, 21):
    if 0 < i <= 7:
        # channel_list.append(random.choice([ 4, 8, 12, 16]))
        channel_list.append(16)
    elif 7 < i <= 13:
        # channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24, 28, 32]))
        channel_list.append(32)
    elif 13 < i <= 19:
        # channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56,60, 64]))
        channel_list.append(64)
    else:
        # channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56,60, 64]))
        channel_list.append(64)

net = ResNet20(100, channel_list)

net2 = ResNet20(100, channel_list)
net2.set_state_dict(paddle.load('./pretrained_model/resnet20.pdparams'))

channel_optional = []
for i in range(0, 23):
    if i <= 7:
        channel_optional.append([4, 8, 12, 16])
        # channel_optional.append([12, 16])
    elif 7 < i <= 14:
        channel_optional.append([4, 8, 12, 16, 20, 24, 28, 32])
        # channel_optional.append([20, 24, 28, 32])
    elif 14 < i <= 21:
        channel_optional.append(
            [4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64])
        # channel_optional.append([36, 40, 44, 48, 52, 56,60, 64])
    else:
        channel_optional.append(
            [4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64])
        # channel_optional.append([36, 40, 44, 48, 52, 56,60, 64])

distill_config = DistillConfig(teacher_model=net2)

sp_net_config = supernet(channel=channel_optional)
sp_model = Convert(sp_net_config).convert(net)

ofa_net = OFA(sp_model, distill_config=distill_config)
ofa_net.set_task('channel')

model = paddle.Model(ofa_net)

MAX_EPOCH = 300
LR = 0.1
WEIGHT_DECAY = 5e-4
MOMENTUM = 0.9
BATCH_SIZE = 128
CIFAR_MEAN = [0.5071, 0.4865, 0.4409]
CIFAR_STD = [0.1942, 0.1918, 0.1958]
DATA_FILE = './data/data76994/cifar-100-python.tar.gz'

model.prepare(
    paddle.optimizer.Momentum(
        learning_rate=LinearWarmup(
            CosineAnnealingDecay(LR, MAX_EPOCH), 2000, 0., LR),
        momentum=MOMENTUM,
        parameters=model.parameters(),
        weight_decay=WEIGHT_DECAY),
    CrossEntropyLoss(),
    paddle.metric.Accuracy(topk=(1, 5)))

transforms = Compose([
    RandomCrop(32, padding=4),
    RandomApply(BrightnessTransform(0.1)),
    RandomApply(ContrastTransform(0.1)),
    RandomHorizontalFlip(),
    RandomRotation(15),
    ToArray(),
    Normalize(CIFAR_MEAN, CIFAR_STD),
])
val_transforms = Compose([ToArray(), Normalize(CIFAR_MEAN, CIFAR_STD)])

train_set = Cifar100(DATA_FILE, mode='train', transform=transforms)
test_set = Cifar100(DATA_FILE, mode='test', transform=val_transforms)
callbacks = [LRSchedulerM(), callbacks.VisualDL('vis_logs/ofa_resnet20')]

model.fit(
    train_set,
    test_set,
    epochs=MAX_EPOCH,
    batch_size=BATCH_SIZE,
    save_dir='checkpoints',
    save_freq=100,
    shuffle=True,
    num_workers=4,
    verbose=1,
    callbacks=callbacks,
)
33.209091
108
0.680537
0
0
0
0
0
0
0
0
773
0.210111
7ca4b5308f48cb161081920789f0cfaed577f79d
28,560
py
Python
slashtags/mixins/commands.py
Myst1c-a/phen-cogs
672f9022ddbbd9a84b0a05357347e99e64a776fc
[ "MIT" ]
null
null
null
slashtags/mixins/commands.py
Myst1c-a/phen-cogs
672f9022ddbbd9a84b0a05357347e99e64a776fc
[ "MIT" ]
null
null
null
slashtags/mixins/commands.py
Myst1c-a/phen-cogs
672f9022ddbbd9a84b0a05357347e99e64a776fc
[ "MIT" ]
null
null
null
""" MIT License Copyright (c) 2020-present phenom4n4n Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import asyncio import logging import re import types from collections import Counter from copy import copy from typing import Dict, List, Union import discord from redbot.core import commands from redbot.core.utils.chat_formatting import box, humanize_list, inline, pagify from redbot.core.utils.menus import DEFAULT_CONTROLS, menu from redbot.core.utils.predicates import MessagePredicate from tabulate import tabulate from ..abc import MixinMeta from ..converters import ( GlobalTagConverter, GuildTagConverter, PastebinConverter, TagConverter, TagName, TagScriptConverter, ) from ..http import ApplicationOptionChoice, SlashOptionType from ..objects import ApplicationCommand, ApplicationCommandType, SlashOption, SlashTag from ..testing.button_menus import menu as button_menu from ..utils import ARGUMENT_NAME_DESCRIPTION, chunks, dev_check TAG_RE = re.compile(r"(?i)(\[p\])?\b(slash\s?)?tag'?s?\b") CHOICE_RE = re.compile(r".{1,100}:.{1,100}") CHOICE_LIMIT = 25 log = logging.getLogger("red.phenom4n4n.slashtags.commands") def _sub(match: re.Match) -> str: if match.group(1): return "[p]slashtag global" repl = "global " name = match.group(0) repl += name if name.istitle(): repl = repl.title() return repl def copy_doc(original: Union[commands.Command, types.FunctionType]): def decorator(overriden: Union[commands.Command, types.FunctionType]): doc = original.help if isinstance(original, commands.Command) else original.__doc__ doc = TAG_RE.sub(_sub, doc) if isinstance(overriden, commands.Command): overriden._help_override = doc else: overriden.__doc__ = doc return overriden return decorator class Commands(MixinMeta): @commands.guild_only() @commands.group(aliases=["st"]) async def slashtag(self, ctx: commands.Context): """ Slash Tag management with TagScript. These commands use TagScriptEngine. [This site](https://phen-cogs.readthedocs.io/en/latest/index.html) has documentation on how to use TagScript blocks. """ @commands.mod_or_permissions(manage_guild=True) @slashtag.command("add", aliases=["create", "+"]) async def slashtag_add( self, ctx: commands.Context, tag_name: TagName(check_global=False), *, tagscript: TagScriptConverter, ): """ Add a slash tag with TagScript. 
[Slash tag usage guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html) """ await self.create_slash_tag(ctx, tag_name, tagscript, is_global=False) async def create_slash_tag( self, ctx: commands.Context, tag_name: str, tagscript: str, *, is_global: bool = False, command_type: ApplicationCommandType = ApplicationCommandType.CHAT_INPUT, ): options: List[SlashOption] = [] guild_id = None if is_global else ctx.guild.id if command_type == ApplicationCommandType.CHAT_INPUT: try: description = await self.send_and_query_response( ctx, "What should the tag description to be? (maximum 100 characters)", pred=MessagePredicate.length_less(101, ctx), ) except asyncio.TimeoutError: return await ctx.send("Tag addition timed out.") else: description = "" if command_type == ApplicationCommandType.CHAT_INPUT: pred = MessagePredicate.yes_or_no(ctx) try: await self.send_and_query_response( ctx, "Would you like to add arguments to this tag? (Y/n)", pred ) except asyncio.TimeoutError: await ctx.send("Query timed out, not adding arguments.") else: if pred.result is True: await self.get_options(ctx, options) command = ApplicationCommand( self, name=tag_name, description=description, guild_id=guild_id, options=options, type=command_type, ) try: await command.register() except discord.Forbidden as error: log.error( "Failed to create command {command!r} on guild {ctx.guild!r}", exc_info=error ) text = ( "Looks like I don't have permission to add Slash Commands here. Reinvite me " "with this invite link and try again: <https://discordapp.com/oauth2/authorize" f"?client_id={self.bot.user.id}&scope=bot%20applications.commands>" ) return await ctx.send(text) except Exception: log.error("Failed to create command {command!r} on guild {ctx.guild!r}") # exc info unneeded since error handler should print it, however info on the command options is needed raise tag = SlashTag( self, tagscript, guild_id=guild_id, author_id=ctx.author.id, command=command, ) await ctx.send(await tag.initialize()) async def get_options( self, ctx: commands.Context, options: List[SlashOption] ) -> List[SlashOption]: added_required = False for i in range(1, 11): try: option = await self.get_option(ctx, added_required=added_required) if not option.required: added_required = True except asyncio.TimeoutError: await ctx.send("Adding this argument timed out.", delete_after=15) break options.append(option) if i == 10: break pred = MessagePredicate.yes_or_no(ctx) try: await self.send_and_query_response( ctx, "Would you like to add another argument? (Y/n)", pred ) except asyncio.TimeoutError: await ctx.send("Query timed out, not adding additional arguments.") break if pred.result is False: break return options async def send_and_query_response( self, ctx: commands.Context, query: str, pred: MessagePredicate = None, *, timeout: int = 60, ) -> str: if pred is None: pred = MessagePredicate.same_context(ctx) ask = await ctx.send(query) try: message = await self.bot.wait_for("message", check=pred, timeout=timeout) except asyncio.TimeoutError: await self.delete_quietly(ask) raise await self.delete_quietly(ask) await self.delete_quietly(message) return message.content async def get_choices(self, ctx: commands.Context) -> List[ApplicationOptionChoice]: query = ( "Send the list of choice names and values you would like to add as choices to " "the tag. Choice names and values should be seperated by `:`, and each choice " "should be seperated by `|`. 
Example:\n`dog:Doggo|cat:Catto`"
        )
        response = await self.send_and_query_response(ctx, query)
        choices = []
        for choice_text in response.split("|"):
            if ":" not in choice_text:
                await ctx.send(
                    f"Failed to parse `{choice_text}` to a choice as its name and value "
                    "weren't separated by a `:`.",
                    delete_after=15,
                )
                continue
            if not CHOICE_RE.match(choice_text):
                await ctx.send(
                    f"Failed to parse `{choice_text}` to a choice as "
                    "its name or value exceeded the 100 character limit.",
                    delete_after=15,
                )
                continue
            choice = ApplicationOptionChoice(*choice_text.split(":", 1))
            choices.append(choice)
            if len(choices) >= CHOICE_LIMIT:
                await ctx.send(f"Reached max choices ({CHOICE_LIMIT}).")
                break
        return choices

    async def get_option(
        self, ctx: commands.Context, *, added_required: bool = False
    ) -> SlashOption:
        name_desc = [
            "What should the argument name and description be?",
            "The argument name and description should be split by a `:`.",
            "Example: `member:A member of this server.`\n",
            "*Slash argument names may not exceed 32 characters and can only contain characters "
            "that are alphanumeric or '_' or '-'.",
            "The argument description must be less than or equal to 100 characters.*",
        ]
        name_pred = MessagePredicate.regex(ARGUMENT_NAME_DESCRIPTION, ctx)
        await self.send_and_query_response(ctx, "\n".join(name_desc), name_pred)
        match = name_pred.result
        name, description = match.group(1), match.group(2)

        valid_option_types = [
            name.lower()
            for name in SlashOptionType.__members__.keys()
            if not name.startswith("SUB")
        ]
        valid_option_types.append("choices")

        option_query = [
            "What should the argument type be?",
            f"Valid option types: {humanize_list([inline(n) for n in valid_option_types])}",
            "(select `string` if you don't understand)",
        ]
        option_type = await self.send_and_query_response(
            ctx,
            "\n".join(option_query),
            MessagePredicate.lower_contained_in(valid_option_types, ctx),
        )
        if option_type.lower() == "choices":
            choices = await self.get_choices(ctx)
            option_type = "STRING"
        else:
            choices = []
        option_type = SlashOptionType[option_type.upper()]

        if not added_required:
            pred = MessagePredicate.yes_or_no(ctx)
            await self.send_and_query_response(
                ctx,
                "Is this argument required? (Y/n)\n*Keep in mind that if you choose to make this argument optional, all following arguments must also be optional.*",
                pred,
            )
            required = pred.result
        else:
            await ctx.send(
                "This argument was automatically made optional as the previous one was optional.",
                delete_after=15,
            )
            required = False

        return SlashOption(
            name=name.lower(),
            description=description,
            option_type=option_type,
            required=required,
            choices=choices,
        )

    @commands.mod_or_permissions(manage_guild=True)
    @slashtag.command("message")
    async def slashtag_message(
        self,
        ctx: commands.Context,
        tag_name: TagName(check_global=False, check_regex=False),
        *,
        tagscript: TagScriptConverter,
    ):
        """
        Add a message command tag with TagScript.

        [Slash tag usage guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html)
        """
        await self.create_slash_tag(
            ctx, tag_name, tagscript, is_global=False, command_type=ApplicationCommandType.MESSAGE
        )

    @commands.mod_or_permissions(manage_guild=True)
    @slashtag.command("user")
    async def slashtag_user(
        self,
        ctx: commands.Context,
        tag_name: TagName(check_global=False, check_regex=False),
        *,
        tagscript: TagScriptConverter,
    ):
        """
        Add a user command tag with TagScript.
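
        **Example:**
        `[p]slashtag user greet Hello!`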
[Slash tag usage guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html) """ await self.create_slash_tag( ctx, tag_name, tagscript, is_global=False, command_type=ApplicationCommandType.USER ) @commands.mod_or_permissions(manage_guild=True) @slashtag.command("pastebin", aliases=["++"]) async def slashtag_pastebin( self, ctx: commands.Context, tag_name: TagName(check_global=False), *, link: PastebinConverter, ): """ Add a slash tag with a Pastebin link. """ await self.create_slash_tag(ctx, tag_name, link, is_global=False) @commands.mod_or_permissions(manage_guild=True) @slashtag.group("edit", aliases=["e"], invoke_without_command=True) async def slashtag_edit( self, ctx: commands.Context, tag: GuildTagConverter, *, tagscript: TagScriptConverter ): """Edit a slash tag.""" await ctx.send(await tag.edit_tagscript(tagscript)) @slashtag_edit.command("tagscript") async def slashtag_edit_tagscript( self, ctx: commands.Context, tag: GuildTagConverter, *, tagscript: TagScriptConverter ): """Edit a slash tag's TagScript.""" await self.slashtag_edit(ctx, tag, tagscript=tagscript) @slashtag_edit.command("name") async def slashtag_edit_name( self, ctx: commands.Context, tag: GuildTagConverter, *, name: TagName(check_global=False) ): """Edit a slash tag's name.""" await ctx.send(await tag.edit_name(name)) @slashtag_edit.command("description") async def slashtag_edit_description( self, ctx: commands.Context, tag: GuildTagConverter, *, description: str ): """Edit a slash tag's description.""" await ctx.send(await tag.edit_description(description)) @slashtag_edit.command("arguments", aliases=["options"]) async def slashtag_edit_arguments(self, ctx: commands.Context, tag: GuildTagConverter): """ Edit a slash tag's arguments. See [this documentation page](https://phen-cogs.readthedocs.io/en/latest/slashtags/slash_arguments.html) for more information on slash tag arguments. """ await tag.edit_options(ctx) @slashtag_edit.command("argument", aliases=["option"]) async def slashtag_edit_argument( self, ctx: commands.Context, tag: GuildTagConverter, argument: str ): """Edit a single slash tag's argument by name.""" await tag.edit_single_option(ctx, argument) @commands.mod_or_permissions(manage_guild=True) @slashtag.command("remove", aliases=["delete", "-"]) async def slashtag_remove(self, ctx: commands.Context, *, tag: GuildTagConverter): """Delete a slash tag.""" await ctx.send(await tag.delete()) @slashtag.command("info") async def slashtag_info(self, ctx: commands.Context, *, tag: TagConverter): """Get info about a slash tag that is stored on this server.""" await tag.send_info(ctx) @slashtag.command("raw") async def slashtag_raw(self, ctx: commands.Context, *, tag: GuildTagConverter): """Get a slash tag's raw content.""" await tag.send_raw_tagscript(ctx) @classmethod def format_tagscript(cls, tag: SlashTag, limit: int = 60) -> str: title = f"`{tag.type.get_prefix()}{tag.name}` - " limit -= len(title) tagscript = tag.tagscript if len(tagscript) > limit - 3: tagscript = tagscript[:limit] + "..." 
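        # Collapse newlines so each tag's preview renders on a single line in the list embed.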
        tagscript = tagscript.replace("\n", " ")
        return f"{title}{discord.utils.escape_markdown(tagscript)}"

    async def view_slash_tags(
        self,
        ctx: commands.Context,
        tags: Dict[int, SlashTag],
        *,
        is_global: bool,
    ):
        description = [
            self.format_tagscript(tag) for tag in sorted(tags.values(), key=lambda t: t.name)
        ]
        description = "\n".join(description)

        e = discord.Embed(color=await ctx.embed_color())
        if is_global:
            slash_tags = "global slash tags"
            e.set_author(name="Global Slash Tags", icon_url=ctx.me.avatar_url)
        else:
            slash_tags = "slash tags"
            e.set_author(name="Stored Slash Tags", icon_url=ctx.guild.icon_url)

        embeds = []
        pages = list(pagify(description))
        for index, page in enumerate(pages, 1):
            embed = e.copy()
            embed.description = page
            embed.set_footer(text=f"{index}/{len(pages)} | {len(tags)} {slash_tags}")
            embeds.append(embed)
        # await menu(ctx, embeds, DEFAULT_CONTROLS)
        await button_menu(ctx, embeds)

    @slashtag.command("list")
    async def slashtag_list(self, ctx: commands.Context):
        """View stored slash tags."""
        tags = self.guild_tag_cache[ctx.guild.id]
        if not tags:
            return await ctx.send("There are no slash tags on this server.")
        await self.view_slash_tags(ctx, tags, is_global=False)

    async def show_slash_tag_usage(self, ctx: commands.Context, guild: discord.Guild = None):
        tags = self.guild_tag_cache[guild.id] if guild else self.global_tag_cache
        if not tags:
            message = (
                "This server has no slash tags." if guild else "There are no global slash tags."
            )
            return await ctx.send(message)
        counter = Counter({tag.name: tag.uses for tag in tags.copy().values()})
        e = discord.Embed(title="Slash Tag Stats", color=await ctx.embed_color())
        embeds = []
        for usage_data in chunks(counter.most_common(), 10):
            usage_chart = box(tabulate(usage_data, headers=("Tag", "Uses")), "prolog")
            embed = e.copy()
            embed.description = usage_chart
            embeds.append(embed)
        await menu(ctx, embeds, DEFAULT_CONTROLS)

    @slashtag.command("usage", aliases=["stats"])
    async def slashtag_usage(self, ctx: commands.Context):
        """
        See slash tag usage stats for this server.

        **Example:**
        `[p]slashtag usage`
        """
        await self.show_slash_tag_usage(ctx, ctx.guild)

    @commands.is_owner()
    @slashtag.command("restore", hidden=True)
    async def slashtag_restore(self, ctx: commands.Context):
        """Restore all slash tags from the database."""
        await self.restore_tags(ctx, ctx.guild)

    @commands.is_owner()
    @slashtag.command("clear", hidden=True)
    async def slashtag_clear(self, ctx: commands.Context):
        """Clear all slash tags for this server."""
        pred = MessagePredicate.yes_or_no(ctx)
        try:
            await self.send_and_query_response(
                ctx, "Are you sure you want to delete all slash tags on this server? 
(Y/n)", pred ) except asyncio.TimeoutError: return await ctx.send("Timed out, not deleting slash tags.") if not pred.result: return await ctx.send("Ok, not deleting slash tags.") guild: discord.Guild = ctx.guild await self.http.put_guild_slash_commands(guild.id, []) for tag in copy(self.guild_tag_cache[guild.id]).values(): tag.remove_from_cache() tag.command.remove_from_cache() del tag self.guild_tag_cache[guild.id].clear() await self.config.guild(guild).tags.clear() await ctx.send("Tags deleted.") @commands.is_owner() @slashtag.group("global") @copy_doc(slashtag) async def slashtag_global(self, ctx: commands.Context): pass @slashtag_global.command("add") @copy_doc(slashtag_add) async def slashtag_global_add( self, ctx: commands.Context, tag_name: TagName(global_priority=True), *, tagscript: TagScriptConverter, ): await self.create_slash_tag(ctx, tag_name, tagscript, is_global=True) @commands.mod_or_permissions(manage_guild=True) @slashtag_global.command("message") @copy_doc(slashtag_message) async def slashtag_global_message( self, ctx: commands.Context, tag_name: TagName(global_priority=True, check_regex=False), *, tagscript: TagScriptConverter, ): await self.create_slash_tag( ctx, tag_name, tagscript, is_global=True, command_type=ApplicationCommandType.MESSAGE ) @commands.mod_or_permissions(manage_guild=True) @slashtag_global.command("user") @copy_doc(slashtag_user) async def slashtag_global_user( self, ctx: commands.Context, tag_name: TagName(global_priority=True, check_regex=False), *, tagscript: TagScriptConverter, ): await self.create_slash_tag( ctx, tag_name, tagscript, is_global=True, command_type=ApplicationCommandType.USER ) @slashtag_global.command("pastebin", aliases=["++"]) @copy_doc(slashtag_pastebin) async def slashtag_global_pastebin( self, ctx: commands.Context, tag_name: TagName(check_global=False), *, link: PastebinConverter, ): await self.create_slash_tag(ctx, tag_name, link, is_global=True) @slashtag_global.group("edit", aliases=["e"], invoke_without_command=True) @copy_doc(slashtag_edit) async def slashtag_global_edit( self, ctx: commands.Context, tag: GlobalTagConverter, *, tagscript: TagScriptConverter ): await ctx.send(await tag.edit_tagscript(tagscript)) @slashtag_global_edit.command("tagscript") @copy_doc(slashtag_edit_tagscript) async def slashtag_global_edit_tagscript( self, ctx: commands.Context, tag: GlobalTagConverter, *, tagscript: TagScriptConverter ): await self.slashtag_global_edit(ctx, tag, tagscript=tagscript) @slashtag_global_edit.command("name") @copy_doc(slashtag_edit_name) async def slashtag_global_edit_name( self, ctx: commands.Context, tag: GlobalTagConverter, *, name: TagName(global_priority=True), ): await ctx.send(await tag.edit_name(name)) @slashtag_global_edit.command("description") @copy_doc(slashtag_edit_description) async def slashtag_global_edit_description( self, ctx: commands.Context, tag: GlobalTagConverter, *, description: str ): await ctx.send(await tag.edit_description(description)) @slashtag_global_edit.command("arguments", aliases=["options"]) @copy_doc(slashtag_edit_arguments) async def slashtag_global_edit_arguments(self, ctx: commands.Context, tag: GlobalTagConverter): await tag.edit_options(ctx) @slashtag_global_edit.command("argument", aliases=["option"]) @copy_doc(slashtag_edit_argument) async def slashtag_global_edit_argument( self, ctx: commands.Context, tag: GuildTagConverter, argument: str ): await tag.edit_single_option(ctx, argument) @slashtag_global.command("remove", aliases=["delete", "-"]) 
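    # copy_doc mirrors the guild-level command's help text onto this global variant (rewritten via TAG_RE/_sub above).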
@copy_doc(slashtag_remove) async def slashtag_global_remove(self, ctx: commands.Context, *, tag: GlobalTagConverter): await ctx.send(await tag.delete()) @slashtag_global.command("raw") @copy_doc(slashtag_raw) async def slashtag_global_raw(self, ctx: commands.Context, *, tag: GlobalTagConverter): await tag.send_raw_tagscript(ctx) @slashtag_global.command("list") @copy_doc(slashtag_list) async def slashtag_global_list(self, ctx: commands.Context): tags = self.global_tag_cache if not tags: return await ctx.send("There are no global slash tags.") await self.view_slash_tags(ctx, tags, is_global=True) @slashtag_global.command("usage", aliases=["stats"]) @copy_doc(slashtag_usage) async def slashtag_global_usage(self, ctx: commands.Context): await self.show_slash_tag_usage(ctx) @slashtag_global.command("restore", hidden=True) @copy_doc(slashtag_restore) async def slashtag_global_restore(self, ctx: commands.Context): await self.restore_tags(ctx, None) @commands.is_owner() @commands.group(aliases=["slashset"]) async def slashtagset(self, ctx: commands.Context): """Manage SlashTags settings.""" @slashtagset.command("settings") async def slashtagset_settings(self, ctx: commands.Context): """View SlashTags settings.""" eval_command = f"✅ (**{self.eval_command}**)" if self.eval_command else "❎" testing_enabled = "✅" if self.testing_enabled else "❎" description = [ f"Application ID: **{self.application_id}**", f"Eval command: {eval_command}", f"Test cog loaded: {testing_enabled}", ] embed = discord.Embed( color=0xC9C9C9, title="SlashTags Settings", description="\n".join(description) ) await ctx.send(embed=embed) @slashtagset.command("appid") async def slashtagset_appid(self, ctx: commands.Context, id: int = None): """ Manually set the application ID for [botname] slash commands if it differs from the bot user ID. This only applies to legacy bots. If you don't know what this means, you don't need to worry about it. """ app_id = id or self.bot.user.id await self.config.application_id.set(app_id) self.application_id = app_id await ctx.send(f"Application ID set to `{id}`.") @commands.check(dev_check) @slashtagset.command("addeval") async def slashtagset_addeval(self, ctx: commands.Context): """Add a slash eval command for debugging.""" if self.eval_command: return await ctx.send("An eval command is already registered.") slasheval = ApplicationCommand( self, name="eval", description="SlashTags debugging eval command. Only bot owners can use this.", options=[ SlashOption(name="body", description="Code body to evaluate.", required=True) ], ) await slasheval.register() await self.config.eval_command.set(slasheval.id) self.eval_command = slasheval.id await ctx.send("`/eval` has been registered.") @commands.check(dev_check) @slashtagset.command("rmeval") async def slashtagset_rmeval(self, ctx: commands.Context): """Remove the slash eval command.""" if not self.eval_command: return await ctx.send("The eval command hasn't been registered.") try: await self.http.remove_slash_command(self.eval_command) except discord.HTTPException: pass await self.config.eval_command.clear() self.eval_command = None await ctx.send("`/eval` has been deleted.") @slashtagset.command("testing") async def slashtagset_testing(self, ctx: commands.Context, true_or_false: bool = None): """ Load or unload the SlashTag interaction development test cog. 
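        Pass `true` or `false` to set the state explicitly; running the command with no value toggles it.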
""" target_state = ( true_or_false if true_or_false is not None else not await self.config.testing_enabled() ) if target_state is self.testing_enabled: loaded = "loaded" if target_state else "unloaded" return await ctx.send(f"The SlashTag interaction testing cog is already {loaded}.") await self.config.testing_enabled.set(target_state) if target_state: loaded = "Loaded" self.add_test_cog() else: loaded = "Unloaded" self.remove_test_cog() await ctx.send(f"{loaded} the SlashTag interaction testing cog.")
37.777778
165
0.630882
25,728
0.900588
0
0
15,367
0.53791
22,070
0.772543
6,813
0.238484