hexsha
stringlengths
40
40
size
int64
5
2.06M
ext
stringclasses
10 values
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
3
248
max_stars_repo_name
stringlengths
5
125
max_stars_repo_head_hexsha
stringlengths
40
78
max_stars_repo_licenses
sequencelengths
1
10
max_stars_count
int64
1
191k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
3
248
max_issues_repo_name
stringlengths
5
125
max_issues_repo_head_hexsha
stringlengths
40
78
max_issues_repo_licenses
sequencelengths
1
10
max_issues_count
int64
1
67k
max_issues_repo_issues_event_min_datetime
stringlengths
24
24
max_issues_repo_issues_event_max_datetime
stringlengths
24
24
max_forks_repo_path
stringlengths
3
248
max_forks_repo_name
stringlengths
5
125
max_forks_repo_head_hexsha
stringlengths
40
78
max_forks_repo_licenses
sequencelengths
1
10
max_forks_count
int64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
content
stringlengths
5
2.06M
avg_line_length
float64
1
1.02M
max_line_length
int64
3
1.03M
alphanum_fraction
float64
0
1
count_classes
int64
0
1.6M
score_classes
float64
0
1
count_generators
int64
0
651k
score_generators
float64
0
1
count_decorators
int64
0
990k
score_decorators
float64
0
1
count_async_functions
int64
0
235k
score_async_functions
float64
0
1
count_documentation
int64
0
1.04M
score_documentation
float64
0
1
be4037367a1afa83a7501ca75f082c616c63c62c
625
py
Python
ros_tf_publisher.py
BrightLamp/PyLearningCodes
ed237528c41ab2a9832b88806732097ffae0a0ed
[ "MIT" ]
null
null
null
ros_tf_publisher.py
BrightLamp/PyLearningCodes
ed237528c41ab2a9832b88806732097ffae0a0ed
[ "MIT" ]
null
null
null
ros_tf_publisher.py
BrightLamp/PyLearningCodes
ed237528c41ab2a9832b88806732097ffae0a0ed
[ "MIT" ]
null
null
null
# encoding=utf-8 import rospy import tf if __name__ == '__main__': rospy.init_node('py_tf_broadcaster') br = tf.TransformBroadcaster() x = 0.0 y = 0.0 z = 0.0 roll = 0 pitch = 0 yaw = 1.57 rate = rospy.Rate(1) while not rospy.is_shutdown(): yaw = yaw + 0.1 roll = roll + 0.1 br.sendTransform((x, y, z), tf.transformations.quaternion_from_euler(roll, pitch, yaw), rospy.Time.now(), "base_link", "front_caster") # 发布base_link到link1的平移和翻转 rate.sleep()
24.038462
84
0.5104
0
0
0
0
0
0
0
0
113
0.175739
be40e740adf7c24c5c205687723b024d4eaf9752
2,674
py
Python
dataset_manager/technical_indicators.py
NightingaleV/bakalarska_prace-ann-algotrading
07866e092cb527a7e1d9d7050790d9ffd611dc83
[ "MIT" ]
null
null
null
dataset_manager/technical_indicators.py
NightingaleV/bakalarska_prace-ann-algotrading
07866e092cb527a7e1d9d7050790d9ffd611dc83
[ "MIT" ]
null
null
null
dataset_manager/technical_indicators.py
NightingaleV/bakalarska_prace-ann-algotrading
07866e092cb527a7e1d9d7050790d9ffd611dc83
[ "MIT" ]
null
null
null
# Imports import numpy as np class TechnicalIndicators: cci_constant = 0.015 def __init__(self): self.df = None # Exponentially-weighted moving average def ewma(self, periods): indicator = 'EWMA{}'.format(periods) self.df[indicator] = self.df['close'].ewm(span=periods).mean() return self # Stochastic Oscillator def stochastic_oscilator(self, k_period, d_period, smooth=1): lows = 'l{}'.format(k_period) highs = 'h{}'.format(k_period) self.df = self.calc_roll_min(self.df, k_period) self.df = self.calc_roll_max(self.df, k_period) self.df = self.stok(self.df, k_period) if smooth >= 1: self.df = self.smooth_stok(self.df, smooth) self.df = self.stod(self.df, d_period) self.df.drop([lows, highs], axis=1, inplace=True) return self @staticmethod def calc_roll_min(dataset, k_period): lows = 'l{}'.format(k_period) dataset[lows] = dataset['low'].rolling(window=k_period).min() return dataset @staticmethod def calc_roll_max(dataset, k_period): highs = 'h{}'.format(k_period) dataset[highs] = dataset['high'].rolling(window=k_period).max() return dataset @staticmethod def stok(dataset, k_period): lows = 'l{}'.format(k_period) highs = 'h{}'.format(k_period) dataset['%k'] = ((dataset['close'] - dataset[lows]) / ( dataset[highs] - dataset[lows])) * 100 return dataset @staticmethod def smooth_stok(dataset, smooth): dataset['%k'] = dataset['%k'].rolling(window=smooth).mean() return dataset @staticmethod def stod(dataset, d_period): dataset['%d'] = dataset['%k'].rolling(window=d_period).mean() return dataset # RSI - Relative Strength Index def rsi_indicator(self, period): rsi = 'rsi{}'.format(period) # Calculate differences between prices deltas = np.diff(self.df['close']) # For every row calculate rsi for i, row in self.df.iterrows(): if i < period: self.df.loc[i, rsi] = 0 else: self.df.loc[i, rsi] = self.calc_rsi(i, period, deltas) return self @staticmethod def calc_rsi(index, period, deltas): seed = deltas[index - period:index] average_gain = seed[seed >= 0].sum() / period average_loss = seed[seed < 0].sum() / period if abs(average_loss) == 0: rs = 0 else: rs = average_gain / abs(average_loss) rsi = 100. - (100. / (1 + rs)) return rsi
31.093023
71
0.5819
2,642
0.988033
0
0
1,303
0.487285
0
0
266
0.099476
be4201706e45a3d4dd6cd9622ea3645d54ac325f
440
py
Python
users/models.py
makutas/CocktailWebsite
c5192e5fc2b750a32500f5c3421ed07e89c9c7e1
[ "MIT" ]
null
null
null
users/models.py
makutas/CocktailWebsite
c5192e5fc2b750a32500f5c3421ed07e89c9c7e1
[ "MIT" ]
null
null
null
users/models.py
makutas/CocktailWebsite
c5192e5fc2b750a32500f5c3421ed07e89c9c7e1
[ "MIT" ]
null
null
null
from django.db import models from django.contrib.auth.models import User class UserProfile(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE) user_description = models.CharField(max_length=200, null=True) user_avatar = models.ImageField(null=True, blank=True) user_uploaded_recipes = models.IntegerField() # Increment by 1 on upload def __str__(self): return f"{self.user.username}"
31.428571
77
0.747727
364
0.827273
0
0
0
0
0
0
49
0.111364
be43dfd884e7a14b827d8c59b29470159f680616
5,332
py
Python
deploy/trained_model.py
Samyak005/Multi-Hop-QG
15cc794a48ac9df058689c410007ea52b0e12a6a
[ "MIT" ]
null
null
null
deploy/trained_model.py
Samyak005/Multi-Hop-QG
15cc794a48ac9df058689c410007ea52b0e12a6a
[ "MIT" ]
null
null
null
deploy/trained_model.py
Samyak005/Multi-Hop-QG
15cc794a48ac9df058689c410007ea52b0e12a6a
[ "MIT" ]
null
null
null
import torch import logging # Transformer version 4.9.1 - Newer versions may not work. from transformers import AutoTokenizer from trained_gpt_model import get_inference2 def t5_supp_inference(review_text): device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # CPU may not work, got to check. # device = torch.device('cpu') print('Using device:' + str(device)) PRETRAINED_MODEL = 't5-base' SEQ_LENGTH = 600 tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL) tokenizer.add_special_tokens( {'additional_special_tokens': ['<answer>', '<context>']} ) model = torch.load("../trained_models/t5_model_hotpot_supporting_facts_last.pth") model.eval() encoded_text = tokenizer( review_text, padding=True, max_length=SEQ_LENGTH, truncation=True, return_tensors="pt" ).to(device) input_ids = encoded_text['input_ids'] with torch.no_grad(): output = model.generate(input_ids) decoded_string = tokenizer.decode(output[0], skip_special_tokens=True) logging.debug("Decoded string" + decoded_string) print(decoded_string) # device.empty_cache() del model del tokenizer return decoded_string def t5_full_inference(review_text): device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # CPU may not work, got to check. # device = torch.device('cpu') print('Using device:' + str(device)) PRETRAINED_MODEL = 't5-base' SEQ_LENGTH = 600 tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL) tokenizer.add_special_tokens( {'additional_special_tokens': ['<answer>', '<context>']} ) model = torch.load("../trained_models/t5_model_hotpot_full_context_last.pth") model.eval() encoded_text = tokenizer( review_text, padding=True, max_length=SEQ_LENGTH, truncation=True, return_tensors="pt" ).to(device) input_ids = encoded_text['input_ids'] with torch.no_grad(): output = model.generate(input_ids) decoded_string = tokenizer.decode(output[0], skip_special_tokens=True) logging.debug("Decoded string" + decoded_string) print(decoded_string) # device.empty_cache() del model del tokenizer return decoded_string def bart_supp_inference(review_text): device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # CPU may not work, got to check. # device = torch.device('cpu') print('Using device:' + str(device)) PRETRAINED_MODEL = 'facebook/bart-base' SEQ_LENGTH = 600 tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL) tokenizer.add_special_tokens( {'additional_special_tokens': ['<answer>', '<context>']} ) model = torch.load("../trained_models/bart_model_hotpot_supporting_facts_last.pth") model.eval() encoded_text = tokenizer( review_text, padding=True, max_length=SEQ_LENGTH, truncation=True, return_tensors="pt" ).to(device) input_ids = encoded_text['input_ids'] with torch.no_grad(): output = model.generate(input_ids) decoded_string = tokenizer.decode(output[0], skip_special_tokens=True) logging.debug("Decoded string" + decoded_string) print(decoded_string) # device.empty_cache() del model del tokenizer return decoded_string def bart_full_inference(review_text): device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # CPU may not work, got to check. 
# device = torch.device('cpu') print('Using device:' + str(device)) PRETRAINED_MODEL = 'facebook/bart-base' SEQ_LENGTH = 600 tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL) tokenizer.add_special_tokens( {'additional_special_tokens': ['<answer>', '<context>']} ) model = torch.load("../trained_models/bart_model_hotpot_full_context_last.pth") model.eval() encoded_text = tokenizer( review_text, padding=True, max_length=SEQ_LENGTH, truncation=True, return_tensors="pt" ).to(device) input_ids = encoded_text['input_ids'] with torch.no_grad(): output = model.generate(input_ids) decoded_string = tokenizer.decode(output[0], skip_special_tokens=True) logging.debug("Decoded string" + decoded_string) print(decoded_string) # device.empty_cache() del model del tokenizer return decoded_string # if __name__ == "__main__": # review_text = "<answer> a fusional language <context> Typologically, Estonian represents a transitional form from an agglutinating language to a fusional language. The canonical word order is SVO (subject–verb–object)." # t5_supp_inference(review_text, md2, device) def get_inference(answer, context, model_name): valuation_text = "<answer> " + answer + " <context> " + context if model_name == 't5_supp': return t5_supp_inference(valuation_text) elif model_name == 't5_full': return t5_full_inference(valuation_text) elif model_name == 'bart_supp': return bart_supp_inference(valuation_text) elif model_name == 'bart_full': return bart_full_inference(valuation_text) elif model_name == 'gpt2': return get_inference2(answer, context)
33.534591
225
0.69036
0
0
0
0
0
0
0
0
1,492
0.27961
be44513cd298d38b88ee6e7730ed73cc8a97d105
5,979
py
Python
parlai/agents/drqa/config.py
shagunsodhani/ParlAI
5b634b844807372adfb0f6d6e5c42341ac8138f0
[ "BSD-3-Clause" ]
1
2017-06-26T07:46:33.000Z
2017-06-26T07:46:33.000Z
parlai/agents/drqa/config.py
shagunsodhani/ParlAI
5b634b844807372adfb0f6d6e5c42341ac8138f0
[ "BSD-3-Clause" ]
null
null
null
parlai/agents/drqa/config.py
shagunsodhani/ParlAI
5b634b844807372adfb0f6d6e5c42341ac8138f0
[ "BSD-3-Clause" ]
null
null
null
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. An additional grant # of patent rights can be found in the PATENTS file in the same directory. import os import sys import logging def str2bool(v): return v.lower() in ('yes', 'true', 't', '1', 'y') def add_cmdline_args(parser): # Runtime environment agent = parser.add_argument_group('DrQA Arguments') agent.add_argument('--no_cuda', type='bool', default=False) agent.add_argument('--gpu', type=int, default=-1) agent.add_argument('--random_seed', type=int, default=1013) # Basics agent.add_argument('--embedding_file', type=str, default=None, help='File of space separated embeddings: w e1 ... ed') agent.add_argument('--pretrained_model', type=str, default=None, help='Load dict/features/weights/opts from this file') agent.add_argument('--log_file', type=str, default=None) # Model details agent.add_argument('--fix_embeddings', type='bool', default=True) agent.add_argument('--tune_partial', type=int, default=0, help='Train the K most frequent word embeddings') agent.add_argument('--embedding_dim', type=int, default=300, help=('Default embedding size if ' 'embedding_file is not given')) agent.add_argument('--hidden_size', type=int, default=128, help='Hidden size of RNN units') agent.add_argument('--doc_layers', type=int, default=3, help='Number of RNN layers for passage') agent.add_argument('--question_layers', type=int, default=3, help='Number of RNN layers for question') agent.add_argument('--rnn_type', type=str, default='lstm', help='RNN type: lstm (default), gru, or rnn') # Optimization details agent.add_argument('--valid_metric', type=str, choices=['accuracy', 'f1'], default='f1', help='Metric for choosing best valid model') agent.add_argument('--max_len', type=int, default=15, help='The max span allowed during decoding') agent.add_argument('--rnn_padding', type='bool', default=False) agent.add_argument('--display_iter', type=int, default=10, help='Print train error after every \ <display_iter> epoches (default 10)') agent.add_argument('--dropout_emb', type=float, default=0.4, help='Dropout rate for word embeddings') agent.add_argument('--dropout_rnn', type=float, default=0.4, help='Dropout rate for RNN states') agent.add_argument('--dropout_rnn_output', type='bool', default=True, help='Whether to dropout the RNN output') agent.add_argument('--optimizer', type=str, default='adamax', help='Optimizer: sgd or adamax (default)') agent.add_argument('--learning_rate', '-lr', type=float, default=0.1, help='Learning rate for SGD (default 0.1)') agent.add_argument('--grad_clipping', type=float, default=10, help='Gradient clipping (default 10.0)') agent.add_argument('--weight_decay', type=float, default=0, help='Weight decay (default 0)') agent.add_argument('--momentum', type=float, default=0, help='Momentum (default 0)') # Model-specific agent.add_argument('--concat_rnn_layers', type='bool', default=True) agent.add_argument('--question_merge', type=str, default='self_attn', help='The way of computing question representation') agent.add_argument('--use_qemb', type='bool', default=True, help='Whether to use weighted question embeddings') agent.add_argument('--use_in_question', type='bool', default=True, help='Whether to use in_question features') agent.add_argument('--use_tf', type='bool', default=True, help='Whether to use tf features') agent.add_argument('--use_time', type=int, default=0, help='Time 
features marking how recent word was said') def set_defaults(opt): # Embeddings options if opt.get('embedding_file'): if not os.path.isfile(opt['embedding_file']): raise IOError('No such file: %s' % args.embedding_file) with open(opt['embedding_file']) as f: dim = len(f.readline().strip().split(' ')) - 1 opt['embedding_dim'] = dim elif not opt.get('embedding_dim'): raise RuntimeError(('Either embedding_file or embedding_dim ' 'needs to be specified.')) # Make sure tune_partial and fix_embeddings are consistent if opt['tune_partial'] > 0 and opt['fix_embeddings']: print('Setting fix_embeddings to False as tune_partial > 0.') opt['fix_embeddings'] = False # Make sure fix_embeddings and embedding_file are consistent if opt['fix_embeddings']: if not opt.get('embedding_file') and not opt.get('pretrained_model'): print('Setting fix_embeddings to False as embeddings are random.') opt['fix_embeddings'] = False def override_args(opt, override_opt): # Major model args are reset to the values in override_opt. # Non-architecture args (like dropout) are kept. args = set(['embedding_file', 'embedding_dim', 'hidden_size', 'doc_layers', 'question_layers', 'rnn_type', 'optimizer', 'concat_rnn_layers', 'question_merge', 'use_qemb', 'use_in_question', 'use_tf', 'vocab_size', 'num_features', 'use_time']) for k, v in override_opt.items(): if k in args: opt[k] = v
51.102564
80
0.616993
0
0
0
0
0
0
0
0
2,770
0.463288
be44bd30d7e94517cda605e3c7b74f2c0cefb67c
4,919
py
Python
gen4service/gen4bean.py
yongli82/CodeGenerator
4ca9255c3c4c5392e45815fd20f605ccbbfd2325
[ "MIT" ]
null
null
null
gen4service/gen4bean.py
yongli82/CodeGenerator
4ca9255c3c4c5392e45815fd20f605ccbbfd2325
[ "MIT" ]
null
null
null
gen4service/gen4bean.py
yongli82/CodeGenerator
4ca9255c3c4c5392e45815fd20f605ccbbfd2325
[ "MIT" ]
null
null
null
#!/usr/bin/python # -*- coding: utf-8 -*- import os import sys reload(sys) sys.path.append("..") sys.setdefaultencoding('utf-8') from jinja2 import Environment from jinja2 import Template import re from sqlalchemy import schema, types from sqlalchemy.engine import create_engine import yyutil import CodeGen project_name = "budget" data_name = "BudgetReport" table_name = "FC_BudgetBaseInfo" searchBeanPackage="com.dianping.ba.finance.budget.api.beans" searchBeanName="BudgetReportSearchBean" searchBeanField=""" private int budgetTypeId; private int costDepartmentId; private String budgetOwnerNo; private boolean exceedBudget; private boolean withExpenseType; private int beginYear; private int beginMonth; private int endYear; private int endMonth; """ dataBeanPackage="com.dianping.ba.finance.budget.api.beans" dataBeanName="BudgetYearReportDisplayBean" dataBeanField=""" private int budgetYear; private int budgetTypeId; private String budgetTypeNo; private String budgetTypeName; private int costDepartmentId; private String costDepartmentName; private String budgetOwnerNo; private String budgetOwnerName; private int budgetStatus; private String budgetStatusName; private int budgetPlanId; private String budgetPlanNo; private int strategyId; private int strategyPeriodType; private String strategyPeriodTypeName; private BigDecimal yearTotalAmount; private BigDecimal yearAvailableAmount; private BigDecimal yearUsedAmount; private BigDecimal yearFrozenAmount; private BigDecimal quarterTotalAmount1; private BigDecimal quarterAvailableAmount1; private BigDecimal quarterUsedAmount1; private BigDecimal quarterFrozenAmount1; private BigDecimal quarterTotalAmount2; private BigDecimal quarterAvailableAmount2; private BigDecimal quarterUsedAmount2; private BigDecimal quarterFrozenAmount2; private BigDecimal quarterTotalAmount3; private BigDecimal quarterAvailableAmount3; private BigDecimal quarterUsedAmount3; private BigDecimal quarterFrozenAmount3; private BigDecimal quarterTotalAmount4; private BigDecimal quarterAvailableAmount4; private BigDecimal quarterUsedAmount4; private BigDecimal quarterFrozenAmount4; private BigDecimal monthTotalAmount1; private BigDecimal monthAvailableAmount1; private BigDecimal monthUsedAmount1; private BigDecimal monthFrozenAmount1; private BigDecimal monthTotalAmount2; private BigDecimal monthAvailableAmount2; private BigDecimal monthUsedAmount2; private BigDecimal monthFrozenAmount2; private BigDecimal monthTotalAmount3; private BigDecimal monthAvailableAmount3; private BigDecimal monthUsedAmount3; private BigDecimal monthFrozenAmount3; private BigDecimal monthTotalAmount4; private BigDecimal monthAvailableAmount4; private BigDecimal monthUsedAmount4; private BigDecimal monthFrozenAmount4; private BigDecimal monthTotalAmount5; private BigDecimal monthAvailableAmount5; private BigDecimal monthUsedAmount5; private BigDecimal monthFrozenAmount5; private BigDecimal monthTotalAmount6; private BigDecimal monthAvailableAmount6; private BigDecimal monthUsedAmount6; private BigDecimal monthFrozenAmount6; private BigDecimal monthTotalAmount7; private BigDecimal monthAvailableAmount7; private BigDecimal monthUsedAmount7; private BigDecimal monthFrozenAmount7; private BigDecimal monthTotalAmount8; private BigDecimal monthAvailableAmount8; private BigDecimal monthUsedAmount8; private BigDecimal monthFrozenAmount8; private BigDecimal monthTotalAmount9; private BigDecimal monthAvailableAmount9; private BigDecimal monthUsedAmount9; private BigDecimal monthFrozenAmount9; private BigDecimal 
monthTotalAmount10; private BigDecimal monthAvailableAmount10; private BigDecimal monthUsedAmount10; private BigDecimal monthFrozenAmount10; private BigDecimal monthTotalAmount11; private BigDecimal monthAvailableAmount11; private BigDecimal monthUsedAmount11; private BigDecimal monthFrozenAmount11; private BigDecimal monthTotalAmount12; private BigDecimal monthAvailableAmount12; private BigDecimal monthUsedAmount12; private BigDecimal monthFrozenAmount12; """ columns = yyutil.convert_bean_to_columns(dataBeanField) search_columns = yyutil.convert_bean_to_columns(searchBeanField) jinja2_env = CodeGen.getEnvironment("gen4service") template = jinja2_env.get_template("bean_code_template.md") #snippet = template.render(table_name=table_name, data_name=data_name, columns=columns) snippet = template.render(locals()) print snippet with open(data_name + "_generate.md", 'wb') as f: f.write(snippet) f.flush() f.close() os.system("open " + data_name + "_generate.md")
32.793333
87
0.795893
0
0
0
0
0
0
0
0
4,149
0.843464
be451445b545eb79b0e3f43bb3bb14e581f5720c
2,333
py
Python
Log_tao.py
zigzax/Basic_Python
d9d3256f2ac627e6e98991f73ab67ef8fcc4172d
[ "MIT" ]
null
null
null
Log_tao.py
zigzax/Basic_Python
d9d3256f2ac627e6e98991f73ab67ef8fcc4172d
[ "MIT" ]
null
null
null
Log_tao.py
zigzax/Basic_Python
d9d3256f2ac627e6e98991f73ab67ef8fcc4172d
[ "MIT" ]
null
null
null
Python 3.9.0 (tags/v3.9.0:9cf6752, Oct 5 2020, 15:34:40) [MSC v.1927 64 bit (AMD64)] on win32 Type "help", "copyright", "credits" or "license()" for more information. >>> import turtle >>> tao = turtle.Turtle() >>> tao.shape('turtle') >>> tao.forward(100) >>> tao.left(90) >>> tao.forward(100) >>> tao.left(90) >>> tao.forward(100) >>> tao.left(90) >>> tao.forward(100) >>> tao.left(90) >>> tao.reset <bound method RawTurtle.reset of <turtle.Turtle object at 0x000001F98553ECA0>> >>> tao.reset() >>> for i in range(4) SyntaxError: invalid syntax >>> for i in range(4): tao.forward(100)tao.left(90) SyntaxError: invalid syntax >>> for i in range(4): tao.forward(100) tao.left(90) >>> range (4) range(0, 4) >>> list (range(4)) [0, 1, 2, 3] >>> for i in range(5) SyntaxError: invalid syntax >>> for i in range(5): print(i) 0 1 2 3 4 \ >>> for i in range(5): print(i) 0 1 2 3 4 >>> for i in range[10,50,90]: print(i) Traceback (most recent call last): File "<pyshell#28>", line 1, in <module> for i in range[10,50,90]: TypeError: 'type' object is not subscriptable >>> for i in[10,50,90]: print(i) 10 50 90 >>> range (1,10) range(1, 10) >>> list (range(1,10)) [1, 2, 3, 4, 5, 6, 7, 8, 9] >>> tao.reset() >>> for i in range (4): tao.forward(100) tao.left(90) print('No.',i) No. 0 No. 1 No. 2 No. 3 >>> tao.reset <bound method RawTurtle.reset of <turtle.Turtle object at 0x000001F98553ECA0>> >>> tao.reset() >>> tao.forward(100) >>> tao.left(45) >>> tao.forward(100) >>> tao.left(45) >>> tao.forward(100) >>> tao.left(45) >>> tao.forward(100) >>> tao.left(45) >>> tao.forward(100) >>> tao.left(45) >>> tao.forward(100) >>> tao.left(45) >>> tao.forward(100) >>> tao.left(45) >>> tao.forward(100) >>> for i in range (8): tao.forward(100) tao.left(45) print('No.',i) No. 0 No. 1 No. 2 No. 3 No. 4 No. 5 No. 6 No. 7 >>> tao.reset() >>> for i in range (8): tao.forward(100) tao.left(45) print('No.',i) No. 0 No. 1 No. 2 No. 3 No. 4 No. 5 No. 6 No. 7 >>> tao.reset() >>> def regtangle(): for i in range(4): tao.forward(100) tao.left(90) >>> regtangle() >>> tao.reset() >>> for i in range(10): regtangle() tao.left(36) >>> tao.reset() >>>
16.089655
95
0.562366
0
0
0
0
0
0
0
0
80
0.034291
be451a5cb8b5c7262021b6003b4a6ffdd2ef5a5f
424
py
Python
run.py
pome-ta/CodeMirror
ef39c3032ea128d988c263ed97851860db9f977c
[ "MIT" ]
null
null
null
run.py
pome-ta/CodeMirror
ef39c3032ea128d988c263ed97851860db9f977c
[ "MIT" ]
null
null
null
run.py
pome-ta/CodeMirror
ef39c3032ea128d988c263ed97851860db9f977c
[ "MIT" ]
null
null
null
""" Pythonista3 app CodeMirror """ import pythonista.wkwebview as wkwebview import ui import pathlib uri = pathlib.Path('./main_index.html') class View(ui.View): def __init__(self): self.wv = wkwebview.WKWebView(flex='WH') self.wv.load_url(str(uri)) self.add_subview(self.wv) def will_close(self): self.wv.clear_cache() _view = View() _view.present(style='fullscreen', orientations=['portrait'])
16.96
60
0.707547
199
0.46934
0
0
0
0
0
0
79
0.186321
be466292d2d3ccf1cddc1f8ecf7d02c60e49df95
1,363
py
Python
gen_cnn_dataset.py
NPCai/graphene-py
50163eb65f55c25a3d090bad03e34304b1cb3037
[ "MIT" ]
5
2018-09-10T15:33:51.000Z
2020-07-28T05:46:59.000Z
gen_cnn_dataset.py
NPCai/graphene-py
50163eb65f55c25a3d090bad03e34304b1cb3037
[ "MIT" ]
null
null
null
gen_cnn_dataset.py
NPCai/graphene-py
50163eb65f55c25a3d090bad03e34304b1cb3037
[ "MIT" ]
null
null
null
import wrapper as w from multiprocessing import Process import atexit import time from queue import Queue ''' 8 Processes, 24 threads per process = 192 threads ''' NUM_PROCESSES = 8 workerList = [] # Worker processes class Worker(Process): # Need multiple threads or else it takes forever def __init__(self, queue): # filNum is the id of the file to extract from super().__init__() self.queue = queue self.outQueue = Queue() def run(self): with concurrent.futures.ThreadPoolExecutor(max_workers=24) as executor: executor.submit(loadUrl()) def loadUrl(): while not self.queue.empty(): sentence = self.queue.get() ex = w.GrapheneExtract(sentence) self.outQueue.put(sentence.strip() + "\t" + str(ex.json) + "\n") queues = [] # Use seperate queues to avoid waiting for locks with open("data/all_news.txt", "r") as news: for line in news[::len(news) / NUM_PROCESSES]: queue = Queue() queue.put(line.strip()) print("Queue populated") for i in range(NUM_PROCESSES): worker = Worker(queues[i]) worker.daemon = True worker.start() workerList.append(worker) def close_running_threads(): for thread in workerList: thread.join() atexit.register(close_running_threads) print("All threads registered and working.") while True: print(queue.qsize() " sentences remaining to be requested") time.sleep(2) # Print every two seconds
26.72549
74
0.726339
519
0.380778
0
0
0
0
0
0
364
0.267058
be47030ab919977e3706aa43ef448dd537100bbd
2,702
py
Python
torch/_prims/context.py
EikanWang/pytorch
823ddb6e87e8111c9b5a99523503172e5bf62c49
[ "Intel" ]
null
null
null
torch/_prims/context.py
EikanWang/pytorch
823ddb6e87e8111c9b5a99523503172e5bf62c49
[ "Intel" ]
1
2022-01-10T18:39:28.000Z
2022-01-10T19:15:57.000Z
torch/_prims/context.py
HaoZeke/pytorch
4075972c2675ef34fd85efd60c9bad75ad06d386
[ "Intel" ]
null
null
null
from typing import Callable, Sequence, Any, Dict import functools import torch import torch.overrides from torch._prims.utils import torch_function_passthrough import torch._refs as refs import torch._refs import torch._refs.nn import torch._refs.nn.functional import torch._refs.special import torch._prims # TODO: automap torch operations to references # (need to throw a good assertion if the mapping doesn't exist) _torch_to_reference_map = { torch.add: refs.add, # torch.div: refs.div, torch.mul: refs.mul, torch.ge: refs.ge, torch.gt: refs.gt, torch.le: refs.le, torch.lt: refs.lt, } @functools.lru_cache(None) def torch_to_refs_map(): """ Mapping of torch API functions to torch._refs functions. E.g. torch_to_refs_map()[torch.add] == torch._refs.add """ modules = [ (torch, torch._refs), (torch.nn, torch._refs.nn), (torch.nn.functional, torch._refs.nn.functional), (torch.special, torch._refs.special), ] r = {} for mod_torch, mod_refs in modules: for s in mod_refs.__all__: # type: ignore[attr-defined] r[mod_torch.__dict__.get(s)] = mod_refs.__dict__.get(s) return r @functools.lru_cache(None) def all_prims(): """ Set of all prim functions, e.g., torch._prims.add in all_prims() """ return {torch._prims.__dict__.get(s) for s in torch._prims.__all__} class TorchRefsMode(torch.overrides.TorchFunctionMode): """ Switches the interpretation of torch.* functions and Tensor methods to use PrimTorch refs in torch._refs. (Direct calls to _refs are unaffected.) >>> with TorchRefsMode.push(): ... torch.add(x, y) # calls torch._refs.add(x, y) By default, this context manager will fall back on the torch.* if the ref does not exist; set strict=True to error if this occurs. """ def __init__(self, strict=False): self.strict = strict def __torch_function__( self, orig_func: Callable, types: Sequence, args: Sequence[Any] = (), kwargs: Dict = None, ): if kwargs is None: kwargs = {} # For primitive operations, run them as is without interception if orig_func in torch_function_passthrough or orig_func in all_prims(): return orig_func(*args, **kwargs) mapping = torch_to_refs_map() func = mapping.get(orig_func, None) if func is not None: return func(*args, **kwargs) if self.strict: raise RuntimeError( f"no _refs support for {torch.overrides.resolve_name(orig_func)}" ) return orig_func(*args, **kwargs)
28.442105
81
0.650259
1,291
0.477794
0
0
776
0.287195
0
0
900
0.333087
be47dbc95464f47bb2c554b62349cf2699343260
1,868
py
Python
search/tests/test_read_similarities.py
cotsog/pathways-backend
9231731359fc97833dbdbca33ac23eebeac4f715
[ "BSD-3-Clause" ]
null
null
null
search/tests/test_read_similarities.py
cotsog/pathways-backend
9231731359fc97833dbdbca33ac23eebeac4f715
[ "BSD-3-Clause" ]
null
null
null
search/tests/test_read_similarities.py
cotsog/pathways-backend
9231731359fc97833dbdbca33ac23eebeac4f715
[ "BSD-3-Clause" ]
null
null
null
from django.test import TestCase from search.read_similarities import build_manual_similarity_map from common.testhelpers.random_test_values import a_string, a_float class TestReadingManualTaskSimilarities(TestCase): def test_convert_matrix_to_map_from_topic_to_array_of_services(self): data = [ ['topic1', 'topic2'], ['service1', 'service2'], ] expected_result = { 'topic1': ['service1'], 'topic2': ['service2'], } result = build_manual_similarity_map(data) self.assertEqual(result, expected_result) def test_can_handle_multiple_services_for_a_topic(self): data = [ ['topic1', ], ['service1'], ['service2'], ['service3'], ] expected_result = { 'topic1': ['service1', 'service2', 'service3'], } result = build_manual_similarity_map(data) self.assertEqual(result, expected_result) def test_can_handle_different_numbers_of_services_for_different_topics(self): data = [ ['topic1', 'topic2'], ['service1', 'service2'], ['service3'], ] expected_result = { 'topic1': ['service1', 'service3'], 'topic2': ['service2'], } result = build_manual_similarity_map(data) self.assertEqual(result, expected_result) def test_can_handle_empty_entries(self): data = [ ['topic1', 'topic2'], ['service1', 'service2'], ['', 'service3'], [None, 'service4'], ] expected_result = { 'topic1': ['service1'], 'topic2': ['service2', 'service3', 'service4'], } result = build_manual_similarity_map(data) self.assertEqual(result, expected_result)
32.206897
81
0.571734
1,699
0.909529
0
0
0
0
0
0
354
0.189507
be47eadfdaf03e7261eb7070f1efcdf27e299506
7,535
py
Python
fortuna/fortuna.py
Zabamund/HackCPH18
3855547824c6277ca6f4e7b97c3ad0b3829e266b
[ "MIT" ]
3
2018-06-09T08:03:31.000Z
2018-11-23T20:18:06.000Z
fortuna/fortuna.py
Zabamund/HackCPH18
3855547824c6277ca6f4e7b97c3ad0b3829e266b
[ "MIT" ]
1
2020-03-30T20:23:17.000Z
2020-03-30T20:23:17.000Z
fortuna/fortuna.py
Zabamund/HackCPH18
3855547824c6277ca6f4e7b97c3ad0b3829e266b
[ "MIT" ]
2
2018-06-09T06:45:53.000Z
2018-06-09T15:36:36.000Z
""" Fortuna Python project to visualize uncertatinty in probabilistic exploration models. Created on 09/06/2018 @authors: Natalia Shchukina, Graham Brew, Marco van Veen, Behrooz Bashokooh, Tobias Stål, Robert Leckenby """ # Import libraries import numpy as np import glob from matplotlib import pyplot as plt import pandas as pd import xarray as xr import pyproj as proj from scipy.stats import norm class Fortuna(object): """ Class to load the fortuna dataset and call different methods for visualization in a web frontend. Args: There are no required arguments at the moment. Input files could be defined. """ def __init__(self, **kwargs): """ Method that is called when a object of the class Fortuna is initiated, it imports the data and directly creates some important variables. """ # hardcode geometry self.size_raster = (250,162) self.X_corner = 390885 self.Y_corner = 7156947 self.dx, self.dy, self.dz = 25, 25, 100 self.top_model = 950 self.bottom_model = 1050 self.base_cube = None self.top_cube = None self.base_n = None self.top_n = None self.vol = None # Create empty xarray dataset self.ds = xr.Dataset() self.xx = None self.yy = None self.zz = None self.model = None self.base_mean = None self.base_std = None self.top_mean = None self.top_std = None ## Initial methods to load self.import_data() self.calc_xarray() self.calc_stat() ### Methods for initiating the object def folder2cube(self, files): """ Method to read a file. """ base_set = glob.glob(files) cube = np.zeros(self.size_raster + (len(base_set),)) for i, model in enumerate(base_set): cube[:, :, i] = np.loadtxt(model, skiprows=1).reshape(self.size_raster) return cube, len(base_set) def import_data(self): """ Method to load different data objects from files. """ self.base_cube, self.base_n = self.folder2cube('data/Hackaton/BaseSet/MapSimu__*.data') self.top_cube, self.top_n = self.folder2cube('data/Hackaton/TopSet/MapSimu__*.data') self.vol = pd.read_csv('data/Hackaton/VolumeDistribution/Volumes', delim_whitespace=True) def calc_xarray (self): self.xx = np.linspace(self.X_corner, self.X_corner + self.size_raster[0] * self.dx, self.size_raster[0]) self.yy = np.linspace(self.Y_corner, self.Y_corner + self.size_raster[1] * self.dy, self.size_raster[1]) self.zz = np.linspace(self.top_model, self.bottom_model, self.dz) self.model = np.linspace(0, self.top_model, self.base_n) self.ds.coords['X'] = self.xx self.ds.coords['Y'] = self.yy self.ds.coords['Z'] = self.zz self.ds.coords['MODEL'] = self.model self.ds['BASE'] = (('X', 'Y', 'MODEL'), self.base_cube) self.ds['TOP'] = (('X', 'Y', 'MODEL'), self.top_cube) def calc_stat (self): self.base_mean = self.ds['BASE'].mean(dim='MODEL') self.base_std = self.ds['BASE'].std(dim='MODEL') self.top_mean = self.ds['TOP'].mean(dim='MODEL') self.top_std = self.ds['TOP'].std(dim='MODEL') ## Data Management methods def load_pickle(self, path): return np.load(path) ## Methods to compute different uncertatinty cubes --> cubes to be displayed in the frontend def calc_lithology(self, iterations = 2): """ Sample from both distributions and fill each z-stack accordingly """ # create empty array block = np.zeros((iterations, self.size_raster[0], self.size_raster[1], self.zz.size), dtype='int8') for i in range(iterations): for j in range(self.size_raster[0]): # size_raster[0] for k in range(self.size_raster[1]): # sample from top and base distributions for specific x,y position top = np.random.normal(self.top_mean[j, k], self.top_std[j, k]) base = np.random.normal(self.base_mean[j, k], self.base_std[j, k]) # iterate 
over vertical z-stack for l in range(self.zz.size): if self.zz[l] <= top: block[i, j, k, l] = 1 elif self.zz[l] > base: block[i, j, k, l] = 3 elif ((self.zz[l] > top) and (l <= base)): block[i, j, k, l] = 2 return block def calc_lithology_vect(self, iterations=2): """ Resample from z value statistics and fill each z-stack in a lithology block accordingly. This is the new method with vectorized operations to speed up calculations. """ # create empty array block = np.zeros((iterations, self.xx.size, self.yy.size, self.zz.size), dtype='int8') for i in range(iterations): # create meshgrids grid for coordinate-wise iterations mesh_x, mesh_y, mesh_z = np.meshgrid(np.arange(self.xx.size), np.arange(self.yy.size), np.arange(self.zz.size)) # sample from top and base distributions for specific x,y position top = np.zeros([self.xx.size, self.yy.size]) base = np.zeros([self.xx.size, self.yy.size]) top[mesh_x, mesh_y] = np.random.normal(self.top_mean.values[mesh_x, mesh_y], self.top_std.values[mesh_x, mesh_y]) base[mesh_x, mesh_y] = np.random.normal(self.top_mean.values[mesh_x, mesh_y], self.top_std.values[mesh_x, mesh_y]) # compare each cell to resampled reference values # TODO generalize for any number of lithologies block[i, mesh_x, mesh_y, mesh_z] = np.where(self.zz < top[mesh_x, mesh_y], 1, np.where(self.zz < base[mesh_x, mesh_y], 2, 3)) return block ### Modifyed from GemPy! def calc_probability_lithology(self, cube): """Blocks must be just the lith blocks!""" lith_blocks = cube.reshape([cube.shape[0], (self.xx.size * self.yy.size * self.zz.size)]) lith_id = np.unique(lith_blocks) # lith_count = np.zeros_like(lith_blocks[0:len(lith_id)]) lith_count = np.zeros((len(np.unique(lith_blocks)), lith_blocks.shape[1])) for i, l_id in enumerate(lith_id): lith_count[i] = np.sum(lith_blocks == l_id, axis=0) lith_prob = lith_count / len(lith_blocks) return lith_prob ### Modyfied from GemPy! def calc_information_entropy(self, lith_prob): """Calculates information entropy for the given probability array.""" cube = np.zeros_like(lith_prob[0]) for l in lith_prob: pm = np.ma.masked_equal(l, 0) # mask where layer prob is 0 cube -= (pm * np.ma.log2(pm)).filled(0) return cube.reshape([self.xx.size, self.yy.size, self.zz.size]) # Try numpy.flatten and numpy.ravel ## Simple plotting methods def plot_entropy(self, cube, slice=10): plt.imshow(cube[slice, :, :].T, origin='upperleft', cmap='viridis') plt.show()
34.56422
145
0.584871
7,127
0.945727
0
0
0
0
0
0
2,173
0.288349
be47eb6ac22a5716a180d2587e75ad448943ea4f
1,104
py
Python
resize.py
Linx3/6.867-Final-Project
374d7093159be0bc524b291bacad52741f6bdc95
[ "MIT" ]
3
2019-12-27T12:18:29.000Z
2020-02-10T22:40:36.000Z
resize.py
Linx3/6.867-Final-Project
374d7093159be0bc524b291bacad52741f6bdc95
[ "MIT" ]
null
null
null
resize.py
Linx3/6.867-Final-Project
374d7093159be0bc524b291bacad52741f6bdc95
[ "MIT" ]
2
2019-12-29T02:11:29.000Z
2020-02-10T19:49:41.000Z
from PIL import Image # open an image file (.bmp,.jpg,.png,.gif) you have in the working folder # //imageFile = "03802.png" import os arr=os.listdir() for imageFile in arr: if "png" in imageFile: im1 = Image.open(imageFile) # adjust width and height to your needs width = 416 height = 416 # use one of these filter options to resize the image im2 = im1.resize((width, height), Image.NEAREST) # use nearest neighbour # im3 = im1.resize((width, height), Image.BILINEAR) # linear interpolation in a 2x2 environment # im4 = im1.resize((width, height), Image.BICUBIC) # cubic spline interpolation in a 4x4 environment # im5 = im1.resize((width, height), Image.ANTIALIAS) # best down-sizing filter ext = ".png" # print(imageFile.split(".")[0]) num=imageFile.split(".")[0] print(num) print(type(num)) im2.save(imageFile) # im2.save(imageFile+ ext) # im3.save("BILINEAR" + ext) # im4.save("BICUBIC" + ext) # im5.save("ANTIALIAS" + ext)
36.8
113
0.600543
0
0
0
0
0
0
0
0
656
0.594203
be483eb33f37e53a2e55abe5acc6cd622141fb6c
200
py
Python
src/game/exceptions.py
UnBParadigmas/2020.1_G2_SMA_DarwInPython
34cdc979a95f827f230bd4f13442f6c67d81ba2b
[ "MIT" ]
null
null
null
src/game/exceptions.py
UnBParadigmas/2020.1_G2_SMA_DarwInPython
34cdc979a95f827f230bd4f13442f6c67d81ba2b
[ "MIT" ]
1
2020-11-20T10:32:49.000Z
2020-11-20T10:32:49.000Z
src/game/exceptions.py
UnBParadigmas/2020.1_G2_SMA_DarwInPython
34cdc979a95f827f230bd4f13442f6c67d81ba2b
[ "MIT" ]
null
null
null
class InvalidMovementException(Exception): pass class InvalidMovementTargetException(InvalidMovementException): pass class InvalidMovimentOriginException(InvalidMovementException): pass
22.222222
63
0.84
195
0.975
0
0
0
0
0
0
0
0
be4ff442cd8f9b517de533a73d5af1571d1d4790
2,517
py
Python
src/pipeline/sentence-retrieval/run.py
simonepri/fever-transformers
3e9c57b0b4e781f318438d48589a56db709124c4
[ "MIT" ]
8
2020-05-03T08:40:24.000Z
2022-01-25T18:47:44.000Z
src/pipeline/sentence-retrieval/run.py
simonepri/fever-transformers
3e9c57b0b4e781f318438d48589a56db709124c4
[ "MIT" ]
null
null
null
src/pipeline/sentence-retrieval/run.py
simonepri/fever-transformers
3e9c57b0b4e781f318438d48589a56db709124c4
[ "MIT" ]
3
2020-05-02T20:21:45.000Z
2022-01-25T18:48:28.000Z
#!/usr/bin/env python3 import argparse import bisect import csv import json import os from collections import defaultdict from functools import reduce from tqdm import tqdm def get_best_evidence(scores_file, max_sentences_per_claim): weighted_claim_evidence = defaultdict(lambda: []) with open(scores_file, "r") as f: nlines = reduce(lambda a, b: a + b, map(lambda x: 1, f.readlines()), 0) f.seek(0) lines = csv.reader(f, delimiter="\t") for line in tqdm(lines, desc="Score", total=nlines): claim_id, claim, page, sent_id, sent, score = line claim_id, sent_id, score = int(claim_id), int(sent_id), float(score) evid = (page, sent_id, sent) bisect.insort(weighted_claim_evidence[claim_id], (-score, evid)) if len(weighted_claim_evidence[claim_id]) > max_sentences_per_claim: weighted_claim_evidence[claim_id].pop() for claim_id in weighted_claim_evidence: for i, (score, evid) in enumerate(weighted_claim_evidence[claim_id]): weighted_claim_evidence[claim_id][i] = (-score, evid) return weighted_claim_evidence def main(scores_file, in_file, out_file, max_sentences_per_claim=None): path = os.getcwd() scores_file = os.path.join(path, scores_file) in_file = os.path.join(path, in_file) out_file = os.path.join(path, out_file) best_evidence = get_best_evidence(scores_file, max_sentences_per_claim) with open(out_file, "w+") as fout: with open(in_file, "r") as fin: nlines = reduce(lambda a, b: a + b, map(lambda x: 1, fin.readlines()), 0) fin.seek(0) lines = map(json.loads, fin.readlines()) for line in tqdm(lines, desc="Claim", total=nlines): claim_id = line["id"] line["predicted_sentences"] = best_evidence[claim_id] fout.write(json.dumps(line) + "\n") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--scores-file", type=str) parser.add_argument("--in-file", type=str, help="input dataset") parser.add_argument("--out-file", type=str, help="path to save output dataset") parser.add_argument("--max-sentences-per-claim", type=int, help="number of top sentences to return for each claim") args = parser.parse_args() main(args.scores_file, args.in_file, args.out_file, max_sentences_per_claim=args.max_sentences_per_claim)
40.596774
109
0.65594
0
0
0
0
0
0
0
0
248
0.09853
be514c5db015a36e1e21cf77afc4f28e841509a0
4,455
py
Python
bot/__main__.py
KOTBOTS/Telegram-CloneBot
446d66ba46817f784e8de2b8bd2966865ee1965f
[ "MIT" ]
1
2021-11-10T05:06:00.000Z
2021-11-10T05:06:00.000Z
bot/__main__.py
KOTBOTS/Telegram-CloneBot
446d66ba46817f784e8de2b8bd2966865ee1965f
[ "MIT" ]
null
null
null
bot/__main__.py
KOTBOTS/Telegram-CloneBot
446d66ba46817f784e8de2b8bd2966865ee1965f
[ "MIT" ]
1
2022-01-30T08:50:28.000Z
2022-01-30T08:50:28.000Z
from telegram.ext import CommandHandler, run_async from bot.gDrive import GoogleDriveHelper from bot.fs_utils import get_readable_file_size from bot import LOGGER, dispatcher, updater, bot from bot.config import BOT_TOKEN, OWNER_ID, GDRIVE_FOLDER_ID from bot.decorators import is_authorised, is_owner from telegram.error import TimedOut, BadRequest from bot.clone_status import CloneStatus from bot.msg_utils import deleteMessage, sendMessage import time REPO_LINK = "https://t.me/KOT_BOTS" # Soon to be used for direct updates from within the bot. @run_async def start(update, context): sendMessage("Hello! Please send me a Google Drive Shareable Link to Clone to your Drive!" \ "\nSend /help for checking all available commands.", context.bot, update, 'Markdown') # ;-; @run_async def helper(update, context): sendMessage("Here are the available commands of the bot\n\n" \ "*Usage:* `/clone <link> [DESTINATION_ID]`\n*Example:* \n1. `/clone https://drive.google.com/drive/u/1/folders/0AO-ISIXXXXXXXXXXXX`\n2. `/clone 0AO-ISIXXXXXXXXXXXX`" \ "\n*DESTIONATION_ID* is optional. It can be either link or ID to where you wish to store a particular clone." \ "\n\nYou can also *ignore folders* from clone process by doing the following:\n" \ "`/clone <FOLDER_ID> [DESTINATION] [id1,id2,id3]`\n In this example: id1, id2 and id3 would get ignored from cloning\nDo not use <> or [] in actual message." \ "*Make sure to not put any space between commas (,).*\n" \ f"Source of this bot: [GitHub]({REPO_LINK})", context.bot, update, 'Markdown') # TODO Cancel Clones with /cancel command. @run_async @is_authorised def cloneNode(update, context): args = update.message.text.split(" ") if len(args) > 1: link = args[1] try: ignoreList = args[-1].split(',') except IndexError: ignoreList = [] DESTINATION_ID = GDRIVE_FOLDER_ID try: DESTINATION_ID = args[2] print(DESTINATION_ID) except IndexError: pass # Usage: /clone <FolderToClone> <Destination> <IDtoIgnoreFromClone>,<IDtoIgnoreFromClone> msg = sendMessage(f"<b>Cloning:</b> <code>{link}</code>", context.bot, update) status_class = CloneStatus() gd = GoogleDriveHelper(GFolder_ID=DESTINATION_ID) sendCloneStatus(update, context, status_class, msg, link) result = gd.clone(link, status_class, ignoreList=ignoreList) deleteMessage(context.bot, msg) status_class.set_status(True) sendMessage(result, context.bot, update) else: sendMessage("Please Provide a Google Drive Shared Link to Clone.", bot, update) @run_async def sendCloneStatus(update, context, status, msg, link): old_text = '' while not status.done(): sleeper(3) try: text=f'🔗 *Cloning:* [{status.MainFolderName}]({status.MainFolderLink})\n━━━━━━━━━━━━━━\n🗃️ *Current File:* `{status.get_name()}`\n⬆️ *Transferred*: `{status.get_size()}`\n📁 *Destination:* [{status.DestinationFolderName}]({status.DestinationFolderLink})' if status.checkFileStatus(): text += f"\n🕒 *Checking Existing Files:* `{str(status.checkFileStatus())}`" if not text == old_text: msg.edit_text(text=text, parse_mode="Markdown", timeout=200) old_text = text except Exception as e: LOGGER.error(e) if str(e) == "Message to edit not found": break sleeper(2) continue return def sleeper(value, enabled=True): time.sleep(int(value)) return @run_async @is_owner def sendLogs(update, context): with open('log.txt', 'rb') as f: bot.send_document(document=f, filename=f.name, reply_to_message_id=update.message.message_id, chat_id=update.message.chat_id) def main(): LOGGER.info("Bot Started!") clone_handler = CommandHandler('clone', cloneNode) start_handler = CommandHandler('start', 
start) help_handler = CommandHandler('help', helper) log_handler = CommandHandler('logs', sendLogs) dispatcher.add_handler(log_handler) dispatcher.add_handler(start_handler) dispatcher.add_handler(clone_handler) dispatcher.add_handler(help_handler) updater.start_polling() main()
40.87156
265
0.655892
0
0
0
0
3,371
0.748945
0
0
1,573
0.349478
be520ba7720ed297f3538b6906896f4c66ca61d8
8,180
py
Python
src/pyfinlab/risk_models.py
AnaSan27/pyfinlab
509cc9544af5e1a5b2b642eca9ae02d383dd743c
[ "BSD-3-Clause" ]
1
2021-10-05T19:34:34.000Z
2021-10-05T19:34:34.000Z
src/pyfinlab/risk_models.py
AnaSan27/pyfinlab
509cc9544af5e1a5b2b642eca9ae02d383dd743c
[ "BSD-3-Clause" ]
null
null
null
src/pyfinlab/risk_models.py
AnaSan27/pyfinlab
509cc9544af5e1a5b2b642eca9ae02d383dd743c
[ "BSD-3-Clause" ]
null
null
null
import pandas as pd import numpy as np from portfoliolab.utils import RiskMetrics from portfoliolab.estimators import RiskEstimators from pypfopt import risk_models as risk_models_ """ Available covariance risk models in PortfolioLab library. https://hudson-and-thames-portfoliolab-pro.readthedocs-hosted.com/en/latest/estimators/risk_estimators.html Available covariance risk models in PyPortfolioOpt library. https://pyportfolioopt.readthedocs.io/en/latest/RiskModels.html# These functions bring together all covariance matrix risk models from PortfolioLab and PyPortfolioOpt into one function for ease of use. """ risk_met = RiskMetrics() risk_estimators = RiskEstimators() risk_models = [ # PyPortfolioOpt 'sample_cov', 'semicovariance', 'exp_cov', 'ledoit_wolf_constant_variance', 'ledoit_wolf_single_factor', 'ledoit_wolf_constant_correlation', 'oracle_approximating', # PortfolioLab 'sample_covariance', 'minimum_covariance_determinant', 'empirical_covariance', 'shrinked_covariance_basic', 'shrinked_covariance_lw', 'shrinked_covariance_oas', 'semi_covariance', 'exponential_covariance', 'constant_residual_eigenvalues_denoised', 'constant_residual_spectral_denoised', 'targeted_shrinkage_denoised', 'targeted_shrinkage_detoned', 'constant_residual_detoned', 'hierarchical_filtered_complete', 'hierarchical_filtered_single', 'hierarchical_filtered_avg' ] def risk_model(prices, model, kde_bwidth=0.01, basic_shrinkage=0.1): """ Calculates the covariance matrix for a dataframe of asset prices. :param prices: (pd.DataFrame) Dataframe where each column is a series of prices for an asset. :param model: (str) Risk model to use. Should be one of: PyPortfolioOpt - 'sample_cov', - 'semicovariance', - 'exp_cov', - 'ledoit_wolf_constant_variance', - 'ledoit_wolf_single_factor' - 'ledoit_wolf_constant_correlation', - 'oracle_approximating' PortfolioLab - 'sample_covariance', - 'minimum_covariance_determinant', - 'empirical_covariance', - 'shrinked_covariance_basic', - 'shrinked_covariance_lw', - 'shrinked_covariance_oas', - 'semi_covariance', - 'exponential_covariance', - 'constant_residual_eigenvalues_denoised', - 'constant_residual_spectral_denoised', - 'targeted_shrinkage_denoised', - 'targeted_shrinkage_detoned', - 'constant_residual_detoned', - 'hierarchical_filtered_complete', - 'hierarchical_filtered_single', - 'hierarchical_filtered_avg' :param kde_bwidth: (float) Optional, bandwidth of the kernel to fit KDE. (0.01 by default) :param basic_shrinkage: (float) Optional, between 0 and 1. Coefficient in the convex combination for basic shrinkage. (0.1 by default) :return: (pd.DataFrame) Estimated covariance matrix. 
""" tn_relation = prices.shape[0] / prices.shape[1] sample_cov = prices.pct_change().dropna().cov() empirical_cov = pd.DataFrame(risk_estimators.empirical_covariance(prices, price_data=True), index=sample_cov.index, columns=sample_cov.columns) empirical_corr = pd.DataFrame(risk_estimators.cov_to_corr(empirical_cov ** 2), index=sample_cov.index, columns=sample_cov.columns) std = np.diag(empirical_cov) ** (1 / 2) if model == 'sample_covariance': return prices.pct_change().dropna().cov() elif model == 'minimum_covariance_determinant': covariance_matrix = risk_estimators.minimum_covariance_determinant(prices, price_data=True) elif model == 'empirical_covariance': covariance_matrix = risk_estimators.empirical_covariance(prices, price_data=True) elif model == 'shrinked_covariance_basic': covariance_matrix = risk_estimators.shrinked_covariance( prices, price_data=True, shrinkage_type='basic', basic_shrinkage=basic_shrinkage) elif model == 'shrinked_covariance_lw': covariance_matrix = risk_estimators.shrinked_covariance( prices, price_data=True, shrinkage_type='lw', basic_shrinkage=basic_shrinkage) elif model == 'shrinked_covariance_oas': covariance_matrix = risk_estimators.shrinked_covariance( prices, price_data=True, shrinkage_type='oas', basic_shrinkage=basic_shrinkage) elif model == 'semi_covariance': covariance_matrix = risk_estimators.semi_covariance(prices, price_data=True, threshold_return=0) elif model == 'exponential_covariance': covariance_matrix = risk_estimators.exponential_covariance(prices, price_data=True, window_span=60) elif model == 'constant_residual_eigenvalues_denoised': covariance_matrix = risk_estimators.denoise_covariance( empirical_cov, tn_relation, denoise_method='const_resid_eigen', detone=False, kde_bwidth=kde_bwidth) elif model == 'constant_residual_spectral_denoised': covariance_matrix = risk_estimators.denoise_covariance(empirical_cov, tn_relation, denoise_method='spectral') elif model == 'targeted_shrinkage_denoised': covariance_matrix = risk_estimators.denoise_covariance( empirical_cov, tn_relation, denoise_method='target_shrink', detone=False, kde_bwidth=kde_bwidth) elif model == 'targeted_shrinkage_detoned': covariance_matrix = risk_estimators.denoise_covariance( empirical_cov, tn_relation, denoise_method='target_shrink', detone=True, kde_bwidth=kde_bwidth) elif model == 'constant_residual_detoned': covariance_matrix = risk_estimators.denoise_covariance( empirical_cov, tn_relation, denoise_method='const_resid_eigen', detone=True, market_component=1, kde_bwidth=kde_bwidth) elif model == 'hierarchical_filtered_complete': covariance_matrix = risk_estimators.corr_to_cov(risk_estimators.filter_corr_hierarchical( empirical_corr.to_numpy(), method='complete', draw_plot=False), std) elif model == 'hierarchical_filtered_single': covariance_matrix = risk_estimators.corr_to_cov(risk_estimators.filter_corr_hierarchical( empirical_corr.to_numpy(), method='single', draw_plot=False), std) elif model == 'hierarchical_filtered_avg': covariance_matrix = risk_estimators.corr_to_cov(risk_estimators.filter_corr_hierarchical( empirical_corr.to_numpy(), method='average', draw_plot=False), std) elif model == 'sample_cov': covariance_matrix = risk_models_.fix_nonpositive_semidefinite( risk_models_.sample_cov(prices)) / 252 elif model == 'semicovariance': covariance_matrix = risk_models_.fix_nonpositive_semidefinite( risk_models_.semicovariance(prices)) / 252 elif model == 'exp_cov': covariance_matrix = risk_models_.fix_nonpositive_semidefinite( risk_models_.exp_cov(prices, span=180)) / 
252 elif model == 'ledoit_wolf_constant_variance': covariance_matrix = risk_models_.fix_nonpositive_semidefinite( risk_models_.risk_matrix(prices, model)) / 252 elif model == 'ledoit_wolf_single_factor': covariance_matrix = risk_models_.fix_nonpositive_semidefinite( risk_models_.risk_matrix(prices, model)) / 252 elif model == 'ledoit_wolf_constant_correlation': covariance_matrix = risk_models_.fix_nonpositive_semidefinite( risk_models_.risk_matrix(prices, model)) / 252 elif model == 'oracle_approximating': covariance_matrix = risk_models_.fix_nonpositive_semidefinite( risk_models_.risk_matrix(prices, model)) / 252 else: raise NameError('You must input a risk model. Check spelling. Case-Sensitive.') if not isinstance(covariance_matrix, pd.DataFrame): covariance_matrix = pd.DataFrame(covariance_matrix, index=sample_cov.index, columns=sample_cov.columns).round(6) return covariance_matrix * 252
49.575758
121
0.718093
0
0
0
0
0
0
0
0
3,407
0.416504
be53ecbf1f6e947fe3a12409a789c5940cb5ceed
2,516
py
Python
gaussian_blur/gaussian_blur.py
Soft-illusion/ComputerVision
9afaa9eafef8ac47fdb1023c5332cff98626f1bd
[ "MIT" ]
null
null
null
gaussian_blur/gaussian_blur.py
Soft-illusion/ComputerVision
9afaa9eafef8ac47fdb1023c5332cff98626f1bd
[ "MIT" ]
null
null
null
gaussian_blur/gaussian_blur.py
Soft-illusion/ComputerVision
9afaa9eafef8ac47fdb1023c5332cff98626f1bd
[ "MIT" ]
null
null
null
import cv2 as cv import sys import numpy as np import random as r import os from PIL import Image as im def noisy(noise_typ,image): if noise_typ == "gauss": # Generate Gaussian noise gauss = np.random.normal(0,1,image.size) print(gauss) gauss = gauss.reshape(image.shape[0],image.shape[1],image.shape[2]).astype('uint8') # Add the Gaussian noise to the image img_gauss = cv.add(image,gauss) cv.imwrite("Noise.png", gauss) return img_gauss elif noise_typ == "s&p": row,col,ch = image.shape s_vs_p = 0.5 amount = 0.004 out = np.copy(image) # Salt mode num_salt = np.ceil(amount * image.size * s_vs_p) coords = [np.random.randint(0, i - 1, int(num_salt)) for i in image.shape] out[coords] = 1 # Pepper mode num_pepper = np.ceil(amount* image.size * (1. - s_vs_p)) coords = [np.random.randint(0, i - 1, int(num_pepper)) for i in image.shape] out[coords] = 0 return out elif noise_typ == "poisson": vals = len(np.unique(image)) vals = 2 ** np.ceil(np.log2(vals)) noisy = np.random.poisson(image * vals) / float(vals) return noisy elif noise_typ =="speckle": row,col,ch = image.shape gauss = np.random.randn(row,col,ch) gauss = gauss.reshape(row,col,ch) noisy = image + image * gauss return noisy img = cv.imread(cv.samples.findFile("3.png")) if img is None: sys.exit("Could not read the image.") else : width , height , depth = img.shape img_noisy = noisy("gauss",img) for kernal_size in range (1,71,2): print(kernal_size) dst = cv.GaussianBlur(img_noisy,(kernal_size,kernal_size),0) # print( cv.getGaussianKernel(kernal_size,0)) file_name = "gaussian_blur" + str(kernal_size) + ".png" cv.imwrite(file_name, dst) # dst = img_noisy # for kernal_no in range (0,200): # print(kernal_no) # dst = cv.GaussianBlur(dst,(3,3),1) # # print( cv.getGaussianKernel(kernal_size,3)) # file_name = "gaussian_blur" + str(kernal_no) + ".png" # cv.imwrite(file_name, dst) for kernal_size in range (1,71,2): print(kernal_size) dst = cv.bilateralFilter(img_noisy,kernal_size,300,300) # print( cv.getGaussianKernel(kernal_size,0)) file_name = "bilateral_blur" + str(kernal_size) + ".png" cv.imwrite(file_name, dst)
32.675325
91
0.598967
0
0
0
0
0
0
0
0
566
0.22496
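The script above sweeps Gaussian and bilateral kernel sizes on a noisy photo; passing sigma 0 to cv.GaussianBlur makes OpenCV derive sigma from the kernel size, which is what the sweep relies on. A small self-contained sketch (not from the record) that quantifies the same idea with a synthetic image and a hand-rolled PSNR, so it runs without the "3.png" input:

import numpy as np
import cv2 as cv

# Synthetic test image so the sketch runs without an input file
clean = np.full((128, 128, 3), 127, dtype=np.uint8)
cv.circle(clean, (64, 64), 30, (255, 255, 255), -1)

noise = np.random.normal(0, 15, clean.shape)
noisy = np.clip(clean.astype(np.float64) + noise, 0, 255).astype(np.uint8)

def psnr(a, b):
    # peak signal-to-noise ratio in dB for 8-bit images
    mse = np.mean((a.astype(np.float64) - b.astype(np.float64)) ** 2)
    return 10 * np.log10(255.0 ** 2 / mse)

for ksize in (3, 7, 15, 31):
    blurred = cv.GaussianBlur(noisy, (ksize, ksize), 0)
    print(ksize, round(psnr(clean, blurred), 2))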
be55e1c8b12cbd1b4bd83120c737d0990e906ce2
3,223
py
Python
citywok_ms/employee/routes.py
fossabot/CityWok-Manager
ccd31eb684ddeec5c741c9520c779d98eb0e3cc6
[ "MIT" ]
null
null
null
citywok_ms/employee/routes.py
fossabot/CityWok-Manager
ccd31eb684ddeec5c741c9520c779d98eb0e3cc6
[ "MIT" ]
null
null
null
citywok_ms/employee/routes.py
fossabot/CityWok-Manager
ccd31eb684ddeec5c741c9520c779d98eb0e3cc6
[ "MIT" ]
null
null
null
from citywok_ms.file.models import EmployeeFile, File import citywok_ms.employee.messages as employee_msg import citywok_ms.file.messages as file_msg from citywok_ms.employee.forms import EmployeeForm from citywok_ms.file.forms import FileForm from flask import Blueprint, flash, redirect, render_template, url_for from citywok_ms.employee.models import Employee employee = Blueprint("employee", __name__, url_prefix="/employee") @employee.route("/") def index(): return render_template( "employee/index.html", title=employee_msg.INDEX_TITLE, active_employees=Employee.get_active(), suspended_employees=Employee.get_suspended(), ) @employee.route("/new", methods=["GET", "POST"]) def new(): form = EmployeeForm() if form.validate_on_submit(): employee = Employee.create_by_form(form) flash(employee_msg.NEW_SUCCESS.format(name=employee.full_name), "success") return redirect(url_for("employee.index")) return render_template( "employee/form.html", title=employee_msg.NEW_TITLE, form=form ) @employee.route("/<int:employee_id>") def detail(employee_id): return render_template( "employee/detail.html", title=employee_msg.DETAIL_TITLE, employee=Employee.get_or_404(employee_id), file_form=FileForm(), ) @employee.route("/<int:employee_id>/update", methods=["GET", "POST"]) def update(employee_id): employee = Employee.get_or_404(employee_id) form = EmployeeForm() form.hide_id.data = employee_id if form.validate_on_submit(): employee.update_by_form(form) flash(employee_msg.UPDATE_SUCCESS.format(name=employee.full_name), "success") return redirect(url_for("employee.detail", employee_id=employee_id)) form.process(obj=employee) return render_template( "employee/form.html", employee=employee, form=form, title=employee_msg.UPDATE_TITLE, ) @employee.route("/<int:employee_id>/suspend", methods=["POST"]) def suspend(employee_id): employee = Employee.get_or_404(employee_id) employee.suspend() flash(employee_msg.SUSPEND_SUCCESS.format(name=employee.full_name), "success") return redirect(url_for("employee.detail", employee_id=employee_id)) @employee.route("/<int:employee_id>/activate", methods=["POST"]) def activate(employee_id): employee = Employee.get_or_404(employee_id) employee.activate() flash(employee_msg.ACTIVATE_SUCCESS.format(name=employee.full_name), "success") return redirect(url_for("employee.detail", employee_id=employee_id)) @employee.route("/<int:employee_id>/upload", methods=["POST"]) def upload(employee_id): form = FileForm() file = form.file.data if form.validate_on_submit(): db_file = EmployeeFile.create_by_form(form, Employee.get_or_404(employee_id)) flash(file_msg.UPLOAD_SUCCESS.format(name=db_file.full_name), "success") elif file is not None: flash( file_msg.INVALID_FORMAT.format(format=File.split_file_format(file)), "danger", ) else: flash(file_msg.NO_FILE, "danger") return redirect(url_for("employee.detail", employee_id=employee_id))
33.926316
85
0.714552
0
0
0
0
2,771
0.859758
0
0
429
0.133106
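For context, the blueprint defined in the record is only useful once it is registered on an application. A minimal registration sketch follows; it assumes the citywok_ms package imports cleanly outside the project's own application factory (which presumably also configures the database and templates), so treat it as illustrative only.

from flask import Flask
from citywok_ms.employee.routes import employee  # the blueprint defined above

app = Flask(__name__)
app.register_blueprint(employee)

# list the routes contributed by the blueprint
for rule in app.url_map.iter_rules():
    if rule.endpoint.startswith('employee.'):
        print(rule.endpoint, sorted(rule.methods), rule.rule)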
be5634f2d2873fa0b75fded2fda0cc44792517a3
9,041
py
Python
kitsune/customercare/cron.py
safwanrahman/Ford
87e91dea1cc22b1759eea81cef069359ccb5cd0b
[ "BSD-3-Clause" ]
1
2017-07-03T12:11:03.000Z
2017-07-03T12:11:03.000Z
kitsune/customercare/cron.py
feer56/Kitsune1
0b39cbc41cb7a067699ce8401d80205dd7c5138d
[ "BSD-3-Clause" ]
8
2020-06-05T18:42:14.000Z
2022-03-11T23:26:51.000Z
kitsune/customercare/cron.py
safwanrahman/Ford
87e91dea1cc22b1759eea81cef069359ccb5cd0b
[ "BSD-3-Clause" ]
null
null
null
import calendar from datetime import datetime, timedelta import json import logging import re import rfc822 from django.conf import settings from django.db.utils import IntegrityError import cronjobs from multidb.pinning import pin_this_thread from statsd import statsd from twython import Twython from kitsune.customercare.models import Tweet, TwitterAccount, Reply from kitsune.sumo.redis_utils import redis_client, RedisError from kitsune.sumo.utils import chunked LINK_REGEX = re.compile('https?\:', re.IGNORECASE) RT_REGEX = re.compile('^rt\W', re.IGNORECASE) ALLOWED_USERS = [ {'id': 2142731, 'username': 'Firefox'}, {'id': 150793437, 'username': 'FirefoxBrasil'}, {'id': 107272435, 'username': 'firefox_es'}, ] log = logging.getLogger('k.twitter') def get_word_blacklist_regex(): """ Make a regex that looks kind of like r'\b(foo|bar|baz)\b'. This is a function so that it isn't calculated at import time, and so can be tested more easily. This doesn't use raw strings (r'') because the "mismatched" parens were confusing my syntax highlighter, which was confusing me. """ return re.compile( '\\b(' + '|'.join(map(re.escape, settings.CC_WORD_BLACKLIST)) + ')\\b') @cronjobs.register def collect_tweets(): # Don't (ab)use the twitter API from dev and stage. if settings.STAGE: return """Collect new tweets about Firefox.""" with statsd.timer('customercare.tweets.time_elapsed'): t = Twython(settings.TWITTER_CONSUMER_KEY, settings.TWITTER_CONSUMER_SECRET, settings.TWITTER_ACCESS_TOKEN, settings.TWITTER_ACCESS_TOKEN_SECRET) search_options = { 'q': ('firefox OR #fxinput OR @firefoxbrasil OR #firefoxos ' 'OR @firefox_es'), 'count': settings.CC_TWEETS_PERPAGE, # Items per page. 'result_type': 'recent', # Retrieve tweets by date. } # If we already have some tweets, collect nothing older than what we # have. try: latest_tweet = Tweet.latest() except Tweet.DoesNotExist: log.debug('No existing tweets. Retrieving %d tweets from search.' % settings.CC_TWEETS_PERPAGE) else: search_options['since_id'] = latest_tweet.tweet_id log.info('Retrieving tweets with id >= %s' % latest_tweet.tweet_id) # Retrieve Tweets results = t.search(**search_options) if len(results['statuses']) == 0: # Twitter returned 0 results. return # Drop tweets into DB for item in results['statuses']: # Apply filters to tweet before saving # Allow links in #fxinput tweets statsd.incr('customercare.tweet.collected') item = _filter_tweet(item, allow_links='#fxinput' in item['text']) if not item: continue created_date = datetime.utcfromtimestamp(calendar.timegm( rfc822.parsedate(item['created_at']))) item_lang = item['metadata'].get('iso_language_code', 'en') tweet = Tweet(tweet_id=item['id'], raw_json=json.dumps(item), locale=item_lang, created=created_date) try: tweet.save() statsd.incr('customercare.tweet.saved') except IntegrityError: pass @cronjobs.register def purge_tweets(): """Periodically purge old tweets for each locale. This does a lot of DELETEs on master, so it shouldn't run too frequently. Probably once every hour or more. """ # Pin to master pin_this_thread() # Build list of tweets to delete, by id. for locale in settings.SUMO_LANGUAGES: locale = settings.LOCALES[locale].iso639_1 # Some locales don't have an iso639_1 code, too bad for them. if not locale: continue oldest = _get_oldest_tweet(locale, settings.CC_MAX_TWEETS) if oldest: log.debug('Truncating tweet list: Removing tweets older than %s, ' 'for [%s].' 
% (oldest.created, locale)) Tweet.objects.filter(locale=locale, created__lte=oldest.created).delete() def _get_oldest_tweet(locale, n=0): """Returns the nth oldest tweet per locale, defaults to newest.""" try: return Tweet.objects.filter(locale=locale).order_by( '-created')[n] except IndexError: return None def _filter_tweet(item, allow_links=False): """ Apply some filters to an incoming tweet. May modify tweet. If None is returned, tweet will be discarded. Used to exclude replies and such from incoming tweets. """ text = item['text'].lower() # No replies, except to ALLOWED_USERS allowed_user_ids = [u['id'] for u in ALLOWED_USERS] to_user_id = item.get('to_user_id') if to_user_id and to_user_id not in allowed_user_ids: statsd.incr('customercare.tweet.rejected.reply_or_mention') return None # No mentions, except of ALLOWED_USERS for user in item['entities']['user_mentions']: if user['id'] not in allowed_user_ids: statsd.incr('customercare.tweet.rejected.reply_or_mention') return None # No retweets if RT_REGEX.search(text) or text.find('(via ') > -1: statsd.incr('customercare.tweet.rejected.retweet') return None # No links if not allow_links and LINK_REGEX.search(text): statsd.incr('customercare.tweet.rejected.link') return None screen_name = item['user']['screen_name'] # Django's caching system will save us here. IGNORED_USERS = set( TwitterAccount.objects .filter(ignored=True) .values_list('username', flat=True) ) # Exclude filtered users if screen_name in IGNORED_USERS: statsd.incr('customercare.tweet.rejected.user') return None # Exlude users with firefox in the handle if 'firefox' in screen_name.lower(): statsd.incr('customercare.tweet.rejected.firefox_in_handle') return None # Exclude problem words match = get_word_blacklist_regex().search(text) if match: bad_word = match.group(1) statsd.incr('customercare.tweet.rejected.blacklist_word.' + bad_word) return None return item @cronjobs.register def get_customercare_stats(): """ Generate customer care stats from the Replies table. This gets cached in Redis as a sorted list of contributors, stored as JSON. Example Top Contributor data: [ { 'twitter_username': 'username1', 'avatar': 'http://twitter.com/path/to/the/avatar.png', 'avatar_https': 'https://twitter.com/path/to/the/avatar.png', 'all': 5211, '1m': 230, '1w': 33, '1d': 3, }, { ... }, { ... 
}, ] """ if settings.STAGE: return contributor_stats = {} now = datetime.now() one_month_ago = now - timedelta(days=30) one_week_ago = now - timedelta(days=7) yesterday = now - timedelta(days=1) for chunk in chunked(Reply.objects.all(), 2500, Reply.objects.count()): for reply in chunk: user = reply.twitter_username if user not in contributor_stats: raw = json.loads(reply.raw_json) if 'from_user' in raw: # For tweets collected using v1 API user_data = raw else: user_data = raw['user'] contributor_stats[user] = { 'twitter_username': user, 'avatar': user_data['profile_image_url'], 'avatar_https': user_data['profile_image_url_https'], 'all': 0, '1m': 0, '1w': 0, '1d': 0, } contributor = contributor_stats[reply.twitter_username] contributor['all'] += 1 if reply.created > one_month_ago: contributor['1m'] += 1 if reply.created > one_week_ago: contributor['1w'] += 1 if reply.created > yesterday: contributor['1d'] += 1 sort_key = settings.CC_TOP_CONTRIB_SORT limit = settings.CC_TOP_CONTRIB_LIMIT # Sort by whatever is in settings, break ties with 'all' contributor_stats = sorted(contributor_stats.values(), key=lambda c: (c[sort_key], c['all']), reverse=True)[:limit] try: redis = redis_client(name='default') key = settings.CC_TOP_CONTRIB_CACHE_KEY redis.set(key, json.dumps(contributor_stats)) except RedisError as e: statsd.incr('redis.error') log.error('Redis error: %s' % e) return contributor_stats
32.289286
79
0.605243
0
0
0
0
5,648
0.62471
0
0
3,190
0.352837
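The tweet filter above rejects tweets whose text matches a word-boundary regex built from settings.CC_WORD_BLACKLIST. A standalone illustration of that regex, using a made-up word list in place of the real setting:

import re

word_blacklist = ['spam', 'giveaway']  # stand-in for settings.CC_WORD_BLACKLIST
pattern = re.compile(
    '\\b(' + '|'.join(map(re.escape, word_blacklist)) + ')\\b')

print(bool(pattern.search('firefox giveaway click here')))  # True  -> tweet rejected
print(bool(pattern.search('firefox crashed on startup')))   # False -> tweet kept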
be5740f5f8c7bb04c4a6f3ebc3c04afbcec0a250
1,333
py
Python
setup.py
nrcmedia/pdfrw
2a3c9caded906b7ca71f1a338673a24f90eb0e5c
[ "MIT" ]
2
2015-01-16T18:07:34.000Z
2015-11-01T05:07:15.000Z
setup.py
nrcmedia/pdfrw
2a3c9caded906b7ca71f1a338673a24f90eb0e5c
[ "MIT" ]
null
null
null
setup.py
nrcmedia/pdfrw
2a3c9caded906b7ca71f1a338673a24f90eb0e5c
[ "MIT" ]
null
null
null
#!/usr/bin/env python

from distutils.core import setup

try:
    import setuptools
except:
    pass

setup(
    name='pdfrw',
    version='0.1',
    description='PDF file reader/writer library',
    long_description='''
pdfrw lets you read and write PDF files, including
compositing multiple pages together (e.g. to do watermarking,
or to copy an image or diagram from one PDF to another),
and can output by itself, or in conjunction with reportlab.

pdfrw will faithfully reproduce vector formats without
rasterization, so the rst2pdf package has used pdfrw
by default for PDF and SVG images since March 2010.

Several small examples are provided.
''',
    author='Patrick Maupin',
    author_email='[email protected]',
    platforms='Independent',
    url='http://code.google.com/p/pdfrw/',
    packages=['pdfrw', 'pdfrw.objects'],
    license='MIT',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Multimedia :: Graphics :: Graphics Conversion',
        'Topic :: Software Development :: Libraries',
        'Topic :: Utilities'
    ],
    keywords='pdf vector graphics',
)
31
65
0.675919
0
0
0
0
0
0
0
0
958
0.71868
be5b35007ab39510b966782ec2dccb27e2f0b068
2,429
py
Python
checkAnnotation.py
ZZIDZZ/pytorch-ssd
8d3ad092825d6f05b8a3fa7c25be7b541bf86ed9
[ "MIT" ]
null
null
null
checkAnnotation.py
ZZIDZZ/pytorch-ssd
8d3ad092825d6f05b8a3fa7c25be7b541bf86ed9
[ "MIT" ]
null
null
null
checkAnnotation.py
ZZIDZZ/pytorch-ssd
8d3ad092825d6f05b8a3fa7c25be7b541bf86ed9
[ "MIT" ]
null
null
null
import argparse
import sys
import cv2
import os
import os.path as osp
import numpy as np

if sys.version_info[0] == 2:
    import xml.etree.cElementTree as ET
else:
    import xml.etree.ElementTree as ET

parser = argparse.ArgumentParser(
    description='Single Shot MultiBox Detector Training With Pytorch')
train_set = parser.add_mutually_exclusive_group()
parser.add_argument('--root', help='Dataset root directory path')

args = parser.parse_args()

CLASSES = (  # always index 0
    'helmet', 'vest', 'no_helmet')

annopath = osp.join('%s', 'Annotations', '%s.{}'.format("xml"))
imgpath = osp.join('%s', 'JPEGImages', '%s.{}'.format("jpg"))


def vocChecker(image_id, width, height, keep_difficult = False):
    target = ET.parse(annopath % image_id).getroot()
    res = []

    for obj in target.iter('object'):

        difficult = int(obj.find('difficult').text) == 1

        if not keep_difficult and difficult:
            continue

        name = obj.find('name').text.lower().strip()
        bbox = obj.find('bndbox')

        pts = ['xmin', 'ymin', 'xmax', 'ymax']
        bndbox = []

        for i, pt in enumerate(pts):

            cur_pt = int(bbox.find(pt).text) - 1
            # scale x coordinates by width, y coordinates by height
            cur_pt = float(cur_pt) / width if i % 2 == 0 else float(cur_pt) / height

            bndbox.append(cur_pt)

        print(name)
        label_idx = dict(zip(CLASSES, range(len(CLASSES))))[name]
        bndbox.append(label_idx)
        res += [bndbox]  # [xmin, ymin, xmax, ymax, label_ind]
        # img_id = target.find('filename').text[:-4]
    print(res)
    try:
        print(np.array(res)[:,4])
        print(np.array(res)[:,:4])
    except IndexError:
        print("\nINDEX ERROR HERE !\n")
        exit(0)

    return res  # [[xmin, ymin, xmax, ymax, label_ind], ... ]


if __name__ == '__main__':

    i = 0

    for name in sorted(os.listdir(osp.join(args.root, 'Annotations'))):
        # as we have only one annotations file per image
        i += 1

        img = cv2.imread(imgpath % (args.root, name.split('.')[0]))
        height, width, channels = img.shape

        print("path : {}".format(annopath % (args.root, name.split('.')[0])))
        # pass width and height in the order the function expects
        res = vocChecker((args.root, name.split('.')[0]), width, height)

    print("Total of annotations : {}".format(i))
29.987654
84
0.588308
0
0
0
0
0
0
0
0
539
0.221902
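A tiny worked example of the normalisation performed inside vocChecker above: x coordinates (indices 0 and 2 of the pts list) are divided by the image width, y coordinates by the height. The numbers are made up.

width, height = 400, 300
xmin, ymin, xmax, ymax = 49, 99, 199, 299   # VOC pixel coordinates, already minus 1
bndbox = [xmin / width, ymin / height, xmax / width, ymax / height]
print(bndbox)  # [0.1225, 0.33, 0.4975, 0.9966...]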
be5b3780be0df5ceef2f2e2a8a4f5c6573838a4e
3,215
py
Python
src/oci/identity_data_plane/models/password_reset_authentication_request.py
LaudateCorpus1/oci-python-sdk
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
[ "Apache-2.0", "BSD-3-Clause" ]
null
null
null
src/oci/identity_data_plane/models/password_reset_authentication_request.py
LaudateCorpus1/oci-python-sdk
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
[ "Apache-2.0", "BSD-3-Clause" ]
null
null
null
src/oci/identity_data_plane/models/password_reset_authentication_request.py
LaudateCorpus1/oci-python-sdk
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
[ "Apache-2.0", "BSD-3-Clause" ]
null
null
null
# coding: utf-8 # Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401 from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class PasswordResetAuthenticationRequest(object): """ PasswordResetAuthenticationRequest model. """ def __init__(self, **kwargs): """ Initializes a new PasswordResetAuthenticationRequest object with values from keyword arguments. The following keyword arguments are supported (corresponding to the getters/setters of this class): :param user_id: The value to assign to the user_id property of this PasswordResetAuthenticationRequest. :type user_id: str :param password_reset_token: The value to assign to the password_reset_token property of this PasswordResetAuthenticationRequest. :type password_reset_token: str """ self.swagger_types = { 'user_id': 'str', 'password_reset_token': 'str' } self.attribute_map = { 'user_id': 'userId', 'password_reset_token': 'passwordResetToken' } self._user_id = None self._password_reset_token = None @property def user_id(self): """ **[Required]** Gets the user_id of this PasswordResetAuthenticationRequest. The id of the user :return: The user_id of this PasswordResetAuthenticationRequest. :rtype: str """ return self._user_id @user_id.setter def user_id(self, user_id): """ Sets the user_id of this PasswordResetAuthenticationRequest. The id of the user :param user_id: The user_id of this PasswordResetAuthenticationRequest. :type: str """ self._user_id = user_id @property def password_reset_token(self): """ **[Required]** Gets the password_reset_token of this PasswordResetAuthenticationRequest. The password reset token :return: The password_reset_token of this PasswordResetAuthenticationRequest. :rtype: str """ return self._password_reset_token @password_reset_token.setter def password_reset_token(self, password_reset_token): """ Sets the password_reset_token of this PasswordResetAuthenticationRequest. The password reset token :param password_reset_token: The password_reset_token of this PasswordResetAuthenticationRequest. :type: str """ self._password_reset_token = password_reset_token def __repr__(self): return formatted_flat_dict(self) def __eq__(self, other): if other is None: return False return self.__dict__ == other.__dict__ def __ne__(self, other): return not self == other
31.519608
245
0.675894
2,675
0.832037
0
0
2,705
0.841369
0
0
2,022
0.628927
be5d745da0aee12618b5456e7d8cbede2e23e222
656
py
Python
venv/lib/python3.7/site-packages/convertdate/dublin.py
vchiapaikeo/prophet
e8c250ca7bfffc280baa7dabc80a2c2d1f72c6a7
[ "MIT" ]
null
null
null
venv/lib/python3.7/site-packages/convertdate/dublin.py
vchiapaikeo/prophet
e8c250ca7bfffc280baa7dabc80a2c2d1f72c6a7
[ "MIT" ]
null
null
null
venv/lib/python3.7/site-packages/convertdate/dublin.py
vchiapaikeo/prophet
e8c250ca7bfffc280baa7dabc80a2c2d1f72c6a7
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # This file is part of convertdate. # http://github.com/fitnr/convertdate # Licensed under the MIT license: # http://opensource.org/licenses/MIT # Copyright (c) 2016, fitnr <fitnr@fakeisthenewreal> '''Convert to and from the Dublin day count''' from . import daycount EPOCH = 2415020 # Julian Day Count for Dublin Count 0 _dublin = daycount.DayCount(EPOCH) to_gregorian = _dublin.to_gregorian from_gregorian = _dublin.from_gregorian to_jd = _dublin.to_jd from_jd = _dublin.from_jd from_julian = _dublin.from_julian to_julian = _dublin.to_julian to_datetime = _dublin.to_datetime from_datetime = _dublin.from_datetime
19.878788
54
0.762195
0
0
0
0
0
0
0
0
299
0.455793
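The module above is a thin wrapper around a single subtraction: a Dublin day count is a Julian Day number minus the epoch JD 2415020. A rough worked example, using the conventional JD 2451544.5 for 2000-01-01 00:00 UT; the module's own API should give the same figure.

EPOCH = 2415020
jd_2000_01_01 = 2451544.5          # 2000-01-01 00:00 UT as a Julian Day number
print(jd_2000_01_01 - EPOCH)       # 36524.5

# equivalently, via the module itself:
# from convertdate import dublin
# dublin.from_gregorian(2000, 1, 1)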
be5dd7bfd950d236cdb2d9db1cde1c0dbae6c636
5,250
py
Python
tests/functional/controllers/test_group_controller_superuser.py
roscisz/TensorHive
4a680f47a0ee1ce366dc82ad9964e229d9749c4e
[ "Apache-2.0" ]
129
2017-08-25T11:45:15.000Z
2022-03-29T05:11:25.000Z
tests/functional/controllers/test_group_controller_superuser.py
roscisz/TensorHive
4a680f47a0ee1ce366dc82ad9964e229d9749c4e
[ "Apache-2.0" ]
251
2017-07-27T10:05:58.000Z
2022-03-02T12:46:13.000Z
tests/functional/controllers/test_group_controller_superuser.py
roscisz/TensorHive
4a680f47a0ee1ce366dc82ad9964e229d9749c4e
[ "Apache-2.0" ]
20
2017-08-13T13:05:14.000Z
2022-03-19T02:21:37.000Z
from tensorhive.models.Group import Group from fixtures.controllers import API_URI as BASE_URI, HEADERS from http import HTTPStatus from importlib import reload import json import auth_patcher ENDPOINT = BASE_URI + '/groups' def setup_module(_): auth_patches = auth_patcher.get_patches(superuser=True) for auth_patch in auth_patches: auth_patch.start() for module in auth_patcher.CONTROLLER_MODULES: reload(module) for auth_patch in auth_patches: auth_patch.stop() # POST /groups def test_create_group(tables, client): group_name = 'TestGroup' data = {'name': group_name} resp = client.post(ENDPOINT, headers=HEADERS, data=json.dumps(data)) resp_json = json.loads(resp.data.decode('utf-8')) assert resp.status_code == HTTPStatus.CREATED assert resp_json['group']['id'] is not None assert resp_json['group']['name'] == group_name assert Group.get(int(resp_json['group']['id'])) is not None # PUT /groups/{id} def test_update_group(tables, client, new_group): new_group.save() new_group_name = new_group.name + '111' resp = client.put(ENDPOINT + '/' + str(new_group.id), headers=HEADERS, data=json.dumps({'name': new_group_name})) resp_json = json.loads(resp.data.decode('utf-8')) assert resp.status_code == HTTPStatus.OK assert resp_json['group']['name'] == new_group_name assert Group.get(new_group.id).name == new_group_name # PUT /groups/{id} - nonexistent id def test_update_group_that_doesnt_exist(tables, client): non_existent_id = '777' resp = client.put(ENDPOINT + '/' + non_existent_id, headers=HEADERS, data=json.dumps({'name': 'test'})) assert resp.status_code == HTTPStatus.NOT_FOUND # DELETE /groups/{id} def test_delete_group(tables, client, new_group): new_group.save() resp = client.delete(ENDPOINT + '/' + str(new_group.id), headers=HEADERS) assert resp.status_code == HTTPStatus.OK # Let's get all groups to verify resp = client.get(ENDPOINT, headers=HEADERS) resp_json = json.loads(resp.data.decode('utf-8')) assert len(resp_json) == 0 # DELETE /groups/{id} - nonexistent id def test_delete_group_that_doesnt_exist(tables, client): non_existent_id = '777' resp = client.delete(ENDPOINT + '/' + non_existent_id, headers=HEADERS) assert resp.status_code == HTTPStatus.NOT_FOUND # PUT /groups/{id}/users/{id} def test_add_user_to_a_group(tables, client, new_group, new_user): new_group.save() new_user.save() resp = client.put(ENDPOINT + '/{}/users/{}'.format(new_group.id, new_user.id), headers=HEADERS) assert resp.status_code == HTTPStatus.OK assert new_group in new_user.groups assert new_user in new_group.users # DELETE /groups/{id}/users/{id} def test_remove_user_from_a_group(tables, client, new_group_with_member): new_group_with_member.save() user = new_group_with_member.users[0] resp = client.delete(ENDPOINT + '/{}/users/{}'.format(new_group_with_member.id, user.id), headers=HEADERS) assert resp.status_code == HTTPStatus.OK assert new_group_with_member not in user.groups assert user not in new_group_with_member.users # PUT /groups/{id}/users/{id} - nonexistent user id def test_add_nonexistent_user_to_a_group(tables, client, new_group): new_group.save() nonexistent_user_id = '777' resp = client.put(ENDPOINT + '/{}/users/{}'.format(new_group.id, nonexistent_user_id), headers=HEADERS) assert resp.status_code == HTTPStatus.NOT_FOUND # PUT /groups/{id}/users/{id} - nonexistent group id def test_add_user_to_nonexistent_group(tables, client, new_user): new_user.save() nonexistent_group_id = '777' resp = client.put(ENDPOINT + '/{}/users/{}'.format(nonexistent_group_id, new_user.id), headers=HEADERS) assert 
resp.status_code == HTTPStatus.NOT_FOUND # DELETE /groups/{id}/users/{id} - nonexistent user id def test_remove_nonexistent_user_from_a_group(tables, client, new_group): new_group.save() nonexistent_user_id = '777' resp = client.delete(ENDPOINT + '/{}/users/{}'.format(new_group.id, nonexistent_user_id), headers=HEADERS) assert resp.status_code == HTTPStatus.NOT_FOUND # DELETE /groups/{id}/users/{id} - nonexistent group id def test_remove_user_from_a_nonexistent_group(tables, client, new_user): new_user.save() nonexistent_group_id = '777' resp = client.delete(ENDPOINT + '/{}/users/{}'.format(nonexistent_group_id, new_user.id), headers=HEADERS) assert resp.status_code == HTTPStatus.NOT_FOUND # PUT /groups/{id} def test_set_group_as_a_default(tables, client, new_group): new_group.save() resp = client.put(ENDPOINT + '/{}'.format(new_group.id), data=json.dumps({'isDefault': True}), headers=HEADERS) assert resp.status_code == HTTPStatus.OK assert Group.get(new_group.id).is_default # PUT /groups/{id} def test_mark_default_group_as_non_default(tables, client, new_group): new_group.is_default = True new_group.save() resp = client.put(ENDPOINT + '/{}'.format(new_group.id), data=json.dumps({'isDefault': False}), headers=HEADERS) assert resp.status_code == HTTPStatus.OK assert Group.get(new_group.id).is_default is False
32.012195
117
0.717714
0
0
0
0
0
0
0
0
743
0.141524
be5e4769d08439109a7dee5ae6c729de8b3ba612
1,232
py
Python
code/generate_thought_vectors.py
midas-research/text2facegan
3770333f16234fc9328d8254d1c1112fad15a16c
[ "MIT" ]
23
2020-04-09T19:17:46.000Z
2021-04-13T13:46:06.000Z
code/generate_thought_vectors.py
midas-research/text2facegan
3770333f16234fc9328d8254d1c1112fad15a16c
[ "MIT" ]
3
2020-02-16T16:21:38.000Z
2021-05-22T13:18:57.000Z
code/generate_thought_vectors.py
midas-research/text2facegan
3770333f16234fc9328d8254d1c1112fad15a16c
[ "MIT" ]
7
2020-02-27T22:27:33.000Z
2021-03-16T06:03:32.000Z
import os
from os.path import join, isfile
import re
import numpy as np
import pickle
import argparse
import skipthoughts
import h5py

def main():
    parser = argparse.ArgumentParser()
    #parser.add_argument('--caption_file', type=str, default='Data/sample_captions.txt',
    #                    help='caption file')
    parser.add_argument('--caption_file', type=str, default='/media/ssd_working_space/osaid/Data/sample_captions.txt',
                        help='caption file')
    #parser.add_argument('--data_dir', type=str, default='Data',
    #                    help='Data Directory')
    parser.add_argument('--data_dir', type=str, default='/media/ssd_working_space/osaid/Data',
                        help='Data Directory')

    args = parser.parse_args()
    with open( args.caption_file ) as f:
        captions = f.read().split('\n')

    captions = [cap for cap in captions if len(cap) > 0]
    print(captions)
    model = skipthoughts.load_model()
    caption_vectors = skipthoughts.encode(model, captions)

    if os.path.isfile(join(args.data_dir, 'sample_caption_vectors.hdf5')):
        os.remove(join(args.data_dir, 'sample_caption_vectors.hdf5'))
    # open the file explicitly in write mode; recent h5py versions no longer
    # create a new file when no mode is given
    h = h5py.File(join(args.data_dir, 'sample_caption_vectors.hdf5'), 'w')
    h.create_dataset('vectors', data=caption_vectors)
    h.close()

if __name__ == '__main__':
    main()
30.8
115
0.728896
0
0
0
0
0
0
0
0
464
0.376623
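Not part of the record: a short sketch of reading the encoded captions back, matching the file name and the 'vectors' dataset key written above (the path is the script's default data_dir).

import h5py

with h5py.File('/media/ssd_working_space/osaid/Data/sample_caption_vectors.hdf5', 'r') as h:
    vectors = h['vectors'][:]
print(vectors.shape)  # (number of captions, embedding dimension)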
be5f92734068facbaab6ebcd59a70aae8bdb395f
415
py
Python
venv/Lib/site-packages/mcipc/rcon/response_types/difficulty.py
Svesnav2/Discord-Bot-Minecraft-server-status
ee34948e741930567a3adb557197523f9d32ace1
[ "Unlicense" ]
null
null
null
venv/Lib/site-packages/mcipc/rcon/response_types/difficulty.py
Svesnav2/Discord-Bot-Minecraft-server-status
ee34948e741930567a3adb557197523f9d32ace1
[ "Unlicense" ]
null
null
null
venv/Lib/site-packages/mcipc/rcon/response_types/difficulty.py
Svesnav2/Discord-Bot-Minecraft-server-status
ee34948e741930567a3adb557197523f9d32ace1
[ "Unlicense" ]
null
null
null
"""Parsing responses from the difficulty command.""" from mcipc.rcon.functions import boolmap __all__ = ['parse'] SET = 'The difficulty has been set to (\\w+)' UNCHANGED = 'The difficulty did not change; it is already set to (\\w+)' def parse(text: str) -> bool: """Parses a boolean value from the text returned by the difficulty command. """ return boolmap(text, true=SET, false=UNCHANGED)
21.842105
72
0.684337
0
0
0
0
0
0
0
0
245
0.590361
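A standalone check of the two message patterns above, without mcipc's boolmap helper; parse() presumably maps the first message to True and the second to False.

import re

SET = 'The difficulty has been set to (\\w+)'
UNCHANGED = 'The difficulty did not change; it is already set to (\\w+)'

print(bool(re.fullmatch(SET, 'The difficulty has been set to hard')))  # True
print(bool(re.fullmatch(UNCHANGED,
                        'The difficulty did not change; it is already set to hard')))  # True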
be6134b8d63935100cb7803033cbd22148a4202a
1,558
py
Python
eth/beacon/aggregation.py
Bhargavasomu/py-evm
ee8f72d5a70805575a967cde0a43942e1526264e
[ "MIT" ]
null
null
null
eth/beacon/aggregation.py
Bhargavasomu/py-evm
ee8f72d5a70805575a967cde0a43942e1526264e
[ "MIT" ]
null
null
null
eth/beacon/aggregation.py
Bhargavasomu/py-evm
ee8f72d5a70805575a967cde0a43942e1526264e
[ "MIT" ]
null
null
null
from typing import (
    Iterable,
    Tuple,
)

from cytoolz import (
    pipe
)

from eth._utils import bls
from eth._utils.bitfield import (
    set_voted,
)
from eth.beacon.enums import SignatureDomain
from eth.beacon.typing import (
    BLSPubkey,
    BLSSignature,
    Bitfield,
    CommitteeIndex,
)


def verify_votes(
        message: bytes,
        votes: Iterable[Tuple[CommitteeIndex, BLSSignature, BLSPubkey]],
        domain: SignatureDomain
) -> Tuple[Tuple[BLSSignature, ...], Tuple[CommitteeIndex, ...]]:
    """
    Verify the given votes.

    vote: (committee_index, sig, public_key)
    """
    sigs_with_committee_info = tuple(
        (sig, committee_index)
        for (committee_index, sig, public_key)
        in votes
        if bls.verify(message, public_key, sig, domain)
    )
    try:
        sigs, committee_indices = zip(*sigs_with_committee_info)
    except ValueError:
        sigs = tuple()
        committee_indices = tuple()

    return sigs, committee_indices


def aggregate_votes(
        bitfield: Bitfield,
        sigs: Iterable[BLSSignature],
        voting_sigs: Iterable[BLSSignature],
        voting_committee_indices: Iterable[CommitteeIndex]
) -> Tuple[Bitfield, BLSSignature]:
    """
    Aggregate the votes.
    """
    # Update the bitfield and append the signatures
    sigs = tuple(sigs) + tuple(voting_sigs)
    bitfield = pipe(
        bitfield,
        *(
            set_voted(index=committee_index)
            for committee_index in voting_committee_indices
        )
    )

    return bitfield, bls.aggregate_signatures(sigs)
23.253731
68
0.662388
0
0
0
0
0
0
0
0
168
0.107831
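The empty-votes corner case handled in verify_votes above, shown in isolation: unpacking zip(*()) into two names raises ValueError, hence the fallback to empty tuples.

pairs = (('sig1', 0), ('sig2', 3))
sigs, committee_indices = zip(*pairs)
print(sigs, committee_indices)  # ('sig1', 'sig2') (0, 3)

try:
    sigs, committee_indices = zip(*())   # no verified votes at all
except ValueError:
    sigs, committee_indices = tuple(), tuple()
print(sigs, committee_indices)  # () ()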
be6143e65d151cdd084aada126448567dcd0c1d7
7,090
py
Python
src/server/bos/controllers/v1/components.py
Cray-HPE/bos
a4a7fc58c884d951b6051093e1a4e2aeaba6740f
[ "MIT" ]
1
2022-03-15T18:17:11.000Z
2022-03-15T18:17:11.000Z
src/server/bos/controllers/v1/components.py
Cray-HPE/bos
a4a7fc58c884d951b6051093e1a4e2aeaba6740f
[ "MIT" ]
null
null
null
src/server/bos/controllers/v1/components.py
Cray-HPE/bos
a4a7fc58c884d951b6051093e1a4e2aeaba6740f
[ "MIT" ]
1
2022-03-06T12:47:06.000Z
2022-03-06T12:47:06.000Z
# Copyright 2021 Hewlett Packard Enterprise Development LP # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE. # # (MIT License) import connexion from datetime import datetime import logging from bos import redis_db_utils as dbutils LOGGER = logging.getLogger('bos.controllers.v1.components') DB = dbutils.get_wrapper(db='components') @dbutils.redis_error_handler def get_components(ids="", enabled=None): """Used by the GET /components API operation Allows filtering using a comma seperated list of ids. """ LOGGER.debug("GET /components invoked get_components") id_list = [] if ids: try: id_list = ids.split(',') except Exception as err: return connexion.problem( status=400, title="Error parsing the ids provided.", detail=str(err)) response = get_components_data(id_list=id_list, enabled=enabled) return response, 200 def get_components_data(id_list=None, enabled=None): """Used by the GET /components API operation Allows filtering using a comma separated list of ids. 
""" response = [] if id_list: for component_id in id_list: data = DB.get(component_id) if data: response.append(data) else: # TODO: On large scale systems, this response may be too large # and require paging to be implemented response = DB.get_all() if enabled is not None: response = [r for r in response if _matches_filter(r, enabled)] return response def _matches_filter(data, enabled): if enabled is not None and data.get('enabled', None) != enabled: return False return True @dbutils.redis_error_handler def put_components(): """Used by the PUT /components API operation""" LOGGER.debug("PUT /components invoked put_components") try: data = connexion.request.get_json() components = [] for component_data in data: component_id = component_data['id'] components.append((component_id, component_data)) except Exception as err: return connexion.problem( status=400, title="Error parsing the data provided.", detail=str(err)) response = [] for component_id, component_data in components: component_data = _set_auto_fields(component_data) response.append(DB.put(component_id, component_data)) return response, 200 @dbutils.redis_error_handler def patch_components(): """Used by the PATCH /components API operation""" LOGGER.debug("PATCH /components invoked patch_components") try: data = connexion.request.get_json() components = [] for component_data in data: component_id = component_data['id'] if component_id not in DB: return connexion.problem( status=404, title="Component could not found.", detail="Component {} could not be found".format(component_id)) components.append((component_id, component_data)) except Exception as err: return connexion.problem( status=400, title="Error parsing the data provided.", detail=str(err)) response = [] for component_id, component_data in components: component_data = _set_auto_fields(component_data) response.append(DB.patch(component_id, component_data, _update_handler)) return response, 200 @dbutils.redis_error_handler def get_component(component_id, config_details=False, v2=False): """Used by the GET /components/{component_id} API operation""" LOGGER.debug("GET /components/id invoked get_component") if component_id not in DB: return connexion.problem( status=404, title="Component could not found.", detail="Component {} could not be found".format(component_id)) component = DB.get(component_id) return component, 200 @dbutils.redis_error_handler def put_component(component_id): """Used by the PUT /components/{component_id} API operation""" LOGGER.debug("PUT /components/id invoked put_component") try: data = connexion.request.get_json() except Exception as err: return connexion.problem( status=400, title="Error parsing the data provided.", detail=str(err)) data['id'] = component_id data = _set_auto_fields(data) return DB.put(component_id, data), 200 @dbutils.redis_error_handler def patch_component(component_id): """Used by the PATCH /components/{component_id} API operation""" LOGGER.debug("PATCH /components/id invoked patch_component") if component_id not in DB: return connexion.problem( status=404, title="Component could not found.", detail="Component {} could not be found".format(component_id)) try: data = connexion.request.get_json() except Exception as err: return connexion.problem( status=400, title="Error parsing the data provided.", detail=str(err)) data = _set_auto_fields(data) return DB.patch(component_id, data, _update_handler), 200 @dbutils.redis_error_handler def delete_component(component_id): """Used by the DELETE /components/{component_id} API operation""" 
LOGGER.debug("DELETE /components/id invoked delete_component") if component_id not in DB: return connexion.problem( status=404, title="Component could not found.", detail="Component {} could not be found".format(component_id)) return DB.delete(component_id), 204 def _set_auto_fields(data): data = _set_last_updated(data) return data def _set_last_updated(data): timestamp = datetime.utcnow().isoformat() for section in ['actualState', 'desiredState', 'lastAction']: if section in data and type(data[section]) == dict: data[section]['lastUpdated'] = timestamp return data def _update_handler(data): # Allows processing of data during common patch operation return data
36.173469
82
0.687729
0
0
0
0
4,500
0.634697
0
0
2,678
0.377715
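The enabled-filter used by get_components_data above, re-implemented here in isolation so the snippet runs without the bos package; the sample component dicts are made up.

def matches_filter(data, enabled):
    # same logic as _matches_filter in the record
    if enabled is not None and data.get('enabled', None) != enabled:
        return False
    return True

components = [{'id': 'x1', 'enabled': True},
              {'id': 'x2', 'enabled': False},
              {'id': 'x3'}]
print([c['id'] for c in components if matches_filter(c, True)])  # ['x1']
print([c['id'] for c in components if matches_filter(c, None)])  # ['x1', 'x2', 'x3']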
be629e4dd47b9de924dd51caddb573587b68e29b
268
py
Python
cracking_the_coding_interview_qs/10.4/find_x_in_listy_test.py
angelusualle/algorithms
86286a49db2a755bc57330cb455bcbd8241ea6be
[ "Apache-2.0" ]
null
null
null
cracking_the_coding_interview_qs/10.4/find_x_in_listy_test.py
angelusualle/algorithms
86286a49db2a755bc57330cb455bcbd8241ea6be
[ "Apache-2.0" ]
null
null
null
cracking_the_coding_interview_qs/10.4/find_x_in_listy_test.py
angelusualle/algorithms
86286a49db2a755bc57330cb455bcbd8241ea6be
[ "Apache-2.0" ]
null
null
null
import unittest from find_x_in_listy import find_x_in_listy, Listy class Test_Case_Find_X_In_Listy(unittest.TestCase): def test_case_find_x_in_listy(self): listy = Listy(list(range(0, 1*10**8))) self.assertEqual(find_x_in_listy(listy, 5678), 5678)
38.285714
60
0.761194
200
0.746269
0
0
0
0
0
0
0
0
be62c8d9e725078536f0891cffbcc08c85ff6f54
979
py
Python
my_general_helpers.py
arminbahl/drosophila_phototaxis_paper
e01dc95675f835926c9104b34bf6cfd7244dee2b
[ "MIT" ]
null
null
null
my_general_helpers.py
arminbahl/drosophila_phototaxis_paper
e01dc95675f835926c9104b34bf6cfd7244dee2b
[ "MIT" ]
null
null
null
my_general_helpers.py
arminbahl/drosophila_phototaxis_paper
e01dc95675f835926c9104b34bf6cfd7244dee2b
[ "MIT" ]
null
null
null
from scipy.signal import butter,filtfilt from numba import jit import bisect def is_number_in_sorted_vector(sorted_vector, num): index = bisect.bisect_left(sorted_vector, num) return index != len(sorted_vector) and sorted_vector[index] == num # def butter_lowpass(cutoff, fs, order=5): # nyq = 0.5 * fs # normal_cutoff = cutoff / nyq # b, a = butter(order, normal_cutoff, btype='low', analog=False) # return b, a def butter_lowpass_filter(data, cutoff, fs, order): nyq = 0.5 * fs # Nyquist Frequency normal_cutoff = cutoff / nyq # Get the filter coefficients b, a = butter(order, normal_cutoff, btype='low', analog=False) y = filtfilt(b, a, data) return y @jit def first_order_lowpass_filter(signal_in, signal_out, tau, dt): alpha_lowpass = dt / (tau + dt) signal_out[0] = signal_in[0] for i in range(1, len(signal_in)): signal_out[i] = alpha_lowpass*signal_in[i] + (1-alpha_lowpass)*signal_out[i-1]
27.971429
86
0.684372
0
0
0
0
266
0.271706
0
0
234
0.239019
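A quick self-contained check of the Butterworth low-pass idea above, using the same cutoff/Nyquist normalisation: a 5 Hz sine plus a 50 Hz sine sampled at 500 Hz, filtered with a 10 Hz cutoff. The test signal and parameter values are my own.

import numpy as np
from scipy.signal import butter, filtfilt

fs = 500.0
t = np.arange(0, 2, 1 / fs)
signal = np.sin(2 * np.pi * 5 * t) + 0.5 * np.sin(2 * np.pi * 50 * t)

# normalise the cutoff by the Nyquist frequency, as in the helper above
b, a = butter(4, 10.0 / (0.5 * fs), btype='low', analog=False)
filtered = filtfilt(b, a, signal)

# the 50 Hz component should be strongly attenuated, so the peak amplitude
# of the filtered signal stays close to 1
print(round(np.max(np.abs(filtered[200:-200])), 2))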
be644a96343b814a2cf63e0bf374f535055ecf7e
6,856
py
Python
test/mitmproxy/addons/test_proxyserver.py
KarlParkinson/mitmproxy
fd5caf40c75ca73c4b767170497abf6a5bf016a0
[ "MIT" ]
24,939
2015-01-01T17:13:21.000Z
2022-03-31T17:50:04.000Z
test/mitmproxy/addons/test_proxyserver.py
KarlParkinson/mitmproxy
fd5caf40c75ca73c4b767170497abf6a5bf016a0
[ "MIT" ]
3,655
2015-01-02T12:31:43.000Z
2022-03-31T20:24:57.000Z
test/mitmproxy/addons/test_proxyserver.py
KarlParkinson/mitmproxy
fd5caf40c75ca73c4b767170497abf6a5bf016a0
[ "MIT" ]
3,712
2015-01-06T06:47:06.000Z
2022-03-31T10:33:27.000Z
import asyncio from contextlib import asynccontextmanager import pytest from mitmproxy import exceptions from mitmproxy.addons.proxyserver import Proxyserver from mitmproxy.connection import Address from mitmproxy.proxy import layers, server_hooks from mitmproxy.proxy.layers.http import HTTPMode from mitmproxy.test import taddons, tflow from mitmproxy.test.tflow import tclient_conn, tserver_conn class HelperAddon: def __init__(self): self.flows = [] self.layers = [ lambda ctx: layers.modes.HttpProxy(ctx), lambda ctx: layers.HttpLayer(ctx, HTTPMode.regular), lambda ctx: layers.TCPLayer(ctx), ] def request(self, f): self.flows.append(f) def tcp_start(self, f): self.flows.append(f) def next_layer(self, nl): nl.layer = self.layers.pop(0)(nl.context) @asynccontextmanager async def tcp_server(handle_conn) -> Address: server = await asyncio.start_server(handle_conn, '127.0.0.1', 0) await server.start_serving() try: yield server.sockets[0].getsockname() finally: server.close() @pytest.mark.asyncio async def test_start_stop(): async def server_handler(reader: asyncio.StreamReader, writer: asyncio.StreamWriter): assert await reader.readuntil(b"\r\n\r\n") == b"GET /hello HTTP/1.1\r\n\r\n" writer.write(b"HTTP/1.1 204 No Content\r\n\r\n") await writer.drain() writer.close() ps = Proxyserver() with taddons.context(ps) as tctx: state = HelperAddon() tctx.master.addons.add(state) async with tcp_server(server_handler) as addr: tctx.configure(ps, listen_host="127.0.0.1", listen_port=0) assert not ps.server ps.running() await tctx.master.await_log("Proxy server listening", level="info") assert ps.server proxy_addr = ps.server.sockets[0].getsockname()[:2] reader, writer = await asyncio.open_connection(*proxy_addr) req = f"GET http://{addr[0]}:{addr[1]}/hello HTTP/1.1\r\n\r\n" writer.write(req.encode()) assert await reader.readuntil(b"\r\n\r\n") == b"HTTP/1.1 204 No Content\r\n\r\n" assert repr(ps) == "ProxyServer(running, 1 active conns)" tctx.configure(ps, server=False) await tctx.master.await_log("Stopping server", level="info") assert not ps.server assert state.flows assert state.flows[0].request.path == "/hello" assert state.flows[0].response.status_code == 204 # Waiting here until everything is really torn down... takes some effort. conn_handler = list(ps._connections.values())[0] client_handler = conn_handler.transports[conn_handler.client].handler writer.close() await writer.wait_closed() try: await client_handler except asyncio.CancelledError: pass for _ in range(5): # Get all other scheduled coroutines to run. 
await asyncio.sleep(0) assert repr(ps) == "ProxyServer(stopped, 0 active conns)" @pytest.mark.asyncio async def test_inject() -> None: async def server_handler(reader: asyncio.StreamReader, writer: asyncio.StreamWriter): while s := await reader.read(1): writer.write(s.upper()) ps = Proxyserver() with taddons.context(ps) as tctx: state = HelperAddon() tctx.master.addons.add(state) async with tcp_server(server_handler) as addr: tctx.configure(ps, listen_host="127.0.0.1", listen_port=0) ps.running() await tctx.master.await_log("Proxy server listening", level="info") proxy_addr = ps.server.sockets[0].getsockname()[:2] reader, writer = await asyncio.open_connection(*proxy_addr) req = f"CONNECT {addr[0]}:{addr[1]} HTTP/1.1\r\n\r\n" writer.write(req.encode()) assert await reader.readuntil(b"\r\n\r\n") == b"HTTP/1.1 200 Connection established\r\n\r\n" writer.write(b"a") assert await reader.read(1) == b"A" ps.inject_tcp(state.flows[0], False, b"b") assert await reader.read(1) == b"B" ps.inject_tcp(state.flows[0], True, b"c") assert await reader.read(1) == b"c" @pytest.mark.asyncio async def test_inject_fail() -> None: ps = Proxyserver() with taddons.context(ps) as tctx: ps.inject_websocket( tflow.tflow(), True, b"test" ) await tctx.master.await_log("Cannot inject WebSocket messages into non-WebSocket flows.", level="warn") ps.inject_tcp( tflow.tflow(), True, b"test" ) await tctx.master.await_log("Cannot inject TCP messages into non-TCP flows.", level="warn") ps.inject_websocket( tflow.twebsocketflow(), True, b"test" ) await tctx.master.await_log("Flow is not from a live connection.", level="warn") ps.inject_websocket( tflow.ttcpflow(), True, b"test" ) await tctx.master.await_log("Flow is not from a live connection.", level="warn") @pytest.mark.asyncio async def test_warn_no_nextlayer(): """ Test that we log an error if the proxy server is started without NextLayer addon. That is a mean trap to fall into when writing end-to-end tests. """ ps = Proxyserver() with taddons.context(ps) as tctx: tctx.configure(ps, listen_host="127.0.0.1", listen_port=0) ps.running() await tctx.master.await_log("Proxy server listening at", level="info") assert tctx.master.has_log("Warning: Running proxyserver without nextlayer addon!", level="warn") await ps.shutdown_server() def test_self_connect(): server = tserver_conn() client = tclient_conn() server.address = ("localhost", 8080) ps = Proxyserver() with taddons.context(ps) as tctx: # not calling .running() here to avoid unnecessary socket ps.options = tctx.options ps.server_connect( server_hooks.ServerConnectionHookData(server, client) ) assert server.error == "Stopped mitmproxy from recursively connecting to itself." def test_options(): ps = Proxyserver() with taddons.context(ps) as tctx: with pytest.raises(exceptions.OptionsError): tctx.configure(ps, body_size_limit="invalid") tctx.configure(ps, body_size_limit="1m") with pytest.raises(exceptions.OptionsError): tctx.configure(ps, stream_large_bodies="invalid") tctx.configure(ps, stream_large_bodies="1m")
35.895288
111
0.622666
459
0.066949
238
0.034714
5,088
0.742124
4,983
0.726809
1,277
0.18626
be64e074af6729b6171d5eed328bc46d2d983abb
19,608
py
Python
tensorflow_probability/python/distributions/masked.py
mederrata/probability
bc6c411b0fbd83141f303f91a27343fe3c43a797
[ "Apache-2.0" ]
1
2022-03-22T11:56:31.000Z
2022-03-22T11:56:31.000Z
tensorflow_probability/python/distributions/masked.py
robot0102/probability
89d248c420b8ecabfd9d6de4a1aa8d3886920049
[ "Apache-2.0" ]
null
null
null
tensorflow_probability/python/distributions/masked.py
robot0102/probability
89d248c420b8ecabfd9d6de4a1aa8d3886920049
[ "Apache-2.0" ]
null
null
null
# Copyright 2021 The TensorFlow Probability Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """The MaskedIndependent distribution class.""" import tensorflow.compat.v2 as tf from tensorflow_probability.python.bijectors import bijector as bijector_lib from tensorflow_probability.python.distributions import batch_broadcast from tensorflow_probability.python.distributions import distribution as distribution_lib from tensorflow_probability.python.distributions import kullback_leibler from tensorflow_probability.python.distributions import log_prob_ratio from tensorflow_probability.python.internal import assert_util from tensorflow_probability.python.internal import parameter_properties from tensorflow_probability.python.internal import prefer_static as ps from tensorflow_probability.python.internal import samplers from tensorflow_probability.python.internal import tensor_util def _add_event_dims_to_mask(validity_mask, *, dist=None, event_ndims=None): validity_mask = tf.convert_to_tensor(validity_mask) if event_ndims is None: event_ndims = ps.rank_from_shape(dist.event_shape_tensor()) return tf.reshape( validity_mask, ps.concat([ ps.shape(validity_mask), ps.ones(event_ndims, dtype=tf.int32) ], axis=0)) def _make_masked_fn(fn_name, n_event_shapes, safe_value, make_arg0_safe=False): """Implements functions like mean, variance, etc. Args: fn_name: Name of the method called on the underlying distribution. n_event_shapes: Number of event shape repeats in the shape of the underlying function's output. safe_value: The value to be placed in invalid locations. May be `'safe_sample'` to specify we should use the "safe sample" value. make_arg0_safe: If `True`, we will apply `self.safe_sample_fn` to ensure the argument passed into the underlying routine is a "safe" sample. Returns: fn: Callable implementing the given function. """ def fn(self, *args, **kwargs): if safe_value == 'safe_sample' or make_arg0_safe: # Only if needed. safe_val = tf.stop_gradient(self.safe_sample_fn(self.distribution)) validity_mask = tf.convert_to_tensor(self.validity_mask) if make_arg0_safe: x = args[0] safe_x = tf.where( _add_event_dims_to_mask(validity_mask, dist=self), x, safe_val) args = (safe_x,) + args[1:] val = getattr(self.distribution, fn_name)(*args, **kwargs) if n_event_shapes: validity_mask = tf.reshape( validity_mask, ps.concat( [ps.shape(validity_mask)] + [ps.ones_like(self.event_shape_tensor())] * n_event_shapes, axis=0)) if safe_value == 'safe_sample': sentinel = tf.cast(safe_val, val.dtype) else: sentinel = tf.cast(safe_value, val.dtype) return tf.where(validity_mask, val, sentinel) fn.__name__ = f'_{fn_name}' return fn def _fixed_sample(d): return d.sample(seed=samplers.zeros_seed()) class _Masked(distribution_lib.Distribution): """A distribution that masks invalid underlying distributions. Sometimes we may want a way of masking out a subset of distributions. 
Perhaps we have labels for only a subset of batch members and want to evaluate a log_prob. Or we may want to encode a sparse random variable as a dense random variable with a mask applied. In single-program/multiple-data regimes, it can be necessary to pad Distributions and the samples thereof to a given size in order to achieve the "single-program" desideratum. When computing a probability density in this regime, we would like to mask out the contributions of invalid batch members. We may also want to ensure that the values being sampled are valid parameters for descendant distributions in a hierarchical model, even if they are ultimately masked out. This distribution answers those requirements. Specifically, for invalid batch elements: - `log_prob(x) == 0.` for all `x`, with no gradients back to `x`, nor any gradients to the parameters of `distribution`. - `sample() == tf.stop_gradient(safe_value_fn(distribution))`, with no gradients back to the parameters of `distribution`. The distribution accepts a mask specified by `validity_mask`, a boolean tensor broadcastable with the underlying distribution's batch shape which specifies for each batch element whether or not it is valid. Entries in `validity_mask` which are `False` denote missing distributions, which means that the corresponding entries in the measures (e.g. `prob`) and statistics (e.g. `mean`) must not be treated as coming from some real distribution. Whenever doing a reduction across those quantites, make sure to either mask out the invalid entries or make sure the returned value corresponds to the identity element of the reduction. For a couple examples: - OK: `reduce_sum(masked_dist.log_prob(x))` - OK: `tfd.Independent(masked_dist, ...)` - Not OK: `reduce_var(masked_dist.mean())` will underestimate the variance because it uses too large an `N`. - Not OK: `tf.linalg.cholesky(masked_dist.covariance())` will fail for invalid batch elements. The default `safe_value_fn` is to draw a fixed-seeded sample from the underlying `distribution`. Since this may be expensive, it is suggested to specify a computationally cheaper method. Some options might include: - `tfd.Distribution.mode` - `tfd.Distribution.mean` - `lambda d: d.quantile(.5)` (median) - `lambda _: 0.` (if zero is always in the support of d) - `lambda d: d.experimental_default_event_space_bijector()(0.)` Besides the output of `sample`, results from `safe_value_fn` may also appear in (invalid batch members of) `masked.default_event_space_bijector().forward`. #### Examples ``` # Use tf.sequence_mask for `range(n) < num_valid`. num_valid = 3 num_entries = 4 d = tfd.Masked( tfd.MultivariateNormalDiag(tf.zeros([2, num_entries, 5]), tf.ones([5])), tf.sequence_mask(num_valid, num_entries)) d.batch_shape # [2, 4] d.event_shape # [5] d.log_prob(tf.zeros([5])) # shape [2, 4] # => [[nonzero, nonzero, nonzero, 0.], # [nonzero, nonzero, nonzero, 0.]] # Explicitly denote which elements are valid, adding a new batch dim of 2. d = tfd.Masked(tfd.MultivariateNormalDiag(tf.zeros([4, 5]), tf.ones([5])), [[False], [True]]) d.batch_shape # [2, 4] d.event_shape # [5] d.log_prob(tf.zeros([5])) # shape [2, 4] # => [[0., 0., 0., 0.], # [nonzero, nonzero, nonzero, nonzero]] # Use `BatchBroadcast` and `Independent` to achieve the equivalent of adding # positional mask functionality to `tfd.Sample`. 
# Suppose we wanted to achieve this: # `tfd.Sample(tfd.Normal(tf.zeros(2), 1), [3, 4], validity_mask=mask)` # We can write: d = tfd.Independent( tfd.Masked(tfd.BatchBroadcast(tfd.Normal(0, 1), [2, 3, 4]), mask), reinterpreted_batch_ndims=2) d.batch_shape # [2] d.event_shape # [3, 4] d.log_prob(tf.ones([3, 4])) # shape [2] ``` """ def __init__(self, distribution, validity_mask, safe_sample_fn=_fixed_sample, validate_args=False, allow_nan_stats=True, name=None): """Constructs a Masked distribution. Args: distribution: The underlying distribution, which will be masked. validity_mask: Boolean mask where `True` indicates an element is valid. `validity_mask` must broadcast with the batch shape of the underlying distribution. Invalid batch elements are masked so that sampling returns `safe_sample_fn(dist)` in invalid positions and `log_prob(x)` returns `0.` for invalid positions. safe_sample_fn: A callable which takes a distribution (namely, the `distribution` argument) and returns a determinstic, safe sample value. This helps to avoid `nan` gradients and allows downstream usage of samples from a `Masked` distribution to assume a "safe" even if invalid value. (Be careful to ensure that such downstream usages are themselves masked!) Note that the result of this function will be wrapped in a `tf.stop_gradient` call. validate_args: Boolean indicating whether argument assertions should be run. May impose performance penalties. allow_nan_stats: Boolean indicating whether statistical functions may return `nan`, or should instead use asserts where possible. name: Optional name for operation scoping. """ parameters = dict(locals()) with tf.name_scope(name or f'Masked{distribution.name}') as name: self._distribution = distribution self._validity_mask = tensor_util.convert_nonref_to_tensor( validity_mask, dtype_hint=tf.bool) self._safe_sample_fn = safe_sample_fn super(_Masked, self).__init__( dtype=distribution.dtype, reparameterization_type=distribution.reparameterization_type, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, name=name) @classmethod def _parameter_properties(cls, dtype, num_classes=None): return dict( distribution=parameter_properties.BatchedComponentProperties(), validity_mask=parameter_properties.ParameterProperties( shape_fn=parameter_properties.SHAPE_FN_NOT_IMPLEMENTED)) @property def distribution(self): return self._distribution @property def validity_mask(self): return self._validity_mask @property def safe_sample_fn(self): return self._safe_sample_fn @property def experimental_is_sharded(self): return self.distribution.experimental_is_sharded def _event_shape(self): return self.distribution.event_shape def _event_shape_tensor(self): return self.distribution.event_shape_tensor() def _sample_n(self, n, seed=None, **kwargs): validity_mask = tf.convert_to_tensor(self.validity_mask) # To avoid the shape gymnastics of drawing extra samples, we delegate # sampling to the BatchBroadcast distribution. 
bb = batch_broadcast.BatchBroadcast(self.distribution, ps.shape(validity_mask)) samples = bb.sample(n, seed=seed, **kwargs) safe_val = tf.stop_gradient(self.safe_sample_fn(self.distribution)) return tf.where(_add_event_dims_to_mask(validity_mask, dist=self), samples, safe_val) _log_prob = _make_masked_fn( 'log_prob', n_event_shapes=0, safe_value=0., make_arg0_safe=True) _prob = _make_masked_fn( 'prob', n_event_shapes=0, safe_value=1., make_arg0_safe=True) _log_cdf = _make_masked_fn( 'log_cdf', n_event_shapes=0, safe_value=0., make_arg0_safe=True) _cdf = _make_masked_fn( 'cdf', n_event_shapes=0, safe_value=1., make_arg0_safe=True) _log_survival_function = _make_masked_fn( 'log_survival_function', n_event_shapes=0, safe_value=-float('inf'), make_arg0_safe=True) _survival_function = _make_masked_fn( 'survival_function', n_event_shapes=0, safe_value=0., make_arg0_safe=True) _entropy = _make_masked_fn( 'entropy', n_event_shapes=0, safe_value=0.) _mode = _make_masked_fn( 'mode', n_event_shapes=1, safe_value='safe_sample') _mean = _make_masked_fn( 'mean', n_event_shapes=1, safe_value='safe_sample') _variance = _make_masked_fn( 'variance', n_event_shapes=1, safe_value=0.) _stddev = _make_masked_fn( 'stddev', n_event_shapes=1, safe_value=0.) _covariance = _make_masked_fn( 'covariance', n_event_shapes=2, safe_value=0.) _quantile = _make_masked_fn( 'quantile', n_event_shapes=1, safe_value='safe_sample') def _default_event_space_bijector(self, *args, **kwargs): underlying_bijector = ( self.distribution.experimental_default_event_space_bijector()) if underlying_bijector is None: return None return _MaskedBijector(self, underlying_bijector) class Masked(_Masked, distribution_lib.AutoCompositeTensorDistribution): def __new__(cls, *args, **kwargs): """Maybe return a non-`CompositeTensor` `_Masked`.""" if cls is Masked: if args: distribution = args[0] else: distribution = kwargs.get('distribution') if not isinstance(distribution, tf.__internal__.CompositeTensor): return _Masked(*args, **kwargs) return super(Masked, cls).__new__(cls) Masked.__doc__ = _Masked.__doc__ + '\n' + ( 'If `distribution` is a `CompositeTensor`, then the resulting `Masked` ' 'instance is a `CompositeTensor` as well. Otherwise, a ' 'non-`CompositeTensor` `_Masked` instance is created instead. Distribution ' 'subclasses that inherit from `Masked` will also inherit from ' '`CompositeTensor`.') @kullback_leibler.RegisterKL(_Masked, _Masked) def _kl_masked_masked(a, b, name=None): """KL divergence between Masked distributions.""" with tf.name_scope(name or 'kl_masked_masked'): a_valid = tf.convert_to_tensor(a.validity_mask) b_valid = tf.convert_to_tensor(b.validity_mask) underlying_kl = kullback_leibler.kl_divergence( a.distribution, b.distribution) # The treatment for KL is as follows: # When both random variables are valid, the underlying KL applies. # When neither random variable is valid, the KL is 0., i.e. # `a log a - a log b = 0` because log a and log b are everywhere 0. # When exactly one is valid, we (a) raise an assertion error, if either # distribution's allow_nan_stats is set to False, or (b) return nan in # such positions. 
asserts = [] if not (a.allow_nan_stats and b.allow_nan_stats): asserts.append(assert_util.assert_equal( a_valid, b_valid, message='KL is only valid for matching mask values')) with tf.control_dependencies(asserts): both_valid = (a_valid & b_valid) neither_valid = (~a_valid) & (~b_valid) dtype = underlying_kl.dtype return tf.where(both_valid, underlying_kl, tf.where(neither_valid, tf.zeros([], dtype), float('nan'))) @log_prob_ratio.RegisterLogProbRatio(_Masked) def _masked_log_prob_ratio(p, x, q, y, name=None): """Computes log p(x) - log q(y) for Masked p, q.""" with tf.name_scope(name or 'masked_log_prob_ratio'): p_valid = tf.convert_to_tensor(p.validity_mask) safe_x = tf.where(_add_event_dims_to_mask(p_valid, dist=p), x, tf.stop_gradient(p.safe_sample_fn(p.distribution))) q_valid = tf.convert_to_tensor(q.validity_mask) safe_y = tf.where(_add_event_dims_to_mask(q_valid, dist=q), y, tf.stop_gradient(q.safe_sample_fn(q.distribution))) underlying = log_prob_ratio.log_prob_ratio( p.distribution, safe_x, q.distribution, safe_y) asserts = [] # As with KL, we return the underlying log_prob_ratio where both are valid, # `0.` where neither is valid, and `nan` otherwise (or an assertion if # either distribution does not `allow_nan_stats`). if not (p.allow_nan_stats and p.allow_nan_stats): asserts.append(assert_util.assert_equal( p_valid, q_valid, message='Masked log_prob_ratio only valid for matching mask values')) with tf.control_dependencies(asserts): both_valid = (p_valid & q_valid) neither_valid = (~p_valid) & (~q_valid) return tf.where(both_valid, underlying, tf.where(neither_valid, tf.zeros([], dtype=underlying.dtype), float('nan'))) class _NonCompositeTensorMaskedBijector(bijector_lib.Bijector): """Event space bijector for Masked distributions.""" def __init__(self, masked, underlying_bijector): self._masked = masked self._bijector = underlying_bijector super(_NonCompositeTensorMaskedBijector, self).__init__( validate_args=underlying_bijector.validate_args, dtype=underlying_bijector.dtype, forward_min_event_ndims=underlying_bijector.forward_min_event_ndims, inverse_min_event_ndims=underlying_bijector.inverse_min_event_ndims) def _forward_event_shape(self, x): return self._bijector.forward_event_shape(x) def _forward_event_shape_tensor(self, x): return self._bijector.forward_event_shape_tensor(x) def _inverse_event_shape(self, y): return self._bijector.inverse_event_shape(y) def _inverse_event_shape_tensor(self, y): return self._bijector.inverse_event_shape_tensor(y) def _make_safe_x(self, x, validity_mask): bij = self._bijector masked = self._masked pullback_event_ndims = ps.rank_from_shape( lambda: bij.inverse_event_shape_tensor(masked.event_shape_tensor()), self._bijector.inverse_event_shape(masked.event_shape)) pullback_event_mask = _add_event_dims_to_mask( validity_mask, event_ndims=pullback_event_ndims) # We presume that 0 in unconstrained space is safe. return tf.where(pullback_event_mask, x, 0.) def _forward(self, x): mask = self._masked.validity_mask safe_x = self._make_safe_x(x, mask) return self._make_safe_y(self._bijector.forward(safe_x), mask) def _forward_log_det_jacobian(self, x): validity_mask = tf.convert_to_tensor(self._masked.validity_mask) safe_x = self._make_safe_x(x, validity_mask) return tf.where(validity_mask, self._bijector.forward_log_det_jacobian(safe_x), 0.) 
def _make_safe_y(self, y, validity_mask): safe_val = tf.stop_gradient( self._masked.safe_sample_fn(self._masked.distribution)) event_mask = _add_event_dims_to_mask(validity_mask, dist=self._masked) return tf.where(event_mask, y, safe_val) def _inverse(self, y): safe_y = self._make_safe_y(y, self._masked.validity_mask) return self._bijector.inverse(safe_y) def _inverse_log_det_jacobian(self, y): validity_mask = tf.convert_to_tensor(self._masked.validity_mask) safe_y = self._make_safe_y(y, validity_mask) return tf.where(validity_mask, self._bijector.inverse_log_det_jacobian(safe_y), 0.) class _MaskedBijector(_NonCompositeTensorMaskedBijector, bijector_lib.AutoCompositeTensorBijector): """Event space bijector for Masked distributions.""" def __new__(cls, *args, **kwargs): """Maybe return a `_NonCompositeTensorMaskedBijector`.""" if cls is _MaskedBijector: if args: masked = args[0] else: masked = kwargs.get('masked') if len(args) > 1: bijector = args[1] else: bijector = kwargs.get('underlying_bijector') if not (isinstance(masked, tf.__internal__.CompositeTensor) and isinstance(bijector, tf.__internal__.CompositeTensor)): return _NonCompositeTensorMaskedBijector(*args, **kwargs) return super(_MaskedBijector, cls).__new__(cls)
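The docstring and the KL/log_prob handling above both state that invalid batch members contribute `0.` to `log_prob`. A brief usage sketch consistent with that behaviour, assuming a TensorFlow Probability release that ships `tfd.Masked` (roughly 0.15 or later); shapes and values are illustrative only:

# Hypothetical usage sketch -- not part of masked.py above.
import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

mask = tf.constant([True, False, True])
d = tfd.Masked(tfd.Normal(tf.zeros(3), 1.), validity_mask=mask)

lp = d.log_prob(tf.zeros(3))
# lp has shape [3]; lp[1] is 0. because that batch member is masked out,
# while lp[0] and lp[2] are ordinary Normal(0, 1) log-densities.
print(lp)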
41.719149
88
0.708588
12,845
0.65509
0
0
3,400
0.173399
0
0
8,435
0.430182
be665281e674fbcee73480a5a06334a427283318
1,254
py
Python
download.py
kaija/taiwan_stockloader
637244c3b0bc96093cc5a7b3df093a829f9e3c2d
[ "MIT" ]
2
2015-06-13T09:17:46.000Z
2015-10-25T15:31:33.000Z
download.py
kaija/taiwan_stockloader
637244c3b0bc96093cc5a7b3df093a829f9e3c2d
[ "MIT" ]
null
null
null
download.py
kaija/taiwan_stockloader
637244c3b0bc96093cc5a7b3df093a829f9e3c2d
[ "MIT" ]
3
2016-02-01T07:36:55.000Z
2018-08-03T12:22:20.000Z
import datetime
import httplib
import urllib
from datetime import timedelta

#now = datetime.datetime.now();
#today = now.strftime('%Y-%m-%d')
#print today

def isfloat(value):
    try:
        float(value)
        return True
    except ValueError:
        return False

def convfloat(value):
    try:
        return float(value)
    except ValueError:
        return -1

today = datetime.date.today()
one_day = timedelta(days=1);
#start_day = datetime.date(2004, 2, 11);
start_day = datetime.date(2010, 8, 21);

print "Download from " + start_day.strftime("%Y-%m-%d") + " to " + today.strftime("%Y-%m-%d")

dl_date = start_day
while dl_date < today:
    httpreq = httplib.HTTPConnection('www.twse.com.tw')
    headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
    date_str = str(dl_date.year - 1911) + dl_date.strftime("/%m/%d")
    form = urllib.urlencode({'download': 'csv', 'qdate': date_str, 'selectType': 'ALLBUT0999'})
    httpreq.request("POST", "/ch/trading/exchange/MI_INDEX/MI_INDEX.php", form, headers);
    httpres = httpreq.getresponse()
    stock_csv = httpres.read()
    file_name = "data/" + dl_date.strftime("%Y%m%d") + ".csv"
    print "downloading " + file_name
    f = open(file_name, "w")
    f.write(stock_csv)
    dl_date += one_day

print "Download Finish!"
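The script above targets Python 2 (`httplib`, `urllib.urlencode`, `print` statements). A minimal Python 3 sketch of one request, assuming the TWSE endpoint and form fields are unchanged; the Big5 decoding and the example `qdate` value (ROC year 99 = 2010) are assumptions, not taken from the repository:

# Hypothetical Python 3 port of one loop iteration -- not part of the original script.
import http.client
import urllib.parse

conn = http.client.HTTPConnection('www.twse.com.tw')
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
form = urllib.parse.urlencode({'download': 'csv', 'qdate': '99/08/21', 'selectType': 'ALLBUT0999'})
conn.request("POST", "/ch/trading/exchange/MI_INDEX/MI_INDEX.php", form, headers)
stock_csv = conn.getresponse().read().decode('big5', errors='replace')  # assumption: TWSE serves Big5-encoded CSV

with open("data/20100821.csv", "w", encoding="utf-8") as f:  # assumes the data/ directory exists
    f.write(stock_csv)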
23.660377
93
0.692185
0
0
0
0
0
0
0
0
404
0.322169
be665e63998c0015bc21386a7c5b3385196a6cfb
5,403
py
Python
heuristic/improvement/reopt/disruption_updater.py
annalunde/master
2552d43713e8ebca0b0e57bc5bebd1eaeeac1875
[ "MIT" ]
1
2022-03-17T15:40:00.000Z
2022-03-17T15:40:00.000Z
heuristic/improvement/reopt/disruption_updater.py
annalunde/master
2552d43713e8ebca0b0e57bc5bebd1eaeeac1875
[ "MIT" ]
null
null
null
heuristic/improvement/reopt/disruption_updater.py
annalunde/master
2552d43713e8ebca0b0e57bc5bebd1eaeeac1875
[ "MIT" ]
null
null
null
import copy
import pandas as pd
from decouple import config
from heuristic.construction.construction import ConstructionHeuristic
from config.construction_config import *
from simulation.simulator import Simulator
from heuristic.improvement.reopt.new_request_updater import NewRequestUpdater


class DisruptionUpdater:
    def __init__(self, new_request_updater):
        self.new_request_updater = new_request_updater

    def update_route_plan(self, current_route_plan, disruption_type, disruption_info, sim_clock):
        # adding current position for each vehicle
        vehicle_clocks, artificial_depot = self.update_vehicle_clocks(
            current_route_plan, sim_clock, disruption_type, disruption_info)

        updated_route_plan = copy.deepcopy(current_route_plan)

        if disruption_type == 'request':
            self.new_request_updater.set_parameters(disruption_info)
        elif disruption_type == 'delay':
            updated_route_plan = self.update_with_delay(
                current_route_plan, disruption_info)
        elif disruption_type == 'cancel':
            # update capacities
            updated_vehicle_route = self.update_capacities(
                updated_route_plan[disruption_info[0]], disruption_info[1], disruption_info[2],
                updated_route_plan[disruption_info[0]][disruption_info[1]][5])
            updated_route_plan[disruption_info[0]] = updated_vehicle_route

            if artificial_depot:
                # remove dropoff node
                del updated_route_plan[disruption_info[0]][disruption_info[2]]
            else:
                # remove dropoff node
                del updated_route_plan[disruption_info[0]][disruption_info[2]]
                # remove pickup node
                del updated_route_plan[disruption_info[0]][disruption_info[1]]

        else:  # no show
            # update capacities
            updated_vehicle_route = self.update_capacities(
                updated_route_plan[disruption_info[0]], disruption_info[1], disruption_info[2],
                updated_route_plan[disruption_info[0]][disruption_info[1]][5])
            updated_route_plan[disruption_info[0]] = updated_vehicle_route

            # remove dropoff node
            del updated_route_plan[disruption_info[0]][disruption_info[2]]

        return updated_route_plan, vehicle_clocks

    def update_with_delay(self, current_route_plan, disruption_info):
        delay_duration = disruption_info[2]
        route_plan = copy.deepcopy(current_route_plan)

        start_idx = disruption_info[1]
        for node in route_plan[disruption_info[0]][disruption_info[1]:]:
            t = node[1] + delay_duration
            d = node[2] + delay_duration
            node = (node[0], t, d, node[3], node[4], node[5])
            route_plan[disruption_info[0]][start_idx] = node
            start_idx += 1

        return route_plan

    @staticmethod
    def recalibrate_solution(current_route_plan, disruption_info, still_delayed_nodes):
        delay_duration = disruption_info[2]
        route_plan = copy.deepcopy(current_route_plan)

        for node in still_delayed_nodes:
            idx = next(i for i, (node_test, *_) in enumerate(route_plan[disruption_info[0]]) if node_test == node)
            node_route = route_plan[disruption_info[0]][idx]
            d = node_route[2] - delay_duration
            node_route = (node_route[0], node_route[1], d, node_route[3], node_route[4], node_route[5])
            route_plan[disruption_info[0]][idx] = node_route

        return route_plan

    def update_vehicle_clocks(self, current_route_plan, sim_clock, disruption_type, disruption_info):
        artificial_depot = False

        # find index for next node after sim_clock and corresponding time of service
        vehicle_clocks = []
        for vehicle_route in current_route_plan:
            if len(vehicle_route) > 1:
                if vehicle_route[0][1] < sim_clock:
                    prev_idx = 0
                    for idx, (node, time, deviation, passenger, wheelchair, _) in enumerate(vehicle_route):
                        if time <= sim_clock:
                            prev_idx = idx

                    if prev_idx == len(vehicle_route) - 1:
                        vehicle_clocks.append(sim_clock)
                    else:
                        next_idx = prev_idx + 1
                        vehicle_clocks.append(vehicle_route[next_idx][1])

                        if disruption_type == 'cancel':
                            # check whether next node after sim_clock is the request that is cancelled
                            if current_route_plan[disruption_info[0]][disruption_info[1]] == vehicle_route[next_idx]:
                                artificial_depot = True
                else:
                    vehicle_clocks.append(sim_clock)
            else:
                vehicle_clocks.append(sim_clock)

        return vehicle_clocks, artificial_depot

    def update_capacities(self, vehicle_route, start_id, dropoff_id, request):
        idx = start_id
        for n, t, d, p, w, _ in vehicle_route[start_id:dropoff_id]:
            p -= request["Number of Passengers"]
            w -= request["Wheelchair"]
            vehicle_route[idx] = (n, t, d, p, w, _)
            idx += 1
        return vehicle_route
40.931818
117
0.626504
5,107
0.945216
0
0
706
0.130668
0
0
388
0.071812
be687c8fd20a0765459343471aaeb0dc60aa0c2b
666
py
Python
evennia/scripts/migrations/0013_auto_20191025_0831.py
Jaykingamez/evennia
cf7cab1fea99ede3efecb70a65c3eb0fba1d3745
[ "BSD-3-Clause" ]
1,544
2015-01-01T22:16:31.000Z
2022-03-31T19:17:45.000Z
evennia/scripts/migrations/0013_auto_20191025_0831.py
Jaykingamez/evennia
cf7cab1fea99ede3efecb70a65c3eb0fba1d3745
[ "BSD-3-Clause" ]
1,686
2015-01-02T18:26:31.000Z
2022-03-31T20:12:03.000Z
evennia/scripts/migrations/0013_auto_20191025_0831.py
Jaykingamez/evennia
cf7cab1fea99ede3efecb70a65c3eb0fba1d3745
[ "BSD-3-Clause" ]
867
2015-01-02T21:01:54.000Z
2022-03-29T00:28:27.000Z
# Generated by Django 2.2.6 on 2019-10-25 12:31

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [("scripts", "0012_auto_20190128_1820")]

    operations = [
        migrations.AlterField(
            model_name="scriptdb",
            name="db_typeclass_path",
            field=models.CharField(
                db_index=True,
                help_text="this defines what 'type' of entity this is. This variable holds a Python path to a module with a valid Evennia Typeclass.",
                max_length=255,
                null=True,
                verbose_name="typeclass",
            ),
        )
    ]
28.956522
150
0.587087
573
0.86036
0
0
0
0
0
0
244
0.366366
be6ac11cc08ea3cf2a70097fa4537b051b80fea9
834
py
Python
tests/test_pyqrcodeng_issue13.py
dbajar/segno
f7d5669537b12d3ebb914ae6d0a0a1e14f8d25f5
[ "BSD-3-Clause" ]
254
2016-09-25T21:32:00.000Z
2022-03-30T09:56:14.000Z
tests/test_pyqrcodeng_issue13.py
dbajar/segno
f7d5669537b12d3ebb914ae6d0a0a1e14f8d25f5
[ "BSD-3-Clause" ]
102
2016-08-04T12:18:44.000Z
2022-03-23T09:09:51.000Z
tests/test_pyqrcodeng_issue13.py
dbajar/segno
f7d5669537b12d3ebb914ae6d0a0a1e14f8d25f5
[ "BSD-3-Clause" ]
34
2016-09-25T21:34:42.000Z
2022-03-30T08:19:03.000Z
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - 2020 -- Lars Heuer
# All rights reserved.
#
# License: BSD License
#
"""\
Test against issue <https://github.com/pyqrcode/pyqrcodeNG/pull/13/>.

The initial test was created by Mathieu <https://github.com/albatros69>,
see the above mentioned pull request.

Adapted for Segno to check if it suffers from the same problem.
"""
from __future__ import absolute_import, unicode_literals
import segno


def test_autodetect():
    data = 'Émetteur'
    qr = segno.make(data)
    assert qr.mode == 'byte'


def test_encoding():
    encoding = 'iso-8859-15'
    data = 'Émetteur'
    qr = segno.make(data.encode(encoding))
    assert qr.mode == 'byte'
    qr2 = segno.make(data, encoding=encoding)
    assert qr2 == qr


if __name__ == '__main__':
    import pytest
    pytest.main([__file__])
21.947368
72
0.681055
0
0
0
0
0
0
0
0
423
0.505981
be6b1f866bc5d3fdc38f4e9b6fd3e9f0bcf0235f
384
py
Python
qiskit/quantum_info/operators/__init__.py
jagunnels/qiskit-sdk-py
153cdde972e65c0f23675bbe17c93e18be27bd51
[ "Apache-2.0" ]
null
null
null
qiskit/quantum_info/operators/__init__.py
jagunnels/qiskit-sdk-py
153cdde972e65c0f23675bbe17c93e18be27bd51
[ "Apache-2.0" ]
null
null
null
qiskit/quantum_info/operators/__init__.py
jagunnels/qiskit-sdk-py
153cdde972e65c0f23675bbe17c93e18be27bd51
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-

# Copyright 2019, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.

"""Quantum Operators."""

from .operator import Operator
from .unitary import Unitary
from .pauli import Pauli, pauli_group
from .channel import Choi, SuperOp, Kraus, Stinespring, Chi, PTM
27.428571
77
0.742188
0
0
0
0
0
0
0
0
212
0.552083
be6d27c87017d3ff2b758a9a1954cf3e265b550c
554
py
Python
iocms/iocms/urls.py
Gaurav-Zaiswal/iw-acad-iocms-be
a133f120eed93433925608f08c5145d2d0d1db39
[ "MIT" ]
null
null
null
iocms/iocms/urls.py
Gaurav-Zaiswal/iw-acad-iocms-be
a133f120eed93433925608f08c5145d2d0d1db39
[ "MIT" ]
null
null
null
iocms/iocms/urls.py
Gaurav-Zaiswal/iw-acad-iocms-be
a133f120eed93433925608f08c5145d2d0d1db39
[ "MIT" ]
2
2021-09-16T04:44:59.000Z
2021-09-16T05:45:31.000Z
from django.contrib import admin
from django.urls import include, path
from django.conf import settings
from django.conf.urls.static import static

urlpatterns = [
    path('admin/', admin.site.urls),
    path('class/', include('classroom.urls')),
    path('assignment-api/', include('assignment.urls', namespace='assignment')),
    path('feed/', include('feed.urls', namespace='feed')),
    path('users/', include('users.urls'), name="user-register")
]

if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
36.933333
80
0.720217
0
0
0
0
0
0
0
0
137
0.247292
be6f16523ef2463524119c42f75567ed0f66d560
1,905
py
Python
src/security/__init__.py
slippers/blogging_security_flatpage
53644978b798c66369416b1e5625cc04d89c0a87
[ "MIT" ]
1
2018-12-31T05:30:13.000Z
2018-12-31T05:30:13.000Z
src/security/__init__.py
slippers/blogging_security_flatpage
53644978b798c66369416b1e5625cc04d89c0a87
[ "MIT" ]
null
null
null
src/security/__init__.py
slippers/blogging_security_flatpage
53644978b798c66369416b1e5625cc04d89c0a87
[ "MIT" ]
null
null
null
from src import app, db
from .models import User, Role, RoleUsers
from .security_admin import UserAdmin, RoleAdmin
from flask_security import Security, SQLAlchemyUserDatastore, \
    login_required, roles_accepted
from flask_security.utils import encrypt_password


def config_security_admin(admin):
    admin.add_view(UserAdmin(db.session))
    admin.add_view(RoleAdmin(db.session))


def configure_security():
    # Create the Roles "admin" and "end-user" -- unless they already exist
    user_datastore.find_or_create_role(name='admin', description='Administrator')
    user_datastore.find_or_create_role(name='end-user', description='End user')
    user_datastore.find_or_create_role(name='blogger', description='Blogger')

    # Create two Users for testing purposes -- unless they already exist.
    # In each case, use the Flask-Security utility function to encrypt the password.
    pw = encrypt_password('password')
    # pw = 'password'
    if not user_datastore.get_user('[email protected]'):
        user_datastore.create_user(email='[email protected]', password=pw)
    if not user_datastore.get_user('[email protected]'):
        user_datastore.create_user(email='[email protected]', password=pw)

    # Give one User the "end-user" role, while the other has the "admin" role.
    # (This will have no effect if the Users already have these Roles.)
    # Again, commit any database changes.
    user_datastore.add_role_to_user('[email protected]', 'end-user')
    user_datastore.add_role_to_user('[email protected]', 'blogger')
    user_datastore.add_role_to_user('[email protected]', 'admin')
    user_datastore.add_role_to_user('[email protected]', 'blogger')
    db.session.commit()


# Setup Flask-Security
user_datastore = SQLAlchemyUserDatastore(db, User, Role)
security = Security(app, user_datastore)

# Create any database tables that don't exist yet.
db.create_all()
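`login_required` and `roles_accepted` are imported above but never used in this module. A minimal sketch of how a view could be protected with them; the route, view name, and role combination are illustrative, not from the original project:

# Hypothetical usage sketch -- not part of the original module.
from flask_security import login_required, roles_accepted
from src import app

@app.route('/admin/dashboard')
@login_required
@roles_accepted('admin', 'blogger')  # the caller must hold at least one of these roles
def admin_dashboard():
    return "Only admins and bloggers can see this."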
40.531915
83
0.752756
0
0
0
0
0
0
0
0
755
0.396325
be6f25ab250ddab2ab944a4c759bdf74b87010ce
12,251
py
Python
usaspending_api/download/lookups.py
lenjonemcse/usaspending-api
cbffc4e0a0c2b1339c7a8bfe6b0d687b3731b6ce
[ "CC0-1.0" ]
1
2022-01-28T16:08:04.000Z
2022-01-28T16:08:04.000Z
usaspending_api/download/lookups.py
lenjonemcse/usaspending-api
cbffc4e0a0c2b1339c7a8bfe6b0d687b3731b6ce
[ "CC0-1.0" ]
null
null
null
usaspending_api/download/lookups.py
lenjonemcse/usaspending-api
cbffc4e0a0c2b1339c7a8bfe6b0d687b3731b6ce
[ "CC0-1.0" ]
null
null
null
""" This file defines a series of constants that represent the values used in the API's "helper" tables. Rather than define the values in the db setup scripts and then make db calls to lookup the surrogate keys, we'll define everything here, in a file that can be used by the db setup scripts *and* the application code. """ from collections import namedtuple, OrderedDict from usaspending_api.accounts.models import AppropriationAccountBalances from usaspending_api.accounts.v2.filters.account_download import account_download_filter from usaspending_api.awards.models import Award, TransactionNormalized from usaspending_api.awards.models import FinancialAccountsByAwards from usaspending_api.download.helpers.elasticsearch_download_functions import ( AwardsElasticsearchDownload, TransactionsElasticsearchDownload, ) from usaspending_api.download.helpers.disaster_filter_functions import disaster_filter_function from usaspending_api.search.models import AwardSearchView, TransactionSearch, SubawardView from usaspending_api.awards.v2.filters.idv_filters import ( idv_order_filter, idv_transaction_filter, idv_treasury_account_funding_filter, ) from usaspending_api.awards.v2.filters.award_filters import ( awards_transaction_filter, awards_subaward_filter, awards_treasury_account_funding_filter, ) from usaspending_api.awards.v2.filters.search import ( universal_award_matview_filter, transaction_search_filter, ) from usaspending_api.awards.v2.filters.sub_award import subaward_download from usaspending_api.financial_activities.models import FinancialAccountsByProgramActivityObjectClass from usaspending_api.download.helpers.download_annotation_functions import ( transaction_search_annotations, universal_award_matview_annotations, subaward_annotations, idv_order_annotations, idv_transaction_annotations, ) LookupType = namedtuple("LookupType", ["id", "name", "desc"]) JOB_STATUS = [ LookupType(1, "ready", "job is ready to be run"), LookupType(2, "running", "job is currently in progress"), LookupType(3, "finished", "job is complete"), LookupType(4, "failed", "job failed to complete"), LookupType(5, "queued", "job sent to queue for async processing"), LookupType(6, "resumed", "job is being reprocessed after a failure"), LookupType(7, "created", "job product has been created and stored locally"), LookupType(8, "uploading", "job is being uploaded to public storage"), ] JOB_STATUS_DICT = {item.name: item.id for item in JOB_STATUS} VALUE_MAPPINGS = { # Award Level "awards": { "source_type": "award", "table": AwardSearchView, "table_name": "award", "type_name": "PrimeAwardSummaries", "download_name": "{agency}{type}_PrimeAwardSummaries_{timestamp}", "contract_data": "award__latest_transaction__contract_data", "assistance_data": "award__latest_transaction__assistance_data", "filter_function": universal_award_matview_filter, "annotations_function": universal_award_matview_annotations, }, # Elasticsearch Award Level "elasticsearch_awards": { "source_type": "award", "table": AwardSearchView, "table_name": "award", "type_name": "PrimeAwardSummaries", "download_name": "{agency}{type}_PrimeAwardSummaries_{timestamp}", "contract_data": "award__latest_transaction__contract_data", "assistance_data": "award__latest_transaction__assistance_data", "filter_function": AwardsElasticsearchDownload.query, "annotations_function": universal_award_matview_annotations, }, # Transaction Level "transactions": { "source_type": "award", "table": TransactionSearch, "table_name": "transaction", "type_name": "PrimeTransactions", "download_name": 
"{agency}{type}_PrimeTransactions_{timestamp}", "contract_data": "transaction__contract_data", "assistance_data": "transaction__assistance_data", "filter_function": transaction_search_filter, "annotations_function": transaction_search_annotations, }, # Elasticsearch Transaction Level "elasticsearch_transactions": { "source_type": "award", "table": TransactionSearch, "table_name": "transaction", "type_name": "PrimeTransactions", "download_name": "{agency}{type}_PrimeTransactions_{timestamp}", "contract_data": "transaction__contract_data", "assistance_data": "transaction__assistance_data", "filter_function": TransactionsElasticsearchDownload.query, "annotations_function": transaction_search_annotations, }, # SubAward Level "sub_awards": { "source_type": "award", "table": SubawardView, "table_name": "subaward", "type_name": "Subawards", "download_name": "{agency}{type}_Subawards_{timestamp}", "contract_data": "award__latest_transaction__contract_data", "assistance_data": "award__latest_transaction__assistance_data", "filter_function": subaward_download, "annotations_function": subaward_annotations, }, # Appropriations Account Data "account_balances": { "source_type": "account", "table": AppropriationAccountBalances, "table_name": "account_balances", "download_name": "{data_quarters}_{agency}_{level}_AccountBalances_{timestamp}", "zipfile_template": "{data_quarters}_{agency}_{level}_AccountBalances_{timestamp}", "filter_function": account_download_filter, }, # Object Class Program Activity Account Data "object_class_program_activity": { "source_type": "account", "table": FinancialAccountsByProgramActivityObjectClass, "table_name": "object_class_program_activity", "download_name": "{data_quarters}_{agency}_{level}_AccountBreakdownByPA-OC_{timestamp}", "zipfile_template": "{data_quarters}_{agency}_{level}_AccountBreakdownByPA-OC_{timestamp}", "filter_function": account_download_filter, }, "award_financial": { "source_type": "account", "table": FinancialAccountsByAwards, "table_name": "award_financial", "download_name": "{data_quarters}_{agency}_{level}_AccountBreakdownByAward_{timestamp}", "zipfile_template": "{data_quarters}_{agency}_{level}_AccountBreakdownByAward_{timestamp}", "filter_function": account_download_filter, }, "idv_orders": { "source_type": "award", "table": Award, "table_name": "idv_orders", "download_name": "IDV_{piid}_Orders", "contract_data": "latest_transaction__contract_data", "filter_function": idv_order_filter, "is_for_idv": True, "annotations_function": idv_order_annotations, }, "idv_federal_account_funding": { "source_type": "account", "table": FinancialAccountsByAwards, "table_name": "award_financial", "download_name": "IDV_{piid}_FederalAccountFunding", "filter_function": idv_treasury_account_funding_filter, "is_for_idv": True, }, "idv_transaction_history": { "source_type": "award", "table": TransactionNormalized, "table_name": "idv_transaction_history", "download_name": "IDV_{piid}_TransactionHistory", "contract_data": "contract_data", "filter_function": idv_transaction_filter, "is_for_idv": True, "annotations_function": idv_transaction_annotations, }, "contract_federal_account_funding": { "source_type": "account", "table": FinancialAccountsByAwards, "table_name": "award_financial", "download_name": "Contract_{piid}_FederalAccountFunding", "filter_function": awards_treasury_account_funding_filter, "is_for_contract": True, }, "assistance_federal_account_funding": { "source_type": "account", "table": FinancialAccountsByAwards, "table_name": "award_financial", 
"download_name": "Assistance_{assistance_id}_FederalAccountFunding", "filter_function": awards_treasury_account_funding_filter, "is_for_assistance": True, }, "sub_contracts": { "source_type": "award", "table": SubawardView, "table_name": "subaward", "download_name": "Contract_{piid}_Sub-Awards", "contract_data": "award__latest_transaction__contract_data", "filter_function": awards_subaward_filter, "is_for_contract": True, "annotations_function": subaward_annotations, }, "sub_grants": { "source_type": "award", "table": SubawardView, "table_name": "subaward", "download_name": "Assistance_{assistance_id}_Sub-Awards", "assistance_data": "award__latest_transaction__assistance_data", "filter_function": awards_subaward_filter, "is_for_assistance": True, "annotations_function": subaward_annotations, }, "contract_transactions": { "source_type": "award", "table": TransactionNormalized, "table_name": "idv_transaction_history", "download_name": "Contract_{piid}_TransactionHistory", "contract_data": "contract_data", "filter_function": awards_transaction_filter, "is_for_contract": True, "annotations_function": idv_transaction_annotations, }, "assistance_transactions": { "source_type": "award", "table": TransactionNormalized, "table_name": "assistance_transaction_history", "download_name": "Assistance_{assistance_id}_TransactionHistory", "assistance_data": "assistance_data", "filter_function": awards_transaction_filter, "is_for_assistance": True, "annotations_function": idv_transaction_annotations, }, "disaster_recipient": { "source_type": "disaster", "table": AwardSearchView, "table_name": "recipient", "download_name": "COVID-19_Recipients_{award_category}_{timestamp}", "filter_function": disaster_filter_function, "base_fields": ["recipient_name", "recipient_unique_id"], }, } # Bulk Download still uses "prime awards" instead of "transactions" VALUE_MAPPINGS["prime_awards"] = VALUE_MAPPINGS["transactions"] # List of CFO CGACS for list agencies viewset in the correct order, names included for reference # TODO: Find a solution that marks the CFO agencies in the database AND have the correct order CFO_CGACS_MAPPING = OrderedDict( [ ("012", "Department of Agriculture"), ("013", "Department of Commerce"), ("097", "Department of Defense"), ("091", "Department of Education"), ("089", "Department of Energy"), ("075", "Department of Health and Human Services"), ("070", "Department of Homeland Security"), ("086", "Department of Housing and Urban Development"), ("015", "Department of Justice"), ("1601", "Department of Labor"), ("019", "Department of State"), ("014", "Department of the Interior"), ("020", "Department of the Treasury"), ("069", "Department of Transportation"), ("036", "Department of Veterans Affairs"), ("068", "Environmental Protection Agency"), ("047", "General Services Administration"), ("080", "National Aeronautics and Space Administration"), ("049", "National Science Foundation"), ("031", "Nuclear Regulatory Commission"), ("024", "Office of Personnel Management"), ("073", "Small Business Administration"), ("028", "Social Security Administration"), ("072", "Agency for International Development"), ] ) CFO_CGACS = list(CFO_CGACS_MAPPING.keys()) FILE_FORMATS = { "csv": {"delimiter": ",", "extension": "csv", "options": "WITH CSV HEADER"}, "tsv": {"delimiter": "\t", "extension": "tsv", "options": r"WITH CSV DELIMITER E'\t' HEADER"}, "pstxt": {"delimiter": "|", "extension": "txt", "options": "WITH CSV DELIMITER '|' HEADER"}, } VALID_ACCOUNT_SUBMISSION_TYPES = ("account_balances", 
"object_class_program_activity", "award_financial")
42.835664
105
0.691862
0
0
0
0
0
0
0
0
6,691
0.546159
be70bab0d740612dff3c9c4f650b4e73f95cd9c5
1,985
py
Python
python/modules/mysql_server.py
91-jinrong/-91_monitor
e0325229bffbb0df20d9337925b591eee8ac0289
[ "Apache-2.0" ]
1
2015-03-30T06:25:59.000Z
2015-03-30T06:25:59.000Z
python/modules/mysql_server.py
91-jinrong/91_monitor
e0325229bffbb0df20d9337925b591eee8ac0289
[ "Apache-2.0" ]
null
null
null
python/modules/mysql_server.py
91-jinrong/91_monitor
e0325229bffbb0df20d9337925b591eee8ac0289
[ "Apache-2.0" ]
null
null
null
#!/bin/env python
#-*-coding:utf-8-*-
import os
import sys
import string
import time
import datetime
import MySQLdb


class MySQL:
    def __init__(self, host, port, user, passwd, dbname, timeout, charset):  # was misspelled "__int__"
        self.host = host
        self.port = port
        self.user = user
        self.passwd = passwd
        self.dbname = dbname  # was "test", an undefined name
        self.timeout = timeout
        self.charset = charset

    def db_connect(self):
        connect = MySQLdb.connect(host=self.host, user=self.user, passwd=self.passwd, port=int(self.port),
                                  connect_timeout=int(self.timeout), charset=self.charset)
        return connect

    def execute(self, sql, param):
        conn = MySQLdb.connect(host=self.host, user=self.user, passwd=self.passwd, port=int(self.port),
                               connect_timeout=int(self.timeout), charset=self.charset)
        conn.select_db(self.dbname)
        cursor = conn.cursor()
        if param != '':
            cursor.execute(sql, param)
        else:
            cursor.execute(sql)
        conn.commit()
        cursor.close()
        conn.close()

    def query(self, sql):
        conn = MySQLdb.connect(host=self.host, user=self.user, passwd=self.passwd, port=int(self.port),
                               connect_timeout=int(self.timeout), charset=self.charset)
        conn.select_db(self.dbname)
        cursor = conn.cursor()
        count = cursor.execute(sql)
        if count == 0:
            result = 0
        else:
            result = cursor.fetchall()
        # close before returning; the original returned first, leaving the cleanup unreachable
        cursor.close()
        conn.close()
        return result

    def get_option(self, key):
        conn = MySQLdb.connect(host=self.host, user=self.user, passwd=self.passwd, port=int(self.port),
                               connect_timeout=int(self.timeout), charset=self.charset)
        conn.select_db(self.dbname)
        cursor = conn.cursor()
        sql = "select value from options where name='" + key + "'"  # stray '+' removed; see parameterized variant below
        count = cursor.execute(sql)
        if count == 0:
            result = 0
        else:
            result = cursor.fetchone()[0]
        cursor.close()
        conn.close()
        return result
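The string-concatenated SQL in `get_option` is open to injection. A short sketch of the same lookup using MySQLdb's parameter substitution; it is a hypothetical method for the `MySQL` class above, reusing its `db_connect` helper:

# Hypothetical parameterized variant -- not part of the original file.
def get_option_safe(self, key):
    conn = self.db_connect()
    conn.select_db(self.dbname)
    cursor = conn.cursor()
    count = cursor.execute("select value from options where name = %s", (key,))  # driver escapes the value
    result = cursor.fetchone()[0] if count else 0
    cursor.close()
    conn.close()
    return result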
31.507937
156
0.624181
1,866
0.94005
0
0
0
0
0
0
82
0.04131
be71f6b56c912c07678325e23f7389ad744e9921
149
py
Python
Ethan File/Carrentsystem/Carrentsystem/test.py
hklhfong/Car-Rental-System
3a4844eea8e9dbf85f4ce62b5115772f48277240
[ "Apache-2.0" ]
null
null
null
Ethan File/Carrentsystem/Carrentsystem/test.py
hklhfong/Car-Rental-System
3a4844eea8e9dbf85f4ce62b5115772f48277240
[ "Apache-2.0" ]
null
null
null
Ethan File/Carrentsystem/Carrentsystem/test.py
hklhfong/Car-Rental-System
3a4844eea8e9dbf85f4ce62b5115772f48277240
[ "Apache-2.0" ]
null
null
null
import sqlite3

conn = sqlite3.connect("db")
cur = conn.cursor()
cur.execute("select * from CAR_ID limit 5;")
results = cur.fetchall()
print(results)
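A small variant of the same check that closes the connection deterministically; the `db` filename and `CAR_ID` table are taken from the snippet above:

# Hypothetical variant -- not part of the original test script.
import sqlite3

with sqlite3.connect("db") as conn:
    rows = conn.execute("select * from CAR_ID limit 5;").fetchall()
print(rows)
conn.close()  # the context manager only commits/rolls back; it does not close the connection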
21.285714
44
0.724832
0
0
0
0
0
0
0
0
35
0.234899
be72c9a20697c3fb3a739104db43d4e053b51e7c
249
py
Python
tests/integration/hub_usage/dummyhub_slow/__init__.py
abreu4/jina
d1d045e9e0933dffb3bd668cb9cfebab6cd52202
[ "Apache-2.0" ]
2
2021-01-22T07:34:35.000Z
2021-01-23T04:36:41.000Z
tests/integration/hub_usage/dummyhub_slow/__init__.py
abreu4/jina
d1d045e9e0933dffb3bd668cb9cfebab6cd52202
[ "Apache-2.0" ]
4
2020-09-01T17:47:27.000Z
2021-04-16T23:11:57.000Z
tests/integration/hub_usage/dummyhub_slow/__init__.py
abreu4/jina
d1d045e9e0933dffb3bd668cb9cfebab6cd52202
[ "Apache-2.0" ]
null
null
null
import time

from jina.executors.crafters import BaseCrafter

from .helper import foo


class DummyHubExecutorSlow(BaseCrafter):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        time.sleep(15)
        foo()
19.153846
47
0.678715
161
0.646586
0
0
0
0
0
0
0
0
be7401e08d215565703c4b9fa33b7d5e7ca05a69
8,827
py
Python
src/evaluation_utils.py
philipp-hess/deep-learning-for-heavy-rainfall
dbec03245dd8db0c5f2f53af014b8dd8d80f245c
[ "MIT" ]
null
null
null
src/evaluation_utils.py
philipp-hess/deep-learning-for-heavy-rainfall
dbec03245dd8db0c5f2f53af014b8dd8d80f245c
[ "MIT" ]
null
null
null
src/evaluation_utils.py
philipp-hess/deep-learning-for-heavy-rainfall
dbec03245dd8db0c5f2f53af014b8dd8d80f245c
[ "MIT" ]
null
null
null
import numpy as np import pandas as pd from scipy.stats import spearmanr from sklearn.metrics import f1_score, precision_score, recall_score from IPython.display import display, clear_output from sklearn.metrics import confusion_matrix import scipy.stats as st def continuous_to_categorical_with_quantiles(data: np.ndarray, quantiles:list ) -> np.ndarray: """ Converts continuous data into binar classes using quantiles Args: data: shape [n_time, n_lat, n_lon] quantiles: list containing quantiles Returns: tmp: shape [n_quantiles, n_time*n_lat*n_lon] binary data """ shape = data.shape tmp = np.zeros((len(quantiles), shape[0], shape[1], shape[2])) for i, quantile in enumerate(quantiles): threshold = np.quantile(data, quantile) binary = np.where(data > threshold, 1, 0).reshape((shape[0], shape[1], shape[2],-1)) tmp[i] = binary.squeeze() return tmp def global_thresholds_from_quantiles(data: np.ndarray, quantiles:list) -> list: thresholds = [np.quantile(data, quantile) for quantile in quantiles] return thresholds def local_thresholds_from_percentiles(data: np.ndarray, percentile: float, data_min=0) -> np.ndarray: n_lat = data.shape[1] n_lon = data.shape[2] threshold_map = np.zeros((n_lat, n_lon)) for lat in range(n_lat): for lon in range(n_lon): tmp = data[:, lat, lon] threshold = st.scoreatpercentile(tmp[tmp>data_min], percentile) if not np.isnan(threshold): threshold_map[lat, lon] = threshold return threshold_map def get_threshold_mask(data: np.ndarray, percentile: float, data_min=0) -> np.ndarray: n_lat = data.shape[1] n_lon = data.shape[2] mask = np.zeros((n_lat, n_lon)) for lat in range(n_lat): for lon in range(n_lon): tmp = data[:, lat, lon] threshold = st.scoreatpercentile(tmp[tmp>data_min], percentile) if np.isnan(threshold): mask[lat, lon] = 1 return mask def continuous_to_categorical_with_thresholds(data: np.ndarray, thresholds: list) -> np.ndarray: """ Converts continuous data into binar classes using thresholds Args: data: shape [n_time, n_lat, n_lon] quantiles: list containing thresholds Returns: tmp: shape [n_quantiles, n_time*n_lat*n_lon] binary data """ shape = data.shape tmp = np.zeros((len(thresholds), shape[0], shape[1], shape[2])) for i, threshold in enumerate(thresholds): binary = np.where(data > threshold, 1, 0).reshape((shape[0], shape[1], shape[2],-1)) tmp[i] = binary.squeeze() return tmp def categorical_evaluation(prediction: np.ndarray, target: np.ndarray, metric_name: str, mask=None) -> pd.DataFrame: """ Evaluates a regression prediction with the F1 score on quantile-based categories Args: prediction: shape [n_classes, X] target: shape [n_classes, X] X can be any other number of dimensions > 0 Returns: scores (list): List with an element per class """ n_classes = prediction.shape[0] prediction = prediction.reshape(n_classes, -1) target = target.reshape(n_classes, -1) scores = [] for c in range(n_classes): forecast_skill = ForecastSkill(prediction[c], target[c]) forecast_skill.compute_categories(mask=mask) scores.append(getattr(forecast_skill, f'get_{metric_name}')()) return scores def geographic_categorical_evaluation(prediction: np.ndarray, target: np.ndarray, metric_name: str) -> np.ndarray: """ Evaluates a regression prediction with the F1 score on quantile-based categories Args: prediction: shape [n_classes, n_time, n_lat, n_lon] target: shape [n_classes, n_time, n_lat, n_lon] Returns: scores: shape [n_classes, n_lat, n_lon] """ n_classes = prediction.shape[0] n_lat = prediction.shape[2] n_lon = prediction.shape[3] scores = np.zeros((n_classes, n_lat, n_lon)) for c in 
range(n_classes): for lat in range(n_lat): for lon in range(n_lon): grid_cell_prediction = prediction[c, :, lat, lon] grid_cell_target = target[c, :, lat, lon] if sum(grid_cell_prediction) == 0 and sum(grid_cell_target) == 0: scores[c, lat, lon] = -999 else: forecast_skill = ForecastSkill(prediction[c, :, lat, lon], target[c, :, lat, lon]) forecast_skill.compute_categories() scores[c, lat, lon] = getattr(forecast_skill, f'get_{metric_name}')() print(f'Progress {int((lat * lon)/(n_lat*n_lon)*100):2d}%') clear_output(wait=True) return scores class ForecastSkill: """ A collection of categorical forecast skill metrics """ def __init__(self, prediction, target): self.prediction = prediction self.target = target self.true_positive = 0 self.false_positive = 0 self.false_negative = 0 self.true_negative = 0 def compute_categories(self, mask=None): self.target = self.target.flatten().astype('int') self.prediction = self.prediction.flatten().astype('int') if mask is not None: mask = mask.flatten() indices_to_remove = np.where(mask==1) self.target = np.delete(self.target, indices_to_remove) self.prediction = np.delete(self.prediction, indices_to_remove) categories = confusion_matrix(self.target, self.prediction) self.true_negative, self.false_positive, self.false_negative, self.true_positive = categories.ravel() def print_category_sums(self): total = self.target.size print(f'tp: {self.true_positive/total*100:2.3f}') print(f'fp: {self.false_positive/total*100:2.3f}') print(f'fn: {self.false_negative/total*100:2.3f}') print(f'tn: {self.true_negative/total*100:2.3f}') def get_category_sums(self): return self.true_positive, self.false_positive, self.false_negative, self.true_negative def get_heidke_skill_score(self) -> float: tp = self.true_positive fp = self.false_positive fn = self.false_negative tn = self.true_negative nominator = 2*(tp*tn - fp*fn) denominator = ((tp + fn)*(fn + tn) + (tp + fp)*(fp + tn)) if denominator > 0: return nominator/denominator else: raise ValueError('devision by zero') def get_critical_success_index(self) -> float: hits = self.true_positive false_alarms = self.false_positive misses = self.false_negative nominator = hits denominator = hits + misses + false_alarms if denominator > 0: return nominator/denominator else: raise ValueError('devision by zero') def get_false_alarm_ratio(self) -> float: hits = self.true_positive false_alarms = self.false_positive nominator = false_alarms denominator = hits + false_alarms if denominator > 0: return nominator/denominator else: raise ValueError('devision by zero') def get_probability_of_detection(self) -> float: hits = self.true_positive misses = self.false_negative nominator = hits denominator = hits + misses if denominator > 0: return nominator/denominator else: raise ValueError('devision by zero') def get_f1(self) -> float: return f1_score(self.target, self.prediction, average='binary') def get_recall(self) -> float: return recall_score(self.target, self.prediction, average='binary') def get_precision(self) -> float: return precision_score(self.target, self.prediction, average='binary') def rmse(output, target): return np.sqrt(((output-target)**2).mean(axis=0)) def me(output, target): return (output-target).mean(axis=0) def corr(output, target): result = np.zeros((output.shape[1], output.shape[2])) for i in range(output.shape[1]): for j in range(output.shape[2]): result[i,j] = spearmanr(output[:,i,j], target[:,i,j])[0] return result
32.814126
116
0.605528
3,383
0.383256
0
0
0
0
0
0
1,705
0.193157
be74846aa8bb878ca4aaee267b213fd10335d381
1,709
py
Python
poloniex_apis/api_models/deposit_withdrawal_history.py
xJuggl3r/anapolo
5ffd87594c75575c5a19b9f47bf1b6606cfcdd1b
[ "MIT" ]
null
null
null
poloniex_apis/api_models/deposit_withdrawal_history.py
xJuggl3r/anapolo
5ffd87594c75575c5a19b9f47bf1b6606cfcdd1b
[ "MIT" ]
null
null
null
poloniex_apis/api_models/deposit_withdrawal_history.py
xJuggl3r/anapolo
5ffd87594c75575c5a19b9f47bf1b6606cfcdd1b
[ "MIT" ]
null
null
null
from collections import defaultdict

from poloniex_apis.api_models.ticker_price import TickerData


class DWHistory:
    def __init__(self, history):
        self.withdrawals = defaultdict(float)
        self.deposits = defaultdict(float)
        self.history = history

    def get_dw_history(self):
        for deposit in self.history['deposits']:
            if deposit['currency'] in self.deposits:
                self.deposits[deposit['currency']] += float(deposit['amount'])
            else:
                self.deposits[deposit['currency']] = float(deposit['amount'])

        for withdrawal in self.history['withdrawals']:
            if withdrawal['currency'] in self.withdrawals:
                self.withdrawals[withdrawal['currency']] += float(withdrawal['amount'])
            else:
                self.withdrawals[withdrawal['currency']] = float(withdrawal['amount'])

        return self.deposits, self.withdrawals

    def get_btc_balance(self, ticker):
        balance = 0
        for deposit_symbol, amount in self.deposits.items():
            if deposit_symbol == u"USDT":
                balance += amount * ticker.get_price("USDT_BTC")
            if deposit_symbol != u'BTC':
                balance += amount * ticker.get_price("BTC_" + deposit_symbol)
            else:
                balance += amount

        for withdrawal_symbol, amount in self.withdrawals.items():
            if withdrawal_symbol == u"USDT":
                balance -= amount * ticker.get_price("USDT_BTC")
            if withdrawal_symbol != u'BTC':
                balance -= amount * ticker.get_price("BTC_" + withdrawal_symbol)
            else:
                balance -= amount

        return balance
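A short usage sketch, assuming a `history` dict shaped like Poloniex's deposits/withdrawals response and a ticker object exposing `get_price`; the payload and prices below are made-up stand-ins:

# Hypothetical usage -- not part of the original module.
history = {
    'deposits': [{'currency': 'BTC', 'amount': '0.5'}],
    'withdrawals': [{'currency': 'ETH', 'amount': '2.0'}],
}

class FakeTicker:
    def get_price(self, pair):
        return {'BTC_ETH': 0.05, 'USDT_BTC': 30000.0}.get(pair, 0.0)

dw = DWHistory(history)
deposits, withdrawals = dw.get_dw_history()
print(dw.get_btc_balance(FakeTicker()))  # 0.5 - 2.0 * 0.05 = 0.4 BTC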
39.744186
87
0.599181
1,607
0.940316
0
0
0
0
0
0
173
0.101229
be748f98db9ba8c29d78f47f7af4dd25c01061b7
7,320
py
Python
app/handler.py
vnrag/aws-pipeline-dashboard
679af73f8e777990840bc829a014e205f0c94ac0
[ "BSD-3-Clause" ]
null
null
null
app/handler.py
vnrag/aws-pipeline-dashboard
679af73f8e777990840bc829a014e205f0c94ac0
[ "BSD-3-Clause" ]
null
null
null
app/handler.py
vnrag/aws-pipeline-dashboard
679af73f8e777990840bc829a014e205f0c94ac0
[ "BSD-3-Clause" ]
null
null
null
from datetime import datetime,timezone import sys import boto3 import json def pipeline_event(event, context): state = get_final_state(event) if state is None: return event_time = datetime.strptime(event['time'], '%Y-%m-%dT%H:%M:%SZ').replace(tzinfo=timezone.utc) metric_data = [] if event['detail-type'] == "CodePipeline Pipeline Execution State Change": # Write green/red time based on last execution state prior_execution = get_prior_execution(event['detail']['pipeline'], event['detail']['execution-id']) if prior_execution is not None: last_execution_state = prior_execution['status'] seconds_since_last_execution = (event_time - prior_execution['lastUpdateTime']).total_seconds() if last_execution_state == "Succeeded": append_metric(metric_data, "GreenTime", event, seconds=seconds_since_last_execution) elif last_execution_state == "Failed": append_metric(metric_data, "RedTime", event, seconds=seconds_since_last_execution) if state == "SUCCEEDED": append_metric(metric_data, "SuccessCount", event, count=1) current_execution = get_execution(event['detail']['pipeline'], event['detail']['execution-id']) if current_execution is not None: duration = (event_time - current_execution['startTime']).total_seconds() append_metric(metric_data, "LeadTime", event, seconds=duration) elif state == "FAILED": append_metric(metric_data, "FailureCount", event, count=1) elif event['detail-type'] == "CodePipeline Stage Execution State Change": if state == "SUCCEEDED": append_metric(metric_data, "SuccessCount", event, count=1) #append_metric(metric_data, "LeadTime", event, seconds=duration) elif state == "FAILED": append_metric(metric_data, "FailureCount", event, count=1) elif event['detail-type'] == "CodePipeline Action Execution State Change": if state == "SUCCEEDED": append_metric(metric_data, "SuccessCount", event, count=1) elif state == "FAILED": append_metric(metric_data, "FailureCount", event, count=1) if len(metric_data) > 0: client = boto3.client('cloudwatch') client.put_metric_data( Namespace='Pipeline', MetricData=metric_data ) # Return the state from the event iff it's one of SUCCEEDED or FAILED def get_final_state(event): if 'detail' in event and 'state' in event['detail']: if any(event['detail']['state'] in s for s in ['SUCCEEDED', 'FAILED']): return event['detail']['state'] return None # Return the execution summary for a given execution id def get_execution(pipeline_name, execution_id): client = boto3.client('codepipeline') response = client.list_pipeline_executions(pipelineName=pipeline_name) for e in response['pipelineExecutionSummaries']: if e['pipelineExecutionId'] == execution_id: return e return None # Return the execution summary for the most prior final execution before a given execution id def get_prior_execution(pipeline_name, execution_id): client = boto3.client('codepipeline') response = client.list_pipeline_executions(pipelineName=pipeline_name) found_current = False for e in response['pipelineExecutionSummaries']: if found_current and any(e['status'] in s for s in ['Succeeded', 'Failed']): return e elif e['pipelineExecutionId'] == execution_id: found_current = True return None def append_metric(metric_list, metric_name, event, seconds=0, count=0): data = { 'MetricName': metric_name, 'Dimensions': [], 'Timestamp': datetime.strptime(event['time'], '%Y-%m-%dT%H:%M:%SZ'), } resource_parts = [] if 'pipeline' in event['detail']: data['Dimensions'].append({ 'Name': 'PipelineName', 'Value': event['detail']['pipeline'] }) resource_parts.append(event['detail']['pipeline']) if 'stage' in 
event['detail']: data['Dimensions'].append({ 'Name': 'StageName', 'Value': event['detail']['stage'] }) resource_parts.append(event['detail']['stage']) if 'action' in event['detail']: data['Dimensions'].append({ 'Name': 'ActionName', 'Value': event['detail']['action'] }) resource_parts.append(event['detail']['action']) if seconds > 0: data['Value'] = seconds data['Unit'] = 'Seconds' elif count > 0: data['Value'] = count data['Unit'] = 'Count' else: # no metric to add return print("resource=%s metric=%s value=%s" % ('.'.join(resource_parts), metric_name, data['Value'])) metric_list.append(data) def generate_dashboard(client): paginator = client.get_paginator('list_metrics') response_iterator = paginator.paginate( Namespace='Pipeline' ) pipeline_names = set() for response in response_iterator: for metric in response['Metrics']: for dim in metric['Dimensions']: if dim['Name'] == 'PipelineName': pipeline_names.add(dim['Value']) widgets = [] dashboard = { "widgets": widgets } y = 0 for pipeline_name in sorted(pipeline_names): widgets.append({ "type": "metric", "x": 0, "y": y, "width": 18, "height": 3, "properties": { "view": "singleValue", "metrics": [ [ "Pipeline", "SuccessCount", "PipelineName", pipeline_name, { "stat": "Sum", "period": 2592000 } ], [ ".", "FailureCount", ".", ".", { "stat": "Sum", "period": 2592000 } ], [ ".", "LeadTime", ".", ".", { "period": 2592000, "color": "#9467bd" } ], [ ".", "RedTime", ".", ".", { "stat": "Sum", "period": 2592000, "yAxis": "left", "color": "#d62728" } ], [ ".", "GreenTime", ".", ".", { "period": 2592000, "stat": "Sum", "color": "#2ca02c" } ] ], "region": "eu-central-1", "title": pipeline_name, "period": 300 } }) y += 3 widgets.append({ "type": "text", "x": 18, "y": 0, "width": 6, "height": 6, "properties": { "markdown": "\nAll metrics are calculated over the past 30 days\n\n* **SuccessCount** - count of all successful pipeline executions\n* **FailureCount** - count of all failed pipeline executions\n* **LeadTime** - average pipeline time for successful executions\n* **RedTime** - sum of all time spent with a red pipeline\n* **GreenTime** - sum of all time spent with a green pipeline\n" } }) return dashboard def dashboard_event(event, context): client = boto3.client('cloudwatch') dashboard = generate_dashboard(client) client.put_dashboard( DashboardName='Pipeline', DashboardBody=json.dumps(dashboard) ) if __name__ == '__main__': dashboard_event(None, None)
35.882353
396
0.593579
0
0
0
0
0
0
0
0
2,420
0.330601
be74f9e10e7b3e7db834044fe7d0389031a09884
4,507
py
Python
cogs/commands.py
sudo-do/discord-chatbot
970af7d8b9275a518396648ebe5c33c291370d6a
[ "MIT" ]
1
2021-05-14T08:01:53.000Z
2021-05-14T08:01:53.000Z
cogs/commands.py
sudo-do/discord-chatbot
970af7d8b9275a518396648ebe5c33c291370d6a
[ "MIT" ]
null
null
null
cogs/commands.py
sudo-do/discord-chatbot
970af7d8b9275a518396648ebe5c33c291370d6a
[ "MIT" ]
null
null
null
import discord import sqlite3 from discord.ext import commands conn= sqlite3.connect("dbs/main.db") class Commands(commands.Cog): def __init__(self, bot): self.bot = bot @commands.command() @commands.cooldown(1, 30, commands.BucketType.guild) @commands.has_permissions(manage_channels=True) async def setchannel(self, ctx, *, cbchannel: discord.TextChannel = None): if cbchannel == None: await ctx.send(":warning: You have to mention the channel that you want as the channel in which users will talk to me. Example: `!!setchannel #channel-name`") return elif cbchannel != None: try: cur= conn.cursor() guildID= str(ctx.guild.id) r= cur.execute("SELECT channel_id FROM main WHERE guild_id = '"+guildID+"'") row= None for row in r: ... if row != None: await ctx.send(f":warning: The channel is already setup to <#{row[0]}>. Use `!!settings channel` to change it.") elif row == None: guildID= str(ctx.guild.id) channelID= str(cbchannel.id) cur.execute("INSERT INTO main(guild_id, channel_id, toggle) VALUES('"+guildID+"', '"+channelID+"', '1')") conn.commit() await ctx.send(f":tada: Start talking to me in {cbchannel.mention}!") except discord.NotFound: await ctx.send(":warning: I can't find that channel. Make sure I can access it or channel is valid.") return except discord.MissingPermissions: await ctx.send(":warning: I can't send messages in that channel.") return @commands.group(invoke_without_command=True) async def settings(self, ctx): em= discord.Embed(title="Discord Chat Bot Settings", description="Welcome to Discord Chat Bot Settings! Here are the list of commands you can use to setup the bot. If this is your first time with this bot, Use the `!!setchannel` command first. **Arguments enclosed in `<>` are required!**") em.add_field(name="`!!settings channel <channel_mention>`", value="Updates the chatting channel.") em.add_field(name="`!!settings toggle <toggle>`", value="Toggles the bot chat on or off. This doesn't disable commands.") await ctx.send(embed=em) @settings.command() @commands.has_permissions(manage_channels=True) @commands.cooldown(1, 30, commands.BucketType.guild) async def channel(self, ctx, *, cbchannel: discord.TextChannel = None): cur= conn.cursor() if cbchannel == None: guildID= str(ctx.guild.id) r= cur.execute("SELECT channel_id FROM main WHERE guild_id = '"+guildID+"'") row= None for row in r: ... if row != None: await ctx.send(f"I'm currently waiting for messages in <#{row[0]}>. Run `!!settings channel #channel-mention` to change this.") elif row == None: await ctx.send("Channel is not even setup yet! Use `!!setchannel` to set a channel.") elif cbchannel != None: guildID= str(ctx.guild.id) channelID= str(cbchannel.id) r= cur.execute("SELECT channel_id FROM main WHERE guild_id = '"+guildID+"'") row= None for row in r: ... if row == None: await ctx.send("Channel is not even setup yet! 
Use `!!setchannel` to set a channel.") elif row != None: cur.execute("UPDATE main SET channel_id = '"+channelID+"' where guild_id = '"+guildID+"'") conn.commit() await ctx.send(f":tada: Channel has been updated to {cbchannel.mention}!") @settings.command() @commands.has_permissions(manage_channels=True) @commands.cooldown(1, 30, commands.BucketType.guild) async def toggle(self, ctx, *, toggle = None): if toggle == None: await ctx.send(":warning: Use the command again but mention the toggle i.e `on` or `off` For example: `!!settings toggle on` to toggle on, `!!settings toggle off` to toggle off.") elif toggle != None: if toggle.lower() == "on": toggle = '1' elif toggle.lower() == 'off': toggle = '0' else: await ctx.send(":warning: Use the command again but mention the toggle correctly. i.e `on` or `off` For example: `!!settings toggle on` to toggle on, `!!settings toggle off` to toggle off.") return guildID= str(ctx.guild.id) cur= conn.cursor() r= cur.execute("SELECT toggle FROM main WHERE guild_id = '"+guildID+"'") row= None for row in r: ... if row == None: await ctx.send("Channel is not setup yet! Use `!!setchannel` to set a channel.") elif row != None: cur.execute("UPDATE main SET toggle = '"+toggle+"' where guild_id = '"+guildID+"'") conn.commit() await ctx.send(f":tada: Toggle updated!") def setup(bot): bot.add_cog(Commands(bot))
36.942623
292
0.676503
4,357
0.966718
0
0
4,272
0.947859
3,851
0.854449
1,985
0.440426
be75b53bc3cf75e488408e710557a7588ee69c9c
6,210
py
Python
poetry/console/commands/self/update.py
mgasner/poetry
44221689e05feb0cc93c231096334f8eefbf86fc
[ "MIT" ]
null
null
null
poetry/console/commands/self/update.py
mgasner/poetry
44221689e05feb0cc93c231096334f8eefbf86fc
[ "MIT" ]
null
null
null
poetry/console/commands/self/update.py
mgasner/poetry
44221689e05feb0cc93c231096334f8eefbf86fc
[ "MIT" ]
null
null
null
import hashlib import os import shutil import subprocess import sys import tarfile from functools import cmp_to_key from gzip import GzipFile try: from urllib.error import HTTPError from urllib.request import urlopen except ImportError: from urllib2 import HTTPError from urllib2 import urlopen from cleo import argument from cleo import option from ..command import Command class SelfUpdateCommand(Command): name = "update" description = "Updates poetry to the latest version." arguments = [argument("version", "The version to update to.", optional=True)] options = [option("preview", None, "Install prereleases.")] BASE_URL = "https://github.com/sdispater/poetry/releases/download" @property def home(self): from poetry.utils._compat import Path from poetry.utils.appdirs import expanduser home = Path(expanduser("~")) return home / ".poetry" @property def lib(self): return self.home / "lib" @property def lib_backup(self): return self.home / "lib-backup" def handle(self): from poetry.__version__ import __version__ from poetry.repositories.pypi_repository import PyPiRepository from poetry.semver import Version from poetry.utils._compat import Path current = Path(__file__) try: current.relative_to(self.home) except ValueError: raise RuntimeError( "Poetry was not installed with the recommended installer. " "Cannot update automatically." ) version = self.argument("version") if not version: version = ">=" + __version__ repo = PyPiRepository(fallback=False) packages = repo.find_packages( "poetry", version, allow_prereleases=self.option("preview") ) if not packages: self.line("No release found for the specified version") return packages.sort( key=cmp_to_key( lambda x, y: 0 if x.version == y.version else int(x.version < y.version or -1) ) ) release = None for package in packages: if package.is_prerelease(): if self.option("preview"): release = package break continue release = package break if release is None: self.line("No new release found") return if release.version == Version.parse(__version__): self.line("You are using the latest version") return self.update(release) def update(self, release): version = release.version self.line("Updating to <info>{}</info>".format(version)) if self.lib_backup.exists(): shutil.rmtree(str(self.lib_backup)) # Backup the current installation if self.lib.exists(): shutil.copytree(str(self.lib), str(self.lib_backup)) shutil.rmtree(str(self.lib)) try: self._update(version) except Exception: if not self.lib_backup.exists(): raise shutil.copytree(str(self.lib_backup), str(self.lib)) shutil.rmtree(str(self.lib_backup)) raise finally: if self.lib_backup.exists(): shutil.rmtree(str(self.lib_backup)) self.line("") self.line("") self.line( "<info>Poetry</info> (<comment>{}</comment>) is installed now. 
Great!".format( version ) ) def _update(self, version): from poetry.utils.helpers import temporary_directory platform = sys.platform if platform == "linux2": platform = "linux" checksum = "poetry-{}-{}.sha256sum".format(version, platform) try: r = urlopen(self.BASE_URL + "/{}/{}".format(version, checksum)) except HTTPError as e: if e.code == 404: raise RuntimeError("Could not find {} file".format(checksum)) raise checksum = r.read().decode() # We get the payload from the remote host name = "poetry-{}-{}.tar.gz".format(version, platform) try: r = urlopen(self.BASE_URL + "/{}/{}".format(version, name)) except HTTPError as e: if e.code == 404: raise RuntimeError("Could not find {} file".format(name)) raise meta = r.info() size = int(meta["Content-Length"]) current = 0 block_size = 8192 bar = self.progress_bar(max=size) bar.set_format(" - Downloading <info>{}</> <comment>%percent%%</>".format(name)) bar.start() sha = hashlib.sha256() with temporary_directory(prefix="poetry-updater-") as dir_: tar = os.path.join(dir_, name) with open(tar, "wb") as f: while True: buffer = r.read(block_size) if not buffer: break current += len(buffer) f.write(buffer) sha.update(buffer) bar.set_progress(current) bar.finish() # Checking hashes if checksum != sha.hexdigest(): raise RuntimeError( "Hashes for {} do not match: {} != {}".format( name, checksum, sha.hexdigest() ) ) gz = GzipFile(tar, mode="rb") try: with tarfile.TarFile(tar, fileobj=gz, format=tarfile.PAX_FORMAT) as f: f.extractall(str(self.lib)) finally: gz.close() def process(self, *args): return subprocess.check_output(list(args), stderr=subprocess.STDOUT) def _bin_path(self, base_path, bin): if sys.platform == "win32": return (base_path / "Scripts" / bin).with_suffix(".exe") return base_path / "bin" / bin
27.972973
90
0.54847
5,812
0.93591
0
0
334
0.053784
0
0
901
0.145089
be75c777b16f1617c2f87efa99ed969f4c41aed6
1,192
py
Python
osp/test/corpus/syllabus/test_text.py
davidmcclure/open-syllabus-project
078cfd4c5a257fbfb0901d43bfbc6350824eed4e
[ "Apache-2.0" ]
220
2016-01-22T21:19:02.000Z
2022-01-25T04:33:55.000Z
osp/test/corpus/syllabus/test_text.py
davidmcclure/open-syllabus-project
078cfd4c5a257fbfb0901d43bfbc6350824eed4e
[ "Apache-2.0" ]
14
2016-01-23T14:34:39.000Z
2016-09-19T19:58:37.000Z
osp/test/corpus/syllabus/test_text.py
davidmcclure/open-syllabus-project
078cfd4c5a257fbfb0901d43bfbc6350824eed4e
[ "Apache-2.0" ]
14
2016-02-03T13:47:48.000Z
2019-03-27T13:09:05.000Z
from osp.corpus.syllabus import Syllabus
from osp.test.utils import requires_tika


def test_empty(mock_osp):

    """
    Should return None if the file is empty.
    """

    path = mock_osp.add_file(content='', ftype='plain')
    syllabus = Syllabus(path)

    assert syllabus.text == None


def test_plaintext(mock_osp):

    """
    Should extract text from vanilla text files.
    """

    path = mock_osp.add_file(content='text', ftype='plain')
    syllabus = Syllabus(path)

    assert syllabus.text == 'text'


def test_html(mock_osp):

    """
    Should extract text from HTML files.
    """

    path = mock_osp.add_file(content='<p>text</p>', ftype='html')
    syllabus = Syllabus(path)

    assert syllabus.text == 'text'


def test_pdf(mock_osp):

    """
    Should extract text from PDF files.
    """

    path = mock_osp.add_file(content='text', ftype='pdf')
    syllabus = Syllabus(path)

    assert syllabus.text.strip() == 'text'


@requires_tika
def test_office(mock_osp):

    """
    Should extract text from office files.
    """

    path = mock_osp.add_file(content='text', ftype='docx')
    syllabus = Syllabus(path)

    assert syllabus.text.strip() == 'text'
18.060606
65
0.645134
0
0
0
0
235
0.197148
0
0
361
0.302852
be763dff688768c2aba41209e3bec63f50ee2a53
19,099
py
Python
boa_test/tests/test_ico_template.py
mixbee/neo-boa
da7366c26c7b8e60afb9ac27439a1da37b0be355
[ "MIT" ]
4
2018-08-22T03:30:34.000Z
2019-04-16T10:54:08.000Z
boa_test/tests/test_ico_template.py
mixbee/neo-boa
da7366c26c7b8e60afb9ac27439a1da37b0be355
[ "MIT" ]
3
2018-09-03T09:19:26.000Z
2019-01-24T00:06:29.000Z
boa_test/tests/test_ico_template.py
mixbee/neo-boa
da7366c26c7b8e60afb9ac27439a1da37b0be355
[ "MIT" ]
12
2018-07-19T06:36:44.000Z
2019-05-13T05:45:58.000Z
from boa_test.tests.boa_test import BoaFixtureTest from boa.compiler import Compiler from neo.Core.TX.Transaction import Transaction from neo.Prompt.Commands.BuildNRun import TestBuild from neo.EventHub import events from neo.SmartContract.SmartContractEvent import SmartContractEvent, NotifyEvent from neo.Settings import settings from neo.Prompt.Utils import parse_param from neo.Core.FunctionCode import FunctionCode from neocore.Fixed8 import Fixed8 from boa_test.example.demo.nex.token import * import shutil import os from logzero import logger settings.USE_DEBUG_STORAGE = True settings.DEBUG_STORAGE_PATH = './fixtures/debugstorage' class TestContract(BoaFixtureTest): dispatched_events = [] dispatched_logs = [] @classmethod def tearDownClass(cls): super(BoaFixtureTest, cls).tearDownClass() try: if os.path.exists(settings.debug_storage_leveldb_path): shutil.rmtree(settings.debug_storage_leveldb_path) else: logger.error("debug storage path doesn't exist") except Exception as e: logger.error("couldn't remove debug storage %s " % e) @classmethod def setUpClass(cls): super(TestContract, cls).setUpClass() def on_notif(evt): print(evt) cls.dispatched_events.append(evt) print("dispatched events %s " % cls.dispatched_events) def on_log(evt): print(evt) cls.dispatched_logs.append(evt) events.on(SmartContractEvent.RUNTIME_NOTIFY, on_notif) events.on(SmartContractEvent.RUNTIME_LOG, on_log) def test_ICOTemplate_1(self): output = Compiler.instance().load('%s/boa_test/example/demo/ICO_Template.py' % TestContract.dirname).default out = output.write() # print(output.to_s()) tx, results, total_ops, engine = TestBuild(out, ['name', '[]'], self.GetWallet1(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetString(), TOKEN_NAME) tx, results, total_ops, engine = TestBuild(out, ['symbol', '[]'], self.GetWallet1(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetString(), TOKEN_SYMBOL) tx, results, total_ops, engine = TestBuild(out, ['decimals', '[]'], self.GetWallet1(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBigInteger(), TOKEN_DECIMALS) tx, results, total_ops, engine = TestBuild(out, ['totalSupply', '[]'], self.GetWallet1(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBigInteger(), 0) tx, results, total_ops, engine = TestBuild(out, ['nonexistentmethod', '[]'], self.GetWallet1(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetString(), 'unknown operation') # deploy with wallet 2 should fail CheckWitness tx, results, total_ops, engine = TestBuild(out, ['deploy', '[]'], self.GetWallet2(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBoolean(), False) tx, results, total_ops, engine = TestBuild(out, ['deploy', '[]'], self.GetWallet1(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBoolean(), True) # second time, it should already be deployed and return false tx, results, total_ops, engine = TestBuild(out, ['deploy', '[]'], self.GetWallet1(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBoolean(), False) # now total supply should be equal to the initial owner amount tx, results, total_ops, engine = TestBuild(out, ['totalSupply', '[]'], self.GetWallet1(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBigInteger(), TOKEN_INITIAL_AMOUNT) # now the owner should have a balance of the TOKEN_INITIAL_AMOUNT tx, results, 
total_ops, engine = TestBuild(out, ['balanceOf', parse_param([bytearray(TOKEN_OWNER)])], self.GetWallet1(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBigInteger(), TOKEN_INITIAL_AMOUNT) def test_ICOTemplate_2(self): output = Compiler.instance().load('%s/boa_test/example/demo/ICO_Template.py' % TestContract.dirname).default out = output.write() # now transfer tokens to wallet 2 TestContract.dispatched_events = [] test_transfer_amount = 2400000001 tx, results, total_ops, engine = TestBuild(out, ['transfer', parse_param([bytearray(TOKEN_OWNER), self.wallet_2_script_hash.Data, test_transfer_amount])], self.GetWallet1(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBoolean(), True) self.assertEqual(len(TestContract.dispatched_events), 1) evt = TestContract.dispatched_events[0] self.assertIsInstance(evt, NotifyEvent) self.assertEqual(evt.addr_from.Data, bytearray(TOKEN_OWNER)) self.assertEqual(evt.addr_to, self.wallet_2_script_hash) self.assertEqual(evt.amount, test_transfer_amount) # now get balance of wallet 2 tx, results, total_ops, engine = TestBuild(out, ['balanceOf', parse_param([self.wallet_2_script_hash.Data])], self.GetWallet1(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBigInteger(), test_transfer_amount) # now the owner should have less tx, results, total_ops, engine = TestBuild(out, ['balanceOf', parse_param([bytearray(TOKEN_OWNER)])], self.GetWallet1(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBigInteger(), TOKEN_INITIAL_AMOUNT - test_transfer_amount) # now this transfer should fail tx, results, total_ops, engine = TestBuild(out, ['transfer', parse_param([bytearray(TOKEN_OWNER), self.wallet_2_script_hash.Data, TOKEN_INITIAL_AMOUNT])], self.GetWallet1(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBoolean(), False) # this transfer should fail because it is not signed by the 'from' address tx, results, total_ops, engine = TestBuild(out, ['transfer', parse_param([bytearray(TOKEN_OWNER), self.wallet_2_script_hash.Data, 10000])], self.GetWallet3(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBoolean(), False) # now this transfer should fail, this is from address with no tokens tx, results, total_ops, engine = TestBuild(out, ['transfer', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 1000])], self.GetWallet3(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBoolean(), False) # get balance of bad data tx, results, total_ops, engine = TestBuild(out, ['balanceOf', parse_param(['abc'])], self.GetWallet1(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBigInteger(), 0) # get balance no params tx, results, total_ops, engine = TestBuild(out, ['balanceOf', parse_param([])], self.GetWallet1(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBoolean(), False) def test_ICOTemplate_3_KYC(self): output = Compiler.instance().load('%s/boa_test/example/demo/ICO_Template.py' % TestContract.dirname).default out = output.write() print(output.to_s()) # now transfer tokens to wallet 2 TestContract.dispatched_events = [] # test mint tokens without being kyc verified tx, results, total_ops, engine = TestBuild(out, ['mintTokens', '[]', '--attach-neo=10'], self.GetWallet3(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBoolean(), False) # Try 
to register as a non owner tx, results, total_ops, engine = TestBuild(out, ['crowdsale_register', parse_param([self.wallet_3_script_hash.Data])], self.GetWallet3(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBoolean(), False) # Get status of non registered address tx, results, total_ops, engine = TestBuild(out, ['crowdsale_status', parse_param([self.wallet_3_script_hash.Data])], self.GetWallet3(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBoolean(), False) TestContract.dispatched_events = [] # register an address tx, results, total_ops, engine = TestBuild(out, ['crowdsale_register', parse_param([self.wallet_3_script_hash.Data])], self.GetWallet1(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBigInteger(), 1) # it should dispatch an event self.assertEqual(len(TestContract.dispatched_events), 1) evt = TestContract.dispatched_events[0] self.assertEqual(evt.event_payload.Value[0].Value, b'kyc_registration') # register 2 addresses at once tx, results, total_ops, engine = TestBuild(out, ['crowdsale_register', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data])], self.GetWallet1(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBigInteger(), 2) # now check reg status tx, results, total_ops, engine = TestBuild(out, ['crowdsale_status', parse_param([self.wallet_3_script_hash.Data])], self.GetWallet3(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBoolean(), True) def test_ICOTemplate_4_attachments(self): output = Compiler.instance().load('%s/boa_test/example/demo/ICO_Template.py' % TestContract.dirname).default out = output.write() # test mint tokens without being kyc verified tx, results, total_ops, engine = TestBuild(out, ['get_attachments', '[]', '--attach-neo=10'], self.GetWallet3(), '0705', '05') self.assertEqual(len(results), 1) attachments = results[0].GetArray() self.assertEqual(len(attachments), 4) fn = FunctionCode(out, '0705', '05') self.assertEqual(attachments[0].GetByteArray(), fn.ScriptHash().Data) self.assertEqual(attachments[1].GetByteArray(), self.wallet_3_script_hash.Data) self.assertEqual(attachments[2].GetBigInteger(), Fixed8.FromDecimal(10).value) self.assertEqual(attachments[3].GetBigInteger(), 0) tx, results, total_ops, engine = TestBuild(out, ['get_attachments', '[]'], self.GetWallet3(), '0705', '05') self.assertEqual(len(results), 1) attachments = results[0].GetArray() self.assertEqual(len(attachments), 4) self.assertEqual(attachments[1].GetByteArray(), bytearray()) self.assertEqual(attachments[2].GetBigInteger(), 0) self.assertEqual(attachments[3].GetBigInteger(), 0) tx, results, total_ops, engine = TestBuild(out, ['get_attachments', '[]', '--attach-neo=3', '--attach-gas=3.12'], self.GetWallet1(), '0705', '05') self.assertEqual(len(results), 1) attachments = results[0].GetArray() self.assertEqual(len(attachments), 4) self.assertEqual(attachments[1].GetByteArray(), self.wallet_1_script_hash.Data) self.assertEqual(attachments[2].GetBigInteger(), Fixed8.FromDecimal(3).value) self.assertEqual(attachments[3].GetBigInteger(), Fixed8.FromDecimal(3.12).value) def test_ICOTemplate_5_mint(self): output = Compiler.instance().load('%s/boa_test/example/demo/ICO_Template.py' % TestContract.dirname).default out = output.write() # register an address tx, results, total_ops, engine = TestBuild(out, ['crowdsale_register', parse_param([self.wallet_3_script_hash.Data])], self.GetWallet1(), 
'0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBigInteger(), 1) TestContract.dispatched_events = [] # test mint tokens, this should return true tx, results, total_ops, engine = TestBuild(out, ['mintTokens', '[]', '--attach-neo=10'], self.GetWallet3(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBoolean(), True) # it should dispatch an event self.assertEqual(len(TestContract.dispatched_events), 1) evt = TestContract.dispatched_events[0] self.assertIsInstance(evt, NotifyEvent) self.assertEqual(evt.amount, 10 * TOKENS_PER_NEO) self.assertEqual(evt.addr_to, self.wallet_3_script_hash) # test mint tokens again, this should be false since you can't do it twice tx, results, total_ops, engine = TestBuild(out, ['mintTokens', '[]', '--attach-neo=10'], self.GetWallet3(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBoolean(), False) # now the minter should have a balance tx, results, total_ops, engine = TestBuild(out, ['balanceOf', parse_param([self.wallet_3_script_hash.Data])], self.GetWallet1(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBigInteger(), 10 * TOKENS_PER_NEO) # now the total circulation should be bigger tx, results, total_ops, engine = TestBuild(out, ['totalSupply', '[]'], self.GetWallet1(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBigInteger(), (10 * TOKENS_PER_NEO) + TOKEN_INITIAL_AMOUNT) def test_ICOTemplate_6_approval(self): output = Compiler.instance().load('%s/boa_test/example/demo/ICO_Template.py' % TestContract.dirname).default out = output.write() # tranfer_from, approve, allowance tx, results, total_ops, engine = TestBuild(out, ['allowance', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data])], self.GetWallet2(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBigInteger(), 0) # try to transfer from tx, results, total_ops, engine = TestBuild(out, ['transferFrom', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 10000])], self.GetWallet2(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBoolean(), False) # try to approve from someone not yourself tx, results, total_ops, engine = TestBuild(out, ['approve', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 10000])], self.GetWallet2(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBigInteger(), 0) # try to approve more than you have tx, results, total_ops, engine = TestBuild(out, ['approve', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, TOKEN_INITIAL_AMOUNT])], self.GetWallet3(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBigInteger(), 0) TestContract.dispatched_events = [] # approve should work tx, results, total_ops, engine = TestBuild(out, ['approve', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 1234])], self.GetWallet3(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBoolean(), True) # it should dispatch an event self.assertEqual(len(TestContract.dispatched_events), 1) evt = TestContract.dispatched_events[0] self.assertIsInstance(evt, NotifyEvent) self.assertEqual(evt.notify_type, b'approve') self.assertEqual(evt.amount, 1234) # check allowance tx, results, total_ops, engine = TestBuild(out, ['allowance', parse_param([self.wallet_3_script_hash.Data, 
self.wallet_2_script_hash.Data])], self.GetWallet2(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBigInteger(), 1234) # approve should not be additive, it should overwrite previous approvals tx, results, total_ops, engine = TestBuild(out, ['approve', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 133234])], self.GetWallet3(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBoolean(), True) tx, results, total_ops, engine = TestBuild(out, ['allowance', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data])], self.GetWallet2(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBigInteger(), 133234) # now you can transfer from tx, results, total_ops, engine = TestBuild(out, ['transferFrom', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 10000])], self.GetWallet2(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBoolean(), True) # now the recevier should have a balance # it is equal to 10000 plus test_transfer_amount = 2400000001 tx, results, total_ops, engine = TestBuild(out, ['balanceOf', parse_param([self.wallet_2_script_hash.Data])], self.GetWallet1(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBigInteger(), 10000 + 2400000001) # now the allowance should be less tx, results, total_ops, engine = TestBuild(out, ['allowance', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data])], self.GetWallet2(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBigInteger(), 133234 - 10000) # try to transfer too much, even with approval tx, results, total_ops, engine = TestBuild(out, ['transferFrom', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, 14440000])], self.GetWallet2(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBoolean(), False) # cant approve negative amounts tx, results, total_ops, engine = TestBuild(out, ['approve', parse_param([self.wallet_3_script_hash.Data, self.wallet_2_script_hash.Data, -1000])], self.GetWallet3(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBoolean(), False) def test_many_ops(self): output = Compiler.instance().load('%s/boa_test/example/demo/ICO_Template.py' % TestContract.dirname).default out = output.write() # tranfer_from, approve, allowance tx, results, total_ops, engine = TestBuild(out, ['another_op_5', bytearray()], self.GetWallet2(), '0705', '05') self.assertEqual(len(results), 1) self.assertEqual(results[0].GetBigInteger(), 6)
51.899457
202
0.673857
18,453
0.966176
0
0
881
0.046128
0
0
3,340
0.174878
be76f999496b5e5961109377d7a8e9bebf2c7e1e
2,576
py
Python
regexem.py
lvijay/ilc
1c3b1381e7e5a5064bda829e3d34bfaf24745d1a
[ "BSD-3-Clause-No-Nuclear-Warranty" ]
1
2019-01-03T17:44:11.000Z
2019-01-03T17:44:11.000Z
regexem.py
lvijay/ilc
1c3b1381e7e5a5064bda829e3d34bfaf24745d1a
[ "BSD-3-Clause-No-Nuclear-Warranty" ]
null
null
null
regexem.py
lvijay/ilc
1c3b1381e7e5a5064bda829e3d34bfaf24745d1a
[ "BSD-3-Clause-No-Nuclear-Warranty" ]
null
null
null
#!/usr/bin/python
# -*- mode: python; -*-

## This file is part of Indian Language Converter
## Copyright (C) 2006 Vijay Lakshminarayanan <[email protected]>

## Indian Language Converter is free software; you can redistribute it
## and/or modify it under the terms of the GNU General Public License
## as published by the Free Software Foundation; either version 2 of
## the License, or (at your option) any later version.

## This program is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
## General Public License for more details.

## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
## 02110-1301, USA.

## $Id: regexem.py,v 1.4 2006-03-26 03:15:24 vijay Exp $

## Author: Vijay Lakshminarayanan
## $Date: 2006-03-26 03:15:24 $

import sys
from re import escape


def regexem (strlst):
    """Returns a single string which is the regular expression to
    identify any single word in the given argument.

    See the Examples given at the end of this file."""
    return regexem_internal([escape(s) for s in strlst])


def regexem_internal (strlst):
    strlst.sort()
    s, rest = strlst[0], strlst[1:]
    groups = {}
    groups[s] = [s]

    for string in rest:
        if string.startswith(s) and len(s) < len(string):  # avoid duplicates
            groups[s].append(string[len(s):])              # add the suffix to the group
        else:
            s = string                                     # a fresh prefix
            groups[s] = [s]

    regex = ''

    for prefix, words in groups.items():
        inreg = ''
        if len(words) == 2:        # i.e. words[0] is a subset of words[1]
            inreg += words[0] + '(' + words[1] + ')' + '?'
        elif len(words) > 2:
            inreg += words[0] + '(' + regexem_internal(words[1:]) + ')' + '?'
        else:
            inreg += prefix        # since prefix == words[0] in this case.
        regex += '(' + inreg + ')' + '|'

    return regex[:-1]              # we don't need the last '|'


if __name__ == '__main__':
    print ''.join(regexem(sys.argv[1:]))

## Examples
#
# $ ./regexem.py emacs vi ed
# (ed)|(emacs)|(vi)
#
# $ ./regexem.py batsman bats well
# (well)|(bats(man)?)
#
# $ ./regexem.py houses housefly
# (houses)|(housefly)        ## Note that they aren't grouped together
#
## a slightly complicated example
# $ ./regexem.py an anteater and an ant
# (an((d)|(t(eater)?))?)
33.025641
77
0.632376
0
0
0
0
0
0
0
0
1,695
0.657997
be7730b08647563bbdf351876a21f2fa9df7d7f9
3,765
py
Python
main.py
rohit-k-das/crowdstrike-alerts
48c23357f819f90134f76cefb58f1355967363d4
[ "MIT" ]
3
2019-07-10T17:05:56.000Z
2019-10-18T22:34:08.000Z
main.py
rohit-k-das/crowdstrike-alerts
48c23357f819f90134f76cefb58f1355967363d4
[ "MIT" ]
1
2020-01-09T14:43:58.000Z
2020-02-06T11:24:04.000Z
main.py
rohit-k-das/crowdstrike-alerts
48c23357f819f90134f76cefb58f1355967363d4
[ "MIT" ]
2
2019-07-10T17:05:57.000Z
2019-10-18T22:34:09.000Z
import requests
import crowdstrike_detection as crowdstrike
import logging
import click
import urllib.parse
import ConfigParser
import os

logging.basicConfig(level=logging.INFO, format='%(asctime)s %(name)-15s [%(levelname)-8s]: %(message)s',
                    datefmt='%m/%d/%Y %I:%M:%S %p')
logger = logging.getLogger(__name__)

Config = ConfigParser.ConfigParser()
Config.read(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'Crowdstrike_creds'))

# Create your own slackbot
hubot_webhook_url = Config.get('Settings', 'Slackbot_Url')


# Send slack alert via hubot for each high or critical detection in crowdstrike
def send_hubot_alert_crowdstrike(detection):
    logger.info("Send hubot alert for detection %s" % detection.detection_id)

    # Emoji for slack based on action taken
    green_alerts = ['Kill process', 'Kill subprocess', 'Quarantine file', 'Kill parent', 'Process blocked',
                    'Operation blocked']
    red_alerts = ['Policy disabled']
    amber_alerts = []

    actions = []
    for behavior in detection.behavior:
        actions.extend(behavior['action_taken'])
    if actions:
        actions = list(set(actions))

    alerts = []
    if actions:
        if list(set(actions).intersection(red_alerts)):
            alerts.append(':red-alert: Allowed')
        if list(set(actions).intersection(green_alerts)):
            alerts.append(':green-alert: Blocked')
    else:
        alerts.append(':red-alert: Allowed')

    if ':green-alert: Blocked' in alerts and ':red-alert: Allowed' in alerts:
        alerts = [':amber-alert: Suspicious']

    message_to_send = ":crowd-strike: *%s* Alert: <%s|%s> ---> %s\n" % (
        detection.severity, detection.link, detection.detection_id.split(':')[2],
        str(alerts).strip('[').strip(']').replace("'", ""))
    message_to_send = "%sDevice: %s\n" % (message_to_send, detection.device)
    for behavior in detection.behavior:
        message_to_send = "%sBad Behavior: %s\n" % (message_to_send,
                                                    behavior['bad_behavior'].replace('&', '%26amp;').replace('<', '%26lt;').replace('>', '%26gt;'))
        message_to_send = "%sHash: %s\n" % (message_to_send, behavior['hash'])
        message_to_send = "%sParent Cmd: %s\n" % (message_to_send, behavior['parent_commandline'])
        message_to_send = "%sTactic-Technique: %s\n" % (message_to_send, behavior['tactic + technique'])
        if behavior['action_taken']:
            message_to_send = "%sAction Taken: %s" % (
                message_to_send, str(behavior['action_taken']).strip('[').strip(']').replace("'", ""))
        else:
            message_to_send = "%sAction Taken: %s" % (message_to_send, 'None')
        if len(detection.behavior) > 1:
            message_to_send = "%s\n" % message_to_send

    # Whom to send the alert
    send_to = 'yourchannel or a user'
    data = {'message': message_to_send, 'users': send_to}
    data = urllib.parse.urlencode(data)
    headers = {"Content-Type": "application/x-www-form-urlencoded"}
    resp = requests.post(hubot_webhook_url, headers=headers, data=data)
    if resp.ok:
        logger.info("Sent alert to user/channel %s" % send_to)
    else:
        logger.critical("Unable to connect to hubot.")
        logger.info("Hubot Error %d:%s" % (resp.status_code, resp.text))


@click.command()
@click.option("-d", "--duration", default=600, show_default=True, nargs=1, type=int, required=False,
              help="Crowdstrike detections that were last seen since 'duration' seconds")
def main(duration):
    crowdstrike_detections = crowdstrike.fetch_detections(duration)
    if crowdstrike_detections:
        logger.info("Sending alerts")
        for detection in crowdstrike_detections:
            send_hubot_alert_crowdstrike(detection)


if __name__ == '__main__':
    main()
41.373626
176
0.661355
0
0
0
0
451
0.119788
0
0
1,208
0.32085
be7756c046d0e49be191bd99222501f37d6b8b9a
92
py
Python
connexion/http_facts.py
lumikanta/connexion
b6530d32aaee92ebbdfef501540d642a26185174
[ "Apache-2.0" ]
null
null
null
connexion/http_facts.py
lumikanta/connexion
b6530d32aaee92ebbdfef501540d642a26185174
[ "Apache-2.0" ]
null
null
null
connexion/http_facts.py
lumikanta/connexion
b6530d32aaee92ebbdfef501540d642a26185174
[ "Apache-2.0" ]
1
2019-03-21T18:21:32.000Z
2019-03-21T18:21:32.000Z
FORM_CONTENT_TYPES = [
    'application/x-www-form-urlencoded',
    'multipart/form-data'
]
18.4
40
0.695652
0
0
0
0
0
0
0
0
56
0.608696
be775d3a62274c2c57f452dafb16e1035b3dff0c
4,593
py
Python
Test3/yandexAPI3.py
klepik1990/YandexTestAPI
ded41ff607c0b209b51efbcaa13c8008156a5e0a
[ "MIT" ]
null
null
null
Test3/yandexAPI3.py
klepik1990/YandexTestAPI
ded41ff607c0b209b51efbcaa13c8008156a5e0a
[ "MIT" ]
null
null
null
Test3/yandexAPI3.py
klepik1990/YandexTestAPI
ded41ff607c0b209b51efbcaa13c8008156a5e0a
[ "MIT" ]
null
null
null
import requests
import json

HEADERS = {"Authorization": "OAuth AgAAAAA00Se2AAW1W1yCegavqkretMXBGkoUUQk", "Accept": "*/*"}
URL = "https://cloud-api.yandex.net:443/v1/disk/"


def get_folder_info(folder_name_1, folder_name_2, url=None, headers=None):
    """Get status information about folders on the disk.

    Args:
        folder_name_1: name of the root folder.
        folder_name_2: name of the nested folder.
        url: request URL.
        headers: request headers containing the authorization token.

    Returns:
        Information about the folders: the path to the folders if they were created
        successfully, otherwise a description of the error.
    """
    info = requests.get(url=URL + "resources?path=" + folder_name_1 + "/" + folder_name_2 + "&fields=path",
                        headers=HEADERS)
    dict_response = json.loads(info.content)
    if info.status_code == 404:
        return dict_response["description"]
    else:
        return dict_response["path"]


def get_file_info(folder_name_1, folder_name_2, file_name, url=None, headers=None):
    """Get information about a file.

    Args:
        folder_name_1: name of the root folder.
        folder_name_2: name of the nested folder.
        file_name: name of the file.
        url: request URL.
        headers: request headers containing the authorization token.

    Returns:
        The path to the file.
    """
    file_info_json = requests.get(url=URL + "resources?path=" + folder_name_1 + "/" + folder_name_2 + "/"
                                  + file_name + ".jpg&fields=path", headers=HEADERS)
    file_info_dict = json.loads(file_info_json.content)
    if file_info_json.status_code == 404:
        return file_info_dict["description"]
    else:
        return file_info_dict["path"]


def create_folder(folder_name_1, folder_name_2, url=None, headers=None):
    """Create folders on the disk.

    Args:
        folder_name_1: name of the root folder.
        folder_name_2: name of the nested folder.
        url: request URL.
        headers: request headers containing the authorization token.

    Returns:
        Information about the folders via a call to another function.
    """
    response_code = [202, 204]
    new_folder = requests.put(url=URL + "resources?path=" + folder_name_1, headers=HEADERS)
    if new_folder.status_code == 409:
        new_folder = requests.delete(url=URL + "resources?path=" + folder_name_1 + "&permanently=true",
                                     headers=HEADERS)
        if new_folder.status_code in response_code:
            requests.put(url=URL + "resources?path=" + folder_name_1, headers=HEADERS)
    requests.put(url=URL + "resources?path=" + folder_name_1 + "/" + folder_name_2, headers=HEADERS)
    return get_folder_info(folder_name_1, folder_name_2)


def create_file(folder_name_1, folder_name_2, file_name, url=None, headers=None):
    """Upload a file to the disk.

    Args:
        folder_name_1: name of the root folder.
        folder_name_2: name of the nested folder.
        file_name: name of the file.
        url: request URL.
        headers: request headers containing the authorization token.

    Returns:
        Information about the created file via a call to another function.
    """
    assert len(file_name) > 0, "File name was not provided"
    new_file = requests.get(url=URL + "resources/upload?path=" + folder_name_1 + "/" + folder_name_2 + "/"
                            + file_name + ".jpg&overwrite=true", headers=HEADERS)
    get_link = new_file.content
    link = json.loads(get_link)
    requests.put(url=link["href"])
    return get_file_info(folder_name_1, folder_name_2, file_name)


def move_to_bucket(folder_name, url=None, headers=None):
    """Move a folder and its contents to the trash.

    Args:
        folder_name: name of the root folder.
        url: request URL.
        headers: request headers containing the authorization token.

    Returns:
        A link for checking the operation status.
    """
    order_response = requests.delete(url=URL + "resources?path=" + folder_name, headers=HEADERS)
    return json.loads(order_response.content)["href"]


def get_status(link, headers=None):
    """Get the status of an operation by its link.

    Args:
        link: the link whose status is being checked.
        headers: request headers containing the authorization token.

    Returns:
        The operation status.
    """
    status_response = requests.get(url=link, headers=HEADERS)
    return json.loads(status_response.content)["status"]


def clean_bucket():
    """Empty the trash.

    Returns:
        A link for checking the operation status.
    """
    remove_folder = requests.delete(url=URL + "trash/resources", headers=HEADERS)
    return json.loads(remove_folder.content)["href"]
33.282609
125
0.674069
0
0
0
0
0
0
0
0
3,275
0.589559
be78c46e8b283fc835a189209cd53b3fea610e40
3,208
py
Python
app/users/operator/views.py
trinanda/AQUR
2a415b05ba4c0113b05b6fa14fb454af2bad52ec
[ "MIT" ]
null
null
null
app/users/operator/views.py
trinanda/AQUR
2a415b05ba4c0113b05b6fa14fb454af2bad52ec
[ "MIT" ]
null
null
null
app/users/operator/views.py
trinanda/AQUR
2a415b05ba4c0113b05b6fa14fb454af2bad52ec
[ "MIT" ]
null
null
null
import os
from collections import defaultdict

from flask import render_template
from flask_login import login_required
from sqlalchemy import and_

from app import db
from app.decorators import operator_required
from app.models import Student, MonthNameList, Course, PaymentStatus, Payment, Teacher, Schedule
from app.users.operator import operator


@operator.route('/')
@login_required
@operator_required
def index():
    title = os.environ.get('APP_NAME')

    # get all students data on schedule, except if the student tuition payment is None, PENDING, REJECTED or WARNING_3
    students_courses_data = db.session.query(Schedule, Payment).join(Payment).filter(
        and_(Payment.status_of_payment is not None,
             Payment.status_of_payment != PaymentStatus.PENDING.name,
             Payment.status_of_payment != PaymentStatus.REJECTED.name,
             Payment.status_of_payment != PaymentStatus.WARNING_3.name))

    # get the amount of Teachers and Students
    total_students = Student.query.count()
    total_teachers = Teacher.query.count()

    month_name_list = []
    for data in MonthNameList:
        month_name_list.append(str(data))

    # make a query object for "Tahsin" and "Arabic Language" course
    tahsin = students_courses_data.join(Course).filter(Course.name == "Tahsin")
    arabic = students_courses_data.join(Course).filter(Course.name == "Bahasa Arab")

    # the total payment for the courses each month
    tahsin_course_data = []
    arabic_course_data = []
    for data in tahsin:
        for month_name in month_name_list:
            tahsin_course_data.append({str(month_name): data.Payment.created_at.strftime('%B').count(month_name)})
    for data in arabic:
        for month_name in month_name_list:
            arabic_course_data.append({str(month_name): data.Payment.created_at.strftime('%B').count(month_name)})

    # merge and sum the total value from the dictionary on the same month from the _courses_data result above
    total_tahsin_students_per_month = defaultdict(int)
    total_arabic_students_per_month = defaultdict(int)
    for d in tahsin_course_data:
        for key, value in d.items():
            total_tahsin_students_per_month[key] += value
    for d in arabic_course_data:
        for key, value in d.items():
            total_arabic_students_per_month[key] += value

    # store all of the month values on a list for each course
    tahsin_values = []
    arabic_values = []
    for key, value in total_tahsin_students_per_month.items():
        tahsin_values.append(value)
    for key, value in total_arabic_students_per_month.items():
        arabic_values.append(value)

    # make a dictionary to represent course name with the matching total student that do the payment for each month
    data_courses_each_month = [
        {
            'Tahsin': tahsin_values,
        },
        {
            'Bahasa Arab': arabic_values
        }
    ]

    return render_template('main/operator/operator-dashboard.html', title=title, total_teachers=total_teachers,
                           total_students=total_students, month_name_list=month_name_list,
                           data_courses_each_month=data_courses_each_month)
40.1
118
0.711658
0
0
0
0
2,855
0.889963
0
0
639
0.19919
be7b321e4983f3461ae58d22d3131016ec26c37d
5,936
py
Python
arvet/core/metric.py
jskinn/arvet
742cf3e7ee8848c4efebfaa887fc9c0fd90a06e9
[ "BSD-2-Clause" ]
2
2021-05-27T21:48:34.000Z
2021-06-12T02:58:44.000Z
arvet/core/metric.py
jskinn/arvet
742cf3e7ee8848c4efebfaa887fc9c0fd90a06e9
[ "BSD-2-Clause" ]
null
null
null
arvet/core/metric.py
jskinn/arvet
742cf3e7ee8848c4efebfaa887fc9c0fd90a06e9
[ "BSD-2-Clause" ]
null
null
null
# Copyright (c) 2017, John Skinner import abc import typing import bson import pymodm import pymodm.fields as fields import arvet.database.pymodm_abc as pymodm_abc from arvet.database.reference_list_field import ReferenceListField import arvet.core.trial_result class Metric(pymodm.MongoModel, metaclass=pymodm_abc.ABCModelMeta): """ A class that measures results This is an abstract base class defining an interface for all metrics, to allow them to be called easily and in a structured way. """ @property def identifier(self) -> bson.ObjectId: """ Get the id for this metric :return: """ return self._id @abc.abstractmethod def is_trial_appropriate(self, trial_result: arvet.core.trial_result.TrialResult) -> bool: """ Fine-grained filtering for trial results, to make sure this class can measure this trial result. :return: """ pass @abc.abstractmethod def measure_results(self, trial_results: typing.Iterable[arvet.core.trial_result.TrialResult]) \ -> 'MetricResult': """ Measure the results of running a particular system on a particular image source. We take a collection of trials to allow for multiple repeats of the system on the same data, which allows us to account for and measure random variation in the system. A helper to check this is provided below, call it in any implementation. The trial result MUST include the ground truth along with the system estimates, which must be the same for all trials. :param trial_results: A collection of trial results to measure. These are assumed to be repeat runs of the same system on the same data. :return: A MetricResult object containing either the results, or explaining the error :rtype: MetricResult """ pass @abc.abstractmethod def get_columns(self) -> typing.Set[str]: """ Get the set of available properties for this metric. Pass these to "get_properties", below. :return: """ pass @abc.abstractmethod def get_properties(self, columns: typing.Iterable[str] = None) -> typing.Mapping[str, typing.Any]: """ Get the values of the requested properties :param columns: :return: """ pass @classmethod def get_pretty_name(cls) -> str: """ Get a human-readable name for this metric :return: """ return cls.__module__ + '.' + cls.__name__ @classmethod def get_instance(cls) -> 'Metric': """ Get an instance of this vision system, with some parameters, pulling from the database if possible, or construct a new one if needed. It is the responsibility of subclasses to ensure that as few instances of each system as possible exist within the database. Does not save the returned object, you'll usually want to do that straight away. :return: """ all_objects = cls.objects.all() if all_objects.count() > 0: return all_objects.first() obj = cls() return obj class MetricResult(pymodm.MongoModel): """ A general superclass for metric results for all metrics """ metric = fields.ReferenceField(Metric, required=True, on_delete=fields.ReferenceField.CASCADE) trial_results = ReferenceListField(arvet.core.trial_result.TrialResult, required=True, on_delete=fields.ReferenceField.CASCADE) success = fields.BooleanField(required=True) message = fields.CharField() # The set of plots available to visualize_results. available_plots = set() @property def identifier(self) -> bson.ObjectId: """ Get the id of this metric result :return: """ return self._id def get_columns(self) -> typing.Set[str]: """ Get a list of available results columns, which are the possible keys in dictionaries returned by get_results. Should delegate to the linked trial results, systems, etc for the full list. 
:return: """ return set() def get_results(self, columns: typing.Iterable[str] = None) -> typing.List[dict]: """ Get the results from this metric result, as a list of dictionaries we can turn into a Pandas data frame. Each dictionary should include as much data as possible, including data about the system, the image source, the particular image, etc... Use the argument to restrict the columns to a limited set, should return all by default. This must return a non-empty list for any trial result where success is True. :return: """ return [] def check_trial_collection(trial_results: typing.Iterable[arvet.core.trial_result.TrialResult]) \ -> typing.Union[str, None]: """ A helper function to check that all the given trial results come from the same system and image source. Call this at the start of Metric.measure_results :param trial_results: A collection of trial results passed to Metric.measure_results :return: None if all the trials are OK, string explaining the problem if they are not """ first_trial = None for idx, trial in enumerate(trial_results): if not trial.success: return "Trial {0} (1) is failed".format(idx, trial.pk) if first_trial is None: first_trial = trial else: if trial.image_source != first_trial.image_source: return "Trial {0} ({1}) does not have the same image source as the first trial".format(idx, trial.pk) if trial.system != first_trial.system: return "Trial {0} ({1}) does not have the same system as the first trial".format(idx, trial.pk)
37.56962
117
0.657008
4,586
0.772574
0
0
2,825
0.47591
0
0
3,386
0.570418
be7e9dc9b18c9759a533f45fd2110a059eb361f0
19,192
py
Python
pfile/accessor.py
thorwhalen/ut
353a4629c35a2cca76ef91a4d5209afe766433b4
[ "MIT" ]
4
2016-12-17T20:06:10.000Z
2021-11-19T04:45:29.000Z
pfile/accessor.py
thorwhalen/ut
353a4629c35a2cca76ef91a4d5209afe766433b4
[ "MIT" ]
11
2021-01-06T05:35:11.000Z
2022-03-11T23:28:31.000Z
pfile/accessor.py
thorwhalen/ut
353a4629c35a2cca76ef91a4d5209afe766433b4
[ "MIT" ]
3
2015-06-12T10:44:16.000Z
2021-07-26T18:39:47.000Z
"""File access utils""" __author__ = 'thorwhalen' # from ut.datapath import datapath import pickle import os from ut.util.importing import get_environment_variable import pandas as pd import ut.pfile.to as file_to import ut.pfile.name as pfile_name import ut.pstr.to as pstr_to from ut.serialize.local import Local from ut.serialize.s3 import S3 from os import environ # does this load the whole array? Can we just take MS_DATA instead? import ut.pstr.trans as pstr_trans import shutil try: MS_DATA = get_environment_variable('MS_DATA') except KeyError: MS_DATA = '' LOCATION_LOCAL = 'LOCAL' LOCATION_S3 = 'S3' #################################################################################################################### # Quick Utils def ms_data_path(relative_root, root_folder=MS_DATA): return os.path.join(pfile_name.ensure_slash_suffix(root_folder), relative_root) #################################################################################################################### # FACTORIES def for_local(relative_root='', read_only=False, extension=None, force_extension=False, root_folder=MS_DATA, **kwargs): # if a full path (i.e. starting with "/" is entered as a relative_root, then take it as the sound_file_root_folder if relative_root and ((relative_root[0] == '/') or (relative_root[0] == '~')): root_folder = relative_root relative_root = '' elif relative_root == 'test': # if relative root is test... relative_root = 'test' print("you asked for a local test, so I forced the root to be %s" % relative_root) # ensure that sound_file_root_folder ends with a "/" file_handler = FilepathHandler(relative_root=pfile_name.ensure_slash_suffix(root_folder)+relative_root) # take care of extensions if extension: extension_handler = ExtensionHandler(extension=extension, force_extension=force_extension) file_loc_proc = lambda x: file_handler.process(extension_handler.process(x)) else: file_loc_proc = file_handler.process instance = Accessor( relative_root=relative_root, extension=extension, force_extension=force_extension, file_loc_proc=file_loc_proc, location=LOCATION_LOCAL, read_only=read_only, **kwargs ) instance._set_local_defaults() return instance def for_s3(relative_root='loc-data', read_only=False, extension=None, force_extension=False, **kwargs): if relative_root == 'test': relative_root = 'loc-data/test' print("you asked for a s3 test, so I forced the root to be %s" % relative_root) file_handler = FilepathHandler(relative_root=relative_root) if extension: extension_handler = ExtensionHandler(extension=extension, force_extension=force_extension) file_loc_proc = lambda x: file_handler.process(extension_handler.process(x)) else: file_loc_proc = file_handler.process instance = Accessor( relative_root=relative_root, extension=extension, force_extension=force_extension, file_loc_proc=file_loc_proc, location=LOCATION_S3, read_only=read_only, **kwargs ) save_kwargs = instance.mk_save_kwargs(relative_root) try: bucket_name = save_kwargs['bucket_name'] base_folder = save_kwargs['key_name'] except: print("couldn't get bucket_name and key_name for relative_root") instance.s3 = S3(bucket_name=bucket_name, base_folder=base_folder) instance._set_s3_defaults() return instance #################################################################################################################### class Accessor(object): LOCATION_LOCAL = LOCATION_LOCAL LOCATION_S3 = LOCATION_S3 def __init__(self, file_loc_proc=None, location=LOCATION_LOCAL, mk_save_kwargs=None, pre_save_proc=None, save_fun=None, mk_load_kwargs=None, 
load_fun=None, post_load_proc=None, read_only=False, **kwargs): # if file_loc_proc: # self.file_loc_proc = file_loc_proc # else: # self.file_loc_proc = FilepathHandler().process self.file_loc_proc = file_loc_proc self.location = location self.mk_save_kwargs = mk_save_kwargs self.pre_save_proc = pre_save_proc self.save_fun = save_fun self.mk_load_kwargs = mk_load_kwargs self.load_fun = load_fun self.post_load_proc = post_load_proc self.read_only = read_only for k, v in list(kwargs.items()): self.__setattr__(k,v) self._guess_missing_attributes() def __call__(self, *args, **kwargs): return self.filepath(*args, **kwargs) #################################################################################################################### # INSTANCE METHODS def root_folder(self): if self.extension: return self.file_loc_proc('')[:(-len(self.extension))] else: return self.file_loc_proc('') def filepath(self, file_spec): return self.file_loc_proc(file_spec) def exists(self, file_spec): return os.path.exists(self.filepath(file_spec)) def save(self, obj, file_spec, **kwargs): if self.read_only: raise BaseException("read_only was set to True, so you can't save anything") else: # make the dict specifying the input to the save_fun file_spec = self.file_loc_proc(file_spec) if self.pre_save_proc: obj = self.pre_save_proc(obj) if self.mk_save_kwargs: file_spec_kwargs = self.mk_save_kwargs(file_spec) self.save_fun(obj, **file_spec_kwargs) else: self.save_fun(obj, file_spec) def append(self, obj, file_spec, **kwargs): # TODO: Write this code someday """ Intent of this function is to append data to a file's data without having to specify how to do so. For example, if the obj is a string and the file is a text file, use file append. If obj is a pickled dataframe, the effect (however you do it--hopefully there's a better way than loading the data, appending, and saving the final result) should be to have a pickled version of the old and new dataframes appended. Etc. 
""" pass # if isinstance(obj, basestring): # raise ValueError("strings not implemented yet") # elif isinstance(obj, (pd.DataFrame, pd.Series)): # pass def load(self, file_spec, **kwargs): file_spec = self.file_loc_proc(file_spec) if pfile_name.get_extension(file_spec) not in ['.xls', '.xlsx']: if self.mk_load_kwargs: file_spec_kwargs = self.mk_load_kwargs(file_spec) obj = self.load_fun(**file_spec_kwargs) else: obj = self.load_fun(file_spec) if self.post_load_proc: obj = self.post_load_proc(obj) else: # obj = pd.read_excel(file_spec, **kwargs) xls = pd.ExcelFile(file_spec) kwargs = dict({'sheetname': xls.sheet_names[0]}, **kwargs) # take first sheet if sheet not specified obj = pd.read_excel(file_spec, **kwargs) #obj = xls.parse(**kwargs) return obj def copy_local_file_to(self, local_file_path, target_file_spec): ''' Copies a file from the local computer to self.filepath(target_file_spec) :param local_file_path: :param target_file_spec: :return: ''' if self.read_only: raise BaseException("read_only was set to True, so you can't copy anything to this location") else: if self.location == LOCATION_LOCAL: if not os.path.exists(local_file_path): local_file_path = self.filepath(local_file_path) shutil.copyfile(local_file_path, self.filepath(target_file_spec)) elif self.location == LOCATION_S3: # make the dict specifying the input to the save_fun target_file_spec = self.file_loc_proc(target_file_spec) if self.pre_save_proc: local_file_path = self.pre_save_proc(local_file_path) if self.mk_save_kwargs: file_spec_kwargs = self.mk_save_kwargs(target_file_spec) self.copy_local_file_to_fun(local_file_path, **file_spec_kwargs) else: raise ("this shouldn't happen") else: raise ValueError("unknown location") def copy_to(self, target_relative_root, file_spec, target_location=None): if isinstance(target_relative_root, str): target_relative_root, target_location = \ _make_a_file_loc_proc_and_location_from_string_specifications(target_relative_root, target_location) # make a file accessor for the (target_location, target_relative_root) facc = Accessor(relative_root=target_relative_root, location=target_location) #################################################################################################################### # PARTIAL FACTORIES def _add_extension_handler(self, extension, force_extension=False): extension_handler = ExtensionHandler(extension=extension, force_extension=force_extension) self.file_loc_proc = lambda x : self.file_loc_proc(extension_handler.process(x)) def _guess_missing_attributes(self): if self.file_loc_proc is None: # if no file_loc_proc is given if self.location is not None and isinstance(self.location, str): self.file_loc_proc==self.location else: self.file_loc_proc==LOCATION_LOCAL elif isinstance(self.file_loc_proc, str): # if file_loc_proc is a string self.file_loc_proc, self.location = \ _make_a_file_loc_proc_and_location_from_string_specifications(self.file_loc_proc, self.location) # if self.file_loc_proc==LOCATION_LOCAL: # self.location = LOCATION_LOCAL # self.file_loc_proc = '' # elif self.file_loc_proc==LOCATION_S3: # self.location = LOCATION_S3 # self.file_loc_proc = '' # else: # if self.location==LOCATION_LOCAL: # self.file_loc_proc = FilepathHandler(relative_root=os.path.join(MS_DATA,self.file_loc_proc)).process # elif self.location==LOCATION_S3: # self.file_loc_proc = FilepathHandler(relative_root=os.path.join('loc-data',self.file_loc_proc)).process # set defaults for remaining missing attributes self._set_defaults() def _set_defaults(self): if self.location is 
None: print("setting location to LOCAL (because you didn't specify a location)") self.location = LOCATION_LOCAL if self.location == LOCATION_LOCAL: self._set_local_defaults() elif self.location == LOCATION_S3: self._set_s3_defaults() def _set_local_defaults(self, root_folder=MS_DATA): # set defaults for local if attr is None self.file_loc_proc = self.file_loc_proc or FilepathHandler(relative_root=os.path.join(root_folder)).process self.save_fun = self.save_fun or LocalIOMethods().unicode_save self.load_fun = self.load_fun or LocalIOMethods().unicode_load # self.pre_save_proc = self.pre_save_proc or FilepathHandler().process # self.post_load_proc = self.post_load_proc or FilepathHandler().process def _set_s3_defaults(self): # set defaults for local if attr is None self.file_loc_proc = self.file_loc_proc or FilepathHandler(relative_root='loc-data').process self.mk_save_kwargs = fullpath_to_s3_kargs self.mk_load_kwargs = fullpath_to_s3_kargs self.save_fun = self.save_fun or S3IOMethods().unicode_save self.load_fun = self.load_fun or S3IOMethods().unicode_load self.copy_local_file_to_fun = S3IOMethods().copy_local_file_to_fun #################################################################################################################### # OBJECT UTILS def local_file_loc_proc_simple(self, file_spec): # add extension file_spec = self.handle_extension(file_spec) # remove slash suffix if present (because self.sound_file_root_folder ends with / already) if file_spec.startswith('/'): file_spec = file_spec[1:] def handle_extension(self, file_spec): if self.extension: if self.force_extension: file_spec = pfile_name.replace_extension(file_spec, self.extension) else: file_spec = pfile_name.add_extension_if_not_present(file_spec, self.extension) return os.path.join(self.root_folder, file_spec) #################################################################################################################### # OTHER UTILS def _make_a_file_loc_proc_and_location_from_string_specifications(file_loc_proc, location): if file_loc_proc is None and isinstance(location, str): file_loc_proc = location + "/" location = None elif location is None and isinstance(file_loc_proc, str): first_folder = pfile_name.get_highest_level_folder(location) if first_folder in [LOCATION_LOCAL, LOCATION_S3]: location = first_folder # set the location to first_folder file_loc_proc.replace(location+"/","") # remove the first_folder else: raise ValueError("location was not specified and couldn't be guessed from the file_loc_proc") else: raise ValueError("you've neither specified a file_loc_proc (as a file_loc_proc) nor a location") # make a file accessor for the (location, target_relative_root) file_loc_proc = FilepathHandler(relative_root=os.path.join(location,file_loc_proc)).process return (file_loc_proc, location) def file_loc_proc_from_full_path(fullpath): return FilepathHandler(relative_root=fullpath).process def fullpath_to_s3_kargs(filename): # remove slash suffix if present (because self.sound_file_root_folder ends with / already) if filename.startswith('/'): filename = filename[1:] mother_root = pfile_name.get_highest_level_folder(filename) rest_of_the_filepath = filename.replace(mother_root + '/','',1) return { 'bucket_name': mother_root, 'key_name': rest_of_the_filepath } class ExtensionHandler(object): def __init__(self, extension=None, force_extension=False): self.extension = extension self.force_extension = force_extension def process(self, file_spec): if self.force_extension: return 
pfile_name.replace_extension(file_spec, self.extension) else: return pfile_name.add_extension_if_not_present(file_spec, self.extension) class FilepathHandler(object): def __init__(self, relative_root=''): self.relative_root = relative_root def process(self, filepath=''): return os.path.join(self.relative_root, filepath) ##### LOCAL METHODS class LocalIOMethods(object): def __init__(self, encoding="UTF-8"): self.encoding = encoding def unicode_save(self, obj, filepath=None, **kwargs): if isinstance(obj, str): # pstr_to.file(string=pstr_trans.to_unicode_or_bust(obj), tofile=filepath, encoding=self.encoding) # pstr_to.file(string=pstr_trans.to_utf8_or_bust_iter(obj), tofile=filepath, encoding=self.encoding) # pstr_to.file(string=pstr_trans.str_to_utf8_or_bust(obj), tofile=filepath, encoding=self.encoding) pstr_to.file(string=obj, tofile=filepath, encoding=self.encoding) else: pickle.dump(obj=obj, file=open(filepath, 'w')) def simple_save(self, obj, filepath=None, **kwargs): if isinstance(obj, str): pstr_to.file(string=obj, tofile=filepath, encoding=self.encoding) else: pickle.dump(obj=obj, file=open(filepath, 'w')) def unicode_load(self, filepath=None, **kwargs): """ try pd.from_pickle, then pickle.loading, and if it doesn't work, try file_to.string """ return pstr_trans.to_unicode_or_bust(self.simple_load(filepath=filepath, **kwargs)) # try: # try: # getting it as a pandas object # return pstr_trans.to_unicode_or_bust(pd.read_pickle(path=filepath)) # except Exception: # getting it as a pickled object # return pstr_trans.to_unicode_or_bust(pickle.load(file=open(filepath, 'r'))) # except Exception: # getting it as a string # return pstr_trans.to_unicode_or_bust(file_to.string(filename=filepath)) def simple_load(self, filepath=None, **kwargs): """ try pd.read_pickle, pickle.load, and file_to.string in that order """ try: try: # getting it as a pandas object return pd.read_pickle(path=filepath) except Exception: # getting it as a pickled object return pickle.load(file=open(filepath, 'r')) except Exception: # getting it as a string return file_to.string(filename=filepath) ##### S3 METHODS class S3IOMethods(object): def __init__(self, **kwargs): self.s3 = S3(**kwargs) def unicode_save(self, obj, key_name, bucket_name): if isinstance(obj, str): self.s3.dumps(the_str=pstr_trans.to_unicode_or_bust(obj), key_name=key_name, bucket_name=bucket_name) else: self.s3.dumpo(obj=obj, key_name=key_name, bucket_name=bucket_name) def simple_save(self, obj, key_name, bucket_name): if isinstance(obj, str): self.s3.dumps(the_str=obj, key_name=key_name, bucket_name=bucket_name) else: self.s3.dumpo(obj=obj, key_name=key_name, bucket_name=bucket_name) def unicode_load(self, key_name, bucket_name): """ try pickle.loading, and if it doesn't work, try file_to.string """ try: return self.s3.loado(key_name=key_name, bucket_name=bucket_name) except: return pstr_trans.to_unicode_or_bust(self.s3.loads(key_name=key_name, bucket_name=bucket_name)) def simple_load(self, key_name, bucket_name): """ try pickle.loading, and if it doesn't work, try file_to.string """ try: return self.s3.loado(key_name=key_name, bucket_name=bucket_name) except: return self.s3.loads(key_name=key_name, bucket_name=bucket_name) def copy_local_file_to_fun(self, filepath, key_name, bucket_name): return self.s3.dumpf(f=filepath, key_name=key_name, bucket_name=bucket_name)
41.90393
125
0.634275
13,789
0.718476
0
0
0
0
0
0
5,588
0.291163
be7ea94dc71a3948ab59fd9c3e80bde2599bb1f1
4,309
py
Python
scripts/statistics.py
cstenkamp/MastersThesisText
d026f9c19819c83d99dfff12b594db9d061bfb31
[ "CC0-1.0" ]
null
null
null
scripts/statistics.py
cstenkamp/MastersThesisText
d026f9c19819c83d99dfff12b594db9d061bfb31
[ "CC0-1.0" ]
null
null
null
scripts/statistics.py
cstenkamp/MastersThesisText
d026f9c19819c83d99dfff12b594db9d061bfb31
[ "CC0-1.0" ]
null
null
null
import subprocess
import git
from os.path import dirname, join, abspath
import pandas as pd
from matplotlib import pyplot as plt
import requests
import io
import zipfile
import tempfile
from datetime import timedelta

FILENAME = join(dirname(__file__), "..", "thesis.tex")
DISP_PAGESMAX = 80
DISP_WORDSMAX = 10000


def return_piped_cmd(cmd, stdin=None):
    cmd = cmd.split("|")
    if not stdin:
        ps = subprocess.Popen(cmd[0].strip().split(" "), stdout=subprocess.PIPE)
    else:
        ps = subprocess.Popen(cmd[0].strip().split(" "), stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        ps.stdin.write(stdin.encode("UTF-8"))
        ps.stdin.close()
    if len(cmd) == 1:
        return ps.stdout.read().decode("UTF-8")
    output = subprocess.check_output(cmd[1].strip().split(" "), stdin=ps.stdout).decode("UTF-8")
    ps.wait()
    return output


def get_todos(fname=None, txt=None):
    if fname:
        with open(fname, "r") as rfile:
            txt = rfile.read()
    txt = txt.replace("% ", "%").lower()
    return txt.count("%todo")


def get_npages(fname):
    tmp = return_piped_cmd(f'pdfinfo {fname.replace(".tex", ".pdf")}')
    return int([i for i in tmp.split("\n") if "Pages:" in i][0][len("Pages:"):].strip())


def github_get_npages(owner, repo, pdfname):
    date_pages = {}
    resp = requests.get(f"https://api.github.com/repos/{owner}/{repo}/actions/artifacts",
                        headers=dict(Accept="application/vnd.github.v3+json"))
    for i in resp.json()["artifacts"]:
        art_id = i["url"][i["url"].rfind("/")+1:]
        re2 = requests.get(f"https://nightly.link/{owner}/{repo}/actions/artifacts/{art_id}.zip")
        if re2.status_code != 404:
            # print(i["created_at"])
            archive = zipfile.ZipFile(io.BytesIO(re2.content))
            with tempfile.NamedTemporaryFile(suffix=".pdf") as wfile:
                wfile.write(archive.read(pdfname))
                n_pages = get_npages(wfile.name)
            # print(f"Pages: {n_pages}")
            date_pages[pd.to_datetime([i["created_at"]]).to_pydatetime()[0]] = n_pages
    return pd.Series(date_pages)


def plot_df(df):
    ax1 = df["Words"].plot(color="red", linestyle="-", marker="o", ylabel="Words")
    ax1.set_ylim(0, max(df["Words"].max(), DISP_WORDSMAX))
    ax2 = ax1.twinx()
    ax2.spines['right'].set_position(('axes', 1.0))
    df["Todos"].plot(ax=ax2, color="blue", linestyle="-", marker="x", ylabel="Todos")
    ax3 = ax1.twinx()
    df["Pages"].plot(ax=ax3, color="yellow", linestyle="", marker="s", ylabel="Pages")
    for ax in [ax2, ax3]:
        ax.set_ylim((0, max(df["Todos"].max(), df["Pages"].max(), DISP_PAGESMAX)))
    ax3.yaxis.set_ticklabels([])
    lines, labels = list(zip(*[[i[0] for i in ax.get_legend_handles_labels()] for ax in [ax1, ax2, ax3]]))
    plt.legend(lines, labels, loc=0)
    plt.show()


def create_history_df(repo_dir, filename):
    #print(abspath(repo_dir))
    repo = git.Repo(repo_dir)
    all_commits = {}
    for commit in repo.iter_commits():
        txt = (commit.tree / filename).data_stream.read().decode("UTF-8")
        n_words = int(return_piped_cmd("detex | wc -w", stdin=txt).strip())
        n_todos = get_todos(txt=txt)
        # print(datetime.fromtimestamp(commit.committed_date))
        # print(f"words: {n_words}, todos: {n_todos}")
        all_commits[pd.to_datetime(commit.committed_datetime, utc=True)] = [n_words, n_todos]
    df = pd.DataFrame(all_commits, index=["Words", "Todos"]).T
    return df


def merge_page_df(df, date_pages):
    for date in df.index:
        try:
            nearest_datepage_after = date_pages.index[date_pages.index.get_loc(date, method='bfill')]
        except KeyError:
            continue
        if nearest_datepage_after-date <= timedelta(hours=2):
            df.loc[date, "Pages"] = int(date_pages[nearest_datepage_after])
    return df


if __name__ == "__main__":
    #history
    df = create_history_df(dirname(FILENAME), "thesis.tex")
    date_pages = github_get_npages("cstenkamp", "MastersThesisText", "thesis.pdf")
    df = merge_page_df(df, date_pages)
    plot_df(df)

    #current
    n_words = int(return_piped_cmd(f"detex {FILENAME} | wc -w"))
    n_pages = get_npages(FILENAME)
    n_todos = get_todos(FILENAME)
    print(f"Words: {n_words}, Pages: {n_pages}, Todos: {n_todos}")
38.132743
144
0.637503
0
0
0
0
0
0
0
0
836
0.194013
be7ef9e5cafc81c92530c829cae514f567ffa39a
1,966
py
Python
setup.py
TheFraserLab/enrich_pvalues
6c5065da5e6367cc39a045afbdfa1e78322857a6
[ "MIT" ]
1
2019-03-25T17:38:47.000Z
2019-03-25T17:38:47.000Z
setup.py
TheFraserLab/enrich_pvalues
6c5065da5e6367cc39a045afbdfa1e78322857a6
[ "MIT" ]
null
null
null
setup.py
TheFraserLab/enrich_pvalues
6c5065da5e6367cc39a045afbdfa1e78322857a6
[ "MIT" ]
null
null
null
"""Installation instructions for enrich_pvalues.""" import os from setuptools import setup import enrich_pvalues # For version VERSION=enrich_pvalues.__version__ GITHUB='https://github.com/MikeDacre/enrich_pvalues' with open('requirements.txt') as fin: REQUIREMENTS = [ i[0] for i in [j.split('>=') for j in fin.read().strip().split('\n')] ] def read(fname): """Read the contents of a file in this dir.""" with open(os.path.join(os.path.dirname(__file__), fname)) as fin: return fin.read() # Actual setup instructions setup( name = 'enrich_pvalues', version = VERSION, author = 'Mike Dacre', author_email = '[email protected]', description = ( "Compare one dataset to another at a variety of p-value cutoffs" ), keywords = ( "statistics p-values biology molecular-biology console" ), long_description = read('README.rst'), license = 'MIT', # URLs url = GITHUB, download_url='{0}/archive/v{1}.tar.gz'.format(GITHUB, VERSION), py_modules=['enrich_pvalues'], entry_points = { 'console_scripts': [ 'enrich_pvalues = enrich_pvalues:main', ], }, # See https://pypi.python.org/pypi?%3Aaction=list_classifiers classifiers=[ 'Development Status :: 4 - Beta', # 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Intended Audience :: End Users/Desktop', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', 'Operating System :: MacOS :: MacOS X', 'Operating System :: POSIX', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3', 'Topic :: Utilities', ], # Requirements requires=REQUIREMENTS, install_requires=REQUIREMENTS )
28.085714
77
0.61648
0
0
0
0
0
0
0
0
1,019
0.518311
be7fa8fa9510f2347bc60a9ff146e619c5f6dc1c
11,457
py
Python
homeschool/students/tests/test_forms.py
brandonmcclure/homeschool
6ba2e35014740e952222535e9492cde0d41338b4
[ "MIT" ]
null
null
null
homeschool/students/tests/test_forms.py
brandonmcclure/homeschool
6ba2e35014740e952222535e9492cde0d41338b4
[ "MIT" ]
null
null
null
homeschool/students/tests/test_forms.py
brandonmcclure/homeschool
6ba2e35014740e952222535e9492cde0d41338b4
[ "MIT" ]
null
null
null
import datetime from homeschool.courses.tests.factories import ( CourseFactory, CourseTaskFactory, GradedWorkFactory, ) from homeschool.schools.tests.factories import GradeLevelFactory from homeschool.students.forms import CourseworkForm, EnrollmentForm, GradeForm from homeschool.students.models import Coursework, Grade from homeschool.students.tests.factories import ( CourseworkFactory, EnrollmentFactory, GradeFactory, StudentFactory, ) from homeschool.test import TestCase class TestCourseworkForm(TestCase): def test_is_valid(self): """The coursework validates.""" user = self.make_user() student = StudentFactory(school=user.school) grade_level = GradeLevelFactory(school_year__school=user.school) EnrollmentFactory(student=student, grade_level=grade_level) course = CourseFactory(grade_levels=[grade_level]) course_task = CourseTaskFactory(course=course) data = { "student": str(student.id), "course_task": str(course_task.id), "completed_date": str(grade_level.school_year.start_date), } form = CourseworkForm(data=data) is_valid = form.is_valid() assert is_valid def test_student_can_create_coursework(self): """The student is enrolled in a course that contains the task.""" user = self.make_user() student = StudentFactory(school=user.school) grade_level = GradeLevelFactory(school_year__school=user.school) course = CourseFactory(grade_levels=[grade_level]) course_task = CourseTaskFactory(course=course) data = { "student": str(student.id), "course_task": str(course_task.id), "completed_date": str(grade_level.school_year.start_date), } form = CourseworkForm(data=data) is_valid = form.is_valid() assert not is_valid assert form.non_field_errors() == [ "The student is not enrolled in this course." ] def test_save_new_coursework(self): """A new coursework is created for a student and task.""" user = self.make_user() student = StudentFactory(school=user.school) grade_level = GradeLevelFactory(school_year__school=user.school) EnrollmentFactory(student=student, grade_level=grade_level) course = CourseFactory(grade_levels=[grade_level]) course_task = CourseTaskFactory(course=course) data = { "student": str(student.id), "course_task": str(course_task.id), "completed_date": str(grade_level.school_year.start_date), } form = CourseworkForm(data=data) form.is_valid() form.save() assert ( Coursework.objects.filter(student=student, course_task=course_task).count() == 1 ) def test_save_existing_coursework(self): """A new coursework is created for a student and task.""" user = self.make_user() student = StudentFactory(school=user.school) grade_level = GradeLevelFactory(school_year__school=user.school) EnrollmentFactory(student=student, grade_level=grade_level) course = CourseFactory(grade_levels=[grade_level]) course_task = CourseTaskFactory(course=course) CourseworkFactory(student=student, course_task=course_task) data = { "student": str(student.id), "course_task": str(course_task.id), "completed_date": str(grade_level.school_year.start_date), } form = CourseworkForm(data=data) form.is_valid() form.save() assert ( Coursework.objects.filter(student=student, course_task=course_task).count() == 1 ) def test_save_deletes_coursework(self): """A blank completed date deletes an existing coursework.""" user = self.make_user() student = StudentFactory(school=user.school) grade_level = GradeLevelFactory(school_year__school=user.school) EnrollmentFactory(student=student, grade_level=grade_level) course = CourseFactory(grade_levels=[grade_level]) course_task = CourseTaskFactory(course=course) CourseworkFactory(student=student, 
course_task=course_task) data = { "student": str(student.id), "course_task": str(course_task.id), } form = CourseworkForm(data=data) form.is_valid() form.save() assert ( Coursework.objects.filter(student=student, course_task=course_task).count() == 0 ) def test_completed_date_outside_school_year(self): """The completed data must be in the school year.""" user = self.make_user() student = StudentFactory(school=user.school) grade_level = GradeLevelFactory(school_year__school=user.school) EnrollmentFactory(student=student, grade_level=grade_level) course = CourseFactory(grade_levels=[grade_level]) course_task = CourseTaskFactory(course=course) data = { "student": str(student.id), "course_task": str(course_task.id), "completed_date": str( grade_level.school_year.start_date - datetime.timedelta(days=1) ), } form = CourseworkForm(data=data) is_valid = form.is_valid() assert not is_valid assert form.non_field_errors() == [ "The completed date must be in the school year." ] def test_invalid_course_task(self): """An invalid course task is an error.""" user = self.make_user() student = StudentFactory(school=user.school) grade_level = GradeLevelFactory(school_year__school=user.school) EnrollmentFactory(student=student, grade_level=grade_level) course = CourseFactory(grade_levels=[grade_level]) CourseTaskFactory(course=course) data = { "student": str(student.id), "course_task": "0", "completed_date": str(grade_level.school_year.start_date), } form = CourseworkForm(data=data) is_valid = form.is_valid() assert not is_valid def test_invalid_completed_date(self): """An invalid completed date is an error.""" user = self.make_user() student = StudentFactory(school=user.school) grade_level = GradeLevelFactory(school_year__school=user.school) EnrollmentFactory(student=student, grade_level=grade_level) course = CourseFactory(grade_levels=[grade_level]) course_task = CourseTaskFactory(course=course) data = { "student": str(student.id), "course_task": str(course_task.id), "completed_date": "boom", } form = CourseworkForm(data=data) is_valid = form.is_valid() assert not is_valid class TestEnrollmentForm(TestCase): def test_students_only_enroll_in_one_grade_level_per_year(self): """A student can only be enrolled in a single grade level in a school year.""" user = self.make_user() enrollment = EnrollmentFactory( student__school=user.school, grade_level__school_year__school=user.school ) another_grade_level = GradeLevelFactory( school_year=enrollment.grade_level.school_year ) data = { "student": str(enrollment.student.id), "grade_level": str(another_grade_level.id), } form = EnrollmentForm(user=user, data=data) is_valid = form.is_valid() assert not is_valid assert ( "A student may not be enrolled in multiple grade levels in a school year. " f"{enrollment.student} is enrolled in {enrollment.grade_level}." in form.non_field_errors() ) def test_no_grade_level(self): """A missing grade level raises a validation error.""" user = self.make_user() school = user.school enrollment = EnrollmentFactory( student__school=school, grade_level__school_year__school=school ) data = {"student": str(enrollment.student.id), "grade_level": "0"} form = EnrollmentForm(user=user, data=data) is_valid = form.is_valid() assert not is_valid assert "You need to select a grade level." 
in form.non_field_errors() class TestGradeForm(TestCase): def test_is_valid(self): """The new grade validates.""" user = self.make_user() student = StudentFactory(school=user.school) grade_level = GradeLevelFactory(school_year__school=user.school) EnrollmentFactory(student=student, grade_level=grade_level) course = CourseFactory(grade_levels=[grade_level]) graded_work = GradedWorkFactory(course_task__course=course) data = { "student": str(student.id), "graded_work": str(graded_work.id), "score": "100", } form = GradeForm(data=data) is_valid = form.is_valid() assert is_valid def test_invalid_graded_work(self): """An invalid graded work is an error.""" user = self.make_user() student = StudentFactory(school=user.school) grade_level = GradeLevelFactory(school_year__school=user.school) EnrollmentFactory(student=student, grade_level=grade_level) course = CourseFactory(grade_levels=[grade_level]) GradedWorkFactory(course_task__course=course) data = {"student": str(student.id), "graded_work": "0", "score": "100"} form = GradeForm(data=data) is_valid = form.is_valid() assert not is_valid def test_save(self): """The form creates a new grade.""" user = self.make_user() student = StudentFactory(school=user.school) grade_level = GradeLevelFactory(school_year__school=user.school) EnrollmentFactory(student=student, grade_level=grade_level) course = CourseFactory(grade_levels=[grade_level]) graded_work = GradedWorkFactory(course_task__course=course) data = { "student": str(student.id), "graded_work": str(graded_work.id), "score": "100", } form = GradeForm(data=data) form.is_valid() form.save() assert ( Grade.objects.filter( student=student, graded_work=graded_work, score=100 ).count() == 1 ) def test_save_update(self): """The form updates a grade.""" user = self.make_user() student = StudentFactory(school=user.school) grade_level = GradeLevelFactory(school_year__school=user.school) EnrollmentFactory(student=student, grade_level=grade_level) course = CourseFactory(grade_levels=[grade_level]) graded_work = GradedWorkFactory(course_task__course=course) GradeFactory(student=student, graded_work=graded_work) data = { "student": str(student.id), "graded_work": str(graded_work.id), "score": "100", } form = GradeForm(data=data) form.is_valid() form.save() assert ( Grade.objects.filter(student=student, graded_work=graded_work).count() == 1 )
36.141956
87
0.639696
10,940
0.954875
0
0
0
0
0
0
1,426
0.124465
be7fc184a7b92d4ec6db9908dc208989d6e4f546
23,144
py
Python
Mining_Projects/getAllProjects_Parallel.py
ai-se/heroes_compsci
613fd623a6da073b2c62c773ed902acb0c756809
[ "MIT" ]
null
null
null
Mining_Projects/getAllProjects_Parallel.py
ai-se/heroes_compsci
613fd623a6da073b2c62c773ed902acb0c756809
[ "MIT" ]
12
2019-12-17T04:04:19.000Z
2019-12-26T20:23:02.000Z
Mining_Projects/getAllProjects_Parallel.py
ai-se/heroes_compsci
613fd623a6da073b2c62c773ed902acb0c756809
[ "MIT" ]
1
2020-03-12T22:19:48.000Z
2020-03-12T22:19:48.000Z
""" @Author Jchakra""" """ This code is to download project information using GitHub API (Following Amrit's Hero paper criteria of how to find good projects) """ from multiprocessing import Process,Lock import time import json import requests ## Downloading all the projects def func1(): repo_result = [] Token_list = [''**'',''**'',''**'',''**'',''**''] i = 0 api_url = 'https://api.github.com/' while i < 10000: # This number will be increased to collect all the projects repo_url = api_url + 'repositories?since=' + str(i) exception_count = 0 while exception_count < 2: try: for k in range(0,len(Token_list)): headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])} #print(Token_list[k]) repo_response = requests.get(repo_url, headers=headers).json() #print(repo_response) try: if ( len(repo_response['message']) > 0): if( k == len(Token_list) - 1): time.sleep(600) exception_count = exception_count + 1 else: continue except: break if ( exception_count == 0): break else: continue except: exception_count = 0 project_list = [] try: for j in range(0,len(repo_response)): project_id = repo_response[j]['id'] project_name = repo_response[j]['name'] project_full_name = repo_response[j]['full_name'] project_html_url = repo_response[j]['html_url'] project_owner_name = repo_response[j]['owner']['login'] project_obj = {"id" : project_id, "name": project_name, "full_name" : project_full_name, "html_url" : project_html_url, "owner" : project_owner_name , "issues" : "", "commits" : "", "PR" : ""} project_list.append(project_obj) except: print ("exception occurred") try: last_id = repo_response[99]["id"] i = last_id repo_result = repo_result + project_list except: print(" exception inside function 1 ") break ## Removing projects having less than 8 issues p = 0 while p < len(repo_result): repo_owner = repo_result[p]['owner'] repo_name = repo_result[p]['name'] issue_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'issues' exception_count = 0 while exception_count < 2: try: for k in range(0,len(Token_list)): headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])} #print(Token_list[k]) issue_response = requests.get(issue_url, headers=headers).json() try: if ( len(issue_response['message']) > 0): if( k == len(Token_list) - 1): time.sleep(600) exception_count = exception_count + 1 else: continue except: break if ( exception_count == 0): break else: continue except: exception_count = 0 if(len(issue_response) > 10): repo_result[p]["issues"] = len(issue_response) p = p + 1 else: repo_result.pop(p) ## Selecting the projects with Pull Request > 0 m = 0 while m < len(repo_result): repo_owner = repo_result[m]['owner'] repo_name = repo_result[m]['name'] PR_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'pulls?state=all' exception_count = 0 while exception_count < 2: try: for k in range(0,len(Token_list)): headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])} #print(Token_list[k]) PR_response = requests.get(PR_url, headers=headers).json() try: if ( len(PR_response['message']) > 0): if( k == len(Token_list) - 1): time.sleep(600) exception_count = exception_count + 1 else: continue except: break if ( exception_count == 0): break else: continue except: exception_count = 0 if(len(PR_response) > 0): repo_result[m]["PR"] = len(PR_response) m = m + 1 else: repo_result.pop(m) ## Selecting Projects with commits > 20 n = 0 while n < len(repo_result): repo_owner = 
repo_result[n]['owner'] repo_name = repo_result[n]['name'] commit_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'commits' exception_count = 0 while exception_count < 2: try: for k in range(0,len(Token_list)): headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])} #print(Token_list[k]) commit_response = requests.get(commit_url, headers=headers).json() try: if ( len(commit_response['message']) > 0): if( k == len(Token_list) - 1): time.sleep(600) exception_count = exception_count + 1 else: continue except: break if ( exception_count == 0): break else: continue except: exception_count = 0 if(len(commit_response) > 20): repo_result[n]["commits"] = len(commit_response) n = n + 1 else: repo_result.pop(n) with open("repo_file1.json", "w") as repo_file: json.dump(repo_result, repo_file) print("function 1 finished", len(repo_result)) def func2(): repo_result = [] Token_list = [''**'',''**'',''**'',''**'',''**''] i = 10000 api_url = 'https://api.github.com/' while i < 20000: # This number will be increased to collect all the projects repo_url = api_url + 'repositories?since=' + str(i) exception_count = 0 while exception_count < 2: try: for k in range(0,len(Token_list)): headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])} #print(Token_list[k]) repo_response = requests.get(repo_url, headers=headers).json() #print(repo_response) try: if ( len(repo_response['message']) > 0): if( k == len(Token_list) - 1): time.sleep(600) exception_count = exception_count + 1 else: continue except: break if ( exception_count == 0): break else: continue except: exception_count = 0 project_list = [] try: for j in range(0,len(repo_response)): project_id = repo_response[j]['id'] project_name = repo_response[j]['name'] project_full_name = repo_response[j]['full_name'] project_html_url = repo_response[j]['html_url'] project_owner_name = repo_response[j]['owner']['login'] project_obj = {"id" : project_id, "name": project_name, "full_name" : project_full_name, "html_url" : project_html_url, "owner" : project_owner_name , "issues" : "", "commits" : "", "PR" : ""} project_list.append(project_obj) except: print ("exception occurred") try: last_id = repo_response[99]["id"] i = last_id repo_result = repo_result + project_list except: print(" exception inside function 2 ") break ## Removing projects having less than 8 issues p = 0 while p < len(repo_result): repo_owner = repo_result[p]['owner'] repo_name = repo_result[p]['name'] issue_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'issues' exception_count = 0 while exception_count < 2: try: for k in range(0,len(Token_list)): headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])} #print(Token_list[k]) issue_response = requests.get(issue_url, headers=headers).json() try: if ( len(issue_response['message']) > 0): if( k == len(Token_list) - 1): time.sleep(600) exception_count = exception_count + 1 else: continue except: break if ( exception_count == 0): break else: continue except: exception_count = 0 if(len(issue_response) > 10): repo_result[p]["issues"] = len(issue_response) p = p + 1 else: repo_result.pop(p) ## Selecting the projects with Pull Request > 0 m = 0 while m < len(repo_result): repo_owner = repo_result[m]['owner'] repo_name = repo_result[m]['name'] PR_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'pulls?state=all' exception_count = 0 while exception_count < 2: try: for k in 
range(0,len(Token_list)): headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])} #print(Token_list[k]) PR_response = requests.get(PR_url, headers=headers).json() try: if ( len(PR_response['message']) > 0): if( k == len(Token_list) - 1): time.sleep(600) exception_count = exception_count + 1 else: continue except: break if ( exception_count == 0): break else: continue except: exception_count = 0 if(len(PR_response) > 0): repo_result[m]["PR"] = len(PR_response) m = m + 1 else: repo_result.pop(m) ## Selecting Projects with commits > 20 n = 0 while n < len(repo_result): repo_owner = repo_result[n]['owner'] repo_name = repo_result[n]['name'] commit_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'commits' exception_count = 0 while exception_count < 2: try: for k in range(0,len(Token_list)): headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])} #print(Token_list[k]) commit_response = requests.get(commit_url, headers=headers).json() try: if ( len(commit_response['message']) > 0): if( k == len(Token_list) - 1): time.sleep(600) exception_count = exception_count + 1 else: continue except: break if ( exception_count == 0): break else: continue except: exception_count = 0 if(len(commit_response) > 20): repo_result[n]["commits"] = len(commit_response) n = n + 1 else: repo_result.pop(n) with open("repo_file2.json", "w") as repo_file: json.dump(repo_result, repo_file) print("function 2 finished", len(repo_result)) def func3(): repo_result = [] Token_list = [''**'',''**'',''**'',''**'',''**''] i = 20000 api_url = 'https://api.github.com/' while i < 30000: # This number will be increased to collect all the projects repo_url = api_url + 'repositories?since=' + str(i) exception_count = 0 while exception_count < 2: try: for k in range(0,len(Token_list)): headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])} #print(Token_list[k]) repo_response = requests.get(repo_url, headers=headers).json() #print(repo_response) try: if ( len(repo_response['message']) > 0): if( k == len(Token_list) - 1): time.sleep(600) exception_count = exception_count + 1 else: continue except: break if ( exception_count == 0): break else: continue except: exception_count = 0 project_list = [] try: for j in range(0,len(repo_response)): project_id = repo_response[j]['id'] project_name = repo_response[j]['name'] project_full_name = repo_response[j]['full_name'] project_html_url = repo_response[j]['html_url'] project_owner_name = repo_response[j]['owner']['login'] project_obj = {"id" : project_id, "name": project_name, "full_name" : project_full_name, "html_url" : project_html_url, "owner" : project_owner_name , "issues" : "", "commits" : "", "PR" : ""} project_list.append(project_obj) except: print ("exception occurred") try: last_id = repo_response[99]["id"] i = last_id repo_result = repo_result + project_list except: print(" exception inside function 3 ") break ## Removing projects having less than 8 issues p = 0 while p < len(repo_result): repo_owner = repo_result[p]['owner'] repo_name = repo_result[p]['name'] issue_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'issues' exception_count = 0 while exception_count < 2: try: for k in range(0,len(Token_list)): headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])} #print(Token_list[k]) issue_response = requests.get(issue_url, headers=headers).json() try: if ( 
len(issue_response['message']) > 0): if( k == len(Token_list) - 1): time.sleep(600) exception_count = exception_count + 1 else: continue except: break if ( exception_count == 0): break else: continue except: exception_count = 0 if(len(issue_response) > 10): repo_result[p]["issues"] = len(issue_response) p = p + 1 else: repo_result.pop(p) ## Selecting the projects with Pull Request > 0 m = 0 while m < len(repo_result): repo_owner = repo_result[m]['owner'] repo_name = repo_result[m]['name'] PR_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'pulls?state=all' exception_count = 0 while exception_count < 2: try: for k in range(0,len(Token_list)): headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])} #print(Token_list[k]) PR_response = requests.get(PR_url, headers=headers).json() try: if ( len(PR_response['message']) > 0): if( k == len(Token_list) - 1): time.sleep(600) exception_count = exception_count + 1 else: continue except: break if ( exception_count == 0): break else: continue except: exception_count = 0 if(len(PR_response) > 0): repo_result[m]["PR"] = len(PR_response) m = m + 1 else: repo_result.pop(m) ## Selecting Projects with commits > 20 n = 0 while n < len(repo_result): repo_owner = repo_result[n]['owner'] repo_name = repo_result[n]['name'] commit_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'commits' exception_count = 0 while exception_count < 2: try: for k in range(0,len(Token_list)): headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])} #print(Token_list[k]) commit_response = requests.get(commit_url, headers=headers).json() try: if ( len(commit_response['message']) > 0): if( k == len(Token_list) - 1): time.sleep(600) exception_count = exception_count + 1 else: continue except: break if ( exception_count == 0): break else: continue except: exception_count = 0 if(len(commit_response) > 20): repo_result[n]["commits"] = len(commit_response) n = n + 1 else: repo_result.pop(n) with open("repo_file3.json", "w") as repo_file: json.dump(repo_result, repo_file) print("function 3 finished", len(repo_result)) def func4(): repo_result = [] Token_list = [''**'',''**'',''**'',''**'',''**''] i = 30000 api_url = 'https://api.github.com/' while i < 40000: # This number will be increased to collect all the projects repo_url = api_url + 'repositories?since=' + str(i) exception_count = 0 while exception_count < 2: try: for k in range(0,len(Token_list)): headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])} #print(Token_list[k]) repo_response = requests.get(repo_url, headers=headers).json() #print(repo_response) try: if ( len(repo_response['message']) > 0): if( k == len(Token_list) - 1): time.sleep(600) exception_count = exception_count + 1 else: continue except: break if ( exception_count == 0): break else: continue except: exception_count = 0 project_list = [] try: for j in range(0,len(repo_response)): project_id = repo_response[j]['id'] project_name = repo_response[j]['name'] project_full_name = repo_response[j]['full_name'] project_html_url = repo_response[j]['html_url'] project_owner_name = repo_response[j]['owner']['login'] project_obj = {"id" : project_id, "name": project_name, "full_name" : project_full_name, "html_url" : project_html_url, "owner" : project_owner_name , "issues" : "", "commits" : "", "PR" : ""} project_list.append(project_obj) except: print ("exception occurred") try: last_id = repo_response[99]["id"] i = 
last_id repo_result = repo_result + project_list except: print(" exception inside function 4 ") break ## Removing projects having less than 8 issues p = 0 while p < len(repo_result): repo_owner = repo_result[p]['owner'] repo_name = repo_result[p]['name'] issue_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'issues' exception_count = 0 while exception_count < 2: try: for k in range(0,len(Token_list)): headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])} #print(Token_list[k]) issue_response = requests.get(issue_url, headers=headers).json() try: if ( len(issue_response['message']) > 0): if( k == len(Token_list) - 1): time.sleep(600) exception_count = exception_count + 1 else: continue except: break if ( exception_count == 0): break else: continue except: exception_count = 0 if(len(issue_response) > 10): repo_result[p]["issues"] = len(issue_response) p = p + 1 else: repo_result.pop(p) ## Selecting the projects with Pull Request > 0 m = 0 while m < len(repo_result): repo_owner = repo_result[m]['owner'] repo_name = repo_result[m]['name'] PR_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'pulls?state=all' exception_count = 0 while exception_count < 2: try: for k in range(0,len(Token_list)): headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])} #print(Token_list[k]) PR_response = requests.get(PR_url, headers=headers).json() try: if ( len(PR_response['message']) > 0): if( k == len(Token_list) - 1): time.sleep(600) exception_count = exception_count + 1 else: continue except: break if ( exception_count == 0): break else: continue except: exception_count = 0 if(len(PR_response) > 0): repo_result[m]["PR"] = len(PR_response) m = m + 1 else: repo_result.pop(m) ## Selecting Projects with commits > 20 n = 0 while n < len(repo_result): repo_owner = repo_result[n]['owner'] repo_name = repo_result[n]['name'] commit_url = api_url + 'repos/' + repo_owner + '/' + repo_name + '/' + 'commits' exception_count = 0 while exception_count < 2: try: for k in range(0,len(Token_list)): headers = {'Content-Type': 'application/json','Authorization': 'Bearer {0}'.format(Token_list[k])} #print(Token_list[k]) commit_response = requests.get(commit_url, headers=headers).json() try: if ( len(commit_response['message']) > 0): if( k == len(Token_list) - 1): time.sleep(600) exception_count = exception_count + 1 else: continue except: break if ( exception_count == 0): break else: continue except: exception_count = 0 if(len(commit_response) > 20): repo_result[n]["commits"] = len(commit_response) n = n + 1 else: repo_result.pop(n) with open("repo_file4.json", "w") as repo_file: json.dump(repo_result, repo_file) print("function 4 finished", len(repo_result)) if __name__ == '__main__': lock = Lock() p1 = Process(target=func1) p2 = Process(target=func2) p3 = Process(target=func3) p4 = Process(target=func4) p1.start() p2.start() p3.start() p4.start() p1.join() p2.join() p3.join() p4.join()
29.407878
169
0.527523
0
0
0
0
0
0
0
0
4,357
0.188256
be8016a800ed48d86a67fbff5afe5ec6d0a2e6a3
2,173
py
Python
examples/source/benchmarks/googlenet_model.py
ably77/dcos-tensorflow-tools
d434ff6c0cee6db9f62be583723dc2bee46ebbf2
[ "Apache-2.0" ]
7
2017-11-02T18:21:37.000Z
2019-06-20T20:46:51.000Z
scripts/tf_cnn_benchmarks/googlenet_model.py
Aetf/tf_benchmarks
b473961620de1b03cb34902960c820e195bea678
[ "Apache-2.0" ]
7
2017-10-19T20:45:25.000Z
2020-03-24T15:28:52.000Z
scripts/tf_cnn_benchmarks/googlenet_model.py
Aetf/tf_benchmarks
b473961620de1b03cb34902960c820e195bea678
[ "Apache-2.0" ]
4
2017-10-19T09:57:17.000Z
2019-01-22T05:33:25.000Z
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Googlenet model configuration.

References:
  Szegedy, Christian, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
  Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, and Andrew Rabinovich
  Going deeper with convolutions
  arXiv preprint arXiv:1409.4842 (2014)
"""

import model


class GooglenetModel(model.Model):

  def __init__(self):
    super(GooglenetModel, self).__init__('googlenet', 224, 32, 0.005)

  def add_inference(self, cnn):

    def inception_v1(cnn, k, l, m, n, p, q):
      cols = [[('conv', k, 1, 1)], [('conv', l, 1, 1), ('conv', m, 3, 3)],
              [('conv', n, 1, 1), ('conv', p, 5, 5)],
              [('mpool', 3, 3, 1, 1, 'SAME'), ('conv', q, 1, 1)]]
      cnn.inception_module('incept_v1', cols)

    cnn.conv(64, 7, 7, 2, 2)
    cnn.mpool(3, 3, 2, 2, mode='SAME')
    cnn.conv(64, 1, 1)
    cnn.conv(192, 3, 3)
    cnn.mpool(3, 3, 2, 2, mode='SAME')
    inception_v1(cnn, 64, 96, 128, 16, 32, 32)
    inception_v1(cnn, 128, 128, 192, 32, 96, 64)
    cnn.mpool(3, 3, 2, 2, mode='SAME')
    inception_v1(cnn, 192, 96, 208, 16, 48, 64)
    inception_v1(cnn, 160, 112, 224, 24, 64, 64)
    inception_v1(cnn, 128, 128, 256, 24, 64, 64)
    inception_v1(cnn, 112, 144, 288, 32, 64, 64)
    inception_v1(cnn, 256, 160, 320, 32, 128, 128)
    cnn.mpool(3, 3, 2, 2, mode='SAME')
    inception_v1(cnn, 256, 160, 320, 32, 128, 128)
    inception_v1(cnn, 384, 192, 384, 48, 128, 128)
    cnn.apool(7, 7, 1, 1, mode='VALID')
    cnn.reshape([-1, 1024])
37.465517
80
0.61942
1,191
0.54809
0
0
0
0
0
0
1,051
0.483663
be802497e70c37700eec284c7ee0e2b8f03f6401
60
py
Python
demos/prey-predator/prey_predator_abm/sim_params.py
neo-empresarial/covid-19
cef10ee79d955c9e84148c3c8da542788a1f7395
[ "MIT" ]
3
2020-05-26T12:17:48.000Z
2020-06-25T12:03:37.000Z
demos/prey-predator/prey_predator_abm/sim_params.py
neo-empresarial/covid-19
cef10ee79d955c9e84148c3c8da542788a1f7395
[ "MIT" ]
4
2020-05-26T21:03:44.000Z
2020-06-30T12:13:15.000Z
demos/prey-predator/prey_predator_abm/sim_params.py
neo-empresarial/epidemiological-analysis
cef10ee79d955c9e84148c3c8da542788a1f7395
[ "MIT" ]
1
2021-11-22T23:10:45.000Z
2021-11-22T23:10:45.000Z
""" Simulation parameters. """ SIMULATION_TIME_STEPS = 300
10
27
0.733333
0
0
0
0
0
0
0
0
30
0.5
be82ffa5bc528b97777e4e4160bb45aca2d0d6ec
12,669
py
Python
process_ops.py
gcosne/generative_inpainting
1ae50277e5815a4f0c1e339ede0dbfae8e5036d1
[ "MIT" ]
11
2018-11-16T04:29:06.000Z
2019-07-25T08:11:47.000Z
process_ops.py
Yukariin/PEPSI
91aea1ae6f528d92ee19007ed132d3482b3a98cc
[ "MIT" ]
null
null
null
process_ops.py
Yukariin/PEPSI
91aea1ae6f528d92ee19007ed132d3482b3a98cc
[ "MIT" ]
1
2019-07-16T18:52:49.000Z
2019-07-16T18:52:49.000Z
import cv2 import numpy as np try: import scipy # scipy.ndimage cannot be accessed until explicitly imported from scipy import ndimage except ImportError: scipy = None def flip_axis(x, axis): x = np.asarray(x).swapaxes(axis, 0) x = x[::-1, ...] x = x.swapaxes(0, axis) return x def random_rotation(x, rg, row_axis=0, col_axis=1, channel_axis=2, fill_mode='nearest', cval=0., interpolation_order=1): """Performs a random rotation of a Numpy image tensor. # Arguments x: Input tensor. Must be 3D. rg: Rotation range, in degrees. row_axis: Index of axis for rows in the input tensor. col_axis: Index of axis for columns in the input tensor. channel_axis: Index of axis for channels in the input tensor. fill_mode: Points outside the boundaries of the input are filled according to the given mode (one of `{'constant', 'nearest', 'reflect', 'wrap'}`). cval: Value used for points outside the boundaries of the input if `mode='constant'`. interpolation_order int: order of spline interpolation. see `ndimage.interpolation.affine_transform` # Returns Rotated Numpy image tensor. """ theta = np.random.uniform(-rg, rg) x = apply_affine_transform(x, theta=theta, channel_axis=channel_axis, fill_mode=fill_mode, cval=cval, order=interpolation_order) return x def random_shift(x, wrg, hrg, row_axis=0, col_axis=1, channel_axis=2, fill_mode='nearest', cval=0., interpolation_order=1): """Performs a random spatial shift of a Numpy image tensor. # Arguments x: Input tensor. Must be 3D. wrg: Width shift range, as a float fraction of the width. hrg: Height shift range, as a float fraction of the height. row_axis: Index of axis for rows in the input tensor. col_axis: Index of axis for columns in the input tensor. channel_axis: Index of axis for channels in the input tensor. fill_mode: Points outside the boundaries of the input are filled according to the given mode (one of `{'constant', 'nearest', 'reflect', 'wrap'}`). cval: Value used for points outside the boundaries of the input if `mode='constant'`. interpolation_order int: order of spline interpolation. see `ndimage.interpolation.affine_transform` # Returns Shifted Numpy image tensor. """ h, w = x.shape[row_axis], x.shape[col_axis] tx = np.random.uniform(-hrg, hrg) * h ty = np.random.uniform(-wrg, wrg) * w x = apply_affine_transform(x, tx=tx, ty=ty, channel_axis=channel_axis, fill_mode=fill_mode, cval=cval, order=interpolation_order) return x def random_shear(x, intensity, row_axis=0, col_axis=1, channel_axis=2, fill_mode='nearest', cval=0., interpolation_order=1): """Performs a random spatial shear of a Numpy image tensor. # Arguments x: Input tensor. Must be 3D. intensity: Transformation intensity in degrees. row_axis: Index of axis for rows in the input tensor. col_axis: Index of axis for columns in the input tensor. channel_axis: Index of axis for channels in the input tensor. fill_mode: Points outside the boundaries of the input are filled according to the given mode (one of `{'constant', 'nearest', 'reflect', 'wrap'}`). cval: Value used for points outside the boundaries of the input if `mode='constant'`. interpolation_order int: order of spline interpolation. see `ndimage.interpolation.affine_transform` # Returns Sheared Numpy image tensor. 
""" shear = np.random.uniform(-intensity, intensity) x = apply_affine_transform(x, shear=shear, channel_axis=channel_axis, fill_mode=fill_mode, cval=cval, order=interpolation_order) return x def random_zoom(x, zoom_range, row_axis=0, col_axis=1, channel_axis=2, fill_mode='nearest', cval=0., interpolation_order=1): """Performs a random spatial zoom of a Numpy image tensor. # Arguments x: Input tensor. Must be 3D. zoom_range: Tuple of floats; zoom range for width and height. row_axis: Index of axis for rows in the input tensor. col_axis: Index of axis for columns in the input tensor. channel_axis: Index of axis for channels in the input tensor. fill_mode: Points outside the boundaries of the input are filled according to the given mode (one of `{'constant', 'nearest', 'reflect', 'wrap'}`). cval: Value used for points outside the boundaries of the input if `mode='constant'`. interpolation_order int: order of spline interpolation. see `ndimage.interpolation.affine_transform` # Returns Zoomed Numpy image tensor. # Raises ValueError: if `zoom_range` isn't a tuple. """ if len(zoom_range) != 2: raise ValueError('`zoom_range` should be a tuple or list of two' ' floats. Received: %s' % (zoom_range,)) if zoom_range[0] == 1 and zoom_range[1] == 1: zx, zy = 1, 1 else: zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2) x = apply_affine_transform(x, zx=zx, zy=zy, channel_axis=channel_axis, fill_mode=fill_mode, cval=cval, order=interpolation_order) return x def random_channel_shift(x, intensity, channel_axis=0): x = np.rollaxis(x, channel_axis, 0) min_x, max_x = np.min(x), np.max(x) channel_images = [np.clip(x_channel + np.random.uniform(-intensity, intensity), min_x, max_x) for x_channel in x] x = np.stack(channel_images, axis=0) x = np.rollaxis(x, 0, channel_axis + 1) return x # For curving soybean pods. L.C.Uzal def random_curves_transform(x, strength=0.1, range=(0.,255.)): low, high = range delta = (high - low) * strength / 2. xp = np.random.uniform(low=low + delta, high=high - delta) yp = np.random.uniform(low=xp-delta, high=xp+delta) xp = np.asarray([low, xp, high]) yp = np.asarray([low, yp, high]) return np.interp(x,xp,yp) def transform_matrix_offset_center(matrix, x, y): o_x = float(x) / 2 + 0.5 o_y = float(y) / 2 + 0.5 offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]]) reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]]) transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix) return transform_matrix def apply_affine_transform(x, theta=0, tx=0, ty=0, shear=0, zx=1, zy=1, row_axis=0, col_axis=1, channel_axis=2, fill_mode='nearest', cval=0., order=1): """Applies an affine transformation specified by the parameters given. # Arguments x: 2D numpy array, single image. theta: Rotation angle in degrees. tx: Width shift. ty: Heigh shift. shear: Shear angle in degrees. zx: Zoom in x direction. zy: Zoom in y direction row_axis: Index of axis for rows in the input image. col_axis: Index of axis for columns in the input image. channel_axis: Index of axis for channels in the input image. fill_mode: Points outside the boundaries of the input are filled according to the given mode (one of `{'constant', 'nearest', 'reflect', 'wrap'}`). cval: Value used for points outside the boundaries of the input if `mode='constant'`. order int: order of interpolation # Returns The transformed version of the input. """ if scipy is None: raise ImportError('Image transformations require SciPy. 
' 'Install SciPy.') transform_matrix = None if theta != 0: theta = np.deg2rad(theta) rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0], [np.sin(theta), np.cos(theta), 0], [0, 0, 1]]) transform_matrix = rotation_matrix if tx != 0 or ty != 0: shift_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]]) if transform_matrix is None: transform_matrix = shift_matrix else: transform_matrix = np.dot(transform_matrix, shift_matrix) if shear != 0: shear = np.deg2rad(shear) shear_matrix = np.array([[1, -np.sin(shear), 0], [0, np.cos(shear), 0], [0, 0, 1]]) if transform_matrix is None: transform_matrix = shear_matrix else: transform_matrix = np.dot(transform_matrix, shear_matrix) if zx != 1 or zy != 1: zoom_matrix = np.array([[zx, 0, 0], [0, zy, 0], [0, 0, 1]]) if transform_matrix is None: transform_matrix = zoom_matrix else: transform_matrix = np.dot(transform_matrix, zoom_matrix) if transform_matrix is not None: h, w = x.shape[row_axis], x.shape[col_axis] transform_matrix = transform_matrix_offset_center( transform_matrix, h, w) x = np.rollaxis(x, channel_axis, 0) final_affine_matrix = transform_matrix[:2, :2] final_offset = transform_matrix[:2, 2] channel_images = [ndimage.interpolation.affine_transform( x_channel, final_affine_matrix, final_offset, order=order, mode=fill_mode, cval=cval) for x_channel in x] x = np.stack(channel_images, axis=0) x = np.rollaxis(x, 0, channel_axis + 1) return x def random_transform(x, rotation_range=0, width_shift_range=0., height_shift_range=0., shear_range=0., zoom_range=0., channel_shift_range=0., horizontal_flip=False, vertical_flip=False, random_curves_strength=0.): # Generate params if rotation_range: theta = np.random.uniform(-rotation_range, rotation_range) else: theta = 0 h, w = x.shape[0], x.shape[1] if height_shift_range: tx = np.random.uniform(-height_shift_range, height_shift_range) * h else: tx = 0 if width_shift_range: ty = np.random.uniform(-width_shift_range, width_shift_range) * w else: ty = 0 if shear_range: shear = np.random.uniform(-shear_range, shear_range) else: shear = 0 if np.isscalar(zoom_range): zoom_range = [1 - zoom_range, 1 + zoom_range] elif len(zoom_range) == 2: zoom_range = [zoom_range[0], zoom_range[1]] else: raise ValueError('`zoom_range` should be a float or ' 'a tuple or list of two floats. ' 'Received arg: ', zoom_range) if zoom_range[0] == 1 and zoom_range[1] == 1: zx, zy = 1, 1 else: zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2) # Apply transforms x = apply_affine_transform(x, theta, tx, ty, shear, zx, zy) if channel_shift_range != 0: x = random_channel_shift(x, channel_shift_range, 2) if horizontal_flip: if np.random.random() < 0.5: x = flip_axis(x, 1) if vertical_flip: if np.random.random() < 0.5: x = flip_axis(x, 0) if random_curves_strength > 0.: x = random_curves_transform(x, random_curves_strength) return x if __name__ == "__main__": import argparse from PIL import Image parser = argparse.ArgumentParser() parser.add_argument('--image', default='', type=str) parser.add_argument('--imageOut', default='result.png', type=str) args = parser.parse_args() im = np.array(Image.open(args.image)) img = random_transform(im, rotation_range=10, shear_range=.5, zoom_range=.2, channel_shift_range=10., horizontal_flip=True) Image.fromarray(np.uint8(img)).save(args.imageOut)
38.861963
127
0.591207
0
0
0
0
0
0
0
0
4,797
0.378641
be831484dedc63eae50e233ddb777cdbd9a06d19
1,093
py
Python
keystone/tests/unit/token/test_provider.py
maestro-hybrid-cloud/keystone
a597a86b854215835a4d54885daeb161d7b0efb8
[ "Apache-2.0" ]
null
null
null
keystone/tests/unit/token/test_provider.py
maestro-hybrid-cloud/keystone
a597a86b854215835a4d54885daeb161d7b0efb8
[ "Apache-2.0" ]
null
null
null
keystone/tests/unit/token/test_provider.py
maestro-hybrid-cloud/keystone
a597a86b854215835a4d54885daeb161d7b0efb8
[ "Apache-2.0" ]
null
null
null
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import six
from six.moves import urllib

from keystone.tests import unit
from keystone.token import provider


class TestRandomStrings(unit.BaseTestCase):
    def test_strings_are_url_safe(self):
        s = provider.random_urlsafe_str()
        self.assertEqual(s, urllib.parse.quote_plus(s))

    def test_strings_can_be_converted_to_bytes(self):
        s = provider.random_urlsafe_str()
        self.assertTrue(isinstance(s, six.string_types))

        b = provider.random_urlsafe_str_to_bytes(s)
        self.assertTrue(isinstance(b, bytes))
35.258065
75
0.751144
435
0.397987
0
0
0
0
0
0
534
0.488564
be84323ccf5c7d5239ba7b3bf5eba0ad7152ce2f
2,927
py
Python
fasm2bels/database/connection_db_utils.py
mithro/symbiflow-xc-fasm2bels
9ed029558bedca4e726969427dc4e62ecd6d5733
[ "ISC" ]
null
null
null
fasm2bels/database/connection_db_utils.py
mithro/symbiflow-xc-fasm2bels
9ed029558bedca4e726969427dc4e62ecd6d5733
[ "ISC" ]
null
null
null
fasm2bels/database/connection_db_utils.py
mithro/symbiflow-xc-fasm2bels
9ed029558bedca4e726969427dc4e62ecd6d5733
[ "ISC" ]
null
null
null
import functools


def create_maybe_get_wire(conn):
    c = conn.cursor()

    @functools.lru_cache(maxsize=None)
    def get_tile_type_pkey(tile):
        c.execute('SELECT pkey, tile_type_pkey FROM phy_tile WHERE name = ?',
                  (tile, ))
        return c.fetchone()

    @functools.lru_cache(maxsize=None)
    def maybe_get_wire(tile, wire):
        phy_tile_pkey, tile_type_pkey = get_tile_type_pkey(tile)

        c.execute(
            'SELECT pkey FROM wire_in_tile WHERE phy_tile_type_pkey = ? and name = ?',
            (tile_type_pkey, wire))

        result = c.fetchone()
        if result is None:
            return None

        wire_in_tile_pkey = result[0]

        c.execute(
            'SELECT pkey FROM wire WHERE phy_tile_pkey = ? AND wire_in_tile_pkey = ?',
            (phy_tile_pkey, wire_in_tile_pkey))

        return c.fetchone()[0]

    return maybe_get_wire


def maybe_add_pip(top, maybe_get_wire, feature):
    if feature.value != 1:
        return

    parts = feature.feature.split('.')
    assert len(parts) == 3

    sink_wire = maybe_get_wire(parts[0], parts[2])
    if sink_wire is None:
        return

    src_wire = maybe_get_wire(parts[0], parts[1])
    if src_wire is None:
        return

    top.active_pips.add((sink_wire, src_wire))


def get_node_pkey(conn, wire_pkey):
    c = conn.cursor()

    c.execute("SELECT node_pkey FROM wire WHERE pkey = ?", (wire_pkey, ))

    return c.fetchone()[0]


def get_wires_in_node(conn, node_pkey):
    c = conn.cursor()

    c.execute("SELECT pkey FROM wire WHERE node_pkey = ?", (node_pkey, ))

    for row in c.fetchall():
        yield row[0]


def get_wire(conn, phy_tile_pkey, wire_in_tile_pkey):
    c = conn.cursor()

    c.execute(
        "SELECT pkey FROM wire WHERE wire_in_tile_pkey = ? AND phy_tile_pkey = ?;",
        (
            wire_in_tile_pkey,
            phy_tile_pkey,
        ))

    return c.fetchone()[0]


def get_tile_type(conn, tile_name):
    c = conn.cursor()

    c.execute(
        """
SELECT name FROM tile_type WHERE pkey = (
    SELECT tile_type_pkey FROM phy_tile WHERE name = ?);""", (tile_name, ))

    return c.fetchone()[0]


def get_wire_pkey(conn, tile_name, wire):
    c = conn.cursor()

    c.execute(
        """
WITH selected_tile(phy_tile_pkey, tile_type_pkey) AS (
  SELECT pkey, tile_type_pkey
  FROM phy_tile
  WHERE name = ?
)
SELECT wire.pkey
FROM wire
WHERE
  wire.phy_tile_pkey = (
    SELECT selected_tile.phy_tile_pkey FROM selected_tile
  )
AND
  wire.wire_in_tile_pkey = (
    SELECT wire_in_tile.pkey
    FROM wire_in_tile
    WHERE
      wire_in_tile.name = ?
    AND
      wire_in_tile.phy_tile_type_pkey = (
        SELECT tile_type_pkey FROM selected_tile
      )
  );
""", (tile_name, wire))

    results = c.fetchone()
    assert results is not None, (tile_name, wire)

    return results[0]
21.364964
86
0.618039
0
0
187
0.063888
789
0.269559
0
0
1,025
0.350188
be84a1cf98701b670f1ef999229373bd7e2f389c
2,443
py
Python
ppr-api/src/services/payment_service.py
bcgov/ppr-deprecated
c8925b6f6b0d7fb3f4e267dfe25650a1045ef2e3
[ "Apache-2.0" ]
1
2019-11-15T19:07:25.000Z
2019-11-15T19:07:25.000Z
ppr-api/src/services/payment_service.py
bryan-gilbert/ppr
c8925b6f6b0d7fb3f4e267dfe25650a1045ef2e3
[ "Apache-2.0" ]
6
2021-03-03T05:18:35.000Z
2022-02-10T21:55:45.000Z
ppr-api/src/services/payment_service.py
bcgov/ppr-deprecated
c8925b6f6b0d7fb3f4e267dfe25650a1045ef2e3
[ "Apache-2.0" ]
null
null
null
"""A module that provides functionality for accessing the Payments API.""" import enum import http import logging import requests from fastapi import Depends, Header, HTTPException from fastapi.security.http import HTTPAuthorizationCredentials import auth.authentication import config import schemas.payment logger = logging.getLogger(__name__) CORP_TYPE = 'PPR' class FilingCode(enum.Enum): """An enumeration of the filing codes available to PPR.""" SEARCH = 'SERCH' YEARLY_REGISTRATION = 'FSREG' INFINITE_REGISTRATION = 'INFRG' class PaymentService: """A service used for interacting with the Payments API.""" auth_header: HTTPAuthorizationCredentials account_id: str def __init__(self, auth_header: HTTPAuthorizationCredentials = Depends(auth.authentication.bearer_scheme), account_id: str = Header(None)): """Initialize the repository with the Authorization and Account-Id headers provided in the request.""" self.auth_header = auth_header self.account_id = account_id def create_payment(self, filing_code: FilingCode): """Submit a payment request and provide the details to the caller.""" request = { 'businessInfo': {'corpType': CORP_TYPE}, 'filingInfo': {'filingTypes': [{'filingTypeCode': filing_code.value}]} } pay_response = requests.post( '{}/payment-requests'.format(config.PAY_API_URL), json=request, headers={ 'Authorization': '{} {}'.format(self.auth_header.scheme, self.auth_header.credentials), 'Account-Id': self.account_id } ) try: auth.authentication.check_auth_response(pay_response) except HTTPException as auth_ex: logger.error('Create Payment call failed auth with status {}. Response body: {}'.format( pay_response.status_code, pay_response.text)) raise auth_ex if not pay_response: # status_code is unsuccessful logger.error('Create Payment call failed unexpectedly with status {}. Response body: {}'.format( pay_response.status_code, pay_response.text)) raise HTTPException(status_code=http.HTTPStatus.INTERNAL_SERVER_ERROR) body = pay_response.json() return schemas.payment.Payment(id=body['id'], status=body['statusCode'], method=body['paymentMethod'])
35.405797
110
0.677855
2,068
0.8465
0
0
0
0
0
0
712
0.291445
be84bdd8bc7a0db1a7baadae4ae6c5d55cf356e0
168
py
Python
SmerekaRoman/HW_6/HW 6.3.py
kolyasalubov/Lv-639.pythonCore
06f10669a188318884adb00723127465ebdf2907
[ "MIT" ]
null
null
null
SmerekaRoman/HW_6/HW 6.3.py
kolyasalubov/Lv-639.pythonCore
06f10669a188318884adb00723127465ebdf2907
[ "MIT" ]
null
null
null
SmerekaRoman/HW_6/HW 6.3.py
kolyasalubov/Lv-639.pythonCore
06f10669a188318884adb00723127465ebdf2907
[ "MIT" ]
null
null
null
def numb_of_char(a):
    d = {}
    for char in set(a):
        d[char] = a.count(char)
    return d


a = numb_of_char(str(input("Input the word please: ")))
print(a)
16.8
55
0.577381
0
0
0
0
0
0
0
0
25
0.14881
be876cf3ef298b948a6559bdc7b9b04da2062463
589
py
Python
0201-0300/0251-Flatten 2D Vector/0251-Flatten 2D Vector.py
jiadaizhao/LeetCode
4ddea0a532fe7c5d053ffbd6870174ec99fc2d60
[ "MIT" ]
49
2018-05-05T02:53:10.000Z
2022-03-30T12:08:09.000Z
0201-0300/0251-Flatten 2D Vector/0251-Flatten 2D Vector.py
jolly-fellow/LeetCode
ab20b3ec137ed05fad1edda1c30db04ab355486f
[ "MIT" ]
11
2017-12-15T22:31:44.000Z
2020-10-02T12:42:49.000Z
0201-0300/0251-Flatten 2D Vector/0251-Flatten 2D Vector.py
jolly-fellow/LeetCode
ab20b3ec137ed05fad1edda1c30db04ab355486f
[ "MIT" ]
28
2017-12-05T10:56:51.000Z
2022-01-26T18:18:27.000Z
class Vector2D:
    def __init__(self, v: List[List[int]]):
        def getIt():
            for row in v:
                for val in row:
                    yield val

        self.it = iter(getIt())
        self.val = next(self.it, None)

    def next(self) -> int:
        result = self.val
        self.val = next(self.it, None)
        return result

    def hasNext(self) -> bool:
        return self.val is not None


# Your Vector2D object will be instantiated and called as such:
# obj = Vector2D(v)
# param_1 = obj.next()
# param_2 = obj.hasNext()
22.653846
63
0.519525
453
0.7691
240
0.40747
0
0
0
0
129
0.219015
be87bd0c5c2ff868bb6a502f0a693e022ddbbafe
1,049
py
Python
logger_decorator.py
jbhayback/reconciliation-manager
5de10a0ec89e397a4937d1764976c94cde06beee
[ "MIT" ]
null
null
null
logger_decorator.py
jbhayback/reconciliation-manager
5de10a0ec89e397a4937d1764976c94cde06beee
[ "MIT" ]
null
null
null
logger_decorator.py
jbhayback/reconciliation-manager
5de10a0ec89e397a4937d1764976c94cde06beee
[ "MIT" ]
null
null
null
from datetime import datetime
import inspect


def log_time(msg=None):
    def decorator(f):
        nonlocal msg
        if msg is None:
            msg = '{} time spent: '.format(f.__name__)

        def inner(*args, **kwargs):
            # check if the object has a logger
            global logger
            if args and hasattr(args[0], 'logger'):
                logger = args[0].logger

            start = datetime.now()
            result = f(*args, **kwargs)
            logger.info(
                msg + ' {} seconds'.format((datetime.now() - start).total_seconds())
            )
            return result

        return inner

    return decorator


def log_params(f):
    arg_spec = inspect.getargspec(f).args
    has_self = arg_spec and arg_spec[0] == 'self'

    def decorator(*args, **kwargs):
        logger.info(
            'calling {} with args: {}, and kwargs: {}'.format(
                f.__name__, args if not has_self else args[1:], kwargs
            )
        )
        return f(*args, **kwargs)

    return decorator
25.585366
84
0.530029
0
0
0
0
0
0
0
0
120
0.114395
be8915c20c303761d43a0098702f7e241e75e9c4
40
py
Python
lf3py/di/__init__.py
rog-works/lf3py
e89937f7aa133ed54d85764f06101ab9abf6b960
[ "CNRI-Python" ]
null
null
null
lf3py/di/__init__.py
rog-works/lf3py
e89937f7aa133ed54d85764f06101ab9abf6b960
[ "CNRI-Python" ]
48
2020-12-19T13:47:26.000Z
2021-01-07T22:27:56.000Z
lf3py/di/__init__.py
rog-works/lf3py
e89937f7aa133ed54d85764f06101ab9abf6b960
[ "CNRI-Python" ]
null
null
null
from lf3py.di.di import DI # noqa F401
20
39
0.725
0
0
0
0
0
0
0
0
11
0.275
be8a2d82d13baa6e60ff4dbca25351bcb2190394
1,418
py
Python
critical/tasks.py
lenarother/django-critical-css
15c12ea02f7ea049e59efba4d963c35f41f26d78
[ "MIT" ]
2
2020-06-06T06:50:38.000Z
2022-02-03T08:54:28.000Z
critical/tasks.py
lenarother/django-critical-css
15c12ea02f7ea049e59efba4d963c35f41f26d78
[ "MIT" ]
5
2018-12-17T11:12:20.000Z
2020-11-27T10:28:51.000Z
critical/tasks.py
lenarother/django-critical-css
15c12ea02f7ea049e59efba4d963c35f41f26d78
[ "MIT" ]
1
2021-08-19T06:02:44.000Z
2021-08-19T06:02:44.000Z
import logging

from django.utils.safestring import mark_safe
from django_rq import job
from inline_static.css import transform_css_urls

logger = logging.getLogger(__name__)


@job
def calculate_critical_css(critical_id, original_path):
    from .exceptions import CriticalException
    from .models import Critical
    from .services import calculate_critical_css as service_calculate

    logger.info('Task: critical css with id {0} requested.'.format(critical_id))
    critical = Critical.objects.filter(id=critical_id).first()
    if not critical:
        raise CriticalException('There is no Critical object with id {0}'.format(critical_id))
    logger.info('Task: {0}, {1}'.format(critical.url, critical.path))

    critical.is_pending = True
    critical.save(update_fields=['is_pending'])
    logger.info('Task: critical css with id {0} pending.'.format(critical_id))

    try:
        critical_css_raw = service_calculate(critical.url, critical.path)
        critical_css = transform_css_urls(original_path, critical.path, critical_css_raw)
    except Exception as exc:
        critical.is_pending = False
        critical.save(update_fields=['is_pending'])
        raise CriticalException('Could not calculate critical css') from exc

    critical.css = mark_safe(critical_css)
    critical.is_pending = False
    critical.save()
    logger.info('Task: critical css with id {0} saved.'.format(critical_id))
37.315789
94
0.74189
0
0
0
0
1,240
0.874471
0
0
238
0.167842
be8c87105d1db21be6f93eb2ae080ad460d99a47
1,837
py
Python
test.py
wei2912/bce-simulation
65c19051417c871bce4585481eb06c5ba986a96f
[ "MIT" ]
null
null
null
test.py
wei2912/bce-simulation
65c19051417c871bce4585481eb06c5ba986a96f
[ "MIT" ]
1
2016-11-06T11:50:45.000Z
2016-11-06T11:53:49.000Z
test.py
wei2912/bce-simulation
65c19051417c871bce4585481eb06c5ba986a96f
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# coding=utf-8

"""
This script tests the simulations of the experiments.
"""

import math

from utils import coin_var, needle_var

def main():
    needle_var_vals = [
        (1.1, 1.0),
        (1.4, 1.0),
        (2.0, 1.0),
        (2.9, 1.0),
        (3.3, 1.0),
        (5.0, 1.0)
    ]

    print("needle_var:")
    for L, D in needle_var_vals:
        trials = 1000000

        pred_prob = needle_var.predict_prob(length=L, gap_width=D)
        pred_hits = pred_prob * trials
        hits = needle_var.run_trials(length=L, gap_width=D, trials=trials)

        if pred_hits == 0 or pred_hits == trials:
            stat = float('nan')
        else:
            stat = sum([
                (hits - pred_hits) ** 2 / pred_hits,
                ((trials - hits) - (trials - pred_hits)) ** 2 / (trials-pred_hits)
            ])

        print("L = {}, D = {}, expected = {}, observed = {}, stat = {}".format(L, D, pred_hits, hits, stat))

    print("coin_var:")
    coin_var_vals = [
        (1.0, 1.0),
        (1.0, 1.2),
        (1.0, math.sqrt(2)),
        (1.0, 1.5),
        (1.0, 1.8),
        (1.0, 1.9),
        (1.0, 2.0),
        (1.0, 3.0),
        (1.0, 5.0)
    ]

    for R, D in coin_var_vals:
        trials = 100000

        pred_prob = coin_var.predict_prob(diameter=2*R, gap_width=D)
        pred_hits = pred_prob * trials
        hits = coin_var.run_trials(diameter=2*R, gap_width=D, trials=trials)

        if pred_hits == 0 or pred_hits == trials:
            stat = float('nan')
        else:
            stat = sum([
                (hits - pred_hits) ** 2 / pred_hits,
                ((trials - hits) - (trials - pred_hits)) ** 2 / (trials-pred_hits)
            ])

        print("R = {}, D = {}, expected = {}, observed = {}, stat = {}".format(R, D, pred_hits, hits, stat))

main()
25.873239
108
0.491018
0
0
0
0
0
0
0
0
244
0.132825
be8cef6fbad82834998e279653a3e939a968c9d8
2,244
py
Python
instructions/instructions.py
fernandozanutto/PyNES
cb8d589ceb55cd7df0e114e726c6b6bbbc556172
[ "Apache-2.0" ]
null
null
null
instructions/instructions.py
fernandozanutto/PyNES
cb8d589ceb55cd7df0e114e726c6b6bbbc556172
[ "Apache-2.0" ]
null
null
null
instructions/instructions.py
fernandozanutto/PyNES
cb8d589ceb55cd7df0e114e726c6b6bbbc556172
[ "Apache-2.0" ]
null
null
null
from addressing import *
from instructions.base_instructions import SetBit, ClearBit
from instructions.generic_instructions import Instruction
from status import Status


# set status instructions
class Sec(SetBit):
    identifier_byte = bytes([0x38])
    bit = Status.StatusTypes.carry


class Sei(SetBit):
    identifier_byte = bytes([0x78])
    bit = Status.StatusTypes.interrupt


class Sed(SetBit):
    identifier_byte = bytes([0xF8])
    bit = Status.StatusTypes.decimal


# clear status instructions
class Cld(ClearBit):
    identifier_byte = bytes([0xD8])
    bit = Status.StatusTypes.decimal


class Clc(ClearBit):
    identifier_byte = bytes([0x18])
    bit = Status.StatusTypes.carry


class Clv(ClearBit):
    identifier_byte = bytes([0xB8])
    bit = Status.StatusTypes.overflow


class Cli(ClearBit):
    identifier_byte = bytes([0x58])
    bit = Status.StatusTypes.interrupt


class Bit(Instruction):
    @classmethod
    def get_data(cls, cpu, memory_address, data_bytes) -> Optional[int]:
        return cpu.bus.read_memory(memory_address)

    @classmethod
    def apply_side_effects(cls, cpu, memory_address, value):
        and_result = cpu.a_reg & value
        cpu.status_reg.bits[Status.StatusTypes.zero] = not and_result
        cpu.status_reg.bits[Status.StatusTypes.overflow] = (
            value & (1 << 6)) > 0
        cpu.status_reg.bits[Status.StatusTypes.negative] = (
            value & (1 << 7)) > 0


class BitZeroPage(ZeroPageAddressing, Bit):
    identifier_byte = bytes([0x24])


class BitAbsolute(AbsoluteAddressing, Bit):
    identifier_byte = bytes([0x2C])


class Brk(ImplicitAddressing, Instruction):
    identifier_byte = bytes([0x00])

    @classmethod
    def get_data(cls, cpu, memory_address, data_bytes) -> Optional[int]:
        return super().get_data(cpu, memory_address, data_bytes)

    @classmethod
    def write(cls, cpu, memory_address, value):
        cpu.push_to_stack(cpu.pc_reg + 1, 2)
        cpu.push_to_stack(cpu.status_reg.to_int() | (1 << 4), 1)

    @classmethod
    def apply_side_effects(cls, cpu, memory_address, value):
        cpu.status_reg.bits[Status.StatusTypes.interrupt] = 1
        cpu.running = False

    @classmethod
    def get_cycles(cls):
        return 7
25.213483
72
0.69385
1,988
0.885918
0
0
1,047
0.466578
0
0
52
0.023173
be8d24f272fa353fa6c9d0869d13de96b4754241
1,960
py
Python
python/530.minimum-absolute-difference-in-bst.py
vermouth1992/Leetcode
0d7dda52b12f9e01d88fc279243742cd8b4bcfd1
[ "MIT" ]
null
null
null
python/530.minimum-absolute-difference-in-bst.py
vermouth1992/Leetcode
0d7dda52b12f9e01d88fc279243742cd8b4bcfd1
[ "MIT" ]
null
null
null
python/530.minimum-absolute-difference-in-bst.py
vermouth1992/Leetcode
0d7dda52b12f9e01d88fc279243742cd8b4bcfd1
[ "MIT" ]
null
null
null
#
# @lc app=leetcode id=530 lang=python3
#
# [530] Minimum Absolute Difference in BST
#
# https://leetcode.com/problems/minimum-absolute-difference-in-bst/description/
#
# algorithms
# Easy (55.23%)
# Total Accepted:    115.5K
# Total Submissions: 209K
# Testcase Example:  '[4,2,6,1,3]'
#
# Given the root of a Binary Search Tree (BST), return the minimum absolute
# difference between the values of any two different nodes in the tree.
#
# Example 1:
#
# Input: root = [4,2,6,1,3]
# Output: 1
#
# Example 2:
#
# Input: root = [1,0,48,null,null,12,49]
# Output: 1
#
# Constraints:
#
# The number of nodes in the tree is in the range [2, 10^4].
# 0 <= Node.val <= 10^5
#
# Note: This question is the same as 783:
# https://leetcode.com/problems/minimum-distance-between-bst-nodes/
#

# Definition for a binary tree node.
from typing import List


class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


class Solution:
    def getNodeValues(self, root: TreeNode) -> List[int]:
        value = []
        self.getNodeValuesHelper(root, value)
        return value

    def getNodeValuesHelper(self, root: TreeNode, value: List[int]):
        if root is None:
            return
        value.append(root.val)
        self.getNodeValuesHelper(root.left, value)
        self.getNodeValuesHelper(root.right, value)

    def getMinimumDifference(self, root: TreeNode) -> int:
        # get all the values and put into a list O(n)
        value = self.getNodeValues(root)
        # sort the list O(nlogn)
        value = sorted(value)
        # find the minimum difference between adjacent values O(n)
        min_abs_diff = abs(value[0] - value[1])
        for i in range(1, len(value) - 1):
            diff = abs(value[i] - value[i + 1])
            if diff < min_abs_diff:
                min_abs_diff = diff
        return min_abs_diff
24.5
79
0.625
1,074
0.547959
0
0
0
0
0
0
938
0.478571
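A minimal usage sketch for the Solution class in the content field above, building the BST from Example 1 ([4,2,6,1,3]) by hand with the TreeNode constructor defined there:

root = TreeNode(4, TreeNode(2, TreeNode(1), TreeNode(3)), TreeNode(6))
print(Solution().getMinimumDifference(root))  # prints 1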
be8d50256f2d9fce8a7ed11893b6cad92bc5a14b
2,769
py
Python
tensorflow/python/eager/remote_cloud_tpu_test.py
abhaikollara/tensorflow
4f96df3659696990cb34d0ad07dc67843c4225a9
[ "Apache-2.0" ]
26
2019-11-10T15:33:34.000Z
2022-03-24T19:56:57.000Z
tensorflow/python/eager/remote_cloud_tpu_test.py
abhaikollara/tensorflow
4f96df3659696990cb34d0ad07dc67843c4225a9
[ "Apache-2.0" ]
6
2022-01-15T07:17:47.000Z
2022-02-14T15:28:22.000Z
tensorflow/python/eager/remote_cloud_tpu_test.py
abhaikollara/tensorflow
4f96df3659696990cb34d0ad07dc67843c4225a9
[ "Apache-2.0" ]
6
2020-03-29T11:10:53.000Z
2021-06-14T05:39:14.000Z
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test that we can connect to a real Cloud TPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from absl import flags
from absl.testing import absltest

from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import context
from tensorflow.python.eager import remote
from tensorflow.python.tpu import tpu_strategy_util

FLAGS = flags.FLAGS
flags.DEFINE_string('tpu', '', 'Name of TPU to connect to.')
flags.DEFINE_string('project', None, 'Name of GCP project with TPU.')
flags.DEFINE_string('zone', None, 'Name of GCP zone with TPU.')

EXPECTED_DEVICES_PRE_CONNECT = [
    '/job:localhost/replica:0/task:0/device:CPU:0',
    '/job:localhost/replica:0/task:0/device:XLA_CPU:0'
]
EXPECTED_DEVICES_AFTER_CONNECT = [
    '/job:localhost/replica:0/task:0/device:CPU:0',
    '/job:localhost/replica:0/task:0/device:XLA_CPU:0',
    '/job:worker/replica:0/task:0/device:CPU:0',
    '/job:worker/replica:0/task:0/device:XLA_CPU:0',
    '/job:worker/replica:0/task:0/device:TPU_SYSTEM:0',
    '/job:worker/replica:0/task:0/device:TPU:0',
    '/job:worker/replica:0/task:0/device:TPU:1',
    '/job:worker/replica:0/task:0/device:TPU:2',
    '/job:worker/replica:0/task:0/device:TPU:3',
    '/job:worker/replica:0/task:0/device:TPU:4',
    '/job:worker/replica:0/task:0/device:TPU:5',
    '/job:worker/replica:0/task:0/device:TPU:6',
    '/job:worker/replica:0/task:0/device:TPU:7',
]


class RemoteCloudTPUTest(absltest.TestCase):
  """Test that we can connect to a real Cloud TPU."""

  def test_connect(self):
    self.assertCountEqual(
        EXPECTED_DEVICES_PRE_CONNECT,
        context.list_devices())

    resolver = tpu_cluster_resolver.TPUClusterResolver(
        tpu=FLAGS.tpu, zone=FLAGS.zone, project=FLAGS.project
    )
    remote.connect_to_cluster(resolver)

    self.assertCountEqual(
        EXPECTED_DEVICES_AFTER_CONNECT,
        context.list_devices())

    tpu_strategy_util.initialize_tpu_system(resolver)

if __name__ == '__main__':
  absltest.main()
36.434211
80
0.717949
542
0.195739
0
0
0
0
0
0
1,568
0.566269
be8db6395c3bc7d6f2f0df95f16ef512dceb29b7
1,418
py
Python
test/functional/bsv-blocksize-params.py
gbtn/bitcoin-sv-gbtn
8b09d1aa072da819fb3309b0be85dae0f1ac9549
[ "MIT" ]
3
2018-12-03T03:55:08.000Z
2019-08-13T07:50:45.000Z
test/functional/bsv-blocksize-params.py
Chihuataneo/bitcoin-sv
d9b12a23dbf0d2afc5f488fa077d762b302ba873
[ "MIT" ]
1
2020-02-09T11:35:45.000Z
2020-02-09T11:35:45.000Z
test/functional/bsv-blocksize-params.py
Chihuataneo/bitcoin-sv
d9b12a23dbf0d2afc5f488fa077d762b302ba873
[ "MIT" ]
1
2018-11-25T03:18:52.000Z
2018-11-25T03:18:52.000Z
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Copyright (c) 2017 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test that the blockmaxsize and excessiveblocksize parameters are also
settable via the bitcoin.conf file.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
from test_framework.cdefs import (ONE_MEGABYTE)
import os


class BSVBlockSizeParams(BitcoinTestFramework):

    def set_test_params(self):
        self.num_nodes = 1
        self.maxminedblocksize = 4 * ONE_MEGABYTE
        self.maxblocksize = 16 * ONE_MEGABYTE

    def setup_chain(self):
        super().setup_chain()
        with open(os.path.join(self.options.tmpdir + "/node0", "bitcoin.conf"), 'a', encoding='utf8') as f:
            f.write("blockmaxsize=" + str(self.maxminedblocksize) + "\n")
            f.write("excessiveblocksize=" + str(self.maxblocksize) + "\n")

    def add_options(self, parser):
        super().add_options(parser)

    def run_test(self):
        gires = self.nodes[0].getinfo()
        assert_equal(gires["maxblocksize"], self.maxblocksize)
        assert_equal(gires["maxminedblocksize"], self.maxminedblocksize)


if __name__ == '__main__':
    BSVBlockSizeParams().main()
34.585366
107
0.715797
790
0.557123
0
0
0
0
0
0
484
0.341326
be8eb4d6e0f2ba30a5412f64a491cd5cc3dcacad
1,750
py
Python
yotta/test/cli/outdated.py
headlessme/yotta
947ab074b629c8f18ca91ab84ebaa29096b011c6
[ "Apache-2.0" ]
null
null
null
yotta/test/cli/outdated.py
headlessme/yotta
947ab074b629c8f18ca91ab84ebaa29096b011c6
[ "Apache-2.0" ]
null
null
null
yotta/test/cli/outdated.py
headlessme/yotta
947ab074b629c8f18ca91ab84ebaa29096b011c6
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python
# Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.

# standard library modules, , ,
import unittest

# internal modules:
from . import util
from . import cli

Test_Outdated = {
'module.json':'''{
  "name": "test-outdated",
  "version": "0.0.0",
  "description": "Test yotta outdated",
  "author": "James Crosby <[email protected]>",
  "license": "Apache-2.0",
  "dependencies":{
    "test-testing-dummy": "*"
  }
}''',
'source/foo.c':'''#include "stdio.h"
int foo(){
    printf("foo!\\n");
    return 7;
}''',

# test-testing-dummy v0.0.1 (a newer version is available from the registry,
# and will be installed by yt up)
'yotta_modules/test-testing-dummy/module.json':'''{
  "name": "test-testing-dummy",
  "version": "0.0.1",
  "description": "Test yotta's compilation of tests.",
  "author": "James Crosby <[email protected]>",
  "license": "Apache-2.0"
}
'''
}

class TestCLIOutdated(unittest.TestCase):
    def test_outdated(self):
        path = util.writeTestFiles(Test_Outdated, True)

        stdout, stderr, statuscode = cli.run(['-t', 'x86-linux-native', 'outdated'], cwd=path)
        self.assertNotEqual(statuscode, 0)
        self.assertIn('test-testing-dummy', stdout + stderr)

        util.rmRf(path)

    def test_notOutdated(self):
        path = util.writeTestFiles(Test_Outdated, True)

        stdout, stderr, statuscode = cli.run(['-t', 'x86-linux-native', 'up'], cwd=path)
        self.assertEqual(statuscode, 0)

        stdout, stderr, statuscode = cli.run(['-t', 'x86-linux-native', 'outdated'], cwd=path)
        self.assertEqual(statuscode, 0)
        self.assertNotIn('test-testing-dummy', stdout + stderr)

        util.rmRf(path)
27.777778
94
0.646286
795
0.454286
0
0
0
0
0
0
990
0.565714
be8eb9841690585b80bc1d8c7ae03dcd42ff539a
208
py
Python
geoposition/tests/urls.py
Starcross/django-geoposition
b2b3af2a1b73e0ce99e76f19b7f63f1a91f3e093
[ "MIT" ]
null
null
null
geoposition/tests/urls.py
Starcross/django-geoposition
b2b3af2a1b73e0ce99e76f19b7f63f1a91f3e093
[ "MIT" ]
1
2020-02-03T17:10:55.000Z
2020-02-03T17:10:55.000Z
geoposition/tests/urls.py
Starcross/django-geoposition
b2b3af2a1b73e0ce99e76f19b7f63f1a91f3e093
[ "MIT" ]
1
2019-12-22T12:17:19.000Z
2019-12-22T12:17:19.000Z
from django.urls import path, include
from django.contrib import admin

from example.views import poi_list

admin.autodiscover()

urlpatterns = [
    path('', poi_list),
    path('admin/', admin.site.urls),
]
18.909091
37
0.725962
0
0
0
0
0
0
0
0
10
0.048077
be8fca7576bb080c666d1d705dca421abd5cb1da
2,453
py
Python
A_Stocker/Stocker.py
Allen1218/Python_Project_Interesting
55d5e58e70e21d45c4bb9dc4d4c219f3a8385834
[ "Apache-2.0" ]
1
2021-02-03T12:08:06.000Z
2021-02-03T12:08:06.000Z
A_Stocker/Stocker.py
Allen1218/Python_Project_Interesting
55d5e58e70e21d45c4bb9dc4d4c219f3a8385834
[ "Apache-2.0" ]
null
null
null
A_Stocker/Stocker.py
Allen1218/Python_Project_Interesting
55d5e58e70e21d45c4bb9dc4d4c219f3a8385834
[ "Apache-2.0" ]
null
null
null
import threading
import tushare as ts
import pandas as pd
import datetime

STOCK = {#'002594':[1,170.15],    ## 比亚迪 / lots held, cost price
         '601012':[11,99.9],      ## 隆基股份
         '002340':[12,8.72],      ## 格林美
         '603259':[1,141.7],      ## 药明康德
         '002346':[10,10.68],     ## 柘中股份
         #'600438':[9,42.96],     ## 通威股份
         #'002475':[3,59.51],     ## 立讯精密
         #'603308':[1,33.49],     ## 应流股份
         #'002415': [3, 66.40],   ## 海康威视
         # '600559':[3,35.3],     ## 老白干
         # '601100':[1, 114.5],   ## 恒立液压
         # '603466':[6, 22.40]    ## 风语筑
         }

TimerNum = 20.0  # s
Total = 0

# #rodo
def get_all_price():
    '''process all stock'''
    stockCode = list(STOCK.keys())
    df = ts.get_realtime_quotes(stockCode)

    lp = list(STOCK.values())
    stockNum = []
    stockCostPrice = []
    for i in range(len(lp)):
        stockNum.append(lp[i][0])
        stockCostPrice.append(lp[i][1])
    df['num'] = stockNum
    df['stockCostPrice'] = stockCostPrice

    # processing
    # profit and loss ratio (盈亏率)
    plRatio = round((df['price'].astype(float) / df['stockCostPrice'] - 1)*100,2)
    # profit and loss (盈亏)
    df['plRatio'] = plRatio
    df['stockNum'] = stockNum
    pl = round(df['plRatio'].astype(float) * df['stockNum'] * df['stockCostPrice'].astype(float),2)
    df['pl'] = pl

    # daily rise and fall (当日涨幅)
    currentRF = round((df['price'].astype(float) / df['pre_close'].astype(float) - 1)*100,2)
    df['currentRF'] = currentRF

    df1 = df[[ 'open', 'price', 'stockCostPrice', 'plRatio', 'num','pl', 'currentRF','name']]

    pd.set_option('display.unicode.ambiguous_as_wide', True)
    pd.set_option('display.unicode.east_asian_width', True)
    pd.set_option('display.width', 180)  # set print width (**important**)
    pd.set_option('display.max_columns', 1000)
    pd.set_option('display.width', 1000)
    pd.set_option('display.max_colwidth', 1000)

    sss = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f)")[:-4]
    print('\n')
    print("----------------" + sss +"------------------")
    print(df1)
    sum_int = round(df['pl'].sum(),2)
    print("total profit and lost is " + sum_int.astype(str))
    print('\n')

    # df.to_csv('stock_data.csv', encoding='utf_8_sig', index=None)

    global timer
    timer = threading.Timer(TimerNum, get_all_price, [])
    timer.start()


if __name__ == '__main__':
    print(STOCK)
    get_all_price()
    timer = threading.Timer(TimerNum, get_all_price, [])
    timer.start()
30.6625
99
0.565838
0
0
0
0
0
0
0
0
1,085
0.418757
be9026a8dcf2d835f2e8c702efdeeb3e278299c1
1,011
py
Python
tests/extractors/test_etrade.py
mkazin/StatementRenamer
ef03c71f0e627a15a4bba08e45bfa90ecacd28fc
[ "Apache-2.0" ]
null
null
null
tests/extractors/test_etrade.py
mkazin/StatementRenamer
ef03c71f0e627a15a4bba08e45bfa90ecacd28fc
[ "Apache-2.0" ]
15
2018-05-01T12:48:30.000Z
2021-05-14T02:52:48.000Z
tests/extractors/test_etrade.py
mkazin/StatementRenamer
ef03c71f0e627a15a4bba08e45bfa90ecacd28fc
[ "Apache-2.0" ]
1
2019-07-09T22:59:50.000Z
2019-07-09T22:59:50.000Z
from datetime import datetime

from statement_renamer.extractors.etrade import ETradeDateExtractor as EXTRACTOR_UNDER_TEST
from statement_renamer.extractors.factory import ExtractorFactory

TESTDATA = (
    """
    PAGE 1 OF 6 February 1, 2019 - March 31, 2019AccountNumber:####-####AccountType:ROTH IRA
    PAGE 5 OF 6Account Number: ####-####Statement Period : February 1, 2019 - March 31, 2019Account Type
    TolearnmoreabouttheRSDAProgram,pleasereviewyourRSDAProgramCustomerAgreement,visitwww.etrade.com,orcallusat1-800-387-2331
    """
)


def test_monthly_statement():
    extractor = EXTRACTOR_UNDER_TEST()
    data = extractor.extract(TESTDATA)
    new_name = extractor.rename(data)

    assert data.get_start_date() == datetime(2019, 2, 1)
    assert data.get_end_date() == datetime(2019, 3, 31)
    assert new_name == '2019-03 E-Trade Statement.pdf'


def test_factory():
    extractor = ExtractorFactory.get_matching_extractor(TESTDATA)
    assert isinstance(extractor, EXTRACTOR_UNDER_TEST)
32.612903
124
0.75272
0
0
0
0
0
0
0
0
369
0.364985
be917ccdfeb7754dd0eabc0327954755752723d8
425
py
Python
Estrutura_Decisao/who.py
M3nin0/supreme-broccoli
186c1ea3b839ba3139f9301660dec8fbd27a162e
[ "Apache-2.0" ]
null
null
null
Estrutura_Decisao/who.py
M3nin0/supreme-broccoli
186c1ea3b839ba3139f9301660dec8fbd27a162e
[ "Apache-2.0" ]
null
null
null
Estrutura_Decisao/who.py
M3nin0/supreme-broccoli
186c1ea3b839ba3139f9301660dec8fbd27a162e
[ "Apache-2.0" ]
null
null
null
prod1 = float(input("Insira o valor do produto A: "))
prod2 = float(input("Insira o valor do produto B: "))
prod3 = float(input("Insira o valor do produto C: "))

if prod1 < prod2 and prod1 < prod3:
    print ("Escolha o produto A é o mais barato")
elif prod2 < prod1 and prod2 < prod3:
    print ("Escolha o produto B é o mais barato")
elif prod3 < prod1 and prod3 < prod2:
    print ("Escolha o produto C é o mais barato")
38.636364
53
0.68
0
0
0
0
0
0
0
0
207
0.483645
be98084b654d84cf6a197790eaa2f280fb68a68e
800
py
Python
ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow/_api/v2/compat/v2/train/experimental/__init__.py
Lube-Project/ProgettoLube
cbf33971e2c2e865783ec1a2302625539186a338
[ "MIT" ]
2
2020-09-30T00:11:09.000Z
2021-10-04T13:00:38.000Z
ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow/_api/v2/compat/v2/train/experimental/__init__.py
Lube-Project/ProgettoLube
cbf33971e2c2e865783ec1a2302625539186a338
[ "MIT" ]
null
null
null
ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow/_api/v2/compat/v2/train/experimental/__init__.py
Lube-Project/ProgettoLube
cbf33971e2c2e865783ec1a2302625539186a338
[ "MIT" ]
1
2021-01-28T01:57:41.000Z
2021-01-28T01:57:41.000Z
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Public API for tf.train.experimental namespace.
"""

from __future__ import print_function as _print_function

import sys as _sys

from tensorflow.python.training.experimental.loss_scale import DynamicLossScale
from tensorflow.python.training.experimental.loss_scale import FixedLossScale
from tensorflow.python.training.experimental.loss_scale import LossScale
from tensorflow.python.training.experimental.mixed_precision import disable_mixed_precision_graph_rewrite
from tensorflow.python.training.experimental.mixed_precision import enable_mixed_precision_graph_rewrite
from tensorflow.python.training.tracking.python_state import PythonState

del _print_function
44.444444
105
0.86875
0
0
0
0
0
0
0
0
182
0.2275
be986d230ef62a7e44ef6996ed58eb548aa4181b
4,004
py
Python
SciDataTool/Methods/VectorField/plot_3D_Data.py
BenjaminGabet/SciDataTool
7994441de4c54921d43750cacd8df761ba4bd421
[ "Apache-2.0" ]
null
null
null
SciDataTool/Methods/VectorField/plot_3D_Data.py
BenjaminGabet/SciDataTool
7994441de4c54921d43750cacd8df761ba4bd421
[ "Apache-2.0" ]
null
null
null
SciDataTool/Methods/VectorField/plot_3D_Data.py
BenjaminGabet/SciDataTool
7994441de4c54921d43750cacd8df761ba4bd421
[ "Apache-2.0" ]
null
null
null
def plot_3D_Data(
    self,
    *arg_list,
    is_norm=False,
    unit="SI",
    component_list=None,
    save_path=None,
    x_min=None,
    x_max=None,
    y_min=None,
    y_max=None,
    z_min=None,
    z_max=None,
    z_range=None,
    is_auto_ticks=True,
    is_auto_range=False,
    is_2D_view=False,
    is_same_size=False,
    N_stem=100,
    fig=None,
    ax=None,
    is_show_fig=None,
    is_logscale_x=False,
    is_logscale_y=False,
    is_logscale_z=False,
    thresh=0.02,
    is_switch_axes=False,
    colormap="RdBu_r",
    win_title=None,
    font_name="arial",
    font_size_title=12,
    font_size_label=10,
    font_size_legend=8,
):
    """Plots a field as a function of time

    Parameters
    ----------
    self : Output
        an Output object
    Data_str : str
        name of the Data Object to plot (e.g. "mag.Br")
    *arg_list : list of str
        arguments to specify which axes to plot
    is_norm : bool
        boolean indicating if the field must be normalized
    unit : str
        unit in which to plot the field
    save_path : str
        full path including folder, name and extension of the file to save if save_path is not None
    x_min : float
        minimum value for the x-axis
    x_max : float
        maximum value for the x-axis
    y_min : float
        minimum value for the y-axis
    y_max : float
        maximum value for the y-axis
    z_min : float
        minimum value for the z-axis
    z_max : float
        maximum value for the z-axis
    is_auto_ticks : bool
        in fft, adjust ticks to freqs (deactivate if too close)
    is_auto_range : bool
        in fft, display up to 1% of max
    is_2D_view : bool
        True to plot Data in xy plane and put z as colormap
    is_same_size : bool
        True to have all color blocks with same size in 2D view
    N_stem : int
        number of harmonics to plot (only for stem plots)
    fig : Matplotlib.figure.Figure
        existing figure to use if None create a new one
    ax : Matplotlib.axes.Axes object
        ax on which to plot the data
    is_show_fig : bool
        True to show figure after plot
    is_logscale_x : bool
        boolean indicating if the x-axis must be set in logarithmic scale
    is_logscale_y : bool
        boolean indicating if the y-axis must be set in logarithmic scale
    is_logscale_z : bool
        boolean indicating if the z-axis must be set in logarithmic scale
    thresh : float
        threshold for automatic fft ticks
    is_switch_axes : bool
        to switch x and y axes
    """

    # Call the plot on each component
    if component_list is None:  # default: extract all components
        component_list = self.components.keys()
    for i, comp in enumerate(component_list):
        if save_path is not None and len(component_list) > 1:
            save_path_comp = (
                save_path.split(".")[0] + "_" + comp + "." + save_path.split(".")[1]
            )
        else:
            save_path_comp = save_path

        self.components[comp].plot_3D_Data(
            arg_list,
            is_norm=is_norm,
            unit=unit,
            save_path=save_path_comp,
            x_min=x_min,
            x_max=x_max,
            y_min=y_min,
            y_max=y_max,
            z_min=z_min,
            z_max=z_max,
            colormap=colormap,
            is_auto_ticks=is_auto_ticks,
            is_auto_range=is_auto_range,
            is_2D_view=is_2D_view,
            is_same_size=is_same_size,
            N_stem=N_stem,
            fig=fig,
            ax=ax,
            is_show_fig=is_show_fig,
            is_logscale_x=is_logscale_x,
            is_logscale_y=is_logscale_y,
            is_logscale_z=is_logscale_z,
            thresh=thresh,
            is_switch_axes=is_switch_axes,
            win_title=win_title,
            font_name=font_name,
            font_size_title=font_size_title,
            font_size_label=font_size_label,
            font_size_legend=font_size_legend,
        )
29.880597
99
0.610889
0
0
0
0
0
0
0
0
1,992
0.497502
be995dc30a4b39d65ba03829daf98d9b834c9449
37,788
py
Python
tests/unittests/plotting/test_plotly_backend.py
obilaniu/orion
bc886daf791d66490b59e43657f6f6db45d34ea8
[ "BSD-3-Clause" ]
1
2021-04-10T16:18:03.000Z
2021-04-10T16:18:03.000Z
tests/unittests/plotting/test_plotly_backend.py
obilaniu/orion
bc886daf791d66490b59e43657f6f6db45d34ea8
[ "BSD-3-Clause" ]
null
null
null
tests/unittests/plotting/test_plotly_backend.py
obilaniu/orion
bc886daf791d66490b59e43657f6f6db45d34ea8
[ "BSD-3-Clause" ]
null
null
null
"""Collection of tests for :mod:`orion.plotting.backend_plotly`.""" import copy import numpy import pandas import plotly import pytest import orion.client from orion.analysis.partial_dependency_utils import partial_dependency_grid from orion.core.worker.experiment import Experiment from orion.plotting.base import ( lpi, parallel_coordinates, partial_dependencies, rankings, regret, regrets, ) from orion.testing import create_experiment from orion.testing.plotting import ( assert_lpi_plot, assert_parallel_coordinates_plot, assert_partial_dependencies_plot, assert_rankings_plot, assert_regret_plot, assert_regrets_plot, ) config = dict( name="experiment-name", space={"x": "uniform(0, 200)"}, metadata={ "user": "test-user", "orion_version": "XYZ", "VCS": { "type": "git", "is_dirty": False, "HEAD_sha": "test", "active_branch": None, "diff_sha": "diff", }, }, version=1, pool_size=1, max_trials=10, working_dir="", algorithms={"random": {"seed": 1}}, producer={"strategy": "NoParallelStrategy"}, ) trial_config = { "experiment": 0, "status": "completed", "worker": None, "start_time": None, "end_time": None, "heartbeat": None, "results": [], "params": [], } def mock_space(x="uniform(0, 6)", y="uniform(0, 3)", **kwargs): """Build a mocked space""" mocked_config = copy.deepcopy(config) mocked_config["space"] = {"x": x} if y is not None: mocked_config["space"]["y"] = y mocked_config["space"].update(kwargs) return mocked_config def mock_experiment( monkeypatch, ids=None, x=None, y=None, z=None, objectives=None, status=None ): """Mock experiment to_pandas to return given data (or default one)""" if ids is None: ids = ["a", "b", "c", "d"] if x is None: x = [0, 1, 2, 4] if y is None: y = [3, 2, 0, 1] if objectives is None: objectives = [0.1, 0.2, 0.3, 0.5] if status is None: status = ["completed", "completed", "completed", "completed"] data = { "id": ids, "x": x, "objective": objectives, "status": status, "suggested": ids, } if not isinstance(y, str): data["y"] = y if z is not None: data["z"] = z def to_pandas(self, with_evc_tree=False): return pandas.DataFrame(data=data) monkeypatch.setattr(Experiment, "to_pandas", to_pandas) def mock_experiment_with_random_to_pandas(monkeypatch, status=None, unbalanced=False): def to_pandas(self, with_evc_tree=False): if unbalanced: N = numpy.random.randint(5, 15) elif status is not None: N = len(status) else: N = 10 ids = numpy.arange(N) x = numpy.random.normal(0, 0.1, size=N) y = numpy.random.normal(0, 0.1, size=N) objectives = numpy.random.normal(0, 0.1, size=N) if status is None: exp_status = ["completed"] * N else: exp_status = status data = pandas.DataFrame( data={ "id": ids, "x": x, "y": y, "objective": objectives, "status": exp_status, "suggested": ids, } ) return data monkeypatch.setattr(Experiment, "to_pandas", to_pandas) def mock_model(): """Return a mocked regressor which just predict iterated integers""" class Model: """Mocked Regressor""" def __init__(self): self.i = 0 def predict(self, data): """Returns counting of predictions requested.""" data = numpy.arange(data.shape[0]) + self.i self.i += data.shape[0] return data # + numpy.random.normal(0, self.i, size=data.shape[0]) return Model() def mock_train_regressor(monkeypatch, assert_model=None, assert_model_kwargs=None): """Mock the train_regressor to return the mocked regressor instead""" def train_regressor(model, data, **kwargs): """Return the mocked model, and then model argument if requested""" if assert_model: assert model == assert_model if assert_model_kwargs: assert kwargs == assert_model_kwargs 
return mock_model() monkeypatch.setattr( "orion.analysis.partial_dependency_utils.train_regressor", train_regressor ) @pytest.mark.usefixtures("version_XYZ") class TestLPI: """Tests the ``lpi()`` method provided by the plotly backend""" def test_requires_argument(self): """Tests that the experiment data are required.""" with pytest.raises(ValueError): lpi(None) def test_returns_plotly_object(self): """Tests that the plotly backend returns a plotly object""" with create_experiment(config, trial_config, ["completed"]) as ( _, _, experiment, ): plot = lpi(experiment, model_kwargs=dict(random_state=1)) assert type(plot) is plotly.graph_objects.Figure def test_graph_layout(self, monkeypatch): """Tests the layout of the plot""" config = mock_space() mock_experiment(monkeypatch) with create_experiment(config, trial_config, ["completed"]) as ( _, _, experiment, ): plot = lpi(experiment, model_kwargs=dict(random_state=1)) df = experiment.to_pandas() assert df["x"].tolist() == [0, 1, 2, 4] assert df["y"].tolist() == [3, 2, 0, 1] assert df["objective"].tolist() == [0.1, 0.2, 0.3, 0.5] assert_lpi_plot(plot, dims=["x", "y"]) def test_experiment_worker_as_parameter(self, monkeypatch): """Tests that ``Experiment`` is a valid parameter""" config = mock_space() mock_experiment(monkeypatch) with create_experiment(config, trial_config, ["completed"]) as ( _, experiment, _, ): plot = lpi(experiment, model_kwargs=dict(random_state=1)) assert_lpi_plot(plot, dims=["x", "y"]) def test_ignore_uncompleted_statuses(self, monkeypatch): """Tests that uncompleted statuses are ignored""" config = mock_space() mock_experiment( monkeypatch, ids="abcdefgh", x=[0, 0, 0, 1, 0, 2, 0, 3], y=[1, 0, 0, 2, 0, 0, 0, 3], objectives=[0.1, None, None, 0.2, None, 0.3, None, 0.5], status=[ "completed", "new", "reserved", "completed", "broken", "completed", "interrupted", "completed", ], ) with create_experiment(config, trial_config) as (_, _, experiment): plot = lpi(experiment) assert_lpi_plot(plot, dims=["x", "y"]) def test_multidim(self, monkeypatch): """Tests that dimensions with shape > 1 are flattened properly""" config = mock_space(y="uniform(0, 3, shape=2)") mock_experiment(monkeypatch, y=[[3, 3], [2, 3], [1, 2], [0, 3]]) with create_experiment(config, trial_config) as (_, _, experiment): plot = lpi(experiment, model_kwargs=dict(random_state=1)) assert_lpi_plot(plot, dims=["x", "y[0]", "y[1]"]) def test_fidelity(self, monkeypatch): """Tests that fidelity is supported""" config = mock_space(y="fidelity(1, 200, base=3)") mock_experiment(monkeypatch, y=[1, 3 ** 2, 1, 3 ** 4]) with create_experiment(config, trial_config) as (_, _, experiment): plot = lpi(experiment, model_kwargs=dict(random_state=1)) assert_lpi_plot(plot, dims=["x", "y"]) def test_categorical(self, monkeypatch): """Tests that categorical is supported""" config = mock_space(y='choices(["a", "b", "c"])') mock_experiment(monkeypatch, y=["c", "c", "a", "b"]) with create_experiment(config, trial_config) as (_, _, experiment): plot = lpi(experiment, model_kwargs=dict(random_state=1)) assert_lpi_plot(plot, dims=["x", "y"]) def test_categorical_multidim(self, monkeypatch): """Tests that multidim categorical is supported""" config = mock_space(y='choices(["a", "b", "c"], shape=3)') mock_experiment( monkeypatch, y=[["c", "b", "a"], ["c", "a", "c"], ["a", "b", "a"], ["c", "b", "b"]], ) with create_experiment(config, trial_config) as (_, _, experiment): plot = lpi(experiment, model_kwargs=dict(random_state=1)) assert_lpi_plot(plot, dims=["x", "y[0]", "y[1]", "y[2]"]) 
@pytest.mark.usefixtures("version_XYZ") class TestParallelCoordinates: """Tests the ``parallel_coordinates()`` method provided by the plotly backend""" def test_requires_argument(self): """Tests that the experiment data are required.""" with pytest.raises(ValueError): parallel_coordinates(None) def test_returns_plotly_object(self): """Tests that the plotly backend returns a plotly object""" with create_experiment(config, trial_config, ["completed"]) as ( _, _, experiment, ): plot = parallel_coordinates(experiment) assert type(plot) is plotly.graph_objects.Figure def test_graph_layout(self): """Tests the layout of the plot""" with create_experiment(config, trial_config, ["completed"]) as ( _, _, experiment, ): plot = parallel_coordinates(experiment) assert_parallel_coordinates_plot(plot, order=["x", "loss"]) def test_experiment_worker_as_parameter(self): """Tests that ``Experiment`` is a valid parameter""" with create_experiment(config, trial_config, ["completed"]) as ( _, experiment, _, ): plot = parallel_coordinates(experiment) assert_parallel_coordinates_plot(plot, order=["x", "loss"]) def test_ignore_uncompleted_statuses(self): """Tests that uncompleted statuses are ignored""" with create_experiment(config, trial_config) as (_, _, experiment): plot = parallel_coordinates(experiment) assert_parallel_coordinates_plot(plot, order=["x", "loss"]) def test_unsupported_order_key(self): """Tests that unsupported order keys are rejected""" with create_experiment(config, trial_config) as (_, _, experiment): with pytest.raises(ValueError): parallel_coordinates(experiment, order=["unsupported"]) def test_order_columns(self): """Tests that columns are sorted according to ``order``""" multidim_config = copy.deepcopy(config) for k in "yzutv": multidim_config["space"][k] = "uniform(0, 200)" with create_experiment(multidim_config, trial_config) as (_, _, experiment): plot = parallel_coordinates(experiment, order="vzyx") assert_parallel_coordinates_plot(plot, order=["v", "z", "y", "x", "loss"]) def test_multidim(self): """Tests that dimensions with shape > 1 are flattened properly""" multidim_config = copy.deepcopy(config) multidim_config["space"]["y"] = "uniform(0, 200, shape=4)" with create_experiment(multidim_config, trial_config) as (_, _, experiment): plot = parallel_coordinates(experiment) assert_parallel_coordinates_plot( plot, order=["x", "y[0]", "y[1]", "y[2]", "y[3]", "loss"] ) def test_fidelity(self): """Tests that fidelity is set to first column by default""" fidelity_config = copy.deepcopy(config) fidelity_config["space"]["z"] = "fidelity(1, 200, base=3)" with create_experiment(fidelity_config, trial_config) as (_, _, experiment): plot = parallel_coordinates(experiment) assert_parallel_coordinates_plot(plot, order=["z", "x", "loss"]) def test_categorical(self): """Tests that categorical is supported""" categorical_config = copy.deepcopy(config) categorical_config["space"]["z"] = 'choices(["a", "b", "c"])' with create_experiment(categorical_config, trial_config) as (_, _, experiment): plot = parallel_coordinates(experiment) assert_parallel_coordinates_plot(plot, order=["x", "z", "loss"]) def test_categorical_multidim(self): """Tests that multidim categorical is supported""" categorical_config = copy.deepcopy(config) categorical_config["space"]["z"] = 'choices(["a", "b", "c"], shape=3)' with create_experiment(categorical_config, trial_config) as (_, _, experiment): plot = parallel_coordinates(experiment) assert_parallel_coordinates_plot( plot, order=["x", "z[0]", "z[1]", "z[2]", "loss"] ) 
@pytest.mark.usefixtures("version_XYZ") class TestPartialDependencies: """Tests the ``partial_dependencies()`` method provided by the plotly backend""" def test_returns_plotly_object(self, monkeypatch): """Tests that the plotly backend returns a plotly object""" mock_train_regressor(monkeypatch) with create_experiment(config, trial_config, ["completed"]) as ( _, _, experiment, ): plot = partial_dependencies( experiment, n_grid_points=5, model_kwargs=dict(random_state=1) ) assert type(plot) is plotly.graph_objects.Figure def test_graph_layout(self, monkeypatch): """Tests the layout of the plot""" mock_train_regressor(monkeypatch) config = mock_space() mock_experiment(monkeypatch) with create_experiment(config, trial_config, ["completed"]) as ( _, _, experiment, ): plot = partial_dependencies( experiment, n_grid_points=5, model_kwargs=dict(random_state=1) ) df = experiment.to_pandas() assert df["x"].tolist() == [0, 1, 2, 4] assert df["y"].tolist() == [3, 2, 0, 1] assert df["objective"].tolist() == [0.1, 0.2, 0.3, 0.5] assert_partial_dependencies_plot(plot, dims=["x", "y"]) def test_ignore_uncompleted_statuses(self, monkeypatch): """Tests that uncompleted statuses are ignored""" mock_train_regressor(monkeypatch) config = mock_space() mock_experiment( monkeypatch, ids="abcdefgh", x=[0, 0, 0, 1, 0, 2, 0, 3], y=[1, 0, 0, 2, 0, 0, 0, 3], objectives=[0.1, None, None, 0.2, None, 0.3, None, 0.5], status=[ "completed", "new", "reserved", "completed", "broken", "completed", "interrupted", "completed", ], ) with create_experiment(config, trial_config) as (_, _, experiment): plot = partial_dependencies(experiment, n_grid_points=5) assert_partial_dependencies_plot(plot, dims=["x", "y"]) def test_multidim(self, monkeypatch): """Tests that dimensions with shape > 1 are flattened properly""" mock_train_regressor(monkeypatch) config = mock_space(y="uniform(0, 3, shape=2)") mock_experiment(monkeypatch, y=[[3, 3], [2, 3], [1, 2], [0, 3]]) with create_experiment(config, trial_config) as (_, _, experiment): plot = partial_dependencies( experiment, n_grid_points=5, model_kwargs=dict(random_state=1) ) assert_partial_dependencies_plot(plot, dims=["x", "y[0]", "y[1]"]) def test_fidelity(self, monkeypatch): """Tests that fidelity is supported""" mock_train_regressor(monkeypatch) config = mock_space(y="fidelity(1, 200, base=3)") mock_experiment(monkeypatch, y=[1, 3 ** 2, 1, 3 ** 4]) with create_experiment(config, trial_config) as (_, _, experiment): plot = partial_dependencies( experiment, n_grid_points=5, model_kwargs=dict(random_state=1) ) assert_partial_dependencies_plot(plot, dims=["x", "y"], log_dims=["y"]) def test_categorical(self, monkeypatch): """Tests that categorical is supported""" mock_train_regressor(monkeypatch) config = mock_space(y='choices(["a", "b", "c"])') mock_experiment(monkeypatch, y=["c", "c", "a", "b"]) with create_experiment(config, trial_config) as (_, _, experiment): plot = partial_dependencies( experiment, n_grid_points=5, model_kwargs=dict(random_state=1) ) # There is only 3 categories, so test must be adjusted accordingly. 
assert_partial_dependencies_plot( plot, dims=["x", "y"], n_grid_points={"x": 5, "y": 3} ) def test_categorical_multidim(self, monkeypatch): """Tests that multidim categorical is supported""" mock_train_regressor(monkeypatch) config = mock_space(y='choices(["a", "b", "c"], shape=3)') mock_experiment( monkeypatch, y=[["c", "b", "a"], ["c", "a", "c"], ["a", "b", "a"], ["c", "b", "b"]], ) with create_experiment(config, trial_config) as (_, _, experiment): plot = partial_dependencies( experiment, n_grid_points=5, model_kwargs=dict(random_state=1) ) assert_partial_dependencies_plot( plot, dims=["x", "y[0]", "y[1]", "y[2]"], n_grid_points={"x": 5, "y[0]": 3, "y[1]": 3, "y[2]": 3}, ) def test_logarithmic_scales_first(self, monkeypatch): """Test that log dims are turn to log scale Test first dim specifically because special xaxis name for first dim. """ mock_train_regressor(monkeypatch) config = mock_space(x="loguniform(0.001, 1)", z="uniform(0, 1)") mock_experiment(monkeypatch, x=[0.001, 0.1, 0.01, 1], z=[0, 0.1, 0.2, 0.5]) with create_experiment(config, trial_config) as (_, _, experiment): plot = partial_dependencies( experiment, n_grid_points=5, model_kwargs=dict(random_state=1) ) assert_partial_dependencies_plot( plot, dims=["x", "y", "z"], n_grid_points=5, log_dims=["x"] ) def test_logarithmic_scales_any_dim(self, monkeypatch): """Test that log dims are turn to log scale""" mock_train_regressor(monkeypatch) config = mock_space(y="loguniform(0.001, 1)", z="uniform(0, 1)") mock_experiment(monkeypatch, y=[0.001, 0.1, 0.01, 1], z=[0, 0.1, 0.2, 0.5]) with create_experiment(config, trial_config) as (_, _, experiment): plot = partial_dependencies( experiment, n_grid_points=5, model_kwargs=dict(random_state=1) ) assert_partial_dependencies_plot( plot, dims=["x", "y", "z"], n_grid_points=5, log_dims=["y"] ) def test_int_logarithmic_scales(self, monkeypatch): """Test that int log dims are turn to log scale""" mock_train_regressor(monkeypatch) config = mock_space(y="loguniform(1, 1000, discrete=True)", z="uniform(0, 1)") mock_experiment(monkeypatch, y=[1, 10, 100, 1000], z=[0, 0.1, 0.2, 0.5]) with create_experiment(config, trial_config) as (_, _, experiment): plot = partial_dependencies( experiment, n_grid_points=5, model_kwargs=dict(random_state=1) ) assert_partial_dependencies_plot( plot, dims=["x", "y", "z"], n_grid_points=5, log_dims=["y"] ) def test_one_param(self, monkeypatch): """Test ploting a space with only 1 dim""" mock_train_regressor(monkeypatch) config = mock_space(y=None) mock_experiment(monkeypatch, y="drop") with create_experiment(config, trial_config) as (_, _, experiment): plot = partial_dependencies( experiment, n_grid_points=5, model_kwargs=dict(random_state=1) ) assert_partial_dependencies_plot(plot, dims=["x"], n_grid_points=5) def test_select_params(self, monkeypatch): """Test selecting subset""" mock_train_regressor(monkeypatch) config = mock_space(z="uniform(0, 1)") mock_experiment(monkeypatch, z=[0, 0.1, 0.2, 0.5]) for params in [["x"], ["x", "y"], ["y", "z"]]: with create_experiment(config, trial_config) as (_, _, experiment): plot = partial_dependencies( experiment, params=params, n_grid_points=5, model_kwargs=dict(random_state=1), ) assert_partial_dependencies_plot(plot, dims=params, n_grid_points=5) def test_custom_smoothing(self, monkeypatch): """Test changing smoothing value""" mock_train_regressor(monkeypatch) config = mock_space() mock_experiment(monkeypatch) with create_experiment(config, trial_config) as (_, _, experiment): plot = partial_dependencies( experiment, 
n_grid_points=5, model_kwargs=dict(random_state=1), smoothing=1.2, ) with pytest.raises(AssertionError): assert_partial_dependencies_plot(plot, dims=["x", "y"], n_grid_points=5) assert_partial_dependencies_plot( plot, dims=["x", "y"], n_grid_points=5, smoothing=1.2 ) def test_custom_n_grid_points(self, monkeypatch): """Test changing n_grid_points value""" mock_train_regressor(monkeypatch) config = mock_space() mock_experiment(monkeypatch) with create_experiment(config, trial_config) as (_, _, experiment): plot = partial_dependencies( experiment, n_grid_points=10, model_kwargs=dict(random_state=1), ) with pytest.raises(AssertionError): assert_partial_dependencies_plot(plot, dims=["x", "y"], n_grid_points=5) assert_partial_dependencies_plot(plot, dims=["x", "y"], n_grid_points=10) def test_custom_n_samples(self, monkeypatch): """Test changing n_samples value""" mock_train_regressor(monkeypatch) config = mock_space() mock_experiment(monkeypatch) PARAMS = ["x", "y"] N_SAMPLES = numpy.random.randint(20, 50) def mock_partial_dependency_grid(space, model, params, samples, n_points): print(samples) assert samples.shape == (N_SAMPLES, len(PARAMS)) return partial_dependency_grid(space, model, params, samples, n_points) monkeypatch.setattr( "orion.analysis.partial_dependency_utils.partial_dependency_grid", mock_partial_dependency_grid, ) with create_experiment(config, trial_config) as (_, _, experiment): plot = partial_dependencies( experiment, n_grid_points=10, model_kwargs=dict(random_state=1), n_samples=N_SAMPLES, ) def test_custom_colorscale(self, monkeypatch): """Test changing colorscale""" mock_train_regressor(monkeypatch) config = mock_space() mock_experiment(monkeypatch) with create_experiment(config, trial_config) as (_, _, experiment): plot = partial_dependencies( experiment, n_grid_points=5, colorscale="Viridis", model_kwargs=dict(random_state=1), ) with pytest.raises(AssertionError): assert_partial_dependencies_plot( plot, dims=["x", "y"], n_grid_points=5, custom_colorscale=False ) assert_partial_dependencies_plot( plot, dims=["x", "y"], n_grid_points=5, custom_colorscale=True ) def test_custom_model(self, monkeypatch): """Test changing type of regression model""" mock_train_regressor(monkeypatch, assert_model="BaggingRegressor") config = mock_space() mock_experiment(monkeypatch) with create_experiment(config, trial_config) as (_, _, experiment): plot = partial_dependencies( experiment, n_grid_points=5, model="BaggingRegressor", model_kwargs=dict(random_state=1), ) def test_custom_model_kwargs(self, monkeypatch): """Test changing arguments of regression model""" mock_train_regressor(monkeypatch, assert_model_kwargs=dict(random_state=1)) config = mock_space() mock_experiment(monkeypatch) with create_experiment(config, trial_config) as (_, _, experiment): plot = partial_dependencies( experiment, n_grid_points=5, model_kwargs=dict(random_state=1), ) @pytest.mark.usefixtures("version_XYZ") class TestRankings: """Tests the ``rankings()`` method provided by the plotly backend""" def test_requires_argument(self): """Tests that the experiment data are required.""" with pytest.raises(ValueError): rankings(None) def test_returns_plotly_object(self, monkeypatch): """Tests that the plotly backend returns a plotly object""" mock_experiment_with_random_to_pandas(monkeypatch) with create_experiment(config, trial_config, ["completed"]) as ( _, _, experiment, ): plot = rankings([experiment, experiment]) assert type(plot) is plotly.graph_objects.Figure def test_graph_layout(self, monkeypatch): """Tests the 
layout of the plot""" mock_experiment_with_random_to_pandas(monkeypatch) with create_experiment(config, trial_config, ["completed"]) as ( _, _, experiment, ): plot = rankings([experiment]) assert_rankings_plot(plot, [f"{experiment.name}-v{experiment.version}"]) def test_list_of_experiments(self, monkeypatch): """Tests the rankings with list of experiments""" mock_experiment_with_random_to_pandas(monkeypatch) with create_experiment(config, trial_config, ["completed"]) as ( _, _, experiment, ): child = orion.client.create_experiment( experiment.name, branching={"branch_to": "child"} ) plot = rankings([experiment, child]) # Exps are sorted alphabetically by names. assert_rankings_plot( plot, [f"{exp.name}-v{exp.version}" for exp in [child, experiment]] ) def test_list_of_experiments_name_conflict(self, monkeypatch): """Tests the rankings with list of experiments with the same name""" mock_experiment_with_random_to_pandas(monkeypatch) with create_experiment(config, trial_config, ["completed"]) as ( _, _, experiment, ): child = orion.client.create_experiment( experiment.name, branching={"branch_to": experiment.name} ) assert child.name == experiment.name assert child.version == experiment.version + 1 plot = rankings([experiment, child]) # Exps are sorted alphabetically by names. assert_rankings_plot( plot, [f"{exp.name}-v{exp.version}" for exp in [experiment, child]] ) def test_dict_of_experiments(self, monkeypatch): """Tests the rankings with renamed experiments""" mock_experiment_with_random_to_pandas(monkeypatch) with create_experiment(config, trial_config, ["completed"]) as ( _, _, experiment, ): plot = rankings({"exp-1": experiment, "exp-2": experiment}) assert_rankings_plot(plot, ["exp-1", "exp-2"]) def test_list_of_dict_of_experiments(self, monkeypatch): """Tests the rankings with avg of competitions""" mock_experiment_with_random_to_pandas(monkeypatch) with create_experiment(config, trial_config, ["completed"]) as ( _, _, experiment, ): plot = rankings( [{"exp-1": experiment, "exp-2": experiment} for _ in range(10)] ) assert_rankings_plot(plot, ["exp-1", "exp-2"], with_avg=True) def test_dict_of_list_of_experiments(self, monkeypatch): """Tests the rankings with avg of experiments separated in lists""" mock_experiment_with_random_to_pandas(monkeypatch) with create_experiment(config, trial_config, ["completed"]) as ( _, _, experiment, ): plot = rankings({"exp-1": [experiment] * 10, "exp-2": [experiment] * 10}) assert_rankings_plot(plot, ["exp-1", "exp-2"], with_avg=True) def test_unbalanced_experiments(self, monkeypatch): """Tests the regrets with avg of unbalanced experiments""" mock_experiment_with_random_to_pandas(monkeypatch, unbalanced=True) with create_experiment(config, trial_config, ["completed"]) as ( _, _, experiment, ): plot = rankings({"exp-1": [experiment] * 10, "exp-2": [experiment] * 10}) assert_rankings_plot(plot, ["exp-1", "exp-2"], with_avg=True, balanced=0) def test_ignore_uncompleted_statuses(self, monkeypatch): """Tests that uncompleted statuses are ignored""" mock_experiment_with_random_to_pandas( monkeypatch, status=[ "completed", "new", "reserved", "completed", "broken", "completed", "interrupted", "completed", ], ) with create_experiment(config, trial_config) as (_, _, experiment): plot = rankings([experiment]) assert_rankings_plot( plot, [f"{experiment.name}-v{experiment.version}"], balanced=4 ) def test_unsupported_order_key(self): """Tests that unsupported order keys are rejected""" with create_experiment(config, trial_config) as (_, _, experiment): with 
pytest.raises(ValueError): rankings([experiment], order_by="unsupported") @pytest.mark.usefixtures("version_XYZ") class TestRegret: """Tests the ``regret()`` method provided by the plotly backend""" def test_requires_argument(self): """Tests that the experiment data are required.""" with pytest.raises(ValueError): regret(None) def test_returns_plotly_object(self): """Tests that the plotly backend returns a plotly object""" with create_experiment(config, trial_config, ["completed"]) as ( _, _, experiment, ): plot = regret(experiment) assert type(plot) is plotly.graph_objects.Figure def test_graph_layout(self): """Tests the layout of the plot""" with create_experiment(config, trial_config, ["completed"]) as ( _, _, experiment, ): plot = regret(experiment) assert_regret_plot(plot) def test_experiment_worker_as_parameter(self): """Tests that ``Experiment`` is a valid parameter""" with create_experiment(config, trial_config, ["completed"]) as ( _, experiment, _, ): plot = regret(experiment) assert_regret_plot(plot) def test_ignore_uncompleted_statuses(self): """Tests that uncompleted statuses are ignored""" with create_experiment(config, trial_config) as (_, _, experiment): plot = regret(experiment) assert_regret_plot(plot) def test_unsupported_order_key(self): """Tests that unsupported order keys are rejected""" with create_experiment(config, trial_config) as (_, _, experiment): with pytest.raises(ValueError): regret(experiment, order_by="unsupported") @pytest.mark.usefixtures("version_XYZ") class TestRegrets: """Tests the ``regrets()`` method provided by the plotly backend""" def test_requires_argument(self): """Tests that the experiment data are required.""" with pytest.raises(ValueError): regrets(None) def test_returns_plotly_object(self, monkeypatch): """Tests that the plotly backend returns a plotly object""" mock_experiment_with_random_to_pandas(monkeypatch) with create_experiment(config, trial_config, ["completed"]) as ( _, _, experiment, ): plot = regrets([experiment]) assert type(plot) is plotly.graph_objects.Figure def test_graph_layout(self, monkeypatch): """Tests the layout of the plot""" mock_experiment_with_random_to_pandas(monkeypatch) with create_experiment(config, trial_config, ["completed"]) as ( _, _, experiment, ): plot = regrets([experiment]) assert_regrets_plot(plot, [f"{experiment.name}-v{experiment.version}"]) def test_list_of_experiments(self, monkeypatch): """Tests the regrets with list of experiments""" mock_experiment_with_random_to_pandas(monkeypatch) with create_experiment(config, trial_config, ["completed"]) as ( _, _, experiment, ): child = orion.client.create_experiment( experiment.name, branching={"branch_to": "child"} ) plot = regrets([experiment, child]) # Exps are sorted alphabetically by names. assert_regrets_plot( plot, [f"{exp.name}-v{exp.version}" for exp in [child, experiment]] ) def test_list_of_experiments_name_conflict(self, monkeypatch): """Tests the regrets with list of experiments with the same name""" mock_experiment_with_random_to_pandas(monkeypatch) with create_experiment(config, trial_config, ["completed"]) as ( _, _, experiment, ): child = orion.client.create_experiment( experiment.name, branching={"branch_to": experiment.name} ) assert child.name == experiment.name assert child.version == experiment.version + 1 plot = regrets([experiment, child]) # Exps are sorted alphabetically by names. 
assert_regrets_plot( plot, [f"{exp.name}-v{exp.version}" for exp in [experiment, child]] ) def test_dict_of_experiments(self, monkeypatch): """Tests the regrets with renamed experiments""" mock_experiment_with_random_to_pandas(monkeypatch) with create_experiment(config, trial_config, ["completed"]) as ( _, _, experiment, ): plot = regrets({"exp-1": experiment, "exp-2": experiment}) assert_regrets_plot(plot, ["exp-1", "exp-2"]) def test_dict_of_list_of_experiments(self, monkeypatch): """Tests the regrets with avg of experiments""" mock_experiment_with_random_to_pandas(monkeypatch) with create_experiment(config, trial_config, ["completed"]) as ( _, _, experiment, ): plot = regrets({"exp-1": [experiment] * 10, "exp-2": [experiment] * 10}) assert_regrets_plot(plot, ["exp-1", "exp-2"], with_avg=True) def test_unbalanced_experiments(self, monkeypatch): """Tests the regrets with avg of unbalanced experiments""" mock_experiment_with_random_to_pandas(monkeypatch, unbalanced=True) with create_experiment(config, trial_config, ["completed"]) as ( _, _, experiment, ): plot = regrets({"exp-1": [experiment] * 10, "exp-2": [experiment] * 10}) assert_regrets_plot(plot, ["exp-1", "exp-2"], with_avg=True, balanced=0) def test_ignore_uncompleted_statuses(self, monkeypatch): """Tests that uncompleted statuses are ignored""" mock_experiment_with_random_to_pandas( monkeypatch, status=[ "completed", "new", "reserved", "completed", "broken", "completed", "interrupted", "completed", ], ) with create_experiment(config, trial_config) as (_, _, experiment): plot = regrets([experiment]) assert_regrets_plot( plot, [f"{experiment.name}-v{experiment.version}"], balanced=4 ) def test_unsupported_order_key(self): """Tests that unsupported order keys are rejected""" with create_experiment(config, trial_config) as (_, _, experiment): with pytest.raises(ValueError): regrets([experiment], order_by="unsupported")
35.716446
87
0.59196
33,335
0.882158
0
0
33,212
0.878903
0
0
7,453
0.197232
be99d62141111a8ad89510bea1e2a527e33cf08b
478
py
Python
autodiff/debug_vjp.py
Jakob-Unfried/msc-legacy
2c41f3f714936c25dd534bd66da802c26176fcfa
[ "MIT" ]
1
2021-03-22T14:16:43.000Z
2021-03-22T14:16:43.000Z
autodiff/debug_vjp.py
Jakob-Unfried/msc-legacy
2c41f3f714936c25dd534bd66da802c26176fcfa
[ "MIT" ]
null
null
null
autodiff/debug_vjp.py
Jakob-Unfried/msc-legacy
2c41f3f714936c25dd534bd66da802c26176fcfa
[ "MIT" ]
null
null
null
import pdb
import warnings

from jax import custom_vjp


@custom_vjp
def debug_identity(x):
    """
    acts as identity, but inserts a pdb trace on the backwards pass
    """
    warnings.warn('Using a module intended for debugging')
    return x


def _debug_fwd(x):
    warnings.warn('Using a module intended for debugging')
    return x, x


# noinspection PyUnusedLocal
def _debug_bwd(x, g):
    pdb.set_trace()
    return g


debug_identity.defvjp(_debug_fwd, _debug_bwd)
17.071429
67
0.713389
0
0
0
0
190
0.39749
0
0
185
0.387029
be9aae87c4295f41e5dad9ea47ddb818dd41be55
1,246
py
Python
mileage.py
vwfinley/mileage
eb880107c8c38d33706eac74d01a0d0516716cc7
[ "MIT" ]
null
null
null
mileage.py
vwfinley/mileage
eb880107c8c38d33706eac74d01a0d0516716cc7
[ "MIT" ]
null
null
null
mileage.py
vwfinley/mileage
eb880107c8c38d33706eac74d01a0d0516716cc7
[ "MIT" ]
null
null
null
#!/usr/bin/env python

# Some helpful links
# https://docs.python.org/3/library/tkinter.html
# https://www.python-course.eu/tkinter_entry_widgets.php

import tkinter as tk

class Application(tk.Frame):
    def __init__(self, root=None):
        super().__init__(root)
        self.root = root
        self.root.title("Mileage")
        self.root.geometry("250x125")
        self.pack()

        self.miles = tk.Entry(self)
        self.gallons = tk.Entry(self)
        self.mpg = tk.Label(self)

        self.init_widgets()

    def init_widgets(self):
        self.miles.grid(row=0)
        tk.Label(self, text="Miles").grid(row=0, column=1)

        self.gallons.grid(row=1)
        tk.Label(self, text="Gallons").grid(row=1, column=1)

        self.mpg.grid(row=2)
        tk.Label(self, text="MPG").grid(row=2, column=1)

        tk.Button(self, text="Calculate", command = self.calculate).grid(row=3, column=1)
        tk.Button(self, text="Quit", command=self.root.destroy).grid(row=4, column=1)

    def calculate(self):
        self.mpg['text'] = float(self.miles.get()) / float(self.gallons.get())

app = Application(root=tk.Tk())
app.mainloop()
28.318182
90
0.578652
1,010
0.810594
0
0
0
0
0
0
211
0.169342
be9bd5f7d840a39915f5c547fcf6ced95fe85e75
1,087
py
Python
rankings/elo.py
ulternate/table_tennis_league
1762c5b606f149b27d9c06c82e825c948c47b56f
[ "MIT" ]
null
null
null
rankings/elo.py
ulternate/table_tennis_league
1762c5b606f149b27d9c06c82e825c948c47b56f
[ "MIT" ]
7
2017-08-18T04:15:16.000Z
2017-08-28T00:54:25.000Z
rankings/elo.py
mohamed-yahya-zakria/table-tennis-league
07cc6fe46100a4d4279c8a6ae5eea26984df4664
[ "MIT" ]
1
2017-08-18T11:24:00.000Z
2017-08-18T11:24:00.000Z
def elo(winner_rank, loser_rank, weighting):
    """
    :param winner_rank: The rank of the Player that won the match.
    :param loser_rank: The rank of the Player that lost the match.
    :param weighting: The weighting factor to suit your comp.
    :return: (winner_new_rank, loser_new_rank) Tuple.

    This follows the ELO ranking method.
    """
    winner_rank_transformed = 10 ** (winner_rank / 400)
    opponent_rank_transformed = 10 ** (loser_rank / 400)
    transformed_sum = winner_rank_transformed + opponent_rank_transformed

    winner_score = winner_rank_transformed / transformed_sum
    loser_score = opponent_rank_transformed / transformed_sum

    winner_rank = winner_rank + weighting * (1 - winner_score)
    loser_rank = loser_rank - weighting * loser_score

    # Set a floor of 100 for the rankings.
    winner_rank = 100 if winner_rank < 100 else winner_rank
    loser_rank = 100 if loser_rank < 100 else loser_rank

    winner_rank = float('{result:.2f}'.format(result=winner_rank))
    loser_rank = float('{result:.2f}'.format(result=loser_rank))

    return winner_rank, loser_rank
37.482759
73
0.720331
0
0
0
0
0
0
0
0
335
0.308188
be9c9dcbecf6ee782a06508d51f148623da5f942
3,766
py
Python
src/samplics/regression/glm.py
samplics-org/samplics
b5f49d075194cc24208f567e6a00e86aa24bec26
[ "MIT" ]
14
2021-05-03T19:59:58.000Z
2022-03-27T18:58:36.000Z
src/samplics/regression/glm.py
samplics-org/samplics
b5f49d075194cc24208f567e6a00e86aa24bec26
[ "MIT" ]
8
2021-06-17T01:13:01.000Z
2022-03-27T18:31:15.000Z
src/samplics/regression/glm.py
samplics-org/samplics
b5f49d075194cc24208f567e6a00e86aa24bec26
[ "MIT" ]
1
2022-03-28T06:58:55.000Z
2022-03-28T06:58:55.000Z
from __future__ import annotations from typing import Any, Callable, Optional, Union import numpy as np # import pandas as pd import statsmodels.api as sm from samplics.estimation.expansion import TaylorEstimator from samplics.utils.formats import dict_to_dataframe, fpc_as_dict, numpy_array, remove_nans from samplics.utils.types import Array, Number, Series, StringNumber class SurveyGLM: """General linear models under complex survey sampling""" def __init__(self): self.beta: np.ndarray @staticmethod def _residuals(e: np.ndarray, psu: np.ndarray, nb_vars: Number) -> tuple(np.ndarray, Number): psus = np.unique(psu) if psus.shape[0] == 1 and e.shape[0] == 1: raise AssertionError("Only one observation in the stratum") if psus.shape[0] == 1: psu = np.arange(e.shape[0]) psus = np.unique(psu) e_values = np.zeros((psus.shape[0], nb_vars)) for i, p in enumerate(np.unique(psus)): e_values[i, :] += np.sum(e[psu == p, :], axis=0) e_means = np.sum(e_values, axis=0) / psus.shape[0] return np.transpose(e_values - e_means) @ (e_values - e_means), psus.shape[0] def _calculate_g( self, samp_weight: np.ndarray, resid: np.ndarray, x: np.ndarray, stratum: Optional[np.ndarray], psu: Optional[np.ndarray], fpc: Union[dict[StringNumber, Number], Number], glm_scale=Number, ) -> np.ndarray: e = (samp_weight * resid)[:, None] * x / glm_scale if psu is None: psu = np.arange(e.shape[0]) if stratum is None: e_h, n_h = self._residuals(e=e, psu=psu, nb_vars=x.shape[1]) return fpc * (n_h / (n_h - 1)) * e_h else: g_h = np.zeros((x.shape[1], x.shape[1])) for s in np.unique(stratum): e_s = e[stratum == s, :] psu_s = psu[stratum == s] e_h, n_h = self._residuals(e=e_s, psu=psu_s, nb_vars=x.shape[1]) g_h += fpc[s] * (n_h / (n_h - 1)) * e_h return g_h def estimate( self, y: Array, x: Optional[Array] = None, samp_weight: Optional[Array] = None, stratum: Optional[Series] = None, psu: Optional[Series] = None, fpc: Union[dict[StringNumber, Number], Series, Number] = 1.0, remove_nan: bool = False, ) -> None: y = numpy_array(y) y_temp = y.copy() x = numpy_array(x) if x is not None else None psu = numpy_array(psu) if psu is not None else None if samp_weight is None: weight_temp = np.ones(y.shape[0]) elif isinstance(samp_weight, (float, int)): weight_temp = samp_weight * np.ones(y_temp.shape[0]) elif isinstance(samp_weight, np.ndarray): weight_temp = samp_weight.copy() else: weight_temp = np.asarray(samp_weight) if not isinstance(fpc, dict): self.fpc = fpc_as_dict(stratum, fpc) else: if list(np.unique(stratum)) != list(fpc.keys()): raise AssertionError("fpc dictionary keys must be the same as the strata!") else: self.fpc = fpc glm_model = sm.GLM(endog=y_temp, exog=x, var_weights=weight_temp) glm_results = glm_model.fit() g = self._calculate_g( samp_weight=samp_weight, resid=glm_results.resid_response, x=x, stratum=stratum, psu=psu, fpc=self.fpc, glm_scale=glm_results.scale, ) d = glm_results.cov_params() self.beta = glm_results.params self.cov_beta = (d @ g) @ d
32.747826
97
0.573022
3,385
0.898832
0
0
680
0.180563
0
0
168
0.04461
be9e12d7ef9f5aeb6611304d96bd16eabcc64477
2,563
py
Python
tests/test_scopes.py
leg100/scopes
6a31908acf44b9f65f25668230197ed13229a80d
[ "MIT" ]
null
null
null
tests/test_scopes.py
leg100/scopes
6a31908acf44b9f65f25668230197ed13229a80d
[ "MIT" ]
1
2021-11-15T17:47:40.000Z
2021-11-15T17:47:40.000Z
tests/test_scopes.py
leg100/scopes
6a31908acf44b9f65f25668230197ed13229a80d
[ "MIT" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- """Tests for `scopes` package.""" import os print(os.getenv('PYTHONPATH')) import pytest from click.testing import CliRunner from scopes.tasks import tasks, bolt, spout, builder from scopes.graph import G, build, topological_sort, traverse from scopes import cli @pytest.fixture def response(): """Sample pytest fixture. See more at: http://doc.pytest.org/en/latest/fixture.html """ # import requests # return requests.get('https://github.com/audreyr/cookiecutter-pypackage') def test_content(response): """Sample pytest test function with the pytest fixture as an argument.""" # from bs4 import BeautifulSoup # assert 'GitHub' in BeautifulSoup(response.content).title.string def test_command_line_interface(): """Test the CLI.""" runner = CliRunner() result = runner.invoke(cli.main) assert result.exit_code == 0 assert 'scopes.cli.main' in result.output help_result = runner.invoke(cli.main, ['--help']) assert help_result.exit_code == 0 assert '--help Show this message and exit.' in help_result.output # t1--- # | | # v v # t2 t3 # \ / t4 # v | # t5<----/ @pytest.fixture def example(): tasks.clear() G.clear() @spout({'x': None}) def t1(): yield {'x': 'east'} yield {'x': 'west'} @bolt({'y': None}, lambda d: 'x' in d) def t2(dep): return {'y': 1, **dep} @bolt({'z': None}, lambda d: d == {'x': None}) def t3(dep): return {'z': 1, **dep} @spout({'c': None}) def t4(): yield {'c': 4, 'x': 'east'} yield {'c': 5, 'x': 'west'} @builder({'a': 2}, lambda _: True, 'x') def t5(obj, dep): obj.update(dep) def test_task_decorator(example): assert len(tasks) == 5 assert callable(tasks[0].func) assert tasks[0].obj == {'x': None} def test_task_dag(example): build(tasks) assert len(G) == 5 assert len(G.edges) == 6 def test_task_traversal(example): build(tasks) nodes = topological_sort() results = traverse(nodes) assert results == { 't1': [{'x': 'east'}, {'x': 'west'}], 't2': [{'x': 'east', 'y': 1}, {'x': 'west', 'y': 1}], 't3': [{'x': 'east', 'z': 1}, {'x': 'west', 'z': 1}], 't4': [{'x': 'east', 'c': 4}, {'x': 'west', 'c': 5}], 't5': [ {'a': 2, 'x': 'east', 'y': 1, 'z': 1, 'c': 4}, {'a': 2, 'x': 'west', 'y': 1, 'z': 1, 'c': 5} ] }
22.286957
78
0.536871
0
0
539
0.2103
788
0.307452
0
0
818
0.319157
be9e3afec2b413ef97912bf7c25f3305c1a3ab7c
1,055
py
Python
timeparse/LunarSolarConverter/__init__.py
tornadoyi/timeparse
1e44dbc6acdb07d6c023806d55034642c7ec0de9
[ "Apache-2.0" ]
null
null
null
timeparse/LunarSolarConverter/__init__.py
tornadoyi/timeparse
1e44dbc6acdb07d6c023806d55034642c7ec0de9
[ "Apache-2.0" ]
null
null
null
timeparse/LunarSolarConverter/__init__.py
tornadoyi/timeparse
1e44dbc6acdb07d6c023806d55034642c7ec0de9
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- __author__ = 'isee15' import LunarSolarConverter converter = LunarSolarConverter.LunarSolarConverter() def LunarToSolar(year, month, day, isleap = False): lunar = LunarSolarConverter.Lunar(year, month, day, isleap) solar = converter.LunarToSolar(lunar) return (solar.solarYear, solar.solarMonth, solar.solarDay) def SolarToLunar(year, month, day): solar = LunarSolarConverter.Solar(year, month, day) lunar = converter.SolarToLunar(solar) return (lunar.lunarYear, lunar.lunarMonth, lunar.lunarDay) def LunarMonthDays(year, month, isleap = False): converter = LunarSolarConverter.LunarSolarConverter days = converter.lunar_month_days[year - converter.lunar_month_days[0]] leap = LunarSolarConverter.GetBitInt(days, 4, 13) offset = 0 loopend = leap if not isleap: if month <= leap or leap == 0: loopend = month - 1 else: loopend = month days = LunarSolarConverter.GetBitInt(days, 1, 12 - loopend) == 1 and 30 or 29 return days
28.513514
81
0.694787
0
0
0
0
0
0
0
0
31
0.029384
be9f7ef00ae244d09a69281d387b6fc00e3b787b
4,345
py
Python
examples/hello-pt/custom/cifar10validator.py
ArnovanHilten/NVFlare
bb45e7d606849c6bc8f7542347459c6ba1be00c4
[ "Apache-2.0" ]
155
2021-08-05T18:05:09.000Z
2022-03-27T15:32:56.000Z
examples/hello-pt/custom/cifar10validator.py
ArnovanHilten/NVFlare
bb45e7d606849c6bc8f7542347459c6ba1be00c4
[ "Apache-2.0" ]
216
2021-12-01T06:07:12.000Z
2022-03-30T23:34:02.000Z
examples/hello-pt/custom/cifar10validator.py
ArnovanHilten/NVFlare
bb45e7d606849c6bc8f7542347459c6ba1be00c4
[ "Apache-2.0" ]
44
2021-11-24T16:03:29.000Z
2022-03-24T23:28:39.000Z
# Copyright (c) 2021, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from torch.utils.data import DataLoader from torchvision.datasets import CIFAR10 from torchvision.transforms import Compose, ToTensor, Normalize from nvflare.apis.dxo import from_shareable, DataKind, DXO from nvflare.apis.executor import Executor from nvflare.apis.fl_constant import ReturnCode from nvflare.apis.fl_context import FLContext from nvflare.apis.shareable import Shareable, make_reply from nvflare.apis.signal import Signal from nvflare.app_common.app_constant import AppConstants from simple_network import SimpleNetwork class Cifar10Validator(Executor): def __init__(self, validate_task_name=AppConstants.TASK_VALIDATION): super(Cifar10Validator, self).__init__() self._validate_task_name = validate_task_name # Setup the model self.model = SimpleNetwork() self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") self.model.to(self.device) # Preparing the dataset for testing. transforms = Compose([ ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), ]) self.test_data = CIFAR10(root='~/data', train=False, transform=transforms) self.test_loader = DataLoader(self.test_data, batch_size=4, shuffle=False) def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable: if task_name == self._validate_task_name: model_owner = "?" try: try: dxo = from_shareable(shareable) except: self.log_error(fl_ctx, "Error in extracting dxo from shareable.") return make_reply(ReturnCode.BAD_TASK_DATA) # Ensure data_kind is weights. if not dxo.data_kind == DataKind.WEIGHTS: self.log_exception(fl_ctx, f"DXO is of type {dxo.data_kind} but expected type WEIGHTS.") return make_reply(ReturnCode.BAD_TASK_DATA) # Extract weights and ensure they are tensor. model_owner = shareable.get_header(AppConstants.MODEL_OWNER, "?") weights = {k: torch.as_tensor(v, device=self.device) for k, v in dxo.data.items()} # Get validation accuracy val_accuracy = self.do_validation(weights, abort_signal) if abort_signal.triggered: return make_reply(ReturnCode.TASK_ABORTED) self.log_info(fl_ctx, f"Accuracy when validating {model_owner}'s model on" f" {fl_ctx.get_identity_name()}"f's data: {val_accuracy}') dxo = DXO(data_kind=DataKind.METRICS, data={'val_acc': val_accuracy}) return dxo.to_shareable() except: self.log_exception(fl_ctx, f"Exception in validating model from {model_owner}") return make_reply(ReturnCode.EXECUTION_EXCEPTION) else: return make_reply(ReturnCode.TASK_UNKNOWN) def do_validation(self, weights, abort_signal): self.model.load_state_dict(weights) self.model.eval() correct = 0 total = 0 with torch.no_grad(): for i, (images, labels) in enumerate(self.test_loader): if abort_signal.triggered: return 0 images, labels = images.to(self.device), labels.to(self.device) output = self.model(images) _, pred_label = torch.max(output, 1) correct += (pred_label == labels).sum().item() total += images.size()[0] metric = correct/float(total) return metric
40.231481
114
0.643268
3,204
0.737399
0
0
0
0
0
0
1,023
0.235443
be9f9cd98cdf38a09e9b5c7bf41b9142f3bd6c42
4,220
py
Python
lambda/enable-traffic-mirroring.py
wrharding/aws-infra
5e913f8342b3a3b3a4599648c4a914f828b5bc18
[ "MIT" ]
1
2022-01-14T18:03:29.000Z
2022-01-14T18:03:29.000Z
lambda/enable-traffic-mirroring.py
wrharding/aws-infra
5e913f8342b3a3b3a4599648c4a914f828b5bc18
[ "MIT" ]
null
null
null
lambda/enable-traffic-mirroring.py
wrharding/aws-infra
5e913f8342b3a3b3a4599648c4a914f828b5bc18
[ "MIT" ]
null
null
null
# MIT License # Copyright (c) 2020-2021 Chris Farris (https://www.chrisfarris.com) # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import boto3 from botocore.exceptions import ClientError import json import os import logging logger = logging.getLogger() logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO'))) logging.getLogger('botocore').setLevel(logging.WARNING) logging.getLogger('boto3').setLevel(logging.WARNING) logging.getLogger('urllib3').setLevel(logging.WARNING) TAG_KEY=os.getenv('TAG_KEY', default='WireShark') def handler(event, context): logger.debug("Received event: " + json.dumps(event, sort_keys=True)) ec2_client = boto3.client('ec2') mirror_sessions = ec2_client.describe_traffic_mirror_sessions()['TrafficMirrorSessions'] enabled_enis = [] max_session_id = 0 for s in mirror_sessions: enabled_enis.append(s['NetworkInterfaceId']) if s['SessionNumber'] > max_session_id: max_session_id = s['SessionNumber'] response = ec2_client.describe_instances( Filters=[ {'Name': 'instance-state-name', 'Values': ['running']}, ], MaxResults=1000 # I should never need to paginate. 
) for r in response['Reservations']: for i in r['Instances']: if not i['InstanceType'].startswith("t3"): logger.debug(f"Instance {i['InstanceId']} is not a t3 and does not support Traffic Mirroring") continue for tag in i['Tags']: if tag['Key'] == TAG_KEY: # See if a mirror session is setup for eni in i['NetworkInterfaces']: if eni['NetworkInterfaceId'] not in enabled_enis: logger.info(f"ENI {eni['NetworkInterfaceId']} on Instance {i['InstanceId']} needs Mirroring Enabled") max_session_id += 1 enable_traffic_mirroring(ec2_client, eni['NetworkInterfaceId'], i['InstanceId'], max_session_id) else: logger.debug(f"ENI {eni['NetworkInterfaceId']} on Instance {i['InstanceId']} is already Enabled") def enable_traffic_mirroring(ec2_client, eni, instance_id, session_id): response = ec2_client.create_traffic_mirror_session( NetworkInterfaceId=eni, TrafficMirrorTargetId=os.environ['TARGET_ID'], TrafficMirrorFilterId=os.environ['FILTER_ID'], SessionNumber=session_id, Description=f"Enabled by Lambda for {instance_id}" ) ## END OF FUNCTION ## if __name__ == '__main__': # Logging idea stolen from: https://docs.python.org/3/howto/logging.html#configuring-logging # create console handler and set level to debug ch = logging.StreamHandler() logger.setLevel(logging.DEBUG) # create formatter # formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') formatter = logging.Formatter('%(levelname)s - %(message)s') # add formatter to ch ch.setFormatter(formatter) # add ch to logger logger.addHandler(ch) try: handler(None, None) except KeyboardInterrupt: exit(1)
39.074074
129
0.679147
0
0
0
0
0
0
0
0
2,167
0.513507
be9fea8e8fc13061760196f0e3818adcd5989d77
9,088
py
Python
src/value_function.py
wu6u3/async_trpo
b6e3dd56775464b58f7433773e8b04d88cf3fdbc
[ "MIT" ]
6
2018-02-02T19:53:08.000Z
2021-12-06T19:48:19.000Z
src/value_function.py
wu6u3/async_trpo
b6e3dd56775464b58f7433773e8b04d88cf3fdbc
[ "MIT" ]
null
null
null
src/value_function.py
wu6u3/async_trpo
b6e3dd56775464b58f7433773e8b04d88cf3fdbc
[ "MIT" ]
2
2018-07-26T06:22:04.000Z
2019-03-06T10:05:18.000Z
""" State-Value Function Written by Patrick Coady (pat-coady.github.io) Modified by Tin-Yin Lai (wu6u3) into asynchronous version """ import tensorflow as tf import numpy as np from sklearn.utils import shuffle #import os class NNValueFunction(object): """ NN-based state-value function """ def __init__(self, obs_dim, hid1_mult, thread_idx, shared_nn): """ Args: obs_dim: number of dimensions in observation vector (int) hid1_mult: size of first hidden layer, multiplier of obs_dim """ self.replay_buffer_x = None self.replay_buffer_y = None self.obs_dim = obs_dim self.hid1_mult = hid1_mult self.epochs = 10 self.lr = None # learning rate set in _build_graph() self._thread_idx=thread_idx # -1 for global self._scope_name = "nn_net_"+str(self._thread_idx) self._build_graph() #self.sess = tf.Session(graph=self.g) #self.sess.run(self.init) var_refs = [v._ref() for v in self.get_vars()] self.gradients = tf.gradients( self.loss, var_refs, gate_gradients=False, aggregation_method=None, colocate_gradients_with_ops=False) self.apply_gradients=None self.sync = self.sync_from(shared_nn) #self. global_fit = self.fit_for_global(x=None, y=None, logger=None) def _build_graph(self): """ Construct TensorFlow graph, including loss function, init op and train op """ with tf.variable_scope(self._scope_name) as scope: self.obs_ph = tf.placeholder(tf.float32, (None, self.obs_dim), 'obs_valfunc') self.val_ph = tf.placeholder(tf.float32, (None,), 'val_valfunc') # hid1 layer size is 10x obs_dim, hid3 size is 10, and hid2 is geometric mean hid1_size = self.obs_dim * self.hid1_mult # default multipler 10 chosen empirically on 'Hopper-v1' hid3_size = 5 # 5 chosen empirically on 'Hopper-v1' hid2_size = int(np.sqrt(hid1_size * hid3_size)) # heuristic to set learning rate based on NN size (tuned on 'Hopper-v1') self.lr = 1e-2 / np.sqrt(hid2_size) # 1e-3 empirically determined print('Value Params -- h1: {}, h2: {}, h3: {}, lr: {:.3g}' .format(hid1_size, hid2_size, hid3_size, self.lr)) # 3 hidden layers with tanh activations out = tf.layers.dense(self.obs_ph, hid1_size, tf.tanh, kernel_initializer=tf.random_normal_initializer( stddev=np.sqrt(1 / self.obs_dim)), name="h1") out = tf.layers.dense(out, hid2_size, tf.tanh, kernel_initializer=tf.random_normal_initializer( stddev=np.sqrt(1 / hid1_size)), name="h2") out = tf.layers.dense(out, hid3_size, tf.tanh, kernel_initializer=tf.random_normal_initializer( stddev=np.sqrt(1 / hid2_size)), name="h3") out = tf.layers.dense(out, 1, kernel_initializer=tf.random_normal_initializer( stddev=np.sqrt(1 / hid3_size)), name='output') self.out = tf.squeeze(out) self.loss = tf.reduce_mean(tf.square(self.out - self.val_ph)) # squared loss optimizer = tf.train.AdamOptimizer(self.lr) self.train_op = optimizer.minimize(self.loss) #self.init = tf.global_variables_initializer() self.h1_w, self.h1_b = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self._scope_name+'/h1') self.h2_w, self.h2_b = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self._scope_name+'/h2') self.h3_w, self.h3_b = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self._scope_name+'/h3') self.output_w, self.output_b =tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self._scope_name+'/output') scope.reuse_variables() #self.sess = tf.Session(graph=self.g) #self.sess.run(self.init) def fit_for_global(self, x, y, logger): """ Fit model to current data batch + previous data batch Args: x: features y: target logger: logger to save training loss and % explained variance """ num_batches = max(x.shape[0] // 256, 1) batch_size 
= x.shape[0] // num_batches y_hat = self.predict(sess, x) # check explained variance prior to update old_exp_var = 1 - np.var(y - y_hat)/np.var(y) if self.replay_buffer_x is None: x_train, y_train = x, y else: x_train = np.concatenate([x, self.replay_buffer_x]) y_train = np.concatenate([y, self.replay_buffer_y]) self.replay_buffer_x = x self.replay_buffer_y = y for e in range(self.epochs): x_train, y_train = shuffle(x_train, y_train) for j in range(num_batches): start = j * batch_size end = (j + 1) * batch_size feed_dict = {self.obs_ph: x_train[start:end, :], self.val_ph: y_train[start:end]} _, l = sess.run([self.train_op, self.loss], feed_dict=feed_dict) y_hat = self.predict(sess, x) loss = np.mean(np.square(y_hat - y)) # explained variance after update exp_var = 1 - np.var(y - y_hat) / np.var(y) # diagnose over-fitting of val func logger.log({'ValFuncLoss': loss, 'ExplainedVarNew': exp_var, 'ExplainedVarOld': old_exp_var}) def fit(self, sess, x, y, logger): """ Fit model to current data batch + previous data batch Args: x: features y: target logger: logger to save training loss and % explained variance """ num_batches = max(x.shape[0] // 256, 1) batch_size = x.shape[0] // num_batches y_hat = self.predict(sess, x) # check explained variance prior to update old_exp_var = 1 - np.var(y - y_hat)/np.var(y) if self.replay_buffer_x is None: x_train, y_train = x, y else: x_train = np.concatenate([x, self.replay_buffer_x]) y_train = np.concatenate([y, self.replay_buffer_y]) self.replay_buffer_x = x self.replay_buffer_y = y for e in range(self.epochs): x_train, y_train = shuffle(x_train, y_train) for j in range(num_batches): start = j * batch_size end = (j + 1) * batch_size feed_dict = {self.obs_ph: x_train[start:end, :], self.val_ph: y_train[start:end]} _, l = sess.run([self.train_op, self.loss], feed_dict=feed_dict) y_hat = self.predict(sess, x) loss = np.mean(np.square(y_hat - y)) # explained variance after update exp_var = 1 - np.var(y - y_hat) / np.var(y) # diagnose over-fitting of val func logger.log({'ValFuncLoss': loss, 'ExplainedVarNew': exp_var, 'ExplainedVarOld': old_exp_var}) def predict(self, sess, x): """ Predict method """ feed_dict = {self.obs_ph: x} y_hat = sess.run(self.out, feed_dict=feed_dict) return np.squeeze(y_hat) #def close_sess(self): # """ Close TensorFlow session """ # sess.close() def get_vars(self): return [self.h1_w, self.h1_b, self.h2_w, self.h2_b, self.h3_w, self.h3_b, self.output_w, self.output_b ] # weights = [] #name = [] #for tensor in self.g.as_graph_def().node: # name.append(tensor.name) #print(name) #with self.g.as_default() as g: # with tf.variable_scope(self._scope_name) as scope: # weights.extend(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)) # weights.append(g.get_tensor_by_name('h1/kernel:0')) # weights.append(g.get_tensor_by_name('h1/bias:0')) # weights.append(g.get_tensor_by_name('h2/kernel:0')) # weights.append(g.get_tensor_by_name('h2/bias:0')) # weights.append(g.get_tensor_by_name('h3/kernel:0')) # weights.append(g.get_tensor_by_name('h3/bias:0')) # return weights def sync_from(self, shared_nn, name=None): if shared_nn != None: src_vars = shared_nn.get_vars() dst_vars = self.get_vars() sync_ops = [] with tf.name_scope(name, self._scope_name, []) as name: for(src_var, dst_var) in zip(src_vars, dst_vars): sync_op = tf.assign(dst_var, src_var) sync_ops.append(sync_op) return tf.group(*sync_ops, name=name) else: return None
42.666667
121
0.575704
8,859
0.974802
0
0
0
0
0
0
2,626
0.288952
bea166ed0dc38a3bddb60dc5fe5709a4f52a15f3
168
py
Python
mdepub/actions/__init__.py
bkidwell/mdepub
af9e7d2065fb8251b6767e827ac2cff059ce7668
[ "0BSD" ]
35
2015-01-14T22:15:35.000Z
2021-05-23T06:04:34.000Z
mdepub/actions/__init__.py
bkidwell/mdepub
af9e7d2065fb8251b6767e827ac2cff059ce7668
[ "0BSD" ]
null
null
null
mdepub/actions/__init__.py
bkidwell/mdepub
af9e7d2065fb8251b6767e827ac2cff059ce7668
[ "0BSD" ]
7
2015-07-23T11:28:18.000Z
2021-02-09T17:07:06.000Z
"""mdepub actions -- these modules do the actual work.""" import archive import clean import create import epub import extract import html import newid import version
15.272727
57
0.791667
0
0
0
0
0
0
0
0
57
0.339286
bea186d9537f0999c2f3875648b97a7c001cd71a
10,439
py
Python
gbe/views/make_bid_view.py
bethlakshmi/gbe-divio-djangocms-python2.7
6e9b2c894162524bbbaaf73dcbe927988707231d
[ "Apache-2.0" ]
1
2021-03-14T11:56:47.000Z
2021-03-14T11:56:47.000Z
gbe/views/make_bid_view.py
bethlakshmi/gbe-divio-djangocms-python2.7
6e9b2c894162524bbbaaf73dcbe927988707231d
[ "Apache-2.0" ]
180
2019-09-15T19:52:46.000Z
2021-11-06T23:48:01.000Z
gbe/views/make_bid_view.py
bethlakshmi/gbe-divio-djangocms-python2.7
6e9b2c894162524bbbaaf73dcbe927988707231d
[ "Apache-2.0" ]
null
null
null
from django.views.generic import View from django.utils.decorators import method_decorator from django.contrib.auth.decorators import login_required from django.views.decorators.cache import never_cache from django.contrib import messages from django.http import HttpResponseRedirect from django.urls import reverse from django.shortcuts import ( get_object_or_404, render, ) from gbe.models import ( Conference, UserMessage, ) from gbe_logging import log_func from gbe.functions import ( validate_profile, ) from gbe.email.functions import notify_reviewers_on_bid_change from gbetext import ( no_login_msg, fee_instructions, full_login_msg, payment_needed_msg, payment_details_error, ) from gbe_utils.text import no_profile_msg from gbe.ticketing_idd_interface import ( get_payment_details, get_ticket_form, fee_paid, ) class MakeBidView(View): form = None has_draft = True instructions = '' payment_form = None coordinated = False def groundwork(self, request, args, kwargs): self.owner = validate_profile(request, require=False) if not self.owner or not self.owner.complete: user_message = UserMessage.objects.get_or_create( view=self.__class__.__name__, code="PROFILE_INCOMPLETE", defaults={ 'summary': "Profile Incomplete", 'description': no_profile_msg}) messages.warning(request, user_message[0].description) return '%s?next=%s' % ( reverse('profile_update', urlconf='gbe.urls'), reverse('%s_create' % self.bid_type.lower(), urlconf='gbe.urls')) self.bid_object = None if "bid_id" in kwargs: bid_id = kwargs.get("bid_id") self.bid_object = get_object_or_404(self.bid_class, pk=bid_id) self.conference = self.bid_object.b_conference else: self.conference = Conference.objects.filter( accepting_bids=True).first() def make_post_forms(self, request, the_form): if self.bid_object: self.form = the_form( request.POST, instance=self.bid_object, initial=self.get_initial(), prefix=self.prefix) else: self.form = the_form( request.POST, initial=self.get_initial(), prefix=self.prefix) self.set_up_form() def set_up_post(self, request): the_form = None if 'submit' in list(request.POST.keys()) or not self.has_draft: the_form = self.submit_form user_message = UserMessage.objects.get_or_create( view=self.__class__.__name__, code="SUBMIT_SUCCESS", defaults={ 'summary': "%s Submit Success" % self.bid_type, 'description': self.submit_msg}) else: the_form = self.draft_form user_message = UserMessage.objects.get_or_create( view=self.__class__.__name__, code="DRAFT_SUCCESS", defaults={ 'summary': "%s Save Draft Success" % self.bid_type, 'description': self.draft_msg}) self.make_post_forms(request, the_form) return user_message def make_context(self, request): paid = fee_paid( self.bid_type, self.owner.user_object.username, self.conference) instructions = UserMessage.objects.get_or_create( view=self.__class__.__name__, code="BID_INSTRUCTIONS", defaults={ 'summary': "%s Bid Instructions" % self.bid_type, 'description': self.instructions}) context = { 'conference': self.conference, 'forms': [self.form], 'page_title': self.page_title, 'view_title': self.view_title, 'draft_fields': self.draft_fields, 'submit_fields': self.submit_fields, 'fee_paid': paid, 'view_header_text': instructions[0].description, } if not paid and not self.coordinated: user_message = UserMessage.objects.get_or_create( view=self.__class__.__name__, code="FEE_MESSAGE", defaults={ 'summary': "%s Pre-submit Message" % self.bid_type, 'description': fee_instructions}) messages.info( request, user_message[0].description) if self.payment_form: context['forms'] += 
[self.payment_form] else: context['forms'] += [get_ticket_form(self.bid_class.__name__, self.conference)] return context def get_create_form(self, request): if self.bid_object: self.form = self.submit_form( prefix=self.prefix, instance=self.bid_object, initial=self.get_initial()) else: self.form = self.submit_form( prefix=self.prefix, initial=self.get_initial()) self.set_up_form() return render( request, 'gbe/bid.tmpl', self.make_context(request) ) def check_validity(self, request): return self.form.is_valid() def set_up_form(self): pass def get_invalid_response(self, request): self.set_up_form() context = self.make_context(request) return render( request, 'gbe/bid.tmpl', context) def submit_bid(self, request): self.bid_object.submitted = True self.bid_object.save() notify_reviewers_on_bid_change( self.owner, self.bid_object, self.bid_type, "Submission", self.conference, '%s Reviewers' % self.bid_type, reverse('%s_review' % self.bid_type.lower(), urlconf='gbe.urls')) @never_cache @log_func def get(self, request, *args, **kwargs): if not request.user.is_authenticated: follow_on = '?next=%s' % reverse( '%s_create' % self.bid_type.lower(), urlconf='gbe.urls') user_message = UserMessage.objects.get_or_create( view=self.__class__.__name__, code="USER_NOT_LOGGED_IN", defaults={ 'summary': "Need Login - %s Bid", 'description': no_login_msg}) full_msg = full_login_msg % ( user_message[0].description, reverse('login', urlconf='gbe.urls') + follow_on) messages.warning(request, full_msg) return HttpResponseRedirect( reverse('register', urlconf='gbe.urls') + follow_on) redirect = self.groundwork(request, args, kwargs) if redirect: return HttpResponseRedirect(redirect) return self.get_create_form(request) @never_cache @log_func @method_decorator(login_required) def post(self, request, *args, **kwargs): cart_items = [] paypal_button = None total = None redirect = None redirect = self.groundwork(request, args, kwargs) if redirect: return HttpResponseRedirect(redirect) user_message = self.set_up_post(request) # check bid validity if not self.check_validity(request): return self.get_invalid_response(request) if not self.coordinated and not fee_paid( self.bid_type, self.owner.user_object.username, self.conference) and "draft" not in list(request.POST.keys()): self.payment_form = get_ticket_form(self.bid_class.__name__, self.conference, request.POST) if not self.payment_form.is_valid(): error_message = UserMessage.objects.get_or_create( view=self.__class__.__name__, code="PAYMENT_CHOICE_INVALID", defaults={ 'summary': "User Made Invalid Ticket Choice", 'description': payment_details_error}) messages.error(request, error_message[0].description) return self.get_invalid_response(request) # save bid if not self.bid_object: self.bid_object = self.form.save(commit=False) self.set_valid_form(request) # if this isn't a draft, move forward through process, setting up # payment review if payment is needed if "submit" in list(request.POST.keys()): if self.payment_form: cart_items, paypal_button, total = get_payment_details( request, self.payment_form, self.bid_type, self.bid_object.pk, self.owner.user_object.pk) dynamic_message = UserMessage.objects.get_or_create( view=self.__class__.__name__, code="NOT_PAID_INSTRUCTIONS", defaults={ 'summary': "%s Not Paid" % self.bid_type, 'description': payment_needed_msg}) page_title = '%s Payment' % self.bid_type return render( request, 'gbe/confirm_pay.tmpl', {'dynamic_message': dynamic_message[0].description, 'page_title': page_title, 'cart_items': cart_items, 'total': total, 
'paypal_button': paypal_button}) else: redirect = self.submit_bid(request) messages.success(request, user_message[0].description) return HttpResponseRedirect( redirect or reverse('home', urlconf='gbe.urls')) def dispatch(self, *args, **kwargs): return super(MakeBidView, self).dispatch(*args, **kwargs)
36.121107
78
0.560111
9,563
0.916084
0
0
4,071
0.38998
0
0
1,100
0.105374
bea1d1375a8d223083e55cf97bff2f2ce8f4f7ba
6,977
py
Python
epicteller/core/dao/character.py
KawashiroNitori/epicteller
264b11e7e6eb58beb0f67ecbbb811d268a533f7a
[ "MIT" ]
null
null
null
epicteller/core/dao/character.py
KawashiroNitori/epicteller
264b11e7e6eb58beb0f67ecbbb811d268a533f7a
[ "MIT" ]
null
null
null
epicteller/core/dao/character.py
KawashiroNitori/epicteller
264b11e7e6eb58beb0f67ecbbb811d268a533f7a
[ "MIT" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- import time from collections import defaultdict from typing import List, Optional, Iterable, Dict import base62 from sqlalchemy import select, and_ from sqlalchemy.dialects.mysql import insert as mysql_insert from epicteller.core.model.character import Character from epicteller.core.tables import table from epicteller.core.util import ObjectDict from epicteller.core.util.enum import ExternalType from epicteller.core.util.seq import get_id def _format_character(result) -> Optional[Character]: if not result: return character = Character( id=result.id, url_token=result.url_token, member_id=result.member_id, name=result.name, avatar=result.avatar, description=result.description, is_removed=bool(result.is_removed), raw_data=result.data, created=result.created, updated=result.updated, ) return character class CharacterDAO: t = table.character select_clause = select([ t.c.id, t.c.url_token, t.c.name, t.c.member_id, t.c.avatar, t.c.description, t.c.is_removed, t.c.data, t.c.created, t.c.updated, ]) @classmethod async def batch_get_character_by_id(cls, character_ids: Iterable[int]) -> Dict[int, Character]: query = cls.select_clause.where(cls.t.c.id.in_(character_ids)) result = await table.execute(query) rows = await result.fetchall() return {row.id: _format_character(row) for row in rows} @classmethod async def batch_get_character_by_url_token(cls, url_tokens: Iterable[str]) -> Dict[str, Character]: query = cls.select_clause.where(cls.t.c.url_token.in_(url_tokens)) result = await table.execute(query) rows = await result.fetchall() return {row.url_token: _format_character(result) for row in rows} @classmethod async def get_characters_by_owner(cls, member_id: int) -> List[Character]: query = cls.select_clause.where(cls.t.c.member_id == member_id) results = await table.execute(query) characters = [_format_character(room) for room in await results.fetchall()] return characters @classmethod async def update_character(cls, character_id: int, **kwargs) -> None: if 'updated' not in kwargs: kwargs['updated'] = int(time.time()) query = cls.t.update().values(kwargs).where(cls.t.c.id == character_id) await table.execute(query) @classmethod async def create_character(cls, member_id: int, name: str, avatar: str, description: str, raw_data: dict) -> Character: created = int(time.time()) url_token = base62.encode(get_id()) values = ObjectDict( url_token=url_token, member_id=member_id, name=name, avatar=avatar, description=description, is_removed=0, data=raw_data, created=created, updated=created, ) query = cls.t.insert().values(values) result = await table.execute(query) values.id = result.lastrowid character = _format_character(values) return character class CharacterCampaignDAO: t = table.character_campaign_index @classmethod async def get_character_id_by_campaign_name(cls, campaign_id: int, name: str) -> Optional[int]: query = select([cls.t.c.character_id]).where(and_(cls.t.c.campaign_id == campaign_id, cls.t.c.name == name)) result = await table.execute(query) row = await result.fetchone() if not row: return return int(row.character_id) @classmethod async def get_character_ids_by_campaign_id(cls, campaign_id: int) -> List[int]: query = select([cls.t.c.character_id]).where(cls.t.c.campaign_id == campaign_id) results = await table.execute(query) character_ids = [int(row.character_id) for row in await results.fetchall()] return character_ids @classmethod async def get_campaign_ids_by_character_ids(cls, character_ids: List[int]) -> Dict[int, List[int]]: query = 
select([ cls.t.c.character_id, cls.t.c.campaign_id, ]).where(cls.t.c.character_id.in_(character_ids)) results = await table.execute(query) rows = await results.fetchall() campaign_map = defaultdict(list) for r in rows: campaign_map[r.character_id].append(r.campaign_id) return dict(campaign_map) @classmethod async def bind_character_to_campaign(cls, character_id: int, name: str, campaign_id: int): query = mysql_insert(cls.t).values( character_id=character_id, name=name, campaign_id=campaign_id, ).on_duplicate_key_update( name=name, ) await table.execute(query) @classmethod async def unbind_character_to_campaign(cls, character_id: int, campaign_id: int): query = cls.t.delete().where(and_(cls.t.c.character_id == character_id, cls.t.c.campaign_id == campaign_id)) await table.execute(query) class CharacterExternalDAO: t = table.character_external_id @classmethod async def get_external_ids_by_character(cls, character_id: int) -> Dict[ExternalType, str]: query = select([ cls.t.c.type, cls.t.c.external_id, ]).where(cls.t.c.character_id == character_id) result = await table.execute(query) rows = await result.fetchall() externals = {ExternalType(row.type): row.external_id for row in rows} return externals @classmethod async def get_character_ids_by_external(cls, external_type: ExternalType, external_id: str) -> List[int]: query = select([cls.t.c.character_id]).where(and_(cls.t.c.type == int(external_type), cls.t.c.external_id == external_id)) result = await table.execute(query) rows = await result.fetchall() character_ids = [r.character_id for r in rows] return character_ids @classmethod async def bind_character_external_id(cls, character_id: int, external_type: ExternalType, external_id: str): query = mysql_insert(cls.t).values( character_id=character_id, type=int(external_type), external_id=external_id, ).on_duplicate_key_update( external_id=external_id, ) await table.execute(query) @classmethod async def unbind_character_external_id(cls, character_id: int, external_type: ExternalType): query = cls.t.delete().where(and_(cls.t.c.character_id == character_id, cls.t.c.type == int(external_type))) await table.execute(query)
36.528796
116
0.642826
6,004
0.860542
0
0
5,503
0.788734
5,265
0.754622
62
0.008886
bea22b520ab74130906570943260ba5b3628befe
4,313
py
Python
examples/sentence_classfication/task_sentiment_classification_roformer_v2.py
Tongjilibo/bert4torch
71d5ffb3698730b16e5a252b06644a136787711e
[ "MIT" ]
49
2022-03-15T07:28:16.000Z
2022-03-31T07:16:15.000Z
examples/sentence_classfication/task_sentiment_classification_roformer_v2.py
Tongjilibo/bert4torch
71d5ffb3698730b16e5a252b06644a136787711e
[ "MIT" ]
null
null
null
examples/sentence_classfication/task_sentiment_classification_roformer_v2.py
Tongjilibo/bert4torch
71d5ffb3698730b16e5a252b06644a136787711e
[ "MIT" ]
null
null
null
#! -*- coding:utf-8 -*- # 情感分类例子,RoPE相对位置编码 # 官方项目:https://github.com/ZhuiyiTechnology/roformer-v2 # pytorch参考项目:https://github.com/JunnYu/RoFormer_pytorch from bert4torch.tokenizers import Tokenizer from bert4torch.models import build_transformer_model, BaseModel from bert4torch.snippets import sequence_padding, Callback, text_segmentate, ListDataset import torch.nn as nn import torch import torch.optim as optim from torch.utils.data import DataLoader maxlen = 128 batch_size = 16 config_path = 'F:/Projects/pretrain_ckpt/roformer/[sushen_torch_base]--roformer_v2_char_base/config.json' checkpoint_path = 'F:/Projects/pretrain_ckpt/roformer/[sushen_torch_base]--roformer_v2_char_base/pytorch_model.bin' dict_path = 'F:/Projects/pretrain_ckpt/roformer/[sushen_torch_base]--roformer_v2_char_base/vocab.txt' device = 'cuda' if torch.cuda.is_available() else 'cpu' # 建立分词器 tokenizer = Tokenizer(dict_path, do_lower_case=True) # 加载数据集 class MyDataset(ListDataset): @staticmethod def load_data(filenames): """加载数据,并尽量划分为不超过maxlen的句子 """ D = [] seps, strips = u'\n。!?!?;;,, ', u';;,, ' for filename in filenames: with open(filename, encoding='utf-8') as f: for l in f: text, label = l.strip().split('\t') for t in text_segmentate(text, maxlen - 2, seps, strips): D.append((t, int(label))) return D def collate_fn(batch): batch_token_ids, batch_segment_ids, batch_labels = [], [], [] for text, label in batch: token_ids, segment_ids = tokenizer.encode(text, maxlen=maxlen) batch_token_ids.append(token_ids) batch_segment_ids.append(segment_ids) batch_labels.append([label]) batch_token_ids = torch.tensor(sequence_padding(batch_token_ids), dtype=torch.long, device=device) batch_segment_ids = torch.tensor(sequence_padding(batch_segment_ids), dtype=torch.long, device=device) batch_labels = torch.tensor(batch_labels, dtype=torch.long, device=device) return [batch_token_ids, batch_segment_ids], batch_labels.flatten() # 加载数据集 train_dataloader = DataLoader(MyDataset(['E:/Github/bert4torch/examples/datasets/sentiment/sentiment.train.data']), batch_size=batch_size, shuffle=True, collate_fn=collate_fn) valid_dataloader = DataLoader(MyDataset(['E:/Github/bert4torch/examples/datasets/sentiment/sentiment.valid.data']), batch_size=batch_size, collate_fn=collate_fn) test_dataloader = DataLoader(MyDataset(['E:/Github/bert4torch/examples/datasets/sentiment/sentiment.test.data']), batch_size=batch_size, collate_fn=collate_fn) # 定义bert上的模型结构 class Model(BaseModel): def __init__(self) -> None: super().__init__() # 指定好model和对应的ckpt地址 self.bert, self.config = build_transformer_model(config_path=config_path, checkpoint_path=checkpoint_path, model='roformer_v2', return_model_config=True) self.dropout = nn.Dropout(0.1) self.dense = nn.Linear(self.config['hidden_size'], 2) def forward(self, token_ids, segment_ids): last_hidden_state = self.bert([token_ids, segment_ids]) output = self.dropout(last_hidden_state[:, 0, :]) output = self.dense(output) return output model = Model().to(device) # 定义使用的loss和optimizer,这里支持自定义 model.compile( loss=nn.CrossEntropyLoss(), optimizer=optim.Adam(model.parameters(), lr=2e-5), # 用足够小的学习率 metrics=['accuracy'] ) # 定义评价函数 def evaluate(data): total, right = 0., 0. for x_true, y_true in data: y_pred = model.predict(x_true).argmax(axis=1) total += len(y_true) right += (y_true == y_pred).sum().item() return right / total class Evaluator(Callback): """评估与保存 """ def __init__(self): self.best_val_acc = 0. 
def on_epoch_end(self, global_step, epoch, logs=None): val_acc = evaluate(valid_dataloader) if val_acc > self.best_val_acc: self.best_val_acc = val_acc # model.save_weights('best_model.pt') print(f'val_acc: {val_acc:.5f}, best_val_acc: {self.best_val_acc:.5f}\n') if __name__ == '__main__': evaluator = Evaluator() model.fit(train_dataloader, epochs=20, steps_per_epoch=500, callbacks=[evaluator]) else: model.load_weights('best_model.pt')
38.855856
176
0.703455
1,605
0.353758
0
0
522
0.115054
0
0
1,230
0.271104
bea3fce840a92d3dac26a2f605494f57192e6efe
1,217
py
Python
pyscf/nao/test/test_0037_aos.py
fdmalone/pyscf
021b17ac721e292b277d2b740e2ff8ab38bb6a4a
[ "Apache-2.0" ]
1
2019-07-01T12:39:45.000Z
2019-07-01T12:39:45.000Z
pyscf/nao/test/test_0037_aos.py
fdmalone/pyscf
021b17ac721e292b277d2b740e2ff8ab38bb6a4a
[ "Apache-2.0" ]
null
null
null
pyscf/nao/test/test_0037_aos.py
fdmalone/pyscf
021b17ac721e292b277d2b740e2ff8ab38bb6a4a
[ "Apache-2.0" ]
null
null
null
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function, division import os,unittest,numpy as np class KnowValues(unittest.TestCase): def test_aos_libnao(self): """ Computing of the atomic orbitals """ from pyscf.nao import system_vars_c from pyscf.tools.cubegen import Cube sv = system_vars_c().init_siesta_xml(label='water', cd=os.path.dirname(os.path.abspath(__file__))) cc = Cube(sv, nx=20, ny=20, nz=20) aos = sv.comp_aos_den(cc.get_coords()) self.assertEqual(aos.shape[0], cc.nx*cc.ny*cc.nz) self.assertEqual(aos.shape[1], sv.norbs) if __name__ == "__main__": unittest.main()
38.03125
102
0.739523
480
0.394412
0
0
0
0
0
0
655
0.538209