Dataset schema (one row per source file; ⌀ marks columns that may be null):

| Column | Type | Lengths / values / range |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 ⌀ | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] ⌀ | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] ⌀ | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1 to 132 |
8ad6704531f11096b6c1d2f7cda086784124487a | 789a9373f6198e158822706d6c243ed45e487f87 | /test/backward_compatibility/check_backward_compatibility.py | 6ba9ff66de293a14c2278e0df011e83144cde989 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | JakobHavtorn/pytorch | 98bc88e46a6acc7c77f9ec34810cd93b74e9aceb | 6de7a4a2818fbdd9fd763ad171bdb4c7514990fe | refs/heads/master | 2021-01-02T02:58:39.819315 | 2020-10-01T08:17:22 | 2020-10-01T08:17:22 | 239,460,735 | 0 | 0 | NOASSERTION | 2020-02-10T08:20:40 | 2020-02-10T08:20:39 | null | UTF-8 | Python | false | false | 8,892 | py | import argparse
import datetime
import re
import sys
from collections import defaultdict
import torch
from torch._C import parse_schema
# The date specifies how long the allowlist exclusion should apply.
#
# - If we NEVER give BC guarantee for an operator, you can put the
# date arbitrarily far in the future.
# - Otherwise, pick a date that is far enough in the future that you
# believe you can land your diff before then.
#
# Allowlist entries can be removed after the date listed on them passes.
#
# Allowlist item format:
# [
# 0: function name regex
# 1: date until which the allowlist entry is valid
# 2: (optional) function argument regex
# ]
#
# NB: function name DOES NOT include overload name!
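#
# Example (hypothetical entry, for illustration only):
#   ("aten::some_op", datetime.date(2021, 1, 1), "Tensor self")
# would skip the BC check until 2021-01-01 for any overload of aten::some_op whose
# schema string also matches the argument regex "Tensor self".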
allow_list = [
("c10_experimental", datetime.date(2222, 1, 1)),
# Internal
("static", datetime.date(9999, 1, 1)),
# Internal, profiler-specific ops
("profiler::_call_end_callbacks_on_jit_fut*", datetime.date(9999, 1, 1)),
("profiler::_record_function_enter", datetime.date(9999, 1, 1)),
("tensorexpr::Group", datetime.date(2020, 9, 9)),
("aten::append*", datetime.date(2020, 4, 15)),
("aten::_min", datetime.date(2020, 9, 9)),
("aten::_max", datetime.date(2020, 9, 9)),
("aten::amax", datetime.date(2020, 10, 9)),
("aten::amin", datetime.date(2020, 10, 9)),
("aten::min_values", datetime.date(2020, 10, 9)),
("aten::max_values", datetime.date(2020, 10, 9)),
("aten::split_with_sizes", datetime.date(2020, 7, 29)),
("aten::eq", datetime.date(2020, 7, 30)),
("aten::log", datetime.date(2020, 7, 30)),
("aten::__and__", datetime.date(2020, 7, 30)),
("aten::__or__", datetime.date(2020, 7, 30)),
("aten::__xor__", datetime.date(2020, 7, 30)),
("aten::add", datetime.date(2020, 7, 30)),
("aten::__upsample_bilinear", datetime.date(2020, 7, 30)),
("aten::hash", datetime.date(2020, 7, 30)),
("aten::divmod", datetime.date(2020, 7, 30)),
("aten::sorted", datetime.date(2020, 8, 30)),
("aten::__contains__", datetime.date(2020, 7, 30)),
("aten::ne", datetime.date(2020, 7, 30)),
("aten::index", datetime.date(2020, 7, 30)),
("aten::isnan", datetime.date(2020, 7, 30)),
("aten::pow", datetime.date(2020, 7, 30)),
("aten::atan2", datetime.date(2020, 7, 30)),
("aten::copy_", datetime.date(2020, 7, 30)),
("aten::sort", datetime.date(2020, 7, 30)),
("aten::_convolution", datetime.date(2020, 10, 15)),
("aten::cudnn_convolution", datetime.date(2020, 10, 15)),
("aten::cudnn_convolution_transpose", datetime.date(2020, 10, 15)),
("aten::_convolution_double_backward", datetime.date(2020, 10, 15)),
("aten::cudnn_convolution_backward_input", datetime.date(2020, 10, 15)),
("aten::cudnn_convolution_backward", datetime.date(2020, 10, 15)),
("aten::cudnn_convolution_backward_weight", datetime.date(2020, 10, 15)),
("aten::cudnn_convolution_transpose_backward", datetime.date(2020, 10, 15)),
("aten::cudnn_convolution_transpose_backward_input", datetime.date(2020, 10, 15)),
("aten::cudnn_convolution_transpose_backward_weight", datetime.date(2020, 10, 15)),
("aten::_cudnn_init_dropout_state", datetime.date(2020, 7, 30)),
("aten::sparse_coo_tensor", datetime.date(2020, 7, 30)),
("aten::_sparse_coo_tensor_with_dims", datetime.date(2020, 7, 30)),
("aten::_sparse_coo_tensor_with_dims_and_tensors", datetime.date(2020, 7, 30)),
("aten::__lshift__", datetime.date(2020, 7, 30)),
("aten::__rshift__", datetime.date(2020, 7, 30)),
("aten::__round_to_zero_floordiv", datetime.date(2020, 7, 30)),
("aten::gcd", datetime.date(2020, 7, 30)),
("aten::unflatten", datetime.date(2020, 8, 14)),
("aten::linalg_outer", datetime.date(2020, 8, 30)),
# WARNING: overload name here doesn't do anything
("aten::linalg_outer.out", datetime.date(2020, 8, 30)),
("aten::linalg_norm", datetime.date(2020, 9, 30)),
("aten::linalg_norm.ord_str", datetime.date(2020, 9, 30)),
("aten::linalg_norm.out", datetime.date(2020, 9, 30)),
("aten::linalg_norm.ord_str_out", datetime.date(2020, 9, 30)),
("aten::_compute_linear_combination", datetime.date(2020, 9, 1)),
("aten::linspace", datetime.date(2020, 9, 30)),
("aten::linspace.out", datetime.date(2020, 9, 30)),
("aten::logspace", datetime.date(2020, 9, 30)),
("aten::logspace.out", datetime.date(2020, 9, 30)),
("__getstate__", datetime.date(2020, 9, 11), "Conv[23]dPackedParams"),
("_caffe2::LearningRate", datetime.date(2020, 10, 1)),
("aten::_var", datetime.date(2020, 10, 1)),
("aten::_std", datetime.date(2020, 10, 1)),
("aten::_foreach_add_", datetime.date(2020, 10, 1)),
("aten::stft", datetime.date(2020, 10, 1)),
("aten::istft", datetime.date(2020, 10, 1)),
("prim::MakeTestTensor", datetime.date(2020, 10, 1)),
("preprocess", datetime.date(2020, 10, 1)),
("compile", datetime.date(2020, 10, 1)),
("execute", datetime.date(2020, 10, 1)),
("aten::_addr", datetime.date(2020, 10, 31)),
("aten::_addr_", datetime.date(2020, 10, 31)),
("aten::_addr.out", datetime.date(2020, 10, 31)),
("aten::_foreach_add", datetime.date(2020, 10, 1)),
("aten::_foreach_sub_", datetime.date(2020, 10, 1)),
("aten::_foreach_div", datetime.date(2020, 10, 1)),
("aten::_foreach_sub", datetime.date(2020, 10, 1)),
("aten::choose_qparams_optimized", datetime.date(2020, 10, 5)),
]
def allow_listed(schema, allow_list):
for item in allow_list:
if item[1] < datetime.date.today():
continue
regexp = re.compile(item[0])
if regexp.search(schema.name):
if len(item) > 2:
# if arguments regex is present, use it
regexp_args = re.compile(item[2])
return bool(regexp_args.search(str(schema)))
return True
return False
# The nightly will fail to parse newly added syntax to schema declarations
# Add new schemas that will fail the nightly here
dont_parse_list = [
("_TorchScriptTesting.*", datetime.date(2099, 9, 17)),
("test_backend", datetime.date(2099, 9, 17)),
]
def dont_parse(schema_line):
for item in dont_parse_list:
if item[1] < datetime.date.today():
continue
regexp = re.compile(item[0])
if regexp.search(schema_line):
return True
return False
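# Compare every schema parsed from the existing-schemas file against the schemas
# registered in the current build; an operator is only reported as broken when no
# currently-registered overload is backward compatible with its old signature.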
def check_bc(existing_schemas):
new_schemas = torch._C._jit_get_all_schemas()
new_schemas += torch._C._jit_get_custom_class_schemas()
new_schema_dict = defaultdict(list)
for s in new_schemas:
new_schema_dict[s.name].append(s)
is_bc = True
broken_ops = []
for existing_schema in existing_schemas:
if allow_listed(existing_schema, allow_list):
print("schema: ", str(existing_schema), " found on allowlist, skipping")
continue
print("processing existing schema: ", str(existing_schema))
matching_new_schemas = new_schema_dict.get(existing_schema.name, [])
found = False
for matching_new_schema in matching_new_schemas:
if matching_new_schema.is_backward_compatible_with(existing_schema):
found = True
break
if not found:
print(
"Can NOT find backward compatible schemas after changes "
"for schema {} from the following candidates:\n[\n{}\n]".format(
str(existing_schema),
"\n\t".join(str(s) for s in matching_new_schemas),
)
)
# TODO Print out more details about why candidates don't match.
broken_ops.append(str(existing_schema))
is_bc = False
if is_bc:
print("Found backward compatible schemas for all existing schemas")
else:
print(
"The PR is introducing backward incompatible changes to the "
"operator library. Please contact PyTorch team to confirm "
"whether this change is wanted or not. \n\nBroken ops: "
"[\n\t{}\n]".format("\n\t".join(broken_ops))
)
return is_bc
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Check backward compatibility of operator schemas.")
parser.add_argument(
"--existing-schemas",
help="filename to load existing schemas",
type=str,
default="schemas.txt",
)
args = parser.parse_args()
existing_schema_dict = dict()
slist = []
with open(args.existing_schemas, "r") as f:
while True:
line = f.readline()
if not line:
break
if dont_parse(line.strip()):
print("Not parsing schema line: ", line.strip())
continue
s = parse_schema(line.strip())
slist.append(s)
if not check_bc(slist):
sys.exit(1)
| [
"[email protected]"
] | |
73766be87c861a771f5ae7e6784f2aa71dfca856 | f954729a6941d5309f02865b5313b8524e9e6f53 | /resources/genomes.py | d4d95ae8aa3d7389867fd4d1f1a5fe88474820de | [] | no_license | bnbowman/NoAmpTools | 5f597adec6f49ab8422f443dfdd234b7a9a1dd8d | b59800c675c764ba8b5aee734c3ed79e4ac8e9a5 | refs/heads/master | 2018-12-18T18:58:27.201236 | 2018-09-14T19:33:04 | 2018-09-14T19:34:23 | 107,060,977 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,330 | py | #! /usr/bin/env python
from collections import defaultdict
import matplotlib.cm as cm
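# Genome descriptors: chromosome labels and sizes for each reference build, plus the
# target loci listed as (locus name, chromosome name, chromosome index, gene start,
# region start, region end, gene end).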
class Genome(object):
def labels(self):
return self._labels
def sizes(self):
return self._sizes
def size(self, key):
return self._sizes[key]
def targets(self):
return self._targets
def targetDictionary(self):
pass
def colors(self, N):
colors = {}
for i, n in enumerate( sorted(self._sizes.keys()) ):
color = cm.gist_rainbow(i%(N+1) / float(N))
colors[i] = color
colors[n] = color
return colors
class HG19(Genome):
_labels = ["chr{0}".format(l) for l in range(1,23) + ['M', 'X', 'Y']]
_sizes = {"chr1": 249250621, "chr2": 243199373, "chr3": 198022430, "chr4": 191154276,
"chr5": 180915260, "chr6": 171115067, "chr7": 159138663, "chr8": 146364022,
"chr9": 141213431, "chr10": 135534747, "chr11": 135006516, "chr12": 133851895,
"chr13": 115169878, "chr14": 107349540, "chr15": 102531392, "chr16": 90354753,
"chr17": 81195210, "chr18": 78077248, "chr19": 59128983, "chr20": 63025520,
"chr21": 48129895, "chr22": 51304566,
"chrM": 16571, "chrX": 155270560, "chrY": 59373566}
## Locus,ChrName,ChrIdx,GeneStart,RegionStart,RegionEnd,GeneEnd
_targets = [["HTT", "chr4", 4, 3075691, 3076603, 3076661, 3076815],
["FMR1", "chrX", 23, 146993123, 146993568, 146993629, 146994131],
["ALS", "chr9", 9, 27572985, 27573522, 27573541, 27574014],
["FUCHS", "chr18", 18, 53251995, 53253386, 53253458, 53253577],
["SCA10", "chr22", 22, 46190744, 46191234, 46191305, 46191756],
["EWINGS_Chr20", "chr20", 20, 21553989, 21556922, 21557001, 21557036],
["EWINGS_ChrX", "chrX", 23, 30325813, 30328875, 30328976, 30329062]]
def __init__(self):
# Update the size dictionary so we can index by index as well as name
for i, c in enumerate(self._labels):
self._sizes[i] = self._sizes[c]
def targetDictionary(self):
tDict = defaultdict(list)
for t in self.targets():
if t[2] <= 22:
target_tId = t[2]-1
else:
target_tId = t[2]
tDict[target_tId].append( t )
return tDict
class GRC38(Genome):
_labels = ["chr{0}".format(l) for l in range(1,23) + ['X', 'Y', 'M']]
_sizes = {"chr1": 248956422, "chr2": 242193529, "chr3": 198295559, "chr4": 190214555,
"chr5": 181538259, "chr6": 170805979, "chr7": 159345973, "chr8": 145138636,
"chr9": 138394717, "chr10": 133797422, "chr11": 135086622, "chr12": 133275309,
"chr13": 114364328, "chr14": 107043718, "chr15": 101991189, "chr16": 90338345,
"chr17": 83257441, "chr18": 80373285, "chr19": 58617616 , "chr20": 64444167,
"chr21": 46709983, "chr22": 50818468,
"chrX": 156040895, "chrY": 57227415, "chrM": 16569}
## Locus,ChrName,ChrIdx,GeneStart,RegionStart,RegionEnd,GeneEnd
_targets = [["HTT", "chr4", 3, 3072621, 3074866, 3074949, 3075351],
["ALS", "chr9", 8, 27571412, 27573474, 27573556, 27574248],
["FUCHS", "chr18", 17, 55584764, 55586145, 55586237, 55586346],
["EWINGS_Chr20", "chr20", 19, 21573351, 21576271, 21576374, 21576399],
["SCA10", "chr22", 21, 45793649, 45795344, 45795434, 45796093],
["EWINGS_ChrX", "chrX", 22, 30307696, 30310741, 30310899, 30310946],
["FMR1", "chrX", 22, 147911603, 147912040, 147912120, 147914564]]
def __init__(self):
# Update the size dictionary so we can index by index as well as name
for i, c in enumerate(self._labels):
self._sizes[i] = self._sizes[c]
def targetDictionary(self):
tDict = defaultdict(list)
for t in self.targets():
tDict[t[2]].append( t )
return tDict
def decodeGenome(genome):
"""
"""
if genome.lower() == "hg19":
return HG19()
elif genome.lower() == "grc38":
return GRC38()
else:
raise ValueError("Invalid genome: specified genome must be HG19 or GHC38")
| [
"[email protected]"
] | |
7e5a0a4b2e4b83568dafc54541a561cc3404b48e | 04b71cef66a039196a2965dfab0ff56b0793fe32 | /python/run/brian_2_xgb.py | 231a8c822d29764c92ace11f0ede645217b21be4 | [] | no_license | gviejo/Prediction_xgb_head_direction | 69d57b7d7a2f366f2c96a6e6e933d0978592718f | 6fbe724f92c15afcc634c84383f57bbeedff7d24 | refs/heads/master | 2021-11-05T09:10:48.067439 | 2021-11-05T01:15:17 | 2021-11-05T01:15:17 | 93,687,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,386 | py | import scipy.io
import sys,os
import numpy as np
from matplotlib.pyplot import *
import pandas as pd
import xgboost as xgb
store = pd.HDFStore("../data/spikes_brian.h5")
data = store['data']
store.close()
adn_neuron = [n for n in data.keys() if 'ADn' in n]
pos_neuron = [n for n in data.keys() if 'Pos' in n]
bin_size = 25
bin_length = 500
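# Walk every tree in a trained booster and collect, for each feature, the sorted list
# of split thresholds that appear in the graphviz dump of that tree.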
def extract_tree_threshold(trees):
n = len(trees.get_dump())
thr = {}
for t in xrange(n):
gv = xgb.to_graphviz(trees, num_trees=t)
body = gv.body
for i in xrange(len(body)):
for l in body[i].split('"'):
if 'f' in l and '<' in l:
tmp = l.split("<")
if thr.has_key(tmp[0]):
thr[tmp[0]].append(float(tmp[1]))
else:
thr[tmp[0]] = [float(tmp[1])]
for k in thr.iterkeys():
thr[k] = np.sort(np.array(thr[k]))
return thr
###########################################################################################
# create shifted spiking activity from -500 to 500 ms with index 0 to 40 (20 for t = 0 ms) for all ADn_neuron
# remove 20 points at the beginning and 20 points at the end
###########################################################################################
nb_bins = bin_length/bin_size
duration = len(data)
time_shifted = np.zeros(( duration-nb_bins, len(adn_neuron), nb_bins+1))
for n,i in zip(adn_neuron, range(len(adn_neuron))):
tmp = data[n]
for j in range(0,nb_bins+1):
# time_shifted[:,i,j] = tmp[40-j:duration-j]
time_shifted[:,i,j] = tmp[j:duration-nb_bins+j]
combination = {}
for k in pos_neuron:
combination[k] = { 'features' : adn_neuron,
'targets' : k,
}
#####################################################################
# LEARNING XGB
#####################################################################
bsts = {i:{} for i in combination.iterkeys()} # to keep the boosted tree
params = {'objective': "count:poisson", #for poisson output
'eval_metric': "logloss", #loglikelihood loss
'seed': 2925, #for reproducibility
'silent': 1,
'learning_rate': 0.1,
'min_child_weight': 2, 'n_estimators': 580,
'subsample': 0.6, 'max_depth': 5, 'gamma': 0.4}
num_round = 90
time_shifted = time_shifted.reshape(time_shifted.shape[0],time_shifted.shape[1]*time_shifted.shape[2])
for k in combination.keys():
print(k)
features = combination[k]['features']
targets = combination[k]['targets']
Yall = data[targets].values
# need to cut Yall
Yall = Yall[nb_bins//2:-nb_bins//2]
print(time_shifted.shape)
print(Yall.shape)
dtrain = xgb.DMatrix(time_shifted, label=Yall)
bst = xgb.train(params, dtrain, num_round)
bsts[k] = bst
#####################################################################
# EXTRACT TREE STRUCTURE
#####################################################################
thresholds = {}
for i in bsts.iterkeys():
thresholds[i] = extract_tree_threshold(bsts[i])
#####################################################################
# EXTRACT GAIN VALUE
#####################################################################
gain = {}
for i in bsts.iterkeys():
gain[i] = bsts[i].get_score(importance_type = 'gain')
#####################################################################
# CONVERT TO TIMING OF SPLIT POSITION
#####################################################################
time_count = np.zeros((len(pos_neuron), len(adn_neuron), nb_bins+1))
index = np.repeat(np.arange(len(adn_neuron)), nb_bins+1)
for n in thresholds.iterkeys():
splits = thresholds[n]
for s in splits.keys():
time_count[int(n.split(".")[1]), index[int(s[1:])], int(s[1:])%(nb_bins+1)] = len(splits[s])
time_count = time_count.sum(1)
gain_value = np.zeros((len(pos_neuron), len(adn_neuron), nb_bins+1))
index = np.repeat(np.arange(len(adn_neuron)), nb_bins+1)
for n in gain.iterkeys():
g = gain[n]
for s in g.keys():
gain_value[int(n.split(".")[1]), index[int(s[1:])], int(s[1:])%(nb_bins+1)] = g[s]
# gain_value = gain_value.reshape(len(pos_neuron)*len(adn_neuron), 41)
gain_value = gain_value.sum(1)
time = np.arange(-(bin_length/2), (bin_length/2)+bin_size, bin_size)
xgb_peaks = pd.DataFrame(index = time, data = (time_count*gain_value).transpose())
#####################################################################
# PLOT
#####################################################################
plot(time, xgb_peaks.mean(1))
title("XGB")
show()
| [
"[email protected]"
] | |
d86242cd87dd8323bb6214deb9dacb2b9f552040 | 2b3817fb9e4078e912fe1df2e964a68dcd48d053 | /code/pgms/can-sum.py | 021e88ad56ffd0af0b5efa8652465910059432a6 | [
"MIT"
] | permissive | souradeepta/PythonPractice | 350a130b341efec7b22ebd061c3d89036603587f | fa956ca4b87a0eb92fee21fa78e59757ce665770 | refs/heads/master | 2023-08-08T03:46:01.238861 | 2021-09-23T02:37:13 | 2021-09-23T02:37:13 | 256,668,632 | 1 | 0 | MIT | 2021-09-22T18:54:17 | 2020-04-18T04:22:25 | Python | UTF-8 | Python | false | false | 1,810 | py | from typing import List
def canSumRepeat(target: int, input: List, memo: dict) -> bool:
"""Can the sum of input number lead to the target value
Args:
target (int): target
input (List): list of numbers
Returns:
bool: True or False
"""
if not input:
return None
if target == 0:
return True
if target < 0:
return False
if target in memo:
return memo[target]
for elem in input:
remainder = target - elem
if(canSumRepeat(remainder, input, memo)):
memo[target] = True
return True
memo[target] = False
return False
def canSumRepeatList(target: int, input: List, output: List, memo) -> List:
"""Can the sum of input number lead to the target value
Args:
target (int): target
input (List): list of numbers
output (List): list of numbers which sum to target
Returns:
List: List of elements
"""
if not input:
return None
if target == 0:
return output
if target < 0:
return None
if target in memo:
return memo[target]
for elem in input:
remainder = target - elem
output.append(elem)
        if(canSumRepeatList(remainder, input, output, {})):
            memo[target] = output
            return output
        output.pop()  # backtrack: this element did not lead to a solution
memo[target] = None
return None
print(canSumRepeat(7, [2, 3], {})) # true
print(canSumRepeat(7, [5, 3, 4, 7], {})) # true
print(canSumRepeat(8, [2, 3, 5], {}))  # true (e.g. 2 + 3 + 3)
print(canSumRepeat(300, [7, 14], {}))  # false (300 is not a multiple of 7)
print(canSumRepeatList(7, [2, 3], [], {})) # true
print(canSumRepeatList(7, [5, 3, 4, 7], [], {})) # true
print(canSumRepeatList(8, [2, 3, 5], [], {}))  # truthy: prints one valid combination
print(canSumRepeatList(300, [7, 14], [], {}))  # falsy: prints None
| [
"[email protected]"
] | |
3b7dbaa8d490c3da4756ccad3b0ccf36d205d1c9 | a34ec07c3464369a88e68c9006fa1115f5b61e5f | /C_LinkList/Swap/L2_24_Swap_Nodes_in_Pairs.py | 875e98e38bc4183aafaf3c32d8ccd3bd631735f4 | [] | no_license | 824zzy/Leetcode | 9220f2fb13e03d601d2b471b5cfa0c2364dbdf41 | 93b7f4448a366a709214c271a570c3399f5fc4d3 | refs/heads/master | 2023-06-27T02:53:51.812177 | 2023-06-16T16:25:39 | 2023-06-16T16:25:39 | 69,733,624 | 14 | 3 | null | 2022-05-25T06:48:38 | 2016-10-01T10:56:07 | Python | UTF-8 | Python | false | false | 526 | py | """ https://leetcode.com/problems/swap-nodes-in-pairs/
from dba: https://leetcode.com/problems/swap-nodes-in-pairs/discuss/984392/Python-O(n)-solution-explained
"""
from header import *
class Solution:
def swapPairs(self, head: Optional[ListNode]) -> Optional[ListNode]:
ans = pre = ListNode(next=head)
while pre.next and pre.next.next:
a = pre.next
b = a.next
# 132 -> 213
pre.next, b.next, a.next = b, a, b.next
pre = a
        return ans.next
| [
"[email protected]"
] | |
ed329fe829585a042a59d6c01959272c9d3ee31e | cdc48931cb3adb62c5e4963e43aeaf3cbc5080c4 | /Scripts/Read_omi_data.py | c977f0e3517d4bf8d1888816d3f96f6023bdb222 | [] | no_license | giovannilopez9808/SEDEMA_2000_2019 | ce8f1955b7d0f760485e2a984f36e72141867a0f | 857be19c0acd9587904107ecd470b94a6a7d93b3 | refs/heads/main | 2023-06-25T10:32:06.536548 | 2021-08-04T19:55:16 | 2021-08-04T19:55:16 | 382,178,614 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,969 | py | import matplotlib.pyplot as plt
import pandas as pd
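# Helpers for the OMI UV-index table: build a datetime index from the YYYYMMDD "Datetime"
# column, keep only the requested UVI columns, clip to the study period, and drop rows
# whose UV index reaches the configured limit.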
def date_format(data):
data["Date"] = data["Datetime"].str[0:4]+"-" + \
data["Datetime"].str[4:6]+"-"+data["Datetime"].str[6:8]
data["Date"] = pd.to_datetime(data["Date"])
data.index = data["Date"]
data = data.drop(["Date", "Datetime"], 1)
return data
def clean_data(data, columns):
for column in data.columns:
if not column in columns:
data = data.drop(column, 1)
return data
def obtain_data_in_period(data, date_i, date_f):
data = data[data.index >= date_i]
data = data[data.index <= date_f]
return data
def drop_data_useless(data, columns, limit):
for column in columns:
data = data[data[column] < limit]
return data
inputs = {
"path data": "../Data/",
"file data": "Data_OMI_",
"product": "OMUVB",
"skiprows": 50,
"UVI limit": 18,
"UVIcolumns": ["CSUVindex", "UVindex"],
"file results": "UVI_",
"day initial": "2005-01-01",
"day final": "2019-12-31",
}
data = pd.read_fwf(inputs["path data"]+inputs["file data"]+inputs["product"]+".dat",
skiprows=inputs["skiprows"])
data = date_format(data)
data = clean_data(data,
inputs["UVIcolumns"])
data = obtain_data_in_period(data,
inputs["day initial"],
inputs["day final"])
data = drop_data_useless(data,
inputs["UVIcolumns"],
inputs["UVI limit"])
print(data.max())
for uvicolumn in inputs["UVIcolumns"]:
print("Creando archivo {}".format(uvicolumn))
data_UVI = data[uvicolumn]
print(data_UVI.count())
data_UVI.to_csv("{}{}{}.csv".format(inputs["path data"],
inputs["file results"],
uvicolumn),
float_format='%.4f')
| [
"[email protected]"
] | |
7d382bee63d4ab632c33c235b382848e1693243a | 09fd456a6552f42c124c148978289fae1af2d5c3 | /Graph/733.py | 5ae87bd51e724a76ced10a0220dd9ab5401483da | [] | no_license | hoang-ng/LeetCode | 60b4e68cbcf54cbe763d1f98a70f52e628ab32fb | 5407c6d858bfa43325363503c31134e560522be3 | refs/heads/master | 2021-04-10T11:34:35.310374 | 2020-07-28T10:22:05 | 2020-07-28T10:22:05 | 248,932,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,084 | py | # 733. Flood Fill
# An image is represented by a 2-D array of integers, each integer representing the pixel value of the image (from 0 to 65535).
# Given a coordinate (sr, sc) representing the starting pixel (row and column) of the flood fill, and a pixel value newColor, "flood fill" the image.
# To perform a "flood fill", consider the starting pixel, plus any pixels connected 4-directionally to the starting pixel of the same color as the starting pixel, plus any pixels connected 4-directionally to those pixels (also with the same color as the starting pixel), and so on. Replace the color of all of the aforementioned pixels with the newColor.
# At the end, return the modified image.
# Example 1:
# Input:
# image = [[1,1,1],[1,1,0],[1,0,1]]
# sr = 1, sc = 1, newColor = 2
# Output: [[2,2,2],[2,2,0],[2,0,1]]
# Explanation:
# From the center of the image (with position (sr, sc) = (1, 1)), all pixels connected
# by a path of the same color as the starting pixel are colored with the new color.
# Note the bottom corner is not colored 2, because it is not 4-directionally connected
# to the starting pixel.
# Note:
# The length of image and image[0] will be in the range [1, 50].
# The given starting pixel will satisfy 0 <= sr < image.length and 0 <= sc < image[0].length.
# The value of each color in image[i][j] and newColor will be an integer in [0, 65535].
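# Approach: depth-first search from (sr, sc), recoloring every 4-directionally connected
# pixel that still has the starting color. The oldColor != newColor guard prevents
# infinite recursion when the fill color equals the starting color.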
class Solution(object):
def floodFill(self, image, sr, sc, newColor):
oldColor = image[sr][sc]
if oldColor != newColor:
self.dfs(image, sr, sc, oldColor, newColor)
return image
def dfs(self, image, i, j, oldColor, newColor):
if i < 0 or j < 0 or i >= len(image) or j >= len(image[i]) or image[i][j] != oldColor:
return
image[i][j] = newColor
self.dfs(image, i + 1, j, oldColor, newColor)
self.dfs(image, i - 1, j, oldColor, newColor)
self.dfs(image, i, j + 1, oldColor, newColor)
self.dfs(image, i, j - 1, oldColor, newColor)
sol = Solution()
sol.floodFill([[0,0,0],[0,1,1]], 1, 1, 1)
| [
"[email protected]"
] | |
36c8aa5f1334f81fcdcf3cffaf6fe2f5836d7abe | 951fc0da7384b961726999e5451a10e2783462c4 | /script.module.exodusscrapers/lib/exodusscrapers/sources_placenta/en_placenta-1.7.8/ororo.py | da4be034f81d870663f7cf7dbc21eacd06fa0f60 | [
"Beerware"
] | permissive | vphuc81/MyRepository | eaf7b8531b2362f0e0de997a67b889bc114cd7c2 | 9bf8aca6de07fcd91bcec573f438f29e520eb87a | refs/heads/master | 2022-01-02T15:07:35.821826 | 2021-12-24T05:57:58 | 2021-12-24T05:57:58 | 37,680,232 | 6 | 10 | null | null | null | null | UTF-8 | Python | false | false | 4,444 | py | # -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @Daddy_Blamo wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Exodus
# Addon id: plugin.video.exodus
# Addon Provider: Exodus
import re,urlparse,json,base64
from resources.lib.modules import cache
from resources.lib.modules import control
from resources.lib.modules import client
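# Scraper for ororo.tv: movie and show id lists are fetched (and cached) from the site's
# JSON API, and stream URLs are requested using the account credentials configured in the
# addon settings.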
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['ororo.tv']
self.base_link = 'https://ororo.tv'
self.moviesearch_link = '/api/v2/movies'
self.tvsearch_link = '/api/v2/shows'
self.movie_link = '/api/v2/movies/%s'
self.show_link = '/api/v2/shows/%s'
self.episode_link = '/api/v2/episodes/%s'
self.user = control.setting('ororo.user')
self.password = control.setting('ororo.pass')
self.headers = {
'Authorization': 'Basic %s' % base64.b64encode('%s:%s' % (self.user, self.password)),
'User-Agent': 'Exodus for Kodi'
}
def movie(self, imdb, title, localtitle, aliases, year):
try:
if (self.user == '' or self.password == ''): raise Exception()
url = cache.get(self.ororo_moviecache, 60, self.user)
url = [i[0] for i in url if imdb == i[1]][0]
url= self.movie_link % url
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
if (self.user == '' or self.password == ''): raise Exception()
url = cache.get(self.ororo_tvcache, 120, self.user)
url = [i[0] for i in url if imdb == i[1]][0]
url= self.show_link % url
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if (self.user == '' or self.password == ''): raise Exception()
if url == None: return
url = urlparse.urljoin(self.base_link, url)
r = client.request(url, headers=self.headers)
r = json.loads(r)['episodes']
r = [(str(i['id']), str(i['season']), str(i['number']), str(i['airdate'])) for i in r]
url = [i for i in r if season == '%01d' % int(i[1]) and episode == '%01d' % int(i[2])]
url += [i for i in r if premiered == i[3]]
url= self.episode_link % url[0][0]
return url
except:
return
def ororo_moviecache(self, user):
try:
url = urlparse.urljoin(self.base_link, self.moviesearch_link)
r = client.request(url, headers=self.headers)
r = json.loads(r)['movies']
r = [(str(i['id']), str(i['imdb_id'])) for i in r]
r = [(i[0], 'tt' + re.sub('[^0-9]', '', i[1])) for i in r]
return r
except:
return
def ororo_tvcache(self, user):
try:
url = urlparse.urljoin(self.base_link, self.tvsearch_link)
r = client.request(url, headers=self.headers)
r = json.loads(r)['shows']
r = [(str(i['id']), str(i['imdb_id'])) for i in r]
r = [(i[0], 'tt' + re.sub('[^0-9]', '', i[1])) for i in r]
return r
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
if (self.user == '' or self.password == ''): raise Exception()
url = urlparse.urljoin(self.base_link, url)
url = client.request(url, headers=self.headers)
url = json.loads(url)['url']
sources.append({'source': 'ororo', 'quality': 'HD', 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
return sources
except:
return sources
def resolve(self, url):
return url
| [
"[email protected]"
] | |
0e4ea3b20b875aa0d0f525b5688b469b1ae1cd07 | 242f1dafae18d3c597b51067e2a8622c600d6df2 | /src/1300-1399/1389.create.target.array.in.given.order.py | fbf27d7eff98adccaa487419a2b2644f45f97e17 | [] | no_license | gyang274/leetcode | a873adaa083270eb05ddcdd3db225025533e0dfe | 6043134736452a6f4704b62857d0aed2e9571164 | refs/heads/master | 2021-08-07T15:15:01.885679 | 2020-12-22T20:57:19 | 2020-12-22T20:57:19 | 233,179,192 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | from typing import List
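# Build the target array by inserting nums[i] at position index[i]; list.insert shifts the
# elements after the insertion point one slot to the right, which matches the problem rule.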
class Solution:
def createTargetArray(self, nums: List[int], index: List[int]) -> List[int]:
ans = []
for i, x in zip(index, nums):
ans.insert(i, x)
return ans
if __name__ == '__main__':
solver = Solution()
cases = [
([1], [0]),
([0,1,2,3,4], [0,1,2,2,1]),
([1,2,3,4,0], [0,1,2,3,0]),
]
rslts = [solver.createTargetArray(nums, index) for nums, index in cases]
for cs, rs in zip(cases, rslts):
print(f"case: {cs} | solution: {rs}")
| [
"[email protected]"
] | |
aa680423f2958a393e449a990b4747fc8f1b6e0a | 7b5828edda7751700ca7002b40a214e39e5f48a8 | /EA/simulation/gsi_handlers/business_handlers.py | 37908330768a0f86bd61914ae6d9ee428b3c28f5 | [] | no_license | daniela-venuta/Sims-4-Python-Script-Workspace | 54c33dac02f84daed66f46b7307f222fede0fa62 | f408b28fb34626b2e3b2953152343d591a328d66 | refs/heads/main | 2023-03-29T18:08:39.202803 | 2021-03-30T19:00:42 | 2021-03-30T19:00:42 | 353,111,243 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,223 | py | from business.business_enums import BusinessType, BusinessEmployeeType
from gsi_handlers.gameplay_archiver import GameplayArchiver
from sims4.gsi.dispatcher import GsiHandler
from sims4.gsi.schema import GsiGridSchema, GsiFieldVisualizers
import services
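# GSI (Game State Inspector) handlers for business data: the grid schemas below define the
# columns and nested tables shown in the debugging UI, and the handler/archiver functions
# fill them from the live business service.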
business_managers_schema = GsiGridSchema(label='Business Managers')
business_managers_schema.add_field('household_id', label='Household Id', width=1, unique_field=True)
business_managers_schema.add_field('household_name', label='Household Name')
business_managers_schema.add_field('business_type', label='BusinessType', width=1.5)
business_managers_schema.add_field('zone_id', label='ZoneID')
business_managers_schema.add_field('is_open', label='Open', width=0.5)
business_managers_schema.add_field('time_since_open', label='Time Since Open', width=0.5)
business_managers_schema.add_field('star_rating_value', label='Star Value', type=GsiFieldVisualizers.FLOAT, width=0.5)
business_managers_schema.add_field('star_rating', label='Star', type=GsiFieldVisualizers.INT, width=0.5)
business_managers_schema.add_field('funds', label='Funds', type=GsiFieldVisualizers.FLOAT, width=0.5)
business_managers_schema.add_field('daily_revenue', label='Daily Revenue', type=GsiFieldVisualizers.FLOAT, width=0.5)
with business_managers_schema.add_has_many('other_data', GsiGridSchema) as sub_schema:
sub_schema.add_field('key', label='Data Name', width=0.5)
sub_schema.add_field('value', label='Data Value')
with business_managers_schema.add_has_many('customer_data', GsiGridSchema) as sub_schema:
sub_schema.add_field('sim_id', label='SimID', width=0.5)
sub_schema.add_field('sim_name', label='SimName', width=0.5)
sub_schema.add_field('star_rating_value', label='StarValue', type=GsiFieldVisualizers.FLOAT, width=0.5)
sub_schema.add_field('star_rating', label='Stars', type=GsiFieldVisualizers.INT, width=0.5)
sub_schema.add_field('buff_bucket_totals', label='Buff Bucket', width=2)
with business_managers_schema.add_has_many('employee_data', GsiGridSchema) as sub_schema:
sub_schema.add_field('employee_sim_id', label='SimID', width=0.6)
sub_schema.add_field('employee_sim_name', label='SimName', width=0.5)
sub_schema.add_field('employee_type', label='EmployeeType', width=1)
sub_schema.add_field('career_level_buff', label='CareerBuff', width=0.5)
sub_schema.add_field('daily_employee_wages', label='DailyWages', type=GsiFieldVisualizers.INT, width=0.5)
sub_schema.add_field('clocked_in_time', label='ClockInTime', width=0.5)
sub_schema.add_field('payroll_data', label='Payroll_data')
@GsiHandler('business_managers', business_managers_schema)
def generate_business_service_data(zone_id:int=None):
business_service = services.business_service()
business_manager_data = []
sim_info_manager = services.sim_info_manager()
def _construct_business_manager_gsi_data(zone_id, business_manager, business_tracker=None):
household = business_tracker._get_owner_household() if business_tracker is not None else None
business_manager_entry = {'household_id': str(household.id) if household is not None else 'N/A', 'household_name': household.name if household is not None and household.name else '<Unnamed Household>', 'business_type': str(BusinessType(business_manager.business_type)), 'zone_id': str(hex(zone_id)), 'is_open': 'x' if business_manager.is_open else '', 'time_since_open': str(business_manager.minutes_open), 'star_rating_value': business_manager._star_rating_value, 'star_rating': business_manager.get_star_rating(), 'funds': str(business_manager.funds.money), 'daily_revenue': business_manager._daily_revenue}
other_data = []
other_data.append({'key': 'daily_items_sold', 'value': str(business_manager._daily_items_sold)})
other_data.append({'key': 'markup_multiplier', 'value': str(business_manager._markup_multiplier)})
other_data.append({'key': 'advertising_type', 'value': business_manager.get_advertising_type_for_gsi()})
other_data.append({'key': 'quality_setting', 'value': business_manager.quality_setting.name})
other_data.append({'key': 'session_customers_served', 'value': str(business_manager._customer_manager.session_customers_served)})
other_data.append({'key': 'lifetime_customers_served', 'value': str(business_manager._customer_manager.lifetime_customers_served)})
other_data.append({'key': 'funds_category_tracker', 'value': str(business_manager._funds_category_tracker)})
other_data.append({'key': 'buff_bucket_totals', 'value': str(business_manager._buff_bucket_totals)})
other_data.append({'key': 'open_time', 'value': str(business_manager._open_time)})
if business_tracker is not None:
other_data.append({'key': 'additional_employee_slots (tracker data)', 'value': str(business_tracker._additional_employee_slots)})
other_data.append({'key': 'additional_markup_multiplier(tracker data)', 'value': business_tracker.additional_markup_multiplier})
other_data.append({'key': 'additional_customer_count(tracker data)', 'value': business_tracker.addtitional_customer_count})
business_manager_entry['other_data'] = other_data
employee_gsi_data = []
employee_manager = business_manager._employee_manager
for (sim_id, employee_data) in employee_manager._employees.items():
(clock_in_time, payroll_data) = employee_manager._employee_payroll.get(sim_id, (None, None))
sim_info = sim_info_manager.get(sim_id)
entry = {'employee_sim_id': str(sim_id), 'employee_sim_name': str(sim_info), 'employee_type': str(BusinessEmployeeType(employee_data.employee_type)), 'daily_employee_wages': employee_manager._daily_employee_wages, 'clocked_in_time': str(clock_in_time), 'payroll_data': str(payroll_data)}
buff_type = sim_info.get_buff_type(employee_data._career_level_buff_handle)
entry['career_level_buff'] = str(buff_type.__name__) if buff_type is not None else ''
employee_gsi_data.append(entry)
business_manager_entry['employee_data'] = employee_gsi_data
customer_data = []
for (sim_id, business_customer_data) in business_manager._customer_manager._customers.items():
entry = {'sim_id': str(sim_id), 'sim_name': str(sim_info_manager.get(sim_id)), 'star_rating_value': business_customer_data.get_star_rating_stat_value(), 'star_rating': business_customer_data.get_star_rating(), 'buff_bucket_totals': str(business_customer_data.buff_bucket_totals)}
customer_data.append(entry)
business_manager_entry['customer_data'] = customer_data
return business_manager_entry
zone_business_manager = services.business_service().get_business_manager_for_zone(zone_id)
if zone_business_manager is not None and zone_business_manager.is_owned_by_npc:
business_manager_data.append(_construct_business_manager_gsi_data(zone_id, zone_business_manager))
for (_, business_trackers) in business_service._business_trackers.items():
for business_tracker in business_trackers:
for (zone_id, business_manager) in business_tracker.business_managers.items():
business_manager_data.append(_construct_business_manager_gsi_data(zone_id, business_manager, business_tracker=business_tracker))
return business_manager_data
business_archiver_schema = GsiGridSchema(label='Business Archive')
business_archiver_schema.add_field('event_from', label='EventFrom', width=0.5)
business_archiver_schema.add_field('sim_id', label='SimID', width=1)
business_archiver_schema.add_field('sim_name', label='SimName', width=1)
business_archiver_schema.add_field('event_description', label='Reason', width=2)
business_archiver = GameplayArchiver('business_archiver', business_archiver_schema)
def archive_business_event(event_from, sim, event_description, sim_id=None):
entry = {'event_from': event_from, 'sim_id': str(sim.id) if sim is not None else str(sim_id), 'sim_name': sim.full_name if sim is not None else '', 'event_description': event_description}
business_archiver.archive(data=entry)
| [
"[email protected]"
] | |
668a65b4b0b55b74b317267172130bd341836dc6 | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_6_mask_unet/mask_5_3a_sobel_k5/sobel_k5_s001_6l/step10_a_mask_5_3_sobel_k5_s001_6l.py | 11a26e1fd09e420e7e64ad4554eaaf5af90295e7 | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,254 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### Add kong_model2 to sys.path
import os
code_exe_path = os.path.realpath(__file__) ### path of the currently executing step10_b.py
code_exe_path_element = code_exe_path.split("\\") ### split the path; used below to find which level kong_model2 sits at
kong_layer = code_exe_path_element.index("kong_model2") ### find which level kong_model2 sits at
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### locate the kong_model2 dir
import sys ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer ### the -1 in the middle converts the length into an index
# print(" kong_to_py_layer:", kong_to_py_layer)
if (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:] ### [7:] was meant to strip the "step1x_" prefix; keeping meaningful names turned out fine, so it was changed to 0
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] ### [5:] was meant to strip the "mask_" prefix (added only because a Python module name cannot start with a digit); the automatic ordering turned out acceptable, so it was changed to 0
elif(kong_to_py_layer > 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print(" template_dir:", template_dir) ### 舉例: template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
#############################################################################################################################################################################################################
exp_dir = template_dir
#############################################################################################################################################################################################################
from step06_a_datas_obj import *
from step09_e2_mask_unet2_obj import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder
rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)
rm_moduless = [module for module in sys.modules if "step09" in module]
for rm_module in rm_moduless: del sys.modules[rm_module]
#############################################################################################################################################################################################################
'''
exp_dir is the name of the folder "one level above" result_dir! A nested exp_dir is fine too~
For example, with exp_dir = "6_mask_unet/a_name_you_choose", every result_dir ends up under:
6_mask_unet/a_name_you_choose/result_a
6_mask_unet/a_name_you_choose/result_b
6_mask_unet/a_name_you_choose/...
'''
use_db_obj = type9_mask_flow_have_bg_dtd_hdr_mix_and_paper
############################ have_bg #################################
### 1a. ch
mask_h_bg_ch128_sig_L6_ep060 = Exp_builder().set_basic("train", use_db_obj, mask_unet_ch128_sig_L6, G_sobel_k5_loss_info_builder, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_mid="6_1_1", describe_end="mask_h_bg_ch128_sig_sobel_k5_6l_ep060") .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="type8_blender_os_book-6_1_1-flow_unet-mask_h_bg_ch128_sig_sobel_k5_6l_ep060-20211016_115331") #.change_result_name_v1_to_v2()
mask_h_bg_ch064_sig_L6_ep060 = Exp_builder().set_basic("train", use_db_obj, mask_unet_ch064_sig_L6, G_sobel_k5_loss_info_builder, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_mid="6_1_2", describe_end="mask_h_bg_ch064_sig_sobel_k5_6l_ep060") .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="type8_blender_os_book-6_1_2-flow_unet-mask_h_bg_ch064_sig_sobel_k5_6l_ep060-20211016_155014") #.change_result_name_v1_to_v2()
mask_h_bg_ch032_sig_L6_ep060 = Exp_builder().set_basic("train", use_db_obj, mask_unet_ch032_sig_L6, G_sobel_k5_loss_info_builder, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_mid="6_1_3", describe_end="mask_h_bg_ch032_sig_sobel_k5_6l_ep060") .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="type8_blender_os_book-6_1_3-flow_unet-mask_h_bg_ch032_sig_sobel_k5_6l_ep060-20211016_151755") #.change_result_name_v1_to_v2()
mask_h_bg_ch016_sig_L6_ep060 = Exp_builder().set_basic("train", use_db_obj, mask_unet_ch016_sig_L6, G_sobel_k5_loss_info_builder, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_mid="6_1_4", describe_end="mask_h_bg_ch016_sig_sobel_k5_6l_ep060") .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="type8_blender_os_book-6_1_4-flow_unet-mask_h_bg_ch016_sig_sobel_k5_6l_ep060-20211016_144809") #.change_result_name_v1_to_v2()
mask_h_bg_ch008_sig_L6_ep060 = Exp_builder().set_basic("train", use_db_obj, mask_unet_ch008_sig_L6, G_sobel_k5_loss_info_builder, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_mid="6_1_5", describe_end="mask_h_bg_ch008_sig_sobel_k5_6l_ep060") .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="type8_blender_os_book-6_1_5-flow_unet-mask_h_bg_ch008_sig_sobel_k5_6l_ep060-20211016_141912") #.change_result_name_v1_to_v2()
mask_h_bg_ch004_sig_L6_ep060 = Exp_builder().set_basic("train", use_db_obj, mask_unet_ch004_sig_L6, G_sobel_k5_loss_info_builder, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_mid="6_1_6", describe_end="mask_h_bg_ch004_sig_sobel_k5_6l_ep060") .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="type8_blender_os_book-6_1_6-flow_unet-mask_h_bg_ch004_sig_sobel_k5_6l_ep060-20211016_135029") #.change_result_name_v1_to_v2()
mask_h_bg_ch002_sig_L6_ep060 = Exp_builder().set_basic("train", use_db_obj, mask_unet_ch002_sig_L6, G_sobel_k5_loss_info_builder, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_mid="6_1_7", describe_end="mask_h_bg_ch002_sig_sobel_k5_6l_ep060") .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="type8_blender_os_book-6_1_7-flow_unet-mask_h_bg_ch002_sig_sobel_k5_6l_ep060-20211016_132143") #.change_result_name_v1_to_v2()
mask_h_bg_ch001_sig_L6_ep060 = Exp_builder().set_basic("train", use_db_obj, mask_unet_ch001_sig_L6, G_sobel_k5_loss_info_builder, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_mid="6_1_8", describe_end="mask_h_bg_ch001_sig_sobel_k5_6l_ep060") .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="type8_blender_os_book-6_1_8-flow_unet-mask_h_bg_ch001_sig_sobel_k5_6l_ep060-20211018_113827") #.change_result_name_v1_to_v2()
### 1b. ch and epoch_6l
mask_h_bg_ch128_sig_L6_ep200 = Exp_builder().set_basic("train", use_db_obj, mask_unet_ch128_sig_L6, G_sobel_k5_loss_info_builder, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_mid="6_1b_1", describe_end="mask_h_bg_ch128_sig_sobel_k5_6l_ep200") .set_train_args(epochs=200).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="type8_blender_os_book-6_1b_1-flow_unet-mask_h_bg_ch128_sig_sobel_k5_6l_ep200-20211017_052816") #.change_result_name_v1_to_v2()
mask_h_bg_ch064_sig_L6_ep200 = Exp_builder().set_basic("train", use_db_obj, mask_unet_ch064_sig_L6, G_sobel_k5_loss_info_builder, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_mid="6_1b_2", describe_end="mask_h_bg_ch064_sig_sobel_k5_6l_ep200") .set_train_args(epochs=200).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="type8_blender_os_book-6_1b_2-flow_unet-mask_h_bg_ch064_sig_sobel_k5_6l_ep200-20211017_025215") #.change_result_name_v1_to_v2()
mask_h_bg_ch032_sig_L6_ep200 = Exp_builder().set_basic("train", use_db_obj, mask_unet_ch032_sig_L6, G_sobel_k5_loss_info_builder, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_mid="6_1b_3", describe_end="mask_h_bg_ch032_sig_sobel_k5_6l_ep200") .set_train_args(epochs=200).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="type8_blender_os_book-6_1b_3-flow_unet-mask_h_bg_ch032_sig_sobel_k5_6l_ep200-20211017_010424") #.change_result_name_v1_to_v2()
mask_h_bg_ch016_sig_L6_ep200 = Exp_builder().set_basic("train", use_db_obj, mask_unet_ch016_sig_L6, G_sobel_k5_loss_info_builder, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_mid="6_1b_4", describe_end="mask_h_bg_ch016_sig_sobel_k5_6l_ep200") .set_train_args(epochs=200).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="type8_blender_os_book-6_1b_4-flow_unet-mask_h_bg_ch016_sig_sobel_k5_6l_ep200-20211016_232511") #.change_result_name_v1_to_v2()
mask_h_bg_ch008_sig_L6_ep200 = Exp_builder().set_basic("train", use_db_obj, mask_unet_ch008_sig_L6, G_sobel_k5_loss_info_builder, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_mid="6_1b_5", describe_end="mask_h_bg_ch008_sig_sobel_k5_6l_ep200") .set_train_args(epochs=200).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="type8_blender_os_book-6_1b_5-flow_unet-mask_h_bg_ch008_sig_sobel_k5_6l_ep200-20211016_214914") #.change_result_name_v1_to_v2()
mask_h_bg_ch004_sig_L6_ep200 = Exp_builder().set_basic("train", use_db_obj, mask_unet_ch004_sig_L6, G_sobel_k5_loss_info_builder, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_mid="6_1b_6", describe_end="mask_h_bg_ch004_sig_sobel_k5_6l_ep200") .set_train_args(epochs=200).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="type8_blender_os_book-6_1b_6-flow_unet-mask_h_bg_ch004_sig_sobel_k5_6l_ep200-20211016_205639") #.change_result_name_v1_to_v2()
mask_h_bg_ch002_sig_L6_ep200 = Exp_builder().set_basic("train", use_db_obj, mask_unet_ch002_sig_L6, G_sobel_k5_loss_info_builder, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_mid="6_1b_7", describe_end="mask_h_bg_ch002_sig_sobel_k5_6l_ep200") .set_train_args(epochs=200).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="type8_blender_os_book-6_1b_7-flow_unet-mask_h_bg_ch002_sig_sobel_k5_6l_ep200-20211016_192121") #.change_result_name_v1_to_v2()
mask_h_bg_ch001_sig_L6_ep200 = Exp_builder().set_basic("train", use_db_obj, mask_unet_ch001_sig_L6, G_sobel_k5_loss_info_builder, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_mid="6_1b_8", describe_end="mask_h_bg_ch001_sig_sobel_k5_6l_ep200") .set_train_args(epochs=200).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="type8_blender_os_book-6_1b_8-flow_unet-mask_h_bg_ch001_sig_sobel_k5_6l_ep200-20211016_174548") #.change_result_name_v1_to_v2()
### 3. no-concat
mask_h_bg_ch032_L6_2to2noC_sig_ep060 = Exp_builder().set_basic("train", use_db_obj, mask_unet_IN_L6_ch32_2to2noC_sig, G_sobel_k5_loss_info_builder, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_mid="6_3_1", describe_end="mask_h_bg_ch032_6l_2to2noC_sig_sobel_k5_ep060") .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="type8_blender_os_book-6_3_1-flow_unet-mask_h_bg_ch032_6l_2to2noC_sig_sobel_k5_ep060-20211017_102259") #.change_result_name_v1_to_v2()
mask_h_bg_ch032_L6_2to3noC_sig_ep060 = Exp_builder().set_basic("train", use_db_obj, mask_unet_IN_L6_ch32_2to3noC_sig, G_sobel_k5_loss_info_builder, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_mid="6_3_2", describe_end="mask_h_bg_ch032_6l_2to3noC_sig_sobel_k5_ep060") .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="type8_blender_os_book-6_3_2-flow_unet-mask_h_bg_ch032_6l_2to3noC_sig_sobel_k5_ep060-20211017_105549") #.change_result_name_v1_to_v2()
mask_h_bg_ch032_L6_2to4noC_sig_ep060 = Exp_builder().set_basic("train", use_db_obj, mask_unet_IN_L6_ch32_2to4noC_sig, G_sobel_k5_loss_info_builder, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_mid="6_3_3", describe_end="mask_h_bg_ch032_6l_2to4noC_sig_sobel_k5_ep060") .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="type8_blender_os_book-6_3_3-flow_unet-mask_h_bg_ch032_6l_2to4noC_sig_sobel_k5_ep060-20211017_112834") #.change_result_name_v1_to_v2()
mask_h_bg_ch032_L6_2to5noC_sig_ep060 = Exp_builder().set_basic("train", use_db_obj, mask_unet_IN_L6_ch32_2to5noC_sig, G_sobel_k5_loss_info_builder, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_mid="6_3_4", describe_end="mask_h_bg_ch032_6l_2to5noC_sig_sobel_k5_ep060") .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="type8_blender_os_book-6_3_4-flow_unet-mask_h_bg_ch032_6l_2to5noC_sig_sobel_k5_ep060-20211017_120111") #.change_result_name_v1_to_v2()
mask_h_bg_ch032_L6_2to6noC_sig_ep060 = Exp_builder().set_basic("train", use_db_obj, mask_unet_IN_L6_ch32_2to6noC_sig, G_sobel_k5_loss_info_builder, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_mid="6_3_5", describe_end="mask_h_bg_ch032_6l_2to6noC_sig_sobel_k5_ep060") .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="type8_blender_os_book-6_3_5-flow_unet-mask_h_bg_ch032_6l_2to6noC_sig_sobel_k5_ep060-20211017_123334") #.change_result_name_v1_to_v2()
### 4. skip use add
mask_h_bg_ch032_L6_skipAdd_sig_ep060 = Exp_builder().set_basic("train", use_db_obj, mask_unet_L6_skip_use_add_sig, G_sobel_k5_loss_info_builder, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_mid="6_4_5", describe_end="mask_h_bg_ch032_6l_skipAdd_sig_sobel_k5_ep060") .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="type8_blender_os_book-6_4_5-flow_unet-mask_h_bg_ch032_6l_skipAdd_sig_sobel_k5_ep060-20211017_130546") #.change_result_name_v1_to_v2()
if(__name__ == "__main__"):
print("build exps cost time:", time.time() - start_time)
if len(sys.argv) < 2:
############################################################################################################
        ### Either press F5 or run "python step10_b1_exp_obj_load_and_train_and_test.py" with nothing after it, so execution does not fall through to the code below that is meant for step10_b_subprocss.py~~~
mask_h_bg_ch128_sig_L6_ep060.build().run()
# print('no argument')
sys.exit()
    ### The code below is for step10_b_subprocess.py; it is equivalent to running "python step10_b1_exp_obj_load_and_train_and_test.py some_exp.build().run()" from cmd
eval(sys.argv[1])
| [
"[email protected]"
] | |
7c7a13988c1414c47d5397c1e6f97e7be4e23afa | 90b95ac525ee731ec5ba7d5da5c9038396ac4c3d | /zoom_data/migrations/0032_auto_20180129_1556.py | 852054a2c4f744955572c6156d903d4e86f2e8a1 | [] | no_license | 5klynna5/zoom_c | 33364146915611917ae0e6e0fd49233370424929 | 59c39eece1dd0ad5e7e210f4f03d8bb64df44b98 | refs/heads/master | 2021-05-12T04:06:39.031130 | 2018-08-04T23:52:19 | 2018-08-04T23:52:19 | 117,153,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,542 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('zoom_data', '0031_auto_20180129_1151'),
]
operations = [
migrations.AlterField(
model_name='annual',
name='annual_income',
field=models.SmallIntegerField(help_text='in U.S. dollars', blank=True),
),
migrations.AlterField(
model_name='annual',
name='employment_status',
field=models.CharField(choices=[('Yes', 'YES'), ('No', 'NO')], null=True, max_length=3, blank=True),
),
migrations.AlterField(
model_name='annual',
name='student_status',
field=models.CharField(choices=[('Yes', 'YES'), ('No', 'NO')], null=True, max_length=3, blank=True),
),
migrations.AlterField(
model_name='contact',
name='contact_pref',
field=models.CharField(choices=[('Email', 'EMAIL'), ('Call', 'CALL'), ('Text', 'TEXT'), ('Mail', 'MAIL'), ('Facebook', 'FACEBOOK')], null=True, max_length=8, blank=True),
),
migrations.AlterField(
model_name='contact',
name='permission_call',
field=models.CharField(choices=[('Yes', 'YES'), ('No', 'NO')], null=True, max_length=3, blank=True),
),
migrations.AlterField(
model_name='contact',
name='permission_email',
field=models.CharField(choices=[('Yes', 'YES'), ('No', 'NO')], null=True, max_length=3, blank=True),
),
migrations.AlterField(
model_name='contact',
name='permission_facebook',
field=models.CharField(choices=[('Yes', 'YES'), ('No', 'NO')], null=True, max_length=3, blank=True),
),
migrations.AlterField(
model_name='contact',
name='permission_mail',
field=models.CharField(choices=[('Yes', 'YES'), ('No', 'NO')], null=True, max_length=3, blank=True),
),
migrations.AlterField(
model_name='contact',
name='permission_photo',
field=models.CharField(choices=[('Yes', 'YES'), ('No', 'NO')], null=True, max_length=3, blank=True),
),
migrations.AlterField(
model_name='contact',
name='permission_text',
field=models.CharField(choices=[('Yes', 'YES'), ('No', 'NO')], null=True, max_length=3, blank=True),
),
]
| [
"[email protected]"
] | |
10b42b9c096fb19446e043163c9cfb3ae6a2ed9d | 098f80474295aa024657330b8f0813eca7d015c2 | /UnrealPythonLibrary/PythonLibraries/PythonHelpers.py | 9614830514ae6520ef42d4429675bed7ed05a765 | [] | no_license | sniler/UnrealScript | e4c4387caa6402a61b4bf0ba8952faf598e4464e | a4587d578366551b2470862f18b33c42439c5cdd | refs/heads/master | 2023-04-01T18:45:37.803690 | 2021-04-13T11:25:13 | 2021-04-13T11:25:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 929 | py | # unreal._ObjectBase
# https://api.unrealengine.com/INT/PythonAPI/class/_ObjectBase.html
import unreal
# object_to_cast: obj unreal.Object : The object you want to cast
# object_class: obj unreal.Class : The class you want to cast the object into
def cast(object_to_cast=None, object_class=None):
try:
return object_class.cast(object_to_cast)
except:
return None
# Cpp ########################################################################################################################################################################################
# Note: Also work using the command : help(unreal.StaticMesh)
# unreal_class: obj : The class you want to know the properties
# return: str List : The available properties (formatted the way you can directly use them to get their values)
def getAllProperties(unreal_class=None):
return unreal.CppLib.get_all_properties(unreal_class)
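# Illustrative usage (a sketch only, not part of the original helpers): it
# assumes this module runs inside the Unreal Editor's embedded Python, where
# the `unreal` module and this project's CppLib bindings are available, and
# that `some_object` is a placeholder for an already loaded asset or actor.
# static_mesh = cast(object_to_cast=some_object, object_class=unreal.StaticMesh)
# if static_mesh is not None:
#     print(getAllProperties(unreal.StaticMesh))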
| [
"[email protected]"
] | |
ff61f9d12ceebe86532e622aaecb819f0c39eb8f | e7b7505c084e2c2608cbda472bc193d4a0153248 | /LeetcodeNew/python/LC_573.py | a585295ef01ebf47f30590d98f2feb1f45f6657f | [] | no_license | Taoge123/OptimizedLeetcode | 8e5c1cd07904dfce1248bc3e3f960d2f48057a5d | 3e50f6a936b98ad75c47d7c1719e69163c648235 | refs/heads/master | 2023-02-27T21:13:40.450089 | 2023-02-07T04:11:09 | 2023-02-07T04:11:09 | 170,044,224 | 9 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,263 | py |
"""
There's a tree, a squirrel, and several nuts. Positions are represented by the cells in a 2D grid. Your goal is to find the minimal distance for the squirrel to collect all the nuts and put them under the tree one by one. The squirrel can only take at most one nut at one time and can move in four directions - up, down, left and right, to the adjacent cell. The distance is represented by the number of moves.
Example 1:
Input:
Height : 5
Width : 7
Tree position : [2,2]
Squirrel : [4,4]
Nuts : [[3,0], [2,5]]
Output: 12
Explanation:
Note:
All given positions won't overlap.
The squirrel can take at most one nut at one time.
The given positions of nuts have no order.
Height and width are positive integers. 3 <= height * width <= 10,000.
The given positions contain at least one nut, only one tree and one squirrel.
"""
class Solution:
def minDistance(self, height: int, width: int, tree, squirrel, nuts) -> int:
total = 0
dis = float('-inf')
for nut in nuts:
total += self.distance(nut, tree) * 2
dis = max(dis, self.distance(nut, tree) - self.distance(nut, squirrel))
return total - dis
def distance(self, a, b):
return abs(a[0] - b[0]) + abs(a[1] - b[1])
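# Illustrative sanity check (reproduces Example 1 from the docstring above:
# height 5, width 7, tree [2,2], squirrel [4,4], nuts [3,0] and [2,5]);
# the expected output is 12.
print(Solution().minDistance(5, 7, [2, 2], [4, 4], [[3, 0], [2, 5]]))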
| [
"[email protected]"
] | |
56113bdeea081fbc16893e7fdd670b1aea96fa36 | 4cdd73fe38027d41bda2959f940fc8a2a6c4ca78 | /l10n_ve_islr_report/__openerp__.py | 437bf359edf4cb159557ab769dca6e3d5272dd75 | [] | no_license | adrt271988/l10n_ve | af408fcc0bd2c87475beccd5ec92ee180d35a0d8 | 0a762490f4ee0a4257fb75dc5ea5607dec91d3bd | refs/heads/master | 2020-04-05T14:04:14.374612 | 2016-09-05T22:19:54 | 2016-09-05T22:19:54 | 53,200,710 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,654 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Formato Comprobante ISLR Venezuela',
'category': 'Account',
'version': '1.0',
'description': """
Formato Comprobante ISLR Venezuela
====================================================
* Adaptación Qweb del formato del Comprobante de Retencion ISLR
""",
'author': 'Alexander Rodriguez <[email protected]>',
'website': '',
'depends': ['l10n_ve_withholding_islr','report'],
'data': [
'report/islr_wh_doc_report.xml',
'account_report.xml',
],
'demo': [],
'test': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"[email protected]"
] | |
f49db6663ec0211a03497fae9b0a7d5b4a7ae930 | e5f1befb7c7ca0072747b33086fc6569a6befd01 | /old/videos/02.py | 18f9ca234fd808ffb6c7a2703c07f80bf5cdd3a6 | [] | no_license | nepomnyashchii/TestGit | ae08d8bb1b7d2ab9389a309fd1dc9e24729b019c | c7abf4ab08ee3c2f3ea1fb09a1938bff7a3e0e5c | refs/heads/master | 2020-04-28T23:41:51.053547 | 2020-01-24T12:22:40 | 2020-01-24T12:22:40 | 175,666,093 | 0 | 1 | null | 2019-03-15T13:44:03 | 2019-03-14T17:08:58 | null | UTF-8 | Python | false | false | 472 | py | import random
from random import randint
import math
# from random import *
from math import sqrt
from math import sqrt as my_sqrt
for element in range (10):
print(random.randint(1, 10))
#random object (randint function as method and 1,10 are arguments)
num =10
print(math.sqrt(num))
print(sqrt(num))
print(randint (1,10))
def sqrt():
print("my function")
# sqrt()
print(my_sqrt(25))
# STL # standard library of python
# PyPi
| [
"[email protected]"
] | |
abf24deb33aa929ed64e1f57511d697e2db26a85 | e1436eb68e51dcd1becb7e0f8671b51eb4b23ec0 | /desktop/kde/applications/parley/actions.py | 4ecd82fc5604b7c01eeb26036ad05c7448cec256 | [] | no_license | SulinOS/SulinKDE | bef0ebbecafa6082ad7599f377c95573468827fb | 9984e0f40a5a011e59d439a24856bde78deea1c2 | refs/heads/master | 2020-09-16T05:34:20.333558 | 2020-06-10T08:10:53 | 2020-06-10T08:10:53 | 223,669,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from inary.actionsapi import inarytools
from inary.actionsapi import kde
def setup():
kde.configure()
def build():
kde.make()
def install():
kde.install()
inarytools.dodoc("AUTHORS", "COPYING*", "TODO*")
| [
"[email protected]"
] | |
cf29657646e6dc10ca79da4d4d8f025b52a0bdd1 | 7416056e689dfc94391c4b108652cea02d59a31a | /reservation/migrations/0009_auto_20200128_0154.py | ee59b4e4dcefa53f8ef69cf34200507a3c15f18a | [] | no_license | zshanabek/house-booking-app | 0ea29fb8113671eb164ead8d335a986b850898a1 | cca5225f40b8a055a2db78810258325f2ba7ded1 | refs/heads/master | 2022-11-28T00:20:12.789534 | 2020-08-14T09:16:40 | 2020-08-14T09:16:40 | 225,791,244 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | # Generated by Django 2.2.7 on 2020-01-27 19:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reservation', '0008_auto_20200125_2102'),
]
operations = [
migrations.AlterField(
model_name='reservation',
name='check_in',
field=models.DateField(),
),
migrations.AlterField(
model_name='reservation',
name='check_out',
field=models.DateField(),
),
]
| [
"[email protected]"
] | |
f662e13a92f620780fa576c53bfe5eaaf4dc40d3 | 5dfbfa153f22b3f58f8138f62edaeef30bad46d3 | /ros_ws/build/baxter_examples/catkin_generated/pkg.develspace.context.pc.py | ce6228515fedb8113370e7fdd69d2285856a9049 | [] | no_license | adubredu/rascapp_robot | f09e67626bd5a617a569c9a049504285cecdee98 | 29ace46657dd3a0a6736e086ff09daa29e9cf10f | refs/heads/master | 2022-01-19T07:52:58.511741 | 2019-04-01T19:22:48 | 2019-04-01T19:22:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 587 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/bill/bill_ros/ros_ws/devel/include".split(';') if "/home/bill/bill_ros/ros_ws/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "rospy;xacro;actionlib;sensor_msgs;control_msgs;trajectory_msgs;cv_bridge;dynamic_reconfigure;baxter_core_msgs;baxter_interface".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "baxter_examples"
PROJECT_SPACE_DIR = "/home/bill/bill_ros/ros_ws/devel"
PROJECT_VERSION = "1.2.0"
| [
"[email protected]"
] | |
5b7b08cc64f27b668429b60c89c6dfff39a1be47 | 3e2ec14daf3e246334e175719bc38adcf15cee5a | /challenges/graphs/black_shapes.py | 711d0422815e2bd4f14aaa3777a4b3ce9f1aaf6a | [
"CC0-1.0"
] | permissive | lukasmartinelli/sharpen | a616ee981d81efb2c844c5106ce30bd97f36e034 | 6f314fc2aa17990ede04055e7c3ac9394a6c12c0 | refs/heads/master | 2021-01-20T12:11:25.452306 | 2019-06-08T21:06:12 | 2019-06-08T21:06:12 | 58,558,368 | 13 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,670 | py | import collections
def adjacent_black_fields(matrix, row_idx, col_idx):
adjacent = [(row_idx + 1, col_idx), (row_idx - 1, col_idx),
(row_idx, col_idx + 1), (row_idx, col_idx - 1)]
def is_within_matrix(row_idx, col_idx):
row_count = len(matrix)
col_count = len(matrix[0])
return 0 <= row_idx < row_count and 0 <= col_idx < col_count
def is_black(row_idx, col_idx):
return matrix[row_idx][col_idx] == 'X'
return [f for f in adjacent if is_within_matrix(f[0], f[1]) and
is_black(f[0], f[1])]
def find_black_fields(matrix):
for row_idx, row in enumerate(matrix):
for col_idx, field in enumerate(row):
if field == 'X':
yield (row_idx, col_idx)
def count_black_shapes(matrix):
part_of_shape = {}
def is_part_of_shape(row_idx, col_idx):
return (row_idx, col_idx) in part_of_shape
def mark_shape(row_idx, col_idx):
part_of_shape[(row_idx, col_idx)] = True
for row_idx, col_idx in adjacent_black_fields(matrix, row_idx, col_idx):
if not is_part_of_shape(row_idx, col_idx):
mark_shape(row_idx, col_idx)
shape_count = 0
for row_idx, col_idx in find_black_fields(matrix):
if not is_part_of_shape(row_idx, col_idx):
shape_count += 1
mark_shape(row_idx, col_idx)
return shape_count
def test_single_black_shape():
matrix = ['XXX', 'XXX', 'XXX']
assert count_black_shapes(matrix) == 1
def test_multipel_black_shape():
matrix = ['OOOXOOO',
'OOXXOXO',
'OXOOOXO']
assert count_black_shapes(matrix) == 3
| [
"[email protected]"
] | |
83f67eb1126eb3952caf34740d621467f28863e0 | 3a19c1b17f553b6d54e5c345d550ca494c3593e1 | /td1-problem22.py | 4130c59d812ac8cfd5ae3a90bfb24b370ea9992d | [] | no_license | mines-nancy-tcss5ac-2018/td1-TomLaville | 95faf73aca9375fe7ba990043e9c371713524eaa | 4967cda4594b7706d8edcdaf99a7945ea90ad8e3 | refs/heads/master | 2020-03-31T00:16:54.328127 | 2018-10-07T19:05:42 | 2018-10-07T19:05:42 | 151,733,523 | 0 | 0 | null | null | null | null | IBM852 | Python | false | false | 876 | py | values = ["\"","A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M","N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"]
def scoreName(nom):
s = 0
for char in nom:
s+= values.index(char)
return s
def solve():
    noms = [] ## list that holds all the names
    ## list useful for the score
    ## convert the txt file into a list of names
f = open('p022_names.txt', 'r')
for l in f:
        noms += l.split(',') ## read the file and split the names on ','
    ## so the names always keep their surrounding ""
    ## sort
noms_tries = sorted(noms, reverse = False)
    ## compute the score
score_tot = 0
for i in range(len(noms_tries)):
score_tot += (i+1)*scoreName(noms_tries[i])
return score_tot
print(solve())
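# Worked example for scoreName (illustration only): each name is stored in the
# file with its surrounding quotes, e.g. '"COLIN"'; the quote character maps to
# index 0 in `values`, so scoreName('"COLIN"') == 3 + 15 + 12 + 9 + 14 == 53.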
| [
"[email protected]"
] | |
5388aa37ce371ef5f7f6cb2f18770e3a8791d1bd | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /MY_REPOS/misc-experiments/_FIREBFIRE/grpc-SwiftPM/tools/run_tests/run_microbenchmark.py | 4b9cd4bc8e85661f9a9cfeefbcc310b227710f5d | [
"MIT",
"Apache-2.0"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 9,289 | py | #!/usr/bin/env python
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cgi
import multiprocessing
import os
import subprocess
import sys
import argparse
import python_utils.jobset as jobset
import python_utils.start_port_server as start_port_server
sys.path.append(
os.path.join(os.path.dirname(sys.argv[0]), '..', 'profiling',
'microbenchmarks', 'bm_diff'))
import bm_constants
flamegraph_dir = os.path.join(os.path.expanduser('~'), 'FlameGraph')
os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
if not os.path.exists('reports'):
os.makedirs('reports')
start_port_server.start_port_server()
def fnize(s):
out = ''
for c in s:
if c in '<>, /':
if len(out) and out[-1] == '_': continue
out += '_'
else:
out += c
return out
# index html
index_html = """
<html>
<head>
<title>Microbenchmark Results</title>
</head>
<body>
"""
def heading(name):
global index_html
index_html += "<h1>%s</h1>\n" % name
def link(txt, tgt):
global index_html
index_html += "<p><a href=\"%s\">%s</a></p>\n" % (cgi.escape(
tgt, quote=True), cgi.escape(txt))
def text(txt):
global index_html
index_html += "<p><pre>%s</pre></p>\n" % cgi.escape(txt)
def collect_latency(bm_name, args):
"""generate latency profiles"""
benchmarks = []
profile_analysis = []
cleanup = []
heading('Latency Profiles: %s' % bm_name)
subprocess.check_call([
'make', bm_name, 'CONFIG=basicprof', '-j',
'%d' % multiprocessing.cpu_count()
])
for line in subprocess.check_output(
['bins/basicprof/%s' % bm_name, '--benchmark_list_tests']).splitlines():
link(line, '%s.txt' % fnize(line))
benchmarks.append(
jobset.JobSpec([
'bins/basicprof/%s' % bm_name,
'--benchmark_filter=^%s$' % line, '--benchmark_min_time=0.05'
],
environ={
'GRPC_LATENCY_TRACE': '%s.trace' % fnize(line)
},
shortname='profile-%s' % fnize(line)))
profile_analysis.append(
jobset.JobSpec([
sys.executable,
'tools/profiling/latency_profile/profile_analyzer.py',
'--source',
'%s.trace' % fnize(line), '--fmt', 'simple', '--out',
'reports/%s.txt' % fnize(line)
],
timeout_seconds=20 * 60,
shortname='analyze-%s' % fnize(line)))
cleanup.append(jobset.JobSpec(['rm', '%s.trace' % fnize(line)]))
# periodically flush out the list of jobs: profile_analysis jobs at least
# consume upwards of five gigabytes of ram in some cases, and so analysing
# hundreds of them at once is impractical -- but we want at least some
# concurrency or the work takes too long
if len(benchmarks) >= min(16, multiprocessing.cpu_count()):
# run up to half the cpu count: each benchmark can use up to two cores
# (one for the microbenchmark, one for the data flush)
jobset.run(benchmarks,
maxjobs=max(1,
multiprocessing.cpu_count() / 2))
jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
benchmarks = []
profile_analysis = []
cleanup = []
# run the remaining benchmarks that weren't flushed
if len(benchmarks):
jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count() / 2))
jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
def collect_perf(bm_name, args):
"""generate flamegraphs"""
heading('Flamegraphs: %s' % bm_name)
subprocess.check_call([
'make', bm_name, 'CONFIG=mutrace', '-j',
'%d' % multiprocessing.cpu_count()
])
benchmarks = []
profile_analysis = []
cleanup = []
for line in subprocess.check_output(
['bins/mutrace/%s' % bm_name, '--benchmark_list_tests']).splitlines():
link(line, '%s.svg' % fnize(line))
benchmarks.append(
jobset.JobSpec([
'perf', 'record', '-o',
'%s-perf.data' % fnize(line), '-g', '-F', '997',
'bins/mutrace/%s' % bm_name,
'--benchmark_filter=^%s$' % line, '--benchmark_min_time=10'
],
shortname='perf-%s' % fnize(line)))
profile_analysis.append(
jobset.JobSpec(
[
'tools/run_tests/performance/process_local_perf_flamegraphs.sh'
],
environ={
'PERF_BASE_NAME': fnize(line),
'OUTPUT_DIR': 'reports',
'OUTPUT_FILENAME': fnize(line),
},
shortname='flame-%s' % fnize(line)))
cleanup.append(jobset.JobSpec(['rm', '%s-perf.data' % fnize(line)]))
cleanup.append(jobset.JobSpec(['rm', '%s-out.perf' % fnize(line)]))
# periodically flush out the list of jobs: temporary space required for this
# processing is large
if len(benchmarks) >= 20:
# run up to half the cpu count: each benchmark can use up to two cores
# (one for the microbenchmark, one for the data flush)
jobset.run(benchmarks, maxjobs=1)
jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
benchmarks = []
profile_analysis = []
cleanup = []
# run the remaining benchmarks that weren't flushed
if len(benchmarks):
jobset.run(benchmarks, maxjobs=1)
jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
def run_summary(bm_name, cfg, base_json_name):
subprocess.check_call([
'make', bm_name,
'CONFIG=%s' % cfg, '-j',
'%d' % multiprocessing.cpu_count()
])
cmd = [
'bins/%s/%s' % (cfg, bm_name),
'--benchmark_out=%s.%s.json' % (base_json_name, cfg),
'--benchmark_out_format=json'
]
if args.summary_time is not None:
cmd += ['--benchmark_min_time=%d' % args.summary_time]
return subprocess.check_output(cmd)
def collect_summary(bm_name, args):
heading('Summary: %s [no counters]' % bm_name)
text(run_summary(bm_name, 'opt', bm_name))
heading('Summary: %s [with counters]' % bm_name)
text(run_summary(bm_name, 'counters', bm_name))
if args.bigquery_upload:
with open('%s.csv' % bm_name, 'w') as f:
f.write(
subprocess.check_output([
'tools/profiling/microbenchmarks/bm2bq.py',
'%s.counters.json' % bm_name,
'%s.opt.json' % bm_name
]))
subprocess.check_call([
'bq', 'load', 'microbenchmarks.microbenchmarks',
'%s.csv' % bm_name
])
collectors = {
'latency': collect_latency,
'perf': collect_perf,
'summary': collect_summary,
}
argp = argparse.ArgumentParser(description='Collect data from microbenchmarks')
argp.add_argument('-c',
'--collect',
choices=sorted(collectors.keys()),
nargs='*',
default=sorted(collectors.keys()),
help='Which collectors should be run against each benchmark')
argp.add_argument('-b',
'--benchmarks',
choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
nargs='+',
type=str,
help='Which microbenchmarks should be run')
argp.add_argument('--bigquery_upload',
default=False,
action='store_const',
const=True,
help='Upload results from summary collection to bigquery')
argp.add_argument(
'--summary_time',
default=None,
type=int,
help='Minimum time to run benchmarks for the summary collection')
args = argp.parse_args()
try:
for collect in args.collect:
for bm_name in args.benchmarks:
collectors[collect](bm_name, args)
finally:
if not os.path.exists('reports'):
os.makedirs('reports')
index_html += "</body>\n</html>\n"
with open('reports/index.html', 'w') as f:
f.write(index_html)
| [
"[email protected]"
] | |
7acffc09af312c4cf50348b8c11ac1c2f7a9299c | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/16/usersdata/134/7136/submittedfiles/triangulo.py | ed25ffd7bbcc72ababe8bfa8ba20892f52ef7ec7 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | # -*- coding: utf-8 -*-
from __future__ import division
import math
a = input('Digite o valor de a:')
b = input('Digite o valor de b:')
c = input('Digite o valor de c:')
if a>=b>=c and a<(b+c) :
print ('S')
if (a**2)==((b**2)+(c**2)):
print
| [
"[email protected]"
] | |
0880e3a123ede3bcf2211f08a1ccf3132ed4b202 | 27f7ab32a865f4ae6b62d0b2a6a890352fea0cba | /ifmo_certs/courses/x0009_02/__init__.py | c0c67542929f42b7691098b602532d6a2ddb5444 | [] | no_license | defance/edx-ifmo-mod | 38b84381814b103e5cbf07419b1e1e142bc61a70 | 1b86a85a32c06322ab429d323f2ff2be84d0a1cc | refs/heads/master | 2021-01-22T20:59:42.575809 | 2015-10-28T12:57:43 | 2015-10-28T12:57:43 | 21,855,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60 | py | __author__ = 'd.ivanyushin'
from .x0009_02 import X0009_02
| [
"[email protected]"
] | |
05947191f7a5ddb2a9ff5e8e0385d1616f07bd04 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/machinelearningservices/v20210301preview/get_job.py | e9a9f325635bdf8571c3b2144867f1b62d2a343b | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 4,080 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetJobResult',
'AwaitableGetJobResult',
'get_job',
]
@pulumi.output_type
class GetJobResult:
"""
Azure Resource Manager resource envelope.
"""
def __init__(__self__, id=None, name=None, properties=None, system_data=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> Any:
"""
Additional attributes of the entity.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
System data associated with resource provider
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetJobResult(GetJobResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetJobResult(
id=self.id,
name=self.name,
properties=self.properties,
system_data=self.system_data,
type=self.type)
def get_job(id: Optional[str] = None,
resource_group_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetJobResult:
"""
Azure Resource Manager resource envelope.
:param str id: The name and identifier for the Job.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_name: Name of Azure Machine Learning workspace.
"""
__args__ = dict()
__args__['id'] = id
__args__['resourceGroupName'] = resource_group_name
__args__['workspaceName'] = workspace_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:machinelearningservices/v20210301preview:getJob', __args__, opts=opts, typ=GetJobResult).value
return AwaitableGetJobResult(
id=__ret__.id,
name=__ret__.name,
properties=__ret__.properties,
system_data=__ret__.system_data,
type=__ret__.type)
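# Hypothetical usage inside a Pulumi program (the three argument values below
# are placeholders for illustration, not names taken from this repository):
# job = get_job(id="sample-job",
#               resource_group_name="sample-resource-group",
#               workspace_name="sample-workspace")
# pulumi.export("jobName", job.name)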
| [
"[email protected]"
] | |
69291f4bb7b082fcb63d8d0a0ab580ce63b63c2a | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /GX3pQxvbTJApWYgRJ_22.py | ea51d20994790e177a6d6bdea78c3a0e36e6bd7b | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,291 | py | """
A Kaprekar Number is a positive integer that is equal to a number formed by
first squaring, then splitting and summing its two lexicographical parts:
* If the quantity of digits of the squared number is even, the left and right parts will have the same length.
* If the quantity of digits of the squared number is odd, then the right part will be the longer half, with the left part being the shorter or equal to zero if the quantity of digits is equal to 1.
Given a positive integer `n` implement a function that returns `True` if it's
a Kaprekar number, and `False` if it's not.
### Examples
is_kaprekar(3) ➞ False
# n² = "9"
# Left + Right = 0 + 9 = 9 ➞ 9 != 3
is_kaprekar(5) ➞ False
# n² = "25"
# Left + Right = 2 + 5 = 7 ➞ 7 != 5
is_kaprekar(297) ➞ True
# n² = "88209"
# Left + Right = 88 + 209 = 297 ➞ 297 == 297
### Notes
Trivially, 0 and 1 are Kaprekar Numbers being the only two numbers equal to
their square. Any number formed only by digits equal to _9_ will always be a
Kaprekar Number.
"""
def is_kaprekar(n):
if n in [0,1]:
return True
test = str(n**2)
if len(test) == 1:
return False
left = test[:len(test)//2]
right = test[len(test)//2:]
return int(left) + int(right) == n
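# Illustrative sanity check mirroring the three docstring examples above;
# the expected output is "False False True".
print(is_kaprekar(3), is_kaprekar(5), is_kaprekar(297))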
| [
"[email protected]"
] | |
a56febed6885c0b35f0a30ddce4934f5b6836066 | 53396d12d606bebea71c149aed0150af7b17b6f5 | /array/medium/221-maximal-square-1.py | b3885e1f68c33ae2c1dc19434984e2ec1137c8ff | [] | no_license | superggn/myleetcode | 4c623bd9ad3892d826df73ad3b2c122e08aaa9e9 | 40ca33aefbf0cf746a2d0b7e7f52643ae39591be | refs/heads/master | 2023-02-02T11:06:35.163570 | 2020-12-19T10:36:45 | 2020-12-19T10:36:45 | 322,821,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 837 | py | """
dp
https://leetcode-cn.com/problems/maximal-square/solution/zui-da-zheng-fang-xing-by-leetcode-solution/
"""
from typing import List
class Solution:
def maximalSquare(self, matrix: List[List[str]]) -> int:
if len(matrix) == 0 or len(matrix[0]) == 0:
return 0
maxSide = 0
rows, columns = len(matrix), len(matrix[0])
dp = [[0] * columns for _ in range(rows)]
for i in range(rows):
for j in range(columns):
if matrix[i][j] == '1':
if i == 0 or j == 0:
dp[i][j] = 1
else:
dp[i][j] = min(dp[i - 1][j], dp[i][j - 1], dp[i - 1][j - 1]) + 1
maxSide = max(maxSide, dp[i][j])
maxSquare = maxSide * maxSide
return maxSquare
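# Illustrative usage on a small sample grid (not from the original file);
# the largest all-'1' square here has side 2, so this should print 4.
if __name__ == "__main__":
    sample = [["1", "0", "1", "0", "0"],
              ["1", "0", "1", "1", "1"],
              ["1", "1", "1", "1", "1"],
              ["1", "0", "0", "1", "0"]]
    print(Solution().maximalSquare(sample))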
| [
"[email protected]"
] | |
33dc808475234978f454c5997f1ea1bd3996a31e | 05ace4491b97699333057e35f7e9225864f7130d | /dygraphsex/urls.py | 689201cc230eb577bc30387699482d5ada5e4179 | [] | no_license | scott858/dajs | b6878123748f563550fa2f5e59b1d5dcd4fdcaa5 | bc6b23d0e24be038e278490e34422d69b06d6543 | refs/heads/master | 2021-01-01T16:12:43.477340 | 2015-09-18T01:04:55 | 2015-09-18T01:04:55 | 41,645,401 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns(
'',
url(r'^$', views.example_app_view, name='main'),
url(r'^plot/$', views.plot_view, name='plot'),
)
| [
"[email protected]"
] | |
08482fe6214b2752de4f244241d1bee84840b9a8 | 47243c719bc929eef1475f0f70752667b9455675 | /bungeni.main/branches/oauth/bungeni/alchemist/type_info.py | 2d6b1645d913fc2122f0e3dbcb9f1f7071449eff | [] | no_license | malangalanga/bungeni-portal | bbf72ce6d69415b11287a8796b81d4eb6520f03a | 5cf0ba31dfbff8d2c1b4aa8ab6f69c7a0ae9870d | refs/heads/master | 2021-01-19T15:31:42.943315 | 2014-11-18T09:03:00 | 2014-11-18T09:03:00 | 32,453,405 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,806 | py | # Bungeni Parliamentary Information System - http://www.bungeni.org/
# Copyright (C) 2010 - Africa i-Parliaments - http://www.parliaments.info/
# Licensed under GNU GPL v2 - http://www.gnu.org/licenses/gpl-2.0.txt
"""Aggregation of information about loaded domain types.
No public methods here -- all available methods from this are those exposed
via bungeni.capi.
$Id$
"""
log = __import__("logging").getLogger("bungeni.alchemist.type_info")
from zope.interface.interfaces import IInterface
from zope.security.proxy import removeSecurityProxy
from zope.dottedname.resolve import resolve
from bungeni.alchemist.interfaces import IModelDescriptor, IIModelInterface
from bungeni.alchemist.model import (
new_custom_domain_interface,
new_custom_domain_model,
)
from bungeni.alchemist.catalyst import (
INTERFACE_MODULE,
MODEL_MODULE
)
from bungeni.models import interfaces
from bungeni.models import domain
from bungeni.core.workflow.interfaces import IWorkflow
from bungeni.utils import naming
__all__ = []
# acessors exposed via capi
def _iter():
"""Return iterator on all (key, TypeInfo) entries in TYPE_REGISTRY.
Usage: capi.iter_type_info()
"""
for type_key, ti in TYPE_REGISTRY:
yield type_key, ti
def _get(discriminator):
"""Get the TypeInfo instance for discriminator, that may be any of:
type_key: str (the lowercase underscore-separated of domain cls name)
workflow: an instance of Workflow, provides IWorkflow
interface: provides IInterface
domain model: provides IBungeniContent
domain model instance: type provides IBungeniContent
descriptor: provides IModelDescriptor
Raise KeyError if no entry matched.
Usage: capi.get_type_info(discriminator)
"""
if discriminator is None:
m = "type_info._get discriminator is None"
log.error(m)
raise ValueError(m)
discri = removeSecurityProxy(discriminator)
getter = None
# !+IALCHEMISTCONTENT normalize trickier discriminator cases to type_key
if IIModelInterface.providedBy(discri):
discri = naming.type_key("table_schema_interface_name", discri.__name__)
elif IInterface.providedBy(discri):
discri = naming.type_key("model_interface_name", discri.__name__)
elif type(discri) is type and issubclass(discri, domain.Entity):
discri = naming.polymorphic_identity(discri)
elif isinstance(discri, domain.Entity):
discri = naming.polymorphic_identity(type(discri))
if isinstance(discri, basestring):
getter = _get_by_type_key
#elif IInterface.providedBy(discri):
# getter = _get_by_interface
#!+elif interfaces.IBungeniContent.implementedBy(discri):
#elif issubclass(discri, domain.Entity):
# getter = _get_by_model
#!+elif interfaces.IBungeniContent.providedBy(discri):
#elif isinstance(discri, domain.Entity):
# getter = _get_by_instance
elif IWorkflow.providedBy(discri):
getter = _get_by_workflow
elif IModelDescriptor.implementedBy(discri):
getter = _get_by_descriptor_model
if getter is not None:
ti = getter(discri)
if ti is not None:
return ti
else:
m = "No type registered for discriminator: %r" % (discriminator)
else:
m = "Invalid type info lookup discriminator: %r" % (discriminator)
from bungeni.ui.utils import debug
log.debug(debug.interfaces(discriminator))
log.debug(m)
raise KeyError(m)
# following getters return "first matching" TypeInfo instance in registry
def _get_by_type_key(key):
for type_key, ti in _iter():
if type_key == key:
return ti
#def _get_by_interface(iface):
''' !+IALCHEMISTCONTENT fails on different interfaces with same name!
(Pdb) ti.interface
<InterfaceClass bungeni.models.interfaces.ISession>
(Pdb) ti.interface.__bases__
(<InterfaceClass ore.alchemist.interfaces.ITableSchema>, <InterfaceClass ore.alchemist.interfaces.IAlchemistContent>)
(Pdb) iface
<InterfaceClass bungeni.models.interfaces.ISession>
(Pdb) iface.__bases__
(<InterfaceClass zope.interface.Interface>,)
'''
# for type_key, ti in _iter():
# if iface is ti.interface: #!+issubclass(iface, ti.interface)?
# return ti
def _get_by_model(model):
for type_key, ti in _iter():
if model is ti.domain_model: #!+issubclass(model, ti.domain_model)?
return ti
def _get_by_instance(instance):
return _get_by_model(type(instance))
def _get_by_workflow(wf):
for type_key, ti in _iter():
if wf is ti.workflow:
return ti
def _get_by_descriptor_model(descriptor_model):
for type_key, ti in _iter():
if descriptor_model is ti.descriptor_model:
return ti
#
class TI(object):
"""TypeInfo, associates together the following attributes for a given type:
workflow_key
the workflow file name
defaults to the type_key for workflowed types that DO NOT specify
is None for non-workflowed types
workflow
same workflow insatnce may be used by multiple types
is None for non-workflowed types
interface
the manually applied application-dedicated model interface
(if any) for the type
derived_table_schema
auto-generated db schema interface, provides IIModelInterface
domain_model
the domain class
descriptor_model
the descriptor model for UI views for the type
container_class
container class for domain_model
container_interface
interface for the container class for domain_model
"""
def __init__(self, workflow_key, iface, domain_model=None):
self.workflow_key = workflow_key
self.interface = iface
self.derived_table_schema = None # provides IIModelInterface
self.workflow = None
self.domain_model = domain_model
self.descriptor_model = None
self.container_class = None
self.container_interface = None
self.custom = False # type loaded from custom configuration
# NOTE: only needed temporarily (until descriptor_model is set),
        # then ti.custom cannot be inconsistent with descriptor_model.scope i.e.
#if self.custom: assert self.descriptor_model.scope == "custom"
# !+ archetype_key?
def __str__(self):
return str(self.__dict__)
@property
def scope(self):
# !+CUSTOM_TYPE_DESCRIPTOR the self.custom check below MUST precede the
# check on self.descriptor_model.scope as otherwise the "in-transit"
# custom types will not be picked up as custom types -- as during
# loading the descriptors for all custom types may not yet have been
# autogenerated (and would therefore correctly have
# descriptor_model.scope="custom" set).
if self.custom:
return "custom"
if self.descriptor_model is not None:
return self.descriptor_model.scope
@property
def permission_type_key(self):
if self.custom:
# custom types ALWAYS have a type_key-bound workflow instance - that
# may therefore have a different name than workflow_key e.g. Office
# uses the "group" workflow, that is type-relative reloaded as the
# "office" workflow instance.
return self.workflow.name
# system types ALWAYS use workflow_key - even if multiple types use the
# same workflow e.g. UserAddress & GroupAddress.
# if no workflow, compute type_key from domain_model
# #!+REDUNDANT(mb, 2012) This type key is already known during type
# setup i.e. TYPE_REGISTRY
return (self.workflow_key or
naming.type_key("model_name", self.domain_model.__name__)
)
'''
!+TYPE_REGISTRY externalize further to bungeni_custom, currently:
- association of type key and dedicated interface are hard-wired here
- ti.workflow/ti.domain_model/ti.descriptor are added dynamically when
loading workflows and descriptors
- type_key IS the underscore-separated lowercase of the domain cls name
i.e. utils.naming.polymorphic_identity(domain_model)
- !+ ti.workflow_key SHOULD always be equal to type_key
- !+ corresponding Container/Version/X interfaces should ALWAYS be auto-generated
- !+ dedicated interfaces for archetype incantations should be auto-generated,
from specific workflow name/attr... e.g. via:
zope.interface.interface.InterfaceClass(iname, bases, __module__)
- !+ should ti.interface be automatically generated also for system types?
Usage:
from bungeni.capi import capi
capi.get_type_info(discriminator) -> TypeInfo
capi.iter_type_info() -> iterator of all registered (key, TypeInfo)
'''
TYPE_REGISTRY = [
# (key, ti)
# - the type key, unique for each type, is the underscore-separated
# lowercase name of the domain_model (the domain class)
# - order is relevant (dictates workflow loading order)
# feature "support" types, system types, required
# workflowed
("user_address", TI("address", interfaces.IUserAddress)),
("group_address", TI("address", interfaces.IGroupAddress)),
# !+Attachment (mr, jul-2011)
# a) must be loaded before any other type that *may* support attachments!
# b) MUST support versions
("attachment", TI("attachment", interfaces.IAttachment)),
("event", TI("event", interfaces.IEvent)),
("sitting", TI("sitting", interfaces.ISitting)),
("heading", TI("heading", interfaces.IHeading)),
("user", TI("user", interfaces.IBungeniUser)),
("signatory", TI("signatory", interfaces.ISignatory)),
# !+NAMING: member-related -> Group name + "Member" (no + "ship")
("group", TI("group", interfaces.IBungeniGroup)),
("group_membership", TI("group_membership", interfaces.IBungeniGroupMembership)),
("group_document_assignment", TI("group_assignment", interfaces.IGroupDocumentAssignment)),
("debate_record", TI("debate_record", interfaces.IDebateRecord)),
# non-workflowed
("o_auth_application", TI(None, interfaces.IOAuthApplication)),
("debate_media", TI(None, interfaces.IDebateMedia)),
("user_delegation", TI(None, interfaces.IUserDelegation)),
("title_type", TI(None, interfaces.ITitleType)),
("member_title", TI(None, interfaces.IMemberTitle)),
("change", TI(None, interfaces.IChange)),
("doc", TI(None, interfaces.IDoc)),
("doc_version", TI(None, None)), #interfaces.IDocVersion)), #!+IVERSION
("attachment_version", TI(None, None)), #interfaces.IAttachmentVersion)), #!+IVERSION
("venue", TI(None, interfaces.IVenue)),
("session", TI(None, interfaces.ISession)),
("sitting_attendance", TI(None, interfaces.ISittingAttendance)),
("country", TI(None, interfaces.ICountry)),
("item_schedule", TI(None, interfaces.IItemSchedule)),
("item_schedule_discussion", TI(None, interfaces.IItemScheduleDiscussion)),
("item_schedule_vote", TI(None, interfaces.IItemScheduleVote)),
("editorial_note", TI(None, interfaces.IEditorialNote)),
("sitting_report", TI(None, interfaces.ISittingReport)),
("group_membership_role", TI(None, interfaces.IGroupMembershipRole)),
# additional custom types are loaded dynamically from bungeni_custom/types.xml
]
# register custom types
def register_new_custom_type(type_key, workflow_key, archetype_key):
"""Retrieve (create if needed) a domain interface and model for type_key,
and register as new entry on TYPE_REGISTER.
"""
# generate custom domain interface
domain_iface_name = naming.model_interface_name(type_key)
try:
domain_iface = resolve("%s.%s" % (INTERFACE_MODULE.__name__, domain_iface_name))
log.warn("Custom interface ALREADY EXISTS: %s" % (domain_iface))
except ImportError:
domain_iface = new_custom_domain_interface(type_key, domain_iface_name)
# generate custom domain_model
domain_model_name = naming.model_name(type_key)
try:
domain_model = resolve("%s.%s" % (MODEL_MODULE.__name__, domain_model_name))
log.warn("Custom domain model ALREADY EXISTS: %s" % (domain_model))
except ImportError:
domain_model = new_custom_domain_model(type_key, domain_iface, archetype_key)
# type_info entry
ti = TI(workflow_key, domain_iface, domain_model)
ti.custom = True
TYPE_REGISTRY.append((type_key, ti))
log.info("Registered custom type [%s]: %s" % (archetype_key, type_key))
return type_key, ti
| [
"mianonjoka@fc5d704a-7d24-0410-8c4a-57ddeba10ffc"
] | mianonjoka@fc5d704a-7d24-0410-8c4a-57ddeba10ffc |
8de4c818ca766df5a345ae0b90065e5d770de5b1 | bc4656f6f74911f114626538294e0e275105c703 | /tests/dat/test_dar_packet.py | b02d91483b4179221c025a2cbc331f1950d03916 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | AdrianCano-01/spsdk | d8679ae58fc67c6369bceff4b31db658d9ad6bc4 | 4a31fb091f95fb035bc66241ee4e02dabb580072 | refs/heads/master | 2023-03-15T00:37:07.419191 | 2021-03-05T16:33:50 | 2021-03-05T16:33:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,839 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright 2020 NXP
#
# SPDX-License-Identifier: BSD-3-Clause
"""Tests with Debug Authentication Packet (DAR) Packet."""
import os
import pytest
import yaml
from spsdk.dat.dar_packet import DebugAuthenticateResponse
from spsdk.dat import DebugAuthenticationChallenge as DAC
from spsdk.dat.debug_credential import DebugCredential as DC
from spsdk.utils.misc import load_binary, use_working_directory
@pytest.mark.parametrize(
"yml_file_name, dac_bin_file, version, dck_key_file, expected_length",
[
('new_dck_rsa2048.yml', 'sample_dac.bin', '1.0', 'new_dck_2048.pem', 1200),
('new_dck_secp256.yml', 'sample_dac_ecc.bin', '2.0', 'new_dck_secp256r1.pem', 968)
]
)
def test_dar_packet_rsa(tmpdir, data_dir, yml_file_name, version, dck_key_file, expected_length, dac_bin_file):
with use_working_directory(data_dir):
dac_bytes = load_binary(os.path.join(data_dir, dac_bin_file))
with open(os.path.join(data_dir, yml_file_name), 'r') as f:
yaml_config = yaml.safe_load(f)
dc = DC.create_from_yaml_config(version=version, yaml_config=yaml_config)
dc.sign()
assert dc.VERSION == DAC.parse(dac_bytes).version, "Version of DC and DAC are different."
dar = DebugAuthenticateResponse.create(version=version, socc=dc.socc, dc=dc,
auth_beacon=0, dac=DAC.parse(dac_bytes),
dck=os.path.join(data_dir, dck_key_file))
dar_bytes = dar.export()
assert len(dar_bytes) == expected_length
assert isinstance(dar_bytes, bytes)
assert 'Authentication Beacon' in dar.info()
@pytest.mark.parametrize(
"yml_file_name, version, file_key, expected_length",
[
('new_dck_secp256_N4A.yml', '2.0', 'new_dck_secp256r1.pem', 316),
('new_dck_secp384_N4A.yml', '2.1', 'new_dck_secp384r1.pem', 444)
]
)
def test_dar_packet_4_analog_256(tmpdir, data_dir, yml_file_name, version, file_key, expected_length):
with use_working_directory(data_dir):
dac_bytes = load_binary(os.path.join(data_dir, 'sample_dac_analog.bin'))
with open(os.path.join(data_dir, yml_file_name), 'r') as f:
yaml_config = yaml.safe_load(f)
dc = DC.create_from_yaml_config(version=version, yaml_config=yaml_config)
dc.sign()
dar = DebugAuthenticateResponse.create(version=version, socc=dc.socc, dc=dc,
auth_beacon=0, dac=DAC.parse(dac_bytes),
dck=os.path.join(data_dir, file_key))
dar_bytes = dar.export()
assert len(dar_bytes) == expected_length
assert isinstance(dar_bytes, bytes)
assert 'Authentication Beacon' in dar.info()
| [
"[email protected]"
] | |
ef1d77c3ead5c963da4bda0a6758391542a24536 | 234b581de16f0eebfe3db5281d2920d50e3a3631 | /src/com/dtmilano/android/adb/dumpsys.py | efd9d4962f5cb6e26555547c6475bd84fb789c06 | [
"Apache-2.0"
] | permissive | jili0503/AndroidViewClient | 3d453884d68b508fe4d5d28f5bcea0db0cad6062 | c1c38e6fa53dc09697eadb9c1670d6bef8587ab6 | refs/heads/master | 2020-03-06T21:07:33.744022 | 2018-03-22T04:15:50 | 2018-03-22T04:15:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,612 | py | '''
Copyright (C) 2012-2018 Diego Torres Milano
Created on Dec 1, 2012
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: Diego Torres Milano
'''
from __future__ import print_function
import re
import sys
from _warnings import warn
__version__ = '14.0.0'
DEBUG = False
class Dumpsys:
FRAMESTATS = 'framestats'
GFXINFO = 'gfxinfo'
MEMINFO = 'meminfo'
RESET = 'reset'
ACTIVITIES = 'activities'
TOTAL = 'total'
VIEW_ROOT_IMPL = 'viewRootImpl'
VIEWS = 'views'
FLAGS = 0
INTENDED_VSYNC = 1
FRAME_COMPLETED = 13
def __init__(self, adbclient, subcommand, *args):
self.nativeHeap = -1
self.dalvikHeap = -1
self.total = 0
self.views = -1
self.activities = -1
self.appContexts = -1
self.viewRootImpl = -1
self.gfxProfileData = []
self.framestats = []
if args:
args_str = ' '.join(args)
else:
args_str = ''
if adbclient:
cmd = 'dumpsys ' + subcommand + (' ' + args_str if args_str else '')
self.parse(adbclient.shell(cmd), subcommand, *args)
else:
warn('No adbclient specified')
@staticmethod
def listSubCommands(adbclient):
return Dumpsys(adbclient, '-l')
@staticmethod
def meminfo(adbclient, args=None):
return Dumpsys(adbclient, Dumpsys.MEMINFO, args)
def get(self, name):
return getattr(self, name)
def parse(self, out, subcommand, *args):
if subcommand == Dumpsys.MEMINFO:
self.parseMeminfo(out)
elif subcommand == Dumpsys.GFXINFO:
if Dumpsys.RESET in args:
# Actually, reset does not need to parse anything
pass
elif Dumpsys.FRAMESTATS in args:
self.parseGfxinfoFramestats(out)
else:
self.parseGfxinfo(out)
        elif subcommand == '-l':
# list dumpsys subcommands
return out
else:
pass
def parseMeminfo(self, out):
m = re.search('Native Heap[ \t]*(\d+)', out, re.MULTILINE)
if m:
self.nativeHeap = int(m.group(1))
m = re.search('Dalvik Heap[ \t]*(\d+)', out, re.MULTILINE)
if m:
self.dalvikHeap = int(m.group(1))
m = re.search('Views:[ \t]*(\d+)', out, re.MULTILINE)
if m:
self.views = int(m.group(1))
m = re.search('Activities:[ \t]*(\d+)', out, re.MULTILINE)
if m:
self.activities = int(m.group(1))
m = re.search('AppContexts:[ \t]*(\d+)', out, re.MULTILINE)
if m:
self.appContexts = int(m.group(1))
m = re.search('ViewRootImpl:[ \t]*(\d+)', out, re.MULTILINE)
if m:
self.viewRootImpl = int(m.group(1))
m = re.search('TOTAL[ \t]*(\d+)', out, re.MULTILINE)
if m:
self.total = int(m.group(1))
else:
raise RuntimeError('Cannot find TOTAL in "' + out + '"')
def parseGfxinfo(self, out):
pass
def parseGfxinfoFramestats(self, out):
pd = '---PROFILEDATA---'
l = re.findall(r'%s.*?%s' % (pd, pd), out, re.DOTALL)
if l:
s = ''
for e in l:
if not e:
continue
sl = e.splitlines()
for s in sl:
if s == pd:
continue
pda = s.split(',')
if pda[Dumpsys.FLAGS] == 'Flags':
if pda[Dumpsys.INTENDED_VSYNC] != 'IntendedVsync' and pda[
Dumpsys.FRAME_COMPLETED] != 'FrameCompleted':
raise RuntimeError('Unsupported gfxinfo version')
continue
if pda[Dumpsys.FLAGS] == '0':
# Only keep lines with Flags=0
# If this is non-zero the row should be ignored, as the frame has been determined as being an
# outlier from normal performance, where it is expected that layout & draw take longer than
# 16ms.
# See https://developer.android.com/training/testing/performance.html#timing-info for details
# on format
if DEBUG:
print('pda={}'.format(pda), file=sys.stderr)
self.gfxProfileData.append(pda[:-1])
# All done! The total time spent working on this frame can be computed by doing
# FRAME_COMPLETED - INTENDED_VSYNC.
self.framestats.append(
(int(pda[Dumpsys.FRAME_COMPLETED]) - int(pda[Dumpsys.INTENDED_VSYNC])) / 10 ** 6)
else:
raise RuntimeError('No profile data found')
@staticmethod
def gfxinfo(adbclient, *args):
return Dumpsys(adbclient, Dumpsys.GFXINFO, *args)
@staticmethod
def resetGfxinfo(adbclient, pkg):
return Dumpsys(adbclient, Dumpsys.GFXINFO, pkg, Dumpsys.RESET)
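# Illustrative usage (a sketch, not part of the original module): it assumes
# `adbclient` is an AndroidViewClient adb client whose shell() method runs
# `adb shell` commands, and the package name below is a placeholder.
# mem = Dumpsys.meminfo(adbclient, 'com.example.app')
# print(mem.total, mem.views, mem.activities)
# fs = Dumpsys.gfxinfo(adbclient, 'com.example.app', Dumpsys.FRAMESTATS)
# print(fs.framestats)  # per-frame durations in milliseconds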
| [
"[email protected]"
] | |
1e8d16b806a47a308758f7e5980e7257ffc52afe | a8c0867109974ff7586597fe2c58521277ab9d4d | /LC645.py | 199b710d2450fe19fc854e94ce08adbfb1a3b352 | [] | no_license | Qiao-Liang/LeetCode | 1491b01d2ddf11495fbc23a65bb6ecb74ac1cee2 | dbdb227e12f329e4ca064b338f1fbdca42f3a848 | refs/heads/master | 2023-05-06T15:00:58.939626 | 2021-04-21T06:30:33 | 2021-04-21T06:30:33 | 82,885,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,222 | py | class Solution(object):
def findErrorNums(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
xor = xor0 = xor1 = 0
for num in range(1, len(nums) + 1):
xor ^= num
for num in nums:
xor ^= num
rightmost_bit = xor & -xor
for num in range(1, len(nums) + 1):
if num & rightmost_bit:
xor1 ^= num
else:
xor0 ^= num
for num in nums:
if num & rightmost_bit:
xor1 ^= num
else:
xor0 ^= num
for num in nums:
if num == xor0:
return [xor0, xor1]
return [xor1, xor0]
# if not nums:
# return None
# stat = [0] * (len(nums) + 1)
# result = [0, 0]
# for n in nums:
# stat[n] += 1
# for idx in range(1, len(stat)):
# if stat[idx] == 0:
# result[1] = idx
# if stat[idx] == 2:
# result[0] = idx
# return result
sol = Solution()
nums = [1,2,2,4]
# nums = [1,3,3]
# nums = [8,7,3,5,3,6,1,4]
print(sol.findErrorNums(nums))
| [
"[email protected]"
] | |
70ba21f2461cde284f5558daadde2b7a79b1ce76 | 781e2692049e87a4256320c76e82a19be257a05d | /assignments/python/wc/src/442.py | 0e9c48707a834d8dbb3976c1a62d17af127fe5a1 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 341 | py | """
wordcount
"""
def word_count(text):
"""
Return a dictionary of words and their
    word counts
:param text:
:return:
"""
words = {}
text = ' '.join(text.split())
for word in text.split(" "):
if word in words:
words[word] += 1
else:
words[word] = 1
return words
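# Illustrative usage (a minimal sketch, not part of the original exercise file):
if __name__ == "__main__":
    print(word_count("olly olly in come free"))
    # -> {'olly': 2, 'in': 1, 'come': 1, 'free': 1}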
| [
"[email protected]"
] | |
b16e3e2cc0d11fee303d0c099065faad2ac767bd | 9e549ee54faa8b037f90eac8ecb36f853e460e5e | /venv/lib/python3.6/site-packages/wtforms/ext/django/templatetags/wtforms.py | 33a60e3feffb231cb81bf5a196b76d008f86ba12 | [
"MIT"
] | permissive | aitoehigie/britecore_flask | e8df68e71dd0eac980a7de8c0f20b5a5a16979fe | eef1873dbe6b2cc21f770bc6dec783007ae4493b | refs/heads/master | 2022-12-09T22:07:45.930238 | 2019-05-15T04:10:37 | 2019-05-15T04:10:37 | 177,354,667 | 0 | 0 | MIT | 2022-12-08T04:54:09 | 2019-03-24T00:38:20 | Python | UTF-8 | Python | false | false | 2,878 | py | """
Template tags for easy WTForms access in Django templates.
"""
from __future__ import unicode_literals
import re
from django import template
from django.conf import settings
from django.template import Variable
from ....compat import iteritems
register = template.Library()
class FormFieldNode(template.Node):
def __init__(self, field_var, html_attrs):
self.field_var = field_var
self.html_attrs = html_attrs
def render(self, context):
try:
if "." in self.field_var:
base, field_name = self.field_var.rsplit(".", 1)
field = getattr(Variable(base).resolve(context), field_name)
else:
field = context[self.field_var]
except (template.VariableDoesNotExist, KeyError, AttributeError):
return settings.TEMPLATE_STRING_IF_INVALID
h_attrs = {}
for k, v in iteritems(self.html_attrs):
try:
h_attrs[k] = v.resolve(context)
except template.VariableDoesNotExist:
h_attrs[k] = settings.TEMPLATE_STRING_IF_INVALID
return field(**h_attrs)
@register.tag(name="form_field")
def do_form_field(parser, token):
"""
Render a WTForms form field allowing optional HTML attributes.
Invocation looks like this:
{% form_field form.username class="big_text" onclick="alert('hello')" %}
where form.username is the path to the field value we want. Any number
of key="value" arguments are supported. Unquoted values are resolved as
template variables.
"""
parts = token.contents.split(" ", 2)
if len(parts) < 2:
error_text = '%r tag must have the form field name as the first value, followed by optional key="value" attributes.'
raise template.TemplateSyntaxError(error_text % parts[0])
html_attrs = {}
if len(parts) == 3:
raw_args = list(args_split(parts[2]))
if (len(raw_args) % 2) != 0:
raise template.TemplateSyntaxError(
"%r tag received the incorrect number of key=value arguments."
% parts[0]
)
for x in range(0, len(raw_args), 2):
html_attrs[str(raw_args[x])] = Variable(raw_args[x + 1])
return FormFieldNode(parts[1], html_attrs)
args_split_re = re.compile(
r"""("(?:[^"\\]*(?:\\.[^"\\]*)*)"|'(?:[^'\\]*(?:\\.[^'\\]*)*)'|[^\s=]+)"""
)
def args_split(text):
""" Split space-separated key=value arguments. Keeps quoted strings intact. """
for bit in args_split_re.finditer(text):
bit = bit.group(0)
if bit[0] == '"' and bit[-1] == '"':
yield '"' + bit[1:-1].replace('\\"', '"').replace("\\\\", "\\") + '"'
elif bit[0] == "'" and bit[-1] == "'":
yield "'" + bit[1:-1].replace("\\'", "'").replace("\\\\", "\\") + "'"
else:
yield bit
| [
"[email protected]"
] | |
6d306b1850726a9a38b7a78a8b1bdffe4758ef5c | b6dd7ffc68957f381ae27b9e2a324f555793f238 | /part-1-basics/ch_10/write_message.py | ac1958c6899a46aaa6a517519f733af594560396 | [] | no_license | lopezjronald/Python-Crash-Course | 0a1100a1888238053f4865f8987cbc023d159d38 | b6add3fc70b0d09b4b5dab9b06a02be2ae94b9da | refs/heads/master | 2022-12-26T21:31:37.286430 | 2020-09-30T04:12:22 | 2020-09-30T04:12:22 | 298,722,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | filename = 'guest.txt'
# with open(filename, 'w') as file_object:
# file_object.write("Guest List\n")
# with open(filename, 'a') as file_object:
# response = True
# while response:
# file_object.write(input("Please enter the name of your guest: "))
# file_object.write('\n')
# continue_app = input("Continue? ('q' to quit): ")
# if continue_app.lower() == 'q':
# response = False
with open('guest.txt') as file_object:
for content in file_object:
print(content.strip()) | [
"[email protected]"
] | |
87baa53650db2e62d3ac7b05f529fe8fc7792281 | ec8fef96af2a6b6610d298637f05bcdfe67cba2b | /long_range_compare/multicut_solvers.py | 74311cc71aac426c617f2c912f4507dab3e459ec | [] | no_license | abailoni/longRangeAgglo | 8b98aca75b17d177cb5e408460f95ff20f411aeb | 260b452e106125722ae3824755584ce7bfd5b81c | refs/heads/master | 2021-06-25T14:14:57.150233 | 2020-11-06T11:14:52 | 2020-11-06T11:14:52 | 150,707,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,522 | py | import time
import numpy as np
import sys
# -------------------
# MULTICUT SOLVERS:
# -------------------
def solve_multicut(graph, edge_costs, p=None, solver_type="exact_solver",
proposal_generator_type='WS',
fusion_moves_kwargs=None,
proposal_gener_WS_kwargs=None,
proposal_gener_HC_kwargs=None,
KL_kwargs=None,
HC_kwargs=None):
"""
Accepted options:
:param solver_type: exact_solver, KL, HC, HC-KL, HC-KL-fusionMoves
:param proposal_generator_type: WS, HC
"""
if fusion_moves_kwargs is None:
fusion_moves_kwargs = {'numberOfIterations': 100, # Max number of iterations
'stopIfNoImprovement': 10, # If no improvements, I stop earlier
'numberOfThreads': 1 # Parallel solutions of the fusionMove
}
if proposal_gener_WS_kwargs is None:
proposal_gener_WS_kwargs = {'sigma': 2.0, # Amount of noise added
'numberOfSeeds': 0.009, # Fractions of nodes that are randomly selected as seeds
'seedingStrategie': "SEED_FROM_NEGATIVE"
}
if proposal_gener_HC_kwargs is None:
proposal_gener_HC_kwargs = {'sigma':1.5,
'weightStopCond':0.0,
'nodeNumStopCond':-1.0
}
if HC_kwargs is None:
HC_kwargs = {'weightStopCond': 0.0, # Stop aggl. when this weight is reached
'nodeNumStopCond': -1.0, # Stop aggl. when this nb. of nodes is found
'visitNth': 100 # How often to print
}
if KL_kwargs is None:
KL_kwargs = {'numberOfInnerIterations': sys.maxsize,
'numberOfOuterIterations': 100,
'epsilon': 1e-6
}
# Costs to the power of p:
if p is None or p==1:
p = 1
exp_costs = edge_costs.copy()
else:
neg_weights = edge_costs < 0.
exp_costs = np.abs(edge_costs)**p
exp_costs[neg_weights] *= -1
mc_obj = graph.MulticutObjective(graph=graph, weights=exp_costs)
tick = time.time()
if solver_type == "exact_solver":
log_visitor = mc_obj.loggingVisitor(verbose=True)
solverFactory = mc_obj.multicutIlpFactory()
solver = solverFactory.create(mc_obj)
final_node_labels = solver.optimize(visitor=log_visitor)
elif solver_type == "KL":
log_visitor = mc_obj.loggingVisitor(verbose=True)
solverFactory = mc_obj.kernighanLinFactory(**KL_kwargs)
solver = solverFactory.create(mc_obj)
final_node_labels = solver.optimize(visitor=log_visitor)
elif solver_type == "HC":
log_visitor = mc_obj.loggingVisitor(verbose=True, visitNth=100)
solverFactory = mc_obj.greedyAdditiveFactory(**HC_kwargs)
solver = solverFactory.create(mc_obj)
final_node_labels = solver.optimize(visitor=log_visitor)
elif solver_type == "HC-KL":
log_visitor = mc_obj.loggingVisitor(verbose=False)
solverFactory = mc_obj.greedyAdditiveFactory(**HC_kwargs)
solver = solverFactory.create(mc_obj)
node_labels = solver.optimize(visitor=log_visitor)
# 2. Use a second better warm-up solver to get a better solution:
log_visitor = mc_obj.loggingVisitor(verbose=True)
solverFactory = mc_obj.kernighanLinFactory(**KL_kwargs)
solver = solverFactory.create(mc_obj)
final_node_labels = solver.optimize(visitor=log_visitor, nodeLabels=node_labels)
elif solver_type == "HC-KL-fusionMoves":
log_visitor = mc_obj.loggingVisitor(verbose=False)
# 1. Initialize a warm-up solver and run optimization
solverFactory = mc_obj.greedyAdditiveFactory(**HC_kwargs)
solver = solverFactory.create(mc_obj)
node_labels = solver.optimize(visitor=log_visitor)
# 2. Use a second better warm-up solver to get a better solution:
log_visitor = mc_obj.loggingVisitor(verbose=True)
solverFactory = mc_obj.kernighanLinFactory(**KL_kwargs)
solver = solverFactory.create(mc_obj)
new_node_labels = solver.optimize(visitor=log_visitor, nodeLabels=node_labels)
        # 3. Run the fusionMoves solver
if proposal_generator_type == "WS":
pgen = mc_obj.watershedCcProposals(**proposal_gener_WS_kwargs)
elif proposal_generator_type == "HC":
pgen = mc_obj.greedyAdditiveCcProposals(**proposal_gener_HC_kwargs)
else:
raise ValueError("Passed type of proposal generator is not implemented")
# fsMoveSett = mc_obj.fusionMoveSettings(mc_obj.cgcFactory(doCutPhase=True, doGlueAndCutPhase=True, mincutFactory=None,
# multicutFactory=None,
# doBetterCutPhase=False, nodeNumStopCond=0.1, sizeRegularizer=1.0))
solverFactory = mc_obj.ccFusionMoveBasedFactory(proposalGenerator=pgen, **fusion_moves_kwargs)
solver = solverFactory.create(mc_obj)
final_node_labels = solver.optimize(visitor=log_visitor, nodeLabels=new_node_labels)
else:
raise ValueError("Passed type of solver is not implemented")
tock = time.time()
final_edge_labels = graph.nodesLabelsToEdgeLabels(final_node_labels)
energy = (edge_costs * final_edge_labels).sum()
return energy, final_node_labels, final_edge_labels, log_visitor, tock-tick
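

if __name__ == "__main__":
    # Minimal self-contained sketch (example values only, not from any real
    # graph) showing the signed power transform that solve_multicut applies to
    # edge costs when p > 1: magnitudes are raised to the power p while the
    # sign of each cost is preserved.
    example_costs = np.array([-2.0, -0.5, 0.5, 2.0])
    p_demo = 2
    negative = example_costs < 0.
    transformed = np.abs(example_costs) ** p_demo
    transformed[negative] *= -1
    print("original costs:   ", example_costs)
    print("transformed costs:", transformed)  # [-4.   -0.25  0.25  4.  ]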
| [
"[email protected]"
] | |
d07b700b026672f3fe65d2438f6c08a22556f2df | 34474048ec5c4850623cf0fea993b43de76fada4 | /Tests/unittest/code_gen/tac_o1/local_chars.tac | e00797894280d102fd4d8b3938dc5cdb5c77ad11 | [] | no_license | imsure/C-- | 69a80e152936e31b14319ab16c2317d2cacc9165 | 9991e7135d6ebc8f6f08f46f37b82bfe353ec17f | refs/heads/master | 2021-01-13T02:04:07.295401 | 2015-05-01T01:26:07 | 2015-05-01T01:26:07 | 30,732,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | tac | main:
Enter main 16
x = 'A'
y = 'B'
z = 'C'
Param x
Call print_int 1
_tstr0 = "\n"
Param _tstr0
Call print_string 1
Param y
Call print_int 1
_tstr1 = "\n"
Param _tstr1
Call print_string 1
Param z
Call print_int 1
_tstr2 = "\n"
Param _tstr2
Call print_string 1
Return
| [
"[email protected]"
] | |
1f9a65ac787ca5726e2ee5f3e366eecd9624af55 | 3eb22fd5a85676d928cd8b3cfbd69f9c9f70429a | /torch/fx/experimental/accelerator_partitioner.py | c16ba8c097957043b65e57185fdee04c86c3bd01 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | SanBingYouYong/pytorch | b7f03e3c8dbb5e66b6a7218f7d81d893f5cfa8f6 | 469f0e42d6e2b3cd8c78b224b97d45be2dc7d0ee | refs/heads/master | 2023-05-30T20:53:59.389479 | 2021-06-16T01:18:33 | 2021-06-16T01:19:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43,635 | py | import operator
from typing import Dict, List, Set, NamedTuple, Tuple
import torch
from torch.fx.experimental.graph_manipulation import get_size_of_all_nodes
from torch.fx.experimental.partitioner_utils import (
Partition,
Device,
PartitionerConfig,
get_partition_to_latency_mapping,
get_latency_of_partitioned_graph,
NodeLatency,
get_extra_size_of,
PartitionMode,
)
from torch.fx.graph_module import GraphModule
from torch.fx.node import Node, map_arg
from torch.fx.passes.split_module import split_module
class DAGNode:
"""DAGNode class maintains useful information for a partition (submodule),
and its input submodules and output submodules.
"""
def __init__(
self,
submodule_node: Node,
input_nodes: List[Node],
output_nodes: List[Node],
logical_device_ids: List[int],
size_bytes: int,
) -> None:
self.submodule_node: Node = submodule_node
self.input_nodes: List[Node] = input_nodes
self.output_nodes: List[Node] = output_nodes
self.logical_device_ids: List[int] = logical_device_ids
self.size_bytes = size_bytes
def __str__(self) -> str:
return str(self.submodule_node)
class DAG:
"""DAG class contains all the DAG nodes"""
def __init__(self) -> None:
self.nodes: List[DAGNode] = []
def create_node(
self,
submodule_node: Node,
input_nodes: List[Node],
output_nodes: List[Node],
logical_devices: List[int],
size_bytes: int,
) -> None:
node = DAGNode(
submodule_node, input_nodes, output_nodes, logical_devices, size_bytes
)
self.nodes.append(node)
class PartitionResult(NamedTuple):
"""NameTuple used for returning DAG and a new fx module"""
dag: DAG
module_with_submodules: GraphModule
"""Followings are some helper functions for partition manipulation"""
def reset_partition_device(partitions):
for partition in partitions:
partition.logical_device_ids = []
def combine_two_partitions(
partition_0: Partition, partition_1: Partition, partitions: List[Partition]
) -> None:
"""Given a list of partitions and its two partitions,
combine these two partitions into a new one appending to the partitions
and remove the previous two partitions from the list of partitions
"""
partition = Partition(len(partitions))
partition.nodes = partition_0.nodes.union(partition_1.nodes)
partition.recalculate_mem_size()
partitions.append(partition)
partitions.remove(partition_0)
partitions.remove(partition_1)
reorganize_partitions(partitions)
return
def set_parents_and_children(partitions: List[Partition]) -> None:
"""Given a list of partitions, mark parents and children for each partition"""
# Go through all nodes in a partition.
# If a node's user is in other partition,
# then the other partition is this partition's children.
# This partition is the other partition's parent
for partition in partitions:
partition.children = set()
partition.parents = set()
for partition in partitions:
for node in partition.nodes:
# For each node in the current partition, find its users
users = node.users
for n in users:
                # Find which partition the user node belongs to.
                # Note that if the node itself also belongs to that partition,
                # that partition is not a child of the current partition
for p in partitions:
if p != partition and n in p.nodes and node not in p.nodes:
partition.children.add(p)
p.parents.add(partition)
return
def reorganize_partitions(partitions: List[Partition]) -> None:
"""Given a list of partitions, reorganzie partiton id,
its parents and its children for each partition
"""
# Rearrange partition ids
for i, partition in enumerate(partitions):
partition.partition_id = i
set_parents_and_children(partitions)
return
def get_bfs_level_partition(partitions: List[Partition]) -> None:
"""Given a list of partitions,
mark the bfs level for each partition
"""
current_level: Set[Partition] = set()
visited: Set[Partition] = set()
for partition in partitions:
# If a partition has no parent, it should be in root level
if len(partition.parents) == 0:
current_level.add(partition)
next_level: Set[Partition] = set()
level = 0
# bfs
while current_level:
partition = current_level.pop()
partition.bfs_level = level
visited.add(partition)
children = partition.children
for child in children:
if child not in next_level:
next_level.add(child)
if not current_level:
current_level = next_level.copy()
next_level = set()
level += 1
return
def get_node_to_partition_mapping(partitions: List[Partition]) -> Dict[Node, int]:
"""Given a list of partitions,return node to partition mapping"""
node_to_partition: Dict[Node, int] = {}
for partition in partitions:
for node in partition.nodes:
node_to_partition[node] = partition.partition_id
return node_to_partition
def get_device_to_partitions_mapping(
partitions: List[Partition], devices: List[Device]
):
"""Given a list of partitions and a list of devices,
map each partition into a device.
"""
def calculate_extra_mem_bytes_needed_for(
partition: Partition, partitions: List[Partition]
):
all_nodes: Set[Node] = set()
for p in partitions:
all_nodes = all_nodes.union(p.nodes)
if len(all_nodes) == 0:
return partition.used_mem_bytes
all_nodes = all_nodes.union(partition.nodes)
extra_size_needed = 0
for node in partition.nodes:
extra_size_needed += get_extra_size_of(node, all_nodes)
return extra_size_needed
def find_device_for(partition: Partition):
"""Given a partition, find a logical device for the partition
The algorithm is to put the partition on the device
that has just enough mem left for that partition.
device_to_left_mem_bytes is a dictionary between device and its left mem size
sorted by its left mem size
"""
for d in device_to_left_mem_bytes:
extra_size_needed = calculate_extra_mem_bytes_needed_for(
partition, device_to_partitions[d]
)
if extra_size_needed < device_to_left_mem_bytes[d]:
device_to_partitions[d].append(partition)
partition.logical_device_ids.append(d.logical_id)
device_to_left_mem_bytes[d] -= extra_size_needed
return True
return False
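    # Illustrative sketch (hypothetical numbers): because device_to_left_mem_bytes
    # is iterated in ascending order of remaining memory, a partition needing,
    # say, 3 units lands on a device with 4 units left rather than one with 10,
    # keeping the roomier devices free for larger partitions.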
# logical id to device
logical_id_to_device: Dict[int, Device] = {}
# Track partitions on device
device_to_partitions: Dict[Device, List[Partition]] = {}
# Track device's left mem size
device_to_left_mem_bytes: Dict[Device, int] = {}
for d in devices:
logical_id_to_device[d.logical_id] = d
device_to_partitions[d] = []
device_to_left_mem_bytes[d] = d.available_mem_bytes
# Deal with the partitions that already have a device
# and also collect all partitions without a device (no_device_partitions)
no_device_partitions = []
for partition in partitions:
if partition.logical_device_ids != []:
logical_id = partition.logical_device_ids[0]
device = logical_id_to_device[logical_id]
device_to_partitions[device] = [partition]
device_to_left_mem_bytes[device] = (
d.available_mem_bytes - partition.used_mem_bytes
)
else:
no_device_partitions.append(partition)
# Find devices for all the partitions without a device
found_device = True
for partition in no_device_partitions:
device_to_left_mem_bytes = {
d: left_mem_bytes
for d, left_mem_bytes in sorted(
device_to_left_mem_bytes.items(), key=lambda item: item[1]
)
}
found_device = find_device_for(partition)
if not found_device:
break
return found_device
def check_dependency(partition):
"""Given a partition,check if there is a circular dependency on
this partition using bfs
"""
visited: Set[Partition] = set([partition])
queue: List[Partition] = [partition]
while queue:
p = queue.pop(0)
for child in p.children:
if child == partition:
return True
else:
if child not in visited:
visited.add(child)
queue.append(child)
return False
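# Illustrative sketch (hypothetical partitions): merging two partitions can
# introduce a cycle. With A -> B -> C (A a parent of B, B a parent of C),
# merging A and C yields a partition AC that is both a parent and a child of B,
# and the BFS above would reach the merged partition again through its children.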
class Partitioner:
"""A fx module may not fit into one device.
Partitioner class helps partition one fx module into submodules (partitions),
so that the submodules can be executed crossing different accelerators.
The main function of this class is self.partition_graph.
It partitions the fx module based on the scheme specified in partition_config
A DAG structure is returned
along with a new fx module with submodule nodes.
"""
def __init__(self) -> None:
self.partitions: List[Partition] = []
self.node_to_partition: Dict[Node, int] = {}
self.devices: List[Device] = []
def partition_graph(
self,
fx_module: GraphModule,
torch_module: torch.nn.Module,
partitioner_config: PartitionerConfig,
) -> PartitionResult:
"""Given the fx module, torch module and partitioner_config,
find the partitions, do the partitions,
and then return a DAG and a new fx module with submodule nodes (partitions)
"""
self.graph_module = fx_module
self.torch_module = torch_module
self.devices = partitioner_config.devices
if len(self.devices) == 0:
raise RuntimeError("No devices")
# Tag the size in bytes to all nodes in the graph_module.
get_size_of_all_nodes(self.graph_module)
# Check if there are op nodes in the fx module
nodes = self.graph_module.graph.nodes
if all(node.op in {"placeholder", "get_attr", "output"} for node in nodes):
raise RuntimeError("No Partition since no operations in the module")
# Calculate total size of the fx module
total_size_of_graph = 0
for node in nodes:
if node.op == "output":
break
total_size_of_graph += node.size_bytes.total_size
# Find the device with the max mem size
device_with_max_mem = max(self.devices, key=lambda d: d.available_mem_bytes)
# AOT based partition
if partitioner_config.mode == PartitionMode.aot_based:
self.aot_based_partition(
partitioner_config.node_to_partition_mapping,
partitioner_config.partition_to_logical_device_mapping,
)
# Single partition if the whole module can be fit into one device
elif total_size_of_graph <= device_with_max_mem.available_mem_bytes:
self.find_single_partition(total_size_of_graph)
elif total_size_of_graph > sum([d.available_mem_bytes for d in self.devices]):
raise RuntimeError("Devices have no enough memory for the module")
else:
# Sparse nn based partition
if partitioner_config.mode == PartitionMode.sparse_nn:
available_mem_bytes = self.devices[0].available_mem_bytes
if not all(
device.available_mem_bytes == available_mem_bytes
for device in self.devices
):
raise RuntimeError("All devices must have same memory size!")
# sparse_nn_partition only support same memory size
# TODO: add different size support for sparse_nn_partition
self.sparse_nn_partition(available_mem_bytes)
# Cost aware partition
elif partitioner_config.mode == PartitionMode.cost_aware:
self.cost_aware_partition(
partitioner_config.transfer_rate_bytes_per_sec,
partitioner_config.node_to_latency_mapping,
)
# KL based partition
elif partitioner_config.mode == PartitionMode.kl_based:
self.kl_based_partition(
partitioner_config.transfer_rate_bytes_per_sec,
partitioner_config.node_to_latency_mapping,
)
else:
self.size_based_partition()
module_with_submodules = self.do_partition()
# The DAG contains DAGNodes with info of each partition's input nodes, output nodes
# and how partitions are connected.
dag = self.dump_dag(module_with_submodules)
ret = PartitionResult(dag, module_with_submodules)
return ret
def find_single_partition(self, total_size_of_graph) -> None:
"""Fit the whole fx module into one device"""
partition_0 = self.create_partition()
for node in self.graph_module.graph.nodes:
if node.op == "output":
break
partition_0.nodes.add(node)
partition_0.used_mem_bytes = total_size_of_graph
partition_0.logical_device_ids = [0]
# Get the node to partition mapping
self.node_to_partition = get_node_to_partition_mapping(self.partitions)
return
def size_based_partition(self) -> None:
"""This method is to partition the fx module based on memory size.
        It uses a greedy approach. The result may not be the best.
        The basic idea is:
        Step 1:
        Find a device which has enough memory to fit the current node, create an empty partition
        with the size of that device.
        Then keep adding the following nodes into the partition until the partition is full.
        Step 2:
        Repeat Step 1 until no device is left
        Step 3:
        If some nodes are left, create a partition for each remaining node (single node partition),
        and then try to map those partitions onto logical devices with enough memory left.
"""
def find_device_based_on_size(node) -> Device:
"""Given a node, this function is to find a logical device
that could fit the node.
"""
mem_size_needed = get_extra_size_of(node, set())
device = Device("", -1, -1)
for d in self.devices:
if (
d not in occupied_devices
and d.available_mem_bytes >= mem_size_needed
):
device = d
break
if device.available_mem_bytes < 0:
raise RuntimeError(str(node) + "is too large to fit any device")
occupied_devices.append(device)
return device
# Track partition and its left mem size
partition_to_left_mem_bytes: Dict[Partition, int] = {}
# Track all the devices that have been used
occupied_devices: List[Device] = []
partition = self.create_partition()
for node in self.graph_module.graph.nodes:
if node.op in {"call_module", "call_method", "call_function"}:
# Check if there are devices left
if len(self.partitions) <= len(self.devices):
total_size_of_input_nodes = get_extra_size_of(node, partition.nodes)
# Check if the current partition is the very first partition
if partition.used_mem_bytes == 0:
# Find a device to fit the first node, return available mem size
device = find_device_based_on_size(node)
occupied_devices.append(device)
# Update partition and its left mem size
partition_to_left_mem_bytes[
partition
] = device.available_mem_bytes
                        # Update available mem for the current partition
partition.logical_device_ids.append(device.logical_id)
else:
# The current partition is not the first partition
# Check if the current node can fit into current partition
if (
partition_to_left_mem_bytes[partition]
< total_size_of_input_nodes
):
# Check if no device is left
if len(self.partitions) == len(self.devices):
# No device is left
# Put the previous partitions into a list (non_single_node_partitions)
non_single_node_partitions = self.partitions[:]
# Create the first single node partition for the current node
self.create_single_node_partition(node)
continue
# Some devices are still left
# Create a new partition with a mem size that is enough for the current node
device = find_device_based_on_size(node)
partition = self.create_partition()
total_size_of_input_nodes = get_extra_size_of(
node, partition.nodes
)
partition_to_left_mem_bytes[
partition
] = device.available_mem_bytes
partition.logical_device_ids.append(device.logical_id)
partition.add_node(node)
partition_to_left_mem_bytes[partition] -= total_size_of_input_nodes
# Create single node partitions if no device is left
else:
self.create_single_node_partition(node)
reorganize_partitions(self.partitions)
# Get the node to partition mapping
self.node_to_partition = get_node_to_partition_mapping(self.partitions)
# Mapping all partitions into device
found_partition_to_device_mapping = get_device_to_partitions_mapping(
self.partitions, self.devices
)
if not found_partition_to_device_mapping:
raise RuntimeError("Cannot Get a Valid Partition to Logical Device Mapping")
return
def do_partition(self) -> GraphModule:
"""Return a new fx module with submodule nodes (partitions)."""
module_with_submodules = split_module(
self.graph_module,
self.torch_module,
lambda node: self.node_to_partition[node],
)
return module_with_submodules
def dump_dag(self, module_with_submodules: GraphModule) -> DAG:
"""Return the dag structure and the new fx module with submodules"""
dag = DAG()
for node in module_with_submodules.graph.nodes:
if node.op == "output":
break
if node.op in {"placeholder", "get_attr"}:
continue
if node.target == operator.__getitem__:
continue
input_nodes: Dict[Node, None] = {}
map_arg(node.args, lambda n: input_nodes.setdefault(n))
map_arg(node.kwargs, lambda n: input_nodes.setdefault(n))
# When a node has two or more output nodes,
# it outputs its result to 'getitem' nodes.
# Those 'getitem' nodes are the output node for this node.
# Otherwise, the output node is this node itself.
if len(node.users) > 1:
output_nodes = list(node.users)
else:
output_nodes = [node]
partition_id = int(node.name.rsplit("_", 1)[-1])
device_ids = self.partitions[partition_id].logical_device_ids
size_bytes = self.partitions[partition_id].used_mem_bytes
dag.create_node(
node, list(input_nodes), output_nodes, device_ids, size_bytes
)
return dag
def create_partition(self) -> Partition:
"""Create a partition and append it to self.partitions."""
partition_id = len(self.partitions)
partition = Partition(partition_id)
self.partitions.append(partition)
return partition
def create_single_node_partition(self, node):
"""Create a partition for a single node"""
partition = self.create_partition()
partition.add_node(node)
return
def sparse_nn_partition(self, available_mem_bytes: int) -> None:
"""This method partition a sparse nn module.
It is size based partition but different from size_based_partition,
it only works when all the devices have same memory size (available_mem_bytes).
In the future, devices with different mem sizes will be supported like size_based_partition.
It first traverse all the nodes and do the partitions based on the same memory size.
If the current partition has no enough memory left for a new op node
(call_module, call_method, call_function), a new partition is created.
When crossing the boundary between non-embedding nodes and embedding nodes,
a new partition is created regardlessly.
For example, if the current node is a non-embedding node but the next node is an
embedding node, a new partition is created for the next node.
After the partition, the partitions are combined as much as possible.
The rule is that a non-embedding partition only
combines with another non-embedding one.
So as the embedding partitions.
"""
def combine_partitions_based_on_size(
partitions: List[Partition], available_mem_bytes: int
) -> None:
"""Combining small partitions together to keep as less partitions as possible.
Here is an example of the algorithm to do this:
Assume some partitions, we first sort them based on partiiton used memory size.
[(partition_4, 1), (partition_3, 1), (partition_2, 2), (partition_1, 7), (partition_0, 9)]
The available memory is 10.
step 1: self.find_partition_to_combine_based_on_size()
First, mark bfs level for each partition
Second, look the smallest partition, partition_4: 10 - 1 = 9
It means any partition has a used memory equal or less than 9 could combine this partition
We go from the largest and selection partition_0.
Check the bfs level for two partitions, if the level difference is less than 2,
it can be combined.
step 2: repeat step 1 until no partitions can be combined
"""
find_combination = True
while find_combination:
# Sort partitions based on memory size
sorted_partitions = sorted(partitions, key=lambda p: p.used_mem_bytes)
# Mark bfs level
get_bfs_level_partition(self.partitions)
find_combination, partitions = find_partition_to_combine_based_on_size(
sorted_partitions, available_mem_bytes, partitions
)
return
def calculate_mem_bytes_needed(p1, p2):
"""Given two partitions, calculate how many mem bytes
are needed if two partitions are combined
"""
nodes = p1.nodes.union(p2.nodes)
mem_bytes_needed = 0
for node in nodes:
mem_bytes_needed += get_extra_size_of(node, nodes)
return mem_bytes_needed
def find_partition_to_combine_based_on_size(
sorted_partitions: List[Partition],
available_mem_bytes: int,
partitions: List[Partition],
) -> Tuple[bool, List[Partition]]:
"""step 1 in combine_partition_based_on_size()"""
find_combination = False
smallest_partition = sorted_partitions.pop(0)
for p in sorted_partitions[::-1]:
if abs(smallest_partition.bfs_level - p.bfs_level) <= 1:
# Calculate how many bytes needed if combined
mem_bytes_needed = calculate_mem_bytes_needed(p, smallest_partition)
if mem_bytes_needed <= available_mem_bytes:
combine_two_partitions(p, smallest_partition, self.partitions)
partitions.remove(smallest_partition)
partitions.remove(p)
partitions.append(self.partitions[-1])
find_combination = True
break
return find_combination, partitions
def reset_partition_in_sparse_nn(partition, new_partition=True):
"""If crossing the boudary between non-embedding nodes and
embedding nodes, create a new partition
"""
if in_embedding_region:
embedding_partitions.append(partition)
else:
non_embedding_partitions.append(partition)
if new_partition:
partition = self.create_partition()
partition.left_mem_bytes = available_mem_bytes
return partition
return None
def is_embedding_node(node: Node) -> bool:
"""Check if a node is an embedding node"""
if node.op == "call_module":
submodule = self.graph_module
for atom in str(node.target).split("."):
if not hasattr(submodule, atom):
raise RuntimeError(
f"Module {submodule} has no attribute {atom}"
)
submodule = getattr(submodule, atom)
if "Embedding" in str(submodule):
return True
return False
        # Track embedding partitions and non-embedding partitions separately
embedding_partitions: List[Partition] = []
non_embedding_partitions: List[Partition] = []
# A Flag to check the boundary
in_embedding_region: bool = False
partition = self.create_partition()
for node in self.graph_module.graph.nodes:
if node.op in {"call_module", "call_method", "call_function"}:
# Check if crossing the boundary between embedding nodes and non embedding nodes
if is_embedding_node(node) != in_embedding_region:
# Crossing the boundary
# Check if the current partition is an empty partition
if partition.used_mem_bytes != 0:
# The current partition isn't an empty partition. Create a new one.
partition = reset_partition_in_sparse_nn(partition)
in_embedding_region = not in_embedding_region
total_size_of_input_nodes = get_extra_size_of(node, partition.nodes)
if (
total_size_of_input_nodes + partition.used_mem_bytes
> available_mem_bytes
):
partition = reset_partition_in_sparse_nn(partition)
total_size_of_input_nodes = get_extra_size_of(node, partition.nodes)
if total_size_of_input_nodes > available_mem_bytes:
raise RuntimeError(
node.target + "is too large to fit into a device"
)
partition.add_node(node)
reset_partition_in_sparse_nn(partition, new_partition=False)
# Set parents and children for partitions
set_parents_and_children(self.partitions)
# Combining non-embedding partitions
combine_partitions_based_on_size(non_embedding_partitions, available_mem_bytes)
# Combining embedding partitions
combine_partitions_based_on_size(embedding_partitions, available_mem_bytes)
total_size_of_non_embedding_partitions = 0
for partition in non_embedding_partitions:
total_size_of_non_embedding_partitions += partition.used_mem_bytes
# Check if devices are enough for all partitions
if len(embedding_partitions) > len(self.devices):
msg = (
"Need "
+ str(len(embedding_partitions))
+ " devices, but only "
+ str(len(self.devices))
+ " provided"
)
raise RuntimeError(msg)
occupied_devices = []
for i, partition in enumerate(embedding_partitions):
# Check if all non-embedding partitions can fit into embedding partition devices
if (
total_size_of_non_embedding_partitions + partition.used_mem_bytes
> available_mem_bytes
):
raise RuntimeError(
"partition_"
+ str(partition.partition_id)
+ "(embedding partition) and non embedding partitions can not fit into one device"
)
else:
# Add logical device to the partition
partition.logical_device_ids = [self.devices[i].logical_id]
occupied_devices.append(self.devices[i].logical_id)
# Add logical devices to the non_embedding_partitions
for partition in non_embedding_partitions:
partition.logical_device_ids = occupied_devices
# Get the node to partition mapping
self.node_to_partition = get_node_to_partition_mapping(self.partitions)
return
def cost_aware_partition(
self,
transfer_rate_bytes_per_sec: float,
node_to_latency_mapping: Dict[Node, NodeLatency],
) -> None:
"""This method is to partition the fx module based on the cost.
The cost is the total latency of running the whole fx module.
In partitioner_utils.py, the cost model is built.
The cost aware partition algorithm is:
        #1. At the beginning, each node is its own partition.
Then we map all the partitions to the devices
and calculate the cost
#2. Then try to pre-combine any two of the partitions if the two
partitions can be combined.
(the bfs level is less than 2 or two partitions are connected and
can find partition to device mapping)
See if any partition pair could reduce the current cost.
Choose the pair that shows the minimum cost and then combine them
#3. Repeat #2 until the cost cannot be reduced.
"""
def try_combining_partitions(p0_index, p1_index, partitions) -> float:
"""Given two partitions and a list of partitions, combine these two partitions
and see what is the cost of the modified partition list
"""
p0 = partitions[p0_index]
p1 = partitions[p1_index]
"""If two partitions' bfs level are less than 2 or two partitions are connected to each other,
then they can be combined
"""
if (
(abs(p0.bfs_level - p1.bfs_level) <= 1)
or (p0 in p1.parents)
or p0 in (p1.children)
):
combine_two_partitions(p0, p1, partitions)
# Check if a circular dependency exists after combining
if check_dependency(partitions[-1]):
return float("inf")
# Check if the modified partition list can be mapped to devices after combination
reset_partition_device(partitions)
found_deivce = get_device_to_partitions_mapping(
partitions, self.devices
)
if not found_deivce:
return float("inf")
# Calculate the new cost
partition_to_latency_mapping = get_partition_to_latency_mapping(
partitions, node_to_latency_mapping
)
cost = get_latency_of_partitioned_graph(
partitions,
partition_to_latency_mapping,
transfer_rate_bytes_per_sec,
)
return cost
# If two partition can not be combined, the cost is inf
return float("inf")
def search_combination(
transfer_rate_bytes_per_sec, node_to_latency_mapping
) -> bool:
"""Given transfer rate between partitions and each node's latency,
find two partitions to combine so the cost of the partitions can
be reduced.
            The algorithm is:
            1. Go through all the partition pairs and see
            if any pair of partitions can be combined.
            2. Calculate the cost after the combination.
            3. Select the minimum cost and combine its corresponding partition pair.
"""
partition_to_latency_mapping = get_partition_to_latency_mapping(
self.partitions, node_to_latency_mapping
)
cost = get_latency_of_partitioned_graph(
self.partitions,
partition_to_latency_mapping,
transfer_rate_bytes_per_sec,
)
if len(self.partitions) == 1:
return False
partition_pair: List[int] = []
for i in range(len(self.partitions) - 1):
for j in range(i + 1, len(self.partitions)):
# Try to combine the partition pair
# and see the new cost after combination
new_cost = try_combining_partitions(i, j, self.partitions[:])
if new_cost <= cost:
partition_pair = [i, j]
cost = new_cost
reorganize_partitions(self.partitions)
# If a partition pair is found, combine them
if len(partition_pair) != 0:
p0 = self.partitions[partition_pair[0]]
p1 = self.partitions[partition_pair[1]]
combine_two_partitions(p0, p1, self.partitions)
get_bfs_level_partition(self.partitions)
reset_partition_device(self.partitions)
get_device_to_partitions_mapping(self.partitions, self.devices)
return len(partition_pair) != 0
for node in self.graph_module.graph.nodes:
if node.op not in {"placeholder", "get_attr", "output"}:
self.create_single_node_partition(node)
# Set up parent partitions and children partitions for each partition
set_parents_and_children(self.partitions)
# Get bfs level for each partition
get_bfs_level_partition(self.partitions)
find_combination = True
while find_combination:
# Search for a pair partition to generate the minimum new cost,
# then combine them
find_combination = search_combination(
transfer_rate_bytes_per_sec, node_to_latency_mapping
)
# Make sure all partitions are set up correctly
reorganize_partitions(self.partitions)
# Set up node to partition mapping
self.node_to_partition = get_node_to_partition_mapping(self.partitions)
return
def kl_based_partition(
self,
transfer_rate_bytes_per_sec: float,
node_to_latency_mapping: Dict[Node, NodeLatency],
) -> None:
"""This function is a cost aware partition based
        on the Kernighan-Lin algorithm.
First, the graph is partitioned using size_based_partition.
Then, each node is swapped with any other node in a different
partition, and at the same time, the cost is estimated after
the swapping.
For example, we have nodes n0, n1, n2, n3 and n4.
Using size_based_partition, n0 and n1 are in Partition p0.
        n2, n3 and n4 are in Partition p1. The current cost is estimated.
        We first try swapping n0 with n2 from the other partition.
        Then we see that swapping n0 and n2 shows a lower cost
        than the current cost and is the minimum among other pairs like
        (n0, None) (which means moving n0 to Partition p1 without swapping any node),
        (n0, n3) and (n0, n4). We swap n0 and n2 and set the new cost
        as the current cost.
        Then we repeat this process for all the other nodes until all swapping pairs
are tried.
"""
def swap_nodes(n0, n1, p0, p1):
# Either n0 or n1 could be None
# That means we simply move the node
# to another partition
if n0 is not None:
p0.remove_node(n0)
p1.add_node(n0)
if n1 is not None:
p0.add_node(n1)
p1.remove_node(n1)
def try_swap_nodes(
n0, n1, p0, p1, node_to_latency_mapping, transfer_rate_per_sec
):
cost = float("inf")
swap_nodes(n0, n1, p0, p1)
# Reorganize partitions after swapping
reorganize_partitions(self.partitions)
# Check if there is a circular dependency after swapping
if (not check_dependency(p0)) and (not check_dependency(p1)):
reset_partition_device(self.partitions)
partition_to_latency_mapping = get_partition_to_latency_mapping(
self.partitions, node_to_latency_mapping
)
# Check if all partitions can be mapped to logical devices after swapping
found_device = get_device_to_partitions_mapping(
self.partitions, self.devices
)
if not found_device:
cost = float("inf")
else:
cost = get_latency_of_partitioned_graph(
self.partitions,
partition_to_latency_mapping,
transfer_rate_bytes_per_sec,
)
# Swap back and reset all partitions back to original
swap_nodes(n1, n0, p0, p1)
reorganize_partitions(self.partitions)
reset_partition_device(self.partitions)
get_device_to_partitions_mapping(self.partitions, self.devices)
return cost
def swap_node_to_partition(
node, p0, p1, node_to_latency_mapping, transfer_rate_per_sec
):
"""This function helps to swap one node from partition p0
with all the nodes in another partition p1
"""
p1_nodes = list(p1.nodes) + [None]
min_cost = float("inf")
node_pair: List[Node] = []
for n1 in p1_nodes:
# Ignore the node if it is not a op node
if n1 is not None and n1.op in {"placeholder", "get_attr"}:
continue
# Try swapping node in p0 with n1 in p1
cost = try_swap_nodes(
node, n1, p0, p1, node_to_latency_mapping, transfer_rate_per_sec
)
if cost < min_cost:
node_pair = [node, n1]
min_cost = cost
return cost, node_pair
# First use size_base_partition
self.size_based_partition()
partition_to_latency_mapping = get_partition_to_latency_mapping(
self.partitions, node_to_latency_mapping
)
# Calculate the cost of the partitions
cost = get_latency_of_partitioned_graph(
self.partitions, partition_to_latency_mapping, transfer_rate_bytes_per_sec
)
# Keep tracking the node pair that shows the better cost
node_pair: List[Node] = []
# Keep tracking the partition pair of node pair
partition_pair: List[Partition] = []
# Collect all the op nodes from the graph
op_nodes = []
for n in self.graph_module.graph.nodes:
if n.op not in {"placeholder", "get_attr", "output"}:
op_nodes.append(n)
for node in op_nodes:
# Find which partition the current node belongs
p0_index = self.node_to_partition[node]
p0 = self.partitions[p0_index]
# Go through all the other partitions to swap
# with other nodes from those partitions
for p1_index, _ in enumerate(self.partitions):
if p0_index != p1_index:
p1 = self.partitions[p1_index]
new_cost, new_node_pair = swap_node_to_partition(
node,
p0,
p1,
node_to_latency_mapping,
transfer_rate_bytes_per_sec,
)
# Update the cost
# Track the swapped node pair and their partitions
if new_cost < cost:
cost = new_cost
node_pair = new_node_pair
partition_pair = [p0, p1]
# Do the swapping after trying all the nodes from a partition
if len(node_pair) != 0:
swap_nodes(
node_pair[0], node_pair[1], partition_pair[0], partition_pair[1]
)
reorganize_partitions(self.partitions)
get_device_to_partitions_mapping(self.partitions, self.devices)
reorganize_partitions(self.partitions)
# Mapping the device to the partition
get_device_to_partitions_mapping(self.partitions, self.devices)
return
def aot_based_partition(
self, node_to_partition_mapping, partition_to_logical_device_mapping
):
"""This function helps to rebuild the partitions given the nodes and its
corresponding partition id
"""
partition_id_to_partition_mapping: Dict[int, Partition] = {}
self.node_to_partition = node_to_partition_mapping
for node in self.node_to_partition:
partition_id = self.node_to_partition[node]
# If the requested partition has not been created, create the partition
if partition_id not in partition_id_to_partition_mapping:
partition = Partition(partition_id)
self.partitions.append(partition)
partition_id_to_partition_mapping[partition_id] = partition
partition.logical_device_ids = partition_to_logical_device_mapping[
partition_id
]
else:
partition = partition_id_to_partition_mapping[
self.node_to_partition[node]
]
# Add the current node into the partition
partition.add_node(node)
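

if __name__ == "__main__":
    # Minimal usage sketch under stated assumptions: Device is assumed to take
    # (name, available_mem_bytes, logical_id) and PartitionerConfig to accept a
    # device list with a size-based default mode, matching how they are used
    # elsewhere in this file; verify against partitioner_utils.py. Depending on
    # the torch version, shape propagation may need to be run on the traced
    # module first so that node sizes are available.
    from torch.fx import symbolic_trace

    class TwoLayer(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.a = torch.nn.Linear(4, 4)
            self.b = torch.nn.Linear(4, 2)

        def forward(self, x):
            return self.b(self.a(x))

    torch_module = TwoLayer()
    traced = symbolic_trace(torch_module)
    devices = [
        Device("dev_0", 125 * 1024 * 1024, 0),  # assumed field order
        Device("dev_1", 125 * 1024 * 1024, 1),
    ]
    config = PartitionerConfig(devices)  # assumed default: size-based mode
    result = Partitioner().partition_graph(traced, torch_module, config)
    print(result.module_with_submodules)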
| [
"[email protected]"
] | |
7f91b5fa338e5d6f010bd2a91a2b4428dc2e61f6 | c3db4c42360c47471635a97568bfc9c21bc14c06 | /pdfmerge/migrations/0002_auto_20190616_1800.py | 8708c2ce97d9ded4162bf6eb11e98c10d8063689 | [
"MIT"
] | permissive | rupin/pdfmerger | 3ede9aa9f1f374eba9b1ea2c33b6920403a8f4ad | fee19523e88362d215f1a29cdab0d140f4c9385c | refs/heads/master | 2020-04-07T20:37:56.821730 | 2019-07-18T16:58:01 | 2019-07-18T16:58:01 | 158,696,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,440 | py | # Generated by Django 2.1.3 on 2019-06-16 12:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('pdfmerge', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='formfield',
name='field_page_number',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='formfield',
name='field_type',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='formfield',
name='field_x',
field=models.DecimalField(decimal_places=2, default=0, max_digits=6),
),
migrations.AddField(
model_name='formfield',
name='field_x_increment',
field=models.DecimalField(decimal_places=2, default=0, max_digits=6),
),
migrations.AddField(
model_name='formfield',
name='field_y',
field=models.DecimalField(decimal_places=2, default=0, max_digits=6),
),
migrations.AddField(
model_name='formfield',
name='fk_pdf_id',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='pdfmerge.PDFForm'),
),
migrations.AddField(
model_name='pdfform',
name='file_path',
field=models.FileField(default='', upload_to=''),
),
migrations.AddField(
model_name='pdfform',
name='pdf_name',
field=models.CharField(default='', max_length=100),
),
migrations.AddField(
model_name='pdfform',
name='pdf_type',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='userdata',
name='field_text',
field=models.CharField(default='', max_length=200),
),
migrations.AddField(
model_name='userdata',
name='field_type',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='userdata',
name='fk_user_id',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| [
"[email protected]"
] | |
3d6e519d8173a542b6493ac848758d22a09e11a6 | c513008cacf5592e645e7da3652d90d12a11a988 | /program/niuke-python/Sprial_2.py | 3a59db5855032d2ff2e365c91cdbded357e65f20 | [] | no_license | PiKaChu-R/code-learn | f17cb5ad95d4e8b698320d23e472eb1687576bdc | b94814ac3c72da4c840758569005b7ac6589586a | refs/heads/master | 2020-07-01T02:42:40.235753 | 2019-09-17T13:06:50 | 2019-09-17T13:06:50 | 201,021,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,116 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
@File : Sprial_2.py
@Time : 2019/04/22 17:09:47
@Author : R.
@Version : 2.0
@Contact : [email protected]
@Desc : None
'''
'''
Sprial.py: a workable implementation approach
'''
# here put the import lib
import itertools
def spiral(init):
    status = itertools.cycle(['right', 'down', 'left', 'up'])  # used to cycle through the directions periodically
movemap = {
'right': (1, 0),
'down': (0, 1),
'left': (-1, 0),
'up': (0, -1),
}
    # Initialize the 2D grid
position_map = dict.fromkeys(
[(x, y) for x in range(init) for y in range(init)])
    # Initialize the current position and direction
positon = (0, 0)
new_status = next(status)
for i in range(4*init+1, init * (init+4) + 1):
old_positon = positon
# print(list( zip(positon, movemap[new_status])))
# print('22')
# print(list(map(sum, zip(positon, movemap[new_status]))))
        # Move according to the current direction
positon = tuple(map(sum, zip(positon, movemap[new_status])))
        # If out of bounds or the cell is already filled, switch direction
if (positon not in position_map) or (position_map[positon]):
new_status = next(status)
positon = tuple(map(sum, zip(old_positon, movemap[new_status])))
position_map[old_positon] = i
    # Build the output
print("When:init = {}".format(init))
    # Print the top row
for i in range(1, init+1):
if i < init:
print("{}".format(i), end='\t')
else:
print("{}".format(i))
    # Build the central spiral block
for i in range(init):
print("{}".format(4 * init - i), end='\t')
for j in range(init):
print((str(position_map[(j, i)])), end='\t')
print("{}".format(i + init + 1))
    # Append the bottom row
for i in range(init*3, init*2, -1):
        # Print the bottom row numbers
print("{}".format(i), end='\t')
if i == init:
print("{}".format(i))
if __name__ == "__main__":
    # The argument is the init value
spiral(3)
| [
"[email protected]"
] | |
672a44ccb8cf352f213782f10b1bd23f6a7814e5 | c2e1b17001357f2c13f6b8287e2b6ee0956c955b | /sweetpea/metrics.py | 18f7356dec88fa61c916c2db35377da7329030b3 | [
"MIT"
] | permissive | musslick/sweetpea-py | e0c9fec35c571fbf846808cbdeec58f68c405d4c | b0d9769025022936d57d71a501c9ab5f51b4a4ef | refs/heads/master | 2023-03-21T01:50:51.045650 | 2021-03-24T19:37:18 | 2021-03-24T19:37:18 | 293,494,087 | 1 | 0 | null | 2020-09-07T10:20:12 | 2020-09-07T10:20:12 | null | UTF-8 | Python | false | false | 1,005 | py | import operator as op
from functools import reduce
from math import factorial
from typing import Dict
from sweetpea.blocks import Block
from sweetpea.constraints import ExactlyKInARow, AtMostKInARow
from sweetpea import __generate_cnf
"""
Given a block, this function will collect various metrics pertaining to the block
and return them in a dictionary.
"""
def collect_design_metrics(block: Block) -> Dict:
backend_request = block.build_backend_request()
dimacs_header = __generate_cnf(block).split('\n')[0].split(' ')
return {
'full_factor_count': len(block.design),
'crossing_factor_count': len(block.crossing),
'constraint_count': len(block.constraints),
'block_length': block.trials_per_sample(),
'block_length_factorial': factorial(block.trials_per_sample()),
'low_level_request_count': len(backend_request.ll_requests),
'cnf_total_variables': int(dimacs_header[2]),
'cnf_total_clauses': int(dimacs_header[3])
}
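

# Hedged usage sketch, kept as comments because the factor / block constructors
# named below are assumptions about the sweetpea API and may differ between
# versions:
#
#   from sweetpea.primitives import Factor
#   from sweetpea import fully_cross_block
#
#   color = Factor("color", ["red", "blue"])
#   text = Factor("text", ["red", "blue"])
#   block = fully_cross_block([color, text], [color, text], [])
#   print(collect_design_metrics(block))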
| [
"[email protected]"
] | |
61e3b909ab1ca70f4077a1f193cd2795edb13b58 | 7d2f933ed3c54e128ecaec3a771817c4260a8458 | /venv/Lib/site-packages/pandas/tests/indexing/multiindex/test_slice.py | 4bb71a3f58da28d68cb9bb93aa526aeaf087d7eb | [] | no_license | danielmoreira12/BAProject | c61dfb1d0521eb5a28eef9531a00e744bfb0e26a | 859f588305d826a35cc8f7d64c432f54a0a2e031 | refs/heads/master | 2021-01-02T07:17:39.267278 | 2020-02-25T22:27:43 | 2020-02-25T22:27:43 | 239,541,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,866 | py | import numpy as np
import pandas as pd
import pandas._testing as tm
import pytest
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp
from pandas.core.indexing import _non_reducing_slice
from pandas.errors import UnsortedIndexError
from pandas.tests.indexing.common import _mklbl
class TestMultiIndexSlicers:
def test_per_axis_per_level_getitem(self):
# GH6134
# example test case
ix = MultiIndex.from_product(
[_mklbl("A", 5), _mklbl("B", 7), _mklbl("C", 4), _mklbl("D", 2)]
)
df = DataFrame(np.arange(len(ix.to_numpy())), index=ix)
result = df.loc[(slice("A1", "A3"), slice(None), ["C1", "C3"]), :]
expected = df.loc[
[
tuple([a, b, c, d])
for a, b, c, d in df.index.values
if (a == "A1" or a == "A2" or a == "A3") and (c == "C1" or c == "C3")
]
]
tm.assert_frame_equal(result, expected)
expected = df.loc[
[
tuple([a, b, c, d])
for a, b, c, d in df.index.values
if (a == "A1" or a == "A2" or a == "A3")
and (c == "C1" or c == "C2" or c == "C3")
]
]
result = df.loc[(slice("A1", "A3"), slice(None), slice("C1", "C3")), :]
tm.assert_frame_equal(result, expected)
# test multi-index slicing with per axis and per index controls
index = MultiIndex.from_tuples(
[("A", 1), ("A", 2), ("A", 3), ("B", 1)], names=["one", "two"]
)
columns = MultiIndex.from_tuples(
[("a", "foo"), ("a", "bar"), ("b", "foo"), ("b", "bah")],
names=["lvl0", "lvl1"],
)
df = DataFrame(
np.arange(16, dtype="int64").reshape(4, 4), index=index, columns=columns
)
df = df.sort_index(axis=0).sort_index(axis=1)
# identity
result = df.loc[(slice(None), slice(None)), :]
tm.assert_frame_equal(result, df)
result = df.loc[(slice(None), slice(None)), (slice(None), slice(None))]
tm.assert_frame_equal(result, df)
result = df.loc[:, (slice(None), slice(None))]
tm.assert_frame_equal(result, df)
# index
result = df.loc[(slice(None), [1]), :]
expected = df.iloc[[0, 3]]
tm.assert_frame_equal(result, expected)
result = df.loc[(slice(None), 1), :]
expected = df.iloc[[0, 3]]
tm.assert_frame_equal(result, expected)
# columns
result = df.loc[:, (slice(None), ["foo"])]
expected = df.iloc[:, [1, 3]]
tm.assert_frame_equal(result, expected)
# both
result = df.loc[(slice(None), 1), (slice(None), ["foo"])]
expected = df.iloc[[0, 3], [1, 3]]
tm.assert_frame_equal(result, expected)
result = df.loc["A", "a"]
expected = DataFrame(
dict(bar=[1, 5, 9], foo=[0, 4, 8]),
index=Index([1, 2, 3], name="two"),
columns=Index(["bar", "foo"], name="lvl1"),
)
tm.assert_frame_equal(result, expected)
result = df.loc[(slice(None), [1, 2]), :]
expected = df.iloc[[0, 1, 3]]
tm.assert_frame_equal(result, expected)
# multi-level series
s = Series(np.arange(len(ix.to_numpy())), index=ix)
result = s.loc["A1":"A3", :, ["C1", "C3"]]
expected = s.loc[
[
tuple([a, b, c, d])
for a, b, c, d in s.index.values
if (a == "A1" or a == "A2" or a == "A3") and (c == "C1" or c == "C3")
]
]
tm.assert_series_equal(result, expected)
# boolean indexers
result = df.loc[(slice(None), df.loc[:, ("a", "bar")] > 5), :]
expected = df.iloc[[2, 3]]
tm.assert_frame_equal(result, expected)
with pytest.raises(ValueError):
df.loc[(slice(None), np.array([True, False])), :]
# ambiguous notation
# this is interpreted as slicing on both axes (GH #16396)
result = df.loc[slice(None), [1]]
expected = df.iloc[:, []]
tm.assert_frame_equal(result, expected)
result = df.loc[(slice(None), [1]), :]
expected = df.iloc[[0, 3]]
tm.assert_frame_equal(result, expected)
# not lexsorted
assert df.index.lexsort_depth == 2
df = df.sort_index(level=1, axis=0)
assert df.index.lexsort_depth == 0
msg = (
"MultiIndex slicing requires the index to be "
r"lexsorted: slicing on levels \[1\], lexsort depth 0"
)
with pytest.raises(UnsortedIndexError, match=msg):
df.loc[(slice(None), slice("bar")), :]
# GH 16734: not sorted, but no real slicing
result = df.loc[(slice(None), df.loc[:, ("a", "bar")] > 5), :]
tm.assert_frame_equal(result, df.iloc[[1, 3], :])
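    # The tuple-of-slices spelling above has a more readable equivalent using
    # pd.IndexSlice, e.g.
    #   idx = pd.IndexSlice
    #   df.loc[idx[:, 1], idx[:, ["foo"]]]
    # selects the same rows and columns as
    #   df.loc[(slice(None), 1), (slice(None), ["foo"])]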
def test_multiindex_slicers_non_unique(self):
# GH 7106
# non-unique mi index support
df = (
DataFrame(
dict(
A=["foo", "foo", "foo", "foo"],
B=["a", "a", "a", "a"],
C=[1, 2, 1, 3],
D=[1, 2, 3, 4],
)
)
.set_index(["A", "B", "C"])
.sort_index()
)
assert not df.index.is_unique
expected = (
DataFrame(dict(A=["foo", "foo"], B=["a", "a"], C=[1, 1], D=[1, 3]))
.set_index(["A", "B", "C"])
.sort_index()
)
result = df.loc[(slice(None), slice(None), 1), :]
tm.assert_frame_equal(result, expected)
# this is equivalent of an xs expression
result = df.xs(1, level=2, drop_level=False)
tm.assert_frame_equal(result, expected)
df = (
DataFrame(
dict(
A=["foo", "foo", "foo", "foo"],
B=["a", "a", "a", "a"],
C=[1, 2, 1, 2],
D=[1, 2, 3, 4],
)
)
.set_index(["A", "B", "C"])
.sort_index()
)
assert not df.index.is_unique
expected = (
DataFrame(dict(A=["foo", "foo"], B=["a", "a"], C=[1, 1], D=[1, 3]))
.set_index(["A", "B", "C"])
.sort_index()
)
result = df.loc[(slice(None), slice(None), 1), :]
assert not result.index.is_unique
tm.assert_frame_equal(result, expected)
# GH12896
# numpy-implementation dependent bug
ints = [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
12,
13,
14,
14,
16,
17,
18,
19,
200000,
200000,
]
n = len(ints)
idx = MultiIndex.from_arrays([["a"] * n, ints])
result = Series([1] * n, index=idx)
result = result.sort_index()
result = result.loc[(slice(None), slice(100000))]
expected = Series([1] * (n - 2), index=idx[:-2]).sort_index()
tm.assert_series_equal(result, expected)
def test_multiindex_slicers_datetimelike(self):
# GH 7429
# buggy/inconsistent behavior when slicing with datetime-like
import datetime
dates = [
datetime.datetime(2012, 1, 1, 12, 12, 12) + datetime.timedelta(days=i)
for i in range(6)
]
freq = [1, 2]
index = MultiIndex.from_product([dates, freq], names=["date", "frequency"])
df = DataFrame(
np.arange(6 * 2 * 4, dtype="int64").reshape(-1, 4),
index=index,
columns=list("ABCD"),
)
# multi-axis slicing
idx = pd.IndexSlice
expected = df.iloc[[0, 2, 4], [0, 1]]
result = df.loc[
(
slice(
Timestamp("2012-01-01 12:12:12"), Timestamp("2012-01-03 12:12:12")
),
slice(1, 1),
),
slice("A", "B"),
]
tm.assert_frame_equal(result, expected)
result = df.loc[
(
idx[
Timestamp("2012-01-01 12:12:12") : Timestamp("2012-01-03 12:12:12")
],
idx[1:1],
),
slice("A", "B"),
]
tm.assert_frame_equal(result, expected)
result = df.loc[
(
slice(
Timestamp("2012-01-01 12:12:12"), Timestamp("2012-01-03 12:12:12")
),
1,
),
slice("A", "B"),
]
tm.assert_frame_equal(result, expected)
# with strings
result = df.loc[
(slice("2012-01-01 12:12:12", "2012-01-03 12:12:12"), slice(1, 1)),
slice("A", "B"),
]
tm.assert_frame_equal(result, expected)
result = df.loc[
(idx["2012-01-01 12:12:12":"2012-01-03 12:12:12"], 1), idx["A", "B"]
]
tm.assert_frame_equal(result, expected)
def test_multiindex_slicers_edges(self):
# GH 8132
# various edge cases
df = DataFrame(
{
"A": ["A0"] * 5 + ["A1"] * 5 + ["A2"] * 5,
"B": ["B0", "B0", "B1", "B1", "B2"] * 3,
"DATE": [
"2013-06-11",
"2013-07-02",
"2013-07-09",
"2013-07-30",
"2013-08-06",
"2013-06-11",
"2013-07-02",
"2013-07-09",
"2013-07-30",
"2013-08-06",
"2013-09-03",
"2013-10-01",
"2013-07-09",
"2013-08-06",
"2013-09-03",
],
"VALUES": [22, 35, 14, 9, 4, 40, 18, 4, 2, 5, 1, 2, 3, 4, 2],
}
)
df["DATE"] = pd.to_datetime(df["DATE"])
df1 = df.set_index(["A", "B", "DATE"])
df1 = df1.sort_index()
# A1 - Get all values under "A0" and "A1"
result = df1.loc[(slice("A1")), :]
expected = df1.iloc[0:10]
tm.assert_frame_equal(result, expected)
# A2 - Get all values from the start to "A2"
result = df1.loc[(slice("A2")), :]
expected = df1
tm.assert_frame_equal(result, expected)
# A3 - Get all values under "B1" or "B2"
result = df1.loc[(slice(None), slice("B1", "B2")), :]
expected = df1.iloc[[2, 3, 4, 7, 8, 9, 12, 13, 14]]
tm.assert_frame_equal(result, expected)
# A4 - Get all values between 2013-07-02 and 2013-07-09
result = df1.loc[(slice(None), slice(None), slice("20130702", "20130709")), :]
expected = df1.iloc[[1, 2, 6, 7, 12]]
tm.assert_frame_equal(result, expected)
# B1 - Get all values in B0 that are also under A0, A1 and A2
result = df1.loc[(slice("A2"), slice("B0")), :]
expected = df1.iloc[[0, 1, 5, 6, 10, 11]]
tm.assert_frame_equal(result, expected)
# B2 - Get all values in B0, B1 and B2 (similar to what #2 is doing for
# the As)
result = df1.loc[(slice(None), slice("B2")), :]
expected = df1
tm.assert_frame_equal(result, expected)
# B3 - Get all values from B1 to B2 and up to 2013-08-06
result = df1.loc[(slice(None), slice("B1", "B2"), slice("2013-08-06")), :]
expected = df1.iloc[[2, 3, 4, 7, 8, 9, 12, 13]]
tm.assert_frame_equal(result, expected)
# B4 - Same as A4 but the start of the date slice is not a key.
# shows indexing on a partial selection slice
result = df1.loc[(slice(None), slice(None), slice("20130701", "20130709")), :]
expected = df1.iloc[[1, 2, 6, 7, 12]]
tm.assert_frame_equal(result, expected)
def test_per_axis_per_level_doc_examples(self):
# test index maker
idx = pd.IndexSlice
# from indexing.rst / advanced
index = MultiIndex.from_product(
[_mklbl("A", 4), _mklbl("B", 2), _mklbl("C", 4), _mklbl("D", 2)]
)
columns = MultiIndex.from_tuples(
[("a", "foo"), ("a", "bar"), ("b", "foo"), ("b", "bah")],
names=["lvl0", "lvl1"],
)
df = DataFrame(
np.arange(len(index) * len(columns), dtype="int64").reshape(
(len(index), len(columns))
),
index=index,
columns=columns,
)
result = df.loc[(slice("A1", "A3"), slice(None), ["C1", "C3"]), :]
expected = df.loc[
[
tuple([a, b, c, d])
for a, b, c, d in df.index.values
if (a == "A1" or a == "A2" or a == "A3") and (c == "C1" or c == "C3")
]
]
tm.assert_frame_equal(result, expected)
result = df.loc[idx["A1":"A3", :, ["C1", "C3"]], :]
tm.assert_frame_equal(result, expected)
result = df.loc[(slice(None), slice(None), ["C1", "C3"]), :]
expected = df.loc[
[
tuple([a, b, c, d])
for a, b, c, d in df.index.values
if (c == "C1" or c == "C3")
]
]
tm.assert_frame_equal(result, expected)
result = df.loc[idx[:, :, ["C1", "C3"]], :]
tm.assert_frame_equal(result, expected)
# not sorted
with pytest.raises(UnsortedIndexError):
df.loc["A1", ("a", slice("foo"))]
# GH 16734: not sorted, but no real slicing
tm.assert_frame_equal(
df.loc["A1", (slice(None), "foo")], df.loc["A1"].iloc[:, [0, 2]]
)
df = df.sort_index(axis=1)
# slicing
df.loc["A1", (slice(None), "foo")]
df.loc[(slice(None), slice(None), ["C1", "C3"]), (slice(None), "foo")]
# setitem
df.loc(axis=0)[:, :, ["C1", "C3"]] = -10
def test_loc_axis_arguments(self):
index = MultiIndex.from_product(
[_mklbl("A", 4), _mklbl("B", 2), _mklbl("C", 4), _mklbl("D", 2)]
)
columns = MultiIndex.from_tuples(
[("a", "foo"), ("a", "bar"), ("b", "foo"), ("b", "bah")],
names=["lvl0", "lvl1"],
)
df = (
DataFrame(
np.arange(len(index) * len(columns), dtype="int64").reshape(
(len(index), len(columns))
),
index=index,
columns=columns,
)
.sort_index()
.sort_index(axis=1)
)
# axis 0
result = df.loc(axis=0)["A1":"A3", :, ["C1", "C3"]]
expected = df.loc[
[
tuple([a, b, c, d])
for a, b, c, d in df.index.values
if (a == "A1" or a == "A2" or a == "A3") and (c == "C1" or c == "C3")
]
]
tm.assert_frame_equal(result, expected)
result = df.loc(axis="index")[:, :, ["C1", "C3"]]
expected = df.loc[
[
tuple([a, b, c, d])
for a, b, c, d in df.index.values
if (c == "C1" or c == "C3")
]
]
tm.assert_frame_equal(result, expected)
# axis 1
result = df.loc(axis=1)[:, "foo"]
expected = df.loc[:, (slice(None), "foo")]
tm.assert_frame_equal(result, expected)
result = df.loc(axis="columns")[:, "foo"]
expected = df.loc[:, (slice(None), "foo")]
tm.assert_frame_equal(result, expected)
# invalid axis
with pytest.raises(ValueError):
df.loc(axis=-1)[:, :, ["C1", "C3"]]
with pytest.raises(ValueError):
df.loc(axis=2)[:, :, ["C1", "C3"]]
with pytest.raises(ValueError):
df.loc(axis="foo")[:, :, ["C1", "C3"]]
def test_loc_axis_single_level_multi_col_indexing_multiindex_col_df(self):
# GH29519
df = pd.DataFrame(
np.arange(27).reshape(3, 9),
columns=pd.MultiIndex.from_product(
[["a1", "a2", "a3"], ["b1", "b2", "b3"]]
),
)
result = df.loc(axis=1)["a1":"a2"]
expected = df.iloc[:, :-3]
tm.assert_frame_equal(result, expected)
def test_loc_axis_single_level_single_col_indexing_multiindex_col_df(self):
# GH29519
df = pd.DataFrame(
np.arange(27).reshape(3, 9),
columns=pd.MultiIndex.from_product(
[["a1", "a2", "a3"], ["b1", "b2", "b3"]]
),
)
result = df.loc(axis=1)["a1"]
expected = df.iloc[:, :3]
expected.columns = ["b1", "b2", "b3"]
tm.assert_frame_equal(result, expected)
def test_loc_ax_single_level_indexer_simple_df(self):
# GH29519
# test single level indexing on single index column data frame
df = pd.DataFrame(np.arange(9).reshape(3, 3), columns=["a", "b", "c"])
result = df.loc(axis=1)["a"]
expected = pd.Series(np.array([0, 3, 6]), name="a")
tm.assert_series_equal(result, expected)
def test_per_axis_per_level_setitem(self):
# test index maker
idx = pd.IndexSlice
# test multi-index slicing with per axis and per index controls
index = MultiIndex.from_tuples(
[("A", 1), ("A", 2), ("A", 3), ("B", 1)], names=["one", "two"]
)
columns = MultiIndex.from_tuples(
[("a", "foo"), ("a", "bar"), ("b", "foo"), ("b", "bah")],
names=["lvl0", "lvl1"],
)
df_orig = DataFrame(
np.arange(16, dtype="int64").reshape(4, 4), index=index, columns=columns
)
df_orig = df_orig.sort_index(axis=0).sort_index(axis=1)
# identity
df = df_orig.copy()
df.loc[(slice(None), slice(None)), :] = 100
expected = df_orig.copy()
expected.iloc[:, :] = 100
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc(axis=0)[:, :] = 100
expected = df_orig.copy()
expected.iloc[:, :] = 100
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[(slice(None), slice(None)), (slice(None), slice(None))] = 100
expected = df_orig.copy()
expected.iloc[:, :] = 100
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:, (slice(None), slice(None))] = 100
expected = df_orig.copy()
expected.iloc[:, :] = 100
tm.assert_frame_equal(df, expected)
# index
df = df_orig.copy()
df.loc[(slice(None), [1]), :] = 100
expected = df_orig.copy()
expected.iloc[[0, 3]] = 100
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[(slice(None), 1), :] = 100
expected = df_orig.copy()
expected.iloc[[0, 3]] = 100
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc(axis=0)[:, 1] = 100
expected = df_orig.copy()
expected.iloc[[0, 3]] = 100
tm.assert_frame_equal(df, expected)
# columns
df = df_orig.copy()
df.loc[:, (slice(None), ["foo"])] = 100
expected = df_orig.copy()
expected.iloc[:, [1, 3]] = 100
tm.assert_frame_equal(df, expected)
# both
df = df_orig.copy()
df.loc[(slice(None), 1), (slice(None), ["foo"])] = 100
expected = df_orig.copy()
expected.iloc[[0, 3], [1, 3]] = 100
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[idx[:, 1], idx[:, ["foo"]]] = 100
expected = df_orig.copy()
expected.iloc[[0, 3], [1, 3]] = 100
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc["A", "a"] = 100
expected = df_orig.copy()
expected.iloc[0:3, 0:2] = 100
tm.assert_frame_equal(df, expected)
# setting with a list-like
df = df_orig.copy()
df.loc[(slice(None), 1), (slice(None), ["foo"])] = np.array(
[[100, 100], [100, 100]], dtype="int64"
)
expected = df_orig.copy()
expected.iloc[[0, 3], [1, 3]] = 100
tm.assert_frame_equal(df, expected)
# not enough values
df = df_orig.copy()
with pytest.raises(ValueError):
df.loc[(slice(None), 1), (slice(None), ["foo"])] = np.array(
[[100], [100, 100]], dtype="int64"
)
with pytest.raises(ValueError):
df.loc[(slice(None), 1), (slice(None), ["foo"])] = np.array(
[100, 100, 100, 100], dtype="int64"
)
# with an alignable rhs
df = df_orig.copy()
df.loc[(slice(None), 1), (slice(None), ["foo"])] = (
df.loc[(slice(None), 1), (slice(None), ["foo"])] * 5
)
expected = df_orig.copy()
expected.iloc[[0, 3], [1, 3]] = expected.iloc[[0, 3], [1, 3]] * 5
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[(slice(None), 1), (slice(None), ["foo"])] *= df.loc[
(slice(None), 1), (slice(None), ["foo"])
]
expected = df_orig.copy()
expected.iloc[[0, 3], [1, 3]] *= expected.iloc[[0, 3], [1, 3]]
tm.assert_frame_equal(df, expected)
rhs = df_orig.loc[(slice(None), 1), (slice(None), ["foo"])].copy()
rhs.loc[:, ("c", "bah")] = 10
df = df_orig.copy()
df.loc[(slice(None), 1), (slice(None), ["foo"])] *= rhs
expected = df_orig.copy()
expected.iloc[[0, 3], [1, 3]] *= expected.iloc[[0, 3], [1, 3]]
tm.assert_frame_equal(df, expected)
def test_multiindex_label_slicing_with_negative_step(self):
s = Series(
np.arange(20), MultiIndex.from_product([list("abcde"), np.arange(4)])
)
SLC = pd.IndexSlice
def assert_slices_equivalent(l_slc, i_slc):
tm.assert_series_equal(s.loc[l_slc], s.iloc[i_slc])
tm.assert_series_equal(s[l_slc], s.iloc[i_slc])
assert_slices_equivalent(SLC[::-1], SLC[::-1])
assert_slices_equivalent(SLC["d"::-1], SLC[15::-1])
assert_slices_equivalent(SLC[("d",)::-1], SLC[15::-1])
assert_slices_equivalent(SLC[:"d":-1], SLC[:11:-1])
assert_slices_equivalent(SLC[:("d",):-1], SLC[:11:-1])
assert_slices_equivalent(SLC["d":"b":-1], SLC[15:3:-1])
assert_slices_equivalent(SLC[("d",):"b":-1], SLC[15:3:-1])
assert_slices_equivalent(SLC["d":("b",):-1], SLC[15:3:-1])
assert_slices_equivalent(SLC[("d",):("b",):-1], SLC[15:3:-1])
assert_slices_equivalent(SLC["b":"d":-1], SLC[:0])
assert_slices_equivalent(SLC[("c", 2)::-1], SLC[10::-1])
assert_slices_equivalent(SLC[:("c", 2):-1], SLC[:9:-1])
assert_slices_equivalent(SLC[("e", 0):("c", 2):-1], SLC[16:9:-1])
def test_multiindex_slice_first_level(self):
# GH 12697
freq = ["a", "b", "c", "d"]
idx = MultiIndex.from_product([freq, np.arange(500)])
df = DataFrame(list(range(2000)), index=idx, columns=["Test"])
df_slice = df.loc[pd.IndexSlice[:, 30:70], :]
result = df_slice.loc["a"]
expected = DataFrame(list(range(30, 71)), columns=["Test"], index=range(30, 71))
tm.assert_frame_equal(result, expected)
result = df_slice.loc["d"]
expected = DataFrame(
list(range(1530, 1571)), columns=["Test"], index=range(30, 71)
)
tm.assert_frame_equal(result, expected)
def test_int_series_slicing(self, multiindex_year_month_day_dataframe_random_data):
ymd = multiindex_year_month_day_dataframe_random_data
s = ymd["A"]
result = s[5:]
expected = s.reindex(s.index[5:])
tm.assert_series_equal(result, expected)
exp = ymd["A"].copy()
s[5:] = 0
exp.values[5:] = 0
tm.assert_numpy_array_equal(s.values, exp.values)
result = ymd[5:]
expected = ymd.reindex(s.index[5:])
tm.assert_frame_equal(result, expected)
def test_non_reducing_slice_on_multiindex(self):
# GH 19861
dic = {
("a", "d"): [1, 4],
("a", "c"): [2, 3],
("b", "c"): [3, 2],
("b", "d"): [4, 1],
}
df = pd.DataFrame(dic, index=[0, 1])
idx = pd.IndexSlice
slice_ = idx[:, idx["b", "d"]]
tslice_ = _non_reducing_slice(slice_)
result = df.loc[tslice_]
expected = pd.DataFrame({("b", "d"): [4, 1]})
tm.assert_frame_equal(result, expected)
| [
"[email protected]"
] | |
1ec71b82893c3f64ed495e2bf6673385d2f01c5a | c7d91529db199322e39e54fe4051a75704ea843e | /chaper01_list/t1.12.py | 52c5131518690d49becba7d34818301aad263a2f | [] | no_license | 2226171237/Algorithmpractice | fc786fd47aced5cd6d96c45f8e728c1e9d1160b7 | 837957ea22aa07ce28a6c23ea0419bd2011e1f88 | refs/heads/master | 2020-12-26T07:20:37.226443 | 2020-09-13T13:31:05 | 2020-09-13T13:31:05 | 237,431,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,342 | py | #-*- coding=utf-8 -*-
'''
Given a sorted linked list in which every node is also the head of a sorted linked list,
implement a flatten() function that flattens the whole structure into a single sorted linked list.
'''
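# Structure sketch (editor's illustration, derived from the demo at the bottom of this file):
# the heads of the sorted sub-lists are chained through `right`, and the values inside each
# sub-list are chained through `down`:
#
#   3 -> 11 -> 15 -> 30        (right)
#   |     |     |     |
#   6    21    22    39        (down)
#   8          50    40
#  31                55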
class LNode:
def __init__(self,x,right=None,down=None):
self._data=x
self.right=right
self.down=down
class LList:
def __init__(self,data=[]):
self.head=None
if data:
data=list(data)
for d in data:
self.push(d)
def is_empty(self):
return self.head is None
def push(self,x):
new_node=LNode(x) if not isinstance(x,LList) else x.head
if self.is_empty():
self.head=new_node
else:
if isinstance(x,LList):
node=self.head
while node.right:
node=node.right
node.right=new_node
else:
node=self.head
while node.down:
node=node.down
node.down=LNode(x)
def visit(self):
if self.is_empty():
return
print('head')
node=self.head
while node:
childnode = node
while childnode:
print(childnode._data,end='->')
childnode=childnode.down
print('end')
node=node.right
print('end')
def merge(self,a,b):
'''
        Merge two sorted (down-linked) lists, i.e. the merge step of merge sort.
:param a:
:param b:
:return:
'''
if a is None:
return b
if b is None:
return a
if a._data<b._data:
result=a
result.down=self.merge(a.down,b)
else:
result=b
result.down=self.merge(a,b.down)
return result
def flatten(self,head):
if head is None or head.right is None:
return head
head.right=self.flatten(head.right)
head=self.merge(head,head.right)
return head
if __name__ == '__main__':
L1=LList([3,6,8,31])
L2=LList([11,21])
L3=LList([15,22,50])
L4=LList([30,39,40,55])
L=LList([L1,L2,L3,L4])
L.visit()
head=L.flatten(L.head)
node=head
while node:
print(node._data,end='->')
node=node.down
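    # Expected output of the demo above (editor's note, computed from the inputs):
    # 3->6->8->11->15->21->22->30->31->39->40->50->55->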
| [
"[email protected]"
] | |
97b3a57cfd6020fc40c236296004864bb838d3b5 | a00f703ac6561ac99066c7220075dd4a420bb3ff | /goiteens/models/post_manager.py | 34947ba95b09a3a68bd37065db233d762cdb45c5 | [] | no_license | volodymyrhusak/goiteens_docker | 9c3e56091218f3e7633dc5d94816959254f5c8ca | 618722fce82e85fe13f8c60ee76216fbf13338a7 | refs/heads/master | 2022-12-22T02:01:43.589833 | 2018-03-14T20:55:26 | 2018-03-14T20:55:26 | 125,272,517 | 0 | 0 | null | 2022-11-04T19:17:52 | 2018-03-14T20:55:58 | Python | UTF-8 | Python | false | false | 1,172 | py | from models.model import PostModel ,UserModel ,CommentsModel
from models.base_manager import SNBaseManager
from models.user_manager import UserManager
class PostManager(SNBaseManager):
def __init__(self):
class_model = PostModel
super(PostManager, self).__init__(class_model)
def get_posts(self,user):
self.select().And([('user','=',user.object.id)]).run()
def save_post(self,form, user):
self.object.title = form.get('title', '')
self.object.photos = form.get('photos', '')
self.object.text = form.get('text', '')
self.object.user = user.object
self.save()
    def get_post(self, id):
        self.select().And([('id', '=', str(id))]).run()
        # Assumption: run() leaves the selected row on self.object (save_post() relies on the same).
        return self.object
def add_comment(self,comment,user,post):
if not isinstance(post, PostModel):
post = self.get_post(post)
if not isinstance(user, UserModel):
user = UserManager().get_user(user)
comment_manager = SNBaseManager(CommentsModel)
comment_manager.object.text = comment
comment_manager.object.post = post
comment_manager.object.user = user
comment_manager.save()
| [
"[email protected]"
] | |
b6cf62297f6f5a18d8098fd663d50ceed6d2fb6a | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/playground/mnurolcay/2009/network/chat/gyachi/actions.py | 27982a32850ab05a787bda82f499cce34b776942 | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,444 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import get
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
def setup():
shelltools.system("sh autogen.sh")
autotools.configure("--enable-plugin_pulseaudio \
--enable-plugin_blowfish \
--enable-plugin_mcrypt \
--enable-gtkspell \
--enable-wine \
--enable-plugin_gpgme \
--disable-plugin_xmms \
--disable-rpath \
--disable-esd")
def build():
autotools.make()
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
pisitools.insinto("/usr/share/pixmaps", "themes/gyachi-classic/gyach-icon_48.png", "gyachi.png")
pisitools.insinto("/usr/share/icons/hicolor/32x32/apps/gyachi", "themes/gyachi-classic/gyach-icon_32.png", "gyachi.png")
pisitools.insinto("/usr/share/icons/hicolor/48x48/apps/gyachi", "themes/gyachi-classic/gyach-icon_48.png", "gyachi.png")
pisitools.dodoc("ChangeLog", "VERSION", "doc/*.txt", "doc/txt/COPYING", "doc/txt/README", "doc/txt/webcams.txt", "doc/txt/gyachi-help-short.txt")
pisitools.dohtml("doc/html/*")
| [
"[email protected]"
] | |
32de413aeb5cf0e0c6a5e8ba621fb4d62f92ef03 | e872e1136887cd6753ae292939e4656130c8f7d9 | /api/urls.py | f68962bb645e1c84bfe889e90658576f6e1b915b | [
"MIT"
] | permissive | florimondmanca/personal-api | 92e9d3ba8e3b16466ba54f5e9ea0493030e9cf95 | 6300f965d3f51d1bf5f10cf1eb15d673bd627631 | refs/heads/master | 2020-03-22T14:48:06.087822 | 2019-11-16T15:31:05 | 2019-11-16T15:31:05 | 140,206,398 | 4 | 1 | MIT | 2019-10-21T19:24:44 | 2018-07-08T22:16:47 | Python | UTF-8 | Python | false | false | 423 | py | """API URLs."""
from django.urls import path
from rest_framework.routers import DefaultRouter
import blog.views
from .views import obtain_auth_token
# Enable view names as 'api:...'
app_name = "api"
router = DefaultRouter()
# Blog endpoints
router.register("posts", blog.views.PostViewSet)
router.register("popular-tags", blog.views.PopularTagViewSet)
urlpatterns = router.urls + [path("login/", obtain_auth_token)]
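# Editor's note (illustrative, not part of the original file): because app_name = "api",
# the router-generated routes reverse as "api:<basename>-list" / "api:<basename>-detail",
# where each basename is derived from the viewset's queryset unless overridden.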
| [
"[email protected]"
] | |
641cf3b122ff8250505462e1748406e61cd668ae | cd052f960846ea33e22abdded3106fb492f16c31 | /爬虫项目/spider/spiders/pdd_scroll_activity_v1.py | d403d0e6762fc4b10a4003e7d587fa88ba3abacb | [] | no_license | byst4nder/his_spider | 2d96457b70894c36506e8061d8a3201ac337a5d0 | a51e31acff41292e568ac22b0e213e6cb48218fa | refs/heads/master | 2020-07-21T12:06:28.952083 | 2019-09-06T14:25:58 | 2019-09-06T14:25:58 | 206,857,595 | 1 | 0 | null | 2019-09-06T19:04:02 | 2019-09-06T19:04:02 | null | UTF-8 | Python | false | false | 13,470 | py | # -*- coding: utf-8 -*-
# Home page scrolling-banner activity info
import logging
import os
import scrapy
import json, time, sys, random, urllib, pyssdb, re
from spider.items import CategoryItem
from urllib import parse as urlparse
##import mq.mq as mq
##import ssdb.ssdbapi
class PddScrollActivityNewSpider(scrapy.Spider):
name = 'pdd_scroll_activity_v1'
custom_settings = {
# 'DOWNLOADER_MIDDLEWARES': {'spider.middlewares.ProxyMiddleware': 100},
# 'LOG_FILE':'',
# 'LOG_LEVEL':'DEBUG',
# 'LOG_ENABLED':True,
'DOWNLOAD_TIMEOUT': 5,
'RETRY_TIMES': 10,
}
def start_requests(self):
headers = self.make_headers()
yield scrapy.Request(url="https://mobile.yangkeduo.com/", headers=headers)
def parse(self, response):
""" 获取首页活动信息"""
body = response.body.decode("utf-8")
result = re.search(r'{"props".*?344]}', body).group()
result = json.loads(result)
        # Fixed category activities on the home page
active_list = self.dict_get(result, 'quickEntrances', None)
if len(active_list) > 0:
list_subject = []
for i in active_list:
if i["id"] in [36, 134, 162, 115, 41]:
list_subject.append(i)
for i in list_subject:
subject_id = i["id"]
path = i["title"]
link_url = i['link']
headers = self.make_headers()
url = "https://mobile.yangkeduo.com/" + link_url
function = ''
meta = {'path_id': [subject_id], 'path': [path]}
                if subject_id == 36:  # limited-time flash sale
subject_list = []
subject_dic = {"ongoing": '100', "future": '101', "more": '102', "brand_top": '103'} # 100代表正在抢购,101代表马上抢购,102代表明日预告,103品牌秒杀
for name, subject_id in subject_dic.items():
subject = self.build_subject_info(subject_id, name, name, subject_id, 11)
subject_list.append(subject)
meta = {'subject_list': subject_list, 'path_id': [103], 'path': ["brand_top"]}
url = "https://mobile.yangkeduo.com/luxury_spike.html?refer_page_name=seckill&refer_page_id=10025_1556522752181_6xi8f7UAgH&refer_page_sn=10025" # 品牌秒杀
function = self.kill_parse_subject
                elif subject_id == 134:  # broken-size clearance
function = self.short_parse_subject
                elif subject_id == 162:  # brand pavilion
function = self.brand_parse_subject
                elif subject_id == 115:  # 9.9-yuan flash deals
function = self.special_parse_subject
                elif subject_id == 41:  # "Love Shopping" feed
url = "https://api.pinduoduo.com/api/gentian/7/resource_tabs?without_mix=1&platform=1&pdduid=0"
function = self.shopping_parse_subject
yield scrapy.Request(url, meta=meta, callback=function, headers=headers)
def brand_parse_subject(self, response):
""" 品牌馆 """
body = response.body.decode()
path = response.meta["path"]
path_id = response.meta["path_id"]
result = re.search(r'{"props".*?"https://cdn.yangkeduo.com"}', body).group()
if result:
subject_list = []
result = json.loads(result)
tabList = self.dict_get(result, 'tabList', None)
if tabList and len(tabList) > 0:
for i in tabList:
subject_id = str(i["web_url"])
subject_id = re.search(r"\d+", subject_id).group()
str_i = str(i)
name = re.search(r"'tab_name': '\w+'", str_i).group()
name = re.sub("'tab_name': ", "", name)
subject_info = self.build_subject_info(subject_id, name, path + [name], path_id + [subject_id], 31)
subject_list.append(subject_info)
self.save_log(json.dumps({"subject_info_brand": subject_info}))
item = CategoryItem()
logging.debug(json.dumps({'subject_list_brand': subject_list}))
self.save_log(json.dumps({'subject_list_brand': subject_list}))
item['cat_list'] = subject_list
yield item
def special_parse_subject(self, response):
""" 9块9特卖 """
subject_list = []
path = response.meta["path"]
path_id = response.meta["path_id"]
body = response.body.decode()
result = re.search(r'{"props".*?"https://cdn.yangkeduo.com"}', body).group()
if result:
result = json.loads(result)
tab_list = self.dict_get(result, 'tabList', None)
if tab_list:
for i in tab_list:
subject_id = i["tab_id"]
name = i["subject"]
subject_info = self.build_subject_info(subject_id, name, path + [name], path_id + [subject_id], 41)
subject_list.append(subject_info)
self.save_log(json.dumps({"subject_info_special": subject_info}))
item = CategoryItem()
logging.debug(json.dumps({'subject_list_special': subject_list}))
self.save_log(json.dumps({'subject_list_special': subject_list}))
item['cat_list'] = subject_list
yield item
def shopping_parse_subject(self, response):
""" 爱逛街 """
subject_list = []
path = response.meta["path"]
path_id = response.meta["path_id"]
body = response.body.decode('utf-8')
result = json.loads(body)
logging.debug(result)
list_subject = self.dict_get(result, 'list', None)
if list_subject:
for i in list_subject:
subject_id = i["tab_id"]
name = i["subject"]
subject_info = self.build_subject_info(subject_id, name, path + [name], path_id + [subject_id], 51)
subject_list.append(subject_info)
self.save_log(json.dumps({"subject_info_shopping": subject_info}))
item = CategoryItem()
logging.debug(json.dumps({'subject_list_shopping': subject_list}))
self.save_log(json.dumps({'subject_list_shopping': subject_list}))
item['cat_list'] = subject_list
yield item
def short_parse_subject(self, response):
""" 断码清仓"""
path = response.meta["path"]
path_id = response.meta["path_id"]
body = response.body.decode()
result = re.search(r'{"props".*?"https://cdn.yangkeduo.com"}', body).group()
logging.debug(result)
if result:
result = json.loads(result)
result = self.dict_get(result, 'filterTabList', None)
subject_list = []
if result and len(result) > 0:
for i in result:
subject_id = i["id"]
str_i = str(i)
d = re.search(r"'brand_name': '\w+'", str_i).group()
name = re.sub(r"'brand_name':", '', d)
subject_info = self.build_subject_info(subject_id, name, path + [name], path_id + [subject_id], 21)
subject_list.append(subject_info)
self.save_log(json.dumps({"subject_info_short": subject_info}))
item = CategoryItem()
logging.debug(json.dumps({'subject_list_short': subject_list}))
self.save_log(json.dumps({'subject_list_short': subject_list}))
item['cat_list'] = subject_list
yield item
def kill_parse_subject(self, response):
""" 限时秒杀"""
path = response.meta["path"]
path_id = response.meta["path_id"]
subject_list = response.meta["subject_list"]
self.save_log(json.dumps({"kill_subject_list": subject_list}))
body = response.body.decode()
result = re.search(r'{"props".*?"https://cdn.yangkeduo.com"}', body).group()
logging.debug(result)
if result:
result = json.loads(result)
result = self.dict_get(result, 'brandList', None)
if result:
for i in result:
subject_id = i["data"]["id"]
name = i["data"]["name"]
subject_info = self.build_subject_info(subject_id, name, path + [name], path_id + [subject_id], 14)
subject_list.append(subject_info)
self.save_log(json.dumps({"subject_info_kill": subject_info}))
item = CategoryItem()
logging.debug(json.dumps({'subject_list_kill': subject_list}))
self.save_log(json.dumps({'subject_list_kill': subject_list}))
item['cat_list'] = subject_list
yield item
    '''Extract the subject_id from a URL.'''
def get_subject_id(self, link_url):
url_arr = urlparse.urlparse(link_url)
url_arr = urlparse.parse_qs(url_arr.query)
if url_arr:
keys = url_arr.keys()
if "id" in keys:
subject_id = int(url_arr['id'][0])
elif "subject_id" in keys:
subject_id = int(url_arr['subject_id'][0])
else:
return False
else:
return False
return {'subject_id': subject_id}
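    # e.g. get_subject_id("act.html?subject_id=935") -> {'subject_id': 935}; returns False
    # when neither 'id' nor 'subject_id' is present (URL is hypothetical, for illustration).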
    '''Build an activity (subject) info dict.'''
def build_subject_info(self, subject_id, title, path, path_id, api_type, subjectType=1, activity_type=2):
info = {'subject_id': subject_id, 'name': title, 'path': path, 'type': subjectType, "api_type": api_type,
'activity_type': activity_type, 'path_id': path_id}
return info
def build_subject_goods_info(self, subject_id, title, path, path_id, api_type, goods_id_str, subjectType=1, activity_type=2):
info = {'subject_id': subject_id, 'name': title, 'path': path, 'type': subjectType, "api_type": api_type,
'activity_type': activity_type, 'path_id': path_id, 'goods_id_str': goods_id_str}
return info
    '''Build randomized request headers.'''
def make_headers(self):
chrome_version = str(random.randint(59, 63)) + '.0.' + str(random.randint(1000, 3200)) + '.94'
headers = {
"Host": "mobile.yangkeduo.com",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Accept-Encoding": "gzip, deflate",
"Referer": "http://yangkeduo.com/goods.html?goods_id=442573047&from_subject_id=935&is_spike=0&refer_page_name=subject&refer_page_id=subject_1515726808272_1M143fWqjQ&refer_page_sn=10026",
"Connection": "keep-alive",
'cookie': 'api_uid=rBQ5vlzBpOVVmAXVEzAbAg==; ua=Mozilla%2F5.0%20(Windows%20NT%2010.0%3B%20Win64%3B%20x64)%20AppleWebKit%2F537.36%20(KHTML%2C%20like%20Gecko)%20Chrome%2F72.0.3626.109%20Safari%2F537.36; webp=1; _nano_fp=Xpdyn5gbn0T8l0Tbn9_wNR__G8~FgcKa0lATgz4y; msec=1800000; rec_list_mall_bottom=rec_list_mall_bottom_1MX53n; goods_detail=goods_detail_V2l30O; goods_detail_mall=goods_detail_mall_3vdTRG; JSESSIONID=ED2FEBAC94D04AA54FC09EBEFBF0F58C; promotion_subject=promotion_subject_x0psf8; rec_list_index=rec_list_index_pDVkjB',
"Upgrade-Insecure-Requests": 1,
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/' + chrome_version + ' Safari/537.36',
}
ip = str(random.randint(100, 200)) + '.' + str(random.randint(1, 255)) + '.' + str(
random.randint(1, 255)) + '.' + str(random.randint(1, 255))
headers['CLIENT-IP'] = ip
headers['X-FORWARDED-FOR'] = ip
return headers
def errback_httpbin(self, failure):
request = failure.request
response = failure.value.response
if response.status == 403:
return
# headers = self.make_headers()
# meta = {'proxy':self.proxy}
meta = request.meta
yield scrapy.Request(request.url, meta=meta, callback=self.parse, headers=request.headers, dont_filter=True,
errback=self.errback_httpbin)
def dict_get(self, dict, objkey, default):
tmp = dict
for k, v in tmp.items():
if k == objkey:
return v
else:
if (type(v).__name__ == 'dict'):
ret = self.dict_get(v, objkey, default)
if ret is not default:
return ret
return default
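    # e.g. dict_get({"store": {"props": {"tabList": [1, 2]}}}, "tabList", None) -> [1, 2]
    # (keys here are illustrative); returns the first match found at any depth.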
def save_log(self, content):
date = time.strftime('%Y-%m-%d')
file_path = '/data/spider/logs/activity_scroll_log'
if not os.path.exists(file_path):
os.makedirs(file_path)
content = content + ","
file_name = file_path + '/' + date + ".log"
with open(file_name, 'a+') as f:
f.write(content + "\r\n")
| [
"[email protected]"
] | |
0071f6d25357b403c468cee9f14e317f1172b23f | a0488ed86f297f5f18864bf3f317dbed48b3b00d | /setup.py | 1dafa036d3e90c0bcceec24674af2c2720c35ab2 | [
"MIT"
] | permissive | DamianArado/moya-techblog | 7aefeea5bb74fa410e7cf896a83c0af0f4b0d25c | 4f7d606b22773db40850b742945e83e328c63bb7 | refs/heads/master | 2021-12-22T02:53:40.535675 | 2017-10-09T14:07:59 | 2017-10-09T14:07:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | from setuptools import setup
VERSION = "0.1.0"
setup(
name='techblog',
version=VERSION,
description="Blog for Coders and Photographers",
zip_safe=False,
license="MIT",
author="Will McGugan",
author_email="[email protected]",
url="https://github.com/moyaproject/moya-techblog",
entry_points={
"console_scripts": [
'techblog = techblog:main'
]
}
) | [
"[email protected]"
] | |
01fa6bacbb73a2d447e30cf4b8e4920c7f051557 | 50a6f90c46ee83e667de08be9c74acbaa792dbc5 | /python/timeseries.py | 70d73e0ca869e4e62d52871ce0b2a56e246504b9 | [
"MIT"
] | permissive | cpausmit/Kraken | e8f51a46e5d181e855bb9d2276b66c67e5842888 | e79a19f6a4570e10ae52e543a5af9b2a3414c965 | refs/heads/master | 2023-08-16T21:36:54.426014 | 2023-08-15T14:11:08 | 2023-08-15T14:11:08 | 75,231,636 | 0 | 2 | MIT | 2018-02-08T14:25:53 | 2016-11-30T22:09:47 | Python | UTF-8 | Python | false | false | 9,050 | py | import os
import datetime,time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mlp
from optparse import OptionParser
import jsum
BASE = "%s/moni"%(os.getenv('KRAKEN_WORK'))
EXT = "moni"
NOW = datetime.datetime.now().strftime("%m/%d/%y, %H:%M")
class Timeseries:
"Time series of job summaries."
def __init__(self,key):
self.key = key
self.times = []
self.jsums = []
def get_totals(self):
totals = []
for time,jsum in zip(self.times,self.jsums):
totals.append(jsum.n_total)
return totals
def get_dones(self):
dones = []
for time,jsum in zip(self.times,self.jsums):
dones.append(jsum.n_done)
return dones
def get_nocatalogs(self):
nocatalogs = []
for time,jsum in zip(self.times,self.jsums):
nocatalogs.append(jsum.n_nocatalog)
return nocatalogs
def get_batches(self):
batches = []
for time,jsum in zip(self.times,self.jsums):
batches.append(jsum.n_batch)
return batches
def get_idles(self):
idles = []
for time,jsum in zip(self.times,self.jsums):
idles.append(jsum.n_idle)
return idles
def get_runnings(self):
runnings = []
for time,jsum in zip(self.times,self.jsums):
runnings.append(jsum.n_running)
return runnings
def get_helds(self):
helds = []
for time,jsum in zip(self.times,self.jsums):
helds.append(jsum.n_held)
return helds
def add(self,time,jsum):
        if len(self.times) == 0 or time>self.times[-1]:
            self.times.append(time)
        else:
            print("ERROR - time is attached out of order (last: %d, this: %d)"%(self.times[-1],time))
            return -1
        self.jsums.append(jsum)
def read(self):
with open("%s/%s.%s"%(BASE,self.key,EXT),"r") as file:
data = file.read()
# go through each row
for line in data.split("\n"):
f = line.split(',') # use a comma to separate columns
if len(f)>1 and len(line)>0 and line[0] != '#': # protect against not well formatted lines
self.times.append(int(f[0]))
self.jsums.append(jsum.Jsum(f[1:]))
def write(self):
filename = "%s/%s.%s"%(BASE,self.key,EXT)
print(" Write file: %s"%(filename))
with open(filename,"w") as f:
for time,jsum in zip(self.times,self.jsums):
f.write("%d,%s\n"%(time,jsum.string()))
def show(self):
print(" Key - %s"%(self.key))
for time,jsum in zip(self.times,self.jsums):
print("%d,%s"%(time,jsum.string()))
def drop(self,times):
new_times = []
new_jsums = []
for time,jsum in zip(self.times,self.jsums):
if time not in times:
new_times.append(time)
new_jsums.append(jsum)
else:
print(" Dropping time: %d"%(time))
self.times = new_times
self.jsums = new_jsums
return
def merge(self,key,ts):
if len(self.times) != len(ts.times):
print(" ERROR - time series have different length (this %d vs added %d)"%(len(self.times),len(ts.times)))
#return 1
i = 0
drop_times = []
for time,jsum in zip(self.times,self.jsums):
if time != ts.times[i]:
print(" ERROR - (%s,%s)"%(self.key,ts.key))
print(" time series out of sync (this %d vs added %d)"%(time,ts.times[i]))
while (time>ts.times[i]):
print(" WARNING - Ignore new record (%d)."%(ts.times[i]))
i += 1
if i>len(ts.times)-1:
break
if (time<ts.times[i]):
print(" WARNING - Drop existing record (%d)."%(time))
drop_times.append(time) # keep record of what needs to be dropped
continue
jsum.merge(ts.jsums[i])
i += 1
self.drop(drop_times)
# last update the key
self.key = key
return 0
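    # Editor's note: merge() assumes both series were sampled at the same timestamps;
    # out-of-sync records are skipped or dropped as logged above (see the commented
    # usage example at the bottom of this file).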
def find_droptimes(self,ts,remove=True):
#if len(self.times) != len(ts.times):
# print(" WARNING - time series have different length (this %d vs added %d)"%(len(self.times),len(ts.times)))
i = 0
drop_times = []
for time,jsum in zip(self.times,self.jsums):
if i>=len(ts.times):
print(" WARNING - Drop existing record (%d)."%(time))
drop_times.append(time)
i += 1
continue
if time != ts.times[i]:
#print(" ERROR - (%s,%s)"%(self.key,ts.key))
#print(" time series out of sync (this %d vs added %d)"%(time,ts.times[i]))
while (time>ts.times[i]):
print(" WARNING - Ignore new record (%d)."%(ts.times[i]))
drop_times.append(ts.times[i]) # keep record of what needs to be dropped
i += 1
if i>len(ts.times)-1:
print("STOP")
break
if (time<ts.times[i]):
print(" WARNING - Drop existing record (%d)."%(time))
drop_times.append(time) # keep record of what needs to be dropped
continue
i += 1
if remove:
self.drop(drop_times)
ts.drop(drop_times)
return drop_times
def plot(self,options,figure="total",last='not-defined'):
# define the figure
plt.figure(options.name+'_'+figure)
if figure == "total":
plt.plot(self.times,self.get_totals(),marker="",ls='dashed',linewidth=1,label='total')
plt.plot(self.times,self.get_dones(),marker="o",ls='solid',linewidth=2,label='done')
plt.plot(self.times,self.get_nocatalogs(),marker="o",ls='solid',linewidth=1,label='no catalog')
plt.plot(self.times,self.get_batches(),marker="o",ls='solid',linewidth=1,label='in batch')
elif figure == "batch":
plt.figure(options.name+'_'+figure)
plt.plot(self.times,self.get_nocatalogs(),marker="o",ls='dashed',linewidth=1,label='no catalog')
plt.plot(self.times,self.get_batches(),marker="o",ls='solid',linewidth=1,label='in batch')
plt.plot(self.times,self.get_idles(),marker="o",ls='solid',linewidth=1,label='idle')
plt.plot(self.times,self.get_runnings(),marker="o",ls='solid',linewidth=1,label='running')
plt.plot(self.times,self.get_helds(),marker="o",ls='solid',linewidth=1,label='held')
plt.legend(frameon=False)
plt.legend(title='ends: '+last)
ax = plt.gca()
ax.annotate(NOW, xy=(-0.13,0),xycoords=('axes fraction','figure fraction'),
size=10, ha='left', va='bottom')
#ax.annotate('ends: '+last, xy=(0.99,0.01), xycoords=('axes fraction','axes fraction'),
# size=10, ha='right', va='bottom')
# make plot nicer
plt.xlabel(options.xtitle, fontsize=18)
plt.ylabel(figure+' '+options.ytitle, fontsize=18)
# make axis tick numbers larger
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
        # make sure to not have too much white space around the plot
plt.subplots_adjust(top=0.99, right=0.99, bottom=0.13, left=0.12)
# save plot for later viewing
plt.savefig(options.name+'_'+figure+".png",bbox_inches='tight',dpi=400)
return
# # define and get all command line arguments
# parser = OptionParser()
# parser.add_option("-n","--name",dest="name",default='graph_xy',help="name of input file")
# parser.add_option("-q","--quiet",action="store_true",dest="quiet",default=False,help="no plot show")
# parser.add_option("-x","--xtitle",dest="xtitle",default='Default x title',help="x axis title")
# parser.add_option("-y","--ytitle",dest="ytitle",default='Default y title',help="y axis title")
# (options, args) = parser.parse_args()
#
# ts1 = Timeseries("nanoao/518/BcToJPsiMuMu_inclusive_TuneCP5_13TeV-bcvegpy2-pythia8-evtgen+RunIISummer20UL16MiniAOD-106X_mcRun2_asymptotic_v13-v3+MINIAODSIM")
# ts1.read()
# ts1.show()
#
# ts2 = Timeseries("nanoao/518/BcToJPsiMuMu_inclusive_TuneCP5_13TeV-bcvegpy2-pythia8-evtgen+RunIISummer20UL17MiniAOD-106X_mc2017_realistic_v6-v3+MINIAODSIM")
# ts2.read()
# ts2.show()
#
# ts3 = Timeseries("nanoao/518/BsToMuMu_SoftQCDnonD_TuneCP5_BsLifetime1p45_13TeV-pythia8-evtgen+RunIISummer20UL18MiniAOD-106X_upgrade2018_realistic_v11_L1v1-v1+MINIAODSIM")
# ts3.read()
# ts3.show()
#
# ts1.merge("nanoao/518",ts2)
# ts1.merge("nanoao/518",ts3)
# ts1.show()
# ts1.write()
#
# ts1.plot(options)
| [
"[email protected]"
] | |
1161c88b6f97450eb92ecddc96a9913d4b4cdca6 | 6609c26b4ed72c156104ce282c3cf88c6aac59f6 | /chapter09/example02.py | 65652c91aa68ad1a2e42702ecbcabdda16ed64fc | [
"MIT"
] | permissive | yordanivh/intro_to_cs_w_python | 4ab9dbbc2963b285b22cacb6648d1300fded18ce | eebbb8efd7ef0d07be9bc45b6b1e8f20737ce01a | refs/heads/master | 2020-09-06T12:25:23.362118 | 2020-02-14T14:07:07 | 2020-02-14T14:07:07 | 220,423,698 | 0 | 0 | MIT | 2020-02-14T14:07:08 | 2019-11-08T08:41:25 | Python | UTF-8 | Python | false | false | 179 | py | #variable holding the last value of the loop
speed = 2
velocities = [0.0, 9.81, 19.62, 29.43]
for speed in velocities:
print('Metric',speed, 'm/sec')
print('Final:', speed)
| [
"[email protected]"
] | |
8d506e901341ca9f597b8e0d69a411d5cffd3adc | fdb950317e348baa10975a4b0fff55e4e39eb040 | /htdocs/plotting/auto/scripts100/p125.py | a7b5022c90e03c56ab1ff9b6ce200b8bfb8a8e1a | [
"MIT"
] | permissive | MediaPlex/iem | 5d4938f14d0c1b5c1ea4f46f8e05c84a88727fed | b6bcb1c0cedc8a75740b78f830742a765e21ab8b | refs/heads/main | 2023-04-26T10:40:50.786980 | 2021-05-30T11:43:28 | 2021-05-30T11:43:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,361 | py | """Maps of averages"""
import calendar
from collections import OrderedDict
import datetime
import numpy as np
from pandas.io.sql import read_sql
from pyiem.plot import MapPlot, get_cmap
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.exceptions import NoDataFound
import cartopy.crs as ccrs
PDICT = {
"state": "State Level Maps (select state)",
"cornbelt": "Corn Belt",
"midwest": "Midwest Map",
}
PDICT2 = {
"both": "Show both contour and values",
"values": "Show just the values",
"contour": "Show just the contour",
}
PDICT3 = OrderedDict(
[
("avg_temp", "Average Temperature"),
("avg_high", "Average High Temperature"),
("avg_low", "Average Low Temperature"),
("total_cdd65", "Total Cooling Degree Days (base=65)"),
("total_gdd32", "Total Growing Degree Days (base=32)"),
("total_gdd41", "Total Growing Degree Days (base=41)"),
("total_gdd46", "Total Growing Degree Days (base=46)"),
("total_gdd48", "Total Growing Degree Days (base=48)"),
("total_gdd50", "Total Growing Degree Days (base=50)"),
("total_gdd51", "Total Growing Degree Days (base=51)"),
("total_gdd52", "Total Growing Degree Days (base=52)"),
("total_hdd65", "Total Heating Degree Days (base=65)"),
("total_sdd86", "Total Stress Degree Days (base=86)"),
("total_precip", "Total Precipitation"),
]
)
PDICT5 = {
"climate": "Period of Record Climatology",
"climate51": "1951-Present Climatology",
"climate71": "1971-Present Climatology",
"climate81": "1981-Present Climatology",
"ncdc_climate71": "NCEI 1971-2000 Climatology",
"ncdc_climate81": "NCEI 1981-2010 Climatology",
"ncei_climate91": "NCEI 1991-2020 Climatology",
}
UNITS = {"total_precip": "inch"}
PRECISION = {
"total_precip": 2,
"total_gdd50": 0,
"total_gdd32": 0,
"total_gdd41": 0,
"total_gdd46": 0,
"total_gdd48": 0,
"total_gdd51": 0,
"total_gdd52": 0,
"total_cdd65": 0,
"total_hdd65": 0,
}
MDICT = OrderedDict(
[
("all", "No Month/Time Limit"),
("spring", "Spring (MAM)"),
("mjj", "May/June/July"),
("gs", "May thru Sep"),
("fall", "Fall (SON)"),
("winter", "Winter (DJF)"),
("summer", "Summer (JJA)"),
("jan", "January"),
("feb", "February"),
("mar", "March"),
("apr", "April"),
("may", "May"),
("jun", "June"),
("jul", "July"),
("aug", "August"),
("sep", "September"),
("oct", "October"),
("nov", "November"),
("dec", "December"),
]
)
def get_description():
"""Return a dict describing how to call this plotter"""
desc = dict()
desc["data"] = True
desc[
"description"
] = """This application produces map analysis of
climatological averages. The IEM maintains a number of different
climatologies based on period of record and source. If you pick the NCEI
Climatology, only basic temperature and precipitation variables are
available at this time."""
desc["arguments"] = [
dict(
type="select",
name="month",
default="all",
label="Month Limiter",
options=MDICT,
),
dict(
type="select",
name="sector",
default="state",
options=PDICT,
label="Select Map Region",
),
dict(
type="select",
name="src",
default="ncei_climate91",
options=PDICT5,
label=(
"Select Climatology Source to Use "
"(limits available variables)"
),
),
dict(
type="state",
name="state",
default="IA",
label="Select State to Plot (when appropriate)",
),
dict(
type="select",
name="opt",
options=PDICT2,
default="both",
label="Map Plot/Contour View Option",
),
dict(
type="select",
name="var",
options=PDICT3,
default="total_precip",
label="Which Variable to Plot",
),
dict(type="cmap", name="cmap", default="jet", label="Color Ramp:"),
]
return desc
def plotter(fdict):
"""Go"""
pgconn = get_dbconn("coop")
ctx = get_autoplot_context(fdict, get_description())
state = ctx["state"][:2]
varname = ctx["var"]
sector = ctx["sector"]
opt = ctx["opt"]
month = ctx["month"]
if month == "all":
months = range(1, 13)
elif month == "fall":
months = [9, 10, 11]
elif month == "winter":
months = [12, 1, 2]
elif month == "spring":
months = [3, 4, 5]
elif month == "mjj":
months = [5, 6, 7]
elif month == "gs":
months = [5, 6, 7, 8, 9]
elif month == "summer":
months = [6, 7, 8]
else:
ts = datetime.datetime.strptime("2000-" + month + "-01", "%Y-%b-%d")
# make sure it is length two for the trick below in SQL
months = [ts.month]
if len(months) == 1:
title = "%s %s" % (calendar.month_name[months[0]], PDICT3[varname])
else:
title = "%s" % (MDICT[month],)
mp = MapPlot(
sector=sector,
state=state,
axisbg="white",
title="%s %s for %s" % (PDICT5[ctx["src"]], PDICT3[varname], title),
nocaption=True,
)
bnds = mp.ax.get_extent(crs=ccrs.PlateCarree())
joincol = "id"
if ctx["src"] == "ncdc_climate81":
joincol = "ncdc81"
elif ctx["src"] == "ncei_climate91":
joincol = "ncei91"
extra = ""
if not ctx["src"].startswith("ncdc_"):
extra = """,
sum(cdd65) as total_cdd65,
sum(hdd65) as total_hdd65,
sum(gdd32) as total_gdd32,
sum(gdd41) as total_gdd41,
sum(gdd46) as total_gdd46,
sum(gdd48) as total_gdd48,
sum(gdd50) as total_gdd50,
sum(gdd51) as total_gdd51,
sum(gdd52) as total_gdd52
"""
df = read_sql(
"""
WITH mystations as (
select """
+ joincol
+ """ as myid,
max(ST_x(geom)) as lon, max(ST_y(geom)) as lat from stations
where network ~* 'CLIMATE' and
ST_Contains(ST_MakeEnvelope(%s, %s, %s, %s, 4326), geom)
GROUP by myid
)
SELECT station, extract(month from valid) as month,
max(lon) as lon, min(lat) as lat,
sum(precip) as total_precip,
avg(high) as avg_high,
avg(low) as avg_low,
avg((high+low)/2.) as avg_temp """
+ extra
+ """
from """
+ ctx["src"]
+ """ c
JOIN mystations t on (c.station = t.myid)
WHERE extract(month from valid) in %s
GROUP by station, month
""",
pgconn,
params=(bnds[0], bnds[2], bnds[1], bnds[3], tuple(months)),
index_col=["station", "month"],
)
if df.empty:
raise NoDataFound("No data was found for query, sorry.")
if len(months) == 1:
df2 = df
else:
if varname.startswith("total"):
df2 = df.sum(axis=0, level="station")
else:
df2 = df.mean(axis=0, level="station")
df2["lat"] = df["lat"].mean(axis=0, level="station")
df2["lon"] = df["lon"].mean(axis=0, level="station")
levels = np.linspace(df2[varname].min(), df2[varname].max(), 10)
levels = [round(x, PRECISION.get(varname, 1)) for x in levels]
if opt in ["both", "contour"]:
mp.contourf(
df2["lon"].values,
df2["lat"].values,
df2[varname].values,
levels,
units=UNITS.get(varname, "F"),
cmap=get_cmap(ctx["cmap"]),
clip_on=False,
)
if sector == "state":
mp.drawcounties()
if opt in ["both", "values"]:
mp.plot_values(
df2["lon"].values,
df2["lat"].values,
df2[varname].values,
fmt="%%.%if" % (PRECISION.get(varname, 1),),
labelbuffer=5,
)
return mp.fig, df
if __name__ == "__main__":
plotter(dict(month="gs", var="total_gdd50", src="climate51"))
| [
"[email protected]"
] | |
a193ad9d8cf0b7edb6fea29bdc621f469c12e0ba | 9653d2c933c95f6a7e956751814a38a935fabf14 | /source/code/menu_addFontGuideline.py | c211b008a1cac518efa7bbec3cc9d5b7c1ee2e90 | [
"MIT"
] | permissive | benkiel/guidetool | f98863c72920bbddc9fb355852a42c1e441f02ea | ee6f4fce8f472622ab20a3b09bf4594f5631be25 | refs/heads/main | 2023-06-18T21:39:24.269399 | 2021-07-15T00:41:33 | 2021-07-15T00:41:33 | 387,886,590 | 0 | 0 | MIT | 2021-07-20T18:54:21 | 2021-07-20T18:54:20 | null | UTF-8 | Python | false | false | 690 | py | import AppKit
from fontParts.world import CurrentGlyph
from mojo.UI import getDefault, CurrentGlyphWindow
from guideTool.guess import guessPositionAndAngleFromSelectedPoints
from guideTool.editor import GuidelineEditorController
def run():
glyph = CurrentGlyph()
if glyph is None:
return
font = glyph.font
editor = CurrentGlyphWindow()
data = guessPositionAndAngleFromSelectedPoints(glyph)
if data is None:
AppKit.NSBeep()
return
font.prepareUndo("Add Guide")
guideline = font.appendGuideline(**data)
font.performUndo()
GuidelineEditorController(guideline, glyph, editor.getGlyphView())
if __name__ == "__main__":
run()
| [
"[email protected]"
] | |
55975b35d008f8b4dddbc1c5c47ae99ff5a4998d | 04e1c60ac7864a0bdcdd41026a2336b1ff699613 | /model/ll.py | f5098ecc5ce3319e31d675292eb699f307ed938b | [] | no_license | jianzhnie/RetinaNet_Pytorch | 00ec318d2e57c6b646f193e4d3a066f9891762b3 | 03679766847757f28bb9410c31ddaf99adf524c8 | refs/heads/master | 2020-09-22T00:09:18.617565 | 2019-11-30T08:27:36 | 2019-11-30T08:27:36 | 224,981,680 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,015 | py |
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class FocalLoss(nn.Module):
def __init__(self, num_classes):
super(FocalLoss, self).__init__()
self.num_classes = num_classes
def _one_hot_embeding(self, labels):
"""Embeding labels to one-hot form.
Args:
labels(LongTensor): class labels
num_classes(int): number of classes
Returns:
encoded labels, sized[N, #classes]
"""
y = torch.eye(self.num_classes+1) # [D, D]
return y[labels] # [N, D]
def focal_loss(self, x, y):
"""Focal loss
Args:
x(tensor): size [N, D]
y(tensor): size [N, ]
Returns:
(tensor): focal loss
"""
alpha = 0.25
gamma = 2
t = self._one_hot_embeding(y.data.cpu()) # [N,21]
t = t[:, 1:] # exclude background
t = Variable(t).cuda() # [N,20]
        logit = F.softmax(x, dim=1)  # explicit dim avoids the implicit-dim deprecation warning
logit = logit.clamp(1e-7, 1.-1e-7)
conf_loss_tmp = -1 * t.float() * torch.log(logit)
conf_loss_tmp = alpha * conf_loss_tmp * (1-logit)**gamma
conf_loss = conf_loss_tmp.sum()
return conf_loss
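    # The method above implements the standard focal loss, FL(p_t) = -alpha * (1 - p_t)**gamma * log(p_t),
    # with alpha = 0.25 and gamma = 2, applied per class after dropping the background column.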
def forward(self, loc_preds, loc_targets, cls_preds, cls_targets):
"""Compute loss between (loc_preds, loc_targets) and (cls_preds, cls_targets).
Args:
loc_preds(tensor): predicted locations, sized [batch_size, #anchors, 4].
loc_targets(tensor): encoded target locations, sized [batch_size, #anchors, 4].
cls_preds(tensor): predicted class confidences, sized [batch_size, #anchors, #classes].
cls_targets(tensor): encoded target labels, sized [batch_size, #anchors].
Returns:
(tensor) loss = SmoothL1Loss(loc_preds, loc_targets) + FocalLoss(cls_preds, cls_targets).
"""
pos = cls_targets > 0 # [N,#anchors]
num_pos = pos.data.long().sum()
# loc_loss = SmoothL1Loss(pos_loc_preds, pos_loc_targets)
mask = pos.unsqueeze(2).expand_as(loc_preds) # [N,#anchors,4]
masked_loc_preds = loc_preds[mask].view(-1, 4) # [#pos,4]
masked_loc_targets = loc_targets[mask].view(-1, 4) # [#pos,4]
loc_loss = F.smooth_l1_loss(masked_loc_preds, masked_loc_targets, size_average=False)
# cls_loss = FocalLoss(loc_preds, loc_targets)
pos_neg = cls_targets > -1 # exclude ignored anchors
# num_pos_neg = pos_neg.data.long().sum()
mask = pos_neg.unsqueeze(2).expand_as(cls_preds)
masked_cls_preds = cls_preds[mask].view(-1, self.num_classes)
cls_loss = self.focal_loss(masked_cls_preds, cls_targets[pos_neg])
num_pos = max(1.0, num_pos.item())
print('loc_loss: %.3f | cls_loss: %.3f' % (loc_loss.item() / num_pos, cls_loss.item() / num_pos), end=' | ')
loss = loc_loss / num_pos + cls_loss / num_pos
return loss | [
"[email protected]"
] | |
f0ff4f0567586c4dc9dd54b5497b4819f77b2378 | 855416c669f765e4cd0f5a749e82c112641a9e11 | /Interest.blog-1.1/utils/public.py | 9cdb4342017ac3c51968c900609d649d1726535b | [
"MIT"
] | permissive | chenzhenpin/my_flask | a2f63422921d4b73c25a8e093ad09e6a48f8b568 | 0c101b7a1aa01283a0b8e3ef9b7555750ea03ecb | refs/heads/master | 2022-11-28T20:16:35.854225 | 2018-11-27T16:04:29 | 2018-11-27T16:04:29 | 159,362,205 | 0 | 0 | null | 2022-11-22T01:37:36 | 2018-11-27T16:02:52 | CSS | UTF-8 | Python | false | false | 5,254 | py | # -*- coding:utf8 -*-
import requests
import hashlib
import datetime
import random
import upyun
from uuid import uuid4
from log import Syslog
from config import SSO, MYSQL, PLUGINS
from torndb import Connection
from flask import g
# Some shared helper variables and functions
md5 = lambda pwd:hashlib.md5(pwd).hexdigest()
today = lambda :datetime.datetime.now().strftime("%Y-%m-%d")
logger = Syslog.getLogger()
gen_requestId = lambda :str(uuid4())
gen_filename = lambda :"%s%s" %(datetime.datetime.now().strftime('%Y%m%d%H%M%S'), str(random.randrange(1000, 10000)))
def timeChange(timestring):
logger.debug("Change time, source time is %s" %timestring)
startedat = timestring.replace('T', ' ')[:19]
try:
dt = datetime.datetime.strptime(startedat, "%Y-%m-%d %H:%M:%S") + datetime.timedelta(hours=8)
res = dt.strftime("%Y-%m-%d %H:%M:%S")
except Exception, e:
logger.warn(e, exc_info=True)
else:
logger.debug("Change time, result time is %s" %res)
return res
def ParseMySQL(mysql, callback="dict"):
try:
protocol, dburl = mysql.split("://")
if "?" in mysql:
dbinfo, dbargs = dburl.split("?")
else:
dbinfo, dbargs = dburl, "charset=utf8&timezone=+8:00"
host,port,user,password,database = dbinfo.split(":")
charset, timezone = dbargs.split("&")[0].split("charset=")[-1] or "utf8", dbargs.split("&")[-1].split("timezone=")[-1] or "+8:00"
if callback in ("list", "tuple"):
return protocol,host,port,user,password,database,charset, timezone
else:
return {"Protocol": protocol, "Host": host, "Port": port, "Database": database, "User": user, "Password": password, "Charset": charset, "Timezone": timezone}
except Exception,e:
logger.warn(e, exc_info=True)
if callback in ("list", "tuple"):
return ()
else:
return {}
mysql = Connection(
host = "%s:%s" %(ParseMySQL(MYSQL).get('Host'), ParseMySQL(MYSQL).get('Port', 3306)),
database = ParseMySQL(MYSQL).get('Database'),
user = ParseMySQL(MYSQL).get('User'),
password = ParseMySQL(MYSQL).get('Password'),
time_zone= ParseMySQL(MYSQL).get('Timezone','+8:00'),
charset = ParseMySQL(MYSQL).get('Charset', 'utf8'),
connect_timeout=3,
max_idle_time=2)
def ClickMysqlWrite(data):
if isinstance(data, dict):
if data.get("agent") and data.get("method") in ("GET", "POST", "PUT", "DELETE", "OPTIONS"):
sql = "insert into clickLog set requestId=%s, url=%s, ip=%s, agent=%s, method=%s, status_code=%s, referer=%s"
try:
mysql.insert(sql, data.get("requestId"), data.get("url"), data.get("ip"), data.get("agent"), data.get("method"), data.get("status_code"), data.get("referer"))
except Exception, e:
logger.warn(e, exc_info=True)
def isLogged_in(cookie_str):
''' To determine whether to log on with cookie '''
SSOURL = SSO.get("SSO.URL")
if cookie_str and not cookie_str == '..':
username, expires, sessionId = cookie_str.split('.')
#success = Requests(SSOURL+"/sso/").post(data={"username": username, "time": expires, "sessionId": sessionId}).get("success", False)
success = requests.post(SSOURL+"/sso/", data={"username": username, "time": expires, "sessionId": sessionId}, timeout=5, verify=False, headers={"User-Agent": SSO.get("SSO.PROJECT")}).json().get("success", False)
logger.info("check login request, cookie_str: %s, success:%s" %(cookie_str, success))
return success
else:
logger.info("Not Logged in")
return False
def chunks(arr, n):
"""arr是被分割的list,n是每个chunk中含n元素。"""
return [arr[i:i+n] for i in range(0, len(arr), n)]
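# e.g. chunks([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]]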
def isAdmin(username):
AdminUsers = requests.get(g.apiurl + "/user/", params={"getadminuser": True}, timeout=5, verify=False, headers={"User-Agent": SSO.get("SSO.PROJECT")}).json().get("data")
if username in AdminUsers:
return True
return False
def UploadImage2Upyun(file, imgurl, kwargs=PLUGINS['UpYunStorage']):
""" Upload image to Upyun Cloud with Api """
logger.info({"UploadFile": file, "imgurl": imgurl, "kwargs": kwargs})
up = upyun.UpYun(kwargs.get("bucket"), username=kwargs.get("username"), password=kwargs.get("password"), secret=kwargs.get("secret"), timeout=kwargs.get("timeout", 10))
formkw = { 'allow-file-type': kwargs.get('allow-file-type', 'jpg,jpeg,png,gif') }
with open(file, "rb") as f:
res = up.put(imgurl, f, checksum=True, need_resume=True, form=True, **formkw)
return res
def BaiduActivePush(pushUrl, original=True, callUrl=PLUGINS['BaiduActivePush']['callUrl']):
"""百度主动推送(实时)接口提交链接"""
callUrl = callUrl + "&type=original" if original else callUrl
res = requests.post(url=callUrl, data=pushUrl, timeout=3, headers={"User-Agent": "BaiduActivePush/www.saintic.com"}).json()
logger.info("BaiduActivePush PushUrl is %s, Result is %s" % (pushUrl, res))
return res
| [
"[email protected]"
] | |
2a7d09228483b3b7c912600fea60bd9f300653f9 | 6b10d7a745b70d3b8533ea91b7bf1052e43b7d70 | /Week 4/Admin_page/main/migrations/0002_auto_20180930_2229.py | 9e5cf7c3c18d3c670ba02c13dfc5ff2f2fa3db2a | [] | no_license | Ablay09/BFDjango | 9701f6b1d36d54e6a2b511c57374e47ac0048d0e | c41423f5e86bad107769f518eeca2bfefd524919 | refs/heads/master | 2020-03-27T21:33:55.433951 | 2018-10-12T20:14:12 | 2018-10-12T20:14:12 | 147,101,564 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 712 | py | # Generated by Django 2.1.1 on 2018-09-30 16:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='task',
name='created_date',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='task',
name='finished_date',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='task_list',
name='time',
field=models.DateTimeField(auto_now_add=True),
),
]
| [
"[email protected]"
] | |
1ba7ed3f3e6972ac5d2d780b49ba2a2b641a4ba6 | d89290fd2ecc166287e065784ae290a516ca2cef | /archives/convert_framerates.py | 83c83ead40072e2feef33d6b123b8c9ad4acad3f | [
"MIT"
] | permissive | rec/bbcprc | adc85ca3fd7a7b303c83323c2bdf63d44654112f | d4f4fee5f5c0beaf9c23af6e1655dbb49f3912e3 | refs/heads/main | 2023-02-08T20:18:38.763943 | 2023-01-30T12:47:21 | 2023-01-30T12:51:38 | 130,397,044 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,066 | py | from . import audio_io
from . import constants
from . import files
from scipy import signal
import json
import numpy as np
import os
ERROR = 'fp.getframerate() != constants.FRAME_RATE: 48000'
def get_framerate_error_files():
for f in sorted(files.with_suffix(constants.METADATA_DIR, '.json')):
if json.load(open(f)).get('error') == ERROR:
yield constants.source(os.path.basename(f)[:-5])
def resample_file(filename):
if True:
original = filename
filename = filename + '.48KHz'
else:
original = filename + '.48KHz'
os.rename(filename, original)
fp, frames = audio_io.read_frames_and_fp(original)
assert fp.getframerate() == 48000
samples = audio_io.from_frames(frames, fp.getnchannels())
resampled = np.stack([signal.resample_poly(s, 160, 147) for s in samples])
audio_io.write(filename, resampled)
print('Resampled to', filename)
if __name__ == '__main__':
# resample_file(list(get_framerate_error_files())[
for f in get_framerate_error_files():
print(f)
| [
"[email protected]"
] | |
c07b75d16c6a9d140e590d268b51632f6b7f93bc | c8e44c50bcc77c6ad5d95516dcec8dada7c284bd | /gidgethub/abc.py | 444cf02043c0df46f44a1acc14c6f691ffc35f44 | [
"Apache-2.0"
] | permissive | Lukasa/gidgethub | 495510e276cd34f7b9c37431f3b3b011d02b0795 | 6b3dc032f1fcdf0fbf23dfb061f11588798c1e7e | refs/heads/master | 2021-01-18T16:04:54.012847 | 2017-03-08T00:51:29 | 2017-03-08T00:51:29 | 84,348,121 | 0 | 0 | null | 2017-03-08T17:31:09 | 2017-03-08T17:31:09 | null | UTF-8 | Python | false | false | 4,473 | py | """Provide an abstract base class for easier requests."""
import abc
import datetime
import json
from typing import Any, AsyncIterable, Dict, Mapping, Optional, Tuple
from . import sansio
class GitHubAPI(abc.ABC):
"""Provide an idiomatic API for making calls to GitHub's API."""
def __init__(self, requester: str, *, oauth_token: str = None) -> None:
self.requester = requester
self.oauth_token = oauth_token
self.rate_limit: sansio.RateLimit = None
@abc.abstractmethod
async def _request(self, method: str, url: str,
headers: Mapping[str, str],
body: bytes = b'') -> Tuple[int, Mapping[str, str], bytes]:
"""Make an HTTP request."""
@abc.abstractmethod
async def _sleep(self, seconds: float) -> None:
"""Sleep for the specified number of seconds."""
async def _make_request(self, method: str, url: str,
url_vars: Dict[str, str], data: Any,
accept) -> Tuple[Any, str]:
"""Construct and make an HTTP request."""
# If the rate limit isn't known yet then assume there's enough quota.
if self.rate_limit is not None:
if self.rate_limit:
# Proactively assume this request is counted by GitHub so as to
# not have a race condition on the final request.
self.rate_limit.remaining -= 1
else:
# /rate_limit returns the current rate limit,
# but the assumption is an async application won't be making multi-threaded calls with
# the same oauth token so the last call will have set the rate_limit accurately.
now = datetime.datetime.now(datetime.timezone.utc)
wait = self.rate_limit.reset_datetime - now
await self._sleep(wait.total_seconds())
filled_url = sansio.format_url(url, url_vars)
request_headers = sansio.create_headers(self.requester, accept=accept,
oauth_token=self.oauth_token)
if data == "":
body = b""
request_headers["content-length"] = "0"
else:
charset = "utf-8"
body = json.dumps(data).encode(charset)
request_headers['content-type'] = f"application/json; charset={charset}"
request_headers['content-length'] = str(len(body))
response = await self._request(method, filled_url, request_headers, body)
data, self.rate_limit, more = sansio.decipher_response(*response)
return data, more
async def getitem(self, url: str, url_vars: Dict[str, str] = {}, *,
accept=sansio.accept_format()) -> Any:
"""Send a GET request for a single item to the specified endpoint."""
data, _ = await self._make_request("GET", url, url_vars, "", accept)
return data
async def getiter(self, url: str, url_vars: Dict[str, str] = {}, *,
accept: str = sansio.accept_format()) -> AsyncIterable[Any]:
"""Return an async iterable for all the items at a specified endpoint."""
data, more = await self._make_request("GET", url, url_vars, "", accept)
for item in data:
yield item
if more:
# `yield from` is not supported in coroutines.
async for item in self.getiter(more, url_vars, accept=accept):
yield item
async def post(self, url: str, url_vars: Dict[str, str] = {}, *,
data: Any, accept: str = sansio.accept_format()) -> Any:
data, _ = await self._make_request("POST", url, url_vars, data, accept)
return data
async def patch(self, url: str, url_vars: Dict[str, str] = {}, *,
data: Any, accept: str = sansio.accept_format()) -> Any:
data, _ = await self._make_request("PATCH", url, url_vars, data, accept)
return data
async def put(self, url: str, url_vars: Dict[str, str] = {}, *,
data: Any = "",
accept: str = sansio.accept_format()) -> Optional[Any]:
data, _ = await self._make_request("PUT", url, url_vars, data, accept)
return data
async def delete(self, url: str, url_vars: Dict[str, str] = {}, *,
accept: str = sansio.accept_format()) -> None:
await self._make_request("DELETE", url, url_vars, "", accept)
| [
"[email protected]"
] | |
56b4116fe0ba8840df5f463b6306c8fd733d774a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02804/s286662068.py | 0f0965086da8245027d2dedeb3044d4203c93284 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 622 | py |
n, k = map(int, input().split())
a = list(map(int, input().split()))
a.sort()
# precompute factorials and inverse factorials mod 10**9+7 for combinations up to 100000
mod = 10**9 + 7
facts = [1] * 100001
for i in range(0, 100000):
facts[i+1] = facts[i] * (i + 1) % mod
ifacts = [1] * 100001
ifacts[100000] = pow(facts[100000], mod - 2, mod)
for i in range(100000, 0, -1):
ifacts[i-1] = ifacts[i] * i % mod
def comb(n, k):
return facts[n] * ifacts[n-k] % mod * ifacts[k] % mod
ans = 0
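# Reasoning (editor's note): with a sorted, a[i] is the maximum of comb(i, k-1) of the
# k-element subsets and the minimum of comb(n-i-1, k-1) of them, so the answer is
# sum(max) - sum(min) over all k-element subsets, accumulated in the two loops below.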
for i in range(k-1, n):
# take k-1 from i
ans = (ans + a[i] * comb(i, k-1)) % mod
for i in range(0, n-k+1):
# take k-1 from n-i-1
ans = (ans - a[i] * comb(n-i-1, k-1)) % mod
print(ans) | [
"[email protected]"
] | |
63508df4d0df451b9d0818de91691ffcd18a74bb | f5a53f0f2770e4d7b3fdace83486452ddcc996e1 | /env3/lib/python3.6/site-packages/django_rq/workers.py | b6f23d91a3623ea4116dcd4691f19e56d13e2a11 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | fireman0865/PingBox | 35e8fc9966b51320d571b63967e352a134022128 | 0f00eaf88b88e9441fffd5173a1501e56c13db03 | refs/heads/master | 2023-01-20T07:55:59.433046 | 2020-03-15T13:36:31 | 2020-03-15T13:36:31 | 247,466,832 | 1 | 0 | Apache-2.0 | 2022-12-26T21:30:32 | 2020-03-15T12:59:16 | Python | UTF-8 | Python | false | false | 1,796 | py | from rq import Worker
from rq.utils import import_attribute
from django.conf import settings
from .jobs import get_job_class
from .queues import get_queues
def get_exception_handlers():
"""
Custom exception handlers could be defined in settings.py:
RQ = {
'EXCEPTION_HANDLERS': ['path.to.handler'],
}
"""
from .settings import EXCEPTION_HANDLERS
return [import_attribute(path) for path in EXCEPTION_HANDLERS]
def get_worker_class(worker_class=None):
"""
Return worker class from RQ settings, otherwise return Worker.
If `worker_class` is not None, it is used as an override (can be
python import path as string).
"""
RQ = getattr(settings, 'RQ', {})
if worker_class is None:
worker_class = Worker
if 'WORKER_CLASS' in RQ:
worker_class = RQ.get('WORKER_CLASS')
if isinstance(worker_class, str):
worker_class = import_attribute(worker_class)
return worker_class
def get_worker(*queue_names, **kwargs):
"""
Returns a RQ worker for all queues or specified ones.
"""
job_class = get_job_class(kwargs.pop('job_class', None))
queue_class = kwargs.pop('queue_class', None)
queues = get_queues(*queue_names, **{'job_class': job_class,
'queue_class': queue_class})
# normalize queue_class to what get_queues returns
queue_class = queues[0].__class__
worker_class = get_worker_class(kwargs.pop('worker_class', None))
return worker_class(queues,
connection=queues[0].connection,
exception_handlers=get_exception_handlers() or None,
job_class=job_class,
queue_class=queue_class,
**kwargs)
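# Illustrative usage (assuming RQ_QUEUES is configured in settings.py):
#   worker = get_worker('default', 'high')
#   worker.work()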
| [
"[email protected]"
] | |
67790833485b01272a5d8f7ba10d549f6bc187e6 | b7b8cac59c24c28efb3002f639865121d3b1f3e1 | /hyperion/grid/yt3_wrappers.py | fe04e9f80b875a00a54282dfaeb5b72899b325bd | [
"BSD-2-Clause"
] | permissive | koepferl/hyperion | 51a461f3cde30faa6dc82f63803b659a831273d1 | d43e1d06889e8b649038b85ef6721c64dd269a4e | refs/heads/master | 2020-04-01T19:11:18.373471 | 2015-01-14T13:31:36 | 2015-03-30T15:38:08 | 34,328,089 | 0 | 0 | null | 2015-04-21T13:17:41 | 2015-04-21T13:17:40 | null | UTF-8 | Python | false | false | 6,366 | py | from __future__ import print_function, division
import numpy as np
def almost_equal(a, b):
return a / b < 1. + 1.e-4 and b / a < 1. + 1.e-4
def amr_grid_to_yt_stream(levels, dust_id=0):
# Try and guess the refinement ratio - if it is not constant, then
# we can't use yt
if len(levels) == 0 or len(levels[0].grids) == 0:
raise Exception("Need at least one level with one grid to convert to a yt object")
elif len(levels) == 1:
refine = 2
else:
dx = []
dy = []
dz = []
for ilevel, level in enumerate(levels):
for igrid, grid in enumerate(level.grids):
gdx = (grid.xmax - grid.xmin) / float(grid.nx)
gdy = (grid.ymax - grid.ymin) / float(grid.ny)
gdz = (grid.zmax - grid.zmin) / float(grid.nz)
if igrid == 0:
dx.append(gdx)
dy.append(gdy)
dz.append(gdz)
else:
if not almost_equal(dx[-1], gdx):
raise Exception("dx scale differs between grids in level %i (expected %g and got %g)" % (ilevel, dx[-1], gdx))
if not almost_equal(dy[-1], gdy):
raise Exception("dy scale differs between grids in level %i (expected %g and got %g)" % (ilevel, dy[-1], gdy))
if not almost_equal(dz[-1], gdz):
raise Exception("dz scale differs between grids in level %i (expected %g and got %g)" % (ilevel, dz[-1], gdz))
dx = np.array(dx)
dy = np.array(dy)
dz = np.array(dz)
refine_x = dx[:-1] / dx[1:]
refine_y = dy[:-1] / dy[1:]
refine_z = dz[:-1] / dz[1:]
for i in range(len(levels) - 1):
if abs(refine_x[i] - round(refine_x[i])) > 1.e-5:
raise Exception("refinement ratio is not an integer (%g)" % refine_x[i])
if abs(refine_y[i] - round(refine_y[i])) > 1.e-5:
raise Exception("refinement ratio is not an integer (%g)" % refine_y[i])
if abs(refine_z[i] - round(refine_z[i])) > 1.e-5:
raise Exception("refinement ratio is not an integer (%g)" % refine_z[i])
refine_x = np.round(refine_x).astype(int)
refine_y = np.round(refine_y).astype(int)
refine_z = np.round(refine_z).astype(int)
if not np.all(np.hstack([refine_x, refine_y, refine_z]) == refine_x[0]):
raise Exception("refinement ratio changes between levels and/or directions (x = %s, y = %s, z = %s)" % (str(refine_x), str(refine_y), str(refine_z)))
refine = int(refine_x[0])
# TODO: generalize this once yt supports a custom refinement factor
if refine != 2:
raise ValueError("load_amr_grid only supports refine=2")
xmin = ymin = zmin = +np.inf
xmax = ymax = zmax = -np.inf
grid_data = []
for ilevel, level in enumerate(levels):
for grid in level.grids:
grid_dict = {}
grid_dict['left_edge'] = [grid.zmin, grid.ymin, grid.xmin]
grid_dict['right_edge'] = [grid.zmax, grid.ymax, grid.xmax]
grid_dict['dimensions'] = [grid.nz, grid.ny, grid.nx]
grid_dict['level'] = ilevel
for field in grid.quantities:
grid_dict[('gas', field)] = grid.quantities[field][dust_id]
grid_data.append(grid_dict)
xmin = min(xmin, grid.xmin)
xmax = max(xmax, grid.xmax)
ymin = min(ymin, grid.ymin)
ymax = max(ymax, grid.ymax)
zmin = min(zmin, grid.zmin)
zmax = max(zmax, grid.zmax)
# Determine domain resolution
grid0 = levels[0].grids[0]
dx = (grid0.xmax - grid0.xmin) / float(grid0.nx)
nx = int(round((xmax - xmin) / dx))
dy = (grid0.ymax - grid0.ymin) / float(grid0.ny)
ny = int(round((ymax - ymin) / dy))
dz = (grid0.zmax - grid0.zmin) / float(grid0.nz)
nz = int(round((zmax - zmin) / dz))
domain_dimensions = np.array([nz, ny, nx])
bbox = np.array([[xmin, xmax], [ymin, ymax], [zmin, zmax]])
from yt.mods import load_amr_grids
spf = load_amr_grids(grid_data, domain_dimensions, bbox=bbox)
return spf
def find_order(refined):
"""
Find the index array to use to sort the ``refined`` and ``density`` arrays
to swap the xyz <-> zyx order.
"""
order = np.zeros(refined.shape)
if not refined[0]:
return [0]
def find_nested(i):
cells = [i]
for cell in range(8):
i += 1
if refined[i]:
parent = i
i, sub_cells = find_nested(i)
cells.append(sub_cells)
else:
cells.append(i)
cells = [cells[j] for j in [0,1,5,3,7,2,6,4,8]]
return i, np.hstack(cells)
return find_nested(0)[1]
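# For illustration (assuming ``refined`` is a boolean numpy array): one refined root
# cell followed by eight unrefined leaves,
#   find_order(np.array([True] + [False] * 8))
# should yield [0, 1, 5, 3, 7, 2, 6, 4, 8]: the root first, then its children
# re-indexed to swap the xyz <-> zyx convention.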
def octree_grid_to_yt_stream(grid, dust_id=0):
order = find_order(grid.refined)
refined = grid.refined[order]
xmin = grid.x - grid.dx
xmax = grid.x + grid.dx
ymin = grid.y - grid.dy
ymax = grid.y + grid.dy
zmin = grid.z - grid.dz
zmax = grid.z + grid.dz
from yt.mods import load_octree
quantities = {}
for field in grid.quantities:
quantities[('gas', field)] = np.atleast_2d(grid.quantities[field][dust_id][order][~refined]).transpose()
bbox = np.array([[xmin, xmax], [ymin, ymax], [zmin, zmax]])
octree_mask = refined.astype(np.uint8) * 8
spf = load_octree(octree_mask=octree_mask,
data=quantities,
bbox=bbox,
over_refine_factor=0,
partial_coverage=0)
return spf
def cartesian_grid_to_yt_stream(grid, xmin, xmax, ymin, ymax, zmin, zmax, dust_id=0):
# TODO: only works for regular grids, need to catch non-uniform cases here
# Make data dict which should contain (array, unit) tuples
data = {}
for field in grid.quantities:
data[field] = (grid.quantities[field][dust_id], '')
# Load cartesian grid into yt
from yt.mods import load_uniform_grid
spf = load_uniform_grid(data=data,
domain_dimensions=np.array(grid.shape, dtype=np.int32),
bbox=np.array([(xmin, xmax), (ymin, ymax), (zmin, zmax)]))
return spf
| [
"[email protected]"
] | |
67bd82c6736e3ac0503aaf31946a87f83b6a9bac | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnschedul.py | 2bda54c807c76dff011ebad9091ac3f000811ebe | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 219 | py | ii = [('CoolWHM2.py', 1), ('GodwWSL2.py', 1), ('AubePRP2.py', 1), ('PeckJNG.py', 2), ('LyttELD.py', 1), ('WadeJEB.py', 3), ('BachARE.py', 4), ('MereHHB3.py', 3), ('MereHHB.py', 2), ('ThomGLG.py', 1), ('MereHHB2.py', 1)] | [
"[email protected]"
] | |
5db77f5c0ccc08e6f16c479642688f934e405b8b | 6757339759559cc741178ed4236b449ff27df221 | /chrZ_make_seq_for_ldhelmet.py | 5833cc08ecd26ce0740ee512fb400e2178dbabf0 | [] | no_license | anandksrao/postdoc | 18095f675cc5d67bc6e6a1b70bdc7dca29ac880d | a09e2d810bc4b562dc0e6de2999c063f5bd59cf8 | refs/heads/master | 2020-04-22T23:28:03.482966 | 2015-09-08T16:40:24 | 2015-09-08T16:40:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,695 | py | import re
import glob
import sys
import subprocess
import os
import copy
import argparse
import gzip
import time
def get_vcf_var(vcf_file, min_allele):
site_freq = {}
vcf_f = gzip.open(vcf_file, 'r')
for l in vcf_f:
if not re.search('^#', l):
d = re.split('\t', l)
allele1 = len(re.findall('\s0[\/|\:]', l)) + len(re.findall('\/0', l))
allele2 = len(re.findall('\s1[\/|\:]', l)) + len(re.findall('\/1', l))
min_ac = min(allele1, allele2)
if min_ac <= min_allele:
site_freq[int(d[1])] = 1
vcf_f.close()
return site_freq
def parse_male_haps(hap_file, site_freq, sites_file, chr):
f = open(hap_file, 'r')
var = {}
sites = {}
for l in f:
d = re.split('\s+', l.rstrip())
if int(d[2]) not in site_freq:
if int(d[2]) in sites:
var.pop(int(d[2]), None)
else:
sites[int(d[2])] = 1
for ix, i in enumerate(d[5:len(d)]):
if ix not in var:
var[ix] = dict()
if i == '0':
var[ix][int(d[2]) - 1] = d[3]
else:
var[ix][int(d[2]) - 1] = d[4]
f.close()
sites_f = open(sites_file, 'w')
for site in sorted(var[0].keys()):
sites_f.write('%s,%s\n' % (chr, site + 1))
sites_f.close()
return var
def parse_female_haps(var, vcf_file):
f = gzip.open(vcf_file)
start_hap = max(var.keys()) + 1
for l in f:
if not re.search('#', l):
l = l.rstrip()
d = re.split('\t', l)
if (int(d[1]) - 1) in var[0]:
fem_gens = []
for geno in d[9:]:
if not re.search('\S\/', geno):
allele = re.search('^(\S)', geno).group(1)
fem_gens.append(allele)
for ix, gen in enumerate(fem_gens):
hapnum = start_hap + ix
if hapnum not in var:
var[hapnum] = {}
if gen == '.':
var[hapnum][int(d[1]) - 1] = 'N'
elif gen == '1':
var[hapnum][int(d[1]) - 1] = d[4]
elif gen == '0':
var[hapnum][int(d[1]) - 1] = d[3]
f.close()
return var
def get_chromosome(genome, chr):
outfile = genome + '_' + chr
subprocess.call('~/bin/samtools-0.1.19/samtools faidx %s %s > %s' % (genome, chr, outfile), shell=True)
out_f = open(outfile, 'r')
chromosome = ''
locus_name = out_f.next()
for l in out_f:
chromosome = chromosome + l.rstrip().upper()
out_f.close()
os.remove(outfile)
return list(chromosome)
def print_seq(var, chr_as_list, masked, out_file):
out_f = open(out_file, 'w')
for ind in var:
out_f.write('>haplo%s\n' % ind)
tmp_chr = list(chr_as_list)
for pos, base in enumerate(masked):
if base in ['4', '5', '6', '7']:
tmp_chr[pos] = 'N'
for pos in var[ind]:
tmp_chr[pos] = var[ind][pos]
for i in xrange(0, len(tmp_chr), 60):
out_f.write(''.join(tmp_chr[i:i+60]) + '\n')
out_f.close()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--sp", help="species for which to run analysis")
args = parser.parse_args()
sp = args.sp
if sp == 'ZF':
vcf_file = '/mnt/gluster/home/sonal.singhal1/ZF/after_vqsr/by_chr/all_vcf/for_shapeit/gatk.ug.finch19.chrZ.allfilters.recodedsex.recoded_biallelicSNPs.vcf.gz'
hap_file = '/mnt/gluster/home/sonal.singhal1/ZF/phasing/PIR_approach/finch19/chrZ_haplotypes.haps'
masked_genome = '/mnt/gluster/home/sonal.singhal1/ZF/masked_genome/ZF.masked_genome.repeat_masked.switch_masked.fa'
if sp == 'LTF':
vcf_file = '/mnt/gluster/home/sonal.singhal1/LTF/after_vqsr/by_chr/for_shapeit/gatk.ug.ltf.chrZ.allfilters.recodedsex.recoded_biallelicSNPs.vcf.gz'
hap_file = '/mnt/gluster/home/sonal.singhal1/%s/phasing/PIR_approach/chrZ_haplotypes.haps' % sp
masked_genome = '/mnt/gluster/home/sonal.singhal1/LTF/masked_genome/LTF.masked_genome.repeat_masked.fa'
out_file = '/mnt/gluster/home/sonal.singhal1/%s/analysis/LDhelmet/chrZ_haplotypes.fasta' % sp
site_file = '/mnt/gluster/home/sonal.singhal1/%s/analysis/LDhelmet/chrZ_sites.csv' % sp
genome = '/mnt/gluster/home/sonal.singhal1/reference/taeGut1_60.bamorder.fasta'
min_allele = 1
chr = 'chrZ'
site_freq = get_vcf_var(vcf_file, min_allele)
chr_as_list = get_chromosome(genome, chr)
masked = get_chromosome(masked_genome, chr)
var = parse_male_haps(hap_file, site_freq, site_file, chr)
var = parse_female_haps(var, vcf_file)
print_seq(var, chr_as_list, masked, out_file)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
40bfd6b2d53f43ebd7039a79a1d0df64f193dd3e | f96636810509786bd7afdfb1580fd276b930ade1 | /client/sendDiagPopup.py | cbc672534de873b006ab10258fe7a3a4006185e6 | [] | no_license | Bharathkumar-nb/SCSI-simulation | 0b0d47fa2bce028e6214bf3e348c4be28cfaa118 | c94041043793deaa1ac4a1298eca9685952ff1eb | refs/heads/master | 2021-01-20T10:29:15.966464 | 2013-05-03T07:39:38 | 2013-05-03T07:39:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,734 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'send_diagnostic.ui'
#
# Created: Sat Apr 27 09:51:24 2013
# by: PyQt4 UI code generator 4.9.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_send_diagnostic(object):
def setupUi(self, send_diagnostic):
send_diagnostic.setObjectName(_fromUtf8("send_diagnostic"))
send_diagnostic.resize(632, 515)
# send_diagnostic = QtGui.QWidget(send_diagnostic)
# send_diagnostic.setObjectName(_fromUtf8("centralWidget"))
self.label = QtGui.QLabel(send_diagnostic)
self.label.setGeometry(QtCore.QRect(80, 40, 66, 17))
self.label.setText(_fromUtf8(""))
self.label.setObjectName(_fromUtf8("label"))
self.label_2 = QtGui.QLabel(send_diagnostic)
self.label_2.setGeometry(QtCore.QRect(60, 40, 121, 17))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.label_3 = QtGui.QLabel(send_diagnostic)
self.label_3.setGeometry(QtCore.QRect(70, 220, 111, 17))
self.label_3.setText(_fromUtf8(""))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.label_4 = QtGui.QLabel(send_diagnostic)
self.label_4.setGeometry(QtCore.QRect(60, 90, 211, 20))
self.label_4.setObjectName(_fromUtf8("label_4"))
self.label_5 = QtGui.QLabel(send_diagnostic)
self.label_5.setGeometry(QtCore.QRect(60, 160, 211, 17))
self.label_5.setObjectName(_fromUtf8("label_5"))
self.pushButton = QtGui.QPushButton(send_diagnostic)
self.pushButton.setGeometry(QtCore.QRect(320, 370, 101, 27))
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.lineEdit = QtGui.QLineEdit(send_diagnostic)
self.lineEdit.setGeometry(QtCore.QRect(350, 30, 111, 27))
self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
self.lineEdit_2 = QtGui.QLineEdit(send_diagnostic)
self.lineEdit_2.setGeometry(QtCore.QRect(350, 150, 113, 27))
self.lineEdit_2.setObjectName(_fromUtf8("lineEdit_2"))
self.lineEdit_3 = QtGui.QLineEdit(send_diagnostic)
self.lineEdit_3.setGeometry(QtCore.QRect(350, 210, 113, 27))
self.lineEdit_3.setObjectName(_fromUtf8("lineEdit_3"))
self.lineEdit_4 = QtGui.QLineEdit(send_diagnostic)
self.lineEdit_4.setGeometry(QtCore.QRect(350, 90, 113, 27))
self.lineEdit_4.setObjectName(_fromUtf8("lineEdit_4"))
self.label_6 = QtGui.QLabel(send_diagnostic)
self.label_6.setGeometry(QtCore.QRect(60, 220, 121, 17))
self.label_6.setObjectName(_fromUtf8("label_6"))
self.label_7 = QtGui.QLabel(send_diagnostic)
self.label_7.setGeometry(QtCore.QRect(60, 240, 211, 17))
self.label_7.setObjectName(_fromUtf8("label_7"))
# send_diagnostic.setCentralWidget(send_diagnostic)
#self.menuBar = QtGui.QMenuBar(send_diagnostic)
# self.menuBar.setGeometry(QtCore.QRect(0, 0, 632, 25))
# self.menuBar.setObjectName(_fromUtf8("menuBar"))
# send_diagnostic.setMenuBar(self.menuBar)
# self.mainToolBar = QtGui.QToolBar(send_diagnostic)
#self.mainToolBar.setObjectName(_fromUtf8("mainToolBar"))
#send_diagnostic.addToolBar(QtCore.Qt.TopToolBarArea, self.mainToolBar)
#self.statusBar = QtGui.QStatusBar(send_diagnostic)
#self.statusBar.setObjectName(_fromUtf8("statusBar"))
#send_diagnostic.setStatusBar(self.statusBar)
self.retranslateUi(send_diagnostic)
QtCore.QMetaObject.connectSlotsByName(send_diagnostic)
def retranslateUi(self, send_diagnostic):
send_diagnostic.setWindowTitle(QtGui.QApplication.translate("send_diagnostic", "send_diagnostic", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("send_diagnostic", "Self Test Bit (0/1)", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(QtGui.QApplication.translate("send_diagnostic", "DEVOFFL (Device Offline) (0/1)", None, QtGui.QApplication.UnicodeUTF8))
self.label_5.setText(QtGui.QApplication.translate("send_diagnostic", "UNITOFFL (Unit Offline) (0/1)", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton.setText(QtGui.QApplication.translate("send_diagnostic", "OK", None, QtGui.QApplication.UnicodeUTF8))
self.label_6.setText(QtGui.QApplication.translate("send_diagnostic", "Self Test Code", None, QtGui.QApplication.UnicodeUTF8))
self.label_7.setText(QtGui.QApplication.translate("send_diagnostic", "(Value between 000 and 111)", None, QtGui.QApplication.UnicodeUTF8))
| [
"[email protected]"
] | |
3a23b26701ccfc73af6fef97637eab76dab9b738 | 0a43afbcba776ed8ada0fef5425b1507aa4d51c1 | /smartbook/smartbook/web/migrations/0016_auto__del_ownercompanyname__add_ownercompany.py | b443dac4b4ee2cee94af23d6f41f3b7c34b366fb | [] | no_license | geethusuresh/inventory-systems | c76d6d10429f483499594df8c8f34d780531f18c | fd4211d29042776fa47da92162cbbbe8220090cd | refs/heads/master | 2021-01-02T08:51:31.278578 | 2014-09-28T07:35:54 | 2014-09-28T07:35:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,583 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'OwnerCompanyName'
db.delete_table(u'web_ownercompanyname')
# Adding model 'OwnerCompany'
db.create_table(u'web_ownercompany', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('company_name', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'web', ['OwnerCompany'])
def backwards(self, orm):
# Adding model 'OwnerCompanyName'
db.create_table(u'web_ownercompanyname', (
('company_name', self.gf('django.db.models.fields.CharField')(max_length=100)),
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal(u'web', ['OwnerCompanyName'])
# Deleting model 'OwnerCompany'
db.delete_table(u'web_ownercompany')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'web.customer': {
'Meta': {'object_name': 'Customer'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'web.designation': {
'Meta': {'object_name': 'Designation'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
u'web.ownercompany': {
'Meta': {'object_name': 'OwnerCompany'},
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'web.staff': {
'Meta': {'object_name': 'Staff'},
'designation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.Designation']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'web.transportationcompany': {
'Meta': {'object_name': 'TransportationCompany'},
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'web.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'city': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'district': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'email_id': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'house_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'land_line': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'pin': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'user_type': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
u'web.vendor': {
'Meta': {'object_name': 'Vendor'},
'contact_person': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
}
}
complete_apps = ['web'] | [
"[email protected]"
] | |
8b92d801b14bce7463cefb8954677a9b0694325a | 415920616d5efccee4667126c4bb29f91f1d5321 | /blood/donor/urls.py | f4eb33826ecb67e2f4cedf717f45045d210213b2 | [] | no_license | ManogaranArumugam/blood | 6c779b3bfe308a95d52cb730be65b25cb2c3eda6 | bb6ef86bfeaf67ed70eafa97dcb6b6c1da0c9f4f | refs/heads/master | 2020-03-25T22:43:31.004539 | 2018-08-09T12:43:50 | 2018-08-09T12:43:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | from django.conf.urls import url
from blood.donor.views import DonorRegistrationView
urlpatterns = [
url(r'^$', DonorRegistrationView.as_view(), name='home'),
] | [
"[email protected]"
] | |
6c340cf04b054bb75cdfb63b9fbdf78c10395714 | fa93e53a9eee6cb476b8998d62067fce2fbcea13 | /build/pal_statistics/catkin_generated/generate_cached_setup.py | 91044b831264767383d53fe4fda54ceccbaa6122 | [] | no_license | oyetripathi/ROS_conclusion_project | 2947ee2f575ddf05480dabc69cf8af3c2df53f73 | 01e71350437d57d8112b6cec298f89fc8291fb5f | refs/heads/master | 2023-06-30T00:38:29.711137 | 2021-08-05T09:17:54 | 2021-08-05T09:17:54 | 392,716,311 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,355 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in '/home/sandeepan/tiago_public_ws/devel;/opt/ros/melodic'.split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/sandeepan/tiago_public_ws/devel/.private/pal_statistics/env.sh')
output_filename = '/home/sandeepan/tiago_public_ws/build/pal_statistics/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
# print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"[email protected]"
] | |
55565979bd9e9ceb8e3a4424587b82a4a4a0688a | b94ab99f9c1f8bbb99afd23e1bfcd2332060b4bd | /library/migrations/0012_auto_20170805_0851.py | 050383e317e1d09ac7e3c45f3c1ea4e50db4dfca | [] | no_license | georgecai904/bookshelf | e54ccae00d4ee48e91ca1564a425ba4586b52d93 | 0002207dc8ca586ce1127d3ea98bb53102d043df | refs/heads/master | 2021-01-02T22:52:26.046535 | 2017-08-05T15:32:13 | 2017-08-05T15:32:13 | 99,409,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-05 08:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('library', '0011_auto_20170805_0850'),
]
operations = [
migrations.AlterField(
model_name='book',
name='pages',
field=models.IntegerField(blank=True),
),
migrations.AlterField(
model_name='book',
name='year',
field=models.IntegerField(blank=True),
),
]
| [
"[email protected]"
] | |
542650ec2698610417fd1074ecee715e3a7ecf4e | 55d6de252e61c4b60688ebd8b1f637807acc1e7c | /sale_report/wizard/aged_customer_list.py | 6bee3ea9ef1268815749ad4cfb0996f5175cc12b | [] | no_license | mosadiqit/eerna_erp_uslbd | b707a1d49a4fce7c1543b63e0120e8f9b77b26ce | 73e3994a9e32df7809d244eb6592513162ab7853 | refs/heads/main | 2023-06-30T14:53:04.837197 | 2021-08-04T11:30:46 | 2021-08-04T11:30:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,133 | py | # -*- coding: utf-8 -*-
from odoo import models, fields, api
from odoo.tools import DEFAULT_SERVER_DATE_FORMAT as DATE_FORMAT
from odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT as DATETIME_FORMAT, BytesIO, xlsxwriter, base64
class AgedCustomerReportWizard(models.TransientModel):
_name = 'aged.customer.report.wizard'
company_id = fields.Many2one('res.company', string='Company', domain=lambda self:self._get_companies(),default=lambda self: self.env.user.company_id,required=True)
branch_ids = fields.Many2one( 'res.branch',string='Branch')
date_start = fields.Date(string='Start Date', required=True, default=fields.Date.today)
date_end = fields.Date(string='End Date', required=True, default=fields.Date.today)
def _get_companies(self):
query="""select * from res_company_users_rel where user_id={}""".format(self.env.user.id)
self._cr.execute(query=query)
allowed_companies=self._cr.fetchall()
allowed_company=[]
for company in allowed_companies:
allowed_company.append(company[0])
return [('id', 'in', allowed_company)]
def get_report(self):
data = {
'model': self._name,
'ids': self.ids,
'form': {
'date_start': self.date_start, 'date_end': self.date_end,'company_id':self.company_id.id, 'branch_id': self.branch_ids.id,
'branch_name': self.branch_ids.name,
},
}
# ref `module_name.report_id` as reference.
return self.env.ref('sale_report.aged_customer_list_report').report_action(
self, data=data)
class AreaWiseSalesReportView(models.AbstractModel):
"""
Abstract Model specially for report template.
_name = Use prefix `report.` along with `module_name.report_name`
"""
_name = 'report.sale_report.aged_customer_list_view'
@api.model
def _get_report_values(self, docids, data=None):
branch_id = data['form']['branch_id']
branch_name = data['form']['branch_name']
company_id=data['form']['company_id']
date_start = data['form']['date_start']
date_end = data['form']['date_end']
# query = """select (cast(date_trunc('month',current_date) as date)) startdate"""
# self._cr.execute(query=query)
# result = self._cr.fetchall()
# date_start = result[0]
#
# query = """select (cast(date_trunc('month',current_date)-INTERVAL '90 day' as date)) todate"""
# self._cr.execute(query=query)
# result_1 = self._cr.fetchall()
# date_end = result_1[0]
if branch_id:
branch_id = " m.branch_id = %s" % branch_id
else:
branch_id = "1=1"
if company_id:
company_id = " m.company_id = %s" % company_id
else:
company_id = "1=1"
query = """select distinct p.id,p.name,ca.area_name,max(m.date) as last_trans_date,sum(ml.debit) as debit,sum(ml.credit) as credit, (sum(debit)-sum(credit)) as Balance
from res_partner p
left join account_move m on m.partner_id=p.id
left join account_move_line ml on m.id=ml.move_id
left join customer_area_setup ca on ca.id=p.customer_area
where m.state='posted' --and
and p.id not in
(select distinct m.partner_id from account_move_line ml
left join account_move m on m.id=ml.move_id
where {} and m.date between '{}' and '{}' and {} and m.partner_id is not null)
group by p.id,p.name,ca.area_name
order by p.name,ca.area_name
""".format(branch_id, date_start, date_end, company_id)
self._cr.execute(query=query)
query_result = self._cr.fetchall()
return {
'date_start': date_start,
'date_end': date_end,
'branch': branch_name,
'idle_customer': query_result,
}
| [
"[email protected]"
] | |
4c0ac46f1d84771dc353dc57195fde968e23467e | 630fe47bb5aa5e49b45ab101d87c2dd2c53d180f | /Bubble_soft_json.py | 4670c4958ea3d09c64e4879c8e4c5913ea0222f9 | [] | no_license | shrivastava-himanshu/Leetcode_practice | 467497a58d82ff3ae2569d5e610dc6f27a1f31d6 | 4c59799947c2b17bfd22ca2a08707ef85e84a913 | refs/heads/main | 2023-06-12T13:14:45.381839 | 2021-07-05T04:09:05 | 2021-07-05T04:09:05 | 367,546,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | def bubble_sort_json(elements,key='name'):
size = len(elements)
for i in range(size-1):
swapped = False
for j in range(size-1-i):
a = elements[j][key]
b = elements[j+1][key]
if a > b:
                elements[j], elements[j+1] = elements[j+1], elements[j]
swapped = True
if not swapped:
break
if __name__ == '__main__':
elements = [
{'name': 'mona', 'transaction_amount': 1000, 'device': 'iphone-10'},
{'name': 'dhaval', 'transaction_amount': 400, 'device': 'google pixel'},
{'name': 'kathy', 'transaction_amount': 200, 'device': 'vivo'},
{'name': 'aamir', 'transaction_amount': 800, 'device': 'iphone-8'},
]
bubble_sort_json(elements,key='device')
print(elements) | [
"[email protected]"
] | |
5b52140b650b1eaf1fa06ca3369752b6bba03eb9 | ef786be9b7c7145d63797cb8c351780059996873 | /watchlist_app/migrations/0001_initial.py | 76d570588a7802ab729017622b43b470dcd0ec1d | [] | no_license | nileshnagarwal/djangorest_course_sarda | 31c27ab625139f632d1121296c981c108301de70 | 933d7b5330d7fda1b17c367d30cb903543eebb02 | refs/heads/main | 2023-07-17T11:49:43.562982 | 2021-08-27T07:19:57 | 2021-08-27T07:19:57 | 394,706,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | # Generated by Django 3.2.6 on 2021-08-10 07:41
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Movie',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('description', models.CharField(max_length=200)),
('active', models.BooleanField(default=True)),
],
),
]
| [
"[email protected]"
] | |
8aa8349d9f1213a364dd5a5713676193303f913c | 159aed4755e47623d0aa7b652e178296be5c9604 | /data/scripts/templates/object/draft_schematic/chemistry/shared_medpack_disease_action_a.py | 20a75b8ac4bc4329cb51b260ec5349043fb09d4a | [
"MIT"
] | permissive | anhstudios/swganh | fb67d42776864b1371e95f769f6864d0784061a3 | 41c519f6cdef5a1c68b369e760781652ece7fec9 | refs/heads/develop | 2020-12-24T16:15:31.813207 | 2016-03-08T03:54:32 | 2016-03-08T03:54:32 | 1,380,891 | 33 | 44 | null | 2016-03-08T03:54:32 | 2011-02-18T02:32:45 | Python | UTF-8 | Python | false | false | 463 | py | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/chemistry/shared_medpack_disease_action_a.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | [
"[email protected]"
] | |
efaeae158142d783d0c4d3c5f624d9fbd08615b8 | 6f151b64427d47571ff8d02a24a98c9cbd8c68a5 | /[leetcode-08]strings-to-integer-atoi.py | 17245f772a1bae021abee1832c545d882e0b4cb2 | [
"MIT"
] | permissive | Menah3m/leetcode-Python | 50c0a0e518274cfa9a5ce939c37c075ce226dd04 | 212cae16ae868e5f031d3aeb8f614c539c1a27f1 | refs/heads/master | 2021-04-02T03:24:35.855185 | 2020-12-15T09:39:03 | 2020-12-15T09:39:03 | 248,238,533 | 0 | 0 | null | 2020-12-15T09:39:05 | 2020-03-18T13:25:55 | Python | UTF-8 | Python | false | false | 2,470 | py | """
Implement the atoi function, which converts a string to an integer.
First, the function discards as many leading whitespace characters as necessary until the first
non-whitespace character is found. The conversion rules are then as follows:
If the first non-whitespace character is a plus or minus sign, combine it with as many consecutive digit
characters as possible that follow it to form a signed integer.
If the first non-whitespace character is a digit, combine it directly with the consecutive digit
characters that follow it to form an integer.
The string may contain extra characters after the valid integer part; these can be ignored and must not
affect the result.
Note: if the first non-whitespace character is not a valid integer character, or the string is empty or
contains only whitespace, no valid conversion can be performed.
Whenever no valid conversion can be performed, return 0.
Hints:
The only whitespace character in this problem is the space character ' '.
Assume the environment can only store 32-bit signed integers, whose range is [-2**31, 2**31 - 1]. If the
value exceeds this range, return INT_MAX (2**31 - 1) or INT_MIN (-2**31).
Example 1:
Input: "42"
Output: 42
Example 2:
Input: " -42"
Output: -42
Explanation: the first non-whitespace character is '-', a minus sign.
Combining it with as many consecutive digits as possible gives -42.
Example 3:
Input: "4193 with words"
Output: 4193
Explanation: conversion stops at the digit '3' because the next character is not a digit.
Example 4:
Input: "words and 987"
Output: 0
Explanation: the first non-whitespace character is 'w', which is neither a digit nor a sign,
so no valid conversion can be performed.
Example 5:
Input: "-91283472332"
Output: -2147483648
Explanation: "-91283472332" exceeds the 32-bit signed integer range,
so INT_MIN (-2**31) is returned.
Source: LeetCode-08
Link: https://leetcode-cn.com/problems/string-to-integer-atoi
"""
class Solution:
def myAtoi(self, str: str) -> int:
str = str.lstrip()
if len(str)==0 or (str[0].isdigit()==False and str[0] not in ["-", "+"]):
return 0
res, i = str[0], 1
while i < len(str) and str[i].isdigit():
res += str[i]
i += 1
try:
res = int(res)
return min(max(res, -2**31), 2**31-1)
        except ValueError:
return 0 | [
"[email protected]"
] | |
d046514180a9e37274ac16c00eabafba5f77c479 | 9a9fb43d866dc8fd829211d2b47328ef1f5ed428 | /PI_ROS_WORKSPACES/ros_catkin_ws/build_isolated/rosboost_cfg/catkin_generated/pkg.develspace.context.pc.py | 72b73d066877678ca5a5c6ee1ed6b3c0bdd104c5 | [] | no_license | droter/auto_mow | 326df42a54676079cac61fe63c40d5d04beb049b | 3742cb2ef78bc06d2771ac4c679e5110909774f8 | refs/heads/master | 2022-05-19T20:18:33.409777 | 2020-04-29T00:42:24 | 2020-04-29T00:42:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rosboost_cfg"
PROJECT_SPACE_DIR = "/home/pi/ros_catkin_ws/devel_isolated/rosboost_cfg"
PROJECT_VERSION = "1.14.4"
| [
"[email protected]"
] | |
26f1f046fbcc3e826fb8fa1f586db82a5eadc742 | aea02d626c10396c2220d5ee642cb9c279e5bc37 | /migrations/versions/34c8e6e836da_email_column.py | d039f076f6691862d68521cdf0df979072f894e7 | [
"MIT"
] | permissive | Derrick-Nyongesa/Blog | 5fb176575865a75a02658bc8622fed3b9e05c919 | aff6b97aac958e6f626c934c57fffba1bb1f845d | refs/heads/main | 2023-04-14T12:21:20.890964 | 2021-04-26T07:07:55 | 2021-04-26T07:07:55 | 360,806,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | """email column
Revision ID: 34c8e6e836da
Revises: 4fb50df0a785
Create Date: 2021-04-23 14:54:17.658067
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '34c8e6e836da'
down_revision = '4fb50df0a785'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('email', sa.String(length=255), nullable=True))
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_users_email'), table_name='users')
op.drop_column('users', 'email')
# ### end Alembic commands ###
| [
"[email protected]"
] | |
bd3b3d45311a3acf29007751bcf7d26209d85391 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part000492.py | 7a98b03c8016f65306e467ddfb470f008a62508e | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,532 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher43335(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({0: 1, 1: 1}), [
(VariableWithCount('i2.2.1.2.2.2.0', 1, 1, S(0)), Add)
]),
1: (1, Multiset({2: 1}), [
(VariableWithCount('i2.2.1.2.2.2.0', 1, 1, S(0)), Add)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Add
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher43335._instance is None:
CommutativeMatcher43335._instance = CommutativeMatcher43335()
return CommutativeMatcher43335._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 43334
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.2.2.1.0', S(1))
except ValueError:
pass
else:
pass
# State 43336
if len(subjects) >= 1 and isinstance(subjects[0], Pow):
tmp2 = subjects.popleft()
subjects3 = deque(tmp2._args)
# State 43337
if len(subjects3) >= 1:
tmp4 = subjects3.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.2.2.2.1.1', tmp4)
except ValueError:
pass
else:
pass
# State 43338
if len(subjects3) >= 1 and subjects3[0] == Integer(2):
tmp6 = subjects3.popleft()
# State 43339
if len(subjects3) == 0:
pass
# State 43340
if len(subjects) == 0:
pass
# 0: g*x**2
yield 0, subst2
subjects3.appendleft(tmp6)
subjects3.appendleft(tmp4)
subjects.appendleft(tmp2)
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.2.2.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 43348
if len(subjects) >= 1:
tmp8 = subjects.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.2.2.2.1.1', tmp8)
except ValueError:
pass
else:
pass
# State 43349
if len(subjects) == 0:
pass
# 1: f*x
yield 1, subst2
subjects.appendleft(tmp8)
if len(subjects) >= 1:
tmp10 = subjects.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.2.2.2.1.0', tmp10)
except ValueError:
pass
else:
pass
# State 55141
if len(subjects) == 0:
pass
# 2: x*f
yield 2, subst2
subjects.appendleft(tmp10)
if len(subjects) >= 1 and isinstance(subjects[0], Mul):
tmp12 = subjects.popleft()
associative1 = tmp12
associative_type1 = type(tmp12)
subjects13 = deque(tmp12._args)
matcher = CommutativeMatcher43342.get()
tmp14 = subjects13
subjects13 = []
for s in tmp14:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp14, subst0):
pass
if pattern_index == 0:
pass
# State 43347
if len(subjects) == 0:
pass
# 0: g*x**2
yield 0, subst1
if pattern_index == 1:
pass
# State 43350
if len(subjects) == 0:
pass
# 1: f*x
yield 1, subst1
if pattern_index == 2:
pass
# State 55142
if len(subjects) == 0:
pass
# 2: x*f
yield 2, subst1
subjects.appendleft(tmp12)
return
yield
from .generated_part000493 import *
from matchpy.matching.many_to_one import CommutativeMatcher
from collections import deque
from matchpy.utils import VariableWithCount
from multiset import Multiset | [
"[email protected]"
] | |
2347536eb37a719bc87f0edc137313e5e7eacfe6 | 5b9f9b4ea1494943e6f7f842df55909599ed1304 | /python/onshape_client/oas/models/security_scheme.py | 90cf4db87b065dc49b92d77bf05e8d217d9c5b3c | [] | no_license | jenniferyoung02/onshape-clients | f50534f033428027515b7fc0b801b1caab4d0aec | 8ee31a17d7af32f105b851e45f69fd4a3006e1ba | refs/heads/master | 2020-09-07T06:44:37.682545 | 2019-10-08T18:52:06 | 2019-10-08T18:52:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,638 | py | # coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.104
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class SecurityScheme(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'str',
'description': 'str',
'name': 'str',
'getref': 'str',
'_in': 'str',
'scheme': 'str',
'bearer_format': 'str',
'flows': 'OAuthFlows',
'open_id_connect_url': 'str',
'extensions': 'dict(str, object)'
}
attribute_map = {
'type': 'type',
'description': 'description',
'name': 'name',
'getref': 'get$ref',
'_in': 'in',
'scheme': 'scheme',
'bearer_format': 'bearerFormat',
'flows': 'flows',
'open_id_connect_url': 'openIdConnectUrl',
'extensions': 'extensions'
}
def __init__(self, type=None, description=None, name=None, getref=None, _in=None, scheme=None, bearer_format=None, flows=None, open_id_connect_url=None, extensions=None): # noqa: E501
"""SecurityScheme - a model defined in OpenAPI""" # noqa: E501
self._type = None
self._description = None
self._name = None
self._getref = None
self.__in = None
self._scheme = None
self._bearer_format = None
self._flows = None
self._open_id_connect_url = None
self._extensions = None
self.discriminator = None
if type is not None:
self.type = type
if description is not None:
self.description = description
if name is not None:
self.name = name
if getref is not None:
self.getref = getref
if _in is not None:
self._in = _in
if scheme is not None:
self.scheme = scheme
if bearer_format is not None:
self.bearer_format = bearer_format
if flows is not None:
self.flows = flows
if open_id_connect_url is not None:
self.open_id_connect_url = open_id_connect_url
if extensions is not None:
self.extensions = extensions
@property
def type(self):
"""Gets the type of this SecurityScheme. # noqa: E501
:return: The type of this SecurityScheme. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this SecurityScheme.
:param type: The type of this SecurityScheme. # noqa: E501
:type: str
"""
allowed_values = ["apiKey", "http", "oauth2", "openIdConnect"] # noqa: E501
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
@property
def description(self):
"""Gets the description of this SecurityScheme. # noqa: E501
:return: The description of this SecurityScheme. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this SecurityScheme.
:param description: The description of this SecurityScheme. # noqa: E501
:type: str
"""
self._description = description
@property
def name(self):
"""Gets the name of this SecurityScheme. # noqa: E501
:return: The name of this SecurityScheme. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this SecurityScheme.
:param name: The name of this SecurityScheme. # noqa: E501
:type: str
"""
self._name = name
@property
def getref(self):
"""Gets the getref of this SecurityScheme. # noqa: E501
:return: The getref of this SecurityScheme. # noqa: E501
:rtype: str
"""
return self._getref
@getref.setter
def getref(self, getref):
"""Sets the getref of this SecurityScheme.
:param getref: The getref of this SecurityScheme. # noqa: E501
:type: str
"""
self._getref = getref
@property
def _in(self):
"""Gets the _in of this SecurityScheme. # noqa: E501
:return: The _in of this SecurityScheme. # noqa: E501
:rtype: str
"""
return self.__in
@_in.setter
def _in(self, _in):
"""Sets the _in of this SecurityScheme.
:param _in: The _in of this SecurityScheme. # noqa: E501
:type: str
"""
allowed_values = ["cookie", "header", "query"] # noqa: E501
if _in not in allowed_values:
raise ValueError(
"Invalid value for `_in` ({0}), must be one of {1}" # noqa: E501
.format(_in, allowed_values)
)
self.__in = _in
@property
def scheme(self):
"""Gets the scheme of this SecurityScheme. # noqa: E501
:return: The scheme of this SecurityScheme. # noqa: E501
:rtype: str
"""
return self._scheme
@scheme.setter
def scheme(self, scheme):
"""Sets the scheme of this SecurityScheme.
:param scheme: The scheme of this SecurityScheme. # noqa: E501
:type: str
"""
self._scheme = scheme
@property
def bearer_format(self):
"""Gets the bearer_format of this SecurityScheme. # noqa: E501
:return: The bearer_format of this SecurityScheme. # noqa: E501
:rtype: str
"""
return self._bearer_format
@bearer_format.setter
def bearer_format(self, bearer_format):
"""Sets the bearer_format of this SecurityScheme.
:param bearer_format: The bearer_format of this SecurityScheme. # noqa: E501
:type: str
"""
self._bearer_format = bearer_format
@property
def flows(self):
"""Gets the flows of this SecurityScheme. # noqa: E501
:return: The flows of this SecurityScheme. # noqa: E501
:rtype: OAuthFlows
"""
return self._flows
@flows.setter
def flows(self, flows):
"""Sets the flows of this SecurityScheme.
:param flows: The flows of this SecurityScheme. # noqa: E501
:type: OAuthFlows
"""
self._flows = flows
@property
def open_id_connect_url(self):
"""Gets the open_id_connect_url of this SecurityScheme. # noqa: E501
:return: The open_id_connect_url of this SecurityScheme. # noqa: E501
:rtype: str
"""
return self._open_id_connect_url
@open_id_connect_url.setter
def open_id_connect_url(self, open_id_connect_url):
"""Sets the open_id_connect_url of this SecurityScheme.
:param open_id_connect_url: The open_id_connect_url of this SecurityScheme. # noqa: E501
:type: str
"""
self._open_id_connect_url = open_id_connect_url
@property
def extensions(self):
"""Gets the extensions of this SecurityScheme. # noqa: E501
:return: The extensions of this SecurityScheme. # noqa: E501
:rtype: dict(str, object)
"""
return self._extensions
@extensions.setter
def extensions(self, extensions):
"""Sets the extensions of this SecurityScheme.
:param extensions: The extensions of this SecurityScheme. # noqa: E501
:type: dict(str, object)
"""
self._extensions = extensions
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SecurityScheme):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
843d08c826f8a29b84e9e209cbd7cf14df5aad5d | bcc199a7e71b97af6fbfd916d5a0e537369c04d9 | /acmicpc/solved/2448_Draw_Stars/solution.py | bd9a9474145925faf28b5b29de95d694d48808eb | [] | no_license | sungminoh/algorithms | 9c647e82472905a2c4e505c810b622b734d9d20d | 1389a009a02e90e8700a7a00e0b7f797c129cdf4 | refs/heads/master | 2023-05-01T23:12:53.372060 | 2023-04-24T06:34:12 | 2023-04-24T06:34:12 | 87,406,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | import math
base = [' * ', ' * * ', '*****']
n = input()
def get_k(n):
return int(math.log(n/3., 2))
def concat1(a, b):
return [x[0]+' '+x[1] for x in zip(a, b)]
def concat2(a, b):
return [x[0]+x[1] for x in zip(a, b)]
def nth(i):
if i == 0:
return base
else:
child = nth(i-1)
space = [' '*(2**(i-1)) for x in range(len(child))]
return concat2(space, concat2(child, space)) + concat1(child, child)
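# nth(i) appears to build the doubled star triangle: the previous pattern padded on
# both sides with blanks, stacked above two copies of itself placed side by side.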
print '\n'.join(nth(get_k(n))) | [
"[email protected]"
] | |
d12b330c8a86dae7b2e3ff874faa4a0c84278ccd | d697c1d45e96bd440be9c17ab14243a5882b1f52 | /hm/oop/Tool.py | bc86e8847c84a48bbaf2b70702747c4f2cdd6d05 | [] | no_license | ithjl521/python | 9eeda2e60dda97ee36e8764c06400eb12818689f | f4fe50799501c483cb64445fd05ee0f30f56576c | refs/heads/master | 2020-07-12T23:10:53.608276 | 2019-11-08T08:59:35 | 2019-11-08T08:59:35 | 204,931,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | class Tool(object):
count = 0
def __init__(self,name):
self.name = name
Tool.count += 1
# self.count += 1
tool1= Tool('futou')
tool2= Tool('maoie')
tool3= Tool('fuzi')
Tool.count = 99
print(Tool.count)
| [
"[email protected]"
] | |
a6223c3a60b16697d235aa1eeeb4a1c5dda89b26 | 5c254373f6725107931b68704436c2dbcd39d877 | /data_utils/FS_utils/eval_map.py | ff3f00e38f9b4ccfb4a4a595343df50faa23d6c3 | [
"MIT"
] | permissive | JunLi-Galios/unsup_temp_embed_alternating | 22330346094720ecba2e5af305febe586566b92f | 1b054fd82aadcfe1aa219be17beb77c89efd974e | refs/heads/master | 2023-03-21T04:06:16.044321 | 2021-03-20T06:06:06 | 2021-03-20T06:06:06 | 322,737,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,472 | py | #!/usr/bin/env python
"""Eval level activity
"""
__author__ = 'Anna Kukleva'
__date__ = 'January 2019'
import os
from ute.utils.arg_pars import opt
import data_utils.FS_utils.update_argpars as fs_utils
fs_utils.update()
actions = ['add_dressing',
'add_oil',
'add_pepper',
'cut',
'mix_dressing',
'mix_ingredients',
'peel_cucumber',
'place',
'serve_salad_onto_plate']
eval = {}
eval['action_start'] = ['action_start']
eval['add_dressing'] = ['add_dressing']
eval['add_oil'] = ['add_oil']
eval['add_pepper'] = ['add_pepper']
eval['cut'] = ['cut_cucumber',
'cut_tomato',
'cut_cheese',
'cut_lettuce']
eval['mix_dressing'] = ['mix_dressing']
eval['mix_ingredients'] = ['mix_ingredients']
eval['peel_cucumber'] = ['peel_cucumber']
eval['place'] = ['place_cucumber_into_bowl',
'place_tomato_into_bowl',
'place_cheese_into_bowl',
'place_lettuce_into_bowl']
eval['serve_salad_onto_plate'] = ['serve_salad_onto_plate']
eval['null'] = ['add_salt',
'add_vinegar']
eval['action_end'] = ['action_end']
label2idx = {}
idx2label = {}
path = os.path.join(opt.dataset_root, opt.gt, 'mapping', 'mappingeval.txt')
with open(path, 'w') as f:
for idx, (high_act, mid_acts) in enumerate(eval.items()):
for mid_act in mid_acts:
f.write('%d %s\n' % (idx, mid_act))
| [
"[email protected]"
] | |
07b1dc0344a5647316639af1cc3e0d015e5e107f | 7a5c9962ee40996a9f24f5493c715d5553052cf7 | /jobs/apps.py | 659d18b194f99c3b388bbaea0e799ca23b237f8c | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | dymaxionlabs/satlomas-back | da6355d1fc90a2e9d4a7795b4751e3ebe043ffa6 | f4568f6535755fd4a2432ecc661a264872206c6c | refs/heads/master | 2023-07-17T17:07:43.037314 | 2021-08-28T15:54:21 | 2021-08-28T15:54:21 | 262,424,687 | 0 | 0 | null | 2020-05-08T20:42:49 | 2020-05-08T20:42:48 | null | UTF-8 | Python | false | false | 132 | py | from django.apps import AppConfig
class JobsConfig(AppConfig):
name = 'jobs'
def ready(self):
import jobs.signals | [
"[email protected]"
] | |
78befbcc094a1c019b67d6b56a7b35cf4e3d6b6b | a89c739589d0ee29ff6fff1a1508a426dfe4489a | /basics/assert.py | b7872ff4b51952a23919213ac3ad863af939f0fd | [] | no_license | macabdul9/python-learning | 107e3533998e3f373b804d6b59152fc41938604b | f0d5e0e37cbed3d846684be80f0f92e5cbb9ceb5 | refs/heads/master | 2020-04-27T04:31:47.907486 | 2020-03-05T16:48:53 | 2020-03-05T16:48:53 | 174,057,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | """
@author : macab (macab@debian)
@file : assert
@created : Sunday Mar 17, 2019 00:28:58 IST
"""
'''
Python provides the assert statement to check whether a given logical expression is true. Program execution
continues only if the expression is true; if it is false, an AssertionError is raised. The following code
shows the usage of the assert statement: a failed check stops the program rather than taking another branch.
'''
x = int(input())
assert x >= 0
print(x)
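# A failing check raises AssertionError instead of taking another branch, e.g.:
#   y = -5
#   assert y >= 0, "y must be non-negative"  # AssertionError: y must be non-negative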
| [
"[email protected]"
] | |
7a6c254cbc7e0b5d94437a6f0cb3061191327052 | 5942e3e75ef7dc22a67b04fb1f12e14658a2093d | /documentation_files/platform.py | d83912b2d5f5be349492150ecc6894802fff344d | [] | no_license | the-factory/kdevelop-python | 9e94d2a4d4906a31a4d2a8a08300766e02d41a59 | 1e91f2cb4c94d9455a2ee22fef13df680aeed1ab | refs/heads/master | 2021-01-18T08:57:16.707711 | 2012-04-09T22:37:47 | 2012-04-09T22:37:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,818 | py | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
""":synopsis: Retrieves as much platform identifying data as possible.
"""
def machine():
"""
Returns the machine type, e.g. ``'i386'``. An empty string is returned if the
value cannot be determined.
"""
pass
def node():
"""
Returns the computer's network name (may not be fully qualified!). An empty
string is returned if the value cannot be determined.
"""
pass
def platform(aliased=0,terse=0):
"""
Returns a single string identifying the underlying platform with as much useful
information as possible.
The output is intended to be *human readable* rather than machine parseable. It
may look different on different platforms and this is intended.
If *aliased* is true, the function will use aliases for various platforms that
report system names which differ from their common names, for example SunOS will
be reported as Solaris. The :func:`system_alias` function is used to implement
this.
Setting *terse* to true causes the function to return only the absolute minimum
information needed to identify the platform.
"""
pass
def processor():
"""
Returns the (real) processor name, e.g. ``'amdk6'``.
An empty string is returned if the value cannot be determined. Note that many
platforms do not provide this information or simply return the same value as for
:func:`machine`. NetBSD does this.
"""
pass
def python_build():
"""
Returns a tuple ``(buildno, builddate)`` stating the Python build number and
date as strings.
"""
pass
def python_compiler():
"""
Returns a string identifying the compiler used for compiling Python.
"""
pass
def python_branch():
"""
Returns a string identifying the Python implementation SCM branch.
"""
pass
def python_implementation():
"""
Returns a string identifying the Python implementation. Possible return values
are: 'CPython', 'IronPython', 'Jython', 'PyPy'.
"""
pass
def python_revision():
"""
Returns a string identifying the Python implementation SCM revision.
"""
pass
def python_version():
"""
Returns the Python version as string ``'major.minor.patchlevel'``
Note that unlike the Python ``sys.version``, the returned value will always
include the patchlevel (it defaults to 0).
"""
pass
def python_version_tuple():
"""
Returns the Python version as tuple ``(major, minor, patchlevel)`` of strings.
Note that unlike the Python ``sys.version``, the returned value will always
include the patchlevel (it defaults to ``'0'``).
"""
pass
def release():
"""
Returns the system's release, e.g. ``'2.2.0'`` or ``'NT'`` An empty string is
returned if the value cannot be determined.
"""
pass
def system():
"""
Returns the system/OS name, e.g. ``'Linux'``, ``'Windows'``, or ``'Java'``. An
empty string is returned if the value cannot be determined.
"""
pass
def system_alias(system,release,version):
"""
Returns ``(system, release, version)`` aliased to common marketing names used
for some systems. It also does some reordering of the information in some cases
where it would otherwise cause confusion.
"""
pass
def version():
"""
Returns the system's release version, e.g. ``'#3 on degas'``. An empty string is
returned if the value cannot be determined.
"""
pass
def uname():
"""
Fairly portable uname interface. Returns a tuple of strings ``(system, node,
release, version, machine, processor)`` identifying the underlying platform.
Note that unlike the :func:`os.uname` function this also returns possible
processor information as additional tuple entry.
Entries which cannot be determined are set to ``''``.
Java Platform
-------------
"""
pass
def java_ver(release='',vendor='',vminfo=('','',''),osinfo=('','','')):
"""
Version interface for Jython.
Returns a tuple ``(release, vendor, vminfo, osinfo)`` with *vminfo* being a
tuple ``(vm_name, vm_release, vm_vendor)`` and *osinfo* being a tuple
``(os_name, os_version, os_arch)``. Values which cannot be determined are set to
the defaults given as parameters (which all default to ``''``).
Windows Platform
----------------
"""
pass
def win32_ver(release='',version='',csd='',ptype=''):
"""
Get additional version information from the Windows Registry and return a tuple
``(version, csd, ptype)`` referring to version number, CSD level and OS type
(multi/single processor).
As a hint: *ptype* is ``'Uniprocessor Free'`` on single processor NT machines
and ``'Multiprocessor Free'`` on multi processor machines. The *'Free'* refers
to the OS version being free of debugging code. It could also state *'Checked'*
which means the OS version uses debugging code, i.e. code that checks arguments,
ranges, etc.
"""
pass
def popen(cmd,mode='r',bufsize=None):
"""
Portable :func:`popen` interface. Find a working popen implementation
preferring :func:`win32pipe.popen`. On Windows NT, :func:`win32pipe.popen`
should work; on Windows 9x it hangs due to bugs in the MS C library.
Mac OS Platform
---------------
"""
pass
def mac_ver(release='',versioninfo=('','',''),machine=''):
"""
Get Mac OS version information and return it as tuple ``(release, versioninfo,
machine)`` with *versioninfo* being a tuple ``(version, dev_stage,
non_release_version)``.
Entries which cannot be determined are set to ``''``. All tuple entries are
strings.
Documentation for the underlying :cfunc:`gestalt` API is available online at
http://www.rgaros.nl/gestalt/.
Unix Platforms
--------------
"""
pass
def dist(distname='',version='',id='',supported_dists=('SuSE','debian','redhat','mandrake')):
"""
This is an old version of the functionality now provided by
:func:`linux_distribution`. For new code, please use the
:func:`linux_distribution`.
The only difference between the two is that ``dist()`` always
returns the short name of the distribution taken from the
``supported_dists`` parameter.
"""
pass
def linux_distribution(distname='',version='',id='',supported_dists=('SuSE','debian','redhat','mandrake'),full_distribution_name=1):
"""
Tries to determine the name of the Linux OS distribution name.
``supported_dists`` may be given to define the set of Linux distributions to
look for. It defaults to a list of currently supported Linux distributions
identified by their release file name.
If ``full_distribution_name`` is true (default), the full distribution read
from the OS is returned. Otherwise the short name taken from
``supported_dists`` is used.
Returns a tuple ``(distname,version,id)`` which defaults to the args given as
parameters. ``id`` is the item in parentheses after the version number. It
is usually the version codename.
"""
pass
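# --- Illustrative usage sketch (added; the functions above are documentation stubs) ---
# With the real standard-library platform module, calls of this shape are expected:
#   >>> import platform
#   >>> platform.system()               # e.g. 'Linux'
#   >>> platform.release()              # e.g. '2.6.32'
#   >>> platform.python_version()       # e.g. '2.7.3'
#   >>> platform.uname()                # (system, node, release, version, machine, processor)
#   >>> platform.platform(terse=1)      # short, human-readable platform string
# The example values are placeholders, not guaranteed outputs.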
| [
"[email protected]"
] | |
428bd78c26371e93841a86cf15ea344b9b336399 | 50d39f7a91047c7498714fd68958156320efdf5f | /cwr/grammar/record/writer_territory.py | f2dcc1c3454b7d7ebcad3fc23a15bd1c95df4167 | [
"MIT"
] | permissive | toddrimes/CWR-DataApi | a82784ec198e35ab311bf5576d31eefb9269939c | 4d9f504d9032cf1aa1bd86db6efbe26042c6a6ae | refs/heads/master | 2021-01-24T23:00:57.383927 | 2015-03-13T10:04:29 | 2015-03-13T10:04:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,830 | py | # -*- coding: utf-8 -*-
from data.accessor import CWRConfiguration
from cwr.grammar.field import table as field_table
from cwr.grammar.field import special as field_special
from cwr.grammar.field import record as field_record
from cwr.grammar.field import society as field_society
from cwr.grammar.field import writer_territory as field_writer_territory
from cwr.interested_party import IPTerritoryRecord
"""
CWR Writer Territory of Control (SWT) records grammar.
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
# Acquires data sources
_config = CWRConfiguration()
"""
General fields.
"""
"""
Patterns.
"""
territory = field_special.lineStart + field_record.record_prefix(
_config.record_type(
'writer_territory'),
compulsory=True) + field_special.ip_n() + field_society.pr_share() + field_society.mr_share() + field_society.sr_share() + \
field_table.ie_indicator() + field_table.tis_code() + field_writer_territory.shares_change + field_writer_territory.sequence_n + field_special.lineEnd
"""
Parsing actions for the patterns.
"""
territory.setParseAction(lambda p: _to_writerterritory(p))
"""
Parsing methods.
These are the methods which transform nodes into instances of classes.
"""
def _to_writerterritory(parsed):
"""
Transforms the final parsing result into an IPTerritoryRecord instance.
:param parsed: result of parsing the Territory record
:return: an IPTerritoryRecord created from the parsed record
"""
return IPTerritoryRecord(parsed.record_type, parsed.transaction_sequence_n, parsed.record_sequence_n,
parsed.ip_n, parsed.ie_indicator, parsed.tis_code, parsed.sequence_n,
parsed.pr_share, parsed.mr_share, parsed.sr_share, parsed.shares_change) | [
"[email protected]"
] | |
b4528282c5d0f3c4f595fde399e3016578457b11 | b2ff5ac2ef633e41ecec6ff7baae4b89254bf151 | /Hello_World/src/mainapp/profiles/migrations/0005_auto_20201015_1938.py | aa9304e6331f9e2da41aa37edffbcfac7c8524bf | [] | no_license | r3bunker/Python-Projects | 2bda2be348bc4e0aa530cadbf8c26a7f163bcd3f | e8742a9c5ed92424b5aeee0041e6e2267f26ccc6 | refs/heads/master | 2023-01-04T20:30:50.250655 | 2020-10-29T17:41:14 | 2020-10-29T17:41:14 | 300,012,282 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | # Generated by Django 3.1.2 on 2020-10-16 01:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0004_auto_20201015_1913'),
]
operations = [
migrations.AlterField(
model_name='profiles',
name='prefix',
field=models.CharField(choices=[('Mrs.', 'Mrs.'), ('Mr.', 'Mr.'), ('Ms.', 'Ms.')], default='', max_length=20),
),
]
| [
"[email protected]"
] | |
c0e8d2a4dd57f7f8365f36b333ff42431805e131 | 6300fcf67d4fcb5387a9f0f7370a8ffe8f4097d9 | /AutoParts/Tests/account_auth/views/sign_out_test.py | 958b55a8fb6505816eee2d92d73bf3c17334b56b | [] | no_license | Borislav-source/Final-Project | e34ac1cbb71e3a32ed490361d3583c2e1e8bfbc9 | 501b258d103c2e1b8947451f4bdf750709d040fd | refs/heads/master | 2023-07-17T15:03:19.390774 | 2021-09-01T14:06:09 | 2021-09-01T14:06:09 | 393,977,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | from django.urls import reverse
from Tests.base.tests import AutoPartsTestCase
class SignOutTest(AutoPartsTestCase):
def test_sign_out__if_user_is_logged_out(self):
self.client.force_login(self.user)
self.assertTrue(self.user)
self.client.logout()
self.assertTrue(self.user)
| [
"[email protected]"
] | |
11db259082bfba48cb8bd0c27e64e3d77bc28f6a | b87ea98bc166cade5c78d246aeb0e23c59183d56 | /samples/openapi3/client/3_0_3_unit_test/python/unit_test_api/model/uniqueitems_false_validation.py | 9a1260048babd250a0448eb1540cbf3fbb04e197 | [
"Apache-2.0"
] | permissive | holisticon/openapi-generator | 88f8e6a3d7bc059c8f56563c87f6d473694d94e5 | 6a67551ea54a1aa9a49eb48ee26b4e9bb7fb1272 | refs/heads/master | 2023-05-12T02:55:19.037397 | 2023-04-14T08:31:59 | 2023-04-14T08:31:59 | 450,034,139 | 1 | 0 | Apache-2.0 | 2022-01-20T09:34:14 | 2022-01-20T09:34:13 | null | UTF-8 | Python | false | false | 1,499 | py | # coding: utf-8
"""
openapi 3.0.3 sample spec
sample spec for testing openapi functionality, built from json schema tests for draft6 # noqa: E501
The version of the OpenAPI document: 0.0.1
Generated by: https://openapi-generator.tech
"""
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import typing_extensions # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from unit_test_api import schemas # noqa: F401
class UniqueitemsFalseValidation(
schemas.AnyTypeSchema,
):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
class MetaOapg:
unique_items = False
def __new__(
cls,
*_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ],
_configuration: typing.Optional[schemas.Configuration] = None,
**kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
) -> 'UniqueitemsFalseValidation':
return super().__new__(
cls,
*_args,
_configuration=_configuration,
**kwargs,
)
| [
"[email protected]"
] | |
ae132c35f9378954b35796886aca3491386db3b5 | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /all-gists/5804612/snippet.py | 365ec99e3c6e1802b17ec70aaac6ebe7524850f5 | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 4,333 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import curses
from math import floor
from datetime import datetime as date
from subprocess import Popen as popen
# Globals:
screen = curses.initscr()
last_width = 0
last_height = 0
alarm_hour = 0
alarm_minute = 0
alarm_state = False
alarm = None
glyph = {
'0': [" ##### ", " ## ## ", "## ## ", "## ## ", "## ## ", " ## ## ", " ##### "],
'1': [" ## ", " #### ", " ## ", " ## ", " ## ", " ## ", " ###### "],
'2': [" ####### ", "## ## ", " ## ", " ####### ", "## ", "## ", "######### "],
'3': [" ####### ", "## ## ", " ## ", " ####### ", " ## ", "## ## ", " ####### "],
'4': ["## ", "## ## ", "## ## ", "## ## ", "######### ", " ## ", " ## "],
'5': [" ######## ", " ## ", " ## ", " ####### ", " ## ", " ## ## ", " ###### "],
'6': [" ####### ", "## ## ", "## ", "######## ", "## ## ", "## ## ", " ####### "],
'7': [" ######## ", " ## ## ", " ## ", " ## ", " ## ", " ## ", " ## "],
'8': [" ####### ", "## ## ", "## ## ", " ####### ", "## ## ", "## ## ", " ####### "],
'9': [" ####### ", "## ## ", "## ## ", " ######## ", " ## ", "## ## ", " ####### "],
':': [" ", " ", " # ", " ", " # ", " ", " "]
}
def addstr(y, x, string, color):
try:
screen.addstr( origin_y + y, origin_x + x, string, color)
screen.refresh()
except: return
def print_time(now):
time_line = now.strftime("%I:%M:%S")
time_array = ["" for i in range(0,7)]
# Concat glyphs:
for char in time_line:
char_array = glyph[char]
for row in range(0, len(char_array)):
time_array[row] += char_array[row]
# Print glyphs:
for y in range(0, len(time_array)):
for x in range(0, len(time_array[y])):
char = time_array[y][x]
color = 1 if char == " " else 3
addstr( y, x, " ",
curses.color_pair(color))
# Add meridian:
addstr( 6, len(time_array[0]), now.strftime("%p"),
curses.color_pair(2) | curses.A_BOLD)
def print_date(now):
day_line = now.strftime("%A").center(11," ")
date_line = now.strftime("%B %d, %Y")
addstr(8, 0, day_line, curses.color_pair(3))
addstr(8, len(day_line) + 1, date_line, curses.color_pair(2) | curses.A_BOLD)
def print_alarm():
minute = alarm_minute
hour = alarm_hour - 12 if alarm_hour > 12 else (12 if not alarm_hour else alarm_hour)
meridian = "AM" if alarm_hour < 12 else "PM"
state = "ACT" if alarm_state else "OFF"
time = " %02d:%02d %s " % (hour, minute, meridian)
addstr(8, 46, state.center(5," "), curses.color_pair(3))
addstr(8, 52, " < ", curses.color_pair(3))
addstr(8, 55, time, curses.color_pair(2) | curses.A_BOLD)
addstr(8, 65, " > ", curses.color_pair(3))
def step_alarm(direction):
global alarm_minute, alarm_hour
alarm_minute = (30 if alarm_minute == 0 else 0)
if direction and alarm_minute == 0: alarm_hour = (alarm_hour + 1) % 24
elif not direction and alarm_minute == 30: alarm_hour = (alarm_hour - 1) % 24
def handle_mouse():
global alarm_state
(i, x, y, z, bstate) = curses.getmouse()
if y == origin_y + 8 and bstate == curses.BUTTON1_CLICKED:
if x > origin_x + 51 and x < origin_x + 55:
step_alarm(False)
if x > origin_x + 64 and x < origin_x + 68:
step_alarm(True)
if x > origin_x + 45 and x < origin_x + 51:
alarm_state = not alarm_state
# Setup
screen.keypad(1)
curses.curs_set(0)
curses.start_color()
curses.init_pair(1, 0, 0) # BB
curses.init_pair(2, 3, 0) # YB
curses.init_pair(3, 0, 3) # BY
curses.mousemask(curses.ALL_MOUSE_EVENTS)
curses.noecho()
curses.cbreak()
# Main
a = 0
while True:
width = screen.getmaxyx()[1]
height = screen.getmaxyx()[0]
origin_x = floor(width / 2) - 34
origin_y = floor(height / 2) - 4
now = date.now()
if width != last_width or height != last_height: screen.clear()
last_width = width
last_height = height
print_time(now)
print_date(now)
print_alarm()
if alarm_state and \
int(now.hour) == alarm_hour and \
int(now.minute) == alarm_minute and \
int(now.second) == 0:
pass
screen.timeout(30)
char = screen.getch()
if (char != -1):
if char == curses.KEY_MOUSE: handle_mouse()
elif char == 113: break
# Cleanup:
curses.endwin()
| [
"[email protected]"
] | |
288c2466e40aa160e37a43870d3d002aa2fb3ecd | 6fb37fee016346120d4c14c4343516532304055a | /src/genie/libs/parser/iosxr/tests/test_show_lag.py | c6fd6cb76e151730fba4cb6d7f539f3f7ed993be | [
"Apache-2.0"
] | permissive | devbollinger/genieparser | 011526ebbd747c6dcd767535ce4bd33167e15536 | ad5ce7ba8f5153d1aeb9cffcfc4dde0871f3401c | refs/heads/master | 2020-12-20T11:36:00.750128 | 2020-01-24T18:45:40 | 2020-01-24T18:45:40 | 236,061,155 | 0 | 0 | Apache-2.0 | 2020-01-24T18:38:43 | 2020-01-24T18:38:42 | null | UTF-8 | Python | false | false | 35,946 | py | #!/bin/env python
import unittest
from unittest.mock import Mock
from ats.topology import Device
from genie.metaparser.util.exceptions import SchemaEmptyParserError, SchemaMissingKeyError
from genie.libs.parser.iosxr.show_lag import ShowLacpSystemId, ShowBundle, ShowLacp
###################################################
# unit test for show lacp system-id
####################################################
class test_show_lacp_sysid(unittest.TestCase):
"""unit test for show lacp system-id"""
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
"system_priority": 100,
"system_id_mac": "00-1b-0c-10-5a-26"
}
golden_output = {'execute.return_value': '''
RP/0/RP0/CPU0:iosxrv9000-1#show lacp system-id
Tue Apr 3 20:33:23.108 UTC
Priority MAC Address
-------- -----------------
0x0064 00-1b-0c-10-5a-26
'''}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowLacpSystemId(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden(self):
self.maxDiff = None
self.device = Mock(**self.golden_output)
obj = ShowLacpSystemId(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output)
###################################################
# unit test for show bundle
####################################################
class test_show_bundle(unittest.TestCase):
"""unit test for show bundle"""
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output_1 = {
"interfaces": {
"Bundle-Ether1": {
"name": "Bundle-Ether1",
"bundle_id": 1,
"oper_status": "up",
"local_links": {
"active": 2,
"standby": 0,
"configured": 2
},
"local_bandwidth_kbps": {
"effective": 2000000,
"available": 2000000
},
"mac_address": "001b.0c10.5a25",
"mac_address_source": "Chassis pool",
"inter_chassis_link": "No",
"min_active_link": 1,
"min_active_bw_kbps": 1,
"max_active_link": 8,
"wait_while_timer_ms": 2000,
"load_balance": {
"link_order_signaling": "Not configured",
"hash_type": "Default",
"locality_threshold": "None"
},
"lacp": {
"lacp": "Operational",
"flap_suppression_timer": "Off",
"cisco_extensions": "Disabled",
"non_revertive": "Disabled"
},
"mlacp": {
"mlacp": "Not configured"
},
"ipv4_bfd": {
"ipv4_bfd": "Not configured"
},
"ipv6_bfd": {
"ipv6_bfd": "Not configured"
},
"port": {
"GigabitEthernet0/0/0/0": {
"interface": "GigabitEthernet0/0/0/0",
"device": "Local",
"state": "Active",
"port_id": "0x000a, 0x0001",
"bw_kbps": 1000000,
"link_state": "Active"
},
"GigabitEthernet0/0/0/1": {
"interface": "GigabitEthernet0/0/0/1",
"device": "Local",
"state": "Active",
"port_id": "0x8000, 0x0002",
"bw_kbps": 1000000,
"link_state": "Active"
}
}
},
"Bundle-Ether2": {
"name": "Bundle-Ether2",
"bundle_id": 2,
"oper_status": "up",
"local_links": {
"active": 2,
"standby": 1,
"configured": 3
},
"local_bandwidth_kbps": {
"effective": 2000000,
"available": 2000000
},
"mac_address": "001b.0c10.5a24",
"mac_address_source": "Chassis pool",
"inter_chassis_link": "No",
"min_active_link": 2,
"min_active_bw_kbps": 1,
"max_active_link": 2,
"wait_while_timer_ms": 2000,
"load_balance": {
"link_order_signaling": "Not configured",
"hash_type": "Default",
"locality_threshold": "None"
},
"lacp": {
"lacp": "Operational",
"flap_suppression_timer": "Off",
"cisco_extensions": "Disabled",
"non_revertive": "Disabled"
},
"mlacp": {
"mlacp": "Not configured"
},
"ipv4_bfd": {
"ipv4_bfd": "Not configured"
},
"ipv6_bfd": {
"ipv6_bfd": "Not configured"
},
"port": {
"GigabitEthernet0/0/0/2": {
"interface": "GigabitEthernet0/0/0/2",
"device": "Local",
"state": "Standby",
"port_id": "0x8000, 0x0005",
"bw_kbps": 1000000,
"link_state": "Standby due to maximum-active links configuration"
},
"GigabitEthernet0/0/0/3": {
"interface": "GigabitEthernet0/0/0/3",
"device": "Local",
"state": "Active",
"port_id": "0x8000, 0x0004",
"bw_kbps": 1000000,
"link_state": "Active"
},
"GigabitEthernet0/0/0/4": {
"interface": "GigabitEthernet0/0/0/4",
"device": "Local",
"state": "Active",
"port_id": "0x8000, 0x0003",
"bw_kbps": 1000000,
"link_state": "Active"
}
}
}
}
}
golden_output_1 = {'execute.return_value': '''
RP/0/RP0/CPU0:iosxrv9000-1#show bundle
Tue Apr 3 20:30:23.603 UTC
Bundle-Ether1
Status: Up
Local links <active/standby/configured>: 2 / 0 / 2
Local bandwidth <effective/available>: 2000000 (2000000) kbps
MAC address (source): 001b.0c10.5a25 (Chassis pool)
Inter-chassis link: No
Minimum active links / bandwidth: 1 / 1 kbps
Maximum active links: 8
Wait while timer: 2000 ms
Load balancing:
Link order signaling: Not configured
Hash type: Default
Locality threshold: None
LACP: Operational
Flap suppression timer: Off
Cisco extensions: Disabled
Non-revertive: Disabled
mLACP: Not configured
IPv4 BFD: Not configured
IPv6 BFD: Not configured
Port Device State Port ID B/W, kbps
-------------------- --------------- ----------- -------------- ----------
Gi0/0/0/0 Local Active 0x000a, 0x0001 1000000
Link is Active
Gi0/0/0/1 Local Active 0x8000, 0x0002 1000000
Link is Active
Bundle-Ether2
Status: Up
Local links <active/standby/configured>: 2 / 1 / 3
Local bandwidth <effective/available>: 2000000 (2000000) kbps
MAC address (source): 001b.0c10.5a24 (Chassis pool)
Inter-chassis link: No
Minimum active links / bandwidth: 2 / 1 kbps
Maximum active links: 2
Wait while timer: 2000 ms
Load balancing:
Link order signaling: Not configured
Hash type: Default
Locality threshold: None
LACP: Operational
Flap suppression timer: Off
Cisco extensions: Disabled
Non-revertive: Disabled
mLACP: Not configured
IPv4 BFD: Not configured
IPv6 BFD: Not configured
Port Device State Port ID B/W, kbps
-------------------- --------------- ----------- -------------- ----------
Gi0/0/0/2 Local Standby 0x8000, 0x0005 1000000
Link is Standby due to maximum-active links configuration
Gi0/0/0/3 Local Active 0x8000, 0x0004 1000000
Link is Active
Gi0/0/0/4 Local Active 0x8000, 0x0003 1000000
Link is Active
'''}
golden_parsed_output_2 = {
"interfaces": {
"Bundle-Ether 2": {
"name": "Bundle-Ether 2",
"bundle_id": 2,
"oper_status": "up",
"local_links": {
"active": 1,
"standby": 0,
"configured": 1
},
"local_bandwidth_kbps": {
"effective": 100000,
"available": 100000
},
"mac_address": "1234.4321.1111",
"mac_address_source": "GigabitEthernet0/0/0/1",
"min_active_link": 1,
"min_active_bw_kbps": 500,
"max_active_link": 32,
"wait_while_timer_ms": 2000,
"load_balance": {
"load_balance": "Default"
},
"lacp": {
"lacp": "Operational",
"flap_suppression_timer": "2500 ms",
"cisco_extensions": "Disabled"
},
"mlacp": {
"mlacp": "Operational",
"iccp_group": "3",
"foreign_links_active": 1,
"foreign_links_configured": 1,
"switchover_type": "Revertive",
"recovery_delay": "300 s",
"maximize_threshold": "2 links"
},
"ipv4_bfd": {
"ipv4_bfd": "Not operational",
"state": "Off",
"fast_detect": "Enabled",
"start_timer": "Off",
"neighbor_unconfigured_timer": "Off",
"preferred_min_interval_ms": 150,
"preferred_multiple": 3,
"destination_address": "Not Configured"
},
"port": {
"GigabitEthernet0/0/0/1": {
"interface": "GigabitEthernet0/0/0/1",
"bw_kbps": 100000,
"device": "Local",
"state": "Active",
"port_id": "0x8000, 0x0001"
},
"MyFirstInterface": {
"interface": "MyFirstInterface",
"bw_kbps": 100000,
"device": "10.10.10.123",
"state": "Negotiating",
"port_id": "0x8000, 0x0032"
}
}
},
"Bundle-Ether 3": {
"name": "Bundle-Ether 3",
"bundle_id": 3,
"oper_status": "up",
"local_links": {
"active": 1,
"standby": 0,
"configured": 1
},
"local_bandwidth_kbps": {
"effective": 100000,
"available": 100000
},
"mac_address": "1234.4321.2222",
"mac_address_source": "chassis pool",
"min_active_link": 1,
"min_active_bw_kbps": 500,
"max_active_link": 32,
"wait_while_timer_ms": 100,
"load_balance": {
"link_order_signaling": "Operational",
"hash_type": "Src-IP"
},
"lacp": {
"lacp": "Operational",
"flap_suppression_timer": "120 s",
"cisco_extensions": "Enabled"
},
"mlacp": {
"mlacp": "Not configured"
},
"ipv4_bfd": {
"ipv4_bfd": "Not operational"
},
"port": {
"GigabitEthernet0/0/0/2": {
"interface": "GigabitEthernet0/0/0/2",
"bw_kbps": 100000,
"device": "Local",
"state": "Active",
"port_id": "0x8000, 0x0002"
}
}
}
}
}
golden_output_2 = {'execute.return_value': '''
RP/0/RSP0/CPU0:router# show bundle
Bundle-Ether 2
Status: Up
Local links <active/standby/configured>: 1 / 0 / 1
Local bandwidth <effective/available>: 100000 (100000) kbps
MAC address (source): 1234.4321.1111 (Gi0/0/0/1)
Minimum active links / bandwidth: 1 / 500 kbps
Maximum active links: 32
Wait-while timer: 2000 ms
Load-balancing: Default
LACP: Operational
Flap suppression timer: 2500 ms
Cisco extensions: Disabled
mLACP: Operational
Interchassis group: 3
Foreign links <active/configured>: 1 / 1
Switchover type: Revertive
Recovery delay: 300 s
Maximize threshold: 2 links
IPv4 BFD: Not operational
State: Off
Fast detect: Enabled
Start timer: Off
Neighbor-unconfigured timer: Off
Preferred min interval: 150 ms
Preferred multiple: 3
Destination address: Not Configured
Port Device State Port ID B/W, kbps
-------------------- --------------- ----------- -------------- -----------
Gi0/0/0/1 Local Active 0x8000, 0x0001 100000
MyFirstInterface 10.10.10.123 Negotiating 0x8000, 0x0032 100000
Bundle-Ether 3
Status: Up
Local links <active/standby/configured>: 1 / 0 / 1
Local bandwidth <effective/available>: 100000 / 100000 kbps
MAC address (source): 1234.4321.2222 (chassis pool)
Minimum active links / bandwidth: 1 / 500 kbps
Maximum active links: 32 (from partner)
Wait-while timer: 100 ms
Load-balancing:
Link order signaling: Operational
Hash type: Src-IP
LACP: Operational
Flap suppression timer: 120 s
Cisco extensions: Enabled
mLACP: Not configured
IPv4 BFD: Not operational
Port Device State Port ID B/W, kbps
-------------------- --------------- ----------- -------------- -----------
Gi0/0/0/2 Local Active 0x8000, 0x0002 100000
'''}
golden_parsed_output_3 = {
"interfaces": {
"Bundle-Ether1": {
"name": "Bundle-Ether1",
"bundle_id": 1,
"oper_status": "up",
"local_links": {
"active": 1,
"standby": 0,
"configured": 1
},
"local_bandwidth_kbps": {
"effective": 1000000,
"available": 1000000
},
"mac_address": "0000.deaf.0000",
"mac_address_source": "Configured",
"min_active_link": 1,
"min_active_bw_kbps": 1,
"max_active_link": 64,
"wait_while_timer_ms": 100,
"lacp": {
"lacp": "Operational",
"flap_suppression_timer": "300 ms"
},
"mlacp": {
"mlacp": "Operational",
"role": "Active",
"foreign_links_active": 0,
"foreign_links_configured": 1,
"switchover_type": "Non-revertive",
"recovery_delay": "300 s",
"maximize_threshold": "Not configured"
},
"ipv4_bfd": {
"ipv4_bfd": "Not configured"
},
"port": {
"GigabitEthernet0/0/0/0": {
"interface": "GigabitEthernet0/0/0/0",
"bw_kbps": 1000000,
"device": "10.81.3.2",
"state": "Standby",
"port_id": "0x8002, 0xa001",
"link_state": "marked as Standby by mLACP peer"
}
}
}
}
}
golden_output_3 = {'execute.return_value': '''
RP/0/RSP0/CPU0:router# show bundle
Bundle-Ether1
Status: Up
Local links <active/standby/configured>: 1 / 0 / 1
Local bandwidth <effective/available>: 1000000 (1000000) kbps
MAC address (source): 0000.deaf.0000 (Configured)
Minimum active links / bandwidth: 1 / 1 kbps
Maximum active links: 64
Wait while timer: 100 ms
LACP: Operational
Flap suppression timer: 300 ms
mLACP: Operational
ICCP Group: 1
Role: Active
Foreign links <active/configured>: 0 / 1
Switchover type: Non-revertive
Recovery delay: 300 s
Maximize threshold: Not configured
IPv4 BFD: Not configured
Port Device State Port ID B/W, kbps
-------------------- --------------- ----------- -------------- ----------
Gi0/0/0/0 Local Active 0x8001, 0x9001 1000000
Link is Active
Gi0/0/0/0 10.81.3.2 Standby 0x8002, 0xa001 1000000
Link is marked as Standby by mLACP peer
'''}
golden_parsed_output_4 = {
"interfaces": {
"Bundle-Ether1": {
"name": "Bundle-Ether1",
"bundle_id": 1,
"oper_status": "mlacp hot standby",
"local_links": {
"active": 0,
"standby": 1,
"configured": 1
},
"local_bandwidth_kbps": {
"effective": 0,
"available": 0
},
"mac_address": "0000.deaf.0000",
"mac_address_source": "Configured",
"min_active_link": 1,
"min_active_bw_kbps": 1,
"max_active_link": 64,
"wait_while_timer_ms": 100,
"lacp": {
"lacp": "Operational",
"flap_suppression_timer": "300 ms"
},
"mlacp": {
"mlacp": "Operational",
"role": "Standby",
"foreign_links_active": 1,
"foreign_links_configured": 1,
"switchover_type": "Non-revertive",
"recovery_delay": "300 s",
"maximize_threshold": "Not configured"
},
"ipv4_bfd": {
"ipv4_bfd": "Not configured"
},
"port": {
"GigabitEthernet0/0/0/0": {
"interface": "GigabitEthernet0/0/0/0",
"bw_kbps": 1000000,
"device": "10.81.3.2",
"state": "Active",
"port_id": "0x8002, 0xa001",
"link_state": "Active"
}
}
}
}
}
golden_output_4 = {'execute.return_value': '''
RP/0/0/CPU0:router#show bundle
Mon Jun 7 06:04:17.778 PDT
Bundle-Ether1
Status: mLACP hot standby
Local links <active/standby/configured>: 0 / 1 / 1
Local bandwidth <effective/available>: 0 (0) kbps
MAC address (source): 0000.deaf.0000 (Configured)
Minimum active links / bandwidth: 1 / 1 kbps
Maximum active links: 64
Wait while timer: 100 ms
LACP: Operational
Flap suppression timer: 300 ms
mLACP: Operational
ICCP Group: 1
Role: Standby
Foreign links <active/configured>: 1 / 1
Switchover type: Non-revertive
Recovery delay: 300 s
Maximize threshold: Not configured
IPv4 BFD: Not configured
Port Device State Port ID B/W, kbps
-------------------- --------------- ----------- -------------- ----------
Gi0/0/0/0 Local Standby 0x8003, 0x9001 1000000
mLACP peer is active
Gi0/0/0/0 10.81.3.2 Active 0x8002, 0xa001 1000000
Link is Active
RP/0/0/CPU0:router#
'''}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowBundle(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden_1(self):
self.maxDiff = None
self.device = Mock(**self.golden_output_1)
obj = ShowBundle(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output_1)
def test_golden_2(self):
self.maxDiff = None
self.device = Mock(**self.golden_output_2)
obj = ShowBundle(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output_2)
def test_golden_3(self):
self.maxDiff = None
self.device = Mock(**self.golden_output_3)
obj = ShowBundle(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output_3)
def test_golden_4(self):
self.maxDiff = None
self.device = Mock(**self.golden_output_4)
obj = ShowBundle(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output_4)
###################################################
# unit test for show lacp
####################################################
class test_show_lacp(unittest.TestCase):
"""unit test for show lacp"""
device = Device(name='aDevice')
empty_output = {'execute.return_value': ''}
golden_parsed_output = {
"interfaces": {
"Bundle-Ether1": {
"name": "Bundle-Ether1",
"bundle_id": 1,
"lacp_mode": "active",
"port": {
"GigabitEthernet0/0/0/0": {
"interface": "GigabitEthernet0/0/0/0",
"bundle_id": 1,
"rate": 30,
"state": "ascdA---",
"port_id": "0x000a,0x0001",
"key": "0x0001",
"system_id": "0x0064,00-1b-0c-10-5a-26",
"aggregatable": True,
"synchronization": "in_sync",
"collecting": True,
"distributing": True,
"partner": {
"rate": 30,
"state": "ascdA---",
"port_id": "0x000a,0x0001",
"key": "0x0001",
"system_id": "0x8000,00-0c-86-5e-68-23",
"aggregatable": True,
"synchronization": "in_sync",
"collecting": True,
"distributing": True
},
"receive": "Current",
"period": "Slow",
"selection": "Selected",
"mux": "Distrib",
"a_churn": "None",
"p_churn": "None"
},
"GigabitEthernet0/0/0/1": {
"interface": "GigabitEthernet0/0/0/1",
"bundle_id": 1,
"rate": 30,
"state": "ascdA---",
"port_id": "0x8000,0x0002",
"key": "0x0001",
"system_id": "0x0064,00-1b-0c-10-5a-26",
"aggregatable": True,
"synchronization": "in_sync",
"collecting": True,
"distributing": True,
"partner": {
"rate": 30,
"state": "ascdA---",
"port_id": "0x8000,0x0005",
"key": "0x0001",
"system_id": "0x8000,00-0c-86-5e-68-23",
"aggregatable": True,
"synchronization": "in_sync",
"collecting": True,
"distributing": True
},
"receive": "Current",
"period": "Slow",
"selection": "Selected",
"mux": "Distrib",
"a_churn": "None",
"p_churn": "None"
}
}
},
"Bundle-Ether2": {
"name": "Bundle-Ether2",
"bundle_id": 2,
"lacp_mode": "active",
"port": {
"GigabitEthernet0/0/0/2": {
"interface": "GigabitEthernet0/0/0/2",
"bundle_id": 2,
"rate": 30,
"state": "a---A---",
"port_id": "0x8000,0x0005",
"key": "0x0002",
"system_id": "0x0064,00-1b-0c-10-5a-26",
"aggregatable": True,
"synchronization": "out_sync",
"collecting": False,
"distributing": False,
"partner": {
"rate": 30,
"state": "as--A---",
"port_id": "0x8000,0x0004",
"key": "0x0002",
"system_id": "0x8000,00-0c-86-5e-68-23",
"aggregatable": True,
"synchronization": "in_sync",
"collecting": False,
"distributing": False
},
"receive": "Current",
"period": "Slow",
"selection": "Standby",
"mux": "Waiting",
"a_churn": "Churn",
"p_churn": "None"
},
"GigabitEthernet0/0/0/3": {
"interface": "GigabitEthernet0/0/0/3",
"bundle_id": 2,
"rate": 30,
"state": "ascdA---",
"port_id": "0x8000,0x0004",
"key": "0x0002",
"system_id": "0x0064,00-1b-0c-10-5a-26",
"aggregatable": True,
"synchronization": "in_sync",
"collecting": True,
"distributing": True,
"partner": {
"rate": 30,
"state": "ascdA---",
"port_id": "0x8000,0x0003",
"key": "0x0002",
"system_id": "0x8000,00-0c-86-5e-68-23",
"aggregatable": True,
"synchronization": "in_sync",
"collecting": True,
"distributing": True
},
"receive": "Current",
"period": "Slow",
"selection": "Selected",
"mux": "Distrib",
"a_churn": "None",
"p_churn": "None"
},
"GigabitEthernet0/0/0/4": {
"interface": "GigabitEthernet0/0/0/4",
"bundle_id": 2,
"rate": 30,
"state": "ascdA---",
"port_id": "0x8000,0x0003",
"key": "0x0002",
"system_id": "0x0064,00-1b-0c-10-5a-26",
"aggregatable": True,
"synchronization": "in_sync",
"collecting": True,
"distributing": True,
"partner": {
"rate": 30,
"state": "ascdA---",
"port_id": "0x8000,0x0002",
"key": "0x0002",
"system_id": "0x8000,00-0c-86-5e-68-23",
"aggregatable": True,
"synchronization": "in_sync",
"collecting": True,
"distributing": True
},
"receive": "Current",
"period": "Slow",
"selection": "Selected",
"mux": "Distrib",
"a_churn": "None",
"p_churn": "None"
}
}
}
}
}
golden_output = {'execute.return_value': '''
RP/0/RP0/CPU0:iosxrv9000-1#show lacp
Tue Apr 3 20:32:49.966 UTC
State: a - Port is marked as Aggregatable.
s - Port is Synchronized with peer.
c - Port is marked as Collecting.
d - Port is marked as Distributing.
A - Device is in Active mode.
F - Device requests PDUs from the peer at fast rate.
D - Port is using default values for partner information.
E - Information about partner has expired.
Bundle-Ether1
Port (rate) State Port ID Key System ID
-------------------- -------- ------------- ------ ------------------------
Local
Gi0/0/0/0 30s ascdA--- 0x000a,0x0001 0x0001 0x0064,00-1b-0c-10-5a-26
Partner 30s ascdA--- 0x000a,0x0001 0x0001 0x8000,00-0c-86-5e-68-23
Gi0/0/0/1 30s ascdA--- 0x8000,0x0002 0x0001 0x0064,00-1b-0c-10-5a-26
Partner 30s ascdA--- 0x8000,0x0005 0x0001 0x8000,00-0c-86-5e-68-23
Port Receive Period Selection Mux A Churn P Churn
-------------------- ---------- ------ ---------- --------- ------- -------
Local
Gi0/0/0/0 Current Slow Selected Distrib None None
Gi0/0/0/1 Current Slow Selected Distrib None None
Bundle-Ether2
Port (rate) State Port ID Key System ID
-------------------- -------- ------------- ------ ------------------------
Local
Gi0/0/0/2 30s a---A--- 0x8000,0x0005 0x0002 0x0064,00-1b-0c-10-5a-26
Partner 30s as--A--- 0x8000,0x0004 0x0002 0x8000,00-0c-86-5e-68-23
Gi0/0/0/3 30s ascdA--- 0x8000,0x0004 0x0002 0x0064,00-1b-0c-10-5a-26
Partner 30s ascdA--- 0x8000,0x0003 0x0002 0x8000,00-0c-86-5e-68-23
Gi0/0/0/4 30s ascdA--- 0x8000,0x0003 0x0002 0x0064,00-1b-0c-10-5a-26
Partner 30s ascdA--- 0x8000,0x0002 0x0002 0x8000,00-0c-86-5e-68-23
Port Receive Period Selection Mux A Churn P Churn
-------------------- ---------- ------ ---------- --------- ------- -------
Local
Gi0/0/0/2 Current Slow Standby Waiting Churn None
Gi0/0/0/3 Current Slow Selected Distrib None None
Gi0/0/0/4 Current Slow Selected Distrib None None
'''}
def test_empty(self):
self.device = Mock(**self.empty_output)
obj = ShowLacp(device=self.device)
with self.assertRaises(SchemaEmptyParserError):
parsed_output = obj.parse()
def test_golden(self):
self.maxDiff = None
self.device = Mock(**self.golden_output)
obj = ShowLacp(device=self.device)
parsed_output = obj.parse()
self.assertEqual(parsed_output, self.golden_parsed_output)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
d3ca685b9c2136a3c1c581a042146b3f9b4be186 | 4a0eb422dea8b3b911d56d4eae54137753cdefb0 | /python-52-weeks/Device_classes/train_netmiko.py | aa215f6fdeb69e540b32f016fa3a73ddec8598dc | [] | no_license | aramidetosin/python-netmon | a52c85bf124c051ec4ffe9a252501520d0f7bb39 | de6f935bfcb8134e769eb2be81c8ebc0abd3df1d | refs/heads/master | 2023-03-27T17:01:56.080756 | 2021-03-28T12:57:45 | 2021-03-28T12:57:45 | 332,056,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,577 | py | import re
from netmiko import Netmiko
junos = {
"hostname": "192.168.1.229",
"username": "admin",
"password": "juniper1",
"device_type": "juniper",
}
connection = Netmiko(
host=junos["hostname"],
username=junos["username"],
password=junos["password"],
device_type=junos["device_type"],
)
show_hostname_output = connection.send_command("show system information")
show_uptime_output = connection.send_command("show system uptime")
show_serial_output = connection.send_command("show chassis hardware")
show_interface_output = connection.send_command("show interface terse")
print(show_hostname_output)
print(show_uptime_output)
print(show_serial_output)
print(show_interface_output)
def junos_get_information(show_hostname_output):
information = {}
pattern = re.compile(r"Model: (.*)")
model = pattern.search(show_hostname_output)
if model:
information['model'] = model.group(1)
else:
information['model'] = None
pattern = re.compile(r"Junos: (.*)")
model = pattern.search(show_hostname_output)
if model:
information['Version'] = model.group(1)
else:
information['Version'] = None
pattern = re.compile(r"Hostname: (.*)")
model = pattern.search(show_hostname_output)
if model:
information['Hostname'] = model.group(1)
else:
information['Hostname'] = None
return information
def junos_get_uptime_from_show(show_uptime_output):
re_junos_uptime = re.compile(r'System booted: .*\((\d{2}:\d{2}:\d{2}) ago\)')
junos_uptime_match = re_junos_uptime.search(show_uptime_output)
if junos_uptime_match:
uptime = junos_uptime_match.group(1)
uptime_split = uptime.split(":")
hours = int(uptime_split[0])
minutes = int(uptime_split[1])
seconds = int(uptime_split[2])
return hours * 3600 + minutes * 60 + seconds
def junos_get_serial_number(show_serial_output):
re_serial_number = re.compile(r"Chassis\s*(\w*)\s*")
serial_number_match = re_serial_number.search(show_serial_output)
if serial_number_match:
return serial_number_match.group(1)
print(junos_get_information(show_hostname_output))
print(junos_get_uptime_from_show(show_uptime_output))
print(junos_get_serial_number(show_serial_output))
line_show_interface_output = show_interface_output.splitlines()
interfaces = []
for line in line_show_interface_output:
xx = line.split(" ")[0]
if xx != "Interface" and xx != '':
if '.' not in xx:
interfaces.append(xx)
print(interfaces)
| [
"[email protected]"
] | |
ccad155c93d1dc3713bc931ca59362dda019cbe3 | aaf306b4117027bd66dfdbac80f2147a9b48a455 | /Day66-75/code/example01.py | b1fee7aa04cbe05762d013ada68b220f217fb53c | [] | no_license | xiangsxuan/Python-100-Days | 309da160fc4c85aa9699a0c522525e2b01e0421d | e86dece224b0a77103f6d6b734fecd9eef7dca97 | refs/heads/master | 2020-03-18T19:56:59.744032 | 2018-05-28T15:21:07 | 2018-05-28T15:21:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,467 | py | from urllib.error import URLError
from urllib.request import urlopen
import re
import pymysql
import ssl
from pymysql import Error
def decode_page(page_bytes, charsets=('utf-8', )):
page_html = None
for charset in charsets:
try:
page_html = page_bytes.decode(charset)
break
except UnicodeDecodeError:
pass
# logging.error('Decode:', error)
return page_html
def get_page_html(seed_url, *, retry_times=3, charsets=('utf-8', )):
page_html = None
try:
page_html = decode_page(urlopen(seed_url).read(), charsets)
except URLError:
# logging.error('URL:', error)
if retry_times > 0:
return get_page_html(seed_url, retry_times=retry_times - 1,
charsets=charsets)
return page_html
def get_matched_parts(page_html, pattern_str, pattern_ignore_case=re.I):
pattern_regex = re.compile(pattern_str, pattern_ignore_case)
return pattern_regex.findall(page_html) if page_html else []
def start_crawl(seed_url, match_pattern):
conn = pymysql.connect(host='localhost', port=3306,
database='crawler', user='root',
password='123456', charset='utf8')
try:
with conn.cursor() as cursor:
url_list = [seed_url]
while url_list:
current_url = url_list.pop(0)
page_html = get_page_html(current_url, charsets=('utf-8', 'gbk', 'gb2312'))
links_list = get_matched_parts(page_html, match_pattern)
url_list += links_list
param_list = []
for link in links_list:
page_html = get_page_html(link, charsets=('utf-8', 'gbk', 'gb2312'))
headings = get_matched_parts(page_html, r'<h1>(.*)<span')
if headings:
param_list.append((headings[0], link))
cursor.executemany('insert into tb_result values (default, %s, %s)',
param_list)
conn.commit()
except Error:
pass
# logging.error('SQL:', error)
finally:
conn.close()
def main():
ssl._create_default_https_context = ssl._create_unverified_context
start_crawl('http://sports.sohu.com/nba_a.shtml',
r'<a[^>]+test=a\s[^>]*href=["\'](.*?)["\']')
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
2dde9d6b6d7882fd2f8221971f5affeee6735fa2 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03072/s635930653.py | 041f13523a4cc93f92b5e54f56ee442444872570 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | n = int(input())
h = list(map(int, input().split()))
max = h[0]
count = 0
for i in h:
if max <= i:
count += 1
max = i
else:
continue
print(count) | [
"[email protected]"
] | |
87dec6ef073bd02fe7709d0c6b011cbfd0f6b878 | 7861798672463b239a3102b8407ec56c593c2811 | /setup.py | 0130f5685229a7217c8d3b52ceae52cde7687776 | [] | no_license | nag92/strokeRehabSystem | 33b38cb41de4a357e1a9c99cb30b5608d97932b7 | f51752bd355c91e162f94c26b4078e7d7bcee744 | refs/heads/master | 2020-03-31T14:25:06.853916 | 2018-05-02T15:35:50 | 2018-05-02T15:35:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | ## ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# fetch values from package.xml
setup_args = generate_distutils_setup(
packages=['strokeRehabSystem'],
package_dir={'': 'src'})
setup(**setup_args) | [
"[email protected]"
] | |
0224791ecdacf52585dc82bcf696f6feda3eb560 | b0c02d7ca86c1ef84af18a8c701702e8bb212b64 | /robotcar/robot_demo.py | d835d77755ccbeb16aa91e9b243d69dbd81e23e3 | [] | no_license | flashypepo/myMicropython-Examples | 24fa2f372e68742abe0f74913df000dfe64a9e55 | b2b63df865b5ad471b351ca5f279135025859f5d | refs/heads/master | 2021-09-24T18:52:18.083444 | 2018-10-13T11:59:19 | 2018-10-13T11:59:19 | 98,223,412 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 846 | py | import machine, motor, bot, time
print('creating i2c and motors ...')
i2c = machine.I2C(scl=machine.Pin(5), sda=machine.Pin(4))
motors = motor.DCMotors(i2c) #creates motors object
LEFT=0 #M0 - left motor
RIGHT=3 #M4 - right motor
print('creating robot ...')
robot = bot.Robot(motors, LEFT, RIGHT) # creates robot
dt = 3 # duration in seconds
print('robot moves ...')
robot.left(2000, dt) #turn left
time.sleep(0.3)
robot.right(2000, dt) # turn right
time.sleep(0.3)
robot.forward(2000, dt) #forward
time.sleep(0.3)
robot.backward(2000, dt) #backwards
time.sleep(0.3)
print('robot demo ...')
speed = 3000 #motorspeed
for i in range(3):
robot.left(speed, dt)
time.sleep(0.3)
robot.right(speed, dt)
time.sleep(0.3)
robot.forward(speed, dt)
time.sleep(0.3)
robot.backward(speed, dt)
time.sleep(1.0)
print('done')
| [
"[email protected]"
] | |
6fa666ea6d1840544f96f471b1e3fa431e6625ce | 2b468b1d22ecc5668529255676a1d43936829074 | /codes/personal_backend/support/test/api/account/test_account_staff_api.py | 77c65d5ee97fc600c4f0bfb8569cff2aaa68c41e | [] | no_license | MaseraTiGo/4U | 5ac31b4cccc1093ab9a07d18218c3d8c0157dc9c | f572830aa996cfe619fc4dd8279972a2f567c94c | refs/heads/master | 2023-07-26T09:44:21.014294 | 2023-07-13T03:43:34 | 2023-07-13T03:43:34 | 149,217,706 | 0 | 0 | null | 2020-06-05T20:38:16 | 2018-09-18T02:34:29 | Python | UTF-8 | Python | false | false | 1,720 | py | # coding=UTF-8
import json
from support.common.testcase.api_test_case import APITestCase
'''
class Add(APITestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_account_staff_add(self):
"""test account staff to add"""
flag = "user"
api = "account.staff.add"
user_info = json.dumps({
'username': "fengshiyu002",
'name': "冯时宇002",
'birthday': "2018-04-16",
'phone': "15232626262",
'email': "[email protected]",
'gender': "man",
'number': "008",
'identity': "123456789",
'role_ids' :[1,17],
'department_ids' :[1,7],
})
result = self.access_api(flag = flag, api = api, user_info = user_info)
class UpdatePassword(APITestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_account_staff_update_password(self):
"""test account staff to update password"""
flag = "user"
api = "account.staff.update.password"
uid = 2
newpassword = "e10adc3949ba59abbe56e057f20f883e"
oldpassword = "123456"
result = self.access_api(flag = flag, api = api, oldpassword = oldpassword, \
newpassword = newpassword)
'''
class Generate(APITestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_account_staff_generate(self):
"""test account staff to generate"""
flag = "user"
api = "account.staff.generate"
staff_id = 11
username = "fsy"
result = self.access_api(flag = flag, api = api, staff_id = staff_id)
| [
"[email protected]"
] | |
87b3c9e11b14cb7d689ba36d1587e35e28f58976 | c97b9ae1bf06757ba61f90905e4d9b9dd6498700 | /venv/Lib/site-packages/skimage/draw/draw_nd.py | 03c268fb11faaef98beb8414071d9f7ed38a343a | [] | no_license | Rahulk1p/image-processor | f7ceee2e3f66d10b2889b937cdfd66a118df8b5d | 385f172f7444bdbf361901108552a54979318a2d | refs/heads/main | 2023-03-27T10:09:46.080935 | 2021-03-16T13:04:02 | 2021-03-16T13:04:02 | 348,115,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:c5e7f1c5ad5f275def9df43f330f4af4782e674274fb765bbb93af0c05902092
size 3841
| [
"[email protected]"
] | |
86ce704f77b7c265463560e188583cbaa2aac01e | f29d69eea45f4383db37b1b6876be4bcfd286312 | /user_portrait_0320/user_portrait/cron/network/cron_network.py | ed9c43bd9b965c0a28d58ca37f802ddade6ad69a | [] | no_license | xuzhiq/user_portrait_ending2 | 5ac9952cf275923677d6e2f575289236df4dde9b | f2978135ff672f58090e202e588f7321ed121477 | refs/heads/master | 2021-05-31T05:15:21.316687 | 2016-05-11T11:56:38 | 2016-05-11T11:56:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,804 | py | # -*- coding:utf-8 -*-
import tempfile
import sys
import json
import time
import tempfile
reload(sys)
sys.path.append('../../')
from spam.pagerank_for_portrait import pagerank
from time_utils import ts2datetime, datetime2ts, ts2date
from keywords_utils import get_task_information, identify_task_exist,\
compute_network_task, write_tmp_file, save_task_results,\
push_task_information
#use to read task information from queue
def scan_network_keywords_task():
#step1: read task information from redis queue
    #step2: check whether the task information exists in ES
#step3: compute the network trend task
while True:
        #read task information from redis queue
network_task_information = get_task_information()
print network_task_information
        #when the redis queue is empty, break out of the loop
if not network_task_information:
break
        #check whether the task exists in ES
exist_mark = identify_task_exist(network_task_information)
print 'exist_mark:', exist_mark
if exist_mark:
print 'step 1: compute', ts2date(time.time())
results = compute_network_task(network_task_information)
if results:
tmp_file = tempfile.NamedTemporaryFile(delete=False)
write_tmp_file(tmp_file, results)
tmp_file.close()
if not tmp_file:
return
input_tmp_path = tmp_file.name
print input_tmp_path
ITER_COUNT = 10
TOP_N = 50
print 'step 2: pagerank', ts2date(time.time())
all_uids_count, dg_sorted_uids, pr_sorted_uids = pagerank(ITER_COUNT, input_tmp_path, TOP_N, 'keywords')
#save results
print 'step 3: save', ts2date(time.time())
save_mark = save_task_results(dg_sorted_uids, pr_sorted_uids, network_task_information)
print 'save done', ts2date(time.time())
#identify save status
if not save_mark:
#status fail: push task information to redis queue
push_mark = push_task_information(network_task_information)
if not push_mark:
print 'error push task queue'
else:
            #the task does not exist in ES - skip it
pass
if __name__=='__main__':
log_time_ts = time.time()
log_time_date = ts2date(log_time_ts)
print 'cron/network/cron_network.py&start&' + log_time_date
try:
scan_network_keywords_task()
except Exception, e:
print e, '&error&', ts2date(time.time())
log_time_ts = time.time()
log_time_date = ts2date(log_time_ts)
print 'cron/network/cron_network.py&end&' + log_time_date
| [
"[email protected]"
] | |
220d3da93147ba464b5fd1a2eeefdba19a37c65f | 26552adb0d8889affd40e009d3c311e41a873e43 | /Python_Solutions/9095.py | 6b8aaf7f8f39b7b0b8e579984c319a8acee871ab | [] | no_license | Isaac-Lee/BOJ-Algorithm | 3b9b64aba9ab3b48d15133cbf5ad122822e441d0 | 27f0339195c48f416e672390758e85305203b71a | refs/heads/main | 2022-06-29T21:36:11.500158 | 2022-06-25T06:35:05 | 2022-06-25T06:35:05 | 203,349,860 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | import sys
def make123(n):
if memo[n] > 0:
return memo[n]
if n == 1 or n == 0:
memo[n] = 1
return memo[n]
memo[n] += make123(n-1)
if n-3 >= 0:
memo[n] += make123(n-3)
if n-2 >= 0:
memo[n] += make123(n-2)
return memo[n]
if __name__ == "__main__":
n = int(sys.stdin.readline())
for _ in range(n):
k = int(sys.stdin.readline())
memo = [0] * (k + 1)
print(make123(k)) | [
"[email protected]"
] | |
c015abc83aad9d4d4eb62342b203ad222667c74b | 24684138f7a74672e084511e2f0202680b318112 | /lib/nmdc_metaassembly/nmdc_metaassemblyImpl.py | e0e0904a89147ec217adef5203a3d73a74881dd3 | [
"MIT"
] | permissive | microbiomedata/nmdc_kbase_metaassembly | 531abc003bace8ead6334966f90a8e925bd583ca | 2cb091007e556933e90c7c342a3e800d931e15ca | refs/heads/master | 2023-03-16T06:16:24.445768 | 2021-03-05T16:56:48 | 2021-03-05T16:56:48 | 341,439,883 | 0 | 2 | MIT | 2021-02-24T18:53:34 | 2021-02-23T05:31:18 | Python | UTF-8 | Python | false | false | 2,579 | py | # -*- coding: utf-8 -*-
#BEGIN_HEADER
import logging
import os
from installed_clients.KBaseReportClient import KBaseReport
from nmdc_metaassembly.assemble import nmdc_mg_assembly
#END_HEADER
class nmdc_metaassembly:
'''
Module Name:
nmdc_metaassembly
Module Description:
A KBase module: nmdc_metaassembly
'''
######## WARNING FOR GEVENT USERS ####### noqa
# Since asynchronous IO can lead to methods - even the same method -
# interrupting each other, you must be *very* careful when using global
# state. A method could easily clobber the state set by another while
# the latter method is running.
######################################### noqa
VERSION = "0.0.1"
GIT_URL = ""
GIT_COMMIT_HASH = ""
#BEGIN_CLASS_HEADER
#END_CLASS_HEADER
# config contains contents of config file in a hash or None if it couldn't
# be found
def __init__(self, config):
#BEGIN_CONSTRUCTOR
self.callback_url = os.environ['SDK_CALLBACK_URL']
self.shared_folder = config['scratch']
logging.basicConfig(format='%(created)s %(levelname)s: %(message)s',
level=logging.INFO)
print(os.getcwd())
self.asu = nmdc_mg_assembly(self.callback_url, self.shared_folder)
#END_CONSTRUCTOR
pass
def run_nmdc_metaassembly(self, ctx, params):
"""
This example function accepts any number of parameters and returns results in a KBaseReport
:param params: instance of mapping from String to unspecified object
:returns: instance of type "ReportResults" -> structure: parameter
"report_name" of String, parameter "report_ref" of String
"""
# ctx is the context object
# return variables are: output
#BEGIN run_nmdc_metaassembly
os.chdir(self.shared_folder)
output = self.asu.assemble(params)
#END run_nmdc_metaassembly
# At some point might do deeper type checking...
if not isinstance(output, dict):
raise ValueError('Method run_nmdc_metaassembly return value ' +
'output is not type dict as required.')
# return the results
return [output]
def status(self, ctx):
#BEGIN_STATUS
returnVal = {'state': "OK",
'message': "",
'version': self.VERSION,
'git_url': self.GIT_URL,
'git_commit_hash': self.GIT_COMMIT_HASH}
#END_STATUS
return [returnVal]
| [
"[email protected]"
] | |
b07717ae965c5aa2e55fdbcbf027e893ba95b680 | 46ae8264edb9098c9875d2a0a508bc071201ec8b | /res/scripts/client/gui/scaleform/daapi/view/battlegas_attack.py | c45a4342116a35a23080122c5c705cc4d96ee7d0 | [] | no_license | Difrex/wotsdk | 1fc6156e07e3a5302e6f78eafdea9bec4c897cfb | 510a34c67b8f4c02168a9830d23f5b00068d155b | refs/heads/master | 2021-01-01T19:12:03.592888 | 2016-10-08T12:06:04 | 2016-10-08T12:06:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,223 | py | # Embedded file name: scripts/client/gui/Scaleform/daapi/view/battle/gas_attack.py
from gui.Scaleform.locale.FALLOUT import FALLOUT
from gui.battle_control import g_sessionProvider
from gui.shared.utils.plugins import IPlugin
from gui import makeHtmlString
from helpers import i18n
class GasAttackPlugin(IPlugin):
def start(self):
super(GasAttackPlugin, self).start()
self._parentObj.movie.falloutItems.as_loadGasItems(i18n.makeString(FALLOUT.SAFEZONE_MESSAGE), self.__getPanelText())
g_sessionProvider.getGasAttackCtrl().start(self._parentObj)
def stop(self):
g_sessionProvider.getGasAttackCtrl().stop()
super(GasAttackPlugin, self).stop()
def __getPanelText(self):
infoStr = i18n.makeString(FALLOUT.GASATTACKPANEL_SAFEZONE_MESSAGE)
return (FALLOUT.GASATTACKPANEL_START_TITLE,
FALLOUT.GASATTACKPANEL_START_MESSAGE,
FALLOUT.GASATTACKPANEL_GASATTACK_TITLE,
FALLOUT.GASATTACKPANEL_GASATTACK_MESSAGE,
FALLOUT.GASATTACKPANEL_INSIDE_TITLE,
FALLOUT.GASATTACKPANEL_INSIDE_MESSAGE,
FALLOUT.GASATTACKPANEL_SAFEZONE_TITLE,
makeHtmlString('html_templates:battle/gasAtackPanel', 'safeZone', infoStr)) | [
"[email protected]"
] | |
c0604ecc3e5fec3aa2883092810bbfee31e16a8e | f50368f3165c182a0adc914dec56f0cc03d9fb5a | /visual_mpc/envs/sawyer_robot/vanilla_sawyer_env.py | dc1c25c0a1aabdcacfee00d56d4d3d2dbb6b5243 | [
"MIT"
] | permissive | anestisdotpy/visual_foresight | 16ea71f938458a35892c1f557903ed885810dda3 | 957df706b4c7a11b7a0c9ba2de15853df62cd4ed | refs/heads/master | 2020-06-22T05:59:10.578361 | 2019-07-18T20:23:51 | 2019-07-18T20:23:51 | 197,651,312 | 0 | 0 | null | 2019-07-18T20:17:26 | 2019-07-18T20:17:26 | null | UTF-8 | Python | false | false | 487 | py | from .base_sawyer_env import BaseSawyerEnv
import copy
class VanillaSawyerEnv(BaseSawyerEnv):
def __init__(self, env_params, _=None):
self._hyper = copy.deepcopy(env_params)
BaseSawyerEnv.__init__(self, env_params)
self._adim, self._sdim = self._base_adim, self._base_sdim
def _next_qpos(self, action):
assert action.shape[0] == self._base_adim, "Action should have shape (5,)"
return self._previous_target_qpos * self.mode_rel + action | [
"[email protected]"
] |