# We are given a sequence of bricks (K1, ..., Kn). Brick K[i] starts at position a[i]
# and extends to position b[i] (all positions are non-negative natural numbers) and
# has height 1. Bricks are placed one at a time: if a brick overlaps any of the
# previously placed bricks, it is attached on top of the brick it overlaps.
# For example, the bricks at positions (1, 3), (2, 5), (0, 3), (8, 9), (4, 6) produce
# a structure three bricks high. Give the fastest possible algorithm that computes
# the height of the resulting structure.
class Node:
def __init__(self, a, b):
self.a = a
self.b = b
self.height = 0
self.left = None
self.right = None
self.parent = None
def place_a_brick(root, brick):
    # Right children form the stack built on top of a brick; left children hold
    # stacks that are disjoint from this subtree's covered span (a, b).
    while root is not None:
        if brick[0] > root.b or brick[1] < root.a:
            # No overlap with this stack: descend into the disjoint subtree,
            # starting a new ground-level stack (height 1) if there is none.
            if root.left is None:
                node = Node(brick[0], brick[1])
                node.height = 1
                root.left = node
                return
            root = root.left
        elif root.right is not None:
            # Overlap: widen the covered span and climb the stack.
            root.a = min(root.a, brick[0])
            root.b = max(root.b, brick[1])
            root = root.right
        else:
            # Overlap with the top of the stack: place the brick on top.
            node = Node(brick[0], brick[1])
            root.a = min(root.a, brick[0])
            root.b = max(root.b, brick[1])
            root.right = node
            node.height = root.height + 1
            return
def get_max_height(root, height):
    if root is None:
        return height
    height = max(height, root.height)
    # Take the maximum over both subtrees; `or` would skip the left subtree
    # whenever the right one returns a non-zero height.
    return max(get_max_height(root.right, height), get_max_height(root.left, height))
def height_of_bricks(bricks):
root = Node(bricks[0][0], bricks[0][1])
root.height = 1
for i in range(1, len(bricks)):
place_a_brick(root, bricks[i])
height = get_max_height(root, 0)
return height
bricks = [(1, 3), (2, 5), (0, 3), (8, 9), (4, 6)]
print(height_of_bricks(bricks))
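# A minimal cross-check (not part of the original): a brute-force O(n^2)
# reference that gives each brick height 1 + the tallest earlier brick it
# overlaps, which is the physical stacking rule described above.
def height_of_bricks_naive(bricks):
    placed = []  # (a, b, height) triples
    for a, b in bricks:
        h = 1 + max((ph for pa, pb, ph in placed if not (a > pb or b < pa)), default=0)
        placed.append((a, b, h))
    return max(h for _, _, h in placed)

assert height_of_bricks_naive(bricks) == 3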
|
'''
0: 1: 2: 3: 4:
aaaa .... aaaa aaaa ....
b c . c . c . c b c
b c . c . c . c b c
.... .... dddd dddd dddd
e f . f e . . f . f
e f . f e . . f . f
gggg .... gggg gggg ....
5: 6: 7: 8: 9:
aaaa aaaa aaaa aaaa aaaa
b . b . . c b c b c
b . b . . c b c b c
dddd dddd .... dddd dddd
. f e f . f e f . f
. f e f . f e f . f
gggg gggg .... gggg gggg
0 1 2 3 4 5 6 7 8 9
a x x x x x x x x
b x x x x x
c x x x x x x x x
d x x x x x x x
e x x x x x
f x x x x x x x x x
g x x x x x x x
'''
def get_easy_seg_count(filename):
easy = [2,3,4,7]
easy_segs = 0
with open(filename) as f:
while True:
line = f.readline().strip()
if not line:
break
else:
for seg in line.split('|')[1].split():
if len(seg) in easy:
easy_segs += 1
print("Number of easy signals in {} is {}".format(filename, easy_segs))
return easy_segs
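# Deduction used by the decoders below (see the segment diagram above): among
# the 5-segment digits, only '3' contains both of '1's segments and only '5'
# contains both segments of "4 minus 1"; among the 6-segment digits, only '6'
# is missing one of '1's segments and only '9' contains both segments of
# "4 minus 1".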
def decode_fives(helper_rels, fives):
decoded = dict()
for f in fives:
if helper_rels['1'][0] in f and helper_rels['1'][1] in f:
decoded[f] = '3'
elif helper_rels["4-1"][0] in f and helper_rels["4-1"][1] in f:
decoded[f] = '5'
else:
decoded[f] = '2'
return decoded
def decode_sixes(helper_rels, sixes):
decoded = dict()
for s in sixes:
if not( helper_rels['1'][0] in s and helper_rels['1'][1] in s):
decoded[s] = '6'
elif helper_rels["4-1"][0] in s and helper_rels["4-1"][1] in s:
decoded[s] = '9'
else:
decoded[s] = '0'
return decoded
def decode_line(line):
decoded = dict()
helper_rels = dict()
## Decode simple digits
easy = { 2 : '1', 3 : '7', 4 : '4', 7 : '8'}
fives = list()
sixes = list()
for seg in line.split('|')[0].split():
seg = ''.join(sorted(seg))
if len(seg) in easy:
decoded[seg] = easy[len(seg)]
helper_rels[easy[len(seg)]] = seg
elif len(seg) == 5:
fives.append(seg)
elif len(seg) == 6:
sixes.append(seg)
helper_rels["7-1"] = helper_rels['7']
helper_rels["4-1"] = helper_rels['4']
for n in helper_rels['1']:
helper_rels["7-1"] = helper_rels["7-1"].replace(n,'')
helper_rels["4-1"] = helper_rels["4-1"].replace(n,'')
decoded.update(decode_fives(helper_rels,fives))
decoded.update(decode_sixes(helper_rels,sixes))
return decoded
def decoded_output(line):
decoded = decode_line(line)
output = ""
for seg in line.split('|')[1].split():
output = output + decoded[''.join(sorted(seg))]
return int(output)
def sum_all_outputs(filename):
all_outputs = 0
with open(filename) as f:
while True:
line = f.readline().strip()
if not line:
break
else:
all_outputs += decoded_output(line)
print("Sum of all outputs in {} is {}".format(filename,all_outputs))
return all_outputs
print("-- Part 1")
assert get_easy_seg_count("sample.txt") == 26
assert get_easy_seg_count("input.txt") == 554
print("\n-- Part 2")
assert sum_all_outputs("sample.txt") == 61229
assert sum_all_outputs("input.txt") == 990964
|
# Convert to str
number = 18
number_string = str(number)
print(type(number_string)) # 'str' |
n = int(input("Enter the number of characters: "))
caracteres = []
consoantes = 0
i = 1
while i <= n:
    c = input("Enter character %d: " % i)
    caracteres.append(c)
    i += 1
i = 0
while i < n:
    if caracteres[i] not in "aeiou":
        consoantes += 1
    i += 1
print("The total number of consonants is:", consoantes)
|
# Advent of Code 2019 Solutions: Day 2, Puzzle 1
# https://github.com/emddudley/advent-of-code-solutions
with open('input', 'r') as f:
program = [int(x) for x in f.read().strip().split(',')]
program[1] = 12
program[2] = 2
for opcode_index in range(0, len(program), 4):
opcode = program[opcode_index]
if opcode == 99:
break
addr_a = program[opcode_index + 1]
addr_b = program[opcode_index + 2]
dest = program[opcode_index + 3]
if opcode == 1:
program[dest] = program[addr_a] + program[addr_b]
elif opcode == 2:
program[dest] = program[addr_a] * program[addr_b]
print(program[0])
|
from typing import List

class Solution:
    def minDeletionSize(self, A: List[str]) -> int:
        # Longest chain of columns that are row-wise sorted (an LIS over
        # columns); the answer is the number of columns left over.
        minDel = m = len(A[0])
dp = [1] * m
for j in range(m):
for i in range(j):
if all(A[k][i] <= A[k][j] for k in range(len(A))):
dp[j] = max(dp[j], dp[i] + 1)
minDel = min(minDel, m - dp[j])
return minDel
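# Illustrative check (LeetCode 960 sample, assuming the platform harness):
# Solution().minDeletionSize(["babca", "bbazb"]) -> 3, since keeping a longest
# chain of pairwise-sorted columns and deleting the rest is optimal.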
|
"""
https://leetcode.com/problems/power-of-two/
Given an integer, write a function to determine if it is a power of two.
Example 1:
Input: 1
Output: true
Explanation: 2^0 = 1
Example 2:
Input: 16
Output: true
Explanation: 2^4 = 16
Example 3:
Input: 218
Output: false
"""
# time complexity: O(logn), space complexity: O(1)
class Solution:
def isPowerOfTwo(self, n: int) -> bool:
if n <= 0:
return False
if n == 1:
return True
while n > 1:
if n % 2 == 0:
n = n // 2
else:
return False
        return True
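# An equivalent O(1) check (standard bit trick, not part of the original
# solution): a positive power of two has exactly one set bit, and n & (n - 1)
# clears the lowest set bit.
def is_power_of_two_bitwise(n: int) -> bool:
    return n > 0 and n & (n - 1) == 0

assert is_power_of_two_bitwise(16) and not is_power_of_two_bitwise(218)
|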
key = int(input())
n = int(input())
message = ""
for x in range(n):
letters = input()
message += chr(ord(letters) + key)
print(message) |
def action_sanitize():
'''Make action suitable for use as a Pose Library
'''
pass
def apply_pose(pose_index=-1):
'''Apply specified Pose Library pose to the rig
:param pose_index: Pose, Index of the pose to apply (-2 for no change to pose, -1 for poselib active pose)
:type pose_index: int in [-2, inf], (optional)
'''
pass
def browse_interactive(pose_index=-1):
'''Interactively browse poses in 3D-View
:param pose_index: Pose, Index of the pose to apply (-2 for no change to pose, -1 for poselib active pose)
:type pose_index: int in [-2, inf], (optional)
'''
pass
def new():
'''Add New Pose Library to active Object
'''
pass
def pose_add(frame=1, name="Pose"):
'''Add the current Pose to the active Pose Library
:param frame: Frame, Frame to store pose on
:type frame: int in [0, inf], (optional)
:param name: Pose Name, Name of newly added Pose
:type name: string, (optional, never None)
'''
pass
def pose_move(pose='', direction='UP'):
'''Move the pose up or down in the active Pose Library
:param pose: Pose, The pose to move
:type pose: enum in [], (optional)
:param direction: Direction, Direction to move the chosen pose towards
:type direction: enum in ['UP', 'DOWN'], (optional)
'''
pass
def pose_remove(pose=''):
'''Remove nth pose from the active Pose Library
:param pose: Pose, The pose to remove
:type pose: enum in [], (optional)
'''
pass
def pose_rename(name="RenamedPose", pose=''):
'''Rename specified pose from the active Pose Library
:param name: New Pose Name, New name for pose
:type name: string, (optional, never None)
:param pose: Pose, The pose to rename
:type pose: enum in [], (optional)
'''
pass
def unlink():
'''Remove Pose Library from active Object
'''
pass
|
class TreeNode:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
class Solution(object):
def is_same_tree(self, p, q):
if p is None or q is None:
if p != q:
return False
return True
if p.val != q.val:
return False
if not self.is_same_tree(p.left, q.left):
return False
return self.is_same_tree(p.right, q.right)
|
frase = str(input('Enter a sentence: ')).strip()
lower = frase.lower()
print('##########################################################################')
print('#################  A N A L Y Z I N G   T H E   S E N T E N C E  #########')
print('##########################################################################')
print('The letter A appears {} time(s)'.format(lower.count('a')))
print('The letter A first appears at position {}'.format(lower.find('a')))
print('The letter A last appears at position {}'.format(lower.rfind('a')))
print('The number of characters is: {}'.format(len(lower)))
print('The number of characters excluding spaces is: {}'.format(len(lower.replace(" ", ""))))
print('############################################################################') |
#!/usr/bin/env python3
n = int(input())
i = 1
while i < n + 1:
if i % 3 == 0 and i % 5 != 0:
print("fizz")
elif i % 5 == 0 and i % 3 != 0:
print("buzz")
elif i % 5 == 0 and i % 3 == 0:
print("fizz-buzz")
else:
print(i)
i = i + 1
|
def _printf(fh, fmt, *args):
"""Implementation of perl $fh->printf method"""
global OS_ERROR, TRACEBACK, AUTODIE
try:
print(_format(fmt, *args), end='', file=fh)
return True
except Exception as _e:
OS_ERROR = str(_e)
if TRACEBACK:
if isinstance(fmt, str):
fmt = fmt.replace("\n", '\\n')
_cluck(f"printf({fmt},...) failed: {OS_ERROR}",skip=2)
if AUTODIE:
raise
return False
|
region = 'us-west-2'
vpc = dict(
source='./vpc'
)
inst = dict(
source='./inst',
vpc_id='${module.vpc.vpc_id}'
)
config = dict(
provider=dict(
aws=dict(region=region)
),
module=dict(
vpc=vpc,
inst=inst
)
)
|
{
"targets": [
{
"target_name": "userid",
"sources": [ '<!@(ls -1 src/*.cc)' ],
"include_dirs": ["<!@(node -p \"require('node-addon-api').include\")"],
"dependencies": ["<!(node -p \"require('node-addon-api').gyp\")"],
"cflags!": [ "-fno-exceptions" ],
"cflags_cc!": [ "-fno-exceptions" ],
"xcode_settings": {
"GCC_ENABLE_CPP_EXCEPTIONS": "YES",
"CLANG_CXX_LIBRARY": "libc++",
"MACOSX_DEPLOYMENT_TARGET": "10.7",
},
"msvs_settings": {
"VCCLCompilerTool": { "ExceptionHandling": 1 },
},
"variables" : {
"generate_coverage": "<!(echo $GENERATE_COVERAGE)",
},
"conditions": [
['OS=="mac"', {
"cflags+": ["-fvisibility=hidden"],
"xcode_settings": {
"GCC_SYMBOLS_PRIVATE_EXTERN": "YES", # -fvisibility=hidden
},
}],
['generate_coverage=="yes"', {
"cflags+": ["--coverage"],
"cflags_cc+": ["--coverage"],
"link_settings": {
"libraries+": ["-lgcov"],
},
}],
],
},
],
}
|
"""
Single linked list based on two pointer approach.
"""
class Node:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class slist:
"""
singly linked list class
"""
def __init__(self):
self._first = None
self._last = None
def _build_a_node(self, i: int, append:bool=True):
n = Node(val=i)
# Handle empty list case
if self._first is None and self._last is None:
self._first = n
self._last = n
else:
if append:
self._last.next = n
self._last = n
else:
n.next = self._first
self._first = n
def _find(self, x: int):
nodes = [self._first, None]
while nodes[0] != None:
if nodes[0].val == x:
return nodes
nodes[1] = nodes[0]
nodes[0] = nodes[0].next
return nodes
def append(self, i:int):
self._build_a_node(i)
def build_slist_from_list(self, a:list):
for i in a:
self.append(i)
def find(self, x:int):
nodes = self._find(x)
if nodes[0]:
return True
else:
return False
def delete(self, x:int):
nodes = self._find(x)
# in case of node found
if(nodes[0]):
currentnode = nodes[0]
previousnode = nodes[1]
# list has only one element and that element is x
if currentnode == self._first and currentnode == self._last and previousnode == None:
self._first = None
self._last = None
# x at first position and being removed
elif currentnode == self._first:
self._first = currentnode.next
# x at last position
elif currentnode == self._last:
previousnode.next = None
self._last = previousnode
# x is in between
else:
previousnode.next = currentnode.next
def reverse(self):
c = self._first
self._last = self._first
p = None
while c != None:
n = c.next
c.next = p
p = c
c = n
self._first = p
def has_a_cycle(self):
"""
find linked list has a cycle or not
:return:
"""
if self._first == None or self._first.next == None:
return False
ptr1 = self._first
ptr2 = self._first.next
while ptr1 != ptr2:
if ptr2 == None or ptr2.next == None:
return False
ptr1 = ptr1.next
ptr2 = ptr2.next.next
return True
    def find_mid_point(self):
        # Classic slow/fast pointers; guard both hops so even-length lists do
        # not dereference None.
        ptr1 = self._first
        ptr2 = self._first
        while ptr2 is not None and ptr2.next is not None:
            ptr1 = ptr1.next
            ptr2 = ptr2.next.next
        return ptr1.val
def __str__(self):
s = ""
n = self._first
# in case slist is empty
if n == None:
s = s + "Null"
# for other cases
while n != None:
s = s + str(n.val)
if n.next is not None:
s = s + "->"
else:
s = s + '->Null'
n = n.next
return s
def __len__(self):
l = 0
n = self._first
if not n:
return l
else:
while n is not None:
l +=1
n = n.next
return l
if __name__ == '__main__':
a = [1, 2, 3, 4, 5, 12, 6, 7, 8, 10, 10]
s = slist()
s.build_slist_from_list(a)
print(s.has_a_cycle())
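    # Illustrative follow-ups on the list built above: print, delete, reverse.
    print(s)                   # 1->2->3->4->5->12->6->7->8->10->10->Null
    s.delete(12)
    s.reverse()
    print(s)                   # 10->10->8->7->6->5->4->3->2->1->Null
    print(len(s), s.find(5))   # 10 True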
|
algorithm_defaults = {
'ERM': {
'train_loader': 'standard',
'uniform_over_groups': False,
'eval_loader': 'standard',
'randaugment_n': 2, # When running ERM + data augmentation
},
'groupDRO': {
'train_loader': 'standard',
'uniform_over_groups': True,
'distinct_groups': True,
'eval_loader': 'standard',
'group_dro_step_size': 0.01,
},
'deepCORAL': {
'train_loader': 'group',
'uniform_over_groups': True,
'distinct_groups': True,
'eval_loader': 'standard',
'coral_penalty_weight': 1.,
'randaugment_n': 2,
'additional_train_transform': 'randaugment', # Apply strong augmentation to labeled & unlabeled examples
},
'IRM': {
'train_loader': 'group',
'uniform_over_groups': True,
'distinct_groups': True,
'eval_loader': 'standard',
'irm_lambda': 100.,
'irm_penalty_anneal_iters': 500,
},
'DANN': {
'train_loader': 'group',
'uniform_over_groups': True,
'distinct_groups': True,
'eval_loader': 'standard',
'randaugment_n': 2,
'additional_train_transform': 'randaugment', # Apply strong augmentation to labeled & unlabeled examples
},
'AFN': {
'train_loader': 'standard',
'uniform_over_groups': False,
'eval_loader': 'standard',
'use_hafn': False,
'afn_penalty_weight': 0.01,
'safn_delta_r': 1.0,
'hafn_r': 1.0,
'additional_train_transform': 'randaugment', # Apply strong augmentation to labeled & unlabeled examples
'randaugment_n': 2,
},
'FixMatch': {
'train_loader': 'standard',
'uniform_over_groups': False,
'eval_loader': 'standard',
'self_training_lambda': 1,
'self_training_threshold': 0.7,
'scheduler': 'FixMatchLR',
'randaugment_n': 2,
'additional_train_transform': 'randaugment', # Apply strong augmentation to labeled examples
},
'PseudoLabel': {
'train_loader': 'standard',
'uniform_over_groups': False,
'eval_loader': 'standard',
'self_training_lambda': 1,
'self_training_threshold': 0.7,
'pseudolabel_T2': 0.4,
'scheduler': 'FixMatchLR',
'randaugment_n': 2,
'additional_train_transform': 'randaugment', # Apply strong augmentation to labeled & unlabeled examples
},
'NoisyStudent': {
'train_loader': 'standard',
'uniform_over_groups': False,
'eval_loader': 'standard',
'noisystudent_add_dropout': True,
'noisystudent_dropout_rate': 0.5,
'scheduler': 'FixMatchLR',
'randaugment_n': 2,
'additional_train_transform': 'randaugment', # Apply strong augmentation to labeled & unlabeled examples
}
}
|
"""django-anchors"""
__version__ = '0.1.dev0'
__license__ = 'BSD License'
__author__ = 'Fantomas42'
__email__ = '[email protected]'
__url__ = 'https://github.com/Fantomas42/django-anchors'
|
def isinteger(s):
    # startswith() avoids an IndexError on the empty string
    return s.isdigit() or (s.startswith('-') and s[1:].isdigit())
def isfloat(x):
s = x.partition(".")
if s[1]=='.':
if s[0]=='' or s[0]=='-':
if s[2]=='' or s[2][0]=='-':
return False
else:
return isinteger(s[2])
elif isinteger(s[0]):
if s[2]!='' and s[2][0]=='-':
return False
return s[2]=='' or isinteger(s[2])
else:
return False
else:
return False
print(isfloat(".112"))
print(isfloat("-.112"))
print(isfloat("3.14"))
print(isfloat("-3.14"))
print(isfloat("-3.14"))
print(isfloat("5.0"))
print(isfloat("-777.0"))
print(isfloat("-777."))
print(isfloat("."))
print(isfloat(".."))
print(isfloat("-21.-1"))
print(isfloat("-.-1"))
|
class Solution:
def diameterOfBinaryTree(self, root: TreeNode) -> int:
if not root:
return 0
elif not root.right and not root.left:
return 0
elif not root.right:
return max(self.diameterOfBinaryTree(root.left),
1 + self.height(root.left))
elif not root.left:
return max(self.diameterOfBinaryTree(root.right),
1 + self.height(root.right))
else:
return max(self.diameterOfBinaryTree(root.right),
self.diameterOfBinaryTree(root.left),
self.height(root.left) + self.height(root.right) + 2)
def height(self, root):
if not root:
return 0
if not root.right and not root.left:
return 0
return 1 + max(self.height(root.left), self.height(root.right))
class Solution:
def diameterOfBinaryTree(self, root):
self.ans = 0
def depth(node):
if not node:
return 0
r = depth(node.right)
l = depth(node.left)
self.ans = max(self.ans, r+l)
return 1 + max(r, l)
depth(root)
return self.ans
|
# Copyright (C) 2019 FireEye, Inc. All Rights Reserved.
"""
english letter probabilities
table from http://en.algoritmy.net/article/40379/Letter-frequency-English
"""
english_letter_probs_percent = [
['a', 8.167],
['b', 1.492],
['c', 2.782],
['d', 4.253],
['e', 12.702],
['f', 2.228],
['g', 2.015],
['h', 6.094],
['i', 6.966],
['j', 0.153],
['k', 0.772],
['l', 4.025],
['m', 2.406],
['n', 6.749],
['o', 7.507],
['p', 1.929],
['q', 0.095],
['r', 5.987],
['s', 6.327],
['t', 9.056],
['u', 2.758],
['v', 0.978],
['w', 2.360],
['x', 0.150],
['y', 1.974],
['z', 0.074]]
english_letter_probs = {lt: (per * 0.01) for lt, per in english_letter_probs_percent}
"""
Scrabble Scores
table from https://en.wikipedia.org/wiki/Scrabble_letter_distributions
"""
scrabble_dict = {"a": 1, "b": 3, "c": 3, "d": 2, "e": 1, "f": 4,
"g": 2, "h": 4, "i": 1, "j": 8, "k": 5, "l": 1,
"m": 3, "n": 1, "o": 1, "p": 3, "q": 10, "r": 1,
"s": 1, "t": 1, "u": 1, "v": 4, "w": 4, "x": 8,
"y": 4, "z": 10}
|
arima = {
'order':[(2,1,0),(0,1,2),(1,1,1)],
'seasonal_order':[(0,0,0,0),(0,1,1,12)],
'trend':['n','c','t','ct']
}
elasticnet = {
'alpha':[i/10 for i in range(1,101)],
'l1_ratio':[0,0.25,0.5,0.75,1],
'normalizer':['scale','minmax',None]
}
gbt = {
'max_depth':[2,3],
'n_estimators':[100,500]
}
hwes = {
'trend':[None,'add','mul'],
'seasonal':[None,'add','mul'],
'damped_trend':[True,False]
}
knn = {
'n_neighbors':range(2,20),
'weights':['uniform','distance']
}
lightgbm = {
'max_depth':[i for i in range(5)] + [-1]
}
mlp = {
'activation':['relu','tanh'],
'hidden_layer_sizes':[(25,),(25,25,)],
'solver':['lbfgs','adam'],
'normalizer':['scale','minmax',None],
'random_state':[20]
}
mlr = {
'normalizer':['scale','minmax',None]
}
prophet = {
'n_changepoints':range(5)
}
rf = {
'max_depth':[5,10,None],
'n_estimators':[100,500,1000]
}
silverkite = {
'changepoints':range(5)
}
svr={
'kernel':['linear'],
'C':[.5,1,2,3],
'epsilon':[0.01,0.1,0.5]
}
xgboost = {
'max_depth':[2,3,4,5,6]
}
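# Illustrative: a grid's candidate count is the product of its option counts,
# e.g. the arima grid above yields 3 * 2 * 4 = 24 combinations.
from itertools import product
assert len(list(product(*arima.values()))) == 24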
|
str_back = '↪ !Back to menu%r'
str_yes = '✅ Yes%g'
str_no = '❌ No%r'
str_yes_or_no = '✅ Yes or ❌ No ?'
str_menu_out = 'Returned to the main menu. '
str_maybe_later = 'All right then, come back later'
str_error = 'Oops... something went wrong, try again later'
str_help = '⚙ !Help%g'
|
"""Incoming data loaders
This file contains loader classes that allow reading iteratively through
vehicle entry data for various different data formats
Classes:
Vehicle
Entry
"""
class Vehicle():
"""Representation of a single vehicle."""
def __init__(self, entry, pce):
# vehicle properties
self.id = entry.id
self.type = entry.type
# set pce multiplier based on type
if self.type in ["passenger", "DEFAULT_VEHTYPE", "veh_passenger"]:
self.multiplier = pce["car"]
elif self.type in ["motorcycle", "veh_motorcycle"]:
self.multiplier = pce["moto"]
elif self.type in ["truck", "veh_truck"]:
self.multiplier = pce["truck"]
elif self.type in ["bus", "veh_bus"]:
self.multiplier = pce["bus"]
elif self.type in ["taxi", "veh_taxi"]:
self.multiplier = pce["taxi"]
else:
self.multiplier = pce["other"]
self.last_entry = None
self.new_entry = entry
def update(self):
"""Shift new entries to last and prepare for new"""
if self.new_entry != None:
self.last_entry = self.new_entry
self.new_entry = None
def distance_moved(self):
"""Calculate the distance the vehicle traveled within the same edge"""
return self.new_entry.pos - self.last_entry.pos
def approx_distance_moved(self, time_diff):
"""Approximate the distance the vehicle traveled between edges"""
return self.new_entry.speed * time_diff
def __repr__(self):
return ('{}({})'.format(self.__class__.__name__, self.id))
class Entry():
"""Representation of a single timestep sensor entry of a vehicle."""
def __init__(self, entry, time):
# vehicle properties
self.id = entry['id']
self.type = entry['type']
self.time = time
# extract edge and lane ids
self.edge_id = entry['lane'].rpartition('_')[0]
        self.lane_id = entry['lane'].rpartition('_')[2]  # [1] is the '_' separator; the index is [2]
# store position in edge
self.pos = float(entry['pos'])
self.speed = float(entry['speed'])
# store location/speed data
# self.x = float(entry['x'])
# self.y = float(entry['y'])
def __repr__(self):
return ('{}({})'.format(self.__class__.__name__, self.id))
|
# --------- Global variables ---------
# Commodity dictionary
dict_commodity_infos = {
    1001: {"name": "Dragon-Slaying Saber", "price": 10000},
    1002: {"name": "Heaven-Reliant Sword", "price": 10000},
    1003: {"name": "Golden Cudgel", "price": 52100},
    1004: {"name": "Face mask", "price": 20},
    1005: {"name": "Alcohol", "price": 30},
}
# Order list
list_orders = [
    {"cid": 1001, "count": 1},
    {"cid": 1002, "count": 3},
    {"cid": 1005, "count": 2},
]
# --------- Functions ---------
# 1. Define a function that prints all commodity information in the format:
#    commodity id xx, commodity name xx, unit price xx.
def print_commodity_all():
    for k, v in dict_commodity_infos.items():
        print("Commodity id %d, name %s, unit price %d." %
              (k, v["name"], v["price"]))
print_commodity_all()
# 2. Define a function that prints the information of every order in the format:
#    commodity id xx, purchase count xx.
# Repeat N times
N = 10
for i in range(N):
    print(i)
    print('Hello World')
# The indentation differs below, so
# 'Hello World' is not repeated
for i in range(N):
    print(i)
print('Hello World')
|
# This is a pytest config file
# https://docs.pytest.org/en/2.7.3/plugins.html
# It allows us to tell nbval (the py.text plugin we use to run
# notebooks and check their output is unchanged) to skip comparing
# notebook outputs for particular mimetypes.
def pytest_collectstart(collector):
if (
collector.fspath
and collector.fspath.ext == ".ipynb"
and hasattr(collector, "skip_compare")
):
# Skip plotly comparison, because something to do with
# responsive plot sizing makes output different in test
# environment
collector.skip_compare += ("application/vnd.plotly.v1+json",)
|
# model
model = Model()
i1 = Input("op1", "TENSOR_FLOAT32", "{2, 2, 2, 2}")
i3 = Output("op3", "TENSOR_FLOAT32", "{2, 2, 2, 2}")
model = model.Operation("RSQRT", i1).To(i3)
# Example 1. Input in operand 0,
input0 = {i1: # input 0
[1.0, 36.0, 2.0, 90, 4.0, 16.0, 25.0, 100.0,
23.0, 19.0, 40.0, 256.0, 4.0, 43.0, 8.0, 36.0]}
output0 = {i3: # output 0
[1.0, 0.166667, 0.70710678118, 0.105409, 0.5, 0.25, 0.2, 0.1,
0.208514, 0.229416, 0.158114, 0.0625, 0.5, 0.152499, 0.35355339059, 0.166667]}
# Instantiate an example
Example((input0, output0))
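# (Sanity note: RSQRT is the reciprocal square root, 1/sqrt(x); e.g.
# 1/sqrt(36) ~= 0.166667 and 1/sqrt(2) ~= 0.70710678, matching output0.)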
|
# A simple calculator for addition, subtraction, multiplication, division and modulus.
first_number = int(input("Enter your first number: "))
operators = input("Enter what you wanna do +,-,*,/,%: ")
second_number = int(input("Enter your second Number: "))
if operators == "+" :
first_number += second_number
print(f"Your Addition result is: {first_number}")
elif operators == "-" :
first_number -= second_number
print(f"Your Subtraction result is: {first_number}")
elif operators == "*" :
first_number *= second_number
print(f"Your Multiplication result is: {first_number}")
elif operators == "/" :
first_number /= second_number
print(f"Your Division result is: {first_number}")
elif operators == "%" :
first_number %= second_number
print(f"Your Modulus result is: {first_number}")
else :
print("You have chosen a wrong operator") |
description = 'system setup'
group = 'lowlevel'
sysconfig = dict(
cache='localhost',
instrument='Fluco',
experiment='Exp',
datasinks=['conssink', 'filesink', 'daemonsink',],
)
modules = ['nicos.commands.standard', 'nicos_ess.commands.epics']
devices = dict(
Fluco=device('nicos.devices.instrument.Instrument',
description='instrument object',
instrument='Fluco',
responsible='S. Body <[email protected]>',
),
Sample=device('nicos.devices.sample.Sample',
description='The currently used sample',
),
Exp=device('nicos.devices.experiment.Experiment',
description='experiment object',
dataroot='/opt/nicos-data',
sendmail=True,
serviceexp='p0',
sample='Sample',
),
filesink=device('nicos.devices.datasinks.AsciiScanfileSink',
),
conssink=device('nicos.devices.datasinks.ConsoleScanSink',
),
daemonsink=device('nicos.devices.datasinks.DaemonSink',
),
Space=device('nicos.devices.generic.FreeSpace',
description='The amount of free space for storing data',
path=None,
minfree=5,
),
)
|
i=2
while i < 10:
j=1
while j < 10:
print(i,"*",j,"=",i*j)
j += 1
i += 1
|
# Program that asks the user for a positive integer and prints the successive
# values of the following calculation: at each step, if the current value is
# even it is divided by two; if it is odd it is multiplied by three and one is
# added. The program ends when the current value reaches one.
# Read the first number, then check that it is positive.
n = int(input("please enter a number: " ))
while n != 1:
# eliminating 0 and negative numbers
if n <= 0:
print("Please enter a positive number.")
break
# for even numbers:
elif n % 2== 0:
n=int(n/2)
print(n)
# for other integers (odd numbers)
else:
n=int(n*3+1)
print(n)
|
df = [['4', '1', '2', '7', '2', '5', '1'], ['9', '9', '8', '0', '2', '0', '8', '5', '0', '1', '3']]
# flatten: join each inner list of digit strings, then join the rows
dcf = ''.join(''.join(row) for row in df)
print(dcf)
print() |
parameters = {
"results": [
{
"type": "max",
"identifier":
{
"symbol": "S22",
"elset": "ALL_ELEMS",
"position": "Element 1 Int Point 1 Sec Pt SPOS, (fraction = 1:0)"
},
"referenceValue": 62.3, # YT
"tolerance": 0.05
},
{
"type": "disp_at_zero_y",
"step": "Step-1",
"identifier": [
{ # x
"symbol": "U2",
"nset": "Y+",
"position": "Node 3"
},
{ # y
"symbol": "S22",
"elset": "ALL_ELEMS",
"position": "Element 1 Int Point 1 Sec Pt SPOS, (fraction = 1:0)"
}
],
"zeroTol": 0.00623, # Defines how close to zero the y value needs to be
"referenceValue": 0.00889, # u_f = 2*GYT/YT
"tolerance": 1e-5
},
{
"type": "max",
"identifier":
{
"symbol": "SDV_CDM_d2",
"elset": "ALL_ELEMS",
"position": "Element 1 Int Point 1 Sec Pt SPOS, (fraction = 1:0)"
},
"referenceValue": 1.0,
"tolerance": 0.0
},
{
"type": "max",
"identifier":
{
"symbol": "SDV_CDM_d1T",
"elset": "ALL_ELEMS",
"position": "Element 1 Int Point 1 Sec Pt SPOS, (fraction = 1:0)"
},
"referenceValue": 0.0,
"tolerance": 0.0
},
{
"type": "max",
"identifier":
{
"symbol": "SDV_CDM_d1C",
"elset": "ALL_ELEMS",
"position": "Element 1 Int Point 1 Sec Pt SPOS, (fraction = 1:0)"
},
"referenceValue": 0.0,
"tolerance": 0.0
},
{
"type": "continuous",
"identifier":
{
"symbol": "S22",
"elset": "ALL_ELEMS",
"position": "Element 1 Int Point 1 Sec Pt SPOS, (fraction = 1:0)"
},
"referenceValue": 0.0,
"tolerance": 0.1
}
]
} |
numOne = int(input("Enter the first number: "))
numTwo = int(input("Enter the second number: "))
numThree = int(input("Enter the third number: "))
if numOne < numTwo < numThree:
    print("ascending")
else:
    print("not in ascending order") |
class Carro():
    def __init__(self, tanque):
        self.__tanque = tanque  # private attribute
    def andar(self, km):
        self.__tanque -= (km * 0.5)
    def abastecer(self, gasolina):
        self.__tanque += float(gasolina)
    def getTanque(self):
        return self.__tanque
    def setTanque(self, novo):
        self.__tanque = novo
corsa = Carro(0)
# convert the input and keep the result (the original discarded the float() call)
valorAbastecimento = float(input("Attendant: How much would you like to fill up?\n "))
corsa.abastecer(valorAbastecimento)
tanqueAtual = valorAbastecimento
print("Attendant: We put {} liters in your tank. You now have {} liters in your tank\n".format(valorAbastecimento, tanqueAtual))
distancia = float(input("Friend: How about a ride on the highway? How many km can you drive?\n"))
corsa.andar(distancia)
tanqueAtual = corsa.getTanque()
print("Awesome! We drove {} km! We now have {}L in the tank!".format(distancia, tanqueAtual)) |
from typing import List

class Solution:
    def wordBreak(self, s: str, wordDict: List[str]) -> bool:
def check(t, a, dp):
n=len(t)
for k in range(1, n):
if dp[a+0][a+k-1] and dp[a+k][a+n-1]:
return 1
return 0
n=len(s)
dp=[[0 for j in range(n)] for i in range(n)]
for i in range(n):
for j in range(n-i):
t=s[j:j+i+1]
if t in wordDict:
dp[j][j+i]=1
else:
dp[j][j+i]=check(t, j, dp)
return dp[0][n-1]==1
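# Quick check (LeetCode 139 sample): "leetcode" splits into "leet" + "code".
# Solution().wordBreak("leetcode", ["leet", "code"]) -> True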
|
expected_output = {
"clock_state": {
"system_status": {
"associations_address": "10.16.2.2",
"associations_local_mode": "client",
"clock_offset": 27.027,
"clock_refid": "127.127.1.1",
"clock_state": "synchronized",
"clock_stratum": 3,
"root_delay": 5.61,
}
},
"peer": {
"10.16.2.2": {
"local_mode": {
"client": {
"delay": 5.61,
"jitter": 3.342,
"mode": "synchronized",
"offset": 27.027,
"poll": 64,
"reach": 7,
"receive_time": 25,
"refid": "127.127.1.1",
"remote": "10.16.2.2",
"stratum": 3,
"configured": True,
"local_mode": "client",
}
}
},
"10.36.3.3": {
"local_mode": {
"client": {
"delay": 0.0,
"jitter": 15937.0,
"mode": "unsynchronized",
"offset": 0.0,
"poll": 512,
"reach": 0,
"receive_time": "-",
"refid": ".STEP.",
"remote": "10.36.3.3",
"stratum": 16,
"configured": True,
"local_mode": "client",
}
}
},
},
}
|
#!/usr/bin/python3.5
#-*- coding: utf-8 -*-
for row in range(10):
for j in range(row):
print (" ",end=" ")
for i in range(10-row):
print (i,end=" ")
print ()
|
# SUM
def twoSumI(nums, target):
result = {}
for k, v in enumerate(nums):
sub = target - v
if sub in result:
return [result[sub], k]
result[v] = k
def twoSum(nums, target):
l, r = 0, len(nums) - 1
result = []
while l < r:
total = nums[l] + nums[r]
if total > target or (r < len(nums) - 1 and nums[r] == nums[r + 1]):
r -= 1
elif total < target or (l > 0 and nums[l - 1] == nums[l]):
l += 1
else:
result.append([nums[l], nums[r]])
l += 1
r -= 1
return result
def kSum(nums, target, k):
result = []
if k == 2:
return twoSum(nums, target)
for i in range(len(nums)):
if i == 0 or nums[i - 1] != nums[i]:
for sub in kSum(nums[i + 1:], target - nums[i], k - 1):
result.append([nums[i]] + sub)
return result
def threeSum(nums, target):
    return kSum(nums, target, 3)
def fourSum(nums, target):
    return kSum(nums, target, 4)
def pair(k, arr):
ret = dict()
count = 0
for i, val in enumerate(arr):
total = val + k
if total in ret:
print("{} - {}".format(val, total))
count += 1
ret[val] = i
return count
## String
def is_p(s):
i, j = 0, len(s) - 1
while i < j:
if s[i] != s[j]:
return False
i += 1
j -= 1
return True
def longest_palindrome(s):
    if not s:
        return ''
ret = ''
for i in range(len(s)):
old = expand(s, i, i)
even = expand(s, i, i + 1)
if len(old) > len(even):
tmp = old
else:
tmp = even
if len(ret) < len(tmp):
ret = tmp
return ret
def expand(s, i, j):
while i >= 0 and j <= len(s) - 1 and s[i] == s[j]:
i -= 1
j += 1
return s[i + 1: j]
def bubble_sort(nums):
    # We set swapped to True so the loop runs at least once
swapped = True
while swapped:
swapped = False
for i in range(len(nums) - 1):
if nums[i] > nums[i + 1]:
# Swap the elements
nums[i], nums[i + 1] = nums[i + 1], nums[i]
# Set the flag to True so we'll loop again
swapped = True
def selection_sort(arr):
    s = len(arr)
    for i in range(s):
        lowest_idx = i
        for j in range(i + 1, s):
            # compare against the current minimum, not the fixed i-th element
            if arr[j] < arr[lowest_idx]:
                lowest_idx = j
        arr[i], arr[lowest_idx] = arr[lowest_idx], arr[i]
def insertion_sort(nums):
# Start on the second element as we assume the first element is sorted
for i in range(1, len(nums)):
item_to_insert = nums[i]
# And keep a reference of the index of the previous element
j = i - 1
# Move all items of the sorted segment forward if they are larger than
# the item to insert
while j >= 0 and nums[j] > item_to_insert:
nums[j + 1] = nums[j]
j -= 1
# Insert the item
nums[j + 1] = item_to_insert
def quick_sort(arr, l, r):
if l < r:
lo, ro = l, r
l1 = partition(arr, lo, ro)
quick_sort(arr, l, l1)
quick_sort(arr, l1 + 1, r)
def partition(arr, lo, ro):
l, r = lo, ro
if l >= r:
return
pivot = arr[(l+r)//2]
while l <= r:
while arr[l] < pivot and l <= r:
l += 1
while arr[r] > pivot and r >= l:
r -= 1
if l >= r:
break
arr[l], arr[r] = arr[r], arr[l]
l += 1
r -= 1
return l
## Example
# two sum
# nums, target = [3, 2, 4], 6
# print(twoSum(nums, target))
#
# # three sum
# nums = [-2,0,1,1,2]
# nums.sort()
# print(kSum(nums, 0, 3))
# Palindronearr
# print(longest_palindrome('bacabd'))
# ## PAIR
# arr = [1, 5, 3, 4, 2]
# k = 2
# arr.sort(reverse=True)
# print(pair(2, arr))
## Sort
arr = [22, 5, 1, 18, 99, 0]
# quick_sort(arr, 0, len(arr) - 1)
selection_sort(arr)
print(arr)
# print(insertion_sort(arr))
def bubble_soft(arr):
    # (Despite the name, this swaps across the whole tail rather than adjacent
    # pairs; it still sorts, selection-style.)
    s = len(arr)
    for i in range(s):
        for j in range(i + 1, s):
            if arr[j] < arr[i]:
                arr[i], arr[j] = arr[j], arr[i]
def selection_sort(arr):
    s = len(arr)
    for i in range(s):
        min_idx = i
        for j in range(i + 1, s):
            # track the index of the minimum seen so far
            if arr[j] < arr[min_idx]:
                min_idx = j
        arr[i], arr[min_idx] = arr[min_idx], arr[i]
def insertion_sort(arr):
s = len(arr)
for i in range(1, s):
insert_num = arr[i]
j = i - 1
while j >= 0 and arr[j] > insert_num:
arr[j + 1] = arr[j]
j -= 1
arr[j + 1] = insert_num
def quick_sort(arr, l, r):
if l >= r:
return
def partition(lo, ro):
pivot = arr[(lo + ro)//2]
while lo <= ro:
while arr[lo] < pivot and lo <= ro:
lo += 1
while arr[ro] > pivot and ro >= lo:
ro -= 1
if lo <= ro:
arr[lo], arr[ro] = arr[ro], arr[lo]
ro -= 1
lo += 1
return lo - 1
mid = partition(l, r)
quick_sort(arr, l, mid)
quick_sort(arr, mid + 1, r)
#
# arr = [1, 3, 3, 2, 5, 0]
# quick_sort(arr, 0, len(arr) - 1)
#
# print(arr)
def isolated_area(arr):
    # Count connected groups of 1s (4-directional) in a grid.
    total = 0
    for i in range(len(arr)):
        for j in range(len(arr[0])):
            if arr[i][j] == 1:
                dfs(arr, (i, j))
                total += 1
    return total
def dfs(arr, p):
    # Iterative flood fill: mark every reachable 1 with 2.
    stack = [p]
    while stack:
        i, j = stack.pop()
        if i < 0 or j < 0 or i >= len(arr) or j >= len(arr[0]) or arr[i][j] != 1:
            continue
        arr[i][j] = 2
        stack.extend([(i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)])
def int_para(val):
num_arr = []
while val > 0:
mod = val%10
num_arr.insert(0, mod)
val = val//10
def is_p():
l, r = 0, len(num_arr) - 1
while l <= r:
if num_arr[l] != num_arr[r]:
return False
l += 1
r -= 1
return True
return is_p()
# def expand(arr, i, j):
# while arr[i] == arr[j]:
# print(int_para(124521))
# [["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]]
def word_search(board, word):
visited = set()
for i in range(len(board)):
for j in range(len(board[0])):
if board[i][j] == word[0]:
if dfs(board, visited, (i, j), word, 1):
return True
return False
def dfs(board, visited, p, word, idx):
    s1 = len(board) - 1
    s2 = len(board[0]) - 1
    i, j = p
    visited.add((i, j))  # mark the cell so the current path cannot reuse it
    if idx >= len(word):
        return True
    if i + 1 <= s1 and (i+1, j) not in visited and board[i+1][j] == word[idx]:
        if dfs(board, visited, (i+1, j), word, idx + 1):
            return True
    if i - 1 >= 0 and (i-1, j) not in visited and board[i-1][j] == word[idx]:
        if dfs(board, visited, (i-1, j), word, idx + 1):
            return True
    if j + 1 <= s2 and (i, j + 1) not in visited and board[i][j + 1] == word[idx]:
        if dfs(board, visited, (i, j + 1), word, idx + 1):
            return True
    if j - 1 >= 0 and (i, j - 1) not in visited and board[i][j - 1] == word[idx]:
        if dfs(board, visited, (i, j - 1), word, idx + 1):
            return True
    visited.discard((i, j))  # unmark when backtracking
    return False
class Solution(object):
def exist(self, board, word):
"""
:type board: List[List[str]]
:type word: str
:rtype: bool
"""
visited = set()
for i in range(len(board)):
for j in range(len(board[0])):
if board[i][j] == word[0]:
if self.dfs(board, visited, (i, j), word, 1):
return True
return False
def dfs(self, board, visited, p, word, idx):
s1 = len(board) - 1
s2 = len(board[0]) - 1
i, j = p
# visited.add((i, j))
tmp = board[i][j]
board[i][j] = '#'
if idx >= len(word):
return True
if i + 1 <= s1 and (i + 1, j) not in visited and board[i + 1][j] == word[idx]:
if self.dfs(board, visited, (i + 1, j), word, idx + 1):
return True
if i - 1 >= 0 and (i - 1, j) not in visited and board[i - 1][j] == word[idx]:
if self.dfs(board, visited, (i - 1, j), word, idx + 1):
return True
if j + 1 <= s2 and (i, j + 1) not in visited and board[i][j + 1] == word[idx]:
if self.dfs(board, visited, (i, j + 1), word, idx + 1):
return True
        if j - 1 >= 0 and (i, j - 1) not in visited and board[i][j - 1] == word[idx]:
            if self.dfs(board, visited, (i, j - 1), word, idx + 1):
                return True
        board[i][j] = tmp  # restore the cell when backtracking
        return False
# board = [["C","A","A"],
# ["A","A","A"],
# ["B","C","D"]]
board = [["A","B","C","E"],
["S","F","E","S"],
["A","D","E","E"]]
word = "ABCESEEEFS"
# sol = Solution()
# print(sol.exist(board, word))
class Solution:
def setZeroes(self, matrix) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
visited = set()
for i in range(len(matrix)):
for j in range(len(matrix[0])):
if matrix[i][j] == 0 and (i, j) not in visited:
visited.add((i, j))
self.draw(matrix, visited, i, j)
def draw(self, matrix, visited, i, j):
s1 = len(matrix) - 1
s2 = len(matrix[0]) - 1
while s1 >= 0:
if matrix[s1][j] != 0:
visited.add((s1, j))
matrix[s1][j] = 0
s1 -= 1
while s2 >= 0:
if matrix[i][s2] != 0:
visited.add((i, s2))
matrix[i][s2] = 0
s2 -= 1
# matrix = [[0,1,2,0],[3,4,5,2],[1,3,1,5]]
#
# sol = Solution()
# print(sol.setZeroes(matrix))
# print(matrix)
class Solution:
    def num_decodings(self, s) -> int:
        # Count decodings of a digit string where "1"->A ... "26"->Z.
        # (The original accumulated `total` by value, which never propagates
        # out of the recursion; memoised recursion over the suffix fixes it.)
        from functools import lru_cache

        @lru_cache(maxsize=None)
        def ways(i):
            if i == len(s):
                return 1
            if s[i] == '0':
                return 0
            total = ways(i + 1)
            if i + 1 < len(s) and int(s[i:i + 2]) <= 26:
                total += ways(i + 2)
            return total

        return ways(0)
s = "226"
sol = Solution()
print(sol.num_decodings(s))
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
CREDITS = {
'source': 'Wikipedia',
'url': 'https://en.wikipedia.org/wiki/Wonders_of_the_World'
}
WONDERS = {
'ancient world': [
{
'name': 'Great Pyramid of Giza',
            'location': 'Giza, Egypt',
'url': 'https://en.wikipedia.org/wiki/Great_Pyramid_of_Giza'
},
{
'name': 'Colossus of Rhodes',
'location': 'Rhodes, Greece',
'url': 'https://en.wikipedia.org/wiki/Colossus_of_Rhodes'
},
{
'name': 'Hanging Gardens of Babylon',
            'location': 'Babylon (near Hillah, Babil province, in present-day Iraq)',
'url': 'https://en.wikipedia.org/wiki/Hanging_Gardens_of_Babylon'
},
{
'name': 'Lighthouse of Alexandria',
'location': 'Alexandria, Egypt',
'url': 'https://en.wikipedia.org/wiki/Lighthouse_of_Alexandria'
},
{
'name': 'Mausoleum at Halicarnassus',
'location': 'Halicarnassus, Achaemenid Empire (modern day Turkey)',
'url': 'https://en.wikipedia.org/wiki/Mausoleum_at_Halicarnassus'
},
{
'name': 'Statue of Zeus at Olympia',
'location': 'Olympia, Greece',
'url': 'https://en.wikipedia.org/wiki/Statue_of_Zeus_at_Olympia'
},
{
'name': 'Temple of Artemis',
'location': 'Ephesus (near the modern town of Selçuk in present-day Turkey)',
'url': 'https://en.wikipedia.org/wiki/Temple_of_Artemis'
},
],
'natural': [
{
'name': 'Aurora in the high-latitude regions',
'location': 'Arctic and Antarctica',
'url': 'https://en.wikipedia.org/wiki/Aurora_(astronomy)'
},
{
'name': 'Grand Canyon',
'location': 'Arizona, USA',
'url': 'https://en.wikipedia.org/wiki/Grand_Canyon'
},
{
'name': 'Great Barrier Reef',
'location': 'Queensland, Australia',
'url': 'https://en.wikipedia.org/wiki/Great_Barrier_Reef'
},
{
'name': 'Harbor of Rio de Janeiro',
'location': 'Rio de Janeiro, Brazil',
'url': 'https://en.wikipedia.org/wiki/Guanabara_Bay'
},
{
'name': 'Mount Everest',
'location': 'Nepal',
'url': 'https://en.wikipedia.org/wiki/Mount_Everest'
},
{
'name': 'Parícutin volcano',
'location': 'Michoacán, Mexico',
'url': 'https://en.wikipedia.org/wiki/Par%C3%ADcutin'
},
{
'name': 'Victoria Falls',
'location': 'Border of Zambia and Zimbabwe',
'url': 'https://en.wikipedia.org/wiki/Victoria_Falls,_Zambia'
},
],
'new seven wonders': [
{
'name': 'Great Wall of China',
'location': 'China',
'url': 'https://en.wikipedia.org/wiki/Great_Wall_of_China'
},
{
'name': 'Petra',
'location': 'Jordan',
'url': 'https://en.wikipedia.org/wiki/Petra'
},
{
'name': 'Christ the Redeemer',
'location': 'Rio de Janeiro, Brazil',
'url': 'https://en.wikipedia.org/wiki/Christ_the_Redeemer_(statue)'
},
{
'name': 'Machu Picchu',
'location': 'Peru',
'url': 'https://en.wikipedia.org/wiki/Machu_Picchu'
},
{
'name': 'Chichen Itza',
'location': 'Mexico',
'url': 'https://en.wikipedia.org/wiki/Chichen_Itza'
},
{
'name': 'Colosseum',
'location': 'Rome, Italy',
'url': 'https://en.wikipedia.org/wiki/Colosseum'
},
{
'name': 'Taj Mahal',
'location': 'India',
'url': 'https://en.wikipedia.org/wiki/Taj_Mahal'
},
]
}
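# Illustrative lookup over the structure above: list each category's wonders.
if __name__ == '__main__':
    for category, wonders in WONDERS.items():
        print(category, '->', ', '.join(w['name'] for w in wonders))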
|
nome = input('Enter your full name: ').strip()
print('Your full name in upper case: \n{}'.format(nome.upper()))
print('Your name in lower case: \n{}'.format(nome.lower()))
print('Your name has:\n{} characters, not counting spaces.'.format(len(nome) - nome.count(' ')))
dividido = nome.split()
print('Your first name is:\n{} and it has {} characters'.format(dividido[0], len(dividido[0])))
|
# -*- coding: utf-8 -*-
"""
Created on 24 Dec 2019 20:40:06
@author: jiahuei
"""
def get_dict(fp):
data = {}
with open(fp, 'r') as f:
for ll in f.readlines():
_ = ll.split(',')
data[_[0]] = _[1].rstrip()
return data
def dump(data, keys, out_path):
out_str = ''
for k in keys:
out_str += '{},{}\r\n'.format(k, data[k])
with open(out_path, 'w') as f:
f.write(out_str)
SPLITS = ['train', 'valid', 'test']
for split in SPLITS:
print('Checking {} ... '.format(split), end='')
a = get_dict('/master/datasets/mscoco/captions_py2/mscoco_{}_v25595_s15.txt'.format(split))
b = get_dict('/master/datasets/mscoco/captions/mscoco_{}_v25595_s15.txt'.format(split))
a_keys = sorted(a.keys())
b_keys = sorted(b.keys())
a_values = sorted(a.values())
b_values = sorted(b.values())
if a_keys == b_keys and a_values == b_values:
print('OK')
else:
print('DIFFERENT')
del a, b
# dump(a, a_keys, '/master/datasets/insta/py2.txt')
# dump(b, a_keys, '/master/datasets/insta/py3.txt')
|
__author__ = 'Aaron Yang'
__email__ = '[email protected]'
__date__ = '1/10/2021 10:53 PM'
class Solution:
    def smallestStringWithSwaps(self, s: str, pairs) -> str:
        # Indices connected through pairs form components; within a component
        # any rearrangement is reachable, so sort each component's characters.
        # (Pairwise swapping in sorted pair order, as before, is not enough.)
        parent = list(range(len(s)))
        def find(x):
            while parent[x] != x:
                parent[x] = parent[parent[x]]
                x = parent[x]
            return x
        for a, b in pairs:
            parent[find(a)] = find(b)
        groups = {}
        for i in range(len(s)):
            groups.setdefault(find(i), []).append(i)
        chars = list(s)
        for idxs in groups.values():
            for i, c in zip(idxs, sorted(chars[k] for k in idxs)):
                chars[i] = c
        return ''.join(chars)
print(Solution().smallestStringWithSwaps("dcab", [[0, 3], [1, 2], [0, 2]]))  # -> "abcd"
|
"""Exceptions for Ambee."""
class AmbeeError(Exception):
"""Generic Ambee exception."""
class AmbeeConnectionError(AmbeeError):
"""Ambee connection exception."""
class AmbeeAuthenticationError(AmbeeConnectionError):
"""Ambee authentication exception."""
class AmbeeConnectionTimeoutError(AmbeeConnectionError):
"""Ambee connection Timeout exception."""
|
"""
> Task
Call two arms equally strong if the heaviest weights they each are able to
lift are equal. Call two people equally strong if their strongest arms are
equally strong (the strongest arm can be both the right and the left), and
so are their weakest arms. Given your and your friend's arms' lifting
capabilities find out if you two are equally strong.
>Example
For your_left = 10, your_right = 15, friends_left = 15, and friends_right = 10, the output should be true
For your_left = 15, your_right = 10, friends_left = 15, and friends_right = 10, the output should be true
For your_left = 15, your_right = 10, friends_left = 15, and friends_right = 9, the output should be false
> Input/Output
- execution time limit: 4 seconds (py3)
- input: integer your_left
A non-negative integer representing the heaviest weight you can lift with your left arm.
- guaranteed constraints:
0 ≤ your_left ≤ 20.
- input: integer your_right
A non-negative integer representing the heaviest weight you can lift with your right arm.
- guaranteed constraints:
0 ≤ your_right ≤ 20.
- input: integer friends_left
A non-negative integer representing the heaviest weight your friend can lift with his or her left arm.
- guaranteed constraints:
0 ≤ friends_left ≤ 20.
- input: integer friends_right
A non-negative integer representing the heaviest weight your friend can lift with his or her right arm.
- guaranteed constraints:
0 ≤ friends_right ≤ 20.
- output:
true if you and your friend are equally strong, false otherwise.
"""
def are_equally_strong(your_left, your_right, friends_left, friends_right):
    # Equally strong: same strongest arm and same weakest arm.
    return (max(your_left, your_right) == max(friends_left, friends_right)
            and min(your_left, your_right) == min(friends_left, friends_right))
if __name__ == '__main__':
    # ([your_left, your_right, friends_left, friends_right], expected)
    tests = [
        ([10, 15, 15, 10], True),
        ([15, 10, 15, 10], True),
        ([15, 10, 15, 9], False),
        ([10, 5, 5, 10], True),
        ([10, 15, 5, 20], False),
        ([10, 20, 10, 20], True),
        ([5, 20, 20, 5], True),
        ([20, 15, 5, 20], False),
        ([5, 10, 5, 10], True),
        ([1, 10, 10, 0], False),
        ([5, 5, 10, 10], False),
        ([10, 5, 10, 6], False),
        ([1, 1, 1, 10], False),  # strongest arms differ (1 vs 10)
        ([0, 10, 10, 0], True),
    ]
    for args, expected in tests:
        assert are_equally_strong(*args) == expected
        print(are_equally_strong(*args))
|
# test bignum unary operations
i = 1 << 65
print(bool(i))
print(+i)
print(-i)
print(~i)
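# Note: Python ints are arbitrary precision, so ~i == -(i + 1) holds even for
# values far beyond 64 bits.
assert ~i == -(i + 1)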
|
def zeroes(n, cnt):
if n == 0:
return cnt
elif n % 10 == 0:
return zeroes(n//10, cnt+1)
else:
return zeroes(n//10, cnt)
n = int(input())
print(zeroes(n, 0))
|
req = {
"userId": "admin",
"metadata": {
"@context": [
"https://w3id.org/ro/crate/1.0/context",
{
"@vocab": "https://schema.org/",
"osfcategory": "https://www.research-data-services.org/jsonld/osfcategory",
"zenodocategory": "https://www.research-data-services.org/jsonld/zenodocategory",
},
],
"@graph": [
{
"@id": "ro-crate-metadata.json",
"@type": "CreativeWork",
"about": {"@id": "./"},
"identifier": "ro-crate-metadata.json",
"conformsTo": {"@id": "https://w3id.org/ro/crate/1.0"},
"license": {"@id": "https://creativecommons.org/licenses/by-sa/3.0"},
"description": "Made with Describo: https://uts-eresearch.github.io/describo/",
},
{
"@type": "Dataset",
"datePublished": "2020-09-29T22:00:00.000Z",
"name": ["testtitle"],
"description": ["Beispieltest. Ganz viel\n\nasd mit umbruch"],
"creator": [
{"@id": "#edf6055e-9985-4dfe-9759-8f1aa640d396"},
{"@id": "#ac356e5f-fb71-400e-904e-a473c4fc890d"},
],
"zenodocategory": "publication/thesis",
"osfcategory": "analysis",
"@id": "./",
},
{
"@type": "Person",
"@reverse": {"creator": [{"@id": "./"}]},
"name": "Peter Heiss",
"familyName": "Heiss",
"givenName": "Peter",
"affiliation": [{"@id": "#4bafacfd-e123-44dc-90b9-63f974f85694"}],
"@id": "#edf6055e-9985-4dfe-9759-8f1aa640d396",
},
{
"@type": "Organization",
"name": "WWU",
"@reverse": {
"affiliation": [{"@id": "#edf6055e-9985-4dfe-9759-8f1aa640d396"}]
},
"@id": "#4bafacfd-e123-44dc-90b9-63f974f85694",
},
{
"@type": "Person",
"name": "Jens Stegmann",
"familyName": "Stegmann",
"givenName": "Jens",
"email": "",
"@reverse": {"creator": [{"@id": "./"}]},
"@id": "#ac356e5f-fb71-400e-904e-a473c4fc890d",
},
],
},
}
result = {
"data": {
"type": "nodes",
"attributes": {
"description": "Beispieltest. Ganz viel asd mit umbruch",
"category": "analysis",
"title": "testtitle",
},
}
}
|
class GoogleException(Exception):
def __init__(self, code, message, response):
self.status_code = code
self.error_type = message
self.message = message
self.response = response
self.get_error_type()
def get_error_type(self):
json_response = self.response.json()
if 'error' in json_response and 'errors' in json_response['error']:
self.error_type = json_response['error']['errors'][0]['reason']
|
command = input().lower()
in_progress = True
car_stopped = True
while in_progress:
if command == 'help':
print("start - to start the car")
print("stop - to stop the car")
print("quit - to ext")
elif command == 'start':
if car_stopped:
print("You started the car")
car_stopped = False
else:
print("The car has already been started")
elif command == 'stop':
if not car_stopped:
print("You stopped the car")
car_stopped = True
else:
print("The car is already stopped")
elif command == 'quit':
print("Exiting the program now")
in_progress = False
break
else:
print("That was not a valid command, try again. Enter 'help' for a list of valid commands")
command = input().lower()
|
class Solution:
def __init__(self):
pass
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
        # Find the two actual values (in sorted order) that sum to the target.
        def answer_values(nums, target):
            total_nums = len(nums)
            sorted_nums = sorted(nums)
            for i in range(total_nums):
                for j in range(i + 1, total_nums):
                    if int(sorted_nums[i]) + int(sorted_nums[j]) == target:
                        return sorted_nums[i], sorted_nums[j]
                    elif int(sorted_nums[i]) + int(sorted_nums[j]) > target:
                        # the list is sorted, so a larger j can only overshoot
                        break
def return_index(nums, num1, num2):
ans_list = []
if num1 == num2:
for i , value in enumerate(nums):
if value == num1:
ans_list.append(i)
else:
pass
if len(ans_list) == 2:
break
else:
for i , value in enumerate(nums) :
if value == num1:
ans_list.append(i)
elif value == num2:
ans_list.append(i)
else:
pass
if len(ans_list) == 2:
break
return ans_list
num1, num2 = answer_values(nums, target)
return return_index(nums, num1, num2)
S = Solution()
print(S.twoSum([0,3,0],0))
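# A standard one-pass alternative (not the original approach): remember each
# value's index and look up the complement as you go, O(n) time.
def two_sum_one_pass(nums, target):
    seen = {}
    for i, v in enumerate(nums):
        if target - v in seen:
            return [seen[target - v], i]
        seen[v] = i

print(two_sum_one_pass([0, 3, 0], 0))  # [0, 2]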
|
def create_xml_doc(text):
JS("""
try //Internet Explorer
{
var xmlDoc=new ActiveXObject("Microsoft['XMLDOM']");
xmlDoc['async']="false";
xmlDoc['loadXML'](@{{text}});
}
catch(e)
{
try //Firefox, Mozilla, Opera, etc.
{
var parser=new DOMParser();
xmlDoc=parser['parseFromString'](@{{text}},"text/xml");
}
catch(e)
{
return null;
}
}
return xmlDoc;
""")
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Chris Hoffman <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
DOCUMENTATION = r'''
---
module: win_service
short_description: Manage and query Windows services
description:
- Manage and query Windows services.
- For non-Windows targets, use the M(ansible.builtin.service) module instead.
options:
dependencies:
description:
- A list of service dependencies to set for this particular service.
- This should be a list of service names and not the display name of the
service.
    - Whether the services in this list are added, removed, or set outright is
      controlled by C(dependency_action).
type: list
elements: str
dependency_action:
    description:
    - Used in conjunction with C(dependencies) to control how the list is
      applied to the existing service dependencies.
    - C(add) appends the listed services to the existing dependencies and
      C(remove) removes the listed services from the existing dependencies.
    - C(set) replaces the existing dependencies with only the values in the
      list.
type: str
choices: [ add, remove, set ]
default: set
desktop_interact:
description:
- Whether to allow the service user to interact with the desktop.
- This can only be set to C(yes) when using the C(LocalSystem) username.
- This can only be set to C(yes) when the I(service_type) is
C(win32_own_process) or C(win32_share_process).
type: bool
default: no
description:
description:
- The description to set for the service.
type: str
display_name:
description:
- The display name to set for the service.
type: str
error_control:
description:
- The severity of the error and action token if the service fails to start.
- A new service defaults to C(normal).
- C(critical) will log the error and restart the system with the last-known
good configuration. If the startup fails on reboot then the system will
fail to operate.
- C(ignore) ignores the error.
- C(normal) logs the error in the event log but continues.
- C(severe) is like C(critical) but a failure on the last-known good
configuration reboot startup will be ignored.
choices:
- critical
- ignore
- normal
- severe
type: str
failure_actions:
description:
- A list of failure actions the service controller should take on each
failure of a service.
- The service manager will run the actions from first to last defined until
the service starts. If I(failure_reset_period_sec) has been exceeded then
the failure actions will restart from the beginning.
    - If all actions have been performed then the service manager will repeat
      the last action defined.
- The existing actions will be replaced with the list defined in the task
if there is a mismatch with any of them.
- Set to an empty list to delete all failure actions on a service
otherwise an omitted or null value preserves the existing actions on the
service.
type: list
elements: dict
suboptions:
delay_ms:
description:
- The time to wait, in milliseconds, before performing the specified action.
default: 0
type: raw
aliases:
- delay
type:
description:
- The action to be performed.
- C(none) will perform no action, when used this should only be set as
the last action.
- C(reboot) will reboot the host, when used this should only be set as
the last action as the reboot will reset the action list back to the
beginning.
- C(restart) will restart the service.
- C(run_command) will run the command specified by I(failure_command).
required: yes
type: str
choices:
- none
- reboot
- restart
- run_command
failure_actions_on_non_crash_failure:
description:
- Controls whether failure actions will be performed on non crash failures
or not.
type: bool
failure_command:
description:
- The command to run for a C(run_command) failure action.
- Set to an empty string to remove the command.
type: str
failure_reboot_msg:
description:
- The message to be broadcast to users logged on the host for a C(reboot)
failure action.
- Set to an empty string to remove the message.
type: str
failure_reset_period_sec:
description:
    - The time in seconds after which the failure action list begins from the
start if there are no failures.
- To set this value, I(failure_actions) must have at least 1 action
present.
- Specify C('0xFFFFFFFF') to set an infinite reset period.
type: raw
aliases:
- failure_reset_period
force_dependent_services:
description:
- If C(yes), stopping or restarting a service with dependent services will
force the dependent services to stop or restart also.
- If C(no), stopping or restarting a service with dependent services may
fail.
type: bool
default: no
load_order_group:
description:
- The name of the load ordering group of which this service is a member.
- Specify an empty string to remove the existing load order group of a
service.
type: str
name:
description:
- Name of the service.
- If only the name parameter is specified, the module will report
on whether the service exists or not without making any changes.
required: yes
type: str
path:
description:
- The path to the executable to set for the service.
type: str
password:
description:
- The password to set the service to start as.
- This and the C(username) argument should be supplied together when using a local or domain account.
- If omitted then the password will continue to use the existing value password set.
- If specifying C(LocalSystem), C(NetworkService), C(LocalService), the C(NT SERVICE), or a gMSA this field can be
omitted as those accounts have no password.
type: str
pre_shutdown_timeout_ms:
description:
- The time in which the service manager waits after sending a preshutdown
notification to the service until it proceeds to continue with the other
shutdown actions.
aliases:
- pre_shutdown_timeout
type: raw
required_privileges:
description:
- A list of privileges the service must have when starting up.
- When set the service will only have the privileges specified on its
access token.
- The I(username) of the service must already have the privileges assigned.
    - The existing privileges will be replaced with the list defined in the task
if there is a mismatch with any of them.
- Set to an empty list to remove all required privileges, otherwise an
omitted or null value will keep the existing privileges.
- See L(privilege text constants,https://docs.microsoft.com/en-us/windows/win32/secauthz/privilege-constants)
for a list of privilege constants that can be used.
type: list
elements: str
service_type:
description:
- The type of service.
- The default type of a new service is C(win32_own_process).
- I(desktop_interact) can only be set if the service type is
C(win32_own_process) or C(win32_share_process).
choices:
- user_own_process
- user_share_process
- win32_own_process
- win32_share_process
type: str
sid_info:
description:
- Used to define the behaviour of the service's access token groups.
- C(none) will not add any groups to the token.
- C(restricted) will add the C(NT SERVICE\<service name>) SID to the access
token's groups and restricted groups.
- C(unrestricted) will add the C(NT SERVICE\<service name>) SID to the
access token's groups.
choices:
- none
- restricted
- unrestricted
type: str
start_mode:
description:
- Set the startup type for the service.
- A newly created service will default to C(auto).
type: str
choices: [ auto, delayed, disabled, manual ]
state:
description:
- The desired state of the service.
- C(started)/C(stopped)/C(absent)/C(paused) are idempotent actions that will not run
commands unless necessary.
- C(restarted) will always bounce the service.
    - Only services that support the paused state can be paused; you can
      check the return value C(can_pause_and_continue).
- You can only pause a service that is already started.
- A newly created service will default to C(stopped).
type: str
choices: [ absent, paused, started, stopped, restarted ]
update_password:
description:
- When set to C(always) and I(password) is set, the module will always report a change and set the password.
- Set to C(on_create) to only set the password if the module needs to create the service.
- If I(username) was specified and the service changed to that username then I(password) will also be changed if
specified.
    - The current default is C(on_create) but this behaviour may change in the future, so it is best to be explicit here.
choices:
- always
- on_create
type: str
username:
description:
- The username to set the service to start as.
- Can also be set to C(LocalSystem) or C(SYSTEM) to use the SYSTEM account.
- A newly created service will default to C(LocalSystem).
- If using a custom user account, it must have the C(SeServiceLogonRight)
granted to be able to start up. You can use the M(ansible.windows.win_user_right) module
to grant this user right for you.
- Set to C(NT SERVICE\service name) to run as the NT SERVICE account for that service.
- This can also be a gMSA in the form C(DOMAIN\gMSA$).
type: str
notes:
- This module historically returned information about the service in its return values. These return values should be
  avoided in favour of the M(ansible.windows.win_service_info) module.
- Most of the options in this module apply to non-driver services, the kind you can view in SCManager. While you can
  edit driver services, not all functionality may be available.
- The user running the module must have the following access rights on the service to be able to use it with this
module - C(SERVICE_CHANGE_CONFIG), C(SERVICE_ENUMERATE_DEPENDENTS), C(SERVICE_QUERY_CONFIG), C(SERVICE_QUERY_STATUS).
- Changing the state or removing the service will also require further rights depending on what needs to be done.
seealso:
- module: ansible.builtin.service
- module: community.windows.win_nssm
- module: ansible.windows.win_service_info
- module: ansible.windows.win_user_right
author:
- Chris Hoffman (@chrishoffman)
'''
EXAMPLES = r'''
- name: Restart a service
ansible.windows.win_service:
name: spooler
state: restarted
- name: Set service startup mode to auto and ensure it is started
ansible.windows.win_service:
name: spooler
start_mode: auto
state: started
- name: Pause a service
ansible.windows.win_service:
name: Netlogon
state: paused
- name: Ensure that WinRM is started when the system has settled
ansible.windows.win_service:
name: WinRM
start_mode: delayed
# A new service will also default to the following values:
# - username: LocalSystem
# - state: stopped
# - start_mode: auto
- name: Create a new service
ansible.windows.win_service:
name: service name
path: C:\temp\test.exe
- name: Create a new service with extra details
ansible.windows.win_service:
name: service name
path: C:\temp\test.exe
display_name: Service Name
description: A test service description
- name: Remove a service
ansible.windows.win_service:
name: service name
state: absent
# This is required to be set for non-service accounts that need to run as a service
- name: Grant domain account the SeServiceLogonRight user right
ansible.windows.win_user_right:
name: SeServiceLogonRight
users:
- DOMAIN\User
action: add
- name: Set the log on user to a domain account
ansible.windows.win_service:
name: service name
state: restarted
username: DOMAIN\User
password: Password
- name: Set the log on user to a local account
ansible.windows.win_service:
name: service name
state: restarted
username: .\Administrator
password: Password
- name: Set the log on user to Local System
ansible.windows.win_service:
name: service name
state: restarted
username: SYSTEM
- name: Set the log on user to Local System and allow it to interact with the desktop
ansible.windows.win_service:
name: service name
state: restarted
username: SYSTEM
desktop_interact: yes
- name: Set the log on user to Network Service
ansible.windows.win_service:
name: service name
state: restarted
username: NT AUTHORITY\NetworkService
- name: Set the log on user to Local Service
ansible.windows.win_service:
name: service name
state: restarted
username: NT AUTHORITY\LocalService
- name: Set the log on user as the service's virtual account
ansible.windows.win_service:
name: service name
username: NT SERVICE\service name
- name: Set the log on user as a gMSA
ansible.windows.win_service:
name: service name
username: DOMAIN\gMSA$ # The end $ is important and should be set for all gMSA
- name: Set dependencies to ones only in the list
ansible.windows.win_service:
name: service name
dependencies: [ service1, service2 ]
- name: Add dependencies to existing dependencies
ansible.windows.win_service:
name: service name
dependencies: [ service1, service2 ]
dependency_action: add
- name: Remove dependencies from existing dependencies
ansible.windows.win_service:
name: service name
dependencies:
- service1
- service2
dependency_action: remove
- name: Set required privileges for a service
ansible.windows.win_service:
name: service name
username: NT SERVICE\LocalService
required_privileges:
- SeBackupPrivilege
- SeRestorePrivilege
- name: Remove all required privileges for a service
ansible.windows.win_service:
name: service name
username: NT SERVICE\LocalService
required_privileges: []
- name: Set failure actions for a service with no reset period
ansible.windows.win_service:
name: service name
failure_actions:
- type: restart
- type: run_command
delay_ms: 1000
- type: restart
delay_ms: 5000
- type: reboot
failure_command: C:\Windows\System32\cmd.exe /c mkdir C:\temp
failure_reboot_msg: Restarting host because service name has failed
failure_reset_period_sec: '0xFFFFFFFF'
- name: Set only 1 failure action without a repeat of the last action
ansible.windows.win_service:
name: service name
failure_actions:
- type: restart
delay_ms: 5000
- type: none
- name: Remove failure action information
ansible.windows.win_service:
name: service name
failure_actions: []
failure_command: '' # removes the existing command
failure_reboot_msg: '' # removes the existing reboot msg
'''
RETURN = r'''
exists:
description: Whether the service exists or not.
returned: success
type: bool
sample: true
name:
description: The service name or id of the service.
returned: success and service exists
type: str
sample: CoreMessagingRegistrar
display_name:
description: The display name of the installed service.
returned: success and service exists
type: str
sample: CoreMessaging
state:
description: The current running status of the service.
returned: success and service exists
type: str
sample: stopped
start_mode:
description: The startup type of the service.
returned: success and service exists
type: str
sample: manual
path:
description: The path to the service executable.
returned: success and service exists
type: str
sample: C:\Windows\system32\svchost.exe -k LocalServiceNoNetwork
can_pause_and_continue:
description: Whether the service can be paused and unpaused.
returned: success and service exists
type: bool
sample: true
description:
description: The description of the service.
returned: success and service exists
type: str
sample: Manages communication between system components.
username:
description: The username that runs the service.
returned: success and service exists
type: str
sample: LocalSystem
desktop_interact:
  description: Whether the service is allowed to interact with the desktop.
returned: success and service exists
type: bool
sample: false
dependencies:
  description: A list of services that this service depends on.
returned: success and service exists
type: list
  sample: []
depended_by:
description: A list of services that depend on this service.
returned: success and service exists
type: list
  sample: []
'''
|
class Solution:
"""
@param candidates: A list of integers
@param target: An integer
@return: A list of lists of integers
"""
def combinationSum(self, candidates, target):
# write your code here
        if not candidates:
            # No candidates at all, so no combination can reach the target.
            return []
candidates.sort()
self.results = []
self.target = target
visited = [False for _ in range(len(candidates))]
self._find_combination_sum(candidates, [], visited, 0)
return self.results
def _find_combination_sum(self, candidates, current_combination, visited, start):
if sum(current_combination) == self.target:
current_combination.sort()
self.results.append(current_combination[:])
return
for i in range(start, len(candidates)):
if sum(current_combination) + candidates[i] > self.target:
break
if i > 0 and candidates[i - 1] == candidates[i] and not visited[i - 1]:
continue
current_combination.append(candidates[i])
visited[i] = True
self._find_combination_sum(candidates, current_combination, visited, i)
current_combination.pop()
            visited[i] = False
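
# Quick sanity check (an added sketch; the call below is a hypothetical
# example, not part of the original solution):
if __name__ == "__main__":
    print(Solution().combinationSum([2, 3, 6, 7], 7))  # [[2, 2, 3], [7]]
|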
# test exception matching against a tuple
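# 'fail' is an undefined name, so evaluating it raises NameError, which the
# tuples below should catch either directly or via its base class Exception.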
try:
fail
except (Exception,):
print('except 1')
try:
fail
except (Exception, Exception):
print('except 2')
try:
fail
except (TypeError, NameError):
print('except 3')
try:
fail
except (TypeError, ValueError, Exception):
print('except 4')
|
def printMaximum(num):
    # Count how many times each digit 0-9 appears in num.
    d = {}
    for i in range(10):
        d[i] = 0
    for i in str(num):
        d[int(i)] += 1
    # Walk digits 0-9 in ascending order, assigning them to ascending place
    # values, so the largest digits land in the most significant positions.
    res = 0
    m = 1
for i in list(d.keys()):
while d[i] > 0:
res = res + i*m
d[i] -= 1
m *= 10
return res
# Driver code
num = 38293367
print(printMaximum(num)) |
NUMERIC = "numeric"
CATEGORICAL = "categorical"
TEST_MODEL = "test"
SINGLE_MODEL = "single"
MODEL_SEARCH = "search"
SHUTDOWN = "shutdown"
DEFAULT_PORT = 8042
DEFAULT_MAX_JOBS = 4
ERROR = "error"
QUEUED = "queued"
STARTED = "started"
IN_PROGRESS = "in-progress"
FINISHED = "finished"
# This can be any x where np.exp(x) + 1 == np.exp(x) Going up to 512
# isn't strictly necessary, but hey, why not?
LARGE_EXP = 512
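# Illustrative check (added; assumes numpy imported as np):
#   np.exp(LARGE_EXP) + 1 == np.exp(LARGE_EXP)  # True: exp(512) overflows
#   to float64 inf, and inf + 1 == inf.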
EPSILON = 1e-4
# Parameters that can appear in the layers of models
MATRIX_PARAMS = [
'weights'
]
VEC_PARAMS = [
'mean',
'variance',
'offset',
'scale',
'stdev'
]
# Model search parameters
VALIDATION_FRAC = 0.15
MAX_VALIDATION_ROWS = 4096
LEARN_INCREMENT = 8
MAX_QUEUE = LEARN_INCREMENT * 4
N_CANDIDATES = MAX_QUEUE * 64
|
class TopTen:
def __init__(self):
self.num = 1
def __iter__(self):
return self
def __next__(self):
if self.num <= 10:
val = self.num
self.num += 1
return val
else:
raise StopIteration
values = TopTen()
print(next(values))  # consumes the first value, 1
for i in values:  # iteration resumes where next() left off: 2 through 10
    print(i)
|
class cached_property:
def __init__(self, func):
self.__doc__ = getattr(func, "__doc__")
self.func = func
    def __get__(self, obj, cls):
        if obj is None:
            return self
        # Compute once and cache in the instance dict; because this is a
        # non-data descriptor (no __set__), later attribute lookups find the
        # cached value directly and never call __get__ again.
        value = obj.__dict__[self.func.__name__] = self.func(obj)
        return value
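
# Minimal usage sketch (added illustration; Circle is a hypothetical class):
#
#   class Circle:
#       def __init__(self, radius):
#           self.radius = radius
#
#       @cached_property
#       def area(self):
#           print("computing area")  # runs only on the first access
#           return 3.14159 * self.radius ** 2
#
#   c = Circle(2.0)
#   c.area  # prints "computing area", caches the result in c.__dict__
#   c.area  # served straight from c.__dict__; area() is not called again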
|
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
def tf_pack_ext(pb):
assert (pb.attr["N"].i == len(pb.input))
return {
'axis': pb.attr["axis"].i,
'N': pb.attr["N"].i,
'infer': None
}
|
class ZoomAdminAccount(object):
""" Model to hold Zoom Admin Account info """
def __init__(self, api_key, api_secret):
self.api_key = api_key
self.api_secret = api_secret
|
#!/usr/bin/python3
ls = [l.strip().split(" ") for l in open("inputs/08.in", "r").readlines()]
def run(sw):
    # Run the program; sw is the index of the single jmp/nop to swap
    # (-1 means run it unmodified).
    acc, p, ps = 0, 0, []
    while p < len(ls):
        # Revisiting an instruction means an infinite loop: report acc for
        # part 1, or signal a failed swap with -1 for part 2.
        if p in ps: return acc if sw == -1 else -1
        ps.append(p)
        acc += int(ls[p][1]) if ls[p][0] == "acc" else 0
        p += int(ls[p][1]) if (ls[p][0]=="jmp" and sw!=p) or (ls[p][0]=="nop" and sw==p) else 1
    return acc
def brute():
for i,l in enumerate(ls):
if l[0] == "acc": continue
ans = run(i)
if ans != -1: return ans
print("Part 1:", run(-1))
print("Part 2:", brute()) |
counter = 0
def merge(array, left, right):
i = j = k = 0
global counter
while i < len(left) and j < len(right):
if left[i] <= right[j]:
array[k] = left[i]
k += 1
i += 1
        else:
            array[k] = right[j]
            # Every element still waiting in the left half is greater than
            # right[j], so each of them forms an inversion with it.
            counter += len(left) - i
            k += 1
            j += 1
if len(left) > i:
array[k:] = left[i:]
elif len(right) > j:
array[k:] = right[j:]
def mergesort(array):
if len(array) > 1:
mid = len(array) // 2
left = array[:mid]
right = array[mid:]
mergesort(left)
mergesort(right)
merge(array, left, right)
return array
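
# Worked example (added note): mergesort([2, 3, 9, 2, 9]) sorts the list in
# place and leaves counter == 2, one inversion each for (3, 2) and (9, 2).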
fin = open("inversions.in")
fout = open("inversions.out", "w")
n = int(fin.readline())
array = list(map(int, fin.readline().split()))
mergesort(array)
print(counter, file=fout)
fin.close()
fout.close()
|
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
"""
This file contains C++ code needed to export one dimensional static arrays.
"""
namespace = "pyplusplus::convenience"
file_name = "__convenience.pypp.hpp"
code = \
"""// Copyright 2004-2008 Roman Yakovenko.
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef __convenience_pyplusplus_hpp__
#define __convenience_pyplusplus_hpp__
#include "boost/python.hpp"
namespace pyplusplus{ namespace convenience{
//TODO: Replace index_type with Boost.Python defined ssize_t type.
// This should be done by checking Python and Boost.Python version.
typedef int index_type;
inline void
raise_error( PyObject *exception, const char *message ){
PyErr_SetString(exception, message);
boost::python::throw_error_already_set();
}
inline index_type sequence_len(boost::python::object const& obj){
if( !PySequence_Check( obj.ptr() ) ){
raise_error( PyExc_TypeError, "Sequence expected" );
}
index_type result = PyObject_Length( obj.ptr() );
if( PyErr_Occurred() ){
boost::python::throw_error_already_set();
}
return result;
}
inline void
ensure_sequence( boost::python::object seq, index_type expected_length=-1 ){
index_type length = sequence_len( seq );
if( expected_length != -1 && length != expected_length ){
std::stringstream err;
err << "Expected sequence length is " << expected_length << ". "
<< "Actual sequence length is " << length << ".";
raise_error( PyExc_ValueError, err.str().c_str() );
}
}
template< class ExpectedType >
void ensure_uniform_sequence( boost::python::object seq, index_type expected_length=-1 ){
ensure_sequence( seq, expected_length );
index_type length = sequence_len( seq );
for( index_type index = 0; index < length; ++index ){
boost::python::object item = seq[index];
boost::python::extract<ExpectedType> type_checker( item );
if( !type_checker.check() ){
std::string expected_type_name( boost::python::type_id<ExpectedType>().name() );
std::string item_type_name("different");
PyObject* item_impl = item.ptr();
if( item_impl && item_impl->ob_type && item_impl->ob_type->tp_name ){
item_type_name = std::string( item_impl->ob_type->tp_name );
}
std::stringstream err;
err << "Sequence should contain only items with type \\"" << expected_type_name << "\\". "
<< "Item at position " << index << " has \\"" << item_type_name << "\\" type.";
raise_error( PyExc_ValueError, err.str().c_str() );
}
}
}
template< class Iterator, class Inserter >
void copy_container( Iterator begin, Iterator end, Inserter inserter ){
for( Iterator index = begin; index != end; ++index )
inserter( *index );
}
template< class Inserter >
void copy_sequence( boost::python::object const& seq, Inserter inserter ){
index_type length = sequence_len( seq );
for( index_type index = 0; index < length; ++index ){
inserter = seq[index];
}
}
template< class Inserter, class TItemType >
void copy_sequence( boost::python::object const& seq, Inserter inserter, boost::type< TItemType > ){
index_type length = sequence_len( seq );
for( index_type index = 0; index < length; ++index ){
boost::python::object item = seq[index];
inserter = boost::python::extract< TItemType >( item );
}
}
struct list_inserter{
list_inserter( boost::python::list& py_list )
: m_py_list( py_list )
{}
template< class T >
void operator()( T const & value ){
m_py_list.append( value );
}
private:
boost::python::list& m_py_list;
};
template < class T >
struct array_inserter_t{
array_inserter_t( T* array, index_type size )
: m_array( array )
, m_curr_pos( 0 )
, m_size( size )
{}
void insert( const T& item ){
if( m_size <= m_curr_pos ){
std::stringstream err;
err << "Index out of range. Array size is" << m_size << ", "
<< "current position is" << m_curr_pos << ".";
raise_error( PyExc_ValueError, err.str().c_str() );
}
m_array[ m_curr_pos ] = item;
m_curr_pos += 1;
}
array_inserter_t<T>&
operator=( boost::python::object const & item ){
insert( boost::python::extract< T >( item ) );
return *this;
}
private:
T* m_array;
index_type m_curr_pos;
const index_type m_size;
};
template< class T>
array_inserter_t<T> array_inserter( T* array, index_type size ){
return array_inserter_t<T>( array, size );
}
inline boost::python::object
get_out_argument( boost::python::object result, const char* arg_name ){
if( !PySequence_Check( result.ptr() ) ){
return result;
}
boost::python::object cls = boost::python::getattr( result, "__class__" );
boost::python::object cls_name = boost::python::getattr( cls, "__name__" );
std::string name = boost::python::extract< std::string >( cls_name );
if( "named_tuple" == name ){
return boost::python::getattr( result, arg_name );
}
else{
return result;
}
}
inline boost::python::object
get_out_argument( boost::python::object result, index_type index ){
if( !PySequence_Check( result.ptr() ) ){
return result;
}
boost::python::object cls = boost::python::getattr( result, "__class__" );
boost::python::object cls_name = boost::python::getattr( cls, "__name__" );
std::string name = boost::python::extract< std::string >( cls_name );
if( "named_tuple" == name ){
return result[ index ];
}
else{
return result;
}
}
} /*convenience*/ } /*pyplusplus*/
namespace pyplus_conv = pyplusplus::convenience;
#endif//__convenience_pyplusplus_hpp__
"""
|
"""
This module contains the error messages issued by the Cerberus Validator.
The test suite uses this module as well.
"""
ERROR_SCHEMA_MISSING = "validation schema missing"
ERROR_SCHEMA_FORMAT = "'%s' is not a schema, must be a dict"
ERROR_DOCUMENT_MISSING = "document is missing"
ERROR_DOCUMENT_FORMAT = "'%s' is not a document, must be a dict"
ERROR_UNKNOWN_RULE = "unknown rule '%s' for field '%s'"
ERROR_DEFINITION_FORMAT = "schema definition for field '%s' must be a dict"
ERROR_UNKNOWN_FIELD = "unknown field"
ERROR_REQUIRED_FIELD = "required field"
ERROR_UNKNOWN_TYPE = "unrecognized data-type '%s'"
ERROR_BAD_TYPE = "must be of %s type"
ERROR_MIN_LENGTH = "min length is %d"
ERROR_MAX_LENGTH = "max length is %d"
ERROR_UNALLOWED_VALUES = "unallowed values %s"
ERROR_UNALLOWED_VALUE = "unallowed value %s"
ERROR_ITEMS_LIST = "length of list should be %d"
ERROR_READONLY_FIELD = "field is read-only"
ERROR_MAX_VALUE = "max value is %d"
ERROR_MIN_VALUE = "min value is %d"
ERROR_EMPTY_NOT_ALLOWED = "empty values not allowed"
ERROR_NOT_NULLABLE = "null value not allowed"
ERROR_REGEX = "value does not match regex '%s'"
ERROR_DEPENDENCIES_FIELD = "field '%s' is required"
|
# -*- encoding: latin-1 -*-
# Latin-1 encoding needed for countries list.
"""Place names and other constants often used in web forms.
"""
def uk_counties():
"""\
Return a list of UK county names.
"""
# Based on http://www.gbet.com/AtoZ_counties/
# Updated 2007-10-24
return [x.strip()[2:] for x in """\
* Avon
* Bedfordshire
* Berkshire
* Borders
* Buckinghamshire
* Cambridgeshire
* Central
* Cheshire
* Cleveland
* Clwyd
* Cornwall
* County Antrim
* County Armagh
* County Down
* County Fermanagh
* County Londonderry
* County Tyrone
* Cumbria
* Derbyshire
* Devon
* Dorset
* Dumfries and Galloway
* Durham
* Dyfed
* East Sussex
* Essex
* Fife
* Gloucestershire
* Grampian
* Greater Manchester
* Gwent
* Gwynedd County
* Hampshire
* Herefordshire
* Hertfordshire
* Highlands and Islands
* Humberside
* Isle of Wight
* Kent
* Lancashire
* Leicestershire
* Lincolnshire
* Lothian
* Merseyside
* Mid Glamorgan
* Norfolk
* North Yorkshire
* Northamptonshire
* Northumberland
* Nottinghamshire
* Oxfordshire
* Powys
* Rutland
* Shropshire
* Somerset
* South Glamorgan
* South Yorkshire
* Staffordshire
* Strathclyde
* Suffolk
* Surrey
* Tayside
* Tyne and Wear
* Warwickshire
* West Glamorgan
* West Midlands
* West Sussex
* West Yorkshire
* Wiltshire
* Worcestershire""".split('\n')]
_country_codes = None
def country_codes():
"""Return a list of all country names as tuples. The tuple value is the
country's 2-letter ISO code and its name; e.g.,
``("GB", "United Kingdom")``. The countries are in name order.
Can be used like this::
import webhelpers.constants as constants
from webhelpers.html.tags import select
select("country", country_codes(),
prompt="Please choose a country ...")
See here for more information:
http://www.iso.org/iso/english_country_names_and_code_elements
"""
# Updated on 2007-10-24.
#
# This might seem a funny implementation but it makes it easier to update
# next time there is a change
global _country_codes
if _country_codes is not None:
return _country_codes
else:
text_directly_from_iso_website = """
A
AFGHANISTAN AF
ÅLAND ISLANDS AX
ALBANIA AL
ALGERIA DZ
AMERICAN SAMOA AS
ANDORRA AD
ANGOLA AO
ANGUILLA AI
ANTARCTICA AQ
ANTIGUA AND BARBUDA AG
ARGENTINA AR
ARMENIA AM
ARUBA AW
AUSTRALIA AU
AUSTRIA AT
AZERBAIJAN AZ
B
BAHAMAS BS
BAHRAIN BH
BANGLADESH BD
BARBADOS BB
BELARUS BY
BELGIUM BE
BELIZE BZ
BENIN BJ
BERMUDA BM
BHUTAN BT
BOLIVIA BO
BOSNIA AND HERZEGOVINA BA
BOTSWANA BW
BOUVET ISLAND BV
BRAZIL BR
BRITISH INDIAN OCEAN TERRITORY IO
BRUNEI DARUSSALAM BN
BULGARIA BG
BURKINA FASO BF
BURUNDI BI
C
CAMBODIA KH
CAMEROON CM
CANADA CA
CAPE VERDE CV
CAYMAN ISLANDS KY
CENTRAL AFRICAN REPUBLIC CF
CHAD TD
CHILE CL
CHINA CN
CHRISTMAS ISLAND CX
COCOS (KEELING) ISLANDS CC
COLOMBIA CO
COMOROS KM
CONGO CG
CONGO, THE DEMOCRATIC REPUBLIC OF THE CD
COOK ISLANDS CK
COSTA RICA CR
CÔTE D'IVOIRE CI
CROATIA HR
CUBA CU
CYPRUS CY
CZECH REPUBLIC CZ
D
DENMARK DK
DJIBOUTI DJ
DOMINICA DM
DOMINICAN REPUBLIC DO
E
ECUADOR EC
EGYPT EG
EL SALVADOR SV
EQUATORIAL GUINEA GQ
ERITREA ER
ESTONIA EE
ETHIOPIA ET
F
FALKLAND ISLANDS (MALVINAS) FK
FAROE ISLANDS FO
FIJI FJ
FINLAND FI
FRANCE FR
FRENCH GUIANA GF
FRENCH POLYNESIA PF
FRENCH SOUTHERN TERRITORIES TF
G
GABON GA
GAMBIA GM
GEORGIA GE
GERMANY DE
GHANA GH
GIBRALTAR GI
GREECE GR
GREENLAND GL
GRENADA GD
GUADELOUPE GP
GUAM GU
GUATEMALA GT
GUERNSEY GG
GUINEA GN
GUINEA-BISSAU GW
GUYANA GY
H
HAITI HT
HEARD ISLAND AND MCDONALD ISLANDS HM
HOLY SEE (VATICAN CITY STATE) VA
HONDURAS HN
HONG KONG HK
HUNGARY HU
I
ICELAND IS
INDIA IN
INDONESIA ID
IRAN, ISLAMIC REPUBLIC OF IR
IRAQ IQ
IRELAND IE
ISLE OF MAN IM
ISRAEL IL
ITALY IT
J
JAMAICA JM
JAPAN JP
JERSEY JE
JORDAN JO
K
KAZAKHSTAN KZ
KENYA KE
KIRIBATI KI
KOREA, DEMOCRATIC PEOPLE'S REPUBLIC OF KP
KOREA, REPUBLIC OF KR
KUWAIT KW
KYRGYZSTAN KG
L
LAO PEOPLE'S DEMOCRATIC REPUBLIC LA
LATVIA LV
LEBANON LB
LESOTHO LS
LIBERIA LR
LIBYAN ARAB JAMAHIRIYA LY
LIECHTENSTEIN LI
LITHUANIA LT
LUXEMBOURG LU
M
MACAO MO
MACEDONIA, THE FORMER YUGOSLAV REPUBLIC OF MK
MADAGASCAR MG
MALAWI MW
MALAYSIA MY
MALDIVES MV
MALI ML
MALTA MT
MARSHALL ISLANDS MH
MARTINIQUE MQ
MAURITANIA MR
MAURITIUS MU
MAYOTTE YT
MEXICO MX
MICRONESIA, FEDERATED STATES OF FM
MOLDOVA, REPUBLIC OF MD
MONACO MC
MONGOLIA MN
MONTENEGRO ME
MONTSERRAT MS
MOROCCO MA
MOZAMBIQUE MZ
MYANMAR MM
N
NAMIBIA NA
NAURU NR
NEPAL NP
NETHERLANDS NL
NETHERLANDS ANTILLES AN
NEW CALEDONIA NC
NEW ZEALAND NZ
NICARAGUA NI
NIGER NE
NIGERIA NG
NIUE NU
NORFOLK ISLAND NF
NORTHERN MARIANA ISLANDS MP
NORWAY NO
O
OMAN OM
P
PAKISTAN PK
PALAU PW
PALESTINIAN TERRITORY, OCCUPIED PS
PANAMA PA
PAPUA NEW GUINEA PG
PARAGUAY PY
PERU PE
PHILIPPINES PH
PITCAIRN PN
POLAND PL
PORTUGAL PT
PUERTO RICO PR
Q
QATAR QA
R
RÉUNION RE
ROMANIA RO
RUSSIAN FEDERATION RU
RWANDA RW
S
SAINT BARTHÉLEMY BL
SAINT HELENA SH
SAINT KITTS AND NEVIS KN
SAINT LUCIA LC
SAINT MARTIN MF
SAINT PIERRE AND MIQUELON PM
SAINT VINCENT AND THE GRENADINES VC
SAMOA WS
SAN MARINO SM
SAO TOME AND PRINCIPE ST
SAUDI ARABIA SA
SENEGAL SN
SERBIA RS
SEYCHELLES SC
SIERRA LEONE SL
SINGAPORE SG
SLOVAKIA SK
SLOVENIA SI
SOLOMON ISLANDS SB
SOMALIA SO
SOUTH AFRICA ZA
SOUTH GEORGIA AND THE SOUTH SANDWICH ISLANDS GS
SPAIN ES
SRI LANKA LK
SUDAN SD
SURINAME SR
SVALBARD AND JAN MAYEN SJ
SWAZILAND SZ
SWEDEN SE
SWITZERLAND CH
SYRIAN ARAB REPUBLIC SY
T
TAIWAN, PROVINCE OF CHINA TW
TAJIKISTAN TJ
TANZANIA, UNITED REPUBLIC OF TZ
THAILAND TH
TIMOR-LESTE TL
TOGO TG
TOKELAU TK
TONGA TO
TRINIDAD AND TOBAGO TT
TUNISIA TN
TURKEY TR
TURKMENISTAN TM
TURKS AND CAICOS ISLANDS TC
TUVALU TV
U
UGANDA UG
UKRAINE UA
UNITED ARAB EMIRATES AE
UNITED KINGDOM GB
UNITED STATES US
UNITED STATES MINOR OUTLYING ISLANDS UM
URUGUAY UY
UZBEKISTAN UZ
V
VANUATU VU
VATICAN CITY STATE see HOLY SEE
VENEZUELA VE
VIET NAM VN
VIRGIN ISLANDS, BRITISH VG
VIRGIN ISLANDS, U.S. VI
W
WALLIS AND FUTUNA WF
WESTERN SAHARA EH
Y
YEMEN YE
Z
ZAIRE see CONGO, THE DEMOCRATIC REPUBLIC OF THE
ZAMBIA ZM
ZIMBABWE ZW
""".replace('\t',' ').split('\n')
e = []
for item in text_directly_from_iso_website:
if len(item) > 1:
p=[]
                # The ISO listing separates name and code by at least two
                # spaces, so split on a double space to keep multi-word
                # country names (e.g. "BOSNIA AND HERZEGOVINA") in one piece.
                parts = item.split('  ')
for part in parts:
if part.strip():
p.append(part.strip())
if len(p)>2:
raise Exception("Invalid entry %s" % p)
p.reverse()
if len(p) == 1:
# It is just a letter
continue
if len(p[0]) != 2:
if p[0][:3] != 'see':
raise Exception('Unknown entry %s'%(p))
else:
# We just want to ignore it
continue
p = tuple(p)
e.append(p)
_country_codes = e
return _country_codes
def us_states():
"""List of USA states.
Return a list of ``(abbreviation, name)`` for all US states, sorted by name.
Includes the District of Columbia.
"""
# From http://www.usps.com/ncsc/lookups/abbreviations.html
#Updated 2008-05-01
return [
("AL", "Alabama"),
("AK", "Alaska"),
("AZ", "Arizona"),
("AR", "Arkansas"),
("CA", "California"),
("CO", "Colorado"),
("CT", "Connecticut"),
("DE", "Delaware"),
("DC", "District of Columbia"),
("FL", "Florida"),
("GA", "Georgia"),
("HI", "Hawaii"),
("ID", "Idaho"),
("IL", "Illinois"),
("IN", "Indiana"),
("IA", "Iowa"),
("KS", "Kansas"),
("KY", "Kentucky"),
("LA", "Louisiana"),
("ME", "Maine"),
("MD", "Maryland"),
("MA", "Massachusetts"),
("MI", "Michigan"),
("MN", "Minnesota"),
("MS", "Mississippi"),
("MO", "Missouri"),
("MT", "Montana"),
("NE", "Nebraska"),
("NV", "Nevada"),
("NH", "New Hampshire"),
("NJ", "New Jersey"),
("NM", "New Mexico"),
("NY", "New York"),
("NC", "North Carolina"),
("ND", "North Dakota"),
("OH", "Ohio"),
("OK", "Oklahoma"),
("OR", "Oregon"),
("PA", "Pennsylvania"),
("RI", "Rhode Island"),
("SC", "South Carolina"),
("SD", "South Dakota"),
("TN", "Tennessee"),
("TX", "Texas"),
("UT", "Utah"),
("VT", "Vermont"),
("VA", "Virginia"),
("WA", "Washington"),
("WV", "West Virginia"),
("WI", "Wisconsin"),
("WY", "Wyoming"),
]
def us_territories():
"""USA postal abbreviations for territories, protectorates, and military.
The return value is a list of ``(abbreviation, name)`` tuples. The
locations are sorted by name.
"""
# From http://www.usps.com/ncsc/lookups/abbreviations.html
# Updated 2008-05-01
return [
("AS", "American Samoa"),
("AA", "Armed Forces Americas"),
("AE", "Armed Forces Europe/Canada/Middle East/Africa"),
("AP", "Armed Forces Pacific"),
("FM", "Federated States of Micronesia"),
("GU", "Guam"),
("MH", "Marshall Islands"),
("MP", "Northern Mariana Islands"),
("PW", "Palau"),
("PR", "Puerto Rico"),
("VI", "Virgin Islands"),
]
def canada_provinces():
"""List of Canadian provinces.
Return a list of ``(abbreviation, name)`` tuples for all Canadian
provinces and territories, sorted by name.
"""
# Based on:
# http://en.wikipedia.org/wiki/Canadian_subnational_postal_abbreviations
# See also:
# http://en.wikipedia.org/wiki/Provinces_and_territories_of_Canada
# Updated 2008-05-01
provinces = [
("Alberta", "AB"),
("British Columbia", "BC"),
("Manitoba", "MB"),
("New Brunswick", "NB"),
("Newfoundland and Labrador", "NL"),
("Nova Scotia", "NS"),
("Northwest Territories", "NT"),
("Nunavut", "NU"),
("Ontario", "ON"),
("Prince Edward Island", "PE"),
("Quebec", "QC"),
("Saskatchewan", "SK"),
("Yukon", "YT"),
]
provinces.sort()
return [(x[1], x[0]) for x in provinces]
|
numeros = [[], []]
temp = []
for c in range(1, 8):
    n = int(input(f'Enter number {c}: '))
    if n % 2 == 0:
        numeros[0].append(n)
    else:
        numeros[1].append(n)
print('*^^*'*12)
print(f'The even values entered were {sorted(numeros[0])}')
print(f'The odd values entered were {sorted(numeros[1])}')
print('*^^*'*12)
|
class ExtDefines(object):
EDEFINE1 = 'ED1'
EDEFINE2 = 'ED2'
EDEFINE3 = 'ED3'
EDEFINES = (
(EDEFINE1, 'EDefine 1'),
(EDEFINE2, 'EDefine 2'),
(EDEFINE3, 'EDefine 3'),
)
class EmptyDefines(object):
"""
This should not show up when a module is dumped!
"""
pass
|
# n = n
# time = O(logn)
# space = O(1)
# done time = 5m
class Solution:
def firstBadVersion(self, n):
"""
:type n: int
:rtype: int
"""
left = 1
right = n + 1
        while left < right:
            # '+' binds tighter than '>>', so this computes (left + right) >> 1.
            mid = left + right >> 1
comp = isBadVersion(mid)
if comp:
right = mid
else:
left = mid + 1
return right
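
# isBadVersion is provided by the judge; a hypothetical stub for running this
# locally (an added sketch, assuming versions 4..n are bad):
#
#   def isBadVersion(version):
#       return version >= 4
#
#   Solution().firstBadVersion(10)  # -> 4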
|
def fib(n: int) -> int:
if n < 2: # base case
return n
return fib(n - 2) + fib(n - 1)
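

# The naive recursion above takes exponential time; a memoized variant (an
# added sketch, not part of the original) runs in linear time:
from functools import lru_cache


@lru_cache(maxsize=None)
def fib_cached(n: int) -> int:
    if n < 2:  # same base case as fib
        return n
    return fib_cached(n - 2) + fib_cached(n - 1)  # fib_cached(10) -> 55
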
if __name__ == "__main__":
print(fib(2))
print(fib(10))
|
#!/usr/bin/env python3
class ApiDefaults:
url_verify = "/api/verify_api_key"
url_refresh = "/api/import/refresh"
url_add = "/api/import/add"
|
"""
entradas
nbilletes50-->int-->n1
nbilletes20-->int-->n2
nbilletes10-->int-->n3
nbilletes5-->int-->n4
nbilletes2-->int-->n5
nbilletes1-->int-->n6
nbilletes500-->int-->n7
nbilletes100-->int-->n8
salidas
total_dinero-->str-->td
"""
n1=(int(input("digite la cantidad de billetes de $50000 ")))
n2=(int(input("digite la cantidad de billetes de $20000 ")))
n3=(int(input("digite la cantidad de billetes de $10000 ")))
n4=(int(input("digite la cantidad de billetes de $5000 ")))
n5=(int(input("digite la cantidad de billetes de $2000 ")))
n6=(int(input("digite la cantidad de billetes de $1000 ")))
n7=(int(input("digite la cantidad de billetes de $500 ")))
n8=(int(input("digite la cantidad de billetes de $100 ")))
td=(n1*50000)+(n2*20000)+(n3*10000)+(n4*5000)+(n5*2000)+(n6*1000)+(n7*500)+(n8*100)
print("el total de dinero es $" + str (td)) |
'''Write a program that asks for the distance of a trip in km. Compute the ticket price,
charging R$0.50 per km for trips of up to 200 km and R$0.45 for longer trips.'''
# My solution
num = float(input('What is the trip distance: '))
if num <= 200:
    print('The ticket price is \033[1;33mR${:.2f}\033[m.'.format(num * 0.5))
else:
    print('The ticket price is \033[1;33mR${:.2f}\033[m.'.format(num * 0.45))
# Gustavo Guanabara
distância = float(input('What is the distance of your trip? '))
print('You are about to start a trip of {}km.'.format(distância))
preço = distância * 0.50 if distância <= 200 else distância * 0.45
print('And the price of your ticket will be R${:.2f}'.format(preço)) |
def display_best_portfolio(portfolio):
    print(f"\nThe best portfolio found: \n \nComposition: \n \n{portfolio} \nFor a total price of {portfolio.price} \nFor a total profit of {portfolio.profit}")
def display_best_branch(branch):
    composition = ""
    for action in branch.composition:
        composition += f"{action} \n"
    print(f"\nThe best portfolio found: \n \nComposition: \n \n{composition} \nFor a total price of {branch.price} \nFor a total profit of {branch.net_profit}")
|
from typing import List  # needed for Solution2's type annotations


class Solution(object):
def islandPerimeter(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
sum = 0
for w in range(len(grid)):
for h in range(len(grid[0])):
if grid[w][h] == 1: add_length = 4
else: continue
if w != 0 and grid[w - 1][h] == 1: add_length -= 2
if h != 0 and grid[w][h - 1] == 1: add_length -= 2
sum += add_length
return sum
class Solution2:
def islandPerimeter(self, grid: List[List[int]]) -> int:
total = 0
direct = [(0, 1), (0, -1), (1, 0), (-1, 0)]
for i in range(len(grid)):
for j in range(len(grid[0])):
if grid[i][j] == 1:
cur = 4
for dx, dy in direct:
new_x = i + dx
new_y = j + dy
if 0 <= new_x < len(grid) and 0 <= new_y < len(grid[0]):
if grid[new_x][new_y] == 1:
cur -= 1
total += cur
return total
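
# Quick check (added; a hypothetical 4x4 grid with a single island):
#   grid = [[0, 1, 0, 0],
#           [1, 1, 1, 0],
#           [0, 1, 0, 0],
#           [1, 1, 0, 0]]
#   Solution().islandPerimeter(grid)   # -> 16
#   Solution2().islandPerimeter(grid)  # -> 16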
|
"""Exceptions raised by this package."""
class MetaloaderError(Exception):
"""Base exception for all errors within this package."""
class MetaloaderNotImplemented(MetaloaderError):
"""Something is yet not implemented in the library."""
|
words = "sort the inner content in descending order"
result = []
for w in words.split():
if len(w)>3:
result.append(w[0]+''.join(sorted(w[1:-1], reverse=True))+w[-1])
else:
result.append(w)
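# Display the transformed sentence (line added for completeness):
print(' '.join(result))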
|
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def __init__(self):
self.total = 0
def sumOfLeftLeaves(self, root: TreeNode) -> int:
if not root:
return 0
        # side: 1 marks a left child, 2 marks a right child.
        def dfs(node, side):
            if node is None:
                return
            if node.left is None and node.right is None and side == 1:
                self.total += node.val
            dfs(node.left, 1)
            dfs(node.right, 2)
        dfs(root.left, 1)
        dfs(root.right, 2)
return self.total
t1 = TreeNode(2)
t1.left = TreeNode(3)
slu = Solution()
print(slu.sumOfLeftLeaves(t1))
|
"""
As Organizações Tabajara resolveram dar um aumento de salário aos seus colaboradores e lhe contraram para desenvolver o programa que calculará os reajustes.
Faça um programa que recebe o salário de um colaborador e o reajuste segundo o seguinte critério, baseado no salário atual:
salários até R$ 280,00 (incluindo) : aumento de 20%
salários entre R$ 280,00 e R$ 700,00 : aumento de 15%
salários entre R$ 700,00 e R$ 1500,00 : aumento de 10%
salários de R$ 1500,00 em diante : aumento de 5% Após o aumento ser realizado, informe na tela:
o salário antes do reajuste;
o percentual de aumento aplicado;
o valor do aumento;
o novo salário, após o aumento.
"""
salario = float(input("Informe o salário do colaborador: "))
if salario > 0:
if salario <= 280:
percentual = 0.2
elif salario > 280 and salario < 700:
percentual = 0.15
elif salario >=700 and salario < 1500:
percentual = 0.1
elif salario >= 1500:
percentual = 0.05
aumento = salario*percentual
novo_salario = salario+aumento
percentual_formatado = str(percentual*100)+"%"
print(f"Salário antigo: {salario}\nPercentual de aumento: {percentual_formatado}")
print(f"Valor do aumento: {aumento}\nNovo Salário: {novo_salario}")
else:
print("Valor inválido!")
|
val = list()
for i in range(5):
    val.append(int(input('Enter an integer: ')))
print('\nLargest value:', max(val))
print('Positions:', end=' ')
for i in range(5):
    if max(val) == val[i]: print(i, end=' ')
print('\n\nSmallest value:', min(val))
print('Positions:', end=' ')
for i in range(5):
    if min(val) == val[i]: print(i, end=' ')
print()
input()
|
class CalcularMedia:
    def total(self, a, b, c):
        p = a + b + c
        return p
def calcular_media(a, b, c):
    p = a + b + c
    return p
CalcularMedia = CalcularMedia()
print('how many numbers')
n = int(input())
nums = []
for i in range(n):
    print('Enter number {}'.format(i + 1))
    v = int(input())
    nums.append(v)
# v never changes below, so this loop repeats forever unless the last
# number entered above was 0.
while True:
    if v == 0:
        break
    print("enter value 1: ")
    a = int(input())
    print("enter value 2: ")
    b = int(input())
    print('enter value 3')
    c = int(input())
    x = CalcularMedia.total(a, b, c)
    x = calcular_media(a, b, c)
    if x <= 0:
        print('Enter the values to compute the average')
    if x > 0:
        media = x / 3
        print(media) |
language_map = {
'ko': 'ko_KR',
'ja': 'ja_JP',
'zh': 'zh_CN'
}
|
# Time: O(n)
# Space: O(1)
# Given an array nums of integers, you can perform operations on the array.
#
# In each operation, you pick any nums[i] and delete it to earn nums[i] points.
# After, you must delete every element equal to nums[i] - 1 or nums[i] + 1.
#
# You start with 0 points.
# Return the maximum number of points you can earn by applying such operations.
#
# Example 1:
# Input: nums = [3, 4, 2]
# Output: 6
# Explanation:
# Delete 4 to earn 4 points, consequently 3 is also deleted.
# Then, delete 2 to earn 2 points. 6 total points are earned.
#
# Example 2:
# Input: nums = [2, 2, 3, 3, 3, 4]
# Output: 9
# Explanation:
# Delete 3 to earn 3 points, deleting both 2's and the 4.
# Then, delete 3 again to earn 3 points, and 3 again to earn 3 points.
# 9 total points are earned.
#
# Note:
# - The length of nums is at most 20000.
# - Each element nums[i] is an integer in the range [1, 10000].
class Solution(object):
def deleteAndEarn(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
vals = [0] * 10001
for num in nums:
vals[num] += num
val_i, val_i_1 = vals[0], 0
        for i in range(1, len(vals)):
val_i_1, val_i_2 = val_i, val_i_1
val_i = max(vals[i] + val_i_2, val_i_1)
return val_i
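
# Quick check (added; these are the examples from the comments above):
#   Solution().deleteAndEarn([3, 4, 2])           # -> 6
#   Solution().deleteAndEarn([2, 2, 3, 3, 3, 4])  # -> 9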
|
global numbering
numbering = [0,1,2,3]
num = 0
filterbegin = '''res(); for (i=0, o=0; i<115; i++){'''
o = ['MSI', 'APPV', 'Citrix MSI', 'Normal','Express','Super Express (BOPO)', 'Simple', 'Medium', 'Complex', 'May', 'June', 'July', 'August']
filterending = '''if (val1 == eq1){ if (val2 == eq2){ if (val3 == eq3){ if (val4 == eq4){ if (val5 == eq5){ if (val6 == eq6){ if (val7 == eq7){ if (val8 == eq8){ op() }}}}}}}}}'''
l = [0, 1, 2]
m = [3, 4, 5]
n = [6, 7, 8]
p = [9, 10, 11, 12]
filtnum = 0
filtername = f'''\nfunction quadfilter{filtnum}()'''
with open("quadfilterfile.txt", 'a') as f:
f.write('''
function op(){
z = o + 1.1
document.getElementById(z).style.display = ""
document.getElementById(z).innerHTML = snum[i]
z = o + 1.2
document.getElementById(z).style.display = ""
document.getElementById(z).innerHTML = tnum[i]
z = o + 1.3
document.getElementById(z).style.display = ""
document.getElementById(z).innerHTML = pname[i]
z = o + 1.4
document.getElementById(z).style.display = ""
document.getElementById(z).innerHTML = bau[i]
z = o + 1.5
document.getElementById(z).style.display = ""
document.getElementById(z).innerHTML = ptype[i]
z = o + 1.6
document.getElementById(z).style.display = ""
document.getElementById(z).innerHTML = rtype[i]
z = o + 1.7
document.getElementById(z).style.display = ""
document.getElementById(z).innerHTML = bopo[i]
z = o + 1.8
document.getElementById(z).style.display = ""
document.getElementById(z).innerHTML = comp[i]
z = o + 1.9
document.getElementById(z).style.display = ""
document.getElementById(z).innerHTML = sdate[i]
z = o + 1.16
document.getElementById(z).style.display = ""
document.getElementById(z).innerHTML = edate[i]
z = o + 1.17
document.getElementById(z).style.display = ""
document.getElementById(z).innerHTML = month[i]
z = o + 1.27
document.getElementById(z).style.display = ""
document.getElementById(z).innerHTML = ssla[i]
z = o + 1.26
document.getElementById(z).style.display = ""
document.getElementById(z).innerHTML = slame[i]
z = o + 1.21
document.getElementById(z).style.display = ""
document.getElementById(z).innerHTML = slam[i]
z = o + 1.15
document.getElementById(z).style.display = ""
document.getElementById(z).innerHTML = remark[i]
o++; document.getElementById("mainsum").innerHTML = "Found "+o+" Results For Your Search"
}
''')
for a1 in range(0, 3):
a2 = o[a1]
if a1 in l:
a1a = "ptype[i]"
elif a1 in m:
a1a = "rtype[i]"
elif a1 in n:
a1a = "comp[i]"
elif a1 in p:
a1a = "month[i]"
for b1 in range(0, 3):
b2 = o[b1]
if b1 != a1:
if b1 in l:
b1b = "ptype[i]"
elif b1 in m:
b1b = "rtype[i]"
elif b1 in n:
b1b = "comp[i]"
elif b1 in p:
b1b = "month[i]"
for c1 in range(3, 6):
c2 = o[c1]
if c1 != a1:
if c1 != b1:
if c1 in l:
c1c = "ptype[i]"
elif c1 in m:
c1c = "rtype[i]"
elif c1 in n:
c1c = "comp[i]"
elif c1 in p:
c1c = "month[i]"
for d1 in range(3, 6):
d2 = o[d1]
if d1 != a1:
if d1 != b1:
if d1 != c1:
if d1 in l:
d1d = "ptype[i]"
elif d1 in m:
d1d = "rtype[i]"
elif d1 in n:
d1d = "comp[i]"
elif d1 in p:
d1d = "month[i]"
for e1 in range(6, 9):
e2 = o[e1]
if e1 != a1:
if e1 != b1:
if e1 != c1:
if e1 != d1:
if e1 in l:
e1e = "ptype[i]"
elif e1 in m:
e1e = "rtype[i]"
elif e1 in n:
e1e = "comp[i]"
elif e1 in p:
e1e = "month[i]"
for f1 in range(6, 9):
f2 = o[f1]
if f1 != a1:
if f1 != b1:
if f1 != c1:
if f1 != d1:
if f1 != e1:
if f1 in l:
f1f = "ptype[i]"
elif f1 in m:
f1f = "rtype[i]"
elif f1 in n:
f1f = "comp[i]"
elif f1 in p:
f1f = "month[i]"
for g1 in range(9, 13):
g2 = o[g1]
if g1 != a1:
if g1 != b1:
if g1 != c1:
if g1 != d1:
if g1 != e1:
if g1 != f1:
if g1 in l:
g1g = "ptype[i]"
elif g1 in m:
g1g = "rtype[i]"
elif g1 in n:
g1g = "comp[i]"
elif g1 in p:
g1g = "month[i]"
for x in range(9, 13):
if x != a1:
if x != b1:
if x != c1:
if x != d1:
if x != e1:
if x != f1:
if x != g1:
if x in l:
x1 = "ptype[i]"
elif x in m:
x1 = "rtype[i]"
elif x in n:
x1 = "comp[i]"
elif x in p:
x1 = "month[i]"
filtnum = filtnum + 1
with open("quadfilterfile.txt", 'a') as f:
f.write(f'''function quadfilter{filtnum}()'''+"{"+f'''{filterbegin}
val1 = "{a2}"; eq1 = "{a1a}"; val2 = "{b2}"; eq2 = "{b1b}"; val3 = "{c2}"; eq3 = "{c1c}"; val4 = "{d2}"; eq4 = "{d1d}"; val5 = "{e2}"; eq5 = "{e1e}"; val6 = "{f2}"; eq6 = "{f1f}"; val7 = "{g2}"; eq7 = "{g1g}"; val8 = "{o[x]}"; eq8 = "{x1}"
{filterending}\n''')
m1 = ['a1', 'a2', 'a3', 'a4','a5','a6', 'a7', 'a8', 'a9', 'a10', 'a11', 'a12', 'a13']
filtnum = 0
for z1 in range(0, 3):
for z2 in range(0, 3):
if z2 != z1:
for z3 in range(3, 6):
if z2 != z1:
if z3 != z2:
for z4 in range(3, 6):
if z2 != z1:
if z3 != z2:
if z4 != z3:
for z5 in range(6, 9):
if z2 != z1:
if z3 != z2:
if z4 != z3:
if z5 != z4:
for z6 in range(6, 9):
if z2 != z1:
if z3 != z2:
if z4 != z3:
if z5 != z4:
if z6 != z5:
for z7 in range(9, 13):
if z2 != z1:
if z3 != z2:
if z4 != z3:
if z5 != z4:
if z6 != z5:
if z7 != z6:
for z8 in range(9, 13):
if z2 != z1:
if z3 != z2:
if z4 != z3:
if z5 != z4:
if z6 != z5:
if z7 != z6:
if z8 != z7:
filtnum = filtnum + 1
with open("b13wala.txt", 'a') as f:
                                                                                                                                                    f.write(f'''if (document.getElementById("{m1[z1]}").style.backgroundColor == "rgb(66, 153, 225)" && document.getElementById("{m1[z2]}").style.backgroundColor == "rgb(66, 153, 225)" && document.getElementById("{m1[z3]}").style.backgroundColor == "rgb(66, 153, 225)" && document.getElementById("{m1[z4]}").style.backgroundColor == "rgb(66, 153, 225)" && document.getElementById("{m1[z5]}").style.backgroundColor == "rgb(66, 153, 225)" && document.getElementById("{m1[z6]}").style.backgroundColor == "rgb(66, 153, 225)" && document.getElementById("{m1[z7]}").style.backgroundColor == "rgb(66, 153, 225)" && document.getElementById("{m1[z8]}").style.backgroundColor == "rgb(66, 153, 225)")'''+"{\n"+f'''quadfilter{filtnum}()\n'''+"}")
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def countUnivalSubtrees(self, root: TreeNode) -> int:
self.count = 0
self.is_uni(root)
return self.count
# Check whether subtree is uni-value
def is_uni(self, node):
if not node:
return True
# Node has no children
if node.left is None and node.right is None:
self.count += 1
return True
        # Check if all children are univalue subtrees
is_uni = True
if node.left:
is_uni = self.is_uni(node.left) and is_uni and node.val == node.left.val
if node.right:
is_uni = self.is_uni(node.right) and is_uni and node.val == node.right.val
# If all children are univalue subtrees, increment count
if is_uni:
self.count += 1
return is_uni
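
# Quick check (added; expects the TreeNode definition above to be
# uncommented, and builds the tree [5, 1, 5, 5, 5, null, 5]):
#   root = TreeNode(5, TreeNode(1, TreeNode(5), TreeNode(5)),
#                   TreeNode(5, None, TreeNode(5)))
#   Solution().countUnivalSubtrees(root)  # -> 4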
|
# Copyright 2021 BlobCity, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This Python file consists of the class IpynbComments, which holds dictionaries of model and procedure descriptions used to add comments/meta descriptions during code generation.
"""
class IpynbComments:
models={
'Classification':{
'TF':"""### Neural Network/Deep Learning Model \nDeep learning is a subset of machine learning, which is essentially a neural network with three or more layers. These neural networks attempt to simulate the behavior of the human brain-albeit far from matching its ability-allowing it to 'learn' from large amounts of data. While a neural network with a single layer can still make approximate predictions, additional hidden layers can help to optimize and refine for accuracy.""",
'LinearDiscriminantAnalysis':"""### Model\n A classifier with a linear decision boundary, generated by fitting class conditional densities to the data and using Bayes’ rule.\n The model fits a Gaussian density to each class, assuming that all classes share the same covariance matrix.\n The fitted model can also be used to reduce the dimensionality of the input by projecting it to the most discriminative directions, using the transform method.
1. solver: Solver to use, possible values: {'svd', 'lsqr', 'eigen'}
2. shrinkage: Shrinkage parameter
3. store_covariance: If True, explicitly compute the weighted within-class covariance matrix when solver is 'svd'. The matrix is always computed and stored for the other solvers.
4. tol: Absolute threshold for a singular value of X to be considered significant, used to estimate the rank of X.
""",
'PassiveAggressiveClassifier':"""### Model \nThe passive-aggressive algorithms are a family of algorithms for large-scale learning. They are similar to the Perceptron in that they do not require a learning rate. However, contrary to the Perceptron, they include a regularization parameter C.\n
1. C: Maximum step size (regularization). Defaults to 1.0.
2. max_iter: The maximum number of passes over the training data (aka epochs). It only impacts the behavior in the fit method, and not the partial_fit method.
3. tol: The stopping criterion. If it is not None, the iterations will stop when (loss > previous_loss - tol).
4. early_stopping: Whether to use early stopping to terminate training when validation score is not improving.
5. n_iter_no_change: Number of iterations with no improvement to wait before early stopping.
6. loss: The loss function to be used: hinge: equivalent to PA-I in the reference paper. squared_hinge: equivalent to PA-II in the reference paper.
""",
'LGBMClassifier':"""### Model \n Light GBM is a fast, distributed, high-performance gradient boosting framework based on decision tree algorithm, used for ranking, classification and many other machine learning tasks.
Since it is based on decision tree algorithms, it splits the tree leaf wise with the best fit whereas other boosting algorithms split the tree depth wise or level wise rather than leaf-wise. So when growing on the same leaf in Light GBM, the leaf-wise algorithm can reduce more loss than the level-wise algorithm and hence results in much better accuracy which can rarely be achieved by any of the existing boosting algorithms.
Refer [API](https://lightgbm.readthedocs.io/en/latest/Python-API.html) for more.""",
'LogisticRegression':"""### Model
**Logistic regression :**\n
Logistic regression is a statistical model that in its basic form uses a logistic function to model a binary dependent variable, although many more complex extensions exist. In regression analysis, logistic regression (or logit regression) is estimating the parameters of a logistic model (a form of binary regression). This can be extended to model several classes of events.
#### Model Tuning Parameters
1. penalty : Used to specify the norm used in the penalization.
2. C : Inverse of regularization strength; must be a positive float. Like in support vector machines, smaller values specify stronger regularization.
3. tol : Tolerance for stopping criteria.
4. solver : Algorithm to use in the optimization problem.
5. max_iter : Maximum number of iterations taken for the solvers to converge.
6. multi_class : If the option chosen is 'ovr', then a binary problem is fit for each label. For 'multinomial' the loss minimised is the multinomial loss fit across the entire probability distribution, even when the data is binary. 'multinomial' is unavailable when solver='liblinear'. 'auto' selects 'ovr' if the data is binary, or if solver='liblinear', and otherwise selects 'multinomial'.
""",
'RidgeClassifier':"""### Model
Classifier using Ridge regression.
This classifier first converts the target values into {-1, 1} and then treats the problem as a regression task (multi-output regression in the multiclass case).
#### Model Tuning Parameters
> **alpha** -> Regularization strength; must be a positive float. Regularization improves the conditioning of the problem and reduces the variance of the estimates. Larger values specify stronger regularization.
> **solver** -> Solver to use in the computational routines {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'}
> **tol** -> Precision of the solution.
""",
'SGDClassifier':"""### Model
Stochastic Gradient Descent (SGD) is a simple yet very efficient approach to fitting linear classifiers and regressors under convex loss functions such as (linear) Support Vector Machines and Logistic Regression. SGD is merely an optimization technique and does not correspond to a specific family of machine learning models. It is only a way to train a model. Often, an instance of SGDClassifier or SGDRegressor will have an equivalent estimator in the scikit-learn API, potentially using a different optimization technique.
For example, using SGDClassifier(loss='log') results in logistic regression, i.e. a model equivalent to LogisticRegression which is fitted via SGD instead of being fitted by one of the other solvers in LogisticRegression.
#### Model Tuning Parameters
> - **loss** -> The loss function to be used.
> - **penalty** -> The penalty (aka regularization term) to be used.
> - **alpha** -> Constant that multiplies the regularization term. The higher the value, the stronger the regularization. Also used to compute the learning rate when set to learning_rate is set to 'optimal'.
> - **l1_ratio** -> The Elastic Net mixing parameter.
> - **tol** -> The stopping criterion
> - **learning_rate** -> The learning rate schedule,possible values {'optimal','constant','invscaling','adaptive'}
> - **eta0** -> The initial learning rate for the 'constant', 'invscaling' or 'adaptive' schedules.
> - **power_t** -> The exponent for inverse scaling learning rate.
> - **epsilon** -> Epsilon in the epsilon-insensitive loss functions; only if loss is 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.""",
'ExtraTreesClassifier':"""### Model
ExtraTreesClassifier is an ensemble learning method fundamentally based on decision trees. ExtraTreesClassifier, like RandomForest, randomizes certain decisions and subsets of data to minimize over-learning from the data and overfitting.
#### Model Tuning Parameters
1. n_estimators: The number of trees in the forest.
2. criterion: The function to measure the quality of a split. Supported criteria are 'gini' for the Gini impurity and 'entropy' for the information gain.
3. max_depth: The maximum depth of the tree. If None, then nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples.
4. max_features: The number of features to consider when looking for the best split:""",
'RandomForestClassifier':"""### Model
A random forest is a meta estimator that fits a number of decision tree classifiers on various sub-samples of the dataset and uses averaging to improve the predictive accuracy and control over-fitting. The sub-sample size is controlled with the <code>max_samples</code> parameter if <code>bootstrap=True</code> (default), otherwise the whole dataset is used to build each tree.
#### Model Tuning Parameters
1. n_estimators : The number of trees in the forest.
2. criterion : The function to measure the quality of a split. Supported criteria are 'gini' for the Gini impurity and 'entropy' for the information gain.
3. max_depth : The maximum depth of the tree.
4. max_features : The number of features to consider when looking for the best split:
5. bootstrap : Whether bootstrap samples are used when building trees. If False, the whole dataset is used to build each tree.
6. oob_score : Whether to use out-of-bag samples to estimate the generalization accuracy.
""",
'AdaBoostClassifier':"""### Model
AdaBoost is one of the earliest boosting ensemble algorithms to be adopted in applied studies. It works by combining multiple “weak classifiers” into a single “strong classifier.” The core concept of the algorithm is to fit a sequence of weak learners on repeatedly modified versions of the data. The predictions from all the weak learners are then combined through a weighted majority vote or sum to produce the outcome/prediction. The data modifications at each iteration consist of applying weights to each of the training samples. Initially, those weights are all set so that the first iteration only trains a weak learner on the original data. For every successive iteration, the sample weights are individually modified, and the algorithm is reapplied to the reweighted data. At a given iteration, those training examples which get incorrectly classified by the model at the previous iteration have their weights increased, whereas the weight gets decreased for data that has been predicted accurately. As iterations continue, data that are difficult to predict or incorrectly classified receive ever-increasing influence. Each subsequent weak learner is thereby forced to concentrate on the data that are missed by the previous ones in the sequence.
#### Tuning Parameters
1. base_estimator: The base estimator from which the boosted ensemble is built. Support for sample weighting is required, as well as proper classes_ and n_classes_ attributes. If None, then the base estimator is DecisionTreeClassifier initialized with max_depth=1.
2. n_estimators: The maximum number of estimators at which boosting is terminated. In case of perfect fit, the learning procedure is stopped early.
3. learning_rate: Learning rate shrinks the contribution of each classifier by learning_rate. There is a trade-off between learning_rate and n_estimators.
4. algorithm: If 'SAMME.R' then use the SAMME.R real boosting algorithm. base_estimator must support calculation of class probabilities. If 'SAMME' then use the SAMME discrete boosting algorithm. The SAMME.R algorithm typically converges faster than SAMME, achieving a lower test error with fewer boosting iterations.
#### Note:
>For better performance of the Adaboost model, the base estimator (Decision Tree Model) can be fine-tuned.
""",
'CatBoostClassifier':"""### Model
CatBoost is an algorithm for gradient boosting on decision trees. Developed by Yandex researchers and engineers, it is the successor of the MatrixNet algorithm that is widely used within the company for ranking tasks, forecasting and making recommendations
#### Tuning parameters
1. **learning_rate**:, The learning rate. Used for reducing the gradient step.
2. **l2_leaf_reg**: Coefficient at the L2 regularization term of the cost function. Any positive value is allowed.
3. **bootstrap_type**: Bootstrap type. Defines the method for sampling the weights of objects.
4. **subsample**: Sample rate for bagging. This parameter can be used if one of the following bootstrap types is selected:
For more information refer: [API](https://catboost.ai/en/docs/concepts/python-reference_catboostclassifier)""",
'QuadraticDiscriminantAnalysis':"""### Model
Quadratic Discriminant Analysis is a classifier with a quadratic decision boundary, generated by fitting class conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class.
#### Model Tuning Parameter
1. reg_param: Regularizes the per-class covariance estimates by transforming S2 as S2 = (1 - reg_param) * S2 + reg_param * np.eye(n_features), where S2 corresponds to the scaling_ attribute of a given class.
2. tol: Absolute threshold for a singular value to be considered significant, used to estimate the rank of Xk where Xk is the centered matrix of samples in class k. This parameter does not affect the predictions. It only controls a warning that is raised when features are considered to be colinear.
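A minimal sketch of fitting QDA with a regularized covariance estimate (scikit-learn assumed; the dataset and reg_param value are illustrative):
```python
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.datasets import load_iris

X, y = load_iris(return_X_y=True)
# reg_param > 0 shrinks each per-class covariance estimate.
qda = QuadraticDiscriminantAnalysis(reg_param=0.1)
qda.fit(X, y)
print(qda.predict(X[:5]))
```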
""",
'GaussianNB':"""### Model
Gaussian NB is a variant of Naive Bayes that follows Gaussian normal distribution and supports continuous data. An approach to creating a simple model is to assume that the data is described by a Gaussian distribution with no co-variance between features.
#### Model Tuning Parameters
1. priors : array-like of shape (n_classes,)
> Prior probabilities of the classes. If specified the priors are not adjusted according to the data.
2. var_smoothing : float, default=1e-9
> Portion of the largest variance of all features that is added to variances for calculation stability.
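A minimal sketch (scikit-learn assumed; the dataset is illustrative):
```python
from sklearn.naive_bayes import GaussianNB
from sklearn.datasets import load_iris

X, y = load_iris(return_X_y=True)
gnb = GaussianNB(var_smoothing=1e-9)  # default smoothing shown explicitly
gnb.fit(X, y)
print(gnb.predict_proba(X[:3]).round(3))
```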
""",
'GradientBoostingClassifier':"""## Model
**GradientBoostingClassifier**
Gradient Boosting builds an additive model in a forward stage-wise fashion; it allows for the optimization of arbitrary differentiable loss functions. In each stage, n_classes regression trees are fit on the negative gradient of the binomial or multinomial deviance loss function.
#### Model Tuning Parameters
1. loss : The loss function to be optimized. 'deviance' refers to deviance (= logistic regression) for classification with probabilistic outputs. For loss 'exponential' gradient boosting recovers the AdaBoost algorithm.
2. learning_rate : Learning rate shrinks the contribution of each tree by learning_rate. There is a trade-off between learning_rate and n_estimators.
3. n_estimators : The number of trees in the forest.
4. criterion : The function to measure the quality of a split. Supported criteria are 'friedman_mse' for the mean squared error with improvement score by Friedman, 'mse' for mean squared error, and 'mae' for the mean absolute error. The default value of 'friedman_mse' is generally the best as it can provide a better approximation in some cases.
5. max_depth : The maximum depth of the individual regression estimators. The maximum depth limits the number of nodes in the tree. Tune this parameter for best performance; the best value depends on the interaction of the input variables.
6. max_features : The number of features to consider when looking for the best split:
7. n_iter_no_change : n_iter_no_change is used to decide if early stopping will be used to terminate training when validation score is not improving. By default it is set to None to disable early stopping. If set to a number, it will set aside validation_fraction size of the training data as validation and terminate training when validation score is not improving in all of the previous n_iter_no_change numbers of iterations. The split is stratified.
8. tol : Tolerance for the early stopping. When the loss is not improving by at least tol for <code>n_iter_no_change</code> iterations (if set to a number), the training stops.""",
'HistGradientBoostingClassifier':"""### Model
Histogram-based Gradient Boosting Classification Tree. This estimator is much faster than GradientBoostingClassifier for big datasets (n_samples >= 10,000). This estimator has native support for missing values (NaNs).
[Reference](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.HistGradientBoostingClassifier.html#sklearn.ensemble.HistGradientBoostingClassifier)
> **loss**: The loss function to use in the boosting process.
> **learning_rate**: The learning rate, also known as shrinkage. This is used as a multiplicative factor for the leaves values. Use 1 for no shrinkage.
> **max_iter**: The maximum number of iterations of the boosting process, i.e. the maximum number of trees.
> **max_depth**: The maximum depth of each tree. The depth of a tree is the number of edges to go from the root to the deepest leaf. Depth isn't constrained by default.
> **l2_regularization**: The L2 regularization parameter. Use 0 for no regularization (default).
> **early_stopping**: If 'auto', early stopping is enabled if the sample size is larger than 10000. If True, early stopping is enabled, otherwise early stopping is disabled.
> **n_iter_no_change**: Used to determine when to 'early stop'. The fitting process is stopped when none of the last n_iter_no_change scores are better than the n_iter_no_change - 1 -th-to-last one, up to some tolerance. Only used if early stopping is performed.
> **tol**: The absolute tolerance to use when comparing scores during early stopping. The higher the tolerance, the more likely we are to early stop: higher tolerance means that it will be harder for subsequent iterations to be considered an improvement upon the reference score.
> **scoring**: Scoring parameter to use for early stopping. """,
'SVC':"""### Model
Support vector machines (SVMs) are a set of supervised learning methods used for classification, regression and outliers detection.
A Support Vector Machine is a discriminative classifier formally defined by a separating hyperplane. In other terms, for given known/labelled data points, the SVM outputs an appropriate hyperplane that classifies new cases based on that hyperplane. In 2-dimensional space, this hyperplane is a line separating the plane into two segments, with each class or group lying on either side.
Here we use SVC; the SVC implementation is based on libsvm.
### Model Tuning Parameters
1. C -> Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive.
2. kernel -> Specifies the kernel type to be used in the algorithm.
3. gamma -> Gamma is a hyperparameter that we have to set before the training model. Gamma decides how much curvature we want in a decision boundary.
4. degree -> Degree of the polynomial kernel function ('poly'). Increasing degree parameter leads to higher training times.
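A minimal sketch showing the parameters above (scikit-learn assumed; values are illustrative). Since SVMs are sensitive to feature scales, the example standardizes inputs first:
```python
from sklearn.datasets import load_iris
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)
model = make_pipeline(StandardScaler(), SVC(C=1.0, kernel="rbf", gamma="scale"))
model.fit(X, y)
print(model.score(X, y))
```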
""",
'NuSVC':"""### Model
Support vector machines (SVMs) are a set of supervised learning methods used for classification, regression and outliers detection.
A Support Vector Machine is a discriminative classifier formally defined by a separating hyperplane. In other terms, for given known/labelled data points, the SVM outputs an appropriate hyperplane that classifies new cases based on that hyperplane. In 2-dimensional space, this hyperplane is a line separating the plane into two segments, with each class or group lying on either side.
SVC and NuSVC are similar methods, but accept slightly different sets of parameters and have different mathematical formulations.
* #### Model Tuning Parameters
1. nu -> An upper bound on the fraction of margin errors and a lower bound of the fraction of support vectors. Should be in the interval (0, 1].
2. kernel -> Specifies the kernel type to be used in the algorithm.
3. gamma -> Gamma is a hyperparameter that we have to set before the training model. Gamma decides how much curvature we want in a decision boundary.
4. degree -> Degree of the polynomial kernel function ('poly'). Increasing degree parameter leads to higher training times.""",
'LinearSVC':"""### Model
Support vector machines (SVMs) are a set of supervised learning methods used for classification, regression and outliers detection.
A Support Vector Machine is a discriminative classifier formally defined by a separating hyperplane. In other terms, for given known/labelled data points, the SVM outputs an appropriate hyperplane that classifies new cases based on that hyperplane. In 2-dimensional space, this hyperplane is a line separating the plane into two segments, with each class or group lying on either side.
LinearSVC is similar to SVC with kernel='linear'. It has more flexibility in the choice of tuning parameters and is suited for large samples.
* #### Model Tuning Parameters
1. penalty -> Specifies the norm used in the penalization.
2. loss -> Specifies the loss function.
3. C -> Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive.
4. tol -> Tolerance for stopping criteria.
5. dual -> Select the algorithm to either solve the dual or primal optimization problem. Prefer dual=False when n_samples > n_features.""",
'DecisionTreeClassifier':"""### Model
The decision tree is a powerful and popular tool for classification and prediction. A decision tree is a flowchart-like tree structure, where each internal node denotes a test on an attribute, each branch represents an outcome of the test, and each leaf node holds an outcome label.
As with other classifiers, DecisionTreeClassifier takes as input two arrays: an array X, sparse or dense, of shape (n_samples, n_features) holding the training samples, and an array Y of integer values, shape (n_samples,), holding the class labels for the training samples.
It is capable of both binary ([-1,1] or [0,1]) classification and multiclass ([0, …,K-1]) classification.
#### Model Tuning Parameter
1. criterion -> The function to measure the quality of a split. Supported criteria are 'gini' for the Gini impurity and 'entropy' for the information gain.
2. max_depth -> The maximum depth of the tree. If None, then nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples.
3. max_leaf_nodes -> Grow a tree with max_leaf_nodes in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes.
4. max_features -> The number of features to consider when looking for the best split: **{auto, sqrt, log2}**""",
'KNeighborsClassifier':"""## Model
**KNeighborsClassifier :**
KNN is one of the simplest machine learning algorithms, based on the supervised learning technique. The algorithm stores all the available data and classifies a new data point based on similarity: it assumes the new case resembles the existing cases and puts it into the category it is most similar to. At the training phase, the KNN algorithm just stores the dataset; when it gets new data, it classifies that data into the category it most closely matches.
#### Model Tuning Parameters
* n_neighbors -> Number of neighbors to use by default for kneighbors queries.
* weights -> weight function used in prediction. {uniform,distance}
* algorithm-> Algorithm used to compute the nearest neighbors. {'auto', 'ball_tree', 'kd_tree', 'brute'}
* p -> Power parameter for the Minkowski metric. When p = 1, this is equivalent to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
* leaf_size -> Leaf size passed to BallTree or KDTree. This can affect the speed of the construction and query, as well as the memory required to store the tree. The optimal value depends on the nature of the problem.""",
'RadiusNeighborsClassifier':"""### Model
RadiusNeighborsClassifier implements learning based on the number of neighbors within a fixed radius r of each training point, where r is a floating-point value specified by the user.
In cases where the data is not uniformly sampled, radius-based neighbors classification can be a better choice.
#### Model Tuning Parameters
1. **radius** : Range of parameter space to use by default for radius_neighbors queries.
2. **weights** : weight function used in prediction. Possible values {'uniform','distance'}
3. **algorithm** : Algorithm used to compute the nearest neighbors
4. **leaf_size** : Leaf size passed to BallTree or KDTree. This can affect the speed of the construction and query, as well as the memory required to store the tree. The optimal value depends on the nature of the problem.""",
'MultinomialNB':"""### Model
With a multinomial event model, samples (feature vectors) represent the frequencies with which certain events have been generated by a multinomial distribution (p_1, ..., p_n), where p_i is the probability that event i occurs.
The multinomial Naive Bayes classifier is suitable for classification with discrete features. The multinomial distribution normally requires integer feature counts. However, in practice, fractional counts such as tf-idf may also work.
#### Model Tuning Parameters
1. alpha : Additive (Laplace/Lidstone) smoothing parameter (0 for no smoothing).
2. fit_prior : Whether to learn class prior probabilities or not. If false, a uniform prior will be used.
3. class_prior : Prior probabilities of the classes. If specified the priors are not adjusted according to the data.""",
'CategoricalNB':"""### Model
CategoricalNB implements the categorical naive Bayes algorithm for categorically distributed data. It assumes that each feature, which is described by the index i, has its own categorical distribution.
The categorical Naive Bayes classifier is suitable for classification with discrete features that are categorically distributed. The categories of each feature are drawn from a categorical distribution.
#### Model Tuning Parameters
1. alpha : Additive (Laplace/Lidstone) smoothing parameter (0 for no smoothing).
2. fit_prior : Whether to learn class prior probabilities or not. If false, a uniform prior will be used.
3. class_prior : Prior probabilities of the classes. If specified the priors are not adjusted according to the data.""",
'XGBClassifier':"""### Model
XGBoost is an optimized distributed gradient boosting library designed to be highly efficient, flexible and portable. It implements machine learning algorithms under the Gradient Boosting framework. XGBoost provides a parallel tree boosting (also known as GBDT, GBM) that solves many data science problems in a fast and accurate way.
For tuning parameter details, refer to the official API documentation: [Tuning Parameters](https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn) """,
'NearestCentroid':"""### Model
The NearestCentroid classifier is a simple algorithm that represents each class by the centroid of its members. In effect, this makes it similar to the label updating phase of the KMeans algorithm. It also has no parameters to choose, making it a good baseline classifier. It does, however, suffer on non-convex classes, as well as when classes have drastically different variances, as equal variance in all dimensions is assumed.
#### Tuning Parameter
1. **metric** : The metric to use when calculating distance between instances in a feature array. If metric is a string or callable, it must be one of the options allowed by metrics.pairwise.pairwise_distances for its metric parameter. The centroid for the samples corresponding to each class is the point from which the sum of the distances of all samples that belong to that particular class is minimized. If the “manhattan” metric is provided, this centroid is the median; for all other metrics, the centroid is the mean.
2. **shrink_threshold** : Threshold for shrinking centroids to remove features.""",
'Perceptron':"""### Model
The perceptron is an algorithm for supervised learning of binary classifiers.
The algorithm learns the weights for the input signals in order to draw a linear decision boundary. This enables it to distinguish between the two linearly separable classes +1 and -1.
#### Model Tuning Parameters
1. **penalty** ->The penalty (aka regularization term) to be used. {'l2','l1','elasticnet'}
2. **alpha** -> Constant that multiplies the regularization term if regularization is used.
3. **l1_ratio** -> The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1. l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1. Only used if penalty='elasticnet'.
4. **tol** -> The stopping criterion. If it is not None, the iterations will stop when (loss > previous_loss - tol).
5. **early_stopping**-> Whether to use early stopping to terminate training when validation score is not improving. If set to True, it will automatically set aside a stratified fraction of training data as validation and terminate training when the validation score is not improving by at least tol for n_iter_no_change consecutive epochs.
6. **validation_fraction** -> The proportion of training data to set aside as validation set for early stopping. Must be between 0 and 1. Only used if early_stopping is True.
7. **n_iter_no_change** -> Number of iterations with no improvement to wait before early stopping."""
},
'Regression':{
'TF':"""### Neural Network/Deep Learning Model \nDeep learning is a subset of machine learning, which is essentially a neural network with three or more layers. These neural networks attempt to simulate the behavior of the human brain-albeit far from matching its ability-allowing it to 'learn' from large amounts of data. While a neural network with a single layer can still make approximate predictions, additional hidden layers can help to optimize and refine for accuracy.""",
'OrthogonalMatchingPursuit':"""### Model \nOrthogonalMatchingPursuit and orthogonal_mp implements the OMP algorithm for approximating the fit of a linear model with constraints imposed on the number of non-zero coefficients \n
OMP is based on a greedy algorithm that includes at each step the atom most highly correlated with the current residual. It is similar to the simpler matching pursuit (MP) method, but better in that at each iteration, the residual is recomputed using an orthogonal projection on the space of the previously chosen dictionary elements.
#### Tuning Parameters:
1. tol : Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
2. fit_intercept: Whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (i.e. data is expected to be centered).
3. n_nonzero_coefs: Desired number of non-zero entries in the solution. If None (by default) this value is set to 10% of n_features
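A minimal sketch of sparse recovery with OMP (scikit-learn assumed; the synthetic data and parameter values are illustrative):
```python
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.datasets import make_regression

X, y = make_regression(n_features=50, n_informative=5, random_state=0)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=5)
omp.fit(X, y)
print((omp.coef_ != 0).sum())  # number of non-zero coefficients kept
```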
""",
'LinearRegression':"""### Model
Linear regression algorithm attempts to model the relationship between two variables by fitting a linear equation to observed data. One variable is considered to be an independent variable, and the other is considered to be a dependent variable.
LinearRegression fits a linear model with coefficients w = (w1, …, wp) to minimize the residual sum of squares between the observed targets in the dataset, and the targets predicted by the linear approximation.""",
'Ridge':"""### Model
Ridge regression addresses some of the problems of Ordinary Least Squares by imposing a penalty on the size of the coefficients. The ridge coefficients minimize a penalized residual sum of squares: min_w ||Xw - y||_2^2 + alpha * ||w||_2^2.
The complexity parameter alpha controls the amount of shrinkage: the larger the value of alpha, the greater the amount of shrinkage and thus the coefficients become more robust to collinearity.
This model solves a regression model where the loss function is the linear least squares function and regularization is given by the l2-norm. Also known as Ridge Regression or Tikhonov regularization. This estimator has built-in support for multi-variate regression (i.e., when y is a 2d-array of shape (n_samples, n_targets)).
#### Model Tuning Parameters
1. **alpha** -> Regularization strength; must be a positive float. Regularization improves the conditioning of the problem and reduces the variance of the estimates. Larger values specify stronger regularization.
2. **solver** -> Solver to use in the computational routines.""",
'SGDRegressor':"""### Model
Stochastic Gradient Descent (SGD) is a simple yet very efficient approach to fitting linear classifiers and regressors under convex loss functions such as (linear) Support Vector Machines and Logistic Regression. SGD is merely an optimization technique and does not correspond to a specific family of machine learning models. It is only a way to train a model. Often, an instance of SGDClassifier or SGDRegressor will have an equivalent estimator in the scikit-learn API, potentially using a different optimization technique.
#### Model Tuning Parameters
1. **loss** -> The loss function to be used. The possible values are ‘squared_loss’, ‘huber’, ‘epsilon_insensitive’, or ‘squared_epsilon_insensitive’
2. **penalty** -> The penalty (aka regularization term) to be used. Defaults to ‘l2’ which is the standard regularizer for linear SVM models. ‘l1’ and ‘elasticnet’ might bring sparsity to the model (feature selection) not achievable with ‘l2’.
3. **alpha** -> Constant that multiplies the regularization term. The higher the value, the stronger the regularization. Also used to compute the learning rate when learning_rate is set to ‘optimal’.
4. **l1_ratio** -> The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1. l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1. Only used if penalty is ‘elasticnet’.
5. **tol** -> The stopping criterion
6. **learning_rate** -> The learning rate schedule, possible values {'optimal','constant','invscaling','adaptive'}
7. **eta0** -> The initial learning rate for the ‘constant’, ‘invscaling’ or ‘adaptive’ schedules.
8. **power_t** -> The exponent for inverse scaling learning rate.
9. **epsilon** -> Epsilon in the epsilon-insensitive loss functions; only if loss is ‘huber’, ‘epsilon_insensitive’, or ‘squared_epsilon_insensitive’.""",
'ExtraTreesRegressor':"""### Model
ExtraTrees Regressor model implements a meta estimator that fits a number of randomized decision trees (a.k.a. extra-trees) on various sub-samples of the dataset and uses averaging to improve the predictive accuracy and control over-fitting.
#### Model Tuning Parameters
1. n_estimators: The number of trees in the forest.
2. criterion: The function to measure the quality of a split. Supported criteria are “mse” for the mean squared error, which is equal to variance reduction as feature selection criterion, and “mae” for the mean absolute error.
3. max_depth: The maximum depth of the tree. If None, then nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples.
4. max_features: The number of features to consider when looking for the best split""",
'RandomForestRegressor':"""### Model
A random forest is a meta estimator that fits a number of classifying decision trees on various sub-samples of the dataset and uses averaging to improve the predictive accuracy and control over-fitting. The sub-sample size is controlled with the <code>max_samples</code> parameter if <code>bootstrap=True</code> (default), otherwise the whole dataset is used to build each tree.
#### Model Tuning Parameters
1. n_estimators : The number of trees in the forest.
2. criterion : The function to measure the quality of a split. Supported criteria are 'mse' for the mean squared error, which is equal to variance reduction as feature selection criterion, and 'mae' for the mean absolute error.
3. max_depth : The maximum depth of the tree.
4. max_features : The number of features to consider when looking for the best split:
5. bootstrap : Whether bootstrap samples are used when building trees. If False, the whole dataset is used to build each tree.
6. oob_score : Whether to use out-of-bag samples to estimate the generalization accuracy.""",
'AdaBoostRegressor':"""### Model
AdaBoost was one of the first boosting ensemble algorithms to be adopted in practical problem solving. It works by combining multiple 'weak classifiers' into a single 'strong classifier.' The core concept of the algorithm is to fit a sequence of weak learners on repeatedly modified versions of the data. The predictions from all the weak learners are then combined through a weighted majority vote or sum to produce the final prediction. The data modifications at each iteration consist of applying weights to each of the training samples. Initially, those weights are all set so that the first iteration simply trains a weak learner on the original data. For every successive iteration, the sample weights are individually modified, and the algorithm is reapplied to the reweighted data. At a given iteration, the training examples that were incorrectly predicted by the model at the previous iteration have their weights increased, whereas the weights are decreased for data that was predicted accurately. As iterations continue, examples that are difficult to predict receive ever-increasing influence, forcing each subsequent weak learner to concentrate on the examples missed by the previous ones in the sequence.
#### Model Tuning Parameters:
1. base_estimator: The base estimator from which the boosted ensemble is built. If None, then the base estimator is DecisionTreeRegressor initialized with max_depth=3.
2. n_estimators: The maximum number of estimators at which boosting is terminated. In case of perfect fit, the learning procedure is stopped early.
3. learning_rate: Learning rate shrinks the contribution of each regressor by learning_rate. There is a trade-off between learning_rate and n_estimators.
4. loss: The loss function to use when updating the weights after each boosting iteration.
#### Note: For better performance of the Adaboost model, the base estimator (Decision Tree Model) can be fine-tuned.
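A minimal sketch mirroring the parameters above (assuming a scikit-learn version where base_estimator is the parameter name; the data and values are illustrative):
```python
from sklearn.ensemble import AdaBoostRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.datasets import make_regression

X, y = make_regression(n_samples=300, random_state=0)
# max_depth=3 matches the default base estimator mentioned above.
reg = AdaBoostRegressor(
    base_estimator=DecisionTreeRegressor(max_depth=3),
    n_estimators=50,
    learning_rate=1.0,
    loss="linear",
)
reg.fit(X, y)
print(reg.score(X, y))
```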
""",
'GradientBoostingRegressor':"""### Model
Gradient Boosting builds an additive model in a forward stage-wise fashion; it allows for the optimization of arbitrary differentiable loss functions. In each stage a regression tree is fit on the negative gradient of the given loss function.
#### Model Tuning Parameters
1. loss : Loss function to be optimized.
2. learning_rate: Learning rate shrinks the contribution of each tree by learning_rate. There is a trade-off between learning_rate and n_estimators.
3. n_estimators : The number of trees in the forest.
4. criterion : The function to measure the quality of a split. Supported criteria are 'friedman_mse' for the mean squared error with improvement score by Friedman, 'mse' for mean squared error, and 'mae' for the mean absolute error. The default value of 'friedman_mse' is generally the best as it can provide a better approximation in some cases.
5. max_depth : The maximum depth of the individual regression estimators. The maximum depth limits the number of nodes in the tree. Tune this parameter for best performance; the best value depends on the interaction of the input variables.
6. max_features : The number of features to consider when looking for the best split:
7. n_iter_no_change : Used to decide whether early stopping will be used to terminate training when validation score is not improving. By default it is set to None to disable early stopping. If set to a number, it will set aside <code>validation_fraction</code> size of the training data as validation and terminate training when the validation score is not improving in all of the previous <code>n_iter_no_change</code> iterations.
8. tol : Tolerance for the early stopping.""",
'HistGradientBoostingRegressor':"""### Model
Histogram-based Gradient Boosting Regression Tree. This estimator is much faster than GradientBoostingRegressor for big datasets (n_samples >= 10,000). This estimator has native support for missing values (NaNs).
#### Tuning Parameters
[Reference](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.HistGradientBoostingRegressor.html)
1. **loss**: The loss function to use in the boosting process.
2. **learning_rate**: The learning rate, also known as shrinkage. This is used as a multiplicative factor for the leaves values.
3. **max_iter**: The maximum number of iterations of the boosting process.
4. **max_depth**: The maximum depth of each tree. The depth of a tree is the number of edges to go from the root to the deepest leaf.
5. **l2_regularization**: The L2 regularization parameter.
6. **early_stopping**: If 'auto', early stopping is enabled if the sample size is larger than 10000. If True, early stopping is enabled, otherwise early stopping is disabled.
7. **n_iter_no_change**: Used to determine when to 'early stop'. The fitting process is stopped when none of the last n_iter_no_change scores are better than the n_iter_no_change - 1 -th-to-last one, up to some tolerance.
8. **tol**: The absolute tolerance to use when comparing scores during early stopping.
9. **scoring**: Scoring parameter to use for early stopping. """,
'SVR':"""### Model
Support vector machines (SVMs) are a set of supervised learning methods used for classification, regression and outliers detection.
A Support Vector Machine is a discriminative classifier formally defined by a separating hyperplane. In other terms, for given known/labelled data points, the SVM outputs an appropriate hyperplane that classifies new cases based on that hyperplane. In 2-dimensional space, this hyperplane is a line separating the plane into two segments, with each class or group lying on either side.
Here we will use SVR; the SVR implementation is based on libsvm. The fit time scales at least quadratically with the number of samples and may be impractical beyond tens of thousands of samples.
#### Model Tuning Parameters
1. C : Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive. The penalty is a squared l2 penalty.
2. kernel : Specifies the kernel type to be used in the algorithm.
3. gamma : Gamma is a hyperparameter that we have to set before the training model. Gamma decides how much curvature we want in a decision boundary.
4. degree : Degree of the polynomial kernel function ('poly'). Ignored by all other kernels. Increasing degree parameter leads to higher training times.
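A minimal sketch (scikit-learn assumed; values are illustrative). As with SVC, scaling the features first usually helps:
```python
from sklearn.datasets import make_regression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR

X, y = make_regression(n_samples=200, n_features=5, noise=10.0, random_state=0)
model = make_pipeline(StandardScaler(), SVR(C=1.0, kernel="rbf", gamma="scale"))
model.fit(X, y)
print(model.score(X, y))
```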
""",
'NuSVR':"""### Model
Support vector machines (SVMs) are a set of supervised learning methods used for classification, regression and outliers detection.
A Support Vector Machine is a discriminative classifier formally defined by a separating hyperplane. In other terms, for given known/labelled data points, the SVM outputs an appropriate hyperplane that classifies new cases based on that hyperplane. In 2-dimensional space, this hyperplane is a line separating the plane into two segments, with each class or group lying on either side.
Here we will use NuSVR; the NuSVR implementation is based on libsvm. Similar to NuSVC, NuSVR uses a parameter nu to control the number of support vectors. However, unlike NuSVC, where nu replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
#### Model Tuning Parameters
1. nu : An upper bound on the fraction of training errors and a lower bound of the fraction of support vectors. Should be in the interval (0, 1]. By default 0.5 will be taken.
2. C : Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive. The penalty is a squared l2 penalty.
3. kernel : Specifies the kernel type to be used in the algorithm.
4. gamma : Gamma is a hyperparameter that we have to set before the training model. Gamma decides how much curvature we want in a decision boundary.
5. degree : Degree of the polynomial kernel function ('poly'). Ignored by all other kernels. Using degree 1 is similar to using a linear kernel. Also, increasing the degree parameter leads to higher training times.""",
'LinearSVR':"""### Model
Support vector machines (SVMs) are a set of supervised learning methods used for classification, regression and outliers detection.
A Support Vector Machine is a discriminative classifier formally defined by a separating hyperplane. In other terms, for given known/labelled data points, the SVM outputs an appropriate hyperplane that classifies new cases based on that hyperplane. In 2-dimensional space, this hyperplane is a line separating the plane into two segments, with each class or group lying on either side.
LinearSVR is similar to SVR with kernel='linear'. It has more flexibility in the choice of tuning parameters and is suited for large samples.
#### Model Tuning Parameters
1. epsilon : Epsilon parameter in the epsilon-insensitive loss function.
2. loss : Specifies the loss function. 'epsilon_insensitive' is the standard epsilon-insensitive (L1) loss used by SVR, while 'squared_epsilon_insensitive' is its square (L2 loss).
3. C : Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive.
4. tol : Tolerance for stopping criteria.
5. dual : Select the algorithm to either solve the dual or primal optimization problem. Prefer dual=False when n_samples > n_features.""",
'DecisionTreeRegressor':"""### Model
The decision tree is a powerful and popular tool for classification and prediction. A decision tree is a flowchart-like tree structure, where each internal node denotes a test on an attribute, each branch represents an outcome of the test, and each leaf node holds an outcome label.
Decision trees can also be applied to regression problems, using the DecisionTreeRegressor class.
As in the classification setting, the fit method will take as argument arrays X and y, only that in this case y is expected to have floating point values instead of integer values.
#### Model Tuning Parameter
1. criterion -> The function to measure the quality of a split. Supported criteria are “mse” for the mean squared error, which is equal to variance reduction as feature selection criterion and minimizes the L2 loss using the mean of each terminal node, “friedman_mse”, which uses mean squared error with Friedman’s improvement score for potential splits, “mae” for the mean absolute error, which minimizes the L1 loss using the median of each terminal node, and “poisson” which uses reduction in Poisson deviance to find splits.
2. max_depth -> The maximum depth of the tree. If None, then nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples.
3. max_leaf_nodes -> Grow a tree with max_leaf_nodes in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes.
4. max_features -> The number of features to consider when looking for the best split: **{auto, sqrt, log2}**""",
'KNeighborsRegressor':"""### Model
KNN is one of the simplest machine learning algorithms, based on the supervised learning technique. The algorithm stores all the available data and predicts a new data point based on similarity: it assumes the new case resembles the existing cases. At the training phase, the KNN algorithm just stores the dataset; when it gets new data, it makes a prediction from the most similar stored data.
#### Model Tuning Parameters
1. n_neighbors -> Number of neighbors to use by default for kneighbors queries.
2. weights -> weight function used in prediction. {**uniform,distance**}
3. algorithm -> Algorithm used to compute the nearest neighbors. {**'auto', 'ball_tree', 'kd_tree', 'brute'**}
4. p -> Power parameter for the Minkowski metric. When p = 1, this is equivalent to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2.
5. leaf_size -> Leaf size passed to BallTree or KDTree. This can affect the speed of the construction and query, as well as the memory required to store the tree. The optimal value depends on the nature of the problem.""",
'Lasso':"""### Model
Linear Model trained with L1 prior as regularizer (aka the Lasso)
The Lasso is a linear model that estimates sparse coefficients. It is useful in some contexts due to its tendency to prefer solutions with fewer non-zero coefficients, effectively reducing the number of features upon which the given solution is dependent. For this reason Lasso and its variants are fundamental to the field of compressed sensing.
#### Model Tuning Parameter
1. **alpha** -> Constant that multiplies the L1 term. Defaults to 1.0. alpha = 0 is equivalent to an ordinary least square, solved by the LinearRegression object. For numerical reasons, using alpha = 0 with the Lasso object is not advised.
2. **selection** -> If set to ‘random’, a random coefficient is updated every iteration rather than looping over features sequentially by default. This (setting to ‘random’) often leads to significantly faster convergence especially when tol is higher than 1e-4.
3. **tol** -> The tolerance for the optimization: if the updates are smaller than tol, the optimization code checks the dual gap for optimality and continues until it is smaller than tol.
4. **max_iter** -> The maximum number of iterations.""",
'Lars':"""### Model
Least-angle regression (LARS) is a regression algorithm for high-dimensional data, developed by Bradley Efron, Trevor Hastie, Iain Johnstone and Robert Tibshirani. LARS is similar to forward stepwise regression. At each step, it finds the feature most correlated with the target. When there are multiple features having equal correlation, instead of continuing along the same feature, it proceeds in a direction equiangular between the features.
### Tuning parameters
1. **jitter** -> Upper bound on a uniform noise parameter to be added to the y values, to satisfy the model’s assumption of one-at-a-time computations. Might help with stability.
2. **eps** -> The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. Unlike the tol parameter in some iterative optimization-based algorithms, this parameter does not control the tolerance of the optimization.
3. **n_nonzero_coefs** -> Target number of non-zero coefficients. Use np.inf for no limit.
4. **precompute** -> Whether to use a precomputed Gram matrix to speed up calculations. """,
'BayesianRidge':"""### Model
Bayesian Regression can be very useful when we have insufficient data in the dataset or the data is poorly distributed. The output of a Bayesian Regression model is obtained from a probability distribution, as compared to regular regression techniques where the output is just obtained from a single value of each attribute.
Bayesian regression techniques can be used to include regularization parameters in the estimation procedure: the regularization parameter is not set in a hard sense but tuned to the data at hand.
The Bayesian approach, however, is not a good choice when a large amount of data is available for the dataset.
#### Model Tuning Parameters
1. **alpha_1** : shape parameter for the Gamma distribution prior over the alpha parameter.
2. **alpha_2** : inverse scale parameter (rate parameter) for the Gamma distribution prior over the alpha parameter.
3. **lambda_1** : shape parameter for the Gamma distribution prior over the lambda parameter.
4. **lambda_2** : inverse scale parameter (rate parameter) for the Gamma distribution prior over the lambda parameter.""",
'LassoLars':"""### Model
LassoLars is a lasso model implemented using the LARS algorithm, and unlike the implementation based on coordinate descent, this yields the exact solution, which is piecewise linear as a function of the norm of its coefficients.
### Tuning parameters
1. **fit_intercept** -> whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations
2. **alpha** -> Constant that multiplies the penalty term. Defaults to 1.0. alpha = 0 is equivalent to an ordinary least square, solved by LinearRegression. For numerical reasons, using alpha = 0 with the LassoLars object is not advised and you should prefer the LinearRegression object.
3. **eps** -> The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. Unlike the tol parameter in some iterative optimization-based algorithms, this parameter does not control the tolerance of the optimization.
4. **max_iter** -> Maximum number of iterations to perform.
5. **positive** -> Restrict coefficients to be >= 0. Be aware that you might want to remove fit_intercept which is set True by default. Under the positive restriction the model coefficients will not converge to the ordinary-least-squares solution for small values of alpha. Only coefficients up to the smallest alpha value (alphas_[alphas_ > 0.].min() when fit_path=True) reached by the stepwise Lars-Lasso algorithm are typically in congruence with the solution of the coordinate descent Lasso estimator.
6. **precompute** -> Whether to use a precomputed Gram matrix to speed up calculations. """,
'XGBRegressor':"""### Model
XGBoost is an optimized distributed gradient boosting library designed to be highly efficient, flexible and portable. It implements machine learning algorithms under the Gradient Boosting framework. XGBoost provides a parallel tree boosting (also known as GBDT, GBM) that solves many data science problems in a fast and accurate way.
For tuning parameter details, refer to the official API documentation: [Tuning Parameters](https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn) """,
'ARDRegressor':"""### Model
Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of the regression model are assumed to follow Gaussian distributions. The parameters lambda (precisions of the distributions of the weights) and alpha (precision of the distribution of the noise) are also estimated. The estimation is done by an iterative procedure (evidence maximization).
#### Parameters:
1. **n_iter**: Maximum number of iterations.
2. **tol**: Stop the algorithm if w has converged.
3. **alpha_1**: shape parameter for the Gamma distribution prior over the alpha parameter.
4. **alpha_2**: inverse scale parameter (rate parameter) for the Gamma distribution prior over the alpha parameter.
5. **lambda_1**: shape parameter for the Gamma distribution prior over the lambda parameter.
6. **lambda_2**: inverse scale parameter (rate parameter) for the Gamma distribution prior over the lambda parameter.""",
'CatBoostRegressor':"""### Model
CatBoost is an algorithm for gradient boosting on decision trees. Developed by Yandex researchers and engineers, it is the successor of the MatrixNet algorithm that is widely used within the company for ranking tasks, forecasting and making recommendations.
#### Tuning parameters
1. **learning_rate**: The learning rate. Used for reducing the gradient step.
2. **l2_leaf_reg**: Coefficient at the L2 regularization term of the cost function. Any positive value is allowed.
3. **bootstrap_type**: Bootstrap type. Defines the method for sampling the weights of objects.
4. **subsample**: Sample rate for bagging. This parameter can be used if one of the following bootstrap types is selected: Poisson, Bernoulli, MVS.
For more information refer: [API](https://catboost.ai/docs/concepts/python-reference_catboostregressor.html)""",
'GammaRegressor':"""### Model
Generalized Linear Model with a Gamma distribution. This regressor uses the 'log' link function.
#### Tuning Parameters
1. alpha: Constant that multiplies the penalty term and thus determines the regularization strength.
2. max_iter: The maximal number of iterations for the solver.
3. tol: Stopping criterion.
Reference: https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.GammaRegressor.html""",
'LGBMRegressor':"""### Model
LightGBM is a gradient boosting framework that uses tree based learning algorithms. It is designed to be distributed and efficient with the following advantages:
- Faster training speed and higher efficiency.
- Lower memory usage.
- Better accuracy.
- Support of parallel, distributed, and GPU learning.
- Capable of handling large-scale data.
### Tuning parameters
1. **boosting_type** - 'gbdt', traditional Gradient Boosting Decision Tree. 'dart', Dropouts meet Multiple Additive Regression Trees. 'goss', Gradient-based One-Side Sampling. 'rf', Random Forest
2. **num_leaves** - Maximum tree leaves for base learners.
3. **max_depth** - Maximum tree depth for base learners, <=0 means no limit.
4. **learning_rate** - Boosting learning rate. You can use the callbacks parameter of the fit method to shrink/adapt the learning rate during training using the reset_parameter callback. Note that this will ignore the learning_rate argument in training.
5. **min_split_gain** - Minimum loss reduction required to make a further partition on a leaf node of the tree.
6. **min_child_samples** - Minimum number of data needed in a child (leaf).
For more information refer: [API](https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRegressor.html)""",
'RadiusNeighborsRegressor':"""### Model
RadiusNeighborsRegressor implements learning based on the neighbors within a fixed radius r of the query point, where r is a floating-point value specified by the user.
#### Tuning parameters
1. **radius**: Range of parameter space to use by default for radius_neighbors queries.
2. **algorithm**: Algorithm used to compute the nearest neighbors:
3. **leaf_size**: Leaf size passed to BallTree or KDTree.
4. **p**: Power parameter for the Minkowski metric.
5. **metric**: the distance metric to use for the tree.
6. **outlier_label**: label for outlier samples
7. **weights**: weight function used in prediction.
For more information refer: [API](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.RadiusNeighborsRegressor.html#sklearn.neighbors.RadiusNeighborsRegressor)""",
'PassiveAggressiveRegressor':"""### Model
The passive-aggressive algorithms are a family of algorithms for large-scale learning. They are similar to the Perceptron in that they do not require a learning rate. However, contrary to the Perceptron, they include a regularization parameter C.
#### Model Tuning Parameters
1. **C** ->Maximum step size (regularization). Defaults to 1.0.
2. **max_iter** ->The maximum number of passes over the training data (aka epochs). It only impacts the behavior in the fit method, and not the partial_fit method.
3. **tol**->The stopping criterion. If it is not None, the iterations will stop when (loss > previous_loss - tol).
4. **early_stopping**->Whether to use early stopping to terminate training when validation score is not improving. If set to True, it will automatically set aside a fraction of training data as validation and terminate training when the validation score is not improving by at least tol for n_iter_no_change consecutive epochs.
5. **validation_fraction**->The proportion of training data to set aside as validation set for early stopping. Must be between 0 and 1. Only used if early_stopping is True.
6. **n_iter_no_change**->Number of iterations with no improvement to wait before early stopping.
7. **shuffle**->Whether or not the training data should be shuffled after each epoch.
8. **loss**->The loss function to be used: epsilon_insensitive: equivalent to PA-I in the reference paper. squared_epsilon_insensitive: equivalent to PA-II in the reference paper.
9. **epsilon**->If the difference between the current prediction and the correct label is below this threshold, the model is not updated.""",
'HuberRegressor':"""### Model
Linear regression model that is robust to outliers. The Huber Regressor optimizes the squared loss for the samples where |(y - X'w) / sigma| < epsilon and the absolute loss for the samples where |(y - X'w) / sigma| > epsilon, where w and sigma are parameters to be optimized. The parameter sigma makes sure that if y is scaled up or down by a certain factor, one does not need to rescale epsilon to achieve the same robustness. Note that this does not take into account the fact that the different features of X may be of different scales.
This makes sure that the loss function is not heavily influenced by the outliers while not completely ignoring their effect.
#### Tuning Parameters:
1. epsilon: The parameter epsilon controls the number of samples that should be classified as outliers. The smaller the epsilon, the more robust it is to outliers.
2. max_iter: Maximum number of iterations that scipy.optimize.minimize(method="L-BFGS-B") should run for.
3. alpha: Regularization parameter.
4. tol: The iteration will stop when max{|proj g_i | i = 1, ..., n} <= tol where pg_i is the i-th component of the projected gradient.
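A minimal sketch showing robustness to injected outliers (scikit-learn assumed; the data and parameter values are illustrative):
```python
import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import HuberRegressor

X, y = make_regression(n_samples=200, n_features=2, noise=4.0, random_state=0)
y[:5] += 500  # inject a few gross outliers
huber = HuberRegressor(epsilon=1.35, alpha=0.0001)
huber.fit(X, y)
print(huber.outliers_.sum())  # samples the model treated as outliers
```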
""",
'ElasticNet':"""### Model
Elastic Net first emerged as a result of critique on Lasso, whose variable selection can be too dependent on data and thus unstable. The solution is to combine the penalties of Ridge regression and Lasso to get the best of both worlds.
#### Model Tuning Parameters
1. alpha : Constant that multiplies the penalty terms.
2. l1_ratio : The ElasticNet mixing parameter.
3. max_iter : The maximum number of iterations.
4. tol : The tolerance for the optimization: if the updates are smaller than tol, the optimization code checks the dual gap for optimality and continues until it is smaller than tol.
5. selection : If set to 'random', a random coefficient is updated every iteration rather than looping over features sequentially by default. 'random' often leads to significantly faster convergence especially when tol is higher than 1e-4.
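A minimal sketch combining the L1/L2 penalties described above (scikit-learn assumed; values are illustrative):
```python
from sklearn.datasets import make_regression
from sklearn.linear_model import ElasticNet

X, y = make_regression(n_features=20, n_informative=5, random_state=0)
# l1_ratio=0.5 gives an even blend of lasso (L1) and ridge (L2) penalties.
enet = ElasticNet(alpha=0.5, l1_ratio=0.5)
enet.fit(X, y)
print((enet.coef_ != 0).sum())  # sparsity induced by the L1 part
```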
""",
'PoissonRegressor':"""### Model
Poisson regression is a generalized linear model form of regression used to model count data and contingency tables. It assumes the response variable or target variable Y has a Poisson distribution, and assumes the logarithm of its expected value can be modeled by a linear combination of unknown parameters. It is sometimes known as a log-linear model, especially when used to model contingency tables.
#### Model Tuning Parameters
1. **alpha** -> Constant that multiplies the penalty term and thus determines the regularization strength. alpha = 0 is equivalent to unpenalized GLMs.
2. **tol** -> Stopping criterion.
3. **max_iter** -> The maximal number of iterations for the solver.""",
}
}
procedure={
'datafetch':"### Data Fetch\n Pandas is an open-source, BSD-licensed library providing high-performance,easy-to-use data manipulation and data analysis tools.",
'missing':"### Data Preprocessing\n Since the majority of the machine learning models in the Sklearn library doesn't handle string category data and Null value,we have to explicitly remove or replace null values.The below snippet have functions, which removes the null value if any exists.",
'encoding':"### Data Encoding\n Converting the string classes data in the datasets by encoding them to integer either using OneHotEncoding or LabelEncoding",
'datasplit':"### Train & Test\n The train-test split is a procedure for evaluating the performance of an algorithm.The procedure involves taking a dataset and dividing it into two subsets.The first subset is utilized to fit/train the model.The second subset is used for prediction.The main motive is to estimate the performance of the model on new data.",
'metrics':"### Accuracy Metrics\n Performance metrics are a part of every machine learning pipeline. They tell you if you're making progress, and put a number on it. All machine learning models,whether it's linear regression, or a SOTA technique like BERT, need a metric to judge performance.",
'x&y':"### Feature Selection\n It is the process of reducing the number of input variables when developing a predictive model.Used to reduce the number of input variables to reduce the computational cost of modelling and,in some cases,to improve the performance of the model.",
'cor_matrix': "### Correlation Matrix\n In order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns.",
'rescale':"### Data Rescaling\n Feature scaling or Data scaling is a method used to normalize the range of independent variables or features of data. In data processing, it is also known as data normalization",
'image_fetch':"### Image Fetch\n Fetching image dataset from local storage or from network.",
'image_download':"### Downloading Image\n Downloading Image dataset from the URL using python request library.",
'image_decomp':"### File Decompression\n Decompressing the archive file using either zipfile or tarfile library and store it in local directory.",
'image_sample':"### Sample Image\n Using OpenCV to display sample images from each available target classes.",
'image_features':"### Feature Selection\n Creating X and Y features from the flatten image dataset.",
'image_cleaning':"### Image Preprocessing\n Preprocessing Image to appropriate matrix format for Machine learning."
} |
#!/usr/bin/env python3
# Label key for repair state
# (IN_SERVICE, OUT_OF_POOL, READY_FOR_REPAIR, IN_REPAIR, AFTER_REPAIR)
REPAIR_STATE = "REPAIR_STATE"
# Annotation key for the last update time of the repair state
REPAIR_STATE_LAST_UPDATE_TIME = "REPAIR_STATE_LAST_UPDATE_TIME"
# Annotation key for the last email time for jobs on node in repair
REPAIR_STATE_LAST_EMAIL_TIME = "REPAIR_STATE_LAST_EMAIL_TIME"
# Annotation key for unhealthy rules
REPAIR_UNHEALTHY_RULES = "REPAIR_UNHEALTHY_RULES"
# Annotation key for whether the node is in repair cycle.
# An unschedulable node that is not in repair cycle can be manually repaired
# by administrator without repair cycle interruption.
REPAIR_CYCLE = "REPAIR_CYCLE"
# Annotation key for repair message - what phase the node is undergoing
REPAIR_MESSAGE = "REPAIR_MESSAGE"
|
######################################################################################################################
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Amazon Software License (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://aws.amazon.com/asl/ #
# #
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
BOOLEAN_FALSE_VALUES = [
"false",
"no",
"disabled",
"off",
"0"
]
BOOLEAN_TRUE_VALUES = [
"true",
"yes",
"enabled",
"on",
"1"
]
# name of environment variable that holds the name of the configuration table
ENV_CONFIG_TABLE = "CONFIG_TABLE"
ENV_CONFIG_BUCKET = "CONFIG_BUCKET"
TASKS_OBJECTS = "TaskConfigurationObjects"
# names of attributes in configuration
# name of the action
CONFIG_ACTION_NAME = "Action"
# debug parameter
CONFIG_DEBUG = "Debug"
# notifications for started/ended tasks
CONFIG_TASK_NOTIFICATIONS = "TaskNotifications"
# list of cross account roles
CONFIG_ACCOUNTS = "Accounts"
# name of alternative cross account role
CONFIG_TASK_CROSS_ACCOUNT_ROLE_NAME = "CrossAccountRole"
# description
CONFIG_DESCRIPTION = "Description"
# Switch to enable/disable task
CONFIG_ENABLED = "Enabled"
# tag filter for tags of source resource of an event
CONFIG_EVENT_SOURCE_TAG_FILTER = "SourceEventTagFilter"
# cron expression interval for time/date based tasks
CONFIG_INTERVAL = "Interval"
# internal task
CONFIG_INTERNAL = "Internal"
# name of the task
CONFIG_TASK_NAME = "Name"
# parameters of a task
CONFIG_PARAMETERS = "Parameters"
# switch to indicate if resource in the account of the scheduler should be processed
CONFIG_THIS_ACCOUNT = "ThisAccount"
# timezone for time/date scheduled task
CONFIG_TIMEZONE = "Timezone"
# tag filter to select resources processed by the task
CONFIG_TAG_FILTER = "TagFilter"
# regions where to select/process resources
CONFIG_REGIONS = "Regions"
# dryrun switch, passed to the tasks action
CONFIG_DRYRUN = "Dryrun"
# events that trigger the task
CONFIG_EVENTS = "Events"
# event scopes
CONFIG_EVENT_SCOPES = "EventScopes"
# stack id if created from cloudformation stack
CONFIG_STACK_ID = "StackId"
# action timeout
CONFIG_TASK_TIMEOUT = "TaskTimeout"
# action select memory
CONFIG_TASK_SELECT_SIZE = "SelectSize"
# action execute memory
CONFIG_TASK_EXECUTE_SIZE = "ExecuteSize"
# action completion memory
CONFIG_TASK_COMPLETION_SIZE = "CompletionSize"
# action completion memory when running in ECS
CONFIG_ECS_COMPLETION_MEMORY = "CompletionEcsMemoryValue"
# action select memory when running in ECS
CONFIG_ECS_SELECT_MEMORY = "SelectEcsMemoryValueValue"
# action execute memory when running in ECS
CONFIG_ECS_EXECUTE_MEMORY = "ExecuteEcsMemoryValue"
# Task metrics
CONFIG_TASK_METRICS = "TaskMetrics"
|
{
"targets": [
{
"target_name": "ecdh",
"include_dirs": ["<!(node -e \"require('nan')\")"],
"cflags": ["-Wall", "-O2"],
"sources": ["ecdh.cc"],
"conditions": [
["OS=='win'", {
"conditions": [
[
"target_arch=='x64'", {
"variables": {
"openssl_root%": "C:/OpenSSL-Win64"
},
}, {
"variables": {
"openssl_root%": "C:/OpenSSL-Win32"
}
}
]
],
"libraries": [
"-l<(openssl_root)/lib/libeay32.lib",
],
"include_dirs": [
"<(openssl_root)/include",
],
}, {
"conditions": [
[
"target_arch=='ia32'", {
"variables": {
"openssl_config_path": "<(nodedir)/deps/openssl/config/piii"
}
}
],
[
"target_arch=='x64'", {
"variables": {
"openssl_config_path": "<(nodedir)/deps/openssl/config/k8"
},
}
],
[
"target_arch=='arm'", {
"variables": {
"openssl_config_path": "<(nodedir)/deps/openssl/config/arm"
}
}
],
[
"target_arch=='arm64'", {
"variables": {
"openssl_config_path": "<(nodedir)/deps/openssl/config/aarch64"
}
}
],
],
"include_dirs": [
"<(nodedir)/deps/openssl/openssl/include",
"<(openssl_config_path)"
]
}
]]
}
]
}
|
class Contact:
def __init__(self, name, phone, email):
self.name = name
self.phone = phone
self.email = email
|
"""
Discover & provide the log group name
"""
class LogGroupProvider(object):
"""
Resolve the name of log group given the name of the resource
"""
@staticmethod
def for_lambda_function(function_name):
"""
Returns the CloudWatch Log Group Name created by default for the AWS Lambda function with given name
Parameters
----------
function_name : str
Name of the Lambda function
Returns
-------
str
Default Log Group name used by this function
"""
return "/aws/lambda/{}".format(function_name)
|
# class juego:
# def __init__(self, tablero):
# self.tablero = tablero
# for i in self.tablero:
# print(i)
# def __str__():
# return
# def mover(self, letra, posicion):
num=[["1","3","4","5"],["8","10","2","6"],["7","9","11"," "]]
#t=[["a","h","ñ","u","b","i","o"], ["v","c","j","p","w","d","k"], ["u","x", "e"," ","l", "r", "f"], ["m", "s", "y", "g", "n", "t", "z"]]
#juego(num)
# Print the board row by row.
for row in num:
    print(row)

# Locate the blank tile: lists have no find() method, so test membership
# with "in" and use index() on the row that contains the blank.
for row_index, row in enumerate(num):
    if " " in row:
        print(row_index, row.index(" "))
|
'''
This module contains some exception classes.
'''
class SecondaryStructureError(Exception):
    '''
    Raised when the secondary structure input is not valid.
    '''
    def __init__(self, residue, value):
        message = '''
        ERROR: Secondary structure input was not parsed correctly.
        Please make sure the value after the C alpha shift is
        one of the following:
        - alpha : a, alpha, h, helix
        - beta  : b, beta, sheet, strand
        - coil  : c, coil, r (assumed to be 50%% alpha and 50%% beta)
        - A number between 0 and 1. 1 = 100%% alpha helix, 0 = 100%% beta sheet
        The value given for residue %s is %s''' % (residue, value)
        Exception.__init__(self, message)

class ShiftMatrixStatesOrder(Exception):
    '''
    Raised when the order of the states in the chemical shift file is
    incorrect.
    '''
    def __init__(self, file_):
        message = '''
        ERROR: The order of the states in the file containing the
        chemical shifts is different from the expected order. Please
        correct this. File: %s''' % (file_)
        Exception.__init__(self, message)
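# A minimal usage sketch (assumed, not part of the original module); the
# validator below is hypothetical, only the exception class is real.
def validate_structure(residue, value):
    allowed = {'a', 'alpha', 'h', 'helix',
               'b', 'beta', 'sheet', 'strand',
               'c', 'coil', 'r'}
    text = str(value).lower()
    try:
        valid = 0.0 <= float(text) <= 1.0
    except ValueError:
        valid = text in allowed
    if not valid:
        raise SecondaryStructureError(residue, value)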
|
class get_method_name_decorator:
    '''
    Descriptor that records each decorated function on its owning class.
    '''
    def __init__(self, fn):
        self.fn = fn
    def __set_name__(self, owner, name):
        # Called once when the owning class is created: register the raw
        # function in the class-level method_names set, then rebind the
        # plain function so normal attribute access keeps working.
        owner.method_names.add(self.fn)
        setattr(owner, name, self.fn)
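# A minimal usage sketch (assumed, not part of the original): the owning
# class must define a class-level `method_names` set for the registration
# in __set_name__ to work.
class Registry:
    method_names = set()

    @get_method_name_decorator
    def handler(self):
        return "handled"

# __set_name__ ran at class-creation time, so the method is both callable
# and registered in Registry.method_names.
print(Registry().handler())                            # handled
print({fn.__name__ for fn in Registry.method_names})   # {'handler'}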
|
'''
Exercise 2: from a resit paper.
You must use the file textanalysis.py to answer this question. Write a function
get_words_starting_with(text, letter) that returns the list of words starting
with letter in the string text. The result should not be case sensitive, e.g. ’about’ should
be returned if the letter ’a’ or ’A’ is given as a parameter. For simplicity, we assume for exercises
2, 3, and 4 that the text does not have any punctuation, and words are separated by at least one
blank space.
For example, using the variable sample_text we should obtain:
>>> get_words_starting_with (sample_text, ’a’)
[’As’, ’a’, ’about’, ’adding’, ’a’, ’ago’, ’a’,
’around’, ’Amsterdam’, ’a’, ’and’, ’an’, ’about’,
’a’, ’ABC’, ’appeal’, ’as’, ’a’, ’a’, ’a’]
>>> get_words_starting_with(sample_text, ’z’)
[]
Hint: You may want to use the method split() from the str type.
Exercise 3: from a resit paper.
As you can see in question 2, there are many repetitions of the word ’a’ in the list. Improve
your solution so no repetition of the same word occurs in the list.
>>> get_words_starting_with(sample_text, ’a’)
[’As’, ’a’, ’about’, ’adding’, ’ago’, ’around’,
’Amsterdam’, ’and’, ’an’, ’ABC’, ’appeal’, ’as’]
'''
def get_words_starting_with(text, letter):
    word = ''
    result = []               # exercise 2: all matches, repetitions kept
    no_repeated_result = []   # exercise 3: first occurrences only, order kept
    # A trailing blank makes sure the final word is flushed too.
    for ch in text.strip() + ' ':
        if ch == ' ':
            # Guard against empty words caused by consecutive blanks.
            if word and word[0].lower() == letter.lower():
                result.append(word)
                if word not in no_repeated_result:
                    no_repeated_result.append(word)
            word = ''
        else:
            word += ch
    return result, no_repeated_result

print(get_words_starting_with(input('Enter your sentence: '), input('Enter the letter: ')))
|
# https://www.devdungeon.com/content/colorize-terminal-output-python
# https://www.geeksforgeeks.org/print-colors-python-terminal/
class CONST:
class print_color:
class control:
'''
Full name: Perfect_color_text
'''
reset='\033[0m'
bold='\033[01m'
disable='\033[02m'
underline='\033[04m'
reverse='\033[07m'
strikethrough='\033[09m'
invisible='\033[08m'
class fore:
'''
Full name: Perfect_fore_color
'''
black='\033[30m'
red='\033[31m'
green='\033[32m'
orange='\033[33m'
blue='\033[34m'
purple='\033[35m'
cyan='\033[36m'
lightgrey='\033[37m'
darkgrey='\033[90m'
lightred='\033[91m'
lightgreen='\033[92m'
yellow='\033[93m'
lightblue='\033[94m'
pink='\033[95m'
lightcyan='\033[96m'
class background:
'''
Full name: Perfect_background_color
'''
black='\033[40m'
red='\033[41m'
green='\033[42m'
orange='\033[43m'
blue='\033[44m'
purple='\033[45m'
cyan='\033[46m'
lightgrey='\033[47m'
class cv_color:
line = (0,255,0)
circle = (255,255,0)
if __name__ == "__main__":
    # End with a reset so the red color does not leak into the next prompt.
    print(CONST.print_color.fore.yellow + 'Hello world. ' + CONST.print_color.fore.red + 'Hey!' + CONST.print_color.control.reset)
|
# Keep reading numbers until one falls in the inclusive range [1, 100].
found = False
while not found:
    num = float(input())
    if 1 <= num <= 100:
        print(f"The number {num} is between 1 and 100")
        found = True
|
# MathHelper.py - Some helpful math utilities.
# Created by Josh Kennedy on 18 May 2014
#
# Pop a Dots
# Copyright 2014 Chad Jensen and Josh Kennedy
# Copyright 2015-2016 Sirkles LLC
def lerp(value1, value2, amount):
return value1 + ((value2 - value1) * amount)
def isPowerOfTwo(value):
return (value > 0) and ((value & (value - 1)) == 0)
def toDegrees(radians):
return radians * 57.295779513082320876798154814105
def toRadians(degrees):
return degrees * 0.017453292519943295769236907684886
def clamp(value, low, high):
    if value < low:
        return low
    elif value > high:
        return high
    else:
        return value
def nextPowerOfTwo(value):
return_value = 1
while return_value < value:
return_value <<= 1
return return_value
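# A short demo (assumed usage, not part of the original module):
if __name__ == "__main__":
    print(lerp(0.0, 10.0, 0.25))   # 2.5
    print(isPowerOfTwo(64))        # True
    print(clamp(150, 0, 100))      # 100
    print(nextPowerOfTwo(100))     # 128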
|
## Function
def insertShiftArray(arr, num):
    """
    Takes a list and an integer and returns a new list with the integer
    inserted at the middle index of the original list.
    """
    answerArr = []
    # Integer arithmetic only: even lengths split in half, odd lengths
    # round the middle index up (e.g. length 5 -> index 3).
    if len(arr) % 2 == 0:
        middle = len(arr) // 2
    else:
        middle = len(arr) // 2 + 1
    # Loop over the original length plus one extra slot for the insertion.
    for i in range(len(arr) + 1):
        if i < middle:
            # First half of the original list.
            answerArr.append(arr[i])
        elif i == middle:
            # Insert num, then the element it displaced (if any).
            answerArr.append(num)
            if i < len(arr):
                answerArr.append(arr[i])
        elif i < len(arr):
            # Remainder of the original list.
            answerArr.append(arr[i])
    return answerArr
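# Example usage (illustrates the even and odd cases):
print(insertShiftArray([1, 2, 3, 4], 9))      # [1, 2, 9, 3, 4]
print(insertShiftArray([1, 2, 3, 4, 5], 9))   # [1, 2, 3, 9, 4, 5]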
|