content
stringlengths 7
1.05M
|
---|
def data_cart(request):
    """Compute cart totals for the current session (context-processor style).

    Returns a dict with:
      total_cart  -- sum of float(price) * quantity over real cart entries
      total_items -- number of distinct cart entries (NOT summed quantities)

    Anonymous users and sessions without a 'cart' key get zeros.  The
    session cart mapping also carries the bookkeeping keys 'is_modify'
    and 'id_update', which are skipped.
    """
    total_cart = 0
    total_items = 0
    # Idiomatic membership test instead of request.session.__contains__('cart').
    if request.user.is_authenticated and 'cart' in request.session:
        for key, value in request.session['cart'].items():
            # Skip the two bookkeeping keys stored alongside the items.
            if key not in ('is_modify', 'id_update'):
                total_cart += float(value['price']) * value['quantity']
                total_items += 1
    return {
        'total_cart': total_cart,
        'total_items': total_items,
    }
|
# int: Minimum number of n-gram occurrences to be retained
# All n-grams that occur less than n times are removed
# default value: 0
ngram_min_to_be_retained = 0
# real: Minimum ratio of n-grams to skip
# (will be chosen among the ones that occur rarely)
# expressed as a ratio of the cumulated histogram
# default value: 0
ngram_min_rejected_ratio = 0
# NB: the n-gram skipping will go on until:
# ( minimal_ngram_count < ngram_min_to_be_retained ) OR ( rejected_ratio <= ngram_min_rejected_ratio)
# int: the 'n' of 'n'-gram
# default value: 2 (set to 3 here, i.e. trigrams)
gram_size = 3
# File containing the textual data to convert
# Image in file_name*.img
# Text in file_name*.txt
# Output will be :
# file_name*.onehot
# file_name*.ngram
# file_name*.info
files_path = '/u/lisa/db/babyAI/textual_v2'
file_name_train_amat = 'BABYAI_gray_4obj_64x64.train.img'
file_name_valid_amat = 'BABYAI_gray_4obj_64x64.valid.img'
file_name_test_amat = 'BABYAI_gray_4obj_64x64.test.img'
# NOTE(review): the file names above say 64x64 but image_size is 32*32 —
# confirm which resolution the .img files actually contain.
image_size = 32*32
# Use empty string if no dataset
file_name_train_img = 'BABYAI_gray_4obj_64x64.train.img'
file_name_valid_img = 'BABYAI_gray_4obj_64x64.valid.img'
file_name_test_img = 'BABYAI_gray_4obj_64x64.test.img'
file_name_train_txt = 'BABYAI_gray_4obj_64x64.color-size-location-shape.train.txt'
file_name_valid_txt = 'BABYAI_gray_4obj_64x64.color-size-location-shape.valid.txt'
file_name_test_txt = 'BABYAI_gray_4obj_64x64.color-size-location-shape.test.txt'
file_name_train_out = 'BABYAI_gray_4obj_64x64.color-size-location-shape.train'
file_name_valid_out = 'BABYAI_gray_4obj_64x64.color-size-location-shape.valid'
file_name_test_out = 'BABYAI_gray_4obj_64x64.color-size-location-shape.test'
# None or a file '*.info' (reuse a previously computed vocabulary/info file)
file_info = None # 'BABYAI_gray_4obj_64x64.color-size-location-shape.train.info'
|
# Custom colorbar definitions.  The two ramps are identical except for
# their very first entry: '_smps_flo' starts at black while '_smps_flo_w'
# starts at white.  The 317-entry ramp is therefore defined once and the
# white-start variant is derived from it.
_SMPS_FLO_RAMP = [
    "#000000", "#000002", "#000004", "#000007", "#000009", "#00000b", "#00000d", "#000010", "#000012", "#000014",
    "#000016", "#000019", "#00001b", "#00001d", "#00001f", "#000021", "#000024", "#000026", "#000028", "#00002a",
    "#00002d", "#00002f", "#000031", "#000033", "#000036", "#000038", "#00003a", "#00003c", "#00003e", "#000041",
    "#000043", "#000045", "#000047", "#00004a", "#00004c", "#00004e", "#000050", "#000053", "#000055", "#000057",
    "#000059", "#00005b", "#00005e", "#000060", "#000062", "#000064", "#000067", "#000069", "#00006b", "#00006d",
    "#000070", "#000072", "#000076", "#000078", "#00007b", "#00007f", "#000081", "#000086", "#000088", "#00008d",
    # dark blues shifting toward teal
    "#00018e", "#00068b", "#000989", "#000e87", "#001185", "#001384", "#001881", "#001b7f", "#001e7e", "#00207d",
    "#00237b", "#00267a", "#002878", "#002b77", "#002e75", "#003074", "#003372", "#003671", "#003870", "#003b6e",
    "#003d6d", "#00406b", "#00436a", "#004568", "#004867", "#004b65", "#004d64", "#005063", "#005361", "#005560",
    "#00585e", "#005b5d", "#005d5b", "#00605a", "#006258", "#006557", "#006856", "#006a54", "#006d53", "#007051",
    "#007151", "#007150", "#007250", "#00754e", "#00764E", "#00774E", "#00784d", "#007a4b", "#007d4a", "#007f49",
    # greens
    "#008247", "#008546", "#008744", "#008a43", "#008B42", "#008B41", "#008C41", "#008d41", "#008d41", "#008f40",
    "#00923e", "#00953d", "#00963D", "#00973c", "#009a3a", "#009d39", "#009e38", "#009f38", "#009f37", "#00a236",
    "#009F35", "#00a434", "#00A534", "#00a634", "#00A633", "#00a733", "#00a434", "#00A534", "#00A634", "#00a733",
    "#00A635", "#02a732", "#05a431", "#08a230", "#0c9f2f", "#0f9d2f", "#129a2e", "#16972d", "#19952c", "#1c922c",
    "#208f2b", "#238d2a", "#268a29", "#2a8728", "#2d8528", "#308227", "#337f26", "#377d25", "#3a7a24", "#3d7824",
    # green-brown transition
    "#417523", "#447222", "#477021", "#4b6d21", "#4e6a20", "#51681f", "#55651e", "#58621d", "#5b601d", "#5f5d1c",
    "#625b1b", "#65581a", "#695519", "#6c5319", "#6f5018", "#734d17", "#764b16", "#794815", "#7d4515", "#804314",
    "#834013", "#873d12", "#8a3b12", "#8d3811", "#903610", "#94330f", "#97300e", "#9a2e0e", "#9e2b0d", "#a1280c",
    "#a4260b", "#a8230a", "#ab200a", "#ae1e09", "#b21b08", "#b51807", "#b81607", "#bc1306", "#bf1105", "#c20e04",
    # reds
    "#c60b03", "#c90903", "#cc0602", "#d00301", "#d30100", "#d40200", "#d40300", "#d40400", "#d40500", "#d40600",
    "#d40700", "#d40800", "#d40900", "#d40c00", "#d41000", "#D41100", "#D41200", "#d41300", "#D41400", "#D41500",
    "#d41600", "#d41a00", "#d41d00", "#d42000", "#d42400", "#d42700", "#d42a00", "#d42b00", "#d42c00", "#d42d00",
    "#d42e00", "#D43100", "#D43200", "#D43300", "#d43400", "#d43500", "#D43600", "#D43700", "#d43800", "#d43b00",
    # oranges
    "#D43C00", "#D43D00", "#d43e00", "#D44200", "#d44200", "#d44300", "#d44400", "#d44500", "#d44800", "#d44c00",
    "#d44f00", "#d45200", "#d45600", "#d45900", "#d45c00", "#d45f00", "#d46300", "#d46600", "#d46900", "#d46d00",
    "#d47000", "#d47300", "#d47700", "#d47a00", "#D47B00", "#D47C00", "#d47d00", "#d48100", "#D48200", "#D48300",
    "#d48400", "#d48700", "#d48b00", "#d48e00", "#d49100", "#d49500", "#d49800", "#d49b00", "#d49f00", "#d4a200",
    # yellows
    "#d4a500", "#d4a900", "#d4ac00", "#d4af00", "#d4b300", "#d4b600", "#d4b900", "#d4bc00", "#d4c000", "#d4c300",
    "#d4c600", "#d4ca00", "#d4cd00", "#d4d000", "#d4d400", "#D7D700", "#DADA00", "#DCDC00", "#DFDF00", "#E1E100",
    "#E4E400", "#E6E600", "#E9E900", "#ECEC00", "#F1F100", "#F6F200", "#F6F300", "#F6F400", "#F6F600", "#F6F700",
    "#F8F800", "#FBFB00", "#FDFD00", "#FDFE00", "#FFFD00", "#FDFF00", "#FFFF00",
]

customcb = {
    '_smps_flo': _SMPS_FLO_RAMP,
    # Same ramp, but the lowest value maps to white instead of black.
    '_smps_flo_w': ["#FFFFFF"] + _SMPS_FLO_RAMP[1:],
}
|
dados = []
cadastrados = 0
pesado = []
leve = []
nomes_leves = []
nomes_pesados = []


def linha():
    """Print an 80-character separator line."""
    print('-' * 80)


linha()
while True:
    # Read one person per iteration into locals.  Bug fixed: the original
    # classified using dados[0]/dados[1], which always referenced the
    # FIRST registered person, so everyone after the first was classified
    # with the first person's name and weight.
    nome = str(input('Nome: ')).capitalize()
    peso = int(input('Peso: '))
    dados.append(nome)
    dados.append(peso)
    cadastrados += 1
    # Bug fixed: classify BEFORE asking whether to continue; the original
    # broke out of the loop first, so the last person was never classified.
    if peso <= 70:
        leve.append(peso)
        nomes_leves.append(nome)
    else:
        pesado.append(peso)
        nomes_pesados.append(nome)
    resp = str(input('Quer continuar: ')).upper()[0]
    if resp == 'N':
        break
linha()
print(f'Ao todo, você cadastrou {cadastrados}')
print(f'O maior peso foi {pesado}Kg. Peso de {nomes_pesados}')
print(f'O menor peso foi {leve}Kg. Peso de {nomes_leves}')
linha()
|
def _banner(num):
    """Print the three-line header that announces example *num*."""
    print('#######################')
    print(f'###### Exemplo {num} ######')
    print('#######################')


_banner(1)
# Example 1: a simple counted loop from 0 to 4.
for i in range(5):
    print('Iterating ', i)
print(' ')
_banner(2)
# Example 2: using the indices of a list.
smoothies = ['coconut', 'strawberry', 'banana', 'tropical', 'acai']
for i in range(len(smoothies)):
    print('Smoothies ', smoothies[i])
print(' ')
_banner(3)
# Example 3: an interval from 5 up to (excluding) 10.
for i in range(5, 10):
    print(i)
print(' ')
_banner(4)
# Example 4: a step of 2 — the sequence 3, 5, 7, 9.
for i in range(3, 10, 2):
    print(i)
print(' ')
_banner(5)
# Example 5: a step of -1 counts backwards, from 10 down to 1.
for i in range(10, 0, -1):
    print(i)
print(' ')
_banner(6)
# Example 6: ranges can start at negative numbers, here -10 .. 1.
for i in range(-10, 2):
    print(i)
|
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
    """Register *rule* (and optionally *view_func*) on the application.

    The endpoint is namespaced with the blueprint's name; the blueprint's
    URL prefix, subdomain and URL defaults are applied before the rule is
    forwarded to the underlying app.
    """
    prefix = self.url_prefix
    if prefix is not None:
        # Join the prefix and the rule with exactly one slash between
        # them; an empty rule means "the prefix itself".
        rule = "/".join((prefix.rstrip("/"), rule.lstrip("/"))) if rule else prefix
    options.setdefault("subdomain", self.subdomain)
    if endpoint is None:
        endpoint = _endpoint_from_view_func(view_func)
    merged_defaults = self.url_defaults
    if "defaults" in options:
        # Per-rule defaults are layered on top of the blueprint-wide ones.
        merged_defaults = dict(merged_defaults, **options.pop("defaults"))
    self.app.add_url_rule(
        rule,
        f"{self.blueprint.name}.{endpoint}",
        view_func,
        defaults=merged_defaults,
        **options,
    )
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
NOTES
-----
py2app includes all packages in Spyder.app/Contents/Resources/lib/
python<ver>.zip, but some packages have issues when placed there.
The following packages are included in py2app's PACKAGES option so that
they will be placed in Spyder.app/Contents/Resources/lib/python<ver>
instead.
humanfriendly :
spyder-terminal plugin
ModuleNotFoundError: No module named 'humanfriendly.tables'
pkg_resources:
ImportError: The 'more_itertools' package is required; normally this is
bundled with this package so if you get this warning, consult the
packager of your distribution.
pyls_spyder :
Mandatory: pyls_spyder >=0.1.1 : None (NOK)
pylsp_black :
Mandatory: python-pyls-black >=1.0.0 : None (NOK)
setuptools :
Mandatory: setuptools >=49.6.0 : None (NOK)
spyder :
NotADirectoryError: [Errno 20] Not a directory: '<path>/Resources/lib/
python38.zip/spyder/app/mac_stylesheet.qss'
spyder_kernels :
No module named spyder_kernels.console.__main__
spyder_terminal :
No module named spyder_terminal.server
"""
# Packages that cannot be in the zip folder (the module docstring above
# records the concrete failure each one produced when zipped)
PACKAGES = [
'humanfriendly',
'pkg_resources',
'pyls_spyder',
'pylsp_black',
'setuptools',
'spyder',
'spyder_kernels',
'spyder_terminal',
]
# Packages to exclude from the bundle entirely
EXCLUDES = []
# modules that py2app misses and must be listed explicitly
INCLUDES = [
'jellyfish',
'pylsp',
]
# NOTE(review): presumably the scientific stack bundled with the full
# installer variant — confirm against the build script that consumes it.
SCIENTIFIC = [
'cython',
'defusedxml',
'matplotlib',
'numpy',
'openpyxl',
'pandas',
'scipy',
'sympy',
]
|
# Counters: how many 9s, how many 3s, how many even values.
cont_9 = cont_3 = cont_pares = 0
numeros_pares = ""
# Read four integers into a tuple.
tupla_numeros = (int(input("Digite o primeiro número: ")), int(input("Digite o segundo número: ")),
                 int(input("Digite o terceiro número: ")), int(input("Digite o quarto número: ")))
# Walk the values directly instead of indexing range(0, 4).
for valor in tupla_numeros:
    if valor == 9:
        cont_9 += 1
    if valor == 3:
        cont_3 += 1
    if valor % 2 == 0:
        numeros_pares += f"{valor} "
        cont_pares += 1
print(f"Você digitou os valores {tupla_numeros}.")
print(f"O número 9 apareceu {cont_9} vezes.")
if cont_3 == 0:
    print("O valor 3 não foi digitado!")
else:
    # index() returns the 0-based position of the FIRST 3; report 1-based.
    pos = tupla_numeros.index(3) + 1
    print(f"O primeiro valor 3 foi digitado na {pos}ª posição .")
if cont_pares == 0:
    print("Não teve números pares.")
else:
    print(f"Os números pares são {numeros_pares}.")
|
class ModelSingleton(object):
    """Base class that gives each concrete subclass exactly one instance.

    Pattern from https://stackoverflow.com/a/6798042 — instances are
    cached per subclass in ``_instances``.
    """
    _instances = {}  # one cached instance per concrete subclass

    def __new__(class_, *args, **kwargs):
        if class_ not in class_._instances:
            # Bug fix: object.__new__ must receive only the class.  The
            # original forwarded *args/**kwargs, which raises
            # "object.__new__() takes exactly one argument" on Python 3
            # as soon as a subclass defines an __init__ with parameters.
            class_._instances[class_] = super(ModelSingleton, class_).__new__(class_)
        return class_._instances[class_]
class SharedModel(ModelSingleton):
    # Process-wide singleton (via ModelSingleton) holding shared model state.
    shared_model = None  # the shared model object; populated elsewhere
    mapper = None  # mappers realID <-> innerID, updated with new data such as models
|
# Sudoku boards as 9x9 row-major lists of ints; 0 marks an empty cell.
# Difficulty increases with the number of empty cells.
tabuleiro_easy_1 = [
[4, 0, 1, 8, 3, 9, 5, 2, 0],
[3, 0, 9, 2, 7, 5, 1, 4, 6],
[5, 2, 7, 6, 0, 1, 9, 8, 0],
[0, 5, 8, 1, 0, 7, 3, 9, 4],
[0, 7, 3, 9, 8, 4, 2, 5, 0],
[9, 1, 4, 5, 2, 3, 6, 7, 8],
[7, 4, 0, 3, 0, 6, 8, 1, 2],
[8, 0, 6, 4, 1, 2, 7, 3, 5],
[1, 3, 2, 7, 5, 8, 4, 0, 9],
]
tabuleiro_easy_2 = [
[0, 6, 1, 8, 0, 0, 0, 0, 7],
[0, 8, 9, 2, 0, 5, 0, 4, 0],
[0, 0, 0, 0, 4, 0, 9, 0, 3],
[2, 0, 0, 1, 6, 0, 3, 0, 0],
[6, 7, 0, 0, 0, 0, 0, 5, 1],
[0, 0, 4, 0, 2, 3, 0, 0, 8],
[7, 0, 5, 0, 9, 0, 0, 0, 0],
[0, 9, 0, 4, 0, 2, 7, 3, 0],
[1, 0, 0, 0, 0, 8, 4, 6, 0],
]
tabuleiro_med = [
[0, 5, 0, 3, 6, 0, 0, 0, 0],
[2, 8, 0, 7, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 8, 0, 9, 0],
[6, 0, 0, 0, 0, 0, 0, 8, 3],
[0, 0, 4, 0, 0, 0, 2, 0, 0],
[8, 9, 0, 0, 0, 0, 0, 0, 6],
[0, 7, 0, 5, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 3, 9],
[0, 0, 0, 0, 4, 3, 0, 6, 0],
]
tabuleiro_hard = [
[0, 7, 0, 0, 0, 0, 0, 9, 0],
[0, 0, 0, 0, 5, 0, 4, 0, 2],
[0, 0, 0, 0, 0, 0, 0, 3, 0],
[6, 0, 0, 0, 1, 3, 2, 0, 0],
[0, 0, 9, 0, 8, 0, 0, 0, 0],
[0, 3, 1, 0, 0, 6, 0, 0, 0],
[4, 6, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 8, 0, 0, 4, 6, 0, 0],
[0, 0, 0, 0, 3, 5, 0, 0, 0],
]
|
class Config(object):
    """Hyper-parameter bundle for a text-classification model.

    NOTE(review): the mix of fields (filters/num_filters, hidden_size,
    attention) suggests a CNN/RNN text classifier — confirm against the
    model code that consumes this config.
    """
    def __init__(self, vocab_size, max_length):
        self.vocab_size = vocab_size  # size of the token vocabulary
        self.embedding_size = 300  # embedding dimension per token
        self.hidden_size = 200  # hidden state size
        self.filters = [3, 4, 5]  # filter sizes
        self.num_filters = 256  # number of filters per size
        self.num_classes = 10  # number of output classes
        self.max_length = max_length  # maximum sequence length
        self.num_epochs = 20  # training epochs
        self.lr = 0.001  # learning rate
        self.dropout = 0.5  # dropout rate — confirm keep vs drop convention
        self.attention = True  # whether the attention branch is enabled
# Mayor de edad: report whether the entered age makes the user an adult (18+).
mayor = int(input("edad: "))
if mayor >= 18:
    # Bug fix: the original printed the typo "es mator de edad".
    print("es mayor de edad")
else:
    print("es menor de edad")
|
"""Top-level package for Python Boilerplate."""
__author__ = """Nate Solon"""
__email__ = '[email protected]'
__version__ = '0.1.0'
|
"""
Good morning! Here's your coding interview problem for today.
This problem was asked by Google.
Given a word W and a string S, find all starting indices in S which are anagrams of W.
For example, given that W is "ab", and S is "abxaba", return 0, 3, and 4.
"""
def verify_string_combos(word, string):
    """Return all starting indices in *string* where an anagram of *word* begins.

    Example: word "ab" in "abxaba" -> [0, 3, 4].

    Rewritten as a Counter sliding window: O(len(string)) instead of the
    original O(len(string) * len(word)) scan-and-remove.  Also fixes a
    crash: the original raised IndexError for an empty *word* (it indexed
    one past the end of *string*); an empty word now yields [].

    Returns None for non-string arguments and [] when *word* is longer
    than *string*, matching the original contract.
    """
    from collections import Counter  # local import: file header is not visible here

    if type(word) is not str or type(string) is not str:
        return None
    k = len(word)
    if k == 0 or k > len(string):
        return []
    need = Counter(word)
    window = Counter(string[:k])
    indices = [0] if window == need else []
    for i in range(k, len(string)):
        # Slide the window one character to the right.
        window[string[i]] += 1
        left = string[i - k]
        window[left] -= 1
        if not window[left]:
            del window[left]  # keep Counter comparison exact
        if window == need:
            indices.append(i - k + 1)
    return indices
|
#!/usr/bin/env python3
# implicit None return
def say_hi():
    """Print a greeting; implicitly returns None."""
    greeting = "hey there"
    print(greeting)
# implicit argument default: name falls back to "Adrienne"
def yell_name(name='Adrienne'):
    """Shout *name* (upper-cased) to stdout.

    No explicit return, so like any bare Python function it yields None.
    """
    print(f"YO {name.upper()} ")
# scoping
def add(num1=0, num2=0):
    """Print and return the sum of the two numbers."""
    total = num1 + num2
    print(total)
    return total
# named parameter
def madlibs(name, noun="shoes", adj="blue"):
    """Compose the mad-lib sentence '<name> has <adj> <noun>'."""
    return "{} has {} {}".format(name, adj, noun)
# Demo calls for the functions defined above.
say_hi()
yell_name()
print(madlibs('tracie', adj='suede', noun='shoes')) # keyword args may be out of order, or plain positional args
# can't do this: print(madlibs('tracie', adj='suede', 'shoes')) — once you start keywords, you have to finish
|
""" Module for a sudoku Cell with row, column position """
class Cell:
    """One cell of a Sudoku grid, identified by 1-indexed row and column."""

    def __init__(self, row: int, column: int):
        """Store the 1-indexed row and column of this cell."""
        self.row = row
        self.column = column

    def __eq__(self, o: object) -> bool:
        """Equal iff *o* is a Cell at the same coordinates."""
        if not isinstance(o, Cell):
            return False
        return (self.row, self.column) == (o.row, o.column)

    def __hash__(self) -> int:
        # Hash the same coordinate pair used for equality.
        return hash((self.row, self.column))

    def __repr__(self):
        return f"r{self.row}c{self.column}"

    def is_orthogonal(self, other: 'Cell') -> bool:
        """True when *other* is directly above/below or left/right of this cell."""
        row_gap = abs(self.row - other.row)
        col_gap = abs(self.column - other.column)
        # Orthogonal neighbours are exactly the cells at Manhattan distance 1.
        return row_gap + col_gap == 1
|
def getBASIC(read_line=input):
    """Read lines of a BASIC program until one ending in 'END', inclusive.

    Args:
        read_line: zero-argument callable returning the next line.
            Defaults to the builtin ``input`` (original behavior); the
            parameter is new and backward-compatible, added so the
            routine can be driven without stdin.

    Returns:
        list[str]: every line read, including the terminating 'END' line.

    Also fixes the original's shadowing of the builtin name ``list`` and
    collapses its read-ahead bookkeeping into a single read-then-test loop.
    """
    lines = []
    while True:
        line = read_line()
        lines.append(line)
        if line.endswith('END'):
            return lines
|
# Read the order: flower type, quantity, and the available budget.
type_of_flowers = input()
count = int(input())
budget = int(input())
# Price per single flower, in leva.
prices = {
    "Roses": 5.00,
    "Dahlias": 3.80,
    "Tulips": 2.80,
    "Narcissus": 3.00,
    "Gladiolus": 2.50
}
price = count * prices[type_of_flowers]
# At most one adjustment applies: discounts for large orders,
# surcharges for small orders of certain flowers.
adjustment = 0.0
if type_of_flowers == "Roses" and count > 80:
    adjustment = -(0.10 * price)
elif type_of_flowers == "Dahlias" and count > 90:
    adjustment = -(0.15 * price)
elif type_of_flowers == "Tulips" and count > 80:
    adjustment = -(0.15 * price)
elif type_of_flowers == "Narcissus" and count < 120:
    adjustment = 0.15 * price
elif type_of_flowers == "Gladiolus" and count < 80:
    adjustment = 0.20 * price
price += adjustment
money_left = budget - price
if money_left >= 0:
    print(f"Hey, you have a great garden with {count} {type_of_flowers} and {money_left:.2f} leva left.")
else:
    print(f"Not enough money, you need {price - budget:.2f} leva more.")
|
#170
# Time: O(n)
# Space: O(n)
# Design and implement a TwoSum class. It should support the following operations: add and find.
#
# add - Add the number to an internal data structure.
# find - Find if there exists any pair of numbers which sum is equal to the value.
#
# For example,
# add(1); add(3); add(5);
# find(4) -> true
# find(7) -> false
class twoSumIII():
def __init__(self):
self.num_count={}
def add(self,val):
if val in self.num_count:
self.num_count[val]+=1
else:
self.num_count[val]=1
def find(self,target):
for num in self.num_count:
if target-num in self.num_count and (target-num!=num or self.num_count[target-num]>1):
return True
return False
|
def flatten(d: dict, new_d: dict, path: str = '') -> dict:
    """Flatten the nested dict *d* into *new_d* using dot-joined keys.

    {"a": 1, "b": {"c": 2}} -> {"a": 1, "b.c": 2}

    Bug fixed: the original returned from INSIDE the loop on the first
    nested dict it met, silently dropping every key that came after it;
    it also mutated ``path`` in place, so later siblings would have
    inherited the nested prefix.

    Args:
        d: the (possibly nested) source dict.
        new_d: the accumulator dict, mutated in place and returned.
        path: dot-terminated key prefix for the current nesting level.

    Returns:
        new_d, for call-chaining convenience.
    """
    for key, value in d.items():
        if isinstance(value, dict):
            # Recurse with an extended prefix; do not mutate `path`.
            flatten(value, new_d, path=f'{path}{key}.')
        else:
            new_d[path + key] = value
    return new_d
if __name__ == "__main__":
d = {"foo": 42,
"bar": "qwe",
"buz": {
"one": 1,
"two": 2,
"nested": {
"deep": "blue",
"deeper": {
"song": "my heart",
}
}
}
}
print(flatten(d, {}))
|
class BotovodException(Exception):
    """Root of the botovod exception hierarchy."""
    pass


class AgentException(BotovodException):
    """Base class for agent-related errors."""
    pass


class AgentNotExistException(BotovodException):
    """Raised when the named agent is not registered with Botovod."""
    def __init__(self, name: str):
        super().__init__(f"Botovod have not '{name}' agent")
        self.name = name  # keep the missing agent's name for callers


class HandlerNotPassed(BotovodException):
    """Raised when no handler was supplied where one is required."""
    def __init__(self):
        super().__init__("Handler not passed")
|
class Solution:
    def carPooling(self, trips: List[List[int]], capacity: int) -> bool:
        """Return True iff all trips fit within *capacity* seats.

        Each trip is [passengers, start, end].  A difference array over
        the location range (constraints bound locations to <= 1000)
        records pick-ups and drop-offs; a prefix-sum sweep then checks
        the running occupancy against the capacity.
        """
        deltas = [0] * 1002
        for passengers, start, end in trips:
            deltas[start] += passengers   # riders board at `start`
            deltas[end] -= passengers     # and leave at `end`
        riders = 0
        for change in deltas:
            riders += change
            if riders > capacity:
                return False
        return True
class Result:
    """Container holding either a success value or an error (an Either type)."""
    def __init__(self, value=None, error=None):
        # An error of None marks the result as OK; only one of the two
        # fields is expected to be meaningful.
        self._value = value
        self._error = error

    def is_ok(self) -> bool:
        """True when no error was recorded."""
        return self._error is None

    def is_error(self) -> bool:
        """True when an error was recorded."""
        return self._error is not None

    @property
    def value(self):
        # Read-only access to the wrapped value.
        return self._value

    @property
    def error(self):
        # Read-only access to the wrapped error.
        return self._error

    @staticmethod
    def make_value(value, state):
        # NOTE(review): asymmetric with make_error below — success returns
        # the RAW value (not wrapped in a Result).  Presumably intentional
        # for the calling code, but confirm before "fixing": wrapping here
        # would change every success path.
        return value, state

    @staticmethod
    def make_error(error, state):
        # Errors ARE wrapped in a Result carrying the error.
        return Result(value=None, error=error), state
|
# NOTE(review): `cross_validate` (scikit-learn), `pd` (pandas), and the
# `model`, `X`, `y`, `cv` objects must be defined/imported earlier in the file.
cv_results = cross_validate(
    model, X, y, cv=cv,
    return_estimator=True, return_train_score=True,  # keep fitted estimators and training scores
    n_jobs=-1,  # use all available CPU cores
)
# Turn the returned dict of per-fold arrays into a DataFrame for inspection.
cv_results = pd.DataFrame(cv_results)
|
# """
# This is Master's API interface.
# You should not implement it, or speculate about its implementation
# """
#class Master:
# def guess(self, word):
# """
# :type word: str
# :rtype int
# """
class Solution:
    def findSecretWord(self, wordlist, master):
        """Find the secret 6-letter word using Master.guess (LeetCode 843).

        Heuristic: repeatedly guess the candidate that has ZERO positional
        matches with the fewest other candidates, then prune the candidate
        list to the words consistent with the reported match count.
        """
        n = 0
        # master.guess returns how many positions match the secret;
        # 6 means the secret has been found.
        while n < 6:
            # count[w1] = number of other candidates sharing no positional
            # letter with w1 (a "worst case" measure for guessing w1).
            count = collections.Counter(w1 for w1, w2 in itertools.permutations(wordlist, 2) if sum(i == j for i, j in zip(w1, w2)) == 0)
            # Guess the word with the fewest zero-match partners; min()
            # breaks ties by wordlist order.
            guess = min(wordlist, key = lambda w: count[w])
            n = master.guess(guess)
            # Keep only candidates consistent with the reported match count.
            wordlist = [w for w in wordlist if sum(i == j for i, j in zip(w, guess)) == n]
class Usercredentials:
    '''
    One saved site/password pair; all saved pairs live in the class-level
    user_credential_list.
    '''
    user_credential_list = []  # shared store of saved credentials

    def __init__(self, site_name, password):
        '''
        Record the site name and the password for it.
        '''
        self.site_name = site_name
        self.password = password

    def save_credentials(self):
        '''
        Add this credential to the shared store.
        '''
        Usercredentials.user_credential_list.append(self)

    def delete_credentials(self):
        '''
        Remove this credential from the shared store.
        '''
        Usercredentials.user_credential_list.remove(self)

    @classmethod
    def display_credentials(cls):
        '''
        Return the shared list of saved credentials.
        '''
        return cls.user_credential_list
"""Fixed and parsed data for testing"""
SERIES = {
"_links": { "nextepisode": { "href": "http://api.tvmaze.com/episodes/664353"},
"previousepisode": { "href": "http://api.tvmaze.com/episodes/631872"},
"self": { "href": "http://api.tvmaze.com/shows/60"}
},
"externals": { "imdb": "tt0364845", "thetvdb": 72108, "tvrage": 4628},
"genres": ["Drama", "Action", "Crime"],
"id": 60, "image": { "medium": "http://tvmazecdn.com/uploads/images/medium_portrait/34/85849.jpg", "original": "http://tvmazecdn.com/uploads/images/original_untouched/34/85849.jpg" }, "language": "English", "name": "NCIS",
"network": { "country": { "code": "US", "name": "United States",
"timezone": "America/New_York"},
"id": 2,
"name": "CBS"
},
"premiered": "2003-09-23",
"rating": { "average": 8.8},
"runtime": 60,
"schedule": { "days": ["Tuesday"], "time": "20:00"},
"status": "Running",
"summary": """
<p>NCIS (Naval Criminal Investigative Service) is more than
just an action drama. With liberal doses of humor, it\"s a show that focuses
on the sometimes complex and always amusing dynamics of a team forced to work
together in high-stress situations. Leroy Jethro Gibbs, a former Marine
gunnery sergeant, whose skills as an investigator are unmatched, leads this
troupe of colorful personalities. Rounding out the team are Anthony DiNozzo,
an ex-homicide detective whose instincts in the field are unparalleled and
whose quick wit and humorous take on life make him a team favorite; the
youthful and energetic forensic specialist Abby Sciuto, a talented scientist
whose sharp mind matches her Goth style and eclectic tastes; Caitlin Todd, an
ex-Secret Service Agent; and Timothy McGee, an MIT graduate whose brilliance
with computers far overshadows his insecurities in the field; Assisting the
team is medical examiner Dr. Donald "Ducky" Mallard, who knows it all because
he\"s seen it all, and he\"s not afrad to let you know. From murder and
espionage to terrorism and stolen submarines, these special agents travel the
globe to investigate all crimes with Navy or Marine Corps ties.</p>
""",
"type": "Scripted",
"updated": 1460310820,
"url": "http://www.tvmaze.com/shows/60/ncis",
"webChannel": None,
"weight": 11
}
PREV = {
'_links': {'self': {'href': 'http://api.tvmaze.com/episodes/2284'}},
'airdate': '2003-09-23',
'airstamp': '2003-09-23T20:00:00-04:00',
'airtime': '20:00',
'id': 2284,
'image': None,
'name': 'Yankee White',
'number': 1,
'runtime': 60,
'season': 1,
'summary': """
<p>A Marine mysteriously drops dead aboard Air Force
One and jurisdiction problems force Gibbs to share the
investigation with a Secret Service agent.</p>
""" ,
'url': 'http://www.tvmaze.com/episodes/2284/ncis-1x01-yankee-white'
}
NEXT = {
'_links': {'self': {'href': 'http://api.tvmaze.com/episodes/2285'}},
'airdate': '2003-09-30',
'airstamp': '2003-09-30T20:00:00-04:00',
'airtime': '20:00',
'id': 2285,
'image': None,
'name': 'Hung Out to Dry',
'number': 2,
'runtime': 60,
'season': 1,
'summary': """
<p>A Marine dies when his parachute fails and he crashes
through a car during a training exercise, and Gibbs
suspects he was murdered.</p>
""",
'url': 'http://www.tvmaze.com/episodes/2285/ncis-1x02-hung-out-to-dry'
}
mock_data = {"series": SERIES, "previousepisode": PREV, "nextepisode": NEXT}
def mock_get_data(data_name, **kwargs):
    """
    Mocks the response JSON from TVMaze.

    :param data_name: a string key of mock_data ("series", "previousepisode"
        or "nextepisode")
    :kwargs: overrides applied to existing keys of the mock data dict
    :return: a dict, representing the mocked data

    Fixes two defects of the original:
      * membership was tested with ``data.get(key)`` (truthiness), so keys
        holding falsy fixture values (e.g. ``image: None`` in PREV) could
        never be overridden;
      * overrides were written into the shared module-level fixtures,
        polluting every later test — a shallow copy is returned instead.
    """
    data = dict(mock_data[data_name])  # shallow copy: never mutate the fixtures
    for key, value in kwargs.items():
        if key in data:  # override only keys the fixture actually has
            data[key] = value
    return data
|
#!/usr/bin/env python
def clean_links(text):
    """Remove brackets around a wikilink, keeping the label instead of the page
    if it exists.

    "[[foobar]]" will become "foobar", but "[[foobar|code words]]" will return
    "code words".

    Args:
        text (str): Full text of a Wikipedia article as a single string.

    Returns:
        str: A copy of the full text with all wikilinks cleaned.
    """
    good_char_list = []  # characters kept in the output
    next_char = None     # one-character lookahead
    skip = 0             # how many upcoming characters still have to be dropped
    for pos,char in enumerate(text):
        try:
            next_char = text[pos+1]
        except IndexError:
            next_char = None  # no lookahead at the very end of the text
        # Skip the character
        if skip:
            skip -= 1
            continue
        # Otherwise check if we have found a link
        if char == '[' and next_char == '[':
            skip = 1  # always drop the second '[' of the opener
            # Check if we are in a comment with a '|' label separator
            pipe_pos = text.find('|', pos)
            if pipe_pos == -1:
                continue  # plain [[page]] link: keep its inner text
            end_pos = text.find(']]', pos)
            # Only treat the pipe as THIS link's separator when it occurs
            # before the link closes; then drop everything up to and
            # including the pipe so only the label survives.
            # NOTE(review): if the link never closes (end_pos == -1) a pipe
            # later in the text is ignored here — confirm that is intended.
            if pipe_pos < end_pos:
                skip = pipe_pos - pos
        elif char == ']' and next_char == ']':
            skip = 1  # drop both closing brackets
        # Otherwise just append the character
        else:
            good_char_list.append(char)
    return ''.join(good_char_list)
|
def GetParent(node, parent):
    """Return the root of *node*'s set, compressing the path on the way up."""
    if node == parent[node]:
        return node
    parent[node] = GetParent(parent[node], parent)
    return parent[node]


def union(u, v, parent, rank):
    """Merge the sets containing *u* and *v* (union by rank)."""
    u = GetParent(u, parent)
    v = GetParent(v, parent)
    if rank[u] < rank[v]:
        parent[u] = v
    elif rank[u] > rank[v]:
        parent[v] = u
    else:
        parent[v] = u
        rank[u] += 1


def Kruskal(n, m, edges=None):
    """Kruskal's minimum spanning tree over *n* nodes.

    Args:
        n: number of nodes (labelled 0..n-1).
        m: number of edges to read from stdin (ignored when *edges* is given).
        edges: optional list of (x, y, w) tuples; when None (the original
            behavior) *m* lines "x y w" are read from input().

    Prints the chosen MST edges as "x - y" and returns the total cost.

    Bug fixed: the original called ``edges.append(x, y, w)``, which raises
    TypeError because list.append takes exactly one argument — the values
    must be appended as a tuple.
    """
    if edges is None:
        edges = []
        for _ in range(m):
            x, y, w = map(int, input().split())
            edges.append((x, y, w))
    # Sort by weight and greedily take edges joining distinct components.
    edges = sorted(edges, key=lambda e: e[2])
    parent = list(range(n))  # each node starts as its own root
    rank = [0] * n
    cost = 0
    mst = []
    for x, y, w in edges:
        if GetParent(x, parent) != GetParent(y, parent):
            cost += w
            mst.append([x, y])
            union(x, y, parent, rank)
    for i, j in mst:
        print(i, '-', j)
    return cost
|
#!/usr/bin/env python3
# "Frog 1"-style DP: stones 0..n-1 have heights h; a jump from stone j to
# stone i costs |h[i] - h[j]| and jumps of length 1 or 2 are allowed.
# Print the minimum total cost to reach the last stone.
n, *h = map(int, open(0).read().split())
# Bug fixes vs the original:
#  * dp was initialised to all zeros, so min(dp[i], ...) always stayed 0;
#  * the loop wrote dp[i+1], which raises IndexError at i = n-1 and also
#    mixed dp[i-1] with h[i-2] in the length-2 jump term.
INF = float('inf')
dp = [INF] * n  # dp[i] = minimum cost to reach stone i
dp[0] = 0
for i in range(1, n):
    dp[i] = dp[i-1] + abs(h[i] - h[i-1])          # jump of length 1
    if i >= 2:
        dp[i] = min(dp[i], dp[i-2] + abs(h[i] - h[i-2]))  # jump of length 2
print(dp[n-1])
|
"""
In this Bite you complete the divide_numbers function that takes a
numerator and a denominator (the number above and below the line
respectively when doing a division).
First you try to convert them to ints, if that raises a ValueError
you will re-raise it (using raise).
To keep things simple we can expect this function to be called
with int/float/str types only (read the tests why ...)
Getting past that exception (no early bail out, we're still in
business) you try to divide numerator by denominator, returning
the result.
If denominator is 0 though, Python throws another exception.
Figure out which one that is and catch it. In that case return 0.
"""
def divide_numbers(numerator, denominator):
    """Divide *numerator* by *denominator* after coercing both to int.

    Both arguments may be int/str/float.

    Returns:
        The float quotient, or 0 when *denominator* coerces to 0.

    Raises:
        ValueError: if either argument cannot be converted to int.  A bare
            ``raise`` re-raises the ORIGINAL exception with its message;
            the previous ``raise ValueError`` replaced it with an empty one.
    """
    try:
        numerator = int(numerator)
        denominator = int(denominator)
    except ValueError:
        raise
    try:
        return numerator / denominator
    except ZeroDivisionError:
        # Dividing by zero is defined by this exercise to yield 0.
        return 0
|
"""
You are given a map in form of a two-dimensional integer grid where 1 represents land and 0 represents water. Grid cells are connected horizontally/vertically (not diagonally). The grid is completely surrounded by water, and there is exactly one island (i.e., one or more connected land cells). The island doesn't have "lakes" (water inside that isn't connected to the water around the island). One cell is a square with side length 1. The grid is rectangular, width and height don't exceed 100. Determine the perimeter of the island.
Example:
[[0,1,0,0],
[1,1,1,0],
[0,1,0,0],
[1,1,0,0]]
Answer: 16
Explanation: The perimeter is the 16 yellow stripes in the image below:
"""
class Solution(object):
    def islandPerimeter(self, grid):
        """
        :type grid: List[List[int]]
        :rtype: int

        Counts the perimeter by scanning every row and then every column,
        adding one edge for each 0->1 or 1->0 transition, plus the
        implicit trailing edge when a line ends on land.
        """
        perimeter = 0
        # Horizontal pass: count the vertical edges in each row.
        for row in grid:
            previous = 0
            for cell in row:
                if cell != previous:
                    previous = cell
                    perimeter += 1
            perimeter += row[-1]  # +1 when the row ends on land
        # Vertical pass: count the horizontal edges in each column.
        for col in range(len(grid[0])):
            previous = 0
            for r in range(len(grid)):
                cell = grid[r][col]
                if cell != previous:
                    previous = cell
                    perimeter += 1
            perimeter += grid[-1][col]  # +1 when the column ends on land
        return perimeter
"""This file defines the unified tensor framework interface required by DeepXDE.
The principles of this interface:
* There should be as few interfaces as possible.
* The interface is used by DeepXDE system so it is more important to have
clean definition rather than convenient usage.
* Default arguments should be avoided.
* Keyword or positional arguments should be avoided.
* Argument type should be easier to understand.
It is recommended the frameworks implement all the interfaces. However, it is
also OK to skip some. The generated backend module has an ``is_enabled`` function
that returns whether the interface is supported by the framework or not.
"""
# For now the backend only has one API
# `tf` holds the framework module once a backend is loaded (presumably
# TensorFlow, given the name — confirm against the loader); None until then.
tf = None
|
# Read the duration in minutes from the user.
minutes = float(input("Enter the time in minutes: "))

# Convert: one hour is 60 minutes.
hours = minutes / 60

# Report the result (f-string renders the float exactly like str()).
print(f"The time in hours is: {hours}")
|
# Integers compare numerically.
x = 12
y = 3
print(x > y)  # True

# Strings compare lexicographically, character by character, by code point —
# NOT by numeric value and NOT by length.
x = "12"
y = "3"
print(x > y)  # False: first chars decide — '1' (code 49) < '3' (code 51)
print(x < y)  # True, for the same reason

# Mixed int/str comparison raises TypeError in Python 3, hence disabled:
# x = 12
# y = "3"
# print(x > y)

x2 = "45"
y2 = "321"
print(x2 > y2)  # True: '4' (52) > '3' (51); remaining characters are ignored

x2 = "45"
y2 = "621"
X2_Length = len(x2)
Y2_Length = len(y2)
print(X2_Length < Y2_Length)  # True: explicit length comparison

# Code points of the digits used above, explaining the orderings:
print( ord('4') )  # 52
print( ord('5') )  # 53
print( ord('6') )  # 54
print( ord('2') )  # 50
print( ord('1') )  # 49
|
# Mapping of C tokens to their numeric codes; each line is "<token> <code>".
# Some tokens contain spaces (e.g. the comment token), so the line must be
# split on its LAST space only.
s = """auto 1
break 2
case 3
char 4
const 5
continue 6
default 7
do 8
double 9
else 10
enum 11
extern 12
float 13
for 14
goto 15
if 16
int 17
long 18
register 19
return 20
short 21
signed 22
sizeof 23
static 24
struct 25
switch 26
typedef 27
union 28
unsigned 29
void 30
volatile 31
while 32
- 33
-- 34
-= 35
-> 36
! 37
!= 38
% 39
%= 40
& 41
&& 42
&= 43
( 44
) 45
* 46
*= 47
, 48
. 49
/ 50
/= 51
: 52
; 53
? 54
[ 55
] 56
^ 57
^= 58
{ 59
| 60
|| 61
|= 62
} 63
~ 64
+ 65
++ 66
+= 67
< 68
<< 69
<<= 70
<= 71
= 72
== 73
> 74
>= 75
>> 76
>>= 77
\" 78
/* 注释 */ 79
常数 80
标识符 81"""
sl = s.split("\n")
for one in sl:
    # rsplit on the last space so multi-word tokens keep their internal
    # spaces; the previous split(" ")[1] printed the wrong field for them.
    token, code = one.rsplit(" ", 1)
    print('{\"' + token + '\", ' + code + '},')
|
'''
For 35 points, answer the following questions.
Create a new folder named file_io and create a new Python file in the
folder. Run this 4 line program. Then look in the folder.
1) What did the program do?
'''
# Creates (or truncates) workfile.txt in the current directory and writes
# two lines of text to it.
f = open('workfile.txt', 'w')
f.write('Bazarr 10 points\n')
f.write('Iko 3 points')
f.close()

'''Run this short program.
2) What did the program do?
'''
# Reads workfile.txt back line by line; print() adds its own newline, so
# the output appears double-spaced.
f = open('workfile.txt', 'r')
line = f.readline()
while line:
    print(line)
    line = f.readline()
f.close()

'''Run this short program.
3) What did the program do?
'''
# Writes the text '14' (a string, not a number) into value.txt.
f = open('value.txt', 'w')
f.write('14')
f.close()

'''Run this short program.
4) What did the program do?
'''
# Reads value.txt, converts its contents to an int, and prints its double.
f = open('value.txt', 'r')
line = f.readline()
f.close()
x = int(line)
print(x*2)

'''Run this short program. Make sure to look at the value.txt file
before and after running this program.
5) What did the program do?
'''
# Read-modify-write: doubles the number stored in value.txt on every run.
f = open('value.txt', 'r')
line = f.readline()
f.close()
x = int(line)
x = x*2
f = open('value.txt', 'w')
f.write(str(x))
f.close()
|
# -*- coding: utf-8 -*-
def main():
    """Read n counts from stdin (then reverse them) and print the greedy
    pairing total: each value contributes value // 2 pairs, and a leftover
    odd unit borrows one item from the next value when available."""
    count = int(input())
    values = list(reversed([int(input()) for _ in range(count)]))
    total = 0
    for idx, val in enumerate(values):
        total += val // 2
        # Odd leftover: pair it with one unit borrowed from the next slot.
        if val % 2 == 1 and idx + 1 < count and values[idx + 1] >= 1:
            total += 1
            values[idx + 1] -= 1
    print(total)


if __name__ == '__main__':
    main()
|
"""Write the contents of a list to texto.txt in one call (writelines)."""

# Build the list of lines first, then write them all at once.
frases = [
    'TreinaWeb \n',
    'Python \n',
    'Arquivos \n',
    'Django \n',
]

# Context manager guarantees the handle is flushed and closed even on
# error — the original opened the file and never closed it.
with open('texto.txt', 'a') as arquivo:
    arquivo.writelines(frases)
# Simple guessing game: one guess against a fixed secret number.
print("Jogo da advinhação")
print("******************")

numero_secreto = 30
palpite = int(input("Digite o seu palpite: "))

# Exactly one of the three outcomes is reported.
if palpite == numero_secreto:
    print("Você acertou!")
elif palpite > numero_secreto:
    print("Seu palpite ultrapassou o numero secreto")
else:
    print("Seu palpite foi menor do que o numero secreto")

print("Game Over")
'''Exceptions for my orm'''
class ObjectNotInitializedError(Exception):
    """Raised when an ORM object is used before it has been initialized."""


class ObjectNotFoundError(Exception):
    """Raised when a requested ORM object cannot be found."""
|
def a(x):
    """Bubble-sort the list ``x`` in place, printing it after every pass."""
    # After `done` passes, the largest `done` values sit in their final
    # slots at the tail, so each pass scans one element fewer.
    for done in range(1, len(x)):
        for j in range(len(x) - done):
            # Swap adjacent out-of-order neighbours.
            if x[j] > x[j + 1]:
                x[j], x[j + 1] = x[j + 1], x[j]
        print(x)  # progress output after each pass (kept from the original)


a([1, 9, 4, 5, 7, 10, 6, 3, 2, 11])
|
def download_file(my_socket):
    """Receive a filename over ``my_socket``, then receive the file itself."""
    print("[+] Downloading file")
    # The first message is the filename; the second transfer is its content.
    my_socket.receive_file(my_socket.receive_data())
|
def return_for_conditions(obj, raise_ex=False, **kwargs):
    """Build a callable suitable as a mock's ``side_effect``.

    The returned handler returns (or raises, when ``raise_ex`` is true)
    ``obj`` whenever it is invoked with at least the keyword arguments
    given here; with any other arguments it returns None.

    Args:
        obj:
            The object to return/raise.
        raise_ex:
            If True, ``obj`` is raised instead of returned.
        **kwargs:
            The keyword arguments that must all be present (with equal
            values) in the mocked call for the handler to trigger.

    Returns:
        A function usable as the side effect of a mocked object.
    """
    def handler(**inner_kwargs):
        # Guard clause: bail out unless the expected pairs are a subset
        # of the actual call's keyword arguments.
        if not kwargs.items() <= inner_kwargs.items():
            return None
        if raise_ex:
            raise obj
        return obj

    return handler
|
#
# @lc app=leetcode id=223 lang=python3
#
# [223] Rectangle Area
#
# @lc code=start
class Solution:
    def computeArea(self, A: int, B: int, C: int, D: int, E: int, F: int, G: int, H: int) -> int:
        """Total area covered by rectangles (A,B)-(C,D) and (E,F)-(G,H).

        Sum of both areas minus their overlap; a negative overlap extent
        on either axis means the rectangles are disjoint on that axis.
        """
        width = min(C, G) - max(A, E)
        height = min(D, H) - max(B, F)
        overlap = max(width, 0) * max(height, 0)
        area_first = (C - A) * (D - B)
        area_second = (G - E) * (H - F)
        return area_first + area_second - overlap
# @lc code=end
# Accepted
# 3082/3082 cases passed(56 ms)
# Your runtime beats 81.4 % of python3 submissions
# Your memory usage beats 100 % of python3 submissions(12.8 MB)
|
# Gym environment ids (Atari 2600 games) used for the experiments.
gameList = [
    'Riverraid-v0',
    'SpaceInvaders-v0',
    'StarGunner-v0',
    'Pitfall-v0',
    'Centipede-v0'
]
|
class Solution:
    def boxDelivering(self, boxes: List[List[int]], portsCount: int, maxBoxes: int, maxWeight: int) -> int:
        """Minimum total trips to deliver all boxes in the given order.

        Sliding-window DP: the window [l, r] is the last shipment, kept
        within maxBoxes / maxWeight. `trips` is the cost of delivering
        boxes[l..r]: 2 (go and return) plus one per port change inside
        the window.
        """
        n = len(boxes)
        # dp[i] := min trips to deliver boxes[0..i) and return to the storage
        dp = [0] * (n + 1)
        trips = 2
        weight = 0
        l = 0
        for r in range(n):
            weight += boxes[r][1]
            # current box is different from previous one, need to make one more trip
            if r > 0 and boxes[r][0] != boxes[r - 1][0]:
                trips += 1
            # shipping boxes[l] in the previous shipment is never worse than
            # shipping it in this one, so shrink while the window is invalid
            # or shrinking is free (dp[l + 1] == dp[l])
            while r - l + 1 > maxBoxes or weight > maxWeight or (l < r and dp[l + 1] == dp[l]):
                weight -= boxes[l][1]
                if boxes[l][0] != boxes[l + 1][0]:
                    trips -= 1
                l += 1
            # min trips to deliver boxes[0..r]
            # = min trips to deliver boxes[0..l) + trips to deliver boxes[l..r]
            dp[r + 1] = dp[l] + trips
        return dp[n]
|
{{AUTO_GENERATED_NOTICE}}
# Toolchain provider: the host platform id plus the two ReScript toolchain
# binaries (compiler `bsc` and build helper `bsb_helper`).
CompilerInfo = provider(fields = ["platform", "bsc", "bsb_helper"])
def _rescript_compiler_impl(ctx):
    """Exposes this rule's attributes as a CompilerInfo provider."""
    return [CompilerInfo(
        platform = ctx.attr.name,  # the rule name doubles as the platform id
        bsc = ctx.file.bsc,
        bsb_helper = ctx.file.bsb_helper,
    )]
# Declares a ReScript toolchain target; both tool attributes are built for
# the execution platform (cfg = "exec").
rescript_compiler = rule(
    implementation = _rescript_compiler_impl,
    attrs = {
        "bsc": attr.label(
            allow_single_file = True,
            executable = True,
            cfg = "exec",
        ),
        "bsb_helper": attr.label(
            allow_single_file = True,
            executable = True,
            cfg = "exec",
        ),
    },
)
# Per-module compiler outputs: compiled interface (cmi), compiled module
# (cmj), and the generated JavaScript file.
RescriptOutputArtifacts = provider(fields = [
    "cmi",
    "cmj",
    "js",
])

# What a rescript_module target exposes to its dependents.
RescriptModuleProvider = provider(fields = [
    "module_artifacts",
    # Includes js_file and all of its transitive deps
    "js_depset",
    "data_depset",
])
def _perhaps_compile_to_iast(ctx, interface_file, iast_file):
    """Compiles a .resi interface to a binary AST (.iast); no-op when absent."""
    if interface_file == None:
        return None
    iast_args = ctx.actions.args()
    iast_args.add("-bs-v", "{{COMPILER_VERSION}}")
    iast_args.add("-bs-ast")
    iast_args.add("-o", iast_file)
    iast_args.add(interface_file)
    ctx.actions.run(
        mnemonic = "CompileToiAST",
        executable = ctx.attr.compiler[CompilerInfo].bsc,
        arguments = [iast_args],
        inputs = depset([interface_file]),
        outputs = [iast_file],
    )
def _compile_to_ast(ctx, src_file, ast_file):
    """Compiles a .res source file to a binary AST (.ast) with bsc."""
    ast_args = ctx.actions.args()
    ast_args.add("-bs-v", "{{COMPILER_VERSION}}")
    ast_args.add("-bs-ast")
    ast_args.add("-o", ast_file)
    ast_args.add(src_file)
    ctx.actions.run(
        mnemonic = "CompileToAST",
        executable = ctx.attr.compiler[CompilerInfo].bsc,
        arguments = [ast_args],
        inputs = depset([src_file]),
        outputs = [ast_file],
    )
def _unique(l):
    """Returns the unique items of `l`, preserving first-seen order.

    Uses a dict as an ordered set (Starlark has no set type). The local
    was renamed from `set`, which shadowed the builtin of the same name.
    """
    seen = {}
    for item in l:
        seen[item] = True
    return seen.keys()
def _join_path(is_windows, items):
    """Joins the non-empty segments of `items` with the OS path separator."""
    separator = "\\" if is_windows else "/"
    non_empty = [segment for segment in items if segment != ""]
    return separator.join(non_empty)
def _collect_cmi_cmj_and_js_depset(deps):
    """Flattens every dep's cmi/cmj/js artifacts into a single depset."""
    return depset([], transitive = [depset(item) for item in [[
        mod[RescriptModuleProvider].module_artifacts.cmi,
        mod[RescriptModuleProvider].module_artifacts.cmj,
        mod[RescriptModuleProvider].module_artifacts.js,
    ] for mod in deps]])
def _get_module_name(src):
    # Strips the 4-character ".res" extension. NOTE(review): assumes `src`
    # is always a .res file — a .resi would be truncated wrong; confirm callers.
    return src.basename[:-4]
def _rescript_module_impl(ctx):
    """Compiles one ReScript module (.res plus optional .resi) to cmi/cmj/js.

    Pipeline: source -> .ast (and interface -> .iast), then bsc produces
    the cmi/cmj artifacts while the generated JS is captured from stdout.
    """
    ast_file = ctx.actions.declare_file(_join_path(ctx.attr.is_windows, [_get_module_name(ctx.file.src) + ".ast"]))
    _compile_to_ast(ctx, ctx.file.src, ast_file)
    iast_file = None
    if ctx.file.interface != None:
        iast_file = ctx.actions.declare_file(_join_path(ctx.attr.is_windows, [_get_module_name(ctx.file.src) + ".iast"]))
        _perhaps_compile_to_iast(ctx, ctx.file.interface, iast_file)

    # Generate cmi, cmj, and js artifacts
    cmi_file = ctx.actions.declare_file(_join_path(ctx.attr.is_windows, [_get_module_name(ctx.file.src) + ".cmi"]))
    cmj_file = ctx.actions.declare_file(_join_path(ctx.attr.is_windows, [_get_module_name(ctx.file.src) + ".cmj"]))
    js_file = ctx.actions.declare_file(_join_path(ctx.attr.is_windows, [_get_module_name(ctx.file.src) + ".js"]))

    # includes dependencies's artifacts and js_file artifacts in the search paths.
    deps_artifacts = _collect_cmi_cmj_and_js_depset(ctx.attr.deps)
    dep_module_dirs = _unique([deps_artifact.dirname for deps_artifact in deps_artifacts.to_list()])

    # Module without interface
    if iast_file == None:
        # Generates all targets cmi, cmj and js all at the same time.
        cmi_cmj_js_args = ctx.actions.args()
        cmi_cmj_js_args.add("-bs-v", "{{COMPILER_VERSION}}")
        cmi_cmj_js_args.add("-I", cmi_file.dirname)  # include the cmi dir.
        for dep_module_dir in dep_module_dirs:
            cmi_cmj_js_args.add("-I", dep_module_dir)
        cmi_cmj_js_args.add("-o", cmi_file)
        cmi_cmj_js_args.add("-o", cmj_file)
        cmi_cmj_js_args.add(ast_file)

        # run_shell because bsc writes the generated JS to stdout.
        ctx.actions.run_shell(
            mnemonic = "CompileToCmiCmjJs",
            tools = [ctx.attr.compiler[CompilerInfo].bsc],
            inputs = [ctx.file.src, ast_file] + deps_artifacts.to_list(),
            outputs = [cmi_file, cmj_file, js_file],
            command = "{} $@ > {}".format(ctx.attr.compiler[CompilerInfo].bsc.path, js_file.path),
            arguments = [cmi_cmj_js_args],
        )
    else:  # Module with interface provided.
        # Generates cmi separately.
        cmi_args = ctx.actions.args()
        cmi_args.add("-I", ctx.file.interface.dirname)
        for dep_module_dir in dep_module_dirs:
            cmi_args.add("-I", dep_module_dir)
        cmi_args.add("-o", cmi_file)
        cmi_args.add(iast_file)
        ctx.actions.run_shell(
            mnemonic = "CompileToCmi",
            tools = [ctx.attr.compiler[CompilerInfo].bsc],
            inputs = [ctx.file.interface, iast_file] + deps_artifacts.to_list(),
            outputs = [cmi_file],
            command = "{} $@".format(ctx.attr.compiler[CompilerInfo].bsc.path),
            arguments = [cmi_args],
        )

        # Generates cmj and js files
        cmi_js_args = ctx.actions.args()
        cmi_js_args.add("-bs-read-cmi")  # Read the CMI file generated from previous step (from iAST file.)
        cmi_js_args.add("-I", cmi_file.dirname)  # include the cmi dir.
        for dep_module_dir in dep_module_dirs:
            cmi_js_args.add("-I", dep_module_dir)
        cmi_js_args.add("-o", cmj_file)
        cmi_js_args.add(ast_file)
        ctx.actions.run_shell(
            mnemonic = "CompileToCmjJs",
            tools = [ctx.attr.compiler[CompilerInfo].bsc],
            inputs = [ctx.file.src, ast_file, cmi_file] + deps_artifacts.to_list(),
            outputs = [cmj_file, js_file],
            command = "{} $@ > {}".format(ctx.attr.compiler[CompilerInfo].bsc.path, js_file.path),
            arguments = [cmi_js_args],
        )

    module_artifacts = RescriptOutputArtifacts(
        cmi = cmi_file,
        cmj = cmj_file,
        js = js_file,
    )
    js_files = [js_file]
    output_files = [
        module_artifacts.js,
        module_artifacts.cmj,
        module_artifacts.cmi,
    ]
    return [
        DefaultInfo(
            files = depset(
                output_files,
                transitive = [dep[RescriptModuleProvider].js_depset for dep in ctx.attr.deps],
            ),
            runfiles = ctx.runfiles(
                files = ctx.files.data + [module_artifacts.js],
                transitive_files = depset([], transitive = [dep[RescriptModuleProvider].data_depset for dep in ctx.attr.deps]),
            ),
        ),
        RescriptModuleProvider(
            js_depset = depset(js_files, transitive = [dep[RescriptModuleProvider].js_depset for dep in ctx.attr.deps]),
            data_depset = depset(ctx.files.data, transitive = [dep[RescriptModuleProvider].data_depset for dep in ctx.attr.deps]),
            module_artifacts = module_artifacts,
        ),
    ]
# Private rule; use the `rescript_module` macro below, which supplies the
# platform-dependent `is_windows` and `compiler` attributes via select().
_rescript_module = rule(
    implementation = _rescript_module_impl,
    executable = False,
    attrs = {
        "compiler": attr.label(
            default = Label("@{{REPO_NAME}}//compiler:darwin"),
            providers = [CompilerInfo],
        ),
        "is_windows": attr.bool(),
        "src": attr.label(
            doc = "Rescript source file",
            allow_single_file = [".res"],
            mandatory = True,
        ),
        "interface": attr.label(
            doc = "Rescript interface file",
            allow_single_file = [".resi"],
        ),
        "deps": attr.label_list(
            doc = "List of dependencies, must be rescript_module targets.",
            providers = [RescriptModuleProvider],
        ),
        "data": attr.label_list(
            doc = "List of data files to include at runtime (consumed by rescript_binary).",
            allow_files = True,
        ),
    },
)
def get_is_windows():
    """Selects True on Windows target platforms, False otherwise.

    Fix: the original was missing `return`, so it always produced None
    and the `is_windows` attribute could never be set to True.
    """
    return select(
        {
            "@platforms//os:windows": True,
            "//conditions:default": False,
        },
    )
def get_compiler():
    """Selects the toolchain target matching the target platform's OS.

    Fix: the original was missing `return`, so the `compiler` attribute
    always fell back to its default (the darwin toolchain) regardless of
    platform.
    """
    return select(
        {
            "@platforms//os:osx": "@{{REPO_NAME}}//compiler:darwin",
            "@platforms//os:windows": "@{{REPO_NAME}}//compiler:windows",
            "@platforms//os:linux": "@{{REPO_NAME}}//compiler:linux",
            "//conditions:default": None,
        },
    )
def rescript_module(
        name,
        src,
        interface = None,
        deps = [],
        data = [],
        **kwargs):
    """
    Produces a rescript module's artifacts.

    Args:
        name: target name.
        src: the .res source file.
        interface: optional .resi interface file.
        deps: rescript_module dependencies.
        data: runtime data files (propagated to rescript_binary).
        **kwargs: forwarded to the underlying rule.
    """
    _rescript_module(
        name = name,
        src = src,
        interface = interface,
        deps = deps,
        data = data,
        # Private attribute not expected to be provided
        is_windows = get_is_windows(),
        compiler = get_compiler(),
        **kwargs
    )
######################################################################################################
def _rescript_binary_impl(ctx):
    """Compiles a .res entry point straight to a runnable .js artifact.

    Fix: removed the unused local `srcFile` (assigned, never read).
    """
    ast_file = ctx.actions.declare_file(_join_path(ctx.attr.is_windows, [ctx.label.name + ".ast"]))
    _compile_to_ast(ctx, ctx.file.src, ast_file)

    cmi_file = ctx.actions.declare_file(_join_path(ctx.attr.is_windows, [ctx.label.name + ".cmi"]))
    cmj_file = ctx.actions.declare_file(_join_path(ctx.attr.is_windows, [ctx.label.name + ".cmj"]))
    js_file = ctx.actions.declare_file(_join_path(ctx.attr.is_windows, [ctx.label.name + ".js"]))

    deps_artifacts = _collect_cmi_cmj_and_js_depset(ctx.attr.deps)
    dep_module_dirs = _unique([deps_artifact.dirname for deps_artifact in deps_artifacts.to_list()])

    # Generates all targets cmi, cmj and js all at the same time.
    cmi_cmj_js_args = ctx.actions.args()
    cmi_cmj_js_args.add("-bs-v", "{{COMPILER_VERSION}}")
    cmi_cmj_js_args.add("-I", cmi_file.dirname)  # include the cmi dir.
    for dep_module_dir in dep_module_dirs:
        cmi_cmj_js_args.add("-I", dep_module_dir)
    cmi_cmj_js_args.add("-o", cmi_file)
    cmi_cmj_js_args.add("-o", cmj_file)
    cmi_cmj_js_args.add(ast_file)

    # run_shell because bsc writes the generated JS to stdout.
    ctx.actions.run_shell(
        mnemonic = "CompileToCmiCmjJs",
        tools = [ctx.attr.compiler[CompilerInfo].bsc],
        inputs = [ctx.file.src, ast_file] + deps_artifacts.to_list(),
        outputs = [cmi_file, cmj_file, js_file],
        command = "{} $@ > {}".format(ctx.attr.compiler[CompilerInfo].bsc.path, js_file.path),
        arguments = [cmi_cmj_js_args],
    )
    return [
        DefaultInfo(
            # NOTE(review): a bare .js as the executable assumes the invoker
            # runs it with node — confirm against how targets are launched.
            executable = js_file,
            runfiles = ctx.runfiles(
                files = ctx.files.data,
                transitive_files = depset(
                    [],
                    transitive = [dep[RescriptModuleProvider].data_depset for dep in ctx.attr.deps] + [dep[RescriptModuleProvider].js_depset for dep in ctx.attr.deps],
                ),
            ),
        ),
    ]
# Private rule; use the `rescript_binary` macro below, which supplies the
# platform-dependent `is_windows` and `compiler` attributes via select().
_rescript_binary = rule(
    implementation = _rescript_binary_impl,
    executable = True,
    attrs = {
        "compiler": attr.label(
            default = Label("@{{REPO_NAME}}//compiler:darwin"),
            providers = [CompilerInfo],
        ),
        "is_windows": attr.bool(),
        "src": attr.label(
            doc = "Rescript source file",
            mandatory = True,
            allow_single_file = [".res"],
        ),
        "deps": attr.label_list(
            doc = "List of dependencies, must be rescript_module targets.",
            providers = [RescriptModuleProvider],
        ),
        "data": attr.label_list(
            doc = "List of data files to include at runtime.",
            allow_files = True,
        ),
    },
)
def rescript_binary(
        name,
        src,
        deps = [],
        data = [],
        **kwargs):
    """
    Produces Js binary artifacts.

    Args:
        name: target name.
        src: the .res entry-point source file.
        deps: rescript_module dependencies.
        data: runtime data files.
        **kwargs: forwarded to the underlying rule.
    """
    _rescript_binary(
        name = name,
        src = src,
        deps = deps,
        data = data,
        is_windows = get_is_windows(),
        compiler = get_compiler(),
        **kwargs
    )
|
'''
https://leetcode.com/problems/longest-absolute-file-path/
388. Longest Absolute File Path
Suppose we have a file system that stores both files and directories. An example of one system is represented in the following picture:
Here, we have dir as the only directory in the root. dir contains two subdirectories, subdir1 and subdir2. subdir1 contains a file file1.ext
and subdirectory subsubdir1. subdir2 contains a subdirectory subsubdir2, which contains a file file2.ext.
In text form, it looks like this (with ⟶ representing the tab character):
dir
⟶ subdir1
⟶ ⟶ file1.ext
⟶ ⟶ subsubdir1
⟶ subdir2
⟶ ⟶ subsubdir2
⟶ ⟶ ⟶ file2.ext
If we were to write this representation in code, it will look like this:
"dir\n\tsubdir1\n\t\tfile1.ext\n\t\tsubsubdir1\n\tsubdir2\n\t\tsubsubdir2\n\t\t\tfile2.ext". Note that the '\n' and '\t' are the new-line
and tab characters.
Every file and directory has a unique absolute path in the file system, which is the order of directories that must be opened to reach
the file/directory itself, all concatenated by '/'s. Using the above example, the absolute path to file2.ext is "dir/subdir2/subsubdir2/file2.ext".
Each directory name consists of letters, digits, and/or spaces. Each file name is of the form name.extension, where name and extension consist of
letters, digits, and/or spaces.
Given a string input representing the file system in the explained format, return the length of the longest absolute path to a file in the abstracted
file system. If there is no file in the system, return 0.
'''
'''
Accepted
'''
class Solution:
    def lengthLongestPath(self, input: str) -> int:
        """Return the length of the longest absolute path to a file in the
        serialized file system, or 0 when there is no file.

        Split the input on newlines; the number of leading tabs gives an
        entry's depth. A stack of (name, depth) pairs holds the current
        path branch; whenever an entry closes a branch, any file popped
        off the top has its absolute-path length measured.
        """
        if len(input) == 0:
            return 0

        max_length = 0
        stack = []  # (name, depth) pairs forming the current path branch

        def path_len(name):
            # Length of the absolute path ending at `name`, given the
            # ancestors currently on the stack.
            if stack:
                return len("/".join(part for part, _ in stack)) + 1 + len(name)
            return len(name)

        for entry in input.split("\n"):
            depth = entry.count("\t")
            name = entry.replace("\t", "")
            # Close branches at the same or deeper level; a popped entry
            # containing '.' is a file whose path we measure.
            while stack and stack[-1][1] >= depth:
                popped, _ = stack.pop()
                if "." in popped:
                    max_length = max(max_length, path_len(popped))
            stack.append((name, depth))

        # The deepest entry of the final branch may be an unmeasured file.
        if stack:
            popped, _ = stack.pop()
            if "." in popped:
                max_length = max(max_length, path_len(popped))

        return max_length
# Manual check drivers. Renamed the variable from `input`, which shadowed
# the builtin of the same name at module level.
# sample = "dir\n\tsubdir1\n\t\tfile1.ext\n\t\tsubsubdir1\n\tsubdir2\n\t\tsubsubdir2\n\t\t\tfile2.ext"
# sample = "a"
sample = "dir\n\tsubdir1\n\tsubdir2\n\t\tfile.ext"
# sample = "file1.txt\nfile2.txt\nlongfile.txt"
print(Solution().lengthLongestPath(sample))
|
# https://leetcode.com/explore/featured/card/fun-with-arrays/521/introduction/3238/
class Solution:
    def findMaxConsecutiveOnes(self, nums):
        """Return the length of the longest run of consecutive 1s in nums.

        Single O(n) pass: extend the current run on a 1, reset it on
        anything else, and track the best run seen. Replaces the original
        nested two-pointer scan, which re-walked each run and carried
        fragile skip-past-zero index bookkeeping.
        """
        best = 0
        current = 0
        for value in nums:
            if value == 1:
                current += 1
                if current > best:
                    best = current
            else:
                current = 0
        return best
if __name__ == "__main__":
    # Smoke tests: the longest runs are 6 (tail) and 1 respectively.
    assert Solution().findMaxConsecutiveOnes([0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1]) == 6
    assert Solution().findMaxConsecutiveOnes([0, 1]) == 1
|
# ------------------------------
# 274. H-Index
#
# Description:
# Given an array of citations (each citation is a non-negative integer) of a researcher, write a function to compute the researcher's h-index.
# According to the definition of h-index on Wikipedia: "A scientist has index h if h of his/her N papers have at least h citations each, and the other N − h papers have no more than h citations each."
# Example:
# Input: citations = [3,0,6,1,5]
# Output: 3
# Explanation: [3,0,6,1,5] means the researcher has 5 papers in total and each of them had
# received 3, 0, 6, 1, 5 citations respectively.
# Since the researcher has 3 papers with at least 3 citations each and the remaining
# two with no more than 3 citations each, her h-index is 3.
#
# Version: 1.0
# 09/27/18 by Jianfa
# ------------------------------
class Solution(object):
    def hIndex(self, citations):
        """
        :type citations: List[int]
        :rtype: int

        Sort descending (in place, preserving the original's side
        effect); h is the largest rank r such that the r-th best paper
        has at least r citations.
        """
        if not citations:
            return 0
        citations.sort(reverse=True)
        h = 0
        for rank, count in enumerate(citations, start=1):
            if count < rank:
                break
            h = rank
        return h
# Used for testing
if __name__ == "__main__":
    # e.g. test.hIndex([3, 0, 6, 1, 5]) == 3
    test = Solution()
# ------------------------------
# Summary:
# Sort at first, and check each citation from high to low. |
class Solution(object):
    def minTimeToVisitAllPoints(self, points):
        """
        :type points: List[List[int]]
        :rtype: int

        Diagonal moves cover one unit on both axes per second, so the
        time between two points is their Chebyshev distance
        max(|dx|, |dy|); sum it over consecutive pairs.
        """
        total = 0
        for start, end in zip(points, points[1:]):
            total += max(abs(end[0] - start[0]), abs(end[1] - start[1]))
        return total
s = Solution()
# Expected outputs: 7 and 5 (Chebyshev path sums).
print("Solution 1 : ", s.minTimeToVisitAllPoints([[1, 1], [3, 4], [-1, 0]]))
print("Solution 2 : ", s.minTimeToVisitAllPoints([[3, 2], [-2, 2]]))
|
#
# PySNMP MIB module Nortel-Magellan-Passport-FrameRelayNniTraceMIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Nortel-Magellan-Passport-FrameRelayNniTraceMIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:17:48 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint")
frNni, frNniIndex = mibBuilder.importSymbols("Nortel-Magellan-Passport-FrameRelayNniMIB", "frNni", "frNniIndex")
Unsigned32, DisplayString, RowPointer, StorageType, RowStatus = mibBuilder.importSymbols("Nortel-Magellan-Passport-StandardTextualConventionsMIB", "Unsigned32", "DisplayString", "RowPointer", "StorageType", "RowStatus")
NonReplicated, AsciiString = mibBuilder.importSymbols("Nortel-Magellan-Passport-TextualConventionsMIB", "NonReplicated", "AsciiString")
passportMIBs, = mibBuilder.importSymbols("Nortel-Magellan-Passport-UsefulDefinitionsMIB", "passportMIBs")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Unsigned32, TimeTicks, Counter32, Counter64, Bits, ObjectIdentity, Gauge32, Integer32, ModuleIdentity, IpAddress, MibIdentifier, NotificationType, iso, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "TimeTicks", "Counter32", "Counter64", "Bits", "ObjectIdentity", "Gauge32", "Integer32", "ModuleIdentity", "IpAddress", "MibIdentifier", "NotificationType", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# pysmi-generated MIB object definitions — do not hand-edit the OID tuples;
# regenerate from the ASN.1 source instead.
frameRelayNniTraceMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 106))
frNniTrace = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 70, 7))

# Trace row-status table: one row per trace instance.
frNniTraceRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 70, 7, 1), )
if mibBuilder.loadTexts: frNniTraceRowStatusTable.setStatus('mandatory')
frNniTraceRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 70, 7, 1, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-FrameRelayNniMIB", "frNniIndex"), (0, "Nortel-Magellan-Passport-FrameRelayNniTraceMIB", "frNniTraceIndex"))
if mibBuilder.loadTexts: frNniTraceRowStatusEntry.setStatus('mandatory')
frNniTraceRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 70, 7, 1, 1, 1), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frNniTraceRowStatus.setStatus('mandatory')
frNniTraceComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 70, 7, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frNniTraceComponentName.setStatus('mandatory')
frNniTraceStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 70, 7, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frNniTraceStorageType.setStatus('mandatory')
frNniTraceIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 70, 7, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: frNniTraceIndex.setStatus('mandatory')

# Trace operational table: receiver, duration, queue limit, session pointer.
frNniTraceOperationalTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 70, 7, 10), )
if mibBuilder.loadTexts: frNniTraceOperationalTable.setStatus('mandatory')
frNniTraceOperationalEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 70, 7, 10, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-FrameRelayNniMIB", "frNniIndex"), (0, "Nortel-Magellan-Passport-FrameRelayNniTraceMIB", "frNniTraceIndex"))
if mibBuilder.loadTexts: frNniTraceOperationalEntry.setStatus('mandatory')
frNniTraceReceiverName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 70, 7, 10, 1, 2), AsciiString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frNniTraceReceiverName.setStatus('mandatory')
frNniTraceDuration = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 70, 7, 10, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 9999)).clone(60)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frNniTraceDuration.setStatus('mandatory')
frNniTraceQueueLimit = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 70, 7, 10, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 100)).clone(20)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frNniTraceQueueLimit.setStatus('mandatory')
frNniTraceSession = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 70, 7, 10, 1, 5), RowPointer()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frNniTraceSession.setStatus('mandatory')

# Trace filter subtree: row-status and operational (type/dlci/direction/length).
frNniTraceFilter = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 70, 7, 2))
frNniTraceFilterRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 70, 7, 2, 1), )
if mibBuilder.loadTexts: frNniTraceFilterRowStatusTable.setStatus('mandatory')
frNniTraceFilterRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 70, 7, 2, 1, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-FrameRelayNniMIB", "frNniIndex"), (0, "Nortel-Magellan-Passport-FrameRelayNniTraceMIB", "frNniTraceIndex"), (0, "Nortel-Magellan-Passport-FrameRelayNniTraceMIB", "frNniTraceFilterIndex"))
if mibBuilder.loadTexts: frNniTraceFilterRowStatusEntry.setStatus('mandatory')
frNniTraceFilterRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 70, 7, 2, 1, 1, 1), RowStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frNniTraceFilterRowStatus.setStatus('mandatory')
frNniTraceFilterComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 70, 7, 2, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frNniTraceFilterComponentName.setStatus('mandatory')
frNniTraceFilterStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 70, 7, 2, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frNniTraceFilterStorageType.setStatus('mandatory')
frNniTraceFilterIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 70, 7, 2, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: frNniTraceFilterIndex.setStatus('mandatory')
frNniTraceFilterOperationalTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 70, 7, 2, 10), )
if mibBuilder.loadTexts: frNniTraceFilterOperationalTable.setStatus('mandatory')
frNniTraceFilterOperationalEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 70, 7, 2, 10, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-FrameRelayNniMIB", "frNniIndex"), (0, "Nortel-Magellan-Passport-FrameRelayNniTraceMIB", "frNniTraceIndex"), (0, "Nortel-Magellan-Passport-FrameRelayNniTraceMIB", "frNniTraceFilterIndex"))
if mibBuilder.loadTexts: frNniTraceFilterOperationalEntry.setStatus('mandatory')
frNniTraceFilterTraceType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 70, 7, 2, 10, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1).clone(hexValue="e0")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frNniTraceFilterTraceType.setStatus('mandatory')
frNniTraceFilterTracedDlci = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 70, 7, 2, 10, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 1007))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frNniTraceFilterTracedDlci.setStatus('mandatory')
frNniTraceFilterDirection = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 70, 7, 2, 10, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 1)).setFixedLength(1).clone(hexValue="c0")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frNniTraceFilterDirection.setStatus('mandatory')
frNniTraceFilterTracedLength = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 70, 7, 2, 10, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 2000)).clone(2000)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: frNniTraceFilterTracedLength.setStatus('mandatory')

# Capability/group identifier subtree.
frameRelayNniTraceGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 106, 1))
frameRelayNniTraceGroupBD = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 106, 1, 4))
frameRelayNniTraceGroupBD01 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 106, 1, 4, 2))
frameRelayNniTraceGroupBD01A = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 106, 1, 4, 2, 2))
frameRelayNniTraceCapabilities = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 106, 3))
frameRelayNniTraceCapabilitiesBD = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 106, 3, 4))
frameRelayNniTraceCapabilitiesBD01 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 106, 3, 4, 2))
frameRelayNniTraceCapabilitiesBD01A = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 106, 3, 4, 2, 2))
mibBuilder.exportSymbols("Nortel-Magellan-Passport-FrameRelayNniTraceMIB", frNniTraceFilterDirection=frNniTraceFilterDirection, frNniTraceReceiverName=frNniTraceReceiverName, frNniTraceFilterStorageType=frNniTraceFilterStorageType, frNniTraceRowStatusTable=frNniTraceRowStatusTable, frameRelayNniTraceCapabilities=frameRelayNniTraceCapabilities, frNniTraceIndex=frNniTraceIndex, frNniTraceRowStatusEntry=frNniTraceRowStatusEntry, frNniTraceFilterOperationalEntry=frNniTraceFilterOperationalEntry, frNniTraceFilterRowStatusTable=frNniTraceFilterRowStatusTable, frNniTraceFilterTraceType=frNniTraceFilterTraceType, frNniTraceFilterIndex=frNniTraceFilterIndex, frameRelayNniTraceCapabilitiesBD=frameRelayNniTraceCapabilitiesBD, frameRelayNniTraceCapabilitiesBD01A=frameRelayNniTraceCapabilitiesBD01A, frNniTraceFilterRowStatus=frNniTraceFilterRowStatus, frameRelayNniTraceGroup=frameRelayNniTraceGroup, frNniTrace=frNniTrace, frNniTraceComponentName=frNniTraceComponentName, frameRelayNniTraceCapabilitiesBD01=frameRelayNniTraceCapabilitiesBD01, frNniTraceFilterRowStatusEntry=frNniTraceFilterRowStatusEntry, frameRelayNniTraceGroupBD01A=frameRelayNniTraceGroupBD01A, frNniTraceSession=frNniTraceSession, frameRelayNniTraceMIB=frameRelayNniTraceMIB, frNniTraceQueueLimit=frNniTraceQueueLimit, frNniTraceFilterOperationalTable=frNniTraceFilterOperationalTable, frNniTraceOperationalEntry=frNniTraceOperationalEntry, frameRelayNniTraceGroupBD=frameRelayNniTraceGroupBD, frNniTraceFilterTracedLength=frNniTraceFilterTracedLength, frNniTraceOperationalTable=frNniTraceOperationalTable, frameRelayNniTraceGroupBD01=frameRelayNniTraceGroupBD01, frNniTraceFilterComponentName=frNniTraceFilterComponentName, frNniTraceDuration=frNniTraceDuration, frNniTraceStorageType=frNniTraceStorageType, frNniTraceRowStatus=frNniTraceRowStatus, frNniTraceFilter=frNniTraceFilter, frNniTraceFilterTracedDlci=frNniTraceFilterTracedDlci)
|
# Names of the perf counters collected per run.
RSS_PERF_NAME = "rss"
FLT_PERF_NAME = "flt"
CPU_CLOCK_PERF_NAME = "cpu_clock"
TSK_CLOCK_PERF_NAME = "task_clock"
SIZE_PERF_NAME = "file_size"

# Keys used to group the measured overheads.
EXEC_OVER_HEAD_KEY = 'exec'
MEM_USE_OVER_HEAD_KEY = 'mem'
FILE_SIZE_OVER_HEAD_KEY = 'size'
|
LINE_LEN = 25  # fixed width of every text line

# For each dataset: read n lines, compute each line's free surface
# (leading gap + trailing gap relative to the 25-char width), then print
# the total extra surface needed to pad every line up to the widest gap.
while True:
    try:
        n = int(input())
        if n == 0:
            break
        surface = []
        max_len = 0
        for i in range(n):
            line = input().strip()
            l_space = line.find(' ')
            if l_space == -1:
                # No space in the line (e.g. empty after strip): the whole
                # width counts as gap. BUG FIX: cur_len was previously left
                # unset here, causing a NameError on the first line or a
                # stale value being appended afterwards.
                cur_len = LINE_LEN
                max_len = LINE_LEN
            else:
                r_space = line.rfind(' ')
                cur_len = l_space + LINE_LEN - r_space - 1
                if cur_len > max_len:
                    max_len = cur_len
            surface.append(cur_len)
        res = sum(map(lambda x: max_len - x, surface))
        print(res)
    except EOFError:
        break
|
# Module-level handles; presumably injected by the host runtime at startup
# (None until then) — TODO confirm against the loader.
fs = None
sys = None
dataJar = None

version = '1.1.0'

# Open window references.
windows = []

# Default application manifest; individual apps may override these fields.
manifest = {
    'name': 'Hybrid App Engine',
    'version': version,
    'description': '轻量级Web桌面应用引擎',
    'path': '',
    'icon': '',
    'width': 800,
    'height': 600,
    'visible': True,
    'resizable': True,
    'frameless': False,
    'transBackground': False
}
# Load the two DNA strings (one per line) from the Rosalind dataset.
with open('rosalind_motif.txt') as f:
    a = [line.strip() for line in f.readlines()]
dna1, dna2 = a[0], a[1]
def find_motif(dna1, dna2):
    """Given: Two DNA strings s and t (each of length at most 1 kbp).

    Return: All locations of t as a substring of s, 1-based, printed
    space-separated. The formatted string is also returned so callers can
    reuse it (previously the function only printed and returned None).
    """
    k = len(dna2)
    # 1-based start positions of every (possibly overlapping) occurrence.
    indexes = [str(i + 1) for i in range(len(dna1) - k + 1) if dna1[i:i + k] == dna2]
    result = " ".join(indexes)
    print(result)
    return result
# Print all 1-based positions of dna2 inside dna1.
find_motif(dna1, dna2)
|
class Solution:
    def findMin(self, nums: List[int]) -> int:
        """Return the minimum of a rotated sorted array that may contain
        duplicates, via binary search (O(log n) average, O(n) worst case
        when duplicates force the linear shrink)."""
        lo, hi = 0, len(nums) - 1
        while lo < hi:
            mid = (lo + hi) // 2
            if nums[mid] > nums[hi]:
                # minimum lies strictly to the right of mid
                lo = mid + 1
            elif nums[mid] < nums[hi]:
                # minimum is at mid or to its left
                hi = mid
            else:
                # nums[mid] == nums[hi]: can't decide, shrink from the right
                hi -= 1
        return nums[lo]
|
def cyclicQ(ll):
    """Detect a cycle in linked list *ll* (Floyd's tortoise-and-hare).

    Returns the node where the cycle begins, or None if the list is acyclic.
    NOTE(review): `poop.next.next` below assumes the list has at least two
    nodes after the head; shorter acyclic lists raise AttributeError —
    confirm callers guarantee this.
    """
    da_node = ll          # walker used to measure the cycle length
    sentient = ll         # second head pointer for the final meeting walk
    poop = ll             # fast pointer, advances two nodes per step
    poop = poop.next.next
    ll = ll.next          # slow pointer, advances one node per step
    while poop != ll and poop is not None and poop.next is not None:
        poop = poop.next.next
        ll = ll.next
    if poop is None or poop.next is None:
        # fast pointer fell off the end: no cycle
        return None
    # Walk the cycle once, advancing da_node cycle-length nodes from the head.
    node_in_cycle = ll
    while ll.next != node_in_cycle:
        ll = ll.next
        da_node = da_node.next
    da_node = da_node.next
    # With da_node exactly cycle-length ahead, both pointers meet at the start
    # of the cycle.
    while sentient != da_node:
        sentient = sentient.next
        da_node = da_node.next
    return da_node
|
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
class Trace:
    """Base class for anything that can produce a trace entry."""

    def trace(self) -> str:
        """Return a human-readable representation of this entry.

        Used by tools such as the standalone ISS when run with -v.
        """
        raise NotImplementedError()


class TracePC(Trace):
    """Trace entry recording a program-counter value."""

    def __init__(self, pc: int):
        self.pc = pc

    def trace(self) -> str:
        return f"pc = {self.pc:#x}"
|
# Wagtail search configuration: use the PostgreSQL full-text search backend.
WAGTAILSEARCH_BACKENDS = {
    "default": {"BACKEND": "wagtail.contrib.postgres_search.backend"}
}
|
def key_in_dict_not_empty(key, dictionary):
    """Return True iff *key* exists in *dictionary* and its value is non-empty."""
    if key in dictionary:
        return is_not_empty(dictionary[key])
    return False


def is_empty(value):
    """Return True for None and for empty strings/lists/dicts.

    Note: 0, empty tuples and empty sets are deliberately NOT considered
    empty — only the three literals below compare equal via `in`.
    """
    if value is None:
        return True
    return value in ['', [], {}]


def is_not_empty(value):
    """Logical negation of is_empty()."""
    return not is_empty(value)


def drop_dupplicates_values(values):
    """Return *values* with duplicates removed, preserving first-seen order."""
    seen = set()
    seen_add = seen.add  # bind once: avoids attribute lookup per element
    return [x for x in values if not (x in seen or seen_add(x))]
# https://www.codingame.com/training/easy/1d-spreadsheet
def add_dependency(cell, arg_cell, in_deps, out_deps):
    """Record that *cell* references *arg_cell* in both adjacency maps."""
    out_deps.setdefault(cell, set()).add(arg_cell)
    in_deps.setdefault(arg_cell, set()).add(cell)
def remove_dependency(cell, in_deps, out_deps):
    """Resolve *cell*: drop it from its dependents' outstanding sets.

    Returns the dependents that are left with no outstanding dependencies.
    """
    freed = []
    for dependent in in_deps.get(cell, ()):
        out_deps[dependent].discard(cell)
        if not out_deps[dependent]:
            freed.append(dependent)
    return freed
def evaluate_dependencies(cells, in_deps, out_deps, cell_operations):
    """Evaluate every cell in topological order (Kahn's algorithm) and print
    each resulting value, one per line."""
    ready_cells = set()       # cells whose dependencies are all resolved
    evaluated_cells = set()
    for cell in out_deps:
        if not out_deps[cell]:
            ready_cells.add(cell)
    while ready_cells:
        cell = ready_cells.pop()
        evaluated_cells.add(cell)
        perform_operation(cell=cell, **cell_operations[cell], cells=cells)
        rc = remove_dependency(cell, in_deps, out_deps)
        # cells freed by this evaluation become ready (skip already-done ones)
        ready_cells.update([o for o in rc if o not in evaluated_cells])
    for cell in cells:
        print(cell)
def get_arg_val(arg, cells):
    """Resolve an argument token: '$k' -> cells[k], '_' -> 0, else a literal int."""
    return cells[int(arg[1:])] if '$' in arg else (0 if '_' in arg else int(arg))
def perform_operation(cell, operation, arg1, arg2, cells):
    """Evaluate one cell: resolve both argument tokens and apply the
    operation (VALUE/ADD/SUB; anything else is treated as MULT), storing the
    result into cells[cell]."""
    val1 = get_arg_val(arg1, cells)
    val2 = get_arg_val(arg2, cells)
    if operation == 'VALUE':
        cells[cell] = val1
    elif operation == 'ADD':
        cells[cell] = val1 + val2
    elif operation == 'SUB':
        cells[cell] = val1 - val2
    else:
        # MULT — the only remaining operation in the puzzle grammar
        cells[cell] = val1 * val2
def solution():
    """Read the 1-D spreadsheet from stdin, build the dependency graph and
    evaluate every cell in topological order (results are printed)."""
    num_cells = int(input())
    in_deps = {}    # cell -> set of cells that reference it
    out_deps = {}   # cell -> set of cells it still waits on
    cells = [0] * num_cells
    cell_operations = [{} for _ in range(num_cells)]
    for cell in range(num_cells):
        operation, arg1, arg2 = input().split()
        cell_operations[cell] = {
            'operation': operation,
            'arg1': arg1,
            'arg2': arg2
        }
        if cell not in out_deps: out_deps[cell] = set()
        if '$' in arg1: add_dependency(cell, int(arg1[1:]), in_deps, out_deps)
        if '$' in arg2: add_dependency(cell, int(arg2[1:]), in_deps, out_deps)
    evaluate_dependencies(cells, in_deps, out_deps, cell_operations)
solution()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class Word:
    """Simple value object describing a vocabulary word and its picture."""

    def __init__(self, id, name, image, category=None):
        # Bulk-assign the constructor arguments to same-named attributes.
        self.id, self.name, self.image, self.category = id, name, image, category
|
def optimal_order(predecessors_map, weight_map):
    """Return (objective, order) for the scheduling that minimizes the
    telescoped product-sum of weights, honoring predecessor constraints.

    predecessors_map: vertex -> list of vertices that must come before it.
    weight_map: vertex -> positive weight.
    """
    vertices = frozenset(predecessors_map.keys())
    # Base case: nothing left to schedule costs 0.
    memo_map = {frozenset(): (0, [])}
    return optimal_order_helper(predecessors_map, weight_map, vertices, memo_map)


def optimal_order_helper(predecessors_map, weight_map, vertices, memo_map):
    """Memoized (Held-Karp style) search over the remaining *vertices*."""
    if vertices in memo_map:
        return memo_map[vertices]
    possibilities = []
    for v in vertices:
        # v may only be scheduled first if none of its predecessors remain.
        if any(u in vertices for u in predecessors_map[v]):
            continue
        sub_obj, sub_order = optimal_order_helper(
            predecessors_map, weight_map, vertices - frozenset({v}), memo_map)
        # x + x*y + x*y*z = x*(1 + y*(1 + z))
        possibilities.append((weight_map[v] * (1.0 + sub_obj), [v] + sub_order))
    best = min(possibilities)
    memo_map[vertices] = best
    return best


print(optimal_order({'u': [], 'v': ['u'], 'w': [], 'x': ['w']}, {'u': 1.2, 'v': 0.5, 'w': 1.1, 'x': 1.001}))
# Program to implement First Come First Served CPU scheduling algorithm
print("First Come First Served scheduling Algorithm")
print("============================================\n")
headers = ['Processes','Arrival Time','Burst Time','Waiting Time'
,'Turn-Around Time','Completion Time']
# Dictionary to store the output
out = dict()
# Get number of processes from User
N = int(input("Number of processes : "))
a, b = 0, 0
# Get Arrival time and Burst time of N processes
for i in range(0,N):
    k = f"P{i+1}"
    a = int(input(f"Enter Arrival time of process{i+1} :: "))
    b = int(input(f"Enter Burst time of process{i+1} :: "))
    out[k] = [a,b]
# storing processes in order of increasing arrival time
# (note: `out` becomes a list of (name, [arrival, burst, ...]) tuples here)
out = sorted(out.items(),key=lambda i:i[1][0])
# storing Completion times
# NOTE(review): completion = previous completion + burst assumes the CPU is
# never idle between arrivals — confirm inputs guarantee back-to-back arrivals.
for i in range(0,N):
    if i == 0:
        out[i][1].append(out[i][1][0]+out[i][1][1])
    else:
        out[i][1].append(out[i-1][1][2]+out[i][1][1])
# storing turn-around times (completion - arrival)
for i in range(0,N):
    out[i][1].append(out[i][1][2]-out[i][1][0])
# storing waiting time (turn-around - burst)
for i in range(0,N):
    out[i][1].append(out[i][1][3]-out[i][1][1])
# storing avg waiting time and avg turn around time
avgWaitTime = 0
avgTATime = 0
for i in range(0,N):
    avgWaitTime += out[i][1][4]
    avgTATime += out[i][1][3]
avgWaitTime /= N
avgTATime /= N
# Per-process row layout: [arrival, burst, completion, turn-around, waiting]
print(f"\n{headers[0]:^15}{headers[1]:^15}{headers[2]:^15}{headers[3]:^15}{headers[4]:^20}{headers[5]:^20}")
for a in out:
    print(f"{a[0]:^15}{a[1][0]:^15}{a[1][1]:^15}{a[1][4]:^15}{a[1][3]:^20}{a[1][2]:^20}")
print(f"\nAverage Waiting Time : {avgWaitTime:.2f}\nAverage Turn-Around Time : {avgTATime:.2f}")
# -*- coding: utf-8 -*-
# Package metadata.
__author__ = 'Sinval Vieira Mendes Neto'
__email__ = '[email protected]'
__version__ = '0.1.0'
|
# -*- coding: utf-8 -*-
#
# This file is part of the SKATestDevice project
#
#
#
# Distributed under the terms of the none license.
# See LICENSE.txt for more info.
"""Release information for Python Package"""
# Release metadata consumed by the packaging tooling.
name = """tangods-skatestdevice"""
version = "1.0.0"
version_info = version.split(".")
description = """A generic Test device for testing SKA base class functionalites."""
author = "cam"
author_email = "cam at ska.ac.za"
license = """BSD-3-Clause"""
url = """www.tango-controls.org"""
copyright = """"""
|
#--coding:utf-8 --
"""
"""
class cDBSCAN:
    """
    Grid-accelerated DBSCAN (cDBSCAN): points are bucketed into square cells
    of width eps so neighbor queries only scan the 3x3 surrounding cells.
    Original algorithm by CAO Yaqiang, CHEN Xingwei.
    Result: self.labels maps pointId -> cluster id for retained points.
    """
    def __init__(self, mat, eps, minPts):
        """
        @param mat: the raw or normalized [pointId,X,Y] data matrix
        @type mat : np.array
        @param eps: The clustering distance threshold, key parameter in DBSCAN.
        @type eps: float
        @param minPts: The min point in neighbor to define a core point, key
            parameter in DBSCAN.
        @type minPts: int
        """
        #: build the data in the class for global use
        self.eps = eps
        self.minPts = minPts
        #: cell width, city block distance
        self.cw = self.eps
        #: build the square index for quick neighbor search
        self.buildGrids(mat)
        #: get the points for all neighbors
        self.buildGridNeighbors()
        #: remove noise grids
        self.removeNoiseGrids()
        #: get the points for all neighbors (rebuilt after noise removal)
        self.buildGridNeighbors()
        #: get the clusters
        self.callClusters()
        # free the intermediate indexes; only self.labels survives
        del self.Gs, self.Gs2, self.ps
    def getDist(self, p, q):
        """
        Basic function 1, city block (Manhattan) distance between points p and q.
        """
        x = self.ps[p]
        y = self.ps[q]
        d = abs(x[0] - y[0]) + abs(x[1] - y[1])
        #euclidean distance ,just in case.
        #d = np.sqrt((x[0] - y[0])**2 + (x[1] - y[1])**2)
        return d
    def getNearbyGrids(self, cell):
        """
        Basic funciton 2, 9 grid as searching neghbors, grid width is eps.
        Returns the existing cells among the 8 surrounding ones (the center
        cell itself is intentionally excluded here).
        """
        x, y = cell[0], cell[1]
        #keys = [(x, y),
        keys = [(x, y - 1), (x, y + 1), (x - 1, y), (x + 1, y), (x - 1, y - 1),
                (x - 1, y + 1), (x + 1, y - 1), (x + 1, y + 1)]
        #keys = [(x, y), (x, y - 1), (x, y + 1), (x - 1, y), (x - 1, y - 1),
        #        (x - 1, y + 1), (x + 1, y), (x + 1, y - 1), (x + 1, y + 1),
        #        (x, y + 2), (x, y - 2), (x + 1, y + 2), (x + 1, y - 2),
        #        (x - 1, y + 2), (x - 1, y - 2), (x + 2, y), (x + 2, y + 1),
        #        (x + 2, y - 1), (x - 2, y), (x - 2, y + 1), (x - 2, y - 1)]
        ncells = []
        for key in keys:
            if key in self.Gs:
                ncells.append(key)
        return ncells
    def buildGrids(self, mat):
        """
        Algorithm 1: Construct the grids.
        Buckets every point into an integer (nx, ny) cell of width self.cw.
        @param mat: the raw or normalized [pointId,X,Y] data matrix
        """
        minX, minY = mat[0][1], mat[0][2]
        for t in mat:
            minX = min([minX, t[1]])
            minY = min([minY, t[2]])
        Gs = {}
        ps = {}
        for d in mat:
            nx = int((d[1] - minX) / self.cw) + 1
            ny = int((d[2] - minY) / self.cw) + 1
            Gs.setdefault((nx, ny), [])
            Gs[(nx, ny)].append(d[0])
            #last elements marks the class, initially -1 as noise
            ps[d[0]] = [d[1], d[2], nx, ny, -1]
        self.Gs, self.ps = Gs, ps
    def buildGridNeighbors(self):
        """
        Algorithm 2 : Grid index with all neighbor points.
        Gs2[cell] = points in the cell plus all points in its adjacent cells.
        """
        Gs2 = {}
        for cell in self.Gs.keys():
            nps = []
            nps.extend(self.Gs[cell])
            for cellj in self.getNearbyGrids(cell):
                nps.extend(self.Gs[cellj])
            Gs2[cell] = nps
        self.Gs2 = Gs2
    def removeNoiseGrids(self):
        """
        Algorithm 3: Remove noise grid according to KNN and get the obvious core points and core grids.
        """
        #: noise cells without neighbors
        tode = set()
        #: noise cells with neighbors
        tode2 = set()
        for cell in self.Gs.keys():
            if len(self.Gs2[cell]) < self.minPts:
                tode2.add(cell)
        #KNN to noise cells with neighbors
        for cell in tode2:
            cells = self.getNearbyGrids(cell)
            ncells = set(cells) & tode2
            #all neighbor cells are noise
            if len(cells) == len(ncells):
                tode.add(cell)
        for cell in tode:
            for p in self.Gs[cell]:
                del self.ps[p]
            del self.Gs[cell]
    def callClusters(self):
        """
        Algorithm 4: Do DBSCAN clustering by go through all points in the sets.
        """
        #: clustering id, noise is -2 and unclassified point is -1.
        clusterId = 0
        for key in self.ps:
            if self.ps[key][-1] == -1:
                if self.expandCluster(key, clusterId):
                    clusterId += 1
        #remove the noise and unclassified points
        labels = {}
        cs = {}
        for p in self.ps.keys():
            c = self.ps[p][-1]
            if c == -2:
                continue
            labels[p] = c
            if c not in cs:
                cs[c] = []
            cs[c].append(p)
        # drop clusters smaller than minPts entirely
        for key in cs.keys():
            if len(cs[key]) < self.minPts:
                for p in cs[key]:
                    del labels[p]
        self.labels = labels
    def expandCluster(self, pointKey, clusterId):
        """
        Search connection for given point to others.
        @param pointKey: the key in self.dataPoints
        @type pointKey:
        @param clusterId: the cluster id for the current
        @type clusterId: int
        @return: bool — True if a cluster was grown from this seed point,
            False if the point was marked as noise (-2).
        """
        seeds = self.regionQuery(pointKey)
        if len(seeds) < self.minPts:
            self.ps[pointKey][-1] = -2
            return False
        else:
            for key in seeds:
                self.ps[key][-1] = clusterId
            # breadth-first expansion over density-reachable points
            while len(seeds) > 0:
                currentP = seeds[0]
                result = self.regionQuery(currentP)
                if len(result) >= self.minPts:
                    for key in result:
                        if self.ps[key][-1] in [-1, -2]:
                            if self.ps[key][-1] == -1:
                                seeds.append(key)
                            self.ps[key][-1] = clusterId
                del (seeds[0])
            return True
    def regionQuery(self, pointKey):
        """
        Find the related points to the queried point, city block distance is used.
        @param pointKey: the key in self.dataPoints
        @type pointKey:
        @return: list of point keys within eps (the query point included)
        """
        p = self.ps[pointKey]
        x = p[2]
        y = p[3]
        #scan square and get nearby points.
        result = [pointKey]
        for q in self.Gs2[(x, y)]:
            if q == pointKey:
                continue
            if self.getDist(pointKey, q) <= self.eps:
                result.append(q)
        return result
|
# Tuples in python
def swap(m, n, k):
    """Rotate the three arguments left: (m, n, k) -> (n, k, m).

    Equivalent to the classic temp-variable shuffle, expressed as a single
    tuple return.
    """
    return n, k, m


def main():
    """Demonstrate tuple indexing, searching, concatenation and unpacking."""
    a = (1, "iPhone 12 Pro Max", 1.8, 1)
    print(a)
    print(a[1])
    print(a.index(1.8))
    print(a.count(1))
    b = a + (999, 888)
    print(b)
    m, n, k = 1, 2, 3
    m, n, k = swap(m, n, k)
    print(m, n, k)


if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-
"""
***************************************************************************
__init__.py
---------------------
Date : August 2014
Copyright : (C) 2014-2016 Boundless, http://boundlessgeo.com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2014'
__copyright__ = '(C) 2014-2016 Boundless, http://boundlessgeo.com'

# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'

# Well-known ref names.
MASTER = 'master'
HEAD = 'HEAD'
WORK_HEAD = 'WORK_HEAD'
STAGE_HEAD = 'STAGE_HEAD'

# Supported reset modes.
RESET_MODE_HARD = "hard"
RESET_MODE_MIXED = "mixed"
RESET_MODE_SOFT = "soft"

# 40-zero SHA-1 meaning "no object".
NULL_ID = "0" * 40

# Attribute type identifiers.
TYPE_BOOLEAN = "BOOLEAN"
TYPE_BYTE = "BYTE"
TYPE_SHORT = "SHORT"
TYPE_INTEGER = "INTEGER"
TYPE_LONG = "LONG"
TYPE_FLOAT = "FLOAT"
TYPE_DOUBLE = "DOUBLE"
TYPE_POINT = "POINT"
TYPE_LINESTRING = "LINESTRING"
TYPE_POLYGON = "POLYGON"  # fixed: was mistakenly "POINT"
TYPE_MULTIPOINT = "MULTIPOINT"
TYPE_MULTILINESTRING = "MULTILINESTRING"
TYPE_MULTIPOLYGON = "MULTIPOLYGON"
TYPE_STRING = "STRING"

# All geometry-valued attribute types.
GEOMTYPES = [TYPE_MULTIPOINT, TYPE_MULTILINESTRING, TYPE_POLYGON,
             TYPE_POINT, TYPE_LINESTRING, TYPE_MULTIPOLYGON]

# Repository configuration keys.
USER_NAME = "user.name"
USER_EMAIL = "user.email"
STORAGE_OBJECTS = "storage.objects"
MONGODB = "mongodb"
STORAGE_GRAPH = "storage.graph"
STORAGE_STAGING = "storage.staging"
MONGODB_VERSION = "mongodb.version"

# Merge conflict resolution strategies.
OURS = "ours"
THEIRS = "theirs"
ORIGIN = "origin"
|
# Reads a distance in meters and prints it converted to centimeters,
# millimeters and kilometers.
metros = float(input('Entre com a distancia em metros: '))
centimetros = metros * 100
milimetros = metros * 1000
kilometros = metros / 1000.0
print('{} metros e {:.0f} centimetros'.format(metros, centimetros))
print('{} metros e {} milimetros'.format(metros, milimetros))
print('{} metros e {} kilometros'.format(metros, kilometros))
try:  # attempt reading two ints and dividing them
    a = int(input('Digite um número: '))
    b = int(input('Digite um número: '))
    print(f'{a} / {b} = {a / b}')
except ValueError:  # specific handlers must precede the generic Exception one
    # (in the original this clause came after `except Exception` and was
    # therefore unreachable)
    print('\033[31mErro de valor\033[0m')
except Exception as erro:  # any other error: report its class
    print(f'\033[31mERRO! {erro.__class__}\033[0m')
else:  # runs only when no exception was raised
    print('\033[32mCERTO\033[0m')
finally:  # always runs, success or failure
    print('FIM DO PROGRAMA!')
|
train_datagen = ImageDataGenerator(rescale=1./255) # rescaling on the fly
# Updated to do image augmentation
# (note: this second assignment replaces the plain-rescaling generator above)
train_datagen = ImageDataGenerator(
      rescale = 1./255, # recaling
      rotation_range = 40, # randomly rotate b/w 0 and 40 degrees
      width_shift_range = 0.2, # shifting the images
      height_shift_range = 0.2, # Randomly shift b/w 0 to 20 %
      shear_range = 0.2, # It will shear the image by specified amounts upto the specified portion of the image
      zoom_range = 0.2, # Zoom the image by 20% of random amounts of the image
      horizontal_flip = True, # Flip the image/ Mirror image at random
      fill_mode = 'nearest' # Fills the pixels that might have been lost in the operations
)
|
#!/usr/bin/env python
##
# Print a heading.
#
# @var string text
# @return string
##
def heading(text):
    """Format *text* as a heading line prefixed with an arrow marker."""
    return '-----> ' + text


def line(text):
    """Format *text* as a body line, indented to align under a heading."""
    return '       ' + text


def nl():
    """Return an indentation-only (visually blank) line."""
    return line('')
|
load("@build_bazel_rules_nodejs//:providers.bzl", "run_node")
def _create_worker_module_impl(ctx):
    """Run the create_worker_module binary over the single worker .js file,
    producing `out_file` (optionally emitted as CommonJS)."""
    output_file = ctx.actions.declare_file(ctx.attr.out_file)

    # Exactly one input worker script is expected.
    if (len(ctx.files.worker_file) != 1):
        fail("Expected a single file but got " + str(ctx.files.worker_file))

    cjs = ["--cjs"] if ctx.attr.cjs else []

    run_node(
        ctx,
        executable = "create_worker_module_bin",
        inputs = ctx.files.worker_file,
        outputs = [output_file],
        arguments = cjs + [
            ctx.files.worker_file[0].path,
            output_file.path,
        ],
    )
    return [DefaultInfo(files = depset([output_file]))]
# Rule wrapping _create_worker_module_impl; see `doc` fields for attributes.
create_worker_module = rule(
    implementation = _create_worker_module_impl,
    attrs = {
        "cjs": attr.bool(
            default = False,
            doc = "Whether to output commonjs",
        ),
        "create_worker_module_bin": attr.label(
            executable = True,
            cfg = "exec",
            default = Label("@//tfjs-backend-wasm/scripts:create_worker_module_bin"),
            doc = "The script that creates the worker module",
        ),
        "out_file": attr.string(
            mandatory = True,
            doc = "The name for the output file",
        ),
        "worker_file": attr.label(
            doc = "The worker file to transform",
            allow_files = [".js"],
        ),
    },
    doc = """Modify the Emscripten WASM worker script so it can be inlined
    ...by the tf-backend-wasm bundle.
    """,
)
|
"""ANSI color codes for the command line output."""
RESET = u"\u001b[0m"
BLACK = u"\u001b[30m"
RED = u"\u001b[31m"
GREEN = u"\u001b[32m"
YELLOW = u"\u001b[33m"
BLUE = u"\u001b[34m"
MAGENTA = u"\u001b[35m"
CYAN = u"\u001b[36m"
WHITE = u"\u001b[37m"
BRIGHT_BLACK = u"\u001b[30;1m"
BRIGHT_RED = u"\u001b[31;1m"
BRIGHT_GREEN = u"\u001b[32;1m"
BRIGHT_YELLOW = u"\u001b[33;1m"
BRIGHT_BLUE = u"\u001b[34;1m"
BRIGHT_MAGENTA = u"\u001b[35;1m"
BRIGHT_CYAN = u"\u001b[36;1m"
BRIGHT_WHITE = u"\u001b[37;1m"
BACKGROUND_BLACK = u"\u001b[40m"
BACKGROUND_RED = u"\u001b[41m"
BACKGROUND_GREEN = u"\u001b[42m"
BACKGROUND_YELLOW = u"\u001b[43m"
BACKGROUND_BLUE = u"\u001b[44m"
BACKGROUND_MAGENTA = u"\u001b[45m"
BACKGROUND_CYAN = u"\u001b[46m"
BACKGROUND_WHITE = u"\u001b[47m"
BACKGROUND_BRIGHT_BLACK = u"\u001b[40;1m"
BACKGROUND_BRIGHT_RED = u"\u001b[41;1m"
BACKGROUND_BRIGHT_GREEN = u"\u001b[42;1m"
BACKGROUND_BRIGHT_YELLOW = u"\u001b[43;1m"
BACKGROUND_BRIGHT_BLUE = u"\u001b[44;1m"
BACKGROUND_BRIGHT_MAGENTA = u"\u001b[45;1m"
BACKGROUND_BRIGHT_CYAN = u"\u001b[46;1m"
BACKGROUND_BRIGHT_WHITE = u"\u001b[47;1m"
def color_by_id(color_id):
    """Return the escape sequence selecting 256-color foreground *color_id*."""
    return f"\u001b[38;5;{color_id}m"


def background_color_by_id(color_id):
    """Return the escape sequence selecting 256-color background *color_id*."""
    return f"\u001b[48;5;{color_id}m"


if __name__ == '__main__':
    print(BRIGHT_BLACK + "hello world!" + RESET)
    print(BRIGHT_RED + "hello world!" + RESET)
    print(BRIGHT_GREEN + "hello world!" + RESET)
    # Render the full 256-color palette as two 16x16 tables:
    # foreground first, then background.
    for row in range(16):
        cells = "".join(color_by_id(row * 16 + col) + " " + str(row * 16 + col).ljust(4)
                        for col in range(16))
        print(cells + RESET)
    for row in range(16):
        cells = "".join(background_color_by_id(row * 16 + col) + " " + str(row * 16 + col).ljust(4)
                        for col in range(16))
        print(cells + RESET)
|
# Interactive script: collect N resistance measurements and echo them back.
print('Laboratory work 1.1 measurements of the R-load.')
measurements = []
measurements_count = int(input('Enter number of measurements : '))
for cur_measurement_number in range(1, measurements_count+1):
    print(f'Enter value for measurement {cur_measurement_number} -> ', end='')
    measurement = float(input())
    measurements.append(measurement)
print('Results of the measurements: ')
# note: the displayed index is 0-based while input prompts were 1-based
for index, cur_measurement_number in enumerate(measurements):
    print (f'({index}): {cur_measurement_number} Ohms')
|
#tests: depend_extra
# Extra build/test dependency declaration consumed by the test harness.
depend_extra=('pelle',)
def synthesis():
    # Intentionally empty: this job exists only to exercise depend_extra.
    pass
|
### ANSI
'''
\033[style; text; backgroundm
\033[0;33;44m
style - 0 = faz nada, 1 = negrito, 4 = subliando, 7 = inverter configurações
text - 30 = branco, 31 = vermelho, 32 = verde, 33 = amarelo, 34 = azul, 35 = roxo, 36 = ciano, 37 = cinza
background - 40 = branco, 41 = vermelho, 42 = verde, 43 = amarelo, 44 = azul, 45 = roxo, 46 = ciano, 47 = cinza
'''
'''
\033[0;30;41m
\033[4;33;44m
\033[1;35;43m
\033[30;42m
\033[m
\033[7;30m
'''
### Brincando
'''
print('\033[7;33;44mOlá Mundo!\033[m')
'''
'''
a = 3
b = 5
print('Os valores são \033[32m{}\033[m e \033[31m{}\033[m!!'.format(a, b))
'''
'''
nome = 'Jean'
print('Olá! Muito prazer em te conhecer, {}{}{}!!'.format('\033[4;34m', nome, '\033[m'))
'''
'''
nome = 'Jean'
cores = {'limpa':'\033[m',
'azul':'\033[34m',
'amarelo':'\033[33m',
'pretoebranco':'\033[7;30m'}
print('Olá! Muito prazer em te conhecer,{}{}{}!!'.format(cores['pretoebranco'], nome, cores['limpa']))
'''
|
def _capnp_toolchain_gen_impl(ctx):
    """Repository rule impl: instantiate the toolchain BUILD template with the
    label of the capnp tool."""
    ctx.template(
        "toolchain/BUILD.bazel",
        ctx.attr._build_tpl,
        substitutions = {
            "{capnp_tool}": str(ctx.attr.capnp_tool),
        },
    )

# Creates the Capnp toolchain repository used by all capnp_library targets.
capnp_toolchain_gen = repository_rule(
    implementation = _capnp_toolchain_gen_impl,
    attrs = {
        "capnp_tool": attr.label(
            allow_single_file = True,
            cfg = "host",
            executable = True,
        ),
        "_build_tpl": attr.label(
            default = "@rules_capnproto//capnp/internal:BUILD.toolchain.tpl",
        ),
    },
    doc = "Creates the Capnp toolchain that will be used by all capnp_library targets",
)
|
# Golden/expected parser output used by a unit test. Structure:
# steering_entries -> 'sgt' -> <peer SGT number> -> per-peer entry fields,
# each with a nested policy_rbacl_src_list of policy pointers/flags.
expected_output = {
    'steering_entries': {
        'sgt': {
            2057: {
                'entry_last_refresh': '10:32:00',
                'entry_state': 'COMPLETE',
                'peer_name': 'Unknown-2057',
                'peer_sgt': '2057-01',
                'policy_rbacl_src_list': {
                    'entry_status': 'UNKNOWN',
                    'installed_elements': '0x00000C80',
                    'installed_peer_policy': {
                        'peer_policy': '0x7F3ADDAFEA08',
                        'policy_flag': '0x00400001'
                    },
                    'installed_sgt_policy': {
                        'peer_policy': '0x7F3ADDAFF308',
                        'policy_flag': '0x41400001'
                    },
                    'policy_expires_in': '0:23:59:55',
                    'policy_refreshes_in': '0:23:59:55',
                    'received_elements': '0x00000C80',
                    'received_peer_policy': {
                        'peer_policy': '0x00000000',
                        'policy_flag': '0x00000000'
                    },
                    'retry_timer': 'not running',
                    'sgt_policy_last_refresh': '10:32:00 UTC '
                    'Fri Oct 15 '
                    '2021',
                    'sgt_policy_refresh_time_secs': 86400,
                    'staled_peer_policy': {
                        'peer_policy': '0x00000000',
                        'policy_flag': '0x00000000'
                    }
                },
                'requested_elements': '0x00001401'
            },
            3053: {
                'entry_last_refresh': '10:30:42',
                'entry_state': 'COMPLETE',
                'peer_name': 'Unknown-3053',
                'peer_sgt': '3053-01',
                'policy_rbacl_src_list': {
                    'entry_status': 'UNKNOWN',
                    'installed_elements': '0x00000C80',
                    'installed_peer_policy': {
                        'peer_policy': '0x7F3ADDAFEC08',
                        'policy_flag': '0x00400001'
                    },
                    'installed_sgt_policy': {
                        'peer_policy': '0x7F3ADDAFF1B8',
                        'policy_flag': '0x41400001'
                    },
                    'policy_expires_in': '0:23:58:37',
                    'policy_refreshes_in': '0:23:58:37',
                    'received_elements': '0x00000C80',
                    'received_peer_policy': {
                        'peer_policy': '0x00000000',
                        'policy_flag': '0x00000000'
                    },
                    'retry_timer': 'not running',
                    'sgt_policy_last_refresh': '10:30:42 UTC '
                    'Fri Oct 15 '
                    '2021',
                    'sgt_policy_refresh_time_secs': 86400,
                    'staled_peer_policy': {
                        'peer_policy': '0x00000000',
                        'policy_flag': '0x00000000'
                    }
                },
                'requested_elements': '0x00001401'
            },
            65521: {
                'entry_last_refresh': '00:00:00',
                'entry_state': 'FAILURE',
                'peer_name': 'Unknown-65521',
                'peer_sgt': '65521',
                'policy_rbacl_src_list': {
                    'entry_status': 'UNKNOWN',
                    'installed_elements': '0x00000000',
                    'installed_peer_policy': {
                        'peer_policy': '0x00000000',
                        'policy_flag': '0x00000000'
                    },
                    'received_elements': '0x00000000',
                    'received_peer_policy': {
                        'peer_policy': '0x7F3ADDAFEB08',
                        'policy_flag': '0x00000005'
                    },
                    'refresh_timer': 'not running',
                    'retry_timer': 'running',
                    'staled_peer_policy': {
                        'peer_policy': '0x00000000',
                        'policy_flag': '0x00000000'
                    }
                },
                'requested_elements': '0x00000081'
            }
        }
    }
}
|
contacts = {"John": "01217000111", "Addison": "01217000222", "Jack": "01227000123"}
print("contacts: ", contacts)
print("Element count: ", len(contacts))
contactsAsString = str(contacts)
print("str(contacts): ", contactsAsString)
# Một đối tượng đại diện lớp 'dict'.
aType = type(contacts)
print("type(contacts): ", aType)
# Hàm dir(dict) trả về các thành viên của lớp 'dict'.
print("dir(dict): ", dir(dict))
# ------------------------------------------------------------------------------------
# ['__class__', '__contains__', '__delattr__', '__delitem__', '__dir__', '__doc__',
# '__eq__', '__format__', '__ge__', '__getattribute__', '__getitem__', '__gt__',
# '__hash__', '__init__', '__init_subclass__', '__iter__', '__le__', '__len__',
# '__lt__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__',
# '__setattr__', '__setitem__', '__sizeof__', '__str__', '__subclasshook__', 'clear',
# 'copy', 'fromkeys', 'get', 'items', 'keys', 'pop', 'popitem',
# 'setdefault', 'update', 'values']
# ------------------------------------------------------------------------------------- |
'''
Given a sorted array, e.g. a = [3, 7, 8, 9], and a 2-D array of queries,
e.g. querys = [[2, 5], [3, 7], [2, 8]], return for each query's [lo, hi]
range the number of array elements inside that inclusive range.

Approach: two binary searches per query via bisect, O(m log n) overall.
This version fixes two defects of the original hand-rolled search:
1. it mixed inclusive/exclusive bounds (`hi = len(a)` with `hi = middle - 1`),
   which under/over-counted some ranges (e.g. [2, 5] over [3, 7, 8, 9] gave 2
   instead of 1);
2. `a[lo]` could be read at index len(a) and raise IndexError.
'''
def binarySearch(a, target):
    """Return the index of *target* in sorted list *a* if present (leftmost
    match), otherwise the insertion point that keeps *a* sorted."""
    from bisect import bisect_left
    return bisect_left(a, target)

def queryCount(a, querys):
    """Return, per [lo, hi] query, the count of elements of sorted list *a*
    inside the inclusive range. Returns None when either input is empty."""
    if len(a) == 0 or len(querys) == 0:
        return None
    from bisect import bisect_left, bisect_right
    result = []
    for lo, hi in querys:
        # count = (# elements <= hi) - (# elements < lo)
        result.append(bisect_right(a, hi) - bisect_left(a, lo))
    return result
# Package metadata; __version__ is derived from the version-info tuple.
__author__ = 'Stormpath, Inc.'
__copyright__ = 'Copyright 2012-2014 Stormpath, Inc.'
__version_info__ = ('1', '3', '1')
__version__ = '.'.join(__version_info__)
__short_version__ = '.'.join(__version_info__)
|
# (fixed: a stray `J` on the first line raised NameError at startup)
# welcoming the user
name = input("What is your name? ")
print("Hello, " + name, "It is time to play hangman!")
print("Start guessing...")
# here we set the secret
word = "secret"
# characters guessed so far
guesses = ''
# number of wrong guesses allowed
turns = 10
while turns > 0:
    # show the word, counting still-hidden letters
    failed = 0
    for char in word:
        if char in guesses:
            print(char)
        else:
            print("_")
            failed += 1
    if failed == 0:
        print("You won!")
        break
    guess = input("guess a character:")
    guesses += guess
    if guess not in word:
        turns -= 1
        print("Wrong")
        print("You have", turns, 'more guesses')
    if turns == 0:
        print("You lose")
|
# Driver registry: username -> (full name, rating, (car make, model)).
conductores = {
    'azambrano': ('Andres Zambrano', 5.6, ('Hyundai', 'Elantra')),
    'jojeda': ('Juan Ojeda', 1.1, ('Hyundai', 'Accent')),
    # ...
}
def agrega_conductor(conductores, nuevo_conductor):
    """Insert a (username, name, rating, (make, model)) driver tuple.

    Returns False (no changes) if the username already exists, else True.
    """
    username, nombre, puntaje, (marca, modelo) = nuevo_conductor
    if username not in conductores:
        conductores[username] = (nombre, puntaje, (marca, modelo))
        return True
    return False
def elimina_conductor(conductores, username):
    """Remove *username* from the registry; True if removed, False if unknown."""
    if username in conductores:
        del conductores[username]
        return True
    return False
def ranking(conductores):
    """Return [(name, rating)] sorted by rating descending (ties by name)."""
    por_puntaje = sorted(
        ((datos[1], datos[0]) for datos in conductores.values()),
        reverse=True,
    )
    return [(nombre, puntaje) for puntaje, nombre in por_puntaje]
|
"""
Implement a class for a Least Recently Used (LRU) Cache. The cache should
support inserting key/value paris, retrieving a key's value and retrieving the
most recently active key.
Each of these methods should run in constant time. When a key/value pair is
inserted or a key's value is retrieved, the key in question should become the
most recenty key. Also, the LRUCache class should store a max_size property set
to the size of the cache, which is passed in as an argument during
instantiation. This size represents the maximum numebr of key/value pairs that
the cache can hold at onece. If a key/value pair is added to che cache when it
has reached maximum capcacity, teh least recently used (active) key/value pair
should be evicted from the cache and no loger retrievable. The newly added
key/value pair shuld effectively replace it. Inserting a key/pari with an
already existing key should simply replace the key's value in the cache with
the new value and should not evict a key/value pair if the cache is full.
Attempting to retrieve a value from a key that is not in the cache should
return the None value.
"""
# Do not edit the class below except for the insertKeyValuePair,
# getValueFromKey, and getMostRecentKey methods. Feel free
# to add new properties and methods to the class.
"""
doubly linked list
node class
hash table with nodes
"""
class LRUCache:
    """LRU cache: dict for O(1) lookup + doubly linked list for O(1) recency.

    Head of the list is the most recently used entry, tail the least.
    """
    def __init__(self, max_size):
        self.max_size = max_size or 1  # guard against 0/None capacity
        self.lru_dict = {}             # key -> DoublyLinkedListNode
        self.lru_dll = DoublyLinkedList()
        self.cur_size = 0

    def update(self, node):
        # Mark *node* as most recently used.
        self.lru_dll.setHead(node)

    def insertKeyValuePair(self, key, value):
        """Insert or replace key/value; evicts the LRU entry when full."""
        if key in self.lru_dict:
            node = self.lru_dict[key]
            node.value = value
            self.update(node)
            return
        if self.cur_size == self.max_size:
            # at capacity: evict, size stays constant after the new insert
            self.remove_least_recent()
        else:
            self.cur_size += 1
        node = DoublyLinkedListNode(key, value)
        self.lru_dll.setHead(node)
        self.lru_dict[key] = node

    def remove_least_recent(self):
        """Drop the tail (least recently used) entry, if any."""
        if self.cur_size < 1:
            return
        rem_key = self.lru_dll.tail.key
        self.lru_dll.remove_tail()
        del self.lru_dict[rem_key]

    def getValueFromKey(self, key):
        """Return the value for *key* (marking it most recent), else None."""
        if key not in self.lru_dict:
            return None
        node = self.lru_dict[key]
        self.update(node)
        return node.value

    def getMostRecentKey(self):
        """Return the most recently used key."""
        return self.lru_dll.head.key


class DoublyLinkedList:
    """Minimal doubly linked list tracking head (most recent) and tail."""
    def __init__(self):
        self.head = None
        self.tail = None

    def setHead(self, node):
        """Move *node* to the head (linking it in if it is new).

        Fixed: the original general-case branch set `node.next = self.head.next`,
        which skipped — and orphaned — the current head, corrupting the list
        after three inserts. It also called remove(node) twice.
        """
        if node is self.head:
            return
        self.remove(node)  # detach first if already linked
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            node.prev = None
            node.next = self.head
            self.head.prev = node
            self.head = node

    def remove_tail(self):
        self.remove(self.tail)

    def remove(self, node):
        # Detach *node*, updating head/tail if it occupied either end.
        if node is self.head:
            self.head = self.head.next
        if node is self.tail:
            self.tail = self.tail.prev
        node.remove_bindings()


class DoublyLinkedListNode:
    """List node pairing a cache key with its value."""
    def __init__(self, key, value):
        self.key = key
        self.value = value
        self.prev = None
        self.next = None

    def remove_bindings(self):
        # Splice this node out and clear its own links.
        if self.prev:
            self.prev.next = self.next
        if self.next:
            self.next.prev = self.prev
        self.next = None
        self.prev = None
|
#calculator
def add(n1, n2):
    """Return the sum of the two operands."""
    return n1 + n2


def substract(n1, n2):
    """Return n1 minus n2."""
    return n1 - n2


def multiply(n1, n2):
    """Return the product of the two operands."""
    return n1 * n2


def devide(n1, n2):
    """Return n1 divided by n2 (true division)."""
    return n1 / n2


# Dispatch table: operator symbol -> implementing function.
operator = {
    "+": add,
    "-": substract,
    "*": multiply,
    "/": devide,
}
num1 = float(input("Enter number: "))
op_simbol = input("enter + - * / ")
num2 = float(input("Enter number: "))
calc_func = operator[op_simbol]
ans = calc_func(num1,num2)
print(ans)
###########
###########
# Map raw scores to letter-like grade labels.
studen_score = {
    "Harry": 10,
    "alex" :51,
    "bibo": 91,
}
studen_grades = {}
for student in studen_score.keys():
    score = studen_score[student]
    if score > 90:
        studen_grades[student] = "outstanding"
    elif score > 50:
        studen_grades[student] = "good"
    elif score < 15:
        # note: scores in [15, 50] receive no grade at all
        studen_grades[student] = "poor"
#print(studen_grades)
# List of per-country travel records, each shaped {country: {"city": [...]}}.
travel_log = [
    {"spain": {"city": ["malorka", "tenerife", "lanzarote"]}},
    {"bulgaria": {"city": ["sf", "pld"]}}
]


def add_city(country, city):
    """Append a {country: {"city": city}} record matching travel_log's shape.

    Fixed: the original stored a one-element set {f"city: {city}"} instead of
    the {"city": ...} mapping used by the existing records.
    """
    travel_log.append({country: {"city": city}})


add_city("france", ["nice", "monte"])
print(travel_log)
####################
name = ""
bid = ""
others = True
bidders = {}
while others is True:
name = input("Name? ")
bid = int(input("Bid? "))
others_bid = input("anyone else? Y or N ").lower()
if others_bid == "n":
others = False
bidders[name] = bid
print(bidders)
biggest = max(bidders.values())
for k,v in bidders.items():
if v == biggest:
name = k
winner = {}
winner[name] = biggest
print("winner is ", winner) |
# Arithmetic progression printer: show terms in batches, asking how many more
# to print each round; 0 stops the program.
a1 = int(input('Insira o primeiro termo dessa p.a.: '))
r = int(input('Insitra a razão dessa p.a.'))
quant = 1
continuar = 10          # first batch size
tot = 1                 # NOTE(review): starts at 1, so the first batch prints
                        # 11 terms rather than 10 — confirm intended
while continuar != 0:
    tot += continuar
    while quant <= tot:
        an = a1 + r*(quant - 1)   # n-th term: a1 + r*(n-1)
        quant += 1
        print(an)
    continuar = int(input('Quantos termos a mais você quer? Digite [0] para interromper o programa '))
|
# https://www.codechef.com/problems/SUMPOS
# For each test case: after sorting, answer YES iff the largest of the three
# numbers equals the sum of the other two.
for T in range(int(input())):
    l=sorted(list(map(int,input().split())))
    print("NO") if(l[2]!=l[1]+l[0]) else print("YES")
'''
Program that shows the multiplication table of several numbers, one at a
time, stopping only when the number entered is negative.
'''
while True:
    num = int(input("Diga um numero [digite um negativo para sair]"))
    if num < 0:
        print()
        print("Voce saiu")
        break
    # print the 1..10 multiplication table for num
    for i in range (1,11):
        print(num, " x ", i," = ", num*i)
    print()
print()
class LoggerTemplate:
    """Interface stub for loggers; every method must be overridden."""

    def __init__(self, *args, **kwargs):
        """Not usable directly — concrete subclasses provide their own setup."""
        raise NotImplementedError

    def update_loss(self, phase, value, step):
        """Record a loss `value` for `phase` at `step` (subclass hook)."""
        raise NotImplementedError

    def update_metric(self, phase, metric, value, step):
        """Record a named `metric` value for `phase` at `step` (subclass hook)."""
        raise NotImplementedError
|
# Dictionary basics: look up single values and iterate key/value pairs.
var = {"car": "volvo", "fruit": "apple"}
print(var["fruit"])
for key in var:
    print(f"key: {key} value: {var[key]}")
print()
print()
var1 = {"donut": ["chocolate", "glazed", "sprinkled"]}
print(var1["donut"][0])
print("My favorite donut flavors are:", end=" ")
for flavor in var1["donut"]:
    print(flavor, end=" ")
print()
print()
#Using the examples above write code to print one value of each JSON structure and a loop to print all values below.
var = {"vegetable": "carrot", "fruit": "apple", "animal": "cat", "day": "Friday"}
print(var["vegetable"])
for key in var:
    print(f"key: {key} value: {var[key]}")
print()
print()
var1 = {"animal": ["dog", "cat", "fish", "tiger", "camel"]}
print(var1["animal"][0])
print("My favorite animals are:", end=" ")
for animal in var1["animal"]:
    print(animal, end=" ")
print()
print()
myvar = {"dessert": "ice cream", "exercise": "push ups", "eyes": "blue", "gender": "male"}
print(myvar["exercise"])
for key in myvar:
    print(f"key: {key} value: {myvar[key]}")
print()
print()
myvar1 = {"dessert": ["cake", "candy", "ice cream", "pudding", "cookies"]}
print(myvar1["dessert"][0])
print("My favorite desserts are:", end=" ")
for dessert in myvar1["dessert"]:
    print(dessert, end=" ")
#Challenge 4: Take a binary tree and reverse it
# Two classes: Node stores one value plus child links; BinaryTree wraps the
# root and offers insertion, in-order printing, and a per-level dump.
class Node:
    """One BST node: a value and optional left/right children."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None


class BinaryTree:
    """Minimal binary search tree (values >= parent go right)."""

    def __init__(self):
        # Tree starts empty; add() creates the root lazily.
        self.root = None

    def getRoot(self):
        """Return the root node (None while the tree is empty)."""
        return self.root

    def add(self, val):
        """Insert *val* at the correct leaf position (iterative descent)."""
        if self.root is None:
            self.root = Node(val)
            return
        node = self.root
        while True:
            if val < node.val:
                if node.left is None:
                    node.left = Node(val)
                    return
                node = node.left
            else:
                if node.right is None:
                    node.right = Node(val)
                    return
                node = node.right

    def printTree(self):
        """Print every value in ascending (in-order) order, one per line."""
        def visit(node):
            if node is not None:
                visit(node.left)
                print(node.val)
                visit(node.right)
        visit(self.root)

    def getTree(self):
        """Return a nested list with one inner list of values per level."""
        levels = []
        frontier = [self.root]
        while frontier:
            levels.append([node.val for node in frontier])
            frontier = [child for node in frontier
                        for child in (node.left, node.right) if child]
        return levels
if __name__ == '__main__':
    # Build the sample tree from the challenge example.
    tree = BinaryTree()
    for value in (4, 2, 7, 1, 3, 6, 9):
        tree.add(value)
    # getTree returns the tree formatted as nested per-level lists.
    formattedTree = tree.getTree()
    # Mirror the tree by reversing each level, printing as we go.
    for level in formattedTree:
        level.reverse()
        print(level)
# Branding constants reported by the version helper functions below.
DAOLIPROXY_VENDOR = "OpenStack Foundation"
DAOLIPROXY_PRODUCT = "DaoliProxy"
DAOLIPROXY_PACKAGE = None # OS distro package version suffix
# Not consulted anywhere in this chunk — presumably flipped by a lazy
# loader elsewhere in the package; TODO confirm.
loaded = False
class VersionInfo(object):
    """Static version/release information pinned at build time."""

    version = "2015.1.21"
    release = "1.el7.centos"

    def release_string(self):
        """Return the distribution release string."""
        return self.release

    def version_string(self):
        """Return the upstream version string."""
        return self.version
# Module-level singleton plus an alias so callers can use
# version_string() without touching the class.
version_info = VersionInfo()
version_string = version_info.version_string
def vendor_string():
    """Return the vendor name."""
    return DAOLIPROXY_VENDOR
def product_string():
    """Return the product name."""
    return DAOLIPROXY_PRODUCT
def package_string():
    """Return the distro package suffix, or None when unpackaged."""
    return DAOLIPROXY_PACKAGE
def version_string_with_package():
    """Return the version, suffixed with "-<package>" when one is set."""
    pkg = package_string()
    version = version_info.version_string()
    return version if pkg is None else "%s-%s" % (version, pkg)
|
# Space: O(n)
# Time: O(n)
class Solution:
    def findUnsortedSubarray(self, nums):
        """Return the length of the shortest contiguous subarray that,
        once sorted, leaves the whole array sorted (0 if already sorted).

        Two monotonic-stack sweeps locate the leftmost index that must
        move (lo) and the rightmost one (hi).
        """
        n = len(nums)
        if n <= 1:
            return 0
        # Forward sweep with an increasing stack: any index popped is
        # out of place; track the smallest such index.
        lo = n - 1
        increasing = []
        for idx, value in enumerate(nums):
            while increasing and nums[increasing[-1]] > value:
                lo = min(lo, increasing.pop())
            increasing.append(idx)
        # Nothing was ever popped -> the array is already sorted.
        if lo == n - 1:
            return 0
        # Backward sweep with a decreasing stack: track the largest
        # popped index.
        hi = 0
        decreasing = []
        for idx in range(n - 1, -1, -1):
            while decreasing and nums[decreasing[-1]] < nums[idx]:
                hi = max(hi, decreasing.pop())
            decreasing.append(idx)
        return hi - lo + 1
|
# encoding: utf-8
# module Autodesk.AutoCAD.DatabaseServices
# from D:\Python\ironpython-stubs\release\stubs\Autodesk\AutoCAD\DatabaseServices\__init__.py
# by generator 1.145
# no doc
# no imports
# no functions
# no classes
# variables with complex values
# Package search path for this generated IronPython stub (machine-specific).
__path__ = [
 'D:\\Python\\ironpython-stubs\\release\\stubs\\Autodesk\\AutoCAD\\DatabaseServices',
]
# coding=utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License
# File-cache file name prefix
JSON_FILE_NAME_PREFIX = "stage_"
# File-cache file name suffix
JSON_FILE_NAME_SUFFIX = ".json"
# Current stage label
STAGE_ACS_CURRENT = "ACSCurrent"
# CBC mode key
CBC_MODE_KEY = '001'
# IV length
IV_LENGTH = 16
# Key length
KEY_LENGTH = 16
# Product name
PRODUCT_NAME = "kms"
# Data type: text
TEXT_DATA_TYPE = "text"
# Data type: binary
BINARY_DATA_TYPE = "binary"
# Default TTL, in ms (one hour)
DEFAULT_TTL = 60 * 60 * 1000
# Default maximum number of retry attempts
DEFAULT_RETRY_MAX_ATTEMPTS = 5
# Initial retry interval, in ms
DEFAULT_RETRY_INITIAL_INTERVAL_MILLS = 2000
# Original comment read "maximum wait time, in ms", but the name suggests
# a capacity — NOTE(review): confirm which meaning is intended.
DEFAULT_CAPACITY = 10000
# Default logger name
DEFAULT_LOGGER_NAME = "CacheClient"
# user agent
USER_AGENT_OF_SECRETS_MANAGER_PYTHON = "alibabacloud-secretsmanager-python"
# Version number
PROJECT_VERSION = "0.0.4"
# Default configuration file name
DEFAULT_CONFIG_NAME = "secretsmanager.properties"
# Properties-file key: client_key_password_from_file_path
PROPERTIES_CLIENT_KEY_PASSWORD_FROM_FILE_PATH_NAME = "client_key_password_from_file_path"
# Properties-file key: secret_names
PROPERTIES_SECRET_NAMES_KEY = "secret_names"
|
# Count occurrences of the letter 'a' in a phrase and report the first
# and last positions where it appears.
# NOTE(review): matching is case-sensitive — uppercase 'A' is never
# counted or located; confirm whether that is intended.
f = input('Escreva alguma frase:')
print(' Sua frase contém {} letras "as".'.format(f.count('a')))
print(' Ela aparece pela primeira fez na posição {}.'.format(f.find('a')))
print(' E.aparece pela última vez na posição {}.'.format(f.rfind('a')))
|
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.