content
stringlengths 7
1.05M
|
---|
'''
Play with numbers
You are given an array of n numbers and q queries. For each query you have to print the floor of the expected value(mean) of the subarray from L to R.
First line contains two integers N and Q denoting number of array elements and number of queries.
Next line contains N space seperated integers denoting array elements.
Next Q lines contain two integers L and R(indices of the array).
print a single integer denoting the answer.
:
1<= N ,Q,L,R <= 10^6
1<= Array elements <= 10^9
NOTE
Use Fast I/O
'''
x = list(map(int,input().split()))
a = list(map(int,input().split()))
b = [a[0]]
for i in range (1, len(a)):
b.append(a[i]+b[-1])
for i in range (0,x[1]):
c = list(map(int,input().split()))
if c[0] == 1:
print(b[c[1]-1]//(c[1]))
else:
print((b[c[1]-1] - b[c[0] - 2])//((c[1] - c[0])+1)) |
def glyphs():
return 97
_font =\
b'\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a'\
b'\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a'\
b'\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a'\
b'\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a'\
b'\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a'\
b'\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a'\
b'\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x11\x4b\x59\x51\x4b\x4e'\
b'\x4c\x4c\x4e\x4b\x51\x4b\x53\x4c\x56\x4e\x58\x51\x59\x53\x59'\
b'\x56\x58\x58\x56\x59\x53\x59\x51\x58\x4e\x56\x4c\x53\x4b\x51'\
b'\x4b\x05\x4c\x58\x4c\x4c\x4c\x58\x58\x58\x58\x4c\x4c\x4c\x04'\
b'\x4b\x59\x52\x4a\x4b\x56\x59\x56\x52\x4a\x05\x4c\x58\x52\x48'\
b'\x4c\x52\x52\x5c\x58\x52\x52\x48\x0b\x4a\x5a\x52\x49\x50\x4f'\
b'\x4a\x4f\x4f\x53\x4d\x59\x52\x55\x57\x59\x55\x53\x5a\x4f\x54'\
b'\x4f\x52\x49\x0d\x4c\x58\x50\x4c\x50\x50\x4c\x50\x4c\x54\x50'\
b'\x54\x50\x58\x54\x58\x54\x54\x58\x54\x58\x50\x54\x50\x54\x4c'\
b'\x50\x4c\x05\x4b\x59\x52\x4b\x52\x59\x20\x52\x4b\x52\x59\x52'\
b'\x05\x4d\x57\x4d\x4d\x57\x57\x20\x52\x57\x4d\x4d\x57\x08\x4d'\
b'\x57\x52\x4c\x52\x58\x20\x52\x4d\x4f\x57\x55\x20\x52\x57\x4f'\
b'\x4d\x55\x22\x4e\x56\x51\x4e\x4f\x4f\x4e\x51\x4e\x53\x4f\x55'\
b'\x51\x56\x53\x56\x55\x55\x56\x53\x56\x51\x55\x4f\x53\x4e\x51'\
b'\x4e\x20\x52\x4f\x51\x4f\x53\x20\x52\x50\x50\x50\x54\x20\x52'\
b'\x51\x4f\x51\x55\x20\x52\x52\x4f\x52\x55\x20\x52\x53\x4f\x53'\
b'\x55\x20\x52\x54\x50\x54\x54\x20\x52\x55\x51\x55\x53\x1a\x4e'\
b'\x56\x4e\x4e\x4e\x56\x56\x56\x56\x4e\x4e\x4e\x20\x52\x4f\x4f'\
b'\x4f\x55\x20\x52\x50\x4f\x50\x55\x20\x52\x51\x4f\x51\x55\x20'\
b'\x52\x52\x4f\x52\x55\x20\x52\x53\x4f\x53\x55\x20\x52\x54\x4f'\
b'\x54\x55\x20\x52\x55\x4f\x55\x55\x10\x4d\x57\x52\x4c\x4d\x55'\
b'\x57\x55\x52\x4c\x20\x52\x52\x4f\x4f\x54\x20\x52\x52\x4f\x55'\
b'\x54\x20\x52\x52\x52\x51\x54\x20\x52\x52\x52\x53\x54\x10\x4c'\
b'\x55\x4c\x52\x55\x57\x55\x4d\x4c\x52\x20\x52\x4f\x52\x54\x55'\
b'\x20\x52\x4f\x52\x54\x4f\x20\x52\x52\x52\x54\x53\x20\x52\x52'\
b'\x52\x54\x51\x10\x4d\x57\x52\x58\x57\x4f\x4d\x4f\x52\x58\x20'\
b'\x52\x52\x55\x55\x50\x20\x52\x52\x55\x4f\x50\x20\x52\x52\x52'\
b'\x53\x50\x20\x52\x52\x52\x51\x50\x10\x4f\x58\x58\x52\x4f\x4d'\
b'\x4f\x57\x58\x52\x20\x52\x55\x52\x50\x4f\x20\x52\x55\x52\x50'\
b'\x55\x20\x52\x52\x52\x50\x51\x20\x52\x52\x52\x50\x53\x08\x44'\
b'\x60\x44\x52\x60\x52\x20\x52\x44\x52\x52\x62\x20\x52\x60\x52'\
b'\x52\x62\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00'\
b'\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00'\
b'\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00'\
b'\x4a\x5a\x00\x4a\x5a\x11\x4b\x59\x51\x4b\x4e\x4c\x4c\x4e\x4b'\
b'\x51\x4b\x53\x4c\x56\x4e\x58\x51\x59\x53\x59\x56\x58\x58\x56'\
b'\x59\x53\x59\x51\x58\x4e\x56\x4c\x53\x4b\x51\x4b\x05\x4c\x58'\
b'\x4c\x4c\x4c\x58\x58\x58\x58\x4c\x4c\x4c\x04\x4b\x59\x52\x4a'\
b'\x4b\x56\x59\x56\x52\x4a\x05\x4c\x58\x52\x48\x4c\x52\x52\x5c'\
b'\x58\x52\x52\x48\x0b\x4a\x5a\x52\x49\x50\x4f\x4a\x4f\x4f\x53'\
b'\x4d\x59\x52\x55\x57\x59\x55\x53\x5a\x4f\x54\x4f\x52\x49\x0d'\
b'\x4c\x58\x50\x4c\x50\x50\x4c\x50\x4c\x54\x50\x54\x50\x58\x54'\
b'\x58\x54\x54\x58\x54\x58\x50\x54\x50\x54\x4c\x50\x4c\x05\x4b'\
b'\x59\x52\x4b\x52\x59\x20\x52\x4b\x52\x59\x52\x05\x4d\x57\x4d'\
b'\x4d\x57\x57\x20\x52\x57\x4d\x4d\x57\x08\x4d\x57\x52\x4c\x52'\
b'\x58\x20\x52\x4d\x4f\x57\x55\x20\x52\x57\x4f\x4d\x55\x22\x4e'\
b'\x56\x51\x4e\x4f\x4f\x4e\x51\x4e\x53\x4f\x55\x51\x56\x53\x56'\
b'\x55\x55\x56\x53\x56\x51\x55\x4f\x53\x4e\x51\x4e\x20\x52\x4f'\
b'\x51\x4f\x53\x20\x52\x50\x50\x50\x54\x20\x52\x51\x4f\x51\x55'\
b'\x20\x52\x52\x4f\x52\x55\x20\x52\x53\x4f\x53\x55\x20\x52\x54'\
b'\x50\x54\x54\x20\x52\x55\x51\x55\x53\x1a\x4e\x56\x4e\x4e\x4e'\
b'\x56\x56\x56\x56\x4e\x4e\x4e\x20\x52\x4f\x4f\x4f\x55\x20\x52'\
b'\x50\x4f\x50\x55\x20\x52\x51\x4f\x51\x55\x20\x52\x52\x4f\x52'\
b'\x55\x20\x52\x53\x4f\x53\x55\x20\x52\x54\x4f\x54\x55\x20\x52'\
b'\x55\x4f\x55\x55\x10\x4d\x57\x52\x4c\x4d\x55\x57\x55\x52\x4c'\
b'\x20\x52\x52\x4f\x4f\x54\x20\x52\x52\x4f\x55\x54\x20\x52\x52'\
b'\x52\x51\x54\x20\x52\x52\x52\x53\x54\x10\x4c\x55\x4c\x52\x55'\
b'\x57\x55\x4d\x4c\x52\x20\x52\x4f\x52\x54\x55\x20\x52\x4f\x52'\
b'\x54\x4f\x20\x52\x52\x52\x54\x53\x20\x52\x52\x52\x54\x51\x10'\
b'\x4d\x57\x52\x58\x57\x4f\x4d\x4f\x52\x58\x20\x52\x52\x55\x55'\
b'\x50\x20\x52\x52\x55\x4f\x50\x20\x52\x52\x52\x53\x50\x20\x52'\
b'\x52\x52\x51\x50\x10\x4f\x58\x58\x52\x4f\x4d\x4f\x57\x58\x52'\
b'\x20\x52\x55\x52\x50\x4f\x20\x52\x55\x52\x50\x55\x20\x52\x52'\
b'\x52\x50\x51\x20\x52\x52\x52\x50\x53\x08\x44\x60\x44\x52\x60'\
b'\x52\x20\x52\x44\x52\x52\x62\x20\x52\x60\x52\x52\x62\x00\x4a'\
b'\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a'\
b'\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a'\
b'\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a\x5a\x00\x4a'\
b'\x5a'
_index =\
b'\x00\x00\x03\x00\x06\x00\x09\x00\x0c\x00\x0f\x00\x12\x00\x15'\
b'\x00\x18\x00\x1b\x00\x1e\x00\x21\x00\x24\x00\x27\x00\x2a\x00'\
b'\x2d\x00\x30\x00\x33\x00\x36\x00\x39\x00\x3c\x00\x3f\x00\x42'\
b'\x00\x45\x00\x48\x00\x4b\x00\x4e\x00\x51\x00\x54\x00\x57\x00'\
b'\x5a\x00\x5d\x00\x60\x00\x63\x00\x88\x00\x95\x00\xa0\x00\xad'\
b'\x00\xc6\x00\xe3\x00\xf0\x00\xfd\x00\x10\x01\x57\x01\x8e\x01'\
b'\xb1\x01\xd4\x01\xf7\x01\x1a\x02\x2d\x02\x30\x02\x33\x02\x36'\
b'\x02\x39\x02\x3c\x02\x3f\x02\x42\x02\x45\x02\x48\x02\x4b\x02'\
b'\x4e\x02\x51\x02\x54\x02\x57\x02\x5a\x02\x5d\x02\x82\x02\x8f'\
b'\x02\x9a\x02\xa7\x02\xc0\x02\xdd\x02\xea\x02\xf7\x02\x0a\x03'\
b'\x51\x03\x88\x03\xab\x03\xce\x03\xf1\x03\x14\x04\x27\x04\x2a'\
b'\x04\x2d\x04\x30\x04\x33\x04\x36\x04\x39\x04\x3c\x04\x3f\x04'\
b'\x42\x04\x45\x04\x48\x04\x4b\x04\x4e\x04\x51\x04\x54\x04'
_mvfont = memoryview(_font)
def _chr_addr(ordch):
offset = 2 * (ordch - 32)
return int.from_bytes(_index[offset:offset + 2], 'little')
def get_ch(ordch):
offset = _chr_addr(ordch if 32 <= ordch <= 127 else ord('?'))
count = _font[offset]
return _mvfont[offset:offset+(count+2)*2-1]
|
class Solution:
def buddyStrings(self, A: str, B: str) -> bool:
if len(A) != len(B):
return False
if A == B:
seen = set()
for char in A:
if char in seen:
return True
seen.add(char)
return False
else:
pairs = []
for a, b in zip(A, B):
if a != b:
pairs.append((a, b))
if len(pairs) > 2:
return False
return len(pairs) == 2 and pairs[0] == pairs[1][::-1] |
# ------------------------------
# 560. Subarray Sum Equals K
#
# Description:
# Given an array of integers and an integer k, you need to find the total number of
# continuous subarrays whose sum equals to k.
#
# Example 1:
# Input:nums = [1,1,1], k = 2
# Output: 2
#
# Note:
# The length of the array is in range [1, 20,000].
# The range of numbers in the array is [-1000, 1000] and the range of the integer k
# is [-1e7, 1e7].
#
# Version: 1.0
# 10/29/19 by Jianfa
# ------------------------------
class Solution:
def subarraySum(self, nums: List[int], k: int) -> int:
preSum = 0
count = 0
sumDict = {0: 1} # if there is a subarray whose sum is k, can get sumDict[0] = 1
for n in nums:
preSum += n
if preSum - k in sumDict:
# if there exists a sum of subarray nums[:i] that is preSum - k
# then sum of subarray between current number and nums[i] is k
count += sumDict[preSum - k]
sumDict[preSum] = sumDict.get(preSum, 0) + 1
return count
# Used for testing
if __name__ == "__main__":
test = Solution()
# ------------------------------
# Summary:
# Idea from: https://leetcode.com/problems/subarray-sum-equals-k/discuss/102106/Java-Solution-PreSum-%2B-HashMap
# I thought about the accumulated sum, but I didn't think about using the map to store
# previous accumulated sum, so that it helps to check if the difference between current
# sum and a certain sum is k. |
# testSIF = "123456789012"
testSIF = open("input.txt", 'r').read()
# testSIF = "0222112222120000"
x_size = 25
y_size = 6
layer_count = len(testSIF) // (x_size * y_size)
layers = []
index = 0
for z in range(layer_count):
layer = []
for y in range(y_size):
for x in range(x_size):
layer += testSIF[index]
index += 1
layers.append(layer)
print(len(layers) * y_size * x_size)
print(len(testSIF))
def decode_pixel(layers, index):
color = "2"
for l in layers:
if color == "2":
color = l[index]
return color
index = 0
for y in range(y_size):
line = ""
for x in range(x_size):
line += ("0" if (decode_pixel(layers, index) == "1") else " ") + " "
index += 1
print(line)
|
class Object:
## Lisp apply() to `that` object in context
def apply(self, env, that):
raise NotImplementedError(['apply', self, env, that])
|
loop = 1
while (loop < 10):
noun1 = input("Choose a noun: ")
plur_noun = input("Choose a plural noun: ")
noun2 = input("Choose a noun: ")
place = input("Name a place: ")
adjective = input("Choose an adjective (Describing word): ")
noun3 = input("Choose a noun: ")
print ("------------------------------------------")
print ("Be kind to your ",noun1,"- footed", plur_noun)
print ("For a duck may be somebody's", noun2,",")
print ("Be kind to your",plur_noun,"in",place)
print ("Where the weather is always",adjective,".")
print ()
print ("You may think that is this the",noun3,",")
print ("Well it is.")
print ("------------------------------------------")
loop = loop + 1 |
def aumentar(n,p,formatar=False):
v = n + (n * p / 100)
if formatar:
return moeda(v)
else:
return v
def diminuir(n,p,formatar=False):
v = n - (n * p / 100)
if formatar:
return moeda(v)
else:
return v
def dobro(n,formatar=False):
v = n*2
if formatar:
return moeda(v)
else:
return v
def metade(n,formatar=False):
v = n/2
if formatar:
return moeda(v)
else:
return v
def moeda(n=0, moeda='R$'):
return f'{moeda}{n:>2.2f}'.replace('.', ',') |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Product Availability',
'category': 'Website/Website',
'summary': 'Manage product inventory & availability',
'description': """
Manage the inventory of your products and display their availability status in your eCommerce store.
In case of stockout, you can decide to block further sales or to keep selling.
A default behavior can be selected in the Website settings.
Then it can be made specific at the product level.
""",
'depends': [
'website_sale',
'sale_stock',
],
'data': [
'views/product_template_views.xml',
'views/res_config_settings_views.xml',
'views/website_sale_stock_templates.xml',
'views/stock_picking_views.xml'
],
'demo': [
'data/website_sale_stock_demo.xml',
],
'auto_install': True,
'license': 'LGPL-3',
}
|
#!/usr/bin/env python3
"""
Advent of Code!
--- Day 2: Dive! ---
Now doing advent of code day 2 in-between working on finals...
I sure hope the puzzles don't ramp up too much the first few days!
"""
def process_input(input_name):
with open(input_name) as input:
return input.readlines()
def calculate_pos(input, has_aim=False):
aim, horizontal, depth = 0, 0, 0
for line in input:
split_line = line.split()
command = split_line[0]
value = int(split_line[1])
if command == 'forward':
horizontal += value
depth += aim * value
elif command == 'down':
aim += value
elif command == 'up':
aim -= value
if has_aim:
return horizontal * depth
else:
return horizontal * aim
input = process_input('input.txt')
p1 = calculate_pos(input)
p2 = calculate_pos(input, True)
print('Part 1:', p1)
print('Part 2:', p2) |
"""
Base class implementing class level locking capability.
MIT License
(C) Copyright [2020] Hewlett Packard Enterprise Development LP
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE."""
class LockHolder:
"""Wrapper for etcd3 locks to allow them to be acquired with a timeout
on the acquisition for non-blocking lock checks.
"""
def __init__(self, etcd, name, ttl=60, timeout=None):
""" Constructor
"""
self.my_lock = etcd.lock(name, ttl=ttl)
self.timeout = timeout
def acquire(self, ):
"""Manually acquire the lock for this instance, obtained by calling
lock() above. The timeout and TTL set in the lock() call will
be used for the acquisition.
"""
self.my_lock.acquire(self.timeout)
return self
def release(self):
"""Manually release the lock for this instance.
"""
return self.my_lock.release()
def is_acquired(self):
"""Find out whether the lock for this instance is currently acquired
(uesful when using non-blocking or timed acquisitions).
"""
return self.my_lock.is_acquired()
def __enter__(self):
"""Acquire the lock as a context manager.
"""
return self.acquire()
def __exit__(self, exception_type, exception_value, traceback):
""" Release the lock at the end of a managed context.
"""
self.release()
return False
class Lockable:
"""Base class with a lock() method that produces distinct locks per
object-id in ETCD for use with 'with'. Call the lock() method on an
instance of the class to obtain the lock.
Example:
class Foo(Lockable):
def __init__(self):
assert self # keep lint happy
my_instance = Foo()
with my_instance.lock():
print("I am in the lock"
print("No longer in the lock")
"""
def __init__(self, name, etcd):
"""Constructor
The paramters are:
name
The unique name that identifies the resource to be locked
by this partitcular lockable instance.
etcd
The ETCD client to be used for creating a lock in this
particular lockable instance.
"""
self.name = name
self.etcd = etcd
def lock(self, ttl=60, timeout=None):
"""Method to use with the 'with' statement for locking the instance
resource. This creates the lock that will be shared across
all instances (through ETCD). The parameters are as follows:
ttl
The time-to-live (TTL) of a lock acquisition on this
lock. This is expressed in seconds. If TTL expires while
the lock is held, the lock is dropped allowing someone
else to acquire it. To keep a lock locked for long
processing stages, call the refresh() method periodically
(within the TTL time limit) on the lock object returned
by this method.
timeout
The length of time (in seconds) to wait for the lock
before giving up. If this is 0, acquiring the lock is
non-blocking, and the lock may not result in an acquired
lock. If this is greater than 0, attempts to acquire will
give up after that number of seconds and may not result in
an acquired lock. If this is None attempts to acquire the
lock will block indefinitely and are guaranteed to return
an acquired lock. To test whether the lock is acquired,
use the is_aquired() method on the lock object returned by
this method.
"""
return LockHolder(self.etcd, self.name, ttl, timeout)
|
"""
Given an integer array nums sorted in non-decreasing order,
remove the duplicates in-place such that each unique element appears only once.
The relative order of the elements should be kept the same.
Since it is impossible to change the length of the array in some languages,
you must instead have the result be placed in the first part of the array nums.
More formally, if there are k elements after removing the duplicates,
then the first k elements of nums should hold the final result.
It does not matter what you leave beyond the first k elements.
Return k after placing the final result in the first k slots of nums.
Do not allocate extra space for another array.
You must do this by modifying the input array in-place with O(1) extra memory.
Input: nums = [1,1,2]
Output: 2, nums = [1,2,_]
Explanation: Your function should return k = 2, with the first two elements of nums being 1 and 2 respectively.
It does not matter what you leave beyond the returned k (hence they are underscores).
Input: nums = [0,0,1,1,1,2,2,3,3,4]
Output: 5, nums = [0,1,2,3,4,_,_,_,_,_]
Explanation: Your function should return k = 5, with the first five elements of nums being 0, 1, 2, 3, and 4 respectively.
It does not matter what you leave beyond the returned k (hence they are underscores).
Constraints:
0 <= nums.length <= 3 * 104
-100 <= nums[i] <= 100
nums is sorted in non-decreasing order.
"""
class Solution:
def removeDuplicates(self, nums: List[int]) -> int:
idx_cur = 0
for i in range(1, len(nums)):
if nums[i] != nums[idx_cur]:
idx_cur += 1
nums[idx_cur] = nums[i]
return idx_cur + 1
|
# coding:utf-8
"""
Simplized version of 0-1 knapSack method.
Donghui Chen, Wangmeng Song
May 15, 2017
Wangmeng Song
August 16, 2017
"""
def zeros(rows, cols):
row = []
data = []
for i in range(cols):
row.append(0)
for i in range(rows):
data.append(row[:])
return data
def getItemsUsed(w, c):
# item count
i = len(c) - 1
# weight
currentW = len(c[0]) - 1
# set everything to not marked
marked = []
for i in range(i + 1):
marked.append(0)
while (i >= 0 and currentW >= 0):
# if this weight is different than the same weight for the last item
# then we used this item to get this profit
# if the number is the same we could not add this item because it was too heavy
if (i is 0 and c[i][currentW] > 0) or (c[i][currentW] is not c[i - 1][currentW] and i > 0):
marked[i] = 1
currentW = currentW - w[i]
i = i - 1
return marked
# w = list of item weight or cost
# W = max weight or max cost for the knapsack
# v = size of lat
def zeroOneKnapsack(w, v, W):
# c is the cost matrix
c = []
n = len(w)
# set inital values to zero
c = zeros(n, W + 1)
# the rows of the matrix are weights and the columns are items
# cell c[i,j] is the optimal profit for i items of cost j
# for every item
for i in range(0, n):
# for ever possible weight
for j in range(0, W + 1):
# if this weight can be added to this cell then add it if it is better than what we aready have
if (w[i] > j):
# this item is to large or heavy to add so we just keep what we aready have
c[i][j] = c[i - 1][j]
else:
# we can add this item if it gives us more value than skipping it
# c[i-1][j-w[i]] is the max profit for the remaining weight after we add this item.
# if we add the profit of this item to the max profit of the remaining weight and it is more than
# adding nothing , then it't the new max profit if not just add nothing.
c[i][j] = max(c[i - 1][j], v[i] + c[i - 1][j - w[i]])
mostvalueindex = getItemsUsed(w, c)
personindex = [j for j, x in enumerate(mostvalueindex) if x is 1]
mostvalueperson = sum([w[n] for n in personindex])
return [mostvalueperson, personindex]
# if __name__ == '__main__':
# w = [2, 3, 2, 3, 2]
# v = [2, 1, 4, 3, 0]
# MAX = 5
# print zeroOneKnapsack(w, v, MAX)
|
""" Think of something you could store in a list.
Write a program that creates a list containing these items """
start = True
languages = []
while start:
name = input('Hi, what\'s your name?: ')
print(f'Welcome {name.title()}')
fav_language = input('What is your favorite programming language?:\n')
languages.append(fav_language)
finish = input('would you like to continue (y/n): ')
if finish == 'y':
continue
else:
break
print('People likes the following programming languages:')
for language in languages:
print(f'- {language.title()}')
|
numbers = [54, 23, 66, 12]
sumEle = numbers[1] + numbers[2]
print(sumEle) |
class Solution:
def search(self, nums: List[int], target: int) -> int:
#start pointers
low = 0
high = len(nums) - 1
# iterate trought the array
while low <= high:
# define middle of array
mid = low + (high - low) // 2
# if position == target return position
if nums[mid] == target:
return mid
# if middle position lets say(5) in a array of 10
# if the number of the middle position 5 is bigger than target
# high - 1
elif nums[mid] < target:
low = mid + 1
# if the number of the middle position 5 is smaller than target
# low + 1
else:
high = mid - 1
return -1
#not found return -1
|
#-*- coding: utf-8 -*-
#!/usr/bin/env python
## dummy class to replace usbio class
class I2C(object):
def __init__(self,module,slave):
pass
def searchI2CDev(self,begin,end):
pass
def write_register(self, reg, data):
pass
# def I2C(self,module,slave,channel):
# pass
def autodetect():
pass
def setup():
pass
|
# Variables representing the number of candies collected by alice, bob, and carol
alice_candies = 121
bob_candies = 77
carol_candies = 109
# Your code goes here! Replace the right-hand side of this assignment with an expression
# involving alice_candies, bob_candies, and carol_candies
total = (alice_candies + bob_candies + carol_candies)
print((total % 3)) |
class ProfileDoesNotExist(Exception):
def __init__(self, message, info={}):
self.message = message
self.info = info
class ScraperError(Exception):
def __init__(self, message, info={}):
self.message = message
self.info = info
|
field_size = 40
snakeTailX = [40,80,120]
snakeTailY = [0,0,0 ]
Xhead, Yhead, Xfood, Yfood = 120,0,-40,- 40
snakeDirection = 'R'
foodkey = True
snakeLeight = 2
deadKey = False
score = 0
def setup():
global img
smooth()
size(1024,572)
img = loadImage("gameover.jpg")
size(1024,572)
noStroke()
frameRate(5)
smooth(250)
def draw():
global Xhead, Yhead, snakeDirection1, Xfood, Yfood, snakeLeight,snakeTailX,snakeTailY, deadKey, img
println(snakeTailX)
imageMode(CENTER)
image( img, width/2, height/2)
if deadKey == False:
background(100)
snakeDirection1()
snakeGO()
food()
foodEat()
snakeTail()
for i in range(snakeLeight):
if snakeTailX[i] == Xhead and snakeTailY[i] == Yhead:
snakeDead()
if Xhead + field_size/2 > width or Xhead < 0 or Yhead + field_size/2 > height or Yhead < 0:
snakeDead()
fill(0)
textSize(25)
text(score, width - 50,50)
fill(250, 75, 50,)
ellipse(Xhead, Yhead, field_size, field_size)
def keyPressed():
global snakeDirection, foodkey, snakeLeight, deadKey, score, Xhead, Yhead,snakeTailX,snakeTailY
if deadKey and keyCode == RIGHT:
snakeDirection = 'R'
foodkey = True
snakeLeight = 2
deadKey = False
score = 0
Xhead, Yhead = 120, 0
snakeTailX = [40,80,120]
snakeTailY = [0,0,0 ]
def snakeTail():
global foodkey
if foodkey == False:
for i in range(snakeLeight):
snakeTailX[i] = snakeTailX[i+1]
snakeTailY[i] = snakeTailY[i+1]
snakeTailX[snakeLeight] = Xhead
snakeTailY[snakeLeight] = Yhead
fill(random(0,250),random(0,250),random(0,250))
for i in range(snakeLeight):
ellipse(snakeTailX[i], snakeTailY[i],field_size,field_size)
def foodEat():
global Xhead, Yhead, Xfood, Yfood, foodkey, snakeLeight, score
if Xhead == Xfood and Yhead == Yfood:
foodkey = True
score += 1
snakeTailX.append(Xhead)
snakeTailY.append(Yhead)
snakeLeight += 1
def food():
global foodkey, Xfood, Yfood
fill(255, 0, 0)
ellipse(Xfood, Yfood, field_size, field_size)
if foodkey:
Xfood = int(random(0, width/field_size))*field_size
Yfood = int(random(0, height/field_size))*field_size
foodkey = False
for i in range(snakeLeight):
if snakeTailX[i] == Xfood and snakeTailY[i] == Yfood:
foodkey = True
food()
def snakeGO():
global snakeDirection, Xhead, Yhead
if snakeDirection == 'R':
Xhead += field_size
if snakeDirection == 'D':
Yhead += field_size
if snakeDirection == 'L':
Xhead -= field_size
if snakeDirection == 'U':
Yhead -= field_size
def snakeDirection1():
global snakeDirection
if keyCode == RIGHT and snakeDirection != 'L':
snakeDirection = 'R'
if keyCode == LEFT and snakeDirection != 'R':
snakeDirection = 'L'
if keyCode == UP and snakeDirection != 'D':
snakeDirection = 'U'
if keyCode == DOWN and snakeDirection != 'U':
snakeDirection = 'D'
def snakeDead():
global deadKey
deadKey = True
|
#function with default parameter
def describe_pet(pet_name,animal_type='dog'):
"""Displays information about a pet."""
print(f"I have a {animal_type}")
print(f"My {animal_type}'s name is {pet_name.title()}")
#a dog named willie
describe_pet('willie')
#a hamster named harry
describe_pet('harry','hamster') #positional arguments
describe_pet(animal_type='hamster',pet_name='harry') #key word arguments
describe_pet(pet_name='harry',animal_type='hamster')
|
"""
Utility functions for handling price data
"""
def get_min_prices(assets: list) -> list:
min_price = min([asset.get("price") for asset in assets])
return [asset for asset in assets if asset.get("price") == min_price]
def get_average_price(assets: list) -> float:
num_assets = len(assets)
return sum_prices(assets)/num_assets if num_assets else 0
def sum_prices(assets: list) -> float:
return sum([asset.get("price") for asset in assets]) |
class AncapBotError(Exception):
pass
class InsufficientFundsError(AncapBotError):
pass
class NonexistentUserError(AncapBotError):
pass
|
#Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Build Tower
#Problem level: 6 kyu
def tower_builder(n_floors):
return [' '*(n_floors-x)+'*'*(2*x-1)+' '*(n_floors-x) for x in range(1,n_floors+1)]
|
class Solution:
def hitBricks(self, grid: List[List[int]], hits: List[List[int]]) -> List[int]:
def dfs(y, x):
if y < 0 or y > m-1 or x < 0 or x > n-1 or grid[y][x]!=1:
return 0
grid[y][x] =2
ret = 1
return ret + sum(dfs(y+shift_y, x+shift_x) for shift_y, shift_x in [[1, 0], [0, 1], [-1, 0], [0, -1]])
def is_connected(y, x):
return y==0 or (0<=y<m and 0<=x<n and any([grid[y+shift_y][x+shift_x]== 2 for shift_y, shift_x in [[1, 0], [0, 1], [-1, 0], [0, -1]] if 0<=y+shift_y<m and 0<=x+shift_x<n]))
if not grid:
return []
m, n = len(grid), len(grid[0])
for (y, x) in hits:
grid[y][x] -= 1
for i in range(n):
dfs(0, i)
res = [0] * len(hits)
for idx, (y, x) in enumerate(hits[::-1]):
grid[y][x] += 1
current_cnt = 0
if grid[y][x] and is_connected(y, x):
current_cnt = dfs(y, x)-1
res[len(hits)-idx-1] = current_cnt
return res |
class Handler:
def __init__(self):
self.trace = []
def handle(self, data):
self.trace.append(f'HANDLE {data}')
return data
|
largura = float(input('Digite a largura da parede: '))
altura = float(input('Digite a altura da parede: '))
area = altura * largura
tinta = area / 2
print('Area: {} m2'.format(area))
print('Com as medidas {:.2f}x{:.2f} a quantidade de litros de tinta necessários é: {:.2f} L'.format(altura,largura,tinta))
|
"""
字段 含义 数据类型 说明
SecurityID 证券代码 STRING
DateTime 日期时间 NUMBER 20151123091630
PreClosePx 昨收价 NUMBER(3)
OpenPx 开始价 NUMBER(3)
HighPx 最高价 NUMBER(3)
LowPx 最低价 NUMBER(3)
LastPx 最新价 NUMBER(3)
TotalVolumeTrade 成交总量 NUMBER 股票:股 基金:份 债券:手指数:手
TotalValueTrade 成交总金额 NUMBER(2) 元
InstrumentStatus 交易状态 STRING
BidPrice[10] 申买十价 NUMBER(3)
BidOrderQty[10] 申买十量 NUMBER
BidNumOrders[10] 申买十实际总委托笔数 NUMBER
BidOrders[50] 申买一前 50 笔订单 NUMBER
OfferPrice[10] 申卖十价 NUMBER(3)
OfferOrderQty[10] 申卖十量 NUMBER
OfferNumOrders[10] 申卖十实际总委托笔数 NUMBER
OfferOrders[50] 申卖一前 50 笔订单 NUMBER
NumTrades 成交笔数 NUMBER
IOPV ETF 净值估值 NUMBER (3)
TotalBidQty 委托买入总量 NUMBER 股票:股 基金:份 债券:手
TotalOfferQty 委托卖出总量 NUMBER 股票:股 基金:份 债券:手
WeightedAvgBidPx 加权平均委买价格 NUMBER (3)
WeightedAvgOfferPx 加权平均委卖价格 NUMBER (3)
TotalBidNumber 买入总笔数 NUMBER
TotalOfferNumber 卖出总笔数 NUMBER
BidTradeMaxDuration 买入成交最大等待时间 NUMBER
OfferTradeMaxDuration 卖出成交最大等待时间 NUMBER
NumBidOrders 买方委托价位数 NUMBER
NumOfferOrders 卖方委托价位数 NUMBER
WithdrawBuyNumber 买入撤单笔数 NUMBER
WithdrawBuyAmount 买入撤单数量 NUMBER
WithdrawBuyMoney 买入撤单金额 NUMBER (2)
WithdrawSellNumber 卖出撤单笔数 NUMBER
WithdrawSellAmount 卖出撤单数量 NUMBER
WithdrawSellMoney 卖出撤单金额 NUMBER (2)
ETFBuyNumber ETF 申购笔数 NUMBER
ETFBuyAmount ETF 申购数量 NUMBER
ETFBuyMoney ETF 申购金额 NUMBER (2)
ETFSellNumber ETF 赎回笔数 NUMBER
ETFSellAmount ETF 赎回数量 NUMBER
ETFSellMoney ETF 赎回金额 NUMBER (2)
"""
"""
SecurityID 证券代码 STRING
TradeTime 成交时间 NUMBER 2015112309163002
精确到百分之一秒
TradePrice 成交价格 NUMBER (3)
TradeQty 成交量 NUMBER
TradeAmount 成交金额 NUMBER (3)
BuyNo 买方订单号 NUMBER
SellNo 卖方订单号 NUMBER
TradeIndex 成交序号 NUMBER 自 2021 年 4 月 26 日启用
ChannelNo 频道代码 NUMBER 自 2021 年 4 月 26 日启用
TradeBSFlag 内外盘标志 STRING 内外盘标志:
B – 外盘,主动买
S – 内盘,主动卖
N – 未知
自 2021 年 4 月 26 日启用
BizIndex 业务序列号 NUMBER 业务序列号
与竞价逐笔委托消息合并后
的连续编号,从 1 开始,按
Channel 连续
自 2021 年 4 月 26 日启用
"""
shold_tick_columns = ['TradeTime', 'TradeChannel', 'SendingTime', 'SellNo', 'TradeAmount',
'TradeBSFlag', 'TradeIndex', 'TradePrice', 'TradeQty', 'BuyNo']
shold_snapshot_columns = ['NumTrades', 'OfferTradeMaxDuration', 'ImageStatus', 'TotalBidNumber',
'TotalWarrantExecQty', 'WithdrawSellMoney', 'IOPV', 'BidOrders',
'ETFSellAmount', 'TotalOfferQty', 'WithdrawBuyNumber',
'WeightedAvgOfferPx', 'ETFBuyNumber', 'WarLowerPx', 'MsgSeqNum',
'WithdrawSellAmount', 'ETFSellMoney', 'Volume', 'BidOrderQty', 'OpenPx',
'HighPx', 'PreClosePx', 'LowPx', 'WeightedAvgBidPx', 'ETFSellNumber',
'OfferNumOrders', 'WithdrawSellNumber', 'ETFBuyAmount',
'TotalOfferNumber', 'OfferPrice', 'NumOfferOrders', 'BidPrice',
'OfferOrderQty', 'TotalBidQty', 'SendingTime', 'ETFBuyMoney',
'InstrumentStatus', 'WithdrawBuyAmount', 'ClosePx',
'BidTradeMaxDuration', 'NumBidOrders', 'LastPx', 'Amount', 'AveragePx',
'WarUpperPx', 'YieldToMaturity', 'BidNumOrders', 'WithdrawBuyMoney',
'TradingPhaseCode', 'QuotTime', 'OfferOrders']
sz_snapshot_columns = ['NumTrades', 'OfferNumOrders', 'LowerLimitPx', 'ImageStatus',
'OfferPrice', 'BidPrice', 'BidOrders', 'OfferOrderQty', 'PeRatio2',
'TotalBidQty', 'SendingTime', 'PeRatio1', 'TotalOfferQty', 'ClosePx',
'WeightedAvgPxChg', 'Change2', 'Change1', 'LastPx',
'WeightedAvgOfferPx', 'Amount', 'UpperLimitPx', 'AveragePx',
'TotalLongPosition', 'MsgSeqNum', 'Volume', 'BidNumOrders',
'BidOrderQty', 'TradingPhaseCode', 'QuotTime', 'OpenPx', 'OfferOrders',
'PreWeightedAvgPx', 'HighPx', 'PreClosePx', 'LowPx',
'WeightedAvgBidPx']
sz_order = ['OrderQty', 'OrdType', 'TransactTime', 'ExpirationDays', 'Side',
'ApplSeqNum', 'Contactor', 'SendingTime', 'Price', 'ChannelNo',
'ExpirationType', 'ContactInfo', 'ConfirmID']
sz_tick_columns = ['ApplSeqNum', 'BidApplSeqNum', 'SendingTime', 'Price', 'ChannelNo',
'Qty', 'OfferApplSeqNum', 'Amt', 'ExecType', 'TransactTime']
sh_tick_columns = ['SecurityID', 'TradeTime', 'TradePrice', 'TradeQty', 'TradeAmount',
'BuyNo', 'SellNo', 'TradeIndex', 'ChannelNo', 'TradeBSFlag', 'BizIndex']
sh_snapshot_columns = ['SecurityID', 'DateTime', 'PreClosePx', 'OpenPx', 'HighPx', 'LowPx', 'LastPx',
'TotalVolumeTrade', 'TotalValueTrade', 'InstrumentStatus',
'BidPrice0', 'BidPrice1', 'BidPrice2', 'BidPrice3', 'BidPrice4', 'BidPrice5', 'BidPrice6', 'BidPrice7', 'BidPrice8', 'BidPrice9',
'BidOrderQty0', 'BidOrderQty1', 'BidOrderQty2', 'BidOrderQty3', 'BidOrderQty4', 'BidOrderQty5', 'BidOrderQty6', 'BidOrderQty7', 'BidOrderQty8', 'BidOrderQty9',
'BidNumOrders0', 'BidNumOrders1', 'BidNumOrders2', 'BidNumOrders3', 'BidNumOrders4', 'BidNumOrders5', 'BidNumOrders6', 'BidNumOrders7', 'BidNumOrders8', 'BidNumOrders9',
'BidOrders0', 'BidOrders1', 'BidOrders2', 'BidOrders3', 'BidOrders4', 'BidOrders5', 'BidOrders6', 'BidOrders7', 'BidOrders8', 'BidOrders9',
'BidOrders10', 'BidOrders11', 'BidOrders12', 'BidOrders13', 'BidOrders14', 'BidOrders15', 'BidOrders16', 'BidOrders17', 'BidOrders18', 'BidOrders19',
'BidOrders20', 'BidOrders21', 'BidOrders22', 'BidOrders23', 'BidOrders24', 'BidOrders25', 'BidOrders26', 'BidOrders27', 'BidOrders28', 'BidOrders29',
'BidOrders30', 'BidOrders31', 'BidOrders32', 'BidOrders33', 'BidOrders34', 'BidOrders35', 'BidOrders36', 'BidOrders37', 'BidOrders38', 'BidOrders39',
'BidOrders40', 'BidOrders41', 'BidOrders42', 'BidOrders43', 'BidOrders44', 'BidOrders45', 'BidOrders46', 'BidOrders47', 'BidOrders48', 'BidOrders49',
'OfferPrice0', 'OfferPrice1', 'OfferPrice2', 'OfferPrice3', 'OfferPrice4', 'OfferPrice5', 'OfferPrice6', 'OfferPrice7', 'OfferPrice8', 'OfferPrice9',
'OfferOrderQty0', 'OfferOrderQty1', 'OfferOrderQty2', 'OfferOrderQty3', 'OfferOrderQty4', 'OfferOrderQty5', 'OfferOrderQty6', 'OfferOrderQty7', 'OfferOrderQty8', 'OfferOrderQty9',
'OfferNumOrders0', 'OfferNumOrders1', 'OfferNumOrders2', 'OfferNumOrders3', 'OfferNumOrders4', 'OfferNumOrders5', 'OfferNumOrders6', 'OfferNumOrders7', 'OfferNumOrders8', 'OfferNumOrders9',
'OfferOrders0', 'OfferOrders1', 'OfferOrders2', 'OfferOrders3', 'OfferOrders4', 'OfferOrders5', 'OfferOrders6', 'OfferOrders7', 'OfferOrders8', 'OfferOrders9',
'OfferOrders10', 'OfferOrders11', 'OfferOrders12', 'OfferOrders13', 'OfferOrders14', 'OfferOrders15', 'OfferOrders16', 'OfferOrders17', 'OfferOrders18', 'OfferOrders19',
'OfferOrders20', 'OfferOrders21', 'OfferOrders22', 'OfferOrders23', 'OfferOrders24', 'OfferOrders25', 'OfferOrders26', 'OfferOrders27', 'OfferOrders28', 'OfferOrders29',
'OfferOrders30', 'OfferOrders31', 'OfferOrders32', 'OfferOrders33', 'OfferOrders34', 'OfferOrders35', 'OfferOrders36', 'OfferOrders37', 'OfferOrders38', 'OfferOrders39',
'OfferOrders40', 'OfferOrders41', 'OfferOrders42', 'OfferOrders43', 'OfferOrders44', 'OfferOrders45', 'OfferOrders46', 'OfferOrders47', 'OfferOrders48', 'OfferOrders49',
'NumTrades', 'IOPV', 'TotalBidQty', 'TotalOfferQty', 'WeightedAvgBidPx', 'WeightedAvgOfferPx', 'TotalBidNumber',
'TotalOfferNumber', 'BidTradeMaxDuration', 'OfferTradeMaxDuration', 'NumBidOrders', 'NumOfferOrders',
'WithdrawBuyNumber', 'WithdrawBuyAmount', 'WithdrawBuyMoney', 'WithdrawSellNumber', 'WithdrawSellAmount', 'WithdrawSellMoney',
'ETFBuyNumber', 'ETFBuyAmount', 'ETFBuyMoney', 'ETFSellNumber', 'ETFSellAmount', 'ETFSellMoney']
def maketime(time):
time = str(time)
return time[0:4]+'-'+time[4:6]+'-' + time[6:8] + ' '+time[8:10]+':'+time[10:12]+':'+time[12:14]
def maketime_tick(time):
time = str(time)
return time[0:4]+'-'+time[4:6]+'-' + time[6:8] + ' '+time[8:10]+':'+time[10:12]+':'+time[12:14]+'.' + time[14:]
|
description = 'Verify that the user cannot log in if username value is missing'
pages = ['login']
def setup(data):
pass
def test(data):
go_to('http://localhost:8000/')
send_keys(login.password_input, 'admin')
click(login.login_button)
capture('Verify the correct error message is shown')
verify_text_in_element(login.error_list, 'Username is required')
def teardown(data):
close()
|
__description__ = 'lamuda common useful tools'
__license__ = 'MIT'
__uri__ = 'https://github.com/hanadumal/lamud'
__version__ = '21.6.28'
__author__ = 'hanadumal'
__email__ = '[email protected]'
|
""" Ex - 048 - Faça um programa que calcule a soma entre todos os números impares que
são múltipços de três e que se encotram no intervalo de 1 até 500"""
# Como eu Fiz
print(f'{"> Soma Dos Números Multiploes de 3 <":=^40}')
# Criar laço de repetição
s = 0
for n in range(3, 501, 3):
if n % 2 == 1:
s = s + n
print(s)
# Como o professor Guanabara fez
soma = 0
cont = 0
for c in range(1, 501, 2):
if c % 3 == 0:
cont += 1
soma += c
print(f'A soma de todos os {cont} valores solicitados é {soma}')
|
# encoding=utf-8
"""
tweet_data_dict contains CONSTANTS and word lists for nlp processing.
tw2vec is a pretrained vector model trained on a tweet corpus from google
ADDITIONAL SUB-DIRECTORIES OFF GSTWEET FOR THIS PROJECT:
./project/ - articles and documents on this topic
./twitter/ - files with batches of tweets, json format, from twitter developer api
endpoints which I access using Postman. easier to tweek queries and check results
than via scripting the http get in python.
/templates/ - html, javascript, json and yaml templates. html+js as I'm looking at some
cool d3 viz stuff I can do with this data, json for playing with parsing schemas, and
yaml for config files if I make the twitter api calls from py scripts.
/output/ - my 'deliverables' such as serializing my data to file, the gensim models I
generate, wordclouds, and other visualizations and saved data.
/models/ - pretrained or pre-labeled data for word2vec or nltk models, such as large
vocabulary files with vector embeddings or tweets or phrases with sentiment labels
"""
MODELDIR = '/Users/bgh/dev/pydev/superleague/models/'
TWEETSDIR = '/Users/bgh/dev/pydev/superleague/twitter/'
ESLDIR = '/Users/bgh/dev/pydev/superleague/twitter/superleague/'
METADIR = '/Users/bgh/dev/pydev/superleague/twitter/facebook_meta/'
META_OUT = '/Users/bgh/dev/pydev/superleague/output/facebook_meta'
OUTDIR = '/Users/bgh/dev/pydev/superleague/output/'
class PRNF:
"""
Class PRNF (PRiNt Formatting) makes it easy to embed escape-type formatting commands
in a python print statement. to print bold: print("PRNF.BOLD blah-blah PRNF.END")
"""
PURPLE = "\033[95m"
CYAN = "\033[96m"
DARKCYAN = "\033[36m"
BLUE = "\033[94m"
GREEN = "\033[92m"
YELLOW = "\033[93m"
RED = "\033[91m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
END = "\033[0m"
W2VEC_PRE = '/Users/bgh/dev/pydev/superleague/models/freebase-vectors-skipgram1000-en.bin'
TW2VEC_PRE = '/Users/bgh/dev/pydev/superleague/models/word2vec_twitter_tokens.bin'
GS_ABSOLUTE = ["always", "horrible", "never", "perfect", "worthless", "useless",
"infinitely", "absolutely", "completely", "totally", "exponentially",
"idiotic"]
GS_EXTREME = ["insane", "evil", "psycho", "idiot", "rube", "crazy", "neurotic", "retarded",
"stupid"]
GS_BALANCE = ["relative", "preferable", "optimal", "better", "inferior", "superior"]
# these are standard Adverb and Pronoun STOPS on many nlp projects
GS_ADVB = ["am", "are", "as", "be", "been", "being", "do", "does", "doing", "did",
"have", "has", "had", "having", "is", "until", "was", "were"]
GS_PPRON = ["we", "he", "her", "him", "me", "she", "them", "us", "they"]
# Words to Remove: standard plus stops based on project type, context, or tfidf analysis
STOPS_ESL = ["team", "club", "clubs", "UEFA", "ESL", "UEFA", "game", 'superlega',
"english", "arsenal", "barcelona", "chelsea", "juventus", "liverpool",
"mancity", "manutd", "MUFC", "madeid", "real madrid", 'tottenham',
"sheffield", "shiffeld", 'spurs', "united", "esl", "tesol",
'lfc', 'mufc', 'mcfc', 'cfc', 'thfc',
'european', 'superleague', 'football', 'florentino',
'super', 'league', "days", "week", "today", "year",
"europesuperleague", 'europeansuperleague', "league", "'pundit",
'sport_en', 'spacetravel', 'leemarkjudges', '08069830203', '2PACMUFC',
'phil__howarth', 'john_nufc', 'sheshe_tom', 'russell_vine', '[at', '2021]',
'\u2066\u2066mjshrimper\u2069', '\u2066kieranmaguire\u2069', 'mikel',
'tourtalk', 'PGA', '⛳️', 'golf', 'pgatour', 'markslearnenglish',
"legacyfan", '1reddevil2', '\u2066kevinhunterday\u2069', 'david_ornstein',
"learning", "content", "efl", "learn", "english", "tesol", "tefl",
"onlinelearning", "language", "speakenglish", "learnenglish", "lesson",
"plan", "tefl", "grammar", "englishonline", "speakenglish", "vocabulary",
"ELT", "reading", "education", "practice", "conference", "remote",
"classroom", "students"]
GS_STOP = ["RT", "a", "about", "all", "almost", "also", "among", "am", "an", "and","already",
"any", "are", "as", "at", "back", "because", "but", "by", "cause", "come",
"could", "did", "dont", "does", "either", "else", "ever", "even", "for", "like",
"from", "go", "going", "has", "had", "have", "his", "her", "hers", "how", "not",
"however", "if", "I", "in", "into", "is", "it", "its", "just", "least", "who",
"let", "lets", "likely", "may", "me", "might", "must", "much", "my", "need",
"no", "now", "of", "often", "on", "one", "only", "or", "other", "our", "own", "rather",
"part", "really", "same", "seems", "shall", "show", "should", "since", "so", "some",
"something", 'still', 'such', "than", "that", "the", "their", "them", "then",
"there", "these", "they", "think", "this", "those", "thus", "to", "too",
"was", "watch", "well", "were", "what", "while", "who", "will", "would", "whom",
"with", "yet", "your", "rt", "we", "what", "been", "more", "when", "big", "after"
"he", "man", "us", "off", "les", "des", "et", "il", "en", "before", "di",
"us", "very", "you"]
STOP_ADD = ['_', '(', ')', '…', '[', ']', '_', '__', ':', '"', '️', ':', '"', '/', ']',
'201920', '20212022', '235', '244', '247', '28t', '6', '651m', '7.406', '️',
'about', 'acdvd', 'actual', 'additional', 'de', 'A', '|', 'le', '[', ']',
'admi', 'affect', 'affects', 'again', 'ahead', 'ake', 'allowed',
'alonehowever', 'als', 'anybody', 'anyone', 'anyway', 'apostrophe_',
'app', 'ar', 'around', 'asterisk', 'b.', 'ba', 'bara', 'be', 'being',
'besides', 'breaka', 'c', 'can', 'chuchilips', 'cian', 'clearly',
'colon', 'comma', 'cos', 'do', 'da', 'definitely', 'delve', 'despite',
'differen', 'doing', 'dr', 'e', 'each', 'ed', 'eg', 'el', 'eltoc',
'emailed', 'erstes', 'everyone', 'ewww', 'f', 'fa', 'fairly', 'fe',
'feel', 'flies', 'fully', 'gave', "give", 'get', 'gil', 'going', 'got',
'gua', 'guess', 'happened', 'hashtag', 'having', 'hea', 'helicopter',
'here', 'hey', 'hows', 'hyphen', 'i', 'id', 'ielts', 'ill', 'ings',
'ins', 'instead', 'ipad', 'iphone', 'ipod', 'ive', 'j', 'ju', 'keeping',
'l', 'la', 'lea', 'lev', 'literally', 'lot', 'm',
'mark', 'marks', 'mars', 'matter', "'the", 'minutes', 'TV', "news",
'maybe', 'micr', 'middleweigh', 'mobile', 'most', 'mr', 'n', 'name',
'nasa', 'nearly', 'nevertheless', 'notes', 'o', 'oa', 'orbn', 'orry',
'p', 'per', 'play', 'possibly', 'potentially', 'pu', 'punctuation',
'put', 'quite', 'quotation', 'r', 'remains', 'ro', 'rotorcraft',
'rul', 'said', 'say', 'seem', 'semicolon', 'single', 'sl', 'slash',
'soo', 'st', 'sto', 'supe', 't', 'taken', 'talked', "talk", "look", "give",
'th', 'tha', 'thats', 'themselves', 'theres', 'thing', 'things', "'save",
'tho', 'thst', 'towar', 'trying', 'type', 'u', 'un', 'und', 'underscore',
'uns', 'until', 'vary', 'view', 'w', 'way', 'well', 'went', 'whe',
'whether', 'which', 'whoever', 'writes', 'x', 'y', 'ya', 'ye',
'yep', 'yer', 'youd']
STOP_TWEET = ["says", "take", "know", "every", "time", "people", "want", "wants",
'approximately', 'typifies', 'continuously', "many", "every", "happen",
'GTTO', 'describes', 'remembering', 'reconsidering', 'developp',
'phantasy_foods', "point", "artistrtweeters", "gnev",
'see', 'compared', '=', '/', 'sure', '&', "''", "'d", "'ll", "'s",
'great:', 'why', '1', '2', '01', '–', "according",
'sta…', 'pod:', '4', 'thoughts', 'pgas', '"', 'theyre', '&',
'60', '182', 'yall', 'OK', 'onto', '"this', 'him', 'call',
'""suffer', 'become', 'ttanslated', 'الدوري_الانجليزي', 'دوري_السوبر_الاوروبي',
'dear', 'youre', 'mot', 'others', 'both', '[thread]', '30',
'station', '24ur', 'im', 'basically', 'soon', 'where',
's', 'saidthe', 'though', 'thinks', 'thought', 'do:', 'hes', 'al',
'five', 'sense', 'form', "make", 'needs', 'tv', '3pm', 'show', 'due', 'watch',
'between', 'turned', 'different', 'simply', 'through', 'actually', 'support'
]
STOP_NONALPHA: list = [':', '!', '?', '…', '"', '😂', '️', '🤣', '🚨', '12', '⚽', '1', '👏', '3', '_', '[', '2', '/',
']', '5', '2021', '🤔', '0', '👇', '🔴', '🗣', '6', '🏻', '😭', '✅', '\U000e0067', '💥', '10',
'🇪', '1futbol', '👍', '\u200d', '23', 'วันทองตอนจบ', '🎙', '$', '14', '4', '7', '🚫', '🏼',
'😎', '🏽', '€', '❌', '♂', '⚪', '❤', '💯', '😆', '⚒', '👎', '⬇', '🏉', '100', '👉', '2022', '&',
'🏆', '20', '💙', '🔥', 'ℹ️', '▪', '24', '2024', '🤷', '11', '2020', '🇹', '🏴', '\U000e0062',
'\U000e007f', '🙄', '💪', '🇸', '(', '•', '🇮', '\U000e0065', '\U000e006e', '17', '🎶', '👋',
'😌', '🇬', '22', ')', '21', '🔊', '😉', '❗', '🇺', '🔵', '|', '«', '😳', '😏', 'pics_by_tony',
'🇧', '🤦', '🙌', '8', '💸', 'https://t…', '\u2066', '\u2069', '48', '18', '➡', '🥳', '👊', '👀',
'😒', '15', '72', '🤡', '😅', '50', '40', '13', '1890', 'all_outside', ':)', '🤑', '🙏', '🎉',
'36', '🤬', '👌', '9', '99', '😃', '🇷', '🇩', '92', ':/', '✊', '_chaxza', 'p_myte', 'ryan_utb',
'⚫', '😍', 'https://t', '43', '😇', '🤯', 'celtic_now', '39', '😄', 'NUFC_HQ', '93', '19',
'_befoot', 'albie_dunn', '2026', '😜', 'efo_phil', 'i̇lerliyoruz', 'B_HQ', '🟡', '501', '5pm',
'89', '🇫', '90', '💰', '😁', '😡', '🎧', '♀', 'football_bm', '😫', 'uefacom_fr', 'aisa_arsenal',
'AB_18___', '7395', '1️⃣2️⃣', 'fu_invest', '🤨', 'nitin_cul', 'jm3s_', '1863 2021',
'john_w_henry', '🏟', '🆚', '360', '📺', '⏰', '300', '🚀', '96', '05', '64', '💵', '❓',
'3sportsgh', '📢', '🖥', '💀', '💬', '85', '😙', '80', '2023', '🎥', '🤞',
'lil_mr_dynamite', '🔁', '16', '»', '1994', '😢', 'pirlo_official', 'F1', '2035', '3rd', '🌏',
'rindai__gari', '44', 'lw_newcastle', '_owurakuampofo', 'lu_class', '中2英語', 'cfc__kevin',
'tommy_viccetti', 'yashveen_xyz', '210', '😬', '1021',
'แฟนบอลหลายกลุ่มได้ออกมารวมตัวกันแสดงจุดยืนไม่เห็นด้วยกับไอเดียยูโรเปียนซูเปอร์ลีก',
'การแข่งขันรายการใหม่ที่เพิ่งประกาศเปิด', 'p_hollas', 'spring_steen', '🤓', 'kookie_kuhle',
'betway_za', 'matty_west', '🤮', 'ms_sportsbiz', '6013092', '🪓', '11pics', 'n_kayy',
'buchi_laba', 'f24debate', '📻', '2004', '94', 'cc_eckner', '⚠', '🌍', ']:', '101', '77', '💩',
'😤', '123', 'ed_aarons', 'https://…', '5th', '1995', '🌎', 'mmu_law', '1st', '28', '8m', '😱',
'34', '✍', '1duval', '420day', '📍', '🌱', '😊', 'dw_sports', '8000', '10k', '️the', '__', '83',
'2⃣', '2manutd', '60m', '2019', '4sale', '🤝', '2005', '1904', '🟢', 'public_archive', '4evra',
'1545', 'i̇lkay', '__robh__', '1019', 'under_thecosh', '350000', 'sascha_p', 'TN03S',
'boss_himselff', 'x_godden', '310780', 'j_castelobranco', 'gala_news', '1905', 'justin_cash',
'_1', '📕', 'rbleipzig_fr', '1410', '56789', 'naushabah_khan', '🥇', '´', '🍀', '🥴', '📝',
'2021/22', '9ja', '️florentino', '⚡', '⏩', '21st', '█', '↓', 'adelaide_rams', 'voz_populi',
'2016', '49', '♦', 'interactive_key', '😲', '75', '📋', '91', '🔄', '09', '🙃', '👨', '00',
'redemmed_odds', '199', 'mr_ceyram', '_abdul_m_', '2021/2022', '⚰', 'der_augustus',
'theboy_whoiived', '1972', 'official_lyckez', 'british_gambler', '🥱', 'alex_slwk', '3sports',
'◾', 'y3nko', 'padmnaabh_s', '🤭', '65M', 'dorian__bdlp', '🔹', 'r1100gsbluenose', '🇦', '1878',
'04', '25', 'C1', 'o_machel', '804m', '030bn', '651m', '📰', 'nabil_djellit', 'WW1', 'lkn_jr',
'86', '👈', '💛', '!', '67', '🧡', '6pm', '3ingsilly', '02', '97', '1/3', '27', '15:00', '400',
'📸', '🛑', '🔗', '💷', 'th3n', 'wo4k', '🎵', '🖕', '8181', 'fcsm_eng', 'b_fernandes', '👑',
'danzila_jr', 'benjamin_watch', '☑', 're_dailymail', 'ellscott_mufc', 'shem_nyakeriga',
'ruvu_shootingfc', 'dave_otu', '😹', '📈', '📉', '19deabril', '30th', '12th', '🕒', '️⃣', '87',
'gj_thomas', 'sky_maxb', 'CR7', '14th', '💦', '300m', '45', '81', '29', '400m', 'letter_to_jack',
'9pm', '9th', '📌', 'caitlyn_jenner', '=', '180', '36th', '🟠', '_59', 'handsome_szn', '500M',
'✨', '📃', '50p', 'figo_anane', 'okt_ranking', 'M25', 'nkd_mzrx', '247', '🐯', ':D', '☕',
'eddie_l', '73', '🧗', '🤰', 'pat_que', 'byl_al', '19aprile', 'cfc_vish', '🤩', 'fausto__ii',
'250', '_2', 'fu4ad', '4wn', 'jesus_olimart', '021', '4trigger', '000', '🔎', '70', '200', 'KS1',
'🌈', '🌿', '🍃', 'front_threepod', 'my11', '30', '💶', '⬅', 'G7', '48hrs', '9876', '13officiel',
'35', '🎮', '1aaronpaul', '35x', '1m', '76', 'theo_shanks', '180m', 'af_spurs_osc',
'bee_lamsing', 'เรื่องด่วนตอนนี้', 'จะมีการประชุมคณะกรรมการบริหารของยูฟ่า',
'เย็นวันนี้ที่เมืองมองเทรอซ์', 'แน่นอนคือ', 'ถกปัญหา', 'และรับมือ', '2GB873', 'simon_hughes',
'owl_reporter', '405', '_1992', '5/6', 'prosper_tv', '_1102', '30BG', '65', '🖖', 'le__foireux',
'7barton', 'fums_magazin', '🏾', '90lcfc', 'sz_unitedcr', '️ESL', '1980', '🇨', '🇭', '8p', '31',
'7p', '😩', '😘', '2572', '405m', '🇳', '4th', '⏫', 'sv98', '88', '1500', '400k', '5⃣', '\u200b',
'fcbarcelona_cat', 'favp_org', 'ps6811', 'arminia_int', '😐', '💭', 's_redmist', '_3aaz_',
'cadiz_cfin', 'soccernews_nl', 'fcb_newsfr', '11freunde_de', 'reich_against', 'mr_sergiowalker',
'nicola_sellitti', '_le_20', 'valenciacf_en', '🥈', '🥉', 'CV1874', 'i_nautilus', '💐', '🌷',
'316', 'king_simian', 'newsome_mimi', '95dot9', '2308', 'teh_dewah', 'LE_FIVE', '😔',
'leew_sport', '4rsene', '4seen', 'ff_titans', 'lr2cblog', '3/4', '1992', '07', '⚖', 'ARIS__FC',
'barca_buzz', '304', '️joses', '️man', 'playmaker_en', '👆', '95', '8FM', '722', '💻', '18th',
'90s', '️euro', '️hopefully', '📣', '40yrs', '71', '🍎', '⚓', '⃣', 'ast_arsenal', '3⃣',
'2011/12', 'iam_presider', 'scott_rebnoise', 'mrhandsome_za', 'ryan_padraic', '42_ie', '🐍',
'jai_d', 'alex_dreyfus', '412', '1990', '989FM', 'tam_baw', 'K12', 'fpl_tactician',
'scoreboard_us', 'alison_mcgovern', '️statement', '500', '932', '😵', '_88', '90plusshow',
'm69098997eye', '10s', 'avance_carrental_official', 'twiggy_garcia', 'kathrinm_hansen',
'lfc_wahome', '3arnie', 'POF_POD', '📚', '✏', 'james_barker', '🥧', '12thmantweets_', 'org_scp',
'88r', 'cycling_memes', '03_ezz', '7514', 'P12', 'mphoeng_m', '1apponline', '👚', '👖', '🎩',
'160', '1961', '5bn', '🗯', '2008', '600m', '✌', '🅱️', '🎨', '🎤', '1/2', '37', '😰', '_08',
'💼', '1⃣', 'al___d', '🏐', '54', '⬆', '🏫', 'phantasy_foods', '350M', '5️⃣0️⃣', '1786',
'2arsenal', 'socialist_app', 'TV3', '00x', '⏲', '️late', 'broadcast_sport', 'broadcast_sports',
'jake_bickerton', 'vis_mac', '10hd', 'kofi_kwarteng', 'queen_uk', '325', 'socialist_party',
'_67', 'jas_pod', 'mcgowan_stephen', '🗳', 'mas_que_pelotas', '️seguimentfcb', '28t', '51',
'pisto_gol', 'stone_skynews', '4m', 'so_cal_geordie', 'carshaltona_fc', '92bible', '450',
'boycie_marlene', 'aha_com', '66', 'nick_pye', 'aaron_challoner', '_7', 'D5', 'dean_geary',
'l0uisqlf', '2045', 'uci_cycling', '📥', '82', '📲', 'R98UIQIGL6', '️english', '☠', '‼', '114',
'74', '8ème', '🇵', '55', '191', '2nd', '5sport', '41', '01918030', '68ASR', 'pin_klo', '🍻',
'kog_mark', '🤧', 'guardian_sport', '🤪', '🚩', '😀', '💴', 'neil_moxley', 'beinsports_en',
'1ère', '69', 'red_white', '9230861192', '🏦', 'the_crab_man', 'diego_bxl', '6th', '7th',
'dsj_itv', '400M', 'sop_soppi', 'psg_inside', 'ghrhull_eyork', '1892redspodcast', '⭐',
'shuhel_miah', 'dan_k', '🤜', '🤛', 'timothy_kls', '💡', '50000', '2k', '🎬', 'fx9',
'marina_sirtis', 'gr_yassine', '2794526', 'phil_wellbrook', 'kmtv_kent', 'ep_president',
'0ptimumvelocity', '😮', '_kalule', '600M', '151', 'kendrick_kamal', '1xbet', '5050',
'75_paname', 'the_ahmed_hayat', 'de_essieno', '😪', '️saynotoeurop', '_jenky88', '810', '92a',
'🍺', '🖤', '4sug2', 'offthepitch_com', 'talking_toffees', '1985', '2002', '202', '7i',
'owen_miller', 'MU_ST', 'ast_ars', 'sw6lionstv', 'deji_ooniabj', '2boxfutbol', 'shouvonik_bose',
'🐲', 'm_nialler', '2009', 'L1', 'acecast_nation', 'footy_prime', '🟥', '9supermac',
'rich_banks', '8billion', '15編楽しめるよ', '1417', 'mufc_larn', '006', 'psg_english', '125',
'kicker_bl_li', '🤢', '_frankiam', '3_', '7has', '️QUARTER', '42', '🆕', '𝟏𝟖𝟕𝟒', '️MCFC',
'9am', '8s', 'G20', 'shane_mangan', '🗞', '_9', 'a_liberty_rebel', 'longevity_dan', '_201',
'matt_law_dt', 'antifa_celtic', '1888', 'sams_keef', 'nathaniel_john', '🔛', '7221818', '6s',
'jo_napolitano', '_93', 'freelancers_usa', '10pm', 'football_prizes', 'رسميـًا', 'no14gifts',
'iam_wilsons', '☺', '50million', 'angry_voice', '996', 'thechai_vinist', '🦁', '️boris', '1981',
'c_barraud', '8B', 'beinsports_fr', '97th', '🤥', '⏪', 'kam_lfc', '🇱', 'juba_obr', '2050',
'️supporting', '4yo', 'mike_ananin', '26', '🔟', 'mose_louis', 'mmu_laws', '12thmantalks', '🍓',
'💚', '7jasar', '😧', '50plus1', '0perry', '32', 'pierik_agesport', 'تُطالب', 'المُلاك', '1nda',
'7newsperth', '3000000', '↔', '_54', '7newssydney', 'ayman_scents', '📦', '📊', 'english_atl',
'4886889', 'bruce_levell', '700m', 'lewiscox_star', '_H3NDRY', '1800', '595', '194', '18H10',
'1819R', 'a_claudereitz', '🖋', 'jp__be', 'political_wasp', '🦊', '✔', '️what', '️if', '10000',
'l_cetta', '72h', '4billion', 'alex_pd', 'biko_dz', '1_plate_biryani', 'oli_holmes', '67book',
'nana_elkh', '👂', 'S2', 'E71', '1890s', '\U000e0077', '\U000e006c', '\U000e0073', 'england_rl',
'liv_fit', '2010', 'craig_pankhurst', 'monkeys_show', '3liga', '2889', '224m', 'm_star_online',
'lerato_mkhondo', '1894', '200k', 'manutd_id', 'arsenalfc_fl', '112k', '13th', 'cb_ignoranza',
'1411nico', '22:30', '2XL', '☎', '7:30', '5livesport', '🪐', '🌑', 'you_m', '18280',
'kevin_maguire', '🙈', 'olivier_truchot', '749', 'inafr_officiel', '1976', 'lpha_bloke',
'111words', 'ochim_victor', '📷', '25th', 'lazio_uk', 'the_lutonian', 'rojos_municipal', '️VAR',
'️changes', '️sanctions', 'keir_starmer', 'พรีเมียร์ลีก', 'ลงดาบเด้งตัวแทนบิ๊ก',
'ของสโมสรอย่างแมนเชสเตอร์', 'ซิตี้', 'แมนเชสเตอร์', 'ยูไนเต็ด', 'เชลซี', 'ลิเวอร์พูล',
'อาร์เซนอล', 'ฮอตสเปอร์', 'พ้นตำแหน่งเซ่นพิษที่พวกเขาเป็นหนึ่งในทีมร่วมก่อตั้งซูเปอร์ลีก', '⛔',
'rash_podcast', '6amclub', '30pm', '889', 'i̇stanbul', '1xbet_campany', '7pm', '1893',
'neunzig_plus', '1ce', '100m', '500k', '1_fc_nuernberg', '10betsports', '1️⃣', '2️⃣', 'inter_en',
'1218', 'coolboy_lanre', '️lattuale', 'tim_ecksteen', '365scores', 'alamin_ys',
'اراضي_شرق_الرياض', 'مرزوقه_الحربي', 'هند_القحطاني_تعود_للحجاب', 'تانج_والا_فيمتو', 'جده_Iلان',
'الدمام_الان', '05552230', 'عماله_منزليه', 'الفلبين_سيرلانكا_بنقلاديشيه_كينيا_اثيوبيا_اغندا',
'20tencreative', 'fcstpauli_en', '2018', '33', 'kevin_blundell', '1983', 'axel_remaster', '1955',
'🙂', '؟', 'ريال_مدريد', 'انتر_نابولي', 'مانشستر_يونايتد', '24ca', '_graydorian', '324', '311',
'3bill', 'michael_i_jones', '️superleague', '️uefas', '2707_', 'sam_inkersoletm', 'andrew_vas',
'777', 'italian_average', '🍪', 'forb_english', '👫', '100s', '9fowler', 'ian_rush', '78', '48h',
'a_schillhaneck', '7280000', '243', '09082000056', '1kroenkeout', 't7219860live', 'lfcdt_gav',
'channels_sports', 'ctv_tayos', 'ctv_ceceo', '_8', '2004er', '🧠', 'dortmund_french', '2/3',
'DE_DON', '007', '_10', '🚗', 'bvb_goleador', '️kick', 'zrss_si', 'ep55', '2573331', 'king_fut',
'22nd', '23rd', 'uefacom_it', '_goalpoint', '1º', '04fussball', 'david_clarke', '1357913',
'4TAG', '2/36', 'gunner_x', '7bn', '10press', 'vintage_utd', '⤵', 'millar_colin', '2015',
'5live', '🥅', '1kminute', '🐦', '📱', '121200', '🤙', '613 750 1200', '📩', '1200', '🦈', '4H',
'20/04', '2footballuk', '1088', '️ex', '2025', 'vi_nl', 'giselle_zm', '🧐', 'scott_geelan',
'changeorg_india', '2🅰️', '°', '2014', '07190097', '150m', '232', '350m', '🍒', 'D:',
'️unitedcity', '🔋', 'C20', '230AH', '12V', '115000', '2017', ':p', '12promax', '12pro',
'12mini', 'asso_mediapi', '🚴', '👥', '🛹', '🐺', '033u', '16h', 'alamin_ghost', '307', '2003',
'₦', '950000', '️norwichs', '2020s', 'flohempel_darts', '🔞', '️for', '3million', '22aprile',
'1510', '1280', '528', '272', '2day', '00n', 'lawson_sv', 'kzs_si', 'nzs_si', 'u20', '🍑',
'6500', 'tk_cele', 'samurai_ando', 'shush_larawk', 'curtis_peprah', 'siya_phungula', '🙋', '🍫',
'6m', '38', '589m', '450m', '335m', '277m', '273m', '📽', '21aprile', '28th',
'4209505', '95nufc', '7⃣', '3⃣0⃣pm', 'me_granturco', '️is', 'liberty_media', '25anni', '1/4',
'😷', 'FM21', '🖊', 'fm21', '2013', '📄', '𝟏𝟏𝟎𝟎', '·', '155', 's_roche', '60', '120', '909',
'HALIT_KARAGOOZ', '130', '365', 'zonal_marking', '️ryan', '100celleasr', '_dajedepunta_',
'football__tweet', '🗑', '️WBA', '️top', 'OE3', 'oe3wecker', 'lobs_sport_biz', '386m', '202m',
'771m', '177bn', '757m', '752m', '247m', '406', '0526', '1965wendy', 'lola_united', '111',
'_16',
'123tupac', '371', 'rio_f', '79', '8e', ':d', '59', '52', '46', '']
STOP_NONALPHA2: list = ['100k', 'rojos_municipal', 'fcstpauli_en', 'اراضي_شرق_الرياض', 'مرزوقه_الحربي',
'هند_القحطاني_تعود_للحجاب', 'تانج_والا_فيمتو', 'جده_Iلان', 'الدمام_الان', '05552230', 'عماله_منزليه',
'الفلبين_سيرلانكا_بنقلاديشيه_كينيا_اثيوبيا_اغندا', '1950', 'k24tv', '0704', '217', '192', '365scores',
'🍓', '36th', 'matt_law_dt', '🙂', 'the_gfp', '؟', 'ريال_مدريد', 'انتر_نابولي', 'مانشستر_يونايتد',
'donny_beek', 'manutd_es', '2nd', 'slc_live', 'bruce_levell', '700m', 'you_make_me_day', '50million',
'⚡', '️boris', 'mikekeegan_dm', '40m', 'kevin_blundell', '🐍', '⏩', '3bill', 'andrew_vas', '777',
'sam_inkersoletm', 'tim_ecksteen', '⚧', '1972', '_8', '9fowler', 'ian_rush', '2/3', '_10', '935GP',
'08131271', '06', '09082000056', '1kroenkeout', '2008', '2013', 'seriea_en', 'adelaide_rams', '1000s',
'🧐', '4TAG', '75', '52', 'phil_wellbrook', 'kmtv_kent', 'uefacom_it', '34million', 'جريمه_صباح_السالم',
'1348 7610', 'laura_woodsy', '50000', 'gunner_x', '1892 2021', 'PL2', '░', '⇢', '▉', 'vintage_utd',
'weare_theshed', '3/3', '200k', '🥵', '🤍', '304', '📣', 'alison_mcgovern', '5bn', '7bn', '️seguimentfcb',
'000', '🍻', '25', '⌚', '5parks', '1088', '5️⃣0️⃣', '😥', '10000', '1m', '🕺', '🥂', '🍾', '🎂',
'🎊', '🌹', '🎁', '5live', '🏃', '💹', 'GW32', '1980', '4yo', '8th', '1889/90', '🦈', '4H',
'1kminute', '🐦', '121200', '🤙', '613 750 1200', '1200', '26', 'vi_nl', '0526', '1965wendy',
'lola_united', '111', '160', '1961', 'john_nufc', '42', '74', '_16', '2manutd',
'123tupac', '371', 'rio_f', '79', 'D:', '22:30', '4b', '🪐', '🌑', 'you_make_me',
'🛑', '️unitedcity', '5⃣', '167', '6amclub', ':p', '🐺', '033u', '16h', '12promax',
'12pro', '12mini', '300m', '🧵', '1/14', '2020s', 'flohempel_darts', '00n', 'lawson_sv',
'07118081', '6500', '2348161148', '090', 'inger_stitche', '5K', '534K', '278K', 'matt_santangelo',
'bd29b1', '2day', 'kzs_si', 'nzs_si', 'u20', '🍑', '___', '╭', '╮', '▏', '▕', '┏', '┳', '┓', '┻',
'1510', '1280', '528', '272', '125', '630', '458', '152', '1173', '901', '494', '7723', '🔲', 'tk_cele',
'samurai_ando', 'shush_larawk', 'curtis_peprah', 'siya_phungula', '_7220000', '7220000',
'lfcdt_gav', '\U000e0063', '\U000e0074', '2bn', '6bn', 'rp3', '5b', '92a', 'slbenfica_en',
'4k', '7k', 'sportrecht_dus', 'mark_e_orth']
STOP_UTF2: list = ['🌙', '🍭', '😋', '🌶', ]
# bracket special chars for RE compares. RE and python compare (if x in Y) different
JUNC_PUNC: str = "[*+%;',]"
XTRA_PUNC: str = "([.!?]+)"
END_PUNC: str = "[.!?]" # keep ! and ?, they both have effect on tweet sentiment
PUNC_STR: list = ["*", "+", "%", ":", ";", "/", "|", ",", "'"]
GS_SENT_END: list = ["!", "?", ";", ".", "..", "..."]
# capture special Tweet text: user_mention, hashtag, urls, stuff inside paras, punctuation
GS_PAREN = "\((.+?)\)"
GS_URL = r'https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b'\
r'[-a-zA-Z0-9()@:%_\+.~#?&//=]*'
GS_MENT = "@(\w*)"
GS_HASH = "[#](\w*)"
GS_UCS4 = r"\\\\[x][0-9,a-f]{4}" # find ucs-2 aka double-byte characters
GS_UCS = "\\u[0-9,a-f]{4}"
UCS_SYM = "\\[u][0,2]{2}[0-9,a-f]{2}"
# contractions expansions, includes forms with missing apostrophe
GS_CONTRACT = {
"-": " ",
"ain't": "aint",
"aren't": "are not",
"arent": "are not",
"can't": "can not",
"cant": "can not",
"'cause": "because",
"could've": "could have",
"couldn't": "could not",
"didn't": "did not",
"doesn't": "does not",
"don't": "do not",
"dont": "do not",
"hadn't": "had not",
"hasn't": "has not",
"haven't": "have not",
"he'd": "he would",
"he'll": "he will",
"he's": "he is",
"how'd": "how did",
"how'll": "how will",
"how's": "how is",
"i'd": "i would",
"i'll": "i will",
"i'm": "i am",
"i've": "i have",
"isn't": "is not",
"isnt": "is not",
"it'd": "it would",
"it'll": "it will",
"it'll've": "it will have",
"its": "it is",
"it's": "it is", # the contraction is often mis-spelled
"let's": "let us",
"ma'am": "mam",
"mayn't": "may not",
"might've": "might have",
"mightn't": "might not",
"must've": "must have",
"mustn't": "must not",
"needn't": "need not",
"o'clock": "oclock",
"oughtn't": "ought not",
"shan't": "shall not",
"she'd": "she would",
"she'll": "she will",
"she's": "she is",
"should've": "should have",
"shouldn't": "should not",
"so've": "so have",
"so's": "so as",
"that'd": "that would",
"that's": "that is",
"there'd": "there would",
"there's": "there is",
"they'd": "they would",
"they'll": "they will",
"theyll": "they will",
"they're": "they are",
"theyre": "they are",
"they've": "they have",
"wasn't": "was not",
"we'd": "we would",
"we'll": "we will",
"we're": "we are",
"we've": "we have",
"weren't": "were not",
"what'll": "what will",
"what're": "what are",
"what's": "what is",
"whats": "what is",
"what've": "what have",
"when's": "when is",
"whens": "when is",
"when've": "when have",
"where'd": "where did",
"where's": "where is",
"who'll": "who will",
"who's": "who is",
"whos ": "who is ",
"who've": "who have",
"why's": "why is",
"won't": "will not",
"wont": "will not",
"would've": "would have",
"wouldn't": "would not",
"y'all": "yall",
"you'd": "you would",
"youd": "you would",
"you'll": "you will",
"you're": "you are",
"you've": "you have"
}
# repr(xx).strip("'") displays char represented by \uxxxx code
GS_EMOJI: dict = {"\ud83d\udea8": "🚨",
"\ud83e\udd23": "🤣",
"\u26aa\ufe0f": "⚪",
"\u26a0\ufe0f": "⚠",
"\u26BD\uFE0F": "⚽️",
"\u2b07\ufe0f": "⬇",
"\ud83e\udd2c": "🤬", # angry, cussing head
"\ud83d\udcca": "📊",
"\ud83d\udde3\ufe0f": "🗣",
"\ud83d\udeab": "🚫",
"\ud83c\uddea\ud83c\uddfa": "🇪🇺",
"\ud83c\udde9\ud83c\uddea": "🇩🇪",
"\ud83d\ude4c": "🙌 ",
"\ud83d\udd34\u26aa\ufe0f": "🔴⚪",
"\ud83d\udd34": "🔴 ",
"\ud83d\udeab\ud83d\udd35": "🚫🔵",
"\ud83e\udd21": "🤡",
"\ud83d\udc80": "💀",
"\ud83d\udc51": "👑"
}
# GS_UCS2 shows ucs-1 symbol equivalent to ucs-2 if symbol exists
GS_UCS2: dict = {"\u003b": ";",
"\u003c": "<",
"\u003e": ">",
r"\u003f": r"?",
r"\u0040": r"@",
r"\u00a1": r"!", # '¡'
r"\u00a2": "", # '¢'
r"\u00a3": "brP", # '£'
r"\u00a4": "", # '¤'
r"\u00a6": r":", # '¦'
r"\u00a8": "", # unlaut '¨'
r"\u00a9": "cpyrt", # '©'
r"\u00ae": "reg copyrt", # reg copyrt '®'
r"\u00b6": r"<p>", # para mark '¶'
r"\u00b8": r".", # period "."
r"\u00bd": "1/2", # symbol '½'
r"\u00bf": "", # spanish inverted question '¿'
r"\u00e0": "a", # a with accent grave 'à'
r"\u00e7": "c", # c with lower accent "ç"
r"\u2012": "-",
r"\u2013": "–",
r"\u2014": "–",
r"\u2015": "–",
r"\u2016": "", # '‖'
r"\u2017": "", # '‗'
r"\u2018": r"'",
r"\u2019": r"'",
r"\u201a": r",",
r"\u201b": r"'",
r"\u201c": r"'",
r"\u201d": r"'",
r"\u201e": r"'",
r"\u201f": r"'",
}
emoji_dict: dict = {
":-)" : "basic smiley",
":)" : "midget smiley",
",-)" : "winking smiley",
"(-:" : "left hand smiley",
"(:-)" : "big face smiley",
":-(" : "sad face",
":-(-" : "very sad face",
"8-O" : "omg face",
"B-)" : "smiley with glasses",
":-)>" : "bearded smiley",
"'-)" : "winking smiley",
":-#" : "my lips are scaled",
":-*" : "kiss",
":-/" : "skeptical smiley",
":->" : "sarcastic smiley",
":-@" : "screaming smiley",
":-V" : "shouting smiley",
":-X" : "a big wet kiss",
":-\\" : "undecided smiley",
":-]" : "smiley blockhead",
";-(-" : "crying sad face",
">;->" : "lewd remark",
";^)" : "smirking smiley",
"%-)" : "too many screens",
"):-(-": "nordic smiley",
":-&" : "tongue tied",
":-O" : "talkaktive smiley",
"+:-)" : "priest smiley",
"O:-)" : "angel smiley",
":-<:" : "walrus smiley",
":-E" : "bucktoothed vampire",
":-Q" : "smoking smiley",
":-}X" : "bowtie smiley",
":-[" : "vampire smiley",
":-{-" : "mustache smiley",
":-{}" : "smiley wears lipstick",
":^)" : "smiley with personality",
"<:-l" : "dunce smiley",
":=)" : "orangutan smiley",
">:->" : "devilish smiley",
">:-l" : "klingon smiley",
"@:-)" : "smiley wearing turban",
"@:-}" : "smiley with hairdo",
"C=:-)": "chef smiley",
"X:-)" : "smiley with propeller beanie",
"[:-)" : "smiley with earbuds",
"[:]" : "robot smiley",
"{:-)" : "smiley wears toupee",
"l^o" : "hepcat smiley",
"}:^)" : "pointy nosed smiley",
"(:-(" : "saddest smiley",
":-(=)": "bucktooth smiley",
"O-)" : "message from cyclops",
":-3" : "handlebar mustache smiley",
":-=" : "beaver smiley",
"P-(" : "pirate smiley",
"?-(" : "black eye",
"d:-)" : "baseball smiley",
":8)" : "piggy smiley",
":-7" : "smirking smiley",
"):-)" : "impish smiley",
":/\\)": "bignose smiley",
":-(*)": "vomit face",
":(-" : "turtle smiley",
":,(" : "crying smiley",
":-S" : "confuzled face",
":-[ " : "unsmiley blockhead",
":-C" : "real unhappy smiley",
":-t" : "pouting smiley",
":-W" : "forked tongue",
"X-(" : "brain dead" }
IDIOM_MODS = {'darth vader': -2.5, 'male privilege': -2.5, "good guys": 0.5}
VADER_MODS = {"amf":-2.0, "sociopathic": -2.5, "cartel": -1.0, "ideologues": -0.5,
"blunder": -0.5, "commodotize": -0.5}
TRACE_COLRS = ["rgb(255, 153, 51)", "rgb(204, 204, 102)", "rgb(0, 153, 0)",
"rgb(0, 153, 255)", "rgb(153, 102, 0)", "rgb(0, 102, 153)",
"rgb(255, 51, 153)", "rgb( 255, 102, 204)", "rgb(51, 51, 51)",
"rgb(102, 0, 153)", "rgb(0, 102, 153)", "rgb(51, 102, 153)",
"rgb(0, 102, 0)", "rgb(204, 102, 51)", "rgb(153, 153, 153)"]
GSC = {
"dkblu": "rgb(0, 102, 153)",
"ltblu": "rgb(0, 153, 255)",
"grn": "rgb(0, 204, 102)",
"oblk": "rgb(51, 51, 51)",
"prpl": "rgb(51, 51, 153)",
"dkgrn": "rgb(51, 102, 51)",
"dkryl": "rgb(51, 102, 153)",
"brwn": "rgb( 102, 51, 51)",
"drkrd": "rgb(153, 51, 102)",
"brnz": "rgb(153, 102, 0)",
"gray": "rgb(153, 153, 153)",
"brnorg": "rgb(153, 102, 51)",
"lgrn": "rgb(153, 153, 51)",
"slvr": "rgb(153, 153, 153)",
"org": "rgb(204, 102, 51)",
"gld": "rgb(204, 153, 51)",
"olv": "rgb(204, 204, 102)",
"beig": "rgb(204, 204, 153)",
"ltgry": "rgb(204, 204, 204)",
"mgnta": "rgb(255, 51, 255)"
}
"""
eyes = "[8:=;]"
nose = "['`\-]?"
smile = "\[|[)\]"
frown = \(+|\)+#
neutral = [\/|l*]/
elongated = \b(\S*?)(.)\2{2,}\b # repetition of last letter ex 'wayyy cool'
EMOJI_2BYTE = re.compile(u'([\u2600-\u27BF])|([\uD83C][\uDF00-\uDFFF])|\
([\uD83D][\uDC00-\uDE4F])|([\uD83D][\uDE80-\uDEFF])')
GS_SMILE = re.compile(r"(\s?:X|:|;|=)(?:-)?(?:\)+|\(|O|D|P|S|\\|\/\s){1,}", re.IGNORECASE)
emoji_dict: dict = {
:-) - basic smiley
:) - midget smiley
,-) - winking happy smiley
(-: - left hand smiley
(:-) - smiley big face
(:-( - very unhappy smiley
,-} - wry and winking smiley
8-O - Omigod
'-) - winking smiley
:-# - my lips are scaled
:-* - kiss
:-/ - skeptical smiley
:-> - sarcastic smiley
:-@ - screaming smiley
:-d - said with a smile
:-V - shouting smiley
:-X - a big wet kiss
:-\\ - undecided smiley
:-] - smiley blockhead
;-( - crying smiley
>;-> - a very lewd remark was just made
;^) - smirking smiley
%-) - smiley after staring at a screen for 15 hours straight
):-( - nordic smiley
3:] - Lucy my pet dog smiley
:-& - tongue tied
8:-) - little girl smiley
:-)8< - big girl smiley
:-O - talkaktive smiley
:-6 - smiley after eating something spicy
+:-) - priest smiley
O:-) - angel smiley
:-< - walrus smiley
:-? - smiley smokes a pipe
:-E - bucktoothed vampire
:-Q - smoking smiley
:-}X - bow tie-wearing smiley
:-[ - vampire smiley
:-a - smiley touching her tongue to her nose
:-{ - mustache
:-{} - smiley wears lipstick
:^) - smiley with a personality
<:-l - dunce smiley
:=) - orangutan smiley
>:-> - devilish smiley
>:-l - klingon smiley
@:-) - smiley wearing a turban
@:-} - smiley just back from the hairdresser
C=:-) - chef smiley
X:-) - little kid with a propeller beanie
[:-) - smiley wearing a walkman
[:] - robot smiley
{:-) - smiley wears a toupee
l^o - hepcat smiley
}:^#) - pointy nosed smiley
(:-( - the saddest smiley
:-(=) - bucktooth smiley
O-) - message from cyclops
:-3 - handlebar mustache smiley
: = - beaver smiley
:-" - whistling smiley
P-( - pirate smiley
?-( - black eye
d:-) - baseball smiley
:8) - pigish smiley
:-7 - smirking smiley
):-) - impish smiley
:/\\) - extremely bignosed smiley
([( - Robocop
:-(*) - that comment made me sick
:( - sad-turtle smiley
:,( - crying smiley
:-( - boo hoo
:-S - what you say makes no sense
:-[ - un-smiley blockhead
:-C - real unhappy smiley
:-r - smiley raspberry
:-W - speak with forked tongue
X-( - you are brain dead
l-O - smiley is yawning
l:-O - flattop loudmouth smiley
$-) - yuppie smiley
:-! - foot in mouth
:----} - you lie like pinnochio
O-) - smiley after smoking a banana
=:-) - smiley is a punk
=:-( - real punks never smile
3:[ - pit bull smiley
8<:-) - smiley is a wizard
:#) - drunk smiley
8-# - dead smiley
B-) - smiley wears glasses
8-) - smiley with big eyes...perhaps wearing contact lenses...
H-) - cross-eyed smiley
]-I - smiley wearing sunglasses (cool...therefore no smile, only a smirk)
+-( - smiley, shot between the eyes
}
"""
|
# Just taken from terminal_service.py for Seeker
# Will modify based on this as a template
#from xmlrpc.client import Fault
class TerminalService:
"""
A service that handles terminal operations.
The responsibility of a TerminalService is to provide input and output operations for the
terminal.
def read_a_character(self, prompt):
def _is_alphabetic_letter(self, letter, num = 1):
def write_text(self, text):
"""
def read_a_character(self, prompt):
"""Gets a text input from the terminal. Directs the user with the given prompt.
Args:
self (TerminalService): An instance of TerminalService.
prompt (string): The prompt to display on the terminal.
# mode (default = a): selection for the kind of a imput character
Returns:
string: The user's input as a text character.
"""
# result after lowered and checked between a to z.
# will add a loop to check
#
'''
accept only a-z or A to Z
convert upper_case to lower_case
'''
judge = False
while judge != True:
input_letter = input(prompt).lower()
judge = self._is_alphabetic_letter(input_letter)
return input_letter
def _is_alphabetic_letter(self, letter, num = 1):
"""
Check the input - alphabetic or not.
Args:
letter: A letter to be checked.
num (integer): number of input character (default = 1)
Returns:
True: When the letter is an alphabetic one.
"""
'''
accept only a-z or A to Z
convert upper_case to lower_case
'''
is_alphabetic = False
if len(letter) == num:
if letter.isalpha():
if letter.isascii():
is_alphabetic = True
return(is_alphabetic)
def write_text(self, text):
"""
Displays the given text on the terminal.
Args:
self (TerminalService): An instance of TerminalService.
text (string): The text to display.
"""
print(text)
|
{
"includes": [
"common.gypi",
],
"targets": [
{
"target_name": "colony",
"product_name": "colony",
"type": "executable",
'cflags': [ '-Wall', '-Wextra', '-Werror' ],
"sources": [
'<(runtime_path)/colony/cli.c',
],
'xcode_settings': {
'OTHER_LDFLAGS': [
'-pagezero_size', '10000', '-image_base', '100000000'
],
},
"include_dirs": [
'<(runtime_path)/',
'<(runtime_path)/colony/',
"<(colony_lua_path)/src",
],
"dependencies": [
'libcolony.gyp:libcolony',
'libtm.gyp:libtm',
],
}
]
}
|
def int_to_bytes(n, num_bytes):
return n.to_bytes(num_bytes, 'big')
def int_from_bytes(bites):
return int.from_bytes(bites, 'big')
class fountain_header:
length = 6
def __init__(self, encode_id, total_size=None, chunk_id=None):
if total_size is None:
self.encode_id, self.total_size, self.chunk_id = self.from_encoded(encode_id)
else:
self.encode_id = encode_id
self.total_size = total_size
self.chunk_id = chunk_id
def __bytes__(self):
eid = self.encode_id + ((self.total_size & 0x1000000) >> 17)
sz = self.total_size & 0xFFFFFF
return int_to_bytes(eid, 1) + int_to_bytes(sz, 3) + int_to_bytes(self.chunk_id, 2)
@classmethod
def from_encoded(cls, encoded_bytes):
encode_id = int_from_bytes(encoded_bytes[0:1])
total_size = int_from_bytes(encoded_bytes[1:4])
chunk_id = int_from_bytes(encoded_bytes[4:6])
total_size = total_size | ((encode_id & 0x80) << 17)
encode_id = encode_id & 0x7F
return encode_id, total_size, chunk_id
|
'''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def can_paginate(operation_name=None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name\nas the method name on the client. For example, if the\nmethod name is create_foo, and you\'d normally invoke the\noperation as client.create_foo(**kwargs), if the\ncreate_foo operation can be paginated, you can use the\ncall client.get_paginator('create_foo').
"""
pass
def delete_lexicon(Name=None):
"""
Deletes the specified pronunciation lexicon stored in an AWS Region. A lexicon which has been deleted is not available for speech synthesis, nor is it possible to retrieve it using either the GetLexicon or ListLexicon APIs.
For more information, see Managing Lexicons .
See also: AWS API Documentation
Exceptions
Examples
Deletes a specified pronunciation lexicon stored in an AWS Region.
Expected Output:
:example: response = client.delete_lexicon(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nThe name of the lexicon to delete. Must be an existing lexicon in the region.\n
:rtype: dict
ReturnsResponse Syntax{}
Response Structure
(dict) --
Exceptions
Polly.Client.exceptions.LexiconNotFoundException
Polly.Client.exceptions.ServiceFailureException
Examples
Deletes a specified pronunciation lexicon stored in an AWS Region.
response = client.delete_lexicon(
Name='example',
)
print(response)
Expected Output:
{
'ResponseMetadata': {
'...': '...',
},
}
:return: {}
:returns:
Polly.Client.exceptions.LexiconNotFoundException
Polly.Client.exceptions.ServiceFailureException
"""
pass
def describe_voices(Engine=None, LanguageCode=None, IncludeAdditionalLanguageCodes=None, NextToken=None):
"""
Returns the list of voices that are available for use when requesting speech synthesis. Each voice speaks a specified language, is either male or female, and is identified by an ID, which is the ASCII version of the voice name.
When synthesizing speech ( SynthesizeSpeech ), you provide the voice ID for the voice you want from the list of voices returned by DescribeVoices .
For example, you want your news reader application to read news in a specific language, but giving a user the option to choose the voice. Using the DescribeVoices operation you can provide the user with a list of available voices to select from.
You can optionally specify a language code to filter the available voices. For example, if you specify en-US , the operation returns a list of all available US English voices.
This operation requires permissions to perform the polly:DescribeVoices action.
See also: AWS API Documentation
Exceptions
Examples
Returns the list of voices that are available for use when requesting speech synthesis. Displayed languages are those within the specified language code. If no language code is specified, voices for all available languages are displayed.
Expected Output:
:example: response = client.describe_voices(
Engine='standard'|'neural',
LanguageCode='arb'|'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR',
IncludeAdditionalLanguageCodes=True|False,
NextToken='string'
)
:type Engine: string
:param Engine: Specifies the engine (standard or neural ) used by Amazon Polly when processing input text for speech synthesis.
:type LanguageCode: string
:param LanguageCode: The language identification tag (ISO 639 code for the language name-ISO 3166 country code) for filtering the list of voices returned. If you don\'t specify this optional parameter, all available voices are returned.
:type IncludeAdditionalLanguageCodes: boolean
:param IncludeAdditionalLanguageCodes: Boolean value indicating whether to return any bilingual voices that use the specified language as an additional language. For instance, if you request all languages that use US English (es-US), and there is an Italian voice that speaks both Italian (it-IT) and US English, that voice will be included if you specify yes but not if you specify no .
:type NextToken: string
:param NextToken: An opaque pagination token returned from the previous DescribeVoices operation. If present, this indicates where to continue the listing.
:rtype: dict
ReturnsResponse Syntax
{
'Voices': [
{
'Gender': 'Female'|'Male',
'Id': 'Aditi'|'Amy'|'Astrid'|'Bianca'|'Brian'|'Camila'|'Carla'|'Carmen'|'Celine'|'Chantal'|'Conchita'|'Cristiano'|'Dora'|'Emma'|'Enrique'|'Ewa'|'Filiz'|'Geraint'|'Giorgio'|'Gwyneth'|'Hans'|'Ines'|'Ivy'|'Jacek'|'Jan'|'Joanna'|'Joey'|'Justin'|'Karl'|'Kendra'|'Kimberly'|'Lea'|'Liv'|'Lotte'|'Lucia'|'Lupe'|'Mads'|'Maja'|'Marlene'|'Mathieu'|'Matthew'|'Maxim'|'Mia'|'Miguel'|'Mizuki'|'Naja'|'Nicole'|'Penelope'|'Raveena'|'Ricardo'|'Ruben'|'Russell'|'Salli'|'Seoyeon'|'Takumi'|'Tatyana'|'Vicki'|'Vitoria'|'Zeina'|'Zhiyu',
'LanguageCode': 'arb'|'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR',
'LanguageName': 'string',
'Name': 'string',
'AdditionalLanguageCodes': [
'arb'|'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR',
],
'SupportedEngines': [
'standard'|'neural',
]
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
Voices (list) --
A list of voices with their properties.
(dict) --
Description of the voice.
Gender (string) --
Gender of the voice.
Id (string) --
Amazon Polly assigned voice ID. This is the ID that you specify when calling the SynthesizeSpeech operation.
LanguageCode (string) --
Language code of the voice.
LanguageName (string) --
Human readable name of the language in English.
Name (string) --
Name of the voice (for example, Salli, Kendra, etc.). This provides a human readable voice name that you might display in your application.
AdditionalLanguageCodes (list) --
Additional codes for languages available for the specified voice in addition to its default language.
For example, the default language for Aditi is Indian English (en-IN) because it was first used for that language. Since Aditi is bilingual and fluent in both Indian English and Hindi, this parameter would show the code hi-IN .
(string) --
SupportedEngines (list) --
Specifies which engines (standard or neural ) that are supported by a given voice.
(string) --
NextToken (string) --
The pagination token to use in the next request to continue the listing of voices. NextToken is returned only if the response is truncated.
Exceptions
Polly.Client.exceptions.InvalidNextTokenException
Polly.Client.exceptions.ServiceFailureException
Examples
Returns the list of voices that are available for use when requesting speech synthesis. Displayed languages are those within the specified language code. If no language code is specified, voices for all available languages are displayed.
response = client.describe_voices(
LanguageCode='en-GB',
)
print(response)
Expected Output:
{
'Voices': [
{
'Gender': 'Female',
'Id': 'Emma',
'LanguageCode': 'en-GB',
'LanguageName': 'British English',
'Name': 'Emma',
},
{
'Gender': 'Male',
'Id': 'Brian',
'LanguageCode': 'en-GB',
'LanguageName': 'British English',
'Name': 'Brian',
},
{
'Gender': 'Female',
'Id': 'Amy',
'LanguageCode': 'en-GB',
'LanguageName': 'British English',
'Name': 'Amy',
},
],
'ResponseMetadata': {
'...': '...',
},
}
:return: {
'Voices': [
{
'Gender': 'Female'|'Male',
'Id': 'Aditi'|'Amy'|'Astrid'|'Bianca'|'Brian'|'Camila'|'Carla'|'Carmen'|'Celine'|'Chantal'|'Conchita'|'Cristiano'|'Dora'|'Emma'|'Enrique'|'Ewa'|'Filiz'|'Geraint'|'Giorgio'|'Gwyneth'|'Hans'|'Ines'|'Ivy'|'Jacek'|'Jan'|'Joanna'|'Joey'|'Justin'|'Karl'|'Kendra'|'Kimberly'|'Lea'|'Liv'|'Lotte'|'Lucia'|'Lupe'|'Mads'|'Maja'|'Marlene'|'Mathieu'|'Matthew'|'Maxim'|'Mia'|'Miguel'|'Mizuki'|'Naja'|'Nicole'|'Penelope'|'Raveena'|'Ricardo'|'Ruben'|'Russell'|'Salli'|'Seoyeon'|'Takumi'|'Tatyana'|'Vicki'|'Vitoria'|'Zeina'|'Zhiyu',
'LanguageCode': 'arb'|'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR',
'LanguageName': 'string',
'Name': 'string',
'AdditionalLanguageCodes': [
'arb'|'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR',
],
'SupportedEngines': [
'standard'|'neural',
]
},
],
'NextToken': 'string'
}
:returns:
(string) --
"""
pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
"""
Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to\nClientMethod.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid\nfor. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By\ndefault, the http method is whatever is used in the method\'s model.
"""
pass
def get_lexicon(Name=None):
"""
Returns the content of the specified pronunciation lexicon stored in an AWS Region. For more information, see Managing Lexicons .
See also: AWS API Documentation
Exceptions
Examples
Returns the content of the specified pronunciation lexicon stored in an AWS Region.
Expected Output:
:example: response = client.get_lexicon(
Name='string'
)
:type Name: string
:param Name: [REQUIRED]\nName of the lexicon.\n
:rtype: dict
ReturnsResponse Syntax{
'Lexicon': {
'Content': 'string',
'Name': 'string'
},
'LexiconAttributes': {
'Alphabet': 'string',
'LanguageCode': 'arb'|'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR',
'LastModified': datetime(2015, 1, 1),
'LexiconArn': 'string',
'LexemesCount': 123,
'Size': 123
}
}
Response Structure
(dict) --
Lexicon (dict) --Lexicon object that provides name and the string content of the lexicon.
Content (string) --Lexicon content in string format. The content of a lexicon must be in PLS format.
Name (string) --Name of the lexicon.
LexiconAttributes (dict) --Metadata of the lexicon, including phonetic alphabetic used, language code, lexicon ARN, number of lexemes defined in the lexicon, and size of lexicon in bytes.
Alphabet (string) --Phonetic alphabet used in the lexicon. Valid values are ipa and x-sampa .
LanguageCode (string) --Language code that the lexicon applies to. A lexicon with a language code such as "en" would be applied to all English languages (en-GB, en-US, en-AUS, en-WLS, and so on.
LastModified (datetime) --Date lexicon was last modified (a timestamp value).
LexiconArn (string) --Amazon Resource Name (ARN) of the lexicon.
LexemesCount (integer) --Number of lexemes in the lexicon.
Size (integer) --Total size of the lexicon, in characters.
Exceptions
Polly.Client.exceptions.LexiconNotFoundException
Polly.Client.exceptions.ServiceFailureException
Examples
Returns the content of the specified pronunciation lexicon stored in an AWS Region.
response = client.get_lexicon(
Name='',
)
print(response)
Expected Output:
{
'Lexicon': {
'Content': '<?xml version="1.0" encoding="UTF-8"?>\\r\
<lexicon version="1.0" \\r\
xmlns="http://www.w3.org/2005/01/pronunciation-lexicon"\\r\
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" \\r\
xsi:schemaLocation="http://www.w3.org/2005/01/pronunciation-lexicon \\r\
http://www.w3.org/TR/2007/CR-pronunciation-lexicon-20071212/pls.xsd"\\r\
alphabet="ipa" \\r\
xml:lang="en-US">\\r\
<lexeme>\\r\
<grapheme>W3C</grapheme>\\r\
<alias>World Wide Web Consortium</alias>\\r\
</lexeme>\\r\
</lexicon>',
'Name': 'example',
},
'LexiconAttributes': {
'Alphabet': 'ipa',
'LanguageCode': 'en-US',
'LastModified': 1478542980.117,
'LexemesCount': 1,
'LexiconArn': 'arn:aws:polly:us-east-1:123456789012:lexicon/example',
'Size': 503,
},
'ResponseMetadata': {
'...': '...',
},
}
:return: {
'Lexicon': {
'Content': 'string',
'Name': 'string'
},
'LexiconAttributes': {
'Alphabet': 'string',
'LanguageCode': 'arb'|'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR',
'LastModified': datetime(2015, 1, 1),
'LexiconArn': 'string',
'LexemesCount': 123,
'Size': 123
}
}
"""
pass
def get_paginator(operation_name=None):
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name\nas the method name on the client. For example, if the\nmethod name is create_foo, and you\'d normally invoke the\noperation as client.create_foo(**kwargs), if the\ncreate_foo operation can be paginated, you can use the\ncall client.get_paginator('create_foo').
:rtype: L{botocore.paginate.Paginator}
ReturnsA paginator object.
"""
pass
def get_speech_synthesis_task(TaskId=None):
"""
Retrieves a specific SpeechSynthesisTask object based on its TaskID. This object contains information about the given speech synthesis task, including the status of the task, and a link to the S3 bucket containing the output of the task.
See also: AWS API Documentation
Exceptions
:example: response = client.get_speech_synthesis_task(
TaskId='string'
)
:type TaskId: string
:param TaskId: [REQUIRED]\nThe Amazon Polly generated identifier for a speech synthesis task.\n
:rtype: dict
ReturnsResponse Syntax{
'SynthesisTask': {
'Engine': 'standard'|'neural',
'TaskId': 'string',
'TaskStatus': 'scheduled'|'inProgress'|'completed'|'failed',
'TaskStatusReason': 'string',
'OutputUri': 'string',
'CreationTime': datetime(2015, 1, 1),
'RequestCharacters': 123,
'SnsTopicArn': 'string',
'LexiconNames': [
'string',
],
'OutputFormat': 'json'|'mp3'|'ogg_vorbis'|'pcm',
'SampleRate': 'string',
'SpeechMarkTypes': [
'sentence'|'ssml'|'viseme'|'word',
],
'TextType': 'ssml'|'text',
'VoiceId': 'Aditi'|'Amy'|'Astrid'|'Bianca'|'Brian'|'Camila'|'Carla'|'Carmen'|'Celine'|'Chantal'|'Conchita'|'Cristiano'|'Dora'|'Emma'|'Enrique'|'Ewa'|'Filiz'|'Geraint'|'Giorgio'|'Gwyneth'|'Hans'|'Ines'|'Ivy'|'Jacek'|'Jan'|'Joanna'|'Joey'|'Justin'|'Karl'|'Kendra'|'Kimberly'|'Lea'|'Liv'|'Lotte'|'Lucia'|'Lupe'|'Mads'|'Maja'|'Marlene'|'Mathieu'|'Matthew'|'Maxim'|'Mia'|'Miguel'|'Mizuki'|'Naja'|'Nicole'|'Penelope'|'Raveena'|'Ricardo'|'Ruben'|'Russell'|'Salli'|'Seoyeon'|'Takumi'|'Tatyana'|'Vicki'|'Vitoria'|'Zeina'|'Zhiyu',
'LanguageCode': 'arb'|'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR'
}
}
Response Structure
(dict) --
SynthesisTask (dict) --SynthesisTask object that provides information from the requested task, including output format, creation time, task status, and so on.
Engine (string) --Specifies the engine (standard or neural ) for Amazon Polly to use when processing input text for speech synthesis. Using a voice that is not supported for the engine selected will result in an error.
TaskId (string) --The Amazon Polly generated identifier for a speech synthesis task.
TaskStatus (string) --Current status of the individual speech synthesis task.
TaskStatusReason (string) --Reason for the current status of a specific speech synthesis task, including errors if the task has failed.
OutputUri (string) --Pathway for the output speech file.
CreationTime (datetime) --Timestamp for the time the synthesis task was started.
RequestCharacters (integer) --Number of billable characters synthesized.
SnsTopicArn (string) --ARN for the SNS topic optionally used for providing status notification for a speech synthesis task.
LexiconNames (list) --List of one or more pronunciation lexicon names you want the service to apply during synthesis. Lexicons are applied only if the language of the lexicon is the same as the language of the voice.
(string) --
OutputFormat (string) --The format in which the returned output will be encoded. For audio stream, this will be mp3, ogg_vorbis, or pcm. For speech marks, this will be json.
SampleRate (string) --The audio frequency specified in Hz.
The valid values for mp3 and ogg_vorbis are "8000", "16000", "22050", and "24000". The default value for standard voices is "22050". The default value for neural voices is "24000".
Valid values for pcm are "8000" and "16000" The default value is "16000".
SpeechMarkTypes (list) --The type of speech marks returned for the input text.
(string) --
TextType (string) --Specifies whether the input text is plain text or SSML. The default value is plain text.
VoiceId (string) --Voice ID to use for the synthesis.
LanguageCode (string) --Optional language code for a synthesis task. This is only necessary if using a bilingual voice, such as Aditi, which can be used for either Indian English (en-IN) or Hindi (hi-IN).
If a bilingual voice is used and no language code is specified, Amazon Polly will use the default language of the bilingual voice. The default language for any voice is the one returned by the DescribeVoices operation for the LanguageCode parameter. For example, if no language code is specified, Aditi will use Indian English rather than Hindi.
Exceptions
Polly.Client.exceptions.InvalidTaskIdException
Polly.Client.exceptions.ServiceFailureException
Polly.Client.exceptions.SynthesisTaskNotFoundException
:return: {
'SynthesisTask': {
'Engine': 'standard'|'neural',
'TaskId': 'string',
'TaskStatus': 'scheduled'|'inProgress'|'completed'|'failed',
'TaskStatusReason': 'string',
'OutputUri': 'string',
'CreationTime': datetime(2015, 1, 1),
'RequestCharacters': 123,
'SnsTopicArn': 'string',
'LexiconNames': [
'string',
],
'OutputFormat': 'json'|'mp3'|'ogg_vorbis'|'pcm',
'SampleRate': 'string',
'SpeechMarkTypes': [
'sentence'|'ssml'|'viseme'|'word',
],
'TextType': 'ssml'|'text',
'VoiceId': 'Aditi'|'Amy'|'Astrid'|'Bianca'|'Brian'|'Camila'|'Carla'|'Carmen'|'Celine'|'Chantal'|'Conchita'|'Cristiano'|'Dora'|'Emma'|'Enrique'|'Ewa'|'Filiz'|'Geraint'|'Giorgio'|'Gwyneth'|'Hans'|'Ines'|'Ivy'|'Jacek'|'Jan'|'Joanna'|'Joey'|'Justin'|'Karl'|'Kendra'|'Kimberly'|'Lea'|'Liv'|'Lotte'|'Lucia'|'Lupe'|'Mads'|'Maja'|'Marlene'|'Mathieu'|'Matthew'|'Maxim'|'Mia'|'Miguel'|'Mizuki'|'Naja'|'Nicole'|'Penelope'|'Raveena'|'Ricardo'|'Ruben'|'Russell'|'Salli'|'Seoyeon'|'Takumi'|'Tatyana'|'Vicki'|'Vitoria'|'Zeina'|'Zhiyu',
'LanguageCode': 'arb'|'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR'
}
}
:returns:
(string) --
"""
pass
def get_waiter(waiter_name=None):
"""
Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters\nsection of the service docs for a list of available waiters.
:rtype: botocore.waiter.Waiter
"""
pass
def list_lexicons(NextToken=None):
"""
Returns a list of pronunciation lexicons stored in an AWS Region. For more information, see Managing Lexicons .
See also: AWS API Documentation
Exceptions
Examples
Returns a list of pronunciation lexicons stored in an AWS Region.
Expected Output:
:example: response = client.list_lexicons(
NextToken='string'
)
:type NextToken: string
:param NextToken: An opaque pagination token returned from previous ListLexicons operation. If present, indicates where to continue the list of lexicons.
:rtype: dict
ReturnsResponse Syntax{
'Lexicons': [
{
'Name': 'string',
'Attributes': {
'Alphabet': 'string',
'LanguageCode': 'arb'|'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR',
'LastModified': datetime(2015, 1, 1),
'LexiconArn': 'string',
'LexemesCount': 123,
'Size': 123
}
},
],
'NextToken': 'string'
}
Response Structure
(dict) --
Lexicons (list) --A list of lexicon names and attributes.
(dict) --Describes the content of the lexicon.
Name (string) --Name of the lexicon.
Attributes (dict) --Provides lexicon metadata.
Alphabet (string) --Phonetic alphabet used in the lexicon. Valid values are ipa and x-sampa .
LanguageCode (string) --Language code that the lexicon applies to. A lexicon with a language code such as "en" would be applied to all English languages (en-GB, en-US, en-AUS, en-WLS, and so on.
LastModified (datetime) --Date lexicon was last modified (a timestamp value).
LexiconArn (string) --Amazon Resource Name (ARN) of the lexicon.
LexemesCount (integer) --Number of lexemes in the lexicon.
Size (integer) --Total size of the lexicon, in characters.
NextToken (string) --The pagination token to use in the next request to continue the listing of lexicons. NextToken is returned only if the response is truncated.
Exceptions
Polly.Client.exceptions.InvalidNextTokenException
Polly.Client.exceptions.ServiceFailureException
Examples
Returns a list of pronunciation lexicons stored in an AWS Region.
response = client.list_lexicons(
)
print(response)
Expected Output:
{
'Lexicons': [
{
'Attributes': {
'Alphabet': 'ipa',
'LanguageCode': 'en-US',
'LastModified': 1478542980.117,
'LexemesCount': 1,
'LexiconArn': 'arn:aws:polly:us-east-1:123456789012:lexicon/example',
'Size': 503,
},
'Name': 'example',
},
],
'ResponseMetadata': {
'...': '...',
},
}
:return: {
'Lexicons': [
{
'Name': 'string',
'Attributes': {
'Alphabet': 'string',
'LanguageCode': 'arb'|'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR',
'LastModified': datetime(2015, 1, 1),
'LexiconArn': 'string',
'LexemesCount': 123,
'Size': 123
}
},
],
'NextToken': 'string'
}
"""
pass
def list_speech_synthesis_tasks(MaxResults=None, NextToken=None, Status=None):
"""
Returns a list of SpeechSynthesisTask objects ordered by their creation date. This operation can filter the tasks by their status, for example, allowing users to list only tasks that are completed.
See also: AWS API Documentation
Exceptions
:example: response = client.list_speech_synthesis_tasks(
MaxResults=123,
NextToken='string',
Status='scheduled'|'inProgress'|'completed'|'failed'
)
:type MaxResults: integer
:param MaxResults: Maximum number of speech synthesis tasks returned in a List operation.
:type NextToken: string
:param NextToken: The pagination token to use in the next request to continue the listing of speech synthesis tasks.
:type Status: string
:param Status: Status of the speech synthesis tasks returned in a List operation
:rtype: dict
ReturnsResponse Syntax
{
'NextToken': 'string',
'SynthesisTasks': [
{
'Engine': 'standard'|'neural',
'TaskId': 'string',
'TaskStatus': 'scheduled'|'inProgress'|'completed'|'failed',
'TaskStatusReason': 'string',
'OutputUri': 'string',
'CreationTime': datetime(2015, 1, 1),
'RequestCharacters': 123,
'SnsTopicArn': 'string',
'LexiconNames': [
'string',
],
'OutputFormat': 'json'|'mp3'|'ogg_vorbis'|'pcm',
'SampleRate': 'string',
'SpeechMarkTypes': [
'sentence'|'ssml'|'viseme'|'word',
],
'TextType': 'ssml'|'text',
'VoiceId': 'Aditi'|'Amy'|'Astrid'|'Bianca'|'Brian'|'Camila'|'Carla'|'Carmen'|'Celine'|'Chantal'|'Conchita'|'Cristiano'|'Dora'|'Emma'|'Enrique'|'Ewa'|'Filiz'|'Geraint'|'Giorgio'|'Gwyneth'|'Hans'|'Ines'|'Ivy'|'Jacek'|'Jan'|'Joanna'|'Joey'|'Justin'|'Karl'|'Kendra'|'Kimberly'|'Lea'|'Liv'|'Lotte'|'Lucia'|'Lupe'|'Mads'|'Maja'|'Marlene'|'Mathieu'|'Matthew'|'Maxim'|'Mia'|'Miguel'|'Mizuki'|'Naja'|'Nicole'|'Penelope'|'Raveena'|'Ricardo'|'Ruben'|'Russell'|'Salli'|'Seoyeon'|'Takumi'|'Tatyana'|'Vicki'|'Vitoria'|'Zeina'|'Zhiyu',
'LanguageCode': 'arb'|'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR'
},
]
}
Response Structure
(dict) --
NextToken (string) --
An opaque pagination token returned from the previous List operation in this request. If present, this indicates where to continue the listing.
SynthesisTasks (list) --
List of SynthesisTask objects that provides information from the specified task in the list request, including output format, creation time, task status, and so on.
(dict) --
SynthesisTask object that provides information about a speech synthesis task.
Engine (string) --
Specifies the engine (standard or neural ) for Amazon Polly to use when processing input text for speech synthesis. Using a voice that is not supported for the engine selected will result in an error.
TaskId (string) --
The Amazon Polly generated identifier for a speech synthesis task.
TaskStatus (string) --
Current status of the individual speech synthesis task.
TaskStatusReason (string) --
Reason for the current status of a specific speech synthesis task, including errors if the task has failed.
OutputUri (string) --
Pathway for the output speech file.
CreationTime (datetime) --
Timestamp for the time the synthesis task was started.
RequestCharacters (integer) --
Number of billable characters synthesized.
SnsTopicArn (string) --
ARN for the SNS topic optionally used for providing status notification for a speech synthesis task.
LexiconNames (list) --
List of one or more pronunciation lexicon names you want the service to apply during synthesis. Lexicons are applied only if the language of the lexicon is the same as the language of the voice.
(string) --
OutputFormat (string) --
The format in which the returned output will be encoded. For audio stream, this will be mp3, ogg_vorbis, or pcm. For speech marks, this will be json.
SampleRate (string) --
The audio frequency specified in Hz.
The valid values for mp3 and ogg_vorbis are "8000", "16000", "22050", and "24000". The default value for standard voices is "22050". The default value for neural voices is "24000".
Valid values for pcm are "8000" and "16000" The default value is "16000".
SpeechMarkTypes (list) --
The type of speech marks returned for the input text.
(string) --
TextType (string) --
Specifies whether the input text is plain text or SSML. The default value is plain text.
VoiceId (string) --
Voice ID to use for the synthesis.
LanguageCode (string) --
Optional language code for a synthesis task. This is only necessary if using a bilingual voice, such as Aditi, which can be used for either Indian English (en-IN) or Hindi (hi-IN).
If a bilingual voice is used and no language code is specified, Amazon Polly will use the default language of the bilingual voice. The default language for any voice is the one returned by the DescribeVoices operation for the LanguageCode parameter. For example, if no language code is specified, Aditi will use Indian English rather than Hindi.
Exceptions
Polly.Client.exceptions.InvalidNextTokenException
Polly.Client.exceptions.ServiceFailureException
:return: {
'NextToken': 'string',
'SynthesisTasks': [
{
'Engine': 'standard'|'neural',
'TaskId': 'string',
'TaskStatus': 'scheduled'|'inProgress'|'completed'|'failed',
'TaskStatusReason': 'string',
'OutputUri': 'string',
'CreationTime': datetime(2015, 1, 1),
'RequestCharacters': 123,
'SnsTopicArn': 'string',
'LexiconNames': [
'string',
],
'OutputFormat': 'json'|'mp3'|'ogg_vorbis'|'pcm',
'SampleRate': 'string',
'SpeechMarkTypes': [
'sentence'|'ssml'|'viseme'|'word',
],
'TextType': 'ssml'|'text',
'VoiceId': 'Aditi'|'Amy'|'Astrid'|'Bianca'|'Brian'|'Camila'|'Carla'|'Carmen'|'Celine'|'Chantal'|'Conchita'|'Cristiano'|'Dora'|'Emma'|'Enrique'|'Ewa'|'Filiz'|'Geraint'|'Giorgio'|'Gwyneth'|'Hans'|'Ines'|'Ivy'|'Jacek'|'Jan'|'Joanna'|'Joey'|'Justin'|'Karl'|'Kendra'|'Kimberly'|'Lea'|'Liv'|'Lotte'|'Lucia'|'Lupe'|'Mads'|'Maja'|'Marlene'|'Mathieu'|'Matthew'|'Maxim'|'Mia'|'Miguel'|'Mizuki'|'Naja'|'Nicole'|'Penelope'|'Raveena'|'Ricardo'|'Ruben'|'Russell'|'Salli'|'Seoyeon'|'Takumi'|'Tatyana'|'Vicki'|'Vitoria'|'Zeina'|'Zhiyu',
'LanguageCode': 'arb'|'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR'
},
]
}
:returns:
(string) --
"""
pass
def put_lexicon(Name=None, Content=None):
"""
Stores a pronunciation lexicon in an AWS Region. If a lexicon with the same name already exists in the region, it is overwritten by the new lexicon. Lexicon operations have eventual consistency, therefore, it might take some time before the lexicon is available to the SynthesizeSpeech operation.
For more information, see Managing Lexicons .
See also: AWS API Documentation
Exceptions
Examples
Stores a pronunciation lexicon in an AWS Region.
Expected Output:
:example: response = client.put_lexicon(
Name='string',
Content='string'
)
:type Name: string
:param Name: [REQUIRED]\nName of the lexicon. The name must follow the regular express format [0-9A-Za-z]{1,20}. That is, the name is a case-sensitive alphanumeric string up to 20 characters long.\n
:type Content: string
:param Content: [REQUIRED]\nContent of the PLS lexicon as string data.\n
:rtype: dict
ReturnsResponse Syntax
{}
Response Structure
(dict) --
Exceptions
Polly.Client.exceptions.InvalidLexiconException
Polly.Client.exceptions.UnsupportedPlsAlphabetException
Polly.Client.exceptions.UnsupportedPlsLanguageException
Polly.Client.exceptions.LexiconSizeExceededException
Polly.Client.exceptions.MaxLexemeLengthExceededException
Polly.Client.exceptions.MaxLexiconsNumberExceededException
Polly.Client.exceptions.ServiceFailureException
Examples
Stores a pronunciation lexicon in an AWS Region.
response = client.put_lexicon(
Content='file://example.pls',
Name='W3C',
)
print(response)
Expected Output:
{
'ResponseMetadata': {
'...': '...',
},
}
:return: {}
:returns:
(dict) --
"""
pass
def start_speech_synthesis_task(Engine=None, LanguageCode=None, LexiconNames=None, OutputFormat=None, OutputS3BucketName=None, OutputS3KeyPrefix=None, SampleRate=None, SnsTopicArn=None, SpeechMarkTypes=None, Text=None, TextType=None, VoiceId=None):
"""
Allows the creation of an asynchronous synthesis task, by starting a new SpeechSynthesisTask . This operation requires all the standard information needed for speech synthesis, plus the name of an Amazon S3 bucket for the service to store the output of the synthesis task and two optional parameters (OutputS3KeyPrefix and SnsTopicArn). Once the synthesis task is created, this operation will return a SpeechSynthesisTask object, which will include an identifier of this task as well as the current status.
See also: AWS API Documentation
Exceptions
:example: response = client.start_speech_synthesis_task(
Engine='standard'|'neural',
LanguageCode='arb'|'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR',
LexiconNames=[
'string',
],
OutputFormat='json'|'mp3'|'ogg_vorbis'|'pcm',
OutputS3BucketName='string',
OutputS3KeyPrefix='string',
SampleRate='string',
SnsTopicArn='string',
SpeechMarkTypes=[
'sentence'|'ssml'|'viseme'|'word',
],
Text='string',
TextType='ssml'|'text',
VoiceId='Aditi'|'Amy'|'Astrid'|'Bianca'|'Brian'|'Camila'|'Carla'|'Carmen'|'Celine'|'Chantal'|'Conchita'|'Cristiano'|'Dora'|'Emma'|'Enrique'|'Ewa'|'Filiz'|'Geraint'|'Giorgio'|'Gwyneth'|'Hans'|'Ines'|'Ivy'|'Jacek'|'Jan'|'Joanna'|'Joey'|'Justin'|'Karl'|'Kendra'|'Kimberly'|'Lea'|'Liv'|'Lotte'|'Lucia'|'Lupe'|'Mads'|'Maja'|'Marlene'|'Mathieu'|'Matthew'|'Maxim'|'Mia'|'Miguel'|'Mizuki'|'Naja'|'Nicole'|'Penelope'|'Raveena'|'Ricardo'|'Ruben'|'Russell'|'Salli'|'Seoyeon'|'Takumi'|'Tatyana'|'Vicki'|'Vitoria'|'Zeina'|'Zhiyu'
)
:type Engine: string
:param Engine: Specifies the engine (standard or neural ) for Amazon Polly to use when processing input text for speech synthesis. Using a voice that is not supported for the engine selected will result in an error.
:type LanguageCode: string
:param LanguageCode: Optional language code for the Speech Synthesis request. This is only necessary if using a bilingual voice, such as Aditi, which can be used for either Indian English (en-IN) or Hindi (hi-IN).\nIf a bilingual voice is used and no language code is specified, Amazon Polly will use the default language of the bilingual voice. The default language for any voice is the one returned by the DescribeVoices operation for the LanguageCode parameter. For example, if no language code is specified, Aditi will use Indian English rather than Hindi.\n
:type LexiconNames: list
:param LexiconNames: List of one or more pronunciation lexicon names you want the service to apply during synthesis. Lexicons are applied only if the language of the lexicon is the same as the language of the voice.\n\n(string) --\n\n
:type OutputFormat: string
:param OutputFormat: [REQUIRED]\nThe format in which the returned output will be encoded. For audio stream, this will be mp3, ogg_vorbis, or pcm. For speech marks, this will be json.\n
:type OutputS3BucketName: string
:param OutputS3BucketName: [REQUIRED]\nAmazon S3 bucket name to which the output file will be saved.\n
:type OutputS3KeyPrefix: string
:param OutputS3KeyPrefix: The Amazon S3 key prefix for the output speech file.
:type SampleRate: string
:param SampleRate: The audio frequency specified in Hz.\nThe valid values for mp3 and ogg_vorbis are '8000', '16000', '22050', and '24000'. The default value for standard voices is '22050'. The default value for neural voices is '24000'.\nValid values for pcm are '8000' and '16000' The default value is '16000'.\n
:type SnsTopicArn: string
:param SnsTopicArn: ARN for the SNS topic optionally used for providing status notification for a speech synthesis task.
:type SpeechMarkTypes: list
:param SpeechMarkTypes: The type of speech marks returned for the input text.\n\n(string) --\n\n
:type Text: string
:param Text: [REQUIRED]\nThe input text to synthesize. If you specify ssml as the TextType, follow the SSML format for the input text.\n
:type TextType: string
:param TextType: Specifies whether the input text is plain text or SSML. The default value is plain text.
:type VoiceId: string
:param VoiceId: [REQUIRED]\nVoice ID to use for the synthesis.\n
:rtype: dict
ReturnsResponse Syntax
{
'SynthesisTask': {
'Engine': 'standard'|'neural',
'TaskId': 'string',
'TaskStatus': 'scheduled'|'inProgress'|'completed'|'failed',
'TaskStatusReason': 'string',
'OutputUri': 'string',
'CreationTime': datetime(2015, 1, 1),
'RequestCharacters': 123,
'SnsTopicArn': 'string',
'LexiconNames': [
'string',
],
'OutputFormat': 'json'|'mp3'|'ogg_vorbis'|'pcm',
'SampleRate': 'string',
'SpeechMarkTypes': [
'sentence'|'ssml'|'viseme'|'word',
],
'TextType': 'ssml'|'text',
'VoiceId': 'Aditi'|'Amy'|'Astrid'|'Bianca'|'Brian'|'Camila'|'Carla'|'Carmen'|'Celine'|'Chantal'|'Conchita'|'Cristiano'|'Dora'|'Emma'|'Enrique'|'Ewa'|'Filiz'|'Geraint'|'Giorgio'|'Gwyneth'|'Hans'|'Ines'|'Ivy'|'Jacek'|'Jan'|'Joanna'|'Joey'|'Justin'|'Karl'|'Kendra'|'Kimberly'|'Lea'|'Liv'|'Lotte'|'Lucia'|'Lupe'|'Mads'|'Maja'|'Marlene'|'Mathieu'|'Matthew'|'Maxim'|'Mia'|'Miguel'|'Mizuki'|'Naja'|'Nicole'|'Penelope'|'Raveena'|'Ricardo'|'Ruben'|'Russell'|'Salli'|'Seoyeon'|'Takumi'|'Tatyana'|'Vicki'|'Vitoria'|'Zeina'|'Zhiyu',
'LanguageCode': 'arb'|'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR'
}
}
Response Structure
(dict) --
SynthesisTask (dict) --
SynthesisTask object that provides information and attributes about a newly submitted speech synthesis task.
Engine (string) --
Specifies the engine (standard or neural ) for Amazon Polly to use when processing input text for speech synthesis. Using a voice that is not supported for the engine selected will result in an error.
TaskId (string) --
The Amazon Polly generated identifier for a speech synthesis task.
TaskStatus (string) --
Current status of the individual speech synthesis task.
TaskStatusReason (string) --
Reason for the current status of a specific speech synthesis task, including errors if the task has failed.
OutputUri (string) --
Pathway for the output speech file.
CreationTime (datetime) --
Timestamp for the time the synthesis task was started.
RequestCharacters (integer) --
Number of billable characters synthesized.
SnsTopicArn (string) --
ARN for the SNS topic optionally used for providing status notification for a speech synthesis task.
LexiconNames (list) --
List of one or more pronunciation lexicon names you want the service to apply during synthesis. Lexicons are applied only if the language of the lexicon is the same as the language of the voice.
(string) --
OutputFormat (string) --
The format in which the returned output will be encoded. For audio stream, this will be mp3, ogg_vorbis, or pcm. For speech marks, this will be json.
SampleRate (string) --
The audio frequency specified in Hz.
The valid values for mp3 and ogg_vorbis are "8000", "16000", "22050", and "24000". The default value for standard voices is "22050". The default value for neural voices is "24000".
Valid values for pcm are "8000" and "16000" The default value is "16000".
SpeechMarkTypes (list) --
The type of speech marks returned for the input text.
(string) --
TextType (string) --
Specifies whether the input text is plain text or SSML. The default value is plain text.
VoiceId (string) --
Voice ID to use for the synthesis.
LanguageCode (string) --
Optional language code for a synthesis task. This is only necessary if using a bilingual voice, such as Aditi, which can be used for either Indian English (en-IN) or Hindi (hi-IN).
If a bilingual voice is used and no language code is specified, Amazon Polly will use the default language of the bilingual voice. The default language for any voice is the one returned by the DescribeVoices operation for the LanguageCode parameter. For example, if no language code is specified, Aditi will use Indian English rather than Hindi.
Exceptions
Polly.Client.exceptions.TextLengthExceededException
Polly.Client.exceptions.InvalidS3BucketException
Polly.Client.exceptions.InvalidS3KeyException
Polly.Client.exceptions.InvalidSampleRateException
Polly.Client.exceptions.InvalidSnsTopicArnException
Polly.Client.exceptions.InvalidSsmlException
Polly.Client.exceptions.EngineNotSupportedException
Polly.Client.exceptions.LexiconNotFoundException
Polly.Client.exceptions.ServiceFailureException
Polly.Client.exceptions.MarksNotSupportedForFormatException
Polly.Client.exceptions.SsmlMarksNotSupportedForTextTypeException
Polly.Client.exceptions.LanguageNotSupportedException
:return: {
'SynthesisTask': {
'Engine': 'standard'|'neural',
'TaskId': 'string',
'TaskStatus': 'scheduled'|'inProgress'|'completed'|'failed',
'TaskStatusReason': 'string',
'OutputUri': 'string',
'CreationTime': datetime(2015, 1, 1),
'RequestCharacters': 123,
'SnsTopicArn': 'string',
'LexiconNames': [
'string',
],
'OutputFormat': 'json'|'mp3'|'ogg_vorbis'|'pcm',
'SampleRate': 'string',
'SpeechMarkTypes': [
'sentence'|'ssml'|'viseme'|'word',
],
'TextType': 'ssml'|'text',
'VoiceId': 'Aditi'|'Amy'|'Astrid'|'Bianca'|'Brian'|'Camila'|'Carla'|'Carmen'|'Celine'|'Chantal'|'Conchita'|'Cristiano'|'Dora'|'Emma'|'Enrique'|'Ewa'|'Filiz'|'Geraint'|'Giorgio'|'Gwyneth'|'Hans'|'Ines'|'Ivy'|'Jacek'|'Jan'|'Joanna'|'Joey'|'Justin'|'Karl'|'Kendra'|'Kimberly'|'Lea'|'Liv'|'Lotte'|'Lucia'|'Lupe'|'Mads'|'Maja'|'Marlene'|'Mathieu'|'Matthew'|'Maxim'|'Mia'|'Miguel'|'Mizuki'|'Naja'|'Nicole'|'Penelope'|'Raveena'|'Ricardo'|'Ruben'|'Russell'|'Salli'|'Seoyeon'|'Takumi'|'Tatyana'|'Vicki'|'Vitoria'|'Zeina'|'Zhiyu',
'LanguageCode': 'arb'|'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR'
}
}
:returns:
(string) --
"""
pass
def synthesize_speech(Engine=None, LanguageCode=None, LexiconNames=None, OutputFormat=None, SampleRate=None, SpeechMarkTypes=None, Text=None, TextType=None, VoiceId=None):
"""
Synthesizes UTF-8 input, plain text or SSML, to a stream of bytes. SSML input must be valid, well-formed SSML. Some alphabets might not be available with all the voices (for example, Cyrillic might not be read at all by English voices) unless phoneme mapping is used. For more information, see How it Works .
See also: AWS API Documentation
Exceptions
Examples
Synthesizes plain text or SSML into a file of human-like speech.
Expected Output:
:example: response = client.synthesize_speech(
Engine='standard'|'neural',
LanguageCode='arb'|'cmn-CN'|'cy-GB'|'da-DK'|'de-DE'|'en-AU'|'en-GB'|'en-GB-WLS'|'en-IN'|'en-US'|'es-ES'|'es-MX'|'es-US'|'fr-CA'|'fr-FR'|'is-IS'|'it-IT'|'ja-JP'|'hi-IN'|'ko-KR'|'nb-NO'|'nl-NL'|'pl-PL'|'pt-BR'|'pt-PT'|'ro-RO'|'ru-RU'|'sv-SE'|'tr-TR',
LexiconNames=[
'string',
],
OutputFormat='json'|'mp3'|'ogg_vorbis'|'pcm',
SampleRate='string',
SpeechMarkTypes=[
'sentence'|'ssml'|'viseme'|'word',
],
Text='string',
TextType='ssml'|'text',
VoiceId='Aditi'|'Amy'|'Astrid'|'Bianca'|'Brian'|'Camila'|'Carla'|'Carmen'|'Celine'|'Chantal'|'Conchita'|'Cristiano'|'Dora'|'Emma'|'Enrique'|'Ewa'|'Filiz'|'Geraint'|'Giorgio'|'Gwyneth'|'Hans'|'Ines'|'Ivy'|'Jacek'|'Jan'|'Joanna'|'Joey'|'Justin'|'Karl'|'Kendra'|'Kimberly'|'Lea'|'Liv'|'Lotte'|'Lucia'|'Lupe'|'Mads'|'Maja'|'Marlene'|'Mathieu'|'Matthew'|'Maxim'|'Mia'|'Miguel'|'Mizuki'|'Naja'|'Nicole'|'Penelope'|'Raveena'|'Ricardo'|'Ruben'|'Russell'|'Salli'|'Seoyeon'|'Takumi'|'Tatyana'|'Vicki'|'Vitoria'|'Zeina'|'Zhiyu'
)
:type Engine: string
:param Engine: Specifies the engine (standard or neural ) for Amazon Polly to use when processing input text for speech synthesis. Using a voice that is not supported for the engine selected will result in an error.
:type LanguageCode: string
:param LanguageCode: Optional language code for the Synthesize Speech request. This is only necessary if using a bilingual voice, such as Aditi, which can be used for either Indian English (en-IN) or Hindi (hi-IN).\nIf a bilingual voice is used and no language code is specified, Amazon Polly will use the default language of the bilingual voice. The default language for any voice is the one returned by the DescribeVoices operation for the LanguageCode parameter. For example, if no language code is specified, Aditi will use Indian English rather than Hindi.\n
:type LexiconNames: list
:param LexiconNames: List of one or more pronunciation lexicon names you want the service to apply during synthesis. Lexicons are applied only if the language of the lexicon is the same as the language of the voice. For information about storing lexicons, see PutLexicon .\n\n(string) --\n\n
:type OutputFormat: string
:param OutputFormat: [REQUIRED]\nThe format in which the returned output will be encoded. For audio stream, this will be mp3, ogg_vorbis, or pcm. For speech marks, this will be json.\nWhen pcm is used, the content returned is audio/pcm in a signed 16-bit, 1 channel (mono), little-endian format.\n
:type SampleRate: string
:param SampleRate: The audio frequency specified in Hz.\nThe valid values for mp3 and ogg_vorbis are '8000', '16000', '22050', and '24000'. The default value for standard voices is '22050'. The default value for neural voices is '24000'.\nValid values for pcm are '8000' and '16000' The default value is '16000'.\n
:type SpeechMarkTypes: list
:param SpeechMarkTypes: The type of speech marks returned for the input text.\n\n(string) --\n\n
:type Text: string
:param Text: [REQUIRED]\nInput text to synthesize. If you specify ssml as the TextType , follow the SSML format for the input text.\n
:type TextType: string
:param TextType: Specifies whether the input text is plain text or SSML. The default value is plain text. For more information, see Using SSML .
:type VoiceId: string
:param VoiceId: [REQUIRED]\nVoice ID to use for the synthesis. You can get a list of available voice IDs by calling the DescribeVoices operation.\n
:rtype: dict
ReturnsResponse Syntax
{
'AudioStream': StreamingBody(),
'ContentType': 'string',
'RequestCharacters': 123
}
Response Structure
(dict) --
AudioStream (StreamingBody) --
Stream containing the synthesized speech.
ContentType (string) --
Specifies the type audio stream. This should reflect the OutputFormat parameter in your request.
If you request mp3 as the OutputFormat , the ContentType returned is audio/mpeg.
If you request ogg_vorbis as the OutputFormat , the ContentType returned is audio/ogg.
If you request pcm as the OutputFormat , the ContentType returned is audio/pcm in a signed 16-bit, 1 channel (mono), little-endian format.
If you request json as the OutputFormat , the ContentType returned is audio/json.
RequestCharacters (integer) --
Number of characters synthesized.
Exceptions
Polly.Client.exceptions.TextLengthExceededException
Polly.Client.exceptions.InvalidSampleRateException
Polly.Client.exceptions.InvalidSsmlException
Polly.Client.exceptions.LexiconNotFoundException
Polly.Client.exceptions.ServiceFailureException
Polly.Client.exceptions.MarksNotSupportedForFormatException
Polly.Client.exceptions.SsmlMarksNotSupportedForTextTypeException
Polly.Client.exceptions.LanguageNotSupportedException
Polly.Client.exceptions.EngineNotSupportedException
Examples
Synthesizes plain text or SSML into a file of human-like speech.
response = client.synthesize_speech(
LexiconNames=[
'example',
],
OutputFormat='mp3',
SampleRate='8000',
Text='All Gaul is divided into three parts',
TextType='text',
VoiceId='Joanna',
)
print(response)
Expected Output:
{
'AudioStream': 'TEXT',
'ContentType': 'audio/mpeg',
'RequestCharacters': 37,
'ResponseMetadata': {
'...': '...',
},
}
:return: {
'AudioStream': StreamingBody(),
'ContentType': 'string',
'RequestCharacters': 123
}
:returns:
If you request mp3 as the OutputFormat , the ContentType returned is audio/mpeg.
If you request ogg_vorbis as the OutputFormat , the ContentType returned is audio/ogg.
If you request pcm as the OutputFormat , the ContentType returned is audio/pcm in a signed 16-bit, 1 channel (mono), little-endian format.
If you request json as the OutputFormat , the ContentType returned is audio/json.
"""
pass
|
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
def _maybe(repo_rule, name, **kwargs):
if name not in native.existing_rules():
repo_rule(name = name, **kwargs)
def load_bark():
_maybe(
native.local_repository,
name = "bark_project",
path="/home/chenyang/bark",
)
#_maybe(
#git_repository,
#name = "bark_project",
#branch= "master",
#remote = "https://github.com/ChenyangTang/bark",
#)
|
#then look for the enumerated Intel Movidius NCS Device();quite program if none found.
devices = mvnc.EnumerateDevice();
if len(devices) == 0:
print("No any Devices found");
quit;
#Now get a handle to the first enumerated device and open it.
device = mvnc.Device(devices[0]);
device.OpenDevice();
|
def mirror(text):
words = text.split(" ")
nwords = []
for w in words:
nw = w[::-1]
nwords.append(nw)
return " ".join(nwords)
print(mirror("s'tI suoregnad ot eb thgir nehw eht tnemnrevog si .gnorw"))
|
UNK_TOKEN = '<unk>'
PAD_TOKEN = '<pad>'
BOS_TOKEN = '<s>'
EOS_TOKEN = '<\s>'
PAD, UNK, BOS, EOS = [0, 1, 2, 3]
LANGUAGE_TOKENS = {lang: '<%s>' % lang
for lang in sorted(['en', 'de', 'fr', 'he'])}
|
"""This is a test script."""
def hello_world():
"""Function that prints Hello World."""
print("Hello World")
if __name__ == "__main__":
hello_world()
|
# https://www.codewars.com/kata/550f22f4d758534c1100025a/train/python
# Once upon a time, on a way through the old wild mountainous west,…
# … a man was given directions to go from one point to another. The
# directions were "NORTH", "SOUTH", "WEST", "EAST". Clearly "NORTH"
# and "SOUTH" are opposite, "WEST" and "EAST" too.
# Going to one direction and coming back the opposite direction right
# away is a needless effort. Since this is the wild west, with
# dreadfull weather and not much water, it's important to save
# yourself some energy, otherwise you might die of thirst!
# How I crossed a mountain desert the smart way.
# The directions given to the man are, for example, the following
# (depending on the language):
# ["NORTH", "SOUTH", "SOUTH", "EAST", "WEST", "NORTH", "WEST"].
# or
# { "NORTH", "SOUTH", "SOUTH", "EAST", "WEST", "NORTH", "WEST" };
# or
# [North, South, South, East, West, North, West]
# You can immediatly see that going "NORTH" and immediately "SOUTH"
# is not reasonable, better stay to the same place! So the task is
# to give to the man a simplified version of the plan. A better
# plan in this case is simply:
# ["WEST"]
# or
# { "WEST" }
# or
# [West]
# Other examples:
# In ["NORTH", "SOUTH", "EAST", "WEST"], the direction "NORTH" +
# "SOUTH" is going north and coming back right away.
# The path becomes ["EAST", "WEST"], now "EAST" and "WEST"
# annihilate each other, therefore, the final result is []
# (nil in Clojure).
# In ["NORTH", "EAST", "WEST", "SOUTH", "WEST", "WEST"],
# "NORTH" and "SOUTH" are not directly opposite but they
# become directly opposite after the reduction of "EAST"
# and "WEST" so the whole path is reducible to ["WEST",
# "WEST"].
# Task
# Write a function dirReduc which will take an array of
# strings and returns an array of strings with the
# needless directions removed (W<->E or S<->N side
# by side).
# The Haskell version takes a list of directions with
# data Direction = North | East | West | South.
# The Clojure version returns nil when the path is
# reduced to nothing.
# The Rust version takes a slice of enum Direction
# {NORTH, SOUTH, EAST, WEST}.
# See more examples in "Sample Tests:"
# Notes
# Not all paths can be made simpler. The path
# ["NORTH", "WEST", "SOUTH", "EAST"] is not reducible.
# "NORTH" and "WEST", "WEST" and "SOUTH", "SOUTH"
# and "EAST" are not directly opposite of each
# other and can't become such. Hence the result
# path is itself : ["NORTH", "WEST", "SOUTH",
# "EAST"].
# if you want to translate, please ask before
# translating.
def dir_reduc(arr):
stack = []
for dir in arr:
if len(stack) > 0:
if (
(dir == "NORTH" and stack[-1] == "SOUTH")
or (dir == "SOUTH" and stack[-1] == "NORTH")
or (dir == "EAST" and stack[-1] == "WEST")
or (dir == "WEST" and stack[-1] == "EAST")
):
del stack[-1]
else:
stack.append(dir)
else:
stack.append(dir)
return stack
# Alternative
# opposite = {'NORTH': 'SOUTH', 'EAST': 'WEST', 'SOUTH': 'NORTH',
# 'WEST': 'EAST'}
# def dirReduc(plan):
# new_plan = []
# for d in plan:
# if new_plan and new_plan[-1] == opposite[d]:
# new_plan.pop()
# else:
# new_plan.append(d)
# return new_plan
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
def list_sort(func):
def inner(s):
return sorted(func(s))
return inner
@list_sort
def get_list(s):
return [int(i) for i in s.split()]
if __name__ == '__main__':
print(get_list(input('Введите числа через пробел: ')))
|
def get_prime_numbers(max_number):
# Crear una lista que contiene el estado (tachado/no tachado)
# de cada número desde 2 hasta max_number.
numbers = [True, True] + [True] * (max_number-1)
# Se comienza por el 2. Esta variable siempre tiene un
# número primo.
last_prime_number = 2
# Esta variable contiene el número actual en la lista,
# que siempre es un múltiplo de last_prime_number.
i = last_prime_number
# Proceder siempre que el cuadrado de last_prime_number (es decir,
# el último número primo obtenido) sea menor o igual que max_number.
while last_prime_number**2 <= max_number:
# Tachar todos los múltiplos del último número primo obtenido.
i += last_prime_number
while i <= max_number:
numbers[i] = False
i += last_prime_number
# Obtener el número inmediatamente siguiente al último
# número primo obtenido (last_prime_number) que no esté tachado.
j = last_prime_number + 1
while j < max_number:
if numbers[j]:
last_prime_number = j
break
j += 1
i = last_prime_number
# Retornar todas los números de la lista que no están tachados.
return [i + 2 for i, not_crossed in enumerate(numbers[2:]) if not_crossed]
print(get_prime_numbers(5)) |
class Argument(object):
def __init__(self, name=None, names=(), kind=bool, default=None):
if name and names:
msg = "Cannot give both 'name' and 'names' arguments! Pick one."
raise TypeError(msg)
if not (name or names):
raise TypeError("An Argument must have at least one name.")
self.names = names if names else (name,)
self.kind = kind
self.raw_value = self._value = None
self.default = default
def __str__(self):
return "Arg: %r (%s)" % (self.names, self.kind)
@property
def takes_value(self):
return self.kind is not bool
@property
def value(self):
return self._value if self._value is not None else self.default
@value.setter
def value(self, arg):
self.raw_value = arg
self._value = self.kind(arg)
|
# The Pipelines section. Pipelines group related components together by piping the output
# of one component into the input of another. This is a good place to write your
# package / unit tests as well as initialize your flags. Package tests check that the input
# received by a component is of the right specification and unit tests check that expected
# inputs into a component produce the expected output.
flags.set('pf_settings', [100, "PF"])
flags.set('inv_pf_settings', [100, "InvPF"])
flags.set('rr_fifo_settings', [100, "RR_FIFO"])
flags.set('rr_random_settings', [100, "RR_random"])
flags.set('hybrid_inv_pf_fifo_settings', [100, "Hybrid_InvPF_FIFO"])
flags.set('broadcast_settings', [1000, 5])
flags.set('run_interval', [1800])
CreateProportionalFairNetwork = Pipeline([
("create_network", "fetch_flag_inline('pf_settings')"),
])
CreateInverseProportionalFairNetwork = Pipeline([
("create_network", "fetch_flag_inline('inv_pf_settings')"),
])
CreateRoundRobinFIFONetwork = Pipeline([
("create_network", "fetch_flag_inline('rr_fifo_settings')"),
])
CreateRoundRobinRandomNetwork = Pipeline([
("create_network", "fetch_flag_inline('rr_random_settings')"),
])
CreateInvPFCrossFIFONetwork = Pipeline([
("create_network", "fetch_flag_inline('hybrid_inv_pf_fifo_settings')"),
])
GenerateNetworkTraffic = Pipeline([
("broadcast", "fetch_flag_inline('broadcast_settings')"),
])
RunNetworkSimulation = Pipeline([
("network_simulator", "fetch_flag_inline('run_interval')"),
])
Terminate = Pipeline([
("Pass", [])
])
|
#List comprehension
"""
The concept of list comprehension takes in three parameters
- expression
- iteration
- codition (This usually optional)
syntax of list comprehension
list_of_numbers_from_1_to_10 = [expression iteration condition]
"""
#example of list comprehension with without condition
list_of_numbers_from_1_to_10 = [number for number in range(11)]
#example of list comprehension with without condition
list1 = [x/2 for x in range(110)]
print(list1)
#example of list comprehension with with condition
names = ["Rogers", "Edgar", "Joan", "Bam", "Hellen", "Angei"]
list_of_names_with_letter_o = [name for name in names if 'o' in name]
print(list_of_names_with_letter_o)
list_of_names_with_letter_e = [name for name in names if 'e' in name.lower()]
print(list_of_names_with_letter_e)
list_names_starting_with_letter_h = [name for name in names if name.lower().startswith('h')]
print(list_names_starting_with_letter_h)
|
""" append and extend"""
first_line = [1,2,3,4,5]
first_line.append(6)
print(first_line)
# first_line.append(7,8,9) #doesn't work
first_line.append([7,8,9])
print(first_line)
correct_list = [1,2,3,4,5]
correct_list.extend([6,7,8,9,10])
print(correct_list)
""" insert """
print()
correct_list.insert(0, 100)
print(correct_list)
""" remove """
print()
first_line.remove(5)
print(first_line)
""" clear """
print()
first_line.clear()
print(first_line)
""" pop """
print()
print(correct_list)
correct_list.pop()
print(correct_list)
correct_list.pop(2)
print(correct_list)
""" index """
print()
lst = ["A", "B", "C", "D", "E", "B", "F"]
print(lst)
print(lst.index("E"))
print(lst.index("B"))
print(lst.index("B", 3, 7))
""" count """
print()
print(lst.count("A"))
print(lst.count("B"))
""" reverse """
print()
correct_list = [1,2,3,4,5]
print(correct_list)
correct_list.reverse()
print(correct_list)
""" sort """
print()
another_list = [23, 11, 456, 2, 1, 52]
print(another_list)
another_list.sort()
print(another_list)
""" join """
lst = ["Coding", "is", "fun"]
st = " ".join(lst)
print(st)
""" slicing """
lst = ["A", "B", "C", "D", "E"]
print(lst)
print(lst[1::2])
print(lst[::2])
print(lst[-2:])
print(lst[::-1])
print(lst[1::-1])
""" swapping values """
names = ["Harry", "Potter"]
names[0], names[1] = names[1], names[0]
print(names) |
pjesma = '''\
Programiranje je zabava
Kada je posao gotov
ako zelis da ti posao bude razbirbriga
korisit Python!
'''
f = open("pjesma.txt", "w") #otvara fajl sa w-pisanje
f.write(pjesma) #upisuje tekst u fajl
f.close() #zatvara fajl
f = open("pjesma.txt") #ako nismo naveli podazumjevno je r-citanje fajla
while True:
linija = f.readline()
if len(linija) == 0: #duzina od 0 znaci da smo dostigli EOF- kraj fajla
break
print(linija, end = " ")
f.close() #zavra fajl
|
# from __future__ import division
class WarheadTypeEnum(object):
UNDEFINED = 0
CONE_WARHEAD = 1
ARC_WARHEAD = 2
CARMEN_WARHEAD = 3
class SternTypeEnum(object):
UNDEFINED = 0
CONE_STERN = 1
ARC_STERN = 2
class BaseMissile(object):
def __init__(self):
self.para_dict = {'type_warhead': WarheadTypeEnum.UNDEFINED, # missile body
'length_warhead': float(0),
'diameter_warhead': float(0),
'diameter_column': float(0),
'length_column': float(0),
'type_stern': SternTypeEnum.UNDEFINED,
'length_stern': float(0),
'diameter_tail': float(0),
'diameter_nozzle': float(0),
'num_group_wings': 0, # missile wing
'pos_wings': float(0),
'num_wings_per_group': float(0),
'layout_angle_wings': float(0),
'root_chord': float(0),
'tip_chord': float(0),
'distance_root_chord': float(0),
'distance_tip_chord': float(0),
'thickness_root_chord': float(0),
'thickness_tip_chord': float(0),
'angle_front_edge': float(0),
'angle_rear_edge': float(0),
'height_flight': float(0), # condition of flight
'mach_flight': float(0),
'angle_flight': float(0),
'barycenter_ref': float(0), # reference
'length_ref': float(0),
'area_ref': float(0)}
# fsw
if __name__ == "__main__":
pass
|
CREATED_AT_KEY = "created_at"
TITLE_KEY = "title"
DESCRIPTION_KEY = "description"
PUBLISHED_AT = "published_at"
LIMIT_KEY = "limit"
OFFSET_KEY = "offset"
# Data query limiters
DEFAULT_OFFSET = 0
DEFAULT_PER_PAGE_LIMIT = 20
INDEX_KEY = "video_index"
VIDEO_ID_KEY = "video_id"
VIDEOS_SEARCH_QUERY_KEY = "video_search_query"
|
"""
contest 12/21/2019
"""
class Solution:
def maxFreq(self, s: str, maxLetters: int, minSize: int, maxSize: int) -> int:
freq = {}
count_dict = {}
def count_letters(word):
if word in count_dict:
return count_dict[word]
unq = {}
for item in word:
if item in unq:
continue
else:
unq[item] = True
if len(unq) > maxLetters:
count_dict[word] = False
return False
count_dict[word] = True
return True
size = minSize
for i in range(0, len(s)-size+1):
current = s[i:i+size]
if count_letters(current):
if current in freq:
freq[current] += 1
else:
freq[current] = 1
if len(freq) == 0:
return 0
return freq[max(freq, key=freq.get)]
"ffhrimojtdwnwrwsmwxxprahdofmwzzcziskfyxvlteunhyjvmexcbxlrxtcsozrxyaxppdztpzqfcnpiwzhcvyyvpnlwwkhjlctlsbboosvyabdglhzvwdtazcyrumynkhqywrmyljhkxbpnwmfkxnqpchyjckwwpiqjljynsidcccffguyqmvnubgznsjzgkublxwvdjequsguchpzcfncervajafyhyjvoqetaxkybvqgbglmcoxxapmymxmmsqpddpctymxkkztnpiqcgrsybfrqzepnteiuzkvfnnfwsjwrshjclvkvjiwfqbvprbknvxotekxskjofozxiisnomismymubikpagnvgrchynsyjmwadhqzbfssktjmdkbztodwidpwbihfguxzgrjsuksfjuxfqvmqqojoyjznvoktfbwykwhaxorlduchkefnbpgknyoodaizarigbozvsikhxhokfpedydzxlcbasrxnenxrqxgkyfncgnhmbtxnigznqaawmslxehbshmelgfxaayttbsbhvrpsehituihvleityqckpfpmcjffhhgxdprsylnjvrezjdwjrzgqbdwdctfnvibhgcpmudxnoedfgejnbctrcxcvresawrgpvmgptwnwudqfdpqiokqbujzkalfwddfpeptqhewwrlrwdabafodecuxtoxgcsbezhkoceyydjkniryftqdoveipatvfrfkhdztibywbajknxvkrcvfhgbnjxnoefgdwbekrvaalzuwypkhwhmxtnmoggsogczhemzysagznnmjiiwwyekibytqhgwfzmsqlntvakyhaaxiqvlxbhgknvdxjwecccsquqqqmrxsysfyidsbtqytgonmzyjcydyqqqmixrbrllbcbbnwvriqcrznobzippyssjypvjemvadgdcriydntlpyrmxejtdyzhzdljhbyifxewdyokbhcgkhpdjoeqexfxqwvxys"
18
2
22
"fehjblcdljlmckggcigkedfjcejklicihegfhkfbgegjiikcjgfacicaiheibcicmbilbkhhejfdifdehbjelcalcjellkaimhelkjhafcmjhikbgihjlmjclibceecelkaccklbdaifgdflidhidagiahlbjcfbijgeldjgedldbdchkblbdmcdjbjhccikelcmjjbfkhlfekdhbcakgbclgeijbdhmcmemebkgjeeeickifjglmjfjcmjidjgjmijceiikhmmaagebhifhkfhemfeigdlijffcjgmdehjgllkaallheikhghceekhcckfegghdcalalhkhlgikaamladheakecccgafkimibhiafkkkdbflklbhdagdefdgjfihbiakmjbdlhmlhalekjhmjagjahbjflkjiljjbgfhmekifjdejijehfgfjajbbabcgdbhmjmjabfackghfjflcejdcbdfdamcagjbgicbilhdmfclmaemdgkfdgegicikmifbkcckfkkblldhidlmfgckiiceghfcedjbaggmfkkfiacaffkfmliligeadeghklcbhdkgdcgkijklhkbgjicmfiffaaebimmeicaajfikmfbfkemmadgdaiiicjfcfeffmmhhejfgilkalglmfbgckgcdmcbhimfkmhmcccibjcalhfbgmhkckjfmdaamaffheimfihmaifalbamkfeibghkghfbmkghdimmjcmbdbafdfakaideemalgijieifiaakdfbcjggmelclmijhjgjigfhcabgmimcmkbdidhdagbbjeablcdleleijagkaijlgfgiehimklcaidcdeaekeddijlhaijlfclfcflblklgadbdabickelhdlkhefilhcecejkfacfbhcabcjjjhllhelljdmkjgihfebdhbiljijlhclmhgejaecihjfigbdmleebhcaehcgadidbfjjhkkcgddlieidgabhhcghaeehbhghhacgckmkhklchaeeieghjibkmebcifllamgflhikhfkhhmaeekecbcgfblbikgehhdjmedggfdghaafmeghiiiaahgilfibddilfbkdgbjiecibbdekhjbkdhigigffcgmbikhdmbgelgkfidfjkddhfifkdgmihkbdlhlmlkhkbjlhdhgaafkcebcjjaagmkecechalmbheieibihefcllgliamigjgbjcjkgdjeimffhehcjciabgjhgkgmcmemfchiemfldfjimmbeiiiaedkhlkeeijecedclbkhkkekjecfjlilidfigammdgjkgahibdbbkbgjgbabebjcglgfaldgiglilhgfbicchideehgffhfcheamklkkdgfmakhdgmdclejcfgfdlmmbgjamlgchaabelcllalccckajmmkfghaefbebaibdkeegicgmfdgbilhllkfhcgfdeddkfciiibgjhikhaagdkkdmjllalfifjcijhljfebiaflhjdkhmaeejgjkkaelgglefccejidmgkddekjjffcbfjmbmkihmemaibadaihhchdfgiejglmkclcfjgajlgbeillgfbhkgldmfekjbdegjmiddaeaebiaedkdbmciceggbalffddijfccadhhkfgebakkfcmdegdkdbglaeblabjahcjillgmihifbgmiejbefjjecgfkjibejeemcibmcmiifmaiggljgikhiebgijfjafchcjbdmiffjigkmcfhejjagmddjmeckcdhbbdgdcmgfhlcaggjlijjhghihlammgkdekgbkfellfdkcfkigjjecffmgeikafadbfdaadiembbmiadbkbljmkfedllghlhemeaimbamlfcehegbgccfbcjblahdlaakeafmlkjljlkiaglmeideifgdbadjehhmmkfhdkldebegbbiiblkmidlmeejlaemkhfajmidlfcjgiejmmihllbigelbekkfagdcjdbmifdmmchcllmihjlmhblkfcbcjiiaejhgldjmieejhjiadfkfmgamcdlcljbfclkaflhjbeajdkdkjecifikmleblijjedcaccikggjcgidmfjegkbhcacalmbcdgbfjkjajclgdbfcdkemajlajeklieibjhcdheglagfeeagjbacmjdhadgelhemeefikmejlkdcghahfdkhaacghieffcgfgllmdgbkhejkjdcdddhdfdcdidejaekjeclccmedjjmaellmcgfiacbhdfmcdcielcalchbgagelhjjmmkljfagkfjijmddafglimkekjagmhgfiidjefjfmaihhbhhhaafhiekmdkgidjmljfgmgcijbbjmbjiikailalbffjhedbfbbhcbbbicblagibbdamalkiblhblhacdckllbliccmjgedkjbeihhglhbcfaefaimlbjfhmjadlmgdikjjkkghidlfblkdgdbagkldghadhmmckfhkddedlgdfdifghagkdjiklmfbdajfemjcjlamfflgiekmabhcigclbdfefkfmdaffeccgcdflacahhademhjlchabeabbfjfeefhmmbaajmmlmgfhbclkfaihkehjljjhdbkkieikajbbgmfiilkehcliacgggmidlkgjmcjkhjklddijjmjdkejajgllcechmmbfbibdddfgakfmgebkfcbbkjehemckcaefimgfiamhddahklgdhcdgicdmmdfgemlhdcaglcdkeehjkccgcllcldbkggjihdafcfkhkifmkadgkmbgkbgkmilldfhjebdjdfkhmfdhldjmkbcebbbaiemgkihggeebkaibkhajkamfhcbcckgkjbfamlbghhdcehigmehmafalbjedgdgddgjfkfmmeicjlcaajemkjiligbfcbliagicggjclclgidkibkddfgfkclfgdblfebfkcjelghejlejckbgiibedgaebaffcleemmcdgfgjlhdagdmgagiambakabajcjmlifiikckjjfbmafiahmlbhcfegdaekjcgjdbhefkcfdcgkkmlibchbfjbalkbkmgjfbgjlbiffeeabbmgjgbillamjeefklbbibkddcifdakjdlekbkcemkmgdhabdeiccijlicgaecbefmcjeemccegaldfaeafdedbakmiaakjlcbddkkidmkdkdifdgaeflhbkbadgebhhhlaeeajfheamkfkakgmamhaialdmbllbddfidaibffmihfehddlhbemlgdkkikfhkigfkbfjijfiahkfhihkgmblfgidflleameaicgkmimdejkkddddfagfjceffmmkmcffkdfmfjbgjdkbgbelkgjcfhiijlijfeiimcblamiecbmaifejeklfeggfkeiamalhjgklhaellimjelhbgjcghjbfkdjhlmhgkafkkdkkfldbafljgchilbleabgiejfgjhhgcejjjbhkmblkiljbeafhlbdecimdejflhkbkccbkmljldjaihddjmajefjkkdmjkhghdhkhbhmkhjkldlfjjdhdklkheajceelahchhicmkjhekdejdefabaceemjbhimlfjihdmcbhlgihkhgdaibgfbfebadiadkmbjmhgifhefejjgkihkfcbdkjcecjmcifjidfegblklbbabjcfbighkaemgklbidlckebdlgmklifibghalbglmaihkggjcjljgibahghealfhhfiglljdhbffleccdjechchicddkfgimahhmgbjhdlheadfmahelbkhkkgmchljaaekcjhclhghdkebfkcadfajbihemfmjibaidhabdmblakajkddbajemkhebkdkafchalahkijkblmmfakkmdeikhbfhmekakhkmfgjkgljggacmamklbmkdkldmgggajmkaaeimjbffigdjffemcjdfklgbmclkjfhljhfldjkbdfihcjhiaeccafjajldibdlmbkigidecbecbgmlbfcljhieejegclgdeclcfblglgkbmfkhecjgkkkkleeledlmigcijbblhbkeeeifggbkihglgekbjedficgafflgdmhbgajjdajcjalggbciefmbimgabjcbehacagejjbcldalbgfgmiflicdcbabhkmddemieaheldmihcagiledmafagiajgffflfihfghhkkdhlijdgiimbdefehhdkeakddmhedcamjbimigmfajjemlgfdaalelecbifmkjccaefemaijddlmbkmlldhfbklljdedhahajhjmcmaglmbhjagjiifhkdbiehggajddkjchkbeddkahljjgefeffcbdlhkemmecdmbimdmamljhcicfiaambjehjmkjhfajadkeacgcadmcmfkbghbljbfiadkmaacabflejigcialheaibehjblkieaalbclbmhlfekgmggdakhicfaicceggahmidhemaibaiaabfhdjjifbdbkceicgdikhljdhimamghcgjljacdikilhcahedamkgfafhffmlifdeclkekmchmlbigjhijlmfejjjhcdfmjaggfllkdijhadlgfhiiikefglibjclhgedfdmeifeegeelmliefjfjldkdihciclagljcgajdmeijljfdhjkkajfckgaddeaakmjhhahkijhjhfjijamdeakeabfhfifdfkcejjfdgcjjlehkbmmbabiblgjkdhglgjgecfhicildemlaakikfbcdflejfgclmlclbldgldddclhjgdelfjdegbhglmhakdagmgkecdkeihdijijlkckjbammeiafkhmfjieflkcbhiggdjdeaiccaaaaildkmcffkhajefjakgjcglibjcejabfhlddimighmlcggbebbdlhbbjhikagificilmlcbidehkdfeimialijcbfmlgejldbleljgclfhiamhhgcgfjgcjgkmahkchbagfkkakcklefiimhekhckagcmcjadblhljjljdklcgidggmebmfifbfjcgcbhcgehkdikefecmhajjheaecjdiblhhcfcgfgdkjcfgjmhegahfeamclcmjemidkmkjfaecekchmkigdejeeiihlekgiggkcgmblaiblalacddicmehmjhlhmkfleaamamgbdaghdilgcjmfaklbcbldcmikakbmailkkjjlgjiaddfcbcfciladbeedhglebmefjgjfdhebjikbeldkmjldaekgjglbkiagkmlagblideedeehembjdliladifemkgchmlchlbjiaglmbikleclgeefhjlimalibckjgfjfgffhikllghbldhelgjmiifilgkkbdclkggijikbkieldgmggbjcgcfbjaedgclfahajlahllflihbkmakehbgdjbchdajigbdgiefaaadjkkjbjbekdfhaidjfgjgjablkggbagbbhmlkikdhblmfifldbmefjbljgkmdgbbcellefjgmbeladfjbibbjedccaebjakkadcmclihbgcfmjbdldmfcjifcaadibkfkdighjfhgjjaeifdebdkbjhbkibjimmmembkliildfbchbfablcmmjeigemdlkgbgbcfgibekbihkhklhkhkdacjlibkkdlbebbbdkkfdmlbijhammeeeejlfbheicdbcbgeeccfbabjlhadbhbhkmfgfichadjjiakjgagjadkkbggcjkbdciddjmflgedcihmgalkbehccmcagmmifcckcadgclbehhddbcaaiglachgdmhlammfhifahggigbkjblhlbedjldcjkfkglfjkjidciemkjkhkflfldkbhkjgcigdfdlblfkigalkijgmdmiabdiakbcfdldcmkkffihmemakiakfggadcjccckflemckgldjhiblgkhakfccbabfbjidhfmlbkjbedkfmhjjijijfbemffccmccckmhhaadcamfhmikmabkcmklbcikhkhfmdghhihllmekhefbdhgbdhldakljemeggdgabieebcklgkjmcgddhgfmkdbcafgkmhdjfkgdcfalkaadllcmglbkefkllhjghhdfdejbmfkcagaicfmigbdgaldjebejbhmggbkacickeidiimecglbdeeaceedgabballkmjjbjlkjgcjhiibbiflkggcgdemhimegghdjmlcbmhgmhblegehmecflcmmljakfidkmlbhjjdkhmccadkckalkgdiijmbgmceiejkmkabdbmikmlgabheidhbmdkdalhgfigafmccdhkggmbjabkdflckkflacecklaccmlailedldkkbddcjhbhldkimedlhblckbagdbcekmgicjaeemmjiljbiglfggfmgjmabcialkffdamjgfbgmjdfjgafjehdfcgideedgigalffjgcgdkbkfiijiaiglggdbmbflickgamjgghdllfjmhajmgleebdghejihmimlclfidcalfijmlbmejhijfgfjjhechfachlfekgacfmimhbalgcecaijajamchbfaghlljmaihfdajflhmhbgkmjdckdldfgmmcjijebafblikkklbheejfgfhfhmejgfmcakjdfdleejlmaahafgfikhjmlbjbbekbjlkkjflkagmhkfgabcildgfbdckelakmbckeigdddicbkacbfgdejjmegkcflhcajjmhlhkbccfgebhamhgfaggcdjgejcdfcjkcdmbijabjgfbfkgdbagmdflfhfjgaeimajljaamadglkmahjmfbbjhhkmdclcichackjdhmdmegfjdhghmhmkefhklgbjcdbmlblmjmkhcdbdmhhfkhicdlmidbgfcdiakgdmmlldfkafjeaegiifcbkgcbaghbcbcfdmkkalcibdahekgdhkflimafkdekmmdahmhedmakdahjidabhggegfcihkjieeffhefbfjfhemjfbmjfkjidgddimajdimjlljfjahiehafeijhmhilkekdcdiekimaicdfalkgemdjdijfdldajmhgdcmgkcdmmbaiceabkdmejfgdfdcgihibmahmkhmelihggeklgamcecifigekhimdbgkhddlhaeimmgleiikjcjkijfkblgemmefecdahbeckgjjfklmlekkgjlccjfgblkkibljfegbdifcjgdmecglilcmibbdcbficdbheclcejcbagfhgmihamehmligjbmaccimbmejdcabmacfabkkfkacffhhbdechlbgeifjmbkbhdikhahkebafjjkjcejcaciagahjghhjhkeefhjjcfmmahfdkhchhklegjlbbbcdlfcclflgfiibljmbbjhkdjdleegekccaejbhejikkchmmfjejjljiggieabmefajhkgkledgkkejibmbahhehmfdakcfbhemdmemjbgjfgbfgdlflbhkmfackkceeigejdaggfidmfcdaccmmhlmifdddgagmfmejhfbaicccdeijbhefabejkghlmckfdbkjddgdakldccfdgjdghcdhdhjdlkgccehhlbjbkkmeceihgcmiklblkabfmmilicjilgehfhbdihmikgckieggbbbbmmcakkadfbbcffeaijfjmalmlfbdbjdckkfmbefihjiefhfgldmgahmlbgkcdeachjfjccjlcicfleblfdekilcfkgjefflhjckakgkfkdeikhjflddgebmhiiidcdhifhefcdableckklcmiekdgmlcdhjfljlcdbcafekbecaeemgjfcdjhfgeimddmaafihgffmfjmledefikjhefakdiabbkfjkfahhljklagjfbjhjbbcgejbaalhcjdcgfdcbkkjaemmmfgmbdadfmdiaifdmfgfmecdcbkcmbfcgmachffflaicadkjkdekbcidbkcbfdikfdmjlailmgalabejgldcdmfalhakmlgfblikgcaicdmkaiacehchjhkfjflkmfkclibdcljhhgmiecekecdbcemfahfheejmmiljemkdfflfiaijlkilhaeejackljkccllahkfhebmcbimmmbiabaalmdhiebefchkbabgkfmiabdfiaglgbaemmggdebjgbdchakdgekgekflmkllabadegfmegjhkgflelilhghalmmhimelmfcjgiabkbckkkeedbldbdhhmiclfjekmhhhfcfglclgglmifjihfgfgjgalhhbgbahbdfbdmjdlglicjhahljkejkcafdlikahemllljhgkeeiblkhfkjalgflcdlidkdceiefgjlifllchkhdmekimflfakiahbliflilkcmiihhckilkgkhlekfaikkjklbjjfabdfjeiikkibflgaediekjdiaiabileafkehimhbhbmmhcbdgfhiigbdebimecfhllaggdhlmfhijiekaaaffhmimejjcahhckhjmiamgbblkbjdhmmcccidcifmkkjhejicfmegclemfidelicjambgmkjeabffahiemehkglhmfilcbfiglfhfdemebkbmmeeimkadekmelffemllaachaemkikkemehfjkhmdfdkakdgbimedmmckidamlgdfeibkgickhldagfhflmecdmcglifedaeabfckjlkigecfhejlaicfifbffjmejhfbikflickdjadjjfdcglbhljbabefcammkicdlfbiklbjbkjhdcdbfafjleibdhjdcabjlfcddikhjbbchdffjdmdbkmgdafcbjchihjgiiijcgjmjkaahbdhljhfcmljhcaakickjdjifljmhebgkdhlhaadjimhemgbbegcjbgiafbmleklgahdamiegbfkekjkgkejbmlflkkdgkieecgkjhafblgkhhbkdbbfgkggccbgdchflkkcbakhcdkdbiailcighigcdedjekhmhihblgiiciffikaahghababklkegihiflmdahhgjmgbdjgclmjdlgcgeghffmdcahkilbajkggdbdijccmjbbdkhjmefeehfcadgeemghibiiimabmimhhdfffdejjibekdlkjghkhhhaaeemheedhkigcljkfjjmikaaaegjdkiefibcabelijmkgkkchjkaadfhjhackmbjelieefmljfbhkimkifigicmcfiidfcebmeadcagdikcmjcgkcfihdgmkeeigibjidghjmcaeccihdhljcmbdbellbdhfakhmdkjgbcgdkcaefdfkmamfjgkhkdemlmijjfichfkdhejchmmbggedmhifklkckaiciicibcemfhbjbcleljbcdelmbkheafbmddbgdamafgkachfedgahkllkekifldahlmeljkgekljeecmbbidkfhkfkdbkjbljbgbbabmfcbagbebdjiccjgciefkghmclijjhgcjeailbbbbcmjgjcgglggeckdmdmdhhjlgdkijbdefadcklcbjkghahlhafelbbhaeehecbckcdmfkiiadkkcaghbafejclbmbjhddhfibafligideflgdjfleehllfdbacibdbhejbcjldiemhccimgidkmfmgmdihgeelbalfmgghkaecfeijfblghabbkejbmackmkjffbdimccakldblefljbbddbaedjbibhafdjmlflfbgefjcghlgmalbjjbgbgdmbhghajblalbaacdiibhcblijgjcbjbfmedmiahlibbbdidlcelelklflemiemklfdckillga"
6
5
26
sol = Solution()
s = "fehjblcdljlmckggcigkedfjcejklicihegfhkfbgegjiikcjgfacicaiheibcicmbilbkhhejfdifdehbjelcalcjellkaimhelkjhafcmjhikbgihjlmjclibceecelkaccklbdaifgdflidhidagiahlbjcfbijgeldjgedldbdchkblbdmcdjbjhccikelcmjjbfkhlfekdhbcakgbclgeijbdhmcmemebkgjeeeickifjglmjfjcmjidjgjmijceiikhmmaagebhifhkfhemfeigdlijffcjgmdehjgllkaallheikhghceekhcckfegghdcalalhkhlgikaamladheakecccgafkimibhiafkkkdbflklbhdagdefdgjfihbiakmjbdlhmlhalekjhmjagjahbjflkjiljjbgfhmekifjdejijehfgfjajbbabcgdbhmjmjabfackghfjflcejdcbdfdamcagjbgicbilhdmfclmaemdgkfdgegicikmifbkcckfkkblldhidlmfgckiiceghfcedjbaggmfkkfiacaffkfmliligeadeghklcbhdkgdcgkijklhkbgjicmfiffaaebimmeicaajfikmfbfkemmadgdaiiicjfcfeffmmhhejfgilkalglmfbgckgcdmcbhimfkmhmcccibjcalhfbgmhkckjfmdaamaffheimfihmaifalbamkfeibghkghfbmkghdimmjcmbdbafdfakaideemalgijieifiaakdfbcjggmelclmijhjgjigfhcabgmimcmkbdidhdagbbjeablcdleleijagkaijlgfgiehimklcaidcdeaekeddijlhaijlfclfcflblklgadbdabickelhdlkhefilhcecejkfacfbhcabcjjjhllhelljdmkjgihfebdhbiljijlhclmhgejaecihjfigbdmleebhcaehcgadidbfjjhkkcgddlieidgabhhcghaeehbhghhacgckmkhklchaeeieghjibkmebcifllamgflhikhfkhhmaeekecbcgfblbikgehhdjmedggfdghaafmeghiiiaahgilfibddilfbkdgbjiecibbdekhjbkdhigigffcgmbikhdmbgelgkfidfjkddhfifkdgmihkbdlhlmlkhkbjlhdhgaafkcebcjjaagmkecechalmbheieibihefcllgliamigjgbjcjkgdjeimffhehcjciabgjhgkgmcmemfchiemfldfjimmbeiiiaedkhlkeeijecedclbkhkkekjecfjlilidfigammdgjkgahibdbbkbgjgbabebjcglgfaldgiglilhgfbicchideehgffhfcheamklkkdgfmakhdgmdclejcfgfdlmmbgjamlgchaabelcllalccckajmmkfghaefbebaibdkeegicgmfdgbilhllkfhcgfdeddkfciiibgjhikhaagdkkdmjllalfifjcijhljfebiaflhjdkhmaeejgjkkaelgglefccejidmgkddekjjffcbfjmbmkihmemaibadaihhchdfgiejglmkclcfjgajlgbeillgfbhkgldmfekjbdegjmiddaeaebiaedkdbmciceggbalffddijfccadhhkfgebakkfcmdegdkdbglaeblabjahcjillgmihifbgmiejbefjjecgfkjibejeemcibmcmiifmaiggljgikhiebgijfjafchcjbdmiffjigkmcfhejjagmddjmeckcdhbbdgdcmgfhlcaggjlijjhghihlammgkdekgbkfellfdkcfkigjjecffmgeikafadbfdaadiembbmiadbkbljmkfedllghlhemeaimbamlfcehegbgccfbcjblahdlaakeafmlkjljlkiaglmeideifgdbadjehhmmkfhdkldebegbbiiblkmidlmeejlaemkhfajmidlfcjgiejmmihllbigelbekkfagdcjdbmifdmmchcllmihjlmhblkfcbcjiiaejhgldjmieejhjiadfkfmgamcdlcljbfclkaflhjbeajdkdkjecifikmleblijjedcaccikggjcgidmfjegkbhcacalmbcdgbfjkjajclgdbfcdkemajlajeklieibjhcdheglagfeeagjbacmjdhadgelhemeefikmejlkdcghahfdkhaacghieffcgfgllmdgbkhejkjdcdddhdfdcdidejaekjeclccmedjjmaellmcgfiacbhdfmcdcielcalchbgagelhjjmmkljfagkfjijmddafglimkekjagmhgfiidjefjfmaihhbhhhaafhiekmdkgidjmljfgmgcijbbjmbjiikailalbffjhedbfbbhcbbbicblagibbdamalkiblhblhacdckllbliccmjgedkjbeihhglhbcfaefaimlbjfhmjadlmgdikjjkkghidlfblkdgdbagkldghadhmmckfhkddedlgdfdifghagkdjiklmfbdajfemjcjlamfflgiekmabhcigclbdfefkfmdaffeccgcdflacahhademhjlchabeabbfjfeefhmmbaajmmlmgfhbclkfaihkehjljjhdbkkieikajbbgmfiilkehcliacgggmidlkgjmcjkhjklddijjmjdkejajgllcechmmbfbibdddfgakfmgebkfcbbkjehemckcaefimgfiamhddahklgdhcdgicdmmdfgemlhdcaglcdkeehjkccgcllcldbkggjihdafcfkhkifmkadgkmbgkbgkmilldfhjebdjdfkhmfdhldjmkbcebbbaiemgkihggeebkaibkhajkamfhcbcckgkjbfamlbghhdcehigmehmafalbjedgdgddgjfkfmmeicjlcaajemkjiligbfcbliagicggjclclgidkibkddfgfkclfgdblfebfkcjelghejlejckbgiibedgaebaffcleemmcdgfgjlhdagdmgagiambakabajcjmlifiikckjjfbmafiahmlbhcfegdaekjcgjdbhefkcfdcgkkmlibchbfjbalkbkmgjfbgjlbiffeeabbmgjgbillamjeefklbbibkddcifdakjdlekbkcemkmgdhabdeiccijlicgaecbefmcjeemccegaldfaeafdedbakmiaakjlcbddkkidmkdkdifdgaeflhbkbadgebhhhlaeeajfheamkfkakgmamhaialdmbllbddfidaibffmihfehddlhbemlgdkkikfhkigfkbfjijfiahkfhihkgmblfgidflleameaicgkmimdejkkddddfagfjceffmmkmcffkdfmfjbgjdkbgbelkgjcfhiijlijfeiimcblamiecbmaifejeklfeggfkeiamalhjgklhaellimjelhbgjcghjbfkdjhlmhgkafkkdkkfldbafljgchilbleabgiejfgjhhgcejjjbhkmblkiljbeafhlbdecimdejflhkbkccbkmljldjaihddjmajefjkkdmjkhghdhkhbhmkhjkldlfjjdhdklkheajceelahchhicmkjhekdejdefabaceemjbhimlfjihdmcbhlgihkhgdaibgfbfebadiadkmbjmhgifhefejjgkihkfcbdkjcecjmcifjidfegblklbbabjcfbighkaemgklbidlckebdlgmklifibghalbglmaihkggjcjljgibahghealfhhfiglljdhbffleccdjechchicddkfgimahhmgbjhdlheadfmahelbkhkkgmchljaaekcjhclhghdkebfkcadfajbihemfmjibaidhabdmblakajkddbajemkhebkdkafchalahkijkblmmfakkmdeikhbfhmekakhkmfgjkgljggacmamklbmkdkldmgggajmkaaeimjbffigdjffemcjdfklgbmclkjfhljhfldjkbdfihcjhiaeccafjajldibdlmbkigidecbecbgmlbfcljhieejegclgdeclcfblglgkbmfkhecjgkkkkleeledlmigcijbblhbkeeeifggbkihglgekbjedficgafflgdmhbgajjdajcjalggbciefmbimgabjcbehacagejjbcldalbgfgmiflicdcbabhkmddemieaheldmihcagiledmafagiajgffflfihfghhkkdhlijdgiimbdefehhdkeakddmhedcamjbimigmfajjemlgfdaalelecbifmkjccaefemaijddlmbkmlldhfbklljdedhahajhjmcmaglmbhjagjiifhkdbiehggajddkjchkbeddkahljjgefeffcbdlhkemmecdmbimdmamljhcicfiaambjehjmkjhfajadkeacgcadmcmfkbghbljbfiadkmaacabflejigcialheaibehjblkieaalbclbmhlfekgmggdakhicfaicceggahmidhemaibaiaabfhdjjifbdbkceicgdikhljdhimamghcgjljacdikilhcahedamkgfafhffmlifdeclkekmchmlbigjhijlmfejjjhcdfmjaggfllkdijhadlgfhiiikefglibjclhgedfdmeifeegeelmliefjfjldkdihciclagljcgajdmeijljfdhjkkajfckgaddeaakmjhhahkijhjhfjijamdeakeabfhfifdfkcejjfdgcjjlehkbmmbabiblgjkdhglgjgecfhicildemlaakikfbcdflejfgclmlclbldgldddclhjgdelfjdegbhglmhakdagmgkecdkeihdijijlkckjbammeiafkhmfjieflkcbhiggdjdeaiccaaaaildkmcffkhajefjakgjcglibjcejabfhlddimighmlcggbebbdlhbbjhikagificilmlcbidehkdfeimialijcbfmlgejldbleljgclfhiamhhgcgfjgcjgkmahkchbagfkkakcklefiimhekhckagcmcjadblhljjljdklcgidggmebmfifbfjcgcbhcgehkdikefecmhajjheaecjdiblhhcfcgfgdkjcfgjmhegahfeamclcmjemidkmkjfaecekchmkigdejeeiihlekgiggkcgmblaiblalacddicmehmjhlhmkfleaamamgbdaghdilgcjmfaklbcbldcmikakbmailkkjjlgjiaddfcbcfciladbeedhglebmefjgjfdhebjikbeldkmjldaekgjglbkiagkmlagblideedeehembjdliladifemkgchmlchlbjiaglmbikleclgeefhjlimalibckjgfjfgffhikllghbldhelgjmiifilgkkbdclkggijikbkieldgmggbjcgcfbjaedgclfahajlahllflihbkmakehbgdjbchdajigbdgiefaaadjkkjbjbekdfhaidjfgjgjablkggbagbbhmlkikdhblmfifldbmefjbljgkmdgbbcellefjgmbeladfjbibbjedccaebjakkadcmclihbgcfmjbdldmfcjifcaadibkfkdighjfhgjjaeifdebdkbjhbkibjimmmembkliildfbchbfablcmmjeigemdlkgbgbcfgibekbihkhklhkhkdacjlibkkdlbebbbdkkfdmlbijhammeeeejlfbheicdbcbgeeccfbabjlhadbhbhkmfgfichadjjiakjgagjadkkbggcjkbdciddjmflgedcihmgalkbehccmcagmmifcckcadgclbehhddbcaaiglachgdmhlammfhifahggigbkjblhlbedjldcjkfkglfjkjidciemkjkhkflfldkbhkjgcigdfdlblfkigalkijgmdmiabdiakbcfdldcmkkffihmemakiakfggadcjccckflemckgldjhiblgkhakfccbabfbjidhfmlbkjbedkfmhjjijijfbemffccmccckmhhaadcamfhmikmabkcmklbcikhkhfmdghhihllmekhefbdhgbdhldakljemeggdgabieebcklgkjmcgddhgfmkdbcafgkmhdjfkgdcfalkaadllcmglbkefkllhjghhdfdejbmfkcagaicfmigbdgaldjebejbhmggbkacickeidiimecglbdeeaceedgabballkmjjbjlkjgcjhiibbiflkggcgdemhimegghdjmlcbmhgmhblegehmecflcmmljakfidkmlbhjjdkhmccadkckalkgdiijmbgmceiejkmkabdbmikmlgabheidhbmdkdalhgfigafmccdhkggmbjabkdflckkflacecklaccmlailedldkkbddcjhbhldkimedlhblckbagdbcekmgicjaeemmjiljbiglfggfmgjmabcialkffdamjgfbgmjdfjgafjehdfcgideedgigalffjgcgdkbkfiijiaiglggdbmbflickgamjgghdllfjmhajmgleebdghejihmimlclfidcalfijmlbmejhijfgfjjhechfachlfekgacfmimhbalgcecaijajamchbfaghlljmaihfdajflhmhbgkmjdckdldfgmmcjijebafblikkklbheejfgfhfhmejgfmcakjdfdleejlmaahafgfikhjmlbjbbekbjlkkjflkagmhkfgabcildgfbdckelakmbckeigdddicbkacbfgdejjmegkcflhcajjmhlhkbccfgebhamhgfaggcdjgejcdfcjkcdmbijabjgfbfkgdbagmdflfhfjgaeimajljaamadglkmahjmfbbjhhkmdclcichackjdhmdmegfjdhghmhmkefhklgbjcdbmlblmjmkhcdbdmhhfkhicdlmidbgfcdiakgdmmlldfkafjeaegiifcbkgcbaghbcbcfdmkkalcibdahekgdhkflimafkdekmmdahmhedmakdahjidabhggegfcihkjieeffhefbfjfhemjfbmjfkjidgddimajdimjlljfjahiehafeijhmhilkekdcdiekimaicdfalkgemdjdijfdldajmhgdcmgkcdmmbaiceabkdmejfgdfdcgihibmahmkhmelihggeklgamcecifigekhimdbgkhddlhaeimmgleiikjcjkijfkblgemmefecdahbeckgjjfklmlekkgjlccjfgblkkibljfegbdifcjgdmecglilcmibbdcbficdbheclcejcbagfhgmihamehmligjbmaccimbmejdcabmacfabkkfkacffhhbdechlbgeifjmbkbhdikhahkebafjjkjcejcaciagahjghhjhkeefhjjcfmmahfdkhchhklegjlbbbcdlfcclflgfiibljmbbjhkdjdleegekccaejbhejikkchmmfjejjljiggieabmefajhkgkledgkkejibmbahhehmfdakcfbhemdmemjbgjfgbfgdlflbhkmfackkceeigejdaggfidmfcdaccmmhlmifdddgagmfmejhfbaicccdeijbhefabejkghlmckfdbkjddgdakldccfdgjdghcdhdhjdlkgccehhlbjbkkmeceihgcmiklblkabfmmilicjilgehfhbdihmikgckieggbbbbmmcakkadfbbcffeaijfjmalmlfbdbjdckkfmbefihjiefhfgldmgahmlbgkcdeachjfjccjlcicfleblfdekilcfkgjefflhjckakgkfkdeikhjflddgebmhiiidcdhifhefcdableckklcmiekdgmlcdhjfljlcdbcafekbecaeemgjfcdjhfgeimddmaafihgffmfjmledefikjhefakdiabbkfjkfahhljklagjfbjhjbbcgejbaalhcjdcgfdcbkkjaemmmfgmbdadfmdiaifdmfgfmecdcbkcmbfcgmachffflaicadkjkdekbcidbkcbfdikfdmjlailmgalabejgldcdmfalhakmlgfblikgcaicdmkaiacehchjhkfjflkmfkclibdcljhhgmiecekecdbcemfahfheejmmiljemkdfflfiaijlkilhaeejackljkccllahkfhebmcbimmmbiabaalmdhiebefchkbabgkfmiabdfiaglgbaemmggdebjgbdchakdgekgekflmkllabadegfmegjhkgflelilhghalmmhimelmfcjgiabkbckkkeedbldbdhhmiclfjekmhhhfcfglclgglmifjihfgfgjgalhhbgbahbdfbdmjdlglicjhahljkejkcafdlikahemllljhgkeeiblkhfkjalgflcdlidkdceiefgjlifllchkhdmekimflfakiahbliflilkcmiihhckilkgkhlekfaikkjklbjjfabdfjeiikkibflgaediekjdiaiabileafkehimhbhbmmhcbdgfhiigbdebimecfhllaggdhlmfhijiekaaaffhmimejjcahhckhjmiamgbblkbjdhmmcccidcifmkkjhejicfmegclemfidelicjambgmkjeabffahiemehkglhmfilcbfiglfhfdemebkbmmeeimkadekmelffemllaachaemkikkemehfjkhmdfdkakdgbimedmmckidamlgdfeibkgickhldagfhflmecdmcglifedaeabfckjlkigecfhejlaicfifbffjmejhfbikflickdjadjjfdcglbhljbabefcammkicdlfbiklbjbkjhdcdbfafjleibdhjdcabjlfcddikhjbbchdffjdmdbkmgdafcbjchihjgiiijcgjmjkaahbdhljhfcmljhcaakickjdjifljmhebgkdhlhaadjimhemgbbegcjbgiafbmleklgahdamiegbfkekjkgkejbmlflkkdgkieecgkjhafblgkhhbkdbbfgkggccbgdchflkkcbakhcdkdbiailcighigcdedjekhmhihblgiiciffikaahghababklkegihiflmdahhgjmgbdjgclmjdlgcgeghffmdcahkilbajkggdbdijccmjbbdkhjmefeehfcadgeemghibiiimabmimhhdfffdejjibekdlkjghkhhhaaeemheedhkigcljkfjjmikaaaegjdkiefibcabelijmkgkkchjkaadfhjhackmbjelieefmljfbhkimkifigicmcfiidfcebmeadcagdikcmjcgkcfihdgmkeeigibjidghjmcaeccihdhljcmbdbellbdhfakhmdkjgbcgdkcaefdfkmamfjgkhkdemlmijjfichfkdhejchmmbggedmhifklkckaiciicibcemfhbjbcleljbcdelmbkheafbmddbgdamafgkachfedgahkllkekifldahlmeljkgekljeecmbbidkfhkfkdbkjbljbgbbabmfcbagbebdjiccjgciefkghmclijjhgcjeailbbbbcmjgjcgglggeckdmdmdhhjlgdkijbdefadcklcbjkghahlhafelbbhaeehecbckcdmfkiiadkkcaghbafejclbmbjhddhfibafligideflgdjfleehllfdbacibdbhejbcjldiemhccimgidkmfmgmdihgeelbalfmgghkaecfeijfblghabbkejbmackmkjffbdimccakldblefljbbddbaedjbibhafdjmlflfbgefjcghlgmalbjjbgbgdmbhghajblalbaacdiibhcblijgjcbjbfmedmiahlibbbdidlcelelklflemiemklfdckillga"
maxLetters = 6
minSize = 5
maxSize = 26
print(len(s))
print(sol.maxFreq(s, maxLetters, minSize, maxSize)) |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This module contains project version information."""
__version__ = "v22.03.4"
|
# Longest Name.
# Create a program that takes an unknown amount of names and outputs the longest name. You will know that the inputs are done when the user has inputted a string of X
user_input = '' # Empty string
longest_name = ''
length = 0
while user_input != 'X':
user_input = input('Enter a name: ')
if user_input != 'X':
current_length = len(user_input) # determining how long the name is
if current_length > length:
length = current_length
longest_name = user_input
# end of while
print(longest_name, 'was the longest name with', length, 'characters.') |
# This program is open source. For license terms, see the LICENSE file.
class Movie():
"""Creates a Movie object with the possibility to define information about
its title, poster URL and YouTube trailer.
Attributes:
title: The title of the movie.
poster_image_url: URL to the poster of the movie.
trailer_youtube_url: URL to the trailer in YouTube.
"""
def __init__(self, title, poster_image_url, trailer_youtube_url):
"""Initiates class Movie, reserving memory space"""
self.title = title
self.poster_image_url = poster_image_url
self.trailer_youtube_url = trailer_youtube_url
|
# -*- coding: utf-8 -*-
{
"name": "WeCom Portal",
"author": "RStudio",
"website": "https://gitee.com/rainbowstudio/wecom",
"sequence": 608,
"installable": True,
"application": True,
"auto_install": False,
"category": "WeCom/WeCom",
"version": "15.0.0.1",
"summary": """
WeCom Portal
""",
"description": """
""",
"depends": ["portal",],
"external_dependencies": {"python": [],},
"data": [
# "data/portal_message_template_data.xml",
"views/portal_templates.xml",
],
"license": "LGPL-3",
}
|
class Solution(object):
def getSum(self, a, b):
"""
:type a: int
:type b: int
:rtype: int
"""
MAX_INT = 0x7FFFFFFF
MIN_INT = 0x80000000
MASK = 0x100000000
while b:
a, b = (a ^ b) % MASK, ((a & b) << 1) % MASK
return a if a <= MAX_INT else ~((a % MIN_INT) ^ MAX_INT) |
# Container for information about the project
class Project(object):
def __init__(self, origin='', commit='', owner='', name=''):
self.origin = origin
self.commit = commit
self.owner = owner
self.name = name
def _asdict(self):
return self.__dict__
def __dir__(self):
return ['origin', 'commit', 'owner', 'name']
# Container for information about the project
class Tree(object):
def __init__(self, name='', children=''):
self.name = name
self.children = children
def _asdict(self):
return self.__dict__
def __dir__(self):
return ['name', 'children']
# Container for information about the project's nodes
class Node(object):
def __init__(self, name='', group='', id='', url='', visibility=1.0, complexity=1.0, quality=1.0, size=1.0, weight=1.0):
self.id = id
self.name = name
self.group = group
self.url = url
self.visibility = visibility
# measures
self.complexity = complexity
self.quality = quality
self.size = size
self.weight = weight
# on-screen display - circle
self.cx = 0.0
self.cy = 0.0
self.r = 0.0
self.style = ""
self.index = 0
self.x = 0.0
self.y = 0.0
self.px = 0.0
self.py = 0.0
self.fixed = False
def _asdict(self):
return self.__dict__
def __dir__(self):
return ['id', 'name', 'group', 'url', 'visibility', 'complexity', 'quality', 'size', 'weight']
# Container for information about the project's links (between nodes)
class Link(object):
def __init__(self, source='', target='', value='', visibility=1.0, complexity=1.0, quality=1.0):
self.source = source
self.target = target
self.value = value
self.visibility = visibility
self.complexity = complexity
self.quality = quality
# on-screen display - line
self.x1 = 0.0
self.y1 = 0.0
self.x2 = 0.0
self.y2 = 0.0
self.style = ""
def _asdict(self):
return self.__dict__
def __dir__(self):
return ['source', 'target', 'value', 'visibility', 'complexity', 'quality']
# Container for information about graph of nodes and links
class Graph(object):
# Note: there were problems with nodes=[], links=[]
# for some reason two consecutively created graphs
# were allocated the same collections of nodes and links
def __init__(self, nodes, links):
self.nodes = nodes
self.links = links
def _asdict(self):
return self.__dict__
# Container for information about project, tree (of files), graph (of many things)
class Magnify(object):
# Note: there were problems with nodes=[], links=[]
# for some reason two consecutively created graphs
# were allocated the same collections of nodes and links
def __init__(self):
self.project = None
self.tree = None
self.graph = None
def _asdict(self):
return self.__dict__
# Container for information about the project and its layers (subgraphs)
class Archive(object):
def __init__(self, project, commits, files, functions):
self.project = project
self.commits = commits
self.files = files
self.functions = functions
def _asdict(self):
return self.__dict__
# For json printing
def default(self):
return self._asdict()
|
with open("my_files_ex1.txt") as f:
file = f.read()
print(file)
|
class NewsModel:
title = None
url = None
def __init__(self, __title__, __url__):
self.title = __title__
self.url = __url__
def __str__(self):
return '''["%s", "%s"]''' % (self.title, self.url)
|
class Operation:
__slots__ = ['session', '_sql', '_table', '_stable', '_col_lst', '_tag_lst', '_operation_history']
def __init__(self, session):
self.session = session
self._sql = []
self._table = None
self._stable = None
self._col_lst = None
self._tag_lst = None
self._operation_history = dict()
def _determine_cols(self, table):
_col_lst = self._col_lst
_all_cols = self.session.get_columns_cache(table)
if not _col_lst:
return _all_cols
if not (set(_col_lst) <= set(_all_cols)):
raise ValueError('Invalid column')
return [o for o in _all_cols if o in _col_lst]
def _determine_tags(self, table):
_tag_lst = self._tag_lst
_all_tags = self.session.get_tags_cache(table)
if not _tag_lst:
return _all_tags
if not (set(_tag_lst) < set(_all_tags)):
raise ValueError('Invalid tag')
return [o for o in _all_tags if o in _tag_lst]
def __repr__(self):
return ' '.join(self._sql)
@staticmethod
def template(args):
"""
according to the order of the fields, return string template
example:
args: (col1, col2, ...)
return:
'({col1}, {col2}, ...)'
"""
args = [o.strip(' ') for o in args]
return '(' + ', '.join(['{%s}' % o for o in args]) + ')'
|
"""
The Data recording and processing program with the use of a derived class of `list` data structure.
"""
# Classes:
class Athlete(list):
def __init__(self, arg_name, arg_dob=None, arg_times=[]):
"""
:param arg_name: The name of the athlete
:param arg_dob: The Date of birth of the athlete
:param arg_times: The list of times for each athlete.
"""
list.__init__([])
self.name = arg_name
self.dob = arg_dob
self.extend(arg_times)
def top3(self):
return sorted(set([sanitize(t) for t in self]))[0:3]
# Static Methods:
def sanitize(time_string):
"""
:param time_string: Input time string, which may have mins and seconds separated by either ':', '-' or '.'
:return: Uniformly formatted time string with mins and secs separated by '.'
"""
if "-" in time_string:
splitter = "-"
elif ":" in time_string:
splitter = ":"
else:
return time_string
(mins, secs) = time_string.split(splitter)
return mins + "." + secs
def data_grepper(file):
try:
with open(file) as data:
line = data.readline().strip().split(',')
athlete = Athlete(line.pop(0), line.pop(0), line)
return athlete
except IOError as err:
print("File Error : " + str(err))
def printObj(ath):
print(ath.name + ", born on " + ath.dob + ", has the top 3 times of : " + str(ath.top3()))
james = data_grepper('athleteTraining/james2.txt') # Contains reference to an object holding all the related data.
julie = data_grepper('athleteTraining/julie2.txt')
mikey = data_grepper('athleteTraining/mikey2.txt')
sarah = data_grepper('athleteTraining/sarah2.txt')
printObj(james)
printObj(julie)
printObj(mikey)
printObj(sarah)
# Testing code for new functionality - we simply use the objects of the class as any other list!
vera = Athlete('Vera Vi', '1991-01-02')
vera.append('1.31')
printObj(vera)
vera.extend(['2.22', "1-21", '2:22'])
printObj(vera) |
jogador = dict()
jogador['Nome'] = str(input('digite o nome do jogador: ')).strip().capitalize()
jogador['Jogos'] = int(str(input('digite a quantidade de jogos do jogador: ')).strip().capitalize())
tot = 0
for c in range(0, jogador['Jogos']):
jogador[f'partida {c+1}'] = int(input(f'quantos gols o jogador {jogador["Nome"]} fez na {c+1}ª partida?'))
tot += jogador[f'partida {c+1}']
print(f'o jogador {jogador["Nome"]} jogou {jogador["Jogos"]} partidas.')
for k in range(0, jogador['Jogos']):
print(f' => Na partida {k+1}, fez {jogador[f"partida {k+1}"]} gols.')
print(f'Foi um total de {tot} gols')
|
def main():
space = " "
item = "o"
buffer = ""
for x in range(2):
spacing = ""
for y in range(50):
spacing += space
buffer = spacing + item
print(buffer)
for z in range(50):
spacing -= space
buffer = item + spacing
print(buffer)
main() |
__name__ = "ratter"
__version__ = "0.1.0-a0"
__author__ = "Fabian Meyer"
__maintainer__ = "Fabian Meyer"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Fraunhofer ISE"
__license__ = "BSD 3-clause"
__url__ = "https://github.com/fa-me/ratter"
__summary__ = """calculate reflection, absorption and transmission of thin layer stacks""" |
def x(city, name):
city = open(city,'r').read()
city = city.replace('\n', ';')
city = city.split(';')
success = []
for i in city:
if i:
success.append(i.lstrip().rstrip())
success = {k: v for k, v in zip(success[::2], success[1::2])}
file = open('{0}.txt'.format(name), 'w')
print(success,file=file)
file.close()
res = open('{0}.txt'.format(name), 'r')
finish = res.read()
res.close()
return print(finish)
one = input('Data')
two = input('name city')
x(one, two)
|
users = [
{"first_name": "Helen", "age": 39},
{"first_name": "anni", "age": 9},
{"first_name": "Buck", "age": 10},
]
def get_user_name(users):
"""Get name of the user in lower case"""
return users["first_name"].lower()
def get_sorted_dictionary(users):
"""Sort the nested dictionary"""
if not isinstance(users, dict):
raise ValueError("Not a correct dictionary")
if not len(users):
raise ValueError("Empty dictionary")
users_by_name = sorted(users, key=get_user_name)
print(users_by_name) |
'''Libraries used by the main program
All the libraries that will be used by the
main program will be placed here.
Contains Library with functions to create latex report.
''' |
"""
Author: Ao Wang
Date: 08/27/19
Description: Brute force decryption of the Simplified Columnar Cipher w/o asking the key
"""
LETTERS_AND_SPACE = "abcdefghijklmnopqrstuvwxyz" + ' \t\n'
# The function returns a list of words from two word text files
def loadDictionary():
with open("words.txt", "r") as f1:
file1 = f1.read().split("\n")
with open("morewords.txt", "r") as f2:
file2 = f2.read().split("\n")
# second text file was all uppercase, so needed to become lowercase
for i in range(len(file2)):
file2[i] = file2[i].lower()
# extend the second file by adding the first
file2.extend(file1)
return file2
ENGLISH_WORDS = loadDictionary()
# The function removes non characters in the LETTERS_AND_SPACE variable
def removeNonLetters(msg):
lettersOnly = []
for symbol in msg:
if symbol in LETTERS_AND_SPACE:
lettersOnly.append(symbol)
return "".join(lettersOnly)
# The function returns the percentage of words in the dictionary
def getEnglishCount(msg):
msg = msg.lower()
msg = removeNonLetters(msg)
possibleWords = msg.split()
if possibleWords == []:
return 0
matches = 0
for word in possibleWords:
if word in ENGLISH_WORDS:
matches += 1
return 100 * float(matches)/len(possibleWords)
# The function hacks the Columnar Transposition Cipher and prints out the key, decrypted message, and percentage
# of the words in the dictionary
def hackTransposition(msg):
print("Hacking...")
percentages = {}
# try keys from 1 to the length of the message
for key in range(1, len(msg)):
decryptedText = decrypt(msg, key)
# if the percentage of words in the word dictionary is above 80%, then add the keys and percentages
# into the dictionary
threshold = 80
if getEnglishCount(decryptedText) > threshold:
percentages[key] = getEnglishCount(decryptedText)
key_break = findMaxInd(percentages)
if key_break == -1:
print("Failed to hack cipher :(")
else:
print("Cipher hacked! :)")
print()
print("The key is: " + str(key_break))
print("Decrypted text: " + decrypt(msg, key_break) + "\n")
print("Percentage of words in dictionary: " + str(percentages[key_break]))
# The function finds the highest percentage of words in the dictionary and returns the key
def findMaxInd(keys):
maximum = -1
max_key = -1
for key in keys:
if keys[key] > maximum:
maximum = keys[key]
max_key = key
return max_key
def main():
with open("msg.txt", "r") as file:
myMessage = file.read()
hackTransposition(myMessage)
if __name__ == "__main__":
main()
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
cudnn_benchmark = True
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
with_cp=True,
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch'),
neck=dict(
relu_before_extra_convs=True,
no_norm_on_lateral=True,
norm_cfg=norm_cfg),
bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg))
# training and testing settings
train_cfg = dict(assigner=dict(neg_iou_thr=0.5))
# dataset settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(
type='Mosaic',
sub_pipeline=[
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1.4, 1.4),
prob=1.0),
dict(
type='RandomCrop',
crop_size=None,
min_crop_size=0.4286, # 0.6 / 1.4
allow_negative_crop=True),
dict(type='Resize', img_scale=(640, 640), keep_ratio=False)
],
size=(640, 640),
min_offset=0.2),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(640, 640),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=64),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=4,
train=dict(pipeline=train_pipeline, num_samples_per_iter=4),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
type='SGD',
lr=0.08,
momentum=0.9,
weight_decay=0.0001,
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=0.1,
step=[30, 40])
# runtime settings
total_epochs = 50
|
"""
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
n = int(input())
s = input().strip()
ans = max(s)
idx = s.index(ans)
for i in range(ord(ans) - 1, ord('a') - 1, -1):
try:
idx = s.index(chr(i), idx + 1)
ans += chr(i)
except ValueError:
pass
print(ans)
|
"""
Name : Breaking the records
Category : Implementation
Difficulty : Easy
Language : Python3
Question Link : https://www.hackerrank.com/challenges/breaking-best-and-worst-records/problem
"""
n = int(input())
score = list(map(int, input().split()))
a = b = score[0]
r1 = r2 = 0
for i in score[1:]:
if i > a:
a = i
r1 += 1
if i < b:
b = i
r2 += 1
print(r1,r2) |
"""Crie um programa que leia uma frase qualquer e diga se ela é um palíndromo,
desconsiderando os espaços. Exemplos de palíndromos:
APOS A SOPA, A SACADA DA CASA, A TORRE DA DERROTA, O LOBO AMA O BOLO, ANOTARAM A DATA DA MARATONA."""
frase = str(input('Digite uma frase (sem pontuação ou acento): ')).strip().upper().replace(' ', '')
inverso = frase[::-1].replace(' ', '')
print(f'O inverso de {frase} é {inverso}')
if frase == inverso:
print('A frase é um palíndromo.')
else:
print('A frase NÃO é um palíndromo.')
|
#
# PySNMP MIB module HH3C-SESSION-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HH3C-SESSION-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:16:47 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint")
hh3cCommon, = mibBuilder.importSymbols("HH3C-OID-MIB", "hh3cCommon")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Unsigned32, ModuleIdentity, MibIdentifier, NotificationType, Integer32, IpAddress, TimeTicks, Counter64, Gauge32, iso, Bits, Counter32, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "ModuleIdentity", "MibIdentifier", "NotificationType", "Integer32", "IpAddress", "TimeTicks", "Counter64", "Gauge32", "iso", "Bits", "Counter32", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
hh3cSession = ModuleIdentity((1, 3, 6, 1, 4, 1, 25506, 2, 149))
hh3cSession.setRevisions(('2013-12-20 00:00',))
if mibBuilder.loadTexts: hh3cSession.setLastUpdated('201312200000Z')
if mibBuilder.loadTexts: hh3cSession.setOrganization('Hangzhou H3C Technologies Co., Ltd.')
hh3cSessionTables = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 149, 1))
hh3cSessionStatTable = MibTable((1, 3, 6, 1, 4, 1, 25506, 2, 149, 1, 1), )
if mibBuilder.loadTexts: hh3cSessionStatTable.setStatus('current')
hh3cSessionStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25506, 2, 149, 1, 1, 1), ).setIndexNames((0, "HH3C-SESSION-MIB", "hh3cSessionStatChassis"), (0, "HH3C-SESSION-MIB", "hh3cSessionStatSlot"), (0, "HH3C-SESSION-MIB", "hh3cSessionStatCPUID"))
if mibBuilder.loadTexts: hh3cSessionStatEntry.setStatus('current')
hh3cSessionStatChassis = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 149, 1, 1, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 65534)))
if mibBuilder.loadTexts: hh3cSessionStatChassis.setStatus('current')
hh3cSessionStatSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 149, 1, 1, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 65534)))
if mibBuilder.loadTexts: hh3cSessionStatSlot.setStatus('current')
hh3cSessionStatCPUID = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 149, 1, 1, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 7)))
if mibBuilder.loadTexts: hh3cSessionStatCPUID.setStatus('current')
hh3cSessionStatCount = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 149, 1, 1, 1, 4), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cSessionStatCount.setStatus('current')
hh3cSessionStatCreateRate = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 149, 1, 1, 1, 5), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cSessionStatCreateRate.setStatus('current')
mibBuilder.exportSymbols("HH3C-SESSION-MIB", hh3cSessionStatEntry=hh3cSessionStatEntry, hh3cSessionTables=hh3cSessionTables, hh3cSessionStatCount=hh3cSessionStatCount, hh3cSessionStatSlot=hh3cSessionStatSlot, hh3cSessionStatCreateRate=hh3cSessionStatCreateRate, hh3cSession=hh3cSession, hh3cSessionStatTable=hh3cSessionStatTable, PYSNMP_MODULE_ID=hh3cSession, hh3cSessionStatChassis=hh3cSessionStatChassis, hh3cSessionStatCPUID=hh3cSessionStatCPUID)
|
def getYears():
"""Return a list of years for which data is available."""
years = [1900, 1901, 1902, 1903, 1905, 1905, 1906, 1907, 1908, 1909, 1910]
return years
def getSentencesForYear(year):
"""Return list of sentences in given year.
Each sentence is a list of words.
Each word is a string.
Returns a list of lists of strings."""
sentence1 = ['this', 'is', 'one', 'sentence']
sentence2 = ['this', 'is', 'another', 'sentence']
sentence3 = ['and', 'yet', 'another', 'one']
sentences = [sentence1, sentence2, sentence3]
return sentences
|
# class object to store neural signal data
class NeuralBit:
def __init__(self, value):
self.value = value
def bit(self):
return self.value
class NeuralWave:
def __init__(self, path, label, output_matrix_size):
""" Specify the path to the data set, upload the data points"""
self.label = label
self.raw_data = []
self.sampled_data = []
self.output_matrix = []
self.initial_mse = None
self.optimized_mse = None
file = open(path, "r")
for val in file.read().split(','):
self.raw_data.append(int(val))
file.close()
# configure the DNN output matrix of the neural wave
for i in range(output_matrix_size):
if i == label:
self.output_matrix.append(1.00)
else:
self.output_matrix.append(0.00)
def data_label(self):
return self.label
def raw(self):
return self.raw_data
def raw_bit(self, index):
if (index < 0) | (index >= len(self.raw_data)):
return -1
else:
return self.raw_data[index]
def sampled(self):
return self.sampled_data
def sampled_bit(self, index):
if (index < 0) | (index >= len(self.sampled_data)):
return -1
else:
return self.sampled_data[index]
def clear_sampled_matrix(self):
del self.sampled_data[:]
def dnn_matrix(self):
return self.output_matrix
def replace_raw_bit(self, index, value):
if (index < 0) | (index >= len(self.raw_data)):
pass
else:
self.raw_data[index] = value
def push_sampled_bit(self, val):
self.sampled_data.append(val)
def raw_data_length(self):
return len(self.raw_data)
def sampled_data_length(self):
return len(self.sampled_data)
|
file_path = 'D12/input.txt'
#file_path = 'D12/test.txt'
#file_path = 'D12/test2.txt'
with open(file_path) as f:
text = f.read().split('\n')
def reset(text):
data = []
for i in text:
data.append(i.split('-'))
distinctNodes = []
for i in data:
if i[0] not in distinctNodes:
distinctNodes.append(i[0])
if i[1] not in distinctNodes:
distinctNodes.append(i[1])
nodeDictionary = {}
for i in distinctNodes:
nodeDictionary[i] = []
for pair in range(0,len(data)):
nodeDictionary[data[pair][0]].append(data[pair][1])
nodeDictionary[data[pair][1]].append(data[pair][0])
visitedList = [[]]
return nodeDictionary, data, visitedList, distinctNodes
def Pt1ElectricBungalung(nodeDictionary, currentVertex, visited, visitedlc):
visited.append(currentVertex)
if currentVertex.upper() != currentVertex:
visitedlc.append(currentVertex)
for vertex in nodeDictionary[currentVertex]:
if vertex not in visitedlc:
Pt1ElectricBungalung(nodeDictionary, vertex, visited.copy(), visitedlc.copy())
visitedList.append(visited)
def Pt2ElectricBoogaloo(nodeDictionary,positionArray,trialNode):
if positionArray[-1]=="end":
pt2ReturnArray.add(tuple(positionArray))
return pt2ReturnArray
for dictItem in nodeDictionary[positionArray[-1]]:
if dictItem.upper()!=dictItem:
if trialNode== "" and dictItem!= "start":
Pt2ElectricBoogaloo(nodeDictionary,positionArray+[dictItem],dictItem)
if not dictItem in positionArray:
Pt2ElectricBoogaloo(nodeDictionary,positionArray+[dictItem],"")
elif trialNode==dictItem:
if positionArray.count(dictItem)==1:
Pt2ElectricBoogaloo(nodeDictionary,positionArray+[dictItem],dictItem)
else:
if dictItem not in positionArray:
Pt2ElectricBoogaloo(nodeDictionary,positionArray+[dictItem],trialNode)
else:
Pt2ElectricBoogaloo(nodeDictionary,positionArray+[dictItem],trialNode)
return pt2ReturnArray
###################################
# Part 1
###################################
nodeDictionary,data,visitedList,distinctNodes = reset(text)
Pt1ElectricBungalung(nodeDictionary, 'start', [], [])
visitedList.remove([])
solutionList = []
for i in range(0,len(visitedList)):
if visitedList[i][-1] == 'end':
solutionList.append(visitedList[i])
print('Part 1: ', len(solutionList))
###################################
# Part 2
###################################
nodeDictionary,data,visitedList,distinctNodes = reset(text)
pt2ReturnArray = set()
resultArray = Pt2ElectricBoogaloo(nodeDictionary,["start"],"")
print('Part 2: ', len(resultArray)) |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Написать программу, которая считывает текст из файла и выводит на экран все его
# предложения в обратном порядке
if __name__ == '__main__':
with open('text.txt', 'r') as f:
text = f.read().split()
i = len(text) - 1
while i >= 0:
print(text[i])
i -= 1 |
'''num = []
for n in range(0, 4):
num.append(int(input(f'Digite um número na posição {n}:')))
print(f'O MAIOR número foi {max(num)} nas posiçoes ', end='')
for i, v in enumerate(num):
if v == max(num):
print(f'{i}...', end='' )
print()
print(f'O MENOR número foi {min(num)} nas posições ', end='')
for i, v in enumerate(num):
if v == min(num):
print(f'{i}...', end='')'''
'''lista_nome = []
for cc in range(0, 5):
nome = str(input('Digite um nome:')).strip().upper()
if nome[0] == 'C':
lista_nome.append(nome)
print(lista_nome)'''
|
def generate_permutations(perm, n):
if len(perm) == n:
print(perm)
return
for k in range(n):
if k not in perm:
perm.append(k)
generate_permutations(perm, n)
perm.pop()
generate_permutations(perm=[], n=4)
|
hip = 'hip'
thorax = 'thorax'
r_hip = 'r_hip'
r_knee = 'r_knee'
r_ankle = 'r_ankle'
r_ball = 'r_ball'
r_toes = 'r_toes'
l_hip = 'l_hip'
l_knee = 'l_knee'
l_ankle = 'l_ankle'
l_ball = 'l_ball'
l_toes = 'l_toes'
neck_base = 'neck'
head_center = 'head-center'
head_back = 'head-back'
l_uknown = 'l_uknown'
l_shoulder = 'l_shoulder'
l_elbow = 'l_elbow'
l_wrist = 'l_wrist'
l_wrist_2 = 'l_wrist_2'
l_thumb = 'l_thumb'
l_little = 'l_little'
l_little_2 = 'l_little_2'
r_uknown = 'r_uknown'
r_shoulder = 'r_shoulder'
r_elbow = 'r_elbow'
r_wrist = 'r_wrist'
r_wrist_2 = 'r_wrist_2'
r_thumb = 'r_thumb'
r_little = 'r_little'
r_little_2 = 'r_little_2'
pelvis = 'pelvis'
links = (
(r_hip, thorax),
# (r_hip, pelvis),
(r_knee, r_hip),
(r_ankle, r_knee),
(r_ball, r_ankle),
(r_toes, r_ball),
(l_hip, thorax),
# (l_hip, pelvis),
(l_knee, l_hip),
(l_ankle, l_knee),
(l_ball, l_ankle),
(l_toes, l_ball),
(neck_base, thorax),
# (head_center, head_back),
# (head_back, neck_base),
# (head_back, head_center),
# (head_center, neck_base),
(head_back, neck_base),
(head_center, head_back),
(l_shoulder, neck_base),
(l_elbow, l_shoulder),
(l_wrist, l_elbow),
(l_thumb, l_wrist),
(l_little, l_wrist),
(r_shoulder, neck_base),
(r_elbow, r_shoulder),
(r_wrist, r_elbow),
(r_thumb, r_wrist),
(r_little, r_wrist),
# (pelvis, thorax),
)
links_simple = (
(r_hip, thorax),
# (r_hip, pelvis),
(r_knee, r_hip),
(r_ankle, r_knee),
(r_ball, r_ankle),
(r_toes, r_ball),
(l_hip, thorax),
# (l_hip, pelvis),
(l_knee, l_hip),
(l_ankle, l_knee),
(l_ball, l_ankle),
(l_toes, l_ball),
(neck_base, thorax),
# (head_center, head_back),
# (head_back, neck_base),
# (head_back, head_center),
# (head_center, neck_base),
(head_back, neck_base),
(head_center, head_back),
(l_shoulder, neck_base),
(l_elbow, l_shoulder),
(l_wrist, l_elbow),
(r_shoulder, neck_base),
(r_elbow, r_shoulder),
(r_wrist, r_elbow),
# (pelvis, thorax),
)
links_simple2 = (
(r_hip, pelvis),
(r_knee, r_hip),
(r_ankle, r_knee),
(r_toes, r_ankle),
(l_hip, pelvis),
(l_knee, l_hip),
(l_ankle, l_knee),
(l_toes, l_ankle),
(neck_base, pelvis),
(head_back, neck_base),
(l_shoulder, neck_base),
(l_elbow, l_shoulder),
(l_wrist, l_elbow),
(r_shoulder, neck_base),
(r_elbow, r_shoulder),
(r_wrist, r_elbow),
)
joint_indices = {
hip: 0,
thorax: 12,
r_hip: 1,
r_knee: 2,
r_ankle: 3,
r_ball: 4,
r_toes: 5,
l_hip: 6,
l_knee: 7,
l_ankle: 8,
l_ball: 9,
l_toes: 10,
neck_base: 13,
head_center: 14,
head_back: 15,
l_uknown: 16,
l_shoulder: 17,
l_elbow: 18,
l_wrist: 19,
l_wrist_2: 20,
l_thumb: 21,
l_little: 22,
l_little_2: 23,
r_uknown: 24,
r_shoulder: 25,
r_elbow: 26,
r_wrist: 27,
r_wrist_2: 28,
r_thumb: 29,
r_little: 30,
r_little_2: 31,
pelvis: 11
}
joints_eval_martinez = {
'Hip': 0,
'RHip': 1,
'RKnee': 2,
'RFoot': 3,
'LHip': 6,
'LKnee': 7,
'LFoot': 8,
'Spine': 12,
'Thorax': 13,
'Neck/Nose': 14,
'Head': 15,
'LShoulder': 17,
'LElbow': 18,
'LWrist': 19,
'RShoulder': 25,
'RElbow': 26,
'RWrist': 27
}
official_eval = {
'Pelvis': (pelvis),
'RHip': (r_hip),
'RKnee': (r_knee),
'RAnkle': (r_ankle),
'LHip': (l_hip),
'LKnee': (l_knee),
'LAnkle': (l_ankle),
'Spine1': (thorax),
'Neck': (head_center),
'Head': (head_back),
'Site': (neck_base),
'LShoulder': (l_shoulder),
'LElbow': (l_elbow),
'LWrist': (l_wrist),
'RShoulder': (r_shoulder),
'RElbow': (r_elbow),
'RWrist': (r_wrist)}
official_eval_indices = {k: joint_indices[v] for k, v in official_eval.items()}
def get_link_indices(links):
return [(joint_indices[x], joint_indices[y]) for x, y in links]
simple_link_indices = get_link_indices(links_simple)
simple2_link_indices = get_link_indices(links_simple2)
link_indices = get_link_indices(links)
def get_lr_correspondences():
paired = []
for limb in joint_indices.keys():
if limb[:2] == 'l_':
paired.append(limb[2:])
correspond = []
for limb in paired:
correspond.append((joint_indices['l_' + limb], joint_indices['r_' + limb]))
return correspond
|
# Time complexity is O(n)
def leaders_to_right(iterable):
"""
Leaders-to-right in the iterable is defined as if an element in the iterable
is greater than all other elements to it's right side
:param iterable: It should be of either list or tuple types containing numbers
:return: list of tuples containing leader element and its index
Eg: leaders_to_right({5, 6, 7, 3, 6]) gives [(7, 2) (6, 4)]
Here the elements at the indexes 2, 4 are greater than all the elements to its right
"""
# To check whether the given iterable is list or tuple
if type(iterable) == list or type(iterable) == tuple:
pass
else:
raise TypeError("Iterable should be of either list or tuple")
# To check whether all the given items in the iterable are numbers only
for item in iterable:
if not isinstance(item, int):
raise ValueError("Only numbers are accepted in the iterable")
# List to store the right leaders
leaders_list = []
if len(iterable) > 0:
leaders_list.append((iterable[-1], len(iterable) - 1))
for i in range(len(iterable) - 2, -1, -1):
if iterable[i] >= leaders_list[-1][0]:
leaders_list.append((iterable[i], i))
return list(reversed(leaders_list))
|
a=0
b=1
while a<10:
print(a)
a,b=b,a+b
|
class Distances(object):
"""description of class"""
def __init__(self, root):
self.root = root
self.cells = {}
"""Root cell is distance 0"""
self.cells[root] = 0
def GetCellDistance(self, cell):
"""Gets the cell distance from the root cell"""
dist = self.cells.get(cell)
return dist
def SetCellDistance(self, cell, distance):
"""Sets the cell distance from the root cell"""
self.cells[cell] = distance
def GetCellsPresent(self):
"""Returns a list of the cells present"""
return self.cells.keys
def PathTo(self, goal):
"""Figures out the path to goal from the original starting cell"""
current = goal
breadcrumbs = Distances(self.root)
breadcrumbs.cells[current] = self.cells[current]
while current != self.root:
for neighbour in current.links:
if self.cells[neighbour] < self.cells[current]:
breadcrumbs.cells[neighbour] = self.cells[neighbour]
current = neighbour
break
return breadcrumbs
def Max(self):
"""Get the furthest cell and its distance from the root cell"""
maxDistance = 0
maxCell = self.root
for cell, distance in self.cells.items():
if distance > maxDistance:
maxCell = cell
maxDistance = distance
return maxCell, maxDistance
|
#program to read the mass data and find the number of islands.
c=0
def f(x,y,z):
if 0<=y<10 and 0<=z<10 and x[z][y]=='1':
x[z][y]='0'
for dy,dz in [[-1,0],[1,0],[0,-1],[0,1]]:f(x,y+dy,z+dz)
print("Input 10 rows of 10 numbers representing green squares (island) as 1 and blue squares (sea) as zeros")
while 1:
try:
if c:input()
except:break
x = [list(input()) for _ in [0]*10]
c=1;b=0
for i in range(10):
for j in range(10):
if x[j][i]=='1':
b+=1;f(x,i,j)
print("Number of islands:")
print(b) |
def seq(a, d, n):
res = str(a)
for i in range(n-1):
a += d
a %= 10
res += str(a)
return res[::-1]
l = input()
r = input()
limits = set()
for a in range(10):
for d in range(10):
limits.add(int(seq(a, d, len(l))))
limits.add(int(seq(a, d, len(r))))
res = max(99*(len(r) - len(l) - 1), 0)
for limit in limits:
if int(l) <= limit and limit <= int(r):
res += 1
print(res)
|
"""This is my module.
Pretty pointless tbh, has only one function, welcome()
Don't judge me this was a primer on using help()"""
def welcome(person='Person'):
"""The welcome() function
Takes an argument person, or defaults person to "Person", prints __name__
and welcomes the person.
Now scram."""
print(f'In {__name__}')
print(f'Welcome, {person}')
if __name__ == '__main__':
welcome()
|
def part1():
data = []
with open("C:\\Dev\\projects\\advent-of-code\\python\\day10\\input.txt") as f:
data = [int(x) for x in f.readlines()]
data.append(0)
data.append(max(data) + 3)
data.sort()
diffs = [0] * 4
for i in range(1, len(data)):
d = data[i] - data[i-1]
diffs[d] += 1
ans = diffs[1] * diffs[3]
print(str(ans))
def part2():
with open("C:\\Dev\\projects\\advent-of-code\\python\\day10\\input.txt") as f:
data = [int(x) for x in f.readlines()]
data.append(0)
data.append(max(data) + 3)
data.sort()
paths = [0] * (max(data) + 1)
paths[0] = 1
for i in range(1, max(data) + 1):
for x in range(1, 4):
if (i - x) in data:
paths[i] += paths[i - x]
print(str(paths[-1]))
part1()
part2() |
class Facility:
id = 0
operator = None
name = None
bcghg_id = None
type = None
naics = None
description = None
swrs_facility_id = None
production_calculation_explanation = None
production_additional_info = None
production_public_info = None
ciip_db_id = None
def __init__(self, operator):
self.operator = operator
return
|
numero1=int(input("Digite Numero Uno: "))
numero2=int(input("Digite Numero Dos: "))
operador=input(" * / + - %: ")
if(operador=="+"):
funcion=lambda a,b: a+b
elif(operador=="-"):
funcion=lambda a,b: a-b
elif(operador=="/"):
funcion=lambda a,b:a/b
elif(operador=="*"):
funcion=lambda a,b:a*b
elif(operador=="/"):
funcion=lambda a,b:a/b
print("el resultado es: "+str(funcion(numero1,numero2))) |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date : 2019-10-13 14:21:43
# @Author : Racterub (Racter Liu) ([email protected])
# @Link : https://racterub.io
# @License : MIT
data = int(input("Input your dateNumber:")) #測資: 6
if data == 0:
print("Sunday")
elif data == 1:
print("Monday")
elif data == 2:
print("Tuesday")
elif data == 3:
print("Wednesday")
elif data == 4:
print("Thursday")
elif data == 5:
print("Friday")
elif data == 6:
print("Saturday")
else:
print("Wrong input!")
#輸出 Saturday |
Lado_do_Quadrado = int(input('Digite o valor correspondente ao lado de um quadrado: '))
Perimetro_do_Quadrado = Lado_do_Quadrado * 4
Area_do_Quadrado = Lado_do_Quadrado ** 2
print(f'perímetro: {Perimetro_do_Quadrado} - área: {Area_do_Quadrado}')
|
a = 1
b = a+10
print(b)
|
n=99999
t=n
while(t//10!=0):
x=t
sum=0
while(x!=0):
sum += x%10
x=x//10
t=sum
print(sum)
|
lista = list()
cinco = False
while True:
lista.append(int(input('Digite um valor: ')))
continuar = ' '
while continuar not in 'SN':
continuar = str(input('Quer continuar?[S/N]: ')).strip().upper()[0]
if continuar == 'N':
break
if 5 in lista:
cinco = True
lista.sort(reverse=True)
print(f'Foram digitados {len(lista)} números e são estes em ordem decrescente: {lista}')
if cinco is True:
print('O número 5 está nesta lista')
else:
print('O número 5 não faz parte dessa lista')
|
"""
persistor base class
"""
class PersistorBase():
def __init__(self):
pass
def write(self, feature, dumps, **kwargs):
raise NotImplementedError("Persistor write method implementation error!")
def read(self, uid, **kwargs):
raise NotImplementedError("Persistor read method implementation error!")
def delete(self, uid, **kwargs):
raise NotImplementedError("Persistor delete method implementation error!") |
def sum_odd_numbers(numbers):
total = 0
odd_numbers = [num for num in numbers if num % 2 == 0]
for num in odd_numbers:
total += num
return total
sum_odd_numbers([1, 2, 3, 4, 5, 6, 7, 8, 9])
|
# 247. Strobogrammatic Number II
# [email protected]
# A strobogrammatic number is a number that looks the same when rotated 180 degrees (looked at upside down).
# Find all strobogrammatic numbers that are of length = n.
# For example,
# Given n = 2, return ["11","69","88","96"].
class Solution(object):
# sol 1
# runtime: 345ms
def __init__(self):
self.maps = {'0':'0', '1':'1', '6':'9', '8':'8', '9':'6'}
def findStrobogrammatic(self, n):
"""
:type n: int
:rtype: List[str]
"""
res = []
c = ["#"]*n
self.dfs(c, 0, n-1, res)
return res
def dfs(self, c, left, right, res):
if left > right:
s = ''.join(c)
res.append(s)
return
for p in self.maps.iteritems():
if left == right and p[0] in ('6', '9'):
continue
if left != right and left == 0 and p[0] == '0':
continue
c[left], c[right] = p[0], p[1]
self.dfs(c, left + 1, right - 1, res)
# sol 2:
# runtime: 265ms
def findStrobogrammatic(self, n):
oddNum = ['0', '1', '8']
evenNum = ['11', '88', '69', '96', '00']
if n == 1:
return oddNum
if n == 2:
return evenNum[:-1]
if n % 2:
pre, mid = self.findStrobogrammatic(n-1), oddNum
else:
pre, mid = self.findStrobogrammatic(n-2), evenNum
premid = (n-1)/2
return [p[:premid] + c + p[premid:] for c in mid for p in pre]
|
# Copyright 2000-2002 by Andrew Dalke.
# Revisions copyright 2007-2010 by Peter Cock.
# All rights reserved.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Alphabets were previously used to declare sequence type and letters (OBSOLETE).
The design of Bio.Aphabet included a number of historic design choices
which, with the benefit of hindsight, were regretable. Bio.Alphabet was
therefore removed from Biopython in release 1.78. Instead, the molecule type is
included as an annotation on SeqRecords where appropriate.
Please see https://biopython.org/wiki/Alphabet for examples showing how to
transition from Bio.Alphabet to molecule type annotations.
"""
raise ImportError(
"Bio.Alphabet has been removed from Biopython. In many cases, the alphabet can simply be ignored and removed from scripts. In a few cases, you may need to specify the ``molecule_type`` as an annotation on a SeqRecord for your script to work correctly. Please see https://biopython.org/wiki/Alphabet for more information."
)
|
# Tot's reward lv 20
sm.completeQuest(5519)
# Lv. 20 Equipment box
sm.giveItem(2431876, 1)
sm.dispose()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.