hexsha (stringlengths 40-40) | size (int64 5-2.06M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3-248) | max_stars_repo_name (stringlengths 5-125) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (sequencelengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 3-248) | max_issues_repo_name (stringlengths 5-125) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (sequencelengths 1-10) | max_issues_count (int64 1-67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 3-248) | max_forks_repo_name (stringlengths 5-125) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (sequencelengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 5-2.06M) | avg_line_length (float64 1-1.02M) | max_line_length (int64 3-1.03M) | alphanum_fraction (float64 0-1) | count_classes (int64 0-1.6M) | score_classes (float64 0-1) | count_generators (int64 0-651k) | score_generators (float64 0-1) | count_decorators (int64 0-990k) | score_decorators (float64 0-1) | count_async_functions (int64 0-235k) | score_async_functions (float64 0-1) | count_documentation (int64 0-1.04M) | score_documentation (float64 0-1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dbfbafe90d8d62c542ce03ef8a862cdef8687b06 | 5,288 | py | Python | radssh/hostkey.py | Eli-Tarrago/radssh | ebf3c8f17c3768268dcd483e899a590698de4452 | [
"BSD-3-Clause"
] | 39 | 2015-05-11T15:06:58.000Z | 2021-12-29T07:24:23.000Z | radssh/hostkey.py | Eli-Tarrago/radssh | ebf3c8f17c3768268dcd483e899a590698de4452 | [
"BSD-3-Clause"
] | 45 | 2015-01-05T22:11:18.000Z | 2021-06-02T03:57:49.000Z | radssh/hostkey.py | eorochena/radssh | b1d1ee5822036445f26a34147452df5c3142caee | [
"BSD-3-Clause"
] | 13 | 2015-05-05T12:42:09.000Z | 2022-03-03T18:09:49.000Z | #
# Copyright (c) 2014, 2016, 2018, 2020 LexisNexis Risk Data Management Inc.
#
# This file is part of the RadSSH software package.
#
# RadSSH is free software, released under the Revised BSD License.
# You are permitted to use, modify, and redistribute this software
# according to the Revised BSD License, a copy of which should be
# included with the distribution as file LICENSE.txt
#
'''HostKey Handling Module'''
import os
import threading
import warnings
import paramiko.hostkeys
# Deprecated as of 1.1 - Use known_hosts rewrite instead if using this API
warnings.warn(FutureWarning('RadSSH hostkey module is no longer supported, and will be removed in release 2.0. Port existing code to use radssh.known_hosts instead.'))
class CodeMap(object):
    '''Two-way mapping between symbolic names and their integer codes'''
def __init__(self, **kwargs):
self._fwd = kwargs
self._reverse = {}
for k, v in kwargs.items():
self.__setattr__(k, v)
self._reverse[v] = k
def code(self, name):
'''Given a name, return the code'''
return self._fwd[name]
def name(self, code):
        '''Given a code value, return the corresponding name'''
return self._reverse[code]
verify_mode = CodeMap(
# Different options for handling host key verification
# Listed in decreasing order of security/paranoia
reject=0, # Missing keys are rejected
prompt=1, # Missing keys may be accepted, based on user prompt
accept_new=2, # Missing keys automatically accepted
# After this point, key conflicts no longer hinder connections
# Using these options, you become vulnerable to spoofing and
# intercepted traffic for SSH sessions, and you don't care.
ignore=100, # Turn host key verification OFF
overwrite_blindly=666 # Concentrated evil
)
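# Illustrative sketch (added note, not part of the original module): the CodeMap
# built above can be queried in both directions, e.g.
#
#   verify_mode.code('prompt')   # -> 1
#   verify_mode.name(100)        # -> 'ignore'
#   verify_mode.accept_new       # -> 2 (each name is also set as an attribute)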
def printable_fingerprint(k):
'''Convert key fingerprint into OpenSSH printable format'''
fingerprint = k.get_fingerprint()
# Handle Python3 bytes or Python2 8-bit string style...
if isinstance(fingerprint[0], int):
seq = [int(x) for x in fingerprint]
else:
seq = [ord(x) for x in fingerprint]
return ':'.join(['%02x' % x for x in seq])
class HostKeyVerifier(object):
'''Class to control how (if) host keys are verified'''
def __init__(self, mode='reject', known_hosts_file='~/.ssh/known_hosts'):
self.mode = verify_mode.code(mode)
self.hostkeys = paramiko.hostkeys.HostKeys()
self.lock = threading.Lock()
        if self.mode == verify_mode.ignore:
return
self.known_hosts_file = os.path.expanduser(known_hosts_file)
if os.path.exists(self.known_hosts_file):
self.hostkeys.load(self.known_hosts_file)
elif not os.path.exists(os.path.dirname(self.known_hosts_file)):
os.makedirs(os.path.dirname(self.known_hosts_file))
def verify_host_key(self, hostname, key):
'''Verify a single hostkey against a hostname or IP'''
if self.mode == verify_mode.ignore:
return True
# Special formatting for non-standard ports...
if ':' not in hostname:
lookup_name = hostname
elif hostname.endswith(':22'):
lookup_name = hostname[:-3]
else:
host_base, port_base = hostname.rsplit(':', 1)
lookup_name = '[%s]:%s' % (host_base, port_base)
# Try remainder of host verification with locking
self.lock.acquire()
if self.hostkeys.check(lookup_name, key):
self.lock.release()
return True
host_entry = self.hostkeys.lookup(lookup_name)
actual = printable_fingerprint(key)
if host_entry and key.get_name() in host_entry:
# Entry mismatch
expected = printable_fingerprint(host_entry[key.get_name()])
print('Host key mismatch for (%s)' % lookup_name)
print('Expected:', expected)
print('Got :', actual)
if self.mode == verify_mode.overwrite_blindly:
print('Blindly accepting updated host key for %s' % lookup_name)
self.hostkeys.add(lookup_name, key.get_name(), key)
self.hostkeys.save(self.known_hosts_file)
self.lock.release()
return True
else:
# Missing key
if self.mode == verify_mode.reject:
self.lock.release()
return False
accept_and_add = False
if self.mode == verify_mode.prompt:
print('Unverified connection to "%s"' % lookup_name)
print('(Host Key Fingerprint [%s])' % actual)
answer = input('Do you want to accept this key? (y/N): ')
                if answer and answer[0].upper() == 'Y':
accept_and_add = True
if self.mode in (verify_mode.accept_new, verify_mode.overwrite_blindly):
accept_and_add = True
if accept_and_add:
print('Accepting new host key for %s' % lookup_name)
self.hostkeys.add(lookup_name, key.get_name(), key)
self.hostkeys.save(self.known_hosts_file)
self.lock.release()
return True
self.lock.release()
return False
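# Minimal usage sketch (illustrative only, not part of the original module):
#
#   verifier = HostKeyVerifier(mode='accept_new')
#   # `transport` is assumed to be an established paramiko.Transport
#   ok = verifier.verify_host_key('example.com:2022', transport.get_remote_server_key())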
| 39.462687 | 167 | 0.625946 | 3,558 | 0.672844 | 0 | 0 | 0 | 0 | 0 | 0 | 1,810 | 0.342284 |
dbfc3c9f59db54005f9a1ad67dd376c6806f7fa6 | 14,153 | py | Python | nuke/pymmh3.py | jfpanisset/Cryptomatte | d7c71cff17a4e8895eb17520115aa45ff66b8540 | [
"BSD-3-Clause"
] | 543 | 2016-07-07T15:31:01.000Z | 2022-03-31T10:58:32.000Z | nuke/pymmh3.py | jfpanisset/Cryptomatte | d7c71cff17a4e8895eb17520115aa45ff66b8540 | [
"BSD-3-Clause"
] | 143 | 2016-07-07T16:56:38.000Z | 2022-02-23T23:16:52.000Z | nuke/pymmh3.py | jfpanisset/Cryptomatte | d7c71cff17a4e8895eb17520115aa45ff66b8540 | [
"BSD-3-Clause"
] | 158 | 2016-07-07T16:41:49.000Z | 2022-03-21T17:57:28.000Z | '''
pymmh3 was written by Fredrik Kihlander and enhanced by Swapnil Gusani, and is placed in the public
domain. The authors hereby disclaim copyright to this source code.
pure python implementation of the murmur3 hash algorithm
https://code.google.com/p/smhasher/wiki/MurmurHash3
This was written for the times when you do not want to compile c-code and install modules,
and you only want a drop-in murmur3 implementation.
As this is purely python it is FAR from performant and if performance is anything that is needed
a proper c-module is suggested!
This module is written to have the same format as mmh3 python package found here for simple conversions:
https://pypi.python.org/pypi/mmh3/2.3.1
'''
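# Minimal usage sketch (illustrative, not part of the original file; assumes this
# file is importable as `pymmh3`): the module-level functions mirror the mmh3 API.
#
#   import pymmh3
#   pymmh3.hash( 'foo' )                 # 32 bit signed hash
#   pymmh3.hash64( 'foo' )               # tuple of two 64 bit signed halves
#   pymmh3.hash128( 'foo', seed = 42 )   # single 128 bit unsigned value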
import sys as _sys
if (_sys.version_info > (3, 0)):
def xrange( a, b, c ):
return list(range( a, b, c))
def xencode(x):
if isinstance(x, bytes) or isinstance(x, bytearray):
return x
else:
return x.encode()
else:
def xencode(x):
return x
del _sys
def hash( key, seed = 0x0 ):
''' Implements 32bit murmur3 hash. '''
key = bytearray( xencode(key) )
def fmix( h ):
h ^= h >> 16
h = ( h * 0x85ebca6b ) & 0xFFFFFFFF
h ^= h >> 13
h = ( h * 0xc2b2ae35 ) & 0xFFFFFFFF
h ^= h >> 16
return h
length = len( key )
nblocks = int( length / 4 )
h1 = seed
c1 = 0xcc9e2d51
c2 = 0x1b873593
# body
for block_start in range( 0, nblocks * 4, 4 ):
# ??? big endian?
k1 = key[ block_start + 3 ] << 24 | \
key[ block_start + 2 ] << 16 | \
key[ block_start + 1 ] << 8 | \
key[ block_start + 0 ]
k1 = ( c1 * k1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( c2 * k1 ) & 0xFFFFFFFF
h1 ^= k1
h1 = ( h1 << 13 | h1 >> 19 ) & 0xFFFFFFFF # inlined ROTL32
h1 = ( h1 * 5 + 0xe6546b64 ) & 0xFFFFFFFF
# tail
tail_index = nblocks * 4
k1 = 0
tail_size = length & 3
if tail_size >= 3:
k1 ^= key[ tail_index + 2 ] << 16
if tail_size >= 2:
k1 ^= key[ tail_index + 1 ] << 8
if tail_size >= 1:
k1 ^= key[ tail_index + 0 ]
if tail_size > 0:
k1 = ( k1 * c1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( k1 * c2 ) & 0xFFFFFFFF
h1 ^= k1
#finalization
unsigned_val = fmix( h1 ^ length )
if unsigned_val & 0x80000000 == 0:
return unsigned_val
else:
return -( (unsigned_val ^ 0xFFFFFFFF) + 1 )
def hash128( key, seed = 0x0, x64arch = True ):
''' Implements 128bit murmur3 hash. '''
def hash128_x64( key, seed ):
''' Implements 128bit murmur3 hash for x64. '''
def fmix( k ):
k ^= k >> 33
k = ( k * 0xff51afd7ed558ccd ) & 0xFFFFFFFFFFFFFFFF
k ^= k >> 33
k = ( k * 0xc4ceb9fe1a85ec53 ) & 0xFFFFFFFFFFFFFFFF
k ^= k >> 33
return k
length = len( key )
nblocks = int( length / 16 )
h1 = seed
h2 = seed
c1 = 0x87c37b91114253d5
c2 = 0x4cf5ad432745937f
#body
for block_start in range( 0, nblocks * 8, 8 ):
# ??? big endian?
k1 = key[ 2 * block_start + 7 ] << 56 | \
key[ 2 * block_start + 6 ] << 48 | \
key[ 2 * block_start + 5 ] << 40 | \
key[ 2 * block_start + 4 ] << 32 | \
key[ 2 * block_start + 3 ] << 24 | \
key[ 2 * block_start + 2 ] << 16 | \
key[ 2 * block_start + 1 ] << 8 | \
key[ 2 * block_start + 0 ]
k2 = key[ 2 * block_start + 15 ] << 56 | \
key[ 2 * block_start + 14 ] << 48 | \
key[ 2 * block_start + 13 ] << 40 | \
key[ 2 * block_start + 12 ] << 32 | \
key[ 2 * block_start + 11 ] << 24 | \
key[ 2 * block_start + 10 ] << 16 | \
key[ 2 * block_start + 9 ] << 8 | \
key[ 2 * block_start + 8 ]
k1 = ( c1 * k1 ) & 0xFFFFFFFFFFFFFFFF
k1 = ( k1 << 31 | k1 >> 33 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
k1 = ( c2 * k1 ) & 0xFFFFFFFFFFFFFFFF
h1 ^= k1
h1 = ( h1 << 27 | h1 >> 37 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
h1 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h1 = ( h1 * 5 + 0x52dce729 ) & 0xFFFFFFFFFFFFFFFF
k2 = ( c2 * k2 ) & 0xFFFFFFFFFFFFFFFF
k2 = ( k2 << 33 | k2 >> 31 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
k2 = ( c1 * k2 ) & 0xFFFFFFFFFFFFFFFF
h2 ^= k2
h2 = ( h2 << 31 | h2 >> 33 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
h2 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h2 = ( h2 * 5 + 0x38495ab5 ) & 0xFFFFFFFFFFFFFFFF
#tail
tail_index = nblocks * 16
k1 = 0
k2 = 0
tail_size = length & 15
if tail_size >= 15:
k2 ^= key[ tail_index + 14 ] << 48
if tail_size >= 14:
k2 ^= key[ tail_index + 13 ] << 40
if tail_size >= 13:
k2 ^= key[ tail_index + 12 ] << 32
if tail_size >= 12:
k2 ^= key[ tail_index + 11 ] << 24
if tail_size >= 11:
k2 ^= key[ tail_index + 10 ] << 16
if tail_size >= 10:
k2 ^= key[ tail_index + 9 ] << 8
if tail_size >= 9:
k2 ^= key[ tail_index + 8 ]
if tail_size > 8:
k2 = ( k2 * c2 ) & 0xFFFFFFFFFFFFFFFF
k2 = ( k2 << 33 | k2 >> 31 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
k2 = ( k2 * c1 ) & 0xFFFFFFFFFFFFFFFF
h2 ^= k2
if tail_size >= 8:
k1 ^= key[ tail_index + 7 ] << 56
if tail_size >= 7:
k1 ^= key[ tail_index + 6 ] << 48
if tail_size >= 6:
k1 ^= key[ tail_index + 5 ] << 40
if tail_size >= 5:
k1 ^= key[ tail_index + 4 ] << 32
if tail_size >= 4:
k1 ^= key[ tail_index + 3 ] << 24
if tail_size >= 3:
k1 ^= key[ tail_index + 2 ] << 16
if tail_size >= 2:
k1 ^= key[ tail_index + 1 ] << 8
if tail_size >= 1:
k1 ^= key[ tail_index + 0 ]
if tail_size > 0:
k1 = ( k1 * c1 ) & 0xFFFFFFFFFFFFFFFF
k1 = ( k1 << 31 | k1 >> 33 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
k1 = ( k1 * c2 ) & 0xFFFFFFFFFFFFFFFF
h1 ^= k1
#finalization
h1 ^= length
h2 ^= length
h1 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h2 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h1 = fmix( h1 )
h2 = fmix( h2 )
h1 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h2 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
return ( h2 << 64 | h1 )
def hash128_x86( key, seed ):
''' Implements 128bit murmur3 hash for x86. '''
def fmix( h ):
h ^= h >> 16
h = ( h * 0x85ebca6b ) & 0xFFFFFFFF
h ^= h >> 13
h = ( h * 0xc2b2ae35 ) & 0xFFFFFFFF
h ^= h >> 16
return h
length = len( key )
nblocks = int( length / 16 )
h1 = seed
h2 = seed
h3 = seed
h4 = seed
c1 = 0x239b961b
c2 = 0xab0e9789
c3 = 0x38b34ae5
c4 = 0xa1e38b93
#body
for block_start in range( 0, nblocks * 16, 16 ):
k1 = key[ block_start + 3 ] << 24 | \
key[ block_start + 2 ] << 16 | \
key[ block_start + 1 ] << 8 | \
key[ block_start + 0 ]
k2 = key[ block_start + 7 ] << 24 | \
key[ block_start + 6 ] << 16 | \
key[ block_start + 5 ] << 8 | \
key[ block_start + 4 ]
k3 = key[ block_start + 11 ] << 24 | \
key[ block_start + 10 ] << 16 | \
key[ block_start + 9 ] << 8 | \
key[ block_start + 8 ]
k4 = key[ block_start + 15 ] << 24 | \
key[ block_start + 14 ] << 16 | \
key[ block_start + 13 ] << 8 | \
key[ block_start + 12 ]
k1 = ( c1 * k1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( c2 * k1 ) & 0xFFFFFFFF
h1 ^= k1
h1 = ( h1 << 19 | h1 >> 13 ) & 0xFFFFFFFF # inlined ROTL32
h1 = ( h1 + h2 ) & 0xFFFFFFFF
h1 = ( h1 * 5 + 0x561ccd1b ) & 0xFFFFFFFF
k2 = ( c2 * k2 ) & 0xFFFFFFFF
k2 = ( k2 << 16 | k2 >> 16 ) & 0xFFFFFFFF # inlined ROTL32
k2 = ( c3 * k2 ) & 0xFFFFFFFF
h2 ^= k2
h2 = ( h2 << 17 | h2 >> 15 ) & 0xFFFFFFFF # inlined ROTL32
h2 = ( h2 + h3 ) & 0xFFFFFFFF
h2 = ( h2 * 5 + 0x0bcaa747 ) & 0xFFFFFFFF
k3 = ( c3 * k3 ) & 0xFFFFFFFF
k3 = ( k3 << 17 | k3 >> 15 ) & 0xFFFFFFFF # inlined ROTL32
k3 = ( c4 * k3 ) & 0xFFFFFFFF
h3 ^= k3
h3 = ( h3 << 15 | h3 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
h3 = ( h3 + h4 ) & 0xFFFFFFFF
h3 = ( h3 * 5 + 0x96cd1c35 ) & 0xFFFFFFFF
k4 = ( c4 * k4 ) & 0xFFFFFFFF
k4 = ( k4 << 18 | k4 >> 14 ) & 0xFFFFFFFF # inlined ROTL32
k4 = ( c1 * k4 ) & 0xFFFFFFFF
h4 ^= k4
h4 = ( h4 << 13 | h4 >> 19 ) & 0xFFFFFFFF # inlined ROTL32
h4 = ( h1 + h4 ) & 0xFFFFFFFF
h4 = ( h4 * 5 + 0x32ac3b17 ) & 0xFFFFFFFF
#tail
tail_index = nblocks * 16
k1 = 0
k2 = 0
k3 = 0
k4 = 0
tail_size = length & 15
if tail_size >= 15:
k4 ^= key[ tail_index + 14 ] << 16
if tail_size >= 14:
k4 ^= key[ tail_index + 13 ] << 8
if tail_size >= 13:
k4 ^= key[ tail_index + 12 ]
if tail_size > 12:
k4 = ( k4 * c4 ) & 0xFFFFFFFF
k4 = ( k4 << 18 | k4 >> 14 ) & 0xFFFFFFFF # inlined ROTL32
k4 = ( k4 * c1 ) & 0xFFFFFFFF
h4 ^= k4
if tail_size >= 12:
k3 ^= key[ tail_index + 11 ] << 24
if tail_size >= 11:
k3 ^= key[ tail_index + 10 ] << 16
if tail_size >= 10:
k3 ^= key[ tail_index + 9 ] << 8
if tail_size >= 9:
k3 ^= key[ tail_index + 8 ]
if tail_size > 8:
k3 = ( k3 * c3 ) & 0xFFFFFFFF
k3 = ( k3 << 17 | k3 >> 15 ) & 0xFFFFFFFF # inlined ROTL32
k3 = ( k3 * c4 ) & 0xFFFFFFFF
h3 ^= k3
if tail_size >= 8:
k2 ^= key[ tail_index + 7 ] << 24
if tail_size >= 7:
k2 ^= key[ tail_index + 6 ] << 16
if tail_size >= 6:
k2 ^= key[ tail_index + 5 ] << 8
if tail_size >= 5:
k2 ^= key[ tail_index + 4 ]
if tail_size > 4:
k2 = ( k2 * c2 ) & 0xFFFFFFFF
k2 = ( k2 << 16 | k2 >> 16 ) & 0xFFFFFFFF # inlined ROTL32
k2 = ( k2 * c3 ) & 0xFFFFFFFF
h2 ^= k2
if tail_size >= 4:
k1 ^= key[ tail_index + 3 ] << 24
if tail_size >= 3:
k1 ^= key[ tail_index + 2 ] << 16
if tail_size >= 2:
k1 ^= key[ tail_index + 1 ] << 8
if tail_size >= 1:
k1 ^= key[ tail_index + 0 ]
if tail_size > 0:
k1 = ( k1 * c1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( k1 * c2 ) & 0xFFFFFFFF
h1 ^= k1
#finalization
h1 ^= length
h2 ^= length
h3 ^= length
h4 ^= length
h1 = ( h1 + h2 ) & 0xFFFFFFFF
h1 = ( h1 + h3 ) & 0xFFFFFFFF
h1 = ( h1 + h4 ) & 0xFFFFFFFF
h2 = ( h1 + h2 ) & 0xFFFFFFFF
h3 = ( h1 + h3 ) & 0xFFFFFFFF
h4 = ( h1 + h4 ) & 0xFFFFFFFF
h1 = fmix( h1 )
h2 = fmix( h2 )
h3 = fmix( h3 )
h4 = fmix( h4 )
h1 = ( h1 + h2 ) & 0xFFFFFFFF
h1 = ( h1 + h3 ) & 0xFFFFFFFF
h1 = ( h1 + h4 ) & 0xFFFFFFFF
h2 = ( h1 + h2 ) & 0xFFFFFFFF
h3 = ( h1 + h3 ) & 0xFFFFFFFF
h4 = ( h1 + h4 ) & 0xFFFFFFFF
return ( h4 << 96 | h3 << 64 | h2 << 32 | h1 )
key = bytearray( xencode(key) )
if x64arch:
return hash128_x64( key, seed )
else:
return hash128_x86( key, seed )
def hash64( key, seed = 0x0, x64arch = True ):
''' Implements 64bit murmur3 hash. Returns a tuple. '''
hash_128 = hash128( key, seed, x64arch )
unsigned_val1 = hash_128 & 0xFFFFFFFFFFFFFFFF
if unsigned_val1 & 0x8000000000000000 == 0:
signed_val1 = unsigned_val1
else:
signed_val1 = -( (unsigned_val1 ^ 0xFFFFFFFFFFFFFFFF) + 1 )
unsigned_val2 = ( hash_128 >> 64 ) & 0xFFFFFFFFFFFFFFFF
if unsigned_val2 & 0x8000000000000000 == 0:
signed_val2 = unsigned_val2
else:
signed_val2 = -( (unsigned_val2 ^ 0xFFFFFFFFFFFFFFFF) + 1 )
return ( int( signed_val1 ), int( signed_val2 ) )
def hash_bytes( key, seed = 0x0, x64arch = True ):
''' Implements 128bit murmur3 hash. Returns a byte string. '''
hash_128 = hash128( key, seed, x64arch )
bytestring = ''
for i in range(0, 16, 1):
lsbyte = hash_128 & 0xFF
bytestring = bytestring + str( chr( lsbyte ) )
hash_128 = hash_128 >> 8
return bytestring
if __name__ == "__main__":
    import argparse
    import sys  # needed here because the module-level `_sys` import is deleted above
parser = argparse.ArgumentParser( 'pymurmur3', 'pymurmur [options] "string to hash"' )
parser.add_argument( '--seed', type = int, default = 0 )
parser.add_argument( 'strings', default = [], nargs='+')
opts = parser.parse_args()
for str_to_hash in opts.strings:
sys.stdout.write( '"%s" = 0x%08X\n' % ( str_to_hash, hash( str_to_hash ) ) )
| 31.311947 | 104 | 0.464283 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,532 | 0.108246 |
dbfc56fb832ee5fc9af604dacd2a35c059519b31 | 950 | py | Python | bindings/python/tests/test_factory.py | pscff/dlite | 4365d828dcaa1736cc78ff6ed9a65592f198ba25 | [
"MIT"
] | 10 | 2020-04-08T06:25:27.000Z | 2022-03-15T06:54:53.000Z | bindings/python/tests/test_factory.py | pscff/dlite | 4365d828dcaa1736cc78ff6ed9a65592f198ba25 | [
"MIT"
] | 117 | 2019-12-16T14:43:41.000Z | 2022-03-21T19:46:58.000Z | bindings/python/tests/test_factory.py | pscff/dlite | 4365d828dcaa1736cc78ff6ed9a65592f198ba25 | [
"MIT"
] | 5 | 2020-04-15T16:23:29.000Z | 2021-12-07T08:40:54.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import dlite
thisdir = os.path.abspath(os.path.dirname(__file__))
class Person:
def __init__(self, name, age, skills):
self.name = name
self.age = age
self.skills = skills
def __repr__(self):
return 'Person(%r, %r, %r)' % (self.name, self.age, list(self.skills))
url = 'json://' + thisdir + '/Person.json'
print('-- create: ExPerson')
ExPerson = dlite.classfactory(Person, url=url)
print('-- create: person1')
person1 = Person('Jack Daniel', 42, ['distilling', 'tasting'])
print('-- create: person2')
person2 = ExPerson('Jack Daniel', 42, ['distilling', 'tasting'])
person2.dlite_inst.save('json', 'persons.json', 'mode=w')
# Print json-representation of person2 using dlite
print(person2.dlite_inst.asjson(indent=2))
person3 = dlite.loadfactory(Person, 'json://persons.json')
person4 = dlite.objectfactory(person1, meta=person2.dlite_meta)
| 25 | 78 | 0.671579 | 237 | 0.249474 | 0 | 0 | 0 | 0 | 0 | 0 | 315 | 0.331579 |
dbfcfb1df1954ace1963bc30983b96adb222d711 | 807 | py | Python | week_11_DS_N_Algorithm/03_Thr_Lecture/실습6_연속 부분 최대합.py | bky373/elice-racer-1st | ddea8079a1083796ed4f59c38650ff8f4333e6ef | [
"FSFAP"
] | 1 | 2021-11-03T18:27:37.000Z | 2021-11-03T18:27:37.000Z | week_11_DS_N_Algorithm/03_Thr_Lecture/실습6_연속 부분 최대합.py | bky373/elice-racer-1st | ddea8079a1083796ed4f59c38650ff8f4333e6ef | [
"FSFAP"
] | null | null | null | week_11_DS_N_Algorithm/03_Thr_Lecture/실습6_연속 부분 최대합.py | bky373/elice-racer-1st | ddea8079a1083796ed4f59c38650ff8f4333e6ef | [
"FSFAP"
] | 1 | 2021-02-10T15:21:53.000Z | 2021-02-10T15:21:53.000Z | '''
Maximum contiguous subarray sum
Given n numbers, write a program that selects a contiguous part and maximizes its sum.
For example, suppose the following 8 numbers are given.
1 2 -4 5 3 -2 9 -10
Here, a contiguous part means selecting numbers consecutively.
Possible contiguous parts include [1, 2, -4], [5, 3, -2, 9], [9, -10], and so on.
Among these contiguous parts, the one with the largest sum is [5, 3, -2, 9],
and the sum cannot be made any larger than this.
Therefore, the maximum contiguous subarray sum is 5+3+(-2)+9 = 15.
Input example
1 2 -4 5 3 -2 9 -10
Output example
15
Problem constraints
At most 100 numbers are given as input.
'''
import sys
def getSubsum(data) :
'''
    Given n numbers as a list, write a function that returns their maximum contiguous subarray sum.
'''
dp = [0] * len(data)
dp[0] = data[0]
for i in range(1, len(data)):
dp[i] = max(dp[i-1] + data[i], data[i])
return max(dp)
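# Illustrative trace (added note): for the docstring's sample input
# data = [1, 2, -4, 5, 3, -2, 9, -10], the Kadane-style recurrence
# dp[i] = max(dp[i-1] + data[i], data[i]) produces
# dp = [1, 3, -1, 5, 8, 6, 15, 5], and max(dp) == 15 as expected.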
def main():
'''
    Do not modify this part.
'''
data = [int(x) for x in input().split()]
print(getSubsum(data))
if __name__ == "__main__":
main()
| 17.170213 | 61 | 0.537794 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 935 | 0.751004 |
dbfd1a602dd992f412e1700c617d5bbf9b239826 | 505 | py | Python | tests/test_dns.py | jensstein/mockdock | 4eec294f33d929d361973c1708d2aa856a9900a0 | [
"MIT"
] | null | null | null | tests/test_dns.py | jensstein/mockdock | 4eec294f33d929d361973c1708d2aa856a9900a0 | [
"MIT"
] | 6 | 2020-03-24T16:45:10.000Z | 2021-02-13T10:03:53.000Z | tests/test_dns.py | jensstein/mockdock | 4eec294f33d929d361973c1708d2aa856a9900a0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import unittest
from mockdock import dns
class DNSTest(unittest.TestCase):
def test_build_packet(self):
data = b"^4\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x06google\x03com\x00\x00\x01\x00\x01"
packet = dns.build_packet(data, "192.168.0.1")
        expected_result = b"^4\x81\x80\x00\x01\x00\x01\x00\x00\x00\x00\x06google\x03com\x00\x00\x01\x00\x01\xc0\x0c\x00\x01\x00\x01\x00\x00\x00<\x00\x04\xc0\xa8\x00\x01"
        self.assertEqual(packet, expected_result)
| 38.846154 | 168 | 0.708911 | 437 | 0.865347 | 0 | 0 | 0 | 0 | 0 | 0 | 260 | 0.514851 |
dbfd45a1262d4d81ad4ca682e226d591f37c7fd4 | 1,490 | py | Python | tests/conftest.py | zhongnansu/es-cli | e0656c21392e52a8b9cfafa69acfa0c13b743a9c | [
"Apache-2.0"
] | 6 | 2019-08-23T18:06:41.000Z | 2020-05-06T18:26:53.000Z | tests/conftest.py | zhongnansu/es-cli | e0656c21392e52a8b9cfafa69acfa0c13b743a9c | [
"Apache-2.0"
] | null | null | null | tests/conftest.py | zhongnansu/es-cli | e0656c21392e52a8b9cfafa69acfa0c13b743a9c | [
"Apache-2.0"
] | null | null | null | """
Copyright 2019, Amazon Web Services Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
We can define the fixture functions in this file to make them
accessible across multiple test modules.
"""
import os
import pytest
from utils import create_index, delete_index, get_connection
@pytest.fixture(scope="function")
def connection():
test_connection = get_connection()
create_index(test_connection)
yield test_connection
delete_index(test_connection)
@pytest.fixture(scope="function")
def default_config_location():
from escli.conf import __file__ as package_root
package_root = os.path.dirname(package_root)
default_config = os.path.join(package_root, "esclirc")
yield default_config
@pytest.fixture(scope="session", autouse=True)
def temp_config(tmpdir_factory):
# this function runs on start of test session.
# use temporary directory for conf home so user conf will not be used
os.environ["XDG_CONFIG_HOME"] = str(tmpdir_factory.mktemp("data"))
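# Illustrative usage sketch (not part of the original file): test modules in
# this package can use the fixtures above simply by naming them as arguments;
# pytest wires them up automatically, e.g.
#
#   def test_query(connection, default_config_location):
#       # `connection` is backed by a freshly created test index, and the
#       # session-scoped `temp_config` fixture has already isolated XDG_CONFIG_HOME
#       ...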
| 29.8 | 73 | 0.769799 | 0 | 0 | 368 | 0.24698 | 711 | 0.477181 | 0 | 0 | 857 | 0.575168 |
dbfd8140aa71c6ce6288cd86d96c8cf8754cf91f | 26,845 | py | Python | Cogs/ServerStats.py | Damiian1/techwizardshardware | 97ceafc15036be4136e860076d73d74f1887f041 | [
"MIT"
] | null | null | null | Cogs/ServerStats.py | Damiian1/techwizardshardware | 97ceafc15036be4136e860076d73d74f1887f041 | [
"MIT"
] | null | null | null | Cogs/ServerStats.py | Damiian1/techwizardshardware | 97ceafc15036be4136e860076d73d74f1887f041 | [
"MIT"
] | null | null | null | import asyncio
import discord
from datetime import datetime
from operator import itemgetter
from discord.ext import commands
from Cogs import Nullify
from Cogs import DisplayName
from Cogs import UserTime
from Cogs import Message
def setup(bot):
# Add the bot and deps
settings = bot.get_cog("Settings")
bot.add_cog(ServerStats(bot, settings))
class ServerStats:
def __init__(self, bot, settings):
self.bot = bot
self.settings = settings
async def message(self, message):
# Check the message and see if we should allow it - always yes.
# This module doesn't need to cancel messages.
# Don't count your own, Pooter
if not message.author.id == self.bot.user.id:
server = message.guild
            messages = self.settings.getServerStat(server, "TotalMessages")
            # Guard against a missing stat before converting to int
            messages = 0 if messages == None else int(messages)
            messages += 1
self.settings.setServerStat(server, "TotalMessages", messages)
return { 'Ignore' : False, 'Delete' : False}
@commands.command(pass_context=True)
async def serverinfo(self, ctx, *, guild_name = None):
"""Lists some info about the current or passed server."""
# Check if we passed another guild
guild = None
if guild_name == None:
guild = ctx.guild
else:
for g in self.bot.guilds:
if g.name.lower() == guild_name.lower():
guild = g
break
if str(g.id) == str(guild_name):
guild = g
break
if guild == None:
# We didn't find it
await ctx.send("I couldn't find that guild...")
return
server_embed = discord.Embed(color=ctx.author.color)
server_embed.title = guild.name
# Get localized user time
local_time = UserTime.getUserTime(ctx.author, self.settings, guild.created_at)
time_str = "{} {}".format(local_time['time'], local_time['zone'])
server_embed.description = "Created at {}".format(time_str)
online_members = 0
bot_member = 0
bot_online = 0
for member in guild.members:
if member.bot:
bot_member += 1
if not member.status == discord.Status.offline:
bot_online += 1
continue
if not member.status == discord.Status.offline:
online_members += 1
# bot_percent = "{:,g}%".format((bot_member/len(guild.members))*100)
user_string = "{:,}/{:,} online ({:,g}%)".format(
online_members,
len(guild.members) - bot_member,
round((online_members/(len(guild.members) - bot_member) * 100), 2)
)
b_string = "bot" if bot_member == 1 else "bots"
user_string += "\n{:,}/{:,} {} online ({:,g}%)".format(
bot_online,
bot_member,
b_string,
round((bot_online/bot_member)*100, 2)
)
#server_embed.add_field(name="Members", value="{:,}/{:,} online ({:.2f}%)\n{:,} {} ({}%)".format(online_members, len(guild.members), bot_percent), inline=True)
server_embed.add_field(name="Members ({:,} total)".format(len(guild.members)), value=user_string, inline=True)
server_embed.add_field(name="Roles", value=str(len(guild.roles)), inline=True)
chandesc = "{:,} text, {:,} voice".format(len(guild.text_channels), len(guild.voice_channels))
server_embed.add_field(name="Channels", value=chandesc, inline=True)
server_embed.add_field(name="Default Role", value=guild.default_role, inline=True)
server_embed.add_field(name="Owner", value=guild.owner.name + "#" + guild.owner.discriminator, inline=True)
server_embed.add_field(name="AFK Channel", value=guild.afk_channel, inline=True)
server_embed.add_field(name="Verification", value=guild.verification_level, inline=True)
server_embed.add_field(name="Voice Region", value=guild.region, inline=True)
server_embed.add_field(name="Considered Large", value=guild.large, inline=True)
# Find out where in our join position this server is
joinedList = []
popList = []
for g in self.bot.guilds:
joinedList.append({ 'ID' : g.id, 'Joined' : g.me.joined_at })
popList.append({ 'ID' : g.id, 'Population' : len(g.members) })
# sort the guilds by join date
joinedList = sorted(joinedList, key=lambda x:x['Joined'])
popList = sorted(popList, key=lambda x:x['Population'], reverse=True)
check_item = { "ID" : guild.id, "Joined" : guild.me.joined_at }
total = len(joinedList)
position = joinedList.index(check_item) + 1
server_embed.add_field(name="Join Position", value="{:,} of {:,}".format(position, total), inline=True)
# Get our population position
check_item = { "ID" : guild.id, "Population" : len(guild.members) }
total = len(popList)
position = popList.index(check_item) + 1
server_embed.add_field(name="Population Rank", value="{:,} of {:,}".format(position, total), inline=True)
emojitext = ""
emojicount = 0
for emoji in guild.emojis:
if emoji.animated:
emojiMention = "<a:"+emoji.name+":"+str(emoji.id)+">"
else:
emojiMention = "<:"+emoji.name+":"+str(emoji.id)+">"
test = emojitext + emojiMention
if len(test) > 1024:
# TOOO BIIIIIIIIG
emojicount += 1
if emojicount == 1:
ename = "Emojis ({:,} total)".format(len(guild.emojis))
else:
ename = "Emojis (Continued)"
server_embed.add_field(name=ename, value=emojitext, inline=True)
emojitext=emojiMention
else:
emojitext = emojitext + emojiMention
if len(emojitext):
if emojicount == 0:
emojiname = "Emojis ({} total)".format(len(guild.emojis))
else:
emojiname = "Emojis (Continued)"
server_embed.add_field(name=emojiname, value=emojitext, inline=True)
if len(guild.icon_url):
server_embed.set_thumbnail(url=guild.icon_url)
else:
# No Icon
server_embed.set_thumbnail(url=ctx.author.default_avatar_url)
server_embed.set_footer(text="Server ID: {}".format(guild.id))
await ctx.channel.send(embed=server_embed)
@commands.command(pass_context=True)
async def sharedservers(self, ctx, *, member = None):
"""Lists how many servers you share with the bot."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if member == None:
member = ctx.author
if type(member) is str:
member_check = DisplayName.memberForName(member, ctx.guild)
if not member_check:
msg = "I couldn't find *{}* on this server...".format(member)
if suppress:
msg = Nullify.clean(msg)
await ctx.send(msg)
return
member = member_check
if member.id == self.bot.user.id:
count = len(self.bot.guilds)
if count == 1:
await ctx.send("I'm on *1* server. :blush:")
else:
await ctx.send("I'm on *{}* servers. :blush:".format(count))
return
count = 0
for guild in self.bot.guilds:
for mem in guild.members:
if mem.id == member.id:
count += 1
if ctx.author.id == member.id:
targ = "You share"
else:
targ = "*{}* shares".format(DisplayName.name(member))
if count == 1:
await ctx.send("{} *1* server with me. :blush:".format(targ))
else:
await ctx.send("{} *{}* servers with me. :blush:".format(targ, count))
@commands.command(pass_context=True)
async def listservers(self, ctx, number : int = 10):
"""Lists the servers I'm connected to - default is 10, max is 50."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if number > 50:
number = 50
if number < 1:
await ctx.channel.send('Oookay - look! No servers! Just like you wanted!')
return
i = 1
msg = '__**Servers I\'m On:**__\n\n'
for server in self.bot.guilds:
if i > number:
break
msg += '{}. *{}*\n'.format(i, server.name)
i += 1
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def topservers(self, ctx, number : int = 10):
"""Lists the top servers I'm connected to ordered by population - default is 10, max is 50."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if number > 50:
number = 50
if number < 1:
await ctx.channel.send('Oookay - look! No servers! Just like you wanted!')
return
serverList = []
for server in self.bot.guilds:
memberCount = 0
for member in server.members:
memberCount += 1
serverList.append({ 'Name' : server.name, 'Users' : memberCount })
# sort the servers by population
serverList = sorted(serverList, key=lambda x:int(x['Users']), reverse=True)
if number > len(serverList):
number = len(serverList)
i = 1
msg = ''
for server in serverList:
if i > number:
break
msg += '{}. *{}* - *{:,}* members\n'.format(i, server['Name'], server['Users'])
i += 1
if number < len(serverList):
msg = '__**Top {} of {} Servers:**__\n\n'.format(number, len(serverList))+msg
else:
msg = '__**Top {} Servers:**__\n\n'.format(len(serverList))+msg
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def bottomservers(self, ctx, number : int = 10):
"""Lists the bottom servers I'm connected to ordered by population - default is 10, max is 50."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if number > 50:
number = 50
if number < 1:
await ctx.channel.send('Oookay - look! No servers! Just like you wanted!')
return
serverList = []
for server in self.bot.guilds:
serverList.append({ 'Name' : server.name, 'Users' : len(server.members) })
# sort the servers by population
serverList = sorted(serverList, key=lambda x:int(x['Users']))
if number > len(serverList):
number = len(serverList)
i = 1
msg = ''
for server in serverList:
if i > number:
break
msg += '{}. *{}* - *{:,}* members\n'.format(i, server['Name'], server['Users'])
i += 1
if number < len(serverList):
msg = '__**Bottom {} of {} Servers:**__\n\n'.format(number, len(serverList))+msg
else:
msg = '__**Bottom {} Servers:**__\n\n'.format(len(serverList))+msg
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def users(self, ctx):
"""Lists the total number of users on all servers I'm connected to."""
message = await Message.EmbedText(title="Counting users...", color=ctx.message.author).send(ctx)
servers = members = membersOnline = bots = botsOnline = 0
counted_users = []
counted_bots = []
for server in self.bot.guilds:
servers += 1
for member in server.members:
if member.bot:
bots += 1
if not member.id in counted_bots:
counted_bots.append(member.id)
if not member.status == discord.Status.offline:
botsOnline += 1
else:
members += 1
if not member.id in counted_users:
counted_users.append(member.id)
if not member.status == discord.Status.offline:
membersOnline += 1
await Message.Embed(
title="Member Stats",
description="Current User Information".format(server.name),
fields=[
{ "name" : "Servers", "value" : "└─ {:,}".format(servers), "inline" : False },
{ "name" : "Users", "value" : "└─ {:,}/{:,} online ({:,g}%) - {:,} unique ({:,g}%)".format(membersOnline, members, round((membersOnline/members)*100, 2), len(counted_users), round((len(counted_users)/members)*100, 2)), "inline" : False},
{ "name" : "Bots", "value" : "└─ {:,}/{:,} online ({:,g}%) - {:,} unique ({:,g}%)".format(botsOnline, bots, round((botsOnline/bots)*100, 2), len(counted_bots), round(len(counted_bots)/bots*100, 2)), "inline" : False},
{ "name" : "Total", "value" : "└─ {:,}/{:,} online ({:,g}%)".format(membersOnline + botsOnline, members+bots, round(((membersOnline + botsOnline)/(members+bots))*100, 2)), "inline" : False}
],
color=ctx.message.author).edit(ctx, message)
'''userCount = 0
serverCount = 0
counted_users = []
message = await ctx.send("Counting users...")
for server in self.bot.guilds:
serverCount += 1
userCount += len(server.members)
for member in server.members:
if not member.id in counted_users:
counted_users.append(member.id)
await message.edit(content='There are *{:,} users* (*{:,}* unique) on the *{:,} servers* I am currently a part of!'.format(userCount, len(counted_users), serverCount))'''
@commands.command(pass_context=True)
async def joinpos(self, ctx, *, member = None):
"""Tells when a user joined compared to other users."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if member == None:
member = ctx.author
if type(member) is str:
member_check = DisplayName.memberForName(member, ctx.guild)
if not member_check:
msg = "I couldn't find *{}* on this server...".format(member)
if suppress:
msg = Nullify.clean(msg)
await ctx.send(msg)
return
member = member_check
joinedList = []
for mem in ctx.message.guild.members:
joinedList.append({ 'ID' : mem.id, 'Joined' : mem.joined_at })
# sort the users by join date
joinedList = sorted(joinedList, key=lambda x:x['Joined'])
check_item = { "ID" : member.id, "Joined" : member.joined_at }
total = len(joinedList)
position = joinedList.index(check_item) + 1
before = ""
after = ""
msg = "*{}'s* join position is **{:,}**.".format(DisplayName.name(member), position, total)
if position-1 == 1:
# We have previous members
before = "**1** user"
elif position-1 > 1:
before = "**{:,}** users".format(position-1)
if total-position == 1:
# There were users after as well
after = "**1** user"
elif total-position > 1:
after = "**{:,}** users".format(total-position)
# Build the string!
if len(before) and len(after):
# Got both
msg += "\n\n{} joined before, and {} after.".format(before, after)
elif len(before):
# Just got before
msg += "\n\n{} joined before.".format(before)
elif len(after):
# Just after
msg += "\n\n{} joined after.".format(after)
await ctx.send(msg)
@commands.command(pass_context=True)
async def firstjoins(self, ctx, number : int = 10):
"""Lists the first users to join - default is 10, max is 25."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if number > 25:
number = 25
if number < 1:
await ctx.channel.send('Oookay - look! No users! Just like you wanted!')
return
joinedList = []
for member in ctx.message.guild.members:
joinedList.append({ 'ID' : member.id, 'Joined' : member.joined_at })
# sort the users by join date
joinedList = sorted(joinedList, key=lambda x:x['Joined'])
i = 1
msg = ''
for member in joinedList:
if i > number:
break
# Get localized user time
local_time = UserTime.getUserTime(ctx.author, self.settings, member['Joined'])
time_str = "{} {}".format(local_time['time'], local_time['zone'])
msg += '{}. *{}* - *{}*\n'.format(i, DisplayName.name(DisplayName.memberForID(member['ID'], ctx.message.guild)), time_str)
i += 1
if number < len(joinedList):
msg = '__**First {} of {} Members to Join:**__\n\n'.format(number, len(joinedList))+msg
else:
msg = '__**First {} Members to Join:**__\n\n'.format(len(joinedList))+msg
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def recentjoins(self, ctx, number : int = 10):
"""Lists the most recent users to join - default is 10, max is 25."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if number > 25:
number = 25
if number < 1:
await ctx.channel.send('Oookay - look! No users! Just like you wanted!')
return
joinedList = []
for member in ctx.message.guild.members:
joinedList.append({ 'ID' : member.id, 'Joined' : member.joined_at })
# sort the users by join date
joinedList = sorted(joinedList, key=lambda x:x['Joined'], reverse=True)
i = 1
msg = ''
for member in joinedList:
if i > number:
break
# Get localized user time
local_time = UserTime.getUserTime(ctx.author, self.settings, member['Joined'])
time_str = "{} {}".format(local_time['time'], local_time['zone'])
msg += '{}. *{}* - *{}*\n'.format(i, DisplayName.name(DisplayName.memberForID(member['ID'], ctx.message.guild)), time_str)
i += 1
if number < len(joinedList):
msg = '__**Last {} of {} Members to Join:**__\n\n'.format(number, len(joinedList))+msg
else:
msg = '__**Last {} Members to Join:**__\n\n'.format(len(joinedList))+msg
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def firstservers(self, ctx, number : int = 10):
"""Lists the first servers I've joined - default is 10, max is 25."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if number > 25:
number = 25
if number < 1:
await ctx.channel.send('Oookay - look! No servers! Just like you wanted!')
return
joinedList = []
for guild in self.bot.guilds:
botmember = DisplayName.memberForID(self.bot.user.id, guild)
joinedList.append({ 'Name' : guild.name, 'Joined' : botmember.joined_at, 'Members': len(guild.members) })
# sort the servers by join date
joinedList = sorted(joinedList, key=lambda x:x['Joined'])
i = 1
msg = ''
for member in joinedList:
if i > number:
break
# Get localized user time
local_time = UserTime.getUserTime(ctx.author, self.settings, member['Joined'])
time_str = "{} {}".format(local_time['time'], local_time['zone'])
if member['Members'] == 1:
msg += '{}. *{}* - *{}* - *(1 member)*\n'.format(i, member['Name'], time_str)
else:
msg += '{}. *{}* - *{}* - *({} members)*\n'.format(i, member['Name'], time_str, member['Members'])
i += 1
if number < len(joinedList):
msg = '__**First {} of {} Servers I Joined:**__\n\n'.format(number, len(joinedList))+msg
else:
msg = '__**First {} Servers I Joined:**__\n\n'.format(len(joinedList))+msg
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def recentservers(self, ctx, number : int = 10):
"""Lists the most recent users to join - default is 10, max is 25."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.guild, "SuppressMentions"):
suppress = True
else:
suppress = False
if number > 25:
number = 25
if number < 1:
await ctx.channel.send('Oookay - look! No servers! Just like you wanted!')
return
joinedList = []
for guild in self.bot.guilds:
botmember = DisplayName.memberForID(self.bot.user.id, guild)
joinedList.append({ 'Name' : guild.name, 'Joined' : botmember.joined_at, 'Members': len(guild.members) })
# sort the servers by join date
joinedList = sorted(joinedList, key=lambda x:x['Joined'], reverse=True)
i = 1
msg = ''
for member in joinedList:
if i > number:
break
# Get localized user time
local_time = UserTime.getUserTime(ctx.author, self.settings, member['Joined'])
time_str = "{} {}".format(local_time['time'], local_time['zone'])
if member['Members'] == 1:
msg += '{}. *{}* - *{}* - *(1 member)*\n'.format(i, member['Name'], time_str)
else:
msg += '{}. *{}* - *{}* - *({} members)*\n'.format(i, member['Name'], time_str, member['Members'])
i += 1
if number < len(joinedList):
msg = '__**Last {} of {} Servers I Joined:**__\n\n'.format(number, len(joinedList))+msg
else:
msg = '__**Last {} Servers I Joined:**__\n\n'.format(len(joinedList))+msg
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await ctx.channel.send(msg)
@commands.command(pass_context=True)
async def messages(self, ctx):
"""Lists the number of messages I've seen on this sever so far. (only applies after this module's inception, and if I'm online)"""
        messages = self.settings.getServerStat(ctx.message.guild, "TotalMessages")
        # Guard against a missing stat before converting, then exclude the command message itself
        messages = 0 if messages == None else int(messages)
        messages -= 1
        self.settings.setServerStat(ctx.message.guild, "TotalMessages", messages)
if messages == 1:
await ctx.channel.send('So far, I\'ve witnessed *{:,} message!*'.format(messages))
else:
await ctx.channel.send('So far, I\'ve witnessed *{:,} messages!*'.format(messages))
@commands.command(pass_context=True)
async def allmessages(self, ctx):
"""Lists the number of messages I've seen on all severs so far. (only applies after this module's inception, and if I'm online)"""
messages = 0
for guild in self.bot.guilds:
temp = 0 if self.settings.getServerStat(guild, "TotalMessages") is None else self.settings.getServerStat(guild, "TotalMessages")
messages += int(temp)
messages -= 1
if messages == 1:
await ctx.channel.send('So far, I\'ve witnessed *{:,} message across all servers!*'.format(messages))
else:
await ctx.channel.send('So far, I\'ve witnessed *{:,} messages across all servers!*'.format(messages))
# Set our message count locally -1
messages = int(self.settings.getServerStat(ctx.message.guild, "TotalMessages"))
messages -= 1
self.settings.setServerStat(ctx.message.guild, "TotalMessages", messages)
| 42.076803 | 254 | 0.537568 | 26,447 | 0.984587 | 0 | 0 | 25,586 | 0.952533 | 25,652 | 0.954991 | 6,638 | 0.247124 |
dbfdaede95b2536399d16c62c421baf5bd420ceb | 6,688 | py | Python | chess_commentary_model/transformers_model/dataset_preprocessing.py | Rseiji/TCC-2020 | da68a49da38adf1bcf590b3028894d7834a28157 | [
"MIT"
] | null | null | null | chess_commentary_model/transformers_model/dataset_preprocessing.py | Rseiji/TCC-2020 | da68a49da38adf1bcf590b3028894d7834a28157 | [
"MIT"
] | 2 | 2020-08-30T22:47:54.000Z | 2021-03-31T19:58:11.000Z | chess_commentary_model/transformers_model/dataset_preprocessing.py | Rseiji/TCC-2020 | da68a49da38adf1bcf590b3028894d7834a28157 | [
"MIT"
] | null | null | null | """Métodos de preprocessamento de testes individuais
"""
import pandas as pd
import numpy as np
import math
def test_1(df, seed=0):
"""training: balanced; test: balanced
training: 80k (40k 0, 40k 1)
test: 20k (10k 0, 10k 1)
"""
df_ones = df[df['label'] == 1]
df_zeros = df[df['label'] == 0]
df_ones = df_ones.sample(frac=1, random_state=seed).reset_index(drop=True)
df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True)
df_ones_training = df_ones.loc[:40000]
df_zeros_training = df_zeros.loc[:40000]
df_ones_test = df_ones.loc[40000:50000]
df_zeros_test = df_zeros.loc[40000:50000]
df_training = pd.concat([df_ones_training, df_zeros_training])
df_training = df_training.sample(frac=1).reset_index(drop=True)
df_test = pd.concat([df_ones_test, df_zeros_test])
df_test = df_test.sample(frac=1).reset_index(drop=True)
sentences_train = df_training['comment'].tolist()
sentences_test = df_test['comment'].tolist()
labels_train = df_training['label'].tolist()
labels_test = df_test['label'].tolist()
return sentences_train, sentences_test, labels_train, labels_test
def test_2(df, seed=0):
"""training: balanced; test: unbalanced
training: 80k (40k 0, 40k 1)
test: 20k (4k 0, 16k 1)
"""
df_ones = df[df['label'] == 1]
df_zeros = df[df['label'] == 0]
df_ones = df_ones.sample(frac=1, random_state=seed).reset_index(drop=True)
df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True)
df_ones_training = df_ones.loc[:40000]
df_zeros_training = df_zeros.loc[:40000]
df_ones_test = df_ones.loc[40000:44000]
df_zeros_test = df_zeros.loc[40000:56000]
df_training = pd.concat([df_ones_training, df_zeros_training])
df_training = df_training.sample(frac=1).reset_index(drop=True)
df_test = pd.concat([df_ones_test, df_zeros_test])
df_test = df_test.sample(frac=1).reset_index(drop=True)
sentences_train = df_training['comment'].tolist()
sentences_test = df_test['comment'].tolist()
labels_train = df_training['label'].tolist()
labels_test = df_test['label'].tolist()
return sentences_train, sentences_test, labels_train, labels_test
def test_3(df, seed=0):
"""training: unbalanced; test: unbalanced
training: 80k (16k 1, 64k 0)
test: 20k (4k 1, 16k 0)
"""
df_ones = df[df['label'] == 1]
df_zeros = df[df['label'] == 0]
df_ones = df_ones.sample(frac=1, random_state=seed).reset_index(drop=True)
df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True)
df_ones_training = df_ones.loc[:16000]
df_zeros_training = df_zeros.loc[:64000]
df_ones_test = df_ones.loc[16000:20000]
df_zeros_test = df_zeros.loc[64000:80000]
df_training = pd.concat([df_ones_training, df_zeros_training])
df_training = df_training.sample(frac=1).reset_index(drop=True)
df_test = pd.concat([df_ones_test, df_zeros_test])
df_test = df_test.sample(frac=1).reset_index(drop=True)
sentences_train = df_training['comment'].tolist()
sentences_test = df_test['comment'].tolist()
labels_train = df_training['label'].tolist()
labels_test = df_test['label'].tolist()
return sentences_train, sentences_test, labels_train, labels_test
##################################
## Tests on old dataset
##################################
def test_4(df, seed=0):
""" training: balanced; test: balanced
training: 58k (29k 0, 29k 1)
test: 14.5k (7.25k 0, 7.25k 1)
"""
df_ones = df[df['label'] == 1]
df_zeros = df[df['label'] == 0]
df_ones = df_ones.sample(frac=1, random_state=seed).reset_index(drop=True)
df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True)
df_ones_training = df_ones.loc[:29000]
df_zeros_training = df_zeros.loc[:29000]
df_ones_test = df_ones.loc[29000:36250]
df_zeros_test = df_zeros.loc[29000:36250]
df_training = pd.concat([df_ones_training, df_zeros_training])
df_training = df_training.sample(frac=1).reset_index(drop=True)
df_test = pd.concat([df_ones_test, df_zeros_test])
df_test = df_test.sample(frac=1).reset_index(drop=True)
sentences_train = df_training['comment'].tolist()
sentences_test = df_test['comment'].tolist()
labels_train = df_training['label'].tolist()
labels_test = df_test['label'].tolist()
return sentences_train, sentences_test, labels_train, labels_test
def test_5(df, seed=0):
"""training: balanced; test: unbalanced
training: 58k (29000 0, 29000 1)
test: 14.5k (12905 0, 1595 1)
"""
df_ones = df[df['label'] == 1]
df_zeros = df[df['label'] == 0]
df_ones = df_ones.sample(frac=1, random_state=seed).reset_index(drop=True)
df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True)
df_ones_training = df_ones.loc[:29000]
df_zeros_training = df_zeros.loc[:29000]
df_ones_test = df_ones.loc[29000:30595]
df_zeros_test = df_zeros.loc[29000:41905]
df_training = pd.concat([df_ones_training, df_zeros_training])
df_training = df_training.sample(frac=1).reset_index(drop=True)
df_test = pd.concat([df_ones_test, df_zeros_test])
df_test = df_test.sample(frac=1).reset_index(drop=True)
sentences_train = df_training['comment'].tolist()
sentences_test = df_test['comment'].tolist()
labels_train = df_training['label'].tolist()
labels_test = df_test['label'].tolist()
return sentences_train, sentences_test, labels_train, labels_test
def test_6(df, seed=0):
"""training: unbalanced; test: unbalanced
training: 58k (6380 1, 51620 0)
test: 14.5k (1595 1, 12905 0)
"""
df_ones = df[df['label'] == 1]
df_zeros = df[df['label'] == 0]
df_ones = df_ones.sample(frac=1, random_state=seed).reset_index(drop=True)
df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True)
df_ones_training = df_ones.loc[:6380]
df_zeros_training = df_zeros.loc[:51620]
df_ones_test = df_ones.loc[6380:7975]
df_zeros_test = df_zeros.loc[51620:64525]
df_training = pd.concat([df_ones_training, df_zeros_training])
df_training = df_training.sample(frac=1).reset_index(drop=True)
df_test = pd.concat([df_ones_test, df_zeros_test])
df_test = df_test.sample(frac=1).reset_index(drop=True)
sentences_train = df_training['comment'].tolist()
sentences_test = df_test['comment'].tolist()
labels_train = df_training['label'].tolist()
labels_test = df_test['label'].tolist()
return sentences_train, sentences_test, labels_train, labels_test
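# Minimal usage sketch (illustrative, not part of the original module): each
# test_* helper expects a DataFrame with 'comment' and 'label' columns and
# returns shuffled train/test sentence and label lists, e.g.
#
#   df = pd.read_csv('comments.csv')  # hypothetical file with comment/label columns
#   sentences_train, sentences_test, labels_train, labels_test = test_1(df, seed=42)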
| 36.546448 | 80 | 0.689145 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,144 | 0.171027 |
dbfdb987e6de76d1f36bf0f8ce7f9d972b1cbaed | 7,103 | py | Python | venv/Lib/site-packages/CoolProp/constants.py | kubakoziczak/gasSteamPowerPlant | e6c036cc66ee2ff0b3f2fc923d0991bf57295d61 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/CoolProp/constants.py | kubakoziczak/gasSteamPowerPlant | e6c036cc66ee2ff0b3f2fc923d0991bf57295d61 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/CoolProp/constants.py | kubakoziczak/gasSteamPowerPlant | e6c036cc66ee2ff0b3f2fc923d0991bf57295d61 | [
"MIT"
] | null | null | null | # This file is automatically generated by the generate_constants_module.py script in wrappers/Python.
# DO NOT MODIFY THE CONTENTS OF THIS FILE!
from __future__ import absolute_import
from . import _constants
INVALID_PARAMETER = _constants.INVALID_PARAMETER
igas_constant = _constants.igas_constant
imolar_mass = _constants.imolar_mass
iacentric_factor = _constants.iacentric_factor
irhomolar_reducing = _constants.irhomolar_reducing
irhomolar_critical = _constants.irhomolar_critical
iT_reducing = _constants.iT_reducing
iT_critical = _constants.iT_critical
irhomass_reducing = _constants.irhomass_reducing
irhomass_critical = _constants.irhomass_critical
iP_critical = _constants.iP_critical
iP_reducing = _constants.iP_reducing
iT_triple = _constants.iT_triple
iP_triple = _constants.iP_triple
iT_min = _constants.iT_min
iT_max = _constants.iT_max
iP_max = _constants.iP_max
iP_min = _constants.iP_min
idipole_moment = _constants.idipole_moment
iT = _constants.iT
iP = _constants.iP
iQ = _constants.iQ
iTau = _constants.iTau
iDelta = _constants.iDelta
iDmolar = _constants.iDmolar
iHmolar = _constants.iHmolar
iSmolar = _constants.iSmolar
iCpmolar = _constants.iCpmolar
iCp0molar = _constants.iCp0molar
iCvmolar = _constants.iCvmolar
iUmolar = _constants.iUmolar
iGmolar = _constants.iGmolar
iHelmholtzmolar = _constants.iHelmholtzmolar
iSmolar_residual = _constants.iSmolar_residual
iDmass = _constants.iDmass
iHmass = _constants.iHmass
iSmass = _constants.iSmass
iCpmass = _constants.iCpmass
iCp0mass = _constants.iCp0mass
iCvmass = _constants.iCvmass
iUmass = _constants.iUmass
iGmass = _constants.iGmass
iHelmholtzmass = _constants.iHelmholtzmass
iviscosity = _constants.iviscosity
iconductivity = _constants.iconductivity
isurface_tension = _constants.isurface_tension
iPrandtl = _constants.iPrandtl
ispeed_sound = _constants.ispeed_sound
iisothermal_compressibility = _constants.iisothermal_compressibility
iisobaric_expansion_coefficient = _constants.iisobaric_expansion_coefficient
ifundamental_derivative_of_gas_dynamics = _constants.ifundamental_derivative_of_gas_dynamics
ialphar = _constants.ialphar
idalphar_dtau_constdelta = _constants.idalphar_dtau_constdelta
idalphar_ddelta_consttau = _constants.idalphar_ddelta_consttau
ialpha0 = _constants.ialpha0
idalpha0_dtau_constdelta = _constants.idalpha0_dtau_constdelta
idalpha0_ddelta_consttau = _constants.idalpha0_ddelta_consttau
iBvirial = _constants.iBvirial
iCvirial = _constants.iCvirial
idBvirial_dT = _constants.idBvirial_dT
idCvirial_dT = _constants.idCvirial_dT
iZ = _constants.iZ
iPIP = _constants.iPIP
ifraction_min = _constants.ifraction_min
ifraction_max = _constants.ifraction_max
iT_freeze = _constants.iT_freeze
iGWP20 = _constants.iGWP20
iGWP100 = _constants.iGWP100
iGWP500 = _constants.iGWP500
iFH = _constants.iFH
iHH = _constants.iHH
iPH = _constants.iPH
iODP = _constants.iODP
iPhase = _constants.iPhase
iundefined_parameter = _constants.iundefined_parameter
INPUT_PAIR_INVALID = _constants.INPUT_PAIR_INVALID
QT_INPUTS = _constants.QT_INPUTS
PQ_INPUTS = _constants.PQ_INPUTS
QSmolar_INPUTS = _constants.QSmolar_INPUTS
QSmass_INPUTS = _constants.QSmass_INPUTS
HmolarQ_INPUTS = _constants.HmolarQ_INPUTS
HmassQ_INPUTS = _constants.HmassQ_INPUTS
DmolarQ_INPUTS = _constants.DmolarQ_INPUTS
DmassQ_INPUTS = _constants.DmassQ_INPUTS
PT_INPUTS = _constants.PT_INPUTS
DmassT_INPUTS = _constants.DmassT_INPUTS
DmolarT_INPUTS = _constants.DmolarT_INPUTS
HmolarT_INPUTS = _constants.HmolarT_INPUTS
HmassT_INPUTS = _constants.HmassT_INPUTS
SmolarT_INPUTS = _constants.SmolarT_INPUTS
SmassT_INPUTS = _constants.SmassT_INPUTS
TUmolar_INPUTS = _constants.TUmolar_INPUTS
TUmass_INPUTS = _constants.TUmass_INPUTS
DmassP_INPUTS = _constants.DmassP_INPUTS
DmolarP_INPUTS = _constants.DmolarP_INPUTS
HmassP_INPUTS = _constants.HmassP_INPUTS
HmolarP_INPUTS = _constants.HmolarP_INPUTS
PSmass_INPUTS = _constants.PSmass_INPUTS
PSmolar_INPUTS = _constants.PSmolar_INPUTS
PUmass_INPUTS = _constants.PUmass_INPUTS
PUmolar_INPUTS = _constants.PUmolar_INPUTS
HmassSmass_INPUTS = _constants.HmassSmass_INPUTS
HmolarSmolar_INPUTS = _constants.HmolarSmolar_INPUTS
SmassUmass_INPUTS = _constants.SmassUmass_INPUTS
SmolarUmolar_INPUTS = _constants.SmolarUmolar_INPUTS
DmassHmass_INPUTS = _constants.DmassHmass_INPUTS
DmolarHmolar_INPUTS = _constants.DmolarHmolar_INPUTS
DmassSmass_INPUTS = _constants.DmassSmass_INPUTS
DmolarSmolar_INPUTS = _constants.DmolarSmolar_INPUTS
DmassUmass_INPUTS = _constants.DmassUmass_INPUTS
DmolarUmolar_INPUTS = _constants.DmolarUmolar_INPUTS
FLUID_TYPE_PURE = _constants.FLUID_TYPE_PURE
FLUID_TYPE_PSEUDOPURE = _constants.FLUID_TYPE_PSEUDOPURE
FLUID_TYPE_REFPROP = _constants.FLUID_TYPE_REFPROP
FLUID_TYPE_INCOMPRESSIBLE_LIQUID = _constants.FLUID_TYPE_INCOMPRESSIBLE_LIQUID
FLUID_TYPE_INCOMPRESSIBLE_SOLUTION = _constants.FLUID_TYPE_INCOMPRESSIBLE_SOLUTION
FLUID_TYPE_UNDEFINED = _constants.FLUID_TYPE_UNDEFINED
iphase_liquid = _constants.iphase_liquid
iphase_supercritical = _constants.iphase_supercritical
iphase_supercritical_gas = _constants.iphase_supercritical_gas
iphase_supercritical_liquid = _constants.iphase_supercritical_liquid
iphase_critical_point = _constants.iphase_critical_point
iphase_gas = _constants.iphase_gas
iphase_twophase = _constants.iphase_twophase
iphase_unknown = _constants.iphase_unknown
iphase_not_imposed = _constants.iphase_not_imposed
NORMALIZE_GAS_CONSTANTS = _constants.NORMALIZE_GAS_CONSTANTS
CRITICAL_WITHIN_1UK = _constants.CRITICAL_WITHIN_1UK
CRITICAL_SPLINES_ENABLED = _constants.CRITICAL_SPLINES_ENABLED
SAVE_RAW_TABLES = _constants.SAVE_RAW_TABLES
ALTERNATIVE_TABLES_DIRECTORY = _constants.ALTERNATIVE_TABLES_DIRECTORY
ALTERNATIVE_REFPROP_PATH = _constants.ALTERNATIVE_REFPROP_PATH
ALTERNATIVE_REFPROP_HMX_BNC_PATH = _constants.ALTERNATIVE_REFPROP_HMX_BNC_PATH
ALTERNATIVE_REFPROP_LIBRARY_PATH = _constants.ALTERNATIVE_REFPROP_LIBRARY_PATH
REFPROP_DONT_ESTIMATE_INTERACTION_PARAMETERS = _constants.REFPROP_DONT_ESTIMATE_INTERACTION_PARAMETERS
REFPROP_IGNORE_ERROR_ESTIMATED_INTERACTION_PARAMETERS = _constants.REFPROP_IGNORE_ERROR_ESTIMATED_INTERACTION_PARAMETERS
REFPROP_USE_GERG = _constants.REFPROP_USE_GERG
REFPROP_USE_PENGROBINSON = _constants.REFPROP_USE_PENGROBINSON
MAXIMUM_TABLE_DIRECTORY_SIZE_IN_GB = _constants.MAXIMUM_TABLE_DIRECTORY_SIZE_IN_GB
DONT_CHECK_PROPERTY_LIMITS = _constants.DONT_CHECK_PROPERTY_LIMITS
HENRYS_LAW_TO_GENERATE_VLE_GUESSES = _constants.HENRYS_LAW_TO_GENERATE_VLE_GUESSES
PHASE_ENVELOPE_STARTING_PRESSURE_PA = _constants.PHASE_ENVELOPE_STARTING_PRESSURE_PA
R_U_CODATA = _constants.R_U_CODATA
VTPR_UNIFAC_PATH = _constants.VTPR_UNIFAC_PATH
SPINODAL_MINIMUM_DELTA = _constants.SPINODAL_MINIMUM_DELTA
OVERWRITE_FLUIDS = _constants.OVERWRITE_FLUIDS
OVERWRITE_DEPARTURE_FUNCTION = _constants.OVERWRITE_DEPARTURE_FUNCTION
OVERWRITE_BINARY_INTERACTION = _constants.OVERWRITE_BINARY_INTERACTION
USE_GUESSES_IN_PROPSSI = _constants.USE_GUESSES_IN_PROPSSI
ASSUME_CRITICAL_POINT_STABLE = _constants.ASSUME_CRITICAL_POINT_STABLE
VTPR_ALWAYS_RELOAD_LIBRARY = _constants.VTPR_ALWAYS_RELOAD_LIBRARY
FLOAT_PUNCTUATION = _constants.FLOAT_PUNCTUATION
dbfe9381e4f6dcc57fd5c5d02265d7f565b40315 | 2,857 | py | Python | torch_datasets/samplers/balanced_batch_sampler.py | mingruimingrui/torch-datasets | 2640b8c4fa82156e68e617fc545a546b4e08dc4e | [
"MIT"
]
import random
import torch.utils.data.sampler
class BalancedBatchSampler(torch.utils.data.sampler.BatchSampler):
def __init__(
self,
dataset_labels,
batch_size=1,
steps=None,
n_classes=0,
n_samples=2
):
""" Create a balanced batch sampler for label based datasets
        Args
            dataset_labels : Labels of every entry from a dataset (in the same sequence)
            batch_size     : Size of each generated batch
            steps          : Number of batches to generate (if None, len(dataset_labels) // batch_size is used)
            n_classes      : Number of classes sampled per batch
            n_samples      : Number of samples drawn per class
        *** If batch_size > n_classes * n_samples, the rest of the batch is filled with random samples
        """
self.batch_size = batch_size
self.steps = len(dataset_labels) // batch_size if steps is None else steps
self.n_classes = n_classes
self.n_samples = n_samples
# Create a label_to_entry_ids table
self.label_to_entry_ids = {}
for entry_id, label in enumerate(dataset_labels):
if label in self.label_to_entry_ids:
self.label_to_entry_ids[label].append(entry_id)
else:
self.label_to_entry_ids[label] = [entry_id]
# Subset the labels with more than n_samples entries
self.labels_subset = [label for (label, entry_ids) in self.label_to_entry_ids.items() if len(entry_ids) >= n_samples]
        assert len(self.labels_subset) >= n_classes, 'Too few labels have at least {} entries; choose a smaller n_classes or n_samples'.format(n_samples)
def _make_batch_ids(self):
batch_ids = []
# Choose classes and entries
labels_choosen = random.sample(self.labels_subset, self.n_classes)
        # Randomly sample n_samples entries from the chosen labels
for l in labels_choosen:
batch_ids += random.sample(self.label_to_entry_ids[l], self.n_samples)
if len(batch_ids) < self.batch_size:
# Randomly sample remainder
labels_choosen = {l: None for l in labels_choosen}
remaining_entry_ids = []
for label, entry_ids in self.label_to_entry_ids.items():
if label not in labels_choosen:
remaining_entry_ids += entry_ids
batch_ids += random.sample(remaining_entry_ids, self.batch_size - len(batch_ids))
# Randomly shuffle batch ids
batch_ids = random.sample(batch_ids, self.batch_size)
batch_ids = torch.LongTensor(batch_ids)
return batch_ids
def __iter__(self):
self.count = 0
while self.count < self.steps:
self.count += 1
yield self._make_batch_ids()
def __len__(self):
return self.steps
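# Minimal usage sketch (illustrative, not part of the original module): build a
# sampler over toy labels and iterate it directly; with a map-style Dataset the
# sampler would instead be passed to DataLoader(..., batch_sampler=sampler).
if __name__ == "__main__":
    example_labels = [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2]
    sampler = BalancedBatchSampler(example_labels, batch_size=6, steps=3,
                                   n_classes=3, n_samples=2)
    for batch_indices in sampler:
        # Each batch contains 2 entries from each of 3 randomly chosen classes.
        print(batch_indices)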
dbfe9b7374d292dd3a07ffc92b4ebb9e7af2ac5d | 1,416 | py | Python | ambari-common/src/main/python/resource_management/libraries/functions/get_bare_principal.py | likenamehaojie/Apache-Ambari-ZH | 5973025bd694cdbb4b49fb4c4e0d774782811ff6 | [
"Apache-2.0"
]
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import re
__all__ = ["get_bare_principal"]
def get_bare_principal(normalized_principal_name):
"""
Given a normalized principal name (nimbus/[email protected]) returns just the
primary component (nimbus)
:param normalized_principal_name: a string containing the principal name to process
:return: a string containing the primary component value or None if not valid
"""
bare_principal = None
if normalized_principal_name:
match = re.match(r"([^/@]+)(?:/[^@])?(?:@.*)?", normalized_principal_name)
if match:
bare_principal = match.group(1)
return bare_principal | 33.714286 | 97 | 0.764831 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,150 | 0.812147 |
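# Example (illustrative principals, not taken from a real cluster): the primary
# component is everything before the first '/' or '@'.
if __name__ == '__main__':
    assert get_bare_principal('nimbus/host.example.com@EXAMPLE.COM') == 'nimbus'
    assert get_bare_principal('hdfs@EXAMPLE.COM') == 'hdfs'
    assert get_bare_principal(None) is None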
dbff3b375851c03b4ae31fb20b30423a4b9c6ad5 | 1,162 | py | Python | 04/cross_validation.01.py | study-machine-learning/dongheon.shin | 6103ef9c73b162603bc39a27e4ecca0f1ac35e57 | [
"MIT"
] | 2 | 2017-09-24T02:29:48.000Z | 2017-10-05T11:15:22.000Z | 04/cross_validation.01.py | study-machine-learning/dongheon.shin | 6103ef9c73b162603bc39a27e4ecca0f1ac35e57 | [
"MIT"
] | null | null | null | 04/cross_validation.01.py | study-machine-learning/dongheon.shin | 6103ef9c73b162603bc39a27e4ecca0f1ac35e57 | [
"MIT"
] | null | null | null | from sklearn import svm, metrics
import random
import re
def split(rows):
data = []
labels = []
for row in rows:
data.append(row[0:4])
labels.append(row[4])
return (data, labels)
def calculate_score(train, test):
train_data, train_label = split(train)
test_data, test_label = split(test)
classifier = svm.SVC()
classifier.fit(train_data, train_label)
predict = classifier.predict(test_data)
return metrics.accuracy_score(test_label, predict)
def to_number(n):
return float(n) if re.match(r"^[0-9\.]+$", n) else n
def to_columm(line):
return list(map(to_number, line.strip().split(",")))
lines = open("iris.csv", "r", encoding="utf-8").read().split("\n")
csv = list(map(to_columm, lines))
del csv[0]
random.shuffle(csv)
k = 5
csv_k = [[] for i in range(k)]
scores = []
for i in range(len(csv)):
csv_k[i % k].append(csv[i])
for test in csv_k:
train = []
for data in csv_k:
if test != data:
train += data
score = calculate_score(train, test)
scores.append(score)
print("score = ", scores)
print("avg = ", sum(scores) / len(scores))
| 16.84058 | 66 | 0.620482 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 58 | 0.049914 |
e002150863388c3c73e7c985abca9ac6c4427a70 | 2,587 | bzl | Python | third_party/org_specs2.bzl | wix/wix-oss-infra | b57ac2f0f66487cfdd08293a8fa389efe3d42c43 | [
"MIT"
] | 3 | 2020-01-14T12:57:31.000Z | 2021-06-06T20:47:57.000Z | third_party/org_specs2.bzl | wix/wix-oss-infra | b57ac2f0f66487cfdd08293a8fa389efe3d42c43 | [
"MIT"
] | 20 | 2020-04-02T13:08:43.000Z | 2020-11-05T11:27:40.000Z | third_party/org_specs2.bzl | wix/wix-oss-infra | b57ac2f0f66487cfdd08293a8fa389efe3d42c43 | [
"MIT"
] | 1 | 2021-04-02T09:32:35.000Z | 2021-04-02T09:32:35.000Z | load("@wix_oss_infra//:import_external.bzl", import_external = "safe_wix_scala_maven_import_external")
def dependencies():
import_external(
name = "org_specs2_specs2_fp_2_12",
artifact = "org.specs2:specs2-fp_2.12:4.8.3",
artifact_sha256 = "777962ca58054a9ea86e294e025453ecf394c60084c28bd61956a00d16be31a7",
srcjar_sha256 = "6b8bd1e7210754b768b68610709271c0dac29447936a976a2a9881389e6404ca",
deps = [
"@org_scala_lang_scala_library"
],
)
import_external(
name = "org_specs2_specs2_common_2_12",
artifact = "org.specs2:specs2-common_2.12:4.8.3",
artifact_sha256 = "3b08fecb9e21d3903e48b62cd95c19ea9253d466e03fd4cf9dc9227e7c368708",
srcjar_sha256 = "b2f148c75d3939b3cd0d58afddd74a8ce03077bb3ccdc93dae55bd9c3993e9c3",
deps = [
"@org_scala_lang_modules_scala_parser_combinators_2_12",
"@org_scala_lang_modules_scala_xml_2_12",
"@org_scala_lang_scala_library",
"@org_scala_lang_scala_reflect",
"@org_specs2_specs2_fp_2_12"
],
)
import_external(
name = "org_specs2_specs2_matcher_2_12",
artifact = "org.specs2:specs2-matcher_2.12:4.8.3",
artifact_sha256 = "aadf27b6d015572b2e3842627c09bf0797153dbb329262ea3bcbbce129d51ad8",
srcjar_sha256 = "01251acc28219aa17aabcb9a26a84e1871aa64980d335cd8f83c2bcea6f4f1be",
deps = [
"@org_scala_lang_scala_library",
"@org_specs2_specs2_common_2_12"
],
)
import_external(
name = "org_specs2_specs2_core_2_12",
artifact = "org.specs2:specs2-core_2.12:4.8.3",
artifact_sha256 = "f73f32156a711a4e83e696dc83e269c5a165d62cc3dd7c652617cb03d140d063",
srcjar_sha256 = "0e3cebfc7410051b70e627e35f13978add3d061b8f1233741f9b397638f193e9",
deps = [
"@org_scala_lang_scala_library",
"@org_scala_sbt_test_interface",
"@org_specs2_specs2_common_2_12",
"@org_specs2_specs2_matcher_2_12"
],
)
import_external(
name = "org_specs2_specs2_junit_2_12",
artifact = "org.specs2:specs2-junit_2.12:4.8.3",
artifact_sha256 = "5d7ad2c0b0bc142ea064edb7a1ea75ab7b17ad37e1a621ac7e578823845098e8",
srcjar_sha256 = "84edd1cd6291f6686638225fcbaff970ae36da006efabf2228255c2127b2290c",
deps = [
"@junit_junit",
"@org_scala_lang_scala_library",
"@org_scala_sbt_test_interface",
"@org_specs2_specs2_core_2_12"
],
)
| 37.492754 | 102 | 0.68535 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,576 | 0.6092 |
e0023b6272774adf06f1384bdb4cb510043c4a82 | 224 | py | Python | task/w2/trenirovka/12-rivnist 2.py | beregok/pythontask | 50394ff2b52ab4f3273ec9ddc4b504d1f7b3159e | [
"MIT"
] | 1 | 2019-09-29T14:19:54.000Z | 2019-09-29T14:19:54.000Z | task/w2/trenirovka/12-rivnist 2.py | beregok/pythontask | 50394ff2b52ab4f3273ec9ddc4b504d1f7b3159e | [
"MIT"
] | null | null | null | task/w2/trenirovka/12-rivnist 2.py | beregok/pythontask | 50394ff2b52ab4f3273ec9ddc4b504d1f7b3159e | [
"MIT"
] | null | null | null | a = int(input())
b = int(input())
c = int(input())
d = int(input())
if a == 0 and b == 0:
print("INF")
else:
if (d - b * c / a) != 0 and (- b / a) == (- b // a):
print(- b // a)
else:
print("NO")
| 18.666667 | 56 | 0.397321 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.040179 |
e004ab57c3294086a91490c226d7c40f3986ad7f | 4,265 | py | Python | src/reg_resampler.py | atif-hassan/Regression_ReSampling | 194b2ae8efea7598a6690792d40e42aba74c111b | [
"BSD-3-Clause"
] | 15 | 2020-06-09T20:08:04.000Z | 2021-11-21T15:58:09.000Z | src/reg_resampler.py | atif-hassan/Regression_ReSampling | 194b2ae8efea7598a6690792d40e42aba74c111b | [
"BSD-3-Clause"
] | null | null | null | src/reg_resampler.py | atif-hassan/Regression_ReSampling | 194b2ae8efea7598a6690792d40e42aba74c111b | [
"BSD-3-Clause"
] | 5 | 2020-06-13T22:07:51.000Z | 2021-05-21T03:26:45.000Z | class resampler:
def __init__(self):
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from collections import Counter
import numpy as np
self.bins = 3
self.pd = pd
self.LabelEncoder = LabelEncoder
self.Counter = Counter
self.X = 0
self.Y_classes = 0
self.target = 0
self.np = np
# This function adds classes to each sample and returns the class list as a dataframe/numpy array (as per input)
# It also merges classes as and when required
def fit(self, X, target, bins=3, min_n_samples=6, balanced_binning=False, verbose=2):
self.bins = bins
tmp = target
# If data is numpy, then convert it into pandas
if type(target) == int:
if target < 0:
target = X.shape[1]+target
tmp = target
self.X = self.pd.DataFrame()
for i in range(X.shape[1]):
if i!=target:
self.X[str(i)] = X[:,i]
self.X["target"] = X[:,target]
target = "target"
else:
self.X = X.copy()
# Use qcut if balanced binning is required
if balanced_binning:
self.Y_classes = self.pd.qcut(self.X[target], q=self.bins, precision=0)
else:
self.Y_classes = self.pd.cut(self.X[target], bins=self.bins)
# Pandas outputs ranges after binning. Convert ranges to classes
le = self.LabelEncoder()
self.Y_classes = le.fit_transform(self.Y_classes)
# Merge classes if number of neighbours is more than the number of samples
classes_count = list(map(list, self.Counter(self.Y_classes).items()))
classes_count = sorted(classes_count, key = lambda x: x[0])
mid_point = len(classes_count)
# Logic for merging
for i in range(len(classes_count)):
if classes_count[i][1] < min_n_samples:
self.Y_classes[self.np.where(self.Y_classes == classes_count[i][0])[0]] = classes_count[i-1][0]
if verbose > 0:
print("INFO: Class " + str(classes_count[i][0]) + " has been merged into Class " + str(classes_count[i-1][0]) + " due to low number of samples")
classes_count[i][0] = classes_count[i-1][0]
if verbose > 0:
print()
# Perform label-encoding once again
# Avoids class skipping after merging
le = self.LabelEncoder()
self.Y_classes = le.fit_transform(self.Y_classes)
# Pretty print
if verbose > 1:
print("Class Distribution:\n-------------------")
classes_count = list(map(list, self.Counter(self.Y_classes).items()))
classes_count = sorted(classes_count, key = lambda x: x[0])
for class_, count in classes_count:
print(str(class_)+": "+str(count))
print()
# Finally concatenate and return as dataframe or numpy
# Based on what type of target was sent
self.X["classes"] = self.Y_classes
if type(tmp) == int:
self.target = tmp
else:
self.target = target
return self.Y_classes
# This function performs the re-sampling
def resample(self, sampler_obj, trainX, trainY):
# If classes haven't yet been created, then run the "fit" function
if type(self.Y_classes) == int:
print("Error! Run fit method first!!")
return None
# Finally, perform the re-sampling
resampled_data, _ = sampler_obj.fit_resample(trainX, trainY)
if type(resampled_data).__module__ == 'numpy':
resampled_data = self.pd.DataFrame(resampled_data, columns=self.X.drop("classes", axis=1).columns)
# Return the correct X and Y
if type(self.target) == int:
return resampled_data.drop("target", axis=1).values, resampled_data["target"].values
else:
return resampled_data.drop(self.target, axis=1), resampled_data[self.target]
| 41.813725 | 169 | 0.562251 | 4,263 | 0.999531 | 0 | 0 | 0 | 0 | 0 | 0 | 975 | 0.228605 |
e0063a8b35dfc827fe158a159fe5d8b8ab703065 | 4,920 | py | Python | get_data/speech_commands.py | patrick-kidger/generalised_shapelets | 04930c89dc4673e2af402895fe67655f8375a808 | [
"MIT"
] | 32 | 2020-05-31T17:41:58.000Z | 2022-03-28T18:38:11.000Z | get_data/speech_commands.py | patrick-kidger/generalised_shapelets | 04930c89dc4673e2af402895fe67655f8375a808 | [
"MIT"
] | 1 | 2022-02-09T22:13:03.000Z | 2022-02-09T23:55:28.000Z | get_data/speech_commands.py | patrick-kidger/generalised_shapelets | 04930c89dc4673e2af402895fe67655f8375a808 | [
"MIT"
] | 9 | 2020-07-17T16:50:24.000Z | 2021-12-13T11:29:12.000Z | import os
import pathlib
import sklearn.model_selection
import tarfile
import torch
import torchaudio
import urllib.request
here = pathlib.Path(__file__).resolve().parent
def _split_data(tensor, stratify):
# 0.7/0.15/0.15 train/val/test split
(train_tensor, testval_tensor,
train_stratify, testval_stratify) = sklearn.model_selection.train_test_split(tensor, stratify,
train_size=0.7,
random_state=0,
shuffle=True,
stratify=stratify)
val_tensor, test_tensor = sklearn.model_selection.train_test_split(testval_tensor,
train_size=0.5,
random_state=1,
shuffle=True,
stratify=testval_stratify)
return train_tensor, val_tensor, test_tensor
def _save_data(dir, **tensors):
for tensor_name, tensor_value in tensors.items():
torch.save(tensor_value, str(dir / tensor_name) + '.pt')
def download():
base_base_loc = str(here / '../experiments/data')
if not os.path.exists(base_base_loc):
raise RuntimeError("data directory does not exist. Please create a directory called 'data' in the 'experiments'"
" directory. (We're going to put a lot of data there, so we don't make it automatically - "
"thus giving you the opportunity to make it a symlink rather than a normal directory, so "
"that the data can be stored elsewhere if you wish.)")
base_loc = base_base_loc + '/SpeechCommands'
loc = base_loc + '/speech_commands.tar.gz'
if os.path.exists(loc):
return
if not os.path.exists(base_loc):
os.mkdir(base_loc)
urllib.request.urlretrieve('http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz',
loc)
with tarfile.open(loc, 'r') as f:
f.extractall(base_loc)
def _process_data():
base_loc = here / '..' / 'experiments' / 'data' / 'SpeechCommands'
X = torch.empty(34975, 16000, 1)
y = torch.empty(34975, dtype=torch.long)
batch_index = 0
y_index = 0
for foldername in ('yes', 'no', 'up', 'down', 'left', 'right', 'on', 'off', 'stop', 'go'):
loc = base_loc / foldername
for filename in os.listdir(loc):
audio, _ = torchaudio.load_wav(loc / filename, channels_first=False,
normalization=False) # for forward compatbility if they fix it
audio = audio / 2 ** 15 # Normalization argument doesn't seem to work so we do it manually.
# A few samples are shorter than the full length; for simplicity we discard them.
if len(audio) != 16000:
continue
X[batch_index] = audio
y[batch_index] = y_index
batch_index += 1
y_index += 1
assert batch_index == 34975, "batch_index is {}".format(batch_index)
audio_X = X
# X is of shape (batch=34975, length=16000, channels=1)
X = torchaudio.transforms.MFCC(log_mels=True)(X.squeeze(-1)).transpose(1, 2).detach()
# X is of shape (batch=34975, length=81, channels=40). For some crazy reason it requires a gradient, so detach.
train_X, _, _ = _split_data(X, y)
out = []
means = []
stds = []
for Xi, train_Xi in zip(X.unbind(dim=-1), train_X.unbind(dim=-1)):
mean = train_Xi.mean()
std = train_Xi.std()
means.append(mean)
stds.append(std)
out.append((Xi - mean) / (std + 1e-5))
X = torch.stack(out, dim=-1)
train_audio_X, val_audio_X, test_audio_X = _split_data(audio_X, y)
train_X, val_X, test_X = _split_data(X, y)
train_y, val_y, test_y = _split_data(y, y)
return train_X, val_X, test_X, train_y, val_y, test_y, torch.stack(means), torch.stack(stds), train_audio_X, \
val_audio_X, test_audio_X
def main():
download()
(train_X, val_X, test_X, train_y, val_y, test_y, means, stds, train_audio_X, val_audio_X,
test_audio_X) = _process_data()
loc = here / '..' / 'experiments' / 'data' / 'speech_commands_data'
if not os.path.exists(loc):
os.mkdir(loc)
_save_data(loc, train_X=train_X, val_X=val_X, test_X=test_X, train_y=train_y, val_y=val_y, test_y=test_y,
means=means, stds=stds, train_audio_X=train_audio_X, val_audio_X=val_audio_X, test_audio_X=test_audio_X)
if __name__ == '__main__':
main()
| 42.051282 | 120 | 0.566057 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,019 | 0.207114 |
e008ab01b4020e37d916e20d303c66a51a23123e | 5,652 | py | Python | app/endpoints/products.py | duch94/spark_crud_test | 94a514797700c2e929792f0424fb0e9e911489b7 | [
"BSD-2-Clause"
] | null | null | null | app/endpoints/products.py | duch94/spark_crud_test | 94a514797700c2e929792f0424fb0e9e911489b7 | [
"BSD-2-Clause"
] | null | null | null | app/endpoints/products.py | duch94/spark_crud_test | 94a514797700c2e929792f0424fb0e9e911489b7 | [
"BSD-2-Clause"
] | null | null | null | from datetime import datetime
from typing import List
from flask import Blueprint, jsonify, request, json
from app.models.products import Product, Category, products_categories
from app import db
products_blueprint = Blueprint('products', __name__)
def create_or_get_categories(p: dict) -> List[Category]:
"""
Func to get existing categories objects or create new otherwise
:param p: payload of request
:return: list of categories
"""
recevied_categories: List[Category] = [Category(name=cat) for cat in p['categories']]
categories = []
for cat in recevied_categories:
exists = db.session.query(db.exists().where(Category.name == cat.name)).all()[0][0]
if exists:
existing_category = Category.query.filter(Category.name == cat.name).all()[0]
categories.append(existing_category)
else:
categories.append(cat)
return categories
@products_blueprint.route('/products', methods=['GET'])
def get_products():
return jsonify({
'results': [p.serialized for p in Product.query.all()]
})
@products_blueprint.route('/create_product', methods=['POST'])
def create_product():
data = request.get_data().decode('utf-8')
payload = json.loads(data)
datetime_format = '%Y-%m-%d %H:%M:%S'
if len(payload['categories']) < 1 or len(payload['categories']) > 5:
return '{"status": "error", "msg": "categories number must be between 1 and 5"}', 400
categories = create_or_get_categories(payload)
try:
new_prod = Product(name=payload['name'],
rating=float(payload['rating']),
featured=bool(payload['featured'] if 'featured' in payload.keys() else None),
expiration_date=(datetime.strptime(payload['expiration_date'], datetime_format)
if ('expiration_date' in payload.keys()) else None),
brand_id=int(payload['brand_id']),
items_in_stock=int(payload['items_in_stock']),
receipt_date=(datetime.strptime(payload['receipt_date'], datetime_format)
if ('receipt_date' in payload.keys()) else None))
except TypeError as e:
return '{"status": "error", "msg": "TypeError occured: check values of fields"}'
except KeyError as e:
return '{"status": "error", "msg": "field %s have not been found, but is required"}' % str(e), 400
if new_prod.rating > 8.0:
new_prod.featured = True
[cat.products.append(new_prod) for cat in categories]
[db.session.add(cat) for cat in categories]
db.session.commit()
return jsonify({"status": "ok", "msg": "product received"})
@products_blueprint.route('/update_product', methods=['PUT'])
def update_product():
data = request.get_data().decode('utf-8')
payload = json.loads(data)
datetime_format = '%Y-%m-%d %H:%M:%S'
product = Product.query.filter(Product.id == payload['id'])
if product:
if 'name' in payload.keys():
product.update({'name': payload['name']})
if 'featured' in payload.keys():
product.update({'featured': bool(payload['featured'])})
if 'rating' in payload.keys():
product.update({'rating': float(payload['rating'])})
if product.rating > 8.0:
product.featured = True
if 'items_in_stock' in payload.keys():
product.update({'items_in_stock': int(payload['items_in_stock'])})
if 'receipt_date' in payload.keys():
product.update({'receipt_date': datetime.strptime(payload['receipt_date'], datetime_format)})
if 'brand' in payload.keys():
product.update({'brand': int(payload['brand'])})
if 'categories' in payload.keys():
categories = create_or_get_categories(payload)
db.session.query(products_categories).filter(
products_categories.c.product_id == int(payload['id'])).delete(synchronize_session=False)
product_obj = product.all()[0]
[cat.products.append(product_obj) for cat in categories]
[db.session.add(cat) for cat in categories]
if 'expiration_date' in payload.keys():
product.update({'expiration_date': datetime.strptime(payload['expiration_date'], datetime_format)})
db.session.commit()
return jsonify({"status": "ok", "msg": "product updated"})
else:
return '{"status": "error", "msg": "no product found with given id"}', 404
@products_blueprint.route('/delete_product', methods=['DELETE'])
def delete_product():
data = request.get_data().decode('utf-8')
p = json.loads(data)
products_result = Product.query.filter(Product.id == int(p['id'])).delete(synchronize_session=False)
products_categories_result = db.session.query(products_categories).filter(
products_categories.c.product_id == int(p['id'])).delete(synchronize_session=False)
db.session.commit()
if products_result == 1:
return jsonify({"status": "ok",
"msg": "product deleted, also %d product_categories relations deleted"
% products_categories_result})
else:
return jsonify({"status": "warning", "msg": "%d products deleted, also %d product_categories relations deleted"
% (products_result, products_categories_result)})
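# Example request against the create endpoint (illustrative; assumes the blueprint
# is registered on an app served at localhost:5000):
#
#   curl -X POST http://localhost:5000/create_product \
#        -d '{"name": "Milk", "rating": 9.1, "brand_id": 1, "items_in_stock": 3,
#             "categories": ["dairy"]}'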
e008c8c892e467ea561589969c08eaa2c9b808db | 1,701 | py | Python | util/config/validators/test/test_validate_bitbucket_trigger.py | giuseppe/quay | a1b7e4b51974edfe86f66788621011eef2667e6a | [
"Apache-2.0"
] | 2,027 | 2019-11-12T18:05:48.000Z | 2022-03-31T22:25:04.000Z | util/config/validators/test/test_validate_bitbucket_trigger.py | giuseppe/quay | a1b7e4b51974edfe86f66788621011eef2667e6a | [
"Apache-2.0"
] | 496 | 2019-11-12T18:13:37.000Z | 2022-03-31T10:43:45.000Z | util/config/validators/test/test_validate_bitbucket_trigger.py | giuseppe/quay | a1b7e4b51974edfe86f66788621011eef2667e6a | [
"Apache-2.0"
] | 249 | 2019-11-12T18:02:27.000Z | 2022-03-22T12:19:19.000Z | import pytest
from httmock import urlmatch, HTTMock
from util.config import URLSchemeAndHostname
from util.config.validator import ValidatorContext
from util.config.validators import ConfigValidationException
from util.config.validators.validate_bitbucket_trigger import BitbucketTriggerValidator
from test.fixtures import *
@pytest.mark.parametrize(
"unvalidated_config",
[
(ValidatorContext({})),
(ValidatorContext({"BITBUCKET_TRIGGER_CONFIG": {}})),
(ValidatorContext({"BITBUCKET_TRIGGER_CONFIG": {"CONSUMER_KEY": "foo"}})),
(ValidatorContext({"BITBUCKET_TRIGGER_CONFIG": {"CONSUMER_SECRET": "foo"}})),
],
)
def test_validate_invalid_bitbucket_trigger_config(unvalidated_config, app):
validator = BitbucketTriggerValidator()
with pytest.raises(ConfigValidationException):
validator.validate(unvalidated_config)
def test_validate_bitbucket_trigger(app):
url_hit = [False]
@urlmatch(netloc=r"bitbucket.org")
def handler(url, request):
url_hit[0] = True
return {
"status_code": 200,
"content": "oauth_token=foo&oauth_token_secret=bar",
}
with HTTMock(handler):
validator = BitbucketTriggerValidator()
url_scheme_and_hostname = URLSchemeAndHostname("http", "localhost:5000")
unvalidated_config = ValidatorContext(
{
"BITBUCKET_TRIGGER_CONFIG": {
"CONSUMER_KEY": "foo",
"CONSUMER_SECRET": "bar",
},
},
url_scheme_and_hostname=url_scheme_and_hostname,
)
validator.validate(unvalidated_config)
assert url_hit[0]
e008cc40a9e990beff8a7a594350250e113f3691 | 2,414 | py | Python | Refraction.py | silkoch42/Geometric-Optics-from-QM | baf41b54c37835b527d5b98cb480d68bc2ff68c3 | [
"MIT"
] | null | null | null | Refraction.py | silkoch42/Geometric-Optics-from-QM | baf41b54c37835b527d5b98cb480d68bc2ff68c3 | [
"MIT"
] | null | null | null | Refraction.py | silkoch42/Geometric-Optics-from-QM | baf41b54c37835b527d5b98cb480d68bc2ff68c3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 15 16:51:16 2019
@author: Silvan
"""
import numpy as np
import scipy
import matplotlib.pyplot as plt
k=1000
n1=2.0
n2=1.0
alpha=np.pi/6.0
beta=np.arcsin(n2/n1*np.sin(alpha))
ya=1.0
xa=-ya*np.tan(alpha)
yb=-1.0
xb=-yb*np.tan(beta)
def s(x):
return n1*np.sqrt((xa-x)**2+ya**2)+n2*np.sqrt((xb-x)**2+yb**2)
def kernel(xa,xb):
return 1.0/np.sqrt(xa**2+1)**(3/2.0)+1.0/np.sqrt(xa**2+1)**(3/2.0)
def K(R):
L=1000 #Maximum Number of subdivisions for integral calculations
eps=0.01
N=50
x,dx=np.linspace(0.01,R,N,retstep=True)
real=np.empty(N)
imag=np.empty(N)
real[0]=scipy.integrate.quad(lambda x: np.cos(k*s(x)),-x[0],x[0],epsrel=eps,limit=L)[0]
imag[0]=scipy.integrate.quad(lambda x: np.sin(k*s(x)),-x[0],x[0],epsrel=eps,limit=L)[0]
for i in range(1,N):
r1=scipy.integrate.quad(lambda x: np.cos(k*s(x)),-x[i]-dx,-x[i],epsrel=eps,limit=L)[0]
r2=scipy.integrate.quad(lambda x: np.cos(k*s(x)),x[i],x[i]+dx,epsrel=eps,limit=L)[0]
real[i]=real[i-1]+r1+r2
i1=scipy.integrate.quad(lambda x: np.sin(k*s(x)),-x[i]-dx,-x[i],epsrel=eps,limit=L)[0]
i2=scipy.integrate.quad(lambda x: np.sin(k*s(x)),x[i],x[i]+dx,epsrel=eps,limit=L)[0]
imag[i]=imag[i-1]+i1+i2
return np.sqrt(real**2+imag**2),x,real,imag
K2,x,r,i=K(3)
M=np.mean(K2[25:])
plt.plot(x,K2/M,label=r'$|\int_{-R}^{R}e^{i k s(x)}dx|^2$')
#plt.errorbar(x,K2/M,0.1*K2/M)
plt.xlabel(r'Integration range $R$')
plt.ylabel('Detection probabilty')
plt.legend(loc='best')
plt.text(2.4,0.2,r'$k=1000$')
#plt.text(1.1,0.5,r'$|\int_{-R}^{R}e^{i k s(x)}dx|^2$',fontsize=20)
plt.savefig('refraction_v3',dpi=200)
plt.show()
#N=20
#
#dx=np.linspace(0,10,N)
#
#P=np.ones(N)
#
#for i in range(N):
# print(i+1)
# P[i]=trans_amp(dx[i])
#
#
#plt.figure(1)
#plt.plot(dx,P/np.mean(P[20:]))
#plt.text(4.0,0.5,r'$|\int_{-\Delta x}^{\Delta x} e^{ik s(x)}dx$|',fontsize=20)
#plt.ylabel('Transition Amplitude')
#plt.xlabel(r'Integration Interval $ \Delta x$')
##plt.axis([0,10,0,1.1])
#plt.legend(loc='best')
##plt.savefig('refraction',dpi=200)
#plt.show()
#x=np.linspace(-5,5,100)
#
#plt.figure(2)
#plt.plot(x,s(x))
#plt.show()
#
#d=np.linspace(0,5,100)
#xa=-d/2
#xb=d/2
#plt.figure(3)
#plt.plot(d,kernel(xa,xb)**2)
#plt.show() | 24.383838 | 95 | 0.583264 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 956 | 0.396023 |
e0097d9cfd4f53f9d94ad08b373e659436909217 | 444 | py | Python | readthedocs/docsitalia/management/commands/clear_elasticsearch.py | italia/readthedocs.org | 440d3885380d20ec24081f76e26d20701749e179 | [
"MIT"
] | 19 | 2018-03-28T12:28:35.000Z | 2022-02-14T20:09:42.000Z | readthedocs/docsitalia/management/commands/clear_elasticsearch.py | berez23/docs.italia.it | 440d3885380d20ec24081f76e26d20701749e179 | [
"MIT"
] | 274 | 2017-10-10T07:59:04.000Z | 2022-03-12T00:56:03.000Z | readthedocs/docsitalia/management/commands/clear_elasticsearch.py | italia/readthedocs.org | 440d3885380d20ec24081f76e26d20701749e179 | [
"MIT"
] | 13 | 2018-04-03T09:49:50.000Z | 2021-04-18T22:04:15.000Z | """Remove the readthedocs elasticsearch index."""
from __future__ import absolute_import
from django.conf import settings
from django.core.management.base import BaseCommand
from elasticsearch import Elasticsearch
class Command(BaseCommand):
"""Clear elasticsearch index."""
def handle(self, *args, **options):
"""handle command."""
e_s = Elasticsearch(settings.ES_HOSTS)
e_s.indices.delete(index='_all')
| 23.368421 | 51 | 0.725225 | 224 | 0.504505 | 0 | 0 | 0 | 0 | 0 | 0 | 108 | 0.243243 |
e00b9ee8e43ae71af00a3fe383bedc3df745f04d | 7,574 | py | Python | train.py | vnbot2/BigGAN-PyTorch | 1725269d52e05fbd4d06dac64aa4906a8ae7a760 | [
"MIT"
] | null | null | null | train.py | vnbot2/BigGAN-PyTorch | 1725269d52e05fbd4d06dac64aa4906a8ae7a760 | [
"MIT"
] | null | null | null | train.py | vnbot2/BigGAN-PyTorch | 1725269d52e05fbd4d06dac64aa4906a8ae7a760 | [
"MIT"
] | null | null | null | """ BigGAN: The Authorized Unofficial PyTorch release
Code by A. Brock and A. Andonian
This code is an unofficial reimplementation of
"Large-Scale GAN Training for High Fidelity Natural Image Synthesis,"
by A. Brock, J. Donahue, and K. Simonyan (arXiv 1809.11096).
Let's go.
"""
import datetime
import time
import torch
import dataset
import BigGAN
import train_fns
import utils
from common import *
# IMG_SIZE = 64
# IMG_SIZE_2 = IMG_SIZE * 2
def run(config):
# Update the config dict as necessary
# This is for convenience, to add settings derived from the user-specified
# configuration into the config-dict (e.g. inferring the number of classes
# and size of the images from the dataset, passing in a pytorch object
# for the activation specified as a string)
config['resolution'] = IMG_SIZE
config['n_classes'] = 1
config['G_activation'] = utils.activation_dict[config['G_nl']]
config['D_activation'] = utils.activation_dict[config['D_nl']]
# By default, skip init if resuming training.
if config['resume']:
print('Skipping initialization for training resumption...')
config['skip_init'] = True
config = utils.update_config_roots(config)
device = 'cuda'
# Seed RNG
utils.seed_rng(config['seed'])
# Prepare root folders if necessary
utils.prepare_root(config)
# Setup cudnn.benchmark for free speed
torch.backends.cudnn.benchmark = True
experiment_name = (config['experiment_name'] if config['experiment_name']
else 'generative_dog_images')
print('Experiment name is %s' % experiment_name)
G = BigGAN.Generator(**config).to(device)
D = BigGAN.Discriminator(**config).to(device)
# if config['parallel']:
G = nn.DataParallel(G)
D = nn.DataParallel(D)
# If using EMA, prepare it
if config['ema']:
print('Preparing EMA for G with decay of {}'.format(
config['ema_decay']))
G_ema = BigGAN.Generator(**{**config, 'skip_init': True,
'no_optim': True}).to(device)
G_ema = nn.DataParallel(G_ema)
ema = utils.ema(G, G_ema, config['ema_decay'], config['ema_start'])
else:
G_ema, ema = None, None
GD = BigGAN.G_D(G, D)
print(G)
print(D)
print('Number of params in G: {} D: {}'.format(
*[sum([p.data.nelement() for p in net.parameters()]) for net in [G, D]]))
# Prepare state dict, which holds things like epoch # and itr #
state_dict = {'itr': 0, 'epoch': 0, 'save_num': 0, 'config': config}
# If loading from a pre-trained model, load weights
if config['resume']:
print('Loading weights...')
utils.load_weights(G, D, state_dict,
config['weights_root'], experiment_name,
config['load_weights'] if config['load_weights'] else None,
G_ema if config['ema'] else None)
# Prepare data; the Discriminator's batch size is all that needs to be passed
# to the dataloader, as G doesn't require dataloading.
# Note that at every loader iteration we pass in enough data to complete
# a full D iteration (regardless of number of D steps and accumulations)
D_batch_size = (config['batch_size'] *
config['num_D_steps'] * config['num_D_accumulations'])
loaders = dataset.get_data_loaders(
data_root=config['data_root'],
label_root=config['label_root'],
batch_size=D_batch_size,
num_workers=config['num_workers'],
shuffle=config['shuffle'],
pin_memory=config['pin_memory'],
drop_last=True,
load_in_mem=config['load_in_mem'],
mask_out=config['mask_out']
)
# Prepare noise and randomly sampled label arrays
# Allow for different batch sizes in G
G_batch_size = max(config['G_batch_size'], config['batch_size'])
num_samples = config['num_fixed_samples']
z_, y_ = utils.prepare_z_y(
num_samples, G.module.dim_z, config['n_classes'], device=device, fp16=config['G_fp16'])
# Prepare a fixed z & y to see individual sample evolution throghout training
fixed_z, fixed_y = utils.prepare_z_y(
num_samples, G.module.dim_z, config['n_classes'], device=device, fp16=config['G_fp16'])
fixed_z.sample_()
fixed_y.sample_()
# Loaders are loaded, prepare the training function
train = train_fns.create_train_fn(
G, D, GD, z_, y_, ema, state_dict, config)
print('Beginning training at epoch %d...' % state_dict['epoch'])
start_time = time.perf_counter()
loader = loaders[0]
total_iters = config['num_epochs'] * len(loader)
# Train for specified number of epochs, although we mostly track G iterations.
pbar = tqdm(total=total_iters)
for _ in range(state_dict['itr']):
pbar.update()
timer = mmcv.Timer()
timer.start()
start_itr = state_dict['itr']
for epoch in range(state_dict['epoch'], config['num_epochs']):
for i, data in enumerate(loader):
x, y = data['img'], data['label']
# Increment the iteration counter
state_dict['itr'] += 1
# Make sure G and D are in training mode, just in case they got set to eval
# For D, which typically doesn't have BN, this shouldn't matter much.
G.train()
D.train()
if config['ema']:
G_ema.train()
x, y = x.to(device), y.to(device)
metrics = train(x, y)
if not (state_dict['itr'] % config['log_interval']):
curr_time = timer.since_start()
curr_time_str = datetime.datetime.fromtimestamp(
curr_time).strftime('%H:%M:%S')
# quang duong / (quang duong da di / thoi gian da di)
eta = (
total_iters - state_dict['itr']) // ((state_dict['itr']-start_itr) / (curr_time+1))
eta_str = datetime.datetime.fromtimestamp(
eta).strftime('%H:%M:%S')
log = "[{}] [{}] [{} / {}] Ep {}, ".format(
curr_time_str, eta_str, state_dict['itr'], total_iters, epoch)
log += ', '.join(['%s : %+4.3f' % (key, metrics[key])
for key in metrics])
pbar.set_description(log)
# print(log)
# Save weights and copies as configured at specified interval
if not (state_dict['itr'] % config['sample_every']):
if config['G_eval_mode']:
# print('Switching G to eval mode...')
G.eval()
train_fns.save_and_sample(G, D, G_ema, z_, y_, fixed_z, fixed_y,
state_dict, config, experiment_name, save_weight=False)
if not (state_dict['itr'] % config['save_every']):
if config['G_eval_mode']:
# print('Switching G to eval mode...')
G.eval()
train_fns.save_and_sample(G, D, G_ema, z_, y_, fixed_z, fixed_y,
state_dict, config, experiment_name, save_weight=True)
pbar.update()
# Increment epoch counter at end of epoch
state_dict['epoch'] += 1
def main():
# parse command line and run
parser = utils.prepare_parser()
config = vars(parser.parse_args())
print(config)
run(config)
if __name__ == '__main__':
main()
| 40.287234 | 103 | 0.597571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,926 | 0.386322 |
e00c71d6078595059b1d0af82650622e80499174 | 1,693 | py | Python | geocamUtil/tempfiles.py | geocam/geocamUtilWeb | b64fc063c64b4b0baa140db4c126f2ff980756ab | [
"NASA-1.3"
] | 4 | 2017-03-03T16:24:24.000Z | 2018-06-24T05:50:40.000Z | geocamUtil/tempfiles.py | geocam/geocamUtilWeb | b64fc063c64b4b0baa140db4c126f2ff980756ab | [
"NASA-1.3"
] | 1 | 2021-09-29T17:17:30.000Z | 2021-09-29T17:17:30.000Z | geocamUtil/tempfiles.py | geocam/geocamUtilWeb | b64fc063c64b4b0baa140db4c126f2ff980756ab | [
"NASA-1.3"
] | 1 | 2017-12-19T20:45:53.000Z | 2017-12-19T20:45:53.000Z | # __BEGIN_LICENSE__
#Copyright (c) 2015, United States Government, as represented by the
#Administrator of the National Aeronautics and Space Administration.
#All rights reserved.
# __END_LICENSE__
import os
import time
import random
import shutil
from glob import glob
import traceback
import sys
from geocamUtil import FileUtil
from django.conf import settings
def getTempName(prefix, suffix=''):
return '%s/%s-%s-%s%s' % (settings.TMP_DIR,
prefix,
time.strftime('%Y-%m-%d-%H%M'),
'%04x' % random.getrandbits(16),
suffix)
def deleteStaleFiles():
files = glob('%s/*' % settings.TMP_DIR)
now = time.time()
for f in files:
if (now - os.stat(f).st_ctime > settings.GEOCAM_UTIL_DELETE_TMP_FILE_WAIT_SECONDS and
not f.endswith('/README.txt')):
try:
os.unlink(f)
except OSError:
traceback.print_exc()
print >> sys.stderr, '[tempfiles.deleteStaleFiles: could not unlink %s]' % f
def makeTempDir(prefix):
d = getTempName(prefix)
if not os.path.exists(settings.TMP_DIR):
FileUtil.mkdirP(settings.TMP_DIR)
os.system('chmod go+rw %s' % settings.TMP_DIR)
deleteStaleFiles()
FileUtil.mkdirP(d)
return d
def initZipDir(prefix):
return makeTempDir(prefix)
def finishZipDir(zipDir):
zipFile = '%s.zip' % zipDir
oldDir = os.getcwd()
os.chdir(os.path.dirname(settings.TMP_DIR))
os.system('zip -r %s %s' % (zipFile, os.path.basename(zipDir)))
os.chdir(oldDir)
shutil.rmtree(zipDir)
return zipFile
| 27.306452 | 93 | 0.617247 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 341 | 0.201418 |
e00d7dd12724a0363ee40d8c349e7cccfb71d6f4 | 5,752 | py | Python | Ex1:Tests/ex2.py | Lludion/Exercises-SE | 4d5b2b4f2989a3e2c7891ba2b766394dbfb43973 | [
"MIT"
] | null | null | null | Ex1:Tests/ex2.py | Lludion/Exercises-SE | 4d5b2b4f2989a3e2c7891ba2b766394dbfb43973 | [
"MIT"
] | null | null | null | Ex1:Tests/ex2.py | Lludion/Exercises-SE | 4d5b2b4f2989a3e2c7891ba2b766394dbfb43973 | [
"MIT"
] | null | null | null | # Ce fichier contient (au moins) cinq erreurs.
# Instructions:
# - tester jusqu'à atteindre 100% de couverture;
# - corriger les bugs;"
# - envoyer le diff ou le dépôt git par email."""
import hypothesis
from hypothesis import given, settings
from hypothesis.strategies import integers, lists
class BinHeap:
#structure de tas binaires d'entiers
def __init__(self):
#initialise un tas binaire d'entiers avec un element 0
self.heapList = [0]
self.currentSize = 1#taille de la liste heapList (invariant)
def percUp(self,i):
#upward percolation until 0 reached or father is bigger
while i // 2 > 0 and self.heapList[i] < self.heapList[i // 2]:
tmp = self.heapList[i // 2]
self.heapList[i // 2] = self.heapList[i]
self.heapList[i] = tmp
i //= 2
def insert(self,k):
#inserting a new value into the heap
self.heapList.append(k)
self.percUp(self.currentSize)
self.currentSize = self.currentSize + 1
def percDown(self,i):
while (i * 2) < self.currentSize:#while I have a child
mc = self.minChild(i)#mc is the index of the smallest
if self.heapList[i] > self.heapList[mc]:
tmp = self.heapList[i]
self.heapList[i] = self.heapList[mc]
self.heapList[mc] = tmp
i = mc
def minChild(self,i):
if i * 2 >= self.currentSize or i == 0:
print("No Child. None is returned.")
return
if i * 2 + 1 >= self.currentSize:
return i * 2
else:
if self.heapList[i*2] < self.heapList[i*2+1]:
return i * 2
else:
return i * 2 + 1
def delMin(self):
try:
rval = self.heapList[1]
except IndexError:
print("Empty heap. Nothing is changed. None is returned.")
return
self.currentSize = self.currentSize - 1
self.heapList[1] = self.heapList[self.currentSize]
self.heapList.pop()
self.percDown(1)
return rval
def buildHeap(self,alist):
#creates a whole heap from a list, by percolating all its elements
i = 1
self.currentSize = len(alist) + 1# + 1
self.heapList = [0] + alist # enlever le [:]
while (i < self.currentSize):
self.percUp(i)
i += 1
def assert_isheaplist(x,val,lon,HL):
assert ((x * 2 + 1 > lon) or (x * 2 + 1 == lon and HL[2*x] >= val) or (HL[2*x] >= val and HL[2*x+1] >= val))
def assert_goodheap(tau,lon):
for x in range(1,lon):
assert_isheaplist(x,tau.heapList[x],tau.currentSize,tau.heapList)
def test_init():
tau = BinHeap()
assert tau.heapList == [0]
assert tau.currentSize == 1
@given(integers())
@settings(max_examples=100)
def test_percup(integer):
gamma = [0,1,3,2,9,99,2,3,10,9,103,102,3,2,3,3]
tau = BinHeap()
tau.currentsize = 16
tau.heapList = gamma[:]
tau.percUp(15)
assert tau.heapList == gamma[:]
tau.heapList[15] = 2
tau.percUp(15)
print(tau.heapList)
assert tau.heapList == [0,1,3,2,9,99,2,2,10,9,103,102,3,2,3,3]
assert tau.currentsize == 16
tau.heapList.append(8)
tau.currentsize = 17
tau.percUp(16)
assert tau.heapList == [0,1,3,2,8,99,2,2,9,9,103,102,3,2,3,3,10]
tau.heapList.append(integer)
tau.currentsize = 18
tau.percUp(17)
assert tau.heapList[17] >= tau.heapList[8]
assert tau.heapList[8] >= tau.heapList[4]
@given(lists(elements=integers()))
@settings(max_examples=1000)
def test_build(L):
tau = BinHeap()
tau.buildHeap(L)
assert tau.currentSize == len(L) + 1
assert sorted(tau.heapList) == sorted(L+[0])
assert_goodheap(tau,len(L)+1)
#for x in range(1,len(L) + 1):
# assert_isheaplist(x,tau.heapList[x],tau.currentSize,tau.heapList)
@given(lists(elements=integers()),integers())
@settings(max_examples=1000)
def test_insert(L,i):
tau = BinHeap()
tau.buildHeap(L)
tau.insert(i)
assert_goodheap(tau,len(L)+1)
@given(lists(elements=integers()),integers())
@settings(max_examples=100)
def test_percDown(L,i):
tau = BinHeap()
L += [10]
tau.buildHeap(L)
tau.heapList[1] = i
tau.percDown(1)
for x in range(1,len(L) + 1):
for _ in range(len(L)):
tau.percDown(x)
#then we test that we got a well-ordered heap
assert_goodheap(tau,len(L)+1)
@given(lists(elements=integers()))
@settings(max_examples=400,deadline=None)
def test_delmin(L):
L += [10]
tau = BinHeap()
assert tau.delMin() is None
tau.buildHeap(L)
#print(L)
#print("sorted",sorted(L),"\n")
#print("TAU ", tau.heapList,"\n")
assert tau.delMin() == min(L)
@given(lists(elements=integers()),integers())
@settings(max_examples=400)
def test_minChild(L,i):
tau = BinHeap()
assert tau.minChild(abs(i)) is None
tau.buildHeap(2*L+[0,1])
assert tau.minChild(len(L)+1) is not None
@given(lists(elements=integers()),lists(elements=integers()))
@settings(max_examples=400,deadline=None)
def test_general(L,K):
tau = BinHeap()
tau.buildHeap(L)#tas construit avec L
for k in K:tau.insert(k)#on rajoute les elements de K
assert_goodheap(tau,tau.currentSize)
x = []
while tau.currentSize > 1:x.append(tau.delMin())#on retire tous les elements
assert x == sorted(L + K)#verifie qu'on a bien le minimum avec delmin
assert tau.delMin() is None
x = []
tau.buildHeap(K)
for l in L:#teste si 1 suite d'insertion/ suppression maintient la structure
tau.delMin()
tau.insert(l)
assert_goodheap(tau,tau.currentSize) | 31.26087 | 112 | 0.604312 | 2,145 | 0.372719 | 0 | 0 | 2,885 | 0.501303 | 0 | 0 | 1,033 | 0.179496 |
e00dbb3c20046835e182d01718caf34d09944176 | 22,455 | py | Python | python/snewpy/snowglobes.py | svalder/snewpy | 5723189ae3dce3506f2fab056bbef24c9ab1a31f | [
"BSD-3-Clause"
]
# -*- coding: utf-8 -*-
"""The ``snewpy.snowglobes`` module contains functions for interacting with SNOwGLoBES.
`SNOwGLoBES <https://github.com/SNOwGLoBES/snowglobes>`_ can estimate detected
event rates from a given input supernova neutrino flux. It supports many
different neutrino detectors, detector materials and interaction channels.
There are three basic steps to using SNOwGLoBES from SNEWPY:
* **Generating input files for SNOwGLoBES:**
    There are two ways to do this: generate either a time series or a fluence file. Both take the supernova simulation model as input.
    The former evaluates the neutrino flux at each time step, the latter computes the integrated neutrino flux (fluence) in the time bin.
The result is a compressed .tar file containing all individual input files.
* **Running SNOwGLoBES:**
This step convolves the fluence generated in the previous step with the cross-sections for the interaction channels happening in various detectors supported by SNOwGLoBES.
It takes into account the effective mass of the detector as well as a smearing matrix describing the energy-dependent detection efficiency.
The output gives the number of events detected as a function of energy for each interaction channel, integrated in a given time window (or time bin), or in a snapshot in time.
* **Collating SNOwGLoBES outputs:**
This step puts together all the interaction channels and time bins evaluated by SNOwGLoBES in a single file (for each detector and for each time bin).
    The output tables allow one to build the detected neutrino energy spectrum and neutrino time distribution, for each interaction channel or for their sum.
"""
import io
import logging
import os
import re
import tarfile
from pathlib import Path
from tempfile import TemporaryDirectory
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from astropy import units as u
from tqdm.auto import tqdm
import snewpy.models
from snewpy.flavor_transformation import *
from snewpy.neutrino import Flavor, MassHierarchy
from snewpy.snowglobes_interface import SNOwGLoBES
logger = logging.getLogger(__name__)
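# A minimal end-to-end sketch of the three steps described in the module docstring
# (illustrative; the model path, distance and detector name are placeholders, and the
# collate() helper implementing step three is assumed to be available in this module;
# `snowglobes_dir` stands for the path to a local SNOwGLoBES installation):
#
#   from snewpy import snowglobes
#   tarball = snowglobes.generate_fluence("SNEWPY_models/Bollig_2016/s11.2c",
#                                         "Bollig_2016", "AdiabaticMSW_NMO", d=10)
#   snowglobes.simulate(snowglobes_dir, tarball, detector_input="icecube")
#   tables = snowglobes.collate(snowglobes_dir, tarball)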
def generate_time_series(model_path, model_type, transformation_type, d, output_filename=None, ntbins=30, deltat=None):
"""Generate time series files in SNOwGLoBES format.
This version will subsample the times in a supernova model, produce energy
tables expected by SNOwGLoBES, and compress the output into a tarfile.
Parameters
----------
model_path : str
Input file containing neutrino flux information from supernova model.
model_type : str
Format of input file. Matches the name of the corresponding class in :py:mod:`snewpy.models`.
transformation_type : str
Name of flavor transformation. See snewpy.flavor_transformation documentation for possible values.
d : int or float
Distance to supernova in kpc.
output_filename : str or None
Name of output file. If ``None``, will be based on input file name.
ntbins : int
Number of time slices. Will be ignored if ``deltat`` is also given.
deltat : astropy.Quantity or None
Length of time slices.
Returns
-------
str
Path of compressed .tar file with neutrino flux data.
"""
model_class = getattr(snewpy.models.ccsn, model_type)
# Choose flavor transformation. Use dict to associate the transformation name with its class.
    flavor_transformation_dict = {'NoTransformation': NoTransformation(),
                                  'AdiabaticMSW_NMO': AdiabaticMSW(mh=MassHierarchy.NORMAL),
                                  'AdiabaticMSW_IMO': AdiabaticMSW(mh=MassHierarchy.INVERTED),
                                  'NonAdiabaticMSWH_NMO': NonAdiabaticMSWH(mh=MassHierarchy.NORMAL),
                                  'NonAdiabaticMSWH_IMO': NonAdiabaticMSWH(mh=MassHierarchy.INVERTED),
                                  'TwoFlavorDecoherence': TwoFlavorDecoherence(),
                                  'ThreeFlavorDecoherence': ThreeFlavorDecoherence(),
                                  'NeutrinoDecay_NMO': NeutrinoDecay(mh=MassHierarchy.NORMAL),
                                  'NeutrinoDecay_IMO': NeutrinoDecay(mh=MassHierarchy.INVERTED)}
flavor_transformation = flavor_transformation_dict[transformation_type]
model_dir, model_file = os.path.split(os.path.abspath(model_path))
snmodel = model_class(model_path)
# Subsample the model time. Default to 30 time slices.
tmin = snmodel.get_time()[0]
tmax = snmodel.get_time()[-1]
if deltat is not None:
dt = deltat
ntbins = int((tmax-tmin)/dt)
else:
dt = (tmax - tmin) / (ntbins+1)
tedges = np.arange(tmin/u.s, tmax/u.s, dt/u.s)*u.s
times = 0.5*(tedges[1:] + tedges[:-1])
# Generate output.
if output_filename is not None:
tfname = output_filename + 'kpc.tar.bz2'
else:
model_file_root, _ = os.path.splitext(model_file) # strip extension (if present)
tfname = model_file_root + '.' + transformation_type + '.{:.3f},{:.3f},{:d}-{:.1f}'.format(tmin, tmax, ntbins, d) + 'kpc.tar.bz2'
with tarfile.open(os.path.join(model_dir, tfname), 'w:bz2') as tf:
#creates file in tar archive that gives information on parameters
output = '\n'.join(map(str, transformation_type)).encode('ascii')
tf.addfile(tarfile.TarInfo(name='parameterinfo'), io.BytesIO(output))
MeV = 1.60218e-6 * u.erg
energy = np.linspace(0, 100, 501) * MeV # 1MeV
# Loop over sampled times.
for i, t in enumerate(times):
osc_spectra = snmodel.get_transformed_spectra(t, energy, flavor_transformation)
osc_fluence = {}
table = []
table.append('# TBinMid={:g}sec TBinWidth={:g}s EBinWidth=0.2MeV Fluence at Earth for this timebin in neutrinos per cm^2'.format(t, dt))
table.append('# E(GeV) NuE NuMu NuTau aNuE aNuMu aNuTau')
# Generate energy + number flux table.
for j, E in enumerate(energy):
for flavor in Flavor:
osc_fluence[flavor] = osc_spectra[flavor][j] * dt * 0.2 * MeV / (4.*np.pi*(d*1000*3.086e+18)**2)
s = '{:17.8E}'.format(E/(1e3 * MeV))
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E_BAR])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR])
table.append(s)
logging.debug(s)
# Encode energy/flux table and output to file in tar archive.
output = '\n'.join(table).encode('ascii')
extension = ".dat"
model_file_root, _ = os.path.splitext(model_file)
filename = model_file_root + '.tbin{:01d}.'.format(i+1) + transformation_type + \
'.{:.3f},{:.3f},{:01d}-{:.1f}kpc{}'.format(tmin/u.s, tmax/u.s, ntbins, d, extension)
info = tarfile.TarInfo(name=filename)
info.size = len(output)
tf.addfile(info, io.BytesIO(output))
return os.path.join(model_dir, tfname)
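# Example call (illustrative; the model file path is a placeholder for a locally
# downloaded SNEWPY model file):
#
#   tarball_path = generate_time_series("models/Nakazato_2013/nakazato-shen-z0.004-t_rev100ms-s20.0.fits",
#                                       "Nakazato_2013", "AdiabaticMSW_NMO", d=10, ntbins=20)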
def generate_fluence(model_path, model_type, transformation_type, d, output_filename=None, tstart=None, tend=None):
"""Generate fluence files in SNOwGLoBES format.
This version will subsample the times in a supernova model, produce energy
tables expected by SNOwGLoBES, and compress the output into a tarfile.
Parameters
----------
model_path : str
Input file containing neutrino flux information from supernova model.
model_type : str
Format of input file. Matches the name of the corresponding class in :py:mod:`snewpy.models`.
transformation_type : str
Name of flavor transformation. See snewpy.flavor_transformation documentation for possible values.
d : int or float
Distance to supernova in kpc.
output_filename : str or None
Name of output file. If ``None``, will be based on input file name.
tstart : astropy.Quantity or None
Start of time interval to integrate over, or list of start times of the time series bins.
tend : astropy.Quantity or None
End of time interval to integrate over, or list of end times of the time series bins.
Returns
-------
str
Path of compressed .tar file with neutrino flux data.
"""
model_class = getattr(snewpy.models.ccsn, model_type)
# Choose flavor transformation. Use dict to associate the transformation name with its class.
    flavor_transformation_dict = {'NoTransformation': NoTransformation(),
                                  'AdiabaticMSW_NMO': AdiabaticMSW(mh=MassHierarchy.NORMAL),
                                  'AdiabaticMSW_IMO': AdiabaticMSW(mh=MassHierarchy.INVERTED),
                                  'NonAdiabaticMSWH_NMO': NonAdiabaticMSWH(mh=MassHierarchy.NORMAL),
                                  'NonAdiabaticMSWH_IMO': NonAdiabaticMSWH(mh=MassHierarchy.INVERTED),
                                  'TwoFlavorDecoherence': TwoFlavorDecoherence(),
                                  'ThreeFlavorDecoherence': ThreeFlavorDecoherence(),
                                  'NeutrinoDecay_NMO': NeutrinoDecay(mh=MassHierarchy.NORMAL),
                                  'NeutrinoDecay_IMO': NeutrinoDecay(mh=MassHierarchy.INVERTED)}
flavor_transformation = flavor_transformation_dict[transformation_type]
model_dir, model_file = os.path.split(os.path.abspath(model_path))
snmodel = model_class(model_path)
#set the timings up
#default if inputs are None: full time window of the model
if tstart is None:
tstart = snmodel.get_time()[0]
tend = snmodel.get_time()[-1]
try:
if len(tstart/u.s) > 0:
t0 = tstart[0]
t1 = tend[-1]
nbin = len(tstart/u.s)
except:
t0 = tstart
t1 = tend
nbin = 1
times = 0.5*(tstart + tend)
model_times = snmodel.get_time()
model_tstart = model_times*1.0
model_tend = model_times*1.0
model_tstart[0] = model_times[0]
for i in range(1, len(model_times), 1):
model_tstart[i] = 0.5*(model_times[i]+model_times[i-1])
model_tend[i-1] = model_tstart[i]
model_tend[len(model_times)-1] = model_times[-1]
if nbin > 1:
starting_index = np.zeros(len(times), dtype=np.int64)
ending_index = np.zeros(len(times), dtype=np.int64)
for i in range(len(tstart)):
starting_index[i] = next(j for j, t in enumerate(model_tend) if t > tstart[i])
ending_index[i] = next(j for j, t in enumerate(model_tend) if t >= tend[i])
else:
starting_index = [next(j for j, t in enumerate(model_tend) if t > tstart)]
ending_index = [next(j for j, t in enumerate(model_tend) if t >= tend)]
# Generate output.
if output_filename is not None:
tfname = output_filename+'.tar.bz2'
else:
model_file_root, _ = os.path.splitext(model_file) # strip extension (if present)
tfname = model_file_root + '.' + transformation_type + '.{:.3f},{:.3f},{:d}-{:.1f}'.format(t0, t1, nbin, d) + 'kpc.tar.bz2'
with tarfile.open(os.path.join(model_dir, tfname), 'w:bz2') as tf:
#creates file in tar archive that gives information on parameters
output = '\n'.join(map(str, transformation_type)).encode('ascii')
tf.addfile(tarfile.TarInfo(name='parameterinfo'), io.BytesIO(output))
MeV = 1.60218e-6 * u.erg
energy = np.linspace(0, 100, 501) * MeV
# Loop over sampled times.
for i in range(nbin):
if nbin > 1:
ta = tstart[i]
tb = tend[i]
t = times[i]
dt = tb-ta
else:
ta = tstart
tb = tend
t = times
dt = tb-ta
#first time bin of model in requested interval
osc_spectra = snmodel.get_transformed_spectra(model_times[starting_index[i]], energy, flavor_transformation)
            # Weight the first model bin by its overlap with the requested
            # interval, unless the interval is fully contained in that bin.
            if dt >= model_tend[starting_index[i]] - ta:
                for flavor in Flavor:
                    osc_spectra[flavor] *= (model_tend[starting_index[i]] - ta)
#intermediate time bins of model in requested interval
for j in range(starting_index[i]+1, ending_index[i], 1):
temp_spectra = snmodel.get_transformed_spectra(model_times[j], energy, flavor_transformation)
for flavor in Flavor:
osc_spectra[flavor] += temp_spectra[flavor]*(model_tend[j]-model_tstart[j])
#last time bin of model in requested interval
temp_spectra = snmodel.get_transformed_spectra(
model_times[ending_index[i]], energy, flavor_transformation)
for flavor in Flavor:
osc_spectra[flavor] += temp_spectra[flavor]*(tb-model_tstart[ending_index[i]])
for flavor in Flavor:
osc_spectra[flavor] /= (tb-ta)
osc_fluence = {}
table = []
table.append('# TBinMid={:g}sec TBinWidth={:g}s EBinWidth=0.2MeV Fluence at Earth for this timebin in neutrinos per cm^2'.format(t, dt))
table.append('# E(GeV) NuE NuMu NuTau aNuE aNuMu aNuTau')
# Generate energy + number flux table.
for j, E in enumerate(energy):
for flavor in Flavor:
osc_fluence[flavor] = osc_spectra[flavor][j] * dt * 0.2 * MeV / (4.*np.pi*(d*1000*3.086e+18)**2)
s = '{:17.8E}'.format(E/(1e3 * MeV))
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E_BAR])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR])
table.append(s)
logging.debug(s)
# Encode energy/flux table and output to file in tar archive.
output = '\n'.join(table).encode('ascii')
extension = ".dat"
if output_filename is not None:
if nbin > 1:
filename = output_filename+"_"+str(i)+extension
else:
filename = output_filename+extension
else:
model_file_root, _ = os.path.splitext(model_file) # strip extension (if present)
filename = model_file_root + '.tbin{:01d}.'.format(i+1) + transformation_type + \
'.{:.3f},{:.3f},{:01d}-{:.1f}kpc{}'.format(t0, t1, nbin, d, extension)
info = tarfile.TarInfo(name=filename)
info.size = len(output)
tf.addfile(info, io.BytesIO(output))
return os.path.join(model_dir, tfname)
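# --- Usage sketch (editor's addition, not part of the original module) ---
# Minimal, hedged example of calling generate_fluence(); the model path,
# model name and distance below are hypothetical placeholders.
def _example_generate_fluence():
    import astropy.units as u
    # Three 50 ms fluence bins starting at t = 0 s, for a supernova at 10 kpc.
    tstart = [0.00, 0.05, 0.10] * u.s
    tend = [0.05, 0.10, 0.15] * u.s
    return generate_fluence('models/Bollig_2016/s11.2c', 'Bollig_2016',
                            'AdiabaticMSW_NMO', 10, None,
                            tstart=tstart, tend=tend)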
def simulate(SNOwGLoBESdir, tarball_path, detector_input="all", verbose=False):
"""Takes as input the neutrino flux files and configures and runs the supernova script inside SNOwGLoBES, which outputs calculated event rates expected for a given (set of) detector(s). These event rates are given as a function of the neutrino energy and time, for each interaction channel.
Parameters
----------
SNOwGLoBESdir : str
Path to directory where SNOwGLoBES is installed.
tarball_path : str
Path of compressed .tar file produced e.g. by ``generate_time_series()`` or ``generate_fluence()``.
detector_input : str
Name of detector. If ``"all"``, will use all detectors supported by SNOwGLoBES.
verbose : bool
Whether to generate verbose output, e.g. for debugging.
"""
sng = SNOwGLoBES(SNOwGLoBESdir)
if detector_input == 'all':
detector_input = list(sng.detectors)
detector_input.remove('d2O')
elif isinstance(detector_input,str):
detector_input = [detector_input]
result = {}
#Extracts data from tarfile and sets up lists of paths and fluxfilenames for later use
with TemporaryDirectory(prefix='snowglobes') as tempdir:
with tarfile.open(tarball_path) as tar:
tar.extractall(tempdir)
flux_files = list(Path(tempdir).glob('*.dat'))
if len(detector_input)>0:
detector_input = tqdm(detector_input, desc='Detectors', leave=False)
for det in detector_input:
res=sng.run(flux_files, det)
result[det]=dict(zip((f.stem for f in flux_files),res))
# save result to file for re-use in collate()
cache_file = tarball_path[:tarball_path.rfind('.tar')] + '.npy'
logging.info(f'Saving simulation results to {cache_file}')
np.save(cache_file, result)
return result
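# --- Usage sketch (editor's addition, not part of the original module) ---
# Hedged example of running the SNOwGLoBES event-rate calculation; the
# SNOwGLoBES installation path and detector name are hypothetical placeholders.
def _example_simulate(tarball_path):
    # `tarball_path` is typically the value returned by generate_fluence()
    # or generate_time_series().
    return simulate('/opt/snowglobes', tarball_path, detector_input='icecube')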
re_chan_label = re.compile(r'nu(e|mu|tau)(bar|)_([A-Z][a-z]*)(\d*)_?(.*)')
def get_channel_label(c):
mapp = {'nc':'NeutralCurrent',
'ibd':'Inverse Beta Decay',
'e':r'${\nu}_x+e^-$'}
def gen_label(m):
flv,bar,Nuc,num,res = m.groups()
if flv!='e':
flv='\\'+flv
if bar:
bar='\\'+bar
s = f'${bar}{{\\nu}}_{flv}$ '+f'${{}}^{{{num}}}{Nuc}$ '+res
return s
if c in mapp:
return mapp[c]
else:
return re_chan_label.sub(gen_label, c)
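# Editor's note -- hedged examples of the channel-label mapping above
# (derived by hand from `mapp` and `re_chan_label`):
#   get_channel_label('ibd')          -> 'Inverse Beta Decay'
#   get_channel_label('nue_Ar40')     -> '${\nu}_e$ ${}^{40}Ar$ '
#   get_channel_label('numubar_C12')  -> '$\bar{\nu}_\mu$ ${}^{12}C$ '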
def collate(SNOwGLoBESdir, tarball_path, detector_input="all", skip_plots=False, verbose=False, remove_generated_files=True):
"""Collates SNOwGLoBES output files and generates plots or returns a data table.
Parameters
----------
SNOwGLoBESdir : str
Path to directory where SNOwGLoBES is installed.
tarball_path : str
Path of compressed .tar file produced e.g. by ``generate_time_series()`` or ``generate_fluence()``.
detector_input : str
Name of detector. If ``"all"``, will use all detectors supported by SNOwGLoBES.
    skip_plots : bool
        If False, generates a plot of the energy distribution for each time bin and each interaction channel.
verbose : bool
Whether to generate verbose output, e.g. for debugging.
remove_generated_files: bool
Remove the output files from SNOwGLoBES, collated files, and .png's made for this snewpy run.
Returns
-------
dict
Dictionary of data tables: One table per time bin; each table contains in the first column the energy bins, in the remaining columns the number of events for each interaction channel in the detector.
"""
def aggregate_channels(table, **patterns):
#rearrange the table to have only channel column
levels = list(table.columns.names)
levels.remove('channel')
t = table.stack(levels)
for name,pattern in patterns.items():
#get channels which contain `like`
t_sel = t.filter(like=pattern)
#sum over them and save to a separate column
t_agg = t_sel.sum(axis='columns')
#drop processed channels
t.drop(t_sel.columns, axis='columns',inplace=True)
t[name]=t_agg #fill the column
#return table with the original levels order
t = t.unstack(levels)
t = t.reorder_levels(table.columns.names, axis=1)
return t
def do_plot(table, params):
#plotting the events from given table
flux,det,weighted,smeared = params
for c in table.columns:
if table[c].max() > 0.1:
plt.plot(table[c],drawstyle='steps',label=get_channel_label(c), lw=1)
plt.xlim(right=0.10)
plt.ylim(bottom=0.10)
plt.yscale('log')
plt.legend(bbox_to_anchor=(0.5, 0.5, 0.5, 0.5), loc='best', borderaxespad=0) # formats complete graph
smear_title = 'Interaction' if smeared=='unsmeared' else 'Detected'
plt.title(f'{flux} {det.capitalize()} {weighted.capitalize()} {smear_title} Events')
if smeared=='smeared':
plt.xlabel('Detected Energy (GeV)')
plt.ylabel('Events')
else:
plt.xlabel('Neutrino Energy (GeV)')
plt.ylabel('Interaction Events')
#read the results from storage
cache_file = tarball_path[:tarball_path.rfind('.tar')] + '.npy'
logging.info(f'Reading tables from {cache_file}')
tables = np.load(cache_file, allow_pickle=True).tolist()
    # This output is similar to what is produced by:
    # tables = simulate(SNOwGLoBESdir, tarball_path, detector_input)
    # dict for old-style results, kept for backward compatibility
results = {}
#save collated files:
with TemporaryDirectory(prefix='snowglobes') as tempdir:
tempdir = Path(tempdir)
for det in tables:
results[det] = {}
for flux,t in tables[det].items():
t = aggregate_channels(t,nc='nc_',e='_e')
for w in ['weighted','unweighted']:
for s in ['smeared','unsmeared']:
table = t[w][s]
filename_base = f'{flux}_{det}_events_{s}_{w}'
filename = tempdir/f'Collated_{filename_base}.dat'
#save results to text files
with open(filename,'w') as f:
f.write(table.to_string(float_format='%23.15g'))
#format the results for the output
header = 'Energy '+' '.join(list(table.columns))
data = table.to_numpy().T
index = table.index.to_numpy()
data = np.concatenate([[index],data])
results[filename.name] = {'header':header,'data':data}
#optionally plot the results
if skip_plots is False:
plt.figure(dpi=300)
do_plot(table,(flux,det,w,s))
filename = tempdir/f'{filename_base}_log_plot.png'
plt.savefig(filename.with_suffix('.png'), dpi=300, bbox_inches='tight')
#Make a tarfile with the condensed data files and plots
output_name = Path(tarball_path).stem
output_name = output_name[:output_name.rfind('.tar')]+'_SNOprocessed'
output_path = Path(tarball_path).parent/(output_name+'.tar.gz')
with tarfile.open(output_path, "w:gz") as tar:
for file in tempdir.iterdir():
tar.add(file,arcname=output_name+'/'+file.name)
logging.info(f'Created archive: {output_path}')
return results
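# --- Usage sketch (editor's addition, not part of the original module) ---
# Hedged end-to-end example; the SNOwGLoBES path and detector are hypothetical.
def _example_collate(tarball_path):
    # simulate() must have been run on `tarball_path` first so that the
    # cached .npy results used by collate() exist.
    simulate('/opt/snowglobes', tarball_path, detector_input='icecube')
    return collate('/opt/snowglobes', tarball_path, skip_plots=True)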
| 46.298969 | 553 | 0.631485 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9,347 | 0.416255 |
e00e074bf789711cc01d53bcaa030d52c4e69f5b | 4,621 | py | Python | rlcycle/dqn_base/loss.py | cyoon1729/Rlcycle | 5c65b9dd61a6fd5d6dfe92f0b3e04bf309828569 | [
"MIT"
] | 128 | 2020-06-29T01:40:36.000Z | 2022-03-29T15:37:39.000Z | rlcycle/dqn_base/loss.py | cyoon1729/Rlcycle | 5c65b9dd61a6fd5d6dfe92f0b3e04bf309828569 | [
"MIT"
] | 8 | 2020-06-29T03:51:50.000Z | 2020-07-22T23:55:47.000Z | rlcycle/dqn_base/loss.py | cyoon1729/Rlcycle | 5c65b9dd61a6fd5d6dfe92f0b3e04bf309828569 | [
"MIT"
] | 24 | 2020-07-02T06:03:03.000Z | 2022-03-22T11:59:53.000Z | from typing import List, Tuple
from omegaconf import DictConfig
import torch
import torch.nn as nn
import torch.nn.functional as F
from rlcycle.common.abstract.loss import Loss
class DQNLoss(Loss):
"""Compute double DQN loss"""
def __init__(self, hyper_params: DictConfig, use_cuda: bool):
Loss.__init__(self, hyper_params, use_cuda)
def __call__(
self, networks: Tuple[nn.Module, ...], data: Tuple[torch.Tensor, ...]
) -> Tuple[torch.Tensor, ...]:
network, target_network = networks
states, actions, rewards, next_states, dones = data
q_value = network.forward(states).gather(1, actions)
with torch.no_grad():
next_q = torch.max(target_network.forward(next_states), 1)[0].unsqueeze(1)
n_step_gamma = self.hyper_params.gamma ** self.hyper_params.n_step
target_q = rewards + (1 - dones) * n_step_gamma * next_q
element_wise_loss = F.smooth_l1_loss(
q_value, target_q.detach(), reduction="none"
)
return element_wise_loss
class QRLoss(Loss):
"""Compute quantile regression loss"""
def __init__(self, hyper_params: DictConfig, use_cuda: bool):
Loss.__init__(self, hyper_params, use_cuda)
def __call__(
self, networks: Tuple[nn.Module, ...], data: Tuple[torch.Tensor, ...],
) -> Tuple[torch.Tensor, ...]:
network, target_network = networks
states, actions, rewards, next_states, dones = data
z_dists = network.forward(states)
z_dists = z_dists[list(range(states.size(0))), actions.view(-1)]
with torch.no_grad():
next_z = target_network.forward(next_states)
next_actions = torch.max(next_z.mean(2), dim=1)[1]
next_z = next_z[list(range(states.size(0))), next_actions]
n_step_gamma = self.hyper_params.gamma ** self.hyper_params.n_step
target_z = rewards + (1 - dones) * n_step_gamma * next_z
distance = target_z - z_dists
quantile_huber_loss = (
network.tau - (distance.detach() < 0).float()
).abs() * self.huber_loss(distance)
element_wise_loss = torch.mean(quantile_huber_loss, dim=1, keepdim=True)
return element_wise_loss
@staticmethod
def huber_loss(x: List[torch.Tensor], k: float = 1.0):
return torch.where(x.abs() <= k, 0.5 * x.pow(2), k * (x.abs() - 0.5 * k))
class CategoricalLoss(Loss):
"""Compute C51 loss"""
def __init__(self, hyper_params: DictConfig, use_cuda: bool):
Loss.__init__(self, hyper_params, use_cuda)
def __call__(
self, networks: Tuple[nn.Module, ...], data: Tuple[torch.Tensor, ...]
) -> Tuple[torch.Tensor, ...]:
network, target_network = networks
states, actions, rewards, next_states, dones = data
batch_size = states.size(0)
offset = (
torch.linspace(0, (batch_size - 1) * network.num_atoms, batch_size)
.long()
.unsqueeze(1)
.expand(batch_size, network.num_atoms)
)
if self.use_cuda:
offset = offset.cuda()
z_dists = network.forward(states)
z_dists = z_dists[list(range(states.size(0))), actions.view(-1)]
with torch.no_grad():
next_z = target_network.forward(next_states)
next_actions = torch.max(next_z.mean(2), dim=1)[1]
next_z = next_z[list(range(states.size(0))), next_actions]
n_step_gamma = self.hyper_params.gamma ** self.hyper_params.n_step
target_z = rewards + (1 - dones) * n_step_gamma * network.support
target_z = torch.clamp(target_z, min=network.v_min, max=network.v_max)
target_proj = self.dist_projection(network, next_z, target_z, offset)
log_dist = torch.log(z_dists)
element_wise_loss = -(target_proj * log_dist).sum(1)
return element_wise_loss
def dist_projection(
self,
network: nn.Module,
next_z: torch.Tensor,
target_z: torch.Tensor,
offset: torch.Tensor,
) -> torch.Tensor:
b = (target_z - network.v_min) / network.delta_z
lb = b.floor().long()
ub = b.ceil().long()
proj_dist = torch.zeros(next_z.size())
if self.use_cuda:
proj_dist = proj_dist.cuda()
proj_dist.view(-1).index_add_(
0, (lb + offset).view(-1), (next_z * (ub.float() - b)).view(-1)
)
proj_dist.view(-1).index_add_(
0, (ub + offset).view(-1), (next_z * (b - lb.float())).view(-1)
)
return proj_dist
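# --- Usage sketch (editor's addition, not part of the original module) ---
# Hedged example of computing an element-wise DQN loss; hyperparameter values
# and the shape of `batch` are hypothetical placeholders.
def _example_dqn_loss(network, target_network, batch):
    from omegaconf import OmegaConf
    hyper_params = OmegaConf.create({"gamma": 0.99, "n_step": 3})
    loss_fn = DQNLoss(hyper_params, use_cuda=False)
    # `batch` is the tuple (states, actions, rewards, next_states, dones).
    element_wise_loss = loss_fn((network, target_network), batch)
    # Reduce to a scalar (e.g. the mean) before backpropagation.
    return element_wise_loss.mean()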
| 34.485075 | 86 | 0.61069 | 4,433 | 0.959316 | 0 | 0 | 154 | 0.033326 | 0 | 0 | 95 | 0.020558 |
e00f4579dad4a0f1f3310721291b602f532b6bf5 | 12,518 | py | Python | scripts/gap_filling_viewer.py | raphischer/probgf | 01bd2be85aa98afd79fc05c1eb3e260b2bcd2ebd | [
"MIT"
] | 3 | 2020-11-19T10:28:57.000Z | 2021-04-15T17:16:24.000Z | scripts/gap_filling_viewer.py | raphischer/probgf | 01bd2be85aa98afd79fc05c1eb3e260b2bcd2ebd | [
"MIT"
] | null | null | null | scripts/gap_filling_viewer.py | raphischer/probgf | 01bd2be85aa98afd79fc05c1eb3e260b2bcd2ebd | [
"MIT"
] | null | null | null | """viewer application which allows to interactively view spatio-temporal gap filling results"""
import os
import argparse
from datetime import datetime, timedelta
from tkinter import Canvas, Tk, Button, RAISED, DISABLED, SUNKEN, NORMAL
import numpy as np
from PIL import Image, ImageTk
import probgf.media as media
class MainWindow():
def next(self, event=None):
self.curr_img = (self.curr_img + 1) % len(self.imgs_orig)
self.refresh()
def prev(self, event=None):
self.curr_img = (self.curr_img - 1) % len(self.imgs_orig)
self.refresh()
def click_wheel(self, event):
self.start_drag = (event.x + self.shift_x, event.y + self.shift_y)
def click_left(self, event):
if not self.over_button:
self.prev()
def click_right(self, event):
if not self.over_button:
self.next()
def refresh(self):
zoom = float(self.zoom) / 100
self.start_x = int(self.img_w_f / 2 - self.img_w_f / zoom / 2) + self.shift_x
self.end_x = int(self.start_x + self.img_w_f / zoom)
self.start_y = int(self.img_w_f / 2 - self.img_w_f / zoom / 2) + self.shift_y
self.end_y = int(self.start_y + self.img_w_f / zoom)
if not self.mask_toggle:
self.b_masks.config(relief=RAISED)
img1 = self.imgs_orig[self.curr_img]
img2 = self.imgs_pred[self.curr_img]
else:
self.b_masks.config(relief=SUNKEN)
img1 = self.imgs_orig_m[self.curr_img]
img2 = self.imgs_pred_m[self.curr_img]
img1 = img1.crop((self.start_x, self.start_y, self.end_x, self.end_y)).resize((self.img_w, self.img_w), Image.ANTIALIAS)
img2 = img2.crop((self.start_x, self.start_y, self.end_x, self.end_y)).resize((self.img_w, self.img_w), Image.ANTIALIAS)
self.imgs_orig_v[self.curr_img] = ImageTk.PhotoImage(img1)
self.imgs_pred_v[self.curr_img] = ImageTk.PhotoImage(img2)
self.canvas.itemconfig(self.i_left, image = self.imgs_orig_v[self.curr_img])
self.canvas.itemconfig(self.i_right, image = self.imgs_pred_v[self.curr_img])
self.canvas.itemconfig(self.i_list, image = self.imagelists[self.curr_img])
self.canvas.itemconfig(self.day_info, text='{} - cloud cover {:06.2f}% - estimated MAE {}'.format(self.dates[self.curr_img],
self.cc[self.curr_img] * 100,
self.errors[self.curr_img]))
if self.zoom == 100:
self.canvas.itemconfig(self.zoom, text='')
self.b_reset.config(state=DISABLED)
else:
self.canvas.itemconfig(self.zoom, text='ZOOM: {:3d}%'.format(self.zoom))
self.b_reset.config(state=NORMAL)
def zoomer(self, event):
if event.num == 4 or event.delta == 120 or event.keysym == 'plus':
self.zoom += 20
elif event.delta == 240:
self.zoom += 40
elif event.delta == 360:
self.zoom += 60
else:
if self.zoom - 20 >= 100:
self.zoom -= 20
if self.zoom == 100:
self.reset_transform()
self.refresh()
def drag_roi(self, event):
self.shift_x = min(max(self.start_drag[0] - event.x, 0 - int(self.img_w_f / 2 - self.img_w_f / self.zoom / 2)),
int(self.img_w_f / 2 - self.img_w_f / self.zoom / 2))
self.shift_y = min(max(self.start_drag[1] - event.y, 0 - int(self.img_w_f / 2 - self.img_w_f / self.zoom / 2)),
int(self.img_w_f / 2 - self.img_w_f / self.zoom / 2))
self.refresh()
def toggle_mask(self, event=None):
self.mask_toggle = not self.mask_toggle
self.refresh()
def reset_transform(self, event=None):
self.mask_toggle = False
self.zoom = 100
self.shift_x = 0
self.shift_y = 0
self.refresh()
def button_enter(self, event):
self.over_button = True
def button_leave(self, enter):
self.over_button = False
def __init__(self, root, w, h, imgs_p, imgs_o, imgs_m, dates, errors, logos):
self.dates = dates
self.errors = errors
# setup images
self.img_w = int(h * 0.68) # width of each displayed image
self.imgs_orig_m = [] # masked full images
self.imgs_pred_m = []
self.imgs_orig = [] # unmasked full images
self.imgs_pred = []
self.cc = []
for index, img in enumerate(imgs_p):
self.imgs_orig.append(imgs_o[index].resize((self.img_w, self.img_w), resample=0))
self.imgs_pred.append(img.resize((self.img_w, self.img_w), resample=0))
self.imgs_orig_m.append(Image.blend(self.imgs_orig[-1], imgs_m[index].convert(mode='RGB').resize((self.img_w, self.img_w), resample=0), alpha=.5))
self.imgs_pred_m.append(Image.blend(self.imgs_pred[-1], imgs_m[index].convert(mode='RGB').resize((self.img_w, self.img_w), resample=0), alpha=.5))
self.cc.append(1 - np.count_nonzero(np.array(imgs_m[index])) / np.array(imgs_m[index]).size)
self.curr_img = 0
# text labels and logos
h_logos = int(h / 17)
b_logos = int(w / 100)
self.canvas = Canvas(root, width=w, height=h)
self.canvas.pack()
self.canvas.configure(background='white')
self.logo1 = ImageTk.PhotoImage(logos[0].resize((int(h_logos / logos[0].size[1] * logos[0].size[0]), h_logos), Image.ANTIALIAS))
self.logo2 = ImageTk.PhotoImage(logos[1].resize((int(h_logos / logos[1].size[1] * logos[1].size[0]), h_logos), Image.ANTIALIAS))
self.logo3 = ImageTk.PhotoImage(logos[2].resize((int(h_logos / logos[2].size[1] * logos[2].size[0]), h_logos), Image.ANTIALIAS))
self.canvas.create_image(int(self.logo1.width() / 2 + b_logos), int(self.logo1.height() / 2 + b_logos), image=self.logo1)
self.canvas.create_image(int(w - self.logo2.width() / 2 - b_logos), int(self.logo2.height() / 2 + b_logos), image=self.logo2)
self.canvas.create_image(int(w - self.logo3.width() / 2 - b_logos), int(h - (self.logo3.height() / 2 + b_logos)), image=self.logo3)
self.canvas.create_text(w / 2, h * 0.06, font=("Courier", int(h / 25)), text='Gap Filling Viewer')
self.canvas.create_text(w / 3.9, h * 0.19, font=("Courier", int(h / 35)), text='Observed')
self.canvas.create_text(w - w / 3.9, h * 0.19, font=("Courier", int(h / 35)), text='Predicted')
self.day_info = self.canvas.create_text(w / 2, h * 0.13, font=("Courier", int(h / 30)), text='')
self.zoom = self.canvas.create_text(w * 0.12, h * 0.94, font=("Courier", int(h / 50)), text='')
# image timeline
imagelist_h = int(self.img_w / len(self.imgs_pred)) + 1
imagelist_a = np.zeros((len(self.imgs_pred), imagelist_h, imagelist_h, 3), dtype='uint8')
for index in range(len(self.imgs_pred)):
imagelist_a[index, :, :, :] = np.array(self.imgs_pred[index].resize((imagelist_h, imagelist_h), Image.ANTIALIAS))
self.imagelists = []
for index in range(len(self.imgs_pred)):
c_list = np.array(imagelist_a)
c_list[index, :int(w / 600), :, :] = 255
c_list[index, (imagelist_h - int(w / 600)):, :, :] = 255
c_list[index, :, :int(w / 600), :] = 255
c_list[index, :, (imagelist_h - int(w / 600)):, :] = 255
self.imagelists.append(ImageTk.PhotoImage(Image.fromarray(c_list.reshape(len(self.imgs_pred) * imagelist_h, imagelist_h, 3))))
self.i_list = self.canvas.create_image(w * 0.5, h * 0.56, image=self.imagelists[self.curr_img])
# images and buttons
self.img_w_f = self.imgs_orig[0].size[0] # full image width
self.imgs_orig_v = [ImageTk.PhotoImage(img.resize((self.img_w, self.img_w), Image.ANTIALIAS)) for img in self.imgs_orig] # images for visualization
self.imgs_pred_v = [ImageTk.PhotoImage(img.resize((self.img_w, self.img_w), Image.ANTIALIAS)) for img in self.imgs_pred]
self.i_left = self.canvas.create_image(w / 3.9, h * 0.56, image=self.imgs_orig_v[self.curr_img])
self.i_right = self.canvas.create_image(w - w / 3.9, h * 0.56, image=self.imgs_pred_v[self.curr_img])
self.b_masks = Button(root, font=("Courier", int(h / 50)), text = "Show masks", command=self.toggle_mask)
self.b_reset = Button(root, font=("Courier", int(h / 50)), text = "Reset view", command=self.reset_transform, state=DISABLED)
self.b_quit = Button(root, font=("Courier", int(h / 50)), text = "Quit", command=self.canvas.master.destroy)
self.reset_transform()
self.canvas.create_window(w * 0.30, h * 0.94, window=self.b_masks)
self.canvas.create_window(w * 0.50, h * 0.94, window=self.b_reset)
self.canvas.create_window(w * 0.70, h * 0.94, window=self.b_quit)
# bind buttons and keys
root.bind("q", lambda e: self.canvas.master.destroy())
root.bind("r", self.reset_transform)
root.bind("m", self.toggle_mask)
root.bind("<Right>", self.next)
root.bind("<Left>", self.prev)
root.bind("<Down>", self.next)
root.bind("<Up>", self.prev)
root.bind("<Button-3>", self.click_right)
root.bind("<Button-1>", self.click_left)
root.bind("<Button-2>", self.click_wheel)
root.bind("<Button-4>", self.zoomer)
root.bind("<Button-5>", self.zoomer)
root.bind("<MouseWheel>", self.zoomer)
root.bind("<B2-Motion>", self.drag_roi)
root.bind("+", self.zoomer)
root.bind("-", self.zoomer)
self.over_button = False
self.b_masks.bind("<Enter>", self.button_enter)
self.b_masks.bind("<Leave>", self.button_leave)
self.b_reset.bind("<Enter>", self.button_enter)
self.b_reset.bind("<Leave>", self.button_leave)
self.b_quit.bind("<Enter>", self.button_enter)
self.b_quit.bind("<Leave>", self.button_leave)
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-l', '--left', default='imgs/original/',
help='directory with images which are shown on the left')
parser.add_argument('-r', '--right', default='imgs/pred_outline_lin_spatial_clouds0_2/',
help='directory with images which are shown on the right')
parser.add_argument('-m', '--masks', default='imgs/mask/',
help='directory with mask images')
parser.add_argument('-R', '--report', default='report_lin_spatial_clouds0_2.csv',
help='report containing date and error information for the right hand images')
parser.add_argument('-y', '--year', type=int, default=2018,
help='year of data acquisition')
parser.add_argument('-W', '--width', type=int, default=1280,
help='window width')
parser.add_argument('-H', '--height', type=int, default=720,
help='window height')
args = parser.parse_args()
imgs_o = [Image.open(img) for img in sorted([os.path.join(args.left, img) for img in os.listdir(args.left)])]
imgs_p = [Image.open(img) for img in sorted([os.path.join(args.right, img) for img in os.listdir(args.right)])]
imgs_m = [Image.open(img) for img in sorted([os.path.join(args.masks, img) for img in os.listdir(args.masks)])]
report = np.genfromtxt(args.report, delimiter=',', dtype=float)[1:-1]
dates = [(datetime(args.year, 1, 1) + timedelta(int(report[day, 1]) - 1)).strftime('%b %d %Y') for day in range(report.shape[0])]
errors = ['{:4.1f}'.format(error) if error != 0.0 else 'n.a. ' for error in report[:, 5]]
logos = [media.logo1, media.logo2, media.logo3]
if len(imgs_o) != len(dates):
raise RuntimeError('Different number of images in {} than days in the report {}!'.format(args.left, args.report))
if len(imgs_p) != len(dates):
raise RuntimeError('Different number of images in {} than days in the report {}!'.format(args.right, args.report))
if len(imgs_m) != len(dates):
raise RuntimeError('Different number of images in {} than days in the report {}!'.format(args.masks, args.report))
root = Tk()
root.title('Gap Filling Viewer')
root.geometry("%dx%d+0+0" % (args.width, args.height))
MainWindow(root, args.width, args.height, imgs_p, imgs_o, imgs_m, dates, errors, logos)
root.focus_set()
root.mainloop()
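# --- Usage sketch (editor's addition) ---
# The viewer is launched from the command line; the paths below are
# hypothetical placeholders for the argparse options defined above, e.g.:
#   python gap_filling_viewer.py -l imgs/original/ -r imgs/predicted/ \
#       -m imgs/mask/ -R report.csv -y 2018 -W 1280 -H 720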
| 51.941909 | 158 | 0.616552 | 9,852 | 0.787027 | 0 | 0 | 0 | 0 | 0 | 0 | 1,434 | 0.114555 |
e00f57f732929e05a58cd0ef2eae47d08e8561a9 | 4,946 | py | Python | paypal/pro/tests.py | pdfcrowd/django-paypal | 0ea56dc6c799204f0f8719481f94d0c79de6eff5 | [
"Unlicense",
"MIT"
] | 1 | 2019-06-13T15:59:48.000Z | 2019-06-13T15:59:48.000Z | pro/tests.py | sirmmo/django-paypal | 0c8aeec1c319a08ce1bfdf828534d01b69b8fa27 | [
"MIT",
"Unlicense"
] | null | null | null | pro/tests.py | sirmmo/django-paypal | 0c8aeec1c319a08ce1bfdf828534d01b69b8fa27 | [
"MIT",
"Unlicense"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
from django.conf import settings
from django.core.handlers.wsgi import WSGIRequest
from django.forms import ValidationError
from django.http import QueryDict
from django.test import TestCase
from django.test.client import Client
from paypal.pro.fields import CreditCardField
from paypal.pro.helpers import PayPalWPP, PayPalError
class RequestFactory(Client):
# Used to generate request objects.
def request(self, **request):
environ = {
'HTTP_COOKIE': self.cookies,
'PATH_INFO': '/',
'QUERY_STRING': '',
'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'SERVER_NAME': 'testserver',
'SERVER_PORT': 80,
'SERVER_PROTOCOL': 'HTTP/1.1',
}
environ.update(self.defaults)
environ.update(request)
return WSGIRequest(environ)
RF = RequestFactory()
REQUEST = RF.get("/pay/", REMOTE_ADDR="127.0.0.1:8000")
class DummyPayPalWPP(PayPalWPP):
pass
# """Dummy class for testing PayPalWPP."""
# responses = {
# # @@@ Need some reals data here.
# "DoDirectPayment": """ack=Success×tamp=2009-03-12T23%3A52%3A33Z&l_severitycode0=Error&l_shortmessage0=Security+error&l_longmessage0=Security+header+is+not+valid&version=54.0&build=854529&l_errorcode0=&correlationid=""",
# }
#
# def _request(self, data):
# return self.responses["DoDirectPayment"]
class CreditCardFieldTest(TestCase):
def testCreditCardField(self):
field = CreditCardField()
field.clean('4797503429879309')
self.assertEquals(field.card_type, "Visa")
self.assertRaises(ValidationError, CreditCardField().clean, '1234567890123455')
class PayPalWPPTest(TestCase):
def setUp(self):
        # Avoiding blasting real requests at PayPal.
self.old_debug = settings.DEBUG
settings.DEBUG = True
self.item = {
'amt': '9.95',
'inv': 'inv',
'custom': 'custom',
'next': 'http://www.example.com/next/',
'returnurl': 'http://www.example.com/pay/',
'cancelurl': 'http://www.example.com/cancel/'
}
self.wpp = DummyPayPalWPP(REQUEST)
def tearDown(self):
settings.DEBUG = self.old_debug
def test_doDirectPayment_missing_params(self):
data = {'firstname': 'Chewbacca'}
self.assertRaises(PayPalError, self.wpp.doDirectPayment, data)
def test_doDirectPayment_valid(self):
data = {
'firstname': 'Brave',
'lastname': 'Star',
'street': '1 Main St',
'city': u'San Jos\xe9',
'state': 'CA',
'countrycode': 'US',
'zip': '95131',
'expdate': '012019',
'cvv2': '037',
'acct': '4797503429879309',
'creditcardtype': 'visa',
'ipaddress': '10.0.1.199',}
data.update(self.item)
self.assertTrue(self.wpp.doDirectPayment(data))
def test_doDirectPayment_invalid(self):
data = {
'firstname': 'Epic',
'lastname': 'Fail',
'street': '100 Georgia St',
'city': 'Vancouver',
'state': 'BC',
'countrycode': 'CA',
'zip': 'V6V 1V1',
'expdate': '012019',
'cvv2': '999',
'acct': '1234567890',
'creditcardtype': 'visa',
'ipaddress': '10.0.1.199',}
data.update(self.item)
self.assertFalse(self.wpp.doDirectPayment(data))
def test_setExpressCheckout(self):
# We'll have to stub out tests for doExpressCheckoutPayment and friends
# because they're behind paypal's doors.
nvp_obj = self.wpp.setExpressCheckout(self.item)
self.assertTrue(nvp_obj.ack == "Success")
### DoExpressCheckoutPayment
# PayPal Request:
# {'amt': '10.00',
# 'cancelurl': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname',
# 'custom': u'website_id=480&cname=1',
# 'inv': u'website-480-cname',
# 'method': 'DoExpressCheckoutPayment',
# 'next': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname',
# 'payerid': u'BN5JZ2V7MLEV4',
# 'paymentaction': 'Sale',
# 'returnurl': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname',
# 'token': u'EC-6HW17184NE0084127'}
#
# PayPal Response:
# {'ack': 'Success',
# 'amt': '10.00',
# 'build': '848077',
# 'correlationid': '375f4773c3d34',
# 'currencycode': 'USD',
# 'feeamt': '0.59',
# 'ordertime': '2009-03-04T20:56:08Z',
# 'paymentstatus': 'Completed',
# 'paymenttype': 'instant',
# 'pendingreason': 'None',
# 'reasoncode': 'None',
# 'taxamt': '0.00',
# 'timestamp': '2009-03-04T20:56:09Z',
# 'token': 'EC-6HW17184NE0084127',
# 'transactionid': '3TG42202A7335864V',
# 'transactiontype': 'expresscheckout',
# 'version': '54.0'} | 32.973333 | 234 | 0.593207 | 3,009 | 0.60837 | 0 | 0 | 0 | 0 | 0 | 0 | 2,468 | 0.498989 |
e00f75413d6a65ba71109974edd248bc1533ce8f | 1,010 | py | Python | Hackerrank_Bot_Saves_Princess.py | madhurgupta96/Algorithmic-Journey | 75868af1050c99fc25e295812ba1a47468c6737f | [
"Apache-2.0"
] | null | null | null | Hackerrank_Bot_Saves_Princess.py | madhurgupta96/Algorithmic-Journey | 75868af1050c99fc25e295812ba1a47468c6737f | [
"Apache-2.0"
] | null | null | null | Hackerrank_Bot_Saves_Princess.py | madhurgupta96/Algorithmic-Journey | 75868af1050c99fc25e295812ba1a47468c6737f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 7 19:46:40 2020
@author: Intel
"""
def displayPathtoPrincess(n,grid):
me_i=n//2
me_j=n//2
for i in range(n):
if 'p' in grid[i]:
pe_i=i
for j in range(n):
if 'p'==grid[i][j]:
pe_j=j
break
break
    while (me_i != pe_i) or (me_j != pe_j):
if(me_i-pe_i<0):
print('DOWN')
me_i=me_i+1
elif(me_i-pe_i>0):
print('UP')
me_i=me_i-1
else:
if(me_j-pe_j>0):
print('LEFT')
me_j=me_j-1
elif(me_j-pe_j<0):
print('RIGHT')
me_j=me_j+1
else:
break
m = int(input())
grid = []
for i in range(0, m):
grid.append(input().strip())
displayPathtoPrincess(m,grid) | 22.954545 | 40 | 0.372277 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 116 | 0.114851 |
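# --- Worked example (editor's addition) ---
# For the hypothetical 3x3 input below, the bot ('m') starts at the centre
# and the princess ('p') is in the top-left corner, so the script prints
# "UP" and then "LEFT":
#   3
#   p--
#   -m-
#   ---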
e01063b5b93496a8c88b374770c28bc942feb23d | 65,397 | py | Python | gelviz/basic.py | HiDiHlabs/gelviz | 515f0462738b44609679c2a26c7d8ac3ed3b4b2b | [
"BSD-3-Clause"
] | null | null | null | gelviz/basic.py | HiDiHlabs/gelviz | 515f0462738b44609679c2a26c7d8ac3ed3b4b2b | [
"BSD-3-Clause"
] | null | null | null | gelviz/basic.py | HiDiHlabs/gelviz | 515f0462738b44609679c2a26c7d8ac3ed3b4b2b | [
"BSD-3-Clause"
] | null | null | null | import matplotlib.pyplot as plt
import pybedtools
import pandas as pnd
import numpy as np
import tabix
import matplotlib.ticker as ticker
from matplotlib.patches import Rectangle
from matplotlib.patches import Arrow
from matplotlib.path import Path
from matplotlib.patches import PathPatch
import matplotlib.cm as cm
import matplotlib
import tabix
import math
def plotGenes(genes_bed,
exons_bed,
introns_bed,
region_bed,
blacklist=None,
gene_map=None,
plot_gene_ids=True,
y_max=None,
distance_ratio=0.1,
ax=None,
plot_legend=False,
legend_loc="lower right",
color_plus="#80b1d3",
color_minus="#fb8072"):
"""Function for plotting gene structures, i.e. introns exons of genes.
:param genes_bed: :class:`pybedtools.BedTool` object containing TX start,
and TX end of genes.
:type genes_bed: :class:`pybedtools.BedTool`
:param exons_bed: :class:`pybedtools.BedTool` object containing exons of
genes.
:type exons_bed: :class:`pybedtools.BedTool`
:param introns_bed: :class:`pybedtools.BedTool` object containing introns
:type introns_bed: :class:`pybedtools.BedTool`
:param region_bed: :class:`pybedtools.BedTool` object containing the one
region, for which the gene plot is created.
:type region_bed: :class:`pybedtools.BedTool`
:param blacklist: List of gene names, for genes that should not be shown on
the plot, default is None
:type blacklist: list, optional
:param plot_gene_ids: If True, all gene ids will be included in the plot,
False otherwise, default is True
:type plot_gene_ids: bool, optional
:param y_max: Max y value in the gene plot. If not set, then y_max is the
max number of stacked genes, default is None.
:type y_max: bool, optional
:param distance_ratio: Minimal distance between two genes, as ratio of ax
width, such that two genes are plotted side by side. If this ratio is
underwent, the genes will be stacked, default is 0.1.
:type distance_ratio: float, optional
:param ax: Axes instance on which the genes are plotted, default is None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:param plot_legend: If True, a legend describing plus or minus stranded
genes is plotted, False otherwise. Default is False.
:type plot_legend: bool, optional
:param legend_loc: Location of the legend. Either of "lower left",
"lower right", "upper left", "upper right", default is "lower right".
:type legend_loc: str, optional
:param color_plus: Color code for plus stranded genes, default is "#80b1d3".
:type color_plus: str, optional.
:param color_minus: Color code for minus stranded genes, default is
"#fb8072".
:type color_minus: str, optional.
:return: Tuple of max_y_pos+1.5, patch_list, patch_description_list, where
1. max_y_pos+1.5 is the max_y_position + 1.5. max_y_pos defines the \
number of stacked genes.
2. patch_list is the list of patches drawn on the ax.
3. patch_description_list is the list of descriptions for the patches \
drawn on the ax.
:rtype: list
"""
ax = ax if ax is not None else plt.gca()
genes_in_region = genes_bed
exons_in_region = exons_bed
introns_in_region = introns_bed
region_border_up = int(region_bed[0][1])
region_border_down = int(region_bed[0][2])
region_size = region_border_down-region_border_up
color_forward = color_plus
color_reverse = color_minus
max_y_pos = None
if(not len(genes_in_region) == 0):
# Determine y positions of genes for plotting
max_y_pos, y_pos_dict = determineYPosGene(genes_in_region,
(region_border_down-
region_border_up),
distance_ratio)
if(not y_max is None):
max_y_pos = y_max
# Plot Exons
for i in exons_in_region:
start = int(i[1])
end = int(i[2])
gene_name = str(i[3])
if(not blacklist is None and gene_map[gene_name] in blacklist):
continue
# Define color for gene plotting
strand = str(i[5])
color = color_forward
if(strand == "-"):
color = color_reverse
y = max_y_pos-y_pos_dict[gene_name]+0.5
rect = Rectangle((start, y-.2),
end-start,
.4,
color=color,
capstyle='butt',
linewidth=0)
ax.add_patch(rect)
patch_list = []
patch_description_list = []
met_forward = False
met_reverse = False
# Plot Introns
for i in introns_in_region:
start = int(i[1])
end = int(i[2])
gene_name = str(i[3])
if(not blacklist is None and gene_map[gene_name] in blacklist):
continue
# Define color for gene plotting
strand = str(i[5])
color = color_forward
if(strand == "-"):
color = color_reverse
y = max_y_pos-y_pos_dict[gene_name]+0.5
patch = Rectangle((start, y-.03),
end-start,
.06,
color=color,
capstyle='butt',
linewidth=0)
ax.add_patch(patch)
if(strand == "+" and not(met_forward)):
patch_list += [patch]
patch_description_list += ["forward strand"]
met_forward = True
elif(strand == "-" and not(met_reverse)):
patch_list += [patch]
patch_description_list += ["reverse strand"]
met_reverse = True
# Plot Gene Names
if(plot_gene_ids):
for i in genes_in_region:
start = int(i[1])
gene_name = str(i[3])
if(not blacklist is None and gene_map[gene_name] in blacklist):
continue
# Define color for gene plotting
strand = str(i[5])
color = color_forward
if(strand == "-"):
color = color_reverse
border_distance_down = region_border_down-start
if(start < region_border_up):
start = region_border_up
border_distance_down = region_border_down-start
if(not(float(border_distance_down)/float(region_size)
< distance_ratio)):
gene_name = str(i[3])
gene_name_label = gene_name
if(not gene_map is None):
gene_name_label = gene_map[gene_name]
y = max_y_pos-y_pos_dict[gene_name]+.8
plt.text(start,
y,
gene_name_label,
size=5,
color = color)
gene_name = str(i[3])
gene_name_label = gene_name
if(not gene_map is None):
gene_name_label = gene_map[gene_name]
y = max_y_pos-y_pos_dict[gene_name]+.8
plt.text(start, y, gene_name_label, size=5, color = color)
plt.xlim([region_border_up, region_border_down])
plt.ylim([0, max_y_pos+1.5])
plt.yticks([], [])
if(plot_legend):
plt.legend(patch_list,
patch_description_list,
loc=legend_loc,
fontsize=5)
return max_y_pos+1.5, patch_list, patch_description_list
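# --- Usage sketch (editor's addition, not part of the original module) ---
# Hedged example of plotting the structure of a single, hypothetical gene;
# the coordinates and gene id below are placeholders (requires bedtools).
def _example_plot_genes():
    genes_bed = pybedtools.BedTool("chr1\t1000\t9000\tENSG0000001\t0\t+",
                                   from_string=True)
    exons_bed = pybedtools.BedTool("chr1\t1000\t2000\tENSG0000001\t0\t+\n"
                                   "chr1\t8000\t9000\tENSG0000001\t0\t+",
                                   from_string=True)
    introns_bed = pybedtools.BedTool("chr1\t2000\t8000\tENSG0000001\t0\t+",
                                     from_string=True)
    region_bed = pybedtools.BedTool("chr1\t0\t10000", from_string=True)
    fig, ax = plt.subplots()
    plotGenes(genes_bed, exons_bed, introns_bed, region_bed, ax=ax)
    return fig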
def determineYPosGene(genes_bed,
region_size,
distance_ratio):
'''Function that determines the max y position for gene plotting via
function plotGenes.
:param genes_bed: :class:`pybedtools.BedTool` object containing genes to be
plotted.
:type genes_bed: :class:`pybedtools.BedTool`
:param region_size: Size of region to be plotted in base pairs.
:type region_size: int
:param distance_ratio: Minimal distance between two genes, as ratio of ax
width, such that two genes are plotted side by side. If this ratio is
underwent, the genes will be stacked.
:type distance_ratio: float
:return: Tuple of
1. max_y_pos: Defines the number of stacked genes.
2. y_pos_dict: Dictionary with keys = gene ids and values = y position \
of gene.
:rtype: tuple
'''
sort_indices = [int(idx) for idx in np.argsort([i[1] for i in genes_bed])]
genes_sorted_bed = [genes_bed[i] for i in sort_indices]
y_pos_dict = {}
y_level_dict = {}
max_y_pos = 0
for interval in genes_sorted_bed:
gene_name = interval[3]
gene_start = int(interval[1])
gene_end = int(interval[2])
for i in range(max_y_pos+1):
if(i == 0 and not max_y_pos in y_level_dict):
y_pos_dict[gene_name] = i
y_level_dict[i] = [[gene_start, gene_end]]
break
elif(gene_start > y_level_dict[i][-1][1] and
float(gene_start-y_level_dict[i][-1][0])/float(region_size) >
distance_ratio):
y_pos_dict[gene_name] = i
y_level_dict[i] += [[gene_start, gene_end]]
break
elif(i == max_y_pos):
max_y_pos += 1
y_pos_dict[gene_name] = max_y_pos
y_level_dict[max_y_pos] = [[gene_start, gene_end]]
break
else:
continue
return max_y_pos, y_pos_dict
def createGeneNameMap(gene_name_mapping_filename):
'''Function that creates a mapping between gene ids
    :param gene_name_mapping_filename: Path to a tab separated file, for which
        the first column is an ENSEMBL gene id, and the second column is the
        HUGO gene name
    :type gene_name_mapping_filename: str
:return: Dictionary containing the gene id mapping.
:rtype: dictionary
'''
gene_name_mapping_file = open(gene_name_mapping_filename, "r")
gene_map = {}
for line in gene_name_mapping_file:
split_line = line.rstrip().split("\t")
ensembl_gene_id = split_line[0].split(".")[0]
hugo_gene_symbol = split_line[1].split(".")[0]
gene_map[ensembl_gene_id] = hugo_gene_symbol
gene_name_mapping_file.close()
return gene_map
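# Editor's note -- hedged example of the expected mapping file format
# (tab separated, one gene per line; ids below are illustrative):
#   ENSG00000141510    TP53
#   ENSG00000012048    BRCA1
# gene_map = createGeneNameMap("gene_id_to_symbol.tsv")
# gene_map["ENSG00000141510"]  # -> "TP53"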
def plotGeneExpression(genes_bed,
region_bed,
expression_df_g1,
expression_df_g2,
gene_names_map,
blacklist=None,
ax=None,
plot_legend=False,
color_g1="#fb8072",
color_g2="#80b1d3",
g1_id="tumor",
g2_id="normal",
plot_gene_names=True):
'''Function for plotting paired gene expression (e.g. tumor and normal) on a
gene region scale retaining the position of genes.
:param genes_bed: :class:`pybedtools.BedTool` object containing TXstart,
and TXend of genes.
:type genes_bed: :class:`pybedtools.BedTool`
:param region_bed: :class:`pybedtools.BedTool` object containing the region
to be plotted
:type region_bed: :class:`pybedtools.BedTool`
:param expression_df_g1: :class:`pandas.Dataframe` containing the expression
values of g1 samples (columns: sample ids; index: gene ids)
:type expression_df_g1: :class:`pandas.DataFrame`
:param expression_df_g2: :class:`pandas.Dataframe` containing the expression
values of g2 samples (columns: sample ids; index: gene ids)
:type expression_df_g2: :class:`pandas.DataFrame`
:param gene_names_map: Dictionary with keys: ENSEMBL GENE IDs, and values:
HUGO GENE SYMBOLs.
:type gene_names_map: dict.
:param blacklist: Set containing gene ids not to be plotted, default to
None.
:type blacklist: set, optional
:param ax: Axis used for plotting, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:param plot_legend: If True legend is plotted, False otherwise, defaults to
False.
:type plot_legend: bool
:param color_g1: Color used for plotting g1 samples expression, defaults to
"#fb8072".
:type color_g1: str, optional
:param color_g2: Color used for plotting g2 samples expression, defaults to
"#80b1d3".
:type color_g2: str, optional
:param g1_id: ID of g1 used for legend plotting, defaults to "tumor".
:type g1_id: str, optional
:param g2_id: ID of g2 used for legend plotting, defaults to "normal".
:type g2_id: str, optional
:param plot_gene_names: If True, the HUGO GENE SYMBOLs will be shown, else
the GENE SYMBOLs are hidden.
:type plot_gene_names: bool.
:return: Axis on which plot was placed.
:rtype: :class:`matplotlib.axes._subplots.AxesSubplot`
'''
ax = ax if ax is not None else plt.gca()
# Get gene names and regions
genes_in_region_bed = genes_bed.intersect(region_bed,
wa=True,
u=True).sort()
gene_names = []
gene_regions = []
for e in genes_in_region_bed:
gene_name_ens = str(e[3])
gene_names += [gene_names_map[gene_name_ens]]
gene_regions += [[int(e[1]), int(e[2])]]
region_right_border = int(region_bed[0][2])
region_left_border = int(region_bed[0][1])
# Determine minimal extension of barplot
extension=None
for i in range(len(gene_regions)):
if(not blacklist is None and gene_names[i] in blacklist):
continue
left_border = gene_regions[i][0]
right_border = None
if(i < len(gene_names)-1):
right_border = gene_regions[i+1][0]
else:
right_border = region_right_border
current_extension = right_border-left_border
if(current_extension == 0.):
continue
if(extension is None):
extension = float(current_extension)
elif(current_extension < extension):
extension = float(current_extension)
boxprops = {"color": "k", "linewidth": .3}
flierprops = {"color": "k"}
medianprops = {"color": "k", "linewidth": .3}
whiskerprops = {"color": "k", "linewidth": .3}
capprops={"color": "k", "linewidth": .3}
patch_list = None
patch_description_list = None
tick_positions = []
gene_names_clean = []
counter=0
patch_saved = False
for gene_name in gene_names:
left_border = gene_regions[counter][0]
right_border = region_right_border
if(not blacklist is None and gene_name in blacklist):
counter += 1
continue
if(counter < len(gene_names)-1):
right_border = gene_regions[counter+1][0]
bplot_g1_pos = left_border + extension/4.
bplot_g2_pos = left_border + 3*(extension/4.)
tick_positions += [left_border + extension/2.]
gene_names_clean += [gene_name]
exp_values_g1 = expression_df_g1.loc[gene_name, :]
if(type(exp_values_g1).__name__ == "Series"):
exp_values_g1 = list(exp_values_g1)
else:
exp_values_g1 = list(exp_values_g1.iloc[0, :])
exp_values_g2 = expression_df_g2.loc[gene_name, :]
if(type(exp_values_g2).__name__ == "Series"):
exp_values_g2 = list(exp_values_g2)
else:
exp_values_g2 = list(exp_values_g2.iloc[0, :])
bplot_g1 = ax.boxplot([np.log2([i if
i >= 1. else
1. for
i in exp_values_g1])],
positions=[bplot_g1_pos],
widths=extension/2.,
patch_artist=True,
boxprops=boxprops,
flierprops=flierprops,
medianprops=medianprops,
whiskerprops=whiskerprops,
capprops=capprops,
showfliers=False)
bplot_g2 = ax.boxplot([np.log2([i if
i >= 1. else
1. for
i in exp_values_g2])],
positions=[bplot_g2_pos],
widths=extension/2.,
patch_artist = True,
boxprops=boxprops,
flierprops=flierprops,
medianprops=medianprops,
whiskerprops=whiskerprops,
capprops=capprops,
showfliers=False)
bplot_g1["boxes"][0].set_facecolor(color_g1)
bplot_g2["boxes"][0].set_facecolor(color_g2)
if(not patch_saved):
patch_saved=True
patch_list = [bplot_g1["boxes"][0], bplot_g2["boxes"][0]]
patch_description_list = [g1_id, g2_id]
counter += 1
ax.set_xlim(region_left_border, region_right_border)
ax.xaxis.set_major_locator(ticker.FixedLocator((tick_positions)))
ax.xaxis.set_major_formatter(ticker.FixedFormatter((gene_names_clean)))
if(not plot_gene_names):
ax.xaxis.set_major_formatter(
ticker.FixedFormatter(([ " " for i in
gene_names_clean])))
for tick in ax.get_xticklabels():
tick.set_rotation(45)
tick.set_size(6)
for ytick in ax.get_yticklabels():
ytick.set_size(6)
if(plot_legend):
ax.legend(patch_list,
patch_description_list,
fontsize=5,
loc='lower left')
return ax
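# --- Usage sketch (editor's addition, not part of the original module) ---
# Hedged example with random expression values for a single gene; coordinates,
# sample ids and expression values are hypothetical placeholders.
def _example_plot_gene_expression():
    genes_bed = pybedtools.BedTool(
        "chr17\t7661779\t7687550\tENSG00000141510\t0\t-", from_string=True)
    region_bed = pybedtools.BedTool("chr17\t7600000\t7700000",
                                    from_string=True)
    gene_names_map = {"ENSG00000141510": "TP53"}
    expression_tumor = pnd.DataFrame(np.random.rand(1, 3) * 100,
                                     index=["TP53"],
                                     columns=["t1", "t2", "t3"])
    expression_normal = pnd.DataFrame(np.random.rand(1, 3) * 100,
                                      index=["TP53"],
                                      columns=["n1", "n2", "n3"])
    fig, ax = plt.subplots()
    plotGeneExpression(genes_bed, region_bed, expression_tumor,
                       expression_normal, gene_names_map, ax=ax)
    return fig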
def plotGeneExpressionEqualDist(genes_bed,
gene_mid_points,
region,
expression_df,
groups,
gene_names_map=None,
blacklist=None,
ax=None,
plot_legend=False,
colors=None,
ids=None,
plot_gene_names=True,
position_gene_names="bottom",
log_transformed=True,
plot_points=False,
alpha=.5):
'''Function for plotting grouped gene expression (e.g. tumor and normal) on
a gene region scale equalizing the position of genes.
:param genes_bed: :class:`pybedtools.BedTool` object containing gene
regions.
:type genes_bed: :class:`pybedtools.BedTool`
:param gene_mid_points: list of integer values containing center positions
of genes.
:type gene_mid_points: list
:param region: List containing the region to be plotted
([<chrom>, <start>, <end>]).
:type region: list
:param groups: List of lists containing the IDs of the different groups.
:type groups: list
:param gene_names_map: Dictionary with keys: ENSEMBL GENE IDs, and values:
HUGO GENE SYMBOLs.
:type gene_names_map: dict.
:param expression_df: class:`pandas.DataFrame` object containing the
expression values of all samples (columns: sample ids; index: gene ids).
:type expression_df: class:`pandas.DataFrame`
:param blacklist: Set containing gene ids not to be plotted, defaults to
None,
:type blacklist: set, optional
    :param ax: Axis used for plotting, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:param plot_legend: If True plot legend, False otherwise, defaults to False.
:type plot_legend: bool, optional
:param colors: List of colors used for plotting samples expression. The
number of colors must be the same as the number of groups, defaults to
None.
:type colors: str, optional
:param ids: IDs used for legend plotting, defaults to None. Number of ids
must be the same as the number of groups.
:type ids: list, optional.
:param plot_gene_names: True if gene names shall be plotted,
False otherwise, defaults to True.
:type plot_gene_names: bool, optional
:param position_gene_names: Either of "top", or "bottom", defaults to
"bottom".
:type position_gene_names: str, optional
    :param log_transformed: If True, use log-transformed values for plotting,
        non-transformed values otherwise, defaults to True.
    :type log_transformed: bool, optional
:param plot_points: If True, a point per expression value is plotted in
addition to the boxplot, no points are plotted otherwise, defaults to
False.
:type plot_points: bool, optional
:param alpha: Alpha value for the background color of the boxplots boxes,
defaults to 0.5.
:type alpha: float, optional
:return: Plots axis.
:rtype: :class:`matplotlib.axes._subplots.AxesSubplot`
'''
standard_colors = ["#66c2a5",
"#fc8d62",
"#8da0cb",
"#ec87c2",
"#a6d854",
"#ffd92f",
"#e5c494",
"#bbbbbb"]
ax = ax if ax is not None else plt.gca()
region_bed = pybedtools.BedTool("\t".join([str(i) for i in region]),
from_string=True)
# Get gene names and regions
genes_in_region_bed = genes_bed.intersect(region_bed,
wa=True,
u=True).sort()
gene_names = []
gene_regions = []
for e in genes_in_region_bed:
gene_name_ens = str(e[3])
if(not gene_names_map is None):
gene_names += [gene_names_map[gene_name_ens]]
else:
gene_names += [gene_name_ens]
gene_regions += [[int(e[1]), int(e[2])]]
region_right_border = int(region_bed[0][2])
region_left_border = int(region_bed[0][1])
# Determine minimal extension of barplot
extension=None
if(len(gene_mid_points) <= 1):
extension=region[2]-region[1]
else:
extension=gene_mid_points[1]-gene_mid_points[0]
# Subtract a small percentage of region size from extension
extension=extension-(region[2]-region[1])*.01
boxprops = {"color": "k", "linewidth": .3, "alpha":alpha}
flierprops = {"color": "k"}
medianprops = {"color": "k", "linewidth": .3}
whiskerprops = {"color": "k", "linewidth": .3}
capprops={"color": "k", "linewidth": .3}
patch_list = []
patch_description_list = []
tick_positions = []
gene_names_clean = []
counter=0
for gene_name in gene_names:
left_border = gene_mid_points[counter]-extension/2
right_border = gene_mid_points[counter]+extension/2
if(not blacklist is None and gene_name in blacklist):
counter += 1
continue
n_groups = len(groups)
for g in range(n_groups):
bplot_pos = left_border + (2*g+1)*extension/float((n_groups*2.))
tick_positions += [left_border + extension/2.]
gene_names_clean += [gene_name]
exp_values = expression_df.loc[gene_name, groups[g]]
if(type(exp_values).__name__ == "Series"):
exp_values = list(exp_values)
else:
exp_values = list(exp_values.iloc[0, :])
expression_values = exp_values
if(log_transformed):
expression_values = np.log2([i
if i >= 1.
else 1.
for i in exp_values])
bplot = ax.boxplot(expression_values,
positions=[bplot_pos],
widths=extension/float(n_groups),
patch_artist=True,
boxprops=boxprops,
flierprops=flierprops,
medianprops=medianprops,
whiskerprops=whiskerprops,
capprops=capprops,
showfliers=False)
color = None
if(not colors is None):
color = colors[g]
else:
color = standard_colors[g]
bplot["boxes"][0].set_facecolor(color)
if(plot_points):
x_positions = [ (bplot_pos+
(i-.5)*
((2*extension)/(float(n_groups)*3))) for i in
list(np.random.rand(len(expression_values))) ]
plt.plot(x_positions, expression_values, "k.", markersize=3)
g_id = None
if(not ids is None):
g_id = ids[g]
else:
g_id = "group "+str(g)
if(not g_id in patch_description_list):
patch_list += [bplot["boxes"][0]]
patch_description_list += [g_id]
counter += 1
ax.set_xlim(region_left_border, region_right_border)
if(position_gene_names == "top"):
ax.xaxis.set_ticks_position("top")
ax.xaxis.set_major_locator(ticker.FixedLocator((tick_positions)))
ax.xaxis.set_major_formatter(ticker.FixedFormatter((gene_names_clean)))
if(not plot_gene_names):
ax.xaxis.set_major_formatter(ticker.FixedFormatter(
([ " " for i in
gene_names_clean])))
for tick in ax.get_xticklabels():
tick.set_rotation(45)
tick.set_size(5)
for ytick in ax.get_yticklabels():
ytick.set_size(5)
if(plot_legend):
ax.legend(patch_list,
patch_description_list,
fontsize=5,
loc='lower left')
return ax
def plotGenomicSegments(segments_list,
chrom,
start,
end,
ax = None):
    '''Function for plotting genomic segments in different colors
    :param segments_list: Iterable of segment records with the fields
        (chrom, start, end, name, score, strand, start, end, color), e.g.
        obtained from a tabix query on a bed file. The color field (R,G,B)
        is used to determine the color used for plotting.
    :type segments_list: list
:param chrom: Chromosome of the region to be plotted.
:type chrom: str
:param start: Start position of the region to be plotted.
:type start: str
:param end: End position of the region to be plotted.
:type end: str
:param ax: Axis used for plotting, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Dictionary with keys = names of segments, and values patch
:rtype: dict
'''
ax = ax if ax is not None else plt.gca()
patches_dict = {}
for segment in segments_list:
segment_start = int(segment[1])
segment_end = int(segment[2])
color = tuple([ float(i)/256. for i in
str(segment[-1]).split(",") ]+[1])
segment_type = str(segment[3])
if(segment_type == "R"):
color = (1,1,1,1)
rect = Rectangle((segment_start, 0),
segment_end-segment_start,
1,
color=color)
ax.add_patch(rect)
patches_dict[segment_type] = rect
plt.xlim(int(start), int(end))
plt.ylim(0, 1)
plt.yticks([], [])
return patches_dict
def plotCNVs(cnvs_bed,
chromosome,
start,
end,
ploidy=2,
cnv_threshold=0.7,
color_gain="g",
color_loss="r",
color_neutral="k",
ax=None):
'''Function for plotting CNV segments
:param cnvs_bed: :class:`pybedtools.BedTool` object containing CNVs with
following entries:
1. Chromosome,
2. Start Position,
3. End Position,
4. Deviation from ploidy,
5. True Copy Number)
:type cnvs_bed: :class:`pybedtools.BedTool`
:param chromosome: Chromosome for which to plot CNVs.
:type chromosome: str
:param start: Start position on chromosome.
:type start: int
:param end: End position on chromosome.
:type end: int
:param ploidy: Assumed ploidy of tumor, defaults to 2.
:type ploidy: int, optional
:param cnv_threshold: Minimal deviation from ploidy to be considered as a
CNV, defaults to 0.7.
:type cnv_threshold: float, optional
:param color_gain: Plot color of copy number gains, defaults to "g".
:type color_gain: str, optional
:param color_loss: Plot color of copy number losses, defaults to "r".
:type color_loss: str, optional
:param color_neutral: Plot color of copy number neutral regions, defaults to
"k".
:type color_neutral: str, optional
:param ax: Axis used for plotting.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned
:rtype: None
'''
# Use given axis for plotting
ax = ax if ax is not None else plt.gca()
for interval in cnvs_bed:
current_start = int(interval[1])
current_end = int(interval[2])
ploidy_dev = float(interval[3])
tcn = float(interval[4])
# Smooth tcn, if ploidy_dev is smaller than cnv_threshold
if(abs(ploidy_dev) < cnv_threshold):
tcn = ploidy
color = color_neutral
if(ploidy_dev >= cnv_threshold):
color=color_gain
elif(ploidy_dev <= -1.*cnv_threshold):
color = color_loss
if(abs(ploidy_dev) > cnv_threshold):
rect = Rectangle((current_start, tcn-.2),
current_end-current_start,
.4,
color=color,
edgecolor='none',
capstyle='butt',
linewidth=0)
ax.add_patch(rect)
else:
rect = Rectangle((current_start, tcn-.1),
current_end-current_start,
.2,
color=color,
edgecolor='none',
capstyle='butt',
linewidth=0)
ax.add_patch(rect)
# Plot thresholds
color_threshold=(189./255., 189./255., 189./255., 0.5)
if(ploidy == 2):
plt.plot([int(start), int(end)],
[1, 1],
color=color_threshold,
linestyle="--",
linewidth=.5)
plt.plot([int(start), int(end)],
[2, 2],
color=color_threshold,
linestyle="--",
linewidth=.5)
plt.plot([int(start), int(end)],
[3, 3],
color=color_threshold,
linestyle="--",
linewidth=.5)
plt.plot([int(start), int(end)],
[4, 4],
color=color_threshold,
linestyle="--",
linewidth=.5)
elif(ploidy == 4):
plt.plot([int(start), int(end)],
[1, 1],
color=color_threshold,
linestyle="--",
linewidth=.5)
plt.plot([int(start), int(end)],
[2, 2],
color=color_threshold,
linestyle="--",
linewidth=.5)
plt.plot([int(start), int(end)],
[3, 3],
color=color_threshold,
linestyle="--",
linewidth=.5)
plt.plot([int(start), int(end)],
[4, 4],
color=color_threshold,
linestyle="--",
linewidth=.5)
plt.plot([int(start), int(end)],
[5, 5],
color=color_threshold,
linestyle="--",
linewidth=.5)
plt.plot([int(start), int(end)],
[6, 6],
color=color_threshold,
linestyle="--",
linewidth=.5)
plt.xlim([int(start), int(end)])
if(ploidy == 2):
plt.ylim([0, 4.5])
plt.yticks([0, 1, 2, 3, 4], ["0", "1", "2", "3", "4"], size=6)
elif(ploidy == 4):
plt.ylim([0, 6.5])
plt.yticks([0, 2, 4, 6], ["0", "2", "4", "6"], size=6)
plt.xticks(rotation=45)
def plotCNVsHeat(cnvs_bed,
chromosome,
start,
end,
ploidy=2,
cnv_threshold=0.7,
cmap="bwr",
max_dev=None,
ax=None):
'''Function for plotting CNV segments as heatmap
:param cnvs_bed: :class:`pybedtools.BedTool` object containing CNVs with
following entries:
1. Chromosome,
2. Start Position,
3. End Position,
4. Deviation from ploidy,
5. True Copy Number)
:type cnvs_bed: :class:`pybedtools.BedTool`
:param chromosome: Chromosome for which to plot CNVs.
:type chromosome: str
:param start: Start position on chromosome.
:type start: int
:param end: End position on chromosome.
:type end: int
:param ploidy: Assumed ploidy of tumor, defaults to 2.
:type ploidy: int, optional
:param cnv_threshold: Minimal deviation from ploidy to be considered as a
CNV, defaults to 0.7.
:type cnv_threshold: float, optional
:param cmap: Colormap used for plotting CNVs, defaults to "bwr".
:type cmap: str, optional
:param max_dev: Maximal deviation from ploidy to plot, defaults to None.
:type max_dev: float, optional
:param ax: Axis used for plotting, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned.
:rtype: None
'''
# Use given axis for plotting
ax = ax if ax is not None else plt.gca()
colors = plt.cm.get_cmap(cmap)
if(max_dev is None):
max_dev = max([abs(float(i[3])) for i in cnvs_bed])
for interval in cnvs_bed:
current_start = int(interval[1])
current_end = int(interval[2])
ploidy_dev = float(interval[3])
tcn = float(interval[4])
if(tcn < -1.*max_dev):
tcn = -1.*max_dev
elif(tcn > max_dev):
tcn = max_dev
color = colors((ploidy_dev+max_dev)/(2*max_dev))
if(abs(ploidy_dev) < cnv_threshold):
color=colors(.5)
rect = Rectangle((current_start, .5),
current_end-current_start,
1,
color=color,
edgecolor='none',
capstyle='butt',
linewidth=0)
ax.add_patch(rect)
plt.xlim([int(start), int(end)])
plt.ylim([.5, 1.5])
plt.xticks([], [])
plt.yticks([], [])
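# A minimal usage sketch for plotCNVsHeat (the file name and region are hypothetical;
# assumes a BED file with the five columns documented in the docstring above):
#
#   import pybedtools
#   import matplotlib.pyplot as plt
#   cnvs = pybedtools.BedTool("cnvs.bed")
#   fig, ax = plt.subplots(figsize=(8, 1))
#   plotCNVsHeat(cnvs, "1", 1000000, 20000000, ploidy=2, cmap="bwr", ax=ax)
#   plt.show()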
def readACESeqAsBed(input_filename):
'''Function that reads CNVs from ACESeq ("*most_important*") files and
converts them to pybedtools.BedTool object
:param input_filename: Full path to ACESeq "most_important" file
:type input_filename: str
:return: :class:`pybedtools.BedTool` object containing CNVs from ACESeq
:rtype: :class:`pybedtools.BedTool`
'''
input_file = open(input_filename, "r")
cnv_bed_list = []
ploidy = None
for line in input_file:
if(line[:7] == "#ploidy"):
ploidy = float(line.rstrip().split(":")[1])
print(ploidy)
if(line[0] == "#" or line[:5] == "chrom"):
continue
split_line = line.rstrip().split("\t")
ploidy_dev = float(split_line[5])-ploidy
chrom = split_line[0]
if(chrom == "23"):
chrom="X"
elif(chrom == "24"):
chrom = "Y"
cnv_bed_list += [ [chrom,
split_line[1],
split_line[2],
str(ploidy_dev),
split_line[5],
"+"]
]
input_file.close()
return pybedtools.BedTool("\n".join(["\t".join(e) for e in
cnv_bed_list]),
from_string=True)
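# Example (hypothetical path): load ACESeq segments and render them for one region
# with the heatmap-style CNV plot defined above.
#
#   cnvs = readACESeqAsBed("sample.most_important_info.txt")
#   fig, ax = plt.subplots(figsize=(8, 1))
#   plotCNVsHeat(cnvs, "7", 0, 50000000, ploidy=2, ax=ax)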
def plotChIPSignals(chip_signals,
r_chrom,
r_start,
r_end,
ax=None,
color="b",
offset=None,
merge=None):
'''Function that plots bedGraph like iterators.
    :param chip_signals: Iterator for which each element is a list-like
        object containing:
        1. Chromosome
        2. Start position
3. End position
4. Value to be plotted as bar
:type chip_signals: iterator
:param r_chrom: Chromosome of region to be plotted.
:type r_chrom: str
:param r_start: Start position of region to be plotted.
:type r_start: int
:param r_end: End position of region to be plotted.
:type r_end: int
:param ax: Axis of plot
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:param color: color of bars, defaults to "b".
:type color: str, optional
:param offset: Length of intervals, defaults to None.
:type offset: int, optional
    :param merge: Number of elements to be merged. If this value is not None,
        then every `merge` consecutive elements are averaged and plotted,
        defaults to None.
:type merge: int, optional
:return: Nothing to be returned.
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
max_signal = 0
left = []
height = []
for signal in chip_signals:
start = int(signal[1])
end = int(signal[2])
value = float(signal[3])
if(value > max_signal):
max_signal = value
if(not offset is None):
end = start + offset
left += [start]
height += [value]
left_merged = []
height_merged = []
if(not merge is None):
heights = []
lefts = []
for i in range(len(left)):
if(i % merge == 0 and not (i == 0)):
left_merged += [lefts[0]]
lefts = []
height_merged += [np.mean(heights)]
heights = []
heights += [height[i]]
lefts += [left[i]]
if(not i % merge == 0):
left_merged += [lefts[0]]
lefts = []
height_merged += [np.mean(heights)]
heights = []
offset = merge*offset
left = left_merged
height = height_merged
plt.bar(left, height, offset, color = color, edgecolor = color)
plt.xlim(r_start, r_end)
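# Usage sketch for plotChIPSignals (hypothetical bedGraph file; `offset` should
# match the fixed interval size of the file, here assumed to be 50 bp bins):
#
#   chip = pybedtools.BedTool("h3k27ac.bedgraph")
#   fig, ax = plt.subplots(figsize=(8, 1))
#   plotChIPSignals(chip, "1", 1000000, 1100000, ax=ax, color="b", offset=50, merge=10)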
def plotMethylationProfileHeat(methylation_bed,
chrom,
start,
end,
bin_size=1000,
ax = None):
'''Function for plotting methylation values as heatmap
:param methylation_bed: Methylation calls. Following fields must be
included: Chrom, Start, End, Methylated Cs, Unmethylated Cs.
:type methylation_bed: :class:`pybedtools.BedTool`
:param chrom: Chromosome of region to be plotted.
:type chrom: str
:param start: Start position of region to be plotted.
:type start: int
:param end: End position of region to be plotted.
:type end: int
:param bin_size: size of bin to average methylation values, defaults to
1000.
:type bin_size: int, optional
:param ax: Axis to be used for plotting, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
binned_meth_calls = [ [0, 0] for i in range(int(((end-start)/bin_size)+1)) ]
counter = 0
for element in methylation_bed:
# Determine bin
position = int(element[1])
if(position < start or position > end):
continue
n_meth = int(element[3])
n_unmeth = int(element[4])
current_bin = int((position-start)/bin_size)
counter += 1
binned_meth_calls[current_bin][0] += n_meth
binned_meth_calls[current_bin][1] += n_unmeth
binned_average_meth = [ float(i[0])/(float(i[0])+float(i[1]))
if (float(i[0])+float(i[1])) > 0
else "NA"
for i in binned_meth_calls ]
binned_average_meth_no_missing = []
n = len(binned_average_meth)
for i in range(n):
if(not binned_average_meth[i] == "NA"):
binned_average_meth_no_missing += [binned_average_meth[i]]
else:
meth_before = (binned_average_meth[i-1]
if not i == 0
else "NA")
meth_after = (binned_average_meth[i+1]
if not i == len(binned_average_meth)-1
else "NA")
average_list = [ j
for j
in [meth_before, meth_after]
if not j == "NA" ]
binned_average_meth_no_missing += [ (float(sum(average_list))/
float(len(average_list)))
if len(average_list) > 0
else 0. ]
binned_average_meth = binned_average_meth_no_missing
# Plot average methylation values per bin
# Define Colormap
cmap = cm.bwr
norm = matplotlib.colors.Normalize(vmin=0., vmax=1.)
m = matplotlib.cm.ScalarMappable(norm = norm, cmap = cmap)
for cbin in range(len(binned_average_meth)):
rect = Rectangle((start+cbin*bin_size, 0),
bin_size,
1,
color=m.to_rgba(binned_average_meth[cbin]))
ax.add_patch(rect)
plt.xlim([start, end])
plt.ylim([0, 1])
plt.xticks([], [])
plt.yticks([], [])
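# Usage sketch for plotMethylationProfileHeat (hypothetical file with the five
# documented columns: chrom, start, end, methylated Cs, unmethylated Cs):
#
#   meth = pybedtools.BedTool("cpg_calls.bed")
#   fig, ax = plt.subplots(figsize=(8, 1))
#   plotMethylationProfileHeat(meth, "1", 1000000, 2000000, bin_size=5000, ax=ax)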
def plotMethylationProfile(meth_calls,
chrom,
start,
end,
color="k",
ax=None):
'''Function that plots methylation values as dot plots.
:param meth_calls: Iterator containing list-like elements with the following
entries:
        1. Chromosome
2. Start position
3. end position
4. Number methylated cytosines
5. Number unmethylated cytosines
Or
        1. Chromosome
2. Start position
3. end position
4. Beta Value
    :type meth_calls: iterator
:param chrom: Chromosome of region to be plotted.
:type chrom: str
:param start: Start position of region to be plotted.
:type start: int
:param end: End position of region to be plotted.
:type end: int
:param color: Color of points representing methylation values, defaults to
"k".
:type color: str, optional
:param ax: Axis of plot, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
n_entries = len(meth_calls[0])
if(n_entries == 5):
plt.plot([ (float(m[1])+float(m[2]))/2. for m in meth_calls ],
[ float(m[3])/(float(m[3])+float(m[4]))
if not(float(m[3])+float(m[4]) == 0.)
else 0. for m in meth_calls],
color=color,
marker=".",
linestyle='None',
markersize=1,
alpha=.5)
elif(n_entries == 4):
plt.plot([ (float(m[1])+float(m[2]))/2. for m in meth_calls ],
                 [ float(m[3]) for m in meth_calls],
color=color,
marker=".",
linestyle='None',
markersize=1,
alpha=.5)
plt.ylim([0, 1])
plt.xticks([], [])
plt.xlim([start, end])
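# Usage sketch for plotMethylationProfile (same input format as the heatmap variant;
# here per-CpG methylation values are drawn as dots; the records are hypothetical):
#
#   meth = [["1", "1000100", "1000101", "9", "1"],
#           ["1", "1000500", "1000501", "2", "8"]]
#   fig, ax = plt.subplots(figsize=(8, 1))
#   plotMethylationProfile(meth, "1", 1000000, 1001000, color="k", ax=ax)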
def plotTX(chrom_r,
start_r,
end_r,
TX_pos,
direction="right",
color="k",
ax=None):
'''Function that plots a translocation event as a bar, showing the part
of the genome that is translocated.
:param chrom_r: Chromosome of the region to be plotted.
:type chrom_r: str
:param start_r: Start position of the region to be plotted.
:type start_r: int
:param end_r: End position of the region to be plotted.
:type end_r: int
:param TX_pos: Position of the translocation.
:type TX_pos: int
:param direction: Direction of the genomic part that is translocated. Either
of "left" (upstream), or "right" (downstream), defaults to "left".
:type direction: str, optional
:param color: Color of the bar representing the translocation, defaults to
"k".
:type color: str, optional
:param ax: Axis of plot, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned.
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
TX_start = TX_pos
TX_end = end_r
if(direction == "left"):
TX_start = start_r
TX_end = TX_pos
rect = Rectangle((TX_start, .4),
TX_end-TX_start,
.2,
color=color,
capstyle='butt',
linewidth=0)
ax.add_patch(rect)
plt.xlim([start_r, end_r])
plt.ylim([0.3, 0.7])
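# Usage sketch for plotTX: mark a translocation at 1.5 Mb whose downstream ("right")
# side is the translocated part of the plotted window (coordinates are hypothetical):
#
#   fig, ax = plt.subplots(figsize=(8, .3))
#   plotTX("1", 1000000, 2000000, 1500000, direction="right", ax=ax)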
def plotRegions(regions,
start,
end,
color="#cbebc4",
edgecolor=False,
alpha=1,
ax = None):
'''Functions that plots genomic regions as simple rectangles.
:param regions: Iterator containig list-like elements with the following
entries:
1. Chromosome
2. Start position
3. End position
:type regions: iterator
:param start: Start position of the region to be plotted.
:type start: int
:param end: End position of the region to be plotted.
:type end: int
:param color: Color of the rectangles representing the regions to be
plotted, defaults to "#cbebc4".
:type color: str, optional
    :param edgecolor: Color of region edge. If False, no edge is plotted,
        defaults to False.
    :type edgecolor: str, optional
:param alpha: Alpha value of the rectangle, representing the region to be
plotted, defaults to 1.
:type alpha: float, optional.
:param ax: Axis of plot, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
c = 0
for region in regions:
if(not edgecolor):
current_color = color
rect = Rectangle([int(region[1]), -.75],
int(region[2])-int(region[1]),
1.5,
facecolor=current_color,
edgecolor='none',
alpha=alpha)
c += 1
else:
current_color = color
rect = Rectangle([int(region[1]), -.75],
int(region[2])-int(region[1]),
1.5,
facecolor=current_color,
edgecolor=edgecolor,
alpha=alpha)
c += 1
ax.add_patch(rect)
plt.xticks([], [])
plt.yticks([], [])
plt.xlim([start, end])
plt.ylim([-1, 1])
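# Usage sketch for plotRegions (hypothetical enhancer BED file):
#
#   enhancers = pybedtools.BedTool("enhancers.bed")
#   fig, ax = plt.subplots(figsize=(8, .5))
#   plotRegions(enhancers, 1000000, 2000000, color="#cbebc4", ax=ax)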
def plotMotifDirections(motifs_bed,
start,
end,
head_width=0.2,
head_length=1000,
overhang=0,
color_plus="#80b1d3",
color_minus="#fb8072",
ax=None):
'''Function that plots TF motifs as arrows, indicating their directionality.
:param motifs_bed: :class:`pybedtools.BedTool` object containing regions
of the TF sited to be plotted.
:type motifs_bed: :class:`pybedtools.BedTool`
:param start: Start position of the region to be plotted.
:type start: int
:param end: End position of the region to be plotted.
:type end: int
:param head_width: Width of the arrow head as proportion of the arrow,
defaults to 0.2
:type head_width: float, optional
:param head_length: Length of the arrow in bp (depends on the region that
is plotted), defaults to 1000.
:type head_length: int, optional
:param overhang: Fraction that the arrow is swept back (0 overhang means
triangular shape). Can be negative or greater than one. Defaults to 0.
:type overhang: float, optional
:param color_plus: Color of plus stranded TF regions, defaults to "#80b1d3".
:type color_plus: str, optional
    :param color_minus: Color of minus stranded TF regions, defaults to
"#fb8072".
:type color_minus: str, optional
:param ax: Axis on which to plot contact map, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned.
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
for motif in motifs_bed:
motif_start = int(motif[1])
motif_end = int(motif[2])
strand = str(motif[3])
arrow_start = motif_start
arrow_end = motif_end
color=color_plus
dx = head_length
if(strand == "-"):
arrow_start = motif_end
arrow_end = motif_start
color = color_minus
dx = -1.*head_length
plt.arrow(arrow_start,
.5,
dx,
0,
head_width=head_width,
head_length=head_length,
overhang=overhang,
head_starts_at_zero=False,
edgecolor="none",
facecolor=color,
length_includes_head=True)
plt.xlim([start, end])
plt.ylim([0.4, 0.6])
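# Usage sketch for plotMotifDirections (hypothetical CTCF motif BED file with the
# strand in the fourth column, as this function expects):
#
#   motifs = pybedtools.BedTool("ctcf_motifs.bed")
#   fig, ax = plt.subplots(figsize=(8, .5))
#   plotMotifDirections(motifs, 1000000, 2000000, head_length=2000, ax=ax)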
def plotHiCContactMap(contact_map,
start,
end,
segment_size,
cmap="Greys",
vmin=None,
vmax=None,
location="top",
ax=None):
'''Function that plots HiC contact maps as pyramid plots
:param contact_map: Matrix that contains the intensity values of HiC
contacts.
:type contact_map: :class:`pandas.DataFrame`
:param start: Chromosomal start position of region to be plotted.
:type start: int
:param end: Chromosomal end position of region to be plotted.
:type end: int
:param segment_size: Size of the segments for which contacts were called.
:type segment_size: int
:param cmap: Name of the colormap to be used for plotting HiC intensities,
defaults to "Greys".
:type cmap: str, optional
:param vmin: Minimal value of intensity range to be plotted, defaults to
None
:type vmin: float, optional
:param vmax: Maximal value of intensity range to be plotted, defaults to
None.
:type vmax: float, optional
:param location: Either of "top" | "bottom". If location == "top", the
pyramid points upwards, else if location == "bottom" the pyramid points
downwards, defaults to top,
:type location: str, optional
:param ax: Axis on which to plot contact map, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned.
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
    contact_map_index1 = int(start/segment_size)
    contact_map_index2 = int(end/segment_size)+1
sliced_contact_map = contact_map.iloc[contact_map_index1:contact_map_index2,
contact_map_index1:contact_map_index2]
if(vmin is None):
vmin = 0
if(vmax is None):
vmax = np.percentile(contact_map, 99.9)
colormap = plt.get_cmap(cmap)
for i in range(contact_map_index1, contact_map_index2):
y_range = (range(contact_map_index1+(i-contact_map_index1),
contact_map_index2)
if location == "top"
else range(contact_map_index1,
contact_map_index2-(contact_map_index2-i)))
for j in y_range:
# Define midpoint of rectangle
midpoint = (i*segment_size+(j*segment_size-i*segment_size)/2.,
(j*segment_size-i*segment_size)/2.)
vertices = [(midpoint[0]-segment_size/2., midpoint[1]),
(midpoint[0], midpoint[1]-segment_size/2.),
(midpoint[0]+segment_size/2., midpoint[1]),
(midpoint[0], midpoint[1]+segment_size/2.),
(midpoint[0]-segment_size/2., midpoint[1])
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(vertices, codes)
intensity_value = contact_map.iloc[i, j]
intensity_value = (intensity_value/vmax
if intensity_value <= vmax
else 1.)
facecolor = colormap(intensity_value)
patch = matplotlib.patches.PathPatch(path,
facecolor=facecolor,
edgecolor='none')
ax.add_patch(patch)
ax.set_xlim(start, end)
if(location == "top"):
ax.set_ylim(0, (end-start)/2.)
else:
ax.set_ylim(-1.*(end-start)/2., 0)
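# Usage sketch for plotHiCContactMap (hypothetical tab-separated intensity matrix;
# segment_size must match the binning of the matrix, here assumed to be 10 kb):
#
#   import pandas as pd
#   hic = pd.read_csv("hic_chr1_10kb.tsv", sep="\t", header=None)
#   fig, ax = plt.subplots(figsize=(8, 4))
#   plotHiCContactMap(hic, 1000000, 2000000, 10000, cmap="Greys", ax=ax)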
def distanceEqualizer(genomic_segments,
start,
end,
direction="top_down",
color="k",
ax = None):
'''Function that plots arcs from unequal distances of genomic segments to
equal distances.
:param genomic_segments: List of segments for which distances shall be
equalized (each segment is of the form [<chrom>, <start>, <end>])
:type genomic_segments: list
:param start: Start position of the genomic region.
:type start: int
:param end: End position of the genomic region.
:type end: int
:param color: Color of lines equalizing distances, defaults to "k".
:type color: str, optional
:param direction: Direction of distance equalization (top_down | bottom_up),
defaults to "top_down".
:type direction: str, optional.
:param ax: Axis on which to plot, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: List of equalized region midpoints.
:rtype: list
'''
ax = ax if ax is not None else plt.gca()
# Calculate midpoints of original and distance equalized segments
n_segments = len(genomic_segments)
equalized_region_size = (end-start)
if(n_segments > 0):
equalized_region_size=(end-start)/n_segments
equalized_region_mid_points = []
for i in range(1, n_segments+1):
equalized_region_mid_points += [((start+
i*equalized_region_size)-
equalized_region_size/2)]
region_mid_points = []
for e in genomic_segments:
if(int(e[1]) < start):
region_mid_points += [start+(int(e[2])-start)/2]
elif(int(e[2]) > end):
region_mid_points += [int(e[1])+(end-int(e[1]))/2]
else:
region_mid_points += [int(e[1])+(int(e[2])-int(e[1]))/2]
for i in range(len(region_mid_points)):
region_mid_point = region_mid_points[i]
equalized_region_mid_point = equalized_region_mid_points[i]
codes = []
vertices = []
if(direction == "top_down"):
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO]
vertices = [(region_mid_point, 1),
(region_mid_point, .8),
(equalized_region_mid_point, .2),
(equalized_region_mid_point, 0)]
else:
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO]
vertices = [(region_mid_point, 0),
(region_mid_point, .2),
(equalized_region_mid_point, .8),
(equalized_region_mid_point, 1)]
path = Path(vertices, codes)
path_patch = PathPatch(path,
facecolor="none",
edgecolor=color,
linewidth=.5)
ax.add_patch(path_patch)
ax.axis("off")
plt.xlim([start, end])
plt.ylim([0, 1])
return equalized_region_mid_points
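# Usage sketch for distanceEqualizer: the returned midpoints can be reused to place
# evenly spaced annotation tracks under unevenly spaced segments (values hypothetical):
#
#   segments = [["1", "1200000", "1210000"], ["1", "1900000", "1902000"]]
#   fig, ax = plt.subplots(figsize=(8, .5))
#   mid_points = distanceEqualizer(segments, 1000000, 2000000, ax=ax)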
def plotCoordinates(chrom,
start,
end,
color="k",
ax = None,
upper=True,
loc_coordinates="up",
revert_coordinates=False,
rotation=0):
    '''Function that plots genomic coordinates in a linear fashion.
:param chrom: Chromosome of the region to be plotted.
:type chrom: str
:param start: Start position of the region to be plotted.
:type start: int
:param end: End position of the region to be plotted.
:type end: int
:param color: Color of the genomic scales elements, defaults to "k".
:type color: str, optional
:param ax: Axis of plot, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
    :param upper: If True, use fewer (coarser) ticks; if False, use more ticks.
:type upper: bool, optional
:param loc_coordinates: Either of "up" | "down". If "up", plot ticks to
upper direction, else if "down", plot ticks to lower direction, defaults
to "up".
:type loc_coordinates: str, optional
:param revert_coordinates: If True, coordinates are reverted to decreasing
order. Else, coordinates stay in increasing order, defaults to False.
:type revert_coordinates: bool, optional
:param rotation: Rotational angle of coordinate strings, defaults to 0.
:type rotation: int, optional
:return: Nothing to be returned.
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
tick_size = 10**math.ceil((np.log10((end-start)/10)))
if(not upper):
tick_size = 10**int((np.log10((end-start)/10)))
# Determine first tick position
first_tick = start+(tick_size-start%tick_size)
ticks = []
current_tick = first_tick
while(current_tick <= end):
ticks += [current_tick]
current_tick = current_tick + tick_size
scale = None
if(first_tick > 1000000):
scale = "Mb"
else:
scale="Kb"
digits_to_round = None
divisor = None
if(scale == "Mb"):
digits_to_round = int(6-np.log10(tick_size))
divisor = 1000000
else:
digits_to_round = int(5-np.log10(tick_size))
divisor = 100000
tick_labels = [ str(round(i/float(divisor), digits_to_round))+scale
for i in ticks ]
if(loc_coordinates == "up"):
plt.plot([start, end],
[0, 0],
linestyle="-",
color=color,
linewidth=1)
else:
plt.plot([start, end],
[0.3, 0.3],
linestyle="-",
color=color,
linewidth=1)
if(revert_coordinates):
ticks = [ start + end-i for i in ticks ]
ticks.reverse()
tick_labels.reverse()
print(tick_labels)
for i in range(len(ticks)):
if(loc_coordinates == "up"):
plt.plot([ticks[i], ticks[i]],
[0., .3],
linestyle="-",
color=color,
linewidth=1)
plt.text(ticks[i],
.4,
tick_labels[i],
horizontalalignment="center",
verticalalignment="bottom",
fontsize=5,
color=color,
rotation=rotation)
else:
plt.plot([ticks[i], ticks[i]],
[.3, .0],
linestyle="-",
color=color,
linewidth=1)
plt.text(ticks[i],
-.1,
tick_labels[i],
horizontalalignment="center",
fontsize=5,
color=color,
verticalalignment="top",
rotation=rotation)
plt.xlim([start, end])
plt.yticks([], [])
if(loc_coordinates == "up"):
plt.ylim([-.1, .8])
else:
plt.ylim([-1.5, .3])
plt.xticks([], [])
ax.spines["bottom"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.spines["right"].set_visible(False)
def plotLinksAsArcs(links_bed,
chrom_r,
start_r,
end_r,
lw=1,
color="k",
ax = None):
'''Function that plots links between genomic regions as arcs.
:param links_bed: Iterator, that contains bed-like structured lists with the
following elements:
1. Chromosome region1
2. Start region1
3. End region1
4. Chromosome region2
5. Start region2
6. End region2
:type links_bed: iterator
:param chrom_r: Chromosome of the region to be plotted.
:type chrom_r: str
:param start_r: Chromosomal start position of the region to be plotted.
:type start_r: int
    :param end_r: Chromosomal end position of the region to be plotted.
:type end_r: int
:param color: Color of the arc, defaults to "k".
:type color: str, optional.
:param ax: Axis where the plot is drawn, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned.
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
max_dist = 0
for e in links_bed:
link_pos1 = int(e[1])+(int(e[2])-int(e[1]))/2
link_pos2 = int(e[4])+(int(e[5])-int(e[4]))/2
distance = abs(link_pos2-link_pos1)
if(distance > max_dist):
max_dist = distance
mid_point = link_pos1 + (link_pos2-link_pos1)/2
        if(link_pos2 < link_pos1):
mid_point = link_pos2 + (link_pos1-link_pos2)/2
vertices = [(link_pos1, 0),
(mid_point, distance),
(link_pos2, 0)]
codes = [Path.MOVETO,
Path.CURVE3,
Path.CURVE3]
path = Path(vertices,
codes)
patch = PathPatch(path,
facecolor = "None",
edgecolor = color,
lw = lw)
ax.add_patch(patch)
#ax.spines["bottom"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.spines["right"].set_visible(False)
plt.xticks([], [])
plt.yticks([], [])
plt.xlim([start_r, end_r])
plt.ylim([0, max_dist/2])
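# Usage sketch for plotLinksAsArcs (hypothetical BEDPE-like file; each record holds
# the two linked regions in columns 1-3 and 4-6):
#
#   links = pybedtools.BedTool("loops.bedpe")
#   fig, ax = plt.subplots(figsize=(8, 2))
#   plotLinksAsArcs(links, "1", 1000000, 2000000, ax=ax)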
# === toc/fsa/fsa.py (repo: djrochford/toc, license: MIT) ===
"""
File containing DFA and NFA public classes
"""
import collections.abc
from itertools import product, chain, combinations
from string import printable
from typing import (
AbstractSet,
Container,
FrozenSet,
Iterable,
List,
Mapping,
MutableMapping,
Optional,
Set,
Tuple,
Union,
cast
)
from .base import (
_Base,
_extract_states_alphabet,
_error_message,
_good_alphabet,
_check_input
)
State = str
Symbol = str
Regex = str
FsaTransitionFunction = Mapping[
Tuple[State, Symbol], Union[State, AbstractSet[State]]
]
class _FSA(_Base):
def __init__(
self,
*,
transition_function: FsaTransitionFunction,
start_state: State,
accept_states: AbstractSet[State]
):
super().__init__(
transition_function=transition_function, start_state=start_state
)
self._accept_states = accept_states
self._states, self._alphabet = _extract_states_alphabet(
self._transition_function.keys()
)
self._well_defined()
@property
def alphabet(self) -> FrozenSet[Symbol]:
return self._alphabet
@property
def accept_states(self) -> AbstractSet[State]:
return self._accept_states
def _well_defined(self) -> None:
super()._well_defined()
_good_alphabet(alphabet=self.alphabet, name="alphabet")
self._good_accept()
self._good_domain(self.alphabet)
def _good_accept(self) -> None:
bad_accept_states = self.accept_states - self.states
_error_message(
bad_set=bad_accept_states,
message_singular=("Accept state {} is not a member of the fsa's "
"state set."),
message_plural=("Accept states {} are not members of the fsa's "
"state set.")
)
def _good_range(self):
raise NotImplementedError
GnfaTransitionFunction = Mapping[Tuple[State, State], Regex]
MutableGnfaTF = MutableMapping[Tuple[State, State], Regex]
class _GNFA:
def __init__(
self,
transition_function: GnfaTransitionFunction,
body_states: Set[State],
start_state: State,
accept_state: State
):
self.transition_function = transition_function
self.body_states = body_states
self.start_state = start_state
self.accept_state = accept_state
self.states = (
self.body_states | {self.start_state} | {self.accept_state}
)
def reduce(self) -> "_GNFA":
"""
Output a GNFA equivalent to `self` with one less state in it.
"""
def union_main_scope(regex: Regex) -> bool:
paren_count = 0
for char in regex:
if char == '(':
paren_count += 1
elif char == ')':
paren_count -= 1
elif char == '|':
if paren_count == 0:
return True
return False
def regex_star(regex: Regex) -> Regex:
if regex in EMPTIES:
return '€'
if len(regex) == 1:
return regex + '*'
return f"({regex})*"
def regex_concat(regex1: Regex, regex2: Regex) -> Regex:
if regex1 == 'Ø' or regex2 == 'Ø':
return 'Ø'
if regex1 == '€':
return regex2
if regex2 == '€':
return regex1
if union_main_scope(regex1):
regex1 = f'({regex1})'
if union_main_scope(regex2):
regex2 = f'({regex2})'
return regex1 + regex2
def regex_union(regex1: Regex, regex2: Regex) -> Regex:
if regex1 == "Ø":
return regex2
if regex2 == "Ø":
return regex1
return f"{regex1}|{regex2}"
rip = self.body_states.pop()
r2 = self.transition_function[(rip, rip)]
reduced_tf = {}
for state1 in self.states - {self.accept_state, rip}:
r1 = self.transition_function[(state1, rip)]
for state2 in self.states - {self.start_state, rip}:
r3 = self.transition_function[(rip, state2)]
r4 = self.transition_function[(state1, state2)]
new_regex = regex_union(
regex_concat(regex_concat(r1, regex_star(r2)), r3),
r4
)
reduced_tf[(state1, state2)] = new_regex
return _GNFA(
reduced_tf,
self.body_states - {rip},
self.start_state,
self.accept_state
)
NfaTransitionFunction = Mapping[Tuple[State, Symbol], AbstractSet[State]]
MutableNfaTF = MutableMapping[Tuple[State, Symbol], Set[State]]
class NFA(_FSA):
"""
A nondeterministic finite automaton class. Takes three keyword arguments:
- `transition_function`: Mapping[Tuple[State, Symbol], AbstractSet[State]]
- `start_state`: State
- `accept_states`: AbstractSet[State]
(Where States are strings, and Symbols are one-char strings.)
    The transition function's keys implicitly define the nfa's state-set and
alphabet; the first elements of the tuples represent the nfa's states, and
the second elements are the symbols in the alphabet.
The domain of the transition function is the power-set of the nfa's state
set --- i.e., the values of the transition function dictionary should be
sets (or frozensets). The empty set is a valid value; in fact, you are
required to specify that the successor set for a given state-symbol pair is
the empty set, if it is.
You can define epsilon-moves by using the empty string in place of an
alphabet symbol in the transition function. Note that the empty string will
not be inferred to be a member of the alphabet (and hence the checks below
will work as you would expect).
The class will raise a ValueError exception on instantiation if any of the
following are true:
1. the start state is not a member of the set of states inferred from
the transition function;
2. the set of accept states is not a subset of the set of states
inferred from the transition function;
3. a member of the alphabet inferred from the transition function is
not a one-character string;
4. a member of the transition function's range is not a set;
5. the range of the transition function is not a subset of the power
set of states inferred from the transition function;
6. the transition function is missing cases -- i.e., it is not the case
that every pair of a state and a symbol is in the domain of the
transition function.
    The exception message will specify which of these six conditions
    triggered the exception, and which states/symbols are the source of the
problem.
"""
def __or__(self, other: "NFA") -> "NFA":
"""
Let A be the language recognised by nfa1, and B be the language
recognized by nfa2. `nfa1 | nfa2` returns an nfa that recognizes A
union B. The cardinality of the state-set of nfa1 | nfa2 is the
cardinality of the state set of nfa1 plus the cardinality of the
state-set of nfa2 plus 1.
There is no problem with the input NFAs having different alphabets.
"""
new_self, new_other, union_tf = self._combine(other)
union_start_state = _get_new_state(new_self.states | new_other.states)
union_tf[(union_start_state, '')] = {
new_self.start_state, new_other.start_state
}
for symbol in new_self.alphabet | new_other.alphabet:
union_tf[(union_start_state, symbol)] = set()
union_accept_states = new_self.accept_states | new_other.accept_states
return NFA(
transition_function=union_tf,
start_state=union_start_state,
accept_states=union_accept_states
)
def __add__(self, other: "NFA") -> "NFA":
"""
Let A be the language recognised by nfa1, and B be the language
recognized by nfa2. `nfa1 + nfa2` returns an nfa that recognizes A
concat B -- i.e., the language consisting of the set of strings of the
form a concat b, where a is an element of A and b is an element of B.
Note that this `+` operation is not commutative.
"""
new_self, new_other, concat_tf = self._combine(other)
for state in new_self.accept_states:
if (state, '') in concat_tf:
concat_tf[(state, '')].add(new_other.start_state)
else:
concat_tf[(state, '')] = {new_other.start_state}
return NFA(
transition_function=concat_tf,
start_state=new_self.start_state,
accept_states=new_other.accept_states
)
def _combine(self, other: "NFA") -> Tuple["NFA", "NFA", MutableNfaTF]:
def prime(state: State):
return state + '`'
def copy(nfa: NFA) -> NFA:
copy_tf = {}
for state, symbol in nfa.transition_function.keys():
copy_tf[(prime(state), symbol)] = {
prime(x) for x in nfa.transition_function[(state, symbol)]
}
copy_start = prime(nfa.start_state)
copy_accept = {prime(x) for x in nfa.accept_states}
return NFA(
transition_function=copy_tf,
start_state=copy_start,
accept_states=copy_accept
)
overlap = self.states & other.states
while overlap:
other = copy(other)
overlap = self.states & other.states
def add_empty_transitions(
nfa1: NFA, nfa2: NFA
) -> Tuple[NfaTransitionFunction, NfaTransitionFunction]:
def add_one_way(nfa1: NFA, nfa2: NFA) -> NfaTransitionFunction:
new_tf = nfa1.transition_function
extra_symbols = nfa2.alphabet - nfa1.alphabet
if extra_symbols:
for pair in product(nfa1.states, extra_symbols):
new_tf[pair] = set()
return new_tf
return add_one_way(nfa1, nfa2), add_one_way(nfa2, nfa1)
self_tf, other_tf = add_empty_transitions(self, other)
new_self = NFA(
transition_function=self_tf,
start_state=self.start_state,
accept_states=self.accept_states
)
new_other = NFA(
transition_function=other_tf,
start_state=other.start_state,
accept_states=other.accept_states
)
combination_tf = {}
combination_tf.update(new_self.transition_function)
combination_tf.update(new_other.transition_function)
return new_self, new_other, combination_tf
def _good_range(self) -> None:
bad_range = {
x for x in self.transition_function.values()
if not isinstance(x, collections.abc.Set)
}
_error_message(
bad_set=bad_range,
message_singular=("Value {} in the range of the transition "
"function is not a set."),
message_plural=("Values {} in the range of the transition "
"function are not sets.")
)
transition_range: Set[Optional[AbstractSet[State]]] = set.union(
*self.transition_function.values()
)
_error_message(
bad_set=transition_range - self.states,
message_singular=("State {} in the range of the transition "
"function is not in the fsa's state set."),
message_plural=("States {} in the range of the transition "
"function are not in the fsa's state set.")
)
def _get_successors(
self, *, state_set: AbstractSet[State], symbol: Symbol
) -> FrozenSet[State]:
def get_successor(state: State, sym: Symbol) -> AbstractSet[State]:
self._transition_function = cast(
NfaTransitionFunction, self._transition_function
)
return self._transition_function.get((state, sym), frozenset())
empty: FrozenSet[State] = frozenset() # This avoids a mypy bug.
return empty.union(
*[frozenset(get_successor(state, symbol)) for state in state_set]
)
def _add_epsilons(self, state_set: AbstractSet[State]) -> FrozenSet[State]:
epsilon_neighbours = self._get_successors(
state_set=state_set, symbol=''
)
while epsilon_neighbours - state_set:
state_set = state_set | epsilon_neighbours
epsilon_neighbours = self._get_successors(
state_set=epsilon_neighbours, symbol=''
)
return frozenset(state_set)
def _transition(self, state_set: AbstractSet[State], symbol: Symbol):
return self._add_epsilons(self._get_successors(state_set=state_set, symbol=symbol))
def accepts(self, string: str) -> bool:
"""
Determines whether nfa accepts input string. Will raise a ValueError
exception is the string contains symbols that aren't in the nfa's
alphabet.
"""
_check_input(string=string, alphabet=self.alphabet)
current_states = self._add_epsilons({self.start_state})
for symbol in string:
current_states = self._transition(current_states, symbol)
return not current_states & self.accept_states == set()
def determinize(self) -> "DFA":
"""Returns a DFA that recognizes the same same language as the NFA
instance.
WARNING: The set of DFA states has the cardinality of the power-set of
the set of NFA states. For related reasons, the time complexity of this
method is exponential in the number of states of the NFA. Don't
determinize big NFAs.
"""
# powerset code an itertools recipe, from
# https://docs.python.org/3/library/itertools.html#recipes
# (minor modification to make the return a set of frozensets).
def powerset(iterable: Iterable) -> Set[FrozenSet]:
s = list(iterable)
return {
frozenset(item) for item in chain.from_iterable(
combinations(s, r) for r in range(len(s)+1)
)
}
state_sets = powerset(self.states)
determinized_tf = {}
determinized_accept = set()
for (state_set, symbol) in product(state_sets, self._alphabet):
determinzed_state = _stringify(state_set)
determinized_tf[(determinzed_state, symbol)] = _stringify(
self._transition(state_set, symbol)
)
if set(state_set) & self.accept_states:
determinized_accept.add(determinzed_state)
determinized_start = _stringify(
self._add_epsilons({self._start_state})
)
return DFA(
transition_function=determinized_tf,
start_state=determinized_start,
accept_states=determinized_accept
)
def star(self) -> "NFA":
"""
Let A be the language recognised by nfa. `nfa.self()` returns an nfa
that recognizes A* -- i.e., the set of all strings formed by
concatenating any number of members of A.
"""
star_start = _get_new_state(self.states)
star_tf = self.transition_function
star_tf[(star_start, '')] = {self.start_state}
for symbol in self.alphabet:
star_tf[(star_start, symbol)] = set()
for state in self.accept_states:
star_tf[(state, '')] = {self.start_state}
star_accepts = self.accept_states | {star_start}
return NFA(
transition_function=star_tf,
start_state=star_start,
accept_states=star_accepts
)
@staticmethod
def fit(
regex: Regex,
alphabet: AbstractSet[Symbol] = (
set(printable) - {'(', ')', '|', '*'}
)
) -> "NFA":
"""
Takes a regular expression and an alphabet (i.e., a set of
one-character strings) as input; returns an NFA that recognises the
language defined by that regular expression and that alphabet.
The alphabet parameter is optional; it's default value is
string.printable -- i.e., the set of "printable" characters, which
includes the standard ASCII letters and digits, and most common
punctuation and white space.
Actually, that's not quite right -- the default value is
        string.printable *minus* parentheses, the vertical bar, and the star
        symbol, for reasons that I will explain presently.
As of now, the syntax of the regular expressions that this method takes
as input is very simple -- much simpler than the standard python
regular expresssions. All characters are intepreted as literals for
symbols in the alphabet except for '(', '')', '|', '*', '•', '€' and
'Ø'. The parentheses, vertical bar and star mean what you'd expect
them to mean if you are familiar with regular expressions. '•'
(option-8 on a mac keyboard) means concatenation. You can leave
concatentation implicit, as is usual; no need to write '•'' explicitly
if you don't want to. But it gets used internally. '€' (option-shift-2)
is used to match the empty string (because it kind of looks like an
epsilon); there's no other way to match, for instance, {'', '0'} with
the current syntax. (Quick challenge: it's not totally obvious how to
match the empty string in normal python regex syntax either, though it
can be done; give it a go.) 'Ø' (option-shift-o) represents the empty
set; you can match to the empty language with it.
        For reasons related to the above, the characters '(', ')', '|', '*',
'•', '€' and 'Ø' cannot be symbols in the alphabet of the NFA. (My
apologies to speakers of Scandinavian languages for the last one; I am
very against English chauvinism, but your letter is so very close to
the empty-set symbol. If, by some miracle, there is someone who cares
about this, I will change the symbol for empty-set.)
In the absence of parentheses, the order of operations is: `*`, then
`•`, then `|`.
This method uses a version of Dijkstra's shunting yard algorithm to
parse the regex and build the NFA.
The method will raise a ValueError exception if any of the following
conditions hold:
1. the alphabet contains any of the verboten characters -- i.e.,`(`
, `)`, `|`, `*`, `•`, `€` and `Ø`,
2. the input regex string contains a character not in the alphabet,
                and not one of the above verboten characters,
            3. the input regex contains a binary operator followed by an
operator, or
4. the input regex does not have properly matching parentheses.
"""
operator_to_operation = {
'|': NFA.__or__,
'•': NFA.__add__
}
_error_message(
bad_set=set(NOT_SYMBOLS) & alphabet,
message_singular="Alphabet cannot contain character {}.",
message_plural="Alphabet cannot contain characters {}."
)
def fit_empty(empty: Regex) -> NFA:
tf: NfaTransitionFunction = {
pair: set() for pair in product({'q1'}, alphabet)
}
accept_states = set() if empty == 'Ø' else {'q1'}
return NFA(
transition_function=tf,
start_state='q1',
accept_states=accept_states
)
def fit_symbol(symbol: Symbol) -> NFA:
tf: MutableNfaTF = {
pair: set() for pair in product({'q1', 'q2'}, alphabet)
}
tf[('q1', symbol)] = {'q2'}
return NFA(
transition_function=tf, start_state='q1', accept_states={'q2'}
)
machine_stack: List[NFA] = []
operator_stack = ['sentinel']
def binary_operate() -> None:
right_operand = machine_stack.pop()
left_operand = machine_stack.pop()
machine = operator_to_operation[operator_stack.pop()](
left_operand, right_operand
)
machine_stack.append(machine)
def compare(operator: Regex) -> int:
return (
OPERATORS.index(operator)
- OPERATORS.index(operator_stack[-1])
)
regex = _pre_process(regex, alphabet)
for char in regex:
if char in EMPTIES:
machine_stack.append(fit_empty(char))
elif char in alphabet:
machine_stack.append(fit_symbol(char))
elif char == '*':
machine_stack[-1] = machine_stack[-1].star()
elif char in OPERATORS:
if operator_stack[-1] in PARENTHE or compare(char) > 0:
operator_stack.append(char)
else:
while (
operator_stack[-1] not in PARENTHE
and compare(char) <= 0
):
binary_operate()
operator_stack.append(char)
elif char == '(':
operator_stack.append(char)
else:
while operator_stack[-1] != '(':
binary_operate()
operator_stack.pop()
while len(operator_stack) > 1:
binary_operate()
return machine_stack.pop()
OPERATORS = ['sentinel', '|', '•', '*']
PARENTHE = ['(', ')']
EMPTIES = ['€', 'Ø']
NOT_SYMBOLS = OPERATORS + PARENTHE + EMPTIES
def _pre_process(regex: Regex, alphabet: AbstractSet[Symbol]) -> Regex:
first_char = regex[0]
if first_char in OPERATORS:
raise ValueError(f"Regex cannot start with '{first_char}'.")
processed = ''
paren_count = 0
for char in regex:
if char in alphabet or char == '(':
if len(processed) > 0:
processed += (
'•' if processed[-1] not in {'(', '|'}
else ''
)
if char not in alphabet | set(NOT_SYMBOLS):
raise ValueError(
f"Regex contains character '{char}' that is not in "
"alphabet and not an accepted regex character."
)
if char in OPERATORS and processed[-1] in {'|', '•'}:
raise ValueError(
"Regex contains binary operator followed by an "
"operator; not cool."
)
if char == '(':
paren_count += 1
if char == ')':
paren_count -= 1
if paren_count < 0:
raise ValueError(
"Right parenthesis occurs in regex withour matching "
"left parenthesis."
)
processed += char
if paren_count > 0:
raise ValueError(
"Left parenthesis occurs in regex without matching right "
"parenthesis."
)
return processed
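# A small usage sketch for NFA.fit (the alphabet and regex are chosen purely for
# illustration):
#
#   nfa = NFA.fit("(0|1)*1", alphabet={"0", "1"})
#   nfa.accepts("0101")      # True  -- the string ends in 1
#   nfa.accepts("0110")      # False -- the string ends in 0
#   dfa = nfa.determinize()  # equivalent DFA; exponentially more states at worst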
DfaTransitionFunction = Mapping[Tuple[State, Symbol], State]
class DFA(_FSA):
"""
A deterministic finite automaton class. Takes three keyword arguments:
- `transition_function`: Mapping[Tuple[State, Symbol], State]
- `start_state`: State
- `accept_state`: AbstractSet[State]
(where States are strings and Symbols are one-char strings).
The keys of the `transition_function` implicitly define the dfa's state-set
and alphabet.
    The class will raise a ValueError exception on instantiation if any of the
following are true:
* the start state is not a member of the set of states inferred from the
transition function;
* the set of accept states is not a subset of the set of states inferred
from the transition function;
* the range of the transition function is not a subset of the set of
states inferred from the transition function;
* a member of the alphabet inferred from the transition function is not a
one-character string;
* the transition function is missing a case -- i.e., it is not the case
that every pair of a state and a symbol is in the domain of the
transition function.
    The exception message will specify which of the above conditions
    triggered the exception, and which states/symbols are the source of the
problem.
"""
def __or__(self, other: "DFA") -> "DFA":
"""
Let A be the language recognised by dfa1, and B be the language
recognized by dfa2. `dfa1 | dfa2` returns a dfa that recognizes A union
B. The states of dfa1 | dfa2 are ordered pairs of states from dfa1 and
dfa2. There is no problem with the input DFAs having different
alphabets.
"""
union_alphabet = self.alphabet | other.alphabet
def maybe_add_state(
dfa1: DFA, dfa2: DFA
) -> Tuple[FrozenSet[State], DfaTransitionFunction]:
new_tf = dfa1.transition_function
new_states = dfa1.states
extra_symbols = dfa2.alphabet - dfa1.alphabet
if extra_symbols:
error_state = _get_new_state(dfa1.states)
new_states = dfa1.states | {error_state}
for symbol in union_alphabet:
new_tf[(error_state, symbol)] = error_state
for symbol in extra_symbols:
for state in dfa1.states:
new_tf[(state, symbol)] = error_state
return new_states, new_tf
self_states, self_tf = maybe_add_state(self, other)
other_states, other_tf = maybe_add_state(other, self)
state_pairs = product(self_states, other_states)
union_transition_function = {}
for (state1, state2), symbol in product(state_pairs, union_alphabet):
union_transition_function[(state1 + state2, symbol)] = (
self_tf[(state1, symbol)] + other_tf[(state2, symbol)]
)
union_start_state = self.start_state + other.start_state
union_accept_states = {
_stringify(item) for item in (
set(product(self.accept_states, other_states))
| set(product(self_states, other.accept_states))
)
}
return DFA(
transition_function=union_transition_function,
start_state=union_start_state,
accept_states=union_accept_states
)
def __add__(self, other: "DFA") -> "DFA":
"""
Let A be the language recognised by dfa1, B be the language recognised
by dfa2. `dfa1 + dfa2` returns a DFA that recognises the set of all
concatenations of strings in A with strings in B. This DFA operator is
parasitic on the NFA operator; it converts the input DFAs into NFAs,
uses the NFA '+', then converts the result back to a DFA. That makes
        for a relatively simple but, sadly, computationally expensive algorithm.
For that reason, I recommend you don't `+` dfas with large numbers of
states.
"""
return (self.non_determinize() + other.non_determinize()).determinize()
def _gnfize(self) -> _GNFA:
gnfa_tf: MutableGnfaTF = {}
for state1, symbol in self.transition_function.keys():
state2 = self.transition_function[(state1, symbol)]
if (state1, state2) in gnfa_tf.keys():
gnfa_tf[(state1, state2)] += '|' + symbol
else:
gnfa_tf[(state1, state2)] = symbol
gnfa_start = _get_new_state(self.states)
gnfa_accept = _get_new_state(self.states | {gnfa_start})
gnfa_tf[(gnfa_start, self.start_state)] = '€'
for state in self.accept_states:
gnfa_tf[(state, gnfa_accept)] = '€'
for state1, state2 in product(
self.states | {gnfa_start}, self.states | {gnfa_accept}
):
if (state1, state2) not in gnfa_tf:
gnfa_tf[(state1, state2)] = 'Ø'
return _GNFA(gnfa_tf, set(self.states), gnfa_start, gnfa_accept)
def _good_range(self) -> None:
transition_range = set(self.transition_function.values())
bad_range = transition_range - self.states
_error_message(
bad_set=bad_range,
message_singular=("State {} in the range of the transition "
"function is not in the fsa's state set."),
message_plural=("States {} in the range of the transition "
"function are not in the fsa's state set.")
)
def accepts(self, string: str) -> bool:
"""
`my_dfa.accepts("some string")` returns `True` if my_dfa accepts "some
string", and `False` otherwise. Will raise a ValueError exception is
the string contains symbols that aren't in the DFA's alphabet.
"""
_check_input(string=string, alphabet=self.alphabet)
current_state = self.start_state
for symbol in string:
current_state = self.transition_function[(current_state, symbol)]
return current_state in self.accept_states
def encode(self) -> Regex:
"""
Let A be the language accepted by dfa. `dfa.encode()` returns a regex
string that generates A. That regex string is liable to be much more
complicated than necessary; maybe I'll figure out how to improve on
average simplicity, eventually.
"""
gnfa = self._gnfize()
while len(gnfa.states) > 2:
gnfa = gnfa.reduce()
return gnfa.transition_function[(gnfa.start_state, gnfa.accept_state)]
def non_determinize(self) -> NFA:
"""
Convenience method that takes a DFA instance and returns an NFA
instance.
"""
nd_transition_function = {
key: {value} for key, value in self.transition_function.items()
}
return NFA(
transition_function=nd_transition_function,
start_state=self.start_state,
accept_states=self.accept_states
)
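# A small usage sketch for DFA: a two-state machine over {"0", "1"} that accepts
# strings containing an odd number of 1s (state names are arbitrary):
#
#   tf = {("even", "0"): "even", ("even", "1"): "odd",
#         ("odd", "0"): "odd", ("odd", "1"): "even"}
#   dfa = DFA(transition_function=tf, start_state="even", accept_states={"odd"})
#   dfa.accepts("010")    # True  -- one 1
#   dfa.accepts("0110")   # False -- two 1s
#   dfa.encode()          # a (possibly unwieldy) regex for the same language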
def _stringify(states: Iterable[State]) -> str:
if not isinstance(states, collections.abc.Sequence):
states = list(states)
states.sort()
return "".join(states)
def _get_new_state(state_set: Container) -> State:
counter = 1
new_state = 'new_state1'
while new_state in state_set:
counter += 1
new_state = "new_state" + str(counter)
return new_state
# === Numbers/Roman Number Generator/tests.py (repo: jarik-marwede/IdeaBag2-Projects, license: MIT) ===
#!/usr/bin/env python3
import unittest
from roman_number_generator import arabic_to_roman
class Test(unittest.TestCase):
    def test_arabic_to_roman(self):
self.assertRaises(ValueError, arabic_to_roman, 4000)
self.assertEqual(arabic_to_roman(4), "IV")
self.assertEqual(arabic_to_roman(12), "XII")
self.assertEqual(arabic_to_roman(20), "XX")
if __name__ == "__main__":
unittest.main()
# === modules/moduleBase.py (repo: saintaardvark/glouton-satnogs-data-downloader, license: MIT) ===
from infrastructure.satnogClient import SatnogClient
import os
class ModuleBase:
def __init__(self, working_dir):
self.working_dir = working_dir
def runAfterDownload(self, file_name, full_path, observation):
        raise NotImplementedError()
# === oregano_plugins/fusion/server.py (repo: MrNaif2018/Oregano, license: MIT) ===
#!/usr/bin/env python3
#
# Oregano - a lightweight Ergon client
# CashFusion - an advanced coin anonymizer
#
# Copyright (C) 2020 Mark B. Lundeberg
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
A basic server implementation for CashFusion. Does not natively offer SSL
support, however a server admin may run an SSL server proxy such as nginx for
that purpose.
"""
import secrets
import sys
import threading
import time
import traceback
from collections import defaultdict
import oregano.schnorr as schnorr
from oregano.address import Address
from oregano.util import PrintError, ServerError, TimeoutException
from . import fusion_pb2 as pb
from .comms import send_pb, recv_pb, ClientHandlerThread, GenericServer, get_current_genesis_hash
from .protocol import Protocol
from .util import (FusionError, sha256, calc_initial_hash, calc_round_hash, gen_keypair, tx_from_components,
rand_position)
from .validation import (check_playercommit, check_covert_component, validate_blame, ValidationError,
check_input_electrumx)
# Resistor "E series" values -- round numbers that are almost geometrically uniform
E6 = [1.0, 1.5, 2.2, 3.3, 4.7, 6.8]
E12 = [1.0, 1.2, 1.5, 1.8, 2.2, 2.7, 3.3, 3.9, 4.7, 5.6, 6.8, 8.2]
E24 = [1.0, 1.1, 1.2, 1.3, 1.5, 1.6, 1.8, 2.0, 2.2, 2.4, 2.7, 3.0, 3.3, 3.6, 3.9, 4.3, 4.7, 5.1, 5.6, 6.2, 6.8, 7.5, 8.2, 9.1]
# TODO - make these configurable
class Params:
num_components = 23
component_feerate = 1000 # sats/kB
max_excess_fee = 300000 # sats
tiers = [round(b*s) for b in [10000, 100000, 1000000, 10000000, 100000000] for s in E12]
# How many clients do we want before starting a fusion?
min_clients = 8
# If all clients submitted largest possible component (uncompressed p2pkh input), how many could we take until the result would exceed 100 kB standard tx size limitation?
max_clients = (100000 - 12) // (num_components * 173)
# Every round, clients leave ... How many clients do we need as an absolute minimum (for privacy)?
min_safe_clients = 6
# Choose the minimum excess fee based on dividing the overhead amongst players, in the smallest fusion
# (these overhead numbers assume op_return script size of 1 + 5 (lokad) + 33 (session hash) )
if min_safe_clients * num_components >= 2 * 0xfc:
# the smallest fusion could require 3-byte varint for both inputs and outputs lists
overhead = 62
elif min_safe_clients * num_components >= 0xfc:
# the smallest fusion could require 3-byte varint for either inputs or outputs lists
overhead = 60
else:
# the smallest fusion will use 1-byte varint for both inputs and outputs lists
overhead = 58
min_excess_fee = (overhead + min_safe_clients - 1) // min_safe_clients
# How many clients can share same tag on a given tier (if more try to join, reject)
max_tier_client_tags = 100
# For a given IP, how many players can they represent in the same fuse?
ip_max_simul_fuse = 3
# Guaranteed time to launch a fusion if the pool has stayed at or above min_clients for this long.
start_time_max = 1200
# Inter-fusion delay -- after starting any fusion, wait this long before starting the next one (unless hit max time or pool is full).
start_time_spacing = 120
# But don't start a fusion if it has only been above min_clients for a short time (unless pool is full).
start_time_min = 400
# whether to print a lot of logs
noisy = False
# How long covert connections are allowed to stay open without activity.
# note this needs to consider the maximum interval between messages:
# - how long from first connection to last possible Tor component submission?
# - how long from one round's component submission to the next round's component submission?
COVERT_CLIENT_TIMEOUT = 40
# used for non-cryptographic purposes
import random
rng = random.Random()
rng.seed(secrets.token_bytes(32))
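# Worked example of the tier table defined in Params above: each decade base is
# multiplied by the E12 steps, so the first twelve tiers are 10000, 12000, 15000,
# 18000, 22000, 27000, 33000, 39000, 47000, 56000, 68000 and 82000 sats, and the
# same pattern repeats for each base up to 100000000 (5 bases x 12 steps = 60 tiers).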
def clientjob_send(client, msg, timeout = Protocol.STANDARD_TIMEOUT):
client.send(msg, timeout=timeout)
def clientjob_goodbye(client, text):
# a gentler goodbye than killing
if text is not None:
client.send_error(text)
raise client.Disconnect
class ClientThread(ClientHandlerThread):
"""Basic thread per connected client."""
def recv(self, *expected_msg_names, timeout=Protocol.STANDARD_TIMEOUT):
submsg, mtype = recv_pb(self.connection, pb.ClientMessage, *expected_msg_names, timeout=timeout)
return submsg
def send(self, submsg, timeout=Protocol.STANDARD_TIMEOUT):
send_pb(self.connection, pb.ServerMessage, submsg, timeout=timeout)
def send_error(self, msg):
self.send(pb.Error(message = msg), timeout=Protocol.STANDARD_TIMEOUT)
def error(self, msg):
self.send_error(msg)
raise FusionError(f'Rejected client: {msg}')
class ClientTag(bytes):
""" enhanced bytes object to represent a pool tag """
__slots__ = ()
def __new__(cls, ipstr, tagbytes, maxsimul):
ipb = ipstr.encode()
b = bytes([maxsimul, len(ipb)]) + ipb + tagbytes
return super().__new__(cls, b)
@property
def maxsimul(self):
return self[0]
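# Byte layout example for ClientTag (hypothetical values): for ipstr="1.2.3.4",
# maxsimul=3 and tagbytes=b"\xaa\xbb", the stored bytes are
# b"\x03" + b"\x07" + b"1.2.3.4" + b"\xaa\xbb" -- maxsimul, IP length, IP, then tag.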
class TagStatus:
__slots__ = ('pool', 'all_')
def __init__(self):
self.pool = 0
self.all_ = 0
class WaitingPool:
""" a waiting pool for a specific tier """
def __init__(self, fill_threshold, tag_max):
self.pool = set() # clients who will be put into fusion round if started at this tier
self.queue = list() # clients who are waiting due to tags being full
self.tags = defaultdict(TagStatus) # how are the various tags
self.fill_threshold = fill_threshold # minimum number of pool clients to trigger setting fill_time
self.fill_time = None # when did pool exceed fill_threshold
self.tag_max = tag_max # how many clients can share same tag (in pool and queue)
def check_add(self, client):
for t in client.tags:
ts = self.tags.get(t)
if ts is not None and ts.all_ >= self.tag_max:
return "too many clients with same tag"
def _add_pool(self, client):
self.pool.add(client)
for t in client.tags:
ts = self.tags[t]
ts.pool += 1
if len(self.pool) == self.fill_threshold:
self.fill_time = time.monotonic()
def add(self, client):
can_pool = True
for t in client.tags:
ts = self.tags[t]
ts.all_ += 1
if ts.pool >= t.maxsimul:
can_pool = False
if can_pool:
self._add_pool(client)
else:
self.queue.append(client)
return can_pool
def remove(self, client):
# make sure to call try_move_from_queue() after calling this
try:
self.pool.remove(client)
except KeyError:
in_pool = False
try:
self.queue.remove(client)
except ValueError:
return False
else:
in_pool = True
if len(self.pool) < self.fill_threshold:
self.fill_time = None
for t in client.tags:
ts = self.tags[t]
ts.all_ -= 1
if in_pool:
ts.pool -= 1
if ts.all_ == 0: # cleanup for no-longer-used tags
del self.tags[t]
return True
def try_move_from_queue(self):
# attempt to move clients from queue into pool
moved = []
for client in self.queue:
for t in client.tags:
ts = self.tags[t]
if ts.pool >= t.maxsimul:
break
else:
self._add_pool(client)
moved.append(client)
for client in moved:
self.queue.remove(client)
class FusionServer(GenericServer):
"""Server for clients waiting to start a fusion. New clients get a
ClientThread made for them, and they are put into the waiting pools.
Once a Fusion thread is started, the ClientThreads are passed over to
a FusionController to run the rounds."""
def __init__(self, config, network, bindhost, port, upnp = None, announcehost = None, donation_address = None):
assert network
assert isinstance(donation_address, (Address, type(None)))
if not schnorr.has_fast_sign() or not schnorr.has_fast_verify():
raise RuntimeError("Fusion requires libsecp256k1")
super().__init__(bindhost, port, ClientThread, upnp = upnp)
self.config = config
self.network = network
self.announcehost = announcehost
self.donation_address = donation_address
self.waiting_pools = {t: WaitingPool(Params.min_clients, Params.max_tier_client_tags) for t in Params.tiers}
self.t_last_fuse = time.monotonic() # when the last fuse happened; as a placeholder, set this to startup time.
self.reset_timer()
def run(self):
try:
super().run()
finally:
self.waiting_pools.clear() # gc clean
def reset_timer(self, ):
""" Scan pools for the favoured fuse:
- Out of the pool(s) with the most number of players,
- Choose the pool with the earliest fill time;
- If no pools are filled then there is no favoured fuse.
(since fill time is a float, this will almost always be unique)
"""
with self.lock:
time_best = None
tier_best = None
size_best = 0
for t, pool in self.waiting_pools.items():
ft = pool.fill_time
if ft is None:
continue
size = len(pool.pool)
if size >= size_best:
if time_best is None or ft < time_best or size > size_best:
time_best = ft
tier_best = t
size_best = size
if time_best is None:
self.tier_best_starttime = None
else:
self.tier_best_starttime = max(time_best + Params.start_time_min, self.t_last_fuse + Params.start_time_spacing)
self.tier_best = tier_best
def start_fuse(self, tier):
""" Immediately launch Fusion at the selected tier. """
with self.lock:
chosen_clients = list(self.waiting_pools[tier].pool)
# Notify that we will start.
for c in chosen_clients:
c.start_ev.set()
# Remove those clients from all pools
for t, pool in self.waiting_pools.items():
for c in chosen_clients:
pool.remove(c)
pool.try_move_from_queue()
# Update timing info
self.t_last_fuse = time.monotonic()
self.reset_timer()
# Uncomment the following to: Remove from spawned clients list, so that the fusion can continue independently of waiting server.
# self.spawned_clients.difference_update(chosen_clients)
# Kick off the fusion.
rng.shuffle(chosen_clients)
            fusion = FusionController(self.network, tier, chosen_clients, self.bindhost, upnp = self.upnp, announcehost = self.announcehost)
fusion.start()
return len(chosen_clients)
def new_client_job(self, client):
client_ip = client.connection.socket.getpeername()[0]
msg = client.recv('clienthello')
if msg.version != Protocol.VERSION:
client.error("Mismatched protocol version, please upgrade")
if msg.genesis_hash:
if msg.genesis_hash != get_current_genesis_hash():
# For now, msg.genesis_hash is optional and we tolerate it
# missing. However, if the client declares the genesis_hash, we
# do indeed disallow them connecting if they are e.g. on testnet
# and we are mainnet, etc.
client.error("This server is on a different chain, please switch servers")
else:
client.print_error("👀 No genesis hash declared by client, we'll let them slide...")
if self.stopping:
return
donation_address = ''
if isinstance(self.donation_address, Address):
donation_address = self.donation_address.to_full_ui_string()
client.send(pb.ServerHello( num_components = Params.num_components,
component_feerate = Params.component_feerate,
min_excess_fee = Params.min_excess_fee,
max_excess_fee = Params.max_excess_fee,
tiers = Params.tiers,
donation_address = donation_address
))
# We allow a long timeout for clients to choose their pool.
msg = client.recv('joinpools', timeout=120)
if len(msg.tiers) == 0:
client.error("No tiers")
if len(msg.tags) > 5:
client.error("Too many tags")
# Event for signalling us that a pool started.
start_ev = threading.Event()
client.start_ev = start_ev
if client_ip.startswith('127.'):
# localhost is whitelisted to allow unlimited access
client.tags = []
else:
# Default tag: this IP cannot be present in too many fuses.
client.tags = [ClientTag(client_ip, b'', Params.ip_max_simul_fuse)]
for tag in msg.tags:
if len(tag.id) > 20:
client.error("Tag id too long")
if not (0 < tag.limit < 6):
client.error("Tag limit out of range")
ip = '' if tag.no_ip else client_ip
client.tags.append(ClientTag(ip, tag.id, tag.limit))
try:
mytierpools = {t: self.waiting_pools[t] for t in msg.tiers}
        except KeyError as e:
            if self.stopping:
                return
            client.error(f"Invalid tier selected: {e.args[0]}")
try:
mytiers = list(mytierpools)
rng.shuffle(mytiers) # shuffle the adding order so that if filling more than one pool, we don't have bias towards any particular tier
with self.lock:
if self.stopping:
return
# add this client to waiting pools
for pool in mytierpools.values():
res = pool.check_add(client)
if res is not None:
client.error(res)
for t in mytiers:
pool = mytierpools[t]
pool.add(client)
if len(pool.pool) >= Params.max_clients:
# pool filled up to the maximum size, so start immediately
self.start_fuse(t)
return
# we have added to pools, which may have changed the favoured tier
self.reset_timer()
inftime = float('inf')
while True:
with self.lock:
if self.stopping or start_ev.is_set():
return
tnow = time.monotonic()
# scan through tiers and collect statuses, also check start times.
statuses = dict()
tfill_thresh = tnow - Params.start_time_max
for t, pool in mytierpools.items():
if client not in pool.pool:
continue
status = pb.TierStatusUpdate.TierStatus(players = len(pool.pool), min_players = Params.min_clients)
remtime = inftime
if pool.fill_time is not None:
# a non-favoured pool will start eventually
remtime = pool.fill_time - tfill_thresh
if t == self.tier_best:
# this is the favoured pool, can start at a special time
remtime = min(remtime, self.tier_best_starttime - tnow)
if remtime <= 0:
self.start_fuse(t)
return
elif remtime != inftime:
status.time_remaining = round(remtime)
statuses[t] = status
client.send(pb.TierStatusUpdate(statuses = statuses))
start_ev.wait(2)
except:
# Remove client from waiting pools on failure (on success, we are already removed; on stop we don't care.)
with self.lock:
for t, pool in mytierpools.items():
if pool.remove(client):
pool.try_move_from_queue()
if self.tier_best in mytierpools:
# we left from best pool, so it might not be best anymore.
self.reset_timer()
raise
class ResultsCollector:
# Collect submissions from different sources, with a deadline.
def __init__(self, num_results, done_on_fail = True):
self.num_results = int(num_results)
self.done_on_fail = bool(done_on_fail)
self.done_ev = threading.Event()
self.lock = threading.Lock()
self.results = []
self.fails = []
def __enter__(self, ):
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is not None:
self.fails.append(exc_value)
if self.done_on_fail:
self.done_ev.set()
elif len(self.fails) + len(getattr(self, 'results', ())) >= self.num_results:
self.done_ev.set()
def gather(self, *, deadline):
remtime = deadline - time.monotonic()
self.done_ev.wait(max(0., remtime))
with self.lock:
ret = self.results
del self.results
return ret
def add(self, result):
with self.lock:
try:
self.results.append(result)
except AttributeError:
return False
else:
if len(self.fails) + len(self.results) >= self.num_results:
self.done_ev.set()
return True
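# Typical use of ResultsCollector (mirrors run_round below; `clients` and
# `do_work` are illustrative names, not part of this module):
#
#   collector = ResultsCollector(len(clients), done_on_fail=False)
#   def job(client, collector):
#       with collector:                  # an exception here counts as a failure
#           collector.add((client, do_work(client)))
#   for client in clients:
#       client.addjob(job, collector)
#   results = collector.gather(deadline=time.monotonic() + Protocol.STANDARD_TIMEOUT)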
class FusionController(threading.Thread, PrintError):
""" This controls the Fusion rounds running from server side. """
def __init__(self, network, tier, clients, bindhost, upnp = None, announcehost = None):
super().__init__(name="FusionController")
self.network = network
self.tier = tier
self.clients = list(clients)
self.bindhost = bindhost
self.upnp = upnp
self.announcehost = announcehost
self.daemon = True
def sendall(self, msg, timeout = Protocol.STANDARD_TIMEOUT):
for client in self.clients:
client.addjob(clientjob_send, msg, timeout)
def check_client_count(self,):
live = [c for c in self.clients if not c.dead]
if len(live) < Params.min_safe_clients:
for c in live:
c.kill("too few remaining live players")
raise FusionError("too few remaining live players")
    def run(self):
self.print_error(f'Starting fusion with {len(self.clients)} players at tier={self.tier}')
covert_server = CovertServer(self.bindhost, upnp = self.upnp)
try:
annhost = covert_server.host if self.announcehost is None else self.announcehost
annhost_b = annhost.encode('ascii')
annport = covert_server.port
covert_server.noisy = Params.noisy
covert_server.start()
self.print_error(f'Covert server started @ {covert_server.host}:{covert_server.port} (announcing as: {annhost_b}:{annport})')
begin_time = round(time.time())
self.sendall(pb.FusionBegin(tier = self.tier,
covert_domain = annhost_b,
covert_port = annport,
covert_ssl = False,
server_time = begin_time))
self.last_hash = calc_initial_hash(self.tier, annhost_b, annport, False, begin_time)
time.sleep(Protocol.WARMUP_TIME)
# repeatedly run rounds until successful or exception
while True:
covert_server.reset()
# Clean up dead clients
self.clients = [c for c in self.clients if not c.dead]
self.check_client_count()
if self.run_round(covert_server):
break
self.print_error('Ended successfully!')
except FusionError as e:
self.print_error(f"Ended with error: {e}")
except Exception as e:
self.print_error('Failed with exception!')
traceback.print_exc(file=sys.stderr)
for c in self.clients:
c.addjob(clientjob_goodbye, 'internal server error')
finally:
covert_server.stop()
for c in self.clients:
c.addjob(clientjob_goodbye, None)
self.clients = [] # gc
def kick_missing_clients(self, goodclients, reason = None):
baddies = set(self.clients).difference(goodclients)
for c in baddies:
c.kill(reason)
def run_round(self, covert_server):
covert_priv, covert_Upub, covert_Cpub = gen_keypair()
round_pubkey = covert_Cpub
# start to accept covert components
covert_server.start_components(round_pubkey, Params.component_feerate)
# generate blind nonces (slow!)
for c in self.clients:
c.blinds = [schnorr.BlindSigner() for _co in range(Params.num_components)]
lock = threading.Lock()
seen_salthashes = set()
# Send start message to players; record the time we did this
round_time = round(time.time())
collector = ResultsCollector(len(self.clients), done_on_fail = False)
def client_start(c, collector):
with collector:
c.send(pb.StartRound(round_pubkey = round_pubkey,
blind_nonce_points = [b.get_R() for b in c.blinds],
server_time = round_time
))
msg = c.recv('playercommit')
commit_messages = check_playercommit(msg, Params.min_excess_fee, Params.max_excess_fee, Params.num_components)
newhashes = set(m.salted_component_hash for m in commit_messages)
with lock:
expected_len = len(seen_salthashes) + len(newhashes)
seen_salthashes.update(newhashes)
if len(seen_salthashes) != expected_len:
c.error('duplicate component commitment')
if not collector.add((c, msg.initial_commitments, msg.excess_fee)):
c.error("late commitment")
# record for later
c.blind_sig_requests = msg.blind_sig_requests
c.random_number_commitment = msg.random_number_commitment
for client in self.clients:
client.addjob(client_start, collector)
# Record the time that we sent 'startround' message to players; this
# will form the basis of our covert timeline.
covert_T0 = time.monotonic()
self.print_error(f"startround sent at {time.time()}; accepting covert components")
# Await commitment messages then process results
results = collector.gather(deadline = covert_T0 + Protocol.TS_EXPECTING_COMMITMENTS)
# Filter clients who didn't manage to give a good commitment.
prev_client_count = len(self.clients)
self.clients = [c for c, _, _ in results]
self.check_client_count()
self.print_error(f"got commitments from {len(self.clients)} clients (dropped {prev_client_count - len(self.clients)})")
total_excess_fees = sum(f for _,_,f in results)
# Generate scrambled commitment list, but remember exactly where each commitment originated.
commitment_master_list = [(commit, ci, cj) for ci, (_, commitments, _) in enumerate(results) for cj,commit in enumerate(commitments)]
rng.shuffle(commitment_master_list)
all_commitments = tuple(commit for commit,ci,cj in commitment_master_list)
# Send blind signatures
for c in self.clients:
scalars = [b.sign(covert_priv, e) for b,e in zip(c.blinds, c.blind_sig_requests)]
c.addjob(clientjob_send, pb.BlindSigResponses(scalars = scalars))
del c.blinds, c.blind_sig_requests
del results, collector
# Sleep a bit before uploading commitments, as clients are doing this.
remtime = covert_T0 + Protocol.T_START_COMPS - time.monotonic()
if remtime > 0:
time.sleep(remtime)
# Upload the full commitment list; we're a bit generous with the timeout but that's OK.
self.sendall(pb.AllCommitments(initial_commitments = all_commitments),
timeout=Protocol.TS_EXPECTING_COVERT_SIGNATURES)
# Sleep until end of covert components phase
remtime = covert_T0 + Protocol.TS_EXPECTING_COVERT_COMPONENTS - time.monotonic()
assert remtime > 0, "timings set up incorrectly"
time.sleep(remtime)
component_master_list = list(covert_server.end_components().items())
self.print_error(f"ending covert component acceptance. {len(component_master_list)} received.")
# Sort the components & contribs list, then separate it out.
component_master_list.sort(key=lambda x:x[1][0])
all_components = [comp for comp, (sort_key, contrib) in component_master_list]
component_contribs = [contrib for comp, (sort_key, contrib) in component_master_list]
del component_master_list
# Do some preliminary checks to see whether we should just skip the
# signing phase and go directly to blame, or maybe even restart / end
# without sharing components.
skip_signatures = False
if len(all_components) != len(self.clients)*Params.num_components:
skip_signatures = True
self.print_error("problem detected: too few components submitted")
if total_excess_fees != sum(component_contribs):
skip_signatures = True
self.print_error("problem detected: excess fee mismatch")
self.last_hash = session_hash = calc_round_hash(self.last_hash, round_pubkey, round_time, all_commitments, all_components)
#TODO : Check the inputs and outputs to see if we even have reasonable
# privacy with what we have.
bad_components = set()
###
if skip_signatures:
self.print_error("skipping covert signature acceptance")
self.sendall(pb.ShareCovertComponents(components = all_components, skip_signatures = True))
else:
self.print_error("starting covert signature acceptance")
tx, input_indices = tx_from_components(all_components, session_hash)
sighashes = [sha256(sha256(bytes.fromhex(tx.serialize_preimage(i, 0x41, use_cache = True))))
for i in range(len(tx.inputs()))]
pubkeys = [bytes.fromhex(inp['pubkeys'][0]) for inp in tx.inputs()]
covert_server.start_signatures(sighashes,pubkeys)
self.sendall(pb.ShareCovertComponents(components = all_components, session_hash = session_hash))
# Sleep until end of covert signatures phase
remtime = covert_T0 + Protocol.TS_EXPECTING_COVERT_SIGNATURES - time.monotonic()
if remtime < 0:
# really shouldn't happen, we had plenty of time
raise FusionError("way too slow")
time.sleep(remtime)
signatures = list(covert_server.end_signatures())
missing_sigs = len([s for s in signatures if s is None])
###
self.print_error(f"ending covert signature acceptance. {missing_sigs} missing :{'(' if missing_sigs else ')'}")
# mark all missing-signature components as bad.
bad_inputs = set(i for i,sig in enumerate(signatures) if sig is None)
# further, search for duplicated inputs (through matching the prevout and claimed pubkey).
prevout_spenders = defaultdict(list)
for i, inp in enumerate(tx.inputs()):
prevout_spenders[f"{inp['prevout_hash']}:{inp['prevout_n']} {inp['pubkeys'][0]}"].append(i)
for prevout, spenders in prevout_spenders.items():
if len(spenders) == 1:
continue
self.print_error(f"multi-spend of f{prevout} detected")
# If exactly one of the inputs is signed, we don't punish him
# because he's the honest guy and all the other components were
# just imposters who didn't have private key. If more than one
# signed, then it's malicious behaviour!
if sum((signatures[i] is not None) for i in spenders) != 1:
bad_inputs.update(spenders)
if bad_inputs:
bad_components.update(input_indices[i] for i in bad_inputs)
else:
for i, (inp, sig) in enumerate(zip(tx.inputs(), signatures)):
inp['signatures'][0] = sig.hex() + '41'
assert tx.is_complete()
txid = tx.txid()
self.print_error("completed the transaction! " + txid)
try:
self.network.broadcast_transaction2(tx, timeout=3)
except ServerError as e:
nice_msg, = e.args
server_msg = e.server_msg
self.print_error(f"could not broadcast the transaction! {nice_msg}")
except TimeoutException:
self.print_error("timed out while trying to broadcast transaction! misconfigured?")
# This probably indicates misconfiguration since fusion server ought
# to have a good connection to the EC server. Report this back to clients
# as an 'internal server error'.
raise
else:
self.print_error("broadcast was successful!")
# Give our transaction a small head start in relaying, before sharing the
# signatures. This makes it slightly harder for one of the players to
# broadcast a malleated version by re-signing one of their inputs.
time.sleep(2)
self.sendall(pb.FusionResult(ok = True, txsignatures = signatures))
return True
self.sendall(pb.FusionResult(ok = False, bad_components = sorted(bad_components)))
###
self.print_error(f"entering blame phase. bad components: {bad_components}")
if len(self.clients) < 2:
# Sanity check for testing -- the proof sharing thing doesn't even make sense with one player.
for c in self.clients:
c.kill('blame yourself!')
return
# scan the commitment list and note where each client's commitments ended up
client_commit_indexes = [[None]*Params.num_components for _ in self.clients]
for i, (commit, ci, cj) in enumerate(commitment_master_list):
client_commit_indexes[ci][cj] = i
collector = ResultsCollector(len(self.clients), done_on_fail = False)
def client_get_proofs(client, collector):
with collector:
msg = client.recv('myproofslist')
seed = msg.random_number
if sha256(seed) != client.random_number_commitment:
client.error("seed did not match commitment")
proofs = msg.encrypted_proofs
if len(proofs) != Params.num_components:
client.error("wrong number of proofs")
if any(len(p) > 200 for p in proofs):
client.error("too-long proof") # they should only be 129 bytes long.
# generate the possible destinations list (all commitments, but leaving out the originating client's commitments).
myindex = self.clients.index(client)
possible_commitment_destinations = [(ci,cj) for commit, ci, cj in commitment_master_list if ci != myindex]
N = len(possible_commitment_destinations)
assert N == len(all_commitments) - Params.num_components
# calculate the randomly chosen destinations, same way as client did.
relays = []
for i, proof in enumerate(proofs):
dest_client_idx, dest_key_idx = possible_commitment_destinations[rand_position(seed, N, i)]
src_commitment_idx = client_commit_indexes[myindex][i]
relays.append((proof, src_commitment_idx, dest_client_idx, dest_key_idx))
if not collector.add((client, relays)):
client.error("late proofs")
for client in self.clients:
client.addjob(client_get_proofs, collector)
results = collector.gather(deadline = time.monotonic() + Protocol.STANDARD_TIMEOUT)
# Now, repackage the proofs according to destination.
proofs_to_relay = [list() for _ in self.clients]
for src_client, relays in results:
for proof, src_commitment_idx, dest_client_idx, dest_key_idx in relays:
proofs_to_relay[dest_client_idx].append((proof, src_commitment_idx, dest_key_idx, src_client))
live_clients = len(results)
collector = ResultsCollector(live_clients, done_on_fail = False)
def client_get_blames(client, myindex, proofs, collector):
with collector:
# an in-place sort by source commitment idx removes ordering correlations about which client sent which proof
proofs.sort(key = lambda x:x[1])
client.send(pb.TheirProofsList(proofs = [
dict(encrypted_proof=x, src_commitment_idx=y, dst_key_idx=z)
for x,y,z, _ in proofs]))
msg = client.recv('blames', timeout = Protocol.STANDARD_TIMEOUT + Protocol.BLAME_VERIFY_TIME)
# More than one blame per proof is malicious. Boot client
# immediately since client may be trying to DoS us by
# making us check many inputs against blockchain.
if len(msg.blames) > len(proofs):
client.error('too many blames')
if len(set(blame.which_proof for blame in msg.blames)) != len(msg.blames):
client.error('multiple blames point to same proof')
# Note, the rest of this function might run for a while if many
# checks against blockchain need to be done, perhaps even still
# running after run_round has exited. For this reason we try to
# not reference self.<variables> that may change.
for blame in msg.blames:
try:
encproof, src_commitment_idx, dest_key_idx, src_client = proofs[blame.which_proof]
except IndexError:
client.kill(f'bad proof index {blame.which_proof} / {len(proofs)}')
continue
src_commit_blob, src_commit_client_idx, _ = commitment_master_list[src_commitment_idx]
dest_commit_blob = all_commitments[client_commit_indexes[myindex][dest_key_idx]]
try:
ret = validate_blame(blame, encproof, src_commit_blob, dest_commit_blob, all_components, bad_components, Params.component_feerate)
except ValidationError as e:
self.print_error("got bad blame; clamed reason was: "+repr(blame.blame_reason))
client.kill(f'bad blame message: {e} (you claimed: {blame.blame_reason!r})')
continue
if isinstance(ret, str):
self.print_error(f"verified a bad proof (for {src_commitment_idx}): {ret}")
src_client.kill(f'bad proof (for {src_commitment_idx}): {ret}')
continue
if src_client.dead:
# If the blamed client is already dead, don't waste more time.
# Since nothing after this point can report back to the
                        # verifier, there is no privacy leak by the omission.
continue
assert ret, 'expecting input component'
outpoint = ret.prev_txid[::-1].hex() + ':' + str(ret.prev_index)
try:
check_input_electrumx(self.network, ret)
except ValidationError as e:
reason = f'{e.args[0]} ({outpoint})'
self.print_error(f"blaming[{src_commitment_idx}] for bad input: {reason}")
src_client.kill('you provided a bad input: ' + reason)
continue
except Exception as e:
self.print_error(f"player indicated bad input but checking failed with exception {repr(e)} ({outpoint})")
else:
self.print_error(f"player indicated bad input but it was fine ({outpoint})")
# At this point we could blame the originator, however
# blockchain checks are somewhat subjective. It would be
# appropriate to add some 'ban score' to the player.
# we aren't collecting any results, rather just marking that
# 'checking finished' so that if all blames are checked, we
# can start next round right away.
collector.add(None)
for idx, (client, proofs) in enumerate(zip(self.clients, proofs_to_relay)):
client.addjob(client_get_blames, idx, proofs, collector)
_ = collector.gather(deadline = time.monotonic() + Protocol.STANDARD_TIMEOUT + Protocol.BLAME_VERIFY_TIME * 2)
self.sendall(pb.RestartRound())
class CovertClientThread(ClientHandlerThread):
def recv(self, *expected_msg_names, timeout=None):
submsg, mtype = recv_pb(self.connection, pb.CovertMessage, *expected_msg_names, timeout=timeout)
return submsg, mtype
def send(self, submsg, timeout=None):
send_pb(self.connection, pb.CovertResponse, submsg, timeout=timeout)
def send_ok(self,):
self.send(pb.OK(), timeout=5)
def send_error(self, msg):
self.send(pb.Error(message = msg), timeout=5)
def error(self, msg):
self.send_error(msg)
raise FusionError(f'Rejected client: {msg}')
class CovertServer(GenericServer):
"""
Server for covert submissions. How it works:
- Launch the server at any time. By default, will bind to an ephemeral port.
- Before start of covert components phase, call start_components.
- To signal the end of covert components phase, owner calls end_components, which returns a dict of {component: contrib}, where contrib is (+- amount - fee).
- Before start of covert signatures phase, owner calls start_signatures.
- To signal the end of covert signatures phase, owner calls end_signatures, which returns a list of signatures (which will have None at positions of missing signatures).
- To reset the server for a new round, call .reset(); to kill all connections, call .stop().
"""
def __init__(self, bindhost, port=0, upnp = None):
super().__init__(bindhost, port, CovertClientThread, upnp = upnp)
self.round_pubkey = None
def start_components(self, round_pubkey, feerate):
self.components = dict()
self.feerate = feerate
self.round_pubkey = round_pubkey
for c in self.spawned_clients:
c.got_submit = False
def end_components(self):
with self.lock:
ret = self.components
del self.components
return ret
def start_signatures(self, sighashes, pubkeys):
num_inputs = len(sighashes)
assert num_inputs == len(pubkeys)
self.signatures = [None]*num_inputs
self.sighashes = sighashes
self.pubkeys = pubkeys
for c in self.spawned_clients:
c.got_submit = False
def end_signatures(self):
with self.lock:
ret = self.signatures
del self.signatures
return ret
def reset(self):
try:
del self.round_pubkey
del self.components
del self.feerate
except AttributeError:
pass
try:
del self.sighashes
del self.pubkeys
except AttributeError:
pass
def new_client_job(self, client):
client.got_submit = False
while True:
msg, mtype = client.recv('component', 'signature', 'ping', timeout = COVERT_CLIENT_TIMEOUT)
if mtype == 'ping':
continue
if client.got_submit:
# We got a second submission before a new phase started. As
# an anti-spam measure we only allow one submission per connection
# per phase.
                client.error('multiple submissions in same phase')
if mtype == 'component':
try:
round_pubkey = self.round_pubkey
feerate = self.feerate
_ = self.components
except AttributeError:
client.error('component submitted at wrong time')
sort_key, contrib = check_covert_component(msg, round_pubkey, feerate)
with self.lock:
try:
self.components[msg.component] = (sort_key, contrib)
except AttributeError:
client.error('component submitted at wrong time')
else:
assert mtype == 'signature'
try:
sighash = self.sighashes[msg.which_input]
pubkey = self.pubkeys[msg.which_input]
existing_sig = self.signatures[msg.which_input]
except AttributeError:
client.error('signature submitted at wrong time')
except IndexError:
raise ValidationError('which_input too high')
sig = msg.txsignature
if len(sig) != 64:
raise ValidationError('signature length is wrong')
# It might be we already have this signature. This is fine
# since it might be a resubmission after ack failed delivery,
# but we don't allow it to consume our CPU power.
if sig != existing_sig:
if not schnorr.verify(pubkey, sig, sighash):
raise ValidationError('bad transaction signature')
if existing_sig:
# We received a distinct valid signature. This is not
# allowed and we break the connection as a result.
# Note that we could have aborted earlier but this
# way third parties can't abuse us to find out the
# timing of a given input's signature submission.
raise ValidationError('conflicting valid signature')
with self.lock:
try:
self.signatures[msg.which_input] = sig
except AttributeError:
client.error('signature submitted at wrong time')
client.send_ok()
client.got_submit = True
| 45.310448 | 174 | 0.599798 | 42,368 | 0.930347 | 0 | 0 | 56 | 0.00123 | 0 | 0 | 13,317 | 0.292424 |
e014451ff2d26b3e408bb00a4f1a954adc75daa5 | 2,229 | py | Python | Excercici4Package/ex4.py | jtorrenth/CienciaDades | 81f005ed1ddcc218dcde8c5e2f1a297444389a82 | [
"MIT"
] | null | null | null | Excercici4Package/ex4.py | jtorrenth/CienciaDades | 81f005ed1ddcc218dcde8c5e2f1a297444389a82 | [
"MIT"
] | null | null | null | Excercici4Package/ex4.py | jtorrenth/CienciaDades | 81f005ed1ddcc218dcde8c5e2f1a297444389a82 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
def countvalues(dataframe, subject):
# Filtrem i tractem el dataset
economydf = filtrar(dataframe, "economy")
# el printem
printar(economydf, subject)
# Filtrem ara per subject infected i ho desem en un altre df
infectedf = filtrar(dataframe, "infected")
# Calculem els percentatjes
percentvery = (infectedf['ppl_very'].sum()/infectedf['sample_size'].sum())*100
percentnotatall = (infectedf['ppl_not_at_all'].sum() / infectedf['sample_size'].sum()) * 100
# Els printem
print("percentatge very: {}%".format(percentvery))
print("percentatge not_at_all: {}%".format(percentnotatall))
grafic4('People_Very', 'People_Not_At_All', percentvery, percentnotatall, " % Persones", "Satisfacció", "% de persones preocupades o no per infected")
def printar(df, subject):
# Printem a la consola els valors
print("Valors per subject {}".format(subject))
pplvery = df['ppl_very'].sum()
pplnot = df['ppl_not_at_all'].sum()
print("Very: {}".format(pplvery))
print("Not at All: {}".format(pplnot))
    # Finally, plot
    # The chart must be closed to continue execution
grafic4('People_Very', 'People_Not_At_All', pplvery, pplnot, "Persones", "Satisfacció", "Nombre de persones preocupades o no per l'economia")
def filtrar(dataframe, subject1):
df = dataframe[dataframe['subject'].str.contains(subject1, case=False)].copy()
    # Add the values based on the sample size to two new columns
df['ppl_very'] = df['very'] / 100 * df['sample_size']
df['ppl_not_at_all'] = df['not_at_all'] / 100 * df['sample_size']
return df
def grafic4(label1, label2, valor1, valor2, leyenday, leyendax, titulo):
    # Declare the values for the x axis
eje_x = [label1, label2]
    # Declare the values for the y axis
eje_y = [valor1, valor2]
    # Draw the chart
plt.bar(eje_x, eje_y)
    # Y-axis label
plt.ylabel(leyenday)
    # X-axis label
plt.xlabel(leyendax)
    # Chart title
plt.title(titulo)
    # Show the chart
plt.show()
# Function for exercise 4.4
def grades(df):
    df['538 Grade'] = df['538 Grade'].str[0]
print(df.groupby('538 Grade').size())
| 31.842857 | 154 | 0.674742 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,022 | 0.457475 |
e0161b99cffb06588c8cd2a39e9f07abf59540ea | 18,987 | bzl | Python | build/rules.bzl | filmil/bazel-ebook | 433f1e157c6c1b7867abf72bc0e882c07477d60d | [
"Apache-2.0"
] | 9 | 2020-05-31T10:24:57.000Z | 2021-12-21T10:07:51.000Z | build/rules.bzl | filmil/bazel-ebook | 433f1e157c6c1b7867abf72bc0e882c07477d60d | [
"Apache-2.0"
] | 2 | 2021-11-09T23:25:01.000Z | 2021-11-10T08:42:22.000Z | build/rules.bzl | filmil/bazel-ebook | 433f1e157c6c1b7867abf72bc0e882c07477d60d | [
"Apache-2.0"
] | 2 | 2020-06-03T13:21:33.000Z | 2021-12-01T20:17:46.000Z | # Copyright (C) 2020 Google Inc.
#
# This file has been licensed under Apache 2.0 license. Please see the LICENSE
# file at the root of the repository.
# Build rules for building ebooks.
# This is the container
CONTAINER = "filipfilmar/ebook-buildenv:1.1"
# Use this for quick local runs.
#CONTAINER = "ebook-buildenv:local"
EbookInfo = provider(fields=["figures", "markdowns"])
# Returns the docker_run script invocation command based on the
# script path and its reference directory.
#
# Params:
# script_path: (string) The full path to the script to invoke
# dir_reference: (string) The path to a file used for figuring out
# the reference directories (build root and repo root).
def _script_cmd(script_path, dir_reference):
return """\
{script} \
--container={container} \
--dir-reference={dir_reference}""".format(
script=script_path,
container=CONTAINER,
dir_reference=dir_reference,
)
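# For example (illustrative paths), _script_cmd("build/docker_run.sh", "book/ch01.md")
# produces roughly:
#   build/docker_run.sh --container=filipfilmar/ebook-buildenv:1.1 --dir-reference=book/ch01.md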
def _drawtiming_png_impl(ctx):
cmd = "drawtiming"
docker_run = ctx.executable._script
figures = []
for target in ctx.attr.srcs:
for src in target.files.to_list():
in_file = src
out_file = ctx.actions.declare_file(in_file.basename + ".png")
figures += [out_file]
script_cmd = _script_cmd(docker_run.path, in_file.path)
ctx.actions.run_shell(
progress_message = "timing diagram to PNG with {1}: {0}".format(in_file.short_path, cmd),
inputs = [in_file],
outputs = [out_file],
tools = [docker_run],
command = """\
{script} \
{cmd} --output "{out_file}" "{in_file}"
""".format(
cmd=cmd,
out_file=out_file.path,
in_file=in_file.path,
script=script_cmd),
)
deps = []
for target in ctx.attr.deps:
ebook_provider = target[EbookInfo]
if not ebook_provider:
continue
deps += ebook_provider.figures
runfiles = ctx.runfiles(files = figures)
return [
EbookInfo(figures=figures+deps, markdowns=[]),
DefaultInfo(files=depset(figures+deps), runfiles=runfiles),
]
drawtiming_png = rule(implementation = _drawtiming_png_impl,
attrs = {
"srcs": attr.label_list(
allow_files = [".t"],
doc = "The file to compile",
),
"deps": attr.label_list(
doc = "The dependencies, any targets should be allowed",
),
"output": attr.output(doc="The generated file"),
"_script": attr.label(
default="//build:docker_run",
executable=True,
cfg="host"),
},
doc = "Transform a timing diagram file into png using drawtiming",
)
def _generalized_graphviz_rule_impl(ctx, cmd):
docker_run = ctx.executable._script
figures = []
for target in ctx.attr.srcs:
for src in target.files.to_list():
in_file = src
out_file = ctx.actions.declare_file(in_file.basename + ".png")
figures += [out_file]
script_cmd = _script_cmd(docker_run.path, in_file.path)
ctx.actions.run_shell(
progress_message = "graphviz to PNG with {1}: {0}".format(in_file.short_path, cmd),
inputs = [in_file],
outputs = [out_file],
tools = [docker_run],
command = """\
{script} \
{cmd} -Tpng -o "{out_file}" "{in_file}"
""".format(
cmd=cmd,
out_file=out_file.path,
in_file=in_file.path,
script=script_cmd),
)
deps = []
for target in ctx.attr.deps:
ebook_provider = target[EbookInfo]
if not ebook_provider:
continue
deps += ebook_provider.figures
runfiles = ctx.runfiles(files = figures)
return [
EbookInfo(figures=figures+deps, markdowns=[]),
DefaultInfo(files=depset(figures+deps), runfiles=runfiles),
]
def _neato_png_impl(ctx):
return _generalized_graphviz_rule_impl(ctx, "neato")
neato_png = rule(implementation = _neato_png_impl,
attrs = {
"srcs": attr.label_list(
allow_files = [".dot"],
doc = "The file to compile",
),
"deps": attr.label_list(
doc = "The dependencies, any targets should be allowed",
),
"output": attr.output(doc="The generated file"),
"_script": attr.label(
default="//build:docker_run",
executable=True,
cfg="host"),
},
doc = "Transform a graphviz dot file into png using neato",
)
def _dot_png_impl(ctx):
return _generalized_graphviz_rule_impl(ctx, "dot")
dot_png = rule(implementation = _dot_png_impl,
attrs = {
"srcs": attr.label_list(
allow_files = [".dot"],
doc = "The file to compile",
),
"deps": attr.label_list(
doc = "The dependencies, any targets should be allowed",
),
"output": attr.output(doc="The generated file"),
"_script": attr.label(
default="//build:docker_run",
executable=True,
cfg="host"),
},
doc = "Transform a graphviz dot file into png using dot",
)
def _asymptote_impl(ctx):
asycc = ctx.executable._script
figures = []
for target in ctx.attr.srcs:
for src in target.files.to_list():
in_file = src
out_file = ctx.actions.declare_file(in_file.basename + ".png")
figures += [out_file]
script_cmd = _script_cmd(asycc.path, in_file.path)
ctx.actions.run_shell(
progress_message = "ASY to PNG: {0}".format(in_file.short_path),
inputs = [in_file],
outputs = [out_file],
tools = [asycc],
command = """\
{script} \
asy -render 5 -f png -o "{out_file}" "{in_file}"
""".format(
out_file=out_file.path, in_file=in_file.path, script=script_cmd),
)
deps = []
for target in ctx.attr.deps:
ebook_provider = target[EbookInfo]
if not ebook_provider:
continue
deps += ebook_provider.figures
runfiles = ctx.runfiles(files=figures+deps)
for dep in ctx.attr.deps:
runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles)
return [
EbookInfo(figures=figures+deps, markdowns=[]),
DefaultInfo(files=depset(figures+deps), runfiles=runfiles),
]
asymptote = rule(implementation = _asymptote_impl,
attrs = {
"srcs": attr.label_list(
allow_files = [".asy"],
doc = "The file to compile",
),
"deps": attr.label_list(
doc = "The dependencies, any targets should be allowed",
),
"output": attr.output(doc="The generated file"),
"_script": attr.label(
default="//build:docker_run",
executable=True,
cfg="host"),
},
doc = "Transform an asymptote file into png",
)
def _copy_file_to_workdir_renamed(ctx, src):
src_copy = ctx.actions.declare_file("{}_{}".format(ctx.label.name, src.short_path))
ctx.actions.run_shell(
progress_message = "Copying {} to {}".format(src.short_path, src_copy.short_path),
outputs = [src_copy],
inputs = [src],
command="cp {} {}".format(src.path, src_copy.path),
)
return src_copy
def _copy_file_to_workdir(ctx, src):
src_copy = ctx.actions.declare_file(src.basename)
ctx.actions.run_shell(
progress_message = "Copying {}".format(src.short_path),
outputs = [src_copy],
inputs = [src],
command="cp {} {}".format(src.path, src_copy.path),
)
return src_copy
def _markdown_lib_impl(ctx):
markdowns = []
for target in ctx.attr.srcs:
for src in target.files.to_list():
markdowns += [_copy_file_to_workdir(ctx, src)]
figures = []
for target in ctx.attr.deps:
provider = target[EbookInfo]
figures += (provider.figures or [])
markdowns += (provider.markdowns or [])
runfiles = ctx.runfiles(files=figures+markdowns)
for dep in ctx.attr.deps:
runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles)
return [
EbookInfo(figures=figures, markdowns=markdowns),
DefaultInfo(
files=depset(figures+markdowns),
runfiles=runfiles,
),
]
markdown_lib = rule(
implementation = _markdown_lib_impl,
doc = "Declares a set of markdown files",
attrs = {
"srcs": attr.label_list(
allow_files = [".md"],
doc = "The markdown source files",
),
"deps": attr.label_list(
doc = "The file to compile",
providers = [EbookInfo],
),
},
)
def _ebook_epub_impl(ctx):
name = ctx.label.name
# This is duplicated in _ebook_pdf_impl.
# steps
# run htex on all *md, gives book.htex
markdowns = []
figures = []
for dep in ctx.attr.deps:
provider = dep[EbookInfo]
markdowns += provider.markdowns
figures += provider.figures
dir_reference = markdowns[0]
htex_file = ctx.actions.declare_file("{}.htex".format(name))
markdowns_paths = [file.path for file in markdowns]
markdowns_paths_stripped = _strip_reference_dir_from_files(dir_reference, markdowns)
script = ctx.executable._script
script_cmd = _script_cmd(script.path, markdowns_paths[0])
ctx.actions.run_shell(
progress_message = "Building equation environments for: {}".format(name),
inputs = markdowns,
outputs = [htex_file],
tools = [script],
command = """\
{script} \
pandoc -s --gladtex -o {target} {sources} \
""".format(
script=script_cmd,
target=htex_file.path,
sources=" ".join(markdowns_paths))
)
# run gladtex on the resulting htex to obtain html and output directory with figures.
outdir = ctx.actions.declare_directory("{}.eqn".format(name))
html_file = ctx.actions.declare_file("{}.html".format(name))
ctx.actions.run_shell(
progress_message = "Extracting equations for: {}".format(name),
inputs = [htex_file],
outputs = [outdir, html_file],
tools = [script],
command = """\
{script} --cd-to-dir-reference \
gladtex -r 200 -d {outdir} {htex_file} \
""".format(
script=script_cmd,
outdir=_strip_reference_dir(dir_reference, outdir.path),
htex_file=_strip_reference_dir(dir_reference, htex_file.path),
)
)
outdir_tar = ctx.actions.declare_file("{}.tar".format(outdir.basename))
tar_command = "(cd {base} ; tar cf {archive} {dir})".format(
base=outdir_tar.dirname,
archive=outdir_tar.basename,
dir=outdir.basename)
ctx.actions.run_shell(
progress_message = "Archiving equations: {}".format(outdir_tar.short_path),
inputs = [outdir],
outputs = [outdir_tar],
command = tar_command,
)
# run htexepub to obtain book.epub.
# This is gonna be fun!
epub_metadata = ctx.attr.metadata_xml.files.to_list()[0]
epub_metadata = _copy_file_to_workdir_renamed(ctx, epub_metadata)
title_yaml = ctx.attr.title_yaml.files.to_list()[0]
    title_yaml = _copy_file_to_workdir_renamed(ctx, title_yaml)
ebook_epub = ctx.actions.declare_file("{}.epub".format(name))
inputs = [epub_metadata, title_yaml, html_file, outdir, outdir_tar] + markdowns + figures
ctx.actions.run_shell(
progress_message = "Building EPUB for: {}".format(name),
inputs = inputs,
tools = [script],
outputs = [ebook_epub],
command = """\
{script} --cd-to-dir-reference \
pandoc --epub-metadata={epub_metadata} \
-f html -t epub3 -o {ebook_epub} {html_file} \
""".format(
script=script_cmd,
epub_metadata=_strip_reference_dir(dir_reference, epub_metadata.path),
ebook_epub=_strip_reference_dir(dir_reference, ebook_epub.path),
html_file=_strip_reference_dir(dir_reference, html_file.path),
))
runfiles = ctx.runfiles(files=[ebook_epub])
for dep in ctx.attr.deps:
runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles)
return [
dep[EbookInfo],
DefaultInfo(
files=depset([ebook_epub, outdir, outdir_tar]),
runfiles=runfiles,
)
]
ebook_epub = rule(
implementation = _ebook_epub_impl,
attrs = {
"deps": attr.label_list(
doc = "All the targets you need to make this book work.",
providers = [EbookInfo],
),
"title_yaml": attr.label(
allow_files = True,
doc = "The title.yaml file to use for this book",
),
"metadata_xml": attr.label(
allow_files = True,
doc = "The epub-metadata.xml file to use for this book",
),
"_script": attr.label(
default="//build:docker_run",
executable=True,
cfg="host"),
},
doc = "Generate an ebook in EPUB format"
)
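# Example BUILD usage of the rules above (illustrative target and file names):
#
#   asymptote(
#       name = "figures",
#       srcs = glob(["figures/*.asy"]),
#   )
#
#   markdown_lib(
#       name = "chapters",
#       srcs = glob(["*.md"]),
#       deps = [":figures"],
#   )
#
#   ebook_epub(
#       name = "book_epub",
#       deps = [":chapters"],
#       title_yaml = "title.yaml",
#       metadata_xml = "epub-metadata.xml",
#   )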
def _strip_reference_dir(reference_dir, path):
return path.replace(reference_dir.dirname+"/", "")
def _strip_reference_dir_from_files(reference_dir, files):
return [ _strip_reference_dir(reference_dir, file.path) for file in files]
def _ebook_pdf_impl(ctx):
name = ctx.label.name
# steps
# run htex on all *md, gives book.htex
markdowns = []
figures = []
for dep in ctx.attr.deps:
provider = dep[EbookInfo]
markdowns += provider.markdowns
figures += provider.figures
dir_reference = markdowns[0]
# Fixed up paths -- relative to the directory dir_reference, not the
# directory where the build happens! This is needed because we can not control
# figure inclusion.
markdowns_paths = _strip_reference_dir_from_files(dir_reference, markdowns)
script = ctx.executable._script
script_cmd = _script_cmd(script.path, dir_reference.path)
# run htexepub to obtain book.epub.
# This is gonna be fun!
epub_metadata = ctx.attr.metadata_xml.files.to_list()[0]
epub_metadata = _copy_file_to_workdir(ctx, epub_metadata)
title_yaml = ctx.attr.title_yaml.files.to_list()[0]
title_yaml = _copy_file_to_workdir(ctx, title_yaml)
ebook_pdf = ctx.actions.declare_file("{}.pdf".format(name))
inputs = [epub_metadata, title_yaml] + markdowns + figures
ctx.actions.run_shell(
progress_message = "Building PDF for: {}".format(name),
inputs = inputs,
tools = [script],
outputs = [ebook_pdf],
command = """\
{script} --cd-to-dir-reference \
pandoc --epub-metadata={epub_metadata} \
--mathml -o {ebook_pdf} {markdowns} \
""".format(
script=script_cmd,
epub_metadata=_strip_reference_dir(dir_reference, epub_metadata.path),
ebook_pdf=_strip_reference_dir(dir_reference, ebook_pdf.path),
markdowns=" ".join(markdowns_paths),
))
runfiles = ctx.runfiles(files=[ebook_pdf])
for dep in ctx.attr.deps:
runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles)
return [
DefaultInfo(
files=depset([ebook_pdf]),
runfiles=runfiles,
)
]
ebook_pdf = rule(
implementation = _ebook_pdf_impl,
attrs = {
"deps": attr.label_list(
doc = "All the targets you need to make this book work.",
providers = [EbookInfo],
),
"title_yaml": attr.label(
allow_files = True,
doc = "The title.yaml file to use for this book",
),
"metadata_xml": attr.label(
allow_files = True,
doc = "The epub-metadata.xml file to use for this book",
),
"_script": attr.label(
default="//build:docker_run",
executable=True,
cfg="host"),
},
doc = "Generate an ebook in PDF format"
)
def _ebook_kindle_impl(ctx):
mobi_file = ctx.actions.declare_file("{}.mobi".format(ctx.label.name))
# First provider is EbookInfo, second is DefaultInfo.
(ebook_info, default_info) = _ebook_epub_impl(ctx)
# There can be only one such file
outputs = default_info.files.to_list()
epub_file = outputs[0]
equation_outdir = outputs[1]
equation_outdir_tar = outputs[2]
captured_output = ctx.actions.declare_file(
"{}.untar-out".format(ctx.label.name))
# untar the equation dir
# Maybe this is not needed.
tar_command = "(cd {base} ; tar xvf {archive}) > {output}".format(
base=equation_outdir_tar.dirname,
archive=equation_outdir_tar.basename,
output=captured_output.path)
ctx.actions.run_shell(
progress_message = "Unarchiving equations: {}".format(equation_outdir_tar.short_path),
inputs = [equation_outdir_tar],
outputs = [captured_output],
command = tar_command,
)
dir_reference = epub_file
script = ctx.executable._script
name = ctx.label.name
script_cmd = _script_cmd(script.path, epub_file.path)
ctx.actions.run_shell(
progress_message = "Building MOBI for: {}".format(name),
inputs = [epub_file, equation_outdir],
tools = [script],
outputs = [mobi_file],
command = """\
{script} --cd-to-dir-reference \
ebook-convert {epub_file} {mobi_file} \
""".format(
script=script_cmd,
epub_file=_strip_reference_dir(dir_reference, epub_file.path),
mobi_file=_strip_reference_dir(dir_reference, mobi_file.path),
))
runfiles = ctx.runfiles(files=[mobi_file])
for dep in ctx.attr.deps:
runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles)
return [
DefaultInfo(
files=depset([mobi_file, captured_output]),
runfiles=runfiles,
)
]
ebook_kindle = rule(
implementation = _ebook_kindle_impl,
attrs = {
"deps": attr.label_list(
doc = "All the targets you need to make this book work.",
providers = [EbookInfo],
),
"title_yaml": attr.label(
allow_files = True,
doc = "The title.yaml file to use for this book",
),
"metadata_xml": attr.label(
allow_files = True,
doc = "The epub-metadata.xml file to use for this book",
),
"_script": attr.label(
default="//build:docker_run",
executable=True,
cfg="host"),
},
doc = "Generate an ebook in the Kindle's MOBI format"
)
| 32.126904 | 103 | 0.599357 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,564 | 0.240375 |
e0164a1f4fee849a8bca46fb970244ecbfd603fe | 715 | py | Python | 1094 EXPERIENCIAS.py | castrolimoeiro/Uri-exercise | 7a9227c55a79f14fe8bde4aa0ebb4c268bbda4bb | [
"MIT"
] | null | null | null | 1094 EXPERIENCIAS.py | castrolimoeiro/Uri-exercise | 7a9227c55a79f14fe8bde4aa0ebb4c268bbda4bb | [
"MIT"
] | null | null | null | 1094 EXPERIENCIAS.py | castrolimoeiro/Uri-exercise | 7a9227c55a79f14fe8bde4aa0ebb4c268bbda4bb | [
"MIT"
] | null | null | null | n = int(input())
coelho = rato = sapo = contador = 0
for i in range(0, n):
q, t = input().split(' ')
t = t.upper()
q = int(q)
if 1 <= q <= 15:
contador += q
if t == 'C':
coelho += q
elif t == 'R':
rato += q
elif t == 'S':
sapo += q
porccoelho = (coelho * 100) / contador
porcrato = (rato * 100) / contador
porcsapo = (sapo * 100) / contador
print(f'Total: {contador} cobaias')
print(f'Total de coelhos: {coelho}')
print(f'Total de ratos: {rato}')
print(f'Total de sapos: {sapo}')
print(f'Percentual de coelhos: {porccoelho:.2f} %')
print(f'Percentual de ratos: {porcrato:.2f} %')
print(f'Percentual de sapos: {porcsapo:.2f} %')
| 25.535714 | 51 | 0.544056 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 243 | 0.33986 |
e016b94fa3454d62f7b448ca14631899dd78dc4c | 299 | py | Python | gravur/common/amountinput.py | F483/gravur | 575c268d9ac28aa0ba00f1e5109bd74c3b7b69a5 | [
"MIT"
] | 3 | 2015-07-20T17:56:21.000Z | 2017-10-22T05:52:13.000Z | gravur/common/amountinput.py | F483/gravur | 575c268d9ac28aa0ba00f1e5109bd74c3b7b69a5 | [
"MIT"
] | null | null | null | gravur/common/amountinput.py | F483/gravur | 575c268d9ac28aa0ba00f1e5109bd74c3b7b69a5 | [
"MIT"
] | null | null | null | # coding: utf-8
# Copyright (c) 2015 Fabian Barkhau <[email protected]>
# License: MIT (see LICENSE file)
from kivy.uix.boxlayout import BoxLayout
from gravur.common.labelbox import LabelBox # NOQA
from gravur.utils import load_widget
@load_widget
class AmountInput(BoxLayout):
pass
| 21.357143 | 62 | 0.772575 | 38 | 0.12709 | 0 | 0 | 51 | 0.170569 | 0 | 0 | 116 | 0.38796 |
e018a8edf8d16988caad3f9660a381b73b1f97c4 | 17,156 | py | Python | tibanna/top.py | 4dn-dcic/tibanna | bb84597c425a481a230be30cb0ed9b99c774e53d | [
"MIT"
] | 62 | 2017-02-16T02:16:22.000Z | 2022-02-07T08:26:12.000Z | tibanna/top.py | 4dn-dcic/tibanna | bb84597c425a481a230be30cb0ed9b99c774e53d | [
"MIT"
] | 77 | 2017-10-26T20:17:35.000Z | 2022-03-25T22:56:32.000Z | tibanna/top.py | 4dn-dcic/tibanna | bb84597c425a481a230be30cb0ed9b99c774e53d | [
"MIT"
] | 19 | 2017-01-27T16:37:37.000Z | 2021-12-12T13:52:01.000Z | import datetime
class Top(object):
"""class TopSeries stores the information of a series of top commands
::
echo -n 'Timestamp: '; date +%F-%H:%M:%S
top -b -n1 [-i] [-c]
over short intervals to monitor the same set of processes over time.
An example input content looks like below, or a series of these.
The initialization works at any time interval and can be used as a generic
class, but the class is designed for the output of a regular top commands above
run at about 1-minute intervals, which is performed by awsf3 on an AWSEM instance
through cron jobs. (some can be skipped but there should be no more than 1 per minute).
This top output can be obtained through ``tibanna log -j <job_id> -t`` or through
API ``API().log(job_id=<job_id>, top=True)``.
::
Timestamp: 2020-12-18-18:55:37
top - 18:55:37 up 4 days, 2:37, 0 users, load average: 5.59, 5.28, 5.76
Tasks: 7 total, 1 running, 6 sleeping, 0 stopped, 0 zombie
%Cpu(s): 6.6 us, 0.1 sy, 0.0 ni, 93.2 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
KiB Mem : 12971188+total, 10379019+free, 20613644 used, 5308056 buff/cache
KiB Swap: 0 total, 0 free, 0 used. 10834606+avail Mem
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
712 root 20 0 36.464g 8.223g 19572 S 100.0 6.6 125:55.12 java -Xmx32g -Xms32g -jar juicer_tools.jar addNorm -w 1000 -d -F out.hic
17919 ubuntu 20 0 40676 3828 3144 R 6.2 0.0 0:00.01 top -b -n1 -c -i -w 10000
    The default timestamp from top output does not contain dates, which can screw up multi-day
    processes, which are common for bioinformatics pipelines. So, an extra timestamp is added before each top command.
To parse top output content, simply create an object. This will create processes attribute,
which is a raw parsed result organized by time stamps.
::
top = Top(top_output_content)
To reorganize the contents by commands, run digest. By default, the max number of commands is 16,
and if there are more than 16 unique commands, they will be collapsed into prefixes.
::
top.digest()
To write a csv / tsv file organized by both timestamps (rows) and commands (columns),
use :func: write_to_csv.
::
top.write_to_csv(...)
"""
# assume this format for timestamp
timestamp_format = '%Y-%m-%d-%H:%M:%S'
# These commands are excluded when parsing the top output
# Currently only 1-, 2- or 3-word prefixes work.
exclude_list = ['top', 'docker', 'dockerd', '/usr/bin/dockerd', 'cron',
'docker-untar', 'containerd', 'goofys-latest', 'cwltool',
'/usr/bin/containerd-shim-runc-v2', 'goofys', 'nodejs --eval',
'/usr/bin/python3 /usr/local/bin/cwltool', 'containerd-shim',
'/usr/bin/python3 /bin/unattended-upgrade',
'/usr/bin/python3 /usr/local/bin/awsf3',
'/usr/bin/python3 /usr/local/bin/aws s3',
'java -jar /usr/local/bin/cromwell.jar',
'java -jar /usr/local/bin/cromwell-35.jar']
def __init__(self, contents):
"""initialization parsed top output content and
creates processes which is a dictionary with timestamps as keys
and a list of Process class objects as a value.
It also creates empty attributes timestamps, commands, cpus and mems
which can be filled through method :func: digest.
"""
self.processes = dict()
self.timestamps = []
self.commands = []
self.cpus = dict()
self.mems = dict()
self.parse_contents(contents)
def parse_contents(self, contents):
is_in_table = False
for line in contents.splitlines():
if line.startswith('Timestamp:'):
timestamp = line.split()[1]
continue
if line.lstrip().startswith('PID'):
is_in_table = True
continue
if not line or line.isspace():
is_in_table = False
if is_in_table:
if timestamp not in self.processes:
self.processes[timestamp] = []
process = Process(line)
if not self.should_skip_process(process):
self.processes[timestamp].append(Process(line))
def digest(self, max_n_commands=16, sort_by='alphabetical'):
"""Fills in timestamps, commands, cpus and mems attributes
from processes attribute.
:param max_n_commands: When the number of unique commands exceeds
this value, they are collapsed into unique prefixes.
        :param sort_by: alphabetical|cpu|mem The commands are by default sorted
        alphabetically, but optionally can be sorted by total cpu or total
        mem (in reverse order) (e.g. the first command consumed the most cpu)
"""
        # Reinitialize these so that you get the same results if you run it twice
self.timestamps = []
self.commands = []
self.cpus = dict()
self.mems = dict()
# First fill in commands from commands in processes (and collapse if needed.)
self.commands = self.get_collapsed_commands(max_n_commands)
# Fill in timestamps, cpus and mems from processes, matching collapsed commands.
self.nTimepoints = len(self.processes)
timestamp_ind = 0
for timestamp in sorted(self.processes):
# sorted timestamps (columns)
self.timestamps.append(timestamp)
# commands (rows)
for process in self.processes[timestamp]:
# find a matching collapsed command (i.e. command prefix) and use that as command.
command = Top.convert_command_to_collapsed_command(process.command, self.commands)
if command not in self.cpus:
self.cpus[command] = [0] * self.nTimepoints
self.mems[command] = [0] * self.nTimepoints
self.cpus[command][timestamp_ind] += process.cpu
self.mems[command][timestamp_ind] += process.mem
timestamp_ind += 1
# sort commands according to total cpu
self.sort_commands(by=sort_by)
def get_collapsed_commands(self, max_n_commands):
"""If the number of commands exceeds max_n_commands,
return a collapsed set of commands
that consists of prefixes of commands so that
the total number is within max_n_commands.
First decide the number of words from the beginning of the commands
to collapse commands that start with the same words, i.e.
find the maximum number of words that makes the number of unique commands to be
bounded by max_n_commands.
If using only the first word is not sufficient, go down to the characters of
the first word. If that's still not sufficient, collapse all of them into a single
command ('all_commands')
After the collapse, commands that are unique to a collapsed prefix are
extended back to the original command.
"""
all_commands = set()
for timestamp in self.processes:
all_commands.update(set([pr.command for pr in self.processes[timestamp]]))
if len(all_commands) <= max_n_commands:
# no need to collapse
return list(all_commands)
# decide the number of words from the beginning of the commands
# to collapse commands starting with the same words
all_cmd_lengths = [len(cmd.split()) for cmd in all_commands] # number of words per command
max_cmd_length = max(all_cmd_lengths)
min_cmd_length = min(all_cmd_lengths)
collapsed_len = max_cmd_length - 1
n_commands = len(all_commands)
while(n_commands > max_n_commands and collapsed_len > 1):
reduced_commands = set()
for cmd in all_commands:
reduced_commands.add(Top.first_words(cmd, collapsed_len))
n_commands = len(reduced_commands)
collapsed_len -= 1
# went down to the first words but still too many commands - start splitting characters then
if n_commands > max_n_commands:
all_cmd_lengths = [len(cmd.split()[0]) for cmd in all_commands] # number of characters of the first word
max_cmd_length = max(all_cmd_lengths)
min_cmd_length = min(all_cmd_lengths)
collapsed_len = max_cmd_length - 1
while(n_commands > max_n_commands and collapsed_len > 1):
reduced_commands = set()
for cmd in all_commands:
reduced_commands.add(Top.first_characters(cmd.split()[0], collapsed_len))
n_commands = len(reduced_commands)
collapsed_len -= 1
if n_commands > max_n_commands:
return ['all_commands']
else:
# extend reduced commands that don't need to be reduced
for r_cmd in list(reduced_commands): # wrap in list so that we can remove elements in the loop
uniq_cmds = [cmd for cmd in all_commands if cmd.startswith(r_cmd)]
if len(uniq_cmds) == 1:
reduced_commands.remove(r_cmd)
reduced_commands.add(uniq_cmds[0])
return reduced_commands
def write_to_csv(self, csv_file, metric='cpu', delimiter=',', colname_for_timestamps='timepoints',
timestamp_start=None, timestamp_end=None, base=0):
"""write metrics as csv file with commands as columns
:param metric: 'cpu' or 'mem'
:param delimiter: default ','
:param colname_for_timestamps: colunm name for the timepoint column (1st column). default 'timepoints'
:param timestamp_start: start time in the same timestamp format (e.g. 01:23:45),
time stamps will be converted to minutes since start time.
The report starts with minute 0.
Time points with no top records will be filled with 0.
If not specified, the first timestamp in the top commands will be used.
:param timestamp_end: end time in the same timestamp format (e.g. 01:23:45),
The reports will be generated only up to the end time.
Time points with no top records will be filled with 0.
If not specified, the last timestamp in the top commands will be used.
:param base: default 0. If 0, minutes start with 0, if 1, minutes are 1-based (shifted by 1).
"""
metric_array = getattr(self, metric + 's')
if self.timestamps:
if not timestamp_start:
timestamp_start = self.timestamps[0]
if not timestamp_end:
timestamp_end = self.timestamps[-1]
timestamps_as_minutes = self.timestamps_as_minutes(timestamp_start)
last_minute = self.as_minutes(timestamp_end, timestamp_start)
else: # default when timestamps is not available (empty object)
timestamps_as_minutes = range(0, 5)
last_minute = 5
with open(csv_file, 'w') as fo:
# header
# we have to escape any double quotes that are present in the cmd, before wrapping it in double quotes. Otherwise we
# will get incorrect column counts when creating the metrics report.
fo.write(delimiter.join([colname_for_timestamps] + [Top.wrap_in_double_quotes(cmd.replace('"', '""')) for cmd in self.commands]))
fo.write('\n')
# contents
# skip timepoints earlier than timestamp_start
for i in range(0, len(timestamps_as_minutes)):
if timestamps_as_minutes[i] >= 0:
break
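            # i now indexes the first timepoint at or after timestamp_start (minute 0)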
for clock in range(0, last_minute + 1):
clock_shifted = clock + base
if i < len(timestamps_as_minutes) and timestamps_as_minutes[i] == clock:
fo.write(delimiter.join([str(clock_shifted)] + [str(metric_array[cmd][i]) for cmd in self.commands]))
i += 1
else:
fo.write(delimiter.join([str(clock_shifted)] + ['0' for cmd in self.commands])) # add 0 for timepoints not reported
fo.write('\n')
def should_skip_process(self, process):
"""A predicate function to check if the process should be skipped (excluded).
It returns True if the input process should be skipped.
e.g. the top command itself is excluded, as well as docker, awsf3, cwltool, etc.
the list to be excluded is in self.exclude_list.
It compares either first word or first two or three words only.
        Kernel threads (single-word commands wrapped in brackets, e.g. [perl]) are also excluded.
"""
first_word = Top.first_words(process.command, 1)
first_two_words = Top.first_words(process.command, 2)
first_three_words = Top.first_words(process.command, 3)
if first_word in self.exclude_list:
return True
elif first_two_words in self.exclude_list:
return True
elif first_three_words in self.exclude_list:
return True
if first_word.startswith('[') and first_word.endswith(']'):
return True
return False
@staticmethod
def convert_command_to_collapsed_command(cmd, collapsed_commands):
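        # map an original command back to the (possibly collapsed) command it was grouped under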
if collapsed_commands == 'all_commands': # collapsed to one command
return 'all_commands'
elif cmd in collapsed_commands: # not collapsed
return cmd
else: # collapsed to prefix
all_prefixes = [_ for _ in collapsed_commands if cmd.startswith(_)]
longest_prefix = sorted(all_prefixes, key=lambda x: len(x), reverse=True)[0]
return longest_prefix
    def total_cpu_per_command(self, command):
        return sum(self.cpus[command])
    def total_mem_per_command(self, command):
        return sum(self.mems[command])
def sort_commands(self, by='cpu'):
"""sort self.commands by total cpu (default) or mem in reverse order,
or alphabetically (by='alphabetical')"""
if by == 'cpu':
self.commands = sorted(self.commands, key=lambda x: self.total_cpu_per_command(x), reverse=True)
elif by == 'mem':
self.commands = sorted(self.commands, key=lambda x: self.total_mem_per_command(x), reverse=True)
elif by == 'alphabetical':
self.commands = sorted(self.commands)
@classmethod
def as_minutes(cls, timestamp, timestamp_start):
"""timestamp as minutes since timestamp_start.
:param timestamp: given timestamp in the same format (e.g. 01:23:45)
:param timestamp_start: start timestamp in the same format (e.g. 01:20:45)
In the above example, 3 will be the return value.
"""
dt = cls.as_datetime(timestamp)
dt_start = cls.as_datetime(timestamp_start)
        # timedelta.seconds is only the non-negative seconds part, so handle each direction separately
if dt > dt_start:
return round((dt - dt_start).seconds / 60)
else:
return -round((dt_start - dt).seconds / 60)
def timestamps_as_minutes(self, timestamp_start):
"""convert self.timestamps to a list of minutes since timestamp_start
:param timestamp_start: timestamp in the same format (e.g. 01:23:45)
"""
return [self.as_minutes(t, timestamp_start) for t in self.timestamps]
@classmethod
def as_datetime(cls, timestamp):
return datetime.datetime.strptime(timestamp, cls.timestamp_format)
@staticmethod
def wrap_in_double_quotes(string):
"""wrap a given string with double quotes (e.g. haha -> "haha")
"""
return '\"' + string + '\"'
@staticmethod
def first_words(string, n_words):
"""returns first n words of a string
e.g. first_words('abc def ghi', 2) ==> 'abc def'
"""
words = string.split()
return ' '.join(words[0:min(n_words, len(words))])
@staticmethod
def first_characters(string, n_letters):
"""returns first n letters of a string
e.g. first_characters('abc def ghi', 2) ==> 'ab'
"""
letters = list(string)
return ''.join(letters[0:min(n_letters, len(letters))])
def as_dict(self):
return self.__dict__
class Process(object):
def __init__(self, top_line):
prinfo_as_list = top_line.lstrip().split()
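        # default `top` columns: PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND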
self.pid = prinfo_as_list[0]
self.user = prinfo_as_list[1]
self.cpu = float(prinfo_as_list[8])
self.mem = float(prinfo_as_list[9])
self.command = ' '.join(prinfo_as_list[11:])
def as_dict(self):
return self.__dict__
| 47.392265 | 148 | 0.6199 | 17,134 | 0.998718 | 0 | 0 | 2,015 | 0.117452 | 0 | 0 | 8,467 | 0.49353 |
e01a18c1d0d2ecbc1fcb6159c9f9c87becb0c6cc | 1,458 | py | Python | venv/Lib/site-packages/zmq/tests/test_draft.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | ["MIT"] | 603 | 2020-12-23T13:49:32.000Z | 2022-03-31T23:38:03.000Z | venv/Lib/site-packages/zmq/tests/test_draft.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | ["MIT"] | 387 | 2020-12-15T14:54:04.000Z | 2022-03-31T07:00:21.000Z | venv/Lib/site-packages/zmq/tests/test_draft.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | ["MIT"] | 35 | 2021-03-26T03:12:04.000Z | 2022-03-23T10:15:10.000Z |
# -*- coding: utf8 -*-
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import os
import platform
import time
import pytest
import zmq
from zmq.tests import BaseZMQTestCase, skip_pypy
class TestDraftSockets(BaseZMQTestCase):
def setUp(self):
if not zmq.DRAFT_API:
raise pytest.skip("draft api unavailable")
super(TestDraftSockets, self).setUp()
def test_client_server(self):
client, server = self.create_bound_pair(zmq.CLIENT, zmq.SERVER)
client.send(b'request')
msg = self.recv(server, copy=False)
assert msg.routing_id is not None
server.send(b'reply', routing_id=msg.routing_id)
reply = self.recv(client)
assert reply == b'reply'
def test_radio_dish(self):
dish, radio = self.create_bound_pair(zmq.DISH, zmq.RADIO)
dish.rcvtimeo = 250
group = 'mygroup'
dish.join(group)
received_count = 0
received = set()
sent = set()
for i in range(10):
msg = str(i).encode('ascii')
sent.add(msg)
radio.send(msg, group=group)
try:
recvd = dish.recv()
except zmq.Again:
time.sleep(0.1)
else:
received.add(recvd)
received_count += 1
# assert that we got *something*
assert len(received.intersection(sent)) >= 5
| 29.16 | 71 | 0.593964 | 1,226 | 0.840878 | 0 | 0 | 0 | 0 | 0 | 0 | 209 | 0.143347 |
e01a5f16e11613ae6cff496ae606faff7b1d0e27 | 460 | py | Python | home/push/mipush/APIError.py | he0119/smart-home | bdd3a59a8c46c0fdc07ac3049bf589c7f95a2683 | ["MIT"] | null | null | null | home/push/mipush/APIError.py | he0119/smart-home | bdd3a59a8c46c0fdc07ac3049bf589c7f95a2683 | ["MIT"] | 223 | 2020-02-21T06:16:56.000Z | 2022-03-01T22:24:19.000Z | home/push/mipush/APIError.py | he0119/smart-home | bdd3a59a8c46c0fdc07ac3049bf589c7f95a2683 | ["MIT"] | null | null | null |
class APIError(Exception):
"""
raise APIError if receiving json message indicating failure.
"""
def __init__(self, error_code, error, request):
self.error_code = error_code
self.error = error
self.request = request
Exception.__init__(self, error)
def __str__(self):
return "APIError: %s: %s, request: %s" % (
self.error_code,
self.error,
self.request,
)
| 25.555556 | 64 | 0.576087 | 459 | 0.997826 | 0 | 0 | 0 | 0 | 0 | 0 | 107 | 0.232609 |
e01cbf8a1a1ab981a1d993596c3a332451dcd74d | 367 | py | Python | pythonlibs/mantis/templates/webapp/src/webapp/base.py | adoggie/Tibet.6 | 3c53060edafd80b9c4dafa10699a68d86a410c66 | ["MIT"] | 22 | 2019-10-28T07:28:12.000Z | 2022-03-19T15:36:41.000Z | AliceBackend/src/AliceBackend/base.py | adoggie/Tibet.6 | 3c53060edafd80b9c4dafa10699a68d86a410c66 | ["MIT"] | 1 | 2019-11-07T04:54:14.000Z | 2019-11-07T07:12:48.000Z | AliceBackend/src/AliceBackend/base.py | adoggie/Tibet.6 | 3c53060edafd80b9c4dafa10699a68d86a410c66 | ["MIT"] | 13 | 2019-10-28T07:29:07.000Z | 2021-11-03T06:53:12.000Z |
#coding:utf-8
class SystemDeviceType(object):
    InnerBox = 1  # indoor main unit (detached main screen)
    InnerScreen = 2  # indoor screen (detached main screen)
    OuterBox = 3  # outdoor unit
    PropCallApp = 4  # property-management duty app
    PropSentryApp = 5  # property-management sentry-booth unit
Others = 10
ValidatedList = (1,2,3,4,5)
class Constants(object):
SUPER_ACCESS_TOKEN = 'YTU3NzVlYjktYjQwMi00MGY2LTkxZjktYWMxYjIxZjM4NjNlCg ==' | 24.466667 | 80 | 0.643052 | 407 | 0.957647 | 0 | 0 | 0 | 0 | 0 | 0 | 165 | 0.388235 |
e01cd6185b052b2c9153c8eec135e9e3a2cf7572 | 667 | py | Python | base/site-packages/django_qbe/urls.py | edisonlz/fastor | 342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3 | [
"Apache-2.0"
] | 285 | 2019-12-23T09:50:21.000Z | 2021-12-08T09:08:49.000Z | base/site-packages/django_qbe/urls.py | jeckun/fastor | 342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3 | [
"Apache-2.0"
] | null | null | null | base/site-packages/django_qbe/urls.py | jeckun/fastor | 342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3 | [
"Apache-2.0"
] | 9 | 2019-12-23T12:59:25.000Z | 2022-03-15T05:12:11.000Z | # -*- coding: utf-8 -*-
from django.conf.urls.defaults import patterns, url
from django_qbe.exports import formats
urlpatterns = patterns('django_qbe.views',
url(r'^$', 'qbe_form', name="qbe_form"),
url(r'^js/$', 'qbe_js', name="qbe_js"),
url(r'^results/bookmark/$',
'qbe_bookmark', name="qbe_bookmark"),
url(r'^results/export/(?P<format>(%s))/$' % "|".join(formats.keys()),
'qbe_export', name="qbe_export"),
url(r'^results/proxy/$',
'qbe_proxy', name="qbe_proxy"),
url(r'^results/(?P<query_hash>(.*))/$',
'qbe_results', name="qbe_results"),
url(r'^auto/$', 'qbe_autocomplete', name="qbe_autocomplete"),
)
| 37.055556 | 73 | 0.611694 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 351 | 0.526237 |
e01d041f8b5c1564d154529462e58e50b56f4910 | 5,264 | py | Python | augment.py | docongminh/Text-Image-Augmentation-python | da27e8346ce2339f801335923faf7b14e026fd90 | ["Apache-2.0"] | 217 | 2020-02-09T07:44:18.000Z | 2022-03-24T03:52:51.000Z | ocraug/augment.py | lzmisscc/Text-Image-Augmentation-python | 12f104452e939444eb0fd4ac96143b78d091845b | ["Apache-2.0"] | 5 | 2020-03-23T02:24:33.000Z | 2022-03-13T07:02:04.000Z | ocraug/augment.py | lzmisscc/Text-Image-Augmentation-python | 12f104452e939444eb0fd4ac96143b78d091845b | ["Apache-2.0"] | 42 | 2020-02-10T06:42:31.000Z | 2022-03-13T11:54:18.000Z |
# -*- coding:utf-8 -*-
# Author: RubanSeven
# import cv2
import numpy as np
# from transform import get_perspective_transform, warp_perspective
from warp_mls import WarpMLS
def distort(src, segment):
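    # Jitter evenly spaced control points in both x and y, then warp the image with
    # moving least squares (WarpMLS) to create smooth local distortions.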
img_h, img_w = src.shape[:2]
cut = img_w // segment
thresh = cut // 3
# thresh = img_h // segment // 3
# thresh = img_h // 5
src_pts = list()
dst_pts = list()
src_pts.append([0, 0])
src_pts.append([img_w, 0])
src_pts.append([img_w, img_h])
src_pts.append([0, img_h])
dst_pts.append([np.random.randint(thresh), np.random.randint(thresh)])
dst_pts.append([img_w - np.random.randint(thresh), np.random.randint(thresh)])
dst_pts.append([img_w - np.random.randint(thresh), img_h - np.random.randint(thresh)])
dst_pts.append([np.random.randint(thresh), img_h - np.random.randint(thresh)])
half_thresh = thresh * 0.5
for cut_idx in np.arange(1, segment, 1):
src_pts.append([cut * cut_idx, 0])
src_pts.append([cut * cut_idx, img_h])
dst_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
np.random.randint(thresh) - half_thresh])
dst_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
img_h + np.random.randint(thresh) - half_thresh])
trans = WarpMLS(src, src_pts, dst_pts, img_w, img_h)
dst = trans.generate()
return dst
def stretch(src, segment):
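    # Shift interior control points horizontally only, locally stretching or
    # squeezing the image while keeping its height unchanged.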
img_h, img_w = src.shape[:2]
cut = img_w // segment
thresh = cut * 4 // 5
# thresh = img_h // segment // 3
# thresh = img_h // 5
src_pts = list()
dst_pts = list()
src_pts.append([0, 0])
src_pts.append([img_w, 0])
src_pts.append([img_w, img_h])
src_pts.append([0, img_h])
dst_pts.append([0, 0])
dst_pts.append([img_w, 0])
dst_pts.append([img_w, img_h])
dst_pts.append([0, img_h])
half_thresh = thresh * 0.5
for cut_idx in np.arange(1, segment, 1):
move = np.random.randint(thresh) - half_thresh
src_pts.append([cut * cut_idx, 0])
src_pts.append([cut * cut_idx, img_h])
dst_pts.append([cut * cut_idx + move, 0])
dst_pts.append([cut * cut_idx + move, img_h])
trans = WarpMLS(src, src_pts, dst_pts, img_w, img_h)
dst = trans.generate()
return dst
def perspective(src):
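    # Move the four corners vertically by random amounts to simulate a perspective tilt.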
img_h, img_w = src.shape[:2]
thresh = img_h // 2
src_pts = list()
dst_pts = list()
src_pts.append([0, 0])
src_pts.append([img_w, 0])
src_pts.append([img_w, img_h])
src_pts.append([0, img_h])
dst_pts.append([0, np.random.randint(thresh)])
dst_pts.append([img_w, np.random.randint(thresh)])
dst_pts.append([img_w, img_h - np.random.randint(thresh)])
dst_pts.append([0, img_h - np.random.randint(thresh)])
trans = WarpMLS(src, src_pts, dst_pts, img_w, img_h)
dst = trans.generate()
return dst
# def distort(src, segment):
# img_h, img_w = src.shape[:2]
# dst = np.zeros_like(src, dtype=np.uint8)
#
# cut = img_w // segment
# thresh = img_h // 8
#
# src_pts = list()
# # dst_pts = list()
#
# src_pts.append([-np.random.randint(thresh), -np.random.randint(thresh)])
# src_pts.append([-np.random.randint(thresh), img_h + np.random.randint(thresh)])
#
# # dst_pts.append([0, 0])
# # dst_pts.append([0, img_h])
# dst_box = np.array([[0, 0], [0, img_h], [cut, 0], [cut, img_h]], dtype=np.float32)
#
# half_thresh = thresh * 0.5
#
# for cut_idx in np.arange(1, segment, 1):
# src_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
# np.random.randint(thresh) - half_thresh])
# src_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
# img_h + np.random.randint(thresh) - half_thresh])
#
# # dst_pts.append([cut * i, 0])
# # dst_pts.append([cut * i, img_h])
#
# src_box = np.array(src_pts[-4:-2] + src_pts[-2:-1] + src_pts[-1:], dtype=np.float32)
#
# # mat = cv2.getPerspectiveTransform(src_box, dst_box)
# # print(mat)
# # dst[:, cut * (cut_idx - 1):cut * cut_idx] = cv2.warpPerspective(src, mat, (cut, img_h))
#
# mat = get_perspective_transform(dst_box, src_box)
# dst[:, cut * (cut_idx - 1):cut * cut_idx] = warp_perspective(src, mat, (cut, img_h))
# # print(mat)
#
# src_pts.append([img_w + np.random.randint(thresh) - half_thresh,
# np.random.randint(thresh) - half_thresh])
# src_pts.append([img_w + np.random.randint(thresh) - half_thresh,
# img_h + np.random.randint(thresh) - half_thresh])
# src_box = np.array(src_pts[-4:-2] + src_pts[-2:-1] + src_pts[-1:], dtype=np.float32)
#
# # mat = cv2.getPerspectiveTransform(src_box, dst_box)
# # dst[:, cut * (segment - 1):] = cv2.warpPerspective(src, mat, (img_w - cut * (segment - 1), img_h))
# mat = get_perspective_transform(dst_box, src_box)
# dst[:, cut * (segment - 1):] = warp_perspective(src, mat, (img_w - cut * (segment - 1), img_h))
#
# return dst
| 33.74359 | 107 | 0.586816 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,442 | 0.463906 |
e01de102906f7a6f8c39855d08b6adaa53f5663c | 1,347 | py | Python | Graph/print all paths from two vertices in a directed graph.py | ikaushikpal/DS-450-python | 9466f77fb9db9e6a5bb3f20aa89ba6332f49e848 | ["MIT"] | 3 | 2021-06-28T12:04:19.000Z | 2021-09-07T07:23:41.000Z | Graph/print all paths from two vertices in a directed graph.py | SupriyoDam/DS-450-python | 5dc21ce61b3279e9bd9d6ef3ad236667227ca283 | ["MIT"] | null | null | null | Graph/print all paths from two vertices in a directed graph.py | SupriyoDam/DS-450-python | 5dc21ce61b3279e9bd9d6ef3ad236667227ca283 | ["MIT"] | 1 | 2021-06-28T15:42:55.000Z | 2021-06-28T15:42:55.000Z |
from collections import defaultdict
class Graph:
def __init__(self):
self.graph = defaultdict(list)
def addEdge(self, starting_vertex, end_vertex):
self.graph[starting_vertex].append(end_vertex)
def printAllPaths(self, starting_vertex, target_vertex):
visitedVertices = defaultdict(bool)
self.resultPaths = []
self.dfsUtil(starting_vertex, visitedVertices, target_vertex, "")
return self.resultPaths
def dfsUtil(self, current_vertex, visitedVertices, target_vertex, output_string):
visitedVertices[current_vertex] = True
if output_string == "":
output_string = current_vertex
else:
output_string = output_string + "->" + current_vertex
if current_vertex == target_vertex:
self.resultPaths.append(output_string)
return
for vertex in self.graph[current_vertex]:
if visitedVertices[vertex] == False:
self.dfsUtil(vertex, visitedVertices, target_vertex, output_string)
visitedVertices[vertex] = False
if __name__ == "__main__":
g = Graph()
g.addEdge("A", "B")
g.addEdge("B", "D")
g.addEdge("A", "D")
g.addEdge("C", "A")
g.addEdge("C", "B")
g.addEdge("A", "C")
paths = g.printAllPaths("A", "B")
print(paths)
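    # expected output: ['A->B', 'A->C->B']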
| 28.659574 | 85 | 0.628062 | 1,063 | 0.789161 | 0 | 0 | 0 | 0 | 0 | 0 | 60 | 0.044543 |
e01e00717692398432049be3d51d551f012c222e | 1,958 | py | Python | tests/pipegeojson_test/test_pipegeojson.py | kamyarrasta/berrl | 1cf2ba8194498ec8f80d2908399ad00f1e963d83 | ["Apache-2.0"] | 1 | 2016-03-04T18:30:48.000Z | 2016-03-04T18:30:48.000Z | tests/pipegeojson_test/test_pipegeojson.py | kamyarrasta/berrl | 1cf2ba8194498ec8f80d2908399ad00f1e963d83 | ["Apache-2.0"] | null | null | null | tests/pipegeojson_test/test_pipegeojson.py | kamyarrasta/berrl | 1cf2ba8194498ec8f80d2908399ad00f1e963d83 | ["Apache-2.0"] | null | null | null |
# testing the output of pipegeojson against different input types
import berrl as bl
import itertools
# making line with csv file location
line1=bl.make_line('csvs/line_example.csv')
# making line with list
testlist=bl.read('csvs/line_example.csv')
line2=bl.make_line(testlist,list=True)
# testing each line geojson against each other
ind=0
for a,b in itertools.izip(line1,line2):
if not a==b:
ind=1
# carrying the passing of status down to the test for the rest
if ind==0:
passing=0
else:
passing=1
# making points with csv file location
points1=bl.make_line('csvs/points_example.csv')
# making points with list
testlist=bl.read('csvs/points_example.csv')
points2=bl.make_line(testlist,list=True)
# testing each points geojson against each other
ind=0
for a,b in itertools.izip(points1,points2):
if not a==b:
ind=1
# carrying the passing of status down to the test for the rest
if ind==0 and passing==0:
passing=0
else:
passing=1
# making blocks with csv file location
blocks1=bl.make_line('csvs/blocks_example.csv')
# making blocks with list
testlist=bl.read('csvs/blocks_example.csv')
blocks2=bl.make_line(testlist,list=True)
# testing each bloocks geojson against each other
ind=0
for a,b in itertools.izip(blocks1,blocks2):
if not a==b:
ind=1
# carrying the passing of status down to the test for the rest
if ind==0 and passing==0:
passing=0
else:
passing=1
# making blocks with csv file location
polygon1=bl.make_line('csvs/polygon_example.csv')
# making blocks with list
testlist=bl.read('csvs/polygon_example.csv')
polygon2=bl.make_line(testlist,list=True)
# testing each bloocks geojson against each other
ind=0
for a,b in itertools.izip(polygon1,polygon2):
if not a==b:
ind=1
# carrying the passing of status down to the test for the rest
if ind==0 and passing==0:
passing=0
else:
passing=1
# printing output result
if passing==0:
print 'pipegeojson build passed'
else:
print 'pipegeojson build failed'
| 22.25 | 65 | 0.759959 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,031 | 0.526558 |
e01f044aab30cbd5165bae297a319d57b579704e | 912 | py | Python | tierpsy/debugging/catch_infinite_loop.py | mgh17/tierpsy-tracker | a18c06aa80a5fb22fd51563d82c639b520742777 | ["MIT"] | 9 | 2021-01-11T10:49:21.000Z | 2022-02-28T15:48:00.000Z | tierpsy/debugging/catch_infinite_loop.py | mgh17/tierpsy-tracker | a18c06aa80a5fb22fd51563d82c639b520742777 | ["MIT"] | 18 | 2020-05-08T15:43:08.000Z | 2022-03-23T10:19:24.000Z | tierpsy/debugging/catch_infinite_loop.py | mgh17/tierpsy-tracker | a18c06aa80a5fb22fd51563d82c639b520742777 | ["MIT"] | 10 | 2019-12-18T12:10:12.000Z | 2022-01-05T09:12:47.000Z |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 8 16:19:07 2017
@author: ajaver
"""
import os
import cv2
import sys
import glob
import threading
from functools import partial
main_dir = '/Volumes/behavgenom_archive$/Celine/raw/'
fnames = glob.glob(os.path.join(main_dir, '**', '*.avi'))
fnames = [x for x in fnames if not x.endswith('_seg.avi')]
fnames = sorted(fnames)
def get_and_release(video_file):
original = sys.stderr
f = open(os.devnull, 'w')
sys.stderr = f
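    # stderr is pointed at devnull so error output while probing the video is suppressed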
print('here')
vid = cv2.VideoCapture(video_file)
vid.release()
sys.stderr = original
return vid
all_threads = []
for ii, video_file in enumerate(fnames):
print(ii, video_file)
vid = cv2.VideoCapture(video_file)
vid.release()
t = threading.Thread(target = partial(get_and_release, video_file))
t.start()
all_threads.append((video_file, t))
| 21.714286 | 71 | 0.663377 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 177 | 0.194079 |
e0201884251a727105b3a8b3946ca3bc3aefd73d | 480 | py | Python | devito/passes/iet/languages/C.py | guaacoelho/devito | 7e0b873114675752c4a49ed9076ee5d52997833c | ["MIT"] | 199 | 2016-08-18T23:33:05.000Z | 2019-12-24T07:08:48.000Z | devito/passes/iet/languages/C.py | guaacoelho/devito | 7e0b873114675752c4a49ed9076ee5d52997833c | ["MIT"] | 949 | 2016-04-25T11:41:34.000Z | 2019-12-27T10:43:40.000Z | devito/passes/iet/languages/C.py | guaacoelho/devito | 7e0b873114675752c4a49ed9076ee5d52997833c | ["MIT"] | 78 | 2016-08-30T07:42:34.000Z | 2019-12-13T20:34:45.000Z |
from devito.ir import Call
from devito.passes.iet.definitions import DataManager
from devito.passes.iet.langbase import LangBB
__all__ = ['CBB', 'CDataManager']
class CBB(LangBB):
mapper = {
'aligned': lambda i:
'__attribute__((aligned(%d)))' % i,
'host-alloc': lambda i, j, k:
Call('posix_memalign', (i, j, k)),
'host-free': lambda i:
Call('free', (i,)),
}
class CDataManager(DataManager):
lang = CBB
| 21.818182 | 53 | 0.591667 | 312 | 0.65 | 0 | 0 | 0 | 0 | 0 | 0 | 103 | 0.214583 |
e0215d4c222f248ad7105000615a748c88340354 | 2,026 | py | Python | tests/_test_image.py | Freakwill/ell | 8aa510cefb5d63db35071820208971013fac154c | ["MIT"] | null | null | null | tests/_test_image.py | Freakwill/ell | 8aa510cefb5d63db35071820208971013fac154c | ["MIT"] | null | null | null | tests/_test_image.py | Freakwill/ell | 8aa510cefb5d63db35071820208971013fac154c | ["MIT"] | null | null | null |
#!/usr/bin/env python3
"""Test methods about image process
Make sure the existance of the images
"""
from ell import *
import numpy as np
_filter = Filter.from_name('db4')
def test_resize():
chennal=0
c = ImageRGB.open('src/lenna.jpg')
d=c.resize(minInd=(-100,-100), maxInd=(100,100))
d.to_image()
assert True
def test_quantize():
im = ImageRGB.open('src/lenna.jpg')
d = im.quantize(128)
d.to_image()
assert True
def test_convolve():
im = ImageRGB.open('src/lenna.jpg')
d = (im @ _filter.H).D
# print(f"{d:i}, {d.shape}")
assert True
def test_filter():
im = ImageRGB.open('src/lenna.jpg')
rec = (im @ _filter.H).D.U @ _filter
assert True
def test_rec():
im = ImageRGB.open('src/lenna.jpg')
def _f(im, h1, h2=None):
if h2 is None: h2 = h1
return (im.conv1d(h1.H, axis=0).conv1d(h2.H, axis=1)).P.conv1d(h1, axis=0).conv1d(h2, axis=1)
rec = _f(im, _filter) + _f(im, _filter.H) + _f(im, _filter, _filter.H) + _f(im, _filter.H, _filter)
assert True
def test_rec2():
im = ImageRGB.open('../src/lenna.jpg')
def _f(im, h1, h2=None):
if h2 is None: h2 = h1
# return (im @ h1.tensor(h2).H).P @ h1.tensor(h2)
return (im.conv1d(h1.H, axis=0).conv1d(h2.H, axis=1)).P.conv1d(h1, axis=0).conv1d(h2, axis=1)
im1 = _f(im, _filter)
rec1 = _f(im1, _filter) + _f(im1, _filter.H) + _f(im1, _filter, _filter.H) + _f(im1, _filter.H, _filter)
rec2 = rec1 + _f(im, _filter.H) + _f(im, _filter, _filter.H) + _f(im, _filter.H, _filter)
assert True
def test_rec3():
im = ImageRGB.open('src/lenna.jpg')
def _f(im, h1, h2=None):
if h2 is None: h2 = h1
f = h1.tensor(h2)
return im.reduce(f).expand(f)
im1 = im.reduce(_filter)
rec1 = _f(im1, _filter) + _f(im1, _filter.H) + _f(im1, _filter, _filter.H) + _f(im1, _filter.H, _filter)
rec2 = rec1.expand(_filter) + _f(im, _filter.H) + _f(im, _filter, _filter.H) + _f(im, _filter.H, _filter)
assert True
| 28.535211 | 109 | 0.605133 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 290 | 0.143139 |
e02439282d17416800f4bfd8e050f404bc4d7706 | 5,991 | py | Python | donkeycar/parts/pytorch/torch_data.py | adricl/donkeycar | 8eb2705ed4161c0d6a9cfd9c7b0a1c0ca5abaeef | ["MIT"] | 1,100 | 2017-01-18T16:08:33.000Z | 2018-11-04T00:42:54.000Z | donkeycar/parts/pytorch/torch_data.py | adricl/donkeycar | 8eb2705ed4161c0d6a9cfd9c7b0a1c0ca5abaeef | ["MIT"] | 199 | 2016-12-20T07:45:16.000Z | 2018-11-01T02:30:12.000Z | donkeycar/parts/pytorch/torch_data.py | adricl/donkeycar | 8eb2705ed4161c0d6a9cfd9c7b0a1c0ca5abaeef | ["MIT"] | 521 | 2017-01-10T21:53:24.000Z | 2018-11-01T18:17:52.000Z |
# PyTorch
import torch
from torch.utils.data import IterableDataset, DataLoader
from donkeycar.utils import train_test_split
from donkeycar.parts.tub_v2 import Tub
from torchvision import transforms
from typing import List, Any
from donkeycar.pipeline.types import TubRecord, TubDataset
from donkeycar.pipeline.sequence import TubSequence
import pytorch_lightning as pl
def get_default_transform(for_video=False, for_inference=False, resize=True):
"""
Creates a default transform to work with torchvision models
Video transform:
All pre-trained models expect input images normalized in the same way,
i.e. mini-batches of 3-channel RGB videos of shape (3 x T x H x W),
where H and W are expected to be 112, and T is a number of video frames
in a clip. The images have to be loaded in to a range of [0, 1] and
then normalized using mean = [0.43216, 0.394666, 0.37645] and
std = [0.22803, 0.22145, 0.216989].
"""
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
input_size = (224, 224)
if for_video:
mean = [0.43216, 0.394666, 0.37645]
std = [0.22803, 0.22145, 0.216989]
input_size = (112, 112)
transform_items = [
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std)
]
if resize:
transform_items.insert(0, transforms.Resize(input_size))
return transforms.Compose(transform_items)
class TorchTubDataset(IterableDataset):
'''
Loads the dataset, and creates a train/test split.
'''
def __init__(self, config, records: List[TubRecord], transform=None):
"""Create a PyTorch Tub Dataset
Args:
config (object): the configuration information
records (List[TubRecord]): a list of tub records
transform (function, optional): a transform to apply to the data
"""
self.config = config
# Handle the transforms
if transform:
self.transform = transform
else:
self.transform = get_default_transform()
self.sequence = TubSequence(records)
self.pipeline = self._create_pipeline()
self.len = len(records)
def _create_pipeline(self):
""" This can be overridden if more complicated pipelines are
required """
def y_transform(record: TubRecord):
angle: float = record.underlying['user/angle']
throttle: float = record.underlying['user/throttle']
predictions = torch.tensor([angle, throttle], dtype=torch.float)
# Normalize to be between [0, 1]
# angle and throttle are originally between [-1, 1]
predictions = (predictions + 1) / 2
return predictions
def x_transform(record: TubRecord):
# Loads the result of Image.open()
img_arr = record.image(cached=True, as_nparray=False)
return self.transform(img_arr)
# Build pipeline using the transformations
pipeline = self.sequence.build_pipeline(x_transform=x_transform,
y_transform=y_transform)
return pipeline
def __len__(self):
return len(self.sequence)
def __iter__(self):
return iter(self.pipeline)
class TorchTubDataModule(pl.LightningDataModule):
def __init__(self, config: Any, tub_paths: List[str], transform=None):
"""Create a PyTorch Lightning Data Module to contain all data loading logic
Args:
config (object): the configuration information
tub_paths (List[str]): a list of paths to the tubs to use (minimum size of 1).
Each tub path corresponds to another training run.
transform (function, optional): a transform to apply to the data
"""
super().__init__()
self.config = config
self.tub_paths = tub_paths
# Handle the transforms
if transform:
self.transform = transform
else:
self.transform = get_default_transform()
self.tubs: List[Tub] = [Tub(tub_path, read_only=True)
for tub_path in self.tub_paths]
self.records: List[TubRecord] = []
def setup(self, stage=None):
"""Load all the tub data and set up the datasets.
Args:
stage ([string], optional): setup expects a string arg stage.
It is used to separate setup logic for trainer.fit
and trainer.test. Defaults to None.
"""
# Loop through all the different tubs and load all the records for each of them
for tub in self.tubs:
for underlying in tub:
record = TubRecord(self.config, tub.base_path,
underlying=underlying)
self.records.append(record)
train_records, val_records = train_test_split(
self.records, test_size=(1. - self.config.TRAIN_TEST_SPLIT))
assert len(val_records) > 0, "Not enough validation data. Add more data"
self.train_dataset = TorchTubDataset(
self.config, train_records, transform=self.transform)
self.val_dataset = TorchTubDataset(
self.config, val_records, transform=self.transform)
def train_dataloader(self):
# The number of workers are set to 0 to avoid errors on Macs and Windows
# See: https://github.com/rusty1s/pytorch_geometric/issues/366#issuecomment-498022534
return DataLoader(self.train_dataset, batch_size=self.config.BATCH_SIZE, num_workers=0)
def val_dataloader(self):
# The number of workers are set to 0 to avoid errors on Macs and Windows
# See: https://github.com/rusty1s/pytorch_geometric/issues/366#issuecomment-498022534
return DataLoader(self.val_dataset, batch_size=self.config.BATCH_SIZE, num_workers=0)
| 37.21118 | 95 | 0.633951 | 4,556 | 0.760474 | 0 | 0 | 0 | 0 | 0 | 0 | 2,320 | 0.387248 |
e0251d7f1fc5a3340792a778c923482bb49bcf13 | 316 | py | Python | lite/__init__.py | CleverInsight/sparx-lite | 1b729e8d11292e9737d57e092ee8916999ab1338 | ["MIT"] | null | null | null | lite/__init__.py | CleverInsight/sparx-lite | 1b729e8d11292e9737d57e092ee8916999ab1338 | ["MIT"] | null | null | null | lite/__init__.py | CleverInsight/sparx-lite | 1b729e8d11292e9737d57e092ee8916999ab1338 | ["MIT"] | null | null | null |
import os
from tornado.template import Template
__SNIPPET__ = os.path.join(os.path.dirname(os.path.abspath(__file__)), '_snippet')
def T(name, **kw):
t = Template(open(os.path.join(__SNIPPET__, name + '.html'), 'rb').read())
return t.generate(**dict([('template_file', name)] + globals().items() + kw.items()))
| 31.6 | 86 | 0.686709 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.113924 |
e025cd2fbcd0226b08e7474394109f24f199f13c | 3,857 | py | Python | homeassistant/components/sensor/hddtemp.py | mdonoughe/home-assistant | d9805160bc787146bff0c434fdcab995716f0f8c | ["Apache-2.0"] | 2 | 2020-02-20T18:47:55.000Z | 2021-11-09T11:33:28.000Z | homeassistant/components/sensor/hddtemp.py | mdonoughe/home-assistant | d9805160bc787146bff0c434fdcab995716f0f8c | ["Apache-2.0"] | 1 | 2021-02-08T20:56:06.000Z | 2021-02-08T20:56:06.000Z | homeassistant/components/sensor/hddtemp.py | diophung/home-assistant | a5aa1118937702ca8bec050614ee52dc14f8466b | ["Apache-2.0"] | 1 | 2020-11-21T09:37:47.000Z | 2020-11-21T09:37:47.000Z |
"""
Support for getting the disk temperature of a host.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.hddtemp/
"""
import logging
from datetime import timedelta
from telnetlib import Telnet
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME, CONF_HOST, CONF_PORT, TEMP_CELSIUS, TEMP_FAHRENHEIT, CONF_DISKS)
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
ATTR_DEVICE = 'device'
ATTR_MODEL = 'model'
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 7634
DEFAULT_NAME = 'HD Temperature'
DEFAULT_TIMEOUT = 5
SCAN_INTERVAL = timedelta(minutes=1)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_DISKS, default=[]): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the HDDTemp sensor."""
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
disks = config.get(CONF_DISKS)
hddtemp = HddTempData(host, port)
hddtemp.update()
if hddtemp.data is None:
return False
if not disks:
disks = [next(iter(hddtemp.data)).split('|')[0]]
dev = []
for disk in disks:
if disk in hddtemp.data:
dev.append(HddTempSensor(name, disk, hddtemp))
add_devices(dev, True)
class HddTempSensor(Entity):
"""Representation of a HDDTemp sensor."""
def __init__(self, name, disk, hddtemp):
"""Initialize a HDDTemp sensor."""
self.hddtemp = hddtemp
self.disk = disk
self._name = '{} {}'.format(name, disk)
self._state = None
self._details = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
if self._details[3] == 'C':
return TEMP_CELSIUS
return TEMP_FAHRENHEIT
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
return {
ATTR_DEVICE: self._details[0],
ATTR_MODEL: self._details[1],
}
def update(self):
"""Get the latest data from HDDTemp daemon and updates the state."""
self.hddtemp.update()
if self.hddtemp.data and self.disk in self.hddtemp.data:
self._details = self.hddtemp.data[self.disk].split('|')
self._state = self._details[2]
else:
self._state = None
class HddTempData(object):
"""Get the latest data from HDDTemp and update the states."""
def __init__(self, host, port):
"""Initialize the data object."""
self.host = host
self.port = port
self.data = None
def update(self):
"""Get the latest data from HDDTemp running as daemon."""
try:
connection = Telnet(
host=self.host, port=self.port, timeout=DEFAULT_TIMEOUT)
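            # hddtemp replies with entries like |/dev/sda|MODEL|35|C| concatenated together,
            # so strip the outer pipes and split on '||' to get one entry per disk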
data = connection.read_all().decode(
'ascii').lstrip('|').rstrip('|').split('||')
self.data = {data[i].split('|')[0]: data[i]
for i in range(0, len(data), 1)}
except ConnectionRefusedError:
_LOGGER.error(
"HDDTemp is not available at %s:%s", self.host, self.port)
self.data = None
| 29.219697 | 79 | 0.637283 | 2,167 | 0.561836 | 0 | 0 | 620 | 0.160747 | 0 | 0 | 796 | 0.206378 |
e026ba13c5f7c12090e3dee6c5f9a4f65eca3bb7 | 1,402 | py | Python | boomer.py | JohnnySn0w/BabbleBot | 03a383b063e4f28049f27f8ec669f22767ed8a87 | ["MIT"] | 1 | 2019-07-07T01:46:55.000Z | 2019-07-07T01:46:55.000Z | boomer.py | JohnnySn0w/BabbleBot | 03a383b063e4f28049f27f8ec669f22767ed8a87 | ["MIT"] | 1 | 2019-07-26T18:34:02.000Z | 2019-07-26T18:34:02.000Z | boomer.py | JohnnySn0w/BabbleBot | 03a383b063e4f28049f27f8ec669f22767ed8a87 | ["MIT"] | 1 | 2020-05-10T01:27:48.000Z | 2020-05-10T01:27:48.000Z |
import random
prefix = [
'Look at you! ',
'Bless ',
'Bless! ',
'I heard about that! ',
  'Amen! ',
  'You and the kids doing alright? ',
  'Miss ya\'ll! '
]
suffix = [
'. Amen!',
'. God bless america',
'. God bless!',
' haha',
'. love ya!',
'. love ya\'ll!',
]
def add_pre_suf(sentence):
if random.randint(1,10) <= 6:
if random.randint(1,10) <= 5:
sentence = prefix[random.randint(0, len(prefix) - 1)] + sentence
else:
sentence += suffix[random.randint(0, len(suffix) - 1)]
return sentence
def add_elipses(sentence):
words = sentence.split()
for i in range(4, len(words), 5):
if random.randint(1,10) <= 7:
words[i] += "..."
return " ".join(words)
def boomer_caps(sentence):
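    # Randomly "boomer-ifies" capitalization: seeds 1-3 leave the sentence unchanged,
    # 4-5 upper-case ~25% of words, 6-7 upper-case ~50%, 8-9 use Title Case, 10 is ALL CAPS.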
seed = random.randint(1, 10)
sent_array = sentence.split()
if seed in (1, 2, 3):
return sentence
elif seed in (4, 5):
temp_sent = []
for x in sent_array:
if random.random() < 0.25:
x = x.upper()
temp_sent.append(x)
return " ".join(temp_sent)
elif seed in (6, 7):
temp_sent = []
for x in sent_array:
if random.random() < 0.5:
x = x.upper()
temp_sent.append(x)
return " ".join(temp_sent)
elif seed in (8, 9):
return sentence.title()
elif seed == 10:
return sentence.upper()
| 23.366667 | 76 | 0.53067 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 201 | 0.143367 |
e026dd61a71f4c0236cf71cd04ff440228426371 | 1,303 | py | Python | bot/views.py | eyobofficial/COVID-19-Mutual-Aid | 42d30ce95b0e9c717c5eda3ecaafea2812ec34f7 | ["MIT"] | null | null | null | bot/views.py | eyobofficial/COVID-19-Mutual-Aid | 42d30ce95b0e9c717c5eda3ecaafea2812ec34f7 | ["MIT"] | 5 | 2020-03-19T17:49:50.000Z | 2021-06-10T20:06:14.000Z | bot/views.py | eyobofficial/COVID-19-Mutual-Aid | 42d30ce95b0e9c717c5eda3ecaafea2812ec34f7 | ["MIT"] | null | null | null |
import telegram
from django.conf import settings
from django.shortcuts import redirect
from django.utils.decorators import method_decorator
from django.views.generic import View
from django.views.decorators.csrf import csrf_exempt
from braces.views import CsrfExemptMixin
from rest_framework.authentication import BasicAuthentication
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.permissions import AllowAny
from .bots import TelegramBot
from .models import TelegramUser as User
@method_decorator(csrf_exempt, name='dispatch')
class TelegramBotView(APIView):
permission_classes = (AllowAny, )
def post(self, request, *args, **kwargs):
context = request.data
bot = TelegramBot(context)
user, _ = User.objects.get_or_create(
id=bot.sender['id'],
defaults={
'first_name': bot.sender['first_name'],
'last_name': bot.sender.get('last_name', ''),
'username': bot.sender.get('username', ''),
'is_bot': bot.sender.get('is_bot', False)
}
)
user.access_count += 1
user.save()
bot.process(user)
return Response(status=status.HTTP_200_OK)
| 29.613636 | 61 | 0.692249 | 672 | 0.515733 | 0 | 0 | 720 | 0.552571 | 0 | 0 | 100 | 0.076746 |
e026df5666a9c260f8a2d313e1edc3eee3cad4f7 | 9,053 | py | Python | code/counterfactual_generative_networks-main/imagenet/train_cgn.py | dummyxyz1/re_counterfactual_generative | 4dda8e17a1123a564d60be82c17e9589155fb2e2 | ["MIT"] | null | null | null | code/counterfactual_generative_networks-main/imagenet/train_cgn.py | dummyxyz1/re_counterfactual_generative | 4dda8e17a1123a564d60be82c17e9589155fb2e2 | ["MIT"] | null | null | null | code/counterfactual_generative_networks-main/imagenet/train_cgn.py | dummyxyz1/re_counterfactual_generative | 4dda8e17a1123a564d60be82c17e9589155fb2e2 | ["MIT"] | null | null | null |
import os
from datetime import datetime
from os.path import join
import pathlib
from tqdm import tqdm
import argparse
import torch
from torch import nn, optim
from torch.autograd import Variable
import torchvision
from torchvision.transforms import Pad
from torchvision.utils import make_grid
import repackage
repackage.up()
from imagenet.models import CGN
from imagenet.config import get_cfg_defaults
from shared.losses import *
from utils import Optimizers
from inception_score import *
def save_sample_sheet(cgn, u_fixed, sample_path, ep_str):
cgn.eval()
dev = u_fixed.to(cgn.get_device())
ys = [15, 251, 330, 382, 385, 483, 559, 751, 938, 947, 999]
to_save = []
with torch.no_grad():
for y in ys:
# generate
y_vec = cgn.get_class_vec(y, sz=1)
inp = (u_fixed.to(dev), y_vec.to(dev), cgn.truncation)
x_gt, mask, premask, foreground, background, bg_mask = cgn(inp)
x_gen = mask * foreground + (1 - mask) * background
# build class grid
to_plot = [premask, foreground, background, x_gen, x_gt]
grid = make_grid(torch.cat(to_plot).detach().cpu(),
nrow=len(to_plot), padding=2, normalize=True)
# add unnormalized mask
mask = Pad(2)(mask[0].repeat(3, 1, 1)).detach().cpu()
grid = torch.cat([mask, grid], 2)
# save to disk
to_save.append(grid)
del to_plot, mask, premask, foreground, background, x_gen, x_gt
# save the image
path = join(sample_path, f'cls_sheet_' + ep_str + '.png')
torchvision.utils.save_image(torch.cat(to_save, 1), path)
cgn.train()
def save_sample_single(cgn, u_fixed, sample_path, ep_str):
cgn.eval()
dev = u_fixed.to(cgn.get_device())
ys = [15, 251, 330, 382, 385, 483, 559, 751, 938, 947, 999]
with torch.no_grad():
for y in ys:
# generate
y_vec = cgn.get_class_vec(y, sz=1)
inp = (u_fixed.to(dev), y_vec.to(dev), cgn.truncation)
_, mask, premask, foreground, background, _ = cgn(inp)
x_gen = mask * foreground + (1 - mask) * background
# save_images
path = join(sample_path, f'{y}_1_premask_' + ep_str + '.png')
torchvision.utils.save_image(premask, path, normalize=True)
path = join(sample_path, f'{y}_2_mask_' + ep_str + '.png')
torchvision.utils.save_image(mask, path, normalize=True)
path = join(sample_path, f'{y}_3_texture_' + ep_str + '.png')
torchvision.utils.save_image(foreground, path, normalize=True)
path = join(sample_path, f'{y}_4_bgs_' + ep_str + '.png')
torchvision.utils.save_image(background, path, normalize=True)
path = join(sample_path, f'{y}_5_gen_ims_' + ep_str + '.png')
torchvision.utils.save_image(x_gen, path, normalize=True)
cgn.train()
def fit(cfg, cgn, opts, losses):
inception_score_val = list()
# total number of episodes, accounted for batch accumulation
episodes = cfg.TRAIN.EPISODES
episodes *= cfg.TRAIN.BATCH_ACC
# directories for experiments
time_str = datetime.now().strftime("%Y_%m_%d_%H_%M")
if cfg.WEIGHTS_PATH:
weights_path = str(pathlib.Path(cfg.WEIGHTS_PATH).parent)
start_ep = int(pathlib.Path(cfg.WEIGHTS_PATH).stem[3:])
sample_path = weights_path.replace('weights', 'samples')
ep_range = (start_ep, start_ep + episodes)
else:
model_path = join('imagenet', 'experiments',
f'cgn_{time_str}_{cfg.MODEL_NAME}')
weights_path = join(model_path, 'weights')
sample_path = join(model_path, 'samples')
pathlib.Path(weights_path).mkdir(parents=True, exist_ok=True)
pathlib.Path(sample_path).mkdir(parents=True, exist_ok=True)
ep_range = (0, episodes)
# fixed noise sample
u_fixed_path = join('imagenet', 'experiments', 'u_fixed.pt')
if not os.path.isfile(u_fixed_path) or cfg.LOG.SAMPLED_FIXED_NOISE:
u_fixed = cgn.get_noise_vec()
torch.save(u_fixed, u_fixed_path)
else:
u_fixed = torch.load(u_fixed_path)
# Training Loop
cgn.train()
L_l1, L_perc, L_binary, L_mask, L_text, L_bg = losses
save_samples = save_sample_single if cfg.LOG.SAVE_SINGLES else save_sample_sheet
pbar = tqdm(range(*ep_range))
for i, ep in enumerate(pbar):
x_gt, mask, premask, foreground, background, background_mask = cgn()
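        # cgn() yields the pretrained-GAN reference image (x_gt) plus the decomposed
        # shape mask, textured foreground, and background; composite them below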
x_gen = mask * foreground + (1 - mask) * background
# Losses
losses_g = {}
losses_g['l1'] = L_l1(x_gen, x_gt)
losses_g['perc'] = L_perc(x_gen, x_gt)
losses_g['binary'] = L_binary(mask)
losses_g['mask'] = L_mask(mask)
losses_g['perc_text'] = L_text(x_gt, mask, foreground)
losses_g['bg'] = L_bg(background_mask)
# backprop
losses_g = {k: v.mean() for k, v in losses_g.items()}
g_loss = sum(losses_g.values())
g_loss.backward()
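        # gradients accumulate over BATCH_ACC iterations before each optimizer step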
if (i+1) % cfg.TRAIN.BATCH_ACC == 0:
opts.step(['shape', 'bg', 'texture'])
# Saving
if not i % cfg.LOG.SAVE_ITER:
ep_str = f'ep_{ep:07}'
save_samples(cgn, u_fixed, sample_path, ep_str)
torch.save(cgn.state_dict(), join(weights_path, ep_str + '.pth'))
# Logging
if cfg.LOG.LOSSES:
msg = ''.join([f"[{k}: {v:.3f}]" for k, v in losses_g.items()])
pbar.set_description(msg)
        # Calculate Inception Score
if cfg.LOG.INCEPTION_SCORE:
score, score_std = inception_score(x_gen)
inception_score_val.append(score)
def main(cfg):
# model init
cgn = CGN(
batch_sz=cfg.TRAIN.BATCH_SZ,
truncation=cfg.MODEL.TRUNCATION,
pretrained=True,
)
print("------CGN-------")
print(cgn)
if cfg.WEIGHTS_PATH:
weights = torch.load(cfg.WEIGHTS_PATH)
weights = {k.replace('module.', ''): v for k, v in weights.items()}
cgn.load_state_dict(weights)
# optimizers
opts = Optimizers()
opts.set('shape', cgn.f_shape, cfg.LR.SHAPE)
opts.set('texture', cgn.f_text, cfg.LR.TEXTURE)
opts.set('bg', cgn.f_bg, cfg.LR.BG)
# losses
L_l1 = ReconstructionLoss(mode='l1', loss_weight=cfg.LAMBDA.L1)
L_perc = PerceptualLoss(style_wgts=cfg.LAMBDA.PERC)
L_binary = BinaryLoss(loss_weight=cfg.LAMBDA.BINARY)
L_mask = MaskLoss(loss_weight=cfg.LAMBDA.MASK)
L_text = PercLossText(style_wgts=cfg.LAMBDA.TEXT)
L_bg = BackgroundLoss(loss_weight=cfg.LAMBDA.BG)
losses = (L_l1, L_perc, L_binary, L_mask, L_text, L_bg)
# push to device and train
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
cgn = cgn.to(device)
losses = (l.to(device) for l in losses)
fit(cfg, cgn, opts, losses)
def merge_args_and_cfg(args, cfg):
cfg.MODEL_NAME = args.model_name
cfg.WEIGHTS_PATH = args.weights_path
cfg.LOG.SAMPLED_FIXED_NOISE = args.sampled_fixed_noise
cfg.LOG.SAVE_SINGLES = args.save_singles
cfg.LOG.SAVE_ITER = args.save_iter
cfg.LOG.LOSSES = args.log_losses
cfg.LOG.INCEPTION_SCORE = True
cfg.TRAIN.EPISODES = args.episodes
cfg.TRAIN.BATCH_SZ = args.batch_sz
cfg.TRAIN.BATCH_ACC = args.batch_acc
cfg.MODEL.TRUNCATION = args.truncation
return cfg
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--model_name', default='tmp',
help='Weights and samples will be saved under experiments/model_name')
parser.add_argument('--weights_path', default='',
help='provide path to continue training')
parser.add_argument('--sampled_fixed_noise', default=False, action='store_true',
help='If you want a different noise vector than provided in the repo')
parser.add_argument('--save_singles', default=False, action='store_true',
help='Save single images instead of sheets')
parser.add_argument('--truncation', type=float, default=1.0,
help='Truncation value for noise sampling')
parser.add_argument('--episodes', type=int, default=300,
help="We don't do dataloading, hence, one episode = one gradient update.")
parser.add_argument('--batch_sz', type=int, default=1,
help='Batch size, use in conjunciton with batch_acc')
parser.add_argument('--batch_acc', type=int, default=4000,
help='pseudo_batch_size = batch_acc*batch size')
parser.add_argument('--save_iter', type=int, default=4000,
help='Save samples/weights every n iter')
parser.add_argument('--log_losses', default=False, action='store_true',
help='Print out losses')
args = parser.parse_args()
cfg = get_cfg_defaults()
cfg = merge_args_and_cfg(args, cfg)
print(cfg)
main(cfg)
| 37.720833 | 98 | 0.629515 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,428 | 0.157738 |
e0274bb01109146cf480d663260e32b7e8a8cc2d | 580 | py | Python | portfolio/urls.py | ramza007/Ramza.io | 2172d9ac13e87becbc8644ad5755070f48fab8da | ["Apache-2.0"] | 3 | 2019-12-16T16:47:16.000Z | 2020-07-28T19:47:34.000Z | portfolio/urls.py | ramza007/Ramza.io | 2172d9ac13e87becbc8644ad5755070f48fab8da | ["Apache-2.0"] | 15 | 2019-12-05T03:38:19.000Z | 2022-03-13T02:35:30.000Z | portfolio/urls.py | ramza007/Ramza.io | 2172d9ac13e87becbc8644ad5755070f48fab8da | ["Apache-2.0"] | null | null | null |
from django.conf.urls import url
from django.urls import path, include,re_path
from . import views
from rest_framework.authtoken.views import obtain_auth_token
urlpatterns = [
path('', views.index, name='index'),
path('about', views.about, name='about'),
path('projects', views.projects, name='projects'),
path('photos', views.photos, name='photos'),
re_path(r'^api/projects/$', views.ProjectList.as_view()),
re_path(r'^api-token-auth/', obtain_auth_token),
re_path(r'api/project/project-id/(?P<pk>[0-9]+)/$', views.ProjectDescription.as_view()),
]
| 34.117647 | 92 | 0.7 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 138 | 0.237931 |
e027abc215159b586950e87882ad8ad4be055155 | 407 | py | Python | tests/resources/mlflow-test-plugin/mlflow_test_plugin/file_store.py | iPieter/kiwi | 76b66872fce68873809a0dea112e2ed552ae5b63 | ["Apache-2.0"] | null | null | null | tests/resources/mlflow-test-plugin/mlflow_test_plugin/file_store.py | iPieter/kiwi | 76b66872fce68873809a0dea112e2ed552ae5b63 | ["Apache-2.0"] | 1 | 2021-01-24T13:34:51.000Z | 2021-01-24T13:34:51.000Z | tests/resources/mlflow-test-plugin/mlflow_test_plugin/file_store.py | iPieter/kiwi | 76b66872fce68873809a0dea112e2ed552ae5b63 | ["Apache-2.0"] | null | null | null |
from six.moves import urllib
from kiwi.store.tracking.file_store import FileStore
class PluginFileStore(FileStore):
"""FileStore provided through entrypoints system"""
def __init__(self, store_uri=None, artifact_uri=None):
path = urllib.parse.urlparse(store_uri).path if store_uri else None
self.is_plugin = True
super(PluginFileStore, self).__init__(path, artifact_uri)
| 31.307692 | 75 | 0.746929 | 321 | 0.788698 | 0 | 0 | 0 | 0 | 0 | 0 | 51 | 0.125307 |
e02820e74734d672d90f15bf093da7319a0c92ba | 12,815 | py | Python | tests/server/test_flask_api.py | YuhangCh/terracotta | 867ba5f7425fa88881f4c161d81cc7311f4f9c4e | ["MIT"] | null | null | null | tests/server/test_flask_api.py | YuhangCh/terracotta | 867ba5f7425fa88881f4c161d81cc7311f4f9c4e | ["MIT"] | null | null | null | tests/server/test_flask_api.py | YuhangCh/terracotta | 867ba5f7425fa88881f4c161d81cc7311f4f9c4e | ["MIT"] | null | null | null |
from io import BytesIO
import json
import urllib.parse
from collections import OrderedDict
from PIL import Image
import numpy as np
import pytest
@pytest.fixture(scope='module')
def flask_app():
from terracotta.server import create_app
return create_app()
@pytest.fixture(scope='module')
def client(flask_app):
with flask_app.test_client() as client:
yield client
def test_get_keys(client, use_testdb):
rv = client.get('/keys')
expected_response = [
{'key': 'key1'},
{'key': 'akey'},
{'key': 'key2', 'description': 'key2'}
]
assert rv.status_code == 200
assert expected_response == json.loads(rv.data)['keys']
def test_get_metadata(client, use_testdb):
rv = client.get('/metadata/val11/x/val12/')
assert rv.status_code == 200
assert ['extra_data'] == json.loads(rv.data)['metadata']
def test_get_metadata_nonexisting(client, use_testdb):
rv = client.get('/metadata/val11/x/NONEXISTING/')
assert rv.status_code == 404
def test_get_datasets(client, use_testdb):
rv = client.get('/datasets')
assert rv.status_code == 200
datasets = json.loads(rv.data, object_pairs_hook=OrderedDict)['datasets']
assert len(datasets) == 4
assert OrderedDict([('key1', 'val11'), ('akey', 'x'), ('key2', 'val12')]) in datasets
def test_get_datasets_pagination(client, use_testdb):
# no page (implicit 0)
rv = client.get('/datasets?limit=2')
assert rv.status_code == 200
response = json.loads(rv.data, object_pairs_hook=OrderedDict)
assert response['limit'] == 2
assert response['page'] == 0
first_datasets = response['datasets']
assert len(first_datasets) == 2
assert OrderedDict([('key1', 'val11'), ('akey', 'x'), ('key2', 'val12')]) in first_datasets
# second page
rv = client.get('/datasets?limit=2&page=1')
assert rv.status_code == 200
response = json.loads(rv.data, object_pairs_hook=OrderedDict)
assert response['limit'] == 2
assert response['page'] == 1
last_datasets = response['datasets']
assert len(last_datasets) == 2
assert OrderedDict([('key1', 'val11'), ('akey', 'x'), ('key2', 'val12')]) not in last_datasets
# page out of range
rv = client.get('/datasets?limit=2&page=1000')
assert rv.status_code == 200
assert not json.loads(rv.data)['datasets']
# invalid page
rv = client.get('/datasets?page=-1')
assert rv.status_code == 400
# invalid limit
rv = client.get('/datasets?limit=-1')
assert rv.status_code == 400
def test_get_datasets_selective(client, use_testdb):
rv = client.get('/datasets?key1=val21')
assert rv.status_code == 200
assert len(json.loads(rv.data)['datasets']) == 3
rv = client.get('/datasets?key1=val21&key2=val23')
assert rv.status_code == 200
assert len(json.loads(rv.data)['datasets']) == 1
def test_get_datasets_unknown_key(client, use_testdb):
rv = client.get('/datasets?UNKNOWN=val21')
assert rv.status_code == 400
def test_get_singleband_greyscale(client, use_testdb, raster_file_xyz):
import terracotta
settings = terracotta.get_settings()
x, y, z = raster_file_xyz
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png')
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE
def test_get_singleband_extra_args(client, use_testdb, raster_file_xyz):
import terracotta
settings = terracotta.get_settings()
x, y, z = raster_file_xyz
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?foo=bar&baz=quz')
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE
def test_get_singleband_cmap(client, use_testdb, raster_file_xyz):
import terracotta
settings = terracotta.get_settings()
x, y, z = raster_file_xyz
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=jet')
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE
def test_get_singleband_preview(client, use_testdb):
import terracotta
settings = terracotta.get_settings()
rv = client.get(f'/singleband/val11/x/val12/preview.png?colormap=jet')
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE
def urlsafe_json(payload):
payload_json = json.dumps(payload)
return urllib.parse.quote_plus(payload_json, safe=r',.[]{}:"')
def test_get_singleband_explicit_cmap(client, use_testdb, raster_file_xyz):
import terracotta
settings = terracotta.get_settings()
x, y, z = raster_file_xyz
explicit_cmap = {1: (0, 0, 0), 2.0: (255, 255, 255, 20), 3: '#ffffff', 4: 'abcabc'}
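    # keys are pixel values; colors may be RGB/RGBA tuples or hex strings (with or without '#')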
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit'
f'&explicit_color_map={urlsafe_json(explicit_cmap)}')
assert rv.status_code == 200, rv.data.decode('utf-8')
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE
def test_get_singleband_explicit_cmap_invalid(client, use_testdb, raster_file_xyz):
x, y, z = raster_file_xyz
explicit_cmap = {1: (0, 0, 0), 2: (255, 255, 255), 3: '#ffffff', 4: 'abcabc'}
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?'
f'explicit_color_map={urlsafe_json(explicit_cmap)}')
assert rv.status_code == 400
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=jet'
f'&explicit_color_map={urlsafe_json(explicit_cmap)}')
assert rv.status_code == 400
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit')
assert rv.status_code == 400
explicit_cmap[3] = 'omgomg'
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit'
f'&explicit_color_map={urlsafe_json(explicit_cmap)}')
assert rv.status_code == 400
explicit_cmap = [(255, 255, 255)]
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit'
f'&explicit_color_map={urlsafe_json(explicit_cmap)}')
assert rv.status_code == 400
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=explicit'
f'&explicit_color_map=foo')
assert rv.status_code == 400
def test_get_singleband_stretch(client, use_testdb, raster_file_xyz):
import terracotta
settings = terracotta.get_settings()
x, y, z = raster_file_xyz
for stretch_range in ('[0,1]', '[0,null]', '[null, 1]', '[null,null]', 'null'):
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?stretch_range={stretch_range}')
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE
def test_get_singleband_out_of_bounds(client, use_testdb):
import terracotta
settings = terracotta.get_settings()
x, y, z = (0, 0, 10)
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png')
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE
assert np.all(np.asarray(img) == 0)
def test_get_singleband_unknown_cmap(client, use_testdb, raster_file_xyz):
x, y, z = raster_file_xyz
rv = client.get(f'/singleband/val11/x/val12/{z}/{x}/{y}.png?colormap=UNKNOWN')
assert rv.status_code == 400
def test_get_rgb(client, use_testdb, raster_file_xyz):
import terracotta
settings = terracotta.get_settings()
x, y, z = raster_file_xyz
rv = client.get(f'/rgb/val21/x/{z}/{x}/{y}.png?r=val22&g=val23&b=val24')
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE, 3)
def test_get_rgb_preview(client, use_testdb):
import terracotta
settings = terracotta.get_settings()
rv = client.get(f'/rgb/val21/x/preview.png?r=val22&g=val23&b=val24')
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE, 3)
def test_get_rgb_extra_args(client, use_testdb, raster_file_xyz):
import terracotta
settings = terracotta.get_settings()
x, y, z = raster_file_xyz
rv = client.get(f'/rgb/val21/x/{z}/{x}/{y}.png?r=val22&g=val23&b=val24&foo=bar&baz=quz')
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE, 3)
def test_get_rgb_stretch(client, use_testdb, raster_file_xyz):
import terracotta
settings = terracotta.get_settings()
x, y, z = raster_file_xyz
for stretch_range in ('[0,10000]', '[0,null]', '[null, 10000]', '[null,null]', 'null'):
rv = client.get(f'/rgb/val21/x/{z}/{x}/{y}.png?r=val22&g=val23&b=val24&'
f'r_range={stretch_range}&b_range={stretch_range}&g_range={stretch_range}')
assert rv.status_code == 200, rv.data
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == (*settings.DEFAULT_TILE_SIZE, 3)
def test_get_compute(client, use_testdb, raster_file_xyz):
import terracotta
settings = terracotta.get_settings()
# default tile size
x, y, z = raster_file_xyz
rv = client.get(
f'/compute/val21/x/{z}/{x}/{y}.png'
'?expression=v1*v2&v1=val22&v2=val23'
'&stretch_range=[0,10000]'
)
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE
# custom tile size
rv = client.get(
f'/compute/val21/x/{z}/{x}/{y}.png'
'?expression=v1*v2&v1=val22&v2=val23'
'&stretch_range=[0,10000]'
'&tile_size=[128,128]'
)
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == (128, 128)
def test_get_compute_preview(client, use_testdb):
import terracotta
settings = terracotta.get_settings()
rv = client.get(
f'/compute/val21/x/preview.png'
'?expression=v1*v2&v1=val22&v2=val23'
'&stretch_range=[0,10000]'
)
assert rv.status_code == 200
img = Image.open(BytesIO(rv.data))
assert np.asarray(img).shape == settings.DEFAULT_TILE_SIZE
def test_get_compute_invalid(client, use_testdb, raster_file_xyz):
x, y, z = raster_file_xyz
# too few keys
rv = client.get(
f'/compute/val21/{z}/{x}/{y}.png'
'?expression=v1*v2&v1=val22&v2=val23'
'&stretch_range=[0,10000]'
)
assert rv.status_code == 400
# invalid expression
rv = client.get(
f'/compute/val21/x/preview.png'
'?expression=__builtins__["dir"](v1)&v1=val22'
'&stretch_range=[0,10000]'
)
assert rv.status_code == 400
# no stretch range
rv = client.get(
f'/compute/val21/x/{z}/{x}/{y}.png'
'?expression=v1*v2&v1=val22&v2=val23'
)
assert rv.status_code == 400
# no expression
rv = client.get(
f'/compute/val21/x/{z}/{x}/{y}.png'
'?stretch_range=[0,10000)'
)
assert rv.status_code == 400
# missing operand
rv = client.get(
f'/compute/val21/x/{z}/{x}/{y}.png'
'?expression=v1*v2'
'&stretch_range=[0,10000)'
)
assert rv.status_code == 400
# invalid stretch range (syntax)
rv = client.get(
f'/compute/val21/x/{z}/{x}/{y}.png'
'?expression=v1*v2&v1=val22&v2=val23'
'&stretch_range=[0,10000)'
)
assert rv.status_code == 400
# invalid stretch range (value)
rv = client.get(
f'/compute/val21/x/{z}/{x}/{y}.png'
'?expression=v1*v2&v1=val22&v2=val23'
'&stretch_range=[10000,0]'
)
assert rv.status_code == 400
def test_get_colormap(client):
rv = client.get('/colormap?stretch_range=[0,1]&num_values=100')
assert rv.status_code == 200
assert len(json.loads(rv.data)['colormap']) == 100
def test_get_colormap_invalid(client):
rv = client.get('/colormap?stretch_range=[0,1')
assert rv.status_code == 400
def test_get_colormap_extra_args(client):
rv = client.get('/colormap?stretch_range=[0,1]&num_values=100&foo=bar&baz=quz')
assert rv.status_code == 200
assert len(json.loads(rv.data)['colormap']) == 100
def test_get_spec(client):
from terracotta import __version__
rv = client.get('/swagger.json')
assert rv.status_code == 200
assert json.loads(rv.data)
assert __version__ in rv.data.decode('utf-8')
rv = client.get('/apidoc')
assert rv.status_code == 200
assert b'Terracotta' in rv.data
| 30.731415 | 99 | 0.65595 | 0 | 0 | 87 | 0.006789 | 236 | 0.018416 | 0 | 0 | 3,504 | 0.27343 |
e02908fac191deeaa9eb04515ee51d7d466320c5 | 1,695 | py | Python | url.py | matthieucan/shorturl | a7f7fab61e8b23b352590797ca4959ed166c865e | [
"WTFPL"
] | 1 | 2018-10-19T01:57:29.000Z | 2018-10-19T01:57:29.000Z | url.py | matthieucan/shorturl | a7f7fab61e8b23b352590797ca4959ed166c865e | [
"WTFPL"
] | null | null | null | url.py | matthieucan/shorturl | a7f7fab61e8b23b352590797ca4959ed166c865e | [
"WTFPL"
] | null | null | null | def base_conv(n, input_base=10, output_base=10):
"""
Converts a number n from base input_base to base output_base.
The following symbols are used to represent numbers:
0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ
n can be an int if input_base <= 10, and a string otherwise.
The result will be a string.
"""
numbers = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
## base 10 conversion
n = str(n)
size = len(n)
baseten = 0
for i in range(size):
baseten += numbers.index(n[i]) * input_base ** (size - 1 - i)
## base output_base conversion
# we search the biggest number m such that n^m < x
max_power = 0
while output_base ** (max_power + 1) <= baseten:
max_power += 1
result = ""
for i in range(max_power + 1):
        coeff = baseten // (output_base ** (max_power - i))  # integer division so coeff can index `numbers`
baseten -= coeff * (output_base ** (max_power - i))
result += numbers[coeff]
return result
if __name__ == "__main__":
assert(base_conv(10) == "10")
assert(base_conv(42) == "42")
assert(base_conv(5673576) == "5673576")
assert(base_conv(10, input_base=2) == "2")
assert(base_conv(101010, input_base=2) == "42")
assert(base_conv(43, input_base=10, output_base=2) == "101011")
assert(base_conv(256**3 - 1, input_base=10, output_base=16) == "ffffff")
assert(base_conv("d9bbb9d0ceabf", input_base=16, output_base=8) ==
"154673563503165277")
assert(base_conv("154673563503165277", input_base=8, output_base=10) ==
"3830404793297599")
assert(base_conv(0, input_base=3, output_base=50) == "0")
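    # Illustrative round-trip sanity check: converting an arbitrary value to base 62
    # and back should reproduce its decimal representation.
    assert(base_conv(base_conv(12345, output_base=62), input_base=62) == "12345")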
| 36.06383 | 78 | 0.640708 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 597 | 0.352212 |
e02960393a5a94bda69769c1a73c609b148e700d | 13,612 | py | Python | src/qtt/qiskit/passes.py | codecrap/qtt | 39a8bf21f7bcab94940a66f4d553a14bf34f82b0 | [
"MIT"
] | null | null | null | src/qtt/qiskit/passes.py | codecrap/qtt | 39a8bf21f7bcab94940a66f4d553a14bf34f82b0 | [
"MIT"
] | null | null | null | src/qtt/qiskit/passes.py | codecrap/qtt | 39a8bf21f7bcab94940a66f4d553a14bf34f82b0 | [
"MIT"
] | null | null | null | import logging
from typing import Dict, List, Optional
import numpy as np
import qiskit
from qiskit.circuit import Barrier, Delay, Reset
from qiskit.circuit.library import (CRXGate, CRYGate, CRZGate, CZGate,
PhaseGate, RXGate, RYGate, RZGate, U1Gate,
U2Gate, U3Gate, UGate)
from qiskit.circuit.library.standard_gates import (CU1Gate, RZZGate, SdgGate,
SGate, TdgGate, TGate,
ZGate)
from qiskit.circuit.quantumcircuit import QuantumCircuit
from qiskit.converters.circuit_to_dag import circuit_to_dag
from qiskit.dagcircuit import DAGCircuit
from qiskit.transpiler.basepasses import TransformationPass
logger = logging.getLogger(__name__)
class RemoveSmallRotations(TransformationPass):
"""Return a circuit with small rotation gates removed."""
def __init__(self, epsilon: float = 0, modulo2pi=False):
"""Remove all small rotations from a circuit
Args:
epsilon: Threshold for rotation angle to be removed
modulo2pi: If True, then rotations multiples of 2pi are removed as well
"""
super().__init__()
self.epsilon = epsilon
self._empty_dag1 = qiskit.converters.circuit_to_dag(QuantumCircuit(1))
self._empty_dag2 = qiskit.converters.circuit_to_dag(QuantumCircuit(2))
self.mod2pi = modulo2pi
def run(self, dag: DAGCircuit) -> DAGCircuit:
"""Run the pass on `dag`.
Args:
dag: input dag.
Returns:
Output dag with small rotations removed
"""
def modulo_2pi(x):
x = float(x)
return np.mod(x + np.pi, 2 * np.pi) - np.pi
for node in dag.op_nodes():
if isinstance(node.op, (PhaseGate, RXGate, RYGate, RZGate)):
if node.op.is_parameterized():
# for parameterized gates we do not optimize
pass
else:
phi = float(node.op.params[0])
if self.mod2pi:
phi = modulo_2pi(phi)
if np.abs(phi) <= self.epsilon:
dag.substitute_node_with_dag(node, self._empty_dag1)
elif isinstance(node.op, (CRXGate, CRYGate, CRZGate)):
if node.op.is_parameterized():
# for parameterized gates we do not optimize
pass
else:
phi = float(node.op.params[0])
if self.mod2pi:
phi = modulo_2pi(phi)
if np.abs(phi) <= self.epsilon:
dag.substitute_node_with_dag(node, self._empty_dag2)
return dag
class RemoveDiagonalGatesAfterInput(TransformationPass):
"""Remove diagonal gates (including diagonal 2Q gates) at the start of a circuit.
Transpiler pass to remove diagonal gates (like RZ, T, Z, etc) at the start of a circuit.
Including diagonal 2Q gates. Nodes after a reset are also included.
"""
def run(self, dag):
"""Run the RemoveDiagonalGatesBeforeMeasure pass on `dag`.
Args:
dag (DAGCircuit): the DAG to be optimized.
Returns:
DAGCircuit: the optimized DAG.
"""
diagonal_1q_gates = (RZGate, ZGate, TGate, SGate, TdgGate, SdgGate, U1Gate)
diagonal_2q_gates = (CZGate, CRZGate, CU1Gate, RZZGate)
nodes_to_remove = set()
for input_node in (dag.input_map.values()):
try:
successor = next(dag.quantum_successors(input_node))
except StopIteration:
continue
if successor.type == "op" and isinstance(successor.op, diagonal_1q_gates):
nodes_to_remove.add(successor)
def valid_predecessor(s):
""" Return True of node is valid predecessor for removal """
if s.type == 'in':
return True
if s.type == "op" and isinstance(s.op, Reset):
return True
return False
if successor.type == "op" and isinstance(successor.op, diagonal_2q_gates):
predecessors = dag.quantum_predecessors(successor)
if all(valid_predecessor(s) for s in predecessors):
nodes_to_remove.add(successor)
for node_to_remove in nodes_to_remove:
dag.remove_op_node(node_to_remove)
return dag
class DecomposeU(TransformationPass):
""" Decompose U gates into elementary rotations Rx, Ry, Rz
The U gates are decomposed using McKay decomposition.
"""
def __init__(self, verbose=0):
"""
Args:
"""
super().__init__()
self._subdags = []
self.verbose = verbose
self.initial_layout = None
def ugate_replacement_circuit(self, ugate):
qc = QuantumCircuit(1)
if isinstance(ugate, (U3Gate, UGate)):
theta, phi, lam = ugate.params
if theta == np.pi/2:
# a u2 gate
qc.rz(lam - np.pi / 2, 0)
qc.rx(np.pi / 2, 0)
qc.rz(phi + np.pi / 2, 0)
else:
# from https://arxiv.org/pdf/1707.03429.pdf
qc.rz(lam, 0)
qc.rx(np.pi / 2, 0)
qc.rz(theta + np.pi, 0)
qc.rx(np.pi / 2, 0)
qc.rz(phi + np.pi, 0)
elif isinstance(ugate, U2Gate):
phi, lam = ugate.params
qc.rz(lam - np.pi / 2, 0)
qc.rx(np.pi / 2, 0)
qc.rz(phi + np.pi / 2, 0)
elif isinstance(ugate, (U1Gate, PhaseGate)):
lam, = ugate.params
qc.rz(lam, 0)
else:
raise Exception(f'unknown gate type {ugate}')
return qc
def run(self, dag: DAGCircuit) -> DAGCircuit:
"""Run the Decompose pass on `dag`.
Args:
dag: input DAG.
Returns:
Output DAG where ``U`` gates have been decomposed.
"""
# Walk through the DAG and expand each node if required
for node in dag.op_nodes():
if isinstance(node.op, (PhaseGate, U1Gate, U2Gate, U3Gate, UGate)):
subdag = circuit_to_dag(self.ugate_replacement_circuit(node.op))
dag.substitute_node_with_dag(node, subdag)
return dag
class DecomposeCX(TransformationPass):
""" Decompose CX into CZ and single qubit rotations
"""
def __init__(self, mode: str = 'ry'):
"""
Args:
"""
super().__init__()
self._subdags: List = []
self.initial_layout = None
self.gate = qiskit.circuit.library.CXGate
self.decomposition = QuantumCircuit(2)
if mode == 'ry':
self.decomposition.ry(-np.pi / 2, 1)
self.decomposition.cz(0, 1)
self.decomposition.ry(np.pi / 2, 1)
else:
self.decomposition.h(1)
self.decomposition.cz(0, 1)
self.decomposition.h(1)
self._dag = circuit_to_dag(self.decomposition)
def run(self, dag: DAGCircuit) -> DAGCircuit:
"""Run the Decompose pass on `dag`.
Args:
dag: input dag.
Returns:
output dag where ``CX`` was expanded.
"""
# Walk through the DAG and expand each non-basis node
for node in dag.op_nodes(self.gate):
dag.substitute_node_with_dag(node, self._dag)
return dag
class SequentialPass(TransformationPass):
"""Adds barriers between gates to make the circuit sequential."""
def run(self, dag):
new_dag = DAGCircuit()
for qreg in dag.qregs.values():
new_dag.add_qreg(qreg)
for creg in dag.cregs.values():
new_dag.add_creg(creg)
for node in dag.op_nodes():
new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
            logger.info(f'SequentialPass: adding node {node.name}')
if node.name in ['barrier', 'measure']:
continue
new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), [])
return new_dag
class LinearTopologyParallelPass(TransformationPass):
"""Adds barriers to enforce a linear topology
The barrier are placed between gates such that no two qubit gates are executed
at the same time and only single qubit gates on non-neighboring qubits can
be executed in parallel. It assumes a linear topology."""
def run(self, dag):
new_dag = DAGCircuit()
for qreg in dag.qregs.values():
new_dag.add_qreg(qreg)
for creg in dag.cregs.values():
new_dag.add_creg(creg)
for ii, layer in enumerate(dag.layers()):
gates_1q = []
gates_2q = []
other_gates = []
for node in layer['graph'].op_nodes():
if len(node.qargs) == 2:
gates_2q.append(node)
elif len(node.qargs) == 1:
gates_1q.append(node)
else:
logging.info(f'layer {ii}: other type of node {node}')
other_gates.append(node)
even = []
odd = []
for node in gates_1q:
if node.qargs[0].index % 2 == 0:
even.append(node)
else:
odd.append(node)
logging.info(
f'layer {ii}: 2q gates {len(gates_2q)}, even {len(even)} odd {len(odd)}, other {len(other_gates)}')
if len(even) > 0:
for node in even:
new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
if not isinstance(node.op, Barrier):
new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), [])
if len(odd) > 0:
for node in odd:
new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
if not isinstance(node.op, Barrier):
new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), [])
for node in gates_2q:
new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
if not isinstance(node.op, Barrier):
new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), [])
for node in other_gates:
new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
if not isinstance(node.op, Barrier):
new_dag.apply_operation_back(Barrier(new_dag.num_qubits()), list(new_dag.qubits), [])
return new_dag
class DelayPass(TransformationPass):
"""Adds delay gates when the qubits are idle.
For every layer of the circuit it finds the gate that
lasts the longest and applies appropriate delays on the
other qubits.
"""
def __init__(self, gate_durations: Dict[str, float], delay_quantum: Optional[float] = None):
"""
Args:
gate_durations: Gate durations in the units of dt
"""
super().__init__()
self.gate_durations = gate_durations
self.delay_quantum = delay_quantum
def add_delay_to_dag(self, duration, dag, qargs, cargs):
if self.delay_quantum:
number_of_delays = int(duration/self.delay_quantum)
for ii in range(number_of_delays):
dag.apply_operation_back(Delay(self.delay_quantum), qargs, cargs)
else:
dag.apply_operation_back(Delay(duration), qargs, cargs)
@staticmethod
def _determine_delay_target_qubits(dag, layer):
""" Determine qubits in specified layer which require a delay gate """
partition = layer['partition']
lst = list(dag.qubits)
for el in partition:
for q in el:
if q in lst:
lst.remove(q)
return lst
def run(self, dag):
new_dag = DAGCircuit()
for qreg in dag.qregs.values():
new_dag.add_qreg(qreg)
for creg in dag.cregs.values():
new_dag.add_creg(creg)
for layer_idx, layer in enumerate(dag.layers()):
max_duration = 0
durations = {}
for node in layer['graph'].op_nodes():
if node.name in self.gate_durations:
max_duration = max(max_duration, self.gate_durations[node.name])
for q in node.qargs:
durations[q] = self.gate_durations[node.name]
else:
                    logger.info(f'layer {layer_idx}, could not find duration for node {node.name}')
new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
partition = layer['partition']
if len(partition) == 0:
continue
lst = DelayPass._determine_delay_target_qubits(dag, layer)
logger.info(f'layer: {layer_idx}: lst {lst}, durations {durations}')
for el in lst:
logger.info(f'apply_operation_back: {[el]}')
self.add_delay_to_dag(max_duration, new_dag, [el], [])
for q in durations:
if max_duration - durations[q] > 0:
self.add_delay_to_dag(max_duration - durations[q], new_dag, [q], [])
return new_dag
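# Illustrative sketch of how these passes could be combined (assumes a QuantumCircuit
# `qc` is available and that the gate durations, in units of dt, are placeholders):
#
#     from qiskit.transpiler import PassManager
#
#     pm = PassManager([
#         DecomposeCX(),
#         DecomposeU(),
#         RemoveSmallRotations(epsilon=1e-8, modulo2pi=True),
#         DelayPass(gate_durations={'rx': 20, 'rz': 0, 'cz': 100}),
#     ])
#     scheduled_qc = pm.run(qc)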
| 36.591398 | 115 | 0.563914 | 12,778 | 0.938731 | 0 | 0 | 350 | 0.025713 | 0 | 0 | 2,881 | 0.211651 |
e029ad3e92c68df36a0c0c69723e696b156c5364 | 5,616 | py | Python | IAFNNESTA.py | JonathanAlis/IAFNNESTA | 6845bed7e41a162a60e65d709f37cf975c8c8a4e | [
"MIT"
] | 3 | 2021-05-13T05:51:42.000Z | 2022-02-06T13:36:52.000Z | IAFNNESTA.py | JonathanAlis/IAFNNESTA | 6845bed7e41a162a60e65d709f37cf975c8c8a4e | [
"MIT"
] | null | null | null | IAFNNESTA.py | JonathanAlis/IAFNNESTA | 6845bed7e41a162a60e65d709f37cf975c8c8a4e | [
"MIT"
] | 1 | 2022-02-06T13:36:39.000Z | 2022-02-06T13:36:39.000Z | def help():
return '''
Isotropic-Anisotropic Filtering Norm Nesterov Algorithm
Solves the filtering norm minimization + quadratic term problem
Nesterov algorithm, with continuation:
argmin_x || iaFN(x) ||_1/2 subjected to ||b - Ax||_2^2 < delta
If no filter is provided, solves the L1.
Continuation is performed by sequentially applying Nesterov's algorithm
with a decreasing sequence of values of mu0 >= mu >= muf
The observation matrix A must be a projector (non projector not implemented yet)
Inputs:
IAFNNESTA(b, #Observed data, a m x 1 array
A=identity,At=identity, # measurement matrix and adjoint (either a matrix, function handles)
muf=0.0001, #final mu value, smaller leads to higher accuracy
delta, #l2 error bound. This enforces how close the variable
#must fit the observations b, i.e. || y - Ax ||_2 <= delta
#If delta = 0, enforces y = Ax
#delta = sqrt(m + 2*sqrt(2*m))*sigma, where sigma=std(noise).
L1w=1,L2w=0, #weights of L1 (anisotropic) and L2(isotropic) norms
verbose=0, #whether to print internal steps
maxit=1000, #maximum iterations at the inner loop
x0=[], #initial solution, if not provided, will be At(b)
U=identity,Ut=identity, #Analysis/Synthesis operators
stopTest=1, #stopTest == 1 : stop when the relative change in the objective
function is less than TolVar
stopTest == 2 : stop with the l_infinity norm of difference in
the xk variable is less than TolVar
TolVar = 1e-5, #tolerance for the stopping criteria
AAtinv=[], #not implemented
normU=1, #if U is provided, this should be norm(U)
H=[],Ht=[]): #filter operations in sparse matrix form
#also accepts the string 'tv' as input,
#in that case, calculates the tv norm
Outputs:
return xk, #estimated x reconstructed signal
niter, #number of iterations
residuals #first column is the residual at every step,
#second column is the value of f_mu at every step
'''
import IAFNNesterov
import numpy as np
from scipy import sparse
import fil2mat
def identity(x):
return x
def IAFNNESTA(b,sig_size=0,A=identity,At=identity,muf=0.0001,delta=0,L1w=1,L2w=0,verbose=0,MaxIntIter=5,maxit=1000,x0=[],U=identity,Ut=identity,stopTest=1,TolVar = 1e-5,AAtinv=[],normU=1,H=[]):
if delta<0:
raise Exception('Delta must not be negative')
    if not callable(A): # if A is given as a matrix, wrap it and its adjoint as functions
        A_mat = A  # bind the matrix to a new name so the lambdas do not capture the rebound A
        A = lambda x: np.matmul(A_mat, x)
        At = lambda x: np.matmul(np.transpose(A_mat), x)
b=b.reshape((-1,1))
Atb=At(b)
if sig_size==0:
sig_size=Atb.shape
if callable(AAtinv):
AtAAtb = At( AAtinv(b) )
else:
        if len(AAtinv)>0:
            AAtinv_mat = AAtinv  # keep the matrix; the lambda must not refer to the rebound name
            AAtinv = lambda x: np.matmul(AAtinv_mat, x)
            AtAAtb = At( AAtinv(b) )
else: #default
AtAAtb = Atb
AAtinv=identity
if len(x0)==0:
x0 = AtAAtb
if len(H)==0:
Hf=identity
Hft=identity
else:
if not sparse.issparse(H):
if isinstance(H, str):
if H=='tv':
hs=[]
hs.append(np.array([[1,-1]]))
hs.append(np.array([[1],[-1]]))
H,_,_,_=fil2mat.fil2mat(hs,sig_size)
else:
print('H not recognized. Must be a sparse matrix, a list of filters or the string tv')
else:
#list of filters:
H,_,_,_=fil2mat.fil2mat(H,sig_size)
#print(H.shape)
#print(H)
#print(type(H))
Ht=H.transpose()
Hf=lambda x: H@x
Hft=lambda x: Ht@x
HU=lambda x: Hf(U(x))
UtHt=lambda x: Ut(Hft(x))
typemin=''
if L1w>0:
typemin+="iso"
if L2w>0:
typemin+="aniso"
typemin+='tropic '
if callable(H):
typemin+='filtering norm '
mu0=0
if L1w>0:
mu0+=L1w*0.9*np.max(np.linalg.norm(HU(x0),1))
if L2w>0:
mu0+=L2w*0.9*np.max(np.linalg.norm(HU(x0),2))
niter = 0
Gamma = np.power(muf/mu0,1/MaxIntIter)
mu = mu0
Gammat= np.power(TolVar/0.1,1/MaxIntIter)
TolVar = 0.1
for i in range(MaxIntIter):
mu = mu*Gamma
TolVar=TolVar*Gammat;
if verbose>0:
#if k%verbose==0:
print("\tBeginning %s Minimization; mu = %g\n" %(typemin,mu))
xk,niter_int,res = IAFNNesterov.IAFNNesterov(b,A=A,At=At,mu=mu,delta=delta,L1w=L1w,L2w=L2w,verbose=verbose,maxit=maxit,x0=x0,U=U,Ut=Ut,stopTest=stopTest,TolVar = TolVar,AAtinv=AAtinv,normU=normU,H=Hf,Ht=Hft)
xplug = xk
niter = niter_int + niter
if i==0:
residuals=res
else:
residuals = np.vstack((residuals, res))
return xk.reshape(sig_size)
if __name__ == "__main__":
print(help())
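    # Illustrative call sketch (A/At, b and sigma are assumed to be provided by the
    # caller; delta follows the rule quoted in help()):
    #
    #     m = b.size
    #     x_rec = IAFNNESTA(b, sig_size=(256, 256), A=A, At=At,
    #                       delta=np.sqrt(m + 2*np.sqrt(2*m)) * sigma,
    #                       L1w=1, L2w=1, H='tv')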
| 35.770701 | 215 | 0.51834 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,982 | 0.530983 |
e029f3704209eae0d9983e10eec83eadf0c6a288 | 6,952 | py | Python | hypatia/util/__init__.py | pfw/hypatia | 407cd62e4817c85188aa6abdf204c5aaff5ab570 | [
"ZPL-2.1"
] | null | null | null | hypatia/util/__init__.py | pfw/hypatia | 407cd62e4817c85188aa6abdf204c5aaff5ab570 | [
"ZPL-2.1"
] | null | null | null | hypatia/util/__init__.py | pfw/hypatia | 407cd62e4817c85188aa6abdf204c5aaff5ab570 | [
"ZPL-2.1"
] | null | null | null | import itertools
import BTrees
from persistent import Persistent
from ZODB.broken import Broken
from zope.interface import implementer
_marker = object()
from .. import exc
from ..interfaces import (
IResultSet,
STABLE,
)
@implementer(IResultSet)
class ResultSet(object):
"""Implements :class:`hypatia.interfaces.IResultSet`"""
family = BTrees.family64
def __init__(self, ids, numids, resolver, sort_type=None):
self.ids = ids # only guaranteed to be iterable, not sliceable
self.numids = numids
self.resolver = resolver
self.sort_type = sort_type
def __len__(self):
return self.numids
def sort(
self, index, reverse=False, limit=None, sort_type=None, raise_unsortable=True
):
if sort_type is None:
sort_type = self.sort_type
ids = self.ids
if not hasattr(ids, "__len__"):
# indexes have no obligation to be able to sort generators
ids = list(ids)
self.ids = ids
ids = index.sort(
self.ids,
reverse=reverse,
limit=limit,
sort_type=sort_type,
raise_unsortable=raise_unsortable,
)
numids = self.numids
if limit:
numids = min(numids, limit)
return self.__class__(ids, numids, self.resolver, sort_type=STABLE)
def first(self, resolve=True):
# return the first object or None
resolver = self.resolver
if resolver is None or not resolve:
for id_ in self.ids:
# if self.ids is not a list or a tuple, allow this result set
# to be iterated after first() is called and allow first() to
# be idempotent
if not hasattr(self.ids, "__len__"):
self.ids = itertools.chain([id_], self.ids)
return id_
else:
for id_ in self.ids:
# if self.ids is not a list or a tuple, allow this result set
# to be iterated after first() is called and allow first() to
# be idempotent
if not hasattr(self.ids, "__len__"):
self.ids = itertools.chain([id_], self.ids)
return resolver(id_)
def one(self, resolve=True):
if self.numids == 1:
return self.first(resolve=resolve)
if self.numids > 1:
raise exc.MultipleResults(self)
else:
raise exc.NoResults(self)
def _resolve_all(self, resolver):
for id_ in self.ids:
yield resolver(id_)
def all(self, resolve=True):
resolver = self.resolver
if resolver is None or not resolve:
return self.ids
else:
return self._resolve_all(resolver)
def __iter__(self):
return iter(self.all())
def intersect(self, docids):
"""Intersect this resultset with a sequence of docids or
another resultset. Returns a new ResultSet."""
# NB: we can't use an intersection function here because
# self.ids may be a generator
if isinstance(docids, ResultSet):
docids = docids.ids
filtered_ids = [x for x in self.ids if x in docids]
return self.__class__(filtered_ids, len(filtered_ids), self.resolver)
class BaseIndexMixin(object):
"""Mixin class for indexes that implements common behavior"""
family = BTrees.family64
def discriminate(self, obj, default):
"""See interface IIndexInjection"""
if callable(self.discriminator):
value = self.discriminator(obj, _marker)
else:
value = getattr(obj, self.discriminator, _marker)
if value is _marker:
return default
if isinstance(value, Persistent):
raise ValueError("Catalog cannot index persistent object %s" % value)
if isinstance(value, Broken):
raise ValueError("Catalog cannot index broken object %s" % value)
return value
def reindex_doc(self, docid, obj):
"""See interface IIndexInjection"""
self.unindex_doc(docid)
self.index_doc(docid, obj)
def indexed_count(self):
"""See IIndexedDocuments"""
return len(self.indexed())
def not_indexed_count(self):
"""See IIndexedDocuments"""
return len(self.not_indexed())
def docids(self):
"""See IIndexedDocuments"""
not_indexed = self.not_indexed()
indexed = self.indexed()
if len(not_indexed) == 0:
return self.family.IF.Set(indexed)
elif len(indexed) == 0:
return not_indexed
indexed = self.family.IF.Set(indexed)
return self.family.IF.union(not_indexed, indexed)
def docids_count(self):
"""See IIndexedDocuments"""
return len(self.docids())
def apply_intersect(self, query, docids):
"""Default apply_intersect implementation"""
result = self.apply(query)
if docids is None:
return result
return self.family.IF.weightedIntersection(result, docids)[1]
def _negate(self, apply_func, *args, **kw):
positive = apply_func(*args, **kw)
all = self.docids()
if len(positive) == 0:
return all
return self.family.IF.difference(all, positive)
def qname(self):
# used in query representations; __name__ should be set by
# catalog __setitem__ but if it's not, we fall back to a generic
# representation
return getattr(
self,
"__name__",
str(self),
)
def resultset_from_query(self, query, names=None, resolver=None):
# default resultset factory; meant to be overridden by systems that
# have a default resolver. NB: although the default implementation
# below does not access "self", so it would appear that this could be
# turned into a classmeth or staticmethod, subclasses that override may
# expect self, so this is a plain method.
docids = query._apply(names)
numdocs = len(docids)
return ResultSet(docids, numdocs, resolver)
def flush(self, *arg, **kw):
"""Hookable by upstream systems"""
pass
class RichComparisonMixin(object):
# Stolen from http://www.voidspace.org.uk/python/recipebook.shtml#comparison
def __eq__(self, other):
raise NotImplementedError("Equality not implemented")
def __lt__(self, other):
raise NotImplementedError("Less than not implemented")
def __ne__(self, other):
return not self.__eq__(other)
def __gt__(self, other):
return not (self.__lt__(other) or self.__eq__(other))
def __le__(self, other):
return self.__eq__(other) or self.__lt__(other)
def __ge__(self, other):
return self.__eq__(other) or self.__gt__(other)
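# Minimal usage sketch for RichComparisonMixin (the Version class below is hypothetical):
# a subclass only needs to provide __eq__ and __lt__, the mixin derives the remaining
# comparison operators.
#
#     class Version(RichComparisonMixin):
#         def __init__(self, number):
#             self.number = number
#
#         def __eq__(self, other):
#             return self.number == other.number
#
#         def __lt__(self, other):
#             return self.number < other.number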
| 31.174888 | 85 | 0.60889 | 6,685 | 0.961594 | 94 | 0.013521 | 3,120 | 0.448792 | 0 | 0 | 1,682 | 0.241945 |
e02a89e62a53d61fc9086acef78dc03df26f1de7 | 2,140 | py | Python | backend/listings/migrations/0001_initial.py | relaxxpls/Music-Control | 76f5d10904f820607b3eb756850d5c5d7d89d875 | [
"MIT"
] | null | null | null | backend/listings/migrations/0001_initial.py | relaxxpls/Music-Control | 76f5d10904f820607b3eb756850d5c5d7d89d875 | [
"MIT"
] | null | null | null | backend/listings/migrations/0001_initial.py | relaxxpls/Music-Control | 76f5d10904f820607b3eb756850d5c5d7d89d875 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.3 on 2021-05-30 04:28
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('realtors', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Listing',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slug', models.CharField(max_length=200, unique=True)),
('title', models.CharField(max_length=150)),
('address', models.CharField(default='', max_length=150)),
('city', models.CharField(max_length=100)),
('state', models.CharField(max_length=100)),
('zipcode', models.CharField(max_length=15)),
('description', models.TextField(blank=True)),
('sale_type', models.CharField(choices=[('For Sale', 'For Sale'), ('For Rent', 'For Rent')], default='For Sale', max_length=50)),
('price', models.IntegerField()),
('bedrooms', models.IntegerField()),
('bathrooms', models.DecimalField(decimal_places=1, max_digits=2)),
('home_type', models.CharField(choices=[('House', 'House'), ('Condo', 'Condo'), ('Townhouse', 'Townhouse')], default='House', max_length=50)),
('sqft', models.IntegerField()),
('open_house', models.BooleanField(default=False)),
('photo_main', models.ImageField(upload_to='photos/%Y/%m/%d')),
('photo_1', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d')),
('photo_2', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d')),
('is_published', models.BooleanField(default=True)),
('list_date', models.DateTimeField(blank=True, default=django.utils.timezone.now)),
('realtor', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='realtors.realtor')),
],
),
]
| 48.636364 | 158 | 0.583645 | 1,985 | 0.92757 | 0 | 0 | 0 | 0 | 0 | 0 | 455 | 0.212617 |
e02ae313e5c6ccbda99f1c423609cc20c6a48485 | 483 | py | Python | examples/example_without_CommandSet/my_listeners.py | LeConstellationniste/DiscordFramework | 24d4b9b7cb0a21d3cec9d5362ab0828c5e15a3af | [
"CC0-1.0"
] | 1 | 2021-01-27T14:55:03.000Z | 2021-01-27T14:55:03.000Z | examples/example_without_CommandSet/my_listeners.py | LeConstellationniste/DiscordFramework | 24d4b9b7cb0a21d3cec9d5362ab0828c5e15a3af | [
"CC0-1.0"
] | null | null | null | examples/example_without_CommandSet/my_listeners.py | LeConstellationniste/DiscordFramework | 24d4b9b7cb0a21d3cec9d5362ab0828c5e15a3af | [
"CC0-1.0"
] | null | null | null | import asyncio
import discord
# Just with a function to add to the bot.
async def on_message(message):
if not message.author.bot:
await message.channel.send(f"{message.author.mention} a envoyé un message!")
# A Listener already created with the function
from discordEasy.objects import Listener
async def on_message(message):
if not message.author.bot:
await message.channel.send(f"{message.author.mention} a envoyé un message!")
listener_on_message = Listener(on_message) | 28.411765 | 78 | 0.784679 | 0 | 0 | 0 | 0 | 0 | 0 | 276 | 0.569072 | 185 | 0.381443 |
e02beca3eabc9ebe9a2e1d16196b54fbf1a8bc1b | 4,024 | py | Python | pianonet/serving/app.py | robgon-art/pianonet | 8d8a827bc8d310b8ce3f66259bbdf72648e9ca32 | [
"MIT"
] | 14 | 2020-09-01T11:16:28.000Z | 2021-05-02T18:04:21.000Z | pianonet/serving/app.py | robgon-art/pianonet | 8d8a827bc8d310b8ce3f66259bbdf72648e9ca32 | [
"MIT"
] | 5 | 2020-11-13T18:46:05.000Z | 2022-02-10T01:16:13.000Z | pianonet/serving/app.py | robgon-art/pianonet | 8d8a827bc8d310b8ce3f66259bbdf72648e9ca32 | [
"MIT"
] | 3 | 2020-09-02T15:05:00.000Z | 2021-05-02T18:04:24.000Z | import os
import random
from flask import Flask, request, send_from_directory
from werkzeug.utils import secure_filename
from pianonet.core.pianoroll import Pianoroll
from pianonet.model_inspection.performance_from_pianoroll import get_performance_from_pianoroll
app = Flask(__name__)
base_path = "/app/"
# base_path = "/Users/angsten/PycharmProjects/pianonet"
performances_path = os.path.join(base_path, 'data', 'performances')
def get_random_midi_file_name():
"""
    Get a random midi file name that is very unlikely to collide.
"""
return str(random.randint(0, 10000000000000000000)) + ".midi"
def get_performance_path(midi_file_name):
"""
    Returns the full path to a performance midi file given a file name.
"""
return os.path.join(performances_path, midi_file_name)
@app.route('/')
def alive():
return 'OK'
@app.route('/performances/', methods=['GET'])
def get_performance():
"""
Returns the requested performance as midi file.
Expected query string is 'midi_file_name', such as 1234.midi
"""
performance_midi_file_name = request.args.get('midi_file_name')
performance_midi_file_name = secure_filename(performance_midi_file_name)
print(performance_midi_file_name)
if performance_midi_file_name == None:
return {"http_code": 400, "code": "BadRequest", "message": "midi_file_name not found in request."}
midi_file_path = get_performance_path(performance_midi_file_name)
if not os.path.exists(midi_file_path):
return {
"http_code": 404,
"code": "Not Found",
"message": "midi_file " + performance_midi_file_name + " not found."
}
with open(midi_file_path, 'rb') as midi_file:
return send_from_directory(performances_path, performance_midi_file_name)
@app.route('/create-performance', methods=['POST'])
def performance():
"""
Expects post form data as follows:
seed_midi_file_data: Midi file that forms the seed for a performance as string encoding like "8,2,3,4,5..."
seconds_to_generate: Number of seconds of new notes to generate
model_complexity: Quality of model to use, one of ['low', 'medium', 'high', 'highest']
"""
seed_midi_file_data = request.form.get('seed_midi_file_data')
if seed_midi_file_data == None:
return {"http_code": 400, "code": "BadRequest", "message": "seed_midi_file_data not found in request."}
else:
seed_midi_file_int_array = [int(x) for x in seed_midi_file_data.split(',')]
frame = bytearray()
for i in seed_midi_file_int_array:
frame.append(i)
saved_seed_midi_file_path = os.path.join(base_path, 'data', 'seeds', get_random_midi_file_name())
with open(saved_seed_midi_file_path, 'wb') as midi_file:
midi_file.write(frame)
seconds_to_generate = request.form.get('seconds_to_generate')
if seconds_to_generate == None:
return {"http_code": 400, "code": "BadRequest", "message": "seconds_to_generate not found in request."}
else:
seconds_to_generate = float(seconds_to_generate)
model_complexity = request.form.get('model_complexity', 'low')
if model_complexity == 'low':
model_name = "micro_1"
else:
model_name = "r9p0_3500kparams_approx_9_blocks_model"
model_path = os.path.join(base_path, 'models', model_name)
input_pianoroll = Pianoroll(saved_seed_midi_file_path, use_custom_multitrack=True)
input_pianoroll.trim_silence_off_ends()
final_pianoroll = get_performance_from_pianoroll(
pianoroll_seed=input_pianoroll,
num_time_steps=int(48 * seconds_to_generate),
model_path=model_path,
)
midi_file_name = get_random_midi_file_name()
midi_file_path = get_performance_path(midi_file_name)
final_pianoroll.save_to_midi_file(midi_file_path)
return {"http_code": 200, "code": "Success", "message": "", "midi_file_name": midi_file_name}
if __name__ == '__main__':
app.run(host='0.0.0.0')
| 31.685039 | 115 | 0.706511 | 0 | 0 | 0 | 0 | 3,158 | 0.784791 | 0 | 0 | 1,293 | 0.321322 |
e02cef0666c1161f8f7f1e91555b80b350dae71e | 4,965 | py | Python | app.py | rafalbigaj/epidemic-model-visualization | 35829180b5a53697b336e8615d854a21b3395f59 | [
"Apache-2.0"
] | null | null | null | app.py | rafalbigaj/epidemic-model-visualization | 35829180b5a53697b336e8615d854a21b3395f59 | [
"Apache-2.0"
] | null | null | null | app.py | rafalbigaj/epidemic-model-visualization | 35829180b5a53697b336e8615d854a21b3395f59 | [
"Apache-2.0"
] | null | null | null | import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import logging
import json
import os
import pandas as pd
from datetime import datetime
from datetime import timedelta
from urllib import parse
import requests
logger = logging.getLogger(__name__)
external_stylesheets = [dbc.themes.DARKLY]
is_cf_instance = os.environ.get('CF_INSTANCE_GUID', '') != ''
port = int(os.environ.get('PORT', 8050))
host = os.environ.get('CF_INSTANCE_INTERNAL_IP', '127.0.0.1')
wml_api_key = os.environ['WML_API_KEY']
wml_scoring_url = os.environ['WML_SCORING_URL']
url = parse.urlparse(wml_scoring_url)
wml_base_url = url._replace(path='').geturl()
wml_instance_id = url.path.split('/')[3]
logger.setLevel(logging.INFO if is_cf_instance else logging.DEBUG)
logger.info('Starting %s server: %s:%d', 'CF' if is_cf_instance else 'local', host, port)
logger.info('WML URL: %s', wml_base_url)
logger.info('WML instance ID: %s', wml_instance_id)
wml_credentials = {
"apikey": wml_api_key,
"instance_id": wml_instance_id,
"url": wml_base_url,
}
iam_token_endpoint = 'https://iam.cloud.ibm.com/identity/token'
def _get_token():
data = {
'grant_type': 'urn:ibm:params:oauth:grant-type:apikey',
'apikey': wml_credentials['apikey']
}
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
response = requests.post(iam_token_endpoint, data=data, headers=headers)
return response.json()['access_token']
def score(token, algorithm, start_date, country, predict_range, s, i, r):
headers = {'Authorization': 'Bearer ' + token}
payload = {
"fields": ["algorithm", "start_date", "country", "predict_range", "S0", "I0", "R0"],
"values": [[algorithm, start_date.strftime('%-m/%-d/%y'), country, predict_range, s, i, r]]
}
logger.info('Scoring with payload: %s', json.dumps(payload))
response = requests.post(wml_scoring_url, json=payload, headers=headers)
if response.status_code == 200:
result = response.json()
else:
raise Exception('Scoring error [{}]: {}'.format(response.status_code, response.text))
n_days = len(result['values'])
index = [(start_date + timedelta(days=i)).strftime('%d/%m/%y') for i in range(n_days)]
return pd.DataFrame(result['values'], columns=result['fields'], index=index)
def serve_layout():
token = _get_token()
# predict_range = 14
# sir_result = score(token, 'SIR', datetime(2020, 3, 3), 'Poland', predict_range, 10_000, 20, 10)
# logistic_result = score(token, 'LOGISTIC', datetime(2020, 3, 3), 'Poland', predict_range, 10_000, 20, 10)
calibration_result = score(token, 'CALIBRATION', datetime(2020, 1, 22), 'Poland', 40, 10_000, 20, 10)
# days = list(sir_result.index)
days = list(calibration_result.index)
calibration_result['ActualChange'] = calibration_result['Actual'] - calibration_result['Actual'].shift(1, fill_value=0)
calibration_result['PredictedChange'] = calibration_result['Predicted'] - calibration_result['Predicted'].shift(1, fill_value=0)
fig = make_subplots(specs=[[{"secondary_y": True}]])
fig.add_trace(
go.Bar(x=days, y=calibration_result['PredictedChange'], name='Predicted Change', opacity=0.5),
secondary_y=True,
)
fig.add_trace(
go.Bar(x=days, y=calibration_result['ActualChange'], name='Actual Change', opacity=0.5),
secondary_y=True,
)
fig.add_trace(
go.Scatter(x=days, y=calibration_result['Predicted'], name='Calibration'),
secondary_y=False,
)
fig.add_trace(
go.Scatter(x=days, y=calibration_result['Actual'], name='Actual', mode="markers", marker=dict(size=8)),
secondary_y=False,
)
fig.update_layout(
title="Prediction of confirmed cases for Poland",
template="plotly_dark",
height=900
)
fig.update_xaxes(title_text="Date")
fig.update_yaxes(title_text="Total confirmed cases", secondary_y=False, range=[0, 6000])
fig.update_yaxes(title_text="New cases per day", secondary_y=True, range=[0, 1000])
# fig = go.Figure(
# data=[
# go.Scatter(x=days, y=sir_result['I'], name='SIR'),
# go.Scatter(x=days, y=logistic_result['I'], name='Logistic'),
# ],
# layout=go.Layout(
# title="COVID19 infected prediction in Poland",
# template="plotly_dark",
# height=600
# )
# )
return html.Div(children=[
html.H1(children='COVID-19 Predictions with Watson Machine Learning'),
dcc.Graph(
id='example-graph',
figure=fig
)
])
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = serve_layout
if __name__ == '__main__':
app.run_server(debug=(not is_cf_instance), port=port, host=host)
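# The app requires WML_API_KEY and WML_SCORING_URL to be set before start-up;
# an illustrative local run (the values below are placeholders):
#
#     export WML_API_KEY=...
#     export WML_SCORING_URL=...   # full scoring endpoint URL of the deployed model
#     python app.py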
| 34.006849 | 132 | 0.67291 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,525 | 0.30715 |
e02d1d4df9c56883a92d8546af5497c549276afd | 1,106 | py | Python | src/sweetrpg_library_api/application/blueprints/systems/manager.py | paulyhedral/sweetrpg-library-api | 0105e963ef4321398aa66d7cb3aa9c2df1c4f375 | [
"MIT"
] | null | null | null | src/sweetrpg_library_api/application/blueprints/systems/manager.py | paulyhedral/sweetrpg-library-api | 0105e963ef4321398aa66d7cb3aa9c2df1c4f375 | [
"MIT"
] | 33 | 2021-09-18T23:52:05.000Z | 2022-03-30T12:25:49.000Z | src/sweetrpg_library_api/application/blueprints/systems/manager.py | sweetrpg/library-api | 0105e963ef4321398aa66d7cb3aa9c2df1c4f375 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
__author__ = "Paul Schifferer <[email protected]>"
"""
"""
from flask_rest_jsonapi import ResourceList, ResourceDetail, ResourceRelationship
from sweetrpg_library_objects.api.system.schema import SystemAPISchema
from sweetrpg_api_core.data import APIData
from sweetrpg_library_objects.model.system import System
from sweetrpg_library_api.application.db import db
from sweetrpg_library_api.application.blueprints.setup import model_info
class SystemList(ResourceList):
schema = SystemAPISchema
data_layer = {"class": APIData, "type": "system", "model": System, "db": db, "model_info": model_info}
class SystemDetail(ResourceDetail):
schema = SystemAPISchema
data_layer = {
"class": APIData,
"type": "system",
"model": System,
"db": db,
"model_info": model_info
}
# class SystemAuthorRelationship(ResourceRelationship):
# schema = SystemAPISchema
# data_layer = {
# "class": APIData,
# "type": "system",
# "model": System,
# "db": db,
# "model_info": model_info
# }
| 28.358974 | 106 | 0.683544 | 384 | 0.347197 | 0 | 0 | 0 | 0 | 0 | 0 | 398 | 0.359855 |
e02e416aacee98cfdb91a5328f2267836f5a1229 | 6,862 | py | Python | tests/test_ordering.py | deepio-oc/pabot | ebf1894c6d35b2ddd5c4bca01bceb25189358106 | [
"Apache-2.0"
] | 379 | 2015-02-02T17:47:45.000Z | 2022-03-20T16:51:05.000Z | tests/test_ordering.py | deepio-oc/pabot | ebf1894c6d35b2ddd5c4bca01bceb25189358106 | [
"Apache-2.0"
] | 406 | 2015-02-12T07:41:53.000Z | 2022-03-28T23:35:32.000Z | tests/test_ordering.py | deepio-oc/pabot | ebf1894c6d35b2ddd5c4bca01bceb25189358106 | [
"Apache-2.0"
] | 159 | 2015-01-16T13:42:20.000Z | 2022-03-30T19:48:15.000Z | from robot import __version__ as ROBOT_VERSION
import sys
import tempfile
import textwrap
import unittest
import shutil
import subprocess
class PabotOrderingGroupTest(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def _run_tests_with(self, testfile, orderfile):
robot_file = open("{}/test.robot".format(self.tmpdir), "w")
robot_file.write(textwrap.dedent(testfile))
robot_file.close()
with open("{}/order.dat".format(self.tmpdir), "w") as f:
f.write(textwrap.dedent(orderfile))
process = subprocess.Popen(
[
sys.executable,
"-m" "pabot.pabot",
"--testlevelsplit",
"--ordering",
"{}/order.dat".format(self.tmpdir),
"{}/test.robot".format(self.tmpdir),
],
cwd=self.tmpdir,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
return process.communicate()
def test_orders(self):
stdout, stderr = self._run_tests_with(
"""
*** Variables ***
${SCALAR} Hello, globe!
*** Test Cases ***
First Test
Set Suite Variable ${SCALAR} Hello, world!
Second Test
Should Be Equal ${SCALAR} Hello, world!
Third Test
Should Be Equal ${SCALAR} Hello, globe!
""",
"""
{
--test Test.First Test
--test Test.Second Test
}
--test Test.Third Test
""",
)
if sys.version_info < (3, 0):
self.assertIn("PASSED", stdout, stderr)
self.assertNotIn("FAILED", stdout, stderr)
self.assertEqual(stdout.count("PASSED"), 2)
else:
self.assertIn(b"PASSED", stdout, stderr)
self.assertNotIn(b"FAILED", stdout, stderr)
self.assertEqual(stdout.count(b"PASSED"), 2)
def test_two_orders(self):
stdout, stderr = self._run_tests_with(
"""
*** Variables ***
${SCALAR} Hello, globe!
*** Test Cases ***
First Test
Set Suite Variable ${SCALAR} Hello, world!
Second Test
Should Be Equal ${SCALAR} Hello, world!
Second And Quarter
Should Be Equal ${SCALAR} Hello, globe!
Second And Half
Should Be Equal ${SCALAR} Hello, globe!
Third Test
Should Be Equal ${SCALAR} Hello, globe!
""",
"""
{
--test Test.First Test
--test Test.Second Test
}
{
--test Test.Second And Quarter
--test Test.Second And Half
}
--test Test.Third Test
""",
)
if sys.version_info < (3, 0):
self.assertIn("PASSED", stdout, stderr)
self.assertNotIn("FAILED", stdout, stderr)
if ROBOT_VERSION < "4.0":
expected_write = "5 critical tests, 5 passed, 0 failed"
else:
expected_write = "5 tests, 5 passed, 0 failed, 0 skipped."
self.assertIn(expected_write, stdout, stderr)
self.assertEqual(stdout.count("PASSED"), 3)
else:
self.assertIn(b"PASSED", stdout, stderr)
self.assertNotIn(b"FAILED", stdout, stderr)
if ROBOT_VERSION < "4.0":
expected_write = b"5 critical tests, 5 passed, 0 failed"
else:
expected_write = b"5 tests, 5 passed, 0 failed, 0 skipped."
self.assertIn(expected_write, stdout, stderr)
self.assertEqual(stdout.count(b"PASSED"), 3)
def test_too_big_testname(self):
stdout, stderr = self._run_tests_with(
"""
*** Test Cases ***
Test Lorem ipsum dolor sit amet, consectetur adipiscing elit. Mauris eu velit nunc. Duis eget purus eget orci porta blandit sed ut tortor. Nunc vel nulla bibendum, auctor sem ac, molestie risus. Sed eu metus volutpat, hendrerit nibh in, auctor urna. Nunc a sodales.
Log Test
""",
"""
--test Invalid
""",
)
if sys.version_info < (3, 0):
self.assertIn("PASSED", stdout, stderr)
self.assertNotIn("FAILED", stdout, stderr)
self.assertEqual(stdout.count("PASSED"), 1)
else:
self.assertIn(b"PASSED", stdout, stderr)
self.assertNotIn(b"FAILED", stdout, stderr)
self.assertEqual(stdout.count(b"PASSED"), 1)
def test_longnames_in_tests(self):
stdout, stderr = self._run_tests_with(
"""
*** Settings ***
Test Template Test1
*** Test Cases ***
The Somewhat Long Name Of The Test S1Test 01 1
The Somewhat Long Name Of The Test S1Test 02 1
The Somewhat Long Name Of The Test S1Test 03 1
The Somewhat Long Name Of The Test S1Test 04 1
The Somewhat Long Name Of The Test S1Test 05 1
The Somewhat Long Name Of The Test S1Test 06 1
The Somewhat Long Name Of The Test S1Test 07 1
The Somewhat Long Name Of The Test S1Test 08 1
The Somewhat Long Name Of The Test S1Test 09 1
The Somewhat Long Name Of The Test S1Test 10 1
The Somewhat Long Name Of The Test S1Test 11 1
The Somewhat Long Name Of The Test S1Test 12 1
*** Keywords ***
Test1
[Arguments] ${arg}
Log Test
""",
"""
{
--test Test.The Somewhat Long Name Of The Test S1Test 01
--test Test.The Somewhat Long Name Of The Test S1Test 02
--test Test.The Somewhat Long Name Of The Test S1Test 03
--test Test.The Somewhat Long Name Of The Test S1Test 04
--test Test.The Somewhat Long Name Of The Test S1Test 05
--test Test.The Somewhat Long Name Of The Test S1Test 06
}
{
--test Test.The Somewhat Long Name Of The Test S1Test 07
--test Test.The Somewhat Long Name Of The Test S1Test 08
--test Test.The Somewhat Long Name Of The Test S1Test 09
--test Test.The Somewhat Long Name Of The Test S1Test 10
--test Test.The Somewhat Long Name Of The Test S1Test 11
--test Test.The Somewhat Long Name Of The Test S1Test 12
}
""",
)
if sys.version_info < (3, 0):
self.assertIn("PASSED", stdout, stderr)
self.assertNotIn("FAILED", stdout, stderr)
self.assertEqual(stdout.count("PASSED"), 2)
else:
self.assertIn(b"PASSED", stdout, stderr)
self.assertNotIn(b"FAILED", stdout, stderr)
self.assertEqual(stdout.count(b"PASSED"), 2)
| 35.189744 | 273 | 0.557418 | 6,721 | 0.979452 | 0 | 0 | 0 | 0 | 0 | 0 | 3,745 | 0.545759 |
e02e814aa08f31a0fd4f302fa151aca0b7af7756 | 984 | py | Python | setup.py | Commonists/pageview-api | 39e8b3c3c82f64a500e3dd4f306451c81c7e31b7 | [
"MIT"
] | 21 | 2015-12-02T12:06:38.000Z | 2022-02-11T16:16:06.000Z | setup.py | Commonists/pageview-api | 39e8b3c3c82f64a500e3dd4f306451c81c7e31b7 | [
"MIT"
] | 3 | 2016-04-19T19:56:25.000Z | 2020-08-27T09:52:42.000Z | setup.py | Commonists/pageview-api | 39e8b3c3c82f64a500e3dd4f306451c81c7e31b7 | [
"MIT"
] | 6 | 2017-10-27T15:39:51.000Z | 2020-12-17T02:11:52.000Z | #!/usr/bin/python
# -*- coding: latin-1 -*-
"""Setup script."""
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
try:
import pageviewapi
version = pageviewapi.__version__
except ImportError:
version = 'Undefined'
classifiers = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'
]
packages = ['pageviewapi']
requires = ['requests', 'attrdict']
setup(
name='pageviewapi',
version=version,
author='Commonists',
author_email='[email protected]',
url='http://github.com/Commonists/pageview-api',
description='Wikimedia Pageview API client',
long_description=open('README.md').read(),
license='MIT',
packages=packages,
install_requires=requires,
classifiers=classifiers
)
| 22.883721 | 52 | 0.670732 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 457 | 0.464431 |
e030b341c624d43cef697abc742e82664391c682 | 416 | py | Python | task1b.py | juby-gif/assignment1 | 3d39478fdc371e80a546caac545561145afbb080 | [
"BSD-3-Clause"
] | null | null | null | task1b.py | juby-gif/assignment1 | 3d39478fdc371e80a546caac545561145afbb080 | [
"BSD-3-Clause"
] | null | null | null | task1b.py | juby-gif/assignment1 | 3d39478fdc371e80a546caac545561145afbb080 | [
"BSD-3-Clause"
] | null | null | null | #a2_t1b.py
#This program is to convert Celsius to Kelvin
def c_to_k(c):
k = c + 273.15 #Formula to convert Celsius to Kelvin
return k
def f_to_c(f):
fa = (f-32) * 5/9 #Formula to convert Fareheit to Celsius
return fa
c = 25.0
f = 100.0
k = c_to_k(c)
fa = f_to_c(f)
print("Celsius of " + str(c) + " is " + str(k) + " in Kelvin")
print("Farenheit of " + str(f) + " is " + str(fa) + " in Celsius")
| 24.470588 | 67 | 0.605769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 198 | 0.475962 |
e0315471bd1a35e31c6a9cdd93a2a2a27365d479 | 2,702 | py | Python | TWLight/emails/views.py | jajodiaraghav/TWLight | 22359ab0b95ee3653e8ffa0eb698acd7bb8ebf70 | [
"MIT"
] | 1 | 2019-10-24T04:49:52.000Z | 2019-10-24T04:49:52.000Z | TWLight/emails/views.py | jajodiaraghav/TWLight | 22359ab0b95ee3653e8ffa0eb698acd7bb8ebf70 | [
"MIT"
] | 1 | 2019-03-29T15:29:45.000Z | 2019-03-29T15:57:20.000Z | TWLight/emails/views.py | jajodiaraghav/TWLight | 22359ab0b95ee3653e8ffa0eb698acd7bb8ebf70 | [
"MIT"
] | 1 | 2019-09-26T14:40:27.000Z | 2019-09-26T14:40:27.000Z | from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse, reverse_lazy
from django.core.mail import BadHeaderError, send_mail
from django.http import HttpResponse, HttpResponseRedirect
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_lazy as _
from django.views.generic.edit import FormView
from TWLight.emails.forms import ContactUsForm
from TWLight.emails.signals import ContactUs
@method_decorator(login_required, name='post')
class ContactUsView(FormView):
template_name = 'emails/contact.html'
form_class = ContactUsForm
success_url = reverse_lazy('contact')
def get_initial(self):
initial = super(ContactUsView, self).get_initial()
# @TODO: This sort of gets repeated in ContactUsForm.
        # This could probably be factored out to a common place for DRYness.
if self.request.user.is_authenticated():
if self.request.user.email:
initial.update({
'email': self.request.user.email,
})
if ('message' in self.request.GET):
initial.update({
'message': self.request.GET['message'],
})
initial.update({
'next': reverse_lazy('contact'),
})
return initial
def form_valid(self, form):
# Adding an extra check to ensure the user is a wikipedia editor.
try:
assert self.request.user.editor
email = form.cleaned_data['email']
message = form.cleaned_data['message']
carbon_copy = form.cleaned_data['cc']
ContactUs.new_email.send(
sender=self.__class__,
user_email=email,
cc=carbon_copy,
editor_wp_username=self.request.user.editor.wp_username,
body=message
)
messages.add_message(self.request, messages.SUCCESS,
# Translators: Shown to users when they successfully submit a new message using the contact us form.
_('Your message has been sent. We\'ll get back to you soon!'))
return HttpResponseRedirect(reverse('contact'))
except (AssertionError, AttributeError) as e:
messages.add_message (self.request, messages.WARNING,
# Translators: This message is shown to non-wikipedia editors who attempt to post data to the contact us form.
_('You must be a Wikipedia editor to do that.'))
raise PermissionDenied
return self.request.user.editor | 43.580645 | 126 | 0.657661 | 2,086 | 0.772021 | 0 | 0 | 2,133 | 0.789415 | 0 | 0 | 610 | 0.225759 |
e0322ebc94878f3dc7b69955feb764a97d3db29b | 1,997 | py | Python | frontend/config.py | lcbm/cs-data-ingestion | 314525285bfefe726d86c232937b05d273e44e7f | [
"0BSD"
] | null | null | null | frontend/config.py | lcbm/cs-data-ingestion | 314525285bfefe726d86c232937b05d273e44e7f | [
"0BSD"
] | null | null | null | frontend/config.py | lcbm/cs-data-ingestion | 314525285bfefe726d86c232937b05d273e44e7f | [
"0BSD"
] | null | null | null | """Flask App configuration file."""
import logging
import os
import dotenv
import frontend.constants as constants
dotenv.load_dotenv(os.path.join(constants.BASEDIR, "frontend.env"))
class Base:
"""Configuration class used as base for all environments."""
DEBUG = False
TESTING = False
LOGGING_FORMAT = "[%(asctime)s] %(levelname)s in %(message)s"
LOGGING_LOCATION = "frontend.log"
LOGGING_LEVEL = os.environ.get("LOGGING_LEVEL", logging.DEBUG)
class Development(Base):
"""Configuration class for development environment.
Parameters
----------
Base: base configuration object.
"""
DEBUG = True
TESTING = False
ENV = "dev"
class Staging(Base):
"""Configuration class for development staging environment.
Parameters
----------
Base: base configuration object.
"""
DEBUG = False
TESTING = True
ENV = "staging"
class Production(Base):
"""Configuration class for development production environment.
Parameters
----------
Base: base configuration object.
"""
DEBUG = False
TESTING = False
ENV = "prod"
config = {
"development": "frontend.config.Development",
"staging": "frontend.config.Staging",
"production": "frontend.config.Production",
"default": "frontend.config.Development",
}
def configure_app(app):
"""Configures the Flask app according to the FLASK_ENV
envar. In case FLASK_ENV is not defined, then use the
'default' configuration.
Parameters
----------
app: flask.Flask
Flask app Module.
"""
# Configure app
config_name = os.environ.get("FLASK_ENV", "default")
app.config.from_object(config[config_name])
# Configure logging
handler = logging.FileHandler(app.config["LOGGING_LOCATION"])
handler.setLevel(app.config["LOGGING_LEVEL"])
formatter = logging.Formatter(app.config["LOGGING_FORMAT"])
handler.setFormatter(formatter)
app.logger.addHandler(handler)
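# Typical wiring, as an illustrative sketch: create the Flask app and hand it to
# configure_app(), which picks the config class named by FLASK_ENV (default: Development).
#
#     from flask import Flask
#
#     app = Flask(__name__)
#     configure_app(app)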
| 21.473118 | 67 | 0.667001 | 937 | 0.469204 | 0 | 0 | 0 | 0 | 0 | 0 | 1,086 | 0.543816 |
e032bc66a6f5b0a211c59ba883502067921d3427 | 2,961 | py | Python | tests/test_dsl.py | goodreferences/ElasticQuery | 579e387c5a7c1cbbeab999050c0d2faa80ded821 | [
"MIT"
] | null | null | null | tests/test_dsl.py | goodreferences/ElasticQuery | 579e387c5a7c1cbbeab999050c0d2faa80ded821 | [
"MIT"
] | null | null | null | tests/test_dsl.py | goodreferences/ElasticQuery | 579e387c5a7c1cbbeab999050c0d2faa80ded821 | [
"MIT"
] | null | null | null | # ElasticQuery
# File: tests/test_dsl.py
# Desc: tests for ElasticQuery DSL objects (Filter, Query, Aggregate)
from os import path
from unittest import TestCase
from jsontest import JsonTest
from elasticquery import Query, Aggregate, Suggester
from elasticquery.exceptions import (
NoQueryError, NoAggregateError, NoSuggesterError,
MissingArgError
)
from .util import assert_equal
CLASS_NAMES = {
'_query': Query
}
def _test_query(self, query, test_name, test_data):
method = getattr(query, test_name)
def parse_arg(arg):
if isinstance(arg, list):
return [parse_arg(a) for a in arg]
else:
return (
CLASS_NAMES[arg](arg, {})
if (isinstance(arg, basestring) and arg.startswith('_'))
else arg
)
args = test_data.get('args', [])
args = parse_arg(args)
kwargs = test_data.get('kwargs', {})
kwargs = {
k: parse_arg(v) if isinstance(v, list) else parse_arg(v)
for k, v in kwargs.iteritems()
}
output = method(*args, **kwargs).dict()
assert_equal(self, output, test_data['output'])
class TestQueries(TestCase):
__metaclass__ = JsonTest
jsontest_files = path.join('tests', 'queries')
jsontest_function = lambda self, test_name, test_data: (
_test_query(self, Query, test_name, test_data)
)
class TestAggregates(TestCase):
__metaclass__ = JsonTest
jsontest_files = path.join('tests', 'aggregates')
jsontest_function = lambda self, test_name, test_data: (
_test_query(self, Aggregate, test_name, test_data)
)
class TestSuggesters(TestCase):
__metaclass__ = JsonTest
jsontest_files = path.join('tests', 'suggesters')
jsontest_function = lambda self, test_name, test_data: (
_test_query(self, Suggester, test_name, test_data)
)
class TestFails(TestCase):
def test_no_query(self):
with self.assertRaises(NoQueryError):
Query.doesnotexist()
def test_no_aggregate(self):
with self.assertRaises(NoAggregateError):
Aggregate.doesnotexist()
def test_no_suggester(self):
with self.assertRaises(NoSuggesterError):
Suggester.doesnotexist()
def test_missing_arg(self):
with self.assertRaises(MissingArgError):
Query.term(None)
def test_invalid_arg(self):
# Test passing not a list
with self.assertRaises(ValueError):
Query.bool(must=set())
# And now an invalid list
with self.assertRaises(ValueError):
Query.bool(must=[None])
# And now an invalid list
with self.assertRaises(ValueError):
Query.bool(must=[Aggregate.terms('test', 'test')])
# And now an invalid list
with self.assertRaises(ValueError):
Query.range('field', gte=['error'])
# Empty list should be OK/ignored
Query.bool(must=[])
| 26.675676 | 72 | 0.646066 | 1,801 | 0.60824 | 0 | 0 | 0 | 0 | 0 | 0 | 354 | 0.119554 |
e033806ab7ea22ebae1fd718d44b4fe732b6c01d | 360 | py | Python | models/SelectionGAN/person_transfer/tool/rm_insnorm_running_vars.py | xianjian-xie/pose-generation | ad0495e80c6fe1e7690fa8691f1eb11b4e9bca32 | [
"MIT"
] | 445 | 2019-04-14T17:48:11.000Z | 2022-03-20T11:53:30.000Z | models/SelectionGAN/person_transfer/tool/rm_insnorm_running_vars.py | xianjian-xie/pose-generation | ad0495e80c6fe1e7690fa8691f1eb11b4e9bca32 | [
"MIT"
] | 17 | 2019-06-03T11:34:22.000Z | 2022-02-28T01:26:13.000Z | models/SelectionGAN/person_transfer/tool/rm_insnorm_running_vars.py | xianjian-xie/pose-generation | ad0495e80c6fe1e7690fa8691f1eb11b4e9bca32 | [
"MIT"
] | 71 | 2019-04-16T01:55:39.000Z | 2022-03-22T05:09:59.000Z | import torch
ckp_path = './checkpoints/fashion_PATN/latest_net_netG.pth'
save_path = './checkpoints/fashion_PATN_v1.0/latest_net_netG.pth'
states_dict = torch.load(ckp_path)
states_dict_new = states_dict.copy()
for key in states_dict.keys():
if "running_var" in key or "running_mean" in key:
del states_dict_new[key]
torch.save(states_dict_new, save_path) | 32.727273 | 65 | 0.794444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 128 | 0.355556 |
e03555c89ef682c9881524e84b3f99fb40c60411 | 3,916 | py | Python | script/dummy/arm_control.py | amazon-picking-challenge/team_pfn | 2f76524b067d816d8407f6c4fae4e6d33939c024 | [
"Apache-2.0"
] | 7 | 2016-09-04T02:07:04.000Z | 2017-05-25T02:31:07.000Z | script/dummy/arm_control.py | amazon-picking-challenge/team_pfn | 2f76524b067d816d8407f6c4fae4e6d33939c024 | [
"Apache-2.0"
] | null | null | null | script/dummy/arm_control.py | amazon-picking-challenge/team_pfn | 2f76524b067d816d8407f6c4fae4e6d33939c024 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Copyright 2016 Preferred Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy
import rospy
import actionlib
from geometry_msgs.msg import Twist, Vector3
from apc2016.msg import *
class DummyArmControl(object):
def __init__(self):
self.srv_lowlevel_left = \
actionlib.SimpleActionServer('move_to_left',
RobotArmMoveAction,
execute_cb=self.cb_move_to_left,
auto_start=False)
self.srv_highlevel_left = \
actionlib.SimpleActionServer('move_to_bin_left',
BinToteMoveAction,
execute_cb=self.cb_move_to_bin_left,
auto_start=False)
self.srv_lowlevel_right = \
actionlib.SimpleActionServer('move_to_right',
RobotArmMoveAction,
execute_cb=self.cb_move_to_right,
auto_start=False)
self.srv_highlevel_right = \
actionlib.SimpleActionServer('move_to_bin_right',
BinToteMoveAction,
execute_cb=self.cb_move_to_bin_right,
auto_start=False)
self.srv_lowlevel_left.start()
self.srv_highlevel_left.start()
self.srv_lowlevel_right.start()
self.srv_highlevel_right.start()
def cb_move_to_left(self, goal):
print "moving away right arm, then moving left arm:"
print goal.target_position
result = RobotArmMoveResult(success=True,
position=goal.target_position)
self.srv_lowlevel_left.set_succeeded(result)
def cb_move_to_bin_left(self, goal):
if goal.position:
pos = goal.position
else:
pos = "photo"
print "looking up position for %s/%s" % (goal.bin, pos)
pos = numpy.asarray([550, -146, 752, 181, 0, 180])
p = Vector3(pos[0], pos[1], pos[2])
r = Vector3(pos[3], pos[4], pos[5])
print "moving away right arm, then moving left arm"
result = BinToteMoveResult(success=True, position=Twist(p, r))
self.srv_highlevel_left.set_succeeded(result)
def cb_move_to_right(self, goal):
print "moving away left arm, then moving right arm:"
print goal.target_position
result = RobotArmMoveResult(success=True,
position=goal.target_position)
self.srv_lowlevel_right.set_succeeded(result)
def cb_move_to_bin_right(self, goal):
if goal.position:
pos = goal.position
else:
pos = "photo"
print "looking up position for %s/%s" % (goal.bin, pos)
pos = numpy.asarray([550, -146, 752, 184, 0, 180])
p = Vector3(pos[0], pos[1], pos[2])
r = Vector3(pos[3], pos[4], pos[5])
print "moving away left arm, then moving right arm"
result = BinToteMoveResult(success=True, position=Twist(p, r))
self.srv_highlevel_right.set_succeeded(result)
if __name__ == '__main__':
rospy.init_node("arm_control_dummy", anonymous=True)
DummyArmControl()
rospy.spin()
| 39.959184 | 78 | 0.589122 | 3,064 | 0.782431 | 0 | 0 | 0 | 0 | 0 | 0 | 945 | 0.241318 |
e035deed8737a8c4ccc24d990b915152d4728210 | 3,115 | py | Python | cogs/events.py | rompdodger/RompDodger | 9c8b481d9f69e05c15f01271f6c18e09ab2723e6 | [
"MIT"
] | null | null | null | cogs/events.py | rompdodger/RompDodger | 9c8b481d9f69e05c15f01271f6c18e09ab2723e6 | [
"MIT"
] | null | null | null | cogs/events.py | rompdodger/RompDodger | 9c8b481d9f69e05c15f01271f6c18e09ab2723e6 | [
"MIT"
] | null | null | null | import json
import discord
from utils.time import format_time
from utils import utilities
from discord.ext import commands
from discord import Embed
class Events(commands.Cog):
"""Event Handler for RompDodger"""
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
if hasattr(ctx.command, 'on_error'):
return
if isinstance(error, (commands.CommandNotFound, commands.NoPrivateMessage)):
return
elif isinstance(error, commands.MissingRequiredArgument):
await ctx.send(embed=await utilities.generate_embed(f"Command {ctx.prefix} {ctx.command} requires **{error.param.name}** argument, but you missed giving that"))
elif isinstance(error, commands.BotMissingPermissions):
perms = "".join(error.missing_perms)
await ctx.send(embed=await utilities.generate_embed(f"To finish the command bot must have {perms} permission, give the bot appropriate permissions and re-try"))
self.bot.logger.critical(f"Ignoring Exception in {ctx.command}\nError: {error}")
@commands.Cog.listener()
async def on_guild_join(self, guild):
#TODO: implement blacklist sytem
self.bot.logger.info(f"Joined on {guild} > Total Guilds: {len(self.bot.guilds)}")
@commands.Cog.listener()
async def on_guild_remove(self, guild):
self.bot.logger.info(f"Removed on {guild} > Total Guilds: {len(self.bot.guilds)}")
@commands.Cog.listener()
async def on_member_join(self, member):
cursor = await self.bot.db.execute(f"SELECT channel FROM welcomer WHERE guild_id = {member.guild.id}")
chrow = await cursor.fetchone()
if chrow is None:
return
else:
msgrow = await self.bot.db.execute(f"SELECT message FROM welcomer WHERE guild_id = {member.guild.id}")
msg = await msgrow.fetchone()
name = member.name
mention = member.mention
members = member.guild.member_count
server = member.guild
embed = discord.Embed(color=discord.Color.dark_green(), description=msg[0].format(name=name, mention=mention, members=members, server=server))
embed.set_thumbnail(url=f"{member.avatar_url_as(format='png', size=2048)}")
created = format_time(member.created_at)
embed.set_footer(text=f"{member.name} Created on {created}")
ch = self.bot.get_channel(int(chrow[0]))
await ch.send(embed=embed)
await cursor.close()
@commands.Cog.listener()
async def on_member_remove(self, member):
cursor = await self.bot.db.execute(f"SELECT channel FROM leaver WHERE guild_id = {ctx.guild.id}")
chrow = await cursor.fetchone()
if chrow is None:
return
else:
msg = await self.bot.db.execute(f"SELECT msg FROM leaver WHERE guild_id = {member.guild.id}")
name = member.name
mention = member.mention
server = member.server
members = member.guild.member_count
embed.set_thumbnail(url=f"{member.avatar_url_as(format='png', size=2048)}")
created = format_time(member.joined_at)
embed.set_footer(text=f"{member.name} Created joined on {joined}")
ch = self.bot.get_channel(int(chrow[0]))
await ch.send(embed=embed)
await cursor.close()
def setup(bot):
bot.add_cog(Events(bot)) | 39.43038 | 163 | 0.733547 | 2,920 | 0.9374 | 0 | 0 | 2,786 | 0.894382 | 2,656 | 0.852648 | 896 | 0.28764 |
e036c8bce2480207e7560bdb8a009054bcbca43d | 1,333 | py | Python | Task/Parallel-calculations/Python/parallel-calculations-2.py | LaudateCorpus1/RosettaCodeData | 9ad63ea473a958506c041077f1d810c0c7c8c18d | [
"Info-ZIP"
] | 1 | 2018-11-09T22:08:38.000Z | 2018-11-09T22:08:38.000Z | Task/Parallel-calculations/Python/parallel-calculations-2.py | seanwallawalla-forks/RosettaCodeData | 9ad63ea473a958506c041077f1d810c0c7c8c18d | [
"Info-ZIP"
] | null | null | null | Task/Parallel-calculations/Python/parallel-calculations-2.py | seanwallawalla-forks/RosettaCodeData | 9ad63ea473a958506c041077f1d810c0c7c8c18d | [
"Info-ZIP"
] | 1 | 2018-11-09T22:08:40.000Z | 2018-11-09T22:08:40.000Z | import multiprocessing
# ========== #Python3 - concurrent
from math import floor, sqrt
numbers = [
112272537195293,
112582718962171,
112272537095293,
115280098190773,
115797840077099,
1099726829285419]
# numbers = [33, 44, 55, 275]
def lowest_factor(n, _start=3):
if n % 2 == 0:
return 2
search_max = int(floor(sqrt(n))) + 1
for i in range(_start, search_max, 2):
if n % i == 0:
return i
return n
def prime_factors(n, lowest):
pf = []
while n > 1:
pf.append(lowest)
n //= lowest
lowest = lowest_factor(n, max(lowest, 3))
return pf
# ========== #Python3 - concurrent
def prime_factors_of_number_with_lowest_prime_factor(numbers):
pool = multiprocessing.Pool(processes=5)
factors = pool.map(lowest_factor,numbers)
low_factor,number = max((l,f) for l,f in zip(factors,numbers))
all_factors = prime_factors(number,low_factor)
return number,all_factors
if __name__ == '__main__':
print('For these numbers:')
print('\n '.join(str(p) for p in numbers))
number, all_factors = prime_factors_of_number_with_lowest_prime_factor(numbers)
print(' The one with the largest minimum prime factor is {}:'.format(number))
print(' All its prime factors in order are: {}'.format(all_factors))
| 28.361702 | 84 | 0.650413 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 237 | 0.177794 |
e036f44b7fa0f2862267ed2ae2bb354dffc8bc0b | 260 | py | Python | setup.py | clin366/airpollutionnowcast | f9152583eebc4ad747c8d0510460334a5fb23ff9 | [
"MIT"
] | null | null | null | setup.py | clin366/airpollutionnowcast | f9152583eebc4ad747c8d0510460334a5fb23ff9 | [
"MIT"
] | 9 | 2020-03-24T18:12:45.000Z | 2022-02-10T00:36:57.000Z | setup.py | clin366/airpollutionnowcast | f9152583eebc4ad747c8d0510460334a5fb23ff9 | [
"MIT"
] | null | null | null | from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='Project: Nowcasting the air pollution using online search log',
author='Emory University(IR Lab)',
license='MIT',
)
| 23.636364 | 80 | 0.692308 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 106 | 0.407692 |
e037cc498ab758b47d57f427145d459d775fb063 | 339 | py | Python | problems/p0048/s48.py | ahrarmonsur/euler | 4174790637806521a4ea2973abeb76c96c64a782 | [
"MIT"
] | 1 | 2017-12-19T21:18:48.000Z | 2017-12-19T21:18:48.000Z | problems/p0048/s48.py | ahrarmonsur/euler | 4174790637806521a4ea2973abeb76c96c64a782 | [
"MIT"
] | null | null | null | problems/p0048/s48.py | ahrarmonsur/euler | 4174790637806521a4ea2973abeb76c96c64a782 | [
"MIT"
] | null | null | null | """
Project Euler Problem 48
Self powers
Solved by Ahrar Monsur
The series, 1^1 + 2^2 + 3^3 + ... + 10^10 = 10405071317.
Find the last ten digits of the series, 1^1 + 2^2 + 3^3 + ... + 1000^1000.
"""
def main():
max_digits = 1000
sum = 0
for i in range(1, max_digits+1):
sum += i**i
print str(sum)[-10:]
main() | 17.842105 | 74 | 0.575221 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 201 | 0.59292 |
e037f80102198e6c3f910c89e80dfa13f614bfb4 | 1,109 | py | Python | BigData/sparkTask/test.py | Rainstyd/rainsty | 9a0d5f46c20faf909c4194f315fb9960652cffc6 | [
"Apache-2.0"
] | 1 | 2020-03-25T01:13:35.000Z | 2020-03-25T01:13:35.000Z | BigData/sparkTask/test.py | Rainstyed/rainsty | f74e0ccaf16d1871c9d1870bd8a7c8a63243fcf5 | [
"Apache-2.0"
] | 1 | 2022-01-06T23:49:21.000Z | 2022-01-06T23:49:21.000Z | BigData/sparkTask/test.py | rainstyd/rainsty | 9a0d5f46c20faf909c4194f315fb9960652cffc6 | [
"Apache-2.0"
] | 1 | 2020-03-20T08:48:36.000Z | 2020-03-20T08:48:36.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
@author: rainsty
@file: test.py
@time: 2020-01-04 18:36:57
@description:
"""
import os
from pyspark.sql import SparkSession
os.environ['JAVA_HOME'] = '/root/jdk'
os.environ['SPARK_HOME'] = '/root/spark'
os.environ['PYTHON_HOME'] = "/root/python"
os.environ['PYSPARK_PYTHON'] = "/usr/bin/python"
os.environ['SPARK_MASTER_IP'] = 'rainsty'
def create_spark_context():
sc = SparkSession.builder \
.appName("TestSparkSession") \
.master("spark://rainsty:7077") \
.config('spark.executor.num', '1')\
.config('spark.executor.memory', '512m')\
.config("spark.executor.cores", '1')\
.config('spark.cores.max', '1')\
.config('spark.driver.memory', '512m') \
.getOrCreate()
return sc
logFile = "/root/spark/README.md"
spark = create_spark_context()
logData = spark.read.text(logFile).cache()
numAs = logData.filter(logData.value.contains('a')).count()
numBs = logData.filter(logData.value.contains('b')).count()
print("Lines with a: %i, lines with b: %i" % (numAs, numBs))
spark.stop()
| 24.108696 | 60 | 0.640216 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 486 | 0.438233 |
e03860989956f152e97aacd3a94938522a675b8e | 1,042 | py | Python | esercizi/areaSottesaCompareNumPy.py | gdv/python-alfabetizzazione | d87561222de8a230db11d8529c49cf1702aec326 | [
"MIT"
] | null | null | null | esercizi/areaSottesaCompareNumPy.py | gdv/python-alfabetizzazione | d87561222de8a230db11d8529c49cf1702aec326 | [
"MIT"
] | null | null | null | esercizi/areaSottesaCompareNumPy.py | gdv/python-alfabetizzazione | d87561222de8a230db11d8529c49cf1702aec326 | [
"MIT"
] | 1 | 2019-03-26T11:14:33.000Z | 2019-03-26T11:14:33.000Z | import numpy as np
import timeit
def effe(x):
y = -x * (x - 1.0)
return y
numIntervalli = input('enter the number of intervals in [0.0, 1.0] ')
deltaIntervallo = 1.0 / float(numIntervalli)
print "larghezza intervallo", deltaIntervallo
start = timeit.default_timer()
xIntervalli = []
yIntervalli = []
i = 0
while i < numIntervalli:
xIntervallo = i*deltaIntervallo
xIntervalli.append(xIntervallo)
yIntervalli.append(effe(xIntervallo))
i += 1
areaSottesa = 0.0
for altezza in yIntervalli:
areaSottesa += altezza * deltaIntervallo
endOld = timeit.default_timer()
print "l'area sottesa dalla curva vale ", areaSottesa
xNPIntervalli = np.linspace(0.0, 1.0, numIntervalli, endpoint=False)
yNPIntervalli = -xNPIntervalli * (xNPIntervalli - 1.0)
npArea = np.sum(yNPIntervalli*deltaIntervallo)
endNP = timeit.default_timer()
# print xNPIntervalli
# print xIntervalli
# print yNPIntervalli
# print yIntervalli
print "area numpy = ", npArea
print "old timing = ", endOld - start, "numPy timing = ", endNP - endOld
| 24.809524 | 72 | 0.726488 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 232 | 0.222649 |
e039092d052960d2f6c3a01770cd6d300e7b630a | 8,810 | py | Python | json_codegen/generators/python3_marshmallow/object_generator.py | expobrain/json-schema-codegen | e22b386333c6230e5d6f5984fd947fdd7b947e82 | [
"MIT"
] | 21 | 2018-06-15T16:08:57.000Z | 2022-02-11T16:16:11.000Z | json_codegen/generators/python3_marshmallow/object_generator.py | expobrain/json-schema-codegen | e22b386333c6230e5d6f5984fd947fdd7b947e82 | [
"MIT"
] | 14 | 2018-08-09T18:02:19.000Z | 2022-01-24T18:04:17.000Z | json_codegen/generators/python3_marshmallow/object_generator.py | expobrain/json-schema-codegen | e22b386333c6230e5d6f5984fd947fdd7b947e82 | [
"MIT"
] | 4 | 2018-11-30T18:19:10.000Z | 2021-11-18T04:04:36.000Z | import ast
from json_codegen.generators.python3_marshmallow.utils import Annotations, class_name
class ObjectGenerator(object):
@staticmethod
def _get_property_name(node_assign):
name = node_assign.targets[0]
return name.id
@staticmethod
def _nesting_class(node_assign):
for node in ast.walk(node_assign):
if isinstance(node, ast.Call):
if node.func.attr == "Nested":
return class_name(node.args[0].id)
@staticmethod
def _non_primitive_nested_list(node_assign):
if node_assign.value.func.attr == "List":
return (
len(node_assign.value.args) > 0 and node_assign.value.args[0].func.attr == "Nested"
)
else:
return False
@staticmethod
def _init_non_primitive_nested_class(node_assign, object_, prop):
"""
If the nested list is non-primitive, initialise sub-classes in a list comp
If the nest is primitive, we can simply get it
Marshmallow will do the type marshalling
"""
return ast.ListComp(
elt=ast.Call(
func=ast.Name(id=ObjectGenerator._nesting_class(node_assign)),
args=[ast.Name(id="el")],
keywords=[],
),
generators=[
ast.comprehension(
target=ast.Name(id="el"),
iter=ast.Call(
func=ast.Attribute(value=ast.Name(id=object_), attr="get"),
args=[ast.Str(s=prop), ast.Dict(keys=[], values=[])],
keywords=[],
),
ifs=[],
is_async=0,
)
],
)
@staticmethod
def _get_key_from_object(object_, prop):
return ast.Call(
func=ast.Attribute(value=ast.Name(id=object_), attr="get"),
args=[ast.Str(s=prop)],
keywords=[],
)
@staticmethod
def _hint_required_property(node_assign, value, object_, prop):
for node in ast.walk(node_assign):
if isinstance(node, ast.keyword):
if "required" in node.arg:
value = ast.Subscript(
value=ast.Name(id=object_), slice=ast.Index(value=ast.Str(s=prop))
)
return value
@staticmethod
def _get_default_for_property(node_assign, value, object_, prop):
for node in ast.walk(node_assign):
if isinstance(node, ast.keyword) and node.arg == "required":
return value
for node in ast.walk(node_assign):
if isinstance(node, ast.keyword) and node.arg == "default":
default_value = [
keyword.value
for keyword in node_assign.value.keywords
if keyword.arg == "default"
][0]
value.args.append(default_value)
return value
else:
return value
@staticmethod
def assign_property(node_assign, object_):
"""
Required property -> self.prop = parent_dict["prop"]
Optional property -> self.prop = parent_dict.get("prop")
Primative nested list -> self.prop = parent_dict.get("prop")
Non-primative nested list -> self.props = [PropertyClass(el) for el in parent_dict.get('props', {})]
"""
prop = ObjectGenerator._get_property_name(node_assign)
if ObjectGenerator._non_primitive_nested_list(node_assign):
value = ObjectGenerator._init_non_primitive_nested_class(node_assign, object_, prop)
else:
# Assign the property as self.prop = table.get("prop")
value = ObjectGenerator._get_key_from_object(object_, prop)
# If the property is required, assign as self.prop = table["prop"]
value = ObjectGenerator._hint_required_property(node_assign, value, object_, prop)
value = ObjectGenerator._get_default_for_property(node_assign, value, object_, prop)
return ast.AnnAssign(
target=ast.Attribute(value=ast.Name(id="self"), attr=prop),
value=value,
simple=0,
annotation=Annotations(node_assign).type,
)
@staticmethod
def construct_class(schema):
name = class_name(schema.name)
name_lower = name.lower()
# Bundle function arguments and keywords
fn_arguments = ast.arguments(
args=[
ast.arg(arg="self", annotation=None),
ast.arg(arg=name_lower, annotation=ast.Name(id="dict")),
],
vararg=None,
kwarg=None,
kwonlyargs=[],
kw_defaults=[],
defaults=[],
)
fn_body = [
ObjectGenerator.assign_property(node, name_lower)
for node in schema.body
if isinstance(node, ast.Assign)
]
# pass if no Assign nodes
if len(fn_body) == 0:
fn_body = [ast.Pass()]
# Generate class constructor
class_body = [
ast.FunctionDef(
name="__init__", args=fn_arguments, body=fn_body, decorator_list=[], returns=None
),
ObjectGenerator._construct_to_("json")(schema),
ObjectGenerator._construct_to_("dict")(schema),
ObjectGenerator.construct_from_json(schema),
]
return ast.ClassDef(name=name, bases=[], body=class_body, decorator_list=[], keywords=[])
@staticmethod
def _construct_to_(output):
if output == "json":
method = "dumps"
elif output == "dict":
method = "dump"
else:
raise NotImplementedError("Only deserialisation to json or dict supported")
def _construct_to_helper(schema):
fn_args = ast.arguments(
args=[ast.arg(arg="self", annotation=None)],
vararg=None,
kwonlyargs=[],
kw_defaults=[],
kwarg=None,
defaults=[],
)
fn_body = [
ast.Return(
value=ast.Attribute(
value=ast.Call(
func=ast.Attribute(
value=ast.Call(
func=ast.Name(id=schema.name),
args=[],
keywords=[
ast.keyword(
arg="strict", value=ast.NameConstant(value=True)
)
],
),
attr=method,
),
args=[ast.Name(id="self")],
keywords=[],
),
attr="data",
)
)
]
return ast.FunctionDef(
name=f"to_{output}", args=fn_args, body=fn_body, decorator_list=[], returns=None
)
return _construct_to_helper
@staticmethod
def construct_from_json(schema):
fn_args = ast.arguments(
args=[
ast.arg(arg="json", annotation=ast.Name(id="str")),
ast.arg(arg="only", annotation=None),
],
vararg=None,
kwonlyargs=[],
kw_defaults=[],
kwarg=None,
defaults=[ast.NameConstant(value=None)],
)
fn_body = [
ast.Return(
ast.Attribute(
value=ast.Call(
func=ast.Attribute(
value=ast.Call(
func=ast.Name(id=schema.name),
args=[],
keywords=[
ast.keyword(arg="strict", value=ast.NameConstant(value=True)),
ast.keyword(arg="only", value=ast.Name(id="only")),
],
),
attr="loads",
),
args=[ast.Name(id="json")],
keywords=[],
),
attr="data",
)
)
]
return ast.FunctionDef(
name="from_json",
args=fn_args,
body=fn_body,
decorator_list=[ast.Name(id="staticmethod")],
returns=None,
)
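# Illustrative sketch (not part of the generator, all names hypothetical): for a
# schema with a required "name" field and a non-primitive nested list "pets",
# construct_class() assembles an AST roughly equivalent to this source, where
# PersonSchema stands in for schema.name and Person for class_name(schema.name):
#
#   class Person:
#       def __init__(self, person: dict):
#           self.name: str = person["name"]                      # required -> indexed
#           self.pets: List[Pet] = [Pet(el) for el in person.get("pets", {})]
#
#       def to_dict(self):              # to_json is analogous, using .dumps()
#           return PersonSchema(strict=True).dump(self).data
#
#       @staticmethod
#       def from_json(json, only=None):
#           return PersonSchema(strict=True, only=only).loads(json).data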
| 34.414063 | 108 | 0.484222 | 8,709 | 0.988536 | 0 | 0 | 8,614 | 0.977753 | 0 | 0 | 1,069 | 0.121339 |
e039c81acd8d1fcb88f92f04b6556a716666da98 | 12,736 | py | Python | testing/regrid/testEsmfGridToMeshRegridCsrv.py | xylar/cdat | 8a5080cb18febfde365efc96147e25f51494a2bf | [
"BSD-3-Clause"
] | 62 | 2018-03-30T15:46:56.000Z | 2021-12-08T23:30:24.000Z | testing/regrid/testEsmfGridToMeshRegridCsrv.py | xylar/cdat | 8a5080cb18febfde365efc96147e25f51494a2bf | [
"BSD-3-Clause"
] | 114 | 2018-03-21T01:12:43.000Z | 2021-07-05T12:29:54.000Z | testing/regrid/testEsmfGridToMeshRegridCsrv.py | CDAT/uvcdat | 5133560c0c049b5c93ee321ba0af494253b44f91 | [
"BSD-3-Clause"
] | 14 | 2018-06-06T02:42:47.000Z | 2021-11-26T03:27:00.000Z | #!/usr/bin/env python
#
# $Id: ESMP_GridToMeshRegridCsrv.py,v 1.5 2012/04/23 23:00:14 rokuingh Exp $
#===============================================================================
# ESMP/examples/ESMP_GridToMeshRegrid.py
#===============================================================================
"""
ESMP_GridToMeshRegridCsrv.py
Two ESMP_Field objects are created, one on a Grid and the other on a Mesh. The
source Field is set to an analytic function, and a conservative regridding
operation is performed from the source to the destination Field. After
the regridding is completed, the destination Field is compared to the
exact solution over that domain.
"""
import cdms2
import ESMP
import numpy as _NP
import unittest
def grid_create():
'''
PRECONDITIONS: ESMP has been initialized.
POSTCONDITIONS: A ESMP_Grid has been created.
'''
ub_x = float(4)
ub_y = float(4)
lb_x = float(0)
lb_y = float(0)
max_x = float(4)
max_y = float(4)
min_x = float(0)
min_y = float(0)
cellwidth_x = (max_x-min_x)/(ub_x-lb_x)
cellwidth_y = (max_y-min_y)/(ub_y-lb_y)
cellcenter_x = cellwidth_x/2
cellcenter_y = cellwidth_y/2
maxIndex = _NP.array([ub_x,ub_y], dtype=_NP.int32)
grid = ESMP.ESMP_GridCreateNoPeriDim(maxIndex,
coordSys=ESMP.ESMP_COORDSYS_CART)
## CORNERS
ESMP.ESMP_GridAddCoord(grid, staggerloc=ESMP.ESMP_STAGGERLOC_CORNER)
exLB_corner, exUB_corner = ESMP.ESMP_GridGetCoord(grid, \
ESMP.ESMP_STAGGERLOC_CORNER)
# get the coordinate pointers and set the coordinates
[x,y] = [0, 1]
gridXCorner = ESMP.ESMP_GridGetCoordPtr(grid, x, ESMP.ESMP_STAGGERLOC_CORNER)
gridYCorner = ESMP.ESMP_GridGetCoordPtr(grid, y, ESMP.ESMP_STAGGERLOC_CORNER)
#print 'lower corner bounds = [{0},{1}]'.format(exLB_corner[0],exLB_corner[1])
#print 'upper corner bounds = [{0},{1}]'.format(exUB_corner[0],exUB_corner[1])
p = 0
for i1 in range(exLB_corner[1], exUB_corner[1]):
for i0 in range(exLB_corner[0], exUB_corner[0]):
gridXCorner[p] = float(i0)*cellwidth_x
gridYCorner[p] = float(i1)*cellwidth_y
p = p + 1
#print 'Grid corner coordinates:'
p = 0
for i1 in range(exLB_corner[1], exUB_corner[1]):
for i0 in range(exLB_corner[0], exUB_corner[0]):
#print '[{0},{1}]'.format(gridXCorner[p], gridYCorner[p])
p = p + 1
#print '\n'
## CENTERS
ESMP.ESMP_GridAddCoord(grid, staggerloc=ESMP.ESMP_STAGGERLOC_CENTER)
exLB_center, exUB_center = ESMP.ESMP_GridGetCoord(grid, \
ESMP.ESMP_STAGGERLOC_CENTER)
# get the coordinate pointers and set the coordinates
[x,y] = [0, 1]
gridXCenter = ESMP.ESMP_GridGetCoordPtr(grid, x, ESMP.ESMP_STAGGERLOC_CENTER)
gridYCenter = ESMP.ESMP_GridGetCoordPtr(grid, y, ESMP.ESMP_STAGGERLOC_CENTER)
#print 'lower corner bounds = [{0},{1}]'.format(exLB_center[0],exLB_center[1])
#print 'upper corner bounds = [{0},{1}]'.format(exUB_center[0],exUB_center[1])
p = 0
for i1 in range(exLB_center[1], exUB_center[1]):
for i0 in range(exLB_center[0], exUB_center[0]):
gridXCenter[p] = float(i0)*cellwidth_x + cellwidth_x/2.0
gridYCenter[p] = float(i1)*cellwidth_y + cellwidth_y/2.0
p = p + 1
#print 'Grid center coordinates:'
p = 0
for i1 in range(exLB_center[1], exUB_center[1]):
for i0 in range(exLB_center[0], exUB_center[0]):
#print '[{0},{1}]'.format(gridXCenter[p], gridYCenter[p])
p = p + 1
#print '\n'
return grid
def mesh_create_3x3(mesh):
'''
PRECONDITIONS: An ESMP_Mesh has been declared.
POSTCONDITIONS: A 3x3 ESMP_Mesh has been created.
3x3 Mesh
3.0 2.0 13 -------14 --------15--------16
| | | |
| 7 | 8 | 9 |
| | | |
2.5 1.5 9 ------- 10 --------11--------12
| | | |
| 4 | 5 | 6 |
| | | |
1.5 0.5 5 ------- 6 -------- 7-------- 8
| | | |
| 1 | 2 | 3 |
| | | |
1.0 0.0 1 ------- 2 -------- 3-------- 4
0.0 0.5 1.5 2.0
1.0 1.5 2.5 3.0
Node Ids at corners
Element Ids in centers
(Everything owned by PET 0)
'''
# set up a simple mesh
num_node = 16
num_elem = 9
nodeId = _NP.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16])
'''
# this is for grid to mesh
nodeCoord = _NP.array([1.0,1.0, 1.5,1.0, 2.5,1.0, 3.0,1.0,
1.0,1.5, 1.5,1.5, 2.5,1.5, 3.0,1.5,
1.0,2.5, 1.5,2.5, 2.5,2.5, 3.0,2.5,
1.0,3.0, 1.5,3.0, 2.5,3.0, 3.0,3.0])
'''
# this is for mesh to grid
nodeCoord = _NP.array([0.0,0.0, 1.5,0.0, 2.5,0.0, 4.0,0.0,
0.0,1.5, 1.5,1.5, 2.5,1.5, 4.0,1.5,
0.0,2.5, 1.5,2.5, 2.5,2.5, 4.0,2.5,
0.0,4.0, 1.5,4.0, 2.5,4.0, 4.0,4.0])
nodeOwner = _NP.zeros(num_node, dtype=_NP.int32)
elemId = _NP.array([1,2,3,4,5,6,7,8,9], dtype=_NP.int32)
elemType = _NP.ones(num_elem, dtype=_NP.int32)
elemType*=ESMP.ESMP_MESHELEMTYPE_QUAD
elemConn = _NP.array([0,1,5,4,
1,2,6,5,
2,3,7,6,
4,5,9,8,
5,6,10,9,
6,7,11,10,
8,9,13,12,
9,10,14,13,
10,11,15,14], dtype=_NP.int32)
ESMP.ESMP_MeshAddNodes(mesh,num_node,nodeId,nodeCoord,nodeOwner)
ESMP.ESMP_MeshAddElements(mesh,num_elem,elemId,elemType,elemConn)
#print 'Mesh coordinates:'
for i in range(num_node):
x = nodeCoord[2*i]
y = nodeCoord[2*i+1]
#print '[{0},{1}]'.format(x, y)
#print '\n'
return mesh, nodeCoord, elemType, elemConn
def create_ESMPmesh_3x3():
'''
PRECONDITIONS: ESMP is initialized.
POSTCONDITIONS: An ESMP_Mesh (3x3) has been created and returned as 'mesh'.
'''
# Two parametric dimensions, and three spatial dimensions
mesh = ESMP.ESMP_MeshCreate(2,2)
mesh, nodeCoord, elemType, elemConn = mesh_create_3x3(mesh)
return mesh, nodeCoord, elemType, elemConn
def create_ESMPfieldgrid(grid, name):
'''
PRECONDITIONS: An ESMP_Grid has been created, and 'name' is a string that
will be used to initialize the name of a new ESMP_Field.
POSTCONDITIONS: An ESMP_Field has been created.
'''
# defaults to center staggerloc
field = ESMP.ESMP_FieldCreateGrid(grid, name)
return field
def build_analyticfieldgrid(field, grid):
'''
PRECONDITIONS: An ESMP_Field has been created.
POSTCONDITIONS: The 'field' has been initialized to an analytic field.
'''
# get the field pointer first
fieldPtr = ESMP.ESMP_FieldGetPtr(field)
# get the grid bounds and coordinate pointers
exLB, exUB = ESMP.ESMP_GridGetCoord(grid, ESMP.ESMP_STAGGERLOC_CENTER)
# get the coordinate pointers and set the coordinates
[x,y] = [0, 1]
gridXCoord = ESMP.ESMP_GridGetCoordPtr(grid, x, ESMP.ESMP_STAGGERLOC_CENTER)
gridYCoord = ESMP.ESMP_GridGetCoordPtr(grid, y, ESMP.ESMP_STAGGERLOC_CENTER)
#print "Grid center coordinates"
p = 0
for i1 in range(exLB[1], exUB[1]):
for i0 in range(exLB[0], exUB[0]):
xc = gridXCoord[p]
yc = gridYCoord[p]
fieldPtr[p] = 20.0+xc+yc
#fieldPtr[p] = 20.0+xc*yc+yc**2
#print '[{0},{1}] = {2}'.format(xc,yc,fieldPtr[p])
p = p + 1
#print "\n"
return field
def create_ESMPfield(mesh, name):
'''
PRECONDITIONS: An ESMP_Mesh has been created, and 'name' is a string that
will be used to initialize the name of a new ESMP_Field.
POSTCONDITIONS: An ESMP_Field has been created.
'''
field = ESMP.ESMP_FieldCreate(mesh, name, meshloc=ESMP.ESMP_MESHLOC_ELEMENT)
return field
def build_analyticfield(field, nodeCoord, elemType, elemConn):
'''
PRECONDITIONS: An ESMP_Field has been created.
POSTCONDITIONS: The 'field' has been initialized to an analytic field.
'''
# get the field pointer first
fieldPtr = ESMP.ESMP_FieldGetPtr(field, 0)
# set the field to a vanilla initial field for now
#print "Mesh center coordinates"
offset = 0
for i in range(field.size): # this routine assumes this field is on elements
if (elemType[i] == ESMP.ESMP_MESHELEMTYPE_TRI):
raise NameError("Cannot compute a non-constant analytic field for a mesh\
with triangular elements!")
x1 = nodeCoord[(elemConn[offset])*2]
x2 = nodeCoord[(elemConn[offset+1])*2]
y1 = nodeCoord[(elemConn[offset+1])*2+1]
y2 = nodeCoord[(elemConn[offset+3])*2+1]
x = (x1+x2)/2.0
y = (y1+y2)/2.0
fieldPtr[i] = 20.0+x+y
#fieldPtr[i] = 20.0+x*y+y**2
#print '[{0},{1}] = {2}'.format(x,y,fieldPtr[i])
offset = offset + 4
#print "\n"
return field
def run_regridding(srcfield, dstfield):
'''
PRECONDITIONS: Two ESMP_Fields have been created and a regridding operation
is desired from 'srcfield' to 'dstfield'.
POSTCONDITIONS: An ESMP regridding operation has set the data on 'dstfield'.
'''
# call the regridding functions
routehandle = ESMP.ESMP_FieldRegridStore(srcfield, dstfield,
regridmethod=ESMP.ESMP_REGRIDMETHOD_CONSERVE,
unmappedaction=ESMP.ESMP_UNMAPPEDACTION_ERROR)
ESMP.ESMP_FieldRegrid(srcfield, dstfield, routehandle)
ESMP.ESMP_FieldRegridRelease(routehandle)
return dstfield
def compare_fields(field1, field2):
'''
PRECONDITIONS: Two ESMP_Fields have been created and a comparison of the
the values is desired between 'srcfield' and 'dstfield'.
POSTCONDITIONS: The values on 'srcfield' and 'dstfield' are compared.
returns True if the fileds are comparable (success)
'''
# get the data pointers for the fields
field1ptr = ESMP.ESMP_FieldGetPtr(field1)
field2ptr = ESMP.ESMP_FieldGetPtr(field2)
# compare point values of field1 to field2
# first verify they are the same size
if (field1.size != field2.size):
raise NameError('compare_fields: Fields must be the same size!')
# initialize to True, and check for False point values
correct = True
totalErr = 0.0
for i in range(field1.size):
err = abs(field1ptr[i] - field2ptr[i])/abs(field2ptr[i])
if err > .06:
correct = False
print "ACCURACY ERROR - "+str(err)
print "field1 = {0} : field2 = {1}\n".format(field1ptr[i], field2ptr[i])
totalErr += err
if correct:
print " - PASS - Total Error = "+str(totalErr)
return True
else:
print " - FAIL - Total Error = "+str(totalErr)
return False
class TestESMP_GridToMeshRegridCsrv(unittest.TestCase):
def setUp(self):
pass
def test_test1(self):
# create two unique ESMP_Mesh objects
grid = grid_create()
mesh, nodeCoord, elemType, elemConn = create_ESMPmesh_3x3()
'''
# this is for grid to mesh
# create ESMP_Field objects on the Meshes
srcfield = create_ESMPfieldgrid(grid, 'srcfield')
dstfield = create_ESMPfield(mesh, 'dstfield')
dstfield2 = create_ESMPfield(mesh, 'dstfield_exact')
# initialize the Fields to an analytic function
srcfield = build_analyticfieldgrid(srcfield, grid)
dstfield2 = build_analyticfield(dstfield2, nodeCoord, elemType, elemConn)
'''
# this is for mesh to grid
# create ESMP_Field objects on the Meshes
srcfield = create_ESMPfield(mesh, 'srcfield')
dstfield = create_ESMPfieldgrid(grid, 'dstfield')
dstfield2 = create_ESMPfieldgrid(grid, 'dstfield_exact')
# initialize the Fields to an analytic function
srcfield = build_analyticfield(srcfield, nodeCoord, elemType, elemConn)
dstfield2 = build_analyticfieldgrid(dstfield2, grid)
# run the ESMF regridding
dstfield = run_regridding(srcfield, dstfield)
# compare results and output PASS or FAIL
ok = compare_fields(dstfield, dstfield2)
# clean up
ESMP.ESMP_FieldDestroy(srcfield)
ESMP.ESMP_FieldDestroy(dstfield)
ESMP.ESMP_FieldDestroy(dstfield2)
ESMP.ESMP_GridDestroy(grid)
ESMP.ESMP_MeshDestroy(mesh)
self.assertEqual(ok, True)
if __name__ == '__main__':
ESMP.ESMP_LogSet(True)
print "" # Spacer
suite = unittest.TestLoader().loadTestsFromTestCase(TestESMP_GridToMeshRegridCsrv)
unittest.TextTestRunner(verbosity = 1).run(suite)
| 33.515789 | 86 | 0.613693 | 1,513 | 0.118797 | 0 | 0 | 0 | 0 | 0 | 0 | 5,881 | 0.461762 |
e03a335c46211edd43cb24ddb42d950cbfd7fa71 | 1,184 | py | Python | test/mock_module.py | ariffyasri/lale | 326012c3c3dd884fae0093fe0c45596e4f9c0d72 | [
"Apache-2.0"
] | 1 | 2020-04-28T11:27:48.000Z | 2020-04-28T11:27:48.000Z | test/mock_module.py | ariffyasri/lale | 326012c3c3dd884fae0093fe0c45596e4f9c0d72 | [
"Apache-2.0"
] | null | null | null | test/mock_module.py | ariffyasri/lale | 326012c3c3dd884fae0093fe0c45596e4f9c0d72 | [
"Apache-2.0"
] | 1 | 2020-07-30T10:06:23.000Z | 2020-07-30T10:06:23.000Z | # Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn.neighbors
# class that follows scikit-learn conventions but lacks schemas,
# for the purpose of testing how to wrap an operator without schemas
class UnknownOp:
def __init__(self, n_neighbors=5, algorithm='auto'):
self._hyperparams = {
'n_neighbors': n_neighbors, 'algorithm': algorithm}
def get_params(self, deep:bool=False):
return self._hyperparams
def fit(self, X, y):
self._wrapped_model = sklearn.neighbors.KNeighborsClassifier(
**self._hyperparams)
self._wrapped_model.fit(X, y)
return self
def predict(self, X):
return self._wrapped_model.predict(X)
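# Minimal usage sketch (not part of the original mock): exercises the class
# directly on a toy dataset; assumes scikit-learn is installed.
if __name__ == '__main__':
    op = UnknownOp(n_neighbors=1)
    op.fit([[0.0], [1.0]], [0, 1])
    print(op.predict([[0.2]]))  # nearest neighbour of 0.2 is 0.0 -> prints [0]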
| 34.823529 | 74 | 0.72973 | 446 | 0.376689 | 0 | 0 | 0 | 0 | 0 | 0 | 724 | 0.611486 |
e03bee15bfc41f500be41ba4168c4029ea4dba20 | 3,770 | py | Python | scripts/beautify.py | lukaschoebel/POTUSgen | 7b88ba63f0ddab199937df909c5af3271a833cf3 | [
"MIT"
] | null | null | null | scripts/beautify.py | lukaschoebel/POTUSgen | 7b88ba63f0ddab199937df909c5af3271a833cf3 | [
"MIT"
] | 5 | 2020-03-25T08:02:45.000Z | 2020-04-08T20:07:42.000Z | scripts/beautify.py | lukaschoebel/POTUSgen | 7b88ba63f0ddab199937df909c5af3271a833cf3 | [
"MIT"
] | null | null | null | import json
import re
import sys
def beautify(name):
''' Loading, filtering and saving the JSON tweet file to a newly generated .txt file
:type: name: String
:rtype: output: .txt
'''
filename = name + '.json'
output_name = name + "_filtered.txt"
with open(filename, "r", encoding="utf-8") as input:
with open(output_name, "w", encoding="utf-8") as output:
document = json.load(input)
# Filter only the messages that are not retweeted
# >> Version i): for tweets from archive "master_XXXX.json"
# document = [x['full_text'] for x in document if x['user']['screen_name'] == 'realDonaldTrump' and 'full_text' in x]
# >> Version ii): for self-scraped tweets via https://github.com/bpb27/twitter_scraping
# document = [x['text'] for x in document if x['user']['screen_name'] == 'realDonaldTrump' and 'text' in x]
# >> Version iii): Data set from https://github.com/MatthewWolff/MarkovTweets/
document = [x['text'] for x in document]
# Clean and only include not retweeted messages
document = [deep_clean(x) for x in document if deep_clean(x) is not None]
# Preventing unicode characters by ensuring false ascii encoding
for _, value in enumerate(document):
output.write(json.dumps(value, ensure_ascii=False) + "\n")
# json.dump(document, output, ensure_ascii=False, indent=4)
print(f">> Sucessfully cleaned {filename} and saved it to {output_name}")
def deep_clean(s):
''' Deep cleaning of filtered tweets. Replaces common symbols and kills quotation marks/apostrophes.
:type: s: String
:rtype: s: String
'''
# Return None if given tweet is a retweet
if s[:2] == 'RT':
return None
# Delete all URLs because they don't make for interesting tweets.
s = re.sub(r'http[\S]*', '', s)
# Replace some common unicode symbols with raw character variants
s = re.sub(r'\\u2026', '...', s)
s = re.sub(r'…', '', s)
s = re.sub(r'\\u2019', "'", s)
s = re.sub(r'\\u2018', "'", s)
s = re.sub(r"&", r"&", s)
s = re.sub(r'\\n', r"", s)
# Delete emoji modifying characters
s = re.sub(chr(127996), '', s)
s = re.sub(chr(65039), '', s)
# Kill apostrophes & punctuation because they confuse things.
s = re.sub(r"'", r"", s)
s = re.sub(r"“", r"", s)
s = re.sub(r"”", r"", s)
s = re.sub('[()]', r'', s)
s = re.sub(r'"', r"", s)
# Collapse multiples of certain chars
s = re.sub('([.-])+', r'\1', s)
# Pad sentence punctuation chars with whitespace
s = re.sub('([^0-9])([.,!?])([^0-9])', r'\1 \2 \3', s)
# Remove extra whitespace (incl. newlines)
s = ' '.join(s.split()).lower()
# Define emoji_pattern
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
u"\U0001F1F2-\U0001F1F4" # Macau flag
u"\U0001F1E6-\U0001F1FF" # flags
u"\U0001F600-\U0001F64F"
u"\U00002702-\U000027B0"
u"\U000024C2-\U0001F251"
u"\U0001f926-\U0001f937"
u"\U0001F1F2"
u"\U0001F1F4"
u"\U0001F620"
u"\u200d"
u"\u2640-\u2642"
"]+", flags=re.UNICODE)
s = emoji_pattern.sub(r'', s)
# Care for a special case where the first char is a "."
# return s[1:] if s[0] == "." else s
if len(s):
return s[1:] if s[0] == "." else s
return None
if __name__ == "__main__":
if len(sys.argv) - 1: beautify(sys.argv[1]) | 33.963964 | 129 | 0.571088 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,190 | 0.579979 |
e03c2a58883f30a7a78a6973c7fd5ce571d96bba | 1,746 | py | Python | result2gaofentype/pkl2txt_ggm.py | G-Naughty/Fine-grained-OBB-Detection | 8c82c4c178f0b6bba077ff9d906a81bf8e04789c | [
"Apache-2.0"
] | 2 | 2022-02-06T07:45:03.000Z | 2022-03-11T14:18:32.000Z | result2gaofentype/pkl2txt_ggm.py | G-Naughty/Fine-grained-OBB-Detection | 8c82c4c178f0b6bba077ff9d906a81bf8e04789c | [
"Apache-2.0"
] | null | null | null | result2gaofentype/pkl2txt_ggm.py | G-Naughty/Fine-grained-OBB-Detection | 8c82c4c178f0b6bba077ff9d906a81bf8e04789c | [
"Apache-2.0"
] | null | null | null | import BboxToolkit as bt
import pickle
import copy
import numpy as np
path1="/home/hnu1/GGM/OBBDetection/work_dir/oriented_obb_contrast_catbalance/dets.pkl"
path2="/home/hnu1/GGM/OBBDetection/data/FaIR1M/test/annfiles/ori_annfile.pkl"#
with open(path2,'rb') as f: #/home/disk/FAIR1M_1000_split/val/annfiles/ori_annfile.pkl
data2 = pickle.load(f)
with open(path1,'rb') as f:
obbdets = pickle.load(f)
polydets=copy.deepcopy(obbdets)
for i in range(len(obbdets)):
for j in range(len(obbdets[0][1])):
data=obbdets[i][1][j]
if data.size!= 0:
polys=[]
for k in range(len(data)):
poly = bt.obb2poly(data[k][0:5])
poly=np.append(poly,data[k][5])
polys.append(poly)
else:
polys=[]
polydets[i][1][j]=polys
savepath="/home/hnu1/GGM/OBBDetection/work_dir/oriented_obb_contrast_catbalance/result_txt/"
for i in range(len(polydets)):
txtfile=savepath+polydets[i][0]+".txt"
f = open(txtfile, "w")
for j in range(len(polydets[0][1])):
if polydets[i][1][j]!=[]:
for k in range(len(polydets[i][1][j])):
f.write(str(polydets[i][1][j][k][0])+" "+
str(polydets[i][1][j][k][1])+" "+
str(polydets[i][1][j][k][2])+" "+
str(polydets[i][1][j][k][3])+" "+
str(polydets[i][1][j][k][4])+" "+
str(polydets[i][1][j][k][5])+" "+
str(polydets[i][1][j][k][6])+" "+
str(polydets[i][1][j][k][7])+" "+
str(data2["cls"][j])+" "+
str(polydets[i][1][j][k][8])+"\n")
f.close() | 40.604651 | 95 | 0.512027 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 346 | 0.198167 |
e03e3fafddd8bfe7f29e435a8b1b27b522698dbd | 938 | py | Python | initializer_3d.py | HarperCallahan/taichi_ferrofluid | 6113f6c7d9d9d612b6dadc500cf91b576c2d05ea | [
"MIT"
] | null | null | null | initializer_3d.py | HarperCallahan/taichi_ferrofluid | 6113f6c7d9d9d612b6dadc500cf91b576c2d05ea | [
"MIT"
] | null | null | null | initializer_3d.py | HarperCallahan/taichi_ferrofluid | 6113f6c7d9d9d612b6dadc500cf91b576c2d05ea | [
"MIT"
] | null | null | null | import taichi as ti
import utils
from apic_extension import *
@ti.data_oriented
class Initializer3D: # tmp initializer
def __init__(self, res, x0, y0, z0, x1, y1, z1):
self.res = res
self.x0 = int(res * x0)
self.y0 = int(res * y0)
self.z0 = int(res * z0)
self.x1 = int(res * x1)
self.y1 = int(res * y1)
self.z1 = int(res * z1)
@ti.kernel
def init_kernel(self, cell_type : ti.template()):
for i, j, k in cell_type:
if i >= self.x0 and i <= self.x1 and \
j >= self.y0 and j <= self.y1 and \
k >= self.z0 and k <= self.z1:
cell_type[i, j, k] = utils.FLUID
def init_scene(self, simulator):
self.init_kernel(simulator.cell_type)
dx = simulator.dx
simulator.level_set.initialize_with_aabb((self.x0 * dx, self.y0 * dx, self.z0 * dx), (self.x1 * dx, self.y1 * dx, self.z1 * dx))
| 31.266667 | 136 | 0.557569 | 855 | 0.911514 | 0 | 0 | 873 | 0.930704 | 0 | 0 | 17 | 0.018124 |
e03ebf0129e76590fab9b3f72a3301cc3f5c22ca | 1,265 | py | Python | copy_block_example.py | MilesCranmer/bifrost_paper | 654408cd7e34e7845cee58100fe459e1422e4859 | [
"MIT"
] | null | null | null | copy_block_example.py | MilesCranmer/bifrost_paper | 654408cd7e34e7845cee58100fe459e1422e4859 | [
"MIT"
] | null | null | null | copy_block_example.py | MilesCranmer/bifrost_paper | 654408cd7e34e7845cee58100fe459e1422e4859 | [
"MIT"
] | null | null | null | from copy import deepcopy
import bifrost as bf
from bifrost.pipeline import TransformBlock
from bifrost.ndarray import copy_array
class CopyBlock(TransformBlock):# $\tikzmark{block-start}$
"""Copy the input ring to output ring"""
def __init__(self, iring, space):
super(CopyBlock, self).__init__(iring)
self.orings = [self.create_ring(space=space)]
def on_sequence(self, iseq):
return deepcopy(iseq.header)
def on_data(self, ispan, ospan):
copy_array(ospan.data, ispan.data)#$\tikzmark{block-end}$
def copy_block(iring, space):
return CopyBlock(iring, space)
bc = bf.BlockChainer()
bc.blocks.read_wav(['hey_jude.wav'], gulp_nframe=4096)
bc.custom(copy_block)(space='cuda')# $\tikzmark{gpu-start}$
bc.views.split_axis('time', 256, label='fine_time')
bc.blocks.fft(axes='fine_time', axis_labels='freq')
bc.blocks.detect(mode='scalar')
bc.blocks.transpose(['time', 'pol', 'freq'])#$\tikzmark{gpu-end}$
bc.blocks.copy(space='system')
bc.blocks.quantize('i8')
bc.blocks.write_sigproc()
pipeline = bf.get_default_pipeline()# $\tikzmark{pipeline-start}$
pipeline.shutdown_on_signals()
pipeline.run()#$\tikzmark{pipeline-end}$
| 30.853659 | 98 | 0.674308 | 455 | 0.359684 | 0 | 0 | 0 | 0 | 0 | 0 | 351 | 0.27747 |
e0404632a7378b088279de3e94aac11c26a9e183 | 1,540 | py | Python | monasca_persister/conf/influxdb.py | zhangjianweibj/monasca-persister | 0c5d8a7c5553001f2d38227347f482201f92c8e1 | [
"Apache-2.0"
] | null | null | null | monasca_persister/conf/influxdb.py | zhangjianweibj/monasca-persister | 0c5d8a7c5553001f2d38227347f482201f92c8e1 | [
"Apache-2.0"
] | 1 | 2020-03-13T12:30:29.000Z | 2020-03-13T12:38:16.000Z | monasca_persister/conf/influxdb.py | zhangjianweibj/monasca-persister | 0c5d8a7c5553001f2d38227347f482201f92c8e1 | [
"Apache-2.0"
] | null | null | null | # (C) Copyright 2016-2017 Hewlett Packard Enterprise Development LP
# Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
influxdb_opts = [
cfg.StrOpt('database_name',
help='database name where metrics are stored',
default='mon'),
cfg.HostAddressOpt('ip_address',
help='Valid IP address or hostname '
'to InfluxDB instance'),
cfg.PortOpt('port',
help='port to influxdb',
default=8086),
cfg.StrOpt('user',
help='influxdb user ',
default='mon_persister'),
cfg.StrOpt('password',
secret=True,
help='influxdb password')]
influxdb_group = cfg.OptGroup(name='influxdb',
title='influxdb')
def register_opts(conf):
conf.register_group(influxdb_group)
conf.register_opts(influxdb_opts, influxdb_group)
def list_opts():
return influxdb_group, influxdb_opts
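# Minimal usage sketch (not part of the original module): registers the group on
# a private ConfigOpts instance and reads back a default; assumes oslo.config is
# installed.
if __name__ == '__main__':
    conf = cfg.ConfigOpts()
    register_opts(conf)
    conf(args=[])
    print(conf.influxdb.database_name)  # -> 'mon'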
| 32.765957 | 69 | 0.653896 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 865 | 0.561688 |
e040676396d83dae688cf225c1f4290cf1100f35 | 192 | py | Python | test_print_json.py | huangsen365/boto3-docker | 42d46ce4433dd037006d6b8d01db3fe444b9d8dd | [
"Apache-2.0"
] | null | null | null | test_print_json.py | huangsen365/boto3-docker | 42d46ce4433dd037006d6b8d01db3fe444b9d8dd | [
"Apache-2.0"
] | null | null | null | test_print_json.py | huangsen365/boto3-docker | 42d46ce4433dd037006d6b8d01db3fe444b9d8dd | [
"Apache-2.0"
] | null | null | null | import json
your_json = '["foo", {"bar":["baz", null, 1.0, 2]}]'
parsed = json.loads(your_json)
print(type(your_json))
print(type(parsed))
#print(json.dumps(parsed, indent=4, sort_keys=True)) | 27.428571 | 52 | 0.6875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 92 | 0.479167 |
e041875337916a4d8560bbab0e0b68edca74373b | 13,929 | py | Python | src/solutions/common/integrations/cirklo/api.py | goubertbrent/oca-backend | b9f59cc02568aecb55d4b54aec05245790ea25fd | [
"Apache-2.0"
] | null | null | null | src/solutions/common/integrations/cirklo/api.py | goubertbrent/oca-backend | b9f59cc02568aecb55d4b54aec05245790ea25fd | [
"Apache-2.0"
] | null | null | null | src/solutions/common/integrations/cirklo/api.py | goubertbrent/oca-backend | b9f59cc02568aecb55d4b54aec05245790ea25fd | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import cloudstorage
import logging
from babel.dates import format_datetime
from datetime import datetime
from google.appengine.ext import ndb, deferred, db
from typing import List
from xlwt import Worksheet, Workbook, XFStyle
from mcfw.cache import invalidate_cache
from mcfw.consts import REST_TYPE_TO
from mcfw.exceptions import HttpBadRequestException, HttpForbiddenException, HttpNotFoundException
from mcfw.restapi import rest
from mcfw.rpc import returns, arguments
from rogerthat.bizz.gcs import get_serving_url
from rogerthat.bizz.service import re_index_map_only
from rogerthat.consts import FAST_QUEUE
from rogerthat.models import ServiceIdentity
from rogerthat.models.settings import ServiceInfo
from rogerthat.rpc import users
from rogerthat.rpc.users import get_current_session
from rogerthat.utils import parse_date
from rogerthat.utils.service import create_service_identity_user
from shop.models import Customer
from solutions import translate
from solutions.common.bizz import SolutionModule, broadcast_updates_pending
from solutions.common.bizz.campaignmonitor import send_smart_email_without_check
from solutions.common.consts import OCA_FILES_BUCKET
from solutions.common.dal import get_solution_settings
from solutions.common.integrations.cirklo.cirklo import get_city_id_by_service_email, whitelist_merchant, \
list_whitelisted_merchants, list_cirklo_cities
from solutions.common.integrations.cirklo.models import CirkloCity, CirkloMerchant, SignupLanguageProperty, \
SignupMails, CirkloAppInfo
from solutions.common.integrations.cirklo.to import CirkloCityTO, CirkloVoucherListTO, CirkloVoucherServiceTO, \
WhitelistVoucherServiceTO
from solutions.common.restapi.services import _check_is_city
def _check_permission(city_sln_settings):
if SolutionModule.CIRKLO_VOUCHERS not in city_sln_settings.modules:
raise HttpForbiddenException()
if len(city_sln_settings.modules) != 1:
_check_is_city(city_sln_settings.service_user)
@rest('/common/vouchers/cities', 'get', silent_result=True)
@returns([dict])
@arguments(staging=bool)
def api_list_cirklo_cities(staging=False):
return list_cirklo_cities(staging)
@rest('/common/vouchers/services', 'get', silent_result=True)
@returns(CirkloVoucherListTO)
@arguments()
def get_cirklo_vouchers_services():
city_service_user = users.get_current_user()
city_sln_settings = get_solution_settings(city_service_user)
_check_permission(city_sln_settings)
to = CirkloVoucherListTO()
to.total = 0
to.results = []
to.cursor = None
to.more = False
cirklo_city = CirkloCity.get_by_service_email(city_service_user.email())
if not cirklo_city:
return to
cirklo_merchants = list_whitelisted_merchants(cirklo_city.city_id)
cirklo_dict = {}
cirklo_emails = []
for merchant in cirklo_merchants:
if merchant['email'] in cirklo_emails:
logging.error('Duplicate found %s', merchant['email'])
continue
cirklo_emails.append(merchant['email'])
cirklo_dict[merchant['email']] = merchant
qry = CirkloMerchant.list_by_city_id(cirklo_city.city_id) # type: List[CirkloMerchant]
osa_merchants = []
for merchant in qry:
if merchant.service_user_email:
osa_merchants.append(merchant)
else:
cirklo_merchant = cirklo_dict.get(merchant.data['company']['email'])
if cirklo_merchant:
if merchant.data['company']['email'] in cirklo_emails:
cirklo_emails.remove(merchant.data['company']['email'])
if not merchant.whitelisted:
merchant.whitelisted = True
merchant.put()
elif merchant.whitelisted:
merchant.whitelisted = False
merchant.put()
whitelist_date = cirklo_merchant['createdAt'] if cirklo_merchant else None
merchant_registered = 'shopInfo' in cirklo_merchant if cirklo_merchant else False
to.results.append(
CirkloVoucherServiceTO.from_model(merchant, whitelist_date, merchant_registered, u'Cirklo signup'))
if osa_merchants:
customer_to_get = [Customer.create_key(merchant.customer_id) for merchant in osa_merchants]
customers_dict = {customer.id: customer for customer in db.get(customer_to_get)}
info_keys = [ServiceInfo.create_key(users.User(merchant.service_user_email), ServiceIdentity.DEFAULT)
for merchant in osa_merchants]
models = ndb.get_multi(info_keys)
for service_info, merchant in zip(models, osa_merchants):
customer = customers_dict[merchant.customer_id]
if not customer.service_user:
merchant.key.delete()
continue
cirklo_merchant = cirklo_dict.get(customer.user_email)
should_save = False
if cirklo_merchant:
if customer.user_email in cirklo_emails:
cirklo_emails.remove(customer.user_email)
if not merchant.whitelisted:
merchant.whitelisted = True
should_save = True
elif merchant.whitelisted:
merchant.whitelisted = False
should_save = True
if should_save:
merchant.put()
service_identity_user = create_service_identity_user(customer.service_user)
deferred.defer(re_index_map_only, service_identity_user)
whitelist_date = cirklo_merchant['createdAt'] if cirklo_merchant else None
merchant_registered = 'shopInfo' in cirklo_merchant if cirklo_merchant else False
service_to = CirkloVoucherServiceTO.from_model(merchant, whitelist_date, merchant_registered, u'OSA signup')
service_to.populate_from_info(service_info, customer)
to.results.append(service_to)
for email in cirklo_emails:
cirklo_merchant = cirklo_dict[email]
to.results.append(CirkloVoucherServiceTO.from_cirklo_info(cirklo_merchant))
return to
@rest('/common/vouchers/services/whitelist', 'put', type=REST_TYPE_TO)
@returns(CirkloVoucherServiceTO)
@arguments(data=WhitelistVoucherServiceTO)
def whitelist_voucher_service(data):
city_service_user = users.get_current_user()
city_sln_settings = get_solution_settings(city_service_user)
_check_permission(city_sln_settings)
cirklo_city = CirkloCity.get_by_service_email(city_service_user.email()) # type: CirkloCity
if not cirklo_city:
raise HttpNotFoundException('No cirklo settings found.')
is_cirklo_only_merchant = '@' not in data.id
if is_cirklo_only_merchant:
merchant = CirkloMerchant.create_key(long(data.id)).get() # type: CirkloMerchant
language = merchant.get_language()
else:
merchant = CirkloMerchant.create_key(data.id).get()
language = get_solution_settings(users.User(merchant.service_user_email)).main_language
if data.accepted:
email_id = cirklo_city.get_signup_accepted_mail(language)
if not email_id:
raise HttpBadRequestException('City settings aren\'t fully setup yet.')
whitelist_merchant(cirklo_city.city_id, data.email)
deferred.defer(send_smart_email_without_check, email_id, [data.email], _countdown=1,
_queue=FAST_QUEUE)
else:
email_id = cirklo_city.get_signup_accepted_mail(language)
if not email_id:
raise HttpBadRequestException('City settings aren\'t fully setup yet.')
deferred.defer(send_smart_email_without_check, email_id, [data.email], _countdown=1,
_queue=FAST_QUEUE)
whitelist_date = datetime.now().isoformat() + 'Z' if data.accepted else None
if not is_cirklo_only_merchant:
if data.accepted:
merchant.whitelisted = True
else:
merchant.denied = True
merchant.put()
service_info = ServiceInfo.create_key(users.User(merchant.service_user_email), ServiceIdentity.DEFAULT).get()
customer = Customer.get_by_id(merchant.customer_id) # type: Customer
if data.accepted:
service_identity_user = create_service_identity_user(customer.service_user)
deferred.defer(re_index_map_only, service_identity_user)
to = CirkloVoucherServiceTO.from_model(merchant, whitelist_date, False, u'OSA signup')
to.populate_from_info(service_info, customer)
return to
else:
if data.accepted:
merchant.whitelisted = True
else:
merchant.denied = True
merchant.put()
return CirkloVoucherServiceTO.from_model(merchant, whitelist_date, False, u'Cirklo signup')
@rest('/common/vouchers/cirklo', 'get')
@returns(CirkloCityTO)
@arguments()
def api_vouchers_get_cirklo_settings():
service_user = users.get_current_user()
city = CirkloCity.get_by_service_email(service_user.email())
return CirkloCityTO.from_model(city)
@rest('/common/vouchers/cirklo', 'put')
@returns(CirkloCityTO)
@arguments(data=CirkloCityTO)
def api_vouchers_save_cirklo_settings(data):
service_user = users.get_current_user()
if not get_current_session().shop:
lang = get_solution_settings(service_user).main_language
raise HttpForbiddenException(translate(lang, 'no_permission'))
other_city = CirkloCity.get_by_service_email(service_user.email()) # type: CirkloCity
if not data.city_id:
if other_city:
other_city.key.delete()
return CirkloCityTO.from_model(None)
key = CirkloCity.create_key(data.city_id)
city = key.get()
if not city:
city = CirkloCity(key=key, service_user_email=service_user.email())
elif city.service_user_email != service_user.email():
raise HttpBadRequestException('City id %s is already in use by another service' % data.city_id)
if other_city and other_city.key != key:
other_city.key.delete()
invalidate_cache(get_city_id_by_service_email, service_user.email())
city.logo_url = data.logo_url
city.signup_enabled = data.signup_enabled
city.signup_logo_url = data.signup_logo_url
city.signup_names = None
city.signup_mail = SignupMails.from_to(data.signup_mail)
if data.signup_name_nl and data.signup_name_fr:
city.signup_names = SignupLanguageProperty(nl=data.signup_name_nl,
fr=data.signup_name_fr)
elif data.signup_name_nl:
city.signup_names = SignupLanguageProperty(nl=data.signup_name_nl,
fr=data.signup_name_nl)
elif data.signup_name_fr:
city.signup_names = SignupLanguageProperty(nl=data.signup_name_fr,
fr=data.signup_name_fr)
og_info = city.app_info and city.app_info.to_dict()
info = CirkloAppInfo(enabled=data.app_info.enabled,
title=data.app_info.title,
buttons=data.app_info.buttons)
sln_settings = get_solution_settings(service_user)
if info.to_dict() != og_info and not sln_settings.ciklo_vouchers_only():
city.app_info = info
sln_settings.updates_pending = True
sln_settings.put()
broadcast_updates_pending(sln_settings)
city.put()
return CirkloCityTO.from_model(city)
@rest('/common/vouchers/cirklo/export', 'post')
@returns(dict)
@arguments()
def api_export_cirklo_services():
service_user = users.get_current_user()
city_sln_settings = get_solution_settings(service_user)
_check_permission(city_sln_settings)
all_services = get_cirklo_vouchers_services()
if all_services.cursor:
raise NotImplementedError()
book = Workbook(encoding='utf-8')
sheet = book.add_sheet('Cirklo') # type: Worksheet
language = city_sln_settings.main_language
sheet.write(0, 0, translate(language, 'reservation-name'))
sheet.write(0, 1, translate(language, 'Email'))
sheet.write(0, 2, translate(language, 'address'))
sheet.write(0, 3, translate(language, 'Phone number'))
sheet.write(0, 4, translate(language, 'created'))
sheet.write(0, 5, translate(language, 'merchant_registered'))
date_format = XFStyle()
date_format.num_format_str = 'dd/mm/yyyy'
row = 0
for service in all_services.results:
row += 1
sheet.write(row, 0, service.name)
sheet.write(row, 1, service.email)
sheet.write(row, 2, service.address)
sheet.write(row, 3, service.phone_number)
sheet.write(row, 4, parse_date(service.creation_date), date_format)
sheet.write(row, 5, translate(language, 'Yes') if service.merchant_registered else translate(language, 'No'))
date = format_datetime(datetime.now(), format='medium', locale='en_GB')
gcs_path = '/%s/tmp/cirklo/export-cirklo-%s.xls' % (OCA_FILES_BUCKET, date.replace(' ', '-'))
content_type = 'application/vnd.ms-excel'
with cloudstorage.open(gcs_path, 'w', content_type=content_type) as gcs_file:
book.save(gcs_file)
deferred.defer(cloudstorage.delete, gcs_path, _countdown=86400)
return {
'url': get_serving_url(gcs_path),
}
| 42.858462 | 120 | 0.710604 | 0 | 0 | 0 | 0 | 11,286 | 0.810252 | 0 | 0 | 1,523 | 0.10934 |
e042a55525baf01a1dd738c8dd3863fa44f09d50 | 1,624 | py | Python | aplpy/tests/test_grid.py | nbrunett/aplpy | f5d128faf3568adea753d52c11ba43014d25d90a | ["MIT"] | null | null | null | aplpy/tests/test_grid.py | nbrunett/aplpy | f5d128faf3568adea753d52c11ba43014d25d90a | ["MIT"] | null | null | null | aplpy/tests/test_grid.py | nbrunett/aplpy | f5d128faf3568adea753d52c11ba43014d25d90a | ["MIT"] | 1 | 2018-02-26T03:04:19.000Z | 2018-02-26T03:04:19.000Z | import matplotlib
matplotlib.use('Agg')
import numpy as np
from astropy.tests.helper import pytest
from .. import FITSFigure
def test_grid_addremove():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.remove_grid()
f.add_grid()
f.close()
def test_grid_showhide():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.grid.hide()
f.grid.show()
f.close()
def test_grid_spacing():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.grid.set_xspacing(1.)
f.grid.set_xspacing('tick')
with pytest.raises(ValueError):
f.grid.set_xspacing('auto')
f.grid.set_yspacing(2.)
f.grid.set_yspacing('tick')
with pytest.raises(ValueError):
f.grid.set_yspacing('auto')
f.close()
def test_grid_color():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.grid.set_color('black')
f.grid.set_color('#003344')
f.grid.set_color((1.0, 0.4, 0.3))
f.close()
def test_grid_alpha():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.grid.set_alpha(0.0)
f.grid.set_alpha(0.3)
f.grid.set_alpha(1.0)
f.close()
def test_grid_linestyle():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.grid.set_linestyle('solid')
f.grid.set_linestyle('dashed')
f.grid.set_linestyle('dotted')
f.close()
def test_grid_linewidth():
data = np.zeros((16, 16))
f = FITSFigure(data)
f.add_grid()
f.grid.set_linewidth(0)
f.grid.set_linewidth(2)
f.grid.set_linewidth(5)
f.close()
| 20.049383 | 39 | 0.618842 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 68 | 0.041872 |
e0435a8bdb5ad3ee4a83d670d6af34fbe9094657 | 12,910 | py | Python | vz.py | ponyatov/vz | f808dd0dca9b6aa7a3e492d2ee0797ab96cd23a1 | ["MIT"] | null | null | null | vz.py | ponyatov/vz | f808dd0dca9b6aa7a3e492d2ee0797ab96cd23a1 | ["MIT"] | null | null | null | vz.py | ponyatov/vz | f808dd0dca9b6aa7a3e492d2ee0797ab96cd23a1 | ["MIT"] | null | null | null | import os, sys
class Object:
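    """Base node of the object (hyper)graph: a boxed value plus an ordered list of nested child objects."""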
## @name constructor
def __init__(self, V):
self.value = V
self.nest = []
def box(self, that):
if isinstance(that, Object): return that
if isinstance(that, str): return S(that)
raise TypeError(['box', type(that), that])
## @name dump / string
def test(self): return self.dump(test=True)
def __repr__(self): return self.dump(test=False)
def dump(self, cycle=[], depth=0, prefix='', test=False):
# head
def pad(depth): return '\n' + '\t' * depth
ret = pad(depth) + self.head(prefix, test)
# subtree
return ret
def head(self, prefix='', test=False):
gid = '' if test else f' @{id(self):x}'
return f'{prefix}<{self.tag()}:{self.val()}>{gid}'
def __format__(self, spec=''):
if not spec: return self.val()
raise TypeError(['__format__', spec])
def tag(self): return self.__class__.__name__.lower()
def val(self): return f'{self.value}'
## @name operator
def __iter__(self):
return iter(self.nest)
def __floordiv__(self, that):
self.nest.append(self.box(that)); return self
class Primitive(Object):
pass
class S(Primitive):
def __init__(self, V=None, end=None, pfx=None, sfx=None):
super().__init__(V)
self.end = end; self.pfx = pfx; self.sfx = sfx
def gen(self, to, depth=0):
ret = ''
if self.pfx is not None:
ret += f'{to.tab*depth}{self.pfx}\n'
if self.value is not None:
ret += f'{to.tab*depth}{self.value}\n'
for i in self:
ret += i.gen(to, depth + 1)
if self.end is not None:
ret += f'{to.tab*depth}{self.end}\n'
if self.sfx is not None:
ret += f'{to.tab*depth}{self.sfx}\n'
return ret
class Sec(S):
def gen(self, to, depth=0):
ret = ''
if self.pfx is not None:
ret += f'{to.tab*depth}{self.pfx}\n' if self.pfx else '\n'
if self.nest and self.value is not None:
ret += f'{to.tab*depth}{to.comment} \\ {self}\n'
for i in self:
ret += i.gen(to, depth + 0)
if self.nest and self.value is not None:
ret += f'{to.tab*depth}{to.comment} / {self}\n'
if self.sfx is not None:
ret += f'{to.tab*depth}{self.sfx}\n' if self.pfx else '\n'
return ret
class IO(Object):
def __init__(self, V):
super().__init__(V)
self.path = V
class Dir(IO):
def __floordiv__(self, that):
assert isinstance(that, IO)
that.path = f'{self.path}/{that.path}'
return super().__floordiv__(that)
def sync(self):
try: os.mkdir(self.path)
except FileExistsError: pass
for i in self: i.sync()
class File(IO):
def __init__(self, V, ext='', tab=' ' * 4, comment='#'):
super().__init__(V + ext)
self.top = Sec(); self.bot = Sec()
self.tab = tab; self.comment = comment
def sync(self):
with open(self.path, 'w') as F:
F.write(self.top.gen(self))
for i in self: F.write(i.gen(self))
F.write(self.bot.gen(self))
class giti(File):
def __init__(self, V='.gitignore'):
super().__init__(V)
self.bot // f'!{self}'
class Makefile(File):
def __init__(self, V='Makefile'):
super().__init__(V, tab='\t')
class pyFile(File):
def __init__(self, V, ext='.py'):
super().__init__(V, ext)
class jsonFile(File):
def __init__(self, V, ext='.json', comment='//'):
super().__init__(V, ext, comment=comment)
class Meta(Object): pass
class Class(Meta):
def __init__(self, C, sup=[]):
assert callable(C)
super().__init__(C.__name__)
self.clazz = C; self.sup = sup
def gen(self, to, depth=0):
ret = S(f'class {self}:', pfx='') // 'pass'
return ret.gen(to, depth)
class Project(Meta):
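    """Project generator: builds the target repo skeleton (Makefile, .gitignore, .vscode settings/tasks, apt.txt, Python sources) and writes it to disk via sync()."""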
def __init__(self, V=None, title='', about=''):
if not V: V = os.getcwd().split('/')[-1]
super().__init__(V)
#
self.TITLE = title if title else f'{self}'
self.ABOUT = about
self.AUTHOR = 'Dmitry Ponyatov'
self.EMAIL = '[email protected]'
self.GITHUB = 'https://github.com/ponyatov'
self.YEAR = 2020
self.LICENSE = 'All rights reserved'
self.COPYRIGHT = f'(c) {self.AUTHOR} <{self.EMAIL}> {self.YEAR} {self.LICENSE}'
#
self.dirs()
self.mk()
self.src()
self.vscode()
self.apt()
def apt(self):
self.apt = File('apt', '.txt'); self.d // self.apt
self.apt \
// 'git make curl' // 'code meld' \
// 'python3 python3-venv' \
// 'build-essential g++'
def vscode(self):
self.vscode = Dir('.vscode'); self.d // self.vscode
self.settings()
self.tasks()
def settings(self):
self.settings = jsonFile('settings'); self.vscode // self.settings
#
def multi(key, cmd):
return (S('{', '},')
// f'"command": "multiCommand.{key}",'
// (S('"sequence": [', ']')
// '"workbench.action.files.saveAll",'
// (S('{"command": "workbench.action.terminal.sendSequence",')
// f'"args": {{"text": "\\u000D {cmd} \\u000D"}}}}'
)))
self.multi = \
(Sec('multi')
// (S('"multiCommand.commands": [', '],')
// multi('f11', 'make meta')
// multi('f12', 'make all')
))
#
self.files = (Sec()
// f'"{self}/**":true,'
)
self.exclude = \
(Sec()
// (S('"files.exclude": {', '},') // self.files))
self.watcher = \
(Sec()
// (S('"files.watcherExclude": {', '},') // self.files))
self.assoc = \
(Sec()
// (S('"files.associations": {', '},')))
self.files = (Sec('files', pfx='')
// self.exclude
// self.watcher
// self.assoc)
#
self.editor = (Sec('editor', pfx='')
// '"editor.tabSize": 4,'
// '"editor.rulers": [80],'
// '"workbench.tree.indent": 32,'
)
#
self.settings \
// (S('{', '}')
// self.multi
// self.files
// self.editor)
def tasks(self):
self.tasks = jsonFile('tasks'); self.vscode // self.tasks
def task(clazz, cmd):
return (S('{', '},')
// f'"label": "{clazz}: {cmd}",'
// f'"type": "shell",'
// f'"command": "make {cmd}",'
// f'"problemMatcher": []'
)
self.tasks \
// (S('{', '}')
// '"version": "2.0.0",'
// (S('"tasks": [', ']')
// task('project', 'install')
// task('project', 'update')
// task('git', 'dev')
// task('git', 'shadow')
))
def src(self):
self.py()
self.test()
self.config()
def config(self):
self.config = pyFile('config'); self.d // self.config
self.config \
// f"{'SECURE_KEY':<11} = {os.urandom(0x22)}" \
// f"{'HOST':<11} = '127..0.0.1'" \
// f"{'PORT':<11} = 12345"
def py(self):
self.py = pyFile(f'{self}'); self.d // self.py
self.py \
// 'import os, sys'
for i in [Object, S, Sec, IO, Dir, File, Meta, Class, Project]:
self.py // Class(i)
self.py // Class(Primitive, [Object])
self.py \
// S('Project().sync()', pfx='')
def test(self):
self.test = pyFile(f'test_{self}'); self.d // self.test
self.test \
// 'import pytest' \
// f'from {self} import *' \
// 'def test_any(): assert True'
def dirs(self):
self.d = Dir(f'{self}'); self.giti = giti(); self.d // self.giti
self.giti.top // '*~' // '*.swp' // '*.log'; self.giti.top.sfx = ''
self.giti // f'/{self}/' // '/__pycache__/'
self.giti.bot.pfx = ''
#
self.bin = Dir('bin'); self.d // self.bin
def mk(self):
self.mk = Makefile(); self.d // self.mk
#
self.mk.var = Sec('var', pfx=''); self.mk // self.mk.var
self.mk.var \
// f'{"MODULE":<11} = $(notdir $(CURDIR))' \
// f'{"OS":<11} = $(shell uname -s)' \
// f'{"CORES":<11} = $(shell grep processor /proc/cpuinfo | wc -l)'
#
self.mk.dir = Sec('dir', pfx=''); self.mk // self.mk.dir
self.mk.dir \
// f'{"CWD":<11} = $(CURDIR)' \
// f'{"BIN":<11} = $(CWD)/bin' \
// f'{"DOC":<11} = $(CWD)/doc' \
// f'{"LIB":<11} = $(CWD)/lib' \
// f'{"SRC":<11} = $(CWD)/src' \
// f'{"TMP":<11} = $(CWD)/tmp'
#
self.mk.tool = Sec('tool', pfx=''); self.mk // self.mk.tool
self.mk.tool \
// f'CURL = curl -L -o' \
// f'PY = $(shell which python3)' \
// f'PYT = $(shell which pytest)' \
// f'PEP = $(shell which autopep8)'
#
self.mk.package = Sec('package', pfx=''); self.mk // self.mk.package
self.mk.package \
// f'SYSLINUX_VER = 6.0.3'
#
self.mk.src = Sec('src', pfx=''); self.mk // self.mk.src
self.mk.src \
// f'Y += $(MODULE).py test_$(MODULE).py' \
// f'P += config.py' \
// f'S += $(Y)'
#
self.mk.cfg = Sec('cfg', pfx=''); self.mk // self.mk.cfg
self.mk.cfg \
// f'PEPS = E26,E302,E305,E401,E402,E701,E702'
#
self.mk.all = Sec('all', pfx=''); self.mk // self.mk.all
self.mk.all \
// (S('meta: $(Y)', pfx='.PHONY: meta')
// '$(MAKE) test'
// '$(PY) $(MODULE).py'
// '$(PEP) --ignore=$(PEPS) --in-place $?')
self.mk.all \
// (S('test: $(Y)', pfx='\n.PHONY: test')
// '$(PYT) test_$(MODULE).py')
#
self.mk.rule = Sec('rule', pfx=''); self.mk // self.mk.rule
#
self.mk.doc = Sec('doc', pfx=''); self.mk // self.mk.doc
self.mk.doc \
// S('doc: doc/pyMorphic.pdf', pfx='.PHONY: doc')
self.mk.doc \
// (S('doc/pyMorphic.pdf:')
// '$(CURL) $@ http://www.diva-portal.org/smash/get/diva2:22296/FULLTEXT01.pdf')
#
self.mk.install = Sec('install', pfx=''); self.mk // self.mk.install
self.mk.install // '.PHONY: install update'
self.mk.install \
// (S('install: $(OS)_install doc')
// '$(MAKE) test'
)
self.mk.install \
// (S('update: $(OS)_update doc')
// '$(MAKE) test'
)
self.mk.install \
// (S('Linux_install Linux_update:',
pfx='.PHONY: Linux_install Linux_update')
// 'sudo apt update'
// 'sudo apt install -u `cat apt.txt`')
#
self.mk.merge = Sec('merge', pfx=''); self.mk // self.mk.merge
self.mk.merge \
// 'SHADOW ?= ponymuck'
self.mk.merge \
// 'MERGE = Makefile .gitignore README.md apt.txt $(S)' \
// 'MERGE += .vscode bin doc lib src tmp'
self.mk.merge \
// (S('dev:', pfx='\n.PHONY: dev')
// 'git push -v'
// 'git checkout $@'
// 'git checkout $(SHADOW) -- $(MERGE)'
)
self.mk.merge \
// (S('shadow:', pfx='\n.PHONY: shadow')
// 'git push -v'
// 'git checkout $(SHADOW)'
)
self.mk.merge \
// (S('release:', pfx='\n.PHONY: release')
)
self.mk.merge \
// (S('zip:', pfx='\n.PHONY: zip')
)
def sync(self):
self.readme()
self.d.sync()
def readme(self):
self.readme = File('README', '.md'); self.d // self.readme
self.readme \
// f'#  `{self}`' // f'## {self.TITLE}'
self.readme \
// '' // self.COPYRIGHT // '' // f'github: {self.GITHUB}/{self}'
self.readme // self.ABOUT
Project(
title='ViZual language environment',
about='''
* object (hyper)graph interpreter
'''
).sync()
| 32.849873 | 96 | 0.449109 | 12,756 | 0.988071 | 0 | 0 | 0 | 0 | 0 | 0 | 3,482 | 0.269713 |
e044775152d95d5fb032af9b89fee05b4ac263fe | 2,630 | py | Python | src/server.py | FlakM/fastai_text_serving | 8262c2c1192c5e11df2e06b494ab9cf88c1dcd2a | ["Apache-2.0"] | null | null | null | src/server.py | FlakM/fastai_text_serving | 8262c2c1192c5e11df2e06b494ab9cf88c1dcd2a | ["Apache-2.0"] | null | null | null | src/server.py | FlakM/fastai_text_serving | 8262c2c1192c5e11df2e06b494ab9cf88c1dcd2a | ["Apache-2.0"] | null | null | null | import asyncio
import logging
import aiohttp
import uvicorn
from fastai.vision import *
from starlette.applications import Starlette
from starlette.middleware.cors import CORSMiddleware
from starlette.responses import JSONResponse
# put your url here here
model_file_url = 'https://www.dropbox.com/s/...?raw=1'
model_file_name = 'model'
path = Path(__file__).parent
logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.INFO)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
app = Starlette()
app.add_middleware(CORSMiddleware, allow_origins=['*'], allow_headers=['X-Requested-With', 'Content-Type'])
def hashsum(path, hex=True, hash_type=hashlib.md5):
hashinst = hash_type()
with open(path, 'rb') as f:
for chunk in iter(lambda: f.read(hashinst.block_size * 128), b''):
hashinst.update(chunk)
return hashinst.hexdigest() if hex else hashinst.digest()
async def download_file(url, dest):
if dest.exists(): return
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
data = await response.read()
with open(dest, 'wb') as f: f.write(data)
async def setup_learner():
model_file = path.parent / 'models' / f'{model_file_name}.pkl'
if not model_file.exists():
logging.info("Will download file %s from %s", model_file, model_file_url)
await download_file(model_file_url, model_file)
logging.info("Downloaded file md5sum: %s", hashsum(model_file))
else:
logging.info("File %s already exists will reuse md5sum: %s", model_file, hashsum(model_file))
# Loading the saved model using fastai's load_learner method
model = load_learner(model_file.parent, f'{model_file_name}.pkl')
classes = model.data.classes
return model, classes
loop = asyncio.get_event_loop()
tasks = [asyncio.ensure_future(setup_learner())]
model, classes = loop.run_until_complete(asyncio.gather(*tasks))[0]
loop.close()
def sortByProb(val):
return val["prob"]
@app.route('/predict', methods=['POST'])
async def analyze(request):
data = await request.form()
text = data['text']
predict_class, predict_idx, predict_values = model.predict(text)
results = []
for idx, val in enumerate(predict_values):
prob = val.item()
if prob > 0.01:
record = {"value": classes[idx], "prob": prob}
results.append(record)
results.sort(key=sortByProb, reverse=True)
return JSONResponse(results[:5])
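# Illustrative request against the /predict route above (the 'text' form field and port 4000 follow the code in this file):
#   curl -X POST -F "text=some input text" http://localhost:4000/predict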
if __name__ == '__main__':
    if 'serve' in sys.argv: uvicorn.run(app, host='0.0.0.0', port=4000)
| 30.941176 | 107 | 0.692776 | 0 | 0 | 0 | 0 | 488 | 0.185551 | 1,349 | 0.512928 | 434 | 0.165019 |
e0448da70febec0759bc638d5a460760c3964480 | 402 | py | Python | tcpserver.py | justforbalance/CSnet | c1e049f63d245c5d464a2d6e9aa7d3daf15bf2b6 | ["MIT"] | null | null | null | tcpserver.py | justforbalance/CSnet | c1e049f63d245c5d464a2d6e9aa7d3daf15bf2b6 | ["MIT"] | null | null | null | tcpserver.py | justforbalance/CSnet | c1e049f63d245c5d464a2d6e9aa7d3daf15bf2b6 | ["MIT"] | null | null | null | from socket import *
serverPort = 12001
serverSocket = socket(AF_INET, SOCK_STREAM)
serverSocket.bind(('', serverPort))
serverSocket.listen(1)
print("the server is ready to receive")
while True:
    connectionSocket, addr = serverSocket.accept()
sentence = connectionSocket.recv(1024).decode()
sentence = sentence.upper()
connectionSocket.send(sentence.encode())
connectionSocket.close() | 33.5 | 51 | 0.753731 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.084577 |
e044ab975c816db8531273f338dcef5b52d8c7ce | 1,061 | py | Python | src/geneflow/extend/local_workflow.py | jhphan/geneflow2 | a39ab97e6425ee45584cfc15b5740e94a5bf7512 | ["Apache-2.0"] | 7 | 2019-04-11T03:50:51.000Z | 2020-03-27T15:59:04.000Z | src/geneflow/extend/local_workflow.py | jhphan/geneflow2 | a39ab97e6425ee45584cfc15b5740e94a5bf7512 | ["Apache-2.0"] | 1 | 2019-05-06T14:18:42.000Z | 2019-05-08T22:06:12.000Z | src/geneflow/extend/local_workflow.py | jhphan/geneflow2 | a39ab97e6425ee45584cfc15b5740e94a5bf7512 | ["Apache-2.0"] | 6 | 2019-04-10T20:25:27.000Z | 2021-12-16T15:59:59.000Z | """This module contains the GeneFlow LocalWorkflow class."""
class LocalWorkflow:
"""
A class that represents the Local Workflow objects.
"""
def __init__(
self,
job,
config,
parsed_job_work_uri
):
"""
Instantiate LocalWorkflow class.
"""
self._job = job
self._config = config
self._parsed_job_work_uri = parsed_job_work_uri
def initialize(self):
"""
Initialize the LocalWorkflow class.
This workflow class has no additional functionality.
Args:
None.
Returns:
True.
"""
return True
def init_data(self):
"""
Initialize any data specific to this context.
"""
return True
def get_context_options(self):
"""
Return dict of options specific for this context.
Args:
None.
Returns:
{} - no options specific for this context.
"""
return {}
| 18.293103 | 60 | 0.524034 | 998 | 0.940622 | 0 | 0 | 0 | 0 | 0 | 0 | 624 | 0.588124 |
e0453a8ff093c7c5f6bb2239656a47c98c50cec7 | 2,849 | py | Python | S12/tensornet/engine/ops/lr_scheduler.py | abishek-raju/EVA4B2 | 189f4062c85d91f43c1381087a9c89ff794e5428 | [
"Apache-2.0"
] | 4 | 2020-06-18T13:07:19.000Z | 2022-01-07T10:51:10.000Z | S12/tensornet/engine/ops/lr_scheduler.py | abishek-raju/EVA4B2 | 189f4062c85d91f43c1381087a9c89ff794e5428 | [
"Apache-2.0"
] | 1 | 2021-07-31T04:34:46.000Z | 2021-08-11T05:55:57.000Z | S12/tensornet/engine/ops/lr_scheduler.py | abishek-raju/EVA4B2 | 189f4062c85d91f43c1381087a9c89ff794e5428 | [
"Apache-2.0"
] | 4 | 2020-08-09T07:10:46.000Z | 2021-01-16T14:57:23.000Z | from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau, OneCycleLR
def step_lr(optimizer, step_size, gamma=0.1, last_epoch=-1):
"""Create LR step scheduler.
Args:
optimizer (torch.optim): Model optimizer.
step_size (int): Frequency for changing learning rate.
gamma (float): Factor for changing learning rate. (default: 0.1)
last_epoch (int): The index of last epoch. (default: -1)
Returns:
StepLR: Learning rate scheduler.
"""
return StepLR(optimizer, step_size=step_size, gamma=gamma, last_epoch=last_epoch)
def reduce_lr_on_plateau(optimizer, factor=0.1, patience=10, verbose=False, min_lr=0):
"""Create LR plateau reduction scheduler.
Args:
optimizer (torch.optim): Model optimizer.
factor (float, optional): Factor by which the learning rate will be reduced.
(default: 0.1)
patience (int, optional): Number of epoch with no improvement after which learning
rate will be will be reduced. (default: 10)
verbose (bool, optional): If True, prints a message to stdout for each update.
(default: False)
min_lr (float, optional): A scalar or a list of scalars. A lower bound on the
learning rate of all param groups or each group respectively. (default: 0)
Returns:
ReduceLROnPlateau instance.
"""
return ReduceLROnPlateau(
optimizer, factor=factor, patience=patience, verbose=verbose, min_lr=min_lr
)
def one_cycle_lr(
optimizer, max_lr, epochs, steps_per_epoch, pct_start=0.5, div_factor=10.0, final_div_factor=10000
):
"""Create One Cycle Policy for Learning Rate.
Args:
optimizer (torch.optim): Model optimizer.
max_lr (float): Upper learning rate boundary in the cycle.
epochs (int): The number of epochs to train for. This is used along with
steps_per_epoch in order to infer the total number of steps in the cycle.
steps_per_epoch (int): The number of steps per epoch to train for. This is
used along with epochs in order to infer the total number of steps in the cycle.
pct_start (float, optional): The percentage of the cycle (in number of steps)
spent increasing the learning rate. (default: 0.5)
div_factor (float, optional): Determines the initial learning rate via
initial_lr = max_lr / div_factor. (default: 10.0)
final_div_factor (float, optional): Determines the minimum learning rate via
min_lr = initial_lr / final_div_factor. (default: 1e4)
Returns:
OneCycleLR instance.
"""
return OneCycleLR(
optimizer, max_lr, epochs=epochs, steps_per_epoch=steps_per_epoch,
pct_start=pct_start, div_factor=div_factor, final_div_factor=final_div_factor
)
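# Illustrative usage sketch (assumes a standard torch optimizer and data loader; names are placeholders):
#   scheduler = one_cycle_lr(optimizer, max_lr=0.1, epochs=24, steps_per_epoch=len(train_loader))
#   after each training batch: optimizer.step(); scheduler.step()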
| 40.7 | 102 | 0.679537 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,082 | 0.730783 |
e045d172e3aa9769db37dd0c8977af6b2b83dca1 | 10,889 | py | Python | armi/reactor/tests/test_zones.py | youngmit/armi | 67688e4e67d2a217dfc7b1ccfa64028c20b57a5b | ["Apache-2.0"] | null | null | null | armi/reactor/tests/test_zones.py | youngmit/armi | 67688e4e67d2a217dfc7b1ccfa64028c20b57a5b | ["Apache-2.0"] | null | null | null | armi/reactor/tests/test_zones.py | youngmit/armi | 67688e4e67d2a217dfc7b1ccfa64028c20b57a5b | ["Apache-2.0"] | null | null | null | # Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for Zones"""
import copy
import unittest
import armi
from armi import settings
from armi.reactor import assemblies
from armi.reactor import blueprints
from armi.reactor import geometry
from armi.reactor import grids
from armi.reactor import reactors
from armi.reactor import zones
from armi.reactor.flags import Flags
from armi.reactor.tests import test_reactors
from armi.utils import pathTools
from armi.settings.fwSettings import globalSettings
THIS_DIR = pathTools.armiAbsDirFromName(__name__)
class Zone_TestCase(unittest.TestCase):
def setUp(self):
bp = blueprints.Blueprints()
geom = geometry.SystemLayoutInput()
geom.symmetry = "third core periodic"
r = reactors.Reactor(settings.getMasterCs(), bp)
r.add(reactors.Core("Core", settings.getMasterCs(), geom))
r.core.spatialGrid = grids.hexGridFromPitch(1.0)
aList = []
for ring in range(10):
a = assemblies.HexAssembly("fuel")
a.spatialLocator = r.core.spatialGrid[ring, 1, 0]
a.parent = r.core
aList.append(a)
self.aList = aList
def test_addAssemblyLocations(self):
zone = zones.Zone("TestZone")
zone.addAssemblyLocations(self.aList)
for a in self.aList:
self.assertIn(a.getLocation(), zone)
self.assertRaises(RuntimeError, zone.addAssemblyLocations, self.aList)
def test_iteration(self):
locs = [a.getLocation() for a in self.aList]
zone = zones.Zone("TestZone")
zone.addAssemblyLocations(self.aList)
for aLoc in zone:
self.assertIn(aLoc, locs)
# loop twice to make sure it iterates nicely.
for aLoc in zone:
self.assertIn(aLoc, locs)
def test_addRing(self):
zone = zones.Zone("TestZone")
zone.addRing(5)
self.assertIn("A5003", zone)
self.assertNotIn("A6002", zone)
zone.addRing(6, 3, 9)
self.assertIn("A6003", zone)
self.assertIn("A6009", zone)
self.assertNotIn("A6002", zone)
self.assertNotIn("A6010", zone)
class Zones_InReactor(unittest.TestCase):
def setUp(self):
self.o, self.r = test_reactors.loadTestReactor()
def test_buildRingZones(self):
o, r = self.o, self.r
cs = o.cs
cs[globalSettings.CONF_ZONING_STRATEGY] = "byRingZone"
cs["ringZones"] = []
zonez = zones.buildZones(r.core, cs)
self.assertEqual(len(list(zonez)), 1)
self.assertEqual(9, r.core.numRings)
cs["ringZones"] = [5, 8]
zonez = zones.buildZones(r.core, cs)
self.assertEqual(len(list(zonez)), 2)
zone = zonez["ring-1"]
self.assertEqual(len(zone), (5 * (5 - 1) + 1))
zone = zonez["ring-2"]
# Note that the actual number of rings in the reactor model is 9. Even though we
        # asked for the last zone to go to 8, the zone engine should bump it out. Not
# sure if this is behavior that we want to preserve, but at least it's being
# tested properly now.
self.assertEqual(len(zone), (9 * (9 - 1) + 1) - (5 * (5 - 1) + 1))
cs["ringZones"] = [5, 7, 8]
zonez = zones.buildZones(r.core, cs)
self.assertEqual(len(list(zonez)), 3)
zone = zonez["ring-3"]
self.assertEqual(len(zone), 30) # rings 8 and 9. See above comment
def test_removeZone(self):
o, r = self.o, self.r
cs = o.cs
cs[globalSettings.CONF_ZONING_STRATEGY] = "byRingZone"
cs["ringZones"] = [5, 8]
# produce 2 zones, with the names ringzone0 and ringzone1
daZones = zones.buildZones(r.core, cs)
daZones.removeZone("ring-1")
# The names list should only house the only other remaining zone now
self.assertEqual(["ring-2"], daZones.names)
# if indexed like a dict, the zones object should give a key error from the removed zone
with self.assertRaises(KeyError):
daZones["ring-1"]
# Ensure we can still iterate through our zones object
for name in daZones.names:
aZone = daZones[name]
def test_findZoneAssemblyIsIn(self):
cs = self.o.cs
cs["ringZones"] = [5, 7, 8]
daZones = zones.buildZones(self.r.core, cs)
for zone in daZones:
a = self.r.core.getAssemblyWithStringLocation(zone.locList[0])
aZone = daZones.findZoneAssemblyIsIn(a)
self.assertEqual(aZone, zone)
# lets test if we get a none and a warning if the assembly does not exist in a zone
a = self.r.core.getAssemblyWithStringLocation(
daZones[daZones.names[0]].locList[0]
) # get assem from first zone
daZones.removeZone(
daZones.names[0]
) # remove a zone to ensure that our assem does not have a zone anymore
self.assertEqual(daZones.findZoneAssemblyIsIn(a), None)
class Zones_InRZReactor(unittest.TestCase):
def test_splitZones(self):
# Test to make sure that we can split a zone containing control and fuel assemblies.
# Also test that we can separate out assemblies with differing numbers of blocks.
o, r = test_reactors.loadTestReactor(inputFileName="partisnTestReactor.yaml")
cs = o.cs
cs["splitZones"] = False
cs[globalSettings.CONF_ZONING_STRATEGY] = "byRingZone"
cs["ringZones"] = [1, 2, 3, 4, 5, 6, 7, 8, 9]
diverseZone = "ring-4"
r.core.buildZones(cs)
daZones = r.core.zones
# lets make one of the assemblies have an extra block
zoneLocations = daZones.getZoneLocations(diverseZone)
originalAssemblies = r.core.getLocationContents(
zoneLocations, assemblyLevel=True
)
fuel = [a for a in originalAssemblies if a.hasFlags(Flags.FUEL)][0]
newBlock = copy.deepcopy(fuel[-1])
fuel.add(newBlock)
# should contain a zone for every ring zone
# we only want one ring zone for this test, containing assemblies of different types.
zoneTup = tuple(daZones.names)
for zoneName in zoneTup:
if zoneName != diverseZone:
daZones.removeZone(zoneName)
# this should split diverseZone into multiple zones by nodalization type.
cs["splitZones"] = True
zones.splitZones(r.core, cs, daZones)
# test to make sure that we split the ring zone correctly
self.assertEqual(len(daZones["ring-4-primary-control-5"]), 2)
self.assertEqual(len(daZones["ring-4-middle-fuel-5"]), 3)
self.assertEqual(len(daZones["ring-4-middle-fuel-6"]), 1)
def test_createHotZones(self):
# Test to make sure createHotZones identifies the highest p/f location in a zone
# Test to make sure createHotZones can remove the peak assembly from that zone and place it in a new zone
# Test that the power in the old zone and the new zone is conserved.
# Test that if a hot zone can not be created from a single assembly zone.
o, r = test_reactors.loadTestReactor(inputFileName="partisnTestReactor.yaml")
cs = o.cs
cs["splitZones"] = False
cs[globalSettings.CONF_ZONING_STRATEGY] = "byRingZone"
cs["ringZones"] = [9] # build one giant zone
r.core.buildZones(cs)
daZones = r.core.zones
originalassemblies = []
originalPower = 0.0
peakZonePFRatios = []
# Create a single assembly zone to verify that it will not create a hot zone
single = zones.Zone("single")
daZones.add(single)
aLoc = r.core.getFirstAssembly(Flags.FUEL).getLocation()
single.append(aLoc)
# Set power and flow.
# Also gather channel peak P/F ratios, assemblies and power.
for zone in daZones:
powerToFlow = []
zoneLocations = daZones.getZoneLocations(zone.name)
assems = r.core.getLocationContents(zoneLocations, assemblyLevel=True)
power = 300.0
flow = 300.0
for a in assems:
a.getFirstBlock().p.power = power
assemblyPower = a.calcTotalParam("power")
a[-1].p.THmassFlowRate = flow
powerToFlow.append(assemblyPower / a[-1].p.THmassFlowRate)
originalPower += assemblyPower
originalassemblies.append(a)
power += 1
flow -= 1
peakZonePFRatios.append(max(powerToFlow))
daZones = zones.createHotZones(r.core, daZones)
# Test that the hot zones have the peak P/F from the host channels
i = 0
for zone in daZones:
if zone.hotZone:
hotAssemLocation = daZones.getZoneLocations(zone.name)
hotAssem = r.core.getLocationContents(
hotAssemLocation, assemblyLevel=True
)[0]
self.assertEqual(
peakZonePFRatios[i],
hotAssem.calcTotalParam("power") / hotAssem[-1].p.THmassFlowRate,
)
i += 1
powerAfterHotZoning = 0.0
assembliesAfterHotZoning = []
# Check that power is conserved and that we did not lose any assemblies
for zone in daZones:
locs = daZones.getZoneLocations(zone.name)
assems = r.core.getLocationContents(locs, assemblyLevel=True)
for a in assems:
assembliesAfterHotZoning.append(a)
powerAfterHotZoning += a.calcTotalParam("power")
self.assertEqual(powerAfterHotZoning, originalPower)
self.assertEqual(len(assembliesAfterHotZoning), len(originalassemblies))
# check that the original zone with 1 channel has False for hotzone
self.assertEqual(single.hotZone, False)
# check that we have the correct number of hot and normal zones.
hotCount = 0
normalCount = 0
for zone in daZones:
if zone.hotZone:
hotCount += 1
else:
normalCount += 1
self.assertEqual(hotCount, 1)
self.assertEqual(normalCount, 2)
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Zones_InReactor.test_buildRingZones']
unittest.main()
| 39.740876 | 113 | 0.629718 | 9,671 | 0.888144 | 0 | 0 | 0 | 0 | 0 | 0 | 3,163 | 0.290477 |
e04601e1749bc51e7e5f74ca383f947dc25e7da9 | 562 | py | Python | islam_fitz/survey/migrations/0005_auto_20210712_2132.py | OmarEhab177/Islam_fitz | 6ad0eb21549895a6fe537e8413022b82bc530c57 | [
"MIT"
] | null | null | null | islam_fitz/survey/migrations/0005_auto_20210712_2132.py | OmarEhab177/Islam_fitz | 6ad0eb21549895a6fe537e8413022b82bc530c57 | [
"MIT"
] | 2 | 2022-03-01T12:17:05.000Z | 2022-03-30T12:19:55.000Z | islam_fitz/survey/migrations/0005_auto_20210712_2132.py | OmarEhab177/Islam_fitz | 6ad0eb21549895a6fe537e8413022b82bc530c57 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.12 on 2021-07-12 19:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('survey', '0004_lastpage_whatsapp_button'),
]
operations = [
migrations.RemoveField(
model_name='lastpage',
name='whatsapp_button',
),
migrations.AddField(
model_name='lastpage',
name='whatsapp_number',
field=models.CharField(default=1, max_length=50),
preserve_default=False,
),
]
| 23.416667 | 61 | 0.592527 | 468 | 0.83274 | 0 | 0 | 0 | 0 | 0 | 0 | 141 | 0.25089 |
e046ccaf1594be44b4bc74501cfe08b79d45a1d7 | 490 | py | Python | Examples/WorkingWithOutlookMSGs/CreateAndSaveOutlookNote.py | Muzammil-khan/Aspose.Email-Python-Dotnet | 04ca3a6f440339f3ddf316218f92d15d66f24e7e | ["MIT"] | 5 | 2019-01-28T05:17:12.000Z | 2020-04-14T14:31:34.000Z | Examples/WorkingWithOutlookMSGs/CreateAndSaveOutlookNote.py | Muzammil-khan/Aspose.Email-Python-Dotnet | 04ca3a6f440339f3ddf316218f92d15d66f24e7e | ["MIT"] | 1 | 2019-01-28T16:07:26.000Z | 2021-11-25T10:59:52.000Z | Examples/WorkingWithOutlookMSGs/CreateAndSaveOutlookNote.py | Muzammil-khan/Aspose.Email-Python-Dotnet | 04ca3a6f440339f3ddf316218f92d15d66f24e7e | ["MIT"] | 6 | 2018-07-16T14:57:34.000Z | 2020-08-30T05:59:52.000Z | import aspose.email.mapi.msg as msg
from aspose.email.mapi import MapiNote, NoteSaveFormat, NoteColor
def run():
dataDir = "Data/"
#ExStart: CreateAndSaveOutlookNote
note3 = MapiNote()
note3.subject = "Blue color note"
note3.body = "This is a blue color note";
note3.color = NoteColor.YELLOW
note3.height = 500
note3.width = 500
note3.save(dataDir + "CreateAndSaveOutlookNote_out.msg", NoteSaveFormat.MSG)
#ExEnd: CreateAndSaveOutlookNote
if __name__ == '__main__':
run()
| 25.789474 | 77 | 0.746939 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 161 | 0.328571 |
e04830a8bb6dffa22a3b7aa461ea3221561a26cd | 6,114 | py | Python | nonebot/internal/adapter/template.py | mobyw/nonebot2 | 36663f1a8a51bd89f4a60110047e73719adcc73d | ["MIT"] | null | null | null | nonebot/internal/adapter/template.py | mobyw/nonebot2 | 36663f1a8a51bd89f4a60110047e73719adcc73d | ["MIT"] | null | null | null | nonebot/internal/adapter/template.py | mobyw/nonebot2 | 36663f1a8a51bd89f4a60110047e73719adcc73d | ["MIT"] | null | null | null | import functools
from string import Formatter
from typing import (
TYPE_CHECKING,
Any,
Set,
Dict,
List,
Type,
Tuple,
Union,
Generic,
Mapping,
TypeVar,
Callable,
Optional,
Sequence,
cast,
overload,
)
if TYPE_CHECKING:
from .message import Message, MessageSegment
TM = TypeVar("TM", bound="Message")
TF = TypeVar("TF", str, "Message")
FormatSpecFunc = Callable[[Any], str]
FormatSpecFunc_T = TypeVar("FormatSpecFunc_T", bound=FormatSpecFunc)
class MessageTemplate(Formatter, Generic[TF]):
"""消息模板格式化实现类。
参数:
template: 模板
factory: 消息类型工厂,默认为 `str`
"""
@overload
def __init__(
self: "MessageTemplate[str]", template: str, factory: Type[str] = str
) -> None:
...
@overload
def __init__(
self: "MessageTemplate[TM]", template: Union[str, TM], factory: Type[TM]
) -> None:
...
def __init__(self, template, factory=str) -> None:
self.template: TF = template
self.factory: Type[TF] = factory
self.format_specs: Dict[str, FormatSpecFunc] = {}
def add_format_spec(
self, spec: FormatSpecFunc_T, name: Optional[str] = None
) -> FormatSpecFunc_T:
name = name or spec.__name__
if name in self.format_specs:
raise ValueError(f"Format spec {name} already exists!")
self.format_specs[name] = spec
return spec
def format(self, *args, **kwargs):
"""根据传入参数和模板生成消息对象"""
return self._format(args, kwargs)
def format_map(self, mapping: Mapping[str, Any]) -> TF:
"""根据传入字典和模板生成消息对象, 在传入字段名不是有效标识符时有用"""
return self._format([], mapping)
def _format(self, args: Sequence[Any], kwargs: Mapping[str, Any]) -> TF:
msg = self.factory()
if isinstance(self.template, str):
msg += self.vformat(self.template, args, kwargs)
elif isinstance(self.template, self.factory):
template = cast("Message[MessageSegment]", self.template)
for seg in template:
msg += self.vformat(str(seg), args, kwargs) if seg.is_text() else seg
else:
raise TypeError("template must be a string or instance of Message!")
return msg # type:ignore
def vformat(
self, format_string: str, args: Sequence[Any], kwargs: Mapping[str, Any]
) -> TF:
used_args = set()
result, _ = self._vformat(format_string, args, kwargs, used_args, 2)
self.check_unused_args(list(used_args), args, kwargs)
return result
def _vformat(
self,
format_string: str,
args: Sequence[Any],
kwargs: Mapping[str, Any],
used_args: Set[Union[int, str]],
recursion_depth: int,
auto_arg_index: int = 0,
) -> Tuple[TF, int]:
if recursion_depth < 0:
raise ValueError("Max string recursion exceeded")
results: List[Any] = [self.factory()]
for (literal_text, field_name, format_spec, conversion) in self.parse(
format_string
):
# output the literal text
if literal_text:
results.append(literal_text)
# if there's a field, output it
if field_name is not None:
# this is some markup, find the object and do
# the formatting
# handle arg indexing when empty field_names are given.
if field_name == "":
if auto_arg_index is False:
raise ValueError(
"cannot switch from manual field specification to "
"automatic field numbering"
)
field_name = str(auto_arg_index)
auto_arg_index += 1
elif field_name.isdigit():
if auto_arg_index:
raise ValueError(
"cannot switch from manual field specification to "
"automatic field numbering"
)
# disable auto arg incrementing, if it gets
# used later on, then an exception will be raised
auto_arg_index = False
# given the field_name, find the object it references
# and the argument it came from
obj, arg_used = self.get_field(field_name, args, kwargs)
used_args.add(arg_used)
assert format_spec is not None
# do any conversion on the resulting object
obj = self.convert_field(obj, conversion) if conversion else obj
# expand the format spec, if needed
format_control, auto_arg_index = self._vformat(
format_spec,
args,
kwargs,
used_args,
recursion_depth - 1,
auto_arg_index,
)
# format the object and append to the result
formatted_text = self.format_field(obj, str(format_control))
results.append(formatted_text)
return functools.reduce(self._add, results), auto_arg_index
def format_field(self, value: Any, format_spec: str) -> Any:
formatter: Optional[FormatSpecFunc] = self.format_specs.get(format_spec)
if formatter is None and not issubclass(self.factory, str):
segment_class: Type["MessageSegment"] = self.factory.get_segment_class()
method = getattr(segment_class, format_spec, None)
if callable(method) and not cast(str, method.__name__).startswith("_"):
formatter = getattr(segment_class, format_spec)
return (
super().format_field(value, format_spec)
if formatter is None
else formatter(value)
)
def _add(self, a: Any, b: Any) -> Any:
try:
return a + b
except TypeError:
return a + str(b)
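# Illustrative usage sketch (hypothetical values, not part of the original module):
#   MessageTemplate("Hello, {name}!").format(name="world")  # -> "Hello, world!"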
| 33.048649 | 85 | 0.56248 | 5,742 | 0.917839 | 0 | 0 | 267 | 0.042679 | 0 | 0 | 1,181 | 0.188779 |