metadata (dict) | text (string, lengths 60-3.49M) |
---|---|
{
"source": "3plus2/3plus2",
"score": 3
} |
#### File: 3plus2/code/idml.py
```python
from os import listdir
from os.path import isfile, join
import parse as p
import os
import shutil
import basic_generator as gen
import utils
import zipfile
import re
import copy
def setContent(inCitFileName, outCitFileName, content):
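# Copy inCitFileName to outCitFileName, replacing the 10-character placeholder
# that starts with 'codigo' (marker + page number + block letter) with content.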
inCitFile = open(inCitFileName, 'r')
outCitFile = open(outCitFileName, 'w')
inText = inCitFile.read()
idx1 = inText.find("codigo")
if idx1 >= 0:
firstText = inText[0:idx1]
secondText = inText[idx1+10:]
inText = firstText+content+secondText
outCitFile.write(inText)
inCitFile.close()
outCitFile.close()
def setMainContent(inCitFileName, outCitFileName, gospelText, commentsText):
inCitFile = open(inCitFileName, 'r')
outCitFile = open(outCitFileName, 'w')
#read the origin
iniBlock = 6
midBlock = 5
finBlock = 3
inTextIni = ""
for f in range(iniBlock) :
inTextIni = inTextIni + inCitFile.readline()
for f in range(midBlock) :
inCitFile.readline()
inTextFin = ""
for f in range(finBlock) :
inTextFin = inTextFin + inCitFile.readline()
content = ""
gospelContent = paragraphStyleBlock("cita", characterStyleBlock("contenido_cita", gospelText, True))#+"\t\t\t\t</br>\n")
content = content + gospelContent
for parr in range(len(commentsText)) :
textParrafo = ""
elementParrafo = commentsText[parr]
for block in range(len(elementParrafo)) :
elementBlock = elementParrafo[block]
if block == len(elementParrafo)-1 and parr != len(commentsText)-1:
textParrafo = textParrafo + characterStyleBlock(elementBlock[0], elementBlock[1], True)
else:
textParrafo = textParrafo + characterStyleBlock(elementBlock[0], elementBlock[1])
content = content + paragraphStyleBlock("normal",textParrafo)
finalText = inTextIni+content+inTextFin
outCitFile.write(finalText)
inCitFile.close()
outCitFile.close()
def characterStyleBlock(style, content, withspace = False):
ini = """\t\t\t\t<CharacterStyleRange AppliedCharacterStyle="CharacterStyle/{0}">
\t\t<Content>""".format(style)
fin = """</Content>\n"""
if withspace : fin = fin + "\t\t\t\t<Br />\n"
fin = fin + "\t\t\t\t</CharacterStyleRange>\n"
return ini+content+fin
def paragraphStyleBlock(style, content):
ini = """\t\t\t<ParagraphStyleRange AppliedParagraphStyle="ParagraphStyle/{0}">\n""".format(style)
fin = "\t\t\t</ParagraphStyleRange>\n"
return ini+content+fin
def getPagesFromIdml(textContents, numOfPages, tabla, used, definitions ):
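# Each story file is expected to contain a marker of the form 'codigo' +
# 3-digit page number + 1-character block id ('C' citation, 'D' day, 'S' saint,
# 'T' main text). tabla[page][block] records which file holds each block, and
# the minimum and maximum page numbers found are returned.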
allfiles = [ f for f in listdir(definitions["mypath"]) if isfile(join(definitions["mypath"],f)) ]
# 1. Read all the stories files and create the relation table
minPage = int(9999)
maxPage = int(-1)
print "procesando fichero imdl"
for f in range(len(allfiles)) :
storyFile = open(definitions["mypath"]+allfiles[f], 'r')
#print definitions["mypath"]+allfiles[f]
text = storyFile.read()
pos = text.find("codigo")
if pos < 0 : continue
pagina = text[pos+6:pos+9]
bloque = text[pos+9:pos+10]
#print "pagina " + str(pagina) + " y bloque " + str(bloque) + " file " + allfiles[f]
value = int(pagina)
tabla[value][bloque] = allfiles[f]
used[value] = True
if value > maxPage :
maxPage = int(pagina)
if value < minPage :
minPage = int(pagina)
#print allfiles[f] + "- pagina: " + pagina + " - bloque: " + bloque
#print "minPage " + str(minPage)
#print "maxPage " + str(maxPage)
#print "textContents " + str(len(textContents))
return maxPage,minPage
def fillContents( tabla, comments, gospel,
citation, days, saints,
maxPage, minPage, definitions):
maxPage = int(maxPage)
pageId = 0
print "insertando contenido en fichero idml"
for i in range(len(comments)) :
if i > definitions["TEXTS_NUM"]:
continue
tablaId = i + minPage
if 'C' not in tabla[tablaId]:
print("Block C is not properly defined for page " + str(tablaId))
if 'D' not in tabla[tablaId]:
print("Block D is not properly defined for page " + str(tablaId))
if 'S' not in tabla[tablaId]:
print("Block S is not properly defined for page " + str(tablaId))
if 'T' not in tabla[tablaId]:
print("Block T is not properly defined for page " + str(tablaId))
#citation
inCitFileName = definitions["mypath"]+tabla[tablaId]['C']
outCitFileName = definitions["outPath"]+tabla[tablaId]['C']
setContent(inCitFileName, outCitFileName, citation[pageId])
#day
inCitFileName = definitions["mypath"]+tabla[tablaId]['D']
outCitFileName = definitions["outPath"]+tabla[tablaId]['D']
setContent(inCitFileName, outCitFileName, days[pageId])
#saint
inCitFileName = definitions["mypath"]+tabla[tablaId]['S']
outCitFileName = definitions["outPath"]+tabla[tablaId]['S']
setContent(inCitFileName, outCitFileName, saints[pageId])
#gospel+comments
inCitFileName = definitions["mypath"]+tabla[tablaId]['T']
outCitFileName = definitions["outPath"]+tabla[tablaId]['T']
setMainContent(inCitFileName, outCitFileName, gospel[pageId], comments[pageId])
pageId = pageId+1
return pageId
def processIdmlLeaflet( definitions ):
#Create folders and temp files
tempDir = definitions["globalPath"] + "deployment"
origDir = tempDir+"/orig"
destDir = tempDir+"/dest"
tempDir = definitions["globalPath"] + "deployment"
if not os.path.exists(tempDir):
os.mkdir(tempDir)
if os.path.exists(origDir):
shutil.rmtree(origDir)
if os.path.exists(destDir):
shutil.rmtree(destDir)
os.mkdir(origDir)
os.mkdir(destDir)
zipInterface = zipfile.ZipFile(definitions["globalPath"]+definitions["inFile"], 'r')
zipInterface.extractall(origDir)
zipInterface.extractall(destDir)
zipInterface.close()
#Read the structure from the origin -> to fill later
numOfPages = 70
tabla = [{} for i in range(numOfPages)]
used = [False for i in range(numOfPages)]
textContents = gen.getTextContentToLoad(definitions)
maxPage, minPage = getPagesFromIdml(textContents, numOfPages, tabla, used, definitions)
#read the content for filling
comments = {}
gospel = {}
citation = {}
days = {}
saints = {}
gen.readContents(textContents, comments, gospel, citation, days, saints, definitions)
#Fill the the idml with the contents
pageId = fillContents( tabla, comments, gospel,
citation, days, saints,
maxPage, minPage, definitions )
print "Builded " + str(pageId) +" pages!"
#Create final result and
#delete folders and temp files
utils.zip_tree(destDir, definitions["globalPath"] + definitions["outFile"])
shutil.rmtree(tempDir)
```
#### File: 3plus2/code/parse.py
```python
from __future__ import print_function
import os, sys
import codecs
import glob
import json
import re
INPUT_ENCODING = "utf-8-sig"
OUTPUT_ENCODING = "utf-8"
def parse_text(text):
"""Scan the input file one line at a time, looking for a keyword
at the start of the line which will be one word in capital letters
followed by a colon. This introduces a text suite, possibly over several
lines which lasts until the next keyword or the end of the text.
Lines which start with a hash sign (#) are treated as comments
and are ignored.
"""
keyword_matcher = re.compile(r"([A-Z]+)\:\s*(.*)", flags=re.UNICODE)
#
# The text consists of a number of blocks, introduced by a keyword
# and containing one or more paragraphs. This parser yields the keyword
# and a list of the paragraphs. For some keywords, this list will always
# contain exactly one string.
#
keyword = None
paragraphs = []
for line in text.splitlines():
if line.startswith("#"):
continue
match = keyword_matcher.match(line)
if match:
if keyword:
yield keyword, paragraphs
keyword, text = match.groups()
paragraphs = [text.strip()]
else:
paragraphs.append(line.strip())
#
# If we fall out of the loop with a keyword & text
# remaining (which is the most likely case) then yield
# both
#
if keyword and paragraphs:
yield keyword, paragraphs
def process_title(texts):
"""Take a title with an optional subtitle in brackets and
yield both as TITLE / SUBTITLE
"""
text = " ".join(texts)
#
# Match as many non-left-bracket characters as possible
# Then, optionally, match text in brackets
#
title, subtitle = re.match(r"([^(]+)\s*(?:\(([^)]+)\))?", text, flags=re.UNICODE).groups()
yield "TITLE", title
yield "SUBTITLE", subtitle
def process_gospel(texts):
"""Take a gospel quote prefixed by a chapter-and-verse reference.
NB The chapter-and-verse must be on the same line as the "GOSPEL:"
tag but the quote must be on a new line -- this makes it easier
(read: possible) to parse the messy citations you can get.
"""
text = "%s\n%s" % (texts[0], " ".join(texts[1:]))
citation, gospel = re.match(r"([^\n]+)\n(.*)", text, flags=re.UNICODE).groups()
yield "CITATION", citation
yield "GOSPEL", gospel
style_markers = {
"_" : "italic",
"*" : "bold",
"@" : "boldItalic"
}
def process_paragraph(paragraph):
"""Generate tuples of (style, text) where the default
style is normal, and an underscore introduces an italic style
and an asterisk introduces a bold style.
"""
state = "normal"
text = ""
for c in paragraph:
for marker, style in style_markers.items():
if c == marker:
if text:
yield state, text
text = ""
state = "normal" if state == style else style
break
else:
text += c
if text:
yield state, text
def process_comments(texts):
"""The comments field is processed specially so that blocks which are
tagged as italic or bold (surrounded by _ or *) can be broken out into
separate blocks and tagged as such.
"""
comments = []
for paragraph in texts:
comment = list(process_paragraph(paragraph))
if comment:
comments.append(comment)
yield "COMMENTS", comments
#
# Each processor takes a list of paragraphs and yields
# tuples of keyword, paragraphs. This allows a single source
# line to become more than one keyword / text. eg a title
# which looks like this:
# TITLE: This is a title (With a subtitle)
# can yield:
# TITLE, This is a title
# SUBTITLE, With a subtitle
#
PROCESSORS = {
"TITLE" : process_title,
"GOSPEL" : process_gospel,
"COMMENTS" : process_comments
}
def process_one_file(filepath):
items = {}
with codecs.open(filepath, encoding=INPUT_ENCODING) as f:
for keyword, paragraphs in parse_text(f.read()):
items.update(PROCESSORS[keyword](paragraphs))
return items
def process_one_folder(dirpath, include_subfolders=True):
text = {}
if include_subfolders:
filepaths = []
for dirname, dirnames, filenames in os.walk(dirpath):
for filename in filenames:
if filename.endswith(".txt"):
filepaths.append(os.path.join(dirname, filename))
else:
filepaths = glob.glob(os.path.join(dirpath, "*.txt"))
for filepath in sorted(filepaths):
print(filepath)
filename = os.path.basename(filepath)
name, ext = os.path.splitext(filename)
text[name] = dict(process_one_file(filepath))
return text
def process_one_thing(path):
if os.path.isdir(path):
return process_one_folder(path)
else:
print(path)
return process_one_file(path)
if __name__ == '__main__':
import pprint
with codecs.open("parse.txt", "wb", encoding=INPUT_ENCODING) as f:
f.write("# -*- coding: utf-8 -*-\n")
pprint.pprint(process_one_thing(*sys.argv[1:]), f)
#~ json.dump(process_one_folder(*sys.argv[1:]), f)
``` |
{
"source": "3point14thon/bill_analyzer",
"score": 4
} |
#### File: bill_analyzer/src/ET_reg_find.py
```python
import regex as re
def reg_findall(root, pattern):
'''
Returns a list of the immediate children of the given root whose tag
matches the given regex pattern.
Args:
root(xml_element): The xml_element to compose list of children from.
pattern(str): The regex pattern to be matched.
Returns:
List of children whose tag matches the given regex pattern.
'''
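# Hypothetical usage: collect the immediate children whose tag ends in
# 'section', whatever their namespace prefix:
#   sections = reg_findall(bill_root, r'.*section$')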
pattern = re.compile(pattern)
matches = []
for child in root:
if pattern.match(child.tag):
matches.append(child)
return None if len(matches) == 0 else matches
``` |
{
"source": "3point14thon/DAT210x-master",
"score": 3
} |
#### File: free-spoken-digit-dataset-master/acquire_data/split_and_label_numbers.py
```python
import os
from collections import defaultdict
import numpy as np
from scipy.io.wavfile import read, write
from say_numbers_prompt import generate_number_sequence, DELAY_BETWEEN_NUMBERS
"""
Splits up the audio data you collected in Audacity.
Adjust the CONSTANTS below and run this file.
Labeled audio will appear in the "recordings" dir.
"""
YOUR_NAME_HERE = 'theo'
# Where did you save your Audacity-exported wav file?
PATH_TO_AUDIO_FILE = r'C:\Users\theo\Desktop\spoken_numbers_R_8khz.wav'
# Time (seconds) between the beginning of the file and the first number
# If your output files end up silent, change this number!
# It may help to look at the beginning of your recording in Audacity to see the offset.
START_OFFSET = 1.2
# How long it actually took you to say each number, typically 1.5 seconds
SECS_PER_NUMBER = 3
LABELS = generate_number_sequence()
def split_wav(start_offset, secs_between_numbers, secs_per_number):
fname = PATH_TO_AUDIO_FILE
rate, sound = read(fname)
if len(sound.shape) > 1:
# Audio probably has L and R channels.
# Use the left channel only (mono).
sound = sound[:, 0]
samples_between_numbers = int(rate * secs_between_numbers)
offset_idx = int(rate*start_offset)
counts = defaultdict(lambda: 0)
for i, label in enumerate(LABELS):
label = str(label)
start_idx = offset_idx + i * samples_between_numbers
stop_idx = start_idx + int(rate * secs_per_number)
if stop_idx > len(sound):
raise ValueError('Sound ends before expected number of samples reached for index: ' + str(i))
# trim silence
digit_audio = sound[start_idx:stop_idx]
digit_audio_trimmed = trim_silence(digit_audio)
# Build filename
outfile = label + "_" + YOUR_NAME_HERE + "_" + str(counts[label]) + ".wav"
outfile = 'recordings' + os.sep + outfile
# Write audio chunk to file
print "writing", outfile
write(outfile, rate, digit_audio_trimmed)
counts[label] += 1
def trim_silence(audio, n_noise_samples=1000, noise_factor=1.0, mean_filter_size=100):
""" Removes the silence at the beginning and end of the passed audio data
Fits noise based on the last n_noise_samples samples in the period
Finds where the mean-filtered magnitude > noise
:param audio: numpy array of audio
:return: a trimmed numpy array
"""
start = 0
end = len(audio)-1
mag = abs(audio)
noise_sample_period = mag[end-n_noise_samples:end]
noise_threshold = noise_sample_period.max()*noise_factor
mag_mean = np.convolve(mag, [1/float(mean_filter_size)]*mean_filter_size, 'same')
# find onset
for idx, point in enumerate(mag_mean):
if point > noise_threshold:
start = idx
break
# Reverse the array for trimming the end
for idx, point in enumerate(mag_mean[::-1]):
if point > noise_threshold:
end = len(audio) - idx
break
return audio[start:end]
if __name__ == '__main__':
split_wav(START_OFFSET, DELAY_BETWEEN_NUMBERS, SECS_PER_NUMBER)
```
#### File: free-spoken-digit-dataset/acquire_data/say_numbers_prompt.py
```python
import time
import math
"""
Prompts you to say numbers.
Start this, and then hit "Record" in Audacity.
http://www.audacityteam.org/download/
When you start Audacity, look in the bottom-left and set the Project Rate (Hz) to 8000.
It takes about 30 minutes to record a full dataset.
Tips:
- Turn off your screen saver before you start!
- Try a short recording session first to make sure everything works OK before doing the full recording.
When done, export the audio as one big .wav file and use 'split_and_label_numbers.py'
to make the labeled dataset.
"""
DELAY_BETWEEN_NUMBERS = 3
REPEATS_PER_NUMBER = 3
def wait_until(t):
while time.time() < t:
time.sleep(0.01)
def generate_number_sequence():
# We want the numbers jumbled up (helps eliminate any previous-number effects)
# This function scrambles the numbers in a deterministic way so that we can remember
# what the order was later.
# A deterministic shuffle makes labeling easy, makes pausing / resuming the experiment easy, etc.
nums = [str(i) for i in range(10) for set_num in range(REPEATS_PER_NUMBER)]
for i in range(len(nums)):
target = int(round(math.pi * i)) % len(nums)
(nums[i], nums[target]) = (nums[target], nums[i])
return nums
def show_numbers():
nums = generate_number_sequence()
print "Get ready..."
time.sleep(1)
t_start = time.time()
for i, num in enumerate(nums):
if (float(i)/len(nums) * 100) % 10 == 0:
print("\n====", float(i)/len(nums)*100, "% done====\n")
else:
print("")
t_next_display = t_start + (i + 1) * DELAY_BETWEEN_NUMBERS
t_next_blank = t_next_display + 2.5
# display a number
wait_until(t_next_display)
print(num)
# blank
wait_until(t_next_blank)
if __name__ == '__main__':
show_numbers()
``` |
{
"source": "3point14thon/Multaja",
"score": 3
} |
#### File: Multaja/src/camp_timeseries.py
```python
import numpy as np
import mimetypes
from PIL import Image
from spectral import *
import pandas as pd
from fbprophet import Prophet
from math import sqrt
# Prepare data for FB Prophet
def format_data(df):
'''
Takes in a dataframe with population and date data and returns a dataframe
properly formatted for fbprophet
Args:
df: Dataframe with population and date data
Returns:
Dataframe properly formatted for Facebook Prophet
'''
df['date'] = pd.to_datetime(df['date'])
return df.rename(columns={'date': 'ds', 'population': 'y'})
# Create FB Prophet instance & fit data
def make_model(X):
'''
Creates a fitted Prophet model
Args:
X: Dataframe properly formatted for Prophet
Returns:
A fit Prophet model
'''
model = Prophet() #daily_seasonality=True
return model.fit(X)
# Forecast refugee camp population 3 months out
def forcast_pop(model, periods=3):
'''
Forecasts low, medium and high population estimates for the next periods
Args:
model: The fit Prophet model to be used for forecasting
periods: The next set of periods to predict for
Returns:
A dataframe containing date, population estimate, lower population
estimate, and upper population estimate
[DS, YHAT, YHAT_LOWER, YHAT_UPPER] respectively
'''
future = model.make_future_dataframe(periods=periods)
forecast = model.predict(future)
return forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']]
def predict_pop(data_frame, period=3):
'''
Takes in a dataframe and a period and returns the predictions for the specified
time period
Args:
data_frame: A pandas dataframe containing date and population data
periods: The next set of periods to predict for
Returns:
A dataframe containing date, population estimate, lower population
estimate, and upper population estimate
[DS, YHAT, YHAT_LOWER, YHAT_UPPER] respectively
'''
df = format_data(data_frame)
model = make_model(df)
return forcast_pop(model, period)#.tail()?
if __name__ == '__main__':
rukban_df = pd.read_csv('data/refugee-population.csv')
print(predict_pop(rukban_df))
``` |
{
"source": "3probedata/Mask_RCNN",
"score": 2
} |
#### File: Mask_RCNN/tests/fixtures.py
```python
import os.path
import pytest
import urllib.request
from pathlib import Path
ROOT_DIR = Path(os.path.dirname(os.path.abspath(__file__))).parent
CACHE_DIR = ROOT_DIR/"cache"
@pytest.fixture
def model_data():
""" Fixture for downloading mask_rcnn_coco training data
"""
if not os.path.isdir(CACHE_DIR):
os.makedirs(CACHE_DIR)
test_model_path = str((CACHE_DIR / "mask_rcnn_coco.h5").resolve())
if not os.path.isfile(test_model_path):
urllib.request.urlretrieve(
"https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5",
test_model_path)
return test_model_path
```
#### File: Mask_RCNN/tests/test_model.py
```python
import mrcnn.model as modellib
import numpy as np
import skimage.io
from fixtures import model_data
from fixtures import ROOT_DIR
from mrcnn.config import Config
TEST_IMAGE_PATH = ROOT_DIR/'images/3627527276_6fe8cd9bfe_z.jpg'
class UnittestConfig(Config):
"""Configuration for training on MS COCO.
Derives from the base Config class and overrides values specific
to the COCO dataset.
"""
# Give the configuration a recognizable name
NAME = "unittest"
IMAGES_PER_GPU = 1
# Number of classes (including background)
NUM_CLASSES = 1 + 80 # COCO has 80 classes
GPU_COUNT = 1
def test_inference_detect(tmpdir, model_data):
config = UnittestConfig()
model = modellib.MaskRCNN(mode="inference", model_dir=tmpdir, config=config)
# Load weights trained on MS-COCO
model.load_weights(model_data, by_name=True)
image = skimage.io.imread(TEST_IMAGE_PATH)
result = model.detect([image], verbose=1)[0]
assert np.all([result['class_ids'], [24, 23, 23, 23]])
assert np.all([np.greater(result['scores'], [0.99, 0.99, 0.99, 0.99]), [True, True, True, True]])
``` |
{
"source": "3r1ck10/cpp",
"score": 3
} |
#### File: cpp/cpp/cpp_e.py
```python
import xarray as xray
import numpy as np
import pandas as pd
from .ploteos import correct_mon
from .ploteos_e import correct_e
class cpp_e(object):
def __init__(self,datos):
self.data=datos
def day_scaling(self,acu=False,f_cal=None,f_apl=None):
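# Monthly "scaling" (delta-change) correction per station: for accumulated
# variables (acu=True, e.g. precipitation) the ratio of observed to model
# monthly climatologies is applied multiplicatively to the projection (da);
# otherwise the observed-minus-model difference is added.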
# Apply daily scaling ---------------------------
print('running daily scaling')
cnj_do=pd.DataFrame()
cnj_dm=pd.DataFrame()
cnj_da=pd.DataFrame()
cnj_cor=pd.DataFrame()
for name in self.data['coords']['nombre']:
print(name)
coords=self.data['coords']
data=self.data['do'][name]
filtro_mes=data.resample('MS').agg(pd.Series.sum,skipna=False)
data=data.where(~filtro_mes.reindex(data.index,method='ffill').isna())
lat=coords.loc[coords['nombre']==name]['lat'].values[0]
lon=coords.loc[coords['nombre']==name]['lon'].values[0]
do=data.to_xarray()
do=do.rename({'index':'time'})
dm=self.data['dm'].interp(lat=lat,lon=lon)
dm=dm.drop(['lat','lon'])
da=self.data['da'].interp(lat=lat,lon=lon)
da=da.drop(['lat','lon'])
if f_cal==None or f_apl==None:
pass
else:
do=do.loc[f_cal[0]:f_cal[1]]
dm=dm.loc[f_cal[0]:f_cal[1]]
da=da.loc[f_apl[0]:f_apl[1]]
do['time']=dm['time']
dm=dm.where(do>-888)
if acu==True:
divisor=dm.resample(time='MS').sum('time',skipna=False).groupby('time.month').mean('time')
divisor=divisor.where(~(divisor<0.01),0.01)
fc=(do.resample(time='MS').sum('time',skipna=False).groupby('time.month').mean('time')/
divisor)
cor=(da.groupby('time.month')*fc)
else:
fc=(do.resample(time='MS').mean('time',skipna=False).groupby('time.month').mean('time')-
dm.resample(time='MS').mean('time',skipna=False).groupby('time.month').mean('time'))
cor=(da.groupby('time.month')+fc)
cor=cor.drop(['month'])
cnj_do.loc[:,name]=do.to_dataframe(name=name).iloc[:,0]
cnj_dm.loc[:,name]=dm.to_dataframe(name=name).iloc[:,0]
cnj_da.loc[:,name]=da.to_dataframe(name=name).iloc[:,0]
cnj_cor.loc[:,name]=cor.to_dataframe(name=name).iloc[:,0]
print('finished daily scaling')
return correct_e(cnj_cor,cnj_da,cnj_dm,cnj_do,acu=acu)
def day_eqm(self,acu=False,f_cal=None,f_apl=None):
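# Daily empirical quantile mapping (EQM) per station and calendar month: the
# model historical series is mapped onto the observed 1%-99% quantiles, the
# resulting quantile deltas are added to the projected series, and for
# accumulated variables only wet values (>= 0.001) enter the quantile fit.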
self.acumulado=acu
# Apply daily EQM ------------------------------
print('running daily EQM')
cnj_do=pd.DataFrame()
cnj_dm=pd.DataFrame()
cnj_da=pd.DataFrame()
cnj_cor=pd.DataFrame()
for name in self.data['coords']['nombre']:
print(name)
coords=self.data['coords']
data=self.data['do'][name]
filtro_mes=data.resample('MS').agg(pd.Series.sum,skipna=False)
data=data.where(~filtro_mes.reindex(data.index,method='ffill').isna())
lat=coords.loc[coords['nombre']==name]['lat'].values[0]
lon=coords.loc[coords['nombre']==name]['lon'].values[0]
do=data.to_xarray()
do=do.rename({'index':'time'})
dm=self.data['dm'].interp(lat=lat,lon=lon)
dm=dm.drop(['lat','lon'])
da=self.data['da'].interp(lat=lat,lon=lon)
da=da.drop(['lat','lon'])
if f_cal==None or f_apl==None:
pass
else:
do=do.loc[f_cal[0]:f_cal[1]]
dm=dm.loc[f_cal[0]:f_cal[1]]
da=da.loc[f_apl[0]:f_apl[1]]
do['time']=dm['time']
dm=dm.where(do>-888)
quantiles=np.arange(0.01,1,0.01)
meses=[1,2,3,4,5,6,7,8,9,10,11,12]
for mon in meses:
do2=do.loc[do['time.month']==mon]
dm2=dm.loc[dm['time.month']==mon]
da2=da.loc[da['time.month']==mon]
if acu==True:
do_f=do2.where(do2>=0.001)
dm_f=dm2.where(dm2>=0.001)
da_f=da2.where(da2>=0.001)
else:
do_f=do2
dm_f=dm2
da_f=da2
datos_his=dm_f
datos_pro=da_f
datos_obs_q=do_f.quantile(quantiles,dim='time')
datos_his_q=dm_f.quantile(quantiles,dim='time')
datos_pro_q=da_f.quantile(quantiles,dim='time')
for quan in quantiles:
if quan==0.01:
datos_his_cor=datos_his.where(datos_his>datos_his_q.sel(quantile=0.02,method='nearest'),
datos_obs_q.sel(quantile=0.01,method='nearest'))
elif quan==0.99:
datos_his_cor=datos_his_cor.where(~(datos_his>=datos_his_q.sel(quantile=0.99,method='nearest')),
datos_obs_q.sel(quantile=0.99,method='nearest'))
else:
datos_his_cor=datos_his_cor.where(~((datos_his>=datos_his_q.sel(quantile=quan,method='nearest'))&
(datos_his<datos_his_q.sel(quantile=quan+0.01,method='nearest'))),
datos_obs_q.sel(quantile=quan,method='nearest'))
deltas=datos_his_cor.quantile(quantiles,dim='time')-datos_his.quantile(quantiles,dim='time')
# Add the quantile deltas to the projected data.
for quan in quantiles:
if quan==0.01:
datos_pro_cor=datos_pro.where(datos_pro>datos_pro_q.sel(quantile=0.02,method='nearest'),
datos_pro+deltas.sel(quantile=0.01,method='nearest'))
elif quan==0.99:
datos_pro_cor=datos_pro_cor.where(~(datos_pro>=datos_pro_q.sel(quantile=0.99,method='nearest')),
datos_pro+deltas.sel(quantile=0.99,method='nearest'))
else:
datos_pro_cor=datos_pro_cor.where(~((datos_pro>=datos_pro_q.sel(quantile=quan,method='nearest'))&
(datos_pro<datos_pro_q.sel(quantile=quan+0.01,method='nearest'))),
datos_pro+deltas.sel(quantile=quan,method='nearest'))
if mon==1:
datos_his_cor2=datos_his_cor
datos_pro_cor2=datos_pro_cor
else:
datos_his_cor2=xray.concat([datos_his_cor2,datos_his_cor],dim='time')
datos_pro_cor2=xray.concat([datos_pro_cor2,datos_pro_cor],dim='time')
datos_his_cor2=datos_his_cor2.sortby('time',ascending=True)
datos_pro_cor2=datos_pro_cor2.sortby('time',ascending=True)
if acu==True:
#cor=da.where(~(da<0.1),0)
#cor=cor.where(~(cor>=0.1),datos_pro_cor2)
cor=datos_pro_cor2
cor=cor.where(cor.notnull(),da)
cor=cor.where(~(cor<0),0)
else:
cor=datos_pro_cor2
cnj_do.loc[:,name]=do.to_dataframe(name=name).iloc[:,0]
cnj_dm.loc[:,name]=dm.to_dataframe(name=name).iloc[:,0]
cnj_da.loc[:,name]=da.to_dataframe(name=name).iloc[:,0]
cnj_cor.loc[:,name]=cor.to_dataframe(name=name).iloc[:,0]
# NOTE: the warnings come from ocean grid cells with no observed data.
print('finished daily EQM')
return correct_e(cnj_cor,cnj_da,cnj_dm,cnj_do,acu=acu)
``` |
{
"source": "3r1ck10/meanpy",
"score": 3
} |
#### File: meanpy/meanpy/convertible_e.py
```python
import xarray as xray
import numpy as np
import pandas as pd
from .ploteos import convertido_e
class pre_rea_station(object):
def __init__(self,estaciones,obs,mod,pro):
self.estaciones=estaciones
self.observados=obs
self.modelos=mod
self.proyecciones=pro
def rea_perform(self,acu=False):
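# REA-style ensemble weighting: each model is weighted by a bias factor
# (natural variability of the observations vs. the model's historical bias)
# and a convergence factor (distance of its projected change from the weighted
# ensemble change), iterated until a stable weighted mean change and an
# uncertainty range (2x weighted spread) are obtained.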
print("iniciando rea")
cnj_obs=pd.DataFrame()
cnj_mod=pd.DataFrame()
cnj_pro=pd.DataFrame()
cnj_rea=pd.DataFrame()
cnj_incertidumbre=pd.DataFrame()
cnj_serie_rea=pd.DataFrame()
cnj_cambios=pd.DataFrame()
cnj_r_fc=pd.DataFrame()
coords=self.estaciones
for name in self.estaciones['nombre']:
print(name)
obs=self.observados[name]
filtro_mes=obs.resample('MS').agg(pd.Series.sum,skipna=False)
obs=obs.where(~filtro_mes.reindex(obs.index,method='ffill').isna())
lat=coords.loc[coords['nombre']==name]['lat'].values[0]
lon=coords.loc[coords['nombre']==name]['lon'].values[0]
obs=obs.to_xarray()
obs=obs.rename({'index':'time'})
if acu==True:
obs=obs.resample(time='MS').sum('time',skipna=False)
else:
obs=obs.resample(time='MS').mean('time',skipna=False)
#print(obs)
modelos=self.modelos.interp(lat=lat,lon=lon)
modelos=modelos.drop(['lat','lon'])
#print(lat,lon)
#print(modelos)
proyecciones=self.proyecciones.interp(lat=lat,lon=lon)
proyecciones=proyecciones.drop(['lat','lon'])
#print(proyecciones)
iter_time=[1,2,3,4,5,6,7,8,9,10,11,12]
e_fc=obs.groupby('time.month').mean('time')
for i in iter_time:
gg=obs.loc[obs['time.month']==i]
n=gg['time'].shape[0]
gg['time']=np.arange(1,n+1)
xstd=gg['time'].std()
xmean=gg['time'].mean()
ymean=gg.mean(dim='time')
cov=((gg['time']-xmean)*(gg-ymean)).mean('time')
slope=cov/(xstd**2)
intercept = ymean-xmean*slope
trend=gg['time']*slope+intercept
detrended=gg-trend
detrended_rol=detrended.rolling(time=10,min_periods=2).mean()
detrended_rol_max=detrended_rol.max('time')
detrended_rol_min=detrended_rol.min('time')
if i==2:
print(detrended_rol,detrended_rol_max,detrended_rol_min)
detrended_rol_range=detrended_rol_max-detrended_rol_min
e_fc.loc[e_fc['month']==i]=detrended_rol_range
#-------------------------------------------------
obs_guardar=obs
modelos_guardar=modelos
proyecciones_guardar=proyecciones
#-----------------------------------------------
obs=obs.groupby('time.month').mean('time')
modelos=modelos.groupby('time.month').mean('time')
proyecciones=proyecciones.groupby('time.month').mean('time')
bias_mod=abs(modelos-obs)
bias_mod=bias_mod.where(~(bias_mod<0.01),0.01) # avoid infinities
r_bias=e_fc/bias_mod
r_bias=r_bias.where(bias_mod>e_fc,1)
distance_mod=proyecciones-modelos
distance_mean=distance_mod.mean('modelo')
#_-------------------------------------------------
for i in range(11):
if i==0:
distance=abs(distance_mod-distance_mean)
else:
distance=abs(distance_mod-rea_change)
distance=distance.where(~(distance<0.01),0.01) # avoid infinities
r_converg=e_fc/distance
r_converg=r_converg.where(distance>e_fc,1)
r_fc=r_bias*r_converg
r_fc=r_fc.where(~(r_fc<0.01),0.01) # avoid infinities
rea_change=(r_fc*distance_mod).sum('modelo')/r_fc.sum('modelo')
rango_incertidumbre=(((r_fc*(distance_mod-rea_change)**2).sum('modelo')/r_fc.sum('modelo'))**0.5)*2
#print(rea_change[:,30,30].values[6:12])
#-----------------------------------------------------------
serie_rea=((r_fc*proyecciones_guardar.groupby('time.month')).sum('modelo')).groupby('time.month')/r_fc.sum('modelo')#---2
#serie_rea.to_netcdf('serie_rea_'+var+'.nc')#------------2
#---------------------------------------------------------------
serie_incertidumbre=(((r_fc*((proyecciones_guardar-serie_rea)**2).groupby('time.month').mean('time')).sum('modelo')/r_fc.sum('modelo'))**0.5)*2
#rango_incertidumbre=(((r_fc*(distance_mod-rea_change)**2).sum('modelo')/r_fc.sum('modelo'))**0.5)*2
cnj_obs.loc[:,name]=obs_guardar.to_dataframe(name=name).iloc[:,0]
cnj_mod.loc[:,name]=modelos_guardar.to_dataframe(name=name).iloc[:,0]
cnj_pro.loc[:,name]=proyecciones_guardar.to_dataframe(name=name).iloc[:,0]
cnj_rea.loc[:,name]=rea_change.to_dataframe(name=name).iloc[:,0]
cnj_cambios.loc[:,name]=distance_mod.to_dataframe(name=name).iloc[:,0]
cnj_incertidumbre.loc[:,name]=rango_incertidumbre.to_dataframe(name=name).iloc[:,0]
cnj_serie_rea.loc[:,name]=serie_rea.to_dataframe(name=name).iloc[:,0]
cnj_r_fc.loc[:,name]=r_fc.to_dataframe(name=name).iloc[:,0]
return convertido_e(cnj_obs,cnj_mod,cnj_pro,cnj_rea,cnj_incertidumbre,cnj_serie_rea,cnj_cambios,cnj_r_fc)
``` |
{
"source": "3r1ck10/pypost",
"score": 2
} |
#### File: pypost/pypost/datos_grid.py
```python
import xarray as xray
import pandas as pd
import numpy as np
from .cpp import cpp
from .cpp_e import cpp_e
import os.path
def datasets_netcdf(do,dm,da,var,f_cal=None,f_apl=None):
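# Open the observed (do), model historical (dm) and projected (da) NetCDF files
# for variable `var`, optionally sliced to the calibration (f_cal) and
# application (f_apl) periods, harmonise lat/lon names and grids against the
# observed dataset, and wrap everything in a cpp object.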
# Read the datasets in NetCDF format
cnj_datos=dict()
for i,j in zip(['do','dm','da'],[do,dm,da]):
if f_cal==None or f_apl==None:
# no dates specified
cnj_datos[i]=xray.open_dataset(j)[var]
else:
# dates specified
if i=='da':
cnj_datos[i]=xray.open_dataset(j)[var].sel(time=slice(f_apl[0],f_apl[1]))
else:
cnj_datos[i]=xray.open_dataset(j)[var].sel(time=slice(f_cal[0],f_cal[1]))
# Linear interpolation
for la,lo in zip(['latitud','latitude','Latitude'],['longitud','longitude','Longitude']):
if ((la in cnj_datos[i].coords.dims) and (lo in cnj_datos[i].coords.dims)):
print('renaming coordinates to lat/lon')
cnj_datos[i]=cnj_datos[i].rename({la:'lat',lo:'lon'})
else:
print('lat/lon names already correct')
if i!='do':
if not (np.array_equal(cnj_datos['do']['lat'].values,cnj_datos[i]['lat'].values) and
np.array_equal(cnj_datos['do']['lon'].values,cnj_datos[i]['lon'].values)):
print('interpolating to the observed lat/lon grid')
cnj_datos[i]=cnj_datos[i].interp(lat=cnj_datos['do']['lat'],lon=cnj_datos['do']['lon'])
else:
pass
# Dimension order
cnj_datos[i]=cnj_datos[i].transpose("time", "lat", "lon")
return cpp(cnj_datos)
def open_station(estaciones,do,dm,da,var,f_cal=None,f_apl=None):
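# Station variant: read the station coordinates and observations from
# ';'-delimited CSV files (dates formatted %d/%m/%Y), open the model (dm) and
# projection (da) NetCDF files, and wrap everything in a cpp_e object.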
# Read the NetCDF datasets, the station observations and the coordinates.
cnj_datos=dict()
coords=pd.read_csv(estaciones,delimiter=';',index_col=0)
datos=pd.read_csv(do,delimiter=';',index_col=0)
datos.index=pd.to_datetime(datos.index,format='%d/%m/%Y')
datos=datos.loc[f_cal[0]:f_cal[1]]
if f_cal==None or f_apl==None:
cnj_datos['dm']=xray.open_dataset(dm)[var]
cnj_datos['da']=xray.open_dataset(da)[var]
else:
cnj_datos['dm']=xray.open_dataset(dm)[var].sel(time=slice(f_cal[0],f_cal[1]))
cnj_datos['da']=xray.open_dataset(da)[var].sel(time=slice(f_apl[0],f_apl[1]))
for model_data in ['dm','da']:
for la,lo in zip(['latitud','latitude','Latitude'],['longitud','longitude','Longitude']):
if ((la in cnj_datos[model_data].coords.dims) and (lo in cnj_datos[model_data].coords.dims)):
print('renaming coordinates to lat/lon')
cnj_datos[model_data]=cnj_datos[model_data].rename({la:'lat',lo:'lon'})
else:
print('lat/lon names already correct')
# TEMPORARY -----------------------------------
#if cnj_datos['dm']['lon'].max()>180:
# cnj_datos['dm']['lon']=cnj_datos['dm']['lon']-360.0
#if cnj_datos['da']['lon'].max()>180:
# cnj_datos['da']['lon']=cnj_datos['da']['lon']-360.0
#--------------------------------------------------------------------------
cnj_datos['do']=datos
cnj_datos['coords']=coords
return cpp_e(cnj_datos)
``` |
{
"source": "3r1co/registry-frontend",
"score": 2
} |
#### File: registry-frontend/api/repositories.py
```python
from rejson import Path
from sanic import Blueprint, response
from helpers.constants import REPO_PREFIX
repositories = Blueprint('repositories', url_prefix='/repositories')
@repositories.route("/")
async def get_repositories(request):
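# List every known repository with its tag count and total size, reading from
# ReJSON keys ("repo_*") when a Redis backend is configured (app.persistent)
# and from the in-memory dict otherwise.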
dictlist = list()
app = request.app
repos = app.db.keys("repo_*") if app.persistent else app.db
for repo in repos:
value = app.db.jsonget(repo, Path.rootPath()) if app.persistent else app.db[repo]
len_to_truncate = len(REPO_PREFIX) if app.persistent else 0
dictlist.append({'repo': repo[len_to_truncate:], 'tags': len(value['tags']), 'size': value['size']})
return response.json({'data': dictlist})
```
#### File: registry-frontend/api/scheduler.py
```python
import datetime
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from pytz import utc
from sanic import Blueprint
from helpers.data_retrieval import fetch
scheduler = Blueprint('scheduler')
@scheduler.listener('before_server_start')
async def initialize_scheduler(app, loop):
scheduler = AsyncIOScheduler({'event_loop': loop})
scheduler.add_job(fetch, 'interval', hours=1, next_run_time=datetime.datetime.now(), timezone=utc, args=[app])
scheduler.configure(timezone=utc)
scheduler.start()
```
#### File: registry-frontend/tests/test_helpers.py
```python
import random
import string
from types import SimpleNamespace
from unittest import mock
import pytest
import redis
import registryclient
from helpers import constants, helper, init_functions
def test_repo_prefix():
assert constants.REPO_PREFIX == "repo_"
def test_manifest_prefix():
assert constants.MANIFEST_PREFIX == "manifest_"
def test_truncate_middle():
# abcd = 4, maxlength = 30 -> 30-4 = 26 / 2 = 13
assert " " * 13 + "abcd" + " " * 13 == helper.truncate_middle("abcd", 30)
str = ''.join(random.choice(string.ascii_letters) for x in range(40))
assert str[0:15] + "..." + str[-12:] == helper.truncate_middle(str, 30)
def test_size_of_fmt():
assert "1.0KiB" == helper.sizeof_fmt(1024)
assert "1.0MiB" == helper.sizeof_fmt(1024 ** 2)
assert "2.0MiB" == helper.sizeof_fmt(2 * 1024 ** 2)
assert "1.0GiB" == helper.sizeof_fmt(1024 ** 3)
assert "1.0TiB" == helper.sizeof_fmt(1024 ** 4)
assert "1.0PiB" == helper.sizeof_fmt(1024 ** 5)
assert "1.0EiB" == helper.sizeof_fmt(1024 ** 6)
assert "1.0ZiB" == helper.sizeof_fmt(1024 ** 7)
assert "1.0YiB" == helper.sizeof_fmt(1024 ** 8)
def test_redis_available():
app = SimpleNamespace()
db = SimpleNamespace()
db.get = mock.Mock(return_value=None)
app.db = db
assert helper.is_redis_available(app) == True
db.get = mock.Mock(side_effect=redis.exceptions.ConnectionError)
assert helper.is_redis_available(app) == False
def test_startup_arguments():
args = init_functions.init_args(["--registry", "registry",
"--username", "aaa",
"--password", "<PASSWORD>",
"--listen", "0.0.0.0",
"--port", "8888",
"--redis", "redis",
"--cacert", "ca.crt",
"--cli","--debug"
])
assert args.registry == "registry"
assert args.username == "aaa"
assert args.password == "<PASSWORD>"
assert args.listen == "0.0.0.0"
assert args.redis == "redis"
assert args.cacert == "ca.crt"
assert args.cli == True
assert args.debug == True
def test_init_db_no_persistence():
app = SimpleNamespace()
args = SimpleNamespace()
args.redis = None
init_functions.init_db(app, args)
assert app.db == dict()
assert app.manifests == dict()
assert app.persistent == False
@mock.patch('helpers.init_functions.Client')
def test_init_db_persistence(mock_rejson):
app = SimpleNamespace()
args = SimpleNamespace()
args.redis = "localhost"
init_functions.init_db(app, args)
assert app.persistent == True
@mock.patch('helpers.init_functions.Client')
@mock.patch('helpers.init_functions.is_redis_available')
def test_init_db_persistence_no_connection(mock_is_available, mock_rejson):
app = SimpleNamespace()
args = SimpleNamespace()
args.redis = "localhost"
mock_is_available.return_value = False
try:
init_functions.init_db(app, args)
pytest.fail("Exception should have been thrown")
except Exception:
pass
def test_init_app():
app = mock.Mock()
args = SimpleNamespace()
args.registry = "http://localhost:5000"
args.username = "username"
args.password = "password"
args.cli = False
args.cacert = None
init_functions.init_app(app, args)
assert isinstance(app.reg, registryclient.RegistryClient)
```
#### File: registry-frontend/tests/test_registryclient.py
```python
import os
import aiohttp
import asyncio
import pytest
import json
from aioresponses import aioresponses
from registryclient import RegistryClient
@pytest.fixture(scope = 'module')
def global_data():
return {'registry': "http://registry",
'repository': "test/alpine",
'tag': "latest"}
@pytest.fixture(scope = 'module')
def client(global_data):
return RegistryClient(global_data["registry"], None, None, None)
@pytest.mark.asyncio
async def test_retrieve_repositories(global_data, client):
f = open(get_resource('response_repositories.json'), "r")
with aioresponses() as m:
m.get("%s/v2/_catalog" % global_data["registry"], status=200, body=f.read())
session = aiohttp.ClientSession()
resp = await client.retrieve_repositories(session)
expect = set([global_data["repository"]])
assert check_equal(resp, expect)
await session.close()
@pytest.mark.asyncio
async def test_retrieve_tags_for_repository(global_data, client):
f = open(get_resource('response_tags.json'), "r")
with aioresponses() as m:
m.get("%s/v2/%s/tags/list" % (global_data["registry"], global_data["repository"]), status=200, body=f.read())
session = aiohttp.ClientSession()
resp = await client.retrieve_tags_for_repository(global_data["repository"], session)
expect = set([global_data["tag"]])
assert check_equal(resp, expect)
await session.close()
@pytest.mark.asyncio
async def test_retrieve_size_for_tag_and_repository(global_data, client):
f = open(get_resource('response_manifest_v2.json'), "r")
with aioresponses() as m:
m.get("%s/v2/%s/manifests/%s" % (global_data["registry"], global_data["repository"], global_data["tag"]), status=200, body=f.read())
session = aiohttp.ClientSession()
resp = await client.retrieve_size_for_tag_and_repository(global_data["repository"], global_data["tag"], session)
expect = {'repo': 'test/alpine', 'tag': 'latest', 'sizes':
{'manifest': 7023,
'sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f': 32654,
'sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b': 16724,
'sha256:ec4b8955958665577945c89419d1af06b5f7636b4ac3da7f12184802ad867736': 73109}}
assert check_equal(resp, expect)
await session.close()
@pytest.mark.asyncio
async def test_retrieve_manifest_v1_for_tag_and_repository(global_data, client):
f = open(get_resource('response_manifest_v1.json'), "r")
with aioresponses() as m:
m.get("%s/v2/%s/manifests/%s" % (global_data["registry"], global_data["repository"], global_data["tag"]), status=200, body=f.read())
session = aiohttp.ClientSession()
resp = await client.retrieve_manifest_v1_for_tag_and_repository(global_data["repository"], global_data["tag"], session)
response = json.loads(resp)
assert response["architecture"] == "amd64"
await session.close()
def check_equal(s1, s2):
return len(s1) == len(s2) and sorted(s1) == sorted(s2)
def get_resource(filename):
return os.path.join(os.path.dirname(__file__), 'resources', filename)
``` |
{
"source": "3rand/benchmarking-platform",
"score": 2
} |
#### File: changeo/bin/AlignRecords.py
```python
__author__ = '<NAME>'
from changeo import __version__, __date__
# Imports
import os
import shutil
from argparse import ArgumentParser
from collections import OrderedDict
from itertools import chain
from textwrap import dedent
from Bio.SeqRecord import SeqRecord
# Presto and changeo import
from presto.Defaults import default_out_args, default_muscle_exec
from presto.Applications import runMuscle
from presto.IO import printLog, printError, printWarning
from presto.Multiprocessing import manageProcesses
from changeo.Commandline import CommonHelpFormatter, checkArgs, getCommonArgParser, parseCommonArgs
from changeo.IO import getDbFields, getFormatOperators
from changeo.Multiprocessing import DbResult, feedDbQueue, processDbQueue, collectDbQueue
# TODO: maybe not bothering with 'set' is best. can just work off field identity
def groupRecords(records, fields=None, calls=['v', 'j'], mode='gene', action='first'):
"""
Groups Receptor objects based on gene or annotation
Arguments:
records : an iterator of Receptor objects to group.
fields : gene field to group by.
calls : allele calls to use for grouping.
one or more of ('v', 'd', 'j').
mode : specificity of alignment call to use for allele call fields.
one of ('allele', 'gene').
action : only 'first' is currently supported.
Returns:
dictionary of grouped records
"""
# Define functions for grouping keys
if mode == 'allele' and fields is None:
def _get_key(rec, calls, action):
return tuple(rec.getAlleleCalls(calls, action))
elif mode == 'gene' and fields is None:
def _get_key(rec, calls, action):
return tuple(rec.getGeneCalls(calls, action))
elif mode == 'allele' and fields is not None:
def _get_key(rec, calls, action):
vdj = rec.getAlleleCalls(calls, action)
ann = [rec.getChangeo(k) for k in fields]
return tuple(chain(vdj, ann))
elif mode == 'gene' and fields is not None:
def _get_key(rec, calls, action):
vdj = rec.getGeneCalls(calls, action)
ann = [rec.getChangeo(k) for k in fields]
return tuple(chain(vdj, ann))
rec_index = {}
for rec in records:
key = _get_key(rec, calls, action)
# Assigned grouped records to individual keys and all failed to a single key
if all([k is not None for k in key]):
rec_index.setdefault(key, []).append(rec)
else:
rec_index.setdefault(None, []).append(rec)
return rec_index
def alignBlocks(data, field_map, muscle_exec=default_muscle_exec):
"""
Multiple aligns blocks of sequence fields together
Arguments:
data : DbData object with Receptor objects to process.
field_map : a dictionary of {input sequence: output sequence} field names to multiple align.
muscle_exec : the MUSCLE executable.
Returns:
changeo.Multiprocessing.DbResult : object containing Receptor objects with multiple aligned sequence fields.
"""
# Define return object
result = DbResult(data.id, data.data)
result.results = data.data
result.valid = True
# Fail invalid groups
if result.id is None:
result.log = None
result.valid = False
return result
seq_fields = list(field_map.keys())
seq_list = [SeqRecord(r.getSeq(f), id='%s_%s' % (r.sequence_id.replace(' ', '_'), f)) for f in seq_fields \
for r in data.data]
seq_aln = runMuscle(seq_list, aligner_exec=muscle_exec)
if seq_aln is not None:
aln_map = {x.id: i for i, x in enumerate(seq_aln)}
for i, r in enumerate(result.results, start=1):
for f in seq_fields:
idx = aln_map['%s_%s' % (r.sequence_id.replace(' ', '_'), f)]
seq = str(seq_aln[idx].seq)
r.annotations[field_map[f]] = seq
result.log['%s-%s' % (f, r.sequence_id)] = seq
else:
result.valid = False
#for r in result.results: print r.annotations
return result
def alignAcross(data, field_map, muscle_exec=default_muscle_exec):
"""
Multiple aligns sequence fields column wise
Arguments:
data : DbData object with Receptor objects to process.
field_map : a dictionary of {input sequence: output sequence} field names to multiple align.
muscle_exec : the MUSCLE executable.
Returns:
changeo.Multiprocessing.DbResult : object containing Receptor objects with multiple aligned sequence fields.
"""
# Define return object
result = DbResult(data.id, data.data)
result.results = data.data
result.valid = True
# Fail invalid groups
if result.id is None:
result.log = None
result.valid = False
return result
seq_fields = list(field_map.keys())
for f in seq_fields:
seq_list = [SeqRecord(r.getSeq(f), id=r.sequence_id.replace(' ', '_')) for r in data.data]
seq_aln = runMuscle(seq_list, aligner_exec=muscle_exec)
if seq_aln is not None:
aln_map = {x.id: i for i, x in enumerate(seq_aln)}
for i, r in enumerate(result.results, start=1):
idx = aln_map[r.sequence_id.replace(' ', '_')]
seq = str(seq_aln[idx].seq)
r.annotations[field_map[f]] = seq
result.log['%s-%s' % (f, r.sequence_id)] = seq
else:
result.valid = False
#for r in result.results: print r.annotations
return result
def alignWithin(data, field_map, muscle_exec=default_muscle_exec):
"""
Multiple aligns sequence fields within a row
Arguments:
data : DbData object with Receptor objects to process.
field_map : a dictionary of {input sequence: output sequence} field names to multiple align.
muscle_exec : the MUSCLE executable.
Returns:
changeo.Multiprocessing.DbResult : object containing Receptor objects with multiple aligned sequence fields.
"""
# Define return object
result = DbResult(data.id, data.data)
result.results = data.data
result.valid = True
# Fail invalid groups
if result.id is None:
result.log = None
result.valid = False
return result
record = data.data
seq_fields = list(field_map.keys())
seq_list = [SeqRecord(record.getSeq(f), id=f) for f in seq_fields]
seq_aln = runMuscle(seq_list, aligner_exec=muscle_exec)
if seq_aln is not None:
aln_map = {x.id: i for i, x in enumerate(seq_aln)}
for f in seq_fields:
idx = aln_map[f]
seq = str(seq_aln[idx].seq)
record.annotations[field_map[f]] = seq
result.log[f] = seq
else:
result.valid = False
return result
def alignRecords(db_file, seq_fields, group_func, align_func, group_args={}, align_args={},
format='changeo', out_file=None, out_args=default_out_args, nproc=None, queue_size=None):
"""
Performs a multiple alignment on sets of sequences
Arguments:
db_file : filename of the input database.
seq_fields : the sequence fields to multiple align.
group_func : function to use to group records.
align_func : function to use to multiple align sequence groups.
group_args : dictionary of arguments to pass to group_func.
align_args : dictionary of arguments to pass to align_func.
format : output format. One of 'changeo' or 'airr'.
out_file : output file name. Automatically generated from the input file if None.
out_args : common output argument dictionary from parseCommonArgs.
nproc : the number of processQueue processes.
if None defaults to the number of CPUs.
queue_size : maximum size of the argument queue.
if None defaults to 2*nproc.
Returns:
dict : names of the 'pass' and 'fail' output files.
"""
# Define subcommand label dictionary
cmd_dict = {alignAcross: 'across', alignWithin: 'within', alignBlocks: 'block'}
# Print parameter info
log = OrderedDict()
log['START'] = 'AlignRecords'
log['COMMAND'] = cmd_dict.get(align_func, align_func.__name__)
log['FILE'] = os.path.basename(db_file)
log['SEQ_FIELDS'] = ','.join(seq_fields)
if 'group_fields' in group_args: log['GROUP_FIELDS'] = ','.join(group_args['group_fields'])
if 'mode' in group_args: log['MODE'] = group_args['mode']
if 'action' in group_args: log['ACTION'] = group_args['action']
log['NPROC'] = nproc
printLog(log)
# Define format operators
try:
reader, writer, schema = getFormatOperators(format)
except ValueError:
printError('Invalid format %s.' % format)
# Define feeder function and arguments
if 'group_fields' in group_args and group_args['group_fields'] is not None:
group_args['group_fields'] = [schema.toReceptor(f) for f in group_args['group_fields']]
feed_func = feedDbQueue
feed_args = {'db_file': db_file,
'reader': reader,
'group_func': group_func,
'group_args': group_args}
# Define worker function and arguments
field_map = OrderedDict([(schema.toReceptor(f), '%s_align' % f) for f in seq_fields])
align_args['field_map'] = field_map
work_func = processDbQueue
work_args = {'process_func': align_func,
'process_args': align_args}
# Define collector function and arguments
out_fields = getDbFields(db_file, add=list(field_map.values()), reader=reader)
out_args['out_type'] = schema.out_type
collect_func = collectDbQueue
collect_args = {'db_file': db_file,
'label': 'align',
'fields': out_fields,
'writer': writer,
'out_file': out_file,
'out_args': out_args}
# Call process manager
result = manageProcesses(feed_func, work_func, collect_func,
feed_args, work_args, collect_args,
nproc, queue_size)
# Print log
result['log']['END'] = 'AlignRecords'
printLog(result['log'])
output = {k: v for k, v in result.items() if k in ('pass', 'fail')}
return output
def getArgParser():
"""
Defines the ArgumentParser
Arguments:
None
Returns:
an ArgumentParser object
"""
# Define output file names and header fields
fields = dedent(
'''
output files:
align-pass
database with multiple aligned sequences.
align-fail
database with records failing alignment.
required fields:
sequence_id, v_call, j_call
<field>
user specified sequence fields to align.
output fields:
<field>_align
''')
# Define ArgumentParser
parser = ArgumentParser(description=__doc__, epilog=fields,
formatter_class=CommonHelpFormatter, add_help=False)
group_help = parser.add_argument_group('help')
group_help.add_argument('--version', action='version',
version='%(prog)s:' + ' %s %s' %(__version__, __date__))
group_help.add_argument('-h', '--help', action='help', help='show this help message and exit')
subparsers = parser.add_subparsers(title='subcommands', dest='command', metavar='',
help='alignment method')
# TODO: This is a temporary fix for Python issue 9253
subparsers.required = True
# Parent parser
parser_parent = getCommonArgParser(format=True, multiproc=True)
# Argument parser for column-wise alignment across records
parser_across = subparsers.add_parser('across', parents=[parser_parent],
formatter_class=CommonHelpFormatter, add_help=False,
help='''Multiple aligns sequence columns within groups
and across rows using MUSCLE.''')
group_across = parser_across.add_argument_group('alignment arguments')
group_across.add_argument('--sf', nargs='+', action='store', dest='seq_fields', required=True,
help='The sequence fields to multiple align within each group.')
group_across.add_argument('--gf', nargs='+', action='store', dest='group_fields', default=None,
help='Additional (not allele call) fields to use for grouping.')
group_across.add_argument('--calls', nargs='+', action='store', dest='calls',
choices=('v', 'd', 'j'), default=['v', 'j'],
help='Segment calls (allele assignments) to use for grouping.')
group_across.add_argument('--mode', action='store', dest='mode',
choices=('allele', 'gene'), default='gene',
help='''Specifies whether to use the V(D)J allele or gene when
an allele call field (--calls) is specified.''')
group_across.add_argument('--act', action='store', dest='action', default='first',
choices=('first', ),
help='''Specifies how to handle multiple values within default
allele call fields. Currently, only "first" is supported.''')
group_across.add_argument('--exec', action='store', dest='muscle_exec',
default=default_muscle_exec,
help='The location of the MUSCLE executable')
parser_across.set_defaults(group_func=groupRecords, align_func=alignAcross)
# Argument parser for alignment of fields within records
parser_within = subparsers.add_parser('within', parents=[parser_parent],
formatter_class=CommonHelpFormatter, add_help=False,
help='Multiple aligns sequence fields within rows using MUSCLE')
group_within = parser_within.add_argument_group('alignment arguments')
group_within.add_argument('--sf', nargs='+', action='store', dest='seq_fields', required=True,
help='The sequence fields to multiple align within each record.')
group_within.add_argument('--exec', action='store', dest='muscle_exec',
default=default_muscle_exec,
help='The location of the MUSCLE executable')
parser_within.set_defaults(group_func=None, align_func=alignWithin)
# Argument parser for column-wise alignment across records
parser_block = subparsers.add_parser('block', parents=[parser_parent],
formatter_class=CommonHelpFormatter, add_help=False,
help='''Multiple aligns sequence groups across both
columns and rows using MUSCLE.''')
group_block = parser_block.add_argument_group('alignment arguments')
group_block.add_argument('--sf', nargs='+', action='store', dest='seq_fields', required=True,
help='The sequence fields to multiple align within each group.')
group_block.add_argument('--gf', nargs='+', action='store', dest='group_fields', default=None,
help='Additional (not allele call) fields to use for grouping.')
group_block.add_argument('--calls', nargs='+', action='store', dest='calls',
choices=('v', 'd', 'j'), default=['v', 'j'],
help='Segment calls (allele assignments) to use for grouping.')
group_block.add_argument('--mode', action='store', dest='mode',
choices=('allele', 'gene'), default='gene',
help='''Specifies whether to use the V(D)J allele or gene when
an allele call field (--calls) is specified.''')
group_block.add_argument('--act', action='store', dest='action', default='first',
choices=('first', ),
help='''Specifies how to handle multiple values within default
allele call fields. Currently, only "first" is supported.''')
group_block.add_argument('--exec', action='store', dest='muscle_exec',
default=default_muscle_exec,
help='The location of the MUSCLE executable')
parser_block.set_defaults(group_func=groupRecords, align_func=alignBlocks)
return parser
if __name__ == '__main__':
"""
Parses command line arguments and calls main function
"""
# Parse arguments
parser = getArgParser()
checkArgs(parser)
args = parser.parse_args()
args_dict = parseCommonArgs(args)
# Check if a valid MUSCLE executable was specified for muscle mode
if not shutil.which(args.muscle_exec):
parser.error('%s does not exist or is not executable.' % args.muscle_exec)
# Define align_args
args_dict['align_args'] = {'muscle_exec': args_dict['muscle_exec']}
del args_dict['muscle_exec']
# Define group_args
if args_dict['group_func'] is groupRecords:
args_dict['group_args'] = {'fields':args_dict['group_fields'],
'calls':args_dict['calls'],
'mode':args_dict['mode'],
'action':args_dict['action']}
del args_dict['group_fields']
del args_dict['calls']
del args_dict['mode']
del args_dict['action']
# Clean arguments dictionary
del args_dict['command']
del args_dict['db_files']
if 'out_files' in args_dict: del args_dict['out_files']
# Call main function for each input file
for i, f in enumerate(args.__dict__['db_files']):
args_dict['db_file'] = f
args_dict['out_file'] = args.__dict__['out_files'][i] \
if args.__dict__['out_files'] else None
alignRecords(**args_dict)
```
#### File: changeo/bin/ParseDb.py
```python
__author__ = '<NAME>'
from changeo import __version__, __date__
# Imports
import csv
import os
import re
from argparse import ArgumentParser
from collections import OrderedDict
from itertools import chain
from textwrap import dedent
from time import time
# Presto and changeo imports
from presto.IO import printLog, printProgress, printMessage
from changeo.Defaults import default_csv_size, default_out_args
from changeo.Commandline import CommonHelpFormatter, checkArgs, getCommonArgParser, parseCommonArgs
from changeo.IO import countDbFile, getOutputHandle, splitName, TSVReader, TSVWriter
# System settings
csv.field_size_limit(default_csv_size)
# Defaults
default_index_field = 'INDEX'
# TODO: convert SQL-ish operations to modify_func() as per ParseHeaders
def splitDbFile(db_file, field, num_split=None, out_args=default_out_args):
"""
Divides a tab-delimited database file into segments by description tags
Arguments:
db_file : filename of the tab-delimited database file to split
field : the field name by which to split db_file
num_split : the numerical threshold by which to group sequences;
if None treat field as textual
out_args : common output argument dictionary from parseCommonArgs
Returns:
list : a list of output file names.
"""
log = OrderedDict()
log['START'] = 'ParseDb'
log['COMMAND'] = 'split'
log['FILE'] = os.path.basename(db_file)
log['FIELD'] = field
log['NUM_SPLIT'] = num_split
printLog(log)
# Open input
db_handle = open(db_file, 'rt')
db_iter = TSVReader(db_handle)
out_fields = db_iter.fields
__, __, out_args['out_type'] = splitName(db_file)
# Determine total numbers of records
rec_count = countDbFile(db_file)
start_time = time()
count = 0
# Sort records into files based on textual field
if num_split is None:
# Create set of unique field tags
with open(db_file, 'rt') as tmp_handle:
tmp_iter = TSVReader(tmp_handle)
tag_list = list(set([row[field] for row in tmp_iter]))
# Forbidden characters in filename and replacements
no_good = {'/':'f','\\':'b','?':'q','%':'p','*':'s',':':'c',
'|':'pi','"':'dq','\'':'sq','<':'gt','>':'lt',' ':'_'}
# Replace forbidden characters in tag_list
tag_dict = {}
for tag in tag_list:
for c,r in no_good.items():
tag_dict[tag] = (tag_dict.get(tag, tag).replace(c,r) \
if c in tag else tag_dict.get(tag, tag))
# Create output handles
handles_dict = {tag: getOutputHandle(db_file,
out_label='%s-%s' % (field, label),
out_name=out_args['out_name'],
out_dir=out_args['out_dir'],
out_type=out_args['out_type'])
for tag, label in tag_dict.items()}
# Create Db writer instances
writers_dict = {tag: TSVWriter(handles_dict[tag], fields=out_fields)
for tag in tag_dict}
# Iterate over records
for row in db_iter:
printProgress(count, rec_count, 0.05, start_time=start_time)
count += 1
# Write row to appropriate file
tag = row[field]
writers_dict[tag].writeDict(row)
# Sort records into files based on numeric num_split
else:
num_split = float(num_split)
# Create output handles
handles_dict = {'under': getOutputHandle(db_file,
out_label='under-%.1f' % num_split,
out_name=out_args['out_name'],
out_dir=out_args['out_dir'],
out_type=out_args['out_type']),
'atleast': getOutputHandle(db_file,
out_label='atleast-%.1f' % num_split,
out_name=out_args['out_name'],
out_dir=out_args['out_dir'],
out_type=out_args['out_type'])}
# Create Db writer instances
writers_dict = {'under': TSVWriter(handles_dict['under'], fields=out_fields),
'atleast': TSVWriter(handles_dict['atleast'], fields=out_fields)}
# Iterate over records
for row in db_iter:
printProgress(count, rec_count, 0.05, start_time=start_time)
count += 1
tag = row[field]
tag = 'under' if float(tag) < num_split else 'atleast'
writers_dict[tag].writeDict(row)
# Write log
printProgress(count, rec_count, 0.05, start_time=start_time)
log = OrderedDict()
for i, k in enumerate(handles_dict):
log['OUTPUT%i' % (i + 1)] = os.path.basename(handles_dict[k].name)
log['RECORDS'] = rec_count
log['PARTS'] = len(handles_dict)
log['END'] = 'ParseDb'
printLog(log)
# Close output file handles
db_handle.close()
for t in handles_dict: handles_dict[t].close()
return [handles_dict[t].name for t in handles_dict]
def addDbFile(db_file, fields, values, out_file=None, out_args=default_out_args):
"""
Adds field and value pairs to a database file
Arguments:
db_file : the database file name.
fields : a list of fields to add.
values : a list of values to assign to all rows of each field.
out_file : output file name. Automatically generated from the input file if None.
out_args : common output argument dictionary from parseCommonArgs.
Returns:
str : output file name.
"""
log = OrderedDict()
log['START'] = 'ParseDb'
log['COMMAND'] = 'add'
log['FILE'] = os.path.basename(db_file)
log['FIELDS'] = ','.join(fields)
log['VALUES'] = ','.join(values)
printLog(log)
# Open input
db_handle = open(db_file, 'rt')
db_iter = TSVReader(db_handle)
__, __, out_args['out_type'] = splitName(db_file)
# Add fields
out_fields = list(db_iter.fields)
out_fields.extend(fields)
# Open output
if out_file is not None:
pass_handle = open(out_file, 'w')
else:
pass_handle = getOutputHandle(db_file, out_label='parse-add', out_dir=out_args['out_dir'],
out_name=out_args['out_name'], out_type=out_args['out_type'])
pass_writer = TSVWriter(pass_handle, out_fields)
# Count records
result_count = countDbFile(db_file)
# Define fields and values to append
add_dict = {k:v for k,v in zip(fields, values) if k not in db_iter.fields}
# Iterate over records
start_time = time()
rec_count = 0
for rec in db_iter:
# Print progress for previous iteration
printProgress(rec_count, result_count, 0.05, start_time=start_time)
rec_count += 1
# Write updated row
rec.update(add_dict)
pass_writer.writeDict(rec)
# Print counts
printProgress(rec_count, result_count, 0.05, start_time=start_time)
log = OrderedDict()
log['OUTPUT'] = os.path.basename(pass_handle.name)
log['RECORDS'] = rec_count
log['END'] = 'ParseDb'
printLog(log)
# Close file handles
pass_handle.close()
db_handle.close()
return pass_handle.name
def indexDbFile(db_file, field=default_index_field, out_file=None, out_args=default_out_args):
"""
Adds an index column to a database file
Arguments:
db_file : the database file name.
field : the name of the index field to add.
out_file : output file name. Automatically generated from the input file if None.
out_args : common output argument dictionary from parseCommonArgs.
Returns:
str : output file name.
"""
log = OrderedDict()
log['START'] = 'ParseDb'
log['COMMAND'] = 'index'
log['FILE'] = os.path.basename(db_file)
log['FIELD'] = field
printLog(log)
# Open input
db_handle = open(db_file, 'rt')
db_iter = TSVReader(db_handle)
__, __, out_args['out_type'] = splitName(db_file)
# Append index field
out_fields = list(db_iter.fields)
out_fields.append(field)
# Open output
if out_file is not None:
pass_handle = open(out_file, 'w')
else:
pass_handle = getOutputHandle(db_file, out_label='parse-index', out_dir=out_args['out_dir'],
out_name=out_args['out_name'], out_type=out_args['out_type'])
pass_writer = TSVWriter(pass_handle, out_fields)
# Count records
result_count = countDbFile(db_file)
# Iterate over records
start_time = time()
rec_count = 0
for rec in db_iter:
# Print progress for previous iteration
printProgress(rec_count, result_count, 0.05, start_time=start_time)
rec_count += 1
# Add count and write updated row
rec.update({field:rec_count})
pass_writer.writeDict(rec)
# Print counts
printProgress(rec_count, result_count, 0.05, start_time=start_time)
log = OrderedDict()
log['OUTPUT'] = os.path.basename(pass_handle.name)
log['RECORDS'] = rec_count
log['END'] = 'ParseDb'
printLog(log)
# Close file handles
pass_handle.close()
db_handle.close()
return pass_handle.name
def dropDbFile(db_file, fields, out_file=None, out_args=default_out_args):
"""
Deletes entire fields from a database file
Arguments:
db_file : the database file name.
fields : a list of fields to drop.
out_file : output file name. Automatically generated from the input file if None.
out_args : common output argument dictionary from parseCommonArgs
Returns:
str : output file name.
"""
log = OrderedDict()
log['START'] = 'ParseDb'
log['COMMAND'] = 'drop'
log['FILE'] = os.path.basename(db_file)
log['FIELDS'] = ','.join(fields)
printLog(log)
# Open input
db_handle = open(db_file, 'rt')
db_iter = TSVReader(db_handle)
__, __, out_args['out_type'] = splitName(db_file)
# Exclude dropped field from output
out_fields = [f for f in db_iter.fields if f not in fields]
# Open output
if out_file is not None:
pass_handle = open(out_file, 'w')
else:
pass_handle = getOutputHandle(db_file, out_label='parse-drop', out_dir=out_args['out_dir'],
out_name=out_args['out_name'], out_type=out_args['out_type'])
pass_writer = TSVWriter(pass_handle, out_fields)
# Count records
result_count = countDbFile(db_file)
# Iterate over records
start_time = time()
rec_count = 0
for rec in db_iter:
# Print progress for previous iteration
printProgress(rec_count, result_count, 0.05, start_time=start_time)
rec_count += 1
# Write row
pass_writer.writeDict(rec)
# Print counts
printProgress(rec_count, result_count, 0.05, start_time=start_time)
log = OrderedDict()
log['OUTPUT'] = os.path.basename(pass_handle.name)
log['RECORDS'] = rec_count
log['END'] = 'ParseDb'
printLog(log)
# Close file handles
pass_handle.close()
db_handle.close()
return pass_handle.name
def deleteDbFile(db_file, fields, values, logic='any', regex=False,
out_file=None, out_args=default_out_args):
"""
Deletes records from a database file
Arguments:
db_file : the database file name.
fields : a list of fields to check for deletion criteria.
values : a list of values defining deletion targets.
logic : one of 'any' or 'all' defining whether one or all fields must have a match.
regex : if False do exact full string matches; if True allow partial regex matches.
out_file : output file name. Automatically generated from the input file if None.
out_args : common output argument dictionary from parseCommonArgs.
Returns:
str : output file name.
"""
# Define string match function
if regex:
def _match_func(x, patterns): return any([re.search(p, x) for p in patterns])
else:
def _match_func(x, patterns): return x in patterns
# Define logic function
if logic == 'any':
_logic_func = any
elif logic == 'all':
_logic_func = all
log = OrderedDict()
log['START'] = 'ParseDb'
log['COMMAND'] = 'delete'
log['FILE'] = os.path.basename(db_file)
log['FIELDS'] = ','.join(fields)
log['VALUES'] = ','.join(values)
printLog(log)
# Open input
db_handle = open(db_file, 'rt')
db_iter = TSVReader(db_handle)
out_fields = db_iter.fields
__, __, out_args['out_type'] = splitName(db_file)
# Open output
if out_file is not None:
pass_handle = open(out_file, 'w')
else:
pass_handle = getOutputHandle(db_file, out_label='parse-delete', out_dir=out_args['out_dir'],
out_name=out_args['out_name'], out_type=out_args['out_type'])
pass_writer = TSVWriter(pass_handle, out_fields)
# Count records
result_count = countDbFile(db_file)
# Iterate over records
start_time = time()
rec_count, pass_count, fail_count = 0, 0, 0
for rec in db_iter:
# Print progress for previous iteration
printProgress(rec_count, result_count, 0.05, start_time=start_time)
rec_count += 1
# Check for deletion values in all fields
delete = _logic_func([_match_func(rec.get(f, False), values) for f in fields])
# Write sequences
if not delete:
pass_count += 1
pass_writer.writeDict(rec)
else:
fail_count += 1
# Print counts
printProgress(rec_count, result_count, 0.05, start_time=start_time)
log = OrderedDict()
log['OUTPUT'] = os.path.basename(pass_handle.name)
log['RECORDS'] = rec_count
log['KEPT'] = pass_count
log['DELETED'] = fail_count
log['END'] = 'ParseDb'
printLog(log)
# Close file handles
pass_handle.close()
db_handle.close()
return pass_handle.name
def renameDbFile(db_file, fields, names, out_file=None, out_args=default_out_args):
"""
Renames fields in a database file
Arguments:
db_file : the database file name.
fields : a list of fields to rename.
names : a list of new names for each field.
out_file : output file name. Automatically generated from the input file if None.
out_args : common output argument dictionary from parseCommonArgs.
Returns:
str : output file name.
"""
log = OrderedDict()
log['START'] = 'ParseDb'
log['COMMAND'] = 'rename'
log['FILE'] = os.path.basename(db_file)
log['FIELDS'] = ','.join(fields)
log['NAMES'] = ','.join(names)
printLog(log)
# Open file handles
db_handle = open(db_file, 'rt')
db_iter = TSVReader(db_handle)
__, __, out_args['out_type'] = splitName(db_file)
# Get header and rename fields
out_fields = list(db_iter.fields)
for f, n in zip(fields, names):
i = out_fields.index(f)
out_fields[i] = n
# Open writer
if out_file is not None:
pass_handle = open(out_file, 'w')
else:
pass_handle = getOutputHandle(db_file, out_label='parse-rename', out_dir=out_args['out_dir'],
out_name=out_args['out_name'], out_type=out_args['out_type'])
pass_writer = TSVWriter(pass_handle, out_fields)
# Count records
result_count = countDbFile(db_file)
# Iterate over records
start_time = time()
rec_count = 0
for rec in db_iter:
# Print progress for previous iteration
printProgress(rec_count, result_count, 0.05, start_time=start_time)
rec_count += 1
# TODO: repeating renaming is unnecessary.
# Rename fields
for f, n in zip(fields, names):
rec[n] = rec.pop(f)
# Write
pass_writer.writeDict(rec)
# Print counts
printProgress(rec_count, result_count, 0.05, start_time=start_time)
log = OrderedDict()
log['OUTPUT'] = os.path.basename(pass_handle.name)
log['RECORDS'] = rec_count
log['END'] = 'ParseDb'
printLog(log)
# Close file handles
pass_handle.close()
db_handle.close()
return pass_handle.name
def selectDbFile(db_file, fields, values, logic='any', regex=False,
out_file=None, out_args=default_out_args):
"""
Selects records from a database file
Arguments:
db_file : the database file name
fields : a list of fields to check for selection criteria
values : a list of values defining selection targets
logic : one of 'any' or 'all' defining whether one or all fields must have a match.
regex : if False do exact full string matches; if True allow partial regex matches.
out_file : output file name. Automatically generated from the input file if None.
out_args : common output argument dictionary from parseCommonArgs
Returns:
str : output file name.
"""
# Define string match function
if regex:
def _match_func(x, patterns): return any([re.search(p, x) for p in patterns])
else:
def _match_func(x, patterns): return x in patterns
# Define logic function
if logic == 'any':
_logic_func = any
elif logic == 'all':
_logic_func = all
# Print console log
log = OrderedDict()
log['START'] = 'ParseDb'
log['COMMAND'] = 'select'
log['FILE'] = os.path.basename(db_file)
log['FIELDS'] = ','.join(fields)
log['VALUES'] = ','.join(values)
log['REGEX'] = regex
printLog(log)
# Open input
db_handle = open(db_file, 'rt')
db_iter = TSVReader(db_handle)
out_fields = db_iter.fields
__, __, out_args['out_type'] = splitName(db_file)
# Open output
if out_file is not None:
pass_handle = open(out_file, 'w')
else:
pass_handle = getOutputHandle(db_file, out_label='parse-select', out_dir=out_args['out_dir'],
out_name=out_args['out_name'], out_type=out_args['out_type'])
pass_writer = TSVWriter(pass_handle, out_fields)
# Count records
result_count = countDbFile(db_file)
# Iterate over records
start_time = time()
rec_count, pass_count, fail_count = 0, 0, 0
for rec in db_iter:
# Print progress for previous iteration
printProgress(rec_count, result_count, 0.05, start_time=start_time)
rec_count += 1
# Check for selection values in all fields
select = _logic_func([_match_func(rec.get(f, False), values) for f in fields])
# Write sequences
if select:
pass_count += 1
pass_writer.writeDict(rec)
else:
fail_count += 1
# Print counts
printProgress(rec_count, result_count, 0.05, start_time=start_time)
log = OrderedDict()
log['OUTPUT'] = os.path.basename(pass_handle.name)
log['RECORDS'] = rec_count
log['SELECTED'] = pass_count
log['DISCARDED'] = fail_count
log['END'] = 'ParseDb'
printLog(log)
# Close file handles
pass_handle.close()
db_handle.close()
return pass_handle.name
def sortDbFile(db_file, field, numeric=False, descend=False,
out_file=None, out_args=default_out_args):
"""
Sorts records by values in an annotation field
Arguments:
db_file : the database filename
field : the field name to sort by
numeric : if True sort field numerically;
if False sort field alphabetically
descend : if True sort in descending order;
if False sort in ascending order
out_file : output file name. Automatically generated from the input file if None.
out_args : common output argument dictionary from parseCommonArgs
Returns:
str : output file name
"""
log = OrderedDict()
log['START'] = 'ParseDb'
log['COMMAND'] = 'sort'
log['FILE'] = os.path.basename(db_file)
log['FIELD'] = field
log['NUMERIC'] = numeric
printLog(log)
# Open input
db_handle = open(db_file, 'rt')
db_iter = TSVReader(db_handle)
out_fields = db_iter.fields
__, __, out_args['out_type'] = splitName(db_file)
# Open output
if out_file is not None:
pass_handle = open(out_file, 'w')
else:
pass_handle = getOutputHandle(db_file, out_label='parse-sort', out_dir=out_args['out_dir'],
out_name=out_args['out_name'], out_type=out_args['out_type'])
pass_writer = TSVWriter(pass_handle, out_fields)
# Store all records in a dictionary
start_time = time()
printMessage("Indexing: Running", start_time=start_time)
db_dict = {i:r for i, r in enumerate(db_iter)}
result_count = len(db_dict)
# Sort db_dict by field values
tag_dict = {k:v[field] for k, v in db_dict.items()}
if numeric: tag_dict = {k:float(v or 0) for k, v in tag_dict.items()}
sorted_keys = sorted(tag_dict, key=tag_dict.get, reverse=descend)
printMessage("Indexing: Done", start_time=start_time, end=True)
# Iterate over records
start_time = time()
rec_count = 0
for key in sorted_keys:
# Print progress for previous iteration
printProgress(rec_count, result_count, 0.05, start_time=start_time)
rec_count += 1
# Write records
pass_writer.writeDict(db_dict[key])
# Print counts
printProgress(rec_count, result_count, 0.05, start_time=start_time)
log = OrderedDict()
log['OUTPUT'] = os.path.basename(pass_handle.name)
log['RECORDS'] = rec_count
log['END'] = 'ParseDb'
printLog(log)
# Close file handles
pass_handle.close()
db_handle.close()
return pass_handle.name
def updateDbFile(db_file, field, values, updates, out_file=None, out_args=default_out_args):
"""
Updates field and value pairs to a database file
Arguments:
db_file : the database file name.
field : the field to update.
values : a list of values to specifying which rows to update.
updates : a list of values to update each value with.
out_file : output file name. Automatically generated from the input file if None.
out_args : common output argument dictionary from parseCommonArgs.
Returns:
str : output file name
"""
log = OrderedDict()
log['START'] = 'ParseDb'
log['COMMAND'] = 'update'
log['FILE'] = os.path.basename(db_file)
log['FIELD'] = field
log['VALUES'] = ','.join(values)
log['UPDATES'] = ','.join(updates)
printLog(log)
# Open input
db_handle = open(db_file, 'rt')
db_iter = TSVReader(db_handle)
out_fields = db_iter.fields
__, __, out_args['out_type'] = splitName(db_file)
# Open output
if out_file is not None:
pass_handle = open(out_file, 'w')
else:
pass_handle = getOutputHandle(db_file, out_label='parse-update', out_dir=out_args['out_dir'],
out_name=out_args['out_name'], out_type=out_args['out_type'])
pass_writer = TSVWriter(pass_handle, out_fields)
# Count records
result_count = countDbFile(db_file)
# Iterate over records
start_time = time()
rec_count, pass_count = 0, 0
for rec in db_iter:
# Print progress for previous iteration
printProgress(rec_count, result_count, 0.05, start_time=start_time)
rec_count += 1
# Updated values if found
for x, y in zip(values, updates):
if rec[field] == x:
rec[field] = y
pass_count += 1
# Write records
pass_writer.writeDict(rec)
# Print counts
printProgress(rec_count, result_count, 0.05, start_time=start_time)
log = OrderedDict()
log['OUTPUT'] = os.path.basename(pass_handle.name)
log['RECORDS'] = rec_count
log['UPDATED'] = pass_count
log['END'] = 'ParseDb'
printLog(log)
# Close file handles
pass_handle.close()
db_handle.close()
return pass_handle.name
def mergeDbFiles(db_files, drop=False, out_file=None, out_args=default_out_args):
"""
Merges multiple tab-delimited database files into a single file
Arguments:
db_files : list of database file names.
drop : if True drop columns not present in all files.
out_file : output file name. Automatically generated from the input file if None.
out_args : common output argument dictionary from parseCommonArgs.
Returns:
str : output file name.
"""
log = OrderedDict()
log['START'] = 'ParseDb'
log['COMMAND'] = 'merge'
log['FILES'] = ','.join([os.path.basename(f) for f in db_files])
log['DROP'] = drop
printLog(log)
# Open input
db_handles = [open(f, 'rt') for f in db_files]
db_iters = [TSVReader(x) for x in db_handles]
result_count = sum([countDbFile(f) for f in db_files])
# Define output fields
field_list = [x.fields for x in db_iters]
if drop:
field_set = set.intersection(*map(set, field_list))
else:
field_set = set.union(*map(set, field_list))
field_order = OrderedDict([(f, None) for f in chain(*field_list)])
out_fields = [f for f in field_order if f in field_set]
# Open output file
if out_file is not None:
pass_handle = open(out_file, 'w')
else:
__, __, out_args['out_type'] = splitName(db_files[0])
pass_handle = getOutputHandle(db_files[0], out_label='parse-merge', out_dir=out_args['out_dir'],
out_name=out_args['out_name'], out_type=out_args['out_type'])
pass_writer = TSVWriter(pass_handle, out_fields)
# Iterate over records
start_time = time()
rec_count = 0
for db in db_iters:
for rec in db:
# Print progress for previous iteration
printProgress(rec_count, result_count, 0.05, start_time=start_time)
rec_count += 1
# Write records
pass_writer.writeDict(rec)
# Print counts
printProgress(rec_count, result_count, 0.05, start_time=start_time)
log = OrderedDict()
log['OUTPUT'] = os.path.basename(pass_handle.name)
log['RECORDS'] = rec_count
log['END'] = 'ParseDb'
printLog(log)
# Close file handles
pass_handle.close()
for x in db_handles: x.close()
return pass_handle.name
def getArgParser():
"""
Defines the ArgumentParser
Arguments:
None
Returns:
an ArgumentParser object
"""
# Define input and output field help message
fields = dedent(
'''
output files:
sequences
FASTA formatted sequences output from the subcommands fasta and clip.
<field>-<value>
database files partitioned by annotation <field> and <value>.
parse-<command>
output of the database modification functions where <command> is one of
the subcommands add, index, drop, delete, rename, select, sort or update.
required fields:
sequence_id
''')
# Define ArgumentParser
parser = ArgumentParser(description=__doc__, epilog=fields,
formatter_class=CommonHelpFormatter, add_help=False)
group_help = parser.add_argument_group('help')
group_help.add_argument('--version', action='version',
version='%(prog)s:' + ' %s %s' %(__version__, __date__))
group_help.add_argument('-h', '--help', action='help', help='show this help message and exit')
subparsers = parser.add_subparsers(title='subcommands', dest='command', metavar='',
help='Database operation')
# TODO: This is a temporary fix for Python issue 9253
subparsers.required = True
# Define parent parsers
default_parent = getCommonArgParser(failed=False, log=False, format=False)
multi_parent = getCommonArgParser(out_file=False, failed=False, log=False, format=False)
# Subparser to add records
parser_add = subparsers.add_parser('add', parents=[default_parent],
formatter_class=CommonHelpFormatter, add_help=False,
help='Adds field and value pairs.',
description='Adds field and value pairs.')
group_add = parser_add.add_argument_group('parsing arguments')
group_add.add_argument('-f', nargs='+', action='store', dest='fields', required=True,
help='The name of the fields to add.')
group_add.add_argument('-u', nargs='+', action='store', dest='values', required=True,
help='The value to assign to all rows for each field.')
parser_add.set_defaults(func=addDbFile)
# Subparser to delete records
parser_delete = subparsers.add_parser('delete', parents=[default_parent],
formatter_class=CommonHelpFormatter, add_help=False,
help='Deletes specific records.',
description='Deletes specific records.')
group_delete = parser_delete.add_argument_group('parsing arguments')
group_delete.add_argument('-f', nargs='+', action='store', dest='fields', required=True,
help='The name of the fields to check for deletion criteria.')
group_delete.add_argument('-u', nargs='+', action='store', dest='values', default=['', 'NA'],
help='''The values defining which records to delete. A value
may appear in any of the fields specified with -f.''')
group_delete.add_argument('--logic', action='store', dest='logic',
choices=('any', 'all'), default='any',
help='''Defines whether a value may appear in any field (any)
or whether it must appear in all fields (all).''')
group_delete.add_argument('--regex', action='store_true', dest='regex',
help='''If specified, treat values as regular expressions
and allow partial string matches.''')
parser_delete.set_defaults(func=deleteDbFile)
# Subparser to drop fields
parser_drop = subparsers.add_parser('drop', parents=[default_parent],
formatter_class=CommonHelpFormatter, add_help=False,
help='Deletes entire fields.',
description='Deletes entire fields.')
group_drop = parser_drop.add_argument_group('parsing arguments')
group_drop.add_argument('-f', nargs='+', action='store', dest='fields', required=True,
help='The name of the fields to delete from the database.')
parser_drop.set_defaults(func=dropDbFile)
# Subparser to index fields
parser_index = subparsers.add_parser('index', parents=[default_parent],
formatter_class=CommonHelpFormatter, add_help=False,
help='Adds a numeric index field.',
description='Adds a numeric index field.')
group_index = parser_index.add_argument_group('parsing arguments')
group_index.add_argument('-f', action='store', dest='field',
default=default_index_field,
help='The name of the index field to add to the database.')
parser_index.set_defaults(func=indexDbFile)
# Subparser to rename fields
parser_rename = subparsers.add_parser('rename', parents=[default_parent],
formatter_class=CommonHelpFormatter, add_help=False,
help='Renames fields.',
description='Renames fields.')
group_rename = parser_rename.add_argument_group('parsing arguments')
group_rename.add_argument('-f', nargs='+', action='store', dest='fields', required=True,
help='List of fields to rename.')
group_rename.add_argument('-k', nargs='+', action='store', dest='names', required=True,
help='List of new names for each field.')
parser_rename.set_defaults(func=renameDbFile)
# Subparser to select records
parser_select = subparsers.add_parser('select', parents=[default_parent],
formatter_class=CommonHelpFormatter, add_help=False,
help='Selects specific records.',
description='Selects specific records.')
group_select = parser_select.add_argument_group('parsing arguments')
group_select.add_argument('-f', nargs='+', action='store', dest='fields', required=True,
help='The name of the fields to check for selection criteria.')
group_select.add_argument('-u', nargs='+', action='store', dest='values', required=True,
help='''The values defining which records to select. A value
may appear in any of the fields specified with -f.''')
group_select.add_argument('--logic', action='store', dest='logic',
choices=('any', 'all'), default='any',
help='''Defines whether a value may appear in any field (any)
or whether it must appear in all fields (all).''')
group_select.add_argument('--regex', action='store_true', dest='regex',
help='''If specified, treat values as regular expressions
and allow partial string matches.''')
parser_select.set_defaults(func=selectDbFile)
# Subparser to sort file by records
parser_sort = subparsers.add_parser('sort', parents=[default_parent],
formatter_class=CommonHelpFormatter, add_help=False,
help='Sorts records by field values.',
description='Sorts records by field values.')
group_sort = parser_sort.add_argument_group('parsing arguments')
group_sort.add_argument('-f', action='store', dest='field', type=str, required=True,
help='The annotation field by which to sort records.')
group_sort.add_argument('--num', action='store_true', dest='numeric', default=False,
help='''Specify to define the sort column as numeric rather
than textual.''')
group_sort.add_argument('--descend', action='store_true', dest='descend',
help='''If specified, sort records in descending, rather
than ascending, order by values in the target field.''')
parser_sort.set_defaults(func=sortDbFile)
# Subparser to update records
parser_update = subparsers.add_parser('update', parents=[default_parent],
formatter_class=CommonHelpFormatter, add_help=False,
help='Updates field and value pairs.',
description='Updates field and value pairs.')
group_update = parser_update.add_argument_group('parsing arguments')
group_update.add_argument('-f', action='store', dest='field', required=True,
help='The name of the field to update.')
group_update.add_argument('-u', nargs='+', action='store', dest='values', required=True,
help='The values that will be replaced.')
group_update.add_argument('-t', nargs='+', action='store', dest='updates', required=True,
help='''The new value to assign to each selected row.''')
parser_update.set_defaults(func=updateDbFile)
# Subparser to merge files
parser_merge = subparsers.add_parser('merge', parents=[multi_parent],
formatter_class=CommonHelpFormatter, add_help=False,
help='Merges files.',
description='Merges files.')
group_merge = parser_merge.add_argument_group('parsing arguments')
group_merge.add_argument('-o', action='store', dest='out_file', default=None,
help='''Explicit output file name. Note, this argument cannot be used with
the --failed, --outdir or --outname arguments.''')
group_merge.add_argument('--drop', action='store_true', dest='drop',
help='''If specified, drop fields that do not exist in all input files.
Otherwise, include all columns in all files and fill missing data
with empty strings.''')
parser_merge.set_defaults(func=mergeDbFiles)
# Subparser to partition files by annotation values
parser_split = subparsers.add_parser('split', parents=[multi_parent],
formatter_class=CommonHelpFormatter, add_help=False,
help='Splits database files by field values.',
description='Splits database files by field values')
group_split = parser_split.add_argument_group('parsing arguments')
group_split.add_argument('-f', action='store', dest='field', type=str, required=True,
help='Annotation field by which to split database files.')
group_split.add_argument('--num', action='store', dest='num_split', type=float, default=None,
help='''Specify to define the field as numeric and group
records by whether they are less than or at least
(greater than or equal to) the specified value.''')
parser_split.set_defaults(func=splitDbFile)
return parser
if __name__ == '__main__':
"""
Parses command line arguments and calls main function
"""
# Parse arguments
parser = getArgParser()
checkArgs(parser)
args = parser.parse_args()
if args.command == 'merge':
args_dict = parseCommonArgs(args, in_list=True)
else:
args_dict = parseCommonArgs(args)
# Delete command declaration from argument dictionary
del args_dict['command']
del args_dict['func']
# Check argument pairs
if args.command == 'add' and len(args_dict['fields']) != len(args_dict['values']):
parser.error('You must specify exactly one value (-u) per field (-f)')
elif args.command == 'rename' and len(args_dict['fields']) != len(args_dict['names']):
parser.error('You must specify exactly one new name (-k) per field (-f)')
elif args.command == 'update' and len(args_dict['values']) != len(args_dict['updates']):
parser.error('You must specify exactly one value (-u) per replacement (-t)')
# Call parser function for each database file
if args.command == 'merge':
args.func(**args_dict)
elif args.command == 'split':
del args_dict['db_files']
for f in args.__dict__['db_files']:
args_dict['db_file'] = f
args.func(**args_dict)
else:
del args_dict['db_files']
del args_dict['out_files']
for i, f in enumerate(args.__dict__['db_files']):
args_dict['db_file'] = f
args_dict['out_file'] = args.__dict__['out_files'][i] \
if args.__dict__['out_files'] else None
args.func(**args_dict)
``` |
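The subcommand functions above are normally driven by the argument parser, but they can also be called directly. Below is a minimal sketch, assuming Change-O is installed, that the functions above are available in the current session, and that a hypothetical `input.tsv` with `v_call` and `sample_id` columns exists (file and column names are illustrative only, not part of the original source).

```python
# Hypothetical input: 'input.tsv' with 'v_call' and 'sample_id' columns

# Keep only records whose v_call contains 'IGHV1' (regex allows partial matches)
selected = selectDbFile('input.tsv', fields=['v_call'], values=['IGHV1'], regex=True)

# Partition the selected records into one output file per sample_id value
parts = splitDbFile(selected, field='sample_id')
print(selected)
print(parts)
```

Both calls fall back on `default_out_args`, so the output files are written next to the input with the `parse-select` and `<field>-<value>` labels described in the parser epilog.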
{
"source": "3rdcycle/csv2db",
"score": 2
} |
#### File: 3rdcycle/csv2db/csv2db.py
```python
import csv
import re
# Regular expression to detect potential string values with missing quotes
sql_fun = ['true', 'false', 'avg', 'count', 'first', 'last', 'max', 'min',
'sum', 'ucase', 'lcase', 'mid', 'len', 'round', 'now', 'format']
string_exp = re.compile('^(?!["\']|{}).*[a-z]'.format('|'.join(sql_fun)),
re.IGNORECASE)
class CsvImporter(object):
"""
CsvImporter imports values from a csv file into records and creates sql
insert statements to create the corresponding rows in the target db.
:param path: Path to the csv file to import
:param dialect: Dictionary with csv reader dialect specifications
(see http://docs.python.org/2/library/csv.html#csv-fmt-params)
:param import_specs: Dictionary with import specifications for each
table. RecordSpecs are used to tell the script how to extract the
csv columns into db records.
Each entry can have multiple RecordSpecs, identified by a unique
key which is used to resolve cross references in the attr_map of
each RecordSpec.
"""
def __init__(self, path, dialect, import_specs):
self.path = path
self.dialect = dialect
# Flatten import_specs to {(table, instance): record_spec} "t,i,s" form
flat_specs = {}
for (t, table_spec) in import_specs.items():
specs = {(t, i): s for (i, s) in table_spec.items()}
flat_specs.update(specs)
# Create a XReference dependency map and sort it topologically
dependency_map = {}
for (path, s) in flat_specs.items():
deps = set([(x.table_name, x.instance_name) for x
in s.attr_map.values() if isinstance(x, XReference)])
dependency_map[path] = deps
sorted_keys = [val for sub in _toposort(dependency_map) for val in sub]
# Store sorted results in a list [(t, i, s), ...]
try:
self.specs = [(t, i, flat_specs[(t, i)]) for (t, i) in sorted_keys]
except KeyError:
print('ERROR: Could not find specification for "{}" in table '
'"{}". Check your XReferences.'.format(i, t))
exit(-1)
def import_data(self, id_col=None):
"""
Imports the csv into DbRecords and returns them.
The method uses the import specification (import_specs) that was passed
to the importer on init to convert csv table columns to DbRecord
objects.
"""
records = []
with open(self.path) as f:
csv.register_dialect('csv2db', **self.dialect)
reader = csv.DictReader(f, dialect='csv2db')
row_num = 0
for row in reader:
row_id = row[id_col] if id_col else row_num
records += self._records_for_row(row, row_id)
row_num += 1
return records
def _records_for_row(self, row, row_id):
"""
Import one single row and return the resulting DbRecord objects
"""
records = []
xref_map = {}
for (table, instance, spec) in self.specs:
if spec.condition(row) is False:
continue
# Create record and import attributes according to spec
record = DbRecord(table, row_id)
record.import_attributes(spec.attr_map, xref_map, row)
records.append(record)
# Keep a reference to each record instance that we create for
# resolving XReferences in later instances
instance_path = (table, instance)
xref_map[instance_path] = record
return records
class RecordSpec(object):
"""
Specifications for extracting csv columns into the corresponding
database record.
:param attr_map: A dictionary that maps database columns to csv
columns using any of the ...Value classes below.
:param condition: An optional callable that returns false if the
object should not be created for the row that is currently being imported.
The callable must accept exactly one parameter (the current row).
"""
def __init__(self, attr_map, condition=None):
self.attr_map = attr_map
self.condition = condition if condition else lambda row: True
class ColumnValue(object):
"""
Read an input value from a csv column
:param col_name: Column name to read the value from
:param convert: Optional conversion function that takes exactly one
argument which is the row dict for the currently imported row
"""
def __init__(self, col_name, convert=None):
self.col_name = col_name
self.convert = convert
def _read(self, row, **kw_args):
value = row[self.col_name]
return self.convert(value) if self.convert else value
class MultiColumnValue(object):
"""
Reads input from multiple columns and contracts them into a single value
using the (non-optional) callable given in *convert*.
:param col_names: List of column names to read values from
:param convert: Conversion function that takes exactly one argument (the
row dict of the currently imported row) and contracts the values into
a single return value
"""
def __init__(self, col_names, convert):
if not convert:
raise ValueError('ERROR: You must provide a convert function')
self.col_names = col_names
self.convert = convert
def _read(self, row, **kw_args):
values = {key: row[key] for key in self.col_names}
return self.convert(values)
class ConstValue(object):
""" Always returns the same constant value
:param value: The value to return for each row
"""
def __init__(self, value):
self.value = value
def _read(self, row, **kw_args):
return self.value
class DynamicValue(object):
""" Creates a value dynamically using the callable *generate*
:param generate: A function or other callable that takes a single argument
(the current row dict) and returns a single value
"""
def __init__(self, generate):
self.generate = generate
def _read(self, row, **kw_args):
return self.generate(row)
class XReference(object):
""" Takes the value of a specific attribute of another record.
:param table_name: Table name in the import_specs table given to the
*CsvImporter*
:param instance_name: Identifies a specific instance under *table_name*
:param attribute_name: Name of the attribute to return
"""
def __init__(self, table_name, instance_name, attribute_name):
self.table_name = table_name
self.instance_name = instance_name
self.attribute_name = attribute_name
def _read(self, row, **kw_args):
existing_records = kw_args['existing_records']
path = (self.table_name, self.instance_name)
value = existing_records[path].attributes[self.attribute_name]
return value
class DbRecord(object):
"""
One or more DbRecords are created for each imported row according to the
RecordSpecs.
"""
def __init__(self, table_name, row_id):
self.row_id = row_id
self.table_name = table_name
self.attributes = {}
def import_attributes(self, attr_map, existing_records, row):
"""
Import attributes according to the attr_map and resolve cross
references to existing_records.
"""
try:
imported = {k: v._read(row, existing_records=existing_records)
for (k, v) in attr_map.iteritems()}
except AttributeError:
k, v = next((k, v) for (k, v) in attr_map.iteritems()
if '_read' not in dir(v))
print('ERROR: The RecordSpec for {} in {} does not seem to be '
'valid'.format(k, self.table_name))
exit(-1)
self.attributes.update(imported)
def insert_statement(self):
"""
Returns the insert statement sequence for the current object
"""
col = ' (%s)' % ', '.join(self.attributes.keys())
# sanity checks
error = False
for k, v in self.attributes.iteritems():
if not isinstance(v, str):
print('ERROR: The value ({}) for "{}" in table "{}" is not a '
'string. Make sure your specs only produce string '
'values (i.e. \'5\', \'TRUE\', \'"Some text"\', '
'...)'.format(v, k, self.table_name))
error = True
elif string_exp.match(v):
print ('WARNING: {} looks like a string value but is not in '
'quotes. If "{}" in "{}" is a CHAR or VARCHAR type '
'column, you should put the value in quotes.').\
format(v, k, self.table_name)
if error:
print 'Aborting due to errors.'
exit(-1)
val = ' (%s)' % ', '.join(self.attributes.values())
sql = 'INSERT INTO ' + self.table_name + col + ' VALUES' + val + ';\n'
return sql
# Private (internal) methods
def _toposort(data):
"""
Sort dependencies topologically
:param data: Dependency map of the form
data = {
'business': set(['fleet','address']),
'device': set(['business','model','status','pack']),
'txn': set(['device','business','operator'])
}
"""
# Ignore self dependencies.
for k, v in data.items():
v.discard(k)
# Find all items that don't depend on anything.
extra_items = reduce(set.union, data.itervalues()) - set(data.iterkeys())
# Add empty dependences where needed
data.update({item: set() for item in extra_items})
while True:
ordered = set(item for item, dep in data.iteritems() if not dep)
if not ordered:
break
yield ordered
data = {item: (dep - ordered)
for item, dep in data.iteritems() if item not in ordered}
assert not data, "Cyclic dependencies:\n%s" % \
'\n'.join(repr(x) for x in data.iteritems())
```
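The `_toposort` docstring documents its expected input with a small dependency map. The sketch below runs that exact example (under Python 2, since the module relies on `iteritems()`/`itervalues()` and the `reduce` builtin) to show the resulting topological levels; this ordering is what guarantees that cross-referenced records are built before the records that point to them.

```python
# Dependency map taken from the _toposort docstring above
deps = {
    'business': set(['fleet', 'address']),
    'device': set(['business', 'model', 'status', 'pack']),
    'txn': set(['device', 'business', 'operator']),
}
for level in _toposort(deps):
    print(sorted(level))
# Items with no dependencies come out first, then 'business', then 'device', then 'txn'.
```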
#### File: csv2db/example/import.py
```python
import sys
import os
sys.path.append(os.path.abspath('..'))
from csv2db import CsvImporter, RecordSpec, ColumnValue, MultiColumnValue, \
ConstValue, DynamicValue, XReference
# We store the department records for later when we import employees
# and need to link them to their respective departments
departments = []
def main():
# See python csv module for dialect specifications
# Attention: skipinitialspace does not skip e.g. tabs. Make sure your
# csv contains spaces only
dialect = {
'delimiter': ',',
'skipinitialspace': True
}
sql = ''
# Import departments first
importer = CsvImporter('departments.txt', dialect, DEP_IMPORT_SPEC)
# Each DbRecord has a row_id attribute which is filled from the
# 'Department' column. We use this to link employees to departments
# later.
db_records = importer.import_data(id_col='Department')
# Generate insert statements for each DbRecord object
for record in db_records:
sql += record.insert_statement()
# store department records for later
if record.table_name == 'departments':
departments.append(record)
# Import employees
importer = CsvImporter('employees.txt', dialect, EMP_IMPORT_SPEC)
db_records = importer.import_data()
for record in db_records:
sql += record.insert_statement()
# Write combined SQL to file
with open('import.sql', 'w+') as f:
f.write(sql)
# Helper functions used in import specifications
# ----------------------------------------------
class OidFactory:
''' Creates sequential _oid numbers '''
def __init__(self):
self.oid = 0
def __call__(self, row):
self.oid += 1
return str(self.oid)
def quote(value):
''' Char type columns require extra quotes '''
return "'{}'".format(value)
def make_name(values):
''' Concatenates first and last name '''
return "'{} {}'".format(values['First'], values['Last'])
def department_oid(name):
''' Looks up department _oid from the department records created earlier'''
return next(d.attributes['_oid'] for d in departments if d.row_id == name)
def has_phone(row):
''' Checks if a particular row contains a phone number '''
return True if row['Phone'] != '-' else False
# Import specifications
# ---------------------
# Common
PUBLIC_OBJECT_MAP = {
'_oid': DynamicValue(OidFactory()), # Compute _oid dynamically
'date': ConstValue('now()') # Insertion date, now() is a DB function
}
# Department import specification
DEPARTMENT_MAP = {
'_oid': DynamicValue(OidFactory()), # Compute _oid dynamically
'_publicobj_oid': XReference('publicobject', 'sole', '_oid'), # Take _oid value from 'sole' instance in 'publicobject' table
'name': ColumnValue('Department', convert=quote), # Read from csv and put into quotes
'floor': ColumnValue('Floor', convert=quote) # Read from csv and put into quotes
}
DEP_IMPORT_SPEC = { # For each imported row create...
'publicobject': {
'sole': RecordSpec(attr_map=PUBLIC_OBJECT_MAP) # ... one record in the 'publicobject' table
},
'departments': {
'sole': RecordSpec(attr_map=DEPARTMENT_MAP) # ... one record in the 'departments' table
}
}
# Employee import specification
EMPLOYEE_MAP = {
'_oid': DynamicValue(OidFactory()), # Compute _oid dynamically
'_publicobj_oid': XReference('publicobject', 'emp', '_oid'), # Take _oid value from 'emp' instance in 'publicobject' table
'department_oid': ColumnValue('Department', convert=department_oid), # Lookup department _oid by name
'name': MultiColumnValue(['First', 'Last'], convert=make_name) # name = First + Last
}
PHONE_MAP = {
'_oid': DynamicValue(OidFactory()), # Compute _oid dynamically
'_publicobj_oid': XReference('publicobject', 'phone', '_oid'), # Take _oid value from 'phone' instance in 'publicobject' table
'_employee_oid': XReference('employees', 'sole', '_oid'), # Take _oid value from 'sole' instance in 'employees' table
'number': ColumnValue('Phone', convert=quote) # Read from csv and put into quotes
}
EMP_IMPORT_SPEC = { # For each imported row create...
'publicobject': {
'emp': RecordSpec(attr_map=PUBLIC_OBJECT_MAP), # ... one record in the 'publicobject' table for the employee
'phone': RecordSpec(attr_map=PUBLIC_OBJECT_MAP, # ... another one for the phone record we're going to create later
condition=has_phone), # but only if the row contains a phone number
},
'employees': {
'sole': RecordSpec(attr_map=EMPLOYEE_MAP) # ... one record in the 'employees' table
},
'phones': {
'sole': RecordSpec(attr_map=PHONE_MAP, condition=has_phone) # ... one record in the 'phones' table if the employee has a phone
},
}
if __name__ == '__main__':
main()
``` |
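The specs above can also be exercised on a tiny, made-up input without the bundled `departments.txt`. The sketch below (hypothetical file name and data; Python 2, as required by `csv2db.py`) writes a one-row file and prints the generated INSERT statements.

```python
# Hypothetical one-row input matching the columns used by DEP_IMPORT_SPEC
with open('departments_demo.txt', 'w') as f:
    f.write('Department, Floor\n')
    f.write('Engineering, 3\n')

demo_dialect = {'delimiter': ',', 'skipinitialspace': True}
importer = CsvImporter('departments_demo.txt', demo_dialect, DEP_IMPORT_SPEC)
for record in importer.import_data(id_col='Department'):
    # One INSERT for 'publicobject' and one for 'departments' per input row
    print(record.insert_statement())
```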
{
"source": "3rdIteration/bip_utils",
"score": 2
} |
#### File: bip_utils/addr/bch_addr_converter.py
```python
from typing import Optional
from bip_utils.bech32 import BchBech32Decoder, BchBech32Encoder
class BchAddrConverter:
"""
Bitcoin Cash address converter class.
It allows to convert a Bitcoin Cash address by changing its HRP and net version.
"""
@staticmethod
def Convert(address: str,
hrp: str,
net_ver: Optional[bytes] = None) -> str:
"""
Convert a Bitcoin Cash address by changing its HRP and net version.
Args:
address (str) : Bitcoin Cash address
hrp (str) : New HRP
net_ver (bytes, optional): New net version (if None, the old one will be used)
Returns:
str: Converted address string
Raises:
Bech32ChecksumError: If the address checksum is not valid
ValueError: If the address string is not valid
"""
# Decode address
curr_net_ver, data = BchBech32Decoder.Decode(address[:address.find(":")], address)
# Encode again with new HRP and net version
return BchBech32Encoder.Encode(hrp, net_ver or curr_net_ver, data)
```
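A minimal sketch of the converter in use, assuming the bip_utils package is installed. The 20-byte payload below is a placeholder rather than the hash of a real key; any 20-byte value can be Bech32-encoded, which keeps the example self-contained.

```python
from bip_utils.bech32 import BchBech32Encoder
from bip_utils.addr.bch_addr_converter import BchAddrConverter

pub_key_hash = b"\x00" * 20                     # placeholder payload, not a real key hash
addr = BchBech32Encoder.Encode("bitcoincash", b"\x00", pub_key_hash)
print(addr)                                     # "bitcoincash:..." address
print(BchAddrConverter.Convert(addr, "ecash"))  # same payload re-encoded with the new HRP
```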
#### File: bip_utils/addr/neo_addr.py
```python
from typing import Any, Union
from bip_utils.addr.addr_dec_utils import AddrDecUtils
from bip_utils.addr.addr_key_validator import AddrKeyValidator
from bip_utils.addr.iaddr_decoder import IAddrDecoder
from bip_utils.addr.iaddr_encoder import IAddrEncoder
from bip_utils.base58 import Base58ChecksumError, Base58Decoder, Base58Encoder
from bip_utils.ecc import IPublicKey
from bip_utils.utils.misc import BytesUtils, CryptoUtils, IntegerUtils
class NeoAddrConst:
"""Class container for NEO address constants."""
# Address prefix
PREFIX: bytes = b"\x21"
# Address suffix
SUFFIX: bytes = b"\xac"
class NeoAddrDecoder(IAddrDecoder):
"""
Neo address decoder class.
It allows the Neo address decoding.
"""
@staticmethod
def DecodeAddr(addr: str,
**kwargs: Any) -> bytes:
"""
Decode a Neo address to bytes.
Args:
addr (str): Address string
Other Parameters:
ver (bytes): Version
Returns:
bytes: Public key hash bytes
Raises:
ValueError: If the address encoding is not valid
"""
ver_bytes = kwargs["ver"]
try:
# Decode from base58
addr_dec_bytes = Base58Decoder.CheckDecode(addr)
except Base58ChecksumError as ex:
raise ValueError("Invalid base58 checksum") from ex
else:
# Validate length
AddrDecUtils.ValidateLength(addr_dec_bytes,
CryptoUtils.Hash160DigestSize() + len(ver_bytes))
# Check version
ver_got = IntegerUtils.ToBytes(addr_dec_bytes[0])
if ver_bytes != ver_got:
raise ValueError(f"Invalid version (expected {BytesUtils.ToHexString(ver_bytes)}, "
f"got {BytesUtils.ToHexString(ver_got)})")
return addr_dec_bytes[1:]
class NeoAddrEncoder(IAddrEncoder):
"""
Neo address encoder class.
It allows the Neo address encoding.
"""
@staticmethod
def EncodeKey(pub_key: Union[bytes, IPublicKey],
**kwargs: Any) -> str:
"""
Encode a public key to Neo address.
Args:
pub_key (bytes or IPublicKey): Public key bytes or object
Other Parameters:
ver (bytes): Version
Returns:
str: Address string
Raises:
ValueError: If the public key is not valid
TypeError: If the public key is not nist256p1
"""
ver_bytes = kwargs["ver"]
pub_key_obj = AddrKeyValidator.ValidateAndGetNist256p1Key(pub_key)
# Get payload
payload_bytes = (NeoAddrConst.PREFIX
+ pub_key_obj.RawCompressed().ToBytes()
+ NeoAddrConst.SUFFIX)
# Encode to base58
return Base58Encoder.CheckEncode(ver_bytes + CryptoUtils.Hash160(payload_bytes))
class NeoAddr(NeoAddrEncoder):
"""
Neo address class.
Only kept for compatibility, NeoAddrEncoder shall be used instead.
"""
```
#### File: bip_utils/addr/trx_addr.py
```python
from typing import Any, Union
from bip_utils.addr.addr_dec_utils import AddrDecUtils
from bip_utils.addr.iaddr_decoder import IAddrDecoder
from bip_utils.addr.iaddr_encoder import IAddrEncoder
from bip_utils.addr.eth_addr import EthAddrConst, EthAddrDecoder, EthAddrEncoder
from bip_utils.base58 import Base58ChecksumError, Base58Decoder, Base58Encoder
from bip_utils.coin_conf import CoinsConf
from bip_utils.ecc import IPublicKey
from bip_utils.utils.misc import BytesUtils
class TrxAddrDecoder(IAddrDecoder):
"""
Tron address decoder class.
It allows the Tron address decoding.
"""
@staticmethod
def DecodeAddr(addr: str,
**kwargs: Any) -> bytes:
"""
Decode a Tron address to bytes.
Args:
addr (str): Address string
**kwargs : Not used
Returns:
bytes: Public key hash bytes
Raises:
ValueError: If the address encoding is not valid
"""
try:
# Decode from base58
addr_dec = Base58Decoder.CheckDecode(addr)
except Base58ChecksumError as ex:
raise ValueError("Invalid base58 checksum") from ex
else:
# Validate length
AddrDecUtils.ValidateLength(addr_dec,
(EthAddrConst.ADDR_LEN // 2) + len(CoinsConf.Tron.Params("addr_prefix")))
# Validate and remove prefix
addr_no_prefix = AddrDecUtils.ValidateAndRemovePrefix(addr_dec,
CoinsConf.Tron.Params("addr_prefix"))
return EthAddrDecoder.DecodeAddr(CoinsConf.Ethereum.Params("addr_prefix")
+ BytesUtils.ToHexString(addr_no_prefix),
skip_chksum_enc=True)
class TrxAddrEncoder(IAddrEncoder):
"""
Tron address encoder class.
It allows the Tron address encoding.
"""
@staticmethod
def EncodeKey(pub_key: Union[bytes, IPublicKey],
**kwargs: Any) -> str:
"""
Encode a public key to Tron address.
Args:
pub_key (bytes or IPublicKey): Public key bytes or object
**kwargs : Not used
Returns:
str: Address string
Raises:
ValueError: If the public key is not valid
TypeError: If the public key is not secp256k1
"""
# Get address in Ethereum format (remove "0x" at the beginning)
eth_addr = EthAddrEncoder.EncodeKey(pub_key)[2:]
# Add prefix and encode
return Base58Encoder.CheckEncode(CoinsConf.Tron.Params("addr_prefix") + BytesUtils.FromHexString(eth_addr))
class TrxAddr(TrxAddrEncoder):
"""
Tron address class.
Only kept for compatibility, TrxAddrEncoder shall be used instead.
"""
```
#### File: bip/bip32/bip32_key_data.py
```python
from __future__ import annotations
from typing import Union
from bip_utils.bip.bip32.bip32_utils import Bip32Utils
from bip_utils.utils.misc import DataBytes, IntegerUtils
class Bip32KeyDataConst:
"""Class container for BIP32 key data constants."""
# Depth length in bytes
DEPTH_BYTE_LEN: int = 1
# Key index length in bytes
KEY_INDEX_BYTE_LEN: int = 4
# Key index maximum value
KEY_INDEX_MAX_VAL: int = 2**32 - 1
# Chaincode length in bytes
CHAINCODE_BYTE_LEN: int = 32
# Fingerprint length in bytes
FINGERPRINT_BYTE_LEN: int = 4
# Key net version length in bytes
KEY_NET_VERSION_LEN: int = 4
# Fingerprint of master key
MASTER_FINGERPRINT: bytes = b"\x00\x00\x00\x00"
class Bip32ChainCode(DataBytes):
"""
BIP32 chaincode class.
It represents a BIP32 chaincode.
"""
def __init__(self,
chaincode: bytes = b"\x00" * Bip32KeyDataConst.CHAINCODE_BYTE_LEN) -> None:
"""
Construct class.
Args:
chaincode (bytes, optional): Chain code bytes (default: zero)
"""
if len(chaincode) != Bip32KeyDataConst.CHAINCODE_BYTE_LEN:
raise ValueError(f"Invalid chaincode length ({len(chaincode)})")
super().__init__(chaincode)
class Bip32FingerPrint(DataBytes):
"""
BIP32 fingerprint class.
It represents a BIP32 fingerprint.
"""
def __init__(self,
fprint: bytes = Bip32KeyDataConst.MASTER_FINGERPRINT) -> None:
"""
Construct class.
Args:
fprint (bytes, optional): Fingerprint bytes (default: master key)
"""
if len(fprint) < Bip32KeyDataConst.FINGERPRINT_BYTE_LEN:
raise ValueError(f"Invalid fingerprint length ({len(fprint)})")
super().__init__(fprint[:Bip32KeyDataConst.FINGERPRINT_BYTE_LEN])
def IsMasterKey(self) -> bool:
"""
Get if the fingerprint corresponds to a master key.
Returns:
bool: True if it corresponds to a master key, false otherwise
"""
return self.ToBytes() == Bip32KeyDataConst.MASTER_FINGERPRINT
class Bip32Depth:
"""
BIP32 depth class.
It represents a BIP32 depth.
"""
m_depth: int
def __init__(self,
depth: int) -> None:
"""
Construct class.
Args:
depth (int): Depth
"""
if depth < 0:
raise ValueError(f"Invalid depth ({depth})")
self.m_depth = depth
def Increase(self) -> Bip32Depth:
"""
Get a new object with increased depth.
Returns:
Bip32Depth object: Bip32Depth object
"""
return Bip32Depth(self.m_depth + 1)
def ToBytes(self) -> bytes:
"""
Get the depth as bytes.
Returns:
bytes: Depth bytes
"""
return IntegerUtils.ToBytes(self.m_depth, bytes_num=Bip32KeyDataConst.DEPTH_BYTE_LEN)
def ToInt(self) -> int:
"""
Get the depth as integer.
Returns:
int: Depth index
"""
return int(self.m_depth)
def __int__(self) -> int:
"""
Get the depth as integer.
Returns:
int: Depth index
"""
return self.ToInt()
def __bytes__(self) -> bytes:
"""
Get the depth as bytes.
Returns:
bytes: Depth bytes
"""
return self.ToBytes()
def __eq__(self,
other: object) -> bool:
"""
Equality operator.
Args:
other (int or Bip32Depth object): Other value to compare
Returns:
bool: True if equal false otherwise
"""
if not isinstance(other, (int, Bip32Depth)):
raise TypeError(f"Invalid type for checking equality ({type(other)})")
if isinstance(other, int):
return self.m_depth == other
return self.m_depth == other.m_depth
def __gt__(self,
other: Union[int, Bip32Depth]) -> bool:
"""
Greater than operator.
Args:
other (int or Bip32Depth object): Other value to compare
Returns:
bool: True if greater false otherwise
"""
if isinstance(other, int):
return self.m_depth > other
return self.m_depth > other.m_depth
def __lt__(self,
other: Union[int, Bip32Depth]) -> bool:
"""
Lower than operator.
Args:
other (int or Bip32Depth object): Other value to compare
Returns:
bool: True if lower false otherwise
"""
if isinstance(other, int):
return self.m_depth < other
return self.m_depth < other.m_depth
class Bip32KeyIndex:
"""
BIP32 key index class.
It represents a BIP32 key index.
"""
m_idx: int
def __init__(self,
idx: int) -> None:
"""
Construct class.
Args:
idx (int): Key index
"""
if idx < 0 or idx > Bip32KeyDataConst.KEY_INDEX_MAX_VAL:
raise ValueError(f"Invalid key index ({idx})")
self.m_idx = idx
def IsHardened(self) -> bool:
"""
Get if the key index is hardened.
Returns:
bool: True if hardened, false otherwise
"""
return Bip32Utils.IsHardenedIndex(self.m_idx)
def ToBytes(self) -> bytes:
"""
Get the key index as bytes.
Returns:
bytes: Key bytes
"""
return IntegerUtils.ToBytes(self.m_idx, bytes_num=Bip32KeyDataConst.KEY_INDEX_BYTE_LEN)
def ToInt(self) -> int:
"""
Get the key index as integer.
Returns:
int: Key index
"""
return int(self.m_idx)
def __int__(self) -> int:
"""
Get the key index as integer.
Returns:
int: Key index
"""
return self.ToInt()
def __bytes__(self) -> bytes:
"""
Get the key index as bytes.
Returns:
bytes: Key bytes
"""
return self.ToBytes()
def __eq__(self,
other: object) -> bool:
"""
Equality operator.
Args:
other (int or Bip32KeyIndex object): Other value to compare
Returns:
bool: True if equal false otherwise
"""
if not isinstance(other, (int, Bip32KeyIndex)):
raise TypeError(f"Invalid type for checking equality ({type(other)})")
if isinstance(other, int):
return self.m_idx == other
return self.m_idx == other.m_idx
class Bip32KeyNetVersions:
"""
BIP32 key net versions class.
It represents a BIP32 key net versions.
"""
m_pub_net_ver: bytes
m_priv_net_ver: bytes
def __init__(self,
pub_net_ver: bytes,
priv_net_ver: bytes) -> None:
"""
Construct class.
Args:
pub_net_ver (bytes) : Public net version
priv_net_ver (bytes): Private net version
"""
if (len(pub_net_ver) != self.Length()
or len(priv_net_ver) != self.Length()):
raise ValueError("Invalid key net version length")
self.m_pub_net_ver = pub_net_ver
self.m_priv_net_ver = priv_net_ver
@staticmethod
def Length() -> int:
"""
Get the key net version length.
Returns:
int: Key net version length
"""
return Bip32KeyDataConst.KEY_NET_VERSION_LEN
def Public(self) -> bytes:
"""
Get public net version.
Returns:
bytes: Public net version
"""
return self.m_pub_net_ver
def Private(self) -> bytes:
"""
Get private net version.
Returns:
bytes: Private net version
"""
return self.m_priv_net_ver
class Bip32KeyData:
"""
BIP32 key data class.
It contains all additional data related to a BIP32 key (e.g. depth, chain code, etc...).
"""
m_key_net_ver: Bip32KeyNetVersions
m_depth: Bip32Depth
m_index: Bip32KeyIndex
m_chain_code: Bip32ChainCode
m_parent_fprint: Bip32FingerPrint
def __init__(self,
key_net_ver: Bip32KeyNetVersions,
depth: Bip32Depth,
index: Bip32KeyIndex,
chain_code: Bip32ChainCode,
parent_fprint: Bip32FingerPrint) -> None:
"""
Construct class.
Args:
key_net_ver (Bip32KeyNetVersions object): Bip32KeyNetVersions object
depth (Bip32Depth object) : Key depth
index (Bip32KeyIndex object) : Key index
chain_code (Bip32ChainCode object) : Key chain code
parent_fprint (Bip32FingerPrint object) : Key parent fingerprint
"""
self.m_key_net_ver = key_net_ver
self.m_depth = depth
self.m_index = index
self.m_chain_code = chain_code
self.m_parent_fprint = parent_fprint
def KeyNetVersions(self) -> Bip32KeyNetVersions:
"""
Get key net versions.
Returns:
Bip32KeyNetVersions object: Bip32KeyNetVersions object
"""
return self.m_key_net_ver
def Depth(self) -> Bip32Depth:
"""
Get current depth.
Returns:
Bip32Depth object: Current depth
"""
return self.m_depth
def Index(self) -> Bip32KeyIndex:
"""
Get current index.
Returns:
Bip32KeyIndex object: Current index
"""
return self.m_index
def ChainCode(self) -> Bip32ChainCode:
"""
Get current chain code.
Returns:
Bip32ChainCode object: Chain code
"""
return self.m_chain_code
def ParentFingerPrint(self) -> Bip32FingerPrint:
"""
Get parent fingerprint.
Returns:
Bip32FingerPrint object: Parent fingerprint
"""
return self.m_parent_fprint
```
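A minimal usage sketch of the key-data helpers above. The top-level import path is an assumption (within this repository the classes live alongside the other BIP32 modules); the net version bytes are the standard Bitcoin main net xpub/xprv prefixes.
```python
# Minimal sketch; importing from the top-level bip_utils package is an assumption.
from bip_utils import Bip32KeyIndex, Bip32KeyNetVersions

# Non-hardened index 0: hardened check and 4-byte serialization
idx = Bip32KeyIndex(0)
print(idx.IsHardened())   # False
print(bytes(idx).hex())   # 00000000

# Bitcoin main net xpub/xprv prefixes (4 bytes each, as required by Length())
net_ver = Bip32KeyNetVersions(b"\x04\x88\xb2\x1e", b"\x04\x88\xad\xe4")
print(net_ver.Public().hex(), net_ver.Private().hex())
```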
#### File: bip/bip38/bip38_ec.py
```python
import os
from typing import Optional, Tuple
from bip_utils.base58 import Base58Decoder, Base58Encoder
from bip_utils.bip.bip38.bip38_addr import Bip38PubKeyModes, Bip38Addr
from bip_utils.ecc import Secp256k1PrivateKey, Secp256k1PublicKey, Secp256k1
from bip_utils.utils.misc import (
AesEcbDecrypter, AesEcbEncrypter, BitUtils, BytesUtils, CryptoUtils, IntegerUtils, StringUtils
)
class Bip38EcConst:
"""Class container for BIP38 EC constants."""
# Minimum/Maximum values for lot number
LOT_NUM_MIN_VAL: int = 0
LOT_NUM_MAX_VAL: int = 1048575
# Minimum/Maximum values for sequence number
SEQ_NUM_MIN_VAL: int = 0
SEQ_NUM_MAX_VAL: int = 4095
# Owner salt lengths
OWNER_SALT_WITH_LOT_SEQ_BYTE_LEN: int = 4
OWNER_SALT_NO_LOT_SEQ_BYTE_LEN: int = 8
# Intermediate passphrase encrypted length in byte
INT_PASS_ENC_BYTE_LEN: int = 49
# Magic for intermediate passphrase
INT_PASS_MAGIC_WITH_LOT_SEQ = b"\x2c\xe9\xb3\xe1\xff\x39\xe2\x51"
INT_PASS_MAGIC_NO_LOT_SEQ = b"\x2c\xe9\xb3\xe1\xff\x39\xe2\x53"
# Seedb byte length
SEED_B_BYTE_LEN: int = 24
# Encrypted length
ENC_BYTE_LEN: int = 39
# Encrypted prefix
ENC_KEY_PREFIX: bytes = b"\x01\x43"
# Bit number for flags in flagbyte
FLAG_BIT_COMPRESSED: int = 5
FLAG_BIT_LOT_SEQ: int = 2
# Parameters for scrypt algorithm for computing prefactor
SCRYPT_PREFACTOR_KEY_LEN: int = 32
SCRYPT_PREFACTOR_N: int = 16384
SCRYPT_PREFACTOR_P: int = 8
SCRYPT_PREFACTOR_R: int = 8
# Parameters for scrypt algorithm for deriving key halves
SCRYPT_HALVES_KEY_LEN: int = 64
SCRYPT_HALVES_N: int = 1024
SCRYPT_HALVES_P: int = 1
SCRYPT_HALVES_R: int = 1
class _Bip38EcUtils:
"""Class container for BIP38 EC utility functions."""
@staticmethod
def OwnerEntropyWithLotSeq(lot_num: int,
sequence_num: int) -> bytes:
"""
Compute the owner entropy as specified in BIP38 (with EC multiplication) with lot and sequence numbers.
Args:
lot_num (int) : Lot number
sequence_num (int): Sequence number
Returns:
bytes: Owner entropy
Raises:
ValueError: If lot or sequence number is not valid
"""
# Check lot and sequence numbers
if lot_num < Bip38EcConst.LOT_NUM_MIN_VAL or lot_num > Bip38EcConst.LOT_NUM_MAX_VAL:
raise ValueError(f"Invalid lot number ({lot_num})")
if sequence_num < Bip38EcConst.SEQ_NUM_MIN_VAL or sequence_num > Bip38EcConst.SEQ_NUM_MAX_VAL:
raise ValueError(f"Invalid sequence number ({sequence_num})")
# Generate random owner salt (4 bytes)
owner_salt = os.urandom(Bip38EcConst.OWNER_SALT_WITH_LOT_SEQ_BYTE_LEN)
# Compute lot sequence
lot_sequence = IntegerUtils.ToBytes((lot_num * (Bip38EcConst.SEQ_NUM_MAX_VAL + 1)) + sequence_num,
bytes_num=4)
# Compute owner entropy
return owner_salt + lot_sequence
@staticmethod
def OwnerEntropyNoLotSeq() -> bytes:
"""
Compute the owner entropy as specified in BIP38 (with EC multiplication) without lot and sequence numbers.
Returns:
bytes: Owner entropy
"""
# Generate random owner salt (8 bytes)
owner_salt = os.urandom(Bip38EcConst.OWNER_SALT_NO_LOT_SEQ_BYTE_LEN)
# Owner entropy is owner salt
return owner_salt
@staticmethod
def OwnerSaltFromEntropy(owner_entropy: bytes,
has_lot_seq: bool) -> bytes:
"""
Get owner salt from owner entropy.
Args:
owner_entropy (bytes): Owner entropy
has_lot_seq (bool) : True if lot and sequence numbers are present, false otherwise
Returns:
bytes: Owner salt
"""
return owner_entropy if not has_lot_seq else owner_entropy[:Bip38EcConst.OWNER_SALT_WITH_LOT_SEQ_BYTE_LEN]
@staticmethod
def PassFactor(passphrase: str,
owner_entropy: bytes,
has_lot_seq: bool) -> bytes:
"""
Compute the passfactor as specified in BIP38 (with EC multiplication).
Args:
passphrase (str)     : Passphrase
owner_entropy (bytes): Owner entropy
has_lot_seq (bool) : True if lot and sequence numbers are present, false otherwise
Returns:
bytes: Passfactor
"""
# Compute the prefactor
prefactor = CryptoUtils.Scrypt(StringUtils.NormalizeNfc(passphrase),
_Bip38EcUtils.OwnerSaltFromEntropy(owner_entropy, has_lot_seq),
key_len=Bip38EcConst.SCRYPT_PREFACTOR_KEY_LEN,
n=Bip38EcConst.SCRYPT_PREFACTOR_N,
r=Bip38EcConst.SCRYPT_PREFACTOR_R,
p=Bip38EcConst.SCRYPT_PREFACTOR_P)
# Compute the passfactor
if has_lot_seq:
passfactor = CryptoUtils.DoubleSha256(prefactor + owner_entropy)
else:
passfactor = prefactor
return passfactor
@staticmethod
def PassPoint(passfactor: bytes) -> bytes:
"""
Compute the passpoint as specified in BIP38 (with EC multiplication).
Args:
passfactor (bytes): Passfactor
Returns:
bytes: Passpoint bytes in compressed format
"""
# Compute passpoint
passpoint = Secp256k1PublicKey.FromPoint(Secp256k1.Generator() * BytesUtils.ToInteger(passfactor))
# Return it as a compressed public key
return passpoint.RawCompressed().ToBytes()
@staticmethod
def DeriveKeyHalves(passpoint: bytes,
address_hash: bytes,
owner_entropy: bytes) -> Tuple[bytes, bytes]:
"""
Compute the scrypt as specified in BIP38 (with EC multiplication) and derive the two key halves.
Args:
passpoint (bytes) : Passpoint
address_hash (bytes) : Address hash
owner_entropy (bytes): Owner entropy
Returns:
tuple[bytes, bytes]: Derived key halves
"""
# Derive a key from passpoint, address hash and owner entropy
key = CryptoUtils.Scrypt(passpoint,
address_hash + owner_entropy,
key_len=Bip38EcConst.SCRYPT_HALVES_KEY_LEN,
n=Bip38EcConst.SCRYPT_HALVES_N,
r=Bip38EcConst.SCRYPT_HALVES_R,
p=Bip38EcConst.SCRYPT_HALVES_P)
# Split the resulting 64 bytes in half
derived_half_1 = key[:Bip38EcConst.SCRYPT_HALVES_KEY_LEN // 2]
derived_half_2 = key[Bip38EcConst.SCRYPT_HALVES_KEY_LEN // 2:]
return derived_half_1, derived_half_2
class Bip38EcKeysGenerator:
"""
BIP38 keys generator class.
It generates intermediate codes and private keys using the algorithm specified in BIP38 with EC multiplication.
"""
@staticmethod
def GenerateIntermediatePassphrase(passphrase: str,
lot_num: Optional[int] = None,
sequence_num: Optional[int] = None) -> str:
"""
Generate an intermediate passphrase from the user passphrase as specified in BIP38.
Args:
passphrase (str) : Passphrase
lot_num (int, optional) : Lot number
sequence_num (int, optional): Sequence number
Returns:
str: Intermediate passphrase encoded in base58
"""
# Get if lot and sequence are used
has_lot_seq = lot_num is not None and sequence_num is not None
# Compute owner entropy and salt
# We can ignore the mypy warning because has_lot_seq checks for variables for being not None
owner_entropy = (_Bip38EcUtils.OwnerEntropyWithLotSeq(lot_num, sequence_num) # type: ignore
if has_lot_seq
else _Bip38EcUtils.OwnerEntropyNoLotSeq())
# Compute passpoint
passfactor = _Bip38EcUtils.PassFactor(passphrase, owner_entropy, has_lot_seq)
passpoint = _Bip38EcUtils.PassPoint(passfactor)
# Get magic
magic = Bip38EcConst.INT_PASS_MAGIC_WITH_LOT_SEQ if has_lot_seq else Bip38EcConst.INT_PASS_MAGIC_NO_LOT_SEQ
# Build and encode intermediate passphrase
return Base58Encoder.CheckEncode(magic + owner_entropy + passpoint)
@staticmethod
def GeneratePrivateKey(int_passphrase: str,
pub_key_mode: Bip38PubKeyModes) -> str:
"""
Generate a random encrypted private key from the intermediate passphrase.
Args:
int_passphrase (str) : Intermediate passphrase
pub_key_mode (Bip38PubKeyModes): Public key mode
Returns:
str: Encrypted private key
Raises:
Base58ChecksumError: If base58 checksum is not valid
ValueError: If the intermediate code is not valid
"""
# Decode intermediate passphrase
int_passphrase_bytes = Base58Decoder.CheckDecode(int_passphrase)
# Check length
if len(int_passphrase_bytes) != Bip38EcConst.INT_PASS_ENC_BYTE_LEN:
raise ValueError(f"Invalid intermediate code length ({len(int_passphrase_bytes)})")
# Get all the parts back
magic = int_passphrase_bytes[:8]
owner_entropy = int_passphrase_bytes[8:16]
passpoint = Secp256k1PublicKey.FromBytes(int_passphrase_bytes[16:])
# Check magic
if magic not in (Bip38EcConst.INT_PASS_MAGIC_NO_LOT_SEQ, Bip38EcConst.INT_PASS_MAGIC_WITH_LOT_SEQ):
raise ValueError(f"Invalid magic ({BytesUtils.ToHexString(magic)})")
# Generate seedb
seedb = os.urandom(Bip38EcConst.SEED_B_BYTE_LEN)
# Compute factorb from seedb
factorb = CryptoUtils.DoubleSha256(seedb)
# Compute address hash
address_hash = Bip38Addr.AddressHash(
Secp256k1PublicKey.FromPoint(passpoint.Point() * BytesUtils.ToInteger(factorb)),
pub_key_mode
)
# Derive key halves from the passpoint, address hash and owner entropy
derived_half_1, derived_half_2 = _Bip38EcUtils.DeriveKeyHalves(passpoint.RawCompressed().ToBytes(),
address_hash,
owner_entropy)
# Encrypt seedb in two parts
encrypted_part_1, encrypted_part_2 = Bip38EcKeysGenerator.__EncryptSeedb(seedb,
derived_half_1,
derived_half_2)
# Get flagbyte by setting bits
flagbyte = Bip38EcKeysGenerator.__SetFlagbyteBits(magic, pub_key_mode)
# Concatenate all parts
enc_key_bytes = (Bip38EcConst.ENC_KEY_PREFIX + flagbyte + address_hash
+ owner_entropy + encrypted_part_1[:8] + encrypted_part_2)
# Encode in Base58Check
return Base58Encoder.CheckEncode(enc_key_bytes)
@staticmethod
def __EncryptSeedb(seedb: bytes,
derived_half_1: bytes,
derived_half_2: bytes) -> Tuple[bytes, bytes]:
"""
Encrypt seedb in two parts.
Args:
seedb (bytes) : Seedb
derived_half_1 (bytes): First half of derived key
derived_half_2 (bytes): Second half of derived key
Returns:
tuple[bytes, bytes]: Two encrypted parts
"""
# Use derived_half_2 as AES key
aes_enc = AesEcbEncrypter(derived_half_2)
aes_enc.AutoPad(False)
# Encrypt the first part: seedb[0...15] xor derived_half_1[0...15]
encrypted_part_1 = aes_enc.Encrypt(BytesUtils.Xor(seedb[:16], derived_half_1[:16]))
# Encrypt the second part: (encrypted_part_1[8...15] + seedb[16...23]) xor derived_half_1[16...31]
encrypted_part_2 = aes_enc.Encrypt(BytesUtils.Xor(encrypted_part_1[8:] + seedb[16:], derived_half_1[16:]))
return encrypted_part_1, encrypted_part_2
@staticmethod
def __SetFlagbyteBits(magic: bytes,
pub_key_mode: Bip38PubKeyModes) -> bytes:
"""
Set flagbyte bits and return it.
Args:
magic (bytes) : Magic
pub_key_mode (Bip38PubKeyModes): Public key mode
Returns:
bytes: Flagbyte
"""
flagbyte_int = 0
if pub_key_mode == Bip38PubKeyModes.COMPRESSED:
flagbyte_int = BitUtils.SetBit(flagbyte_int, Bip38EcConst.FLAG_BIT_COMPRESSED)
if magic == Bip38EcConst.INT_PASS_MAGIC_WITH_LOT_SEQ:
flagbyte_int = BitUtils.SetBit(flagbyte_int, Bip38EcConst.FLAG_BIT_LOT_SEQ)
return IntegerUtils.ToBytes(flagbyte_int)
class Bip38EcDecrypter:
"""
BIP38 decrypter class.
It decrypts a private key using the algorithm specified in BIP38 with EC multiplication.
"""
@staticmethod
def Decrypt(priv_key_enc: str,
passphrase: str) -> Tuple[bytes, Bip38PubKeyModes]:
"""
Decrypt the specified private key.
Args:
priv_key_enc (str): Encrypted private key
passphrase (str) : Passphrase
Returns:
tuple[bytes, Bip38PubKeyModes]: Decrypted private key (index 0), public key mode (index 1)
Raises:
Base58ChecksumError: If base58 checksum is not valid
ValueError: If the encrypted key is not valid
"""
# Decode private key
priv_key_enc_bytes = Base58Decoder.CheckDecode(priv_key_enc)
# Check encrypted length
if len(priv_key_enc_bytes) != Bip38EcConst.ENC_BYTE_LEN:
raise ValueError(f"Invalid encrypted length ({len(priv_key_enc_bytes)})")
# Get all the parts back
prefix = priv_key_enc_bytes[:2]
flagbyte = IntegerUtils.ToBytes(priv_key_enc_bytes[2])
address_hash = priv_key_enc_bytes[3:7]
owner_entropy = priv_key_enc_bytes[7:15]
encrypted_part_1_lower = priv_key_enc_bytes[15:23]
encrypted_part_2 = priv_key_enc_bytes[23:]
# Check prefix
if prefix != Bip38EcConst.ENC_KEY_PREFIX:
raise ValueError(f"Invalid prefix ({BytesUtils.ToHexString(prefix)})")
# Get flagbyte options
pub_key_mode, has_lot_seq = Bip38EcDecrypter.__GetFlagbyteOptions(flagbyte)
# Compute passfactor
passfactor = _Bip38EcUtils.PassFactor(passphrase, owner_entropy, has_lot_seq)
# Derive key halves from the passpoint, address hash and owner entropy
derived_half_1, derived_half_2 = _Bip38EcUtils.DeriveKeyHalves(_Bip38EcUtils.PassPoint(passfactor),
address_hash,
owner_entropy)
# Get factorb back by decrypting
factorb = Bip38EcDecrypter.__DecryptAndGetFactorb(encrypted_part_1_lower,
encrypted_part_2,
derived_half_1,
derived_half_2)
# Compute private key
priv_key_bytes = Bip38EcDecrypter.__ComputePrivateKey(passfactor, factorb)
# Verify the address hash
address_hash_got = Bip38Addr.AddressHash(Secp256k1PrivateKey.FromBytes(priv_key_bytes).PublicKey(),
pub_key_mode)
if address_hash != address_hash_got:
raise ValueError(
f"Invalid address hash (expected: {BytesUtils.ToHexString(address_hash)}, "
f"got: {BytesUtils.ToHexString(address_hash_got)})"
)
return priv_key_bytes, pub_key_mode
@staticmethod
def __DecryptAndGetFactorb(encrypted_part_1_lower: bytes,
encrypted_part_2: bytes,
derived_half_1: bytes,
derived_half_2: bytes) -> bytes:
"""
Decrypt and get back factorb.
Args:
encrypted_part_1_lower (bytes): Lower part of first encrypted part
encrypted_part_2 (bytes) : Second encrypted part
derived_half_1 (bytes) : First half of derived key
derived_half_2 (bytes) : Second half of derived key
Returns:
bytes: Factorb
"""
# Use derived_half_2 as AES key
aes_dec = AesEcbDecrypter(derived_half_2)
aes_dec.AutoUnPad(False)
# Decrypt the second part and get back the higher parts of seedb and encrypted half 1
decrypted_part_2 = BytesUtils.Xor(aes_dec.Decrypt(encrypted_part_2), derived_half_1[16:])
encrypted_part_1_higher = decrypted_part_2[:8]
seedb_part_2 = decrypted_part_2[8:]
# Decrypt the first part to get the lower part of seedb
seedb_part_1 = BytesUtils.Xor(aes_dec.Decrypt(encrypted_part_1_lower + encrypted_part_1_higher),
derived_half_1[:16])
# Rebuild the complete seedb
seedb = seedb_part_1 + seedb_part_2
# Compute factorb from seedb
return CryptoUtils.DoubleSha256(seedb)
@staticmethod
def __ComputePrivateKey(passfactor: bytes,
factorb: bytes) -> bytes:
"""
Compute the private key from passfactor and factorb.
Args:
passfactor (bytes): Passfactor
factorb (bytes) : Factorb
Returns:
bytes: Private key
"""
# Private key: (passfactor * factorb) mod N
priv_key_int = (BytesUtils.ToInteger(passfactor) * BytesUtils.ToInteger(factorb)) % Secp256k1.Order()
return IntegerUtils.ToBytes(priv_key_int, bytes_num=Secp256k1PrivateKey.Length())
@staticmethod
def __GetFlagbyteOptions(flagbyte: bytes) -> Tuple[Bip38PubKeyModes, bool]:
"""
Get the options from the flagbyte.
Args:
flagbyte (bytes): Flagbyte
Returns:
tuple[Bip38PubKeyModes, bool]: Public key mode (index 0), has lot/sequence numbers (index 1)
"""
# Convert flagbyte to integer
flagbyte_int = BytesUtils.ToInteger(flagbyte)
# Get bit set in flagbyte
has_lot_seq = BitUtils.IsBitSet(flagbyte_int, Bip38EcConst.FLAG_BIT_LOT_SEQ)
pub_key_mode = (Bip38PubKeyModes.COMPRESSED
if BitUtils.IsBitSet(flagbyte_int, Bip38EcConst.FLAG_BIT_COMPRESSED)
else Bip38PubKeyModes.UNCOMPRESSED)
# Check flagbyte
flagbyte_int = BitUtils.ResetBit(flagbyte_int, Bip38EcConst.FLAG_BIT_LOT_SEQ)
flagbyte_int = BitUtils.ResetBit(flagbyte_int, Bip38EcConst.FLAG_BIT_COMPRESSED)
if flagbyte_int != 0:
raise ValueError(f"Invalid flagbyte ({BytesUtils.ToHexString(flagbyte)})")
return pub_key_mode, has_lot_seq
```
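A short end-to-end sketch of the classes above: generate an intermediate passphrase, derive a random encrypted key from it, then decrypt it with the same passphrase. The passphrase and lot/sequence values are arbitrary example values; imports follow the module paths shown in the file headers.
```python
# Sketch only; the passphrase and lot/sequence numbers are arbitrary examples.
from bip_utils.bip.bip38.bip38_addr import Bip38PubKeyModes
from bip_utils.bip.bip38.bip38_ec import Bip38EcDecrypter, Bip38EcKeysGenerator

# Intermediate passphrase bound to a lot/sequence pair
int_pass = Bip38EcKeysGenerator.GenerateIntermediatePassphrase("DummyPassword",
                                                               lot_num=100000,
                                                               sequence_num=1)

# Random encrypted private key derived from the intermediate passphrase
priv_key_enc = Bip38EcKeysGenerator.GeneratePrivateKey(int_pass, Bip38PubKeyModes.COMPRESSED)

# Decrypting with the same passphrase returns the key bytes and the public key mode
priv_key_bytes, pub_key_mode = Bip38EcDecrypter.Decrypt(priv_key_enc, "DummyPassword")
print(priv_key_bytes.hex(), pub_key_mode)
```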
#### File: bip/bip39/bip39_entropy_generator.py
```python
from enum import IntEnum, unique
from typing import List, Union
from bip_utils.utils.mnemonic import EntropyGenerator
@unique
class Bip39EntropyBitLen(IntEnum):
"""Enumerative for BIP39 entropy bit lengths."""
BIT_LEN_128 = 128
BIT_LEN_160 = 160
BIT_LEN_192 = 192
BIT_LEN_224 = 224
BIT_LEN_256 = 256
class Bip39EntropyGeneratorConst:
"""Class container for BIP39 entropy generator constants."""
# Accepted entropy lengths in bit
ENTROPY_BIT_LEN: List[Bip39EntropyBitLen] = [
Bip39EntropyBitLen.BIT_LEN_128,
Bip39EntropyBitLen.BIT_LEN_160,
Bip39EntropyBitLen.BIT_LEN_192,
Bip39EntropyBitLen.BIT_LEN_224,
Bip39EntropyBitLen.BIT_LEN_256,
]
class Bip39EntropyGenerator(EntropyGenerator):
"""
BIP39 entropy generator class.
It generates random entropy bytes with the specified length.
"""
def __init__(self,
bits_len: Union[int, Bip39EntropyBitLen]) -> None:
"""
Construct class.
Args:
bits_len (int or Bip39EntropyBitLen): Entropy length in bits
Raises:
ValueError: If the bit length is not valid
"""
if not self.IsValidEntropyBitLen(bits_len):
raise ValueError(f"Entropy bit length is not valid ({bits_len})")
super().__init__(bits_len // 8)
@staticmethod
def IsValidEntropyBitLen(bits_len: Union[int, Bip39EntropyBitLen]) -> bool:
"""
Get if the specified entropy bit length is valid.
Args:
bits_len (int or Bip39EntropyBitLen): Entropy length in bits
Returns:
bool: True if valid, false otherwise
"""
return bits_len in Bip39EntropyGeneratorConst.ENTROPY_BIT_LEN
@staticmethod
def IsValidEntropyByteLen(bytes_len: int) -> bool:
"""
Get if the specified entropy byte length is valid.
Args:
bytes_len (int): Entropy length in bytes
Returns:
bool: True if valid, false otherwise
"""
return Bip39EntropyGenerator.IsValidEntropyBitLen(bytes_len * 8)
```
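A quick sketch of the validity helpers above; note that `Generate()` itself is inherited from the `EntropyGenerator` base class, which is not shown in this file.
```python
# Sketch of the entropy length checks; module path taken from the file header.
from bip_utils.bip.bip39.bip39_entropy_generator import Bip39EntropyBitLen, Bip39EntropyGenerator

print(Bip39EntropyGenerator.IsValidEntropyBitLen(128))    # True
print(Bip39EntropyGenerator.IsValidEntropyByteLen(20))    # True (160 bits)
print(Bip39EntropyGenerator.IsValidEntropyBitLen(100))    # False

# Constructing with an accepted length raises no exception
entropy_gen = Bip39EntropyGenerator(Bip39EntropyBitLen.BIT_LEN_256)
```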
#### File: conf/bip86/bip86_conf_getter.py
```python
from typing import Dict
from bip_utils.bip.conf.common import BipCoinConf
from bip_utils.bip.conf.bip86.bip86_coins import Bip86Coins
from bip_utils.bip.conf.bip86.bip86_conf import Bip86Conf
from bip_utils.bip.conf.common import BipCoins
class Bip86ConfGetterConst:
"""Class container for Bip86 configuration getter constants."""
# Map from Bip86Coins to configuration classes
COIN_TO_CONF: Dict[Bip86Coins, BipCoinConf] = {
Bip86Coins.BITCOIN: Bip86Conf.BitcoinMainNet,
Bip86Coins.BITCOIN_TESTNET: Bip86Conf.BitcoinTestNet,
}
class Bip86ConfGetter:
"""
Bip86 configuration getter class.
It allows getting the Bip86 configuration of a specific coin.
"""
@staticmethod
def GetConfig(coin_type: BipCoins) -> BipCoinConf:
"""
Get coin configuration.
Args:
coin_type (BipCoins): Coin type
Returns:
BipCoinConf: Coin configuration
Raises:
TypeError: If coin type is not of a Bip86Coins enumerative
"""
if not isinstance(coin_type, Bip86Coins):
raise TypeError("Coin type is not an enumerative of Bip86Coins")
return Bip86ConfGetterConst.COIN_TO_CONF[Bip86Coins(coin_type)]
```
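A usage sketch of the getter above, including the type check on the coin enum; module paths follow the file headers.
```python
# Sketch only; module paths are taken from the file headers.
from bip_utils.bip.conf.bip86.bip86_coins import Bip86Coins
from bip_utils.bip.conf.bip86.bip86_conf_getter import Bip86ConfGetter

conf = Bip86ConfGetter.GetConfig(Bip86Coins.BITCOIN)
print(conf)

# Anything that is not a Bip86Coins member is rejected with a TypeError
try:
    Bip86ConfGetter.GetConfig("BITCOIN")
except TypeError as ex:
    print(ex)
```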
#### File: bip_utils/solana/spl_token.py
```python
from typing import List
from bip_utils.addr import SolAddrDecoder
from bip_utils.base58 import Base58Encoder
from bip_utils.ecc import Ed25519PublicKey
from bip_utils.utils.misc import IntegerUtils, Sha256
#
# Classes
#
class SplTokenConst:
"""Class container for SPL token constants."""
# Default program ID
DEF_PROGRAM_ID: str = "ATokenGPvbdGVxr1b2hvZbsiqW5xWH25efTNsLJA8knL"
# Default token program ID
DEF_TOKEN_PROGRAM_ID: str = "<KEY>"
# Program derived address marker
PDA_MARKER: bytes = b"ProgramDerivedAddress"
# Maximum seed bump value
SEED_BUMP_MAX_VAL: int = 2 ** 8 - 1
# Maximum number of seeds
SEEDS_MAX_NUM: int = 16
class SplToken:
"""
SPL token class.
It provides methods for getting the account address associated with an SPL token.
"""
@classmethod
def GetAssociatedTokenAddress(cls,
wallet_addr: str,
token_mint_addr: str) -> str:
"""
Get the account address associated to the specified SPL token.
Args:
wallet_addr (str) : Wallet address
token_mint_addr (str): Token mint address
Returns:
str: Associated account address
Raises:
ValueError: If the account address cannot be found or the specified addresses are not valid
"""
return cls.GetAssociatedTokenAddressWithProgramId(
wallet_addr,
token_mint_addr,
SplTokenConst.DEF_TOKEN_PROGRAM_ID
)
@classmethod
def GetAssociatedTokenAddressWithProgramId(cls,
wallet_addr: str,
token_mint_addr: str,
token_program_id: str) -> str:
"""
Get the account address associated to the specified SPL token and token program ID.
Args:
wallet_addr (str) : Wallet address
token_mint_addr (str) : Token mint address
token_program_id (str): Token program ID
Returns:
str: Associated account address
Raises:
ValueError: If the account address cannot be found or the specified addresses or ID are not valid
"""
seeds = [
SolAddrDecoder.DecodeAddr(wallet_addr),
SolAddrDecoder.DecodeAddr(token_program_id),
SolAddrDecoder.DecodeAddr(token_mint_addr),
]
return cls.FindPda(seeds, SplTokenConst.DEF_PROGRAM_ID)
@classmethod
def FindPda(cls,
seeds: List[bytes],
program_id: str) -> str:
"""
Find a valid PDA (Program Derived Address) and its corresponding bump seed.
Args:
seeds (list[bytes]): List of seeds bytes
program_id (str) : Program ID
Returns:
str: Found PDA
Raises:
ValueError: If the PDA cannot be found or the specified seeds or program ID are not valid
"""
# Check if seeds are valid
if len(seeds) > SplTokenConst.SEEDS_MAX_NUM:
raise ValueError(f"Seeds length is not valid ({len(seeds)})")
for seed in seeds:
if len(seed) > Ed25519PublicKey.CompressedLength() - 1:
raise ValueError(f"Seed length is not valid ({len(seeds)})")
program_id_bytes = SolAddrDecoder.DecodeAddr(program_id)
bump_seed = SplTokenConst.SEED_BUMP_MAX_VAL
for _ in range(SplTokenConst.SEED_BUMP_MAX_VAL):
# Add bump to seeds
seeds_with_bump = list(seeds)
seeds_with_bump.append(IntegerUtils.ToBytes(bump_seed))
# Try to create PDA
try:
return cls.__CreatePda(seeds_with_bump, program_id_bytes)
except ValueError:
# Continue with the next bump seed if PDA is not valid
bump_seed -= 1
# Very unlucky case
raise ValueError("Unable to find a valid PDA")
@staticmethod
def __CreatePda(seeds_with_bump: List[bytes],
program_id_bytes: bytes) -> str:
"""
Create a PDA (Program Derived Address) for the specified seeds and program ID.
Args:
seeds_with_bump (list[bytes]): List of seeds bytes with bump
program_id_bytes (bytes) : Program ID bytes
Returns:
str: Created PDA
Raises:
ValueError: If the created PDA is not valid
"""
sha256 = Sha256()
# Compute SHA256 of seeds with bump
for seed in seeds_with_bump:
sha256.Update(seed)
# Compute SHA256 of program ID and PDA marker
for elem in (program_id_bytes, SplTokenConst.PDA_MARKER):
sha256.Update(elem)
# Get PDA bytes
pda_bytes = sha256.Digest()
# A PDA shall NOT lie on the ed25519 curve, so it shall not be a valid public key
if Ed25519PublicKey.IsValidBytes(pda_bytes):
raise ValueError("Invalid created PDA")
return Base58Encoder.Encode(pda_bytes)
```
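A sketch of how the class above is called. The two addresses below are hypothetical placeholders, not valid base58 strings, so the call raises `ValueError` until real Solana addresses are supplied (the seeds are decoded with `SolAddrDecoder` before the PDA search).
```python
# Sketch only; the address values are hypothetical placeholders.
from bip_utils.solana.spl_token import SplToken

wallet_addr = "<wallet_base58_address>"           # hypothetical placeholder
token_mint_addr = "<token_mint_base58_address>"   # hypothetical placeholder

try:
    ata = SplToken.GetAssociatedTokenAddress(wallet_addr, token_mint_addr)
    print(ata)
except ValueError as ex:
    print(f"Invalid addresses: {ex}")
```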
#### File: substrate/conf/substrate_coin_conf.py
```python
from __future__ import annotations
from typing import Dict
from bip_utils.coin_conf import CoinConf
from bip_utils.utils.conf import CoinNames as UtilsCoinNames
class SubstrateCoinConf:
"""Substrate coin configuration class."""
m_coin_names: UtilsCoinNames
m_ss58_format: int
m_addr_params: Dict[str, int]
@classmethod
def FromCoinConf(cls,
coin_conf: CoinConf) -> SubstrateCoinConf:
"""
Construct class.
Args:
coin_conf (CoinConf object): Generic coin configuration object
Returns:
SubstrateCoinConf object: SubstrateCoinConf object
"""
return cls(coin_names=coin_conf.CoinNames(),
ss58_format=coin_conf.Params("addr_ss58_format"))
def __init__(self,
coin_names: UtilsCoinNames,
ss58_format: int) -> None:
"""
Construct class.
Args:
coin_names (CoinNames object): Coin names
ss58_format (int) : SS58 format
"""
self.m_coin_names = coin_names
self.m_ss58_format = ss58_format
self.m_addr_params = {"ss58_format": ss58_format}
def CoinNames(self) -> UtilsCoinNames:
"""
Get coin names.
Returns:
CoinNames object: CoinNames object
"""
return self.m_coin_names
def SS58Format(self) -> int:
"""
Get SS58 format.
Returns:
int: SS58 format
"""
return self.m_ss58_format
def AddrParams(self) -> Dict[str, int]:
"""
Get the address parameters.
Returns:
dict: Address parameters
"""
return self.m_addr_params
```
#### File: utils/misc/sha256.py
```python
import hashlib
from typing import Any
#
# Classes
#
class Sha256:
"""
SHA256 class.
It computes SHA256 of the given data.
"""
handle: Any
def __init__(self) -> None:
"""Construct class."""
self.handle = hashlib.sha256()
def Update(self,
data_bytes: bytes) -> None:
"""
Update digest.
Args:
data_bytes (bytes): Data bytes
"""
self.handle.update(data_bytes)
def Digest(self) -> bytes:
"""
Get the computed digest.
Returns:
bytes: Digest bytes
"""
return self.handle.digest()
```
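A quick consistency check of the wrapper above against `hashlib` used directly; the module path is taken from the file header.
```python
# Update in chunks, then compare the digest with a one-shot hashlib call.
import hashlib
from bip_utils.utils.misc.sha256 import Sha256

sha = Sha256()
sha.Update(b"hello ")
sha.Update(b"world")
assert sha.Digest() == hashlib.sha256(b"hello world").digest()
```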
#### File: utils/mnemonic/mnemonic.py
```python
from __future__ import annotations
from enum import Enum
from typing import List
class MnemonicLanguages(Enum):
"""Base enum for mnemonic languages."""
class Mnemonic:
"""
Mnemonic class. It represents a generic mnemonic phrase.
It acts as a simple container with some helper functions, so it doesn't validate the given mnemonic.
"""
m_mnemonic_list: List[str]
@classmethod
def FromString(cls,
mnemonic_str: str) -> Mnemonic:
"""
Create a class from mnemonic string.
Args:
mnemonic_str (str): Mnemonic string
Returns:
Mnemonic: Mnemonic object
"""
return cls.FromList(mnemonic_str.lower().split(" "))
@classmethod
def FromList(cls,
mnemonic_list: List[str]) -> Mnemonic:
"""
Create a class from mnemonic list.
Args:
mnemonic_list (list[str]): Mnemonic list
Returns:
Mnemonic: Mnemonic object
"""
return cls(mnemonic_list)
def __init__(self,
mnemonic_list: List[str]) -> None:
"""
Construct class.
Args:
mnemonic_list (list[str]): Mnemonic list
"""
self.m_mnemonic_list = mnemonic_list
def WordsCount(self) -> int:
"""
Get the words count.
Returns:
int: Words count
"""
return len(self.m_mnemonic_list)
def ToList(self) -> List[str]:
"""
Get the mnemonic as a list.
Returns:
list[str]: Mnemonic as a list
"""
return self.m_mnemonic_list
def ToStr(self) -> str:
"""
Get the mnemonic as a string.
Returns:
str: Mnemonic as a string
"""
return " ".join(self.m_mnemonic_list)
def __str__(self) -> str:
"""
Get the mnemonic as a string.
Returns:
str: Mnemonic as a string
"""
return self.ToStr()
```
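A usage sketch of the container above; the phrase is an arbitrary example, since the class does not validate the mnemonic it holds.
```python
# FromString lower-cases and splits the phrase; no validation is performed.
from bip_utils.utils.mnemonic.mnemonic import Mnemonic

mnemonic = Mnemonic.FromString("Legal Winner Thank Year Wave")
print(mnemonic.WordsCount())   # 5
print(mnemonic.ToList())       # ['legal', 'winner', 'thank', 'year', 'wave']
print(str(mnemonic))           # legal winner thank year wave
```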
#### File: tests/addr/test_addr_base.py
```python
import binascii
#
# Helper class for IAddrEncoder child classes, which share the same tests
#
class AddrBaseTestHelper:
# Test encode key
@staticmethod
def test_encode_key(ut_class, addr_enc_class, pub_key_class, test_vector):
for test in test_vector:
key_bytes = binascii.unhexlify(test["pub_key"])
# Test with bytes and public key object
ut_class.assertEqual(test["address"], addr_enc_class.EncodeKey(key_bytes,
**test["address_params"]))
ut_class.assertEqual(test["address"], addr_enc_class.EncodeKey(pub_key_class.FromBytes(key_bytes),
**test["address_params"]))
# Test decode address
@staticmethod
def test_decode_addr(ut_class, addr_dec_class, test_vector):
for test in test_vector:
dec_bytes = binascii.unhexlify(test["address_dec"])
ut_class.assertEqual(dec_bytes, addr_dec_class.DecodeAddr(test["address"],
**test["address_params"]))
# Test invalid decoding
@staticmethod
def test_invalid_dec(ut_class, addr_dec_class, addr_params, test_vector):
for addr in test_vector:
ut_class.assertRaises(ValueError, addr_dec_class.DecodeAddr, addr, **addr_params)
# Test invalid keys
@staticmethod
def test_invalid_keys(ut_class, addr_enc_class, addr_params, test_vector_inv_types, test_vector_inv_keys):
# Invalid key types
for key in test_vector_inv_types:
ut_class.assertRaises(TypeError, addr_enc_class.EncodeKey, key, **addr_params)
# Invalid public keys
for key in test_vector_inv_keys:
ut_class.assertRaises(ValueError, addr_enc_class.EncodeKey, key, **addr_params)
# Test invalid parameters (decoding)
@staticmethod
def test_invalid_params_dec(ut_class, addr_dec_class, addr, err_params, ex_type):
ut_class.assertRaises(ex_type, addr_dec_class.DecodeAddr, addr, **err_params)
# Test invalid parameters (encoding)
@staticmethod
def test_invalid_params_enc(ut_class, addr_enc_class, pub_key, err_params, ex_type):
ut_class.assertRaises(ex_type, addr_enc_class.EncodeKey, pub_key, **err_params)
```
#### File: tests/addr/test_P2TR.py
```python
import unittest
from bip_utils import CoinsConf, P2TRAddrDecoder, P2TRAddrEncoder
from tests.addr.test_addr_base import AddrBaseTestHelper
from tests.addr.test_addr_const import TEST_SECP256K1_ADDR_INVALID_KEY_TYPES
from tests.ecc.test_ecc import TEST_VECT_SECP256K1_PUB_KEY_INVALID, Secp256k1PublicKey
# Some random public keys
TEST_VECT = [
#
# Main nets
#
{
"pub_key": b"<KEY>",
"address_dec": b"2771c09790b183d19c4a848282a37cb18b6aaf7edd863a689713bc7254ece2b7",
"address_params": {"hrp": CoinsConf.BitcoinMainNet.Params("p2tr_hrp")},
"address": "bc1pyacup9uskxpar8z2sjpg9gmukx9k4tm7mkrr56yhzw78y48vu2msq4xugp",
},
{
"pub_key": b"<KEY>",
"address_dec": b"<KEY>",
"address_params": {"hrp": CoinsConf.BitcoinMainNet.Params("p2tr_hrp")},
"address": "bc1psrgptjwnz4z8gqmt0fd4gkfc04cvhdyngdy4ya436v3zxhrp9dmsd05jqz",
},
#
# Test nets
#
{
"pub_key": b"02339193c34cd8ecb21ebd48af64ead71d78213470d61d7274f932489d6ba21bd3",
"address_dec": b"0449445395669a6af387056764a5a5c41d68c5fe9cdaca6d11fe85352f331014",
"address_params": {"hrp": CoinsConf.BitcoinTestNet.Params("p2tr_hrp")},
"address": "tb1pq3y5g5u4v6dx4uu8q4nkffd9cswk3307nndv5mg3l6zn2tenzq2qufyzlx",
},
]
# Tests for decoding with invalid strings
TEST_VECT_DEC_INVALID = [
# Invalid HRP
"<KEY>",
# Invalid witness version
"bc1zyacup9uskxpar8z2sjpg9gmukx9k4tm7mkrr56yhzw78y48vu2msgglnx2",
# No separator
"bcpyacup9uskxpar8z2sjpg9gmukx9k4tm7mkrr56yhzw78y48vu2msq4xugp",
# Invalid checksum
"bc1pyacup9uskxpar8z2sjpg9gmukx9k4tm7mkrr56yhzw78y48vu2msjsmz2a",
# Invalid encoding
"<KEY>",
# Invalid lengths
"bc1pw8qf0y93s0gecj5ys2p2xl933d427lkascax39cnh3e9fm8zkude9hqn",
"<KEY>",
]
#
# Tests
#
class P2TRTests(unittest.TestCase):
# Test encode key
def test_encode_key(self):
AddrBaseTestHelper.test_encode_key(self, P2TRAddrEncoder, Secp256k1PublicKey, TEST_VECT)
# Test decode address
def test_decode_addr(self):
AddrBaseTestHelper.test_decode_addr(self, P2TRAddrDecoder, TEST_VECT)
# Test invalid decoding
def test_invalid_dec(self):
AddrBaseTestHelper.test_invalid_dec(self,
P2TRAddrDecoder,
{"hrp": CoinsConf.BitcoinMainNet.Params("p2tr_hrp")},
TEST_VECT_DEC_INVALID)
# Test invalid keys
def test_invalid_keys(self):
AddrBaseTestHelper.test_invalid_keys(self,
P2TRAddrEncoder,
{"hrp": ""},
TEST_SECP256K1_ADDR_INVALID_KEY_TYPES,
TEST_VECT_SECP256K1_PUB_KEY_INVALID)
```
#### File: tests/bech32/test_segwit_bech32.py
```python
import binascii
import unittest
from bip_utils import Bech32ChecksumError, SegwitBech32Decoder, SegwitBech32Encoder
# Some random public keys
TEST_VECT = [
{
"raw": b"<KEY>",
"encode": "<KEY>",
},
{
"raw": b"30ea99599334801bf09d753af38ba546800bea8b",
"encode": "<KEY>",
},
{
"raw": b"<KEY>",
"encode": "bc1qrz46a4gt0sghvvyt4gy5kp2rswmhtufv6sdq9v",
},
{
"raw": b"5788df3047dd2c2545eee12784e6212745916bb7",
"encode": "bc1q27yd7vz8m5kz230wuyncfe3pyazez6ah58yzy0",
},
{
"raw": b"3a3eff6f41ce759a8dd95fc1a2d762077f4f3b64",
"encode": "bc1q8gl07m6pee6e4rwetlq694mzqal57wmyadd9sn",
},
{
"raw": b"<KEY>",
"encode": "bc1qxa2jqcampw4y9wgswyklq6upfwfg4z8s5m4v3v",
},
{
"raw": b"f9ce94eab4ed454dd0077e3dc24bdfb8d5df4008",
"encode": "bc1ql88ff645a4z5m5q80c7uyj7lhr2a7sqgtss7ek",
},
{
"raw": b"29595a3c78760fe90fe883b922f353b67441d28d",
"encode": "tb1q99v450rcwc87jrlgswuj9u6nke6yr55drpxuj0",
},
{
"raw": b"b819a85f25b116c2f7e64416a55b8d49b744d209",
"encode": "tb1qhqv6she9kytv9alxgst22kudfxm5f5sf2lgpc6",
},
{
"raw": b"904c82e2c1a8508ba784e4e53e195b5047682e87",
"encode": "tb1qjpxg9ckp4pgghfuyunjnux2m2prkst580chf9n",
},
]
# Tests for Segwit encoded addresses that are not valid from BIP-0173 page, plus a couple for better code coverage
TEST_VECT_ADDR_INVALID = [
#
# From BIP-0173 page (removed the ones that collide with BIP-0350)
#
# Invalid human-readable part
{
"addr": "tc1qw508d6qejxtdg4y5r3zarvary0c5xw7kg3g4ty",
"hrp": "tb",
"ex": ValueError,
},
# Invalid checksum
{
"addr": "bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t5",
"hrp": "bc",
"ex": Bech32ChecksumError,
},
# Non-zero padding in 8-to-5 conversion
{
"addr": "tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3pjxtptv",
"hrp": "tb",
"ex": ValueError,
},
#
# From BIP-0350 page
#
# Invalid human-readable part
{
"addr": "tc1p0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vq5zuyut",
"hrp": "tb",
"ex": ValueError,
},
# Invalid checksum (Bech32 instead of Bech32m)
{
"addr": "bc1p0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vqh2y7hd",
"hrp": "bc",
"ex": Bech32ChecksumError,
},
# Invalid checksum (Bech32 instead of Bech32m)
{
"addr": "<KEY>",
"hrp": "tb",
"ex": Bech32ChecksumError,
},
# Invalid checksum (Bech32 instead of Bech32m)
{
"addr": "<KEY>",
"hrp": "bc",
"ex": Bech32ChecksumError,
},
# Invalid checksum (Bech32m instead of Bech32)
{
"addr": "<KEY>",
"hrp": "bc",
"ex": Bech32ChecksumError,
},
# Invalid checksum (Bech32m instead of Bech32)
{
"addr": "<KEY>",
"hrp": "tb",
"ex": Bech32ChecksumError,
},
# Invalid character in checksum
{
"addr": "<KEY>",
"hrp": "bc",
"ex": ValueError,
},
# Invalid witness version
{
"addr": "BC<KEY>",
"hrp": "bc",
"ex": ValueError,
},
# Invalid program length (1 byte)
{
"addr": "bc1pw5dgrnzv",
"hrp": "bc",
"ex": ValueError,
},
# Invalid program length (41 bytes)
{
"addr": "<KEY>",
"hrp": "bc",
"ex": ValueError,
},
# Invalid program length for witness version 0
{
"addr": "<KEY>",
"hrp": "bc",
"ex": ValueError,
},
# Mixed case
{
"addr": "<KEY>",
"hrp": "tb",
"ex": ValueError,
},
# Zero padding of more than 4 bits
{
"addr": "<KEY>",
"hrp": "bc",
"ex": ValueError,
},
# Non-zero padding in 8-to-5 conversion
{
"addr": "<KEY>",
"hrp": "tb",
"ex": ValueError,
},
# Empty data section
{
"addr": "bc1gmk9yu",
"hrp": "bc",
"ex": ValueError,
},
#
# Added for improving code coverage
#
# Invalid HRP characters
{
"addr": "t 1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7",
"hrp": "tb",
"ex": ValueError,
},
# No separator
{
"addr": "tbqrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7",
"hrp": "tb",
"ex": ValueError,
},
# Empty HRP
{
"addr": "qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7",
"hrp": "tb",
"ex": ValueError,
},
]
#
# Tests
#
class SegwitBech32Tests(unittest.TestCase):
# Test decoder
def test_decoder(self):
for test in TEST_VECT:
# Test decoder
hrp = test["encode"][:test["encode"].find("1")]
wit_ver, wit_prog = SegwitBech32Decoder.Decode(hrp, test["encode"])
self.assertEqual(wit_ver, 0)
self.assertEqual(binascii.hexlify(wit_prog), test["raw"])
# Test encoder
def test_encoder(self):
for test in TEST_VECT:
# Test encoder
hrp = test["encode"][:test["encode"].find("1")]
enc = SegwitBech32Encoder.Encode(hrp, 0, binascii.unhexlify(test["raw"]))
self.assertEqual(test["encode"], enc)
# Test invalid address
def test_invalid_addr(self):
for test in TEST_VECT_ADDR_INVALID:
self.assertRaises(test["ex"], SegwitBech32Decoder.Decode, test["hrp"], test["addr"])
``` |
{
"source": "3rdIteration/emip3-python",
"score": 3
} |
#### File: 3rdIteration/emip3-python/emip3.py
```python
import hashlib
from Crypto.Cipher import ChaCha20_Poly1305
import binascii
def encryptWithPassword (password, saltHex, nonceHex, data):
salt = binascii.unhexlify(saltHex)
if len(salt) != 32:
raise ValueError("Salt length must be 32 bytes")
nonce = binascii.unhexlify(nonceHex)
if len(nonce) != 12:
raise ValueError("Salt length must be 12 bytes")
key = <KEY>('sha512', password, salt, 19162, 32)
cipher = ChaCha20_Poly1305.new(key=key, nonce=nonce)
ciphertext, tag = cipher.encrypt_and_digest(data)
return saltHex + nonceHex + tag.hex().encode() + ciphertext.hex().encode()
def decryptWithPassword(password, ciphertextHex):
saltHex = ciphertextHex[:64]
nonceHex = ciphertextHex[64:88]
tagHex = ciphertextHex[88:120]
ciphertextHex = ciphertextHex[120:]
salt = binascii.unhexlify(saltHex)
nonce = binascii.unhexlify(nonceHex)
tag = binascii.unhexlify(tagHex)
ciphertext = binascii.unhexlify(ciphertextHex)
key = <KEY>('sha512', password, salt, 19162, 32)
cipher = ChaCha20_Poly1305.new(key=key, nonce=nonce)
plaintext = cipher.decrypt_and_verify(ciphertext, tag)
return plaintext
``` |
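A round-trip sketch for the two functions above. Note that the key-derivation call is elided as `<KEY>` in the source; with the real KDF in place, this is the intended usage. The salt and nonce hex strings are fixed example values.
```python
# Encrypt then decrypt with the same password; salt/nonce are example values only.
from emip3 import decryptWithPassword, encryptWithPassword

password = b"example password"
salt_hex = b"00" * 32    # 32-byte salt encoded as 64 hex characters
nonce_hex = b"00" * 12   # 12-byte nonce encoded as 24 hex characters

ciphertext_hex = encryptWithPassword(password, salt_hex, nonce_hex, b"secret data")
assert decryptWithPassword(password, ciphertext_hex) == b"secret data"
```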
{
"source": "3rdIteration/py_crypto_hd_wallet",
"score": 3
} |
#### File: py_crypto_hd_wallet/docs/create_doc.py
```python
import os
import shutil
from typing import List
#
# Constants
#
PROJECT: str = "py_crypto_hd_wallet"
DOC_FOLDER: str = os.path.join(".", PROJECT)
SRC_FOLDER: str = os.path.join("..", PROJECT)
DOC_EXT: str = ".rst"
SRC_EXT: str = ".py"
DOC_INDEX_FILE: str = "index" + DOC_EXT
UNDERLINE_CHAR: str = "="
TOCTREE_MAX_DEPTH: int = 10
DOC_FILE_TEMPLATE: str = """{module_name}
{title_underline}
.. automodule:: {module_path}
:members:
:undoc-members:
:show-inheritance:
"""
DOC_INDEX_TEMPLATE: str = """{index_name}
{title_underline}
.. toctree::
:maxdepth: {toctree_max_depth}
{modules_list}
"""
#
# Functions
#
def create_doc_main_dir() -> None:
shutil.rmtree(DOC_FOLDER, ignore_errors=True)
os.mkdir(DOC_FOLDER)
def is_dir_empty(d: str) -> bool:
return listdir_dirs(d) == [] and listdir_files(d) == []
def is_dir_valid(d: str) -> bool:
return not os.path.basename(d).startswith(("__", "."))
def is_file_valid(f: str) -> bool:
return not os.path.basename(f).startswith(("_", ".")) and f.find(SRC_EXT) != -1
def listdir_files(d: str) -> List[str]:
elems = [os.path.join(d, e) for e in os.listdir(d)]
return [e for e in elems
if os.path.isfile(e) and is_file_valid(e)]
def listdir_dirs(d: str) -> List[str]:
elems = [os.path.join(d, e) for e in os.listdir(d)]
return [e for e in elems
if os.path.isdir(e) and is_dir_valid(e) and not is_dir_empty(e)]
def src_to_doc_path(p: str) -> str:
return p.replace(SRC_FOLDER, DOC_FOLDER)
def src_to_doc_file(f: str) -> str:
return src_to_doc_path(f).replace(SRC_EXT, DOC_EXT)
def create_doc_dir(d: str) -> None:
doc_dir = src_to_doc_path(d)
os.mkdir(doc_dir)
print(f"Create doc directory: {doc_dir}")
def get_index_name(f: str) -> str:
return os.path.basename(f)
def get_index_modules_list(dirs: List[str], files: List[str]) -> str:
elems = list(map(lambda f: " " + get_module_name(f) + "/" + DOC_INDEX_FILE, dirs)) + \
list(map(lambda f: " " + get_module_name(f), files))
elems.sort()
return "\n".join(elems)
def get_module_name(f: str) -> str:
return os.path.basename(f).replace(DOC_EXT, "").replace(SRC_EXT, "")
def get_module_path(f: str) -> str:
return PROJECT + "." + f.replace(DOC_EXT, "").replace(DOC_FOLDER, "").replace("/", ".").replace("\\", ".")[1:]
def get_title_underline(m: str) -> str:
return UNDERLINE_CHAR * len(m)
def create_doc_file(f: str) -> None:
doc_file = src_to_doc_file(f)
with open(doc_file, "w") as fout:
module_name = get_module_name(doc_file)
fout.write(DOC_FILE_TEMPLATE.format(module_name=module_name,
title_underline=get_title_underline(module_name),
module_path=get_module_path(doc_file)))
print(f"Create doc file: {doc_file}")
def create_doc_index(d: str, dirs: List[str], files: List[str]) -> None:
if len(dirs) == 0 and len(files) == 0:
return
index_file = os.path.join(src_to_doc_path(d), DOC_INDEX_FILE)
with open(index_file, "w") as fout:
index_name = get_index_name(d)
fout.write(DOC_INDEX_TEMPLATE.format(index_name=index_name,
title_underline=get_title_underline(index_name),
toctree_max_depth=TOCTREE_MAX_DEPTH,
modules_list=get_index_modules_list(dirs, files)))
print(f"Create index file: {index_file}")
def create_doc(d: str) -> None:
files = listdir_files(d)
dirs = listdir_dirs(d)
for f in files:
create_doc_file(f)
create_doc_index(d, dirs, files)
for d in dirs:
create_doc_dir(d)
create_doc(d)
#
# Script
#
create_doc_main_dir()
create_doc(SRC_FOLDER)
```
#### File: py_crypto_hd_wallet/bip/hd_wallet_bip_factory.py
```python
from typing import Type, Union
from bip_utils import (
MnemonicChecksumError, Bip39MnemonicGenerator, Bip39SeedGenerator,
Bip32KeyError, Bip44, Bip49, Bip84, Bip86
)
from bip_utils.bip.bip44_base import Bip44Base
from py_crypto_hd_wallet.bip.hd_wallet_bip_enum import (
HdWalletBipWordsNum, HdWalletBipLanguages,
HdWalletBip44Coins, HdWalletBip49Coins, HdWalletBip84Coins, HdWalletBip86Coins
)
from py_crypto_hd_wallet.bip.hd_wallet_bip import HdWalletBip
from py_crypto_hd_wallet.common import HdWalletBase
from py_crypto_hd_wallet.utils import Utils
class HdWalletBipFactory:
"""
HD wallet BIP factory class.
It allows an HdWalletBip to be created in different ways.
"""
m_bip_coin: Union[HdWalletBip44Coins, HdWalletBip49Coins, HdWalletBip84Coins, HdWalletBip86Coins]
m_bip_cls: Type[Bip44Base]
def __init__(self,
coin_type: Union[HdWalletBip44Coins,
HdWalletBip49Coins,
HdWalletBip84Coins,
HdWalletBip86Coins]) -> None:
"""
Construct class.
Args:
coin_type (HdWalletBip44Coins, HdWalletBip49Coins, HdWalletBip84Coins, HdWalletBip86Coins): Coin type
Raises:
TypeError: If coin_type is not one of the accepted enums
"""
self.m_bip_coin = coin_type
self.m_bip_cls = self.__BipClassFromCoinType(coin_type)
def CreateRandom(self,
wallet_name: str,
words_num: HdWalletBipWordsNum = HdWalletBipWordsNum.WORDS_NUM_24,
lang: HdWalletBipLanguages = HdWalletBipLanguages.ENGLISH) -> HdWalletBase:
"""
Create wallet randomly.
Args:
wallet_name (str) : Wallet name
words_num (HdWalletBipWordsNum, optional): Words number (default: 24)
lang (HdWalletBipLanguages, optional) : Language (default: English)
Returns:
HdWalletBase object: HdWalletBase object
Raises:
TypeError: If words number is not a HdWalletBipWordsNum enum or language is not a HdWalletBipLanguages enum
"""
if not isinstance(words_num, HdWalletBipWordsNum):
raise TypeError("Words number is not an enumerative of HdWalletBipWordsNum")
if not isinstance(lang, HdWalletBipLanguages):
raise TypeError("Language is not an enumerative of HdWalletBipLanguages")
mnemonic = Bip39MnemonicGenerator(lang).FromWordsNumber(words_num)
return self.CreateFromMnemonic(wallet_name, mnemonic.ToStr())
def CreateFromMnemonic(self,
wallet_name: str,
mnemonic: str,
passphrase: str = "") -> HdWalletBase:
"""
Create wallet from mnemonic.
Args:
wallet_name (str) : Wallet name
mnemonic (str) : Mnemonic
passphrase (str, optional): Passphrase for protecting mnemonic, empty if not specified
Returns:
HdWalletBase object: HdWalletBase object
Raises:
ValueError: If the mnemonic is not valid
"""
try:
seed_bytes = Bip39SeedGenerator(mnemonic).Generate(passphrase)
except (ValueError, MnemonicChecksumError) as ex:
raise ValueError(f"Invalid mnemonic: {mnemonic}") from ex
bip_obj = self.m_bip_cls.FromSeed(seed_bytes, self.m_bip_coin)
return HdWalletBip(wallet_name=wallet_name,
bip_obj=bip_obj,
mnemonic=mnemonic,
passphrase=passphrase,
seed_bytes=seed_bytes)
def CreateFromSeed(self,
wallet_name: str,
seed_bytes: bytes) -> HdWalletBase:
"""
Create wallet from seed.
Args:
wallet_name (str) : Wallet name
seed_bytes (bytes): Seed bytes
Returns:
HdWalletBase object: HdWalletBase object
Raises:
ValueError: If the seed is not valid
"""
bip_obj = self.m_bip_cls.FromSeed(seed_bytes, self.m_bip_coin)
return HdWalletBip(wallet_name=wallet_name,
bip_obj=bip_obj,
seed_bytes=seed_bytes)
def CreateFromExtendedKey(self,
wallet_name: str,
ex_key_str: str) -> HdWalletBase:
"""
Create wallet from extended key.
Args:
wallet_name (str): Wallet name
ex_key_str (str) : Extended key string
Returns:
HdWalletBase object: HdWalletBase object
Raises:
ValueError: If the extended key is not valid
"""
try:
bip_obj = self.m_bip_cls.FromExtendedKey(ex_key_str, self.m_bip_coin)
except Bip32KeyError as ex:
raise ValueError(f"Invalid extended key: {ex_key_str}") from ex
return HdWalletBip(wallet_name=wallet_name,
bip_obj=bip_obj)
def CreateFromPrivateKey(self,
wallet_name: str,
priv_key: bytes) -> HdWalletBase:
"""
Create wallet from private key.
Args:
wallet_name (str): Wallet name
priv_key (bytes) : Private key bytes
Returns:
HdWalletBase object: HdWalletBase object
Raises:
ValueError: If the private key is not valid
"""
try:
bip_obj = self.m_bip_cls.FromPrivateKey(priv_key, self.m_bip_coin)
except Bip32KeyError as ex:
raise ValueError(f"Invalid private key: {Utils.BytesToHexString(priv_key)}") from ex
return HdWalletBip(wallet_name=wallet_name,
bip_obj=bip_obj)
@staticmethod
def __BipClassFromCoinType(coin_type: Union[HdWalletBip44Coins,
HdWalletBip49Coins,
HdWalletBip84Coins,
HdWalletBip86Coins]) -> Type[Bip44Base]:
"""
Get BIP class from coin type.
Args:
coin_type (HdWalletBip44Coins, HdWalletBip49Coins, HdWalletBip84Coins, HdWalletBip86Coins): Coin type
Returns:
Bip44Base class: Bip44Base class
"""
if isinstance(coin_type, HdWalletBip44Coins):
return Bip44
if isinstance(coin_type, HdWalletBip49Coins):
return Bip49
if isinstance(coin_type, HdWalletBip84Coins):
return Bip84
if isinstance(coin_type, HdWalletBip86Coins):
return Bip86
raise TypeError("Coin type is not an accepted enumerative")
```
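A usage sketch of the factory above. `HdWalletBip44Coins.BITCOIN` and `HdWalletBipWordsNum.WORDS_NUM_12` are assumed enum members (they are defined in `hd_wallet_bip_enum`, which is not shown here); the mnemonic is the standard 12-word BIP39 example phrase.
```python
# Sketch only; the enum members named below are assumptions about hd_wallet_bip_enum.
from py_crypto_hd_wallet.bip.hd_wallet_bip_enum import HdWalletBip44Coins, HdWalletBipWordsNum
from py_crypto_hd_wallet.bip.hd_wallet_bip_factory import HdWalletBipFactory

factory = HdWalletBipFactory(HdWalletBip44Coins.BITCOIN)

# Random wallet with a 12-word mnemonic
wallet = factory.CreateRandom("btc_wallet", HdWalletBipWordsNum.WORDS_NUM_12)

# Wallet restored from an existing mnemonic
wallet = factory.CreateFromMnemonic(
    "btc_wallet",
    "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about",
)
```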
#### File: py_crypto_hd_wallet/monero/hd_wallet_monero.py
```python
from typing import Any, Dict, Optional, Union
from bip_utils import Monero
from bip_utils.monero.monero_subaddr import MoneroSubaddressConst
from py_crypto_hd_wallet.common import HdWalletBase
from py_crypto_hd_wallet.monero.hd_wallet_monero_enum import HdWalletMoneroDataTypes, HdWalletDataTypes
from py_crypto_hd_wallet.monero.hd_wallet_monero_keys import HdWalletMoneroKeys
from py_crypto_hd_wallet.monero.hd_wallet_monero_subaddr import HdWalletMoneroSubaddresses
from py_crypto_hd_wallet.utils import Utils
class HdWalletMoneroConst:
"""Class container for HD wallet Monero constants."""
# Map data types to dictionary key
DATA_TYPE_TO_DICT_KEY: Dict[HdWalletMoneroDataTypes, str] = {
HdWalletMoneroDataTypes.WALLET_NAME: "wallet_name",
HdWalletMoneroDataTypes.COIN_NAME: "coin_name",
HdWalletMoneroDataTypes.MNEMONIC: "mnemonic",
HdWalletMoneroDataTypes.SEED_BYTES: "seed_bytes",
HdWalletMoneroDataTypes.KEY: "key",
HdWalletMoneroDataTypes.ACCOUNT_IDX: "account_idx",
HdWalletMoneroDataTypes.SUBADDRESS_OFF: "subaddress_off",
HdWalletMoneroDataTypes.SUBADDRESS: "subaddress",
}
class HdWalletMonero(HdWalletBase):
"""
HD wallet Monero class.
It basically wraps bip_utils, allowing a complete Monero wallet to be generated.
"""
m_monero_obj: Monero
m_wallet_data: Dict[str, Any]
#
# Public methods
#
def __init__(self,
wallet_name: str,
monero_obj: Monero,
mnemonic: str = "",
seed_bytes: bytes = b"") -> None:
"""
Construct class.
Args:
wallet_name (str) : Wallet name
monero_obj (Monero object) : Monero object
mnemonic (str, optional) : Mnemonic, empty if not specified
seed_bytes (bytes, optional): Seed_bytes, empty if not specified
"""
# Initialize members
self.m_monero_obj = monero_obj
self.m_wallet_data = {}
# Initialize data
self.__InitData(wallet_name, mnemonic, seed_bytes)
def Generate(self,
**kwargs: Any) -> None:
"""
Generate wallet keys and addresses.
Other Parameters:
acc_idx (int, optional): Account index (default: 0)
subaddr_num (int, optional): Subaddress number (default: 0)
subaddr_off (int, optional): Starting subaddress index (default: 0)
"""
acc_idx = kwargs.get("acc_idx", 0)
subaddr_num = kwargs.get("subaddr_num", 0)
subaddr_off = kwargs.get("subaddr_off", 0)
# Check parameters
if acc_idx < 0 or acc_idx > MoneroSubaddressConst.SUBADDR_MAX_IDX:
raise ValueError("Account index shall be greater or equal to zero and less than 2^32")
if subaddr_num < 0 or subaddr_num > MoneroSubaddressConst.SUBADDR_MAX_IDX:
raise ValueError("Subaddress number shall be greater or equal to zero and less than 2^32")
if subaddr_off < 0 or ((subaddr_off + subaddr_num) > MoneroSubaddressConst.SUBADDR_MAX_IDX):
raise ValueError("Subaddress offset shall be greater or equal to zero and less than 2^32")
# Set keys
self.__SetKeys(HdWalletMoneroDataTypes.KEY, self.m_monero_obj)
if subaddr_num > 0:
# Set subaddresses data
self.__SetData(HdWalletMoneroDataTypes.ACCOUNT_IDX, acc_idx)
self.__SetData(HdWalletMoneroDataTypes.SUBADDRESS_OFF, subaddr_off)
# Set subaddresses
self.__SetData(HdWalletMoneroDataTypes.SUBADDRESS,
HdWalletMoneroSubaddresses(self.m_monero_obj,
acc_idx,
subaddr_num,
subaddr_off))
def IsWatchOnly(self) -> bool:
"""
Get if the wallet is watch-only.
Returns:
bool: True if watch-only, false otherwise
"""
return self.m_monero_obj.IsWatchOnly()
def ToDict(self) -> Dict[str, Any]:
"""
Get wallet data as a dictionary.
Returns:
dict: Wallet data as a dictionary
"""
wallet_dict = {}
# Build dictionary
for key, value in self.m_wallet_data.items():
if isinstance(value, (HdWalletMoneroKeys, HdWalletMoneroSubaddresses)):
wallet_dict[key] = value.ToDict()
else:
wallet_dict[key] = value
return wallet_dict
def HasData(self,
data_type: HdWalletDataTypes) -> bool:
"""
Get if the wallet data of the specified type is present.
Args:
data_type (HdWalletDataTypes): Data type
Returns:
bool: True if present, false otherwise
Raises:
TypeError: If data type is not of the correct enumerative type
"""
if not isinstance(data_type, HdWalletMoneroDataTypes):
raise TypeError("Data type is not an enumerative of HdWalletMoneroDataTypes")
dict_key = HdWalletMoneroConst.DATA_TYPE_TO_DICT_KEY[HdWalletMoneroDataTypes(data_type)]
return dict_key in self.m_wallet_data
def GetData(self,
data_type: HdWalletDataTypes) -> Optional[Any]:
"""
Get wallet data of the specified type.
Args:
data_type (HdWalletDataTypes): Data type
Returns:
Any: Wallet data (it depends on the specific data)
None: If not found
Raises:
TypeError: If data type is not of the correct enumerative type
"""
if self.HasData(data_type):
return self.m_wallet_data[
HdWalletMoneroConst.DATA_TYPE_TO_DICT_KEY[HdWalletMoneroDataTypes(data_type)]
]
return None
#
# Private methods
#
def __InitData(self,
wallet_name: str,
mnemonic: str,
seed_bytes: bytes) -> None:
"""
Initialize data.
Args:
wallet_name (str): Wallet name
mnemonic (str) : Mnemonic
seed_bytes (bytes) : Seed_bytes
"""
# Set wallet name
self.__SetData(HdWalletMoneroDataTypes.WALLET_NAME, wallet_name)
# Set coin name
coin_names = self.m_monero_obj.CoinConf().CoinNames()
self.__SetData(HdWalletMoneroDataTypes.COIN_NAME, f"{coin_names.Name()} ({coin_names.Abbreviation()})")
# Set optional data if specified
if mnemonic != "":
self.__SetData(HdWalletMoneroDataTypes.MNEMONIC, mnemonic)
if seed_bytes != b"":
self.__SetData(HdWalletMoneroDataTypes.SEED_BYTES, Utils.BytesToHexString(seed_bytes))
def __SetData(self,
data_type: HdWalletMoneroDataTypes,
data_value: Union[int, str, HdWalletMoneroKeys, HdWalletMoneroSubaddresses]) -> None:
"""
Set wallet data.
Args:
data_type (HdWalletMoneroDataTypes) : Data type
data_value (int or str or HdWalletMoneroKeys or HdWalletMoneroSubaddresses): Data value
"""
dict_key = HdWalletMoneroConst.DATA_TYPE_TO_DICT_KEY[data_type]
self.m_wallet_data[dict_key] = data_value
def __SetKeys(self,
data_type: HdWalletMoneroDataTypes,
monero_obj: Monero) -> None:
"""
Add keys to wallet data.
Args:
data_type (HdWalletMoneroDataTypes): Data type
monero_obj (Monero object) : Monero object
"""
self.__SetData(data_type, HdWalletMoneroKeys(monero_obj))
``` |
{
"source": "3rduncle/knowledgeflow",
"score": 3
} |
#### File: knowledgeflow/example/text_reader_test.py
```python
import sys
import tensorflow as tf
def read_single_line_example(filename):
filename_queue = tf.train.string_input_producer([filename], num_epochs=1)
reader = tf.TextLineReader()
line, value = reader.read(filename_queue)
return line, value
line_t, value_t = read_single_line_example(sys.argv[0])
# Pack the tensors returned by the reader into batched tensors.
# Note: if the remaining samples of an input tensor are not enough to form a
# full batch, tf.train.batch normally starts over from the head of the input
# tensor. However, when the input is a reader tensor, it raises an
# OutOfRangeError at the end of the data, which causes the last few samples
# to be dropped.
batch_line_t, batch_value_t = tf.train.batch(
[line_t, value_t],
batch_size = 4,
num_threads = 2,
capacity = 2000,
)
sess = tf.Session()
init = tf.initialize_all_variables()
sess.run(init)
tf.train.start_queue_runners(sess=sess)
while 1:
try:
line, value = sess.run([batch_line_t, batch_value_t])
print line, value
except tf.python.framework.errors.OutOfRangeError:
print 'Done'
break
```
#### File: knowledgeflow/question_answer/sentences_similarity.py
```python
import sys
import random
import logging
import numpy as np
import ConfigParser
from collections import OrderedDict
import tensorflow as tf
random.seed(2)
np.random.seed(9527)
from knowledgeflow.utility import qa_utils
from knowledgeflow.utility.qa_utils import QaPairsTrain, QaPairsTest
from knowledgeflow.utility.utility import build_vocab, embedding_layer_weights, load_word2vec
#from lcd import LCDBase, Conv1DLCD, Conv1DConcatLCD, margin_hinge, letor_binary_crossentropy
from apn import APNBase
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s:%(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
stream=sys.stderr)
class SentenceSimilarityTask(object):
def __init__(self, path):
self.loadConfig(path)
self.wdim = self.conf.getint('task', 'wdim')
train = self.conf.get('task', 'train')
print 'train path', train
try:
dev = self.conf.get('task', 'dev')
except:
dev = None
test = self.conf.get('task', 'test')
print 'test path', test
try:
self.predict = self.conf.get('task', 'predict')
except:
self.predict = None
self.loadData(train, test=test, dev=dev)
self.batch_size = self.conf.getint('train', 'batch_size')
self.epochs = self.conf.getint('train', 'epochs')
def loadConfig(self, path):
self.conf = ConfigParser.ConfigParser()
self.conf.read(path)
def loadData(self, train, test=None, dev=None):
self.qapairs = OrderedDict()
self.qapairs['train'] = QaPairsTrain(train)
if test:
self.qapairs['test'] = QaPairsTest(test)
if dev:
self.qapairs['dev'] = QaPairsTest(dev)
self.qmax = max([qa.qmax for qa in self._availableParis()])
self.amax = max([qa.amax for qa in self._availableParis()])
logging.info('Q Length %d' % self.qmax)
logging.info('A Length %d' % self.amax)
self.data = []
for name, pair in self.qapairs.items():
self.data += pair.xq_data
self.data += pair.xa_data
self.reversed_vocab, self.vocabulary = build_vocab(self.data, start_with=['<PAD>'])
map(lambda x: x.build(self.vocabulary, self.qmax, self.amax), self._availableParis())
#self.qapairs['train'].shuffle()
def remoteEmbedding(self):
host = self.conf.get('embedding', 'host', 'szwg-rp-nlp349.szwg01.baidu.com')
port = self.conf.getint('embedding', 'port')
method = self.conf.get('embedding', 'method', 'word2vec')
name = self.conf.get('embedding', 'name', 'en_google')
import pymongo
coll = pymongo.MongoClient(host=host,port=port)[method][name]
word2vec = load_word2vec(coll, self.reversed_vocab)
return embedding_layer_weights(self.reversed_vocab, word2vec, self.wdim)
def randomEmbedding(self):
return embedding_layer_weights(self.reversed_vocab, {}, self.wdim)
def equipModel(self):
weights = embedding_layer_weights(self.reversed_vocab, {}, self.wdim)
impl = APNBase(self.wdim)
impl.readDefaultConfig(self.conf)
impl.setEmbedding(weights[0])
self.model = impl
config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
self.sess = tf.Session(config=config)
with self.sess.as_default():
tf.set_random_seed(1337)
print self.sess.run(tf.random_uniform([1]))
impl.build()
self.global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = tf.train.AdamOptimizer()
grads_and_vars = optimizer.compute_gradients(self.model.tensors['loss'])
self.train_op = optimizer.apply_gradients(grads_and_vars, global_step=self.global_step)
self.sess.run(tf.initialize_all_variables())
print self.sess.run(tf.random_uniform([1]))
def train_step(self, xq_batch, xa_batch, y_batch):
feed_dict = {
self.model.tensors['q_input']: xq_batch,
self.model.tensors['a_input']: xa_batch,
self.model.tensors['label']: y_batch
}
_, step, loss, sparsity = self.sess.run(
[self.train_op, self.global_step, self.model.tensors['loss']] + self.model.tensors['summary'],
feed_dict
)
if (step % 100 == 0):
logging.info('LOSS %f @step %d sparsity %f' % (loss, step, sparsity))
def test_step(self, xq_batch, xa_batch):
feed_dict = {
self.model.tensors['q_input']: xq_batch,
self.model.tensors['a_input']: xa_batch,
}
predict = self.sess.run([self.model.tensors['similarity']], feed_dict)
return predict
def trainEpoch(self):
self.qapairs['train'].partiteSamples()
#self.qapairs['train'].shuffle()
#self.qapairs['test'].shuffle()
best_test_map = 0
for _ in xrange(self.epochs):
dev = self.qapairs.get('dev')
test = self.qapairs.get('test')
if dev:
#MAP = dev.ndcg_score(self.model, k=20, batch_size=self.batch_size)
MAP = dev.label_ranking_average_precision_score(lambda q,a: self.test_step(q,a), batch_size=self.batch_size)
print('Dev MAP %f' % MAP)
if test:
#MAP = test.ndcg_score(self.model, k=20, batch_size=self.batch_size)
MAP = test.label_ranking_average_precision_score(lambda q,a: self.test_step(q,a), batch_size=self.batch_size)
print('Test MAP %f' % MAP)
if MAP > best_test_map:
best_test_map = MAP
if self.predict: test.dumpResult(self.predict)
self.model.export('APN%f' % MAP, self.sess)
for xq, xa, y in self.qapairs['train'].pairwiseSampling(50):
self.train_step(xq, xa, y)
print('Best Test MAP %f' % best_test_map)
def _availableParis(self):
return self.qapairs.values()
def main():
task = SentenceSimilarityTask('./configure/comment_rank.conf')
task.equipModel()
task.trainEpoch()
if __name__ == '__main__':
main()
```
#### File: knowledgeflow/utility/utility.py
```python
from __future__ import print_function
from collections import Counter
import itertools
import numpy as np
import random
import os
import re
import sys
"""
Original taken from https://github.com/dennybritz/cnn-text-classification-tf
"""
def clean_str(string, TREC=False):
"""
Tokenization/string cleaning for all datasets except for SST.
Every dataset is lower cased except for TREC
"""
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " \( ", string)
string = re.sub(r"\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip() if TREC else string.strip().lower()
def clean_str_sst(string):
"""
Tokenization/string cleaning for the SST dataset
"""
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip().lower()
class StreamDataGenerator(object):
def __init__(self, path, batch, validation = 0.1, seed = 9527):
self.fin = open(path)
self.batch = batch
self.validation = validation
self.seed = seed
self.random = random
def processor(self, process):
self.processor = process
def generate(self):
while not self.eof():
train = []
val = []
for _ in range(self.batch):
if self.random.random() > self.validation:
train.append(self.fin.readline().rstrip('\n'))
else:
val.append(self.fin.readline().rstrip('\n'))
print(len(train), len(val))
x_train, y_train = self.processor(train)
x_val, y_val = self.processor(val)
yield {'train':(x_train, y_train), 'val':(x_val, y_val)}
def reset(self):
self.fin.seek(0)
self.random.seed(self.seed)
def eof(self):
return self.fin.tell() == os.fstat(self.fin.fileno()).st_size
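# Editor's usage sketch (not part of the original module): 'corpus.txt' and the lambda
# processor below are hypothetical; they only illustrate the processor()/generate()
# contract of StreamDataGenerator defined above.
def _example_stream_data_generator():
    gen = StreamDataGenerator('corpus.txt', batch=128, validation=0.1)
    gen.processor(lambda lines: (lines, [0] * len(lines)))  # dummy (x, y) processor
    for chunk in gen.generate():
        x_train, y_train = chunk['train']
        x_val, y_val = chunk['val']
        # ... feed (x_train, y_train) to a model, evaluate on (x_val, y_val)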
def selectMaximumProbability(mat):
row, col = mat.shape
m = mat.max(axis = 1)
indices = mat == np.dot(m.reshape((row, 1)), np.ones((1, col)))
response = np.zeros_like(mat)
response[indices] = 1.0
return response
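# Editor's illustration (hypothetical helper, not in the original file): shows how
# selectMaximumProbability() turns a row-wise score matrix into a one-hot mask.
def _example_select_maximum_probability():
    scores = np.array([[0.1, 0.7, 0.2],
                       [0.5, 0.3, 0.2]])
    # each row keeps a single 1.0 in the column holding its maximum score
    return selectMaximumProbability(scores)  # -> [[0., 1., 0.], [1., 0., 0.]]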
def build_vocab(sentences, start_with=[]):
'''
Builds a vocabulary mapping from word to index based on the sentences.
Returns vocabulary mapping and inverse vocabulary mapping.
'''
# Build vocabulary
word_counts = Counter(itertools.chain(*sentences))
# Mapping from index to word
reversed_vocabulary = start_with + [x[0] for x in word_counts.most_common()]
# Mapping from word to index
start = len(start_with)
vocabulary = {x: i for i, x in enumerate(reversed_vocabulary)}
return [reversed_vocabulary, vocabulary]
def load_bin_vec(fname, vocab):
"""
Loads 300x1 word vecs from Google (Mikolov) word2vec
"""
word_vecs = {}
with open(fname, "rb") as f:
header = f.readline()
vocab_size, layer1_size = map(int, header.split())
binary_len = np.dtype('float32').itemsize * layer1_size
for line in xrange(vocab_size):
word = []
while True:
ch = f.read(1)
if ch == ' ':
word = ''.join(word)
break
if ch != '\n':
word.append(ch)
if word in vocab:
word_vecs[word] = np.fromstring(f.read(binary_len), dtype='float32')
else:
f.read(binary_len)
return word_vecs
def load_word2vec(coll, reversed_vocabulary):
import pymongo
'''
Load word2vec from local mongodb
'''
word2vec = {}
hit = 0
miss = 0
for word in reversed_vocabulary:
response = coll.find_one({'word':word})
if response:
word2vec[word] = np.array(response['vector'])
hit += 1
else:
miss += 1
print('hit %d miss %d' % (hit, miss), file=sys.stderr)
return word2vec
def embedding_layer_weights(reversed_vocabulary, word2vec, dim=300):
embedding_weights = np.array([word2vec.get(w, np.random.uniform(-0.25,0.25,dim)) for w in reversed_vocabulary])
return [embedding_weights]
def bucket(sentences, size=5):
buckets = {}
for sentence in sentences:
bucket_id = len(sentence) / size + 1
buckets.setdefault(bucket_id, []).append(sentence)
return buckets
def embedding_layer_word2vec(weights, word_idx_map):
response = []
response.append('%d %d' % weights.shape)
for word, idx in word_idx_map.items():
        response.append('%s\t%s' % (word, ' '.join(str(x) for x in weights[idx].tolist())))
return response
if __name__ == '__main__':
#client = pymongo.MongoClient()
#coll = client['word2vec']['en_google']
reversed_vocab, vocab = build_vocab([['hello', 'world'],['hello', 'python']], start_with=['<PAD>'])
print(reversed_vocab, vocab)
#word2vec = load_word2vec(coll, vocab)
#print(word2vec)
``` |
{
"source": "3rdvision/ripe-sdk",
"score": 2
} |
#### File: ripe_demo/controllers/base.py
```python
import appier
class BaseController(appier.Controller):
@appier.route("/", "GET")
@appier.route("/index", "GET")
def index(self):
return self.redirect(
self.url_for("base.simple")
)
@appier.route("/simple", "GET")
def simple(self):
client_id = appier.conf("OAUTH_ID", None)
client_secret = appier.conf("OAUTH_SECRET", None)
return self.template(
"simple.html.tpl",
url = self.field("url"),
brand = self.field("brand"),
model = self.field("model"),
variant = self.field("variant"),
version = self.field("version"),
country = self.field("country"),
currency = self.field("currency"),
client_id = self.field("client_id", client_id),
client_secret = self.field("client_secret", client_secret),
guess = self.field("guess", False, cast = bool),
guess_url = self.field("guess_url", False, cast = bool),
mode = self.field("mode", "full")
)
``` |
{
"source": "3rdwiki/mediagoblin",
"score": 2
} |
#### File: db/mongo/util.py
```python
import copy
# Imports that other modules might use
from pymongo import ASCENDING, DESCENDING
from pymongo.errors import InvalidId
from mongokit import ObjectId
from mediagoblin.db.mongo.indexes import ACTIVE_INDEXES, DEPRECATED_INDEXES
################
# Indexing tools
################
def add_new_indexes(database, active_indexes=ACTIVE_INDEXES):
"""
Add any new indexes to the database.
Args:
- database: pymongo or mongokit database instance.
- active_indexes: indexes to possibly add in the pattern of:
{'collection_name': {
'identifier': {
'index': [index_foo_goes_here],
'unique': True}}
where 'index' is the index to add and all other options are
arguments for collection.create_index.
Returns:
A list of indexes added in form ('collection', 'index_name')
"""
indexes_added = []
for collection_name, indexes in active_indexes.iteritems():
collection = database[collection_name]
collection_indexes = collection.index_information().keys()
for index_name, index_data in indexes.iteritems():
if not index_name in collection_indexes:
# Get a copy actually so we don't modify the actual
# structure
index_data = copy.copy(index_data)
index = index_data.pop('index')
collection.create_index(
index, name=index_name, **index_data)
indexes_added.append((collection_name, index_name))
return indexes_added
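# Editor's usage sketch (hypothetical database handle, not part of the original module):
# ensures the indexes declared in mediagoblin.db.mongo.indexes exist, using the helper
# defined above.
def _example_add_new_indexes():
    import pymongo
    db = pymongo.MongoClient()['mediagoblin']
    return add_new_indexes(db)  # -> e.g. [('media_entries', 'uploader_slug')]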
def remove_deprecated_indexes(database, deprecated_indexes=DEPRECATED_INDEXES):
"""
Remove any deprecated indexes from the database.
Args:
- database: pymongo or mongokit database instance.
- deprecated_indexes: the indexes to deprecate in the pattern of:
{'collection_name': {
'identifier': {
'index': [index_foo_goes_here],
'unique': True}}
(... although we really only need the 'identifier' here, as the
rest of the information isn't used in this case. But it's kept
around so we can remember what it was)
Returns:
A list of indexes removed in form ('collection', 'index_name')
"""
indexes_removed = []
for collection_name, indexes in deprecated_indexes.iteritems():
collection = database[collection_name]
collection_indexes = collection.index_information().keys()
for index_name, index_data in indexes.iteritems():
if index_name in collection_indexes:
collection.drop_index(index_name)
indexes_removed.append((collection_name, index_name))
return indexes_removed
#################
# Migration tools
#################
# The default migration registry...
#
# Don't set this yourself! RegisterMigration will automatically fill
# this with stuff via decorating methods in migrations.py
class MissingCurrentMigration(Exception):
pass
MIGRATIONS = {}
class RegisterMigration(object):
"""
Tool for registering migrations
Call like:
@RegisterMigration(33)
def update_dwarves(database):
[...]
This will register your migration with the default migration
registry. Alternately, to specify a very specific
migration_registry, you can pass in that as the second argument.
Note, the number of your migration should NEVER be 0 or less than
0. 0 is the default "no migrations" state!
"""
def __init__(self, migration_number, migration_registry=MIGRATIONS):
assert migration_number > 0, "Migration number must be > 0!"
assert migration_number not in migration_registry, \
"Duplicate migration numbers detected! That's not allowed!"
self.migration_number = migration_number
self.migration_registry = migration_registry
def __call__(self, migration):
self.migration_registry[self.migration_number] = migration
return migration
class MigrationManager(object):
"""
Migration handling tool.
Takes information about a database, lets you update the database
to the latest migrations, etc.
"""
def __init__(self, database, migration_registry=MIGRATIONS):
"""
Args:
- database: database we're going to migrate
- migration_registry: where we should find all migrations to
run
"""
self.database = database
self.migration_registry = migration_registry
self._sorted_migrations = None
def _ensure_current_migration_record(self):
"""
If there isn't a database[u'app_metadata'] mediagoblin entry
with the 'current_migration', throw an error.
"""
if self.database_current_migration() is None:
raise MissingCurrentMigration(
"Tried to call function which requires "
"'current_migration' set in database")
@property
def sorted_migrations(self):
"""
Sort migrations if necessary and store in self._sorted_migrations
"""
if not self._sorted_migrations:
self._sorted_migrations = sorted(
self.migration_registry.items(),
# sort on the key... the migration number
key=lambda migration_tuple: migration_tuple[0])
return self._sorted_migrations
def latest_migration(self):
"""
Return a migration number for the latest migration, or 0 if
there are no migrations.
"""
if self.sorted_migrations:
return self.sorted_migrations[-1][0]
else:
# If no migrations have been set, we start at 0.
return 0
def set_current_migration(self, migration_number):
"""
Set the migration in the database to migration_number
"""
# Add the mediagoblin migration if necessary
self.database[u'app_metadata'].update(
{u'_id': u'mediagoblin'},
{u'$set': {u'current_migration': migration_number}},
upsert=True)
def install_migration_version_if_missing(self):
"""
Sets the migration to the latest version if no migration
version at all is set.
"""
mgoblin_metadata = self.database[u'app_metadata'].find_one(
{u'_id': u'mediagoblin'})
if not mgoblin_metadata:
latest_migration = self.latest_migration()
self.set_current_migration(latest_migration)
def database_current_migration(self):
"""
Return the current migration in the database.
"""
mgoblin_metadata = self.database[u'app_metadata'].find_one(
{u'_id': u'mediagoblin'})
if not mgoblin_metadata:
return None
else:
return mgoblin_metadata[u'current_migration']
def database_at_latest_migration(self):
"""
See if the database is at the latest migration.
Returns a boolean.
"""
current_migration = self.database_current_migration()
return current_migration == self.latest_migration()
def migrations_to_run(self):
"""
Get a list of migrations to run still, if any.
Note that calling this will set your migration version to the
latest version if it isn't installed to anything yet!
"""
self._ensure_current_migration_record()
db_current_migration = self.database_current_migration()
return [
(migration_number, migration_func)
for migration_number, migration_func in self.sorted_migrations
if migration_number > db_current_migration]
def migrate_new(self, pre_callback=None, post_callback=None):
"""
Run all migrations.
Includes two optional args:
- pre_callback: if called, this is a callback on something to
run pre-migration. Takes (migration_number, migration_func)
as arguments
        - post_callback: if called, this is a callback on something to
run post-migration. Takes (migration_number, migration_func)
as arguments
"""
# If we aren't set to any version number, presume we're at the
# latest (which means we'll do nothing here...)
self.install_migration_version_if_missing()
for migration_number, migration_func in self.migrations_to_run():
if pre_callback:
pre_callback(migration_number, migration_func)
migration_func(self.database)
self.set_current_migration(migration_number)
if post_callback:
post_callback(migration_number, migration_func)
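# Editor's usage sketch (hypothetical migration and database handle, not part of the
# original module): wires RegisterMigration and MigrationManager together as the
# docstrings above describe.
def _example_migrate(database):
    registry = {}

    @RegisterMigration(1, registry)
    def add_dwarves_collection(db):
        db['dwarves'].insert({'name': 'Gimli'})

    manager = MigrationManager(database, registry)
    manager.set_current_migration(0)  # start from scratch so migration 1 will run
    manager.migrate_new()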
##########################
# Random utility functions
##########################
def atomic_update(table, query_dict, update_values):
table.collection.update(
query_dict,
{"$set": update_values})
def check_media_slug_used(db, uploader_id, slug, ignore_m_id):
query_dict = {'uploader': uploader_id, 'slug': slug}
if ignore_m_id is not None:
query_dict['_id'] = {'$ne': ignore_m_id}
existing_user_slug_entries = db.MediaEntry.find(
query_dict).count()
return existing_user_slug_entries
def media_entries_for_tag_slug(db, tag_slug):
return db.MediaEntry.find(
{u'state': u'processed',
u'tags.slug': tag_slug})
```
#### File: db/sql/fake.py
```python
DESCENDING = object() # a unique object for this "constant"
class InvalidId(Exception):
pass
def ObjectId(value=None):
if value is None:
return None
try:
return int(value)
except ValueError:
raise InvalidId("%r is an invalid id" % value)
```
#### File: plugins/oauth/migrations.py
```python
from sqlalchemy import MetaData, Table
from mediagoblin.db.sql.util import RegisterMigration
from mediagoblin.plugins.oauth.models import OAuthClient, OAuthToken, \
OAuthUserClient, OAuthCode
MIGRATIONS = {}
@RegisterMigration(1, MIGRATIONS)
def remove_and_replace_token_and_code(db):
metadata = MetaData(bind=db.bind)
token_table = Table('oauth__tokens', metadata, autoload=True,
autoload_with=db.bind)
token_table.drop()
code_table = Table('oauth__codes', metadata, autoload=True,
autoload_with=db.bind)
code_table.drop()
OAuthClient.__table__.create(db.bind)
OAuthUserClient.__table__.create(db.bind)
OAuthToken.__table__.create(db.bind)
OAuthCode.__table__.create(db.bind)
db.commit()
```
#### File: mediagoblin/mediagoblin/routing.py
```python
from routes import Mapper
from mediagoblin.auth.routing import auth_routes
from mediagoblin.submit.routing import submit_routes
from mediagoblin.user_pages.routing import user_routes
from mediagoblin.edit.routing import edit_routes
from mediagoblin.listings.routing import tag_routes
from mediagoblin.webfinger.routing import webfinger_well_known_routes, \
webfinger_routes
from mediagoblin.admin.routing import admin_routes
def get_mapper(plugin_routes):
mapping = Mapper()
mapping.minimization = False
# Plugin routes go first so they can override default routes.
mapping.extend(plugin_routes)
mapping.connect(
"index", "/",
controller="mediagoblin.views:root_view")
mapping.extend(auth_routes, '/auth')
mapping.extend(submit_routes, '/submit')
mapping.extend(user_routes, '/u')
mapping.extend(edit_routes, '/edit')
mapping.extend(tag_routes, '/tag')
mapping.extend(webfinger_well_known_routes, '/.well-known')
mapping.extend(webfinger_routes, '/api/webfinger')
mapping.extend(admin_routes, '/a')
return mapping
```
#### File: mediagoblin/tools/response.py
```python
from webob import Response, exc
from mediagoblin.tools.template import render_template
def render_to_response(request, template, context, status=200):
"""Much like Django's shortcut.render()"""
return Response(
render_template(request, template, context),
status=status)
def render_404(request):
"""
Render a 404.
"""
return render_to_response(
request, 'mediagoblin/404.html', {}, status=404)
def redirect(request, *args, **kwargs):
"""Returns a HTTPFound(), takes a request and then urlgen params"""
querystring = None
if kwargs.get('querystring'):
querystring = kwargs.get('querystring')
del kwargs['querystring']
return exc.HTTPFound(
location=''.join([
request.urlgen(*args, **kwargs),
querystring if querystring else '']))
``` |
{
"source": "3reedm/taurus",
"score": 2
} |
#### File: bzt/jmx/http.py
```python
import json
from bzt.jmx.base import JMX
from bzt.jmx.tools import ProtocolHandler
from bzt.six import etree
from bzt.utils import get_host_ips, BetterDict
class HTTPProtocolHandler(ProtocolHandler):
def _get_merged_ci_headers(self, scenario, req, header):
def dic_lower(dic):
return {str(k).lower(): str(dic[k]).lower() for k in dic}
ci_scenario_headers = dic_lower(scenario.get_headers())
ci_request_headers = dic_lower(req.headers)
headers = BetterDict()
headers.merge(ci_scenario_headers)
headers.merge(ci_request_headers)
if header.lower() in headers:
return headers[header]
else:
return None
def get_toplevel_elements(self, scenario):
return self._gen_managers(scenario) + self._gen_defaults(scenario)
def _gen_managers(self, scenario):
elements = []
if scenario.get("store-cache", True):
elements.append(JMX._get_cache_mgr())
elements.append(etree.Element("hashTree"))
if scenario.get("store-cookie", True):
elements.append(JMX._get_cookie_mgr(scenario))
elements.append(etree.Element("hashTree"))
if scenario.get("use-dns-cache-mgr", True):
elements.append(JMX.get_dns_cache_mgr())
elements.append(etree.Element("hashTree"))
self.system_props.merge({"system-properties": {"sun.net.inetaddr.ttl": 0}})
return elements
def _gen_defaults(self, scenario):
default_address = scenario.get("default-address", None)
retrieve_resources = scenario.get("retrieve-resources", True)
resources_regex = scenario.get("retrieve-resources-regex", None)
concurrent_pool_size = scenario.get("concurrent-pool-size", 4)
content_encoding = scenario.get("content-encoding", None)
timeout = scenario.get("timeout", None)
timeout = self.safe_time(timeout)
elements = [JMX._get_http_defaults(default_address, timeout, retrieve_resources,
concurrent_pool_size, content_encoding, resources_regex),
etree.Element("hashTree")]
return elements
def get_sampler_pair(self, scenario, request):
timeout = self.safe_time(request.priority_option('timeout'))
content_type = self._get_merged_ci_headers(scenario, request, 'content-type')
if content_type == 'application/json' and isinstance(request.body, (dict, list)):
body = json.dumps(request.body)
else:
body = request.body
use_random_host_ip = request.priority_option('random-source-ip', default=False)
host_ips = get_host_ips(filter_loopbacks=True) if use_random_host_ip else []
http = JMX._get_http_request(request.url, request.label, request.method, timeout, body,
request.priority_option('keepalive', default=True),
request.upload_files, request.content_encoding,
request.priority_option('follow-redirects', default=True),
use_random_host_ip, host_ips)
children = etree.Element("hashTree")
if request.headers:
children.append(JMX._get_header_mgr(request.headers))
children.append(etree.Element("hashTree"))
return http, children
``` |
{
"source": "3Represents/CS-250_Algorithms",
"score": 3
} |
#### File: Graded/src/A.py
```python
import sys
input = sys.stdin.readline
def grocery():
"""
Adapted from https://codeforces.com/blog/entry/71884
"""
data = []
while True:
line = input()
if line:
data += [list(map(int, line.split()))]
else:
break
n, c, e = data[0]
a = [data[i][0] for i in range(1, len(data))]
b = [data[i][1] for i in range(1, len(data))]
d = [[[0 for _ in range(e+1)] for _ in range(c+1)] for _ in range(n+1)]
d[0][0][0] = 1
for i in range(1, n+1):
for j in range(c+1):
for k in range(e+1):
l = j - a[i-1]
m = k - b[i-1]
if (l < 0) or (m < 0):
d[i][j][k] = d[i-1][j][k]
else:
d[i][j][k] = max(d[i-1][j][k], d[i-1][l][m])
print('Yes\n' if d[n][c][e] else 'No\n')
if __name__ == '__main__':
grocery()
``` |
{
"source": "3Represents/EE-556_MathsOfData",
"score": 3
} |
#### File: exercise1_code/question3/example_wavelet.py
```python
import matplotlib.pyplot as plt
from common.utils import load_image
from common.operators import Representation_Operator
def main():
# Load the image
im_shape = (512, 512)
im = load_image('data/lauterbrunnen.jpg', im_shape)
# Wavelet Transform operator
r = Representation_Operator(m=im_shape[0])
i_wav = r.W(im).reshape(im_shape)
i_recon = r.WT(i_wav).reshape(im_shape)
fig, axs = plt.subplots(1, 3, figsize=(12, 4))
axs[0].imshow(im, cmap='gray')
axs[0].set_title('Original')
axs[1].imshow(abs(i_wav) ** 0.05, cmap='gray')
axs[1].set_title('Wavelet coefficients')
axs[2].imshow(i_recon, cmap='gray')
axs[2].set_title('Inverse Wavelet transform')
for ax in axs.flatten():
ax.set_axis_off()
plt.tight_layout()
plt.show()
if __name__ == "__main__":
main()
```
#### File: code/optimizer/optimizers.py
```python
from abc import abstractmethod
import torch
class optimizer:
def __init__(self,parameters):
self.parameters=list(parameters)
@abstractmethod
def step(self):
"""
Perform one step of the optimizer using the gradient supplied by `loss.backward()`
"""
pass
def zero_grad(self):
"""
Zero the gradient of each parameter
"""
for p in self.parameters:
p.grad=None
class SGDOptimizer(optimizer):
def __init__(self,parameters, args):
super().__init__(parameters)
self.learning_rate = args.learning_rate
def step(self):
for p in self.parameters:
p.data-=p.grad * self.learning_rate
class MomentumSGDOptimizer(optimizer):
def __init__(self,parameters, args):
super().__init__(parameters)
self.learning_rate = args.learning_rate
self.rho = args.rho
self.m = None
def step(self):
if self.m is None:
self.m = [torch.zeros(p.size()) for p in self.parameters]
for i, p in enumerate(self.parameters):
self.m[i] = self.rho * self.m[i] + p.grad
p.grad = self.learning_rate * self.m[i]
p.data-=p.grad
class RMSPropOptimizer(optimizer):
def __init__(self,parameters, args):
super().__init__(parameters)
self.tau = args.tau
self.learning_rate = args.learning_rate
self.r = None
self.delta = args.delta
def step(self):
if self.r is None:
self.r = [torch.zeros(p.size()) for p in self.parameters]
for i, p in enumerate(self.parameters):
self.r[i] = self.tau * self.r[i] + (1 - self.tau) * p.grad ** 2
p.data -= self.learning_rate / (self.delta + torch.sqrt(self.r[i])) * p.grad
class AMSgradOptimizer(optimizer):
def __init__(self,parameters, args):
super().__init__(parameters)
self.beta1 = args.beta1
self.beta2 = args.beta2
self.learning_rate = args.learning_rate
self.delta = args.delta
self.iteration = None
self.m1 = None
self.m2 = None
self.m2_max = None
def step(self):
if self.m1 is None:
self.m1 = [torch.zeros(p.grad.size()) for p in self.parameters]
if self.m2 is None:
self.m2 = [torch.zeros(p.grad.size()) for p in self.parameters]
if self.iteration is None:
self.iteration = 1
if self.m2_max is None:
self.m2_max = [torch.zeros(p.grad.size()) for p in self.parameters]
for i, p in enumerate(self.parameters):
self.m1[i] = self.beta1 * self.m1[i] + (1 - self.beta1) * p.grad
self.m2[i] = self.beta2 * self.m2[i] + (1 - self.beta2) * p.grad ** 2
# Iteration starts at 1 instead of 0, (t+1) is not needed
m1_hat = self.m1[i] / (1 - self.beta1 ** self.iteration)
m2_hat = self.m2[i] / (1 - self.beta2 ** self.iteration)
self.m2_max[i] = torch.maximum(self.m2_max[i], m2_hat)
p.data -= self.learning_rate * m1_hat / (self.delta + torch.sqrt(self.m2_max[i]))
self.iteration = self.iteration+1
def createOptimizer(args,model):
p=model.parameters()
if args.optimizer == "sgd":
return SGDOptimizer(p,args)
elif args.optimizer == "momentumsgd":
return MomentumSGDOptimizer(p,args)
# elif args.optimizer == "adagrad":
# return AdagradOptimizer(p,args)
elif args.optimizer == "rmsprop":
return RMSPropOptimizer(p,args)
elif args.optimizer == "amsgrad":
return AMSgradOptimizer(p,args)
else:
raise NotImplementedError(f"Unknown optimizer {args.optimizer}")
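# Editor's usage sketch (hypothetical args namespace, model and data loader, not part of
# the original module): the custom optimizers above plug into the usual PyTorch loop via
# zero_grad() / backward() / step().
def _example_training_loop(model, data_loader, args):
    opt = createOptimizer(args, model)  # e.g. args.optimizer == "momentumsgd"
    for batch_x, batch_y in data_loader:
        opt.zero_grad()
        loss = torch.nn.functional.cross_entropy(model(batch_x), batch_y)
        loss.backward()
        opt.step()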
```
#### File: code/optimizer/tutorial.py
```python
import torch
import torchvision
import torch.nn.functional as F
from torchvision import transforms
from torch import nn
class TwoLayerNet(nn.Module):
def __init__(self, input, hidden, output):
"""
create two nn.Linear objects and assign them as attributes
:param input: dimension of the input
:param hidden: number of hidden neurons
:param output: dimension of the output
"""
super().__init__()
self.linear1 = nn.Linear(input, hidden)
self.linear2 = nn.Linear(hidden, output)
def forward(self, x):
"""
In the forward method we define what is the output of the network
given an input x. In this example we use the ReLU as our activation function
"""
x = F.relu(self.linear1(x))
x = self.linear2(x)
return x
if __name__ == '__main__':
net = TwoLayerNet(input=784, hidden=100, output=10)
x = torch.randn(784)
result = net(x)
print('output of the network at input x: ' + str(result))
train_dataset = torchvision.datasets.MNIST(
root='~/data',
train=True,
transform=transforms.ToTensor(),
download=True)
train_loader = torch.utils.data.DataLoader(
dataset=train_dataset,
batch_size=128,
shuffle=True)
for _, (x, y) in enumerate(train_loader):
print('batch size: ' + str(x.shape[0]))
print('input dimension: ' + str(x[0].shape))
loss_fn = nn.CrossEntropyLoss()
x = x.view(x.shape[0], -1) # reshape 28x28 image to a 1x784 vector
net.zero_grad() # set the gradients to 0
output = net(x)
loss = loss_fn(output, y)
loss.backward() # backpropagation
for p in net.parameters():
gradient = p.grad
# perform an update based on the gradient
break # stops the for loop. remove this line to iterate through all the data
``` |
{
"source": "3rmack/shaper",
"score": 2
} |
#### File: shaper/libs/parser.py
```python
import json
import os
import sys
from collections import OrderedDict
from xml.dom.minidom import parseString
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import xmltodict
import yaml
from . import dicttoxml
from .loader import (
OrderedDictYAMLLoader,
represent_ordered_dict,
represent_unicode,
represent_multi_line,
)
class BaseParser(object):
WARNING_MESSAGE = 'Warning. Unsupported file extension for {file}\n'
@staticmethod
def parsers_choice(filepath):
"""Get parser class by file type.
:param filepath: string path to file
:return: parser class
"""
_, ext = os.path.splitext(filepath)
return PARSERS_MAPPING.get(ext)
def read(self, path): # pylint: disable=inconsistent-return-statements
"""Read file data structure according its type. Default type choose
dynamic with magic function.
:param path: string path to file
:return: File data structure
:rtype: [dict, list]
"""
parser_class = self.parsers_choice(path)
if parser_class:
try:
return parser_class().read(path)
# pylint: disable=broad-except
# disable cause of list of exceptions
# not known due to a lot of parsers
except Exception as exc:
msg = 'Failed to parse {file}'.format(file=os.path.abspath(path))
sys.stderr.write(
'{message}\n{exception}\n'.format(message=msg, exception=exc),
)
sys.stderr.write(self.WARNING_MESSAGE.format(file=path))
def write(self, data, path):
"""Write data in file according its type. Default type choose dynamic
with magic function.
:param path: string path to file
:param data: data
:type data: [str, dict, list]
:return: None
:rtype: None
"""
parser_class = self.parsers_choice(path)
if parser_class:
parser_class().write(data, path)
else:
sys.stderr.write(self.WARNING_MESSAGE.format(file=path))
class TextParser(object):
def read(self, path):
"""Read plaintext file.
:param path: string path to file
        :return: file content as a single string
        :rtype: str
"""
try:
with open(path, 'r') as fd:
return fd.read()
except (ValueError, OSError, IOError) as exc:
sys.stderr.write(
'Failed to read {file}: {msg}'.format(file=path, msg=str(exc)),
)
def write(self, data, path):
"""Write plaintext file.
:param data: file content
:param path: string path to file
:return: None
:rtype: None
"""
try:
with open(path, 'wb') as fd:
fd.write(data)
except (ValueError, OSError, IOError) as exc:
sys.stderr.write(
'Failed to write {file}: {msg}'.format(file=path, msg=str(exc)),
)
class YAMLParser(TextParser):
def read(self, path):
"""YAML read.
:param path: string path to file
:return: data structure
:rtype: dict
"""
return yaml.load(
super(YAMLParser, self).read(path),
Loader=OrderedDictYAMLLoader,
)
def write(self, data, path):
"""Dump data structure to YAML.
:param data: configuration dataset
:param path: string path to file
:type data: dict
:return: None
:rtype: None
"""
yaml.add_representer(OrderedDict, represent_ordered_dict)
if sys.version_info[0] == 2:
yaml.add_representer(
unicode, # pylint: disable=undefined-variable
represent_unicode,
)
# add string representer for multi line issue
yaml.add_representer(str, represent_multi_line)
content = yaml.dump(
data,
default_flow_style=False,
allow_unicode=True,
)
if sys.version_info[0] == 3:
content = bytearray(content, 'utf-8')
super(YAMLParser, self).write(content, path)
class JSONParser(TextParser):
def read(self, path):
"""JSON read.
:param path: string path to file
:return: json data structure
:rtype: dict
"""
return json.loads(super(JSONParser, self).read(path))
def write(self, data, path):
"""Dump data to JSON.
:param data: configuration data structure
:param path: string path to file
:type data: dict
:return: None
:rtype: None
"""
kw = {'encoding': 'utf-8'} if sys.version_info[0] == 2 else {}
with open(path, 'w') as fd:
json.dump(
data,
fd,
indent=4,
separators=(',', ': '),
**kw
)
class XMLParser(TextParser):
def read(self, path):
"""XML read.
:param path: string path to file
:return: XML data structure
:rtype: dict
"""
return xmltodict.parse(super(XMLParser, self).read(path))
def write(self, data, path):
"""Dump data structure to XML.
:param data: configuration data structure
:param path: string path to file
:type data: dict
:return: None
:rtype: None
"""
dom = parseString(
dicttoxml.dict_to_xml(
data,
fold_list=False,
item_func=lambda x: x,
attr_type=False,
root=False,
),
)
super(XMLParser, self).write(dom.toprettyxml(encoding='utf-8'), path)
class PropertyParser(TextParser):
@staticmethod
def _process_multiline_string(string):
string_splitted = string.splitlines()
if len(string_splitted) > 1:
return "\n ".join(string_splitted)
return string
def read(self, path):
"""PROPERTY read.
:param path: string path to file
:return: property data structure
:rtype: dict
"""
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
content = super(PropertyParser, self).read(path)
config = StringIO()
config.write('[dummy_section]\n')
config.write(content.replace('%', '%%'))
config.seek(0, os.SEEK_SET)
conf_parser = ConfigParser.SafeConfigParser()
conf_parser.optionxform = str
# pylint: disable=deprecated-method
conf_parser.readfp(config)
return OrderedDict(conf_parser.items('dummy_section'))
def write(self, data, path):
"""Dump data structure to property.
:param data: configuration data structure
:param path: string path to file
:type data: dict
:return: None
:rtype: None
"""
if data is None:
return
stream = '\n'.join(
'{}={}'.format(item[0], self._process_multiline_string(item[1])) for item in data.items()
)
super(PropertyParser, self).write(
stream.encode(encoding='utf-8'),
path,
)
parser = BaseParser()
PARSERS_MAPPING = {
'.json': JSONParser,
'.yml': YAMLParser,
'.yaml': YAMLParser,
'.xml': XMLParser,
'.properties': PropertyParser,
'.txt': TextParser,
# '': TextParser, TODO: think about how to parse files without extension
}
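# Editor's usage sketch (hypothetical file name, not part of the original module):
# round-trips a small mapping through the extension-dispatched facade defined above.
def _example_round_trip():
    data = OrderedDict([('host', 'localhost'), ('port', '8080')])
    parser.write(data, 'example.yml')  # dispatched to YAMLParser by extension
    return parser.read('example.yml')  # read back as an OrderedDict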
```
#### File: shaper/shaper/renderer.py
```python
from __future__ import print_function
import os
from collections import OrderedDict
import yaml
from jinja2 import Environment, FileSystemLoader, Undefined
from . import manager
class IgnoreUndefinedAttr(Undefined): # pylint: disable=too-few-public-methods
"""
Class for ignoring undefined attributes
"""
def __getattr__(self, name):
return None
# override default behavior of representing empty string as None object in Jinja2
# empty string will be returned as empty string (not as None object)
# def represent_none_as_empty_string(value):
# if value is None:
# return ""
# return value
def render_template(template_path, context):
"""
Render template interface
:param template_path: path to template
:type template_path: str
:param context: variables
:type context: dict
:return: rendered template
:rtype: str
"""
env = Environment(
loader=FileSystemLoader(os.path.dirname(template_path)),
undefined=IgnoreUndefinedAttr
# finalize=represent_none_as_empty_string
)
env.globals.update(context)
template = env.get_template(os.path.basename(template_path))
return template.render()
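# Editor's usage sketch (hypothetical template path and variables, not part of the
# original module): render_template() above exposes the context entries as Jinja2
# globals, so the template can reference {{ env }} and {{ replicas }} directly.
def _example_render():
    context = {'env': 'staging', 'replicas': 3}
    return render_template('templates/app.yml.j2', context)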
def merge_templates(rendered_templates, out_dir):
"""
Merge templates
:param rendered_templates: list of rendered templates to merge
:param out_dir: path to rendered property files
:return: None
"""
datastructure = {}
for var in rendered_templates:
datastructure.update(yaml.safe_load(var))
datastructure = manager.backward_path_parser(datastructure)
for key in datastructure:
datastructure[key] = OrderedDict((k, v) for k, v in sorted(datastructure[key].items()))
manager.write_properties(datastructure, out_dir)
```
#### File: shaper/tests/test_manager.py
```python
import os
import shutil
from collections import OrderedDict
from shaper import manager, libs
def test_create_folder():
temp_dir_name = 'test_folder'
manager.create_folders(temp_dir_name)
assert os.path.isdir(temp_dir_name)
shutil.rmtree(temp_dir_name)
def test_read_properties(test_assets_root):
input_dir = test_assets_root / 'input'
filename_data_map = manager.read_properties(input_dir)
for filename, data in filename_data_map.items():
assert os.path.splitext(filename)[1] in libs.PARSERS_MAPPING
assert data is not None
assert isinstance(data, (dict, OrderedDict))
def test_forward_path_parser():
datastructure = {
'g/e/c6': 'c6',
'a/d/c3': 'c3',
'a/b/c1': 'c1',
'a/c4': 'c4',
'a/b/c2': 'c2',
'g/c5': 'c5',
}
expected = {
'a': {
'b': {'c1': 'c1', 'c2': 'c2'},
'c4': 'c4',
'd': {'c3': 'c3'},
},
'g': {
'c5': 'c5',
'e': {'c6': 'c6'},
},
}
assert expected == manager.forward_path_parser(datastructure)
def test_backward_path_parser():
datastructure = OrderedDict(
[
(
'a',
OrderedDict(
[
('c4.py', 'c4'),
('d', OrderedDict([('c3.py', 'c3')])),
('b', OrderedDict([('c2.py', 'c2'), ('c1.py', 'c1')])),
],
),
),
('g', OrderedDict([('c5.py', 'c5'), ('e', OrderedDict([('c6.py', 'c6')]))])),
],
)
expected = {
'a/b/c1.py': 'c1',
'a/b/c2.py': 'c2',
'a/c4.py': 'c4',
'a/d/c3.py': 'c3',
'g/c5.py': 'c5',
'g/e/c6.py': 'c6',
}
assert expected == manager.backward_path_parser(datastructure)
``` |
{
"source": "3rror/aca_pathfinding_project",
"score": 3
} |
#### File: aca_pathfinding_project/tools/sparse_dense_size_comparison.py
```python
import numpy as np
from scipy.sparse import csr_matrix
import sys
def load_matrix(file):
matrix = np.loadtxt(file, dtype=int, ndmin=2)
print("Nodes: " + str(len(matrix)))
print(f"Dense matrix: {matrix.nbytes / 1000}mb")
sparse_csr_mat = csr_matrix(matrix)
print(f"Sparse matrix: {sparse_csr_mat.data.nbytes / 1000}mb")
print("")
if __name__ == "__main__":
for f in sys.argv[1:]:
load_matrix(f)
``` |
{
"source": "3rror/pykamino",
"score": 2
} |
#### File: pykamino/_cli/features.py
```python
from pykamino._cli.config import config as cfg
from pykamino._cli.shared_utils import init_db
from pykamino.features import TimeWindow, exporter, orders, trades
def compute(*args, **kwargs):
category = kwargs['category']
params = {'start': kwargs['start'],
'end': kwargs['end'],
'res': kwargs['resolution'],
'products': cfg['global']['products'],
'stride': kwargs['stride'],
'path': kwargs['path']}
init_db()
if category == 'all':
export_orders(**params)
export_trades(**params)
elif category == 'orders':
export_orders(**params)
else:
export_trades(**params)
def export_trades(start, end, res, stride, products, path):
interval = TimeWindow(start, end)
feats = trades.extract(interval, res, stride, products)
exporter.features_to_csv(feats, path, 'trades')
def export_orders(start, end, res, products, path, **kwargs):
interval = TimeWindow(start, end)
feats = orders.extract(interval, res, products)
exporter.features_to_csv(feats, path, 'orders')
```
#### File: pykamino/pykamino/db.py
```python
from datetime import datetime
from functools import partial
import enum
import math
import os
from playhouse import pool
import peewee
# We want the database to be dinamically defined, so that we can support
# different Dbms's. In order to do that, we first declare a placeholder.
database = peewee.DatabaseProxy()
class Dbms(enum.Enum):
"""
    An enum representing a set of supported DBMSs.
"""
MYSQL = 'mysql'
POSTGRES = 'postgres'
SQLITE = 'sqlite'
def db_factory(dbms: Dbms, db_name, user=None, psw=None, host=None, port=None):
"""
Set up the database connection with the given parameters and create needed
tables and schemas.
You must call this function before any operation on the database.
"""
args = {'database': db_name,
'user': user,
'password': <PASSWORD>,
'host': host,
'port': port,
# We don't want too many connections, but we want
# at least two (for fast feature extraction)
'max_connections': math.ceil(os.cpu_count() / 2) if os.cpu_count() > 2 else 2}
if dbms == Dbms.MYSQL:
real_db = pool.PooledMySQLDatabase(**args)
elif dbms == Dbms.POSTGRES:
real_db = pool.PooledPostgresqlDatabase(**args)
elif dbms == Dbms.SQLITE:
real_db = pool.PooledSqliteDatabase(db_name)
database.initialize(real_db)
database.create_tables(BaseModel.__subclasses__())
database.manual_close()
return real_db
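# Editor's usage sketch (not part of the original module): an in-memory SQLite database
# is enough for local experiments; as the docstring above notes, db_factory() must run
# before any other database operation.
def _example_connect():
    return db_factory(Dbms.SQLITE, ':memory:')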
CurrencyField = partial(peewee.DecimalField, max_digits=18, decimal_places=8)
CurrencyField.__doc__ = """A model corresponding to a fixed-point number with
8 decimal places and 10 digits for the integer part."""
class EnumField(peewee.SmallIntegerField):
"""
A `peewee.SmallIntegerField` that maps an integer number to a string, and vice-versa.
"""
def __init__(self, keys, *args, **kwargs):
super().__init__(null=False, *args, **kwargs)
self.enum = enum.Enum('InnerEnum', ' '.join(keys))
# Overridden
def db_value(self, value):
return self.enum[value].value
# Overridden
def python_value(self, value):
return self.enum(value).name
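# Editor's illustration (hypothetical helper, not part of the original module):
# with keys=('sell', 'buy') the field stores 'sell' as 1 and 'buy' as 2, and
# python_value() maps the stored integer back to its name.
def _example_enum_field_round_trip():
    field = EnumField(keys=('sell', 'buy'))
    stored = field.db_value('buy')     # -> 2
    return field.python_value(stored)  # -> 'buy'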
CryptoField = partial(EnumField, keys=('BTC-USD', 'ETH-USD'))
CryptoField.__doc__ = """An EnumField for "BTC-USD" and "ETH-USD"."""
class BaseModel(peewee.Model):
"""
A base model for all the ORM models used in pykamino.
You should extend this class if you want to define models
using the same `pykamino` database.
"""
class Meta:
database = database
legacy_table_names = False
class Trade(BaseModel):
"""
Trade Represents the table of trades.
Note:
A trade is a match in price of two orders: a "buy" one and a "sell" one.
"""
side = EnumField(keys=('sell', 'buy'))
amount = CurrencyField()
product = CryptoField()
price = CurrencyField()
time = peewee.DateTimeField()
class Meta:
table_name = 'trades'
indexes = ((('product', 'time'), False),)
class OrderState(BaseModel):
"""
OrderState represents the table of order states, i.e. the entries
in the order book.
"""
order_id = peewee.UUIDField()
product = CryptoField()
side = EnumField(keys=('ask', 'bid'))
price = CurrencyField()
amount = CurrencyField()
starting_at = peewee.DateTimeField(default=datetime.utcnow)
ending_at = peewee.DateTimeField(null=True)
class Meta:
primary_key = peewee.CompositeKey('order_id', 'starting_at')
table_name = 'order_states'
indexes = ((('product', 'ending_at', 'starting_at'), False),)
constraints = [peewee.Check('starting_at < ending_at')]
``` |
{
"source": "3rww/data-api",
"score": 2
} |
#### File: trwwapi/rainfall/views.py
```python
from datetime import timedelta
from collections import OrderedDict
# from django.contrib.auth.models import User, Group
from django.utils.safestring import mark_safe
from django.core.paginator import Paginator
from django.utils.functional import cached_property
from django_filters import filters
from django.contrib.gis.geos import Point
from django.shortcuts import render
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.generics import GenericAPIView
from rest_framework import viewsets, permissions, routers
from rest_framework.decorators import api_view
from rest_framework.pagination import PageNumberPagination, CursorPagination
from django_filters.rest_framework import FilterSet, DjangoFilterBackend
from ..common.config import TZI
# from .api_v2.config import TZI
from .serializers import (
GarrObservationSerializer,
GarrRecordSerializer,
GaugeObservationSerializer,
GaugeRecordSerializer,
RtrgRecordSerializer,
RtrrObservationSerializer,
RtrgObservationSerializer,
RainfallEventSerializer,
RtrrRecordSerializer,
)
from .selectors import (
handle_request_for,
get_latest_garrobservation,
get_latest_gaugeobservation,
get_latest_rainfallevent,
get_latest_rtrgobservation,
get_latest_rtrrobservation,
get_rainfall_total_for
)
from .models import (
GarrObservation,
GarrRecord,
GaugeObservation,
GaugeRecord,
RtrrObservation,
RtrrRecord,
RtrgObservation,
RtrgRecord,
RainfallEvent,
Pixel,
Gauge
)
# -------------------------------------------------------------------
# API ROOT VIEW
class ApiRouterRootView(routers.APIRootView):
"""
Controls appearance of the API root view
"""
def get_view_name(self):
return "3RWW Rainfall Data API"
def get_view_description(self, html=False):
text = "<p>The 3RWW Rainfall API provides access to real-time (provisional) and historic (calibrated) rainfall data for the physical rain gauges and 'virtual' gauges (calibrated radar pixels) in Allegheny County.</p><p>3 Rivers Wet Weather, with support from Vieux Associates, uses calibrated data from the NEXRAD radar located in Moon Township, PA with rain gauge measurements collected during the same time period and rain event for every square kilometer in Allegheny County. The resulting rainfall data is equivalent in accuracy to having 2,276 rain gauges placed across the County. Since April 2000, 3 Rivers has accumulated a massive repository of this high resolution spatiotemporal calibrated radar rainfall data for Allegheny County, which now includes nearly 2 billion data points.</p>"
if html:
            return mark_safe(text)
else:
return text
class ApiDefaultRouter(routers.DefaultRouter):
APIRootView = ApiRouterRootView
# -------------------------------------------------------------------
# HIGH-LEVEL API VIEWS
# these are the views that do the work for us
class RainfallGaugeApiView(GenericAPIView):
"""Rain Gauge data, fully QA/QC'd and provided by 3RWW + ALCOSAN.
"""
# def get(self, request, *args, **kwargs):
# return handle_request_for(GaugeObservation, request, *args, **kwargs)
def post(self, request, *args, **kwargs):
return handle_request_for(GaugeRecord, request, *args, **kwargs)
class RainfallGarrApiView(GenericAPIView):
"""Gauge-Adjusted Radar Rainfall Data. Radar-based rainfall estimated calibrated with rain gauges, interpolated to 1km pixels. Historic data only. Provided by Vieux Associates.
"""
# def get(self, request, *args, **kwargs):
# return handle_request_for(GarrObservation, request, *args, **kwargs)
def post(self, request, *args, **kwargs):
return handle_request_for(GarrRecord, request, *args, **kwargs)
class RainfallRtrrApiView(GenericAPIView):
"""Real-time Radar Rainfall data. Provided through Vieux Associates. Data is provisional and has not be through a QA/QC process.
"""
# def get(self, request, *args, **kwargs):
# return handle_request_for(RtrrObservation, request, *args, **kwargs)
def post(self, request, *args, **kwargs):
return handle_request_for(RtrrRecord, request, *args, **kwargs)
class RainfallRtrgApiView(GenericAPIView):
"""Real-time Rain Gauge data. Provided through Datawise. Data is provisional and has not be through a QA/QC process.
"""
# def get(self, request, *args, **kwargs):
# return handle_request_for(RtrgObservation, request, *args, **kwargs)
def post(self, request, *args, **kwargs):
return handle_request_for(RtrgRecord, request, *args, **kwargs)
# -------------------------------------------------------------------
# LOW LEVEL API VIEWS
# These return paginated data from the tables in the database as-is.
# They show up in the django-rest-framework's explorable API pages.
# --------------------
# ViewSet Pagination:
class FasterDjangoPaginator(Paginator):
@cached_property
def count(self):
# only select 'id' for counting, much cheaper
#return self.object_list.values('id').count()
return len(self.object_list)
class NoCountPaginator(Paginator):
    @property
    def count(self):
        return 0
class PixelResultsSetPagination(PageNumberPagination):
page_size = 1
page_size_query_param = 'page_size'
max_page_size = 3
class PixelResultsSetPagination2(CursorPagination):
page_size = 50
page_size_query_param = 'page_size'
ordering = ['ts']
# django_paginator_class = NoCountPaginator
# def get_paginated_response(self, data):
# return Response(OrderedDict([
# ('next', self.get_next_link()),
# ('previous', self.get_previous_link()),
# ('results', data)
# ]))
class GaugeResultsSetPagination(PageNumberPagination):
page_size = 5
page_size_query_param = 'page_size'
max_page_size = 10
class GaugeResultsSetPagination2(CursorPagination):
page_size = 50
page_size_query_param = 'page_size'
ordering = ['ts']
# --------------------
# ViewSet Filters: Events
class RainfallEventFilter(FilterSet):
event_after = filters.DateFilter(field_name="start_dt", lookup_expr="gte")
event_before = filters.DateFilter(field_name="end_dt", lookup_expr="lte")
class Meta:
model = RainfallEvent
fields = ['event_label', 'start_dt', 'end_dt']
class RainfallEventViewset(viewsets.ReadOnlyModelViewSet):
"""
    Get a list of rainfall event time periods in Allegheny County since 2000. Events are identified by Vieux Associates; more detail on each event is provided in Vieux's monthly report to 3 Rivers Wet Weather. Please note that the list is not comprehensive.
"""
queryset = RainfallEvent.objects.all()
serializer_class = RainfallEventSerializer
permission_classes = [permissions.IsAuthenticatedOrReadOnly]
lookup_field = 'event_label'
filter_backends = (DjangoFilterBackend,)
filterset_class = RainfallEventFilter
# --------------------
# ViewSet Filters: Rainfall Records
class RainfallRecordFilter(FilterSet):
start_dt = filters.DateFilter(field_name="ts", lookup_expr="gte")
end_dt = filters.DateFilter(field_name="ts", lookup_expr="lte")
class GaugeRecordFilter(RainfallRecordFilter):
gauge = filters.TypedMultipleChoiceFilter(
field_name="sid",
choices=[(i.web_id, "{0}: {1}".format(i.web_id, i.name)) for i in Gauge.objects.all().order_by('web_id')]
)
class Meta:
model = GaugeRecord
fields = ['gauge', 'start_dt', 'end_dt']
class RtrgRecordFilter(RainfallRecordFilter):
gauge = filters.TypedMultipleChoiceFilter(
field_name="sid",
choices=[(i.web_id, "{0}: {1}".format(i.web_id, i.name)) for i in Gauge.objects.all().order_by('web_id')]
)
class Meta:
model = RtrgRecord
fields = ['gauge', 'start_dt', 'end_dt']
class RtrrRecordFilter(RainfallRecordFilter):
pixel = filters.TypedMultipleChoiceFilter(
field_name="sid",
choices=[(i.pixel_id, i.pixel_id) for i in Pixel.objects.all().order_by('pixel_id')]
)
class Meta:
model = RtrrRecord
fields = ['pixel', 'start_dt', 'end_dt']
class GarrRecordFilter(RainfallRecordFilter):
pixel = filters.TypedMultipleChoiceFilter(
field_name="sid",
choices=[(i.pixel_id, i.pixel_id) for i in Pixel.objects.all().order_by('pixel_id')]
)
class Meta:
model = GarrRecord
fields = ['pixel', 'start_dt', 'end_dt']
# --------------------
# Default Rainfall Record Viewsets
class RainfallRecordReadOnlyViewset(viewsets.ReadOnlyModelViewSet):
"""parent class, provides shared properties for the default rainfall record model-based viewsets"""
permission_classes = [permissions.IsAuthenticatedOrReadOnly]
lookup_field = 'timestamp'
filter_backends = (DjangoFilterBackend,)
class GarrRecordViewset(RainfallRecordReadOnlyViewset):
"""
Get calibrated, gauge-adjusted radar rainfall observations for 15-minute time intervals. Data created by Vieux Associates for 3 Rivers Wet Weather from available NEXRAD and local rain gauges.
Note: use available time- and pixel-based filtering options to get useful subsets of this data.
"""
queryset = GarrRecord.objects.all()
serializer_class = GarrRecordSerializer
filterset_class = GarrRecordFilter
pagination_class = PixelResultsSetPagination2
class GaugeRecordViewset(RainfallRecordReadOnlyViewset):
"""
Get QA/QC'd rainfall gauge observations for 15-minute time intervals. Data captured by 3 Rivers Wet Weather and ALCOSAN.
Note: use available time- and gauge-based filtering options to get useful subsets of this data.
"""
queryset = GaugeRecord.objects.all()
serializer_class = GaugeRecordSerializer
filterset_class = GaugeRecordFilter
pagination_class = GaugeResultsSetPagination2
class RtrrRecordViewset(RainfallRecordReadOnlyViewset):
"""
Get real-time radar rainfall observations for 15-minute time intervals. Data captured by Vieux Associates from NEXRAD radar in Moon Township, PA for 3 Rivers Wet Weather. Please note that this data is provisional.
Note: use available time- and pixel-based filtering options to get useful subsets of this data.
"""
queryset = RtrrRecord.objects.all()
serializer_class = RtrrRecordSerializer
filterset_class = RtrrRecordFilter
pagination_class = PixelResultsSetPagination2
class RtrgRecordViewset(RainfallRecordReadOnlyViewset):
"""
Get real-time rainfall gauge observations for 15-minute time intervals. Data captured by 3 Rivers Wet Weather and Datawise. Please note that this data is provisional and that observations may be missing due to technical/transmission difficulties.
Note: use available time- and gauge-based filtering options to get useful subsets of this data.
"""
queryset = RtrgRecord.objects.all()
serializer_class = RtrgRecordSerializer
filterset_class = RtrgRecordFilter
pagination_class = GaugeResultsSetPagination2
# -------------------------------------------------------------------
# HELPER VIEWS
# These provide helpers for specific use cases
class LatestObservationTimestampsSummary(viewsets.ReadOnlyModelViewSet):
def list(self, request, format=None):
raw_summary = {
"calibrated-radar": get_latest_garrobservation(),
"calibrated-gauge": get_latest_gaugeobservation(),
"realtime-radar": get_latest_rtrrobservation(),
"realtime-gauge": get_latest_rtrgobservation(),
"rainfall-events": get_latest_rainfallevent(),
}
summary = {
k: v.ts.astimezone(TZI).isoformat() if v is not None else None
for k, v in
raw_summary.items()
}
return Response(summary)
def get_myrain_for(request, back_to: timedelta, back_to_text: str):
text ="That didn't work."
lat = request.GET.get('lat')
lng = request.GET.get('lng')
srid = request.GET.get('srid')
if all([lat, lng]):
p = Point(float(lng), float(lat)) #, srid=srid if srid else 4326)
        p.srid = int(srid) if srid else 4326
pixels = Pixel.objects.filter(geom__contains=p)
if len(list(pixels)) > 0:
            total = get_rainfall_total_for(RtrrRecord, [pixel.pixel_id for pixel in pixels], back_to)
if total:
text = """According to 3 Rivers Wet Weather, your location received approximately {0} inches of rainfall {1}.""".format(total, back_to_text)
else:
text = "Sorry, it looks like rainfall data is unavailable for your location for that timeframe. Check back soon!"
else:
text = "Sorry, we can't get detailed rainfall data for your location."
else:
text = "Sorry, you didn't provide enough location data to answer your question."
# text += " For more information about rainfall and infrastructure in the greater Pittsburgh area, visit w w w dot 3 rivers wet weather dot org."
text += " For more information about rainfall and infrastructure in the greater Pittsburgh area, visit www.3riverswetweather.org"
return render(request, 'speech.html', {"text": text})
def get_myrain_24hours(request):
return get_myrain_for(request, timedelta(days=1), "over the past 24 hours")
def get_myrain_48hours(request):
return get_myrain_for(request, timedelta(days=2), "over the past 48 hours")
def get_myrain_pastweek(request):
return get_myrain_for(request, timedelta(days=7), "over the past week")
```
#### File: trwwapi/rainways/views.py
```python
from django.shortcuts import render
from django.utils.safestring import mark_safe
from rest_framework import routers
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .core import RwPublicResult, RwPublicAnalysis
from ..common.models import TrwwApiResponseSchema
# -------------------------------------------------------------------
# API ROOT VIEW
class ApiRouterRootView(routers.APIRootView):
"""
Controls appearance of the API root view
"""
def get_view_name(self):
return "3RWW Rainways API"
def get_view_description(self, html=True):
text = """<p>The 3RWW Rainways API provides a central location for documenting access to the data and processing resources from 3 Rivers Wet Weather and other organizations that are useful for sewer and stormwater modeling in Allegheny County, PA.</p><p>The goal of the Rainways API is to provide a shortcut around a lot of the otherwise tedious work of downloading and prepping the environmental, infrastructure, and geophysical data used in sewer and stormwater modeling. It doesn't represent a new database or central repository, only a quicker and more direct way to access existing datastores that are relevant to this kind of work. To that end, it is designed to:</p><ul><li>use and evolve to use the best available data</li><li>be used for locations that you are specifically interested in; it is not meant to be a general purpose data download portal (you can go elsewhere to get the underlying data)</li>"""
if html:
return mark_safe(text)
else:
return text
class ApiDefaultRouter(routers.DefaultRouter):
APIRootView = ApiRouterRootView
# -------------------------------------------------------------------
# API Analytical Views
@api_view(['GET'])
def rainways_area_of_interest_analysis(request):
"""
Given a GeoJSON, this returns summary statistics for intersecting layers of interest.
This endpoint is used primarily for the public-facing Rainways web app.
"""
# handle malformed data in request here:
if 'geojson' not in request.data.keys():
r = TrwwApiResponseSchema(
args=request.data,
status_code=400,
status='failed',
messages=['Include geojson in `geojson` object within the submitted json']
)
return Response(
data=TrwwApiResponseSchema.Schema().dump(r),
status=r.status_code
)
# conduct analysis
analysis = RwPublicAnalysis(request.data['geojson'])
analysis.slope_summary()
analysis.soil_summary()
analysis.sustain_summary()
analysis.rainfall_summary()
r = TrwwApiResponseSchema(
# args={"geojson": analysis.aoi_geojson},
data=RwPublicResult.Schema().dump(analysis.results), # response schema expects a dictionary here.
status_code=200,
status='success',
messages=analysis.messages,
meta={"count": len(analysis.aoi_gdf.index)}
)
return Response(data=TrwwApiResponseSchema.Schema().dump(r), status=r.status_code)
```
#### File: data-api/trwwapi/routers.py
```python
from django.apps import apps
# class ModelDatabaseRouter:
# """Allows each model to set its own destiny"""
# def db_for_read(self, model, **hints):
# # Specify target database with field in_db in model's Meta class
# return getattr(model._meta, 'in_db', None)
# def db_for_write(self, model, **hints):
# # Specify target database with field in_db in model's Meta class
# return getattr(model._meta, 'in_db', None)
# def allow_migrate(self, db, app_label, model_name=None, **hints):
# # if in_db is specified and matches db, use that for migration,
# # otherwise use default
# # print(db, app_label, model_name)
# if model_name is None:
# return None
# model = apps.get_model(app_label, model_name)
# db_name = getattr(model._meta, 'in_db', None)
# if db_name is not None:
# return db_name == db
# return None
class RainfallDbRouter:
route_app_labels = {'rainfall'}
rainfall_db = 'rainfall_db'
def db_for_read(self, model, **hints):
"""
Attempts to read rainfall models go to rainfall_db.
"""
if model._meta.app_label in self.route_app_labels:
return 'rainfall_db'
return 'default'
def db_for_write(self, model, **hints):
"""
Attempts to write rainfall models go to rainfall_db.
"""
if model._meta.app_label in self.route_app_labels:
return 'rainfall_db'
return 'default'
def allow_relation(self, obj1, obj2, **hints):
"""
Allow relations if a model in the rainfall app is
involved.
"""
if (
obj1._meta.app_label in self.route_app_labels or
obj2._meta.app_label in self.route_app_labels
):
return True
return None
def allow_migrate(self, db, app_label, model_name=None, **hints):
"""
Make sure the rainfall app only appears in the
'rainfall' database.
"""
# print(db, app_label, model_name)
if app_label in self.route_app_labels:
return db == self.rainfall_db
return None
class DefaultRouter:
def db_for_read(self, model, **hints):
return 'default'
def db_for_write(self, model, **hints):
return 'default'
def allow_relation(self, obj1, obj2, **hints):
return None
def allow_migrate(self, db, app_label, model_name=None, **hints):
return True
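# Editor's configuration sketch (hypothetical settings.py snippet, not part of this
# module): Django consults routers in order, so the app-specific RainfallDbRouter
# should be listed before the catch-all DefaultRouter, e.g.
#
#   DATABASE_ROUTERS = [
#       "trwwapi.routers.RainfallDbRouter",
#       "trwwapi.routers.DefaultRouter",
#   ]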
``` |
{
"source": "3s3s/kzvmonero",
"score": 2
} |
#### File: tests/functional_tests/wallet.py
```python
from __future__ import print_function
import sys
import os
import errno
from framework.wallet import Wallet
from framework.daemon import Daemon
class WalletTest():
def run_test(self):
self.reset()
self.create()
self.check_main_address()
self.check_keys()
self.create_subaddresses()
self.tags()
self.attributes()
self.open_close()
self.languages()
self.change_password()
self.store()
def remove_file(self, name):
WALLET_DIRECTORY = os.environ['WALLET_DIRECTORY']
assert WALLET_DIRECTORY != ''
try:
os.unlink(WALLET_DIRECTORY + '/' + name)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def remove_wallet_files(self, name):
for suffix in ['', '.keys']:
self.remove_file(name + suffix)
def file_exists(self, name):
WALLET_DIRECTORY = os.environ['WALLET_DIRECTORY']
assert WALLET_DIRECTORY != ''
return os.path.isfile(WALLET_DIRECTORY + '/' + name)
def reset(self):
print('Resetting blockchain')
daemon = Daemon()
res = daemon.get_height()
daemon.pop_blocks(res.height - 1)
daemon.flush_txpool()
def create(self):
print('Creating wallet')
wallet = Wallet()
# close the wallet if any, will throw if none is loaded
try: wallet.close_wallet()
except: pass
seed = 'velvet lymph giddy number token physics poetry unquoted nibs useful sabotage limits benches lifestyle eden nitrogen anvil fewest avoid batch vials washing fences goat unquoted'
res = wallet.restore_deterministic_wallet(seed = seed)
assert res.address == '<KEY>'
assert res.seed == seed
def check_main_address(self):
print('Getting address')
wallet = Wallet()
res = wallet.get_address()
assert res.address == '<KEY>', res
assert len(res.addresses) == 1
assert res.addresses[0].address == res.address
assert res.addresses[0].address_index == 0
assert res.addresses[0].used == False
def check_keys(self):
print('Checking keys')
wallet = Wallet()
res = wallet.query_key('view_key')
assert res.key == '49774391fa5e8d249fc2c5b45dadef13534bf2483dede880dac88f061e809100'
res = wallet.query_key('spend_key')
assert res.key == '148d78d2aba7dbca5cd8f6abcfb0b3c009ffbdbea1ff373d50ed94d78286640e'
res = wallet.query_key('mnemonic')
assert res.key == 'velvet lymph giddy number token physics poetry unquoted nibs useful sabotage limits benches lifestyle eden nitrogen anvil fewest avoid batch vials washing fences goat unquoted'
def create_subaddresses(self):
print('Creating subaddresses')
wallet = Wallet()
res = wallet.create_account("idx1")
assert res.account_index == 1, res
assert res.address == '82pP87g1Vkd3LUMssBCumk3MfyEsFqLAaGDf6oxddu61EgSFzt8gCwUD4tr3kp9TUfdPs2CnpD7xLZzyC1Ei9UsW3oyCWDf', res
res = wallet.create_account("idx2")
assert res.account_index == 2, res
assert res.address == '8Bdb75y2MhvbkvaBnG7vYP6DCNneLWcXqNmfPmyyDkavAUUgrHQEAhTNK3jEq69kGPDrd3i5inPivCwTvvA12eQ4SJk9iyy', res
res = wallet.get_address(0, 0)
assert res.address == '42ey1afDFnn4886T7196doS9GPMzexD9gXpsZJDwVjeRVdFCSoHnv7KPbBeGpzJBzHRCAs9UxqeoyFQMYbqSWYTfJJQAWDm', res
assert len(res.addresses) == 1
assert res.addresses[0].address_index == 0, res
res = wallet.get_address(1, 0)
assert res.address == '<KEY>', res
assert len(res.addresses) == 1
assert res.addresses[0].label == 'idx1', res
assert res.addresses[0].address_index == 0, res
res = wallet.get_address(2, 0)
assert res.address == '<KEY>', res
assert len(res.addresses) == 1
assert res.addresses[0].label == 'idx2', res
assert res.addresses[0].address_index == 0, res
res = wallet.create_address(0, "sub_0_1")
res = wallet.create_address(1, "sub_1_1")
res = wallet.create_address(1, "sub_1_2")
res = wallet.get_address(0, [1])
assert len(res.addresses) == 1
assert res.addresses[0].address == '<KEY>'
assert res.addresses[0].label == 'sub_0_1'
res = wallet.get_address(1, [1])
assert len(res.addresses) == 1
assert res.addresses[0].address == '<KEY>'
assert res.addresses[0].label == 'sub_1_1'
res = wallet.get_address(1, [2])
assert len(res.addresses) == 1
assert res.addresses[0].address == '<KEY>'
assert res.addresses[0].label == 'sub_1_2'
res = wallet.get_address(1, [0, 1, 2])
assert len(res.addresses) == 3
assert res.addresses[0].address == '<KEY>'
assert res.addresses[0].label == 'idx1'
assert res.addresses[1].address == '<KEY>'
assert res.addresses[1].label == 'sub_1_1'
assert res.addresses[2].address == '<KEY>'
assert res.addresses[2].label == 'sub_1_2'
res = wallet.label_address((1, 2), "sub_1_2_new")
res = wallet.get_address(1, [2])
assert len(res.addresses) == 1
assert res.addresses[0].address == '<KEY>'
assert res.addresses[0].label == 'sub_1_2_new'
res = wallet.label_account(1, "idx1_new")
res = wallet.get_address(1, [0])
assert len(res.addresses) == 1
assert res.addresses[0].address == '<KEY>'
assert res.addresses[0].label == 'idx1_new'
res = wallet.get_address_index('<KEY>')
assert res.index == {'major': 1, 'minor': 2}
res = wallet.get_address_index('<KEY>')
assert res.index == {'major': 0, 'minor': 0}
res = wallet.get_address_index('84QRUYawRNrU3NN1VpFRndSukeyEb3Xpv8qZjjsoJZnTYpDYceuUTpog13D7qPxpviS7J29bSgSkR11hFFoXWk2yNdsR9WF')
assert res.index == {'major': 0, 'minor': 1}
res = wallet.get_address_index('82pP87g1Vkd3LUMssBCumk3MfyEsFqLAaGDf6oxddu61EgSFzt8gCwUD4tr3kp9TUfdPs2CnpD7xLZzyC1Ei9UsW3oyCWDf')
assert res.index == {'major': 1, 'minor': 0}
res = wallet.label_account(0, "main")
def tags(self):
print('Testing tags')
wallet = Wallet()
res = wallet.get_account_tags()
assert not 'account_tags' in res or len(res.account_tags) == 0
ok = False
try: res = wallet.get_accounts('tag')
except: ok = True
assert ok or not 'subaddress_accounts' in res or res.subaddress_accounts == 0
wallet.tag_accounts('tag0', [1])
res = wallet.get_account_tags()
assert len(res.account_tags) == 1
assert res.account_tags[0].tag == 'tag0'
assert res.account_tags[0].label == ''
assert res.account_tags[0].accounts == [1]
res = wallet.get_accounts('tag0')
assert len(res.subaddress_accounts) == 1
assert res.subaddress_accounts[0].account_index == 1
assert res.subaddress_accounts[0].base_address == '<KEY>WDf'
assert res.subaddress_accounts[0].balance == 0
assert res.subaddress_accounts[0].unlocked_balance == 0
assert res.subaddress_accounts[0].label == 'idx1_new'
assert res.subaddress_accounts[0].tag == 'tag0'
wallet.untag_accounts([0])
res = wallet.get_account_tags()
assert len(res.account_tags) == 1
assert res.account_tags[0].tag == 'tag0'
assert res.account_tags[0].label == ''
assert res.account_tags[0].accounts == [1]
wallet.untag_accounts([1])
res = wallet.get_account_tags()
assert not 'account_tags' in res or len(res.account_tags) == 0
wallet.tag_accounts('tag0', [0])
wallet.tag_accounts('tag1', [1])
res = wallet.get_account_tags()
assert len(res.account_tags) == 2
x = [x for x in res.account_tags if x.tag == 'tag0']
assert len(x) == 1
assert x[0].tag == 'tag0'
assert x[0].label == ''
assert x[0].accounts == [0]
x = [x for x in res.account_tags if x.tag == 'tag1']
assert len(x) == 1
assert x[0].tag == 'tag1'
assert x[0].label == ''
assert x[0].accounts == [1]
wallet.tag_accounts('tagA', [0, 1])
res = wallet.get_account_tags()
assert len(res.account_tags) == 1
assert res.account_tags[0].tag == 'tagA'
assert res.account_tags[0].label == ''
assert res.account_tags[0].accounts == [0, 1]
wallet.tag_accounts('tagB', [1, 0])
res = wallet.get_account_tags()
assert len(res.account_tags) == 1
assert res.account_tags[0].tag == 'tagB'
assert res.account_tags[0].label == ''
assert res.account_tags[0].accounts == [0, 1]
wallet.set_account_tag_description('tagB', 'tag B')
res = wallet.get_account_tags()
assert len(res.account_tags) == 1
assert res.account_tags[0].tag == 'tagB'
assert res.account_tags[0].label == 'tag B'
assert res.account_tags[0].accounts == [0, 1]
res = wallet.get_accounts('tagB')
assert len(res.subaddress_accounts) == 2
subaddress_accounts = []
for x in res.subaddress_accounts:
assert x.balance == 0
assert x.unlocked_balance == 0
subaddress_accounts.append((x.account_index, x.base_address, x.label))
assert sorted(subaddress_accounts) == [(0, '<KEY>', 'main'), (1, '<KEY>', 'idx1_new')]
def attributes(self):
print('Testing attributes')
wallet = Wallet()
ok = False
try: res = wallet.get_attribute('foo')
except: ok = True
assert ok
res = wallet.set_attribute('foo', 'bar')
res = wallet.get_attribute('foo')
assert res.value == 'bar'
res = wallet.set_attribute('foo', 'いっしゅん')
res = wallet.get_attribute('foo')
assert res.value == u'いっしゅん'
ok = False
try: res = wallet.get_attribute('いちりゅう')
except: ok = True
assert ok
res = wallet.set_attribute('いちりゅう', 'いっぽう')
res = wallet.get_attribute('いちりゅう')
assert res.value == u'いっぽう'
def open_close(self):
print('Testing open/close')
wallet = Wallet()
res = wallet.get_address()
assert res.address == '<KEY>'
wallet.close_wallet()
ok = False
try: res = wallet.get_address()
except: ok = True
assert ok
wallet.restore_deterministic_wallet(seed = 'peeled mixture ionic radar utopia puddle buying illness nuns gadget river spout cavernous bounced paradise drunk looking cottage jump tequila melting went winter adjust spout')
res = wallet.get_address()
assert res.address == '44Kbx4sJ7JDRDV5aAhLJzQCjDz2ViLRduE3ijDZu3osWKBjMGkV1XPk4pfDUMqt1Aiezvephdqm6YD19GKFD9ZcXVUTp6BW'
wallet.close_wallet()
ok = False
try: wallet.get_address()
except: ok = True
assert ok
wallet.restore_deterministic_wallet(seed = 'velvet lymph giddy number token physics poetry unquoted nibs useful sabotage limits benches lifestyle eden nitrogen anvil fewest avoid batch vials washing fences goat unquoted')
res = wallet.get_address()
assert res.address == '<KEY>fJJQAWDm'
def languages(self):
print('Testing languages')
wallet = Wallet()
res = wallet.get_languages()
assert 'English' in res.languages
assert 'English' in res.languages_local
assert 'Dutch' in res.languages
assert 'Nederlands' in res.languages_local
assert 'Japanese' in res.languages
assert u'日本語' in res.languages_local
try: wallet.close_wallet()
except: pass
languages = res.languages
languages_local = res.languages_local
for language in languages + languages_local:
sys.stdout.write('Creating ' + language + ' wallet\n')
wallet.create_wallet(filename = '', language = language)
res = wallet.query_key('mnemonic')
wallet.close_wallet()
def change_password(self):
print('Testing password change')
wallet = Wallet()
# close the wallet if any, will throw if none is loaded
try: wallet.close_wallet()
except: pass
self.remove_wallet_files('test1')
seed = 'velvet lymph giddy number token physics poetry unquoted nibs useful sabotage limits benches lifestyle eden nitrogen anvil fewest avoid batch vials washing fences goat unquoted'
res = wallet.restore_deterministic_wallet(seed = seed, filename = 'test1')
assert res.address == '<KEY>'
assert res.seed == seed
wallet.close_wallet()
res = wallet.open_wallet('test1', password = '')
res = wallet.get_address()
assert res.address == '<KEY>'
res = wallet.change_wallet_password(old_password = '', new_password = '<PASSWORD>')
wallet.close_wallet()
ok = False
try: res = wallet.open_wallet('test1', password = '')
except: ok = True
assert ok
res = wallet.open_wallet('test1', password = '<PASSWORD>')
res = wallet.get_address()
assert res.address == '<KEY>'
wallet.close_wallet()
self.remove_wallet_files('test1')
def store(self):
print('Testing store')
wallet = Wallet()
# close the wallet if any, will throw if none is loaded
try: wallet.close_wallet()
except: pass
self.remove_wallet_files('test1')
seed = 'velvet lymph giddy number token physics poetry unquoted nibs useful sabotage limits benches lifestyle eden nitrogen anvil fewest avoid batch vials washing fences goat unquoted'
res = wallet.restore_deterministic_wallet(seed = seed, filename = 'test1')
assert res.address == '<KEY>'
assert res.seed == seed
self.remove_file('test1')
assert self.file_exists('test1.keys')
assert not self.file_exists('test1')
wallet.store()
assert self.file_exists('test1.keys')
assert self.file_exists('test1')
wallet.close_wallet()
self.remove_wallet_files('test1')
if __name__ == '__main__':
WalletTest().run_test()
```
{
"source": "3salles/rankaa",
"score": 3
} |
#### File: app/fuctions/jogos.py
```python
from datetime import time
from tinydb import TinyDB, Query
def ordenar_por_data(dataset):
a = list(dataset.all())
count = len(a)
b = []
while count > 0:
menor = a[0]
menor_mes = menor['month']
menor_dia = menor['day']
for i in a:
if i['month'] < menor_mes:
menor = i
menor_mes = menor['month']
menor_dia = menor['day']
            elif i['month'] == menor_mes and i['day'] < menor_dia:
menor = i
menor_mes = menor['month']
menor_dia = menor['day']
b.append(menor)
a.remove(menor)
count = count - 1
return b
def proximos_jogos(dataset):
a = ordenar_por_data(dataset)
b = []
for i in a:
if i['finalizado'] == 0:
b.append(i)
return b
def ultimos_jogos(dataset):
a = ordenar_por_data(dataset)
b = []
for i in a:
if i['finalizado'] == 1:
b.append(i)
return b
def proximos_jogos_modalidade(dataset, modalidade):
db_modalidade = TinyDB(r"app\dataset\modalidades.json")
aut = db_modalidade.search(Query().id == modalidade)
a = ordenar_por_data(dataset)
b = []
for i in a:
if (i['finalizado'] == 0) and (i['modalidade'] == aut[0]['name']):
b.append(i)
return b
def ultimos_jogos_modalidade(dataset, modalidade):
db_modalidade = TinyDB(r"app\dataset\modalidades.json")
aut = db_modalidade.search(Query().id == modalidade)
a = ordenar_por_data(dataset)
b = []
for i in a:
if (i['finalizado'] == 1) and (i['modalidade'] == aut[0]['name']):
b.append(i)
return b
def proximos_jogos_atletica(dataset, atletica):
db_atletica = TinyDB(r"app\dataset\atleticas.json")
aut = db_atletica.search(Query().id == atletica)
a = ordenar_por_data(dataset)
b = []
for i in a:
if (i['time1'] == aut[0]['name']) or (i['time2'] == aut[0]['name']):
if i['finalizado'] == 0:
b.append(i)
return b
def ultimos_jogos_atletica(dataset, atletica):
db_atletica = TinyDB(r"app\dataset\atleticas.json")
aut = db_atletica.search(Query().id == atletica)
a = ordenar_por_data(dataset)
b = []
for i in a:
if (i['time1'] == aut[0]['name']) or (i['time2'] == aut[0]['name']):
if i['finalizado'] == 1:
b.append(i)
return b
```
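A small usage sketch, assuming an in-memory TinyDB table and illustrative game records, showing how `ordenar_por_data` and `proximos_jogos` order and filter games by month/day and the `finalizado` flag.
```python
from tinydb import TinyDB
from tinydb.storages import MemoryStorage

# ordenar_por_data / proximos_jogos are assumed in scope (defined above in app/fuctions/jogos.py).
db = TinyDB(storage=MemoryStorage)
jogos = db.table("jogos")
jogos.insert({"month": 5, "day": 20, "finalizado": 0, "modalidade": "Futsal"})
jogos.insert({"month": 4, "day": 11, "finalizado": 1, "modalidade": "Volei"})
jogos.insert({"month": 4, "day": 2, "finalizado": 0, "modalidade": "Futsal"})

print(ordenar_por_data(jogos))  # all games ordered by (month, day)
print(proximos_jogos(jogos))    # only games with finalizado == 0
```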
#### File: new.backend/controllers/atletica.py
```python
from models.Atletica import Atletica
from tinydb import Query
from extensions.database import atleticas as table_atleticas
from flask import jsonify
def cadastra_atletica(atletica: dict):
novo_atletica = Atletica(**atletica)
atletica_query = Query()
try:
existe_atletica = table_atleticas.search(
atletica_query.nome == novo_atletica.nome)
if(len(existe_atletica)):
return jsonify({"status": False, "message": "Atlética já cadastrada"}), 400
else:
try:
table_atleticas.insert(dict(novo_atletica))
return jsonify({"status": True, "message": "Success"}), 201
except Exception as e:
return jsonify({"status": False, "message": "Erro ao cadastrar atlética", "error": f"{e}"}), 400
except Exception as e:
return jsonify({"status": False, "message": "Erro ao acessar banco de dados", "error": f"{e}"}), 500
def get_atletica_by_id(id: int):
atletica_query = Query()
try:
existe_atletica = table_atleticas.search(atletica_query.id == id)
if(len(existe_atletica)):
return jsonify({"status": True, "message": "Success", "data": existe_atletica[0]}), 200
else:
return jsonify({"status": False, "message": "Atlética não encontrada"}), 200
except Exception as e:
return jsonify({"status": False, "message": "Erro ao acessar banco de dados", "error": f"{e}"}), 500
def get_atleticas():
try:
atleticas = table_atleticas.all()
if(len(atleticas)):
return jsonify({"status": True, "message":"Success", "data":atleticas}), 200
else:
return jsonify({"status": True, "message":"Nenhuma atletica cadastrada"}), 200
except Exception as e:
return jsonify({"status": False, "message": "Erro ao acessar banco de dados", "error": f"{e}"}), 500
```
#### File: new.backend/controllers/usuario.py
```python
from models.Usuario import Usuario
from extensions.database import usuarios as table_usuario
from tinydb import Query
from flask import jsonify
from werkzeug.security import generate_password_hash
from werkzeug.security import check_password_hash
def cadastra_usuario(usuario: dict):
novo_usuario = usuario
novo_usuario["password"] = generate_password_hash(novo_usuario["password"])
usuarios_query = Query()
usuario_existe = table_usuario.contains(
usuarios_query.email == novo_usuario["email"])
if(usuario_existe):
return jsonify({"status": False, "message": "Usuário já cadastrado"}), 400
else:
try:
table_usuario.insert(novo_usuario)
return jsonify({"status": True, "message": "Success", "data": {"nome": novo_usuario["nome"], "email": novo_usuario["email"]}}), 201
except Exception as e:
return jsonify({"status": False, "message": "Erro ao cadastrar usuário", "error": f"{e}"}), 400
def autentica_usuario(auth):
    # Validate the credentials object before touching its attributes,
    # otherwise a missing Authorization header would raise an AttributeError.
    if not auth or not auth.username or not auth.password:
        return jsonify({"status": False, "message": "Could not verify", "WWW-Authenticate": "Basic auth='Login required'"}), 401
    usuarios_query = Query()
    usuario_existe = table_usuario.search(
        usuarios_query.email == auth.username[1:-1])
if(len(usuario_existe)):
if(check_password_hash(usuario_existe[0]["password"], auth.password[1:-1])):
return jsonify({"status": True, "message": "Success"}), 200
else:
return jsonify({"status": False, "message": "Could not verify", "WWW-Authenticate": "Basic auth='Login required'"}), 401
else:
return jsonify({"status": False, "message": "Usuário não encontrado"}), 400
```
#### File: new.backend/routes/jogo.py
```python
from flask import jsonify
from flask import request
from controllers import jogo
def instala_rota(app):
@app.route("/jogo", methods=["POST"])
def cadastra_jogo():
novo_jogo = request.json
response = jogo.cadastra_jogo(novo_jogo)
return response
@app.route("/jogo/<int:id_jogo>", methods=["GET", "PUT"])
def get_edit_jogo(id_jogo):
if(request.method == "GET"):
response = jogo.get_jogo_by_id(id_jogo)
elif(request.method == "PUT"):
dados = request.json
response = jogo.edita_jogo_by_id(dados, id_jogo)
return response
@app.route("/jogos", methods=["GET"])
def all_jogos():
jogos = jogo.get_jogos()
return jogos
```
{
"source": "3scale-qe/3scale-api-python",
"score": 2
} |
#### File: integration/auth/test_app_key_authorization.py
```python
import base64
import pytest
@pytest.fixture(scope="module")
def service_params(service_params):
service_params.update(backend_version="2")
return service_params
@pytest.fixture(scope="module")
def proxy(service, proxy):
service.proxy.update(params={
"credentials_location": "authorization"
})
def test_app_key_authorization(proxy, application, ssl_verify):
creds = application.authobj.credentials
encoded = base64.b64encode(
f"{creds['app_id']}:{creds['app_key']}".encode("utf-8")).decode("utf-8")
response = application.test_request(verify=ssl_verify)
assert response.status_code == 200
assert response.request.headers["Authorization"] == "Basic %s" % encoded
```
#### File: tests/integration/test_integration_activedocs.py
```python
from tests.integration import asserts
from .asserts import assert_resource, assert_resource_params
def test_active_docs_fetch(active_doc):
ac = active_doc.client.fetch(int(active_doc['id']))
assert ac
assert ac['id'] == active_doc['id']
```
#### File: tests/integration/test_integration_backend_metrics.py
```python
import pytest
import backoff
from threescale_api.errors import ApiClientError
from tests.integration import asserts
def test_should_create_metric(backend_metric, backend_metric_params):
asserts.assert_resource(backend_metric)
asserts.assert_resource_params(backend_metric, backend_metric_params)
def test_should_fields_be_required(backend):
resource = backend.metrics.create(params={}, throws=False)
asserts.assert_errors_contains(resource, ['friendly_name', 'unit'])
def test_should_system_name_be_invalid(backend, backend_metric_params):
backend_metric_params['system_name'] = 'invalid name whitespaces'
resource = backend.metrics.create(params=backend_metric_params, throws=False)
asserts.assert_errors_contains(resource, ['system_name'])
def test_should_raise_exception(backend):
with pytest.raises(ApiClientError):
backend.metrics.create(params={})
def test_should_read_metric(backend_metric, backend_metric_params):
resource = backend_metric.read()
asserts.assert_resource(resource)
asserts.assert_resource_params(resource, backend_metric_params)
def test_should_update_metric(backend_metric, backend_updated_metric_params):
resource = backend_metric.update(params=backend_updated_metric_params)
asserts.assert_resource(resource)
asserts.assert_resource_params(resource, backend_updated_metric_params)
def test_should_delete_metric(backend, backend_updated_metric_params):
resource = backend.metrics.create(params=backend_updated_metric_params)
assert resource.exists()
resource.delete()
assert not resource.exists()
def test_should_list_metrics(backend):
resources = backend.metrics.list()
assert len(resources) > 1
def test_should_apicast_return_403_when_metric_is_disabled(
service, backend_metric_params, create_backend_mapping_rule,
account, ssl_verify, backend, backend_usage):
"""Metric is disabled when its limit is set to 0."""
proxy = service.proxy.list()
plan = service.app_plans.create(params=dict(name='metrics-disabled'))
application_params = dict(name='metrics-disabled', plan_id=plan['id'],
description='metric disabled')
app = account.applications.create(params=application_params)
back_metric = backend.metrics.create(params=backend_metric_params)
plan.limits(back_metric).create(params=dict(period='month', value=0))
rules = backend.mapping_rules.list()
for rule in rules:
rule.delete()
rule = create_backend_mapping_rule(back_metric, 'GET', '/foo/bah/')
proxy = service.proxy.list()
proxy.deploy()
params = get_user_key_from_application(app, proxy)
client = app.api_client(verify=ssl_verify)
response = make_request(client, backend_usage['path'] + '/' + rule['pattern'])
assert response.status_code == 403
@backoff.on_predicate(backoff.expo, lambda resp: resp.status_code == 200,
max_tries=8)
def make_request(client, path):
return client.get(path=path)
def get_user_key_from_application(app, proxy):
user_key = app['user_key']
user_key_param = proxy['auth_user_key']
return {user_key_param: user_key}
def update_proxy_endpoint(service):
"""Update service proxy.
Bug that if the proxy is not updated the changes applied
to the mapping rules dont take effect."""
service.proxy.update(params={'endpoint': 'http://test.test:80'})
def test_should_apicast_return_429_when_limits_exceeded(
service, application_plan, create_mapping_rule,
apicast_http_client):
metric_params = dict(system_name='limits_exceeded', unit='count',
friendly_name='Limits Exceeded')
metric = service.metrics.create(params=metric_params)
application_plan.limits(metric).create(params=dict(period='day', value=1))
rule = create_mapping_rule(metric, 'GET', '/limits/exceeded/')
update_proxy_endpoint(service)
response = apicast_http_client.get(path=rule['pattern'])
while response.status_code == 200:
response = apicast_http_client.get(path=rule['pattern'])
assert response.status_code == 429
```
#### File: tests/integration/test_integration_default_client.py
```python
def test_read_by_name_account(account, api):
"""Test for read_by_name when entity has entity_name"""
acc = api.accounts.read_by_name(account.entity_name)
assert acc == account
def test_read_by_name_account_plan(account_plan, api):
"""Test for read_by_name when entity hasn't entity_name"""
acc_plan = api.account_plans.read_by_name(account_plan.entity_name)
assert acc_plan == account_plan
def test_read_by_name_application(application, account, api):
"""Test for read_by_name when entity has entity_name"""
app = account.applications.read_by_name(application.entity_name)
assert app == application
```
#### File: tests/integration/test_integration_limit.py
```python
import pytest
from threescale_api.resources import ApplicationPlan, Limits
@pytest.fixture()
def limit_client(application_plan, metric) -> Limits:
return application_plan.limits(metric)
@pytest.fixture()
def limits(metric, application_plan: ApplicationPlan):
params = dict(period='minute', value=10)
application_plan.limits(metric).create(params)
return application_plan.limits(metric).list()
def test_create_limit(limits):
assert limits is not None
limit = limits[0]
assert limit['period'] == 'minute'
assert limit['value'] == 10
```
#### File: tests/integration/test_integration_policies.py
```python
def test_policies_insert_append(proxy):
#test_append
policies = proxy.policies.list()
policy_1 = {
"name": "logging",
"configuration": {},
"version": "builtin",
"enabled": True
}
proxy.policies.append(policy_1)
policies["policies_config"].append(policy_1)
updated_policies = proxy.policies.list()
assert policies["policies_config"] == updated_policies["policies_config"]
#test_insert
policy_2 = {
"name": "echo",
"configuration": {},
"version": "builtin",
"enabled": True
}
proxy.policies.insert(1, policy_2)
updated_policies["policies_config"].insert(1, policy_2)
newly_updated_policies = proxy.policies.list()
assert updated_policies["policies_config"] == newly_updated_policies["policies_config"]
```
#### File: tests/integration/test_integration_pricing_rules.py
```python
import pytest
from threescale_api.resources import ApplicationPlan
@pytest.fixture()
def pricing_rules(metric, application_plan: ApplicationPlan):
params = dict(min=10, max=100, cost_per_unit=20)
application_plan.pricing_rules(metric).create(params)
return application_plan.pricing_rules(metric).list()
def test_create_pricing_rule(pricing_rules):
assert pricing_rules is not None
rule = pricing_rules[0]
assert rule['max'] == 100
assert rule['min'] == 10
assert rule['cost_per_unit'] == '20.0'
```
#### File: tests/integration/test_integration_provider_account_users.py
```python
from tests.integration import asserts
def test_provider_user_can_be_created(provider_account_user, provider_account_params):
asserts.assert_resource(provider_account_user)
asserts.assert_resource_params(provider_account_user, provider_account_params)
def test_provider_user_list(api):
accounts = api.provider_accounts.list()
assert len(accounts) > 0
def test_provider_user_can_be_read(api, provider_account_user, provider_account_params):
account = api.provider_account_users.read(provider_account_user.entity_id)
asserts.assert_resource(account)
asserts.assert_resource_params(account, provider_account_params)
def test_resource_role_change(provider_account_user):
assert provider_account_user['role'] == 'member'
updated = provider_account_user.set_role_admin()
assert updated['role'] == 'admin'
def test_api_role_change(api, provider_account_user):
assert provider_account_user['role'] == 'member'
updated = api.provider_account_users.set_role_admin(provider_account_user.entity_id)
assert updated['role'] == 'admin'
def test_api_read_permissions(api, provider_account_user):
provider_account_user.set_role_admin()
response = api.provider_account_users.permissions_read(provider_account_user.entity_id)
permissions = response['permissions']
assert 'portal' in permissions['allowed_sections']
def test_resource_read_permissions(provider_account_user):
provider_account_user.set_role_admin()
response = provider_account_user.permissions_read()
permissions = response['permissions']
assert 'portal' in permissions['allowed_sections']
def test_resource_update_permissions(service, provider_account_user):
provider_account_user.set_role_member()
response = provider_account_user.permissions_update()
permissions = response['permissions']
assert 'portal' not in permissions['allowed_sections']
assert service['id'] not in permissions['allowed_service_ids']
response = provider_account_user.permissions_update(
allowed_services=[service['id']], allowed_sections=['portal'])
permissions = response['permissions']
assert 'portal' in permissions['allowed_sections']
assert service['id'] in permissions['allowed_service_ids']
```
#### File: tests/integration/test_integration_services.py
```python
from tests.integration import asserts
from threescale_api.resources import Proxy, Service
from .asserts import assert_resource, assert_resource_params
def test_3scale_url_is_set(api, url, token):
assert url is not None
assert token is not None
assert api.url is not None
def test_services_list(api):
services = api.services.list()
assert len(services) >= 1
def test_service_can_be_created(api, service_params, service):
assert_resource(service)
assert_resource_params(service, service_params)
def test_service_can_be_read(api, service_params, service):
read = api.services.read(service.entity_id)
asserts.assert_resource(read)
asserts.assert_resource_params(read, service_params)
def test_service_can_be_read_by_name(api, service_params, service):
account_name = service['system_name']
read = api.services[account_name]
asserts.assert_resource(read)
asserts.assert_resource_params(read, service_params)
def test_service_can_be_updated(api, service):
assert service['backend_version'] == '1'
service['backend_version'] = '2'
service.update()
assert service['backend_version'] == '2'
updated = service.read()
assert updated['backend_version'] == '2'
assert service['backend_version'] == '2'
def test_service_get_proxy(api, service: Service, proxy: Proxy, api_backend):
assert proxy['api_backend'] == api_backend
assert proxy['api_test_path'] == '/get'
def test_service_set_proxy(api, service: Service, proxy: Proxy, api_backend):
updated = proxy.update(params=dict(api_test_path='/ip'))
assert updated['api_backend'] == api_backend
assert updated['api_test_path'] == '/ip'
def test_service_proxy_promote(service, proxy):
res = proxy.promote()
assert res is not None
assert res['environment'] == 'production'
assert res['content'] is not None
def test_service_proxy_deploy(service, proxy):
# this will not propagate to proxy config but it allows deployment
proxy.update(params=dict(support_email='<EMAIL>'))
proxy.deploy()
res = proxy.configs.list(env='staging')
proxy_config = res.entity['proxy_configs'][-1]['proxy_config']
assert proxy_config is not None
assert proxy_config['environment'] == 'sandbox'
assert proxy_config['content'] is not None
assert proxy_config['version'] > 1
def test_service_list_configs(service, proxy):
res = proxy.configs.list(env='staging')
assert res
item = res[0]
assert item
def test_service_proxy_configs_version(service, proxy):
config = service.proxy.list().configs.version(version=1)
assert config
assert config['environment'] == "sandbox"
assert config['version'] == 1
assert config['content']
def test_service_proxy_configs_latest(service, proxy):
config = service.proxy.list().configs.latest()
assert config
assert config['environment'] == "sandbox"
assert config['version']
assert config['content']
def test_service_proxy_configs_list_length(service, proxy):
configs = service.proxy.list().configs.list(env="sandbox")
length = len(configs)
proxy.update(params=dict(api_test_path='/ip'))
configs = service.proxy.list().configs.list(env="sandbox")
assert len(configs) == length + 1
def test_service_mapping_rules(service):
map_rules = service.mapping_rules.list()
assert len(map_rules) >= 1
def test_service_backend_usages_backend(backend_usage, backend):
assert backend_usage.backend.entity_id == backend.entity_id
def test_service_active_docs(service, active_doc):
assert all([acs['service_id'] == service['id'] for acs in service.active_docs.list()])
```
#### File: 3scale-api-python/threescale_api/defaults.py
```python
import logging
from typing import Dict, List, Optional, TYPE_CHECKING, Union, Any, Iterator
import collections.abc
import requests
from threescale_api import utils
if TYPE_CHECKING:
from threescale_api.client import ThreeScaleClient, RestApiClient
log = logging.getLogger(__name__)
class DefaultClient(collections.abc.Mapping):
def __init__(self, parent=None, instance_klass=None,
entity_name: str = None, entity_collection: str = None):
"""Creates instance of the default client
Args:
parent: Parent resource or client
instance_klass: Which class should be used to instantiate the resource
entity_name(str): Entity name - required for extraction
entity_collection(str): Collection name - required for extraction
"""
self._parent = parent
self._instance_klass = instance_klass
self._entity_name = entity_name
if entity_collection is None and entity_name is not None:
entity_collection = f'{entity_name}s'
self._entity_collection = entity_collection
@property
def url(self) -> str:
"""Default url for the resources collection
Returns(str): URL
"""
return self.threescale_client.admin_api_url
@property
def threescale_client(self) -> 'ThreeScaleClient':
"""Gets instance of the 3scale default client
        Returns(ThreeScaleClient): 3scale client
"""
return self.parent.threescale_client
@property
def parent(self) -> 'DefaultResource':
""" Instance of the parent resource
        Returns(DefaultResource): Parent of the client is a subclass of the default resource
"""
return self._parent
@property
def rest(self) -> 'RestApiClient':
"""Rest API client for the 3scale instance
Returns(RestApiClient):
"""
return self.threescale_client.rest
def list(self, **kwargs) -> List['DefaultResource']:
"""List all entities
Args:
**kwargs: Optional parameters
        Returns(List['DefaultResource']): List of resources
"""
log.info(self._log_message("[LIST] List", args=kwargs))
instance = self._list(**kwargs)
return instance
def create(self, params: dict = None, **kwargs) -> 'DefaultResource':
"""Create a new instance
Args:
params: Parameters required to create new instance
**kwargs: Optional parameters
Returns:
"""
log.info(self._log_message("[CREATE] Create new ", body=params, args=kwargs))
url = self._entity_url()
response = self.rest.post(url=url, json=params, **kwargs)
instance = self._create_instance(response=response)
return instance
def delete(self, entity_id: int = None, **kwargs) -> bool:
"""Delete resource
Args:
entity_id(int): Entity id
**kwargs: Optional args
Returns(bool): True if the resource has been successfully deleted
"""
log.info(self._log_message("[DELETE] Delete ", entity_id=entity_id, args=kwargs))
url = self._entity_url(entity_id=entity_id)
response = self.rest.delete(url=url, **kwargs)
return response.ok
def exists(self, entity_id=None, **kwargs) -> bool:
"""Check whether the resource exists
Args:
entity_id(int): Entity id
**kwargs: Optional args
Returns(bool): True if the resource exists
"""
log.info(self._log_message("[EXIST] Resource exist ", entity_id=entity_id, args=kwargs))
url = self._entity_url(entity_id=entity_id)
response = self.rest.get(url=url, throws=False, **kwargs)
return response.ok
def update(self, entity_id=None, params: dict = None, **kwargs) -> 'DefaultResource':
"""Update resource
Args:
entity_id(int): Entity id
params(dict): Params to be updated
**kwargs: Optional args
Returns(DefaultResource): Resource instance
"""
log.info(self._log_message("[UPDATE] Update ", body=params,
entity_id=entity_id, args=kwargs))
url = self._entity_url(entity_id=entity_id)
response = self.rest.put(url=url, json=params, **kwargs)
instance = self._create_instance(response=response)
return instance
def fetch(self, entity_id: int = None, **kwargs) -> dict:
"""Fetch the entity dictionary
Args:
entity_id(int): Entity id
**kwargs: Optional args
Returns(dict): Resource dict from the 3scale
"""
log.debug(self._log_message("[FETCH] Fetch ", entity_id=entity_id, args=kwargs))
url = self._entity_url(entity_id=entity_id)
response = self.rest.get(url=url, **kwargs)
return utils.extract_response(response=response, entity=self._entity_name)
def __getitem__(self, selector: Union[int, 'str']) -> 'DefaultResource':
"""Gets the item
Args:
selector(Union[int, 'str']): Selector whether id or string
Returns(DefaultResource): Resource instance
"""
if isinstance(selector, int):
return self.read(selector)
return self.read_by_name(selector)
def __len__(self) -> int:
return len(self._list())
    def __iter__(self) -> Iterator['DefaultResource']:
        return iter(self._list())
def read(self, entity_id: int = None) -> 'DefaultResource':
"""Read the instance, read will just create empty resource and lazyloads only if needed
Args:
entity_id(int): Entity id
Returns(DefaultResource): Default resource
"""
log.debug(self._log_message("[READ] Read ", entity_id=entity_id))
return self._instance_klass(client=self, entity_id=entity_id)
def read_by_name(self, name: str, **kwargs) -> 'DefaultResource':
"""Read resource by name
Args:
name: Name of the resource (either system name, name, org_name ...)
**kwargs:
Returns:
"""
for item in self._list(**kwargs):
if item.entity_name and item.entity_name == name:
return item
def select(self, predicate, **kwargs) -> List['DefaultResource']:
"""Select resource s based on the predicate
Args:
predicate: Predicate
**kwargs: Optional args
Returns: List of resources
"""
return [item for item in self._list(**kwargs) if predicate(item)]
def select_by(self, **params) -> List['DefaultResource']:
"""Select by params - logical and
Args:
**params: params used for selection
Returns: List of resources
"""
log.debug(f"[SELECT] By params: {params}")
def predicate(item):
for (key, val) in params.items():
if item[key] != val:
return False
return True
return self.select(predicate=predicate)
def read_by(self, **params) -> 'DefaultResource':
"""Read by params - it will return just one instance of the resource
Args:
**params: params used for selection
Returns(DefaultResource): Resource instance
"""
result = self.select_by(**params)
return result[0] if result else None
def _log_message(self, message, entity_id=None, body=None, args=None) -> str:
msg = f"{message} {self._instance_klass.__name__}"
if entity_id:
msg += f"({entity_id}))"
if body:
msg += f" {body}"
if args:
msg += f" args={args}"
return msg
def _list(self, **kwargs) -> List['DefaultResource']:
"""Internal list implementation used in list or `select` methods
Args:
**kwargs: Optional parameters
Returns(List['DefaultResource']):
"""
url = self._entity_url()
response = self.rest.get(url=url, **kwargs)
instance = self._create_instance(response=response, collection=True)
return instance
def _entity_url(self, entity_id=None) -> str:
if not entity_id:
return self.url
return self.url + '/' + str(entity_id)
def _create_instance(self, response: requests.Response, klass=None, collection: bool = False):
klass = klass or self._instance_klass
extracted = self._extract_resource(response, collection)
instance = self._instantiate(extracted=extracted, klass=klass)
log.debug(f"[INSTANCE] Created instance: {instance}")
return instance
def _extract_resource(self, response, collection) -> Union[List, Dict]:
extract_params = dict(response=response, entity=self._entity_name)
if collection:
extract_params['collection'] = self._entity_collection
extracted = utils.extract_response(**extract_params)
return extracted
def _instantiate(self, extracted, klass):
if isinstance(extracted, list):
instance = [self.__make_instance(item, klass) for item in extracted]
return instance
return self.__make_instance(extracted, klass)
def __make_instance(self, extracted: dict, klass):
instance = klass(client=self, entity=extracted) if klass else extracted
return instance
class DefaultResource(collections.abc.MutableMapping):
def __init__(self, client: DefaultClient = None, entity_id: int = None, entity_name: str = None,
entity: dict = None):
"""Create instance of the resource
Args:
client: Client instance of the resource
entity_id(int): Entity id
entity_name(str): Entity name field (system_name or name ...)
entity(dict): Entity instance
"""
self._entity_id = entity_id or entity.get('id')
self._entity = entity
self._client = client
self._entity_name = entity_name
@property
def threescale_client(self) -> 'ThreeScaleClient':
return self.client.threescale_client
@property
def parent(self) -> 'DefaultResource':
return self.client.parent
@property
def entity_name(self) -> Optional[str]:
return self[self._entity_name]
@property
def url(self) -> str:
return self.client.url + f"/{self.entity_id}"
@property
def entity(self) -> dict:
self._lazy_load()
return self._entity
@property
def client(self) -> DefaultClient:
return self._client
@property
def entity_id(self) -> int:
return self._entity_id or self._entity.get('id')
def __getitem__(self, item: str):
return self.entity.get(item)
def __setitem__(self, key: str, value):
self.set(key, value)
def __delitem__(self, key: str):
del self.entity[key]
def __len__(self) -> int:
return len(self.entity)
def __iter__(self) -> Iterator:
return iter(self.entity)
def __str__(self) -> str:
return self.__class__.__name__ + f"({self.entity_id}): " + str(self.entity)
def __repr__(self) -> str:
return str(self)
def __eq__(self, other) -> bool:
return (
self.__class__ == other.__class__ and
self.entity_name == other.entity_name and
self.entity_id == other.entity_id
)
def get(self, item):
return self.entity.get(item)
def set(self, item: str, value: Any):
self.entity[item] = value
def _lazy_load(self, **kwargs) -> 'DefaultResource':
if not self._entity:
# Lazy load the entity
self._entity = self.fetch(**kwargs)
return self
def read(self, **kwargs) -> 'DefaultResource':
self._invalidate()
self._lazy_load(**kwargs)
return self
def fetch(self, **kwargs) -> dict:
return self.client.fetch(self.entity_id, **kwargs)
def exists(self, **kwargs) -> bool:
return self.client.exists(entity_id=self.entity_id, **kwargs)
def delete(self, **kwargs):
self.client.delete(entity_id=self.entity_id, **kwargs)
def update(self, params: dict = None, **kwargs) -> 'DefaultResource':
new_params = {**self.entity}
if params:
new_params.update(params)
new_entity = self.client.update(entity_id=self.entity_id, params=new_params, **kwargs)
self._entity = new_entity.entity
return self
def _invalidate(self):
self._entity = None
class DefaultPlanClient(DefaultClient):
def set_default(self, entity_id: int, **kwargs) -> 'DefaultPlanResource':
"""Sets default plan for the entity
Args:
entity_id: Entity id
**kwargs: Optional args
Returns(DefaultPlanResource):
"""
log.info(self._log_message("[PLAN] Set default ", entity_id=entity_id, args=kwargs))
url = self._entity_url(entity_id) + '/default'
response = self.rest.put(url=url, **kwargs)
instance = self._create_instance(response=response)
return instance
def get_default(self, **kwargs) -> Optional['DefaultResource']:
"""Get default plan if set
Args:
**kwargs: Optional arguments
Returns(DefaultResource): Resource instance
"""
default = self.select(lambda x: x.is_default, **kwargs)
if default:
return default[0]
return None
class DefaultPlanResource(DefaultResource):
def __init__(self, entity_name='system_name', **kwargs):
super().__init__(entity_name=entity_name, **kwargs)
def set_default(self, **kwargs) -> 'DefaultStateResource':
"""Set the plan default
Args:
**kwargs: Optional args
Returns(DefaultStateResource): State resource instance
"""
return self.client.set_default(entity_id=self.entity_id, **kwargs)
@property
def is_default(self) -> bool:
return self['default'] is True
class DefaultStateClient(DefaultClient):
def set_state(self, entity_id, state: str, **kwargs):
"""Sets the state for the resource
Args:
entity_id(int): Entity id
state(str): Which state
**kwargs: Optional args
Returns(DefaultStateResource): State resource instance
"""
log.info(self._log_message("[STATE] Set state ", body=f"[{state}]", args=kwargs))
url = self._entity_url(entity_id) + '/' + state
response = self.rest.put(url=url, **kwargs)
instance = self._create_instance(response=response)
return instance
class DefaultStateResource(DefaultResource):
def set_state(self, state: str, **kwargs) -> 'DefaultStateResource':
"""Sets the state for the resource
Args:
state(str): Which state
**kwargs: Optional args
Returns(DefaultStateResource): State resource instance
"""
return self.client.set_state(entity_id=self.entity_id, state=state, **kwargs)
class DefaultUserResource(DefaultStateResource):
def __init__(self, entity_name='username', **kwargs):
super().__init__(entity_name=entity_name, **kwargs)
def suspend(self, **kwargs) -> 'DefaultUserResource':
"""Suspends the user
Args:
**kwargs: Optional arguments
Returns(DefaultUserResource): User instance
"""
return self.set_state(state='suspend', **kwargs)
def resume(self, **kwargs):
"""Resumes the user
Args:
**kwargs: Optional arguments
Returns(DefaultUserResource): User instance
"""
return self.set_state(state='resume', **kwargs)
def activate(self, **kwargs):
"""Activates the user
Args:
**kwargs: Optional arguments
Returns(DefaultUserResource): User instance
"""
return self.set_state(state='activate', **kwargs)
def set_as_admin(self, **kwargs):
"""Promotes the user to admin
Args:
**kwargs: Optional arguments
Returns(DefaultUserResource): User instance
"""
return self.set_state(state='set_as_admin', **kwargs)
def set_as_member(self, **kwargs):
"""Demotes the user to s member
Args:
**kwargs: Optional arguments
Returns(DefaultUserResource): User instance
"""
return self.set_state(state='set_as_member', **kwargs)
```
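A hedged sketch of how these base classes are meant to be subclassed; `Widget`, `Widgets` and the `/widgets` path are illustrative names, not resources the library actually defines. The client subclass supplies the collection URL and entity names, the resource subclass names its identifying field.
```python
from threescale_api.defaults import DefaultClient, DefaultResource


class Widget(DefaultResource):
    """Hypothetical resource; 'name' is its identifying field."""
    def __init__(self, entity_name='name', **kwargs):
        super().__init__(entity_name=entity_name, **kwargs)


class Widgets(DefaultClient):
    """Hypothetical collection client mounted under the admin API."""
    def __init__(self, *args, entity_name='widget', entity_collection='widgets', **kwargs):
        super().__init__(*args, entity_name=entity_name,
                         entity_collection=entity_collection, **kwargs)

    @property
    def url(self) -> str:
        return self.threescale_client.admin_api_url + '/widgets'


# The generic CRUD surface defined above then applies, e.g.:
#   widgets = Widgets(parent=threescale_client, instance_klass=Widget)
#   widgets.create(params={"name": "w1"}); widgets.list(); widgets["w1"]
```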
#### File: 3scale-api-python/threescale_api/utils.py
```python
import logging
import shlex
from typing import Union, Iterable
from urllib.parse import urljoin
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
logger = logging.getLogger(__name__)
def extract_response(response: requests.Response, entity: str = None,
collection: str = None) -> Union[dict, list]:
"""Extract the response from the response
Args:
response(requests.Response): Response
entity(str): entity name to be extracted
collection(str): collection name to be extracted
Returns(Union[dict, list]): Extracted entity or list of entities
"""
extracted: dict = response.json()
if collection and collection in extracted:
extracted = extracted.get(collection)
if isinstance(extracted, list):
return [value.get(entity) for value in extracted]
if entity in extracted.keys():
return extracted.get(entity)
return extracted
class HttpClient:
"""3scale specific!!! HTTP Client
This provides client to easily run api calls against provided service.
Due to some delays in the infrastructure the client is configured to retry
calls under certain conditions. To modify this behavior customized session
has to be passed. session has to be fully configured in such case
(e.g. including authentication"
:param app: Application for which client should do the calls
:param endpoint: either 'sandbox_endpoint' (staging) or 'endpoint' (production),
defaults to sandbox_endpoint
:param verify: SSL verification
:param cert: path to certificate
:param disable_retry_status_list:
        Iterable collection of status codes that should not be retried by requests
"""
def __init__(self, app, endpoint: str = "sandbox_endpoint",
verify: bool = None, cert=None, disable_retry_status_list: Iterable = ()):
self._app = app
self._endpoint = endpoint
self.verify = verify if verify is not None else app.api_client_verify
self.cert = cert
self._status_forcelist = {503, 404} - set(disable_retry_status_list)
self.auth = app.authobj()
self.session = self._create_session()
logger.debug("[HTTP CLIENT] New instance: %s", self._base_url)
def close(self):
"""Close requests session"""
self.session.close()
@staticmethod
def retry_for_session(session: requests.Session, status_forcelist: Iterable, total: int = 8):
retry = Retry(
total=total,
backoff_factor=1,
status_forcelist=status_forcelist,
raise_on_status=False,
respect_retry_after_header=False
)
adapter = HTTPAdapter(max_retries=retry)
session.mount("https://", adapter)
session.mount("http://", adapter)
@property
def _base_url(self) -> str:
"""Determine right url at runtime"""
return self._app.service.proxy.fetch()[self._endpoint]
def _create_session(self):
"""Creates session"""
session = requests.Session()
self.retry_for_session(session, self._status_forcelist)
return session
def extend_connection_pool(self, maxsize: int):
"""Extend connection pool"""
self.session.adapters["https://"].poolmanager.connection_pool_kw["maxsize"] = maxsize
self.session.adapters["https://"].poolmanager.clear()
def request(self, method, path,
params=None, data=None, headers=None, cookies=None, files=None,
auth=None, timeout=None, allow_redirects=True, proxies=None,
hooks=None, stream=None, json=None) -> requests.Response:
"""mimics requests interface"""
url = urljoin(self._base_url, path)
session = self.session
session.auth = auth or self.auth
req = requests.Request(
method=method.upper(),
url=url,
headers=headers,
files=files,
data=data or {},
json=json,
params=params or {},
auth=auth,
cookies=cookies,
hooks=hooks,
)
prep = session.prepare_request(req)
logger.info("[CLIENT]: %s", request2curl(prep))
send_kwargs = {
"timeout": timeout,
"allow_redirects": allow_redirects
}
proxies = proxies or {}
send_kwargs.update(
session.merge_environment_settings(prep.url, proxies, stream, self.verify, self.cert))
response = session.send(prep, **send_kwargs)
logger.info("\n".join(["[CLIENT]:", response2str(response)]))
return response
def get(self, *args, **kwargs) -> requests.Response:
"""mimics requests interface"""
return self.request('GET', *args, **kwargs)
def post(self, *args, **kwargs) -> requests.Response:
"""mimics requests interface"""
return self.request('POST', *args, **kwargs)
def patch(self, *args, **kwargs) -> requests.Response:
"""mimics requests interface"""
return self.request('PATCH', *args, **kwargs)
def put(self, *args, **kwargs) -> requests.Response:
"""mimics requests interface"""
return self.request('PUT', *args, **kwargs)
def delete(self, *args, **kwargs) -> requests.Response:
"""mimics requests interface"""
return self.request('DELETE', *args, **kwargs)
def request2curl(request: requests.PreparedRequest) -> str:
"""Create curl command corresponding to given request"""
cmd = ["curl", "-X %s" % shlex.quote(request.method)]
if request.headers:
cmd.extend([
"-H %s" % shlex.quote(f"{key}: {value}")
for key, value in request.headers.items()])
if request.body:
body = request.body
if isinstance(body, bytes):
body = body.decode("utf-8")
if len(body) > 160:
body = body[:160] + "..."
cmd.append("-d %s" % shlex.quote(body))
cmd.append(shlex.quote(request.url))
return " ".join(cmd)
def response2str(response: requests.Response):
"""Return string representation of requests.Response"""
# Let's cheat with protocol, hopefully no-one will ever notice this ;)
msg = [f"HTTP/1.1 {response.status_code} {response.reason}"]
for key in response.headers:
msg.append(f"{key}: {response.headers[key]}")
msg.append("")
body = response.text
if len(body) > 160:
body = body[:160] + "..."
msg.append(body)
return "\n".join(msg)
```
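A self-contained sketch of what `request2curl` produces for a prepared request, assuming this module is importable as `threescale_api.utils`; the URL is a placeholder.
```python
import requests

from threescale_api.utils import request2curl

# Placeholder URL; prepare() upper-cases the method and encodes the query params.
req = requests.Request(
    method="get",
    url="https://example.invalid/admin/api/services.json",
    headers={"Accept": "application/json"},
    params={"page": 1},
).prepare()

print(request2curl(req))
# roughly: curl -X GET -H 'Accept: application/json' 'https://example.invalid/admin/api/services.json?page=1'
```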
{
"source": "3scale-qe/3scale-tests",
"score": 2
} |
#### File: tests/ui/test_account_search.py
```python
import pytest
from testsuite import rawobj
from testsuite.ui.views.admin.audience.account import AccountsView
from testsuite.utils import blame
pytestmark = pytest.mark.issue("https://issues.redhat.com/browse/THREESCALE-5486")
@pytest.fixture(scope="module")
def ui_application(service, custom_app_plan, custom_ui_application, request):
"""Create an application through UI"""
def _ui_application(account):
name = blame(request, "ui_account")
plan = custom_app_plan(rawobj.ApplicationPlan(blame(request, "aplan")), service)
return custom_ui_application(name, "description", plan, account, service)
return _ui_application
# pylint: disable=unused-argument
@pytest.mark.issue("https://issues.redhat.com/browse/THREESCALE-6205")
def test_search_account(login, navigator, custom_ui_account, ui_application, request):
"""
Preparation:
- Create custom account
- Create custom application
Test if:
- you search account by org_name it will return the correct one
- you search account by username it will return the correct one
- you search account by email it will return the correct one
- you search account by application name it will return the correct one
"""
username = blame(request, "username")
org_name = blame(request, "org_name")
email = f"{<EMAIL>"
account = custom_ui_account(username, email, "123456", org_name)
app = ui_application(account)
accounts = navigator.navigate(AccountsView)
for key in [org_name, username, email, app["name"]]:
accounts.search(key)
assert accounts.table.row()[1].text == org_name
assert accounts.table.row()[5].text == "Approved"
```
{
"source": "3schwartz/AdventOfCode",
"score": 3
} |
#### File: python/day4/day4_func.py
```python
from __future__ import annotations
from itertools import groupby
from abc import ABC, abstractmethod
from typing import List
class PasswordFinder:
def __init__(self, start: int, end: int) -> None:
self.start = start
self.end = end
def getPasswords(self):
return [s for s in range(self.start, self.end + 1)]
class Validation(ABC):
@abstractmethod
def validate(self, password: str) -> bool:
pass
class IncreasingValidation(Validation):
def validate(self, password: str) -> bool:
return all([a <= b for a, b in zip(password, password[1:])])
class TwoSequentuallyEqual(Validation):
def validate(self, password: str) -> bool:
return any([a == b for a, b in zip(password, password[1:])])
class TwoEqual(Validation):
def validate(self, password: str) -> bool:
return any([len(list(k)) == 2 for _, k in groupby(password)])
class PasswordValidator:
def __init__(self):
self._validations = None
@property
def validations(self):
return self._validations
@validations.setter
def validations(self, validationsInput: List[Validation]) -> None:
self._validations = validationsInput
def validatePassword(self, password: int):
return all(validation.validate(str(password)) for validation in self.validations)
def numberValidPassword(self, passwords: List[int]):
return sum([self.validatePassword(password) for password in passwords])
```
#### File: python/day4/test_day4.py
```python
import unittest
from day4_func import *
class TestDay4(unittest.TestCase):
def test_when_given_range_then_return_correct_count(self):
# Arrange
passwords = [<PASSWORD>, 1<PASSWORD>5]
validator = PasswordValidator()
validations = [TwoSequentuallyEqual(), IncreasingValidation()]
validator.validations = validations
# Act
numberValidPasswords = validator.numberValidPassword(passwords)
# Assert
self.assertEqual(numberValidPasswords, 1)
def test_wrong_password_validate_false(self):
# Arrange
password = <PASSWORD>
validator = PasswordValidator()
validations = [TwoSequentuallyEqual(), IncreasingValidation()]
validator.validations = validations
# Act
valid = validator.validatePassword(password)
# Assert
self.assertEqual(valid, False)
def test_correct_password_validate_true(self):
# Arrange
password = <PASSWORD>
validator = PasswordValidator()
validations = [TwoSequentuallyEqual(), IncreasingValidation()]
validator.validations = validations
# Act
valid = validator.validatePassword(password)
# Assert
self.assertEqual(valid, True)
def test_two_equal_false(self):
# Arrange
password = "<PASSWORD>"
validation = TwoEqual()
# Act
valid = validation.validate(password)
# Assert
self.assertEqual(valid, False)
def test_two_equal_correct(self):
# Arrange
password = "<PASSWORD>"
validation = TwoEqual()
# Act
valid = validation.validate(password)
# Assert
self.assertEqual(valid, True)
def test_two_sequentually_equal_correct(self):
# Arrange
password = "<PASSWORD>"
validation = TwoSequentuallyEqual()
# Act
valid = validation.validate(password)
# Assert
self.assertEqual(valid, True)
def test_two_sequentually_equal_false(self):
# Arrange
password = "<PASSWORD>"
validation = TwoSequentuallyEqual()
# Act
valid = validation.validate(password)
# Assert
self.assertEqual(valid, False)
def test_increasing_validation_not_all(self):
# Arrange
password = "<PASSWORD>"
validation = IncreasingValidation()
# Act
valid = validation.validate(password)
# Assert
self.assertEqual(valid, False)
def test_increasing_validation_all_decrease(self):
# Arrange
password = "<PASSWORD>"
validation = IncreasingValidation()
# Act
valid = validation.validate(password)
# Assert
self.assertEqual(valid, False)
def test_increasing_validation_correct(self):
# Arrange
password = "<PASSWORD>"
validation = IncreasingValidation()
# Act
valid = validation.validate(password)
# Assert
self.assertEqual(valid, True)
def test_password_finder_correct_range(self):
# Arrange
start = 1
end = 3
passwordFinder = PasswordFinder(start, end)
# Act
passwords = passwordFinder.getPasswords()
# Assert
self.assertEqual(len(passwords), 3)
self.assertEqual(passwords[0], 1)
self.assertEqual(passwords[1], 2)
self.assertEqual(passwords[2], 3)
```
#### File: AdventOfCode/year2020/day22.py
```python
import os
import sys
players = open(os.path.join(sys.path[0], 'day22_data.txt')).read().strip().split('\n\n')
player1, player2 = [[int(line) for line in player.split('\n')[1:]] for player in players]
while len(player1) > 0 and len(player2) > 0:
c1, c2 = player1.pop(0), player2.pop(0)
if c1 > c2:
player1.extend([c1, c2])
else:
player2.extend([c2, c1])
winner = player1 if len(player1) > 0 else player2
print(f"Part 1: {sum(((i + 1) * card for i, card in enumerate(winner[::-1])))}")
def recursive_war(p1: list, p2: list, visited: set):
while len(p1) > 0 and len(p2) > 0:
visit = (tuple(p1), tuple(p2))
if visit in visited:
return 1, p1
visited.add(visit)
card1, card2 = p1.pop(0), p2.pop(0)
if len(p1) >= card1 and len(p2) >= card2:
win, _ = recursive_war(p1[:card1], p2[:card2], set())
else:
win = 1 if card1 > card2 else 0
if win == 1:
p1.extend([card1, card2])
else:
p2.extend([card2, card1])
return (1, p1) if len(p1) > 0 else (0, p2)
player1, player2 = [[int(line) for line in player.split('\n')[1:]] for player in players]
_, cards = recursive_war(player1, player2, set())
print(f"Part 2: {sum((i+1) * card for i, card in enumerate(cards[::-1]))}")
```
#### File: python/day4/day4_func.py
```python
from typing import List
import numpy as np
class BingoGame:
def __init__(self):
self.sequence = []
self.boards = []
def setUpGame(self, input: List[str]) -> None:
self.sequence = [int(number) for number in input[0].split(',')]
self.boards = []
for board in input[1:]:
self.boards.append(Board(board))
def winGame(self) -> int:
for number in self.sequence:
for board in self.boards:
boardBingo = board.notify(number)
if boardBingo:
return board.getScore(number)
def loseGame(self) -> int:
winners = set()
for number in self.sequence:
for idx in range(len(self.boards)):
if idx in winners:
continue
board = self.boards[idx]
boardBingo = board.notify(number)
if boardBingo:
winners.add(idx)
if len(winners) == len(self.boards):
return board.getScore(number)
class Board:
def __init__(self, inputBoard: str):
board = []
for line in inputBoard.lstrip().replace('  ', ' ').replace('\n ', '\n').split('\n'):  # collapse the double spaces used to right-align single-digit numbers
board.append([int(number) for number in line.split(' ')])
self.board = np.array(board)
self.notified = np.zeros(self.board.shape, dtype=bool)
def notify(self, number: int) -> bool:
idx = np.where(self.board == number)
for id in zip(idx[0], idx[1]):
self.notified[id] = 1
return self.validateBingo()
def validateBingo(self):
for column in self.notified.transpose():
if sum(column) == self.notified.shape[1]:
return True
for row in self.notified:
if sum(row) == self.notified.shape[0]:
return True
return False
def getScore(self, lastNumber: int):
notNotified = self.board[~self.notified]
return sum(notNotified) * lastNumber
```
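A hedged usage sketch for `BingoGame`/`Board` follows; the module name is assumed from the file path, and the tiny 3x3 board mirrors the expected input format (rows separated by newlines, single-digit numbers padded with a leading space).
```python
# Hedged usage sketch; module name assumed from the file path above.
from day4_func import BingoGame

game = BingoGame()
game.setUpGame([
    "7,4,9,5,11",                     # draw sequence
    " 7  4  9\n 5 11  1\n 2  3  6",   # one small board
])
print(game.winGame())   # top row completes on 9; unmarked sum 28 * 9 = 252
```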
#### File: python/day6/day6_func.py
```python
from typing import List
from collections import Counter
class FishCreator:
@staticmethod
def initFishes(intputStr: str) -> List[int]:
fishes = [0] * 9
initialFishes = Counter(intputStr.split(','))
for fishKey in initialFishes:
fishes[int(fishKey)] = initialFishes[fishKey]
return fishes
class FishSpawn:
def spawn(self, fishesInit: List[int], days: int):
fishes = fishesInit.copy()
for i in range(days):
fishes += [fishes.pop(0)]
fishes[6] += fishes[8]
return sum(fishes)
```
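A hedged usage sketch (module name assumed from the file path); the initial state and the expected totals are the Advent of Code 2021 day 6 example values.
```python
# Hedged usage sketch; the expected counts come from the puzzle's worked example.
from day6_func import FishCreator, FishSpawn

fishes = FishCreator.initFishes("3,4,3,1,2")
assert FishSpawn().spawn(fishes, 18) == 26
assert FishSpawn().spawn(fishes, 80) == 5934
```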
#### File: python/day7/day7_func.py
```python
from abc import ABC, abstractmethod
from collections import Counter
class Minimizer(ABC):
@abstractmethod
def getLowestFuel(self, crabsHorizontalPosition: str) -> int:
pass
def getLowestFuelWithCustomStep(self, crabsHorizontalPosition: str, step) -> int:
ints = [int(number) for number in crabsHorizontalPosition.split(",")]
count = Counter(ints)
fuelOnIndex = [0] * (max(ints) + 1)
for i in range(max(ints) + 1):
fuel = 0
for position in count:
fuel += step(abs(i - position)) * count[position]
fuelOnIndex[i] = fuel
minFuel = min(fuelOnIndex)
return minFuel
class BinomialMinimizer(Minimizer):
def getLowestFuel(self, crabsHorizontalPosition: str) -> int:
minFuel = self.getLowestFuelWithCustomStep(
crabsHorizontalPosition,
lambda difference: int(difference * (difference + 1) / 2))
return minFuel
class StepMinimizer(Minimizer):
def getLowestFuel(self, crabsHorizontalPosition: str) -> int:
minFuel = self.getLowestFuelWithCustomStep(
crabsHorizontalPosition,
lambda difference: difference)
return minFuel
```
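A hedged usage sketch (module name assumed from the file path); the positions and expected fuel costs are the Advent of Code 2021 day 7 example: 37 with constant steps, 168 with triangular steps.
```python
# Hedged usage sketch; expected values come from the puzzle's worked example.
from day7_func import StepMinimizer, BinomialMinimizer

positions = "16,1,2,0,4,2,7,1,2,14"
assert StepMinimizer().getLowestFuel(positions) == 37
assert BinomialMinimizer().getLowestFuel(positions) == 168
```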
#### File: python/day9/day9_func.py
```python
from typing import List, Dict
import numpy as np
from functools import reduce, partial
class HeightCreator:
@staticmethod
def createHeightMap(inputMatrix: List[str]):
heights = {(i, j): int(height) for i, l in enumerate(inputMatrix)
for j, height in enumerate(l.strip())}
return heights
class HeightAnalyzer:
def getNeighbours(self, row: int, col: int, heights: Dict[tuple, int]) -> List[tuple]:
neighboursPositions = [(row, col - 1), (row, col + 1), (row - 1, col), (row + 1, col)]
neighbours = [position for position in neighboursPositions if position in heights]
return neighbours
def isLow(self, position: tuple, heights: Dict[tuple, int]) -> bool:
lowerNeighbours = [heights[position] < heights[neighbour]
for neighbour in self.getNeighbours(*position, heights)]
return all(lowerNeighbours)
def getLowest(self, heights: Dict[tuple, int]):
lowPosition = [position for position in heights if self.isLow(position, heights)]
return lowPosition
def getRisk(self, heights: Dict[tuple, int]) -> int:
lowPositions = self.getLowest(heights)
return sum(heights[position] + 1 for position in lowPositions)
def getBasinSize(self, position: tuple, heights: Dict[tuple, int]) -> int:
if position not in heights: return 0
if heights[position] == 9: return 0
del heights[position]
nextBasin = partial(self.getBasinSize, heights=heights)
basinCount = 1 + sum(map(nextBasin,
self.getNeighbours(*position, heights)))
return basinCount
def getBasinMax(self, heights: Dict[tuple, int]) -> int:
lowPositions = self.getLowest(heights)
basinSizes = [self.getBasinSize(position, heights.copy())
for position in lowPositions]
basinMaxSize = reduce(lambda a, b: a * b,
sorted(basinSizes, reverse=True)[:3])
return basinMaxSize
``` |
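A hedged usage sketch (module name assumed from the file path); the grid and the expected results (risk level 15, product of the three largest basins 1134) are the Advent of Code 2021 day 9 example.
```python
# Hedged usage sketch; expected values come from the puzzle's worked example.
from day9_func import HeightCreator, HeightAnalyzer

grid = ["2199943210",
        "3987894921",
        "9856789892",
        "8767896789",
        "9899965678"]
heights = HeightCreator.createHeightMap(grid)
analyzer = HeightAnalyzer()
assert analyzer.getRisk(heights) == 15
assert analyzer.getBasinMax(heights) == 1134
```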
{
"source": "3SecDing/CV__project",
"score": 2
} |
#### File: 3SecDing/CV__project/train.py
```python
import os
import time
import glob
import rawpy
import torch
import random
import argparse
import numpy as np
from PIL import Image
from model_light import LightModel
from model_normal import NormalModel
from model_light_resiual import LightResiualModel
from model_light_resiual_reduce import LightResiualReduce
from config import _C as cfg
# get train and test image ids
def get_image_ids(images_dir):
train_image_names = glob.glob(images_dir + '0*.ARW')
train_image_ids = [int(image_name.split('/')[-1].split('_')[0]) for image_name in train_image_names]
test_image_names = glob.glob(images_dir + '1*.ARW')
test_image_ids = [int(image_name.split('/')[-1].split('_')[0]) for image_name in test_image_names]
return train_image_ids, test_image_ids
def pack_raw_images(raw_image):
# pack Bayer image to 4 channels
im = raw_image.raw_image_visible.astype(np.float32)
im = np.maximum(im - 512, 0) / (16383 - 512) # subtract the black level
im = np.expand_dims(im, axis=2)
img_shape = im.shape
H = img_shape[0]
W = img_shape[1]
packed_raw_image = np.concatenate((im[0:H:2, 0:W:2, :],
im[0:H:2, 1:W:2, :],
im[1:H:2, 1:W:2, :],
im[1:H:2, 0:W:2, :]), axis=2)
return packed_raw_image
def loss_compute(pred_img, gt_img):
return torch.abs(pred_img - gt_img).mean()
def adjust_learning_rate(optimizer):
for param_group in optimizer.param_groups:
param_group['lr'] = param_group['lr'] * 0.1
def save_results_and_checkpoint(results_dir, model_dir, pred_img, gt_patch, model, train_idx, ratio, epoch):
pred = pred_img.permute(0, 2, 3, 1).cpu().data.numpy()
pred = np.minimum(np.maximum(pred, 0), 1)
out_img = np.concatenate((gt_patch[0, :, :, :], pred[0, :, :, :]), axis=1)
Image.fromarray((out_img * 255).astype('uint8')).save(results_dir + f'{train_idx:05}_00_train_{ratio}.jpg')
torch.save(model.state_dict(), os.path.join(model_dir, f'checkpoint_{epoch:04d}.pth'))
print(f"model save as checkpoint_{epoch:04d}.pth!!!")
# def images_buffer(images_dir, image_ids):
#
# for image_id in image_ids:
# images = glob.glob(os.path.join(images_dir, f'{image_id}_00*.ARW'))
#
# images = glob.glob(os.path.join(images_dir, f'{image_id}_00*.ARW'))
def train(cfg):
device = torch.device(f'cuda:{cfg.GPU_ID}')
print("device:", device)
lr = cfg.TRAIN.LEARNING_RATE
model_type = cfg.MODEL.BACKBONE
upsample_type = cfg.MODEL.UPSAMPLE_TYPE
epoch_max = cfg.TRAIN.EPOCH_MAX
gt_images_dir = cfg.DATASET.GT_IMAGES_DIR
train_images_dir = cfg.DATASET.TRAIN_IMAGES_DIR
patch_size = cfg.TRAIN.PATCH_SIZE
checkpoint_save_interval = cfg.TRAIN.SAVE_INTERVAL
train_results_dir = cfg.TRAIN.RESULTS_DIR
checkpoint_dir = cfg.TRAIN.CHECKPOINT_DIR
lr_step = cfg.TRAIN.LR_STEP
resume_model_weights = cfg.TRAIN.RESUME
start_epoch = 0
if not os.path.exists(train_results_dir):
os.makedirs(train_results_dir)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
if model_type == 'normal':
if upsample_type == 'deconv':
model = NormalModel()
elif upsample_type == 'bilinear':
model = NormalModel(upsample_type)
elif model_type == 'light':
if upsample_type == 'deconv':
model = LightModel()
elif upsample_type == 'bilinear':
model = LightModel(upsample_type)
elif model_type == 'light_resiual':
if upsample_type == 'deconv':
model = LightResiualModel()
elif upsample_type == 'bilinear':
model = LightResiualModel(upsample_type)
elif model_type == 'light_resiual_reduce':
if upsample_type == 'deconv':
model = LightResiualReduce()
elif upsample_type == 'bilinear':
model = LightResiualReduce(upsample_type)
else:
print("Not supported model type!!!")
return
if resume_model_weights != '':
start_epoch = int(resume_model_weights.split('.')[0].split('_')[-1])
model_weights = torch.load(resume_model_weights, map_location='cpu')
model.load_state_dict(model_weights)
model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
train_gt_image_ids, test_gt_image_ids = get_image_ids(gt_images_dir)
print("train length and test length:", len(train_gt_image_ids), len(test_gt_image_ids))
gt_images = [None] * 6000
train_images = {}
train_images['300'] = [None] * len(train_gt_image_ids)
train_images['250'] = [None] * len(train_gt_image_ids)
train_images['100'] = [None] * len(train_gt_image_ids)
g_loss = np.zeros((5000, 1))
total_train_time = 0
total_train_iter = 0
for epoch in range(start_epoch, epoch_max + 1):
iteration = 0
for step in lr_step:
if epoch == step:
adjust_learning_rate(optimizer)
epoch_start = time.time()
for idx in np.random.permutation(len(train_gt_image_ids)):
train_idx = train_gt_image_ids[idx]
data_process_start = time.time()
gt_image_names = glob.glob(os.path.join(gt_images_dir, f'{train_idx:05d}_00*.ARW'))
gt_image_path = gt_image_names[0]
gt_image_name = gt_image_path.split('/')[-1]
train_image_names = glob.glob(os.path.join(train_images_dir, f'{train_idx:05d}_00*.ARW'))
train_image_path = random.choice(train_image_names)
train_image_name = train_image_path.split('/')[-1]
train_exposure = float(train_image_name[9:-5])
gt_exposure = float(gt_image_name[9:-5])
ratio = min(gt_exposure / train_exposure, 300)
if train_images[str(ratio)[0:3]][idx] is None:
data_preprocess_start = time.time()
train_raw_image = rawpy.imread(train_image_path)
train_images[str(ratio)[0:3]][idx] = np.expand_dims(pack_raw_images(train_raw_image), axis=0) * ratio
gt_raw_image = rawpy.imread(gt_image_path)
im = gt_raw_image.postprocess(use_camera_wb=True, half_size=False, no_auto_bright=True, output_bps=16)
gt_images[idx] = np.expand_dims(np.float32(im / 65535.0), axis=0)
print(f"data preprocess time: {time.time() - data_preprocess_start:.3f}")
h = train_images[str(ratio)[0:3]][idx].shape[1]
w = train_images[str(ratio)[0:3]][idx].shape[2]
y = np.random.randint(0, h - patch_size)
x = np.random.randint(0, w - patch_size)
# print("h, w, x, y:", h, w, x, y)
train_patch = train_images[str(ratio)[0:3]][idx][:, y:y + patch_size, x:x + patch_size, :]
gt_patch = gt_images[idx][:, y * 2:y * 2 + patch_size * 2, x * 2:x * 2 + patch_size * 2, :]
if np.random.randint(2, size=1)[0] == 1: # random flip
train_patch = np.flip(train_patch, axis=1)
gt_patch = np.flip(gt_patch, axis=1)
if np.random.randint(2, size=1)[0] == 1:
train_patch = np.flip(train_patch, axis=2)
gt_patch = np.flip(gt_patch, axis=2)
if np.random.randint(2, size=1)[0] == 1: # random transpose
train_patch = np.transpose(train_patch, (0, 2, 1, 3))
gt_patch = np.transpose(gt_patch, (0, 2, 1, 3))
train_patch = np.minimum(train_patch, 1.0)
gt_patch = np.maximum(gt_patch, 0.0)
train_img = torch.from_numpy(train_patch).permute(0, 3, 1, 2).to(device)
gt_img = torch.from_numpy(gt_patch).permute(0, 3, 1, 2).to(device)
data_process_end = time.time()
model.zero_grad()
train_time_start = time.time()
pred_img = model(train_img)
loss = loss_compute(pred_img, gt_img)
loss.backward()
optimizer.step()
train_time_end = time.time()
g_loss[idx] = loss.data.cpu()
mean_loss = np.mean(g_loss[np.where(g_loss)])
iteration += 1
total_train_iter += 1
total_train_time += train_time_end - train_time_start
print(f"epoch: {epoch}, iteration: {iteration}, loss:{mean_loss:.3}, "
f"iter time:{time.time() - data_process_start:.3}, "
f"data process time:{data_process_end - data_process_start:.3}, "
f"train iter time: {train_time_end - train_time_start:.3}")
if epoch % checkpoint_save_interval == 0:
epoch_result_dir = train_results_dir + f'{epoch:04}/'
if not os.path.isdir(epoch_result_dir):
os.makedirs(epoch_result_dir)
save_results_and_checkpoint(epoch_result_dir, checkpoint_dir, pred_img,
gt_patch, model, train_idx, ratio, epoch)
print(f"epoch time: {time.time() - epoch_start}, "
f"mean train time iteration: {total_train_time / total_train_iter:.3f}")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="See In The Dark!")
parser.add_argument("--config", '-c', default="configs/sony_normal.yaml",
metavar="FILE", help="path to config file",type=str,)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
cfg.merge_from_file(args.config)
cfg.merge_from_list(args.opts)
cfg.freeze()
train(cfg)
``` |
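The script reads all of its settings from `config._C` (yacs style, given the `merge_from_file`/`merge_from_list`/`freeze` calls). Below is a hedged sketch of what that config module could look like; only the fields actually read in `train()` are listed, and every default value is a guess, not the repository's real setting.
```python
# Hedged sketch of the `config` module imported above (`from config import _C as cfg`).
# All default values below are assumptions for illustration only.
from yacs.config import CfgNode as CN

_C = CN()
_C.GPU_ID = 0

_C.MODEL = CN()
_C.MODEL.BACKBONE = 'normal'        # 'normal' | 'light' | 'light_resiual' | 'light_resiual_reduce'
_C.MODEL.UPSAMPLE_TYPE = 'deconv'   # 'deconv' | 'bilinear'

_C.DATASET = CN()
_C.DATASET.GT_IMAGES_DIR = './dataset/Sony/long/'
_C.DATASET.TRAIN_IMAGES_DIR = './dataset/Sony/short/'

_C.TRAIN = CN()
_C.TRAIN.LEARNING_RATE = 1e-4
_C.TRAIN.EPOCH_MAX = 4000
_C.TRAIN.PATCH_SIZE = 512
_C.TRAIN.SAVE_INTERVAL = 100
_C.TRAIN.RESULTS_DIR = './train_results/'
_C.TRAIN.CHECKPOINT_DIR = './checkpoints/'
_C.TRAIN.LR_STEP = [2000]
_C.TRAIN.RESUME = ''
```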
{
"source": "3sigma/Geeros-RaspberryPi-C-Python",
"score": 2
} |
#### File: Geeros-RaspberryPi-C-Python/programmes_python/geeros_api.py
```python
from websocket import create_connection
import math
import time
class geeros_api:
def __init__(self):
self.ws = create_connection("ws://192.168.0.199:9090/ws")
def Tourner(self, vitesseRotation, duree = -1):
tdebut = time.time()
t = time.time() - tdebut
while t < duree:
psidotref = eval(str(vitesseRotation))
# Saturations min et max
psidotref = max(min(360, psidotref), -360)
# Envoi de la consigne au programme principal par Websocket
self.ws.send('{"psidotref": ' + str(psidotref) + '}')
time.sleep(0.1)
t = time.time() - tdebut
if duree == -1:
psidotref = eval(str(vitesseRotation))
# Saturations min et max
psidotref = max(min(360, psidotref), -360)
# Envoi de la consigne au programme principal par Websocket
self.ws.send('{"psidotref": ' + str(psidotref) + '}')
else:
self.ws.send('{"psidotref": 0}')
def Avancer(self, vitesseLongitudinale, duree = -1):
tdebut = time.time()
t = time.time() - tdebut
while t < duree:
vref = eval(str(vitesseLongitudinale))
# Saturations min et max
vref = max(min(0.5, vref), -0.5)
# Envoi de la consigne au programme principal par Websocket
self.ws.send('{"vref": ' + str(vref) + '}')
time.sleep(0.1)
t = time.time() - tdebut
if duree == -1:
vref = eval(str(vitesseLongitudinale))
# Saturations min et max
vref = max(min(0.5, vref), -0.5)
# Envoi de la consigne au programme principal par Websocket
self.ws.send('{"vref": ' + str(vref) + '}')
else:
self.ws.send('{"vref": 0}')
def Mouvement(self, vitesseLongitudinale, vitesseRotation, duree = -1):
tdebut = time.time()
t = time.time() - tdebut
while t < duree:
vref = eval(str(vitesseLongitudinale))
psidotref = eval(str(vitesseRotation))
# Saturations min et max
vref = max(min(0.5, vref), -0.5)
psidotref = max(min(360, psidotref), -360)
# Envoi de la consigne au programme principal par Websocket
self.ws.send('{"vref": ' + str(vref) + ', "psidotref": ' + str(psidotref) + '}')
time.sleep(0.1)
t = time.time() - tdebut
if duree == -1:
vref = eval(str(vitesseLongitudinale))
psidotref = eval(str(vitesseRotation))
# Saturations min et max
vref = max(min(0.5, vref), -0.5)
psidotref = max(min(360, psidotref), -360)
# Envoi de la consigne au programme principal par Websocket
self.ws.send('{"vref": ' + str(vref) + ', "psidotref": ' + str(psidotref) + '}')
else:
self.ws.send('{"vref": 0, "psidotref": 0}')
def AngleServo(self, angle, duree = -1):
tdebut = time.time()
t = time.time() - tdebut
while t < duree:
servoref = eval(str(angle))
# Saturations min et max
servoref = max(min(30, servoref), -30)
# Envoi de la consigne au programme principal par Websocket
self.ws.send('{"servoref": ' + str(servoref) + '}')
time.sleep(0.1)
t = time.time() - tdebut
if duree == -1:
servoref = eval(str(angle))
# Saturations min et max
servoref = max(min(30, servoref), -30)
# Envoi de la consigne au programme principal par Websocket
self.ws.send('{"servoref": ' + str(servoref) + '}')
def Terminer(self):
self.ws.send('{"vref": 0, "psidotref": 0}')
self.ws.close()
```
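A hedged usage sketch of the class above, assuming the module is importable under its file name and that the server side is already running at the address hard-coded in `__init__` (ws://192.168.0.199:9090/ws).
```python
# Hedged usage sketch of the geeros_api wrapper above.
from geeros_api import geeros_api

robot = geeros_api()
robot.Avancer(0.2, 2)         # forward at 0.2 (clipped to [-0.5, 0.5]) for 2 s
robot.Tourner(90, 1)          # rotate at 90 (clipped to [-360, 360]) for 1 s
robot.Mouvement(0.1, 45, 3)   # combined translation and rotation for 3 s
robot.AngleServo(15)          # servo setpoint, clipped to [-30, 30]
robot.Terminer()              # zero the setpoints and close the websocket
```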
#### File: Geeros-RaspberryPi-C-Python/programmes_python/GeerosAvecBoules.py
```python
import wiringpi2
# Imports pour le bus i2c
import smbus
import time, sched
import os
import threading
import signal
import json
import sys
# Pour la détection d'adresse IP
import socket
import fcntl
import struct
# Pour le serveur de Websocket
import tornado.httpserver
import tornado.ioloop
from tornado.ioloop import PeriodicCallback
import tornado.web
import tornado.websocket
import tornado.template
# Imports pour la communication i2c avec la carte Pololu A-Star
from a_star import AStar
a_star = AStar()
# Entete declarative
Nmoy = 10
codeurDroitDeltaPos = 0
codeurDroitDeltaPosPrec = 0
codeurGaucheDeltaPos = 0
codeurGaucheDeltaPosPrec = 0
omegaDroit = 0
omegaGauche = 0
# Les moteurs sont asservis en vitesse grâce à un régulateur de type PID
# On déclare ci-dessous les variables et paramètres nécessaires à l'asservissement et au régulateur
R = 0.045 # Rayon d'une roue
W = 0.14 # Largeur du robot
umax = 6. # valeur max de la tension de commande du moteur
umin = -6. # valeur min (ou max en négatif) de la tension de commande du moteur
vxmes = 0. # vitesse longitudinale mesurée
ximes = 0. # vitesse de rotation mesurée
Tf = 0.02 # constante de temps de filtrage de l'action dérivée du PID
Kpvx = 1. # gain proportionnel pour l'asservissement de vitesse longitudinale
Kivx = 10. # gain intégral pour l'asservissement de vitesse longitudinale
Kdvx = 0.00 # gain dérivé pour l'asservissement de vitesse longitudinale
Kpxi = 0.1 # gain proportionnel pour l'asservissement de rotation
Kixi = 1. # gain intégral pour l'asservissement de rotation
Kdxi = 0.000 # gain dérivé pour l'asservissement de rotation
commande_avant_sat_vx = 0. # commande avant la saturation pour l'asservissement de vitesse longitudinale
commande_vx = 0. # commande pour l'asservissement de vitesse longitudinale
commande_avant_sat_xi = 0. # commande avant la saturation pour l'asservissement de rotation
commande_xi = 0. # commande pour l'asservissement de rotation
P_vx = 0. # action proportionnelle pour l'asservissement de vitesse longitudinale
I_vx = 0. # action intégrale pour l'asservissement de vitesse longitudinale
D_vx = 0. # action dérivée pour l'asservissement de vitesse longitudinale
P_xi = 0. # action proportionnelle pour l'asservissement de rotation
I_xi = 0. # action intégrale pour l'asservissement de rotation
D_xi = 0. # action dérivée pour l'asservissement de rotation
commandeDroit = 0. # commande en tension calculée par le PID pour le moteur droit
commandeGauche = 0. # commande en tension calculée par le PID pour le moteur gauche
yprecvx = 0. # Mesure de la vitesse longitudinale au calcul précédent
yprecxi = 0. # Mesure de la vitesse de rotation au calcul précédent
# Variables intermédiaires
Ti = 0.
ad = 0.
bd = 0.
# Variables utilisées pour les données reçues
x1 = 0.
x2 = 0.
Kp2 = 1.
Ki2 = 1.
Kd2 = 1.
Kpxi2 = 1.
Kixi2 = 1.
Kdxi2 = 1.
# Déclarations pour les consignes de mouvement
vxref = 0.
xiref = 0.
# Time out de réception des données
timeout = 2
timeLastReceived = time.time()
T0 = time.time()
dt = 0.01
tprec = time.time()
i = 0
# Création d'un scheduler pour exécuter des opérations à cadence fixe
s = sched.scheduler(time.time, time.sleep)
# Lecture de la tension d'alimentation
idecimLectureTension = 0
decimLectureTension = 6000
tensionAlim = 7.4
# Sécurité sur la tension d'alimentation
tensionAlimMin = 6.4;
#--- setup ---
def setup():
global tensionAlim
wiringpi2.wiringPiSetupGpio() # For GPIO pin numbering
CommandeMoteurs(0, 0, tensionAlim)
# Initialisation de la position du servo
a_star.servo(45)
# Mesure de la tension d'alimentation
try:
tensionAlimBrute = a_star.read_battery_millivolts()
tensionAlimAvantMax = tensionAlimBrute / 1000.;
tensionAlim = max(tensionAlimMin, tensionAlimAvantMax);
print "Tension d'alimentation", tensionAlim
except:
print "Probleme lecture tension d'alimentation"
pass
# -- fin setup --
# -- loop --
def loop():
global i, T0
i = i+1
s.enterabs( T0 + (i * dt), 1, CalculVitesse, ())
s.run()
# -- fin loop --
def CalculVitesse():
global started, \
omegaDroit, omegaGauche, codeurDroitDeltaPosPrec, codeurGaucheDeltaPosPrec, \
ad, P_vx, I_vx, D_vx, P_xi, I_xi, D_xi, bd, Ti, yprecvx, yprecxi, timeLastReceived, timeout, \
codeurDroitDeltaPos, codeurGaucheDeltaPos, commandeDroit, commandeGauche, vxmes, ximes, vxref, xiref, dt2, tprec, \
idecimLectureTension, decimLectureTension, tensionAlim, x1, x2
# Mesure de la vitesse des moteurs grâce aux codeurs incrémentaux
try:
codeurDroitDeltaPos = a_star.read_codeurDroitDeltaPos()
if abs(codeurDroitDeltaPos) > 1000:
#print "Values out of range"
codeurDroitDeltaPos = codeurDroitDeltaPosPrec
else:
codeurDroitDeltaPosPrec = codeurDroitDeltaPos
except:
#print "Erreur lecture codeur droit"
codeurDroitDeltaPos = codeurDroitDeltaPosPrec
try:
codeurGaucheDeltaPos = a_star.read_codeurGaucheDeltaPos()
if abs(codeurGaucheDeltaPos) > 1000:
#print "Values out of range"
codeurGaucheDeltaPos = codeurGaucheDeltaPosPrec
else:
codeurGaucheDeltaPosPrec = codeurGaucheDeltaPos
except:
#print "Erreur lecture codeur gauche"
codeurGaucheDeltaPos = codeurGaucheDeltaPosPrec
# C'est bien dt qu'on utilise ici et non pas dt2 (voir plus loin l'explication de dt2)
# car codeurDroitDeltaPos et codeurGaucheDeltaPos sont mesurés en temps-réel par l'A*
omegaDroit = -2 * ((2 * 3.141592 * codeurDroitDeltaPos) / 1632) / (Nmoy * dt) # en rad/s
omegaGauche = 2 * ((2 * 3.141592 * codeurGaucheDeltaPos) / 1632) / (Nmoy * dt) # en rad/s
# Si on n'a pas reçu de données depuis un certain temps, celles-ci sont annulées
if (time.time()-timeLastReceived) > timeout:
x1 = 0.
x2 = 0.
# Application de la consigne lue
vxref = x1
xiref = x2
# Définition des entrées de la fonction d'asservissement
vxmes = (omegaDroit + omegaGauche)*R/2
ximes = -(omegaDroit - omegaGauche)*R/W
# La suite des calculs se fait avec dt2, qui correspond au "vrai" pas de temps d'échantillonnage
# de cette fonction (la RPi n'est pas un système temps-réel et au début de l'exécution du programme,
# dt2 peut être jusqu'à deux fois plus petit que dt)
dt2 = time.time() - tprec
tprec = time.time()
# Calcul du PID sur vx
# Paramètres intermédiaires
Ti = Ki2 * Kivx/(Kp2 * Kpvx + 0.01)
ad = Tf/(Tf+dt2)
bd = Kd2 * Kdvx/(Tf+dt2)
# Terme proportionnel
P_vx = Kpvx * Kp2 * (vxref - vxmes)
# Terme dérivé
D_vx = ad * D_vx - bd * (vxmes - yprecvx)
# Calcul de la commande
commande_vx = P_vx + I_vx
# Terme intégral (sera utilisé lors du pas d'échantillonnage suivant)
I_vx = I_vx + Kivx * Ki2 * dt2 * (vxref - vxmes)
# Stockage de la mesure courante pour utilisation lors du pas d'échantillonnage suivant
yprecvx = vxmes
# Fin Calcul du PID sur vx
# Calcul du PID sur xi
# Paramètres intermédiaires
Ti = Kixi2 * Kixi/(Kpxi2 * Kpxi + 0.01)
ad = Tf/(Tf+dt2)
bd = Kdxi2 * Kdxi/(Tf+dt2)
# Terme proportionnel
P_xi = Kpxi * Kpxi2 * (xiref - ximes)
# Terme dérivé
D_xi = ad * D_xi - bd * (ximes - yprecxi)
# Calcul de la commande
commande_xi = P_xi + I_xi + D_xi
# Terme intégral (sera utilisé lors du pas d'échantillonnage suivant)
I_xi = I_xi + Kixi * Kixi2 * dt2 * (xiref - ximes)
# Stockage de la mesure courante pour utilisation lors du pas d'échantillonnage suivant
yprecxi = ximes
# Fin Calcul du PID sur xi
# Calcul des commandes des moteurs
commandeDroit = (commande_vx - commande_xi);
commandeGauche = (commande_vx + commande_xi);
CommandeMoteurs(commandeDroit, commandeGauche, tensionAlim)
# Lecture de la tension d'alimentation
if idecimLectureTension >= decimLectureTension:
try:
tensionAlimBrute = a_star.read_battery_millivolts()
tensionAlimAvantMax = tensionAlimBrute / 1000.;
tensionAlim = max(tensionAlimMin, tensionAlimAvantMax);
idecimLectureTension = 0
except:
print "Probleme lecture tension d'alimentation"
pass
else:
idecimLectureTension = idecimLectureTension + 1
def CommandeMoteurs(commandeDroit, commandeGauche, tensionAlim):
# Cette fonction calcule et envoi les signaux PWM au pont en H
# en fonction des tensions de commande et d'alimentation
# L'ensemble pont en H + moteur pourrait ne pas être linéaire
tensionDroit = commandeDroit
tensionGauche = commandeGauche
# Normalisation de la tension d'alimentation par
# rapport à la tension d'alimentation
tension_int_droit = int(400 * tensionDroit / tensionAlim)
tension_int_gauche = -int(400 * tensionGauche / tensionAlim)
# Saturation par sécurité
if (tension_int_droit > 400):
tension_int_droit = 400
if (tension_int_droit < -400):
tension_int_droit = -400
if (tension_int_gauche > 400):
tension_int_gauche = 400
if (tension_int_gauche < -400):
tension_int_gauche = -400
# Commande PWM
try:
a_star.motors(tension_int_gauche, tension_int_droit)
except:
print "Erreur moteurs"
def emitData():
global T0
# Délai nécessaire pour que le serveur de Websocket ait le temps de démarrer
wiringpi2.delay(5000)
T0 = time.time()
while True: loop() # appelle fonction loop sans fin
class WSHandler(tornado.websocket.WebSocketHandler):
def open(self):
global socketOK
print 'connection opened...'
socketOK = True
self.callback = PeriodicCallback(self.sendToSocket, 100)
self.callback.start()
def on_message(self, message):
global x1, x2, Kp2, Ki2, Kd2, Kpxi2, Kixi2, Kdxi2, timeLastReceived, socketOK
jsonMessage = json.loads(message)
# Annulation du timeout de réception des données
timeLastReceived = time.time()
if jsonMessage.get('vref') != None:
x1 = float(jsonMessage.get('vref')) / 100
#print ("x1: %.2f" % x1)
if jsonMessage.get('xiref') != None:
x2 = (float(jsonMessage.get('xiref'))) * 3.141592 / 180
#print ("x2: %.2f" % x2)
if jsonMessage.get('servoref') != None:
servoref = int(jsonMessage.get('servoref'))
try:
a_star.servo(servoref)
except:
pass
#print ("servoref: %d" % servoref)
if jsonMessage.get('Kp2ref') != None:
Kp2 = float(jsonMessage.get('Kp2ref'))
#print ("Kp2: %.2f" % Kp2)
if jsonMessage.get('Ki2ref') != None:
Ki2 = float(jsonMessage.get('Ki2ref'))
#print ("Ki2: %.2f" % Ki2)
if jsonMessage.get('Kd2ref') != None:
Kd2 = float(jsonMessage.get('Kd2ref'))
#print ("Kd2: %.2f" % Kd2)
if jsonMessage.get('Kpxi2ref') != None:
Kpxi2 = float(jsonMessage.get('Kpxi2ref'))
#print ("Kpxi2: %.2f" % Kpxi2)
if jsonMessage.get('Kixi2ref') != None:
Kixi2 = float(jsonMessage.get('Kixi2ref'))
#print ("Kixi2: %.2f" % Kixi2)
if jsonMessage.get('Kdxi2ref') != None:
Kdxi2 = float(jsonMessage.get('Kdxi2ref'))
#print ("Kdxi2: %.2f" % Kdxi2)
if not socketOK:
x1 = 0
x2 = 0.
def on_close(self):
global socketOK, commandeDroit, commandeGauche
print 'connection closed...'
socketOK = False
commandeDroit = 0.
commandeGauche = 0.
def sendToSocket(self):
global started, codeurDroitDeltaPos, codeurGaucheDeltaPos, socketOK, commandeDroit, commandeGauche, vxref, xiref, \
vxmes, ximes, T0
tcourant = time.time() - T0
aEnvoyer = json.dumps( {'Temps':("%.2f" % tcourant),
'Consigne vitesse longitudinale':("%.2f" % x1),
'Consigne vitesse de rotation':("%.2f" % (180 * x2/3.141592)),
'Vitesse longitudinale':("%.2f" % vxmes),
'Vitesse de rotation':("%.2f" % (180 * ximes/3.141592)),
'omegaDroit':("%.2f" % omegaDroit),
'omegaGauche':("%.2f" % omegaGauche),
'commandeDroit':("%.2f" % commandeDroit),
'commandeGauche':("%.2f" % commandeGauche)})
if socketOK:
try:
self.write_message(aEnvoyer)
except:
pass
def check_origin(self, origin):
# Voir http://www.tornadoweb.org/en/stable/websocket.html#tornado.websocket.WebSocketHandler.check_origin
# et http://www.arundhaj.com/blog/tornado-error-during-websocket-handshake.html
return True
def get_ip_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
application = tornado.web.Application([
(r'/ws', WSHandler)
])
def startTornado():
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(9090)
tornado.ioloop.IOLoop.instance().start()
# Gestion du CTRL-C
def signal_handler(signal, frame):
global commandeDroit, commandeGauche
print 'You pressed Ctrl+C!'
commandeDroit = 0.
commandeGauche = 0.
CommandeMoteurs(0, 0, 5)
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
# Gestion des segmentation fault
def signal_handler2(sig, frame):
print 'Received signal ' + str(sig) + ' on line ' + str(frame.f_lineno) + ' in ' + frame.f_code.co_filename
signal.signal(signal.SIGSEGV, signal_handler2)
#--- obligatoire pour lancement du code --
if __name__=="__main__": # pour rendre le code executable
# Test pour savoir si le firmware est présent sur la carte A-Star
firmwarePresent = False
for i in range(1, 11):
time.sleep(0.1)
print "Test presence du firmware de la carte A-Star, tentative " + str(i) + " / 10"
try:
firmwarePresent = a_star.firmwareOK()
if firmwarePresent:
break
except:
print "Firmware absent"
if firmwarePresent:
print "Firmware present, on continue..."
started = False
startedDroit = False
startedGauche = False
setup() # appelle la fonction setup
print "Setup done."
th = threading.Thread(None, emitData, None, (), {})
th.daemon = True
th.start()
print "Starting Tornado."
try:
print "Connect to ws://" + get_ip_address('eth0') + ":9090/ws with Ethernet."
except:
pass
try:
print "Connect to ws://" + get_ip_address('wlan0') + ":9090/ws with Wifi."
except:
pass
socketOK = False
startTornado()
else:
print "Firmware absent, on abandonne ce programme."
print "Veuillez charger le firmware sur la carte A-Star pour exécuter ce programme."
``` |
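A hedged sketch of a minimal client for the websocket handler above. The JSON keys (`vref`, `xiref`, `servoref`, and the gain keys) are the ones parsed in `WSHandler.on_message`; the address is an example and must match the robot's actual IP.
```python
# Hedged client sketch; address and values are examples only.
import time
from websocket import create_connection  # pip package: websocket-client

ws = create_connection("ws://192.168.0.199:9090/ws")
ws.send('{"vref": 20, "xiref": 30}')  # vref is divided by 100 server-side, xiref is in deg/s
print(ws.recv())                      # telemetry comes back every 100 ms as a JSON string
time.sleep(1.5)
ws.send('{"vref": 0, "xiref": 0}')    # stop explicitly instead of waiting for the 2 s timeout
ws.close()
```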
{
"source": "3sigma/GLMF_IoT_1",
"score": 2
} |
#### File: 3sigma/GLMF_IoT_1/iot.py
```python
import pygatt.backends
import pysensortag
# Import pour la LIFX
from lifxlan import *
import sched
from time import time, sleep
import signal
import sys
# Variables pour la gestion de la luminosité
Lux = 0
etatLampe = 0
# Un hystérésis sera nécessaire
seuilLuminositeMin = 50
seuilLuminositeMax = 100
# Création d'un scheduler pour exécuter des opérations à cadence fixe
T0 = time()
dt = 1
i = 0
s = sched.scheduler(time, sleep)
def setup():
global adapter, sensortag, light, monAmpoule
adapter = pygatt.backends.BGAPIBackend()
adapter.start()
print("Connexion au SensorTag")
sensortag = pysensortag.PySensorTag(adapter, 'B0:B4:48:C0:5D:00')
print("Activation du luxometre")
sensortag.ActivateLuxometerSensor()
# Délai pour que l'activation se fasse
sleep(1)
# Gestion de l'ampoule
# Démarrage du client
lifx = LifxLAN()
# Découverte des ampoules
devices = lifx.get_lights()
# Récupération de mon ampoule (nommée "AmpouleBureau")
for device in devices:
if device.get_label() == "AmpouleBureau":
monAmpoule = device
# Allumage et extinction
print("Test de l'ampoule: allumage pendant 2 s")
monAmpoule.set_power("on")
sleep(2)
monAmpoule.set_power("off")
def loop():
global i
i = i+1
s.enterabs( T0 + (i * dt), 1, Automate, ())
s.run()
def Automate():
global sensortag, Lux, seuilLuminositeMin, seuilLuminositeMax, etatLampe, monAmpoule
# Scan des périphériques Bluetooth
devices = adapter.scan(timeout = 2, scan_interval=200, scan_window=200, active = False)
# Recherche de l'adresse de l'iBeacon
foundAddress = False
for dev in devices:
address = dev.get('address')
if address == '68:9E:19:10:DA:CE':
foundAddress = True
print("iBeacon en vue !")
else:
print("iBeacon absent !")
# On passe à la suite uniquement si l'iBeacon est présent
if foundAddress:
# Lecture de la luminosité et allumage en fonction de la
# valeur par rapport à l'hystérésis
Lux = sensortag.GetLuxometer()
print "Light intensity: %.2f lx" % Lux
if Lux < seuilLuminositeMin:
if etatLampe==0:
print("Luminosite insuffisante: allumage lampe")
monAmpoule.set_power("on")
etatLampe = 1
elif Lux > seuilLuminositeMax:
if etatLampe==1:
print("Luminosite suffisante: extinction lampe")
monAmpoule.set_power("off")
etatLampe = 0
else:
if etatLampe==1:
print("iBeacon absent: extinction lampe")
monAmpoule.set_power("off")
etatLampe = 0
print("")
# Gestion du CTRL-C
def signal_handler(signal, frame):
global adapter, monAmpoule
print("You pressed Ctrl+C!")
print("Stop")
monAmpoule.set_power("off")
adapter.stop()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
if __name__=="__main__":
setup()
print("Setup done.")
while True: loop()
``` |
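The lamp logic above relies on a two-threshold hysteresis (50 lx / 100 lx) so the lamp does not flicker around a single set point. A minimal standalone illustration of that behaviour, independent of the SensorTag and LIFX hardware:
```python
# Minimal illustration of the hysteresis used in Automate(); thresholds mirror the defaults above.
def next_lamp_state(lux, lamp_on, low=50, high=100):
    if lux < low:
        return True
    if lux > high:
        return False
    return lamp_on          # between the thresholds: keep the previous state

assert next_lamp_state(40, False) is True     # too dark -> switch on
assert next_lamp_state(75, True) is True      # in the dead band -> unchanged
assert next_lamp_state(120, True) is False    # bright enough -> switch off
```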
{
"source": "3sigma/T-Quad-Webcam",
"score": 2
} |
#### File: T-Quad-Webcam/programmes_python/QuatreRoues_SuiviCouleur.py
```python
from pyduino import *
# Imports Généraux
import time, sched
import os
import threading
import signal
import json
import sys
# Pour la détection d'adresse IP
import socket
import fcntl
import struct
# Pour le serveur de socket
import tornado.httpserver
import tornado.ioloop
from tornado.ioloop import PeriodicCallback
import tornado.web
import tornado.websocket
import tornado.template
# Gestion de l'IMU
from mpu9250 import MPU9250
# Imports pour OpenCV
import cv2
import numpy as np
import colorsys
# Nom de l'hostname (utilisé ensuite pour savoir sur quel système
# tourne ce programme)
hostname = socket.gethostname()
# Imports pour la communication i2c avec l'Arduino Mega
from mega import Mega
mega = Mega(hostname = hostname)
# Moteurs
Nmoy = 1
omegaArriereDroit = 0.
codeurArriereDroitDeltaPos = 0
codeurArriereDroitDeltaPosPrec = 0
omegaArriereGauche = 0.
codeurArriereGaucheDeltaPos = 0
codeurArriereGaucheDeltaPosPrec = 0
omegaAvantDroit = 0.
codeurAvantDroitDeltaPos = 0
codeurAvantDroitDeltaPosPrec = 0
omegaAvantGauche = 0.
codeurAvantGaucheDeltaPos = 0
codeurAvantGaucheDeltaPosPrec = 0
# Tension effectivement appliquée
commandeArriereDroit = 0.
commandeArriereGauche = 0.
commandeAvantDroit = 0.
commandeAvantGauche = 0.
# Saturations
umax = 6. # valeur max de la tension de commande du moteur
umin = -6. # valeur min (ou max en négatif) de la tension de commande du moteur
# Paramètres mécaniques
R = 0.0225 # Rayon d'une roue
W = 0.18 # Ecart entre le centre de rotation du robot et les roues
# Variables utilisées pour les données reçues
# Couleurs initiale et précédente
couleur = "FF9600"
couleurPrec = "FF9600"
# Rayon de l'objet (balle de tennis)
Robjet = 0.035
distance = 0
# Timeout de réception des données
timeout = 2
timeLastReceived = 0
timedOut = False
T0 = time.time()
dt = 0.01
tprec = time.time()
tdebut = 0
# Création d'un scheduler pour exécuter des opérations à cadence fixe
s = sched.scheduler(time.time, time.sleep)
# Commande des moteurs
commandeRot = 0.
tcommande = T0
# Mesure de la tension de la batterie
# On la contraint à être supérieure à 7V, pour éviter une division par
# zéro en cas de problème quelconque
idecimLectureTension = 0
decimLectureTension = 6000
decimErreurLectureTension = 100
lectureTensionOK = False
tensionAlim = 7.4
while not lectureTensionOK:
try:
tensionAlim = max(7.0, float(mega.read_battery_millivolts()) / 1000.)
lectureTensionOK = True
except:
print("Erreur lecture tension")
# Initialisation de l'IMU
gz = 0.
intgz = 0.
offset_gyro = 0.
if (hostname == "pcduino"):
I2CBUS = 2
elif (hostname == "raspberrypi"):
I2CBUS = 1
else:
# pcDuino par défaut
I2CBUS = 2
initIMU_OK = False
while not initIMU_OK:
try:
imu = MPU9250(i2cbus=I2CBUS, address=0x69)
initIMU_OK = True
except:
print("Erreur init IMU")
# Initialisation OpenCV
# Résolution souhaitée pour l'image. Remarque: la résolution native de l'image est 640x480, mais
# cette résolution conduit à un temps de latence assez important. Il est préférable d'utiliser
# une résolution de 320x240
CAMERA_WIDTH = 320
CAMERA_HEIGHT = 240
# Rayon minimum du cercle entourant l'objet dont on a reconnu la couleur
MIN_RADIUS = 2
# Initialisation de la Webcam
cam = cv2.VideoCapture(0)
cam.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, CAMERA_WIDTH)
cam.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, CAMERA_HEIGHT)
camWidth = cam.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)
camHeight = cam.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)
print "Camera initialized: (" + str(camWidth) + ", " + str(camHeight) + ")"
# Compteur de boucle
i = 0
# Taille de l'objet
radius = 0.
# Position de l'objet
center = (0,0)
# Position précédente
positionPrec = CAMERA_WIDTH / 2
# Ecart angulaire
ecart_angulaire = 0.
#--- setup ---
def setup():
global offset_gyro
# Initialisation des moteurs
CommandeMoteurs(0, 0, 0, 0)
# Calibration du gyro sur 100 mesures
i = 0
somme_gyro = 0.
while (i < 100):
try:
gyro = imu.readGyro()
gz = gyro['z'] * math.pi / 180
somme_gyro = somme_gyro + gz
i = i + 1
except:
#print("Erreur lecture IMU")
pass
offset_gyro = somme_gyro/100.
print "Offset gyro", offset_gyro
i = 0
# -- fin setup --
# -- loop --
def loop():
CalculVitesse()
# -- fin loop --
def fois255entier(x):
return int(round(x * 255))
def SuiviCouleur():
global MIN_RADIUS, CAMERA_WIDTH, cam, i, center, radius, distance, ecart_angulaire, couleur, Robjet
# Conversion de la couleur de l'hexadécimal en HSV (Teinte - Saturation - Luminosité)
couleurRGB = struct.unpack('BBB',couleur.decode('hex'))
couleurHSV = map(fois255entier, colorsys.rgb_to_hsv(couleurRGB[0]/255.,couleurRGB[1]/255.,couleurRGB[2]/255.))
# Définition des seuils de reconnaissance (seule la teinte est réglable de -/+ 10 autour de la valeur nominale
THRESHOLD_LOW = (max(0,couleurHSV[0] - 10), 20, 20);
THRESHOLD_HIGH = (min(255,couleurHSV[0] + 10), 255, 255);
# Lecture d'une image
ret_val, img = cam.read()
# Suppression de bruit
img_filter = cv2.GaussianBlur(img.copy(), (3, 3), 0)
# Conversion de l'image en HSV
img_filter = cv2.cvtColor(img_filter, cv2.COLOR_BGR2HSV)
# Conversion de l'image en binaire (les pixels dans l'intervalle de couleur sélectionné
# sont convertis en blanc, les autres en noir)
img_binary = cv2.inRange(img_filter.copy(), np.array(THRESHOLD_LOW), np.array(THRESHOLD_HIGH))
# Dilatation de l'image pour augmenter la taille des blobs
img_binary = cv2.dilate(img_binary, None, iterations = 1)
# Détection du centre de l'objet à partir des contours. Voir:
# http://www.pyimagesearch.com/2015/09/14/ball-tracking-with-opencv/
img_contours = img_binary.copy()
contours = cv2.findContours(img_contours, cv2.RETR_EXTERNAL, \
cv2.CHAIN_APPROX_SIMPLE)[-2]
# Recherche du contour le plus grand et calcul du cercle l'entourant
center = None
radius = 0
if len(contours) > 0:
c = max(contours, key=cv2.contourArea)
((x, y), radius) = cv2.minEnclosingCircle(c)
M = cv2.moments(c)
if M["m00"] > 0:
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
if radius < MIN_RADIUS:
center = None
# Calculs sur le plus grand contour détecté
if center != None:
#print str(center) + " " + str(radius)
# Distance à l'objet
distance = Robjet / math.tan(0.1186823645e1 * radius / CAMERA_WIDTH)
# L'angle de vue de la Webcam est 68 degrés
# On considère que l'écart angulaire est nul si le rayon de l'objet est inférieur à 10
# (dans ce cas, l'objet n'est probablement pas dans le champ de vision)
if (radius > 10):
ecart_angulaire = (34. * (center[0] - (CAMERA_WIDTH / 2)) / (CAMERA_WIDTH / 2)) * (math.pi / 180.)
else:
ecart_angulaire = 0.
# Ajout à l'image d'un cercle vert autour du plus grandd contour
if center != None:
cv2.circle(img, center, int(round(radius)), (0, 255, 0))
# Ecriture de l'image dans un répertoire spécifique.
# Le logiciel mjpg_streamer, qui est exécuté en parallèle lors du démarrage de ce programme Python
# par l'application MyViz (ou par son script .sh associé), stream une vidéo à partir des images lues
# dans ce répertoire
cv2.imwrite('/root/programmes_python/jpg/{0:05d}.jpg'.format(i),img)
i += 1
cv2.waitKey(1)
def CalculVitesse():
global omegaArriereDroit, omegaArriereGauche, omegaAvantDroit, omegaAvantGauche, timeLastReceived, timeout, timedOut, \
tdebut, codeurArriereDroitDeltaPos, codeurArriereGaucheDeltaPos, codeurAvantDroitDeltaPos, codeurAvantGaucheDeltaPos, \
commandeArriereDroit, commandeArriereGauche, commandeAvantDroit, commandeAvantGauche, \
codeurArriereDroitDeltaPosPrec, codeurArriereGaucheDeltaPosPrec, codeurAvantDroitDeltaPosPrec, codeurAvantGaucheDeltaPosPrec, tprec, \
idecimLectureTension, decimLectureTension, decimErreurLectureTension, tensionAlim, \
imu, gz, intgz, R, W, commandeRot, offset_gyro, ecart_angulaire, tcommande
tdebut = time.time()
# Exécution de la fonction de suivi de couleur. Le résultat du calul est l'écart angulaire entre l'axe de la Webcam et
# l'objet coloré
SuiviCouleur()
# Mesure de la vitesse des moteurs grâce aux codeurs incrémentaux
try:
codeursDeltaPos = mega.read_codeursDeltaPos()
codeurArriereDroitDeltaPos = codeursDeltaPos[0]
codeurArriereGaucheDeltaPos = codeursDeltaPos[1]
codeurAvantDroitDeltaPos = codeursDeltaPos[2]
codeurAvantGaucheDeltaPos = codeursDeltaPos[3]
# Suppression de mesures aberrantes
if (abs(codeurArriereDroitDeltaPos - codeurArriereDroitDeltaPosPrec) > 10) or (abs(codeurArriereGaucheDeltaPos - codeurArriereGaucheDeltaPosPrec) > 10) or (abs(codeurAvantDroitDeltaPos - codeurAvantDroitDeltaPosPrec) > 10) or (abs(codeurAvantGaucheDeltaPos - codeurAvantGaucheDeltaPosPrec) > 10):
codeurArriereDroitDeltaPos = codeurArriereDroitDeltaPosPrec
codeurArriereGaucheDeltaPos = codeurArriereGaucheDeltaPosPrec
codeurAvantDroitDeltaPos = codeurAvantDroitDeltaPosPrec
codeurAvantGaucheDeltaPos = codeurAvantGaucheDeltaPosPrec
codeurArriereDroitDeltaPosPrec = codeurArriereDroitDeltaPos
codeurArriereGaucheDeltaPosPrec = codeurArriereGaucheDeltaPos
codeurAvantDroitDeltaPosPrec = codeurAvantDroitDeltaPos
codeurAvantGaucheDeltaPosPrec = codeurAvantGaucheDeltaPos
except:
#print "Error getting data"
codeurArriereDroitDeltaPos = codeurArriereDroitDeltaPosPrec
codeurArriereGaucheDeltaPos = codeurArriereGaucheDeltaPosPrec
codeurAvantDroitDeltaPos = codeurAvantDroitDeltaPosPrec
codeurAvantGaucheDeltaPos = codeurAvantGaucheDeltaPosPrec
# On utilise ici dt car c'est à cette cadence (10 ms) que les mesures des delta codeurs
# sont effectuées sur l'Arduino
omegaArriereDroit = -2 * ((2 * 3.141592 * codeurArriereDroitDeltaPos) / 1200) / (Nmoy * dt) # en rad/s
omegaArriereGauche = 2 * ((2 * 3.141592 * codeurArriereGaucheDeltaPos) / 1200) / (Nmoy * dt) # en rad/s
omegaAvantDroit = -2 * ((2 * 3.141592 * codeurAvantDroitDeltaPos) / 1200) / (Nmoy * dt) # en rad/s
omegaAvantGauche = 2 * ((2 * 3.141592 * codeurAvantGaucheDeltaPos) / 1200) / (Nmoy * dt) # en rad/s
# Mesure de la vitesse de rotation par odométrie (non utilisé pour l'instant)
ximes = (omegaArriereDroit - omegaArriereGauche + omegaAvantDroit - omegaAvantGauche) * R / W / 2
dt2 = time.time() - tprec
tprec = time.time()
# Lecture de la vitesse de rotation autour de la verticale (non utilisé pour l'instant)
try:
gyro = imu.readGyro()
gz = gyro['z'] * math.pi / 180 - offset_gyro
if (ximes == 0.):
offset_gyro = offset_gyro + gz
except:
#print("Erreur lecture IMU")
pass
# On utilise par la suite dt2 car c'est l'écart de temps entre deux calculs
# Intégration de la vitesse de rotation pour avoir l'angle (non utilisé pour l'instant)
intgz = intgz + gz * dt2
# Si on n'a pas reçu de données depuis un certain temps, celles-ci sont annulées
if (time.time()-timeLastReceived) > timeout and not timedOut:
timedOut = True
if timedOut:
commandeRot = 0.
else:
# On tourne de 6 degrés par volt
# A faire: ajuster ce rapport (6 degrés / V) en fonction de la mesure effectuée par le gyro
if (time.time() - tcommande) > 0.2:
commandeRot = -(180. * ecart_angulaire / math.pi) / 6.
tcommande = time.time()
else:
commandeRot = 0.
# Transformation des commandes longitudinales et de rotation en tension moteurs
commandeArriereDroit = -commandeRot # Tension négative pour faire tourner positivement ce moteur
commandeArriereGauche = -commandeRot
commandeAvantDroit = -commandeRot # Tension négative pour faire tourner positivement ce moteur
commandeAvantGauche = -commandeRot
CommandeMoteurs(commandeArriereDroit, commandeArriereGauche, commandeAvantDroit, commandeAvantGauche)
# Lecture de la tension d'alimentation (non utilisé pour l'instant)
if idecimLectureTension >= decimLectureTension:
try:
tensionAlim = max(7.0, float(mega.read_battery_millivolts()) / 1000.)
idecimLectureTension = 0
except:
# On recommence la lecture dans decimErreurLectureTension * dt
idecimLectureTension = idecimLectureTension - decimErreurLectureTension
#print("Erreur lecture tension dans Loop")
else:
idecimLectureTension = idecimLectureTension + 1
# Suggestions d'amélioration:
# - utiliser l'angle de rotation mesuré par le gyro pour recaler le rappport de 6 degrés / V
# - faire un suivi de distance
#print time.time() - tdebut
def CommandeMoteurs(commandeArriereDroit, commandeArriereGauche, commandeAvantDroit, commandeAvantGauche):
# Cette fonction calcule et envoi les signaux PWM au pont en H
# en fonction des tensions de commande et d'alimentation
global tensionAlim
# L'ensemble pont en H + moteur pourrait ne pas être linéaire
tensionArriereDroit = commandeArriereDroit
tensionArriereGauche = commandeArriereGauche
tensionAvantDroit = commandeAvantDroit
tensionAvantGauche = commandeAvantGauche
# Normalisation de la tension d'alimentation par
# rapport à la tension d'alimentation
tension_int_ArriereDroit = int(255 * tensionArriereDroit / tensionAlim)
tension_int_ArriereGauche = int(255 * tensionArriereGauche / tensionAlim)
tension_int_AvantDroit = int(255 * tensionAvantDroit / tensionAlim)
tension_int_AvantGauche = int(255 * tensionAvantGauche / tensionAlim)
# Saturation par sécurité
if (tension_int_ArriereDroit > 255):
tension_int_ArriereDroit = 255
if (tension_int_ArriereDroit < -255):
tension_int_ArriereDroit = -255
if (tension_int_ArriereGauche > 255):
tension_int_ArriereGauche = 255
if (tension_int_ArriereGauche < -255):
tension_int_ArriereGauche = -255
if (tension_int_AvantDroit > 255):
tension_int_AvantDroit = 255
if (tension_int_AvantDroit < -255):
tension_int_AvantDroit = -255
if (tension_int_AvantGauche > 255):
tension_int_AvantGauche = 255
if (tension_int_AvantGauche < -255):
tension_int_AvantGauche = -255
# Commande PWM
try:
mega.moteursArriere(tension_int_ArriereDroit, tension_int_ArriereGauche)
mega.moteursAvant(tension_int_AvantDroit, tension_int_AvantGauche)
mega.moteursCRC(tension_int_ArriereDroit + tension_int_ArriereGauche, tension_int_AvantDroit + tension_int_AvantGauche)
except:
pass
#print "Erreur moteurs"
def emitData():
global tprec
# Délai nécessaire pour que le serveur ait le temps de démarrer
#delay(5000)
tprec = time.time()
while not noLoop: loop() # appelle fonction loop sans fin
class WSHandler(tornado.websocket.WebSocketHandler):
def open(self):
global socketOK
print 'connection opened...'
socketOK = True
self.callback = PeriodicCallback(self.sendToSocket, 100)
self.callback.start()
def on_message(self, message):
global couleur, timeLastReceived, timedOut, couleurPrec, Robjet
jsonMessage = json.loads(message)
# Annulation du timeout de réception des données
timeLastReceived = time.time()
timedOut = False;
if jsonMessage.get('couleur') != None:
couleur = jsonMessage.get('couleur')[1:]
couleurPrec = couleur
if jsonMessage.get('Robjet') != None:
Robjet = 0.01 * float(jsonMessage.get('Robjet'))
if not socketOK:
couleur = couleurPrec
def on_close(self):
global socketOK
print 'connection closed...'
socketOK = False
def sendToSocket(self):
global socketOK, omegaArriereDroit, omegaArriereGauche, omegaAvantDroit, omegaAvantGauche, \
gz, intgz, commandeRot, center, radius, distance, positionPrec, ecart_angulaire
if (center == None):
position = positionPrec
else:
position = center[0]
positionPrec = position
tcourant = time.time() - T0
aEnvoyer = json.dumps({'Temps':("%.2f" % tcourant), \
'ecart_angulaire':("%.2f" % (ecart_angulaire*180/math.pi)), \
'intgz':("%.2f" % (intgz*180/math.pi)), \
'commande_rotation':("%.2f" % commandeRot), \
'position':("%d" % position), \
'rayon':("%.2f" % radius), \
'distance':("%.2f" % (100 * distance)), \
'Raw':("%.2f" % tcourant) \
+ "," + ("%.2f" % (ecart_angulaire*180/math.pi)) \
+ "," + ("%.2f" % (intgz*180/math.pi)) \
+ "," + ("%.2f" % commandeRot) \
+ "," + ("%d" % position) \
+ "," + ("%.2f" % radius) \
+ "," + ("%.2f" % (100 * distance)) \
})
if socketOK:
try:
self.write_message(aEnvoyer)
except:
pass
def check_origin(self, origin):
# Voir http://www.tornadoweb.org/en/stable/websocket.html#tornado.websocket.WebSocketHandler.check_origin
# et http://www.arundhaj.com/blog/tornado-error-during-websocket-handshake.html
return True
def get_ip_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
application = tornado.web.Application([
(r'/ws', WSHandler)
])
def startTornado():
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(9090)
tornado.ioloop.IOLoop.instance().start()
# Gestion du CTRL-C
def signal_handler(signal, frame):
print 'Sortie du programme'
CommandeMoteurs(0, 0, 0, 0)
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
#--- obligatoire pour lancement du code --
if __name__=="__main__": # pour rendre le code executable
setup() # appelle la fonction setup
print "Setup done."
th = threading.Thread(None, emitData, None, (), {})
th.daemon = True
th.start()
print "Starting Tornado."
try:
print "Connect to ws://" + get_ip_address('eth0') + ":9090/ws with Ethernet."
except:
pass
try:
print "Connect to ws://" + get_ip_address('wlan0') + ":9090/ws with Wifi."
except:
pass
socketOK = False
startTornado()
``` |
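A worked example of the colour-threshold computation performed in `SuiviCouleur()`, for the default colour "FF9600"; values are approximate and no OpenCV is needed to reproduce them.
```python
# Standalone worked example of the hex -> HSV threshold conversion used above.
import colorsys

couleur = "FF9600"
r, g, b = (int(couleur[i:i + 2], 16) for i in (0, 2, 4))   # (255, 150, 0)
hsv = [int(round(c * 255)) for c in colorsys.rgb_to_hsv(r / 255., g / 255., b / 255.)]
print(hsv)                                           # roughly [25, 255, 255]
threshold_low = (max(0, hsv[0] - 10), 20, 20)        # (15, 20, 20)
threshold_high = (min(255, hsv[0] + 10), 255, 255)   # (35, 255, 255)
```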
{
"source": "3sigma/X-Bot-RaspberryPython",
"score": 2
} |
#### File: X-Bot-RaspberryPython/programmes_python/X-Bot_SuiviLigne.py
```python
import wiringpi2
# Imports pour l'i2c
from a_star import AStar
a_star = AStar()
import time, sched
import os
import threading
import signal
import json
import sys
# Pour la détection d'adresse IP
import socket
import fcntl
import struct
# Pour le serveur de socket
import tornado.httpserver
import tornado.ioloop
from tornado.ioloop import PeriodicCallback
import tornado.web
import tornado.websocket
import tornado.template
Nmoy = 10
directionMoteurDroit = 4
pwmMoteurDroit = 5
directionMoteurGauche = 7
pwmMoteurGauche = 6
omegaDroit = 0
codeurDroitDeltaPos = 0
codeurDroitDeltaPosPrec = 0
omegaGauche = 0
codeurGaucheDeltaPos = 0
codeurGaucheDeltaPosPrec = 0
mesuremV1 = 0
mesuremV2 = 0
mesuremV3 = 0
mesuremV1Prec = 0
mesuremV2Prec = 0
mesuremV3Prec = 0
# Les moteurs sont asservis en vitesse grâce à un régulateur de type PID
# On déclare ci-dessous les variables et paramètres nécessaires à l'asservissement et au régulateur
vref = 0. # consigne de vitesse
vrefDroit = 0. # consigne vitesse de rotation du moteur droit
vrefGauche = 0. # consigne vitesse de rotation du moteur gauche
omegaDroit = 0. # vitesse de rotation du moteur droit
omegaGauche = 0. # vitesse de rotation du moteur gauche
commandeDroit = 0. # commande en tension calculée par le PID pour le moteur droit
commandeGauche = 0. # commande en tension calculée par le PID pour le moteur gauche
commande_avant_sat_Droit = 0. # valeur de la commande avant la saturation (voir ci-dessous) pour le moteur droit
commande_avant_sat_Gauche = 0. # valeur de la commande avant la saturation (voir ci-dessous) pour le moteur gauche
umax = 6. # valeur max de la tension de commande du moteur
umin = -6. # valeur min (ou max en négatif) de la tension de commande du moteur
# Déclarations pour les consignes de mouvement
vref = 3
T0 = time.time()
dt = 0.02
i = 0
tdebut = 0
# Création d'un scheduler pour exécuter des opérations à cadence fixe
s = sched.scheduler(time.time, time.sleep)
tensionBatterie = 7.4
#--- setup ---
def setup():
wiringpi2.wiringPiSetupGpio() # For GPIO pin numbering
CommandeMoteurs(0, 0, tensionBatterie)
# -- fin setup --
# -- loop --
def loop():
global i
i = i+1
s.enterabs( T0 + (i * dt), 1, CalculVitesse, ())
s.run()
# -- fin loop --
def CalculVitesse():
global ticksCodeurDroit, ticksCodeurGauche, indiceTicksCodeurDroit, indiceTicksCodeurGauche, started, vref, \
omegaDroit, omegaGauche, ticksCodeurDroitTab, ticksCodeurGaucheTab, codeurDroitDeltaPosPrec, codeurGaucheDeltaPosPrec, \
codeurDroitDeltaPos, codeurGaucheDeltaPos, commandeDroit, commandeGauche, interruptKO, vrefDroit, vrefGauche, \
codeurGaucheDeltaPosPrec, codeurDroitDeltaPosPrec, mesuremV1, mesuremV2, mesuremV3, mesuremV1Prec, mesuremV2Prec, mesuremV3Prec
debut = time.time()
# Mesure de la vitesse des moteurs grâce aux codeurs incrémentaux
try:
#codeursDeltaPos = a_star.read_codeursDeltaPos()
#print bin(codeursDeltaPos)
#codeurGaucheDeltaPos = u2s16(long(codeursDeltaPos) >> 16)
#codeurDroitDeltaPos = u2s16(long(codeursDeltaPos) & 0b00000000000000001111111111111111)
codeurGaucheDeltaPos = a_star.read_codeurGaucheDeltaPos()
codeurDroitDeltaPos = a_star.read_codeurDroitDeltaPos()
codeurGaucheDeltaPosPrec = codeurGaucheDeltaPos
codeurDroitDeltaPosPrec = codeurDroitDeltaPos
# Lecture des entrées analogiques sur lesquelles sont branchés les capteurs de suivi de ligne
mesuremV1 = a_star.analog_read(3)
mesuremV2 = a_star.analog_read(4)
mesuremV3 = a_star.analog_read(5)
mesuremV1Prec = mesuremV1
mesuremV2Prec = mesuremV2
mesuremV3Prec = mesuremV3
except:
print "Error getting data"
codeurGaucheDeltaPos = codeurGaucheDeltaPosPrec
codeurDroitDeltaPos = codeurDroitDeltaPosPrec
mesuremV1 = mesuremV1Prec
mesuremV2 = mesuremV2Prec
mesuremV3 = mesuremV3Prec
# (dt / 2) car l'A* mesure les codeurXDeltaPos toutes les 10 ms, or dt=20 ms
omegaDroit = -2 * ((2 * 3.141592 * codeurDroitDeltaPos) / 1632) / (Nmoy * (dt / 2)) # en rad/s
omegaGauche = 2 * ((2 * 3.141592 * codeurGaucheDeltaPos) / 1632) / (Nmoy * (dt / 2)) # en rad/s
# On compare par rapport à un seuil pour savoir si le capteur voit la ligne ou non
seuil = 700
surLigne1 = False
surLigne2 = False
surLigne3 = False
if mesuremV1 > seuil:
surLigne1 = True
if mesuremV2 > seuil:
surLigne2 = True
if mesuremV3 > seuil:
surLigne3 = True
# Si le robot est centré sur la ligne, on va tout droit
if ((surLigne1 == False) and (surLigne2 == True) and (surLigne3 == False)) or ((surLigne1 == True) and (surLigne2 == True) and (surLigne3 == True)):
vrefDroit = vref
vrefGauche = vref
# Si seul le capteur de droite est sur la ligne on tourne à droite fort
elif (surLigne1 == True) and (surLigne2 == False) and (surLigne3 == False):
vrefDroit = -vref * 2 / 3
vrefGauche = vref
# Si seul le capteur de gauche est sur la ligne on tourne à gauche fort
elif (surLigne1 == False) and (surLigne2 == False) and (surLigne3 == True):
vrefDroit = vref
vrefGauche = -vref * 2 / 3
# Si les deux capteurs de droite sont sur la ligne on tourne à droite normalement
elif (surLigne1 == True) and (surLigne2 == True) and (surLigne3 == False):
vrefDroit = -vref / 2
vrefGauche = vref
# Si les deux capteurs de gauche sont sur la ligne on tourne à gauche normalement
elif (surLigne1 == False) and (surLigne2 == True) and (surLigne3 == True):
vrefDroit = vref
vrefGauche = -vref / 2
# Calcul de la commande avant saturation
commande_avant_sat_Droit = vrefDroit
commande_avant_sat_Gauche = vrefGauche
# Application de la saturation sur la commande
if (commande_avant_sat_Droit > umax):
commandeDroit = umax
elif (commande_avant_sat_Droit < umin):
commandeDroit = umin
else:
commandeDroit = commande_avant_sat_Droit
if (commande_avant_sat_Gauche > umax) :
commandeGauche = umax
elif (commande_avant_sat_Gauche < umin):
commandeGauche = umin
else:
commandeGauche = commande_avant_sat_Gauche
CommandeMoteurs(commandeDroit, commandeGauche, tensionBatterie)
#print time.time() - debut
def CommandeMoteurs(commandeDroit, commandeGauche, tensionAlim):
# Cette fonction calcule et envoi les signaux PWM au pont en H
# en fonction des tensions de commande et d'alimentation
# L'ensemble pont en H + moteur pourrait ne pas être linéaire
tensionDroit = commandeDroit
tensionGauche = commandeGauche
# Normalisation de la tension d'alimentation par
# rapport à la tension d'alimentation
tension_int_droit = int(400 * tensionDroit / tensionAlim)
tension_int_gauche = -int(400 * tensionGauche / tensionAlim)
# Saturation par sécurité
if (tension_int_droit > 400):
tension_int_droit = 400
if (tension_int_droit < -400):
tension_int_droit = -400
if (tension_int_gauche > 400):
tension_int_gauche = 400
if (tension_int_gauche < -400):
tension_int_gauche = -400
# Commande PWM
try:
a_star.motors(tension_int_gauche, tension_int_droit)
except:
print "Erreur moteurs"
def emitData():
# Délai nécessaire pour que le serveur ait le temps de démarrer
wiringpi2.delay(5000)
while True: loop() # appelle fonction loop sans fin
class WSHandler(tornado.websocket.WebSocketHandler):
def open(self):
global socketOK
print 'connection opened...'
socketOK = True
self.callback = PeriodicCallback(self.sendToSocket, 100)
self.callback.start()
def on_message(self, message):
global vref
jsonMessage = json.loads(message)
if jsonMessage.get('vref') != None:
vref = float(jsonMessage.get('vref'))
def on_close(self):
global socketOK, vrefDroit, vrefGauche
print 'connection closed...'
socketOK = False
vrefDroit = 0.
vrefGauche = 0.
def sendToSocket(self):
global codeurDroitDeltaPos, codeurGaucheDeltaPos, socketOK, commandeDroit, commandeGauche, interruptKO
tcourant = time.time() - T0
aEnvoyer = json.dumps({'Temps':("%.2f" % tcourant), 'Consigne':("%.2f" % vref), 'omegaDroit':("%.2f" % omegaDroit), 'omegaGauche':("%.2f" % omegaGauche), 'Raw':("%.2f" % tcourant) + "," + ("%.2f" % vref) + "," + ("%.2f" % omegaDroit) + "," + ("%.2f" % omegaGauche)})
if socketOK:
try:
self.write_message(aEnvoyer)
except:
pass
def check_origin(self, origin):
# See http://www.tornadoweb.org/en/stable/websocket.html#tornado.websocket.WebSocketHandler.check_origin
# and http://www.arundhaj.com/blog/tornado-error-during-websocket-handshake.html
return True
def get_ip_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
application = tornado.web.Application([
(r'/ws', WSHandler)
])
def startTornado():
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(9090)
tornado.ioloop.IOLoop.instance().start()
# CTRL-C handling
def signal_handler(signal, frame):
global vrefDroit, vrefGauche
print 'You pressed Ctrl+C!'
vrefDroit = 0.
vrefGauche = 0.
CommandeMoteurs(0, 0, 5)
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
#--- required to launch the code --
if __name__=="__main__": # pour rendre le code executable
started = False
startedDroit = False
startedGauche = False
setup() # appelle la fonction setup
print "Setup done."
th = threading.Thread(None, emitData, None, (), {})
th.daemon = True
th.start()
print "Starting Tornado."
try:
print "Connect to ws://" + get_ip_address('eth0') + ":9090/ws with Ethernet."
except:
pass
try:
print "Connect to ws://" + get_ip_address('wlan0') + ":9090/ws with Wifi."
except:
pass
socketOK = False
startTornado()
``` |
{
"source": "3springs/attentive-neural-processes",
"score": 3
} |
#### File: attentive-neural-processes/neural_processes/dict_logger.py
```python
from pytorch_lightning.loggers import LightningLoggerBase
from pytorch_lightning.loggers.tensorboard import TensorBoardLogger
class DictLogger(TensorBoardLogger):
"""PyTorch Lightning `dict` logger."""
# see https://github.com/PyTorchLightning/pytorch-lightning/blob/50881c0b31/pytorch_lightning/logging/base.py
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.metrics = []
def log_hyperparams(self, *args, **kwargs):
# We will do this manually with final metrics
pass
def log_metrics(self, metrics, step=None):
super().log_metrics(metrics, step=step)
self.metrics.append(metrics)
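# Minimal usage sketch (hypothetical values, not part of the original file):
# logger = DictLogger(save_dir="lightning_logs", name="np_experiment")
# trainer = pl.Trainer(logger=logger, max_epochs=1)
# trainer.fit(model)
# logger.metrics  # list of dicts, one per logged step, in addition to the TensorBoard files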
```
#### File: models/neural_process/model.py
```python
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader
import math
import numpy as np
from neural_processes.modules import BatchNormSequence, BatchMLP, Attention, LSTMBlock
from neural_processes.utils import kl_loss_var, log_prob_sigma
from neural_processes.utils import hparams_power
class LatentEncoder(nn.Module):
def __init__(
self,
input_dim,
hidden_dim=32,
latent_dim=32,
self_attention_type="dot",
n_encoder_layers=3,
min_std=0.01,
batchnorm=False,
dropout=0,
attention_dropout=0,
use_lvar=False,
use_self_attn=False,
attention_layers=2,
use_lstm=False
):
super().__init__()
# self._input_layer = nn.Linear(input_dim, hidden_dim)
if use_lstm:
self._encoder = LSTMBlock(input_dim, hidden_dim, batchnorm=batchnorm, dropout=dropout, num_layers=n_encoder_layers)
else:
self._encoder = BatchMLP(input_dim, hidden_dim, batchnorm=batchnorm, dropout=dropout, num_layers=n_encoder_layers)
if use_self_attn:
self._self_attention = Attention(
hidden_dim,
self_attention_type,
attention_layers,
rep="identity",
dropout=attention_dropout,
)
self._penultimate_layer = nn.Linear(hidden_dim, hidden_dim)
self._mean = nn.Linear(hidden_dim, latent_dim)
self._log_var = nn.Linear(hidden_dim, latent_dim)
self._min_std = min_std
self._use_lvar = use_lvar
self._use_lstm = use_lstm
self._use_self_attn = use_self_attn
def forward(self, x, y):
encoder_input = torch.cat([x, y], dim=-1)
# Pass final axis through MLP
encoded = self._encoder(encoder_input)
# Aggregator: take the mean over all points
if self._use_self_attn:
attention_output = self._self_attention(encoded, encoded, encoded)
mean_repr = attention_output.mean(dim=1)
else:
mean_repr = encoded.mean(dim=1)
# Have further MLP layers that map to the parameters of the Gaussian latent
mean_repr = torch.relu(self._penultimate_layer(mean_repr))
# Then apply further linear layers to output latent mu and log sigma
mean = self._mean(mean_repr)
log_var = self._log_var(mean_repr)
if self._use_lvar:
# Clip it in the log domain, so it can only approach self.min_std; this helps avoid mode collapse
# Two ways: a better but untested way using the more stable log domain, and the way from the deepmind repo
log_var = F.logsigmoid(log_var)
log_var = torch.clamp(log_var, np.log(self._min_std), -np.log(self._min_std))
sigma = torch.exp(0.5 * log_var)
else:
sigma = self._min_std + (1 - self._min_std) * torch.sigmoid(log_var * 0.5)
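# The sigmoid above keeps sigma in the open interval (min_std, 1), softly bounding
# the latent scale away from zero (see the min_std comment in NeuralProcess.__init__);
# the use_lvar branch instead clamps log_var directly in log space.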
dist = torch.distributions.Normal(mean, sigma)
return dist, log_var
class DeterministicEncoder(nn.Module):
def __init__(
self,
input_dim,
x_dim,
hidden_dim=32,
n_d_encoder_layers=3,
self_attention_type="dot",
cross_attention_type="dot",
use_self_attn=False,
attention_layers=2,
batchnorm=False,
dropout=0,
attention_dropout=0,
use_lstm=False,
):
super().__init__()
self._use_self_attn = use_self_attn
# self._input_layer = nn.Linear(input_dim, hidden_dim)
if use_lstm:
self._d_encoder = LSTMBlock(input_dim, hidden_dim, batchnorm=batchnorm, dropout=dropout, num_layers=n_d_encoder_layers)
else:
self._d_encoder = BatchMLP(input_dim, hidden_dim, batchnorm=batchnorm, dropout=dropout, num_layers=n_d_encoder_layers)
if use_self_attn:
self._self_attention = Attention(
hidden_dim,
self_attention_type,
attention_layers,
rep="identity",
dropout=attention_dropout,
)
self._cross_attention = Attention(
hidden_dim,
cross_attention_type,
x_dim=x_dim,
attention_layers=attention_layers,
)
def forward(self, context_x, context_y, target_x):
# Concatenate x and y along the filter axes
d_encoder_input = torch.cat([context_x, context_y], dim=-1)
# Pass final axis through MLP
d_encoded = self._d_encoder(d_encoder_input)
if self._use_self_attn:
d_encoded = self._self_attention(d_encoded, d_encoded, d_encoded)
# Apply attention as mean aggregation
h = self._cross_attention(context_x, d_encoded, target_x)
return h
class Decoder(nn.Module):
def __init__(
self,
x_dim,
y_dim,
hidden_dim=32,
latent_dim=32,
n_decoder_layers=3,
use_deterministic_path=True,
min_std=0.01,
use_lvar=False,
batchnorm=False,
dropout=0,
use_lstm=False,
):
super(Decoder, self).__init__()
self._target_transform = nn.Linear(x_dim, hidden_dim)
if use_deterministic_path:
hidden_dim_2 = 2 * hidden_dim + latent_dim
else:
hidden_dim_2 = hidden_dim + latent_dim
if use_lstm:
self._decoder = LSTMBlock(hidden_dim_2, hidden_dim_2, batchnorm=batchnorm, dropout=dropout, num_layers=n_decoder_layers)
else:
self._decoder = BatchMLP(hidden_dim_2, hidden_dim_2, batchnorm=batchnorm, dropout=dropout, num_layers=n_decoder_layers)
self._mean = nn.Linear(hidden_dim_2, y_dim)
self._std = nn.Linear(hidden_dim_2, y_dim)
self._use_deterministic_path = use_deterministic_path
self._min_std = min_std
self._use_lvar = use_lvar
def forward(self, r, z, target_x):
# concatenate target_x and representation
x = self._target_transform(target_x)
if self._use_deterministic_path:
z = torch.cat([r, z], dim=-1)
r = torch.cat([z, x], dim=-1)
r = self._decoder(r)
# Get the mean and the variance
mean = self._mean(r)
log_sigma = self._std(r)
# Bound or clamp the variance
if self._use_lvar:
log_sigma = torch.clamp(log_sigma, math.log(self._min_std), -math.log(self._min_std))
sigma = torch.exp(log_sigma)
else:
sigma = self._min_std + (1 - self._min_std) * F.softplus(log_sigma)
dist = torch.distributions.Normal(mean, sigma)
return dist, log_sigma
class NeuralProcess(nn.Module):
@staticmethod
def FROM_HPARAMS(hparams):
hparams = hparams_power(hparams)
return NeuralProcess(**hparams)
def __init__(self,
x_dim, # features in input
y_dim, # number of features in output
hidden_dim=32, # size of hidden space
latent_dim=32, # size of latent space
latent_enc_self_attn_type="ptmultihead", # type of attention: "uniform", "dot", "multihead" "ptmultihead": see attentive neural processes paper
det_enc_self_attn_type="ptmultihead",
det_enc_cross_attn_type="ptmultihead",
n_latent_encoder_layers=2,
n_det_encoder_layers=2, # number of deterministic encoder layers
n_decoder_layers=2,
use_deterministic_path=True,
min_std=0.01, # To avoid collapse use a minimum standard deviation, should be much smaller than variation in labels
dropout=0,
use_self_attn=False,
attention_dropout=0,
batchnorm=False,
use_lvar=False, # Alternative loss calculation, may be more stable
attention_layers=2,
use_rnn=True, # use RNN/LSTM?
use_lstm_le=False, # use another LSTM in latent encoder instead of MLP
use_lstm_de=False, # use another LSTM in deterministic encoder instead of MLP
use_lstm_d=False, # use another lstm in decoder instead of MLP
context_in_target=False,
**kwargs,
):
super(NeuralProcess, self).__init__()
self._use_rnn = use_rnn
self.context_in_target = context_in_target
# Sometimes input normalisation can be important, an initial batch norm is a nice way to ensure this https://stackoverflow.com/a/46772183/221742
self.norm_x = BatchNormSequence(x_dim, affine=False)
self.norm_y = BatchNormSequence(y_dim, affine=False)
if self._use_rnn:
self._lstm_x = nn.LSTM(
input_size=x_dim,
hidden_size=hidden_dim,
num_layers=attention_layers,
dropout=dropout,
batch_first=True
)
self._lstm_y = nn.LSTM(
input_size=y_dim,
hidden_size=hidden_dim,
num_layers=attention_layers,
dropout=dropout,
batch_first=True
)
x_dim = hidden_dim
y_dim2 = hidden_dim
else:
y_dim2 = y_dim
self._latent_encoder = LatentEncoder(
x_dim + y_dim2,
hidden_dim=hidden_dim,
latent_dim=latent_dim,
self_attention_type=latent_enc_self_attn_type,
n_encoder_layers=n_latent_encoder_layers,
attention_layers=attention_layers,
dropout=dropout,
use_self_attn=use_self_attn,
attention_dropout=attention_dropout,
batchnorm=batchnorm,
min_std=min_std,
use_lvar=use_lvar,
use_lstm=use_lstm_le,
)
self._deterministic_encoder = DeterministicEncoder(
input_dim=x_dim + y_dim2,
x_dim=x_dim,
hidden_dim=hidden_dim,
self_attention_type=det_enc_self_attn_type,
cross_attention_type=det_enc_cross_attn_type,
n_d_encoder_layers=n_det_encoder_layers,
attention_layers=attention_layers,
use_self_attn=use_self_attn,
dropout=dropout,
batchnorm=batchnorm,
attention_dropout=attention_dropout,
use_lstm=use_lstm_de,
)
self._decoder = Decoder(
x_dim,
y_dim,
hidden_dim=hidden_dim,
latent_dim=latent_dim,
dropout=dropout,
batchnorm=batchnorm,
min_std=min_std,
use_lvar=use_lvar,
n_decoder_layers=n_decoder_layers,
use_deterministic_path=use_deterministic_path,
use_lstm=use_lstm_d,
)
self._use_deterministic_path = use_deterministic_path
self._use_lvar = use_lvar
def forward(self, context_x, context_y, target_x, target_y=None, sample_latent=None):
if sample_latent is None:
sample_latent = self.training
device = next(self.parameters()).device
# if self.hparams.get('bnorm_inputs', True):
# https://stackoverflow.com/a/46772183/221742
target_x = self.norm_x(target_x)
context_x = self.norm_x(context_x)
context_y = self.norm_y(context_y)
if self._use_rnn:
# see https://arxiv.org/abs/1910.09323 where x is substituted with h = RNN(x)
# x need to be provided as [B, T, H]
target_x, _ = self._lstm_x(target_x)
context_x, _ = self._lstm_x(context_x)
context_y, _ = self._lstm_y(context_y)
dist_prior, log_var_prior = self._latent_encoder(context_x, context_y)
if (target_y is not None):
target_y2 = self.norm_y(target_y)
if self._use_rnn:
target_y2, _ = self._lstm_y(target_y2)
dist_post, log_var_post = self._latent_encoder(target_x, target_y2)
if self.training:
z = dist_post.rsample() if sample_latent else dist_post.loc
else:
z = dist_prior.rsample() if sample_latent else dist_prior.loc
else:
z = dist_prior.rsample() if sample_latent else dist_prior.loc
num_targets = target_x.size(1)
z = z.unsqueeze(1).repeat(1, num_targets, 1) # [B, T_target, H]
if self._use_deterministic_path:
r = self._deterministic_encoder(context_x, context_y,
target_x) # [B, T_target, H]
else:
r = None
dist, log_sigma = self._decoder(r, z, target_x)
if target_y is not None:
if self._use_lvar:
log_p = log_prob_sigma(target_y, dist.loc, log_sigma).mean(-1) # [B, T_target, Y].mean(-1)
if self.context_in_target:
log_p[:, :context_x.size(1)] /= 100
loss_kl = kl_loss_var(dist_prior.loc, log_var_prior,
dist_post.loc, log_var_post).mean(-1) # [B, R].mean(-1)
else:
log_p = dist.log_prob(target_y).mean(-1)
if self.context_in_target:
log_p[:, :context_x.size(1)] /= 100 # There's the temptation for it to fit only on context, where it knows the answer, and learn very low uncertainty.
loss_kl = torch.distributions.kl_divergence(
dist_post, dist_prior).mean(-1) # [B, R].mean(-1)
loss_kl = loss_kl[:, None].expand(log_p.shape)
mse_loss = F.mse_loss(dist.loc, target_y, reduction='none')[:,:context_x.size(1)].mean()
loss_p = -log_p
# Weight loss nearer to prediction time?
weight = (torch.arange(loss_p.shape[1]) + 1).float().to(device)[None, :]
loss_p_weighted = loss_p / torch.sqrt(weight) # We want to weight nearer stuff more
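# Illustrative example (shapes assumed): for loss_p of shape [B, T] with T = 4,
# weight = [1, 2, 3, 4], so the per-step losses are divided by
# [1.0, ~1.41, ~1.73, 2.0] and earlier target steps contribute more to the mean.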
loss_p_weighted = loss_p_weighted.mean()
loss = (loss_kl - log_p).mean()
loss_kl = loss_kl.mean()
log_p = log_p.mean()
loss_p = loss_p.mean()
else:
loss_p = None
mse_loss = None
loss_kl = None
loss = None
loss_p_weighted = None
y_pred = dist.rsample() if self.training else dist.loc
return y_pred, dict(loss=loss, loss_p=loss_p, loss_kl=loss_kl, loss_mse=mse_loss, loss_p_weighted=loss_p_weighted), dict(log_sigma=log_sigma, y_dist=dist)
```
#### File: neural_processes/models/transformer_seq2seq.py
```python
import os
import numpy as np
import pandas as pd
import torch
from tqdm.auto import tqdm
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from test_tube import Experiment, HyperOptArgumentParser
from neural_processes.data.smart_meter import (
collate_fns,
SmartMeterDataSet,
get_smartmeter_df,
)
import torchvision.transforms as transforms
from neural_processes.plot import plot_from_loader_to_tensor, plot_from_loader
from argparse import ArgumentParser
import json
import pytorch_lightning as pl
import math
from matplotlib import pyplot as plt
import torch
import io
import PIL
import optuna
from torchvision.transforms import ToTensor
from neural_processes.data.smart_meter import get_smartmeter_df
from neural_processes.modules import BatchNormSequence, LSTMBlock, NPBlockRelu2d
from neural_processes.utils import ObjectDict
from neural_processes.lightning import PL_Seq2Seq
from ..logger import logger
from ..utils import hparams_power
class TransformerSeq2SeqNet(nn.Module):
def __init__(self, hparams):
super().__init__()
hparams = hparams_power(hparams)
self.hparams = hparams
self._min_std = hparams.min_std
hidden_out_size = self.hparams.hidden_out_size
y_size = self.hparams.input_size - self.hparams.input_size_decoder
x_size = self.hparams.input_size_decoder
# Sometimes input normalisation can be important, an initial batch norm is a nice way to ensure this https://stackoverflow.com/a/46772183/221742
self.x_norm = BatchNormSequence(x_size, affine=False)
self.y_norm = BatchNormSequence(y_size, affine=False)
# TODO embed both X's the same
if self.hparams.get('use_lstm', False):
self.x_emb = LSTMBlock(x_size, x_size)
self.y_emb = LSTMBlock(y_size, y_size)
self.enc_emb = nn.Linear(self.hparams.input_size, hidden_out_size)
self.dec_emb = nn.Linear(self.hparams.input_size_decoder, hidden_out_size)
encoder_norm = nn.LayerNorm(hidden_out_size)
layer_enc = nn.TransformerEncoderLayer(
d_model=hidden_out_size,
dim_feedforward=hidden_out_size*4,
dropout=self.hparams.attention_dropout,
nhead=self.hparams.nhead,
# activation
)
self.encoder = nn.TransformerEncoder(
layer_enc, num_layers=self.hparams.nlayers, norm=encoder_norm
)
layer_dec = nn.TransformerDecoderLayer(
d_model=hidden_out_size,
dim_feedforward=hidden_out_size*4,
dropout=self.hparams.attention_dropout,
nhead=self.hparams.nhead,
)
decoder_norm = nn.LayerNorm(hidden_out_size)
self.decoder = nn.TransformerDecoder(
layer_dec, num_layers=self.hparams.nlayers, norm=decoder_norm
)
self.mean = NPBlockRelu2d(hidden_out_size, self.hparams.output_size)
self.std = NPBlockRelu2d(hidden_out_size, self.hparams.output_size)
self._use_lvar = False
# self._reset_parameters()
def _reset_parameters(self):
r"""Initiate parameters in the transformer model."""
for p in self.parameters():
if p.dim() > 1:
torch.nn.init.xavier_uniform_(p)
def forward(self, context_x, context_y, target_x, target_y=None, mask_context=True, mask_target=True):
device = next(self.parameters()).device
tgt_key_padding_mask = None
# if target_y is not None and mask_target:
# # Mask nan's
# target_mask = torch.isfinite(target_y)# & (target_y!=self.hparams.nan_value)
# target_y[~target_mask] = 0
# target_y = target_y.detach()
# tgt_key_padding_mask = ~target_mask.any(-1)
src_key_padding_mask = None
# if mask_context:
# # Mask nan's
# context_mask = torch.isfinite(context_y)# & (context_y!=self.hparams.nan_value)
# context_y[~context_mask] = 0
# context_y = context_y.detach()
# src_key_padding_mask = ~context_mask.any(-1)# * float('-inf')
# Norm
context_x = self.x_norm(context_x)
target_x = self.x_norm(target_x)
context_y = self.y_norm(context_y)
# if target_y is not None:
# target_y = self.y_norm(target_y)
# LSTM
if self.hparams.get('use_lstm', False):
context_x = self.x_emb(context_x)
target_x = self.x_emb(target_x)
# Size([B, C, X]) -> Size([B, C, X])
context_y = self.y_emb(context_y)
# Size([B, T, Y]) -> Size([B, T, Y])
# Embed
x = torch.cat([context_x, context_y], -1)
x = self.enc_emb(x)
# Size([B, C, X]) -> Size([B, C, hidden_dim])
target_x = self.dec_emb(target_x)
# Size([B, C, T]) -> Size([B, C, hidden_dim])
x = x.permute(1, 0, 2) # (B,C,hidden_dim) -> (C,B,hidden_dim)
target_x = target_x.permute(1, 0, 2)
# requires (C, B, hidden_dim)
memory = self.encoder(x, src_key_padding_mask=src_key_padding_mask)
# In transformers the memory and target_x need to be the same length. Lets use a permutation invariant agg on the context
# Then expand it, so it's available as we decode, conditional on target_x
# (C, B, emb_dim) -> (B, emb_dim) -> (T, B, emb_dim)
memory_max = memory.max(dim=0, keepdim=True)[0].expand_as(target_x)
memory_mean = memory.mean(dim=0, keepdim=True)[0].expand_as(target_x)
memory_last = memory[-1:, :, :].expand_as(target_x)
memory_all = memory_max + memory_last
if self.hparams.agg == 'max':
memory = memory_max
elif self.hparams.agg == 'last':
memory = memory_last
elif self.hparams.agg == 'all':
memory = memory_all
elif self.hparams.agg == 'mean':
memory = memory_mean
else:
raise Exception(f"hparams.agg should be in ['last', 'max', 'mean', 'all'] not '{self.hparams.agg}''")
outputs = self.decoder(target_x, memory, tgt_key_padding_mask=tgt_key_padding_mask)
# [T, B, emb_dim] -> [B, T, emb_dim]
outputs = outputs.permute(1, 0, 2).contiguous()
# Size([B, T, emb_dim])
mean = self.mean(outputs)
log_sigma = self.std(outputs)
if self._use_lvar:
log_sigma = torch.clamp(
log_sigma, math.log(self._min_std), -math.log(self._min_std)
)
sigma = torch.exp(log_sigma)
else:
sigma = self._min_std + (1 - self._min_std) * F.softplus(log_sigma)
y_dist = torch.distributions.Normal(mean, sigma)
# Loss
loss_mse = loss_p = loss_p_weighted = None
if target_y is not None:
loss_mse = F.mse_loss(mean, target_y, reduction="none")
if self._use_lvar:
loss_p = -log_prob_sigma(target_y, mean, log_sigma)
else:
loss_p = -y_dist.log_prob(target_y).mean(-1)
if self.hparams["context_in_target"]:
loss_p[: context_x.size(1)] /= 100
loss_mse[: context_x.size(1)] /= 100
# Weight loss nearer to prediction time?
weight = (torch.arange(loss_p.shape[1]) + 1).float().to(device)[None, :]
loss_p_weighted = loss_p / torch.sqrt(weight) # We want to weight nearer stuff more
y_pred = y_dist.rsample() if self.training else y_dist.loc
return (
y_pred,
dict(loss=loss_p.mean(), loss_p=loss_p.mean(), loss_mse=loss_mse.mean(), loss_p_weighted=loss_p_weighted.mean()),
dict(log_sigma=log_sigma, y_dist=y_dist),
)
class TransformerSeq2Seq_PL(PL_Seq2Seq):
def __init__(self, hparams, MODEL_CLS=TransformerSeq2SeqNet, **kwargs):
super().__init__(hparams, MODEL_CLS=MODEL_CLS, **kwargs)
DEFAULT_ARGS = {
"agg": "max",
"attention_dropout": 0.2,
"hidden_out_size_power": 4,
"hidden_size_power": 5,
"learning_rate": 0.002,
"nhead_power": 3,
"nlayers": 2,
"use_lstm": False
}
@staticmethod
def add_suggest(trial: optuna.Trial, user_attrs={}):
"""
Add hyperparam ranges to an optuna trial and typical user attrs.
Usage:
trial = optuna.trial.FixedTrial(
params={
'hidden_size': 128,
}
)
trial = add_suggest(trial)
trainer = pl.Trainer()
model = LSTM_PL(dict(**trial.params, **trial.user_attrs), dataset_train,
dataset_test, cache_base_path, norm)
trainer.fit(model)
"""
trial.suggest_loguniform("learning_rate", 1e-6, 1e-2)
trial.suggest_uniform("attention_dropout", 0, 0.75)
# we must have nhead<==hidden_size
# so nhead_power.max()<==hidden_size_power.min()
trial.suggest_discrete_uniform("hidden_size_power", 4, 10, 1)
trial.suggest_discrete_uniform("hidden_out_size_power", 4, 9, 1)
trial.suggest_discrete_uniform("nhead_power", 1, 4, 1)
trial.suggest_int("nlayers", 1, 12)
trial.suggest_categorical("use_lstm", [False, True])
trial.suggest_categorical("agg", ['last', 'max', 'mean', 'all'])
user_attrs_default = {
"batch_size": 16,
"grad_clip": 40,
"max_nb_epochs": 200,
"num_workers": 4,
"num_extra_target": 24 * 4,
"vis_i": "670",
"num_context": 24 * 4,
"input_size": 18,
"input_size_decoder": 17,
"context_in_target": False,
"output_size": 1,
"patience": 3,
'min_std': 0.005,
}
[trial.set_user_attr(k, v) for k, v in user_attrs_default.items()]
[trial.set_user_attr(k, v) for k, v in user_attrs.items()]
return trial
``` |
{
"source": "3springs/seq2seq-time",
"score": 2
} |
#### File: seq2seq-time/notebooks/01.0-mc-datasets.py
```python
import xarray as xr
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
from tqdm.auto import tqdm
from IPython.display import display, HTML
# +
import holoviews as hv
from holoviews import opts
from holoviews.operation.datashader import datashade, dynspread
hv.extension('bokeh')
from seq2seq_time.visualization.hv_ggplot import ggplot_theme
hv.renderer('bokeh').theme = ggplot_theme
hv.archive.auto()
# holoview datashader timeseries options
# %opts RGB [width=800 height=200 show_grid=True active_tools=["xwheel_zoom"] default_tools=["xpan","xwheel_zoom", "reset", "hover"] toolbar="right"]
# %opts Curve [width=800 height=200 show_grid=True active_tools=["xwheel_zoom"] default_tools=["xpan","xwheel_zoom", "reset", "hover"] toolbar="right"]
# %opts Scatter [width=800 height=200 show_grid=True active_tools=["xwheel_zoom"] default_tools=["xpan","xwheel_zoom", "reset", "hover"] toolbar="right"]
# %opts Layout [width=800 height=200]
# -
# ## Parameters
window_past = 48*2
window_future = 48
batch_size = 4
datasets_root = Path('../data/processed/')
# ## Plot helpers
# ## Datasets
# +
from seq2seq_time.data.data import IMOSCurrentsVel, AppliancesEnergyPrediction, BejingPM25, GasSensor, MetroInterstateTraffic
datasets = [IMOSCurrentsVel, BejingPM25, GasSensor, AppliancesEnergyPrediction, MetroInterstateTraffic, ]
datasets
# +
# plot a batch
def plot_batch_y(ds, i):
x_past, y_past, x_future, y_future = ds.get_rows(i)
y = pd.concat([y_past, y_future])
p = hv.Scatter(y)
now = y_past.index[-1]
p *= hv.VLine(now).relabel('now').opts(color='red')
return p
def plot_batches_y(dataset, window_past=window_past, window_future=window_future, n = 4):
ds_name = type(dataset).__name__
opts=dict(width=200, height=100, xaxis=None, yaxis=None)
ds_train, ds_val, ds_test = dataset.to_datasets(window_past=window_past,
window_future=window_future)
max_i = min(len(ds_train), len(ds_val), len(ds_test))
ii = list(np.linspace(0, max_i-10, n-1).astype(int)) + [-1]
l = hv.Layout()
for i in ii:
l += plot_batch_y(ds_train, i).opts(title=f'train {i}', **opts)
l += plot_batch_y(ds_val, i).opts(title=f'val {i}', **opts)
l += plot_batch_y(ds_test, i).opts(title=f'test {i}', **opts)
return l.opts(shared_axes=False, toolbar='right', title=f"{ds_name} freq={dataset.df.index.freq.freqstr}").cols(3)
# -
for dataset in datasets:
d = dataset(datasets_root)
display(HTML(f"<h3>{dataset.__name__}</h3>"))
print('Description:', d.__doc__)
print(f'Stats:\n\t{len(d)} rows at freq: "{d.df.index.freq.freqstr}"')
print('\tcolumns_forecast:', d.columns_forecast)
print('\tcolumns_past:', d.columns_past)
print('\tcolumns_target:', d.columns_target)
with pd.option_context("display.max_rows", 4, "display.max_columns", 20):
display(d.df)
display(plot_batches_y(d, n=2).opts(title=''))
# View with x and y col
for dataset in datasets:
ds_name = type(dataset).__name__
d = dataset(datasets_root)
print(d)
ds_train, ds_val, ds_test = d.to_datasets(window_past=window_past,
window_future=window_future)
display(plot_batch_y(ds_train, 10))
# +
# # View train, test, val splits
# for dataset in datasets:
# ds_name = type(dataset).__name__
# d = dataset(datasets_root)
# print(d)
# display(plot_batches_y(d))
# +
def plot_batch_x(ds, i):
"""Plot input features"""
x_past, y_past, x_future, y_future = ds.get_rows(i)
x = pd.concat([x_past, x_future])
p = hv.NdOverlay({
col: hv.Curve(x[col]) for col in x.columns
}, kdims='column')
now = y_past.index[-1]
p *= hv.VLine(now).relabel('now').opts(color='red')
return p
def plot_batches_x(d):
"""Plot input features for multiple batch"""
ds_train, ds_val, ds_test = d.to_datasets(window_past=window_past,
window_future=window_future)
l = plot_batch_x(ds_train, 10) + plot_batch_x(ds_val, 10) + plot_batch_x(ds_test, 10)
l = l.cols(1).opts(shared_axes=False, title=f'{type(d).__name__}')
return l
# -
# +
# # View input columns
# for dataset in datasets:
# d = dataset(datasets_root)
# display(plot_batches_x(d))
# -
hv.archive.export()
hv.archive.last_export_status()
# +
hv.archive.auto(enabled=False) # We can't capture dynamic plots
# View train, test, val splits
for dataset in datasets:
d = dataset(datasets_root)
p = hv.Layout()
p += dynspread(
datashade(hv.Scatter(d.df_train[d.columns_target[0]]),
cmap='red'))
p *= dynspread(
datashade(hv.Scatter(d.df_val[d.columns_target[0]]),
cmap='green'))
p *= dynspread(
datashade(hv.Scatter(d.df_test[d.columns_target[0]]),
cmap='blue'))
p = p.opts(title=f"{dataset.__name__}, n={len(d)}, freq={d.df.index.freq.freqstr}")
display(p)
# -
```
#### File: seq2seq-time/notebooks/07.1-mc-optuna.py
```python
import torch
from torch import nn, optim
from torch.nn import functional as F
from torch.autograd import Variable
import torch
import torch.utils.data
from pathlib import Path
from tqdm.auto import tqdm
import pytorch_lightning as pl
# -
from seq2seq_time.data.dataset import Seq2SeqDataSet, Seq2SeqDataSets
from seq2seq_time.predict import predict, predict_multi
from seq2seq_time.util import dset_to_nc
# +
import logging
import warnings
import seq2seq_time.silence
warnings.simplefilter('once')
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=DeprecationWarning)
warnings.filterwarnings('ignore', 'Consider increasing the value of the `num_workers` argument', UserWarning)
warnings.filterwarnings('ignore', 'Your val_dataloader has `shuffle=True`', UserWarning)
from pytorch_lightning import _logger as log
log.setLevel(logging.WARN)
# -
# ## Parameters
# +
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f'using {device}')
timestamp = '20201108-300000'
print(timestamp)
window_past = 48*7
window_future = 48
batch_size = 64
num_workers = 4
datasets_root = Path('../data/processed/')
window_past
# -
# ## Datasets
#
# From easy to hard, these datasets show different challenges, all of them with more than 20k datapoints and a regression output. See the 00.01 notebook for more details, and the code for more information.
#
# Some such as MetroInterstateTraffic are easier, some are periodic such as BejingPM25, some are conditional on inputs such as GasSensor, and some are noisy and periodic like IMOSCurrentsVel
from seq2seq_time.data.data import IMOSCurrentsVel, AppliancesEnergyPrediction, BejingPM25, GasSensor, MetroInterstateTraffic
datasets = [MetroInterstateTraffic, IMOSCurrentsVel, GasSensor, AppliancesEnergyPrediction, BejingPM25]
datasets
# ## Lightning
#
# We will use pytorch lightning to handle all the training scaffolding. We have a common pytorch lightning class that takes in the model and defines training steps and logging.
# +
import pytorch_lightning as pl
class PL_MODEL(pl.LightningModule):
def __init__(self, model, lr=3e-4, patience=None, weight_decay=0):
super().__init__()
self._model = model
self.lr = lr
self.patience = patience
self.weight_decay = weight_decay
def forward(self, x_past, y_past, x_future, y_future=None):
"""Eval/Predict"""
y_dist, extra = self._model(x_past, y_past, x_future, y_future)
return y_dist, extra
def training_step(self, batch, batch_idx, phase='train'):
x_past, y_past, x_future, y_future = batch
y_dist, extra = self.forward(*batch)
loss = -y_dist.log_prob(y_future).mean()
self.log_dict({f'loss/{phase}':loss})
if ('loss' in extra) and (phase=='train'):
# some models have a special loss
loss = extra['loss']
self.log_dict({f'model_loss/{phase}':loss})
return loss
def validation_step(self, batch, batch_idx):
return self.training_step(batch, batch_idx, phase='val')
def test_step(self, batch, batch_idx):
return self.training_step(batch, batch_idx, phase='test')
def configure_optimizers(self):
optim = torch.optim.AdamW(self.parameters(), lr=self.lr, weight_decay=self.weight_decay)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optim,
patience=self.patience,
verbose=False,
min_lr=1e-7,
) if self.patience else None
return {'optimizer': optim, 'lr_scheduler': scheduler, 'monitor': 'loss/val'}
# -
from torch.utils.data import DataLoader
from pytorch_lightning.loggers import CSVLogger, WandbLogger, TensorBoardLogger, TestTubeLogger
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.callbacks import LearningRateMonitor
# ## Models
from seq2seq_time.models.baseline import BaselineLast, BaselineMean
from seq2seq_time.models.lstm_seq2seq import LSTMSeq2Seq
from seq2seq_time.models.lstm import LSTM
from seq2seq_time.models.transformer import Transformer
from seq2seq_time.models.transformer_seq2seq import TransformerSeq2Seq
from seq2seq_time.models.neural_process import RANP
from seq2seq_time.models.transformer_process import TransformerProcess
from seq2seq_time.models.tcn import TCNSeq
from seq2seq_time.models.inceptiontime import InceptionTimeSeq
from seq2seq_time.models.xattention import CrossAttention
# +
import gc
def free_mem():
gc.collect()
torch.cuda.empty_cache()
gc.collect()
# -
# +
# PARAMS: model
dropout=0.0
layers=6
nhead=4
models = [
# lambda xs, ys: BaselineLast(),
# lambda xs, ys, hidden_size: BaselineMean(),
lambda xs, ys, hidden_size, layers:TransformerProcess(xs,
ys, hidden_size=hidden_size, nhead=nhead,
latent_dim=hidden_size//2, dropout=dropout,
nlayers=layers),
lambda xs, ys, hidden_size, layers: RANP(xs,
ys, hidden_dim=hidden_size, dropout=dropout,
latent_dim=hidden_size//2, n_decoder_layers=layers, n_latent_encoder_layers=layers, n_det_encoder_layers=layers),
lambda xs, ys, hidden_size, layers:TCNSeq(xs, ys, hidden_size=hidden_size, nlayers=layers, dropout=dropout, kernel_size=2),
lambda xs, ys, hidden_size, layers: Transformer(xs,
ys,
attention_dropout=dropout,
nhead=nhead,
nlayers=layers,
hidden_size=hidden_size),
lambda xs, ys, hidden_size, layers: LSTM(xs,
ys,
hidden_size=hidden_size,
lstm_layers=layers//2,
lstm_dropout=dropout),
lambda xs, ys, hidden_size, layers: TransformerSeq2Seq(xs,
ys,
hidden_size=hidden_size,
nhead=nhead,
nlayers=layers,
attention_dropout=dropout
),
lambda xs, ys, hidden_size, layers: LSTMSeq2Seq(xs,
ys,
hidden_size=hidden_size,
lstm_layers=layers//2,
lstm_dropout=dropout),
lambda xs, ys, hidden_size, layers: CrossAttention(xs,
ys,
nlayers=layers,
hidden_size=hidden_size,),
lambda xs, ys, hidden_size, layers: InceptionTimeSeq(xs,
ys,
kernel_size=96,
layers=layers//2,
hidden_size=hidden_size,
bottleneck=hidden_size//4)
]
# +
# DEBUG: sanity check
for Dataset in datasets:
dataset_name = Dataset.__name__
dataset = Dataset(datasets_root)
ds_train, ds_val, ds_test = dataset.to_datasets(window_past=window_past,
window_future=window_future)
# Init data
x_past, y_past, x_future, y_future = ds_train.get_rows(10)
xs = x_past.shape[-1]
ys = y_future.shape[-1]
# Loaders
dl_train = DataLoader(ds_train,
batch_size=batch_size,
shuffle=True,
pin_memory=num_workers == 0,
num_workers=num_workers)
dl_val = DataLoader(ds_val,
shuffle=True,
batch_size=batch_size,
num_workers=num_workers)
for m_fn in models:
free_mem()
pt_model = m_fn(xs, ys, 8, 4)
model_name = type(pt_model).__name__
print(timestamp, dataset_name, model_name)
# Wrap in lightning
model = PL_MODEL(pt_model,
lr=3e-4
).to(device)
trainer = pl.Trainer(
fast_dev_run=True,
# GPU
gpus=1,
amp_level='O1',
precision=16,
)
# -
# ## Train
max_iters=20000
tensorboard_dir = Path(f"../outputs/{timestamp}").resolve()
print(f'For tensorboard run:\ntensorboard --logdir="{tensorboard_dir}"')
# +
def objective(trial):
"""
Optuna function to optimize
See https://github.com/optuna/optuna/blob/master/examples/pytorch_lightning_simple.py
"""
# sample
hidden_size_exp = trial.suggest_int("hidden_size_exp", 1, 8)
hidden_size = 2**hidden_size_exp
layers = trial.suggest_int("layers", 1, 12)
# Load model
pt_model = m_fn(xs, ys, hidden_size, layers)
model_name = type(pt_model).__name__
# Wrap in lightning
patience = 2
model = PL_MODEL(pt_model,
lr=3e-4, patience=patience,
).to(device)
save_dir = f"../outputs/{timestamp}/{dataset_name}_{model_name}/{trial.number}"
Path(save_dir).mkdir(exist_ok=True, parents=True)
trainer = pl.Trainer(
# Training length
min_epochs=2,
max_epochs=40,
limit_train_batches=max_iters//batch_size,
limit_val_batches=max_iters//batch_size//5,
# Misc
gradient_clip_val=20,
terminate_on_nan=True,
# GPU
gpus=1,
amp_level='O1',
precision=16,
# Callbacks
default_root_dir=save_dir,
logger=False,
callbacks=[
EarlyStopping(monitor='loss/val', patience=patience * 2),
PyTorchLightningPruningCallback(trial, monitor="loss/val")],
)
trainer.fit(model, dl_train, dl_val)
# Run on all val data, using test mode
r = trainer.test(model, test_dataloaders=dl_val, verbose=False)
return r[0]['loss/test']
# -
import optuna
from optuna.integration import PyTorchLightningPruningCallback
import subprocess
def get_git_commit():
try:
return subprocess.check_output(["git", "rev-parse", "HEAD"], cwd='..').decode().strip()
except Exception:
logging.exception("failed to get git hash")
Path(f"../outputs/{timestamp}").mkdir(exist_ok=True)
storage = f"sqlite:///../outputs/{timestamp}/optuna.db"
for Dataset in tqdm(datasets, desc='datasets'):
dataset_name = Dataset.__name__
dataset = Dataset(datasets_root)
ds_train, ds_val, ds_test = dataset.to_datasets(window_past=window_past,
window_future=window_future)
# Init data
x_past, y_past, x_future, y_future = ds_train.get_rows(10)
xs = x_past.shape[-1]
ys = y_future.shape[-1]
# Loaders
dl_train = DataLoader(ds_train,
batch_size=batch_size,
shuffle=True,
drop_last=True,
pin_memory=num_workers == 0,
num_workers=num_workers)
dl_val = DataLoader(ds_val,
shuffle=False,
batch_size=batch_size,
drop_last=True,
num_workers=num_workers)
for i, m_fn in enumerate(tqdm(models, desc=f'models ({dataset_name})')):
try:
model_name = type(m_fn(8, 8, 8, 2)).__name__
free_mem()
study_name = f'{timestamp}_{dataset_name}-{model_name}'
# Create study
pruner = optuna.pruners.MedianPruner()
study = optuna.create_study(storage=storage,
study_name=study_name,
pruner=pruner,
load_if_exists=True)
study.set_user_attr('dataset', dataset_name)
study.set_user_attr('model', model_name)
study.set_user_attr('commit', get_git_commit())
df_trials = study.trials_dataframe()
if len(df_trials):
df_trials = df_trials[df_trials.state=='COMPLETE']
nb_trials = len(df_trials)
if nb_trials==0:
# Priors
study.enqueue_trial({"layers": 6, "params_hidden_size_exp": 2})
study.enqueue_trial({"layers": 1, "params_hidden_size_exp": 3})
study.enqueue_trial({"layers": 3, "params_hidden_size_exp": 5})
if nb_trials<20:
# Opt
study.optimize(objective, n_trials=20-nb_trials,
timeout=60*60*2 # Max seconds for all optimizes
)
print("Number of finished trials: {}".format(len(study.trials)))
print("Best trial:")
trial = study.best_trial
print(" Value: {}".format(trial.value))
print(" Params: ")
for key, value in trial.params.items():
print(" {}: {}".format(key, value))
except Exception as e:
logging.exception('failed to run model')
# +
# Baseline
models2 = [
lambda xs, ys, _, l: BaselineLast(),
lambda xs, ys, _, l: BaselineMean(),
]
for Dataset in tqdm(datasets, desc='datasets'):
dataset_name = Dataset.__name__
dataset = Dataset(datasets_root)
ds_train, ds_val, ds_test = dataset.to_datasets(window_past=window_past,
window_future=window_future)
# Init data
x_past, y_past, x_future, y_future = ds_train.get_rows(10)
xs = x_past.shape[-1]
ys = y_future.shape[-1]
# Loaders
dl_train = DataLoader(ds_train,
batch_size=batch_size,
shuffle=True,
drop_last=True,
pin_memory=num_workers == 0,
num_workers=num_workers)
dl_val = DataLoader(ds_val,
shuffle=False,
batch_size=batch_size,
drop_last=True,
num_workers=num_workers)
for i, m_fn in enumerate(tqdm(models2, desc=f'models ({dataset_name})')):
try:
model_name = type(m_fn(8, 8, 8, 2)).__name__
free_mem()
study_name = f'{timestamp}_{dataset_name}-{model_name}'
# Create study
pruner = optuna.pruners.MedianPruner()
study = optuna.create_study(storage=storage,
study_name=study_name,
pruner=pruner,
load_if_exists=True)
study.set_user_attr('dataset', dataset_name)
study.set_user_attr('model', model_name)
study.set_user_attr('commit', get_git_commit())
df_trials = study.trials_dataframe()
if len(df_trials):
df_trials = df_trials[df_trials.state=='COMPLETE']
nb_trials = len(df_trials)
if nb_trials<1:
# Opt
study.optimize(objective, n_trials=1,
timeout=60*30 # Max seconds for all optimizes
)
except Exception as e:
logging.exception('failed to run model')
# +
# TODO baseline, run as sep cell, opt once
# TODO summarize time and model params at best params
# +
import pandas as pd
# Summarize studies
rs = []
study_summaries = optuna.study.get_all_study_summaries(storage=storage)
for s in study_summaries:
row = {}
if (s.best_trial is not None) and (s.best_trial.state==optuna.trial.TrialState.COMPLETE):
params = {k:v for k,v in s.best_trial.__dict__.items() if not k.startswith('_')}
row.update(s.user_attrs)
row['n_trials'] = s.n_trials
row.update({'param_'+k:v for k,v in s.best_trial._params.items()})
row.update(params)
rs.append(row)
df_studies = pd.DataFrame(rs)
df_studies = (df_studies.drop(columns=['state', 'intermediate_values', 'commit', 'datetime_complete'])
.sort_values(['dataset', 'value'])
.set_index(['dataset', 'model'])
)
df_studies
# +
# study_names = [s.study_name for s in optuna.study.get_all_study_summaries(storage=storage)]
# for study_name in study_names:
# loaded_study = optuna.load_study(study_name=study_name, storage=storage)
# # Make DF over trials
# print(study_name)
# df_trials = loaded_study.trials_dataframe()
# # df_trials.index = df_trials.apply(lambda r:f'l={r.params_layers}_hs={r.params_hidden_size_exp}', 1)
# display(df_trials)
# # Plot test curves, to see how much overfitting
# df_values = pd.DataFrame([s.intermediate_values for s in loaded_study.get_trials()]).T
# df_values.columns = [f"l={s.params['layers']}_hs={s.params['hidden_size_exp']}" for s in loaded_study.get_trials()]
# df_values.plot(ylabel='nll', xlabel='epochs', title=f'val loss "{study_name}"')
# +
# study_names = [s.study_name for s in optuna.study.get_all_study_summaries(storage=storage)]
# for study_name in study_names:
# loaded_study = optuna.load_study(study_name=study_name, storage=storage)
# fig=optuna.visualization.plot_contour(loaded_study, params=['hidden_size_exp', 'layers'])
# fig = fig.update_layout(dict(title=f'{study_name} nll'))
# display(fig)
# -
[f"{s.params['layers']}_{s.params['hidden_size_exp']}" for s in loaded_study.get_trials()]
df_values
``` |
{
"source": "3stack-software/celery-shoot",
"score": 3
} |
#### File: celery-shoot/test/tasks.py
```python
import os
import time
from celery import Celery
broker = os.environ.get('AMQP_HOST', 'amqp://guest:guest@localhost//')
celery = Celery('tasks', broker=broker)
celery.conf.update(
CELERY_ACCEPT_CONTENT=["json"],
CELERY_RESULT_BACKEND = "amqp",
CELERY_RESULT_SERIALIZER='json',
)
@celery.task
def add(x, y):
print('got task to add {} + {} = {}'.format(x, y, x+y))
return x + y
@celery.task
def sleep(x):
time.sleep(x)
return x
@celery.task
def curtime():
current_time = int(time.time() * 1000)
print('the time is {}'.format(current_time))
print('the time is {}'.format(time.time()))
return current_time
@celery.task
def error(msg):
raise Exception(msg)
@celery.task
def echo(msg):
return msg
# client should call with ignoreResult=True as results are never sent
@celery.task(ignore_result=True)
def send_email(to='<EMAIL>', title='hi'):
print("Sending email to '%s' with title '%s'" % (to, title))
if __name__ == "__main__":
celery.start()
``` |
{
"source": "3stack-software/pdfminer",
"score": 3
} |
#### File: pdfminer/tools/pdfstats.py
```python
import sys, os
import collections
from pdfminer3.pdfparser import PDFParser
from pdfminer3.pdfdocument import PDFDocument
from pdfminer3.pdfpage import PDFPage, PDFTextExtractionNotAllowed
from pdfminer3.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer3.pdfdevice import PDFDevice
from pdfminer3.converter import PDFPageAggregator
from pdfminer3.layout import LAParams, LTContainer
_, SCRIPT = os.path.split(__file__)
def msg(*args, **kwargs):
print(' '.join(map(str, args)), file=sys.stdout, **kwargs)
def flat_iter(obj):
yield obj
if isinstance(obj, LTContainer):
for ob in obj:
yield from flat_iter(ob)
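# flat_iter yields the object itself and then, via the recursion above, every item
# nested inside any LTContainer (e.g. an LTPage and all of its child layout objects),
# which is what lets main() count every layout type it encounters.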
def main(args):
msg(SCRIPT, args)
if len(args) != 1:
msg('Parse a PDF file and print some pdfminer-specific stats')
msg('Usage:', SCRIPT, '<PDF-filename>')
return 1
infilename, = args
lt_types = collections.Counter()
with open(infilename, 'rb') as pdf_file:
# Create a PDF parser object associated with the file object.
parser = PDFParser(pdf_file)
# Create a PDF document object that stores the document structure.
# Supply the password for initialization.
password = ''
document = PDFDocument(parser, password)
# Check if the document allows text extraction.
if not document.is_extractable:
raise PDFTextExtractionNotAllowed(infilename)
# Make a page iterator
pages = PDFPage.create_pages(document)
# Set up for some analysis
rsrcmgr = PDFResourceManager()
laparams = LAParams(detect_vertical=True, all_texts=True)
# device = PDFDevice(rsrcmgr)
device = PDFPageAggregator(rsrcmgr, laparams=laparams)
interpreter = PDFPageInterpreter(rsrcmgr, device)
# Look at all (nested) objects on each page
for page_count, page in enumerate(pages, 1):
# oh so stateful
interpreter.process_page(page)
layout = device.get_result()
lt_types.update(type(item).__name__ for item in flat_iter(layout))
msg('page_count', page_count)
msg('lt_types:', ' '.join('{}:{}'.format(*tc) for tc in lt_types.items()))
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
``` |
{
"source": "3stack-software/python-aniso8601-relativedelta",
"score": 3
} |
#### File: python-aniso8601-relativedelta/aniso8601/time.py
```python
import datetime
from aniso8601.timezone import parse_timezone, build_utcoffset
from aniso8601.date import parse_date
from aniso8601.resolution import TimeResolution
def get_time_resolution(isotimestr):
#Valid time formats are:
#
#hh:mm:ss
#hhmmss
#hh:mm
#hhmm
#hh
#hh:mm:ssZ
#hhmmssZ
#hh:mmZ
#hhmmZ
#hhZ
#hh:mm:ss±hh:mm
#hhmmss±hh:mm
#hh:mm±hh:mm
#hhmm±hh:mm
#hh±hh:mm
#hh:mm:ss±hhmm
#hhmmss±hhmm
#hh:mm±hhmm
#hhmm±hhmm
#hh±hhmm
#hh:mm:ss±hh
#hhmmss±hh
#hh:mm±hh
#hhmm±hh
#hh±hh
timestr = _split_tz(isotimestr)[0]
if timestr.count(':') == 2:
#hh:mm:ss
return TimeResolution.Seconds
elif timestr.count(':') == 1:
#hh:mm
return TimeResolution.Minutes
#Format must be hhmmss, hhmm, or hh
if timestr.find('.') == -1:
#No time fractions
timestrlen = len(timestr)
else:
#The lowest order element is a fraction
timestrlen = len(timestr.split('.')[0])
if timestrlen == 6:
#hhmmss
return TimeResolution.Seconds
elif timestrlen == 4:
#hhmm
return TimeResolution.Minutes
elif timestrlen == 2:
#hh
return TimeResolution.Hours
else:
raise ValueError('String is not a valid ISO8601 time.')
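#Illustrative examples (not part of the original module):
#
#get_time_resolution('14:30:05') -> TimeResolution.Seconds
#get_time_resolution('1430') -> TimeResolution.Minutes
#get_time_resolution('14.5') -> TimeResolution.Hours (fractional hour, length 2 before the '.')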
def parse_time(isotimestr):
#Given a string in any ISO8601 time format, return a datetime.time object
#that corresponds to the given time. Fixed offset tzdata will be included
#if UTC offset is given in the input string. Valid time formats are:
#
#hh:mm:ss
#hhmmss
#hh:mm
#hhmm
#hh
#hh:mm:ssZ
#hhmmssZ
#hh:mmZ
#hhmmZ
#hhZ
#hh:mm:ss±hh:mm
#hhmmss±hh:mm
#hh:mm±hh:mm
#hhmm±hh:mm
#hh±hh:mm
#hh:mm:ss±hhmm
#hhmmss±hhmm
#hh:mm±hhmm
#hhmm±hhmm
#hh±hhmm
#hh:mm:ss±hh
#hhmmss±hh
#hh:mm±hh
#hhmm±hh
#hh±hh
(timestr, tzstr) = _split_tz(isotimestr)
if tzstr == None:
return _parse_time_naive(timestr)
elif tzstr == 'Z':
return _parse_time_naive(timestr).replace(tzinfo=build_utcoffset('UTC', datetime.timedelta(hours=0)))
else:
return _parse_time_naive(timestr).replace(tzinfo=parse_timezone(tzstr))
def parse_datetime(isodatetimestr, delimiter='T'):
#Given a string in ISO8601 date time format, return a datetime.datetime
#object that corresponds to the given date time.
#By default, the ISO8601 specified T delimiter is used to split the
#date and time (<date>T<time>). Fixed offset tzdata will be included
#if UTC offset is given in the input string.
isodatestr, isotimestr = isodatetimestr.split(delimiter)
datepart = parse_date(isodatestr)
timepart = parse_time(isotimestr)
return datetime.datetime.combine(datepart, timepart)
def _parse_time_naive(timestr):
#timestr is of the format hh:mm:ss, hh:mm, hhmmss, hhmm, hh
#
#hh is between 0 and 24, 24 is not allowed in the Python time format, since
#it represents midnight; a time of 00:00:00 is returned
#
#mm is between 0 and 60, with 60 used to denote a leap second
#
#No tzinfo will be included
return _resolution_map[get_time_resolution(timestr)](timestr)
def _parse_hour(timestr):
#Format must be hh or hh.
isohour = float(timestr)
if isohour == 24:
return datetime.time(hour=0, minute=0)
#Since the time constructor doesn't handle fractional hours, we put
#the hours into a timedelta, and add it to the time before returning
hoursdelta = datetime.timedelta(hours=isohour)
return _build_time(datetime.time(hour=0), hoursdelta)
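#Illustrative example (not part of the original module): _parse_hour('12.5')
#builds timedelta(hours=12.5) and returns datetime.time(12, 30), since the
#fractional hours are carried by the timedelta rather than the time constructor.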
def _parse_minute_time(timestr):
#Format must be hhmm, hhmm., hh:mm or hh:mm.
if timestr.count(':') == 1:
#hh:mm or hh:mm.
timestrarray = timestr.split(':')
isohour = int(timestrarray[0])
isominute = float(timestrarray[1]) #Minute may now be a fraction
else:
#hhmm or hhmm.
isohour = int(timestr[0:2])
isominute = float(timestr[2:])
if isominute > 60:
raise ValueError('String is not a valid ISO8601 time.')
if isohour == 24:
return datetime.time(hour=0, minute=0)
#Since the time constructor doesn't handle fractional minutes, we put
#the minutes into a timedelta, and add it to the time before returning
minutesdelta = datetime.timedelta(minutes = isominute)
return _build_time(datetime.time(hour=isohour), minutesdelta)
def _parse_second_time(timestr):
#Format must be hhmmss, hhmmss., hh:mm:ss or hh:mm:ss.
if timestr.count(':') == 2:
#hh:mm:ss or hh:mm:ss.
timestrarray = timestr.split(':')
isohour = int(timestrarray[0])
isominute = int(timestrarray[1])
#Since the time constructor doesn't handle fractional seconds, we put
#the seconds into a timedelta, and add it to the time before returning
secondsdelta = datetime.timedelta(seconds = float(timestrarray[2]))
else:
#hhmmss or hhmmss.
isohour = int(timestr[0:2])
isominute = int(timestr[2:4])
#Since the time constructor doesn't handle fractional seconds, we put
#the seconds into a timedelta, and add it to the time before returning
secondsdelta = datetime.timedelta(seconds = float(timestr[4:]))
if isominute > 60:
raise ValueError('String is not a valid ISO8601 time.')
if isohour == 24:
return datetime.time(hour=0, minute=0)
return _build_time(datetime.time(hour=isohour, minute=isominute),
secondsdelta)
def _build_time(time, delta):
#Combine today's date (just so we have a date object), the time, the
#delta, and return the time component
base_datetime = datetime.datetime.combine(datetime.date.today(), time)
return (base_datetime + delta).time()
def _split_tz(isotimestr):
if isotimestr.find('+') != -1:
timestr = isotimestr[0:isotimestr.find('+')]
tzstr = isotimestr[isotimestr.find('+'):]
elif isotimestr.find('-') != -1:
timestr = isotimestr[0:isotimestr.find('-')]
tzstr = isotimestr[isotimestr.find('-'):]
elif isotimestr.endswith('Z'):
timestr = isotimestr[:-1]
tzstr = 'Z'
else:
timestr = isotimestr
tzstr = None
return (timestr, tzstr)
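#Illustrative examples (not part of the original module):
#_split_tz('14:30:05+04:00') -> ('14:30:05', '+04:00')
#_split_tz('14:30:05Z') -> ('14:30:05', 'Z')
#_split_tz('14:30:05') -> ('14:30:05', None)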
_resolution_map = {
TimeResolution.Hours: _parse_hour,
TimeResolution.Minutes: _parse_minute_time,
TimeResolution.Seconds: _parse_second_time
}
``` |
{
"source": "3starblaze/cellular-automata",
"score": 3
} |
#### File: cellular-automata/src/test_Controller.py
```python
import copy
import pytest
import numpy as np
from Controller import Controller
def GAME_OF_LIFE_RULESET(cell, values):
if cell:
return sum(values) >= 2 and sum(values) <= 3
else:
return sum(values) == 3
GAME_OF_LIFE_INDICES = [
(-1, 1),
(0, 1),
(1, 1),
(-1, 0),
(1, 0),
(-1, -1),
(0, -1),
(1, -1),
]
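# Illustrative check of the ruleset above (not part of the original tests):
#   GAME_OF_LIFE_RULESET(True, [1, 1, 0, 0, 0, 0, 0, 0]) -> True (live cell with 2 neighbours survives)
#   GAME_OF_LIFE_RULESET(False, [1, 1, 0, 0, 0, 0, 0, 0]) -> False (a dead cell needs exactly 3 to be born)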
def test_Controller_with_just_rules():
Controller(GAME_OF_LIFE_INDICES, GAME_OF_LIFE_RULESET)
assert True
def test_negative_width():
with pytest.raises(ValueError) as excinfo:
Controller(GAME_OF_LIFE_INDICES, GAME_OF_LIFE_RULESET, width=-20)
assert "width" in str(excinfo.value)
def test_0_width():
with pytest.raises(ValueError) as excinfo:
Controller(GAME_OF_LIFE_INDICES, GAME_OF_LIFE_RULESET, width=0)
assert "width" in str(excinfo.value)
def test_float_width():
with pytest.raises(ValueError) as excinfo:
Controller(GAME_OF_LIFE_INDICES, GAME_OF_LIFE_RULESET, width=72.8)
assert "width" in str(excinfo.value)
def test_negative_height():
with pytest.raises(ValueError) as excinfo:
Controller(GAME_OF_LIFE_INDICES, GAME_OF_LIFE_RULESET, height=-42)
assert "height" in str(excinfo.value)
def test_0_height():
with pytest.raises(ValueError) as excinfo:
Controller(GAME_OF_LIFE_INDICES, GAME_OF_LIFE_RULESET, height=0)
assert "height" in str(excinfo.value)
def test_float_height():
with pytest.raises(ValueError) as excinfo:
Controller(GAME_OF_LIFE_INDICES, GAME_OF_LIFE_RULESET, height=92.3)
assert "height" in str(excinfo.value)
def test_ensure_drawing_data_presence():
controller = Controller(GAME_OF_LIFE_INDICES, GAME_OF_LIFE_RULESET)
assert controller.drawing_data != {}
def test_ensure_data_is_synced():
data = [[0, 1, 1], [1, 0, 1], [1, 1, 1]]
controller = Controller(GAME_OF_LIFE_INDICES, GAME_OF_LIFE_RULESET, data=data)
controller.next_frame()
assert np.array_equal(controller.data, controller.state.data) and np.array_equal(
controller.state.data, controller.drawer.data
)
def test_ensure_height_change_updates_drawing_data():
controller = Controller(GAME_OF_LIFE_INDICES, GAME_OF_LIFE_RULESET, height=200)
previous_drawing_data = copy.deepcopy(controller.drawing_data)
controller.height = 400
assert previous_drawing_data != controller.drawing_data
def test_ensure_width_change_updates_drawing_data():
controller = Controller(GAME_OF_LIFE_INDICES, GAME_OF_LIFE_RULESET, width=100)
previous_drawing_data = copy.deepcopy(controller.drawing_data)
controller.width = 300
assert previous_drawing_data != controller.drawing_data
```
#### File: cellular-automata/src/test_GridDrawer.py
```python
import random
import numpy as np
import pytest
from GridDrawer import GridDrawer
VALID_COLOR = (0, 100, 200)
VALID_DATA = [[0, 0, 1], [0, 1, 0], [1, 0, 0]]
def test_negative_line_width():
with pytest.raises(ValueError) as excinfo:
GridDrawer(random.randint(-100, -1), 10, VALID_DATA)
assert "line_width" in str(excinfo.value), "negative line_width is accepted"
def test_0_line_width():
assert GridDrawer(0, 10, VALID_DATA), "line_width = 0 isn't accepted"
def test_negative_cell_size():
with pytest.raises(ValueError) as excinfo:
GridDrawer(2, random.randint(-100, -1), [])
assert "cell_size" in str(excinfo.value), "negative cell_size is accepted"
def test_0_cell_size():
with pytest.raises(ValueError) as excinfo:
GridDrawer(2, 0, [])
assert "cell_size" in str(excinfo.value), "cell_size = 0 is accepted"
def test_data_getter():
received_data = GridDrawer(2, 10, VALID_DATA).data
np.testing.assert_equal(received_data, VALID_DATA)
def test_incorrectly_shaped_1d_input():
with pytest.raises(ValueError) as excinfo:
GridDrawer(2, 10, [0, 0, 1, 0, 1, 1])
assert "data" in str(excinfo.value), "1D data accepted"
def test_correctly_shaped_2d_input():
GridDrawer(
2, 10, [[True, False, False], [False, True, False], [False, False, True]],
)
assert True, "2D input not accepted"
def test_incorrectly_shaped_3d_input():
with pytest.raises(ValueError) as excinfo:
GridDrawer(2, 10, [[[False, False]], [[True, False]], [[True, True]]])
assert "data" in str(excinfo.value), "3D data accepted"
def test_incorect_color_format():
with pytest.raises(ValueError) as excinfo:
GridDrawer(2, 10, VALID_DATA, "skrrr")
assert "color format" in str(excinfo.value), "Wrong color format accepted"
with pytest.raises(ValueError) as excinfo:
GridDrawer(2, 10, VALID_DATA, VALID_COLOR, 40)
assert "color format" in str(excinfo.value), "Wrong color format accepted"
def test_incorrectly_shaped_color_tuples():
with pytest.raises(ValueError) as excinfo:
GridDrawer(2, 10, VALID_DATA, (4, 2))
assert "color format" in str(excinfo.value), "Wrongly shaped color format accepted"
with pytest.raises(ValueError) as excinfo:
GridDrawer(2, 10, VALID_DATA, VALID_COLOR, (20, 40, 80, 20))
assert "color format" in str(excinfo.value), "Wrongly shaped color format accepted"
def test_colors_with_invalid_numbers():
with pytest.raises(ValueError) as excinfo:
GridDrawer(2, 10, VALID_DATA, (400, 20, 50))
assert "color format" in str(
excinfo.value
), "Color format with wrong integers accepted"
with pytest.raises(ValueError) as excinfo:
GridDrawer(2, 10, VALID_DATA, VALID_COLOR, (-10, 80, 90))
assert "color format" in str(
excinfo.value
), "Color format with wrong integers accepted"
def test_colors_with_floats():
with pytest.raises(ValueError) as excinfo:
GridDrawer(2, 10, VALID_DATA, (10.23, 20.21, 50))
assert "color format" in str(excinfo.value), "Color format with floats accepted"
with pytest.raises(ValueError) as excinfo:
GridDrawer(2, 10, VALID_DATA, VALID_COLOR, (43.92, 123.45, 92.2))
assert "color format" in str(excinfo.value), "Color format with floats accepted"
def test_drawer():
data = [[1, 1, 0], [0, 1, 1], [1, 0, 1]]
width, height = (10, 12)
MyDrawer = GridDrawer(2, 4, data)
returned_object = MyDrawer.draw(width, height)
assert len(returned_object["lines"]) == 4, "Line count doesn't match"
assert len(returned_object["cells"]) == 4, "Cell count doesn't match"
``` |
{
"source": "3SUM/cardiac",
"score": 2
} |
#### File: 3SUM/cardiac/cardiac.py
```python
import os
import re
import json
import discord
import joblib
import numpy as np
from discord.ext import commands
TOKEN = os.environ["TOKEN"]
intents = discord.Intents.default()
intents.members = True
bot = commands.Bot(command_prefix="+", intents=intents)
class Cardiac:
model = None
vectorizer = None
@bot.event
async def on_guild_join(guild):
print(f"{bot.user.name} joined {guild.name}")
@bot.event
async def on_member_join(member):
print(f"{member.name} has joined!")
@bot.event
async def on_member_remove(member):
print(f"{member.name} has left!")
@bot.event
async def on_message(message):
if message.author == bot.user:
return
guild = message.guild
member = message.author
warned_role = discord.utils.get(guild.roles, name="Warned")
text = [message.content]
prob = Cardiac.predict_prob(text)
if prob >= 0.80:
print(f"{message.content} [{prob}]")
if warned_role in member.roles:
await message.delete()
await guild.ban(member, reason="Used profanity")
banned_embed = discord.Embed(
title="Banned User",
description=f"{member.name} has been banned!",
color=0xD0021B,
)
await message.channel.send(embed=banned_embed)
else:
await message.delete()
await discord.Member.add_roles(member, warned_role)
warned_embed = discord.Embed(
title="Warned User",
description=f"{member.mention} has been warned!",
color=0xFF8C00,
)
await message.channel.send(embed=warned_embed)
await bot.process_commands(message)
@bot.event
async def on_ready():
print(f"Logged in as {bot.user.name}")
def get_profane_prob(prob):
return prob[1]
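# prob is one row of model.predict_proba(), i.e. [P(class 0), P(class 1)];
# index 1 is assumed here to be the "offensive" class of the pretrained model.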
def predict(message):
return Cardiac.model.predict(Cardiac.vectorizer.transform(message))
def predict_prob(message):
return np.apply_along_axis(
Cardiac.get_profane_prob,
1,
Cardiac.model.predict_proba(Cardiac.vectorizer.transform(message)),
)
def main():
Cardiac.model = joblib.load("data/model.joblib")
Cardiac.vectorizer = joblib.load("data/vectorizer.joblib")
bot.run(TOKEN)
if __name__ == "__main__":
Cardiac.main()
```
#### File: 3SUM/cardiac/test.py
```python
import numpy as np
import joblib
vectorizer = joblib.load("data/vectorizer.joblib")
model = joblib.load("data/model.joblib")
def _get_profane_prob(prob):
return prob[1]
def predict(texts):
return model.predict(vectorizer.transform(texts))
def predict_prob(texts):
return np.apply_along_axis(
_get_profane_prob, 1, model.predict_proba(vectorizer.transform(texts))
)
if __name__ == "__main__":
texts = [
"Hello there, how are you",
"Lorem Ipsum is simply dummy text of the printing and typesetting industry.",
"!!!! Click this now!!! -> https://example.com",
"fuck you",
"fUcK u",
"GO TO hElL, you dirty scum",
]
probs = predict_prob(texts)
print(probs)
``` |
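Neither snippet above includes the `data/model.joblib` and `data/vectorizer.joblib` artifacts they load. Below is a minimal sketch of how such a pair could be produced with scikit-learn; the library choice, estimator, and toy corpus are assumptions for illustration, not the repository's actual training code.
```python
# Hypothetical training sketch -- the real cardiac model may have been trained
# differently; this only shows one way to produce compatible joblib artifacts.
import os
import joblib
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression

# Tiny labelled corpus (1 = profane, 0 = clean); a real model needs far more data.
texts = ["hello there, how are you", "have a nice day", "fuck you", "go to hell"]
labels = [0, 0, 1, 1]

vectorizer = TfidfVectorizer(lowercase=True)
features = vectorizer.fit_transform(texts)

model = LogisticRegression()  # exposes predict_proba, which predict_prob relies on
model.fit(features, labels)

os.makedirs("data", exist_ok=True)
joblib.dump(model, "data/model.joblib")
joblib.dump(vectorizer, "data/vectorizer.joblib")
```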
{
"source": "3SUM/fuzzy",
"score": 3
} |
#### File: 3SUM/fuzzy/fuzzy.py
```python
import os
import base64
from rich.console import Console
from rich.prompt import Prompt
class Fuzzy:
def __init__(self):
self.console = Console()
self.OFFSET = 10
self.VARIABLE_NAME = "__Nl1pR2MuIkzDocKUKk5vwoLDnMKAwpDCm8K4w7zDl8K2w63Diw__w71Kw7jChxrDpMK2JTFQwqzCjCBQaDHDgGfDg29EwqHChA" * 1000
def obfuscate(self, content):
index = 0
code = f'{self.VARIABLE_NAME} = ""\n'
b64_content = base64.b64encode(content.encode()).decode()
for _ in range(int(len(b64_content) / self.OFFSET) + 1):
_str = ""
for char in b64_content[index : index + self.OFFSET]:
byte = str(hex(ord(char)))[2:]
if len(byte) < 2:
byte = "0" + byte
_str += "\\x" + str(byte)
code += f'{self.VARIABLE_NAME} += "{_str}"\n'
index += self.OFFSET
code += f'exec(__import__("\\x62\\x61\\x73\\x65\\x36\\x34").b64decode({self.VARIABLE_NAME}.encode("\\x75\\x74\\x66\\x2d\\x38")).decode("\\x75\\x74\\x66\\x2d\\x38"))'
return code
def main(self):
self.console.print("[bold cyan]=[/]" * 60)
self.console.print("[b]Welcome to Fuzzy Python Obfuscator![/b]")
path = Prompt.ask("Enter path to Python script", default="default=none")
if not os.path.exists(path):
self.console.print("\t[-] Invalid path/file not found!")
exit()
if not os.path.isfile(path) or not path.endswith(".py"):
self.console.print("\t[-] Invalid file provided! Must be a Python script.")
exit()
with open(path, "r", encoding="utf-8", errors="ignore") as file:
file_content = file.read()
obfuscated_content = self.obfuscate(file_content)
with open(f"{path.split('.')[0]}-fuzzy-obfuscated.py", "w") as file:
file.write(obfuscated_content)
self.console.print("[b]Fuzzy obfuscation successful![/b]")
self.console.print("[bold cyan]=[/]" * 60)
if __name__ == "__main__":
fuzzy = Fuzzy()
fuzzy.main()
``` |
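`obfuscate` can also be exercised directly, skipping the interactive prompt. A minimal sketch, assuming the module above is saved as `fuzzy.py` and `rich` is installed:
```python
# Usage sketch -- assumes fuzzy.py is importable and rich is installed.
from fuzzy import Fuzzy

original = 'print("hello from fuzzy")'
obfuscated = Fuzzy().obfuscate(original)

print(len(obfuscated.splitlines()), "lines of obfuscated code generated")
exec(obfuscated)  # the generated code decodes itself and prints: hello from fuzzy
```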
{
"source": "3sunny/LARK",
"score": 2
} |
#### File: LARK/ERNIE/predict_classifier.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import argparse
import numpy as np
import multiprocessing
import paddle.fluid as fluid
from reader.task_reader import ClassifyReader
from model.ernie import ErnieConfig
from finetune.classifier import create_model
from utils.args import ArgumentGroup, print_arguments
from utils.init import init_pretraining_params
from finetune_args import parser
# yapf: disable
parser = argparse.ArgumentParser(__doc__)
model_g = ArgumentGroup(parser, "model", "options to init, resume and save model.")
model_g.add_arg("ernie_config_path", str, None, "Path to the json file for bert model config.")
model_g.add_arg("init_checkpoint", str, None, "Init checkpoint to resume training from.")
model_g.add_arg("use_fp16", bool, False, "Whether to resume parameters from fp16 checkpoint.")
model_g.add_arg("num_labels", int, 2, "num labels for classify")
data_g = ArgumentGroup(parser, "data", "Data paths, vocab paths and data processing options.")
data_g.add_arg("predict_set", str, None, "Predict set file")
data_g.add_arg("vocab_path", str, None, "Vocabulary path.")
data_g.add_arg("label_map_config", str, None, "Label_map_config json file.")
data_g.add_arg("max_seq_len", int, 128, "Number of words of the longest seqence.")
data_g.add_arg("batch_size", int, 32, "Total examples' number in batch for training. see also --in_tokens.")
data_g.add_arg("do_lower_case", bool, True,
"Whether to lower case the input text. Should be True for uncased models and False for cased models.")
run_type_g = ArgumentGroup(parser, "run_type", "running type options.")
run_type_g.add_arg("use_cuda", bool, True, "If set, use GPU for training.")
run_type_g.add_arg("do_prediction", bool, True, "Whether to do prediction on test set.")
args = parser.parse_args()
# yapf: enable.
def main(args):
ernie_config = ErnieConfig(args.ernie_config_path)
ernie_config.print_config()
reader = ClassifyReader(
vocab_path=args.vocab_path,
label_map_config=args.label_map_config,
max_seq_len=args.max_seq_len,
do_lower_case=args.do_lower_case,
in_tokens=False)
predict_prog = fluid.Program()
predict_startup = fluid.Program()
with fluid.program_guard(predict_prog, predict_startup):
with fluid.unique_name.guard():
predict_pyreader, probs, feed_target_names = create_model(
args,
pyreader_name='predict_reader',
ernie_config=ernie_config,
is_prediction=True)
predict_prog = predict_prog.clone(for_test=True)
if args.use_cuda:
place = fluid.CUDAPlace(0)
dev_count = fluid.core.get_cuda_device_count()
else:
place = fluid.CPUPlace()
dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
place = fluid.CUDAPlace(0) if args.use_cuda == True else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(predict_startup)
if args.init_checkpoint:
init_pretraining_params(exe, args.init_checkpoint, predict_prog)
else:
raise ValueError("args 'init_checkpoint' should be set for prediction!")
predict_exe = fluid.Executor(place)
predict_data_generator = reader.data_generator(
input_file=args.predict_set,
batch_size=args.batch_size,
epoch=1,
shuffle=False)
predict_pyreader.decorate_tensor_provider(predict_data_generator)
predict_pyreader.start()
all_results = []
time_begin = time.time()
while True:
try:
results = predict_exe.run(program=predict_prog, fetch_list=[probs.name])
all_results.extend(results[0])
except fluid.core.EOFException:
predict_pyreader.reset()
break
time_end = time.time()
np.set_printoptions(precision=4, suppress=True)
print("-------------- prediction results --------------")
for index, result in enumerate(all_results):
print(str(index) + '\t{}'.format(result))
if __name__ == '__main__':
print_arguments(args)
main(args)
``` |
{
"source": "3t13nn3/tOoLs",
"score": 3
} |
#### File: 3t13nn3/tOoLs/main.py
```python
import sys
import random
def data_recovery():
text = []
words = []
if sys.argv[1] == "1":
while True:
try:
line = input()
except EOFError:
break
words.append(line + "\n")
words[len(words)-1] = words[len(words)-1][:len(words[len(words)-1])-1]
else:
words = sys.argv[2:]
for i in range(len(words)):
words[i] += " "
for i in words:
text.append(str(i))
return text
def tRaNsFoRm(og_text):
text = list("".join(og_text))
for i in range(len(text)):
if random.randint(0, 1):
text[i] = text[i].upper()
else:
text[i] = text[i].lower()
return "".join(text)
if __name__ == "__main__":
og_text = data_recovery()
text = tRaNsFoRm(og_text)
print(text)
``` |
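Since the transformation is separated from the argument handling, `tRaNsFoRm` can be tested on its own. A small sketch, assuming the file above is saved as `main.py`:
```python
# Minimal sketch: feed the transformer a pre-built word list instead of sys.argv.
from main import tRaNsFoRm

print(tRaNsFoRm(["hello ", "world "]))  # e.g. "heLLo wORlD " -- casing is random per run
```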
{
"source": "3t1n/DynamicsPy",
"score": 3
} |
#### File: 3t1n/DynamicsPy/d365.py
```python
import requests
import xml.etree.ElementTree as ET
import urllib.parse
import time
__author__ = "<NAME>"
class D365:
"""
A class used to connect to the Dynamics 365 CRM.
This class use Odata API
A classe é usada para se conectar ao Dynamics 365 CRM
Essa classe usa o Odata API
Attributes
----------
__client_id : str
Is the client_id, the code of your azure application
O client_id usado no seu aplicativo do azure
__client_secret : str
Is the client_secret, the token generate in azure application
O client_secret, que você gera no seu aplicativo do azure
__username : str
Is the email used to connect in Dynamics 365 CRM
O email usado para se conectar com o Dynamics 365 CRM
__password : str
Is the password used to connect in Dynamics 365 CRM
A senha usada para se conectar com o Dynamics 365 CRM
__crm_org : str
Your Dynamics 365 URL, example: https://test.crm2.dynamics.com
Sua URL do Dynamics 365 CRM, exemplo: https://test.crm2.dynamics.com
__api_version : str
Is your version of Web API
Sua versão da Web API
__crm_url : str
Is complete url with API version
Url Completa com a versão da API
__header : str
Is the header of requests used in code
O cabeçalho usado para fazer as requisições no código
Methods
-------
get_rows(self,query)
Returns a list of Odata query lines
Retorna uma lista com as linhas da consulta Odata
fetch_xml(self, fetchxml)
Returns a list of fetchxml query lines
Retorna uma lista com as linhas da consulta fetchxml
__request_crm(self, method, url, header, data=None, **kwargs)
Send requests for Dynamics 365 CRM
Envia requisições para o Dynamics 365 CRM
__get_token(self)
This method get the response request and returns token data
Esse método pega a resposta da requisição e retorna os dados do token
__parse_response(self, response)
This method get the response request and returns json data or raise exceptions
Esse método pega a resposta do request e retorna os dados em JSON
"""
def __init__(self, username, password, client_id, crm_org, client_secret):
self.__start_time = time.time()
self.__client_id = client_id
self.__client_secret = client_secret
self.__username = username
self.__password = password
self.__crm_org = crm_org
self.__api_version = "/api/data/v9.1/"
self.__crm_url = crm_org + self.__api_version
self.__header = {
'Authorization': '',
'OData-MaxVersion': '4.0',
'OData-Version': '4.0',
'Accept': 'application/json',
'Content-Type': 'application/json; charset=utf-8'
}
def get_rows(self, query: str) -> list:
"""
Returns a list of Odata query lines
Retorna uma lista com as linhas da consulta Odata
Parameters
----------
query : str
The query Odata passed in web API, example: contacts?$select=fullname
A query Odata passada na web API, exemplo: contacts?$select=fullname
"""
self.__header['Prefer'] = "odata.maxpagesize=5000"
self.__header['Authorization'] = self.__get_token()
header = self.__header
if self.__api_version not in query:
url = self.__crm_url + query
else:
url = query
response = self.__request_crm('get', url, header)
print("Page 1")
if '@odata.nextLink' in response.keys():
all_records = response
url = response['@odata.nextLink']
page = 0
while True:
page += 1
print("Page " + str(page))
response = self.__request_crm('get', url, header)
entries = len(response['value'])
count = 0
while count < entries:
all_records['value'].append(response['value'][count])
count += 1
if '@odata.nextLink' in response.keys():
url = response['@odata.nextLink']
else:
break
print("Lines " + str(len(all_records['value']) - 1))
print("--- %s seconds ---" % (time.time() - self.__start_time))
print("--- %s minutes ---" % ((time.time() - self.__start_time)/60))
return all_records['value']
else:
if 'value' in response.keys():
return response['value']
else:
return response
def fetch_xml(self, fetchxml: str) -> list:
"""
Returns a list of fetchxml query lines
Retorna uma lista com as linhas da consulta fetchxml
Parameters
----------
fetchxml : str
The query fetchxml passed in web API
A query fetchxml passada na web API
"""
self.__header['Prefer'] = "odata.include-annotations=*"
self.__header['Authorization'] = self.__get_token()
header = self.__header
root = ET.fromstring(fetchxml)
try:
entidade = root[0].attrib['name']
except KeyError:
raise Exception("Could not get the name of entity")
fetchxml = ET.tostring(root, encoding='utf8', method='xml')
url_body = self.__crm_url + entidade + "s" + "?fetchXml="
url = url_body + urllib.parse.quote(fetchxml)
response_first = self.__request_crm('get', url, header)
print("Page 1")
if '@Microsoft.Dynamics.CRM.fetchxmlpagingcookie' in response_first.keys():
xml_cookie = ET.fromstring(response_fisrt['@Microsoft.Dynamics.CRM.fetchxmlpagingcookie'])
all_records = response_first
page = 2
test = xml_cookie.attrib['pagingcookie']
data = urllib.parse.unquote(urllib.parse.unquote(test))
data.replace("&", '&').replace("<", "<").replace(">", ">").replace("\"", """)
root.set("paging-cookie", data)
root.set("count", "5000")
root.set("page", str(page))
fetchxml = ET.tostring(root, encoding='utf8', method='xml')
url = url_body + urllib.parse.quote(fetchxml)
while True:
print("Page " + str(page))
page += 1
response = self.__request_crm('get', url, header)
entries = len(response['value'])
count = 0
while count < entries:
all_records['value'].append(response['value'][count])
count += 1
if '@Microsoft.Dynamics.CRM.fetchxmlpagingcookie' in response.keys():
test = xml_cookie.attrib['pagingcookie']
data = urllib.parse.unquote(urllib.parse.unquote(test))
data.replace("&", '&').replace("<", "<").replace(">", ">").replace("\"", """)
root.set("paging-cookie", data)
root.set("page", str(page))
fetchxml = ET.tostring(root, encoding='utf8', method='xml')
url = self.__crm_url + entidade + "s" + "?fetchXml=" + urllib.parse.quote(fetchxml)
else:
break
print("Lines " + str(len(all_records['value']) - 1))
print("--- %s seconds ---" % (time.time() - self.__start_time))
print("--- %s minutes ---" % ((time.time() - self.__start_time)/60))
return all_records['value']
else:
if 'value' in response_first.keys():
return response_first['value']
else:
return response_first
def __request_crm(self, method: str, url: str, header: dict, data=None, **kwargs):
"""
Send requests for Dynamics 365 CRM
Envia requisições para o Dynamics 365 CRM
Parameters
----------
method : str
Method used in request
Método usado na requisição
url : str
Url used in request
Url usada na requisição
header : dict
Header of request
Cabeçalho da requisição
data : list
Data in body request
Dados no corpo da requisição
"""
response = ""
if method.lower() == "get":
response = requests.get(url, headers=header, params=kwargs)
elif method.lower() == "post":
response = requests.post(url, headers=header, data=data)
return self.__parse_response(response)
def __get_token(self) -> str:
"""
This method get the response request and returns token data
Esse método pega a resposta da requisição e retorna os dados do token
"""
tokenpost = {
'client_id': self.__client_id,
'resource': self.__crm_org,
'username': self.__username,
'password': self.__password,
'client_secret': self.__client_secret,
'grant_type': 'password'
}
response = self.__parse_response(
requests.post("https://login.microsoftonline.com/common/oauth2/token", data=tokenpost))
try:
return response['access_token']
except KeyError:
raise Exception("Could not get access token")
@staticmethod
def __parse_response(response) -> list:
"""
This method get the response request and returns json data or raise exceptions
Esse método pega a resposta da requisição e retorna os dados em JSON
Parameters
----------
response : str
The request response
A resposta da requisição
"""
if response.status_code == 204 or response.status_code == 201:
return True
elif response.status_code == 400:
raise Exception(
"The URL {0} retrieved an {1} error. "
"Please check your request body and try again.\nRaw message: {2}".format(
response.url, response.status_code, response.text))
elif response.status_code == 401:
raise Exception(
"The URL {0} retrieved and {1} error. Please check your credentials, make sure you have permission to "
"perform this action and try again.".format(
response.url, response.status_code))
elif response.status_code == 403:
raise Exception(
"The URL {0} retrieved and {1} error. Please check your credentials, make sure you have permission to "
"perform this action and try again.".format(
response.url, response.status_code))
elif response.status_code == 404:
raise Exception(
"The URL {0} retrieved an {1} error. Please check the URL and try again.\nRaw message: {2}".format(
response.url, response.status_code, response.text))
elif response.status_code == 412:
raise Exception(
"The URL {0} retrieved an {1} error. Please check the URL and try again.\nRaw message: {2}".format(
response.url, response.status_code, response.text))
elif response.status_code == 413:
raise Exception(
"The URL {0} retrieved an {1} error. Please check the URL and try again.\nRaw message: {2}".format(
response.url, response.status_code, response.text))
elif response.status_code == 500:
raise Exception(
"The URL {0} retrieved an {1} error. Please check the URL and try again.\nRaw message: {2}".format(
response.url, response.status_code, response.text))
elif response.status_code == 501:
raise Exception(
"The URL {0} retrieved an {1} error. Please check the URL and try again.\nRaw message: {2}".format(
response.url, response.status_code, response.text))
elif response.status_code == 503:
raise Exception(
"The URL {0} retrieved an {1} error. Please check the URL and try again.\nRaw message: {2}".format(
response.url, response.status_code, response.text))
return response.json()
``` |
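A minimal usage sketch of the class above. Every credential value is a placeholder, and the OData query is the `contacts?$select=fullname` example from the `get_rows` docstring:
```python
# Usage sketch -- all credential values below are placeholders, not real values.
from d365 import D365

crm = D365(
    username="user@example.com",
    password="password",
    client_id="00000000-0000-0000-0000-000000000000",
    crm_org="https://test.crm2.dynamics.com",
    client_secret="secret",
)

rows = crm.get_rows("contacts?$select=fullname")  # OData query from the docstring
print(len(rows), "contacts returned")
```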
{
"source": "3t4n/celery-pickle-poc",
"score": 3
} |
#### File: project/server/tasks.py
```python
import os
import time
from celery import Celery
celery = Celery(__name__)
celery.conf.broker_url = os.environ.get("CELERY_BROKER_URL", "redis://localhost:6379")
celery.conf.result_backend = os.environ.get("CELERY_RESULT_BACKEND", "redis://localhost:6379")
celery.conf.task_serializer = 'pickle'
celery.conf.accept_content = ['application/json','application/x-python-serialize']
@celery.task(name="create_task")
def create_task(task_type,obj=None):
time.sleep(int(task_type) * 10)
return True
``` |
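Because `task_serializer` is `pickle` and `application/x-python-serialize` is accepted, callers can pass arbitrary picklable objects in `obj`. A sketch of enqueuing the task from another module; the import path follows the `project/server/tasks.py` layout and assumes a worker plus Redis broker are running:
```python
# Enqueue sketch -- assumes a worker is consuming from the same Redis broker.
from project.server.tasks import create_task

# obj can be any picklable object thanks to task_serializer = 'pickle'
result = create_task.delay(1, obj={"user_id": 42, "tags": {"a", "b"}})
print(result.get(timeout=30))  # True once the worker finishes
```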
{
"source": "3tch-a-sketch/sds1004x_bode",
"score": 2
} |
#### File: 3.x/awgdrivers/fy.py
```python
import sys
import serial
import time
import exceptions
from base_awg import BaseAWG
AWG_ID = "fy"
AWG_OUTPUT_IMPEDANCE = 50.0
MAX_READ_SIZE = 256
RETRY_COUNT = 2
VERBOSE = False # Set to True for protocol debugging
def debug(msg, *args):
if VERBOSE:
print(msg % args)
class FygenAWG(BaseAWG):
"""Driver API."""
SHORT_NAME = "fy"
def __init__(self, port, baud_rate=115200, timeout=5):
self.fy = None
self.port = None
self.serial_path = port
self.baud_rate = baud_rate
self.timeout = timeout
# None -> Hi-Z
self.load_impedance = {
1: None,
2: None,
}
def connect(self):
if self.port:
return
self.port = serial.Serial(
port=self.serial_path,
baudrate=115200,
bytesize=serial.EIGHTBITS,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
rtscts=False,
dsrdtr=False,
xonxoff=False,
timeout=self.timeout)
debug("Connected to %s", self.serial_path)
self.port.reset_output_buffer()
self.port.reset_input_buffer()
def disconnect(self):
if self.port:
debug("Disconnected from %s", self.serial_path)
self.port.close()
self.port = None
def initialize(self):
self.connect()
self.enable_output(0, False)
def get_id(self):
return AWG_ID
def enable_output(self, channel, on):
"""Turns a channel on (True) or off (False)."""
self._retry(
channel,
"N",
"1" if on else "0",
"255" if on else "0")
def set_frequency(self, channel, freq):
"""Sets frequency for a channel.
freq is a floating point value in Hz.
"""
uhz = int(freq * 1000000.0)
# AWG Bug: With the FY2300 and some values of frequency (for example
# 454.07 Hz) a bug occurs where the UI of the generator shows the
# correct value on the UI but the "RMF" command returns an incorrect
# fractional hertz value (454.004464 Hz for the example above).
# The work-around is to just match the Hz part of the return
# value.
def match_hz_only(match, got):
if '.' in got and match == got[:got.index('.')]:
return True
debug('set_frequency mismatch (looking at Hz value only)')
return False
self._retry(
channel,
"F",
"%014u" % uhz,
"%08u" % int(freq),
match_fn=match_hz_only)
def set_phase(self, phase):
"""Sets the phase of a channel in degrees."""
self._retry(
2, # always channel 2 (not sure why)
"P",
"%.3f" % phase,
"%u" % (phase * 1000))
def set_wave_type(self, channel, wvtp):
"""Sets a channel to a sin wave."""
del wvtp # This parameter is ignored, always set a sin wave
self._retry(channel, "W", "0", "0")
def set_amplitue(self, channel, amp):
"""Sets a channel amplitude in volts.
Load impedance for the channel is taken into account
when calculating the amplitude. For example, if the load
impedance is 50 ohms and amp=1 V, the actual voltage
set is 1 * (50 + 50) / 50 = 2 V.
"""
volts = round(self._apply_load_impedance(channel, amp), 4)
self._retry(
channel,
"A",
"%.4f" % volts,
"%u" % (volts * 10000))
def set_offset(self, channel, offset):
"""Sets the voltage offset for a channel.
offset is a floating point number.
"""
# Factor in load impedance.
offset = self._apply_load_impedance(channel, offset)
# AWG Bug: The FY2300 returns negative offsets as
# an unsigned integer. Thus math is needed to predict
# the returned value correctly
offset_unsigned = int(round(offset, 3) * 1000.0)
if offset_unsigned < 0:
offset_unsigned = 0x100000000 + offset_unsigned
self._retry(
channel,
"O",
"%.3f" % offset,
"%u" % offset_unsigned)
def set_load_impedance(self, channel, z):
"""Sets the load impedance for a channel."""
maxz = 10000000.0
if z > maxz:
z = None # Hi-z
self.load_impedance[channel] = z
def _apply_load_impedance(self, channel, volts):
if channel not in self.load_impedance:
raise exceptions.UnknownChannelError("Unknown channel: %s" % channel)
if not self.load_impedance[channel]:
return volts # Hi-Z
loadz = self.load_impedance[channel]
return volts * (AWG_OUTPUT_IMPEDANCE + loadz) / loadz
def _recv(self, command):
"""Waits for device."""
response = self.port.read_until(size=MAX_READ_SIZE).decode("utf8")
debug("%s -> %s", command.strip(), response.strip())
return response
def _send(self, command, retry_count=5):
"""Sends a low-level command. Returns the response."""
debug("send (attempt %u/5) -> %s", 6 - retry_count, command)
data = command + "\n"
data = data.encode()
self.port.reset_output_buffer()
self.port.reset_input_buffer()
self.port.write(data)
self.port.flush()
response = self._recv(command)
if not response and retry_count > 0:
# sometime the siggen answers queries with nothing. Wait a bit,
# then try again
time.sleep(0.1)
return self._send(command, retry_count - 1)
return response.strip()
def _retry(self, channel, command, value, match, match_fn=None):
"""Retries the command until match is satisfied."""
if channel == 0:
self._retry(1, command, value, match)
self._retry(2, command, value, match)
return
elif channel == 1:
channel = "M"
elif channel == 2:
channel = "F"
else:
raise exceptions.UnknownChannelError("Channel shoud be 1 or 2")
if not match_fn:
# usually we want ==
match_fn = lambda match, got: match == got
if match_fn(match, self._send("R" + channel + command)):
debug("already set %s", match)
return
for _ in range(RETRY_COUNT):
self._send("W" + channel + command + value)
if match_fn(match, self._send("R" + channel + command)):
debug("matched %s", match)
return
debug("mismatched %s", match)
# Print a warning. This is not an error because the AWG read bugs
# worked-around in this module could vary by AWG model number or
# firmware revision number.
sys.stderr.write(
"Warning: %s did not produce an expected response after %d "
"retries\n" % (
"W" + channel + command + value, RETRY_COUNT))
if __name__ == '__main__':
print "This module shouldn't be run. Run awg_tests.py instead."
``` |
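The driver is normally created by the bode-plot server, but it can be exercised by hand as well. A sketch, with the serial port path as a placeholder and assuming the `awgdrivers` directory is on the import path:
```python
# Manual usage sketch -- /dev/ttyUSB0 is a placeholder for the real serial port.
from fy import FygenAWG

awg = FygenAWG("/dev/ttyUSB0")
awg.initialize()                 # connects and switches both outputs off
awg.set_wave_type(1, None)       # the driver always programs a sine wave
awg.set_frequency(1, 1000.0)     # 1 kHz
awg.set_load_impedance(1, 50.0)  # 50 ohm load on channel 1
awg.set_amplitue(1, 1.0)         # 1 V into 50 ohm -> 2 V programmed (see docstring)
awg.enable_output(1, True)
awg.disconnect()
```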
{
"source": "3tew/rox-auto-fishing",
"score": 3
} |
#### File: 3tew/rox-auto-fishing/main.py
```python
import os
import ctypes
from threading import Thread
import time
import numpy as np
import cv2
from mss import mss
from win32api import GetSystemMetrics
import config
from repositories import detector_repo
from repositories import fishing_repo
from repositories import render_repo
config.PID = os.getpid()
config.SCREEN_WIDTH = int(GetSystemMetrics(0))
config.SCREEN_HEIGHT = int(GetSystemMetrics(1))
sct = mss()
ctypes.windll.kernel32.SetConsoleTitleW(
config.TITLE + ' ' + str(config.VERSION))
# Clear console
os.system('cls' if os.name in ('nt', 'dos') else 'clear')
print("RO:X Next Generation - Auto Fishing version %s" % config.VERSION)
print("Made by <NAME>. (fb.com/thanatos1995)\n")
print("Screen resolution")
print("width = ", config.SCREEN_WIDTH)
print("height = ", config.SCREEN_HEIGHT)
print("\nPress 'R' button to reset limit.")
print("Press 'H' button to toggle fishing.")
print("Press 'Q' button to exit program.\n")
def main_function():
while True:
# Reset when reaching the limit
if config.COUNT - config.LIMIT == 0:
fishing_repo.set_limit()
# Handle key presses
key = cv2.waitKey(25)
# Press "R" button to Reset
if key & 0xFF == ord('r'):
fishing_repo.set_limit()
# Press "H" button to Hold
if key & 0xFF == ord('h'):
config.HOLD ^= True
# Press "Q" button to exit program
if key & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
# Current time
config.CURRENT_TIME = time.time()
# Crop
render_repo.crop_screenshot(sct=sct)
# Convert the frame's color space
config.FRAME = cv2.cvtColor(
np.array(config.FRAME), cv2.COLOR_BGR2RGB)
hsv_frame = cv2.cvtColor(config.FRAME, cv2.COLOR_BGR2HSV)
# Detect green color
detector = detector_repo.detect_green_color(hsv_frame=hsv_frame)
# Render window
render_repo.show(detector=detector)
# Time elapsed since the last click
last_click_sec = config.CURRENT_TIME - config.LAST_CLICK_TIME
# Detect the green area
if not config.HOLD and config.LOOP != 0:
if cv2.countNonZero(detector["mask"]) > 0: # green detected
if config.IS_FISHING and last_click_sec > 2: # wait at least 2 seconds before clicking again
fishing_repo.gotcha()
else: # not green yet
if not config.IS_FISHING and last_click_sec > 4 and config.COUNT > 0: # wait 4 seconds before casting the rod again
fishing_repo.throw_fishing_rod()
if __name__ == "__main__":
thread = Thread(target=main_function)
thread.start()
thread.join()
``` |
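`detector_repo.detect_green_color` is not shown here; the loop only needs it to return a dict whose `"mask"` entry is a binary image for `cv2.countNonZero`. A rough sketch of what such an HSV-based detector could look like (the hue bounds are illustrative, not the repository's actual values):
```python
# Illustrative sketch only -- the real repositories/detector_repo.py may differ.
import cv2
import numpy as np

def detect_green_color(hsv_frame):
    # Typical OpenCV hue range for green; tune for the in-game bite indicator.
    lower_green = np.array([40, 80, 80])
    upper_green = np.array([80, 255, 255])
    mask = cv2.inRange(hsv_frame, lower_green, upper_green)
    return {"mask": mask}
```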
{
"source": "3tew/rox-auto-harvest",
"score": 2
} |
#### File: 3tew/rox-auto-harvest/main.py
```python
import config
from repositories import harvest_repo
from repositories import question_repo
from repositories import render_repo
from repositories import func_repo
import os
import sys
import ctypes
import time
import cv2
import numpy as np
from threading import Thread
from mss import mss
from pynput import keyboard
config.PID = os.getpid()
sct = mss()
def main_function():
while config.IS_RUNNING:
# Current time
config.CURRENT_TIME = time.time()
# Reset when reaching the limit
if config.COUNT - config.LIMIT == 0:
harvest_repo.set_limit()
# Get emulator areas
func_repo.get_emulator_area(sct=sct)
# Process screenshot
render_repo.set_harvest_btn_bounding()
render_repo.crop_question_quiz_screenshot(sct=sct)
render_repo.set_question_answer_bounding()
render_repo.set_question_btn_bounding()
# Process
last_click_sec = config.CURRENT_TIME - config.LAST_CLICK_TIME
if not config.HOLD:
if config.LOOP != 0:
if not config.IS_HARVESTING: # wait for the cooldown before pressing again
if last_click_sec > config.COOLDOWN:
harvest_repo.harvest()
else:
# Check quiz
if not config.IS_QUIZ:
question_repo.get_quiz()
# Show interface
render_repo.show()
# # Resize
# scale_percent = 40 # percent of original size
# width = int(config.FRAME_EMULATOR.shape[1] * scale_percent / 100)
# height = int(config.FRAME_EMULATOR.shape[0] * scale_percent / 100)
# resized = cv2.resize(config.FRAME_EMULATOR, (width, height))
# # Render frame
# cv2.imshow(
# config.TITLE,
# np.hstack([
# resized,
# # config.FRAME_QUESTION_TEXT
# ])
# )
# Thread terminate
sys.exit(0)
if __name__ == "__main__":
# Set title
ctypes.windll.kernel32.SetConsoleTitleW(config.TITLE)
print(config.TITLE)
print("Made by <NAME>. (fb.com/thanatos1995)\n")
# pre-process
func_repo.select_emulator()
harvest_repo.set_limit()
harvest_repo.set_cooldown()
# Wait for ready
input("\nPress Enter to start harvest...")
config.HOLD = False
config.LAST_CLICK_TIME = time.time() - config.COOLDOWN
# Show menu function
func_repo.mainmenu()
# Create thread
thread1 = Thread(target=main_function)
thread2 = keyboard.Listener(on_press=func_repo.on_press)
# Start thread
thread1.start()
thread2.start() # start to listen on a separate thread
# Thread terminate
thread1.join()
thread2.join() # remove if main thread is polling self.keys
# Exit program
sys.exit(0)
``` |
{
"source": "3th3l/bootcamp-8-nbo",
"score": 4
} |
#### File: bootcamp-8-nbo/day2/fizzbuzz.py
```python
def fizz_buzz(n):
""" return fizz when divisible by 3
return buzz when n is divisible by 5
return fizzbuzz when n is divisible by both 3 and 5
"""
if n % 15 == 0:
return 'FizzBuzz'
elif n % 3 == 0:
return 'Fizz'
elif n % 5 == 0:
return 'Buzz'
else:
return n
```
#### File: bootcamp-8-nbo/day3/NotesApplication.py
```python
class NotesApplication(object):
notes_list = []
def __init__(self, author):
self.author = author
#notes_list = []
def create(self, note_content):
#self.note_content = note_content
self.notes_list.append(note_content)
def list(self):
for note in self.notes_list:
print "Note ID: ", self.notes_list.index(note) + 1
print note
print "By Author ", self.author
print "--------------------------------------------"
def get(self, note_id):
if note_id >= 0 and note_id < len(self.notes_list):
print "Note ID: ", str(note_id)
print "Note Content: ", self.notes_list[note_id]
print "Author: ", self.author
print "--------------------------------------------"
def search(self, search_text):
self.search_text = search_text
for note in self.notes_list:
if search_text in note:
print "Showing results for search ", self.search_text
print "Note ID: ", self.notes_list.index(note) + 1
print note
print "By Author ", self.author
print "--------------------------------------------"
def edit(self, note_id, new_content):
if note_id >= 0 and note_id < len(self.notes_list):
self.notes_list[note_id] = new_content
my_notes = NotesApplication("Ethel")
my_notes.create("This girl rocks!")
my_notes.create("TIA rocks!")
my_notes.create("This girl rocks even more!")
my_notes.list()
``` |
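The module-level example only exercises `create` and `list`; the remaining methods can be tried the same way (the note text below is illustrative):
```python
# Continues the example at the bottom of NotesApplication.py
my_notes.get(1)                             # prints the note stored at index 1
my_notes.search("rocks")                    # prints every note containing "rocks"
my_notes.edit(0, "This girl really rocks!")
my_notes.list()
```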
{
"source": "3than0ls/Imperial",
"score": 2
} |
#### File: cogs/profile/helper.py
```python
import discord
from discord.ext import commands
from discord.ext.commands import errors
from discord.ext.commands.converter import RoleConverter
from firecord import firecord # pylint: disable=import-error
def role_filter(role):
return not (
role.is_default()
or role.is_bot_managed()
or role.is_premium_subscriber()
or role.is_integration()
)
### DISCORDPY STRING TO MEMBER/ROLE FUNCTIONS
async def convert_str_to_thing(ctx, _str):
"""converts a string to either a discord Member or discord Role object"""
try:
return await commands.MemberConverter().convert(ctx, _str)
except errors.MemberNotFound:
pass
try:
return await commands.RoleConverter().convert(ctx, _str)
except errors.RoleNotFound:
pass
raise errors.BadArgument(
f'"{_str}" is not a valid source of roles (could not be converted into a role or a list of roles from a member). Check if you spelled it correctly.'
)
async def convert_to_roles(ctx, thing):
"""attempts to convert argument thing into a list of role(s). if thing is unable to be compiled into a list of roles, it throws an error"""
roles = []
if isinstance(thing, str):
thing = await convert_str_to_thing(ctx, thing)
if isinstance(thing, discord.Member):
roles.extend(thing.roles)
elif isinstance(thing, discord.Role):
roles.append(thing)
# filter out discord or integration managed roles
roles = filter(
role_filter,
roles,
)
return roles
async def validate_convert_roles(ctx, profile):
"""validates (checks if exists) role ids in profile_roles, converts them into a role object, and return valid ones whilst deleting invalid ones"""
valid = []
invalid = []
for role_id in profile["profile_roles"]:
try:
valid.append(await RoleConverter().convert(ctx, str(role_id)))
except commands.errors.RoleNotFound:
invalid.append(role_id)
if len(invalid) == len(profile["profile_roles"]):
# if all roles in a profile are deleted, delete the profile and throw error
firecord.profile_delete(str(ctx.guild.id), profile["name"])
raise errors.BadArgument(
f"The profile \"{profile['name']}\" no longer exists. The roles used by this profile have all been deleted, and so the profile was automatically deleted as well."
)
else: # if only some roles are deleted, remove those from the profile on firebase
firecord.profile_edit_roles(
str(ctx.guild.id), profile["name"], [str(role.id) for role in valid]
)
return list(set(valid))
```
#### File: Imperial/utils/cache.py
```python
from collections import defaultdict
##### https://stackoverflow.com/questions/19189274/nested-defaultdict-of-defaultdict
def _cache():
return defaultdict(_cache)
# cache can be accessed by importing it from this file, alternatively it should be assigned a .cache property in client.py for cogs to access
cache = _cache()
```
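Because the cache is an arbitrarily nested `defaultdict`, intermediate keys never need to be created explicitly. A tiny sketch, assuming the module is importable as `utils.cache` per the repository layout (the guild/profile keys are invented):
```python
# Illustrative only -- keys are hypothetical, not the cogs' real cache layout.
from utils.cache import cache

cache["guild_123"]["profiles"]["admins"] = ["role_1", "role_2"]
print(cache["guild_123"]["profiles"]["admins"])  # ['role_1', 'role_2']
print(cache["guild_123"]["unset_key"])           # empty nested defaultdict, never a KeyError
```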
#### File: Imperial/utils/embed.py
```python
import random
from discord import Embed
class EmbedFactory(Embed):
"""a embed factory that formats the embed data"""
def __new__(
cls, data, formatting_data=None, error=False, error_command_string=None
):
if "color" in data:
if data["color"] == "random":
data["color"] = random.randint(0, 16777215)
elif data["color"] == "success":
data["color"] = 7208711
elif data["color"] == "error":
data["color"] = 16715054
elif data["color"] == "confirm":
data["color"] = 15329284
elif data["color"] == "cooldown":
data["color"] = 15571792
if error:
if "color" not in data:
data["color"] = 16715054
if "title" not in data:
data["title"] = (
"An Error has occured."
if error_command_string is None
else f"An error occured attempting to run `{error_command_string}`"
)
if formatting_data:
formatted_data = EmbedFactory.format_text(data, **formatting_data)
return Embed.from_dict(formatted_data)
else:
return Embed.from_dict(data)
@staticmethod
def format_text(data, **kwargs):
formatted_data = {}
for key, value in data.items():
if isinstance(
value, dict
): # for nested dictionaries, recursively call format_text
formatted_data[key] = EmbedFactory.format_text(value, **kwargs)
elif isinstance(
value, list
): # for embed fields list, iterate through and format
formatted_embed_fields = []
for embed_field in value:
formatted_embed_fields.append(
{
"name": embed_field["name"].format(**kwargs),
"value": embed_field["value"].format(**kwargs),
"inline": embed_field.get("inline", False),
}
)
formatted_data[key] = formatted_embed_fields
elif isinstance(value, str): # for strings, just format
formatted_data[key] = value.format(**kwargs)
else: # don't modify anything else (ints, bools, etc.)
formatted_data[key] = value
return formatted_data
``` |
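A brief sketch of how `EmbedFactory` is fed: `data` follows the `discord.Embed.to_dict()` shape and `{placeholders}` are filled from `formatting_data`. The template and placeholder names below are invented for illustration:
```python
# Illustrative sketch -- the embed template and placeholder names are made up.
from utils.embed import EmbedFactory

data = {
    "title": "Profile {name}",
    "description": "Roles granted to {member}.",
    "color": "success",  # mapped to 7208711 by EmbedFactory
    "fields": [
        {"name": "Profile", "value": "{name}", "inline": True},
        {"name": "Requested by", "value": "{member}", "inline": True},
    ],
}

embed = EmbedFactory(data, formatting_data={"name": "mods", "member": "Ana"})
# embed is a regular discord.Embed, ready for ctx.send(embed=embed)
```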
{
"source": "3tilley/symphony-api-client-python",
"score": 2
} |
#### File: sym_api_client_python/clients/api_client.py
```python
import logging
import asyncio
import aiohttp
import requests
import json
from requests_toolbelt.multipart.encoder import MultipartEncoder
from ..exceptions.APIClientErrorException import APIClientErrorException
from ..exceptions.ServerErrorException import ServerErrorException
from ..exceptions.UnauthorizedException import UnauthorizedException
from ..exceptions.ForbiddenException import ForbiddenException
from ..exceptions.DatafeedExpiredException import DatafeedExpiredException
from ..exceptions.MaxRetryException import MaxRetryException
# error handling class --> take status code and raise appropriate exceptions
# this class acts as a parent class to each of the other client class.
# each child class extends error handling functionality
class APIClient:
def __init__(self, bot_client):
self.bot_client = bot_client
def make_mulitpart_form(self, fields, aio=False):
"""Create a multipart form to be used across the Symphony API, that works for both requests
and the asynchronous aiohttp. Requests basically uses requests-toolbelt, but it's a little
bit more involved for aiohttp. The output of this is expected to be passed to either
execute_rest_request or execute_rest_request_async depending whether aio was true"""
if aio:
# This appears to be the canonical way to use aiohttp to pass mulipart data into the API
# in the same way that MultipartEncoder does for Requests.
# aiohttp.FormData does appear to work because of the way the Symphony API demands a boundary
# in the header. aiohttp.MultipartWriter.append_form doesn't appear to work because it
# encodes as a application/x-www-form-urlencoded that Symphony doesn't appear to like for
# attachments
with aiohttp.MultipartWriter("form-data") as data:
for k, v in fields.items():
if len(v) == 1:
part = data.append(v)
part.set_content_disposition("form-data", name=k)
if len(v) == 3:
filename, file_object, content_type = v
part = data.append(file_object, {'Content-Type': content_type})
part.set_content_disposition('form-data', name=k, filename=filename)
headers = {
'Content-Type': 'multipart/form-data; boundary=' + data.boundary
}
else:
print(fields)
data = MultipartEncoder(
fields=fields
)
headers = {
'Content-Type': data.content_type
}
return {"data": data, "headers": headers}
def handle_error(self, response, bot_client, error_json=None, text=None):
logging.debug('api_client/handle_error() function started')
_error_field = "message"
if isinstance(response, requests.Response):
status = response.status_code
else:
# The assumption is that it's an aiohttp response from an async request
status = response.status
try:
if error_json is not None:
try:
err_message = error_json[_error_field]
except KeyError:
if text is not None:
err_message = text
else:
err_message = ""
elif text is not None:
err_message = text
else:
err_message = ""
except Exception:
logging.error("Unable to parse error message: {}".format(text))
err_message = ""
if status == 400 and 'Could not find a datafeed with the' in err_message:
logging.debug('datafeed expired, start_datafeed()')
raise DatafeedExpiredException()
elif status == 401:
logging.debug('api_client()/handling 401 error')
bot_client.reauth_client()
logging.debug('api_client()/successfully reauthenticated')
raise UnauthorizedException(
'User, unauthorized, refreshing tokens: {}'
.format(status))
elif status == 403:
raise ForbiddenException(
'Forbidden: Caller lacks necessary entitlement: {}'
.format(status))
elif status == 405:
logging.debug('Handling 405 error')
raise ForbiddenException(
'Method Not Allowed: The method received in the request-line is known by the origin server but not supported by the target resource: {}'
.format(status))
# Response dict is a bit of an information overload, could consider trimming it
elif 400 <= status < 500:
raise APIClientErrorException('Client Error Occurred: {}. Response contents: {}'
.format(err_message, response.__dict__))
elif status >= 500:
raise ServerErrorException(
'Server Error Exception: {}, {}'
.format(status, err_message))
else:
# This shouldn't happen
raise RuntimeError("Unhandled error: {}".format(response.__dict__))
```
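`make_mulitpart_form` takes the same `fields` mapping for both paths, with attachments given as `(filename, file_object, content_type)` triples. A sketch of the expected layout; the file name is a placeholder, the import assumes the package is installed as `sym_api_client_python`, and `APIClient` is instantiated directly only because `make_mulitpart_form` never touches `bot_client`:
```python
# Sketch only -- report.pdf is a placeholder file.
from sym_api_client_python.clients.api_client import APIClient

client = APIClient(bot_client=None)  # bot_client is unused by make_mulitpart_form

fields = {
    # attachments are passed as (filename, file_object, content_type) triples
    "attachment": ("report.pdf", open("report.pdf", "rb"), "application/pdf"),
}

form = client.make_mulitpart_form(fields)                # requests path (MultipartEncoder)
form_aio = client.make_mulitpart_form(fields, aio=True)  # aiohttp path (MultipartWriter)
# both return {"data": ..., "headers": ...} for the corresponding REST call
```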
#### File: sym_api_client_python/clients/health_check_client.py
```python
import logging
from .api_client import APIClient
from ..exceptions.ServerErrorException import ServerErrorException
class HealthCheckClient(APIClient):
def __init__(self, bot_client):
self.bot_client = bot_client
def get_health_check(self, show_firehose_errors=False):
logging.debug('HealthCheckClient/get_health_check()')
params = {'showFirehoseErrors': show_firehose_errors}
url = '/agent/v2/HealthCheck'
return self.bot_client.execute_rest_call('GET', url, params=params)
def ensure_all_services_up(self, check_firehose=False, fields_to_check=None):
logging.debug('HealthCheckClient/ensure_all_services_up()')
# This list would have to be updated if new fields became available in the health check
if fields_to_check is None:
fields_to_check = [
'podConnectivity',
'keyManagerConnectivity',
'encryptDecryptSuccess',
'agentServiceUser',
'ceServiceUser',
]
if check_firehose:
fields_to_check.append('firehoseConnectivity')
health_check = self.get_health_check(check_firehose)
logging.debug(health_check)
functioning = [ health_check[field] for field in fields_to_check ]
if not all(functioning):
problems = [fields_to_check[i] for i,v in enumerate(functioning) if not v]
raise RuntimeError(f"Not all services available: {problems}")
```
#### File: sym_api_client_python/clients/stream_client.py
```python
import requests
import json
import logging
from .api_client import APIClient
from ..exceptions.UnauthorizedException import UnauthorizedException
# child class of APIClient --> Extends error handling functionality
# StreamClient class contains a series of functions corresponding to all stream
# endpoints on the REST API.
class StreamClient(APIClient):
def __init__(self, bot_client):
self.bot_client = bot_client
def create_im(self, users_array):
"""
Creates a new single or multi-party instant message conversation or returns
an existing IM or MIM between the specified users and the calling user.
"""
logging.debug('StreamClient/create_im()')
url = '/pod/v1/im/create'
return self.bot_client.execute_rest_call("POST", url, json=users_array)
def create_im_admin(self, users_array):
"""
Creates a new single or multi-party instant message conversation or returns
an existing IM or MIM between the specified users, but excludes the calling user.
"""
logging.debug('StreamClient/create_im_admin()')
url = '/pod/v1/admin/im/create'
return self.bot_client.execute_rest_call("POST", url, json=users_array)
def create_room(self, roomToCreate):
"""
Creates a new chatroom. See Room Attributes for room creation parameters.
Example Room Object. All required
roomToCreate = {
"name": (string) Room name,
"description": (string) Room description,
"keywords": [
{"key": "key1", "value": "value1"},
{"key": "key2", "value": "value2"}
...
],
"membersCanInvite": (bool) If true, any chat room participant can add new participants. If false, only owners can add new participants,
"discoverable": (bool) If true, this chat room (name, description and messages) non-participants can search for this room. If false, only participants can search for this room,
"public": (bool) If true, this is a public chatroom. If false, a private chatroom. Note: Once this value is set for a room, it is read-only and can’t be updated,
"readOnly": (bool) If true, only stream owners can send messages. Once this value is set for a room, it is read-only and can’t be updated,
"copyProtected": (bool) If true, users cannot copy content from this room. Once this value is set to true for a room, it is read-only and can’t be updated,
"crossPod": (bool) If true, this room is a cross-pod room,
"viewHistory": (bool) If true, new members can view the room chat history of the room,
"multiLateralRoom": (bool) If true, this is a multilateral room where users belonging to more than 2 companies can be found
}
"""
logging.debug('StreamClient/create_room()')
url = '/pod/v3/room/create'
return self.bot_client.execute_rest_call("POST", url, json=roomToCreate)
def update_room(self, stream_id, **kwargs):
"""
Updates the attributes of an existing chat room.
Pass in room_obj like such:
room_obj = {
"name" : "updated room",
"description": "testing update room function",
}
update_room(stream_id, kwargs=room_obj)
"""
logging.debug('StreamClient/update_room()')
url = '/pod/v3/room/{0}/update'.format(stream_id)
return self.bot_client.execute_rest_call('POST', url, json=kwargs)
def get_room_info(self, stream_id):
"""
Returns information about a particular chat room.
"""
logging.debug('StreamClient/get_room_info()')
url = '/pod/v3/room/{0}/info'.format(stream_id)
return self.bot_client.execute_rest_call('GET', url)
def activate_room(self, stream_id):
"""
Deactivate or reactivate a chatroom. At creation, a new chatroom is active.
"""
logging.debug('StreamClient/activate_room()')
url = '/pod/v1/room/{0}/setActive'.format(stream_id)
params = {
'active': True
}
return self.bot_client.execute_rest_call('POST', url, params=params)
def deactivate_room(self, stream_id):
"""
Deactivate or reactivate a chatroom. At creation, a new chatroom is active.
"""
logging.debug('StreamClient/activate_room()')
url = '/pod/v1/room/{0}/setActive'.format(stream_id)
params = {
'active': False
}
return self.bot_client.execute_rest_call('POST', url, params=params)
def get_room_members(self, stream_id):
"""
Lists the current members of an existing room.
"""
logging.debug('StreamClient/get_room_members()')
url = '/pod/v2/room/{0}/membership/list'.format(stream_id)
return self.bot_client.execute_rest_call('GET', url)
def add_member_to_room(self, stream_id, user_id):
"""
Adds a new member to an existing room.
"""
logging.debug('StreamClient/add_member_to_room()')
url = '/pod/v1/room/{0}/membership/add'.format(stream_id)
data = {'id': user_id}
return self.bot_client.execute_rest_call('POST', url, json=data)
# Content Object. * is required. Either articleId or articleUrl must be specified
# "content":{
# "articleId": (string) A unique ID for this article, not used by any other article,
# "title": (string) The title of the article,
# "subTitle": (string) The subtitle of the article
# "description": (string) The description of the article,
# "message" : (string) The message text that can be sent along with the shared article,
# "publisher": (string)* Publisher of the article,
# "publishDate": (string) Article publish date in unix timestamp,
# "thumbnailUrl": (string) URL to the thumbnail image,
# "author": (string)* Author of the article,
# "articleUrl": (string) URL to the article,
# "summary": (string) Preview summary of the article,
# "appId": (string)* App ID of the calling application,
# "appName": (string) App name of the calling application,
# "appIconUrl": (string) App icon URL of the calling application
# }
def share_room(self, stream_id, content):
"""
Share third-party content, such as a news article, into the specified stream.
The stream can be a chat room, an IM, or an MIM.
content: An object containing the fields required for defining an article
content = {
"articleId": (string) A unique ID for this article, not used by any other article,
"title": (string) The title of the article,
"subTitle": (string) The subtitle of the article
"description": (string) The description of the article,
"message" : (string) The message text that can be sent along with the shared article,
"publisher": (string)* Publisher of the article,
"publishDate": (string) Article publish date in unix timestamp,
"thumbnailUrl": (string) URL to the thumbnail image,
"author": (string)* Author of the article,
"articleUrl": (string) URL to the article,
"summary": (string) Preview summary of the article,
"appId": (string)* App ID of the calling application,
"appName": (string) App name of the calling application,
"appIconUrl": (string) App icon URL of the calling application
}
"""
logging.debug('StreamClient/share_room()')
url = '/agent/v3/stream/{0}/share'.format(stream_id)
data = {
"type": "com.symphony.sharing.article",
"content": content
}
return self.bot_client.execute_rest_call('POST', url, json=data)
def remove_member_from_room(self, stream_id, user_id):
"""
Removes an existing member from an existing room
"""
logging.debug('StreamClient/remove_member_from_room()')
url = '/pod/v1/room/{0}/membership/remove'.format(stream_id)
data = {'id': user_id}
return self.bot_client.execute_rest_call('POST', url, json=data)
def promote_user_to_owner(self, stream_id, user_id):
"""
Promotes user to owner of the chat room.
"""
logging.debug('StreamClient/promote_user_to_owner()')
url = '/pod/v1/room/{0}/membership/promoteOwner'.format(stream_id)
data = {'id': user_id}
return self.bot_client.execute_rest_call('POST', url, json=data)
def demote_user_from_owner(self, stream_id, user_id):
"""
Demotes room owner to a participant in the chat room.
"""
logging.debug('StreamClient/demote_user_from_owner()')
url = '/pod/v1/room/{0}/membership/demoteOwner'.format(stream_id)
data = {'id': user_id}
return self.bot_client.execute_rest_call('POST', url, json=data)
# Available kwargs:
# labels: A list of room keywords whose values will be queried.
# active: If true, restricts the search to active rooms. If false, restricts the search to inactive rooms. If not specified, includes both active and inactive rooms.
# private: If true, includes public and private rooms in the search results. If false or unspecified only public rooms are returned.
# creator: If provided, restricts the search to rooms created by the specified user.
# owner: If provided, restricts the search to rooms owned by the specified user.
# member: If provided, restricts the search to rooms where the specified user is a member.
# sortOrder: Sort algorithm to be used. Supports two values: BASIC (legacy algorithm) and RELEVANCE (enhanced algorithm).
def search_rooms(self, query, skip=0, limit=50, **kwargs):
"""
Search for rooms, querying name, description, and specified keywords.
search_rooms('query', labels = ['hack', 'test', 'room'], active = True)
"""
logging.debug('StreamClient/search_rooms()')
url = '/pod/v3/room/search'
params = {
'skip': skip,
'limit':limit
}
data = {
'query': query
}
data.update(kwargs)
return self.bot_client.execute_rest_call('POST', url, params=params, json=data)
def get_user_streams(self, skip=0, limit=50, stream_types = 'ALL', include_inactive = True):
"""
Returns a list of all the streams of which the requesting user is a member,
sorted by creation date (ascending - oldest to newest).
"""
logging.debug('StreamClient/get_user_streams()')
url = '/pod/v1/streams/list'
if (stream_types == 'ALL'):
stream_types = [
{"type": "IM"},
{"type": "MIM"},
{"type": "ROOM"},
{"type": "POST"}
]
data = {
'streamTypes': stream_types,
'includeInactiveStreams': include_inactive
}
params = {
'skip': skip,
'limit': limit
}
return self.bot_client.execute_rest_call('POST', url, json=data, params=params)
def stream_info_v2(self, stream_id):
"""
Returns information about a particular stream.
"""
logging.debug('StreamClient/stream_info_v2()')
url = '/pod/v2/streams/{0}/info'.format(stream_id)
return self.bot_client.execute_rest_call('GET', url)
def list_streams_enterprise(self, skip=0, limit=50, **kwargs):
"""
Returns a list of all the streams (IMs, MIMs, and chatrooms) for the calling
user's company, sorted by creation date (ascending – oldest to newest).
Filtering parameters can be used to narrow the list of streams that are returned.
Required Permissions: This endpoint may only be called by Service User accounts with the User Provisioning role.
Available kwargs:
streamTypes: A list of stream types that will be returned (IM, MIM, ROOM). If not specified, streams of all types are returned.
scope: The scope of the stream: INTERNAL (restricted to members of the calling user's company) or EXTERNAL (including members of the calling user's company, as well as another firm). If not specified, returns streams of either scope.
origin: The origin of the room: INTERNAL (created by a user of the calling user's company) or EXTERNAL (created by a user of another company). If not specified, returns streams of either origin. Only applies to chatrooms with External scope.
privacy: The privacy setting of the room: PRIVATE (members must be added) OR PUBLIC (anyone can join). If not specified, returns both private and public rooms. Only applies to chatrooms with internal scope.
status: The status of the room: ACTIVE or INACTIVE. If not specified, both active and inactive streams are returned.
startDate: Restricts result set to rooms that have been modified since this date (an Epoch timestamp specified in milliseconds). When specified along with endDate, enables the developer to specify rooms modified within a given time range.
endDate: Restricts result set to rooms that have been modified prior to this date (an Epoch timestamp specified in milliseconds). When specified along with startDate, enables the developer to specify rooms modified within a given time range.
"""
logging.debug('StreamClient/list_streams_enterprise()')
url = '/pod/v1/admin/streams/list'
params = {
'skip': skip,
'limit': limit
}
return self.bot_client.execute_rest_call('POST', url, params=params, json=kwargs)
def list_streams_enterprise_v2(self, skip=0, limit=50, **kwargs):
"""
Returns a list of all the streams (IMs, MIMs, and chatrooms) for the calling
user's company, sorted by creation date (ascending – oldest to newest).
Filtering parameters can be used to narrow the list of streams that are returned.
Required Permissions: This endpoint may only be called by Service User accounts with the User Provisioning role.
Available kwargs:
streamTypes: A list of stream types that will be returned (IM, MIM, ROOM). If not specified, streams of all types are returned.
scope: The scope of the stream: INTERNAL (restricted to members of the calling user's company) or EXTERNAL (including members of the calling user's company, as well as another firm). If not specified, returns streams of either scope.
origin: The origin of the room: INTERNAL (created by a user of the calling user's company) or EXTERNAL (created by a user of another company). If not specified, returns streams of either origin. Only applies to chatrooms with External scope.
privacy: The privacy setting of the room: PRIVATE (members must be added) OR PUBLIC (anyone can join). If not specified, returns both private and public rooms. Only applies to chatrooms with internal scope.
status: The status of the room: ACTIVE or INACTIVE. If not specified, both active and inactive streams are returned.
startDate: Restricts result set to rooms that have been modified since this date (an Epoch timestamp specified in milliseconds). When specified along with endDate, enables the developer to specify rooms modified within a given time range.
endDate: Restricts result set to rooms that have been modified prior to this date (an Epoch timestamp specified in milliseconds). When specified along with startDate, enables the developer to specify rooms modified within a given time range.
"""
logging.debug('StreamClient/list_streams_enterprise_v2()')
url = '/pod/v2/admin/streams/list'
params = {
'skip': skip,
'limit': limit
}
return self.bot_client.execute_rest_call('POST', url, params=params, json=kwargs)
def get_stream_members(self, stream_id, skip=0, limit=100):
"""
Returns a list of all the current members of a stream (IM, MIM, or chatroom
Required Permissions:
To get the stream membership of any stream in your enterprise, you should
call this endpoint with a Service User account with the User Provisioning role.
The Service User does not need to be a member of the stream.
See Permissions for a list of roles and associated privileges.
"""
logging.debug('StreamClient/get_stream_members()')
url = '/pod/v1/admin/stream/{0}/membership/list'.format(stream_id)
params = {
'skip': skip,
'limit': limit
}
return self.bot_client.execute_rest_call('POST', url, params=params)
```
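A short sketch tying a few of the calls above together; `bot_client` stands for an already-authenticated SymBotClient, the room attributes come from the `create_room` docstring, and the stream id is a placeholder:
```python
# Usage sketch -- bot_client must be an authenticated client; values are illustrative.
stream_client = StreamClient(bot_client)

# Create a private, non-discoverable room (attributes as documented in create_room)
stream_client.create_room({
    "name": "Project X updates",
    "description": "Automated updates for Project X",
    "public": False,
    "discoverable": False,
    "membersCanInvite": True,
})

# Find rooms by name and list the members of one (stream id is a placeholder)
rooms = stream_client.search_rooms("Project X", active=True)
members = stream_client.get_room_members("STREAM_ID_PLACEHOLDER")
```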
#### File: sym_api_client_python/configure/configure.py
```python
import json
import logging
import os
class SymConfig:
# initialize object by passing in path to config file
# store configuration data in variable data
def __init__(self, path_to_config, relative_to=None):
"""If relative_to is supplied, all relative paths will be recomputed to be relative to the file or directory
supplied. This allows starting the bots from other directories than where the main is. If a directory is
given in this field, it should end with a slash.
In testing one may want to set relative_to to be the path to the config json, so all references are relative
to that. An application may wish to set this to __file__ in its main, meaning a configuration file from anywhere
could be used.
"""
self.path_to_config = path_to_config
self.relative_to = os.path.dirname(relative_to) if relative_to is not None else os.curdir
self.data = {}
def _fix_relative_path(self, json_data, path_key, filename_key=None, warn_if_absent=True):
"""Given a json file, a key for a path and an optional key for a filename, extract the path
and potentially name, resolve and join them. If warn_if_absent, issue a warning if the file
or path does not exist at that location"""
path = json_data[path_key]
# Blank values are used to ignore entry. They should probably be None instead of blank but to maintain
# backwards compatibility the function just returns "". If it continued "" would get resolved to "."
if path == "":
return ""
if filename_key is not None:
filename = json_data[filename_key]
path = os.path.join(path, filename)
result = os.path.normpath(os.path.join(self.relative_to, path))
if warn_if_absent and (not os.path.exists(result)):
parts = [p for p in [path_key, filename_key] if p is not None]
logging.warning(
"{} specified in config, but resolved path {} does not exist"
.format(", ".join(parts), result))
return result
def load_config(self):
logging.info("Loading config from: {}".format(os.path.realpath(self.path_to_config)))
with open(self.path_to_config, "r") as read_file:
data = json.load(read_file)
self.data = data
if 'sessionAuthPort' in data:
self.data['sessionAuthHost'] = 'https://'+ data['sessionAuthHost'] + ':' + str(data['sessionAuthPort'])
else:
self.data['sessionAuthHost'] = 'https://'+ data['sessionAuthHost']
if 'keyAuthPort' in data:
self.data['keyAuthHost'] = 'https://'+ data['keyAuthHost'] + ':' + str(data['keyAuthPort'])
else:
self.data['keyAuthHost'] = 'https://'+ data['keyAuthHost']
if 'podPort' in data:
self.data['podHost'] = 'https://'+ data['podHost'] + ':' + str(data['podPort'])
else:
self.data['podHost'] = 'https://'+ data['podHost']
if 'agentPort' in data:
self.data['agentHost'] = 'https://'+ data['agentHost'] + ':' + str(data['agentPort'])
else:
self.data['agentHost'] = 'https://'+ data['agentHost']
#backwards compatible
if 'botCertPath' in data:
self.data['botCertPath'] = self._fix_relative_path(data,'botCertPath')
if 'botRSAName' in data:
self.data['botRSAPath'] = self._fix_relative_path(data, 'botRSAPath', 'botRSAName')
if 'botPrivateKeyName' in data:
self.data['botRSAPath'] = self._fix_relative_path(data, 'botPrivateKeyPath', 'botPrivateKeyName')
if 'botCertName' in data:
self.data['p.12'] = self._fix_relative_path(data, 'botCertPath', 'botCertName')
if 'truststorePath' in data:
self.data['truststorePath'] = self._fix_relative_path(data, 'truststorePath')
if 'proxyURL' in data:
self.data['podProxyURL'] = data['proxyURL']
self.data['podProxyUsername'] = data['proxyUsername'] if 'proxyUsername' in data else ""
self.data['podProxyPassword'] = data['proxyPassword'] if 'proxyPassword' in data else ""
self.data['agentProxyURL'] = data['proxyURL']
self.data['agentProxyUsername'] = data['proxyUsername'] if 'proxyUsername' in data else ""
self.data['agentProxyPassword'] = data['proxyPassword'] if 'proxyPassword' in data else ""
self.data['keyManagerProxyURL'] = data['proxyURL']
self.data['keyManagerProxyUsername'] = data['proxyUsername'] if 'proxyUsername' in data else ""
self.data['keyManagerProxyPassword'] = data['proxyPassword'] if 'proxyPassword' in data else ""
if 'podProxyURL' not in data or not data['podProxyURL']:
self.data['podProxyRequestObject'] = {}
self.data['podProxyURL'] = ""
else:
self.data['podProxyURL'] = data['podProxyURL']
if 'podProxyUsername' in data and data['podProxyUsername']:
self.data['podProxyUsername'] = data['podProxyUsername']
                self.data['podProxyPassword'] = data['podProxyPassword']
pod_proxy_parse = data['podProxyURL'].split('://')
pod_proxy_auth = data['podProxyUsername'] + ':' + data['podProxyPassword']
pod_proxy_url = pod_proxy_parse[0] + '://' + pod_proxy_auth + '@' + pod_proxy_parse[1]
self.data['podProxyRequestObject'] = {
'http' : pod_proxy_url,
'https' : pod_proxy_url,
}
else:
self.data['podProxyRequestObject'] = {
'http' : data['podProxyURL'],
'https' : data['podProxyURL'],
}
if 'agentProxyURL' not in data or not data['agentProxyURL']:
self.data['agentProxyRequestObject'] = {}
self.data['agentProxyURL'] = ""
else:
self.data['agentProxyURL'] = data['agentProxyURL']
if 'agentProxyUsername' in data and data['agentProxyUsername']:
self.data['agentProxyUsername'] = data['agentProxyUsername']
self.data['agentProxyPassword'] = data['agentProxyPassword']
agent_proxy_parse = data['agentProxyURL'].split('://')
agent_proxy_auth = data['agentProxyUsername'] + ':' + data['agentProxyPassword']
agent_proxy_url = agent_proxy_parse[0] + '://' + agent_proxy_auth + '@' + agent_proxy_parse[1]
self.data['agentProxyRequestObject'] = {
'http' : agent_proxy_url,
'https' : agent_proxy_url,
}
else:
self.data['agentProxyRequestObject'] = {
'http' : data['agentProxyURL'],
'https' : data['agentProxyURL'],
}
if 'keyManagerProxyURL' not in data or not data['keyManagerProxyURL']:
self.data['keyManagerProxyRequestObject'] = {}
self.data['keyManagerProxyURL'] = ""
else:
self.data['keyManagerProxyURL'] = data['keyManagerProxyURL']
if 'keyManagerProxyUsername' in data and data['keyManagerProxyUsername']:
self.data['keyManagerProxyUsername'] = data['keyManagerProxyUsername']
self.data['keyManagerProxyPassword'] = data['keyManagerProxyPassword']
km_proxy_parse = data['keyManagerProxyURL'].split('://')
km_proxy_auth = data['keyManagerProxyUsername'] + ':' + data['keyManagerProxyPassword']
km_proxy_url = km_proxy_parse[0] + '://' + km_proxy_auth + '@' + km_proxy_parse[1]
self.data['keyManagerProxyRequestObject'] = {
'http' : km_proxy_url,
'https' : km_proxy_url,
}
else:
self.data['keyManagerProxyRequestObject'] = {
'http' : data['keyManagerProxyURL'],
'https' : data['keyManagerProxyURL'],
}
if 'datafeedEventsErrorTimeout' in data:
self.data['datafeedEventsErrorTimeout'] = data['datafeedEventsErrorTimeout']
loggable_config_dict = {}
for k, v in self.data.items():
if "password" not in k.lower() or v == "":
loggable_config_dict[k] = v
else:
loggable_config_dict[k] = "---HIDDEN---"
logging.info(json.dumps(loggable_config_dict, indent=4))
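    # --- Illustrative sketch (editor's addition, not from the original repo) ---
    # A minimal RSA-based config.json consumed by load_config() might look like the
    # following; host names, ports, paths and the bot username are placeholders:
    #
    #   {
    #       "sessionAuthHost": "example.symphony.com", "sessionAuthPort": 443,
    #       "keyAuthHost": "example.symphony.com",     "keyAuthPort": 443,
    #       "podHost": "example.symphony.com",         "podPort": 443,
    #       "agentHost": "example.symphony.com",       "agentPort": 443,
    #       "botUsername": "example-bot",
    #       "botPrivateKeyPath": "rsa/", "botPrivateKeyName": "bot_private_key.pem"
    #   }
    #
    #   config = SymConfig('resources/config.json', relative_to=__file__)
    #   config.load_config()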
```
#### File: sym_api_client_python/listeners/room_listener.py
```python
from abc import ABC, abstractmethod
# Abstract class for ROOM listener.
# This class is just an interface of functions to handle the Room events received
# from the DataFeed; see "Real Time Events" in the REST API documentation for more
# details. The developer handles the actual event logic in their implementation
# of this abstract class.
class RoomListener(ABC):
@abstractmethod
def on_room_msg(self, message):
pass
@abstractmethod
def on_room_created(self, roomCreated):
pass
@abstractmethod
def on_room_deactivated(self, roomDeactivated):
pass
@abstractmethod
def on_room_member_demoted_from_owner(self, roomMemberDemotedFromOwner):
pass
@abstractmethod
def on_room_member_promoted_to_owner(self, roomMemberPromotedToOwner):
pass
@abstractmethod
def on_room_reactivated(self, roomReactivated):
pass
@abstractmethod
def on_room_updated(self, roomUpdated):
pass
@abstractmethod
def on_user_joined_room(self, userJoinedRoom):
pass
@abstractmethod
def on_user_left_room(self, userLeftRoom):
pass
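# --- Illustrative sketch (editor's addition, not part of the original file) ---
# A bot implements this interface by subclassing RoomListener and filling in only the
# events it cares about, e.g.:
#
#   class EchoRoomListener(RoomListener):
#       def on_room_msg(self, message):
#           print('room message:', message)
#       def on_room_created(self, roomCreated): pass
#       def on_room_deactivated(self, roomDeactivated): pass
#       def on_room_member_demoted_from_owner(self, event): pass
#       def on_room_member_promoted_to_owner(self, event): pass
#       def on_room_reactivated(self, roomReactivated): pass
#       def on_room_updated(self, roomUpdated): pass
#       def on_user_joined_room(self, userJoinedRoom): pass
#       def on_user_left_room(self, userLeftRoom): pass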
```
#### File: sym_api_client_python/listeners/wall_post_listener_imp.py
```python
from .wall_post_listener import WallPostListener
import logging
# A sample implementation of Abstract WallPostListener class
# The listener can respond to incoming events if the respective event
# handler has been implemented
class WallPostListenerImp(WallPostListener):
"""Example implementation of WallPostListener
sym_bot_client: contains clients which respond to incoming events
"""
def __init__(self, sym_bot_client):
self.bot_client = sym_bot_client
def on_wall_post_msg(self, wall_post_msg):
        logging.debug('received incoming wall post %s', wall_post_msg)
def on_shared_post(self, shared_post):
logging.debug('received incoming shared post %s', shared_post)
```
#### File: sym_api_client_python/processors/message_formatter.py
```python
from yattag import Doc
class MessageFormatter:
def __init__(self):
pass
def format_message(self, message):
"""
appends messageML tags to plain text and returns a dictionary:
{message : messageML object}
"""
doc,tag,text,line = Doc().ttl()
with tag('messageML'):
text(message)
return dict(message = doc.getvalue())
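    # --- Illustrative usage (editor's addition, not from the original file) ---
    # With yattag, format_message("hello") returns:
    #   {'message': '<messageML>hello</messageML>'}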
```
#### File: symphony-api-client-python/tests/test_userClient.py
```python
import json
import logging
import unittest
import requests
from sym_api_client_python.auth.auth import Auth
from sym_api_client_python.auth.rsa_auth import SymBotRSAAuth
from sym_api_client_python.clients.sym_bot_client import SymBotClient
from sym_api_client_python.configure.configure import SymConfig
from sym_api_client_python.listeners.im_listener_test_imp import IMListenerTestImp
from sym_api_client_python.listeners.room_listener_test_imp import RoomListenerTestImp
from sym_api_client_python.loaders import load_from_env_var
logging.basicConfig(filename='sym_api_client_python/logs/example.log', format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', filemode='w', level=logging.DEBUG)
logging.getLogger("urllib3").setLevel(logging.WARNING)
# 344147139494862
# 344147139494909
# <EMAIL>
# <EMAIL>
#TestUsers class extends unittest class
#setUp function executes before every test function runs --> grabs necessary data to run each client test
#streamId is hard coded --> replace with your own streamId to test if necessary
#execute by navigating to tests folder --> type 'python3 -m unittest discover' in commandline to execute each test
#comment any function that you no longer wish to test
class TestUsers(unittest.TestCase):
def setUp(self):
try:
conf, auth = load_from_env_var("SYMPHONY_TEST_CONFIG")
self.configure = conf
self.auth = auth
except ValueError:
#RSA Auth flow:
self.configure = SymConfig('sym_api_client_python/resources/config.json')
self.configure.load_config()
auth = SymBotRSAAuth(self.configure)
auth.authenticate()
# Initialize SymBotClient with auth and configure objects
self.bot_client = SymBotClient(auth, self.configure)
#pass
def test_getUserFromUsername(self):
print('testing get_user_from_user_name function')
username = self.configure.data["botUsername"]
self.assertTrue(self.bot_client.get_user_client().get_user_from_user_name(username))
#pass
def test_getUserFromEmail(self):
print('testing get_user_from_email function')
email = self.configure.data["botEmailAddress"]
self.assertTrue(self.bot_client.get_user_client().get_user_from_email(email))
#pass
def test_getUserFromId(self):
print('testing get_user_from_id function')
self.assertTrue(self.bot_client.get_user_client().get_user_from_id('344147139494909'))
#pass
def test_getUsersFromIdList(self):
print('testing get_users_from_id_list function')
self.assertTrue(self.bot_client.get_user_client().get_users_from_id_list(['344147139494862', '344147139494909']))
#pass
def test_getUsersFromEmailList(self):
print('testing get_users_from_email_list function')
self.assertTrue(self.bot_client.get_user_client().get_users_from_email_list(['<EMAIL>','<EMAIL>']))
#pass
def test_searchUsers(self):
print('testing search users function')
username = self.configure.data["botUsername"]
# Make a search string by taking the first half of the username
search_string = username[:int(len(username) / 2)]
self.assertTrue(self.bot_client.get_user_client().search_users(search_string))
# if __name__ == '__main__':
# unittest.main()
``` |
{
"source": "3to1null/grpc-client-test",
"score": 3
} |
#### File: 3to1null/grpc-client-test/greeter_client.py
```python
from __future__ import print_function
import grpc
import sys
import time
import helloworld_pb2
def run():
if len(sys.argv) >= 2:
grpc_server = sys.argv[1]
else:
grpc_server = 'localhost:50051'
print(grpc_server)
channel = grpc.insecure_channel(grpc_server)
stub = helloworld_pb2.GreeterStub(channel)
response = stub.SayHello(helloworld_pb2.HelloRequest(name='PTI'))
print("Greeter client received: " + response.message)
if __name__ == '__main__':
while True:
try:
run()
except Exception as E:
print(E)
finally:
time.sleep(1)
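# --- Editor's note (assumption, not from the original repo) ---
# helloworld_pb2 is assumed to be generated from the canonical gRPC helloworld.proto,
# e.g. with:
#   python -m grpc_tools.protoc -I. --python_out=. --grpc_python_out=. helloworld.proto
# With recent grpcio-tools releases the service stub is emitted into a separate
# helloworld_pb2_grpc module (helloworld_pb2_grpc.GreeterStub) rather than
# helloworld_pb2.GreeterStub as used above.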
``` |
{
"source": "3togo/flownet2-pytorch",
"score": 2
} |
#### File: flownet2-pytorch/networks/FlowNetSD.py
```python
import torch
import torch.nn as nn
from torch.nn import init
import math
import numpy as np
from .submodules import *
'Parameter count = 45,371,666'
class FlowNetSD(nn.Module):
def __init__(self, args, batchNorm=True):
super(FlowNetSD,self).__init__()
self.batchNorm = batchNorm
self.conv0 = conv(self.batchNorm, 6, 64)
self.conv1 = conv(self.batchNorm, 64, 64, stride=2)
self.conv1_1 = conv(self.batchNorm, 64, 128)
self.conv2 = conv(self.batchNorm, 128, 128, stride=2)
self.conv2_1 = conv(self.batchNorm, 128, 128)
self.conv3 = conv(self.batchNorm, 128, 256, stride=2)
self.conv3_1 = conv(self.batchNorm, 256, 256)
self.conv4 = conv(self.batchNorm, 256, 512, stride=2)
self.conv4_1 = conv(self.batchNorm, 512, 512)
self.conv5 = conv(self.batchNorm, 512, 512, stride=2)
self.conv5_1 = conv(self.batchNorm, 512, 512)
self.conv6 = conv(self.batchNorm, 512, 1024, stride=2)
self.conv6_1 = conv(self.batchNorm,1024, 1024)
self.deconv5 = deconv(1024,512)
self.deconv4 = deconv(1026,256)
self.deconv3 = deconv(770,128)
self.deconv2 = deconv(386,64)
self.inter_conv5 = i_conv(self.batchNorm, 1026, 512)
self.inter_conv4 = i_conv(self.batchNorm, 770, 256)
self.inter_conv3 = i_conv(self.batchNorm, 386, 128)
self.inter_conv2 = i_conv(self.batchNorm, 194, 64)
self.predict_flow6 = predict_flow(1024)
self.predict_flow5 = predict_flow(512)
self.predict_flow4 = predict_flow(256)
self.predict_flow3 = predict_flow(128)
self.predict_flow2 = predict_flow(64)
self.upsampled_flow6_to_5 = nn.ConvTranspose2d(2, 2, 4, 2, 1)
self.upsampled_flow5_to_4 = nn.ConvTranspose2d(2, 2, 4, 2, 1)
self.upsampled_flow4_to_3 = nn.ConvTranspose2d(2, 2, 4, 2, 1)
self.upsampled_flow3_to_2 = nn.ConvTranspose2d(2, 2, 4, 2, 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
if m.bias is not None:
init.uniform_(m.bias)
init.xavier_uniform_(m.weight)
if isinstance(m, nn.ConvTranspose2d):
if m.bias is not None:
init.uniform_(m.bias)
init.xavier_uniform_(m.weight)
# init_deconv_bilinear(m.weight)
self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear')
def forward(self, x):
out_conv0 = self.conv0(x)
out_conv1 = self.conv1_1(self.conv1(out_conv0))
out_conv2 = self.conv2_1(self.conv2(out_conv1))
out_conv3 = self.conv3_1(self.conv3(out_conv2))
out_conv4 = self.conv4_1(self.conv4(out_conv3))
out_conv5 = self.conv5_1(self.conv5(out_conv4))
out_conv6 = self.conv6_1(self.conv6(out_conv5))
flow6 = self.predict_flow6(out_conv6)
flow6_up = self.upsampled_flow6_to_5(flow6)
out_deconv5 = self.deconv5(out_conv6)
concat5 = torch.cat((out_conv5,out_deconv5,flow6_up),1)
out_interconv5 = self.inter_conv5(concat5)
flow5 = self.predict_flow5(out_interconv5)
flow5_up = self.upsampled_flow5_to_4(flow5)
out_deconv4 = self.deconv4(concat5)
concat4 = torch.cat((out_conv4,out_deconv4,flow5_up),1)
out_interconv4 = self.inter_conv4(concat4)
flow4 = self.predict_flow4(out_interconv4)
flow4_up = self.upsampled_flow4_to_3(flow4)
out_deconv3 = self.deconv3(concat4)
concat3 = torch.cat((out_conv3,out_deconv3,flow4_up),1)
out_interconv3 = self.inter_conv3(concat3)
flow3 = self.predict_flow3(out_interconv3)
flow3_up = self.upsampled_flow3_to_2(flow3)
out_deconv2 = self.deconv2(concat3)
concat2 = torch.cat((out_conv2,out_deconv2,flow3_up),1)
out_interconv2 = self.inter_conv2(concat2)
flow2 = self.predict_flow2(out_interconv2)
if self.training:
return flow2,flow3,flow4,flow5,flow6
else:
return flow2,
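# --- Illustrative shape check (editor's addition, not part of the original file) ---
# The network expects two RGB frames stacked along the channel axis (6 channels); with
# six stride-2 convolutions, input height/width should be divisible by 64, e.g.:
#
#   net = FlowNetSD(args=None, batchNorm=True)
#   net.train()
#   x = torch.randn(1, 6, 256, 320)              # N, C (= 2 frames x RGB), H, W
#   flow2, flow3, flow4, flow5, flow6 = net(x)
#   # flow2 has shape (1, 2, 64, 80), i.e. 1/4 of the input resolution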
``` |
{
"source": "3togo/noip-renew",
"score": 2
} |
#### File: 3togo/noip-renew/noip-renew.py
```python
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
import time
import sys
import os
class Robot:
USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:64.0) Gecko/20100101 Firefox/64.0"
LOGIN_URL = "https://www.noip.com/login"
HOST_URL = "https://my.noip.com/#!/dynamic-dns"
def __init__(self, debug=0):
self.debug = debug
options = webdriver.ChromeOptions()
options.add_argument("headless")
#options.add_argument("privileged")
#options.add_argument("disable-gpu")
options.add_argument("no-sandbox") # need when run in docker
options.add_argument("window-size=1200x800")
options.add_argument("user-agent=%s" % Robot.USER_AGENT)
if 'https_proxy' in os.environ:
options.add_argument("proxy-server=" + os.environ['https_proxy'])
self.browser = webdriver.Chrome(chrome_options=options)
self.browser.set_page_load_timeout(60)
def log_msg(self, msg, level=None):
tstr = time.strftime('%Y/%m/%d %H:%M:%S', time.localtime(time.time()))
if level is None:
level = self.debug
if level > 0:
print("%s [%s] - %s" % (tstr, self.username, msg))
def login(self, username, password):
self.log_msg("Open %s..." % Robot.LOGIN_URL)
self.browser.get(Robot.LOGIN_URL)
if self.debug > 1:
self.browser.save_screenshot("debug1.png")
self.log_msg("Login...")
ele_usr = self.browser.find_element_by_name("username")
ele_pwd = self.browser.find_element_by_name("password")
ele_usr.send_keys(username)
ele_pwd.send_keys(password)
#self.browser.find_element_by_name("login").click()
form = self.browser.find_element_by_id("clogs")
form.submit()
if self.debug > 1:
time.sleep(1)
self.browser.save_screenshot("debug2.png")
@staticmethod
def xpath_of_button(cls_name):
return "//button[contains(@class, '%s')]" % cls_name
def update_hosts(self, num_hosts):
self.log_msg("Open %s..." % Robot.HOST_URL)
try:
self.browser.get(Robot.HOST_URL)
except TimeoutException as e:
self.browser.save_screenshot("timeout.png")
self.log_msg("Timeout. Try to ignore")
invalid = True
retry = 5
while retry > 0:
time.sleep(1)
buttons_todo = self.browser.find_elements_by_xpath(Robot.xpath_of_button('btn-confirm'))
buttons_done = self.browser.find_elements_by_xpath(Robot.xpath_of_button('btn-configure'))
count = len(buttons_todo)
if count + len(buttons_done) == num_hosts:
invalid = False
break
self.log_msg("Cannot find the buttons", 2)
retry -= 1
if invalid:
self.log_msg("Invalid page or something wrong. See error.png", 2)
self.browser.save_screenshot("error.png")
return False
if self.debug > 1:
self.browser.save_screenshot("debug3.png")
self.log_msg("Hosts to be confirmed: %d" % count)
for button in buttons_todo:
button.click()
time.sleep(1)
self.browser.save_screenshot("result.png")
self.log_msg("Confirmed hosts: %d" % count, 2)
return True
def run(self, username, password, num_hosts):
rc = 0
self.username = username
self.log_msg("Debug level: %d" % self.debug)
try:
self.login(username, password)
if not self.update_hosts(num_hosts):
rc = 3
except Exception as e:
self.log_msg(str(e), 2)
self.browser.save_screenshot("exception.png")
rc = 2
finally:
self.browser.quit()
return rc
def main(argv=None):
if argv is None:
argv = sys.argv
if len(argv) < 4:
print("Usage: %s <username> <password> <num-hosts> [<debug-level>]" % argv[0])
return 1
username = argv[1]
password = argv[2]
num_hosts = int(argv[3])
debug = 1
if len(argv) > 4:
debug = int(argv[4])
robot = Robot(debug)
return robot.run(username, password, num_hosts)
if __name__ == "__main__":
sys.exit(main())
``` |
{
"source": "3togo/OpenSfM",
"score": 2
} |
#### File: opensfm/test/conftest.py
```python
from distutils.version import LooseVersion
import numpy
def pytest_configure(config):
use_legacy_numpy_printoptions()
def use_legacy_numpy_printoptions():
"""Ensure numpy use legacy print formant."""
if LooseVersion(numpy.__version__) > LooseVersion('1.13'):
numpy.set_printoptions(legacy='1.13')
``` |
{
"source": "3togo/PYNQ-HLS",
"score": 3
} |
#### File: pynqhls/sharedmem/sharedmem.py
```python
from pynq import Overlay, GPIO, Register, Xlnk
import os
import inspect
import numpy as np
class sharedmemOverlay(Overlay):
"""A simple Mem-Mapped Overlay for PYNQ.
    This overlay is implemented with a single Matrix Multiply core
    connected directly to the ARM Core AXI interface.
"""
__RESET_VALUE = 0
__NRESET_VALUE = 1
""" For convenince, we define register offsets that are scraped from
the HLS implementation header files.
"""
__MMULT_AP_CTRL_OFF = 0x00
__MMULT_AP_CTRL_START_IDX = 0
__MMULT_AP_CTRL_DONE_IDX = 1
__MMULT_AP_CTRL_IDLE_IDX = 2
__MMULT_AP_CTRL_READY_IDX = 3
__MMULT_GIE_OFF = 0x04
__MMULT_IER_OFF = 0x08
__MMULT_ISR_OFF = 0x0C
__MMULT_ADDR_A_DATA = 0x10
__MMULT_ADDR_BT_DATA = 0x18
__MMULT_ADDR_C_DATA = 0x20
__MMULT_A_SHAPE = (100, 100)
__MMULT_BT_SHAPE = (100, 100)
__MMULT_C_SHAPE = (100, 100)
__MMULT_A_SIZE = __MMULT_A_SHAPE[0] * __MMULT_A_SHAPE[1]
__MMULT_BT_SIZE = __MMULT_BT_SHAPE[0] * __MMULT_BT_SHAPE[1]
__MMULT_C_SIZE = __MMULT_C_SHAPE[0] * __MMULT_C_SHAPE[1]
def __init__(self, bitfile, **kwargs):
"""Initializes a new sharedmemOverlay object.
"""
# The following lines do some path searching to enable a
# PYNQ-Like API for Overlays. For example, without these
# lines you cannot call sharedmemOverlay('sharedmem.bit') because
# sharedmem.bit is not on the bitstream search path. The
# following lines fix this for any non-PYNQ Overlay
#
# You can safely reuse, and ignore the following lines
#
# Get file path of the current class (i.e. /opt/python3.6/<...>/sharedmem.py)
file_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
# Get directory path of the current class (i.e. /opt/python3.6/<...>/sharedmem/)
dir_path = os.path.dirname(file_path)
# Update the bitfile path to search in dir_path
bitfile = os.path.join(dir_path, bitfile)
# Upload the bitfile (and parse the colocated .tcl script)
super().__init__(bitfile, **kwargs)
# Manually define the GPIO pin that drives reset
self.__resetPin = GPIO(GPIO.get_gpio_pin(0), "out")
self.nreset()
# Define a Register object at address 0x0 of the mmult address space
# We will use this to set bits and start the core (see start())
# Do NOT write to __ap_ctrl unless __resetPin has been set to __NRESET_VALUE
self.__ap_ctrl = Register(self.mmultCore.mmio.base_addr, 32)
self.__a_offset = Register(self.mmultCore.mmio.base_addr +
self.__MMULT_ADDR_A_DATA, 32)
self.__bt_offset = Register(self.mmultCore.mmio.base_addr +
self.__MMULT_ADDR_BT_DATA, 32)
self.__c_offset = Register(self.mmultCore.mmio.base_addr +
self.__MMULT_ADDR_C_DATA, 32)
self.xlnk = Xlnk()
def __start(self):
"""Raise AP_START and enable the HLS core
"""
self.__ap_ctrl[self.__MMULT_AP_CTRL_START_IDX] = 1
pass
def __stop(self):
"""Lower AP_START and disable the HLS core
"""
self.__ap_ctrl[self.__MMULT_AP_CTRL_START_IDX] = 0
pass
def nreset(self):
"""Set the reset pin to self.__NRESET_VALUE to place the core into
not-reset (usually run)
"""
self.__resetPin.write(self.__NRESET_VALUE)
def reset(self):
"""Set the reset pin to self.__RESET_VALUE to place the core into
reset
"""
self.__resetPin.write(self.__RESET_VALUE)
def run(self, A, B):
""" Launch computation on the mmult HLS core
Parameters
----------
A : Numpy ndarray of at most size TODOxTODO (it will be padded)
A buffer containing ND Array Elements to be transferred to the core
B : Numpy ndarray of at most size TODOxTODO (it will be padded)
A buffer containing ND Array Elements to be transferred to the core
"""
if(not isinstance(A, np.ndarray)):
raise TypeError("Parameter A must be an instance of "
"numpy.ndarray")
if(not isinstance(B, np.ndarray)):
raise RuntimeError("Parameter B must be an instance of "
"numpy.ndarray")
sza = A.shape
if(sza[0] > self.__MMULT_A_SHAPE[0]):
raise RuntimeError(f"Dimension 0 of A must be less than or equal to"
f"{self.__MMULT_A_SHAPE[0]}")
if(sza[1] > self.__MMULT_A_SHAPE[1]):
raise RuntimeError(f"Dimension 1 of A must be less than or equal to"
f"{self.__MMULT_A_SHAPE[1]}")
szb = B.shape
if(szb[0] > self.__MMULT_BT_SHAPE[1]):
raise RuntimeError(f"Dimension 0 of B must be less than or equal to"
f"{self.__MMULT_BT_SHAPE[0]}")
if(szb[1] > self.__MMULT_BT_SHAPE[0]):
raise RuntimeError(f"Dimension 1 of B must be less than or equal to"
f"{self.__MMULT_BT_SHAPE[1]}")
# Check size of A
# Check size of B
# Allocate C
a = self.xlnk.cma_array(self.__MMULT_A_SHAPE, "int")
bt = self.xlnk.cma_array(self.__MMULT_BT_SHAPE, "int")
c = self.xlnk.cma_array(self.__MMULT_C_SHAPE, "int")
# Copy A->a
a[:A.shape[0], :A.shape[1]] = A
# Copy BT->bt
bt[:B.shape[1], :B.shape[0]] = B.transpose()
# TODO: Enable Interrupts
# Write address of a, bt, c to HLS core
self.__a_offset[31:0] = self.xlnk.cma_get_phy_addr(a.pointer)
self.__bt_offset[31:0] = self.xlnk.cma_get_phy_addr(bt.pointer)
self.__c_offset[31:0] = self.xlnk.cma_get_phy_addr(c.pointer)
self.__start()
# TODO: Wait for ASYNC Interrupt
# TODO: Clear Interrupt
import time
time.sleep(1)
self.__stop()
C = np.zeros((A.shape[0], B.shape[1]), np.int32)
# Transform C into a Numpy Array
C[:A.shape[0], :B.shape[1]] = c[:A.shape[0], :B.shape[1]]
a.freebuffer()
bt.freebuffer()
c.freebuffer()
return C
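    # --- Illustrative usage sketch (editor's addition; requires a PYNQ board and the bitstream) ---
    #   import numpy as np
    #   overlay = sharedmemOverlay('sharedmem.bit')
    #   A = np.random.randint(0, 10, (100, 100))
    #   B = np.random.randint(0, 10, (100, 100))
    #   C = overlay.run(A, B)        # C == A @ B, computed by the HLS matrix-multiply core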
```
#### File: PYNQ-HLS/y2k22_patch/patch.py
```python
import os, sys, datetime, shutil, glob, re
version='1.2'
today = datetime.date.today()
today = datetime.date.strftime(today, "%Y-%m-%d")
log_file = open("y2k22_patch.log", "a+")
def log(msg, lvl='INFO'):
if 'DEBUG' in lvl and not os.environ.get('DEBUG_LOG'):
return
msg='[%s] %s: %s' %(today,lvl,msg)
print(msg)
log_file.write('%s\n' %msg)
log_file.flush()
valid_rels=['2014.*','2015.*','2016.*','2017.*','2018.*','2019.*','2020.*','2021.*']
formatted_rels=''
for rel in valid_rels:
formatted_rels+='%s, ' %rel
formatted_rels= formatted_rels[:-2]
k = formatted_rels.rfind(", ")
formatted_rels=formatted_rels[:k] + " and" + formatted_rels[k+1:]
log("This script (version: %s) patches Xilinx Tools for HLS Y2k22 bug for the following release: \n\t\t%s" %(version,formatted_rels) )
# log("Script version %s is targeted for %s releases " %(version,formatted_rels))
install_root = os.getcwd()
filePath='%s/*/%s/common/scripts/automg.tcl'
if os.environ.get("INSTALL_PATH"):
install_root=os.environ.get("INSTALL_PATH")
dry_Run=False
if os.environ.get('DRY_RUN'):
dry_Run=os.environ.get('DRY_RUN') == 'True'
def do_copy(src,dest):
'''
pure copy, no manipulation
'''
src=src.strip()
if not os.path.exists(src):
log("%s does not exists" %src, "IGNORED")
dest=dest.strip()
if os.path.isdir(dest) :
dest=os.path.join(dest,os.path.basename(src))
if dry_Run:
log("Won't copy %s to %s " % (src,dest),"DRYRUN")
return
log("%s to %s " % (src,dest),"COPY")
try:
shutil.copyfile(src,dest)
try:
os.chmod(dest, 0o755)
except Exception as e:
log("Unable to change file permission for %s\n%s" %(dest,e),"WARNING")
pass
except:
pass
for rel in valid_rels:
path=filePath % (install_root,rel)
for file in glob.glob(path):
dir=os.path.dirname(file)
log("%s" %dir, "UPDATE")
do_copy("%s/y2k22_patch/automg_patch_20220104.tcl"%os.getcwd(),dir)
``` |
{
"source": "3togo/skeleton-tracing",
"score": 3
} |
#### File: skeleton-tracing/swig/example.py
```python
import trace_skeleton
import cv2
import random
import os
import sys
def get_fname(fname):
    # walk up the directory tree a few levels looking for the test image
    for i in range(5):
        if os.path.exists(fname):
            return fname
        fname = os.path.join("..", fname)
        print(fname)
    return None

src_img = "test_images/opencv-thinning-src-img.png"
fname = get_fname(src_img)
if not fname:
    print(f"{src_img} not found")
    sys.exit()
im = cv2.imread(fname,0)
_,im = cv2.threshold(im,128,255,cv2.THRESH_BINARY);
# cv2.imshow("",im);cv2.waitKey(0)
polys = trace_skeleton.from_numpy(im);
for l in polys:
c = (200*random.random(),200*random.random(),200*random.random())
for i in range(0,len(l)-1):
cv2.line(im,(l[i][0],l[i][1]),(l[i+1][0],l[i+1][1]),c)
cv2.imshow('',im);cv2.waitKey(0)
``` |
{
"source": "3tones/brybus",
"score": 3
} |
#### File: 3tones/brybus/dblogger.py
```python
import time
writestart = time.time()
format = '%x %X'
import csv
import ConfigParser
cfg = ConfigParser.ConfigParser()
cfg.read('brybus.cfg')
serialport = cfg.get('brybus','serialport')
database = cfg.get('db','database')
mysql_host = cfg.get('db','mysql_host')
mysql_user = cfg.get('db','mysql_user')
mysql_pass = cfg.get('db','mysql_pass')
mysql_db = cfg.get('db','mysql_db')
sqlitefile = cfg.get('db','sqlitefile')
if database == 'mysql':
try:
import _mysql
db=_mysql.connect(mysql_host,mysql_user,mysql_pass,mysql_db)
schema_sql = """
CREATE TABLE if not exists data (
ts DATETIME NOT NULL,
request VARCHAR(25) NULL DEFAULT NULL,
response VARCHAR(350) NULL DEFAULT NULL,
INDEX ts (ts)
);
"""
schema_sql2 = """
CREATE TABLE if not exists realtime (
ts DATETIME NULL DEFAULT NULL,
request VARCHAR(25) NULL DEFAULT NULL,
response VARCHAR(350) NULL DEFAULT NULL,
UNIQUE INDEX request_idx (request)
);
"""
db.query(schema_sql)
db.query(schema_sql2)
except _mysql.Error, e:
print "Error %d: %s" % (e.args[0], e.args[1])
quit()
elif database == 'sqlite':
try:
import sqlite3
dbh=sqlite3.connect(sqlitefile, isolation_level=None)
db = dbh.cursor();
schema_sql = """
begin;
create table if not exists data (
ts timestamp not null,
request varchar(25),
response varchar(350)
);
create index if not exists ts_index on data(ts);
commit;""";
db.executescript(schema_sql)
except sqlite3.Error, e:
print "sqlite error: %s" % (e.args[0])
quit()
import brybus
ByteToHex = brybus.ByteToHex
HexToByte = brybus.HexToByte
def scantable():
#make queue for 3b table
scan_q = brybus.writequeue()
print "Building Queue"
#table 3c isn't complete so use this array of valid rows
table_3c = (
'200130010300000B003C01',
'200130010300000B003C03',
'200130010300000B003C04',
'200130010300000B003C05',
'200130010300000B003C06',
'200130010300000B003C08',
'200130010300000B003C09',
'200130010300000B003C0A',
'200130010300000B003C0B',
'200130010300000B003C0C',
'200130010300000B003C0D',
'200130010300000B003C0E',
'200130010300000B003C0F',
'200130010300000B003C10',
'200130010300000B003C11',
'200130010300000B003C12',
'200130010300000B003C13',
'200130010300000B003C14',
'200130010300000B003C1E',
'200130010300000B003C1F',
'200130010300000B003C28',
'200130010300000B003C29'
)
for r in range(1,16):
reg = '00' + '3B' + "{0:02X}".format(r)
f = brybus.frame(reg,'C','2001','3001','0B')
scan_q.pushframe(f)
#use the list above
for r in table_3c:
f = brybus.frame(r,'S')
scan_q.pushframe(f)
for r in range(1,4):
reg = '00' + '3D' + "{0:02X}".format(r)
f = brybus.frame(reg,'C','2001','3001','0B')
scan_q.pushframe(f)
return scan_q
def db_insert(head,data):
if database == 'mysql':
query = "insert into data values (now(),'"+head+"','"+data+"')"
db.query(query)
db_update(head,data)
elif database == 'sqlite':
query = "insert into data values (datetime('now'),'"+head+"','"+data+"')"
db.execute(query)
def db_update(head,data):
if database == 'mysql':
query = "insert into realtime (ts,request,response) values (now(),'"+head+"','"+data+"') ON DUPLICATE KEY UPDATE ts=VALUES(ts), response=VALUES(response)"
db.query(query)
#=======main========
q = scantable()
#q.printqueue()
#setup the stream and bus
s = brybus.stream('S',serialport)
b = brybus.bus(s)
table=[]
db_insert("START","START")
while(1):
#get write frame and write it
#write blocks, writes if the timeout passes, but returns without writing if data is received on the serial port
wf_raw = q.writeframe()
w = b.write(wf_raw)
f = brybus.frame(b.read(),"B")
#if w==1:
#print "write", q.printstatus()
#print wf.dst,wf.src,wf.len,wf.func,wf.data,wf.crc
#print f.dst,f.src,f.len,f.func,f.data,f.crc
#check the frame that was read against the queue to match the response with the request
q.checkframe(f)
#test for end of queue, then restart the queue
if q.writeframe() == '':
print "Write Queue Done. Seconds Elapsed:",(time.time()-writestart)
writestart = time.time()
for k,v in q.queue.iteritems():
v.done = False
found = 0
for row in table:
if row[0] == ByteToHex(f.raw[0:11]):
if row[1] == f.data[6:]:
1==1 #no change
db_update(ByteToHex(f.raw[0:11]),f.data[6:])
#print "NC", time.strftime(format), ByteToHex(f.raw[0:11]),f.data[6:]
else:
row[1]=f.data[6:]
db_insert(ByteToHex(f.raw[0:11]),f.data[6:])
#db.query(query)
print " C", time.strftime(format), ByteToHex(f.raw[0:11]),f.data[6:]
found=1
if found==0 and (f.func in ('0C','06')):
table.append([ByteToHex(f.raw[0:11]),f.data[6:]])
db_insert(ByteToHex(f.raw[0:11]),f.data[6:])
print " A",time.strftime(format), ByteToHex(f.raw[0:11]),f.data[6:]
``` |
{
"source": "3top1a/auto-strava",
"score": 3
} |
#### File: 3top1a/auto-strava/strava.py
```python
import requests
from bs4 import BeautifulSoup
import sys
import datetime
number_of_listings = 5
simple = True
# Args
if (len(sys.argv) < 4):
print("Usage : python strava.py jmeno heslo jidelna")
sys.exit(1)
# Start the session
session = requests.Session()
# Create the payload
payload = {'uzivatel' : sys.argv[1],
'heslo' : sys.argv[2],
'zarizeni' : sys.argv[3]
}
# Post the payload to the site to log in
s = session.post("https://www.strava.cz/Strava/Stravnik/prihlaseni", data=payload)
# Navigate to the next page and scrape the data
s = session.get('https://www.strava.cz/Strava/Stravnik/Objednavky')
#Parse
soup = BeautifulSoup(s.text, 'html.parser')
res = soup.find_all(class_="objednavka-obalka objednavka-obalka-jednotne")
def display_simple():
# For the first `number_of_listings` listings
for x in res[:number_of_listings]:
day = x.find("div").find("div").text.split('\n')[1].split('\r')[0].strip()
# Only today
if(int(day.split(' ')[2].strip()[::2]) == int(datetime.datetime.now().strftime("%m")) and int(day.split(' ')[1].strip()[:-1]) == int(datetime.datetime.now().strftime("%d"))):
pass
else:
continue
# Find all the foods
foods = x.find_all(class_="objednavka-jidla-obalka")[0].find_all(class_="objednavka-jidlo-obalka")
for food in foods:
# Find the values
food_name = food.find(class_="objednavka-jidlo-nazev").text
food_type = food.find(class_="objednavka-jidlo-popis").text
food_value = food.find(class_="objednavka-jidlo-zmena").contents[1].contents[3].attrs["value"]
# Remove this if you need to
# This just removes the soup entry
if(food_type == "Polévka"):
continue
# Turn the value from text to markdown-like text
if food_value == "zaskrtnuto":
print((food_name).strip())
def display_table():
# For the first `number_of_listings` listings
for x in res[:number_of_listings]:
# Get the day and print
day = x.find("div").find("div").text.split('\n')[1].split('\r')[0].lstrip()
print(day)
# Find all the foods
foods = x.find_all(class_="objednavka-jidla-obalka")[0].find_all(class_="objednavka-jidlo-obalka")
for food in foods:
# Find the values
food_name = food.find(class_="objednavka-jidlo-nazev").text
food_type = food.find(class_="objednavka-jidlo-popis").text
food_value = food.find(class_="objednavka-jidlo-zmena").contents[1].contents[3].attrs["value"]
# Remove this if you need to
# This just removes the soup entry
if(food_type == "Polévka"):
continue
# Turn the value from text to markdown-like text
if food_value == "zaskrtnuto":
food_value = "[x]"
elif food_value == "nezaskrtnuto":
food_value = "[ ]"
else:
food_value = "[-]"
# Strip in case of leading/trailing spaces and print
print((food_value + " " + food_type + " - " + food_name).lstrip().rstrip())
# Empty line for cleanness
print()
if(simple):
display_simple()
else:
display_table()
``` |
{
"source": "3top1a/m.a.c.c.-server",
"score": 3
} |
#### File: m.a.c.c.-server/Code/HistoryTab.py
```python
import tkinter as tk
class HistoryTab(tk.Frame):
def __init__(self, parent, command, main):
tk.Frame.__init__(self, parent, bg="red")
self.label = tk.Label(self, text=command, anchor="w")
self.label.pack(side="left", fill="both", expand=True)
def click_cb():
main.send_specific(command)
self.button = tk.Button(self, text="send again", command=click_cb, anchor="s")
self.button.pack(side="right", fill="both", expand=True)
```
#### File: Code/OLDER/Main.py
```python
import Agent as agent
import threading
import tkinter as tk
import tkinter.ttk
### SCRIPT STRUCTURE
## AGENT MANAGMENT
## GUI
## CODE THAT ACTUALLY RUNS
##AGENT MANAGMENT
agents = []
def RunAgent():
for i in agents:
i.run()
##GUI
def StartGUI():
window = tk.Tk()
SideLabelVar = tk.StringVar()
window.title("AutoMa Craft Controller")
window.minsize(150,150)
#ADD AGENT BUTTON
ConnectButton = tk.Button(window, text ="Connect", command = RunAgent)
ConnectButton.pack()
SideLabel = tk.Label(window, textvariable = SideLabelVar)
SideLabel.pack()
while(True):
SideLabelVar.set(agents[0].X)
window.update_idletasks()
window.mainloop()
##CODE THAT ACTUALLY RUNS (and maybe works...)
def __init__():
x = agent.Agent()
x.IP = "localhost"
x.Port = 6667
agents.append(x)
x = threading.Thread(target=StartGUI, args=())
x.start()
RunAgent()
__init__()
``` |
{
"source": "3TUSK/khl.py",
"score": 2
} |
#### File: khl.py/khl/api.py
```python
import functools
import inspect
import logging
import re
from typing import Dict, Callable, Tuple
import aiohttp
log = logging.getLogger(__name__)
_RE_ROUTE = re.compile(r'(?<!^)(?=[A-Z])')
class _Req:
def __init__(self, method: str, route: str, params: Dict):
self.method = method
self.route = route
self.params = params
def req(method: str, **http_fields):
def _method(func: Callable):
@functools.wraps(func)
def req_maker(*args, **kwargs) -> _Req:
route = _RE_ROUTE.sub('-', func.__qualname__).lower().replace('.', '/')
# dump args into kwargs
param_names = list(inspect.signature(func).parameters.keys())
for i in range(len(args)):
kwargs[param_names[i]] = args[i]
params = _merge_params(method, http_fields, kwargs)
return _Req(method, route, params)
return req_maker
return _method
def _merge_params(method: str, http_fields: dict, req_args: dict) -> dict:
payload = req_args
payload_key = 'params' # default payload_key: params=
if method == 'POST':
payload_key = 'json' # POST: in default json=
content_type = http_fields.get('headers', {}).get('Content-Type', None)
if content_type == 'multipart/form-data':
payload_key, payload = _build_form_payload(req_args)
http_fields = _remove_content_type(http_fields) # headers of form-data req are delegated to aiohttp
elif content_type is not None:
raise ValueError(f'unrecognized Content-Type {content_type}')
params = {payload_key: payload}
params.update(http_fields)
return params
def _remove_content_type(http_fields: dict) -> dict:
"""in some situation, such as content-type=multipart/form-data,
content-type should be delegated to aiohttp to auto-generate,
thus content-type is required to be removed in http_fields
"""
if http_fields.get('headers', {}).get('Content-Type', None) is not None:
http_fields = http_fields.copy()
http_fields['headers'] = http_fields.get('headers', {}).copy()
del http_fields['headers']['Content-Type']
return http_fields
def _build_form_payload(req_args: dict) -> Tuple[str, aiohttp.FormData]:
data = aiohttp.FormData()
for k, v in req_args.items():
data.add_field(k, v)
return 'data', data
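# --- Worked example of the route derivation above (editor's addition) ---
# req() builds the route from the decorated method's __qualname__: camel-case class
# names are split with '-', everything is lower-cased, and '.' becomes '/'. For the
# classes defined below this gives, e.g.:
#
#   r = Guild.list()                  # r.method == 'GET',  r.route == 'guild/list'
#   r = GuildMute.create(g, u, t)     # r.method == 'POST', r.route == 'guild-mute/create'
#   r = Message.addReaction(m, e)     # r.route == 'message/add-reaction'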
class Guild:
@staticmethod
@req('GET')
def list():
...
@staticmethod
@req('GET')
def view(guild_id):
...
@staticmethod
@req('GET')
def userList(
guild_id,
channel_id,
search,
role_id,
mobile_verified,
active_time,
joined_at,
page,
page_size,
):
...
@staticmethod
@req('POST')
def nickname(
guild_id,
nickname,
user_id,
):
...
@staticmethod
@req('POST')
def leave(guild_id):
...
@staticmethod
@req('POST')
def kickout(guild_id, target_id):
...
class GuildMute:
@staticmethod
@req('GET')
def list(guild_id, return_type):
...
@staticmethod
@req('POST')
def create(
guild_id,
user_id,
type,
):
...
@staticmethod
@req('POST')
def delete(
guild_id,
user_id,
type,
):
...
class Channel:
@staticmethod
@req('GET')
def list(guild_id):
...
@staticmethod
@req('GET')
def view(target_id):
...
@staticmethod
@req('POST')
def create(
guild_id,
parent_id,
name,
type,
limit_amount,
voice_quality,
):
...
@staticmethod
@req('POST')
def delete(channel_id):
...
@staticmethod
@req('POST')
def moveUser(target_id, user_ids):
...
class ChannelRole:
@staticmethod
@req('GET')
def index(channel_id):
...
@staticmethod
@req('POST')
def create(
channel_id,
type,
value,
):
...
@staticmethod
@req('POST')
def update(
channel_id,
type,
value,
allow,
deny,
):
...
@staticmethod
@req('POST')
def delete(
channel_id,
type,
value,
):
...
class Message:
@staticmethod
@req('GET')
def view(msg_id):
...
@staticmethod
@req('GET')
def list(
target_id,
msg_id,
pin,
flag,
):
...
@staticmethod
@req('POST')
def create(
type,
target_id,
content,
quote,
nonce,
temp_target_id,
):
...
@staticmethod
@req('POST')
def update(
msg_id,
content,
quote,
temp_target_id,
):
...
@staticmethod
@req('POST')
def delete(msg_id, ):
...
@staticmethod
@req('GET')
def reactionList(msg_id, emoji):
...
@staticmethod
@req('POST')
def addReaction(msg_id, emoji):
...
@staticmethod
@req('POST')
def deleteReaction(msg_id, emoji, user_id):
...
class UserChat:
@staticmethod
@req('GET')
def list():
...
@staticmethod
@req('GET')
def view(chat_code):
...
@staticmethod
@req('POST')
def create(target_id):
...
@staticmethod
@req('POST')
def delete(chat_code):
...
class DirectMessage:
@staticmethod
@req('GET')
def list(
chat_code,
target_id,
msg_id,
flag,
):
...
@staticmethod
@req('POST')
def create(
type,
target_id,
chat_code,
content,
quote,
nonce,
):
...
@staticmethod
@req('POST')
def update(
msg_id,
content,
quote,
):
...
@staticmethod
@req('POST')
def delete(msg_id):
...
@staticmethod
@req('GET')
def reactionList(msg_id, emoji):
...
@staticmethod
@req('POST')
def addReaction(msg_id, emoji):
...
@staticmethod
@req('POST')
def deleteReaction(msg_id, emoji):
...
class Gateway:
@staticmethod
@req('GET')
def index(compress):
...
class User:
@staticmethod
@req('GET')
def me():
...
@staticmethod
@req('GET')
def view(
user_id,
guild_id,
):
...
class Asset:
@staticmethod
@req('POST', headers={'Content-Type': 'multipart/form-data'})
def create(file):
...
class GuildRole:
@staticmethod
@req('GET')
def list(guild_id):
...
@staticmethod
@req('POST')
def create(
name,
guild_id,
):
...
@staticmethod
@req('POST')
def update(
guild_id,
role_id,
hoist,
mentionable,
permissions,
color,
name,
):
...
@staticmethod
@req('POST')
def delete(
guild_id,
role_id,
):
...
@staticmethod
@req('POST')
def grant(
guild_id,
user_id,
role_id,
):
...
@staticmethod
@req('POST')
def revoke(
guild_id,
user_id,
role_id,
):
...
class Intimacy:
@staticmethod
@req('GET')
def index(user_id):
...
@staticmethod
@req('POST')
def update(
user_id,
score,
social_info,
img_id,
):
...
class GuildEmoji:
@staticmethod
@req('GET')
def list(guild_id):
...
@staticmethod
@req('POST', headers={'Content-Type': 'multipart/form-data'})
def create(
name,
guild_id,
emoji,
):
...
@staticmethod
@req('POST')
def update(
name,
id,
):
...
@staticmethod
@req('POST')
def delete(id):
...
class Invite:
@staticmethod
@req('GET')
def list(
guild_id,
channel_id,
):
...
@staticmethod
@req('POST')
def create(
guild_id,
channel_id,
):
...
@staticmethod
@req('POST')
def delete(
guild_id,
channel_id,
url_code,
):
...
@req('GET')
def game():
...
class Game:
@staticmethod
@req('POST')
def create(
name,
process_name,
icon,
):
...
@staticmethod
@req('POST')
def update(
id,
name,
icon,
):
...
@staticmethod
@req('POST')
def delete(id):
...
@staticmethod
@req('POST')
def activity(
id,
data_type,
):
...
@staticmethod
@req('POST')
def deleteActivity():
...
```
#### File: khl/card/card.py
```python
from typing import List, Union, Tuple, Optional, Dict
from .color import Color, make_color
from .interface import Types, _Common
from .module import _Module
class Card(_Common):
_type = 'card'
_color: Optional[Color]
_modules: List[_Module]
def __init__(self,
*modules: _Module,
color: Union[Color, Tuple[int, int, int], str, None] = None,
theme: Union[Types.Theme, str, None] = None,
size: Union[Types.Size, str, None] = Types.Size.LG):
self._modules = list(modules)
self._color = make_color(color)
super().__init__(theme, size)
def append(self, module: _Module):
self._modules.append(module)
    def pop(self, index: int = None) -> _Module:
        return self._modules.pop(index) if index is not None else self._modules.pop()
@property
def color(self) -> Optional[Color]:
return self._color
@color.setter
def color(self, value: Union[Color, Tuple[int, int, int], str]):
self._color = make_color(value)
@property
def theme(self) -> Types.Theme:
return self._theme
@theme.setter
def theme(self, value: Union[Types.Theme, str]):
self._theme = Types.Theme(value)
@property
def size(self) -> Types.Size:
return self._size
@size.setter
def size(self, value: Union[Types.Size, str]):
self._size = Types.Size(value)
@property
def _repr(self) -> Dict:
return self._gen_dict(['type', 'theme', 'size', 'color', 'modules'])
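    # --- Illustrative usage sketch (editor's addition, not from the original file) ---
    #   card = Card(theme='warning', size='lg')
    #   card.theme = Types.Theme.DANGER
    #   card._repr   # -> {'type': 'card', 'theme': 'danger', 'size': 'lg', 'modules': []}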
```
#### File: khl/card/interface.py
```python
from abc import ABC, abstractmethod
from enum import Enum
from typing import Union, Dict, List
class Representable(ABC):
@property
@abstractmethod
def _repr(self) -> Union[str, Dict, List]:
"""cast class object to JSON serializable representation"""
raise NotImplementedError
class _TypeEnum(Enum):
"""base class of all types(involved in card components)
REMIND: TypeEnum implements _repr but not inherits from Representable, since
" TypeError: metaclass conflict:
the metaclass of a derived class must be a (non-strict) subclass of the metaclasses of all its bases "
"""
@property
def _repr(self):
return self.value
class Types:
class Theme(_TypeEnum):
NA = ''
PRIMARY = 'primary'
SECONDARY = 'secondary'
SUCCESS = 'success'
DANGER = 'danger'
WARNING = 'warning'
INFO = 'info'
class Size(_TypeEnum):
NA = ''
XS = 'xs'
SM = 'sm'
MD = 'md'
LG = 'lg'
class Text(_TypeEnum):
PLAIN = 'plain-text'
KMD = 'kmarkdown'
class Click(_TypeEnum):
LINK = 'link'
RETURN_VAL = 'return-val'
class SectionMode(_TypeEnum):
LEFT = 'left'
RIGHT = 'right'
class File(_TypeEnum):
FILE = 'file'
AUDIO = 'audio'
VIDEO = 'video'
class CountdownMode(_TypeEnum):
DAY = 'day'
HOUR = 'hour'
SECOND = 'second'
def _get_repr(item) -> Union[str, Dict, List]:
"""a helper function for serialization"""
return [_get_repr(i) for i in item] if isinstance(item, list) else getattr(item, '_repr', item)
class _Common(Representable, ABC):
_type: str
_theme: Types.Theme
_size: Types.Size
def __init__(self, theme: Union[Types.Theme, str, None], size: Union[Types.Size, str, None]):
super().__init__()
self._theme = Types.Theme(theme) if theme else None
self._size = Types.Size(size) if size else None
def _gen_dict(self, field_list: List) -> Dict:
d = {}
for k in field_list:
# get repr of k/_k(private field with exported key)
obj = _get_repr(getattr(self, k, None)) or _get_repr(getattr(self, '_' + k, None))
if obj is not None:
d[k] = obj
return d
class _Element(_Common, ABC):
...
class _Module(_Common, ABC):
...
class _Struct(_Common, ABC):
...
```
#### File: khl/task/manager.py
```python
import logging
from apscheduler.events import EVENT_JOB_ERROR
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from apscheduler.triggers.cron import CronTrigger
from apscheduler.triggers.date import DateTrigger
from apscheduler.triggers.interval import IntervalTrigger
from khl import AsyncRunnable
log = logging.getLogger(__name__)
class TaskManager(AsyncRunnable):
_scheduler: AsyncIOScheduler
def __init__(self):
self._scheduler = AsyncIOScheduler()
def add_interval(self,
weeks=0,
days=0,
hours=0,
minutes=0,
seconds=0,
start_date=None,
end_date=None,
timezone=None,
jitter=None):
"""decorator, add a interval type task"""
trigger = IntervalTrigger(weeks=weeks,
days=days,
hours=hours,
minutes=minutes,
seconds=seconds,
start_date=start_date,
end_date=end_date,
timezone=timezone,
jitter=jitter)
return lambda func: self._scheduler.add_job(func, trigger)
def add_cron(self,
year=None,
month=None,
day=None,
week=None,
day_of_week=None,
hour=None,
minute=None,
second=None,
start_date=None,
end_date=None,
timezone=None,
jitter=None):
"""decorator, add a cron type task"""
trigger = CronTrigger(year=year,
month=month,
day=day,
week=week,
day_of_week=day_of_week,
hour=hour,
minute=minute,
second=second,
start_date=start_date,
end_date=end_date,
timezone=timezone,
jitter=jitter)
return lambda func: self._scheduler.add_job(func, trigger)
def add_date(self, run_date=None, timezone=None):
"""decorator, add a date type task"""
trigger = DateTrigger(run_date=run_date, timezone=timezone)
return lambda func: self._scheduler.add_job(func, trigger)
async def start(self):
self._scheduler.configure({'event_loop': self.loop}, '')
self._scheduler.add_listener(lambda e: log.exception(f'error raised during task', exc_info=e.exception),
EVENT_JOB_ERROR)
self._scheduler.start() # reminder: this is not blocking
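    # --- Illustrative usage sketch (editor's addition, not from the original file) ---
    #   tasks = TaskManager()
    #
    #   @tasks.add_interval(minutes=5)
    #   async def heartbeat():
    #       print('still running')
    #
    # Note that each decorator returns scheduler.add_job(...), so the decorated name is
    # rebound to the apscheduler Job object. start() is awaited elsewhere (e.g. by the
    # bot), after which jobs fire on the running event loop.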
``` |
{
"source": "3upperm2n/block_trace_analyzer",
"score": 2
} |
#### File: block_trace_analyzer/mem_mem/read_trace.py
```python
import pandas as pd
import numpy as np
import operator
class transfer():
def __init__(self, start=0.0,end=0.0):
self.start_time_ms = start
self.end_time_ms = end
class KernConfig():
def __init__(self,
grid_x = 0, grid_y = 0, grid_z = 0,
blk_x = 0, blk_y = 0, blk_z = 0,
regs_per_thread = 0, sm_per_block = 0):
self.grid_x = grid_x
self.grid_y = grid_y
self.grid_z = grid_z
self.blk_x = blk_x
self.blk_y = blk_y
self.blk_z = blk_z
self.regs_per_thread = regs_per_thread
self.sm_per_block = sm_per_block
class streams():
def __init__(self):
self.h2d = []
self.d2h = []
self.kernel = []
self.kernel_info = []
def time_coef_ms(df_trace):
rows, cols = df_trace.shape
start_unit = df_trace['Start'].iloc[0]
duration_unit = df_trace['Duration'].iloc[0]
start_coef = 1.0
if start_unit == 's':
start_coef = 1e3
if start_unit == 'us':
start_coef = 1e-3
duration_coef = 1.0
if duration_unit == 's':
duration_coef = 1e3
if duration_unit == 'us':
duration_coef = 1e-3
return start_coef, duration_coef
def sm_coef_bytes(df_trace):
ssm_unit = df_trace['Static SMem'].iloc[0]
dsm_unit = df_trace['Dynamic SMem'].iloc[0]
ssm_coef = 1.0
if ssm_unit == 'KB':
ssm_coef = 1e3
if ssm_unit == 'MB':
ssm_coef = 1e6
dsm_coef = 1.0
if dsm_unit == 'KB':
dsm_coef = 1e3
if dsm_unit == 'MB':
dsm_coef = 1e6
return ssm_coef, dsm_coef
# read data for the current row
def read_row(df_row, start_coef_ms, duration_coef_ms, ssm_coef = None, dsm_coef = None):
start_time_ms = float(df_row['Start']) * start_coef_ms
end_time_ms = start_time_ms + float(df_row['Duration']) * duration_coef_ms
stream_id = int(df_row['Stream'])
api_name = df_row['Name'].to_string()
kernelinfo = KernConfig()
if "DtoH" in api_name:
api_type = 'd2h'
elif "HtoD" in api_name:
api_type = 'h2d'
else:
api_type = 'kernel'
# read kernel and update the info
grid_x = float(df_row['Grid X'])
grid_y = float(df_row['Grid Y'])
grid_z = float(df_row['Grid Z'])
blk_x = float(df_row['Block X'])
blk_y = float(df_row['Block Y'])
blk_z = float(df_row['Block Z'])
regs_per_thread = float(df_row['Registers Per Thread'])
static_sm = float(df_row['Static SMem'])
dynamic_sm = float(df_row['Dynamic SMem'])
sm_per_block = static_sm * ssm_coef + dynamic_sm * dsm_coef
kernelinfo.blk_x = blk_x
kernelinfo.blk_y = blk_y
kernelinfo.blk_z = blk_z
kernelinfo.grid_x = grid_x
kernelinfo.grid_y = grid_y
kernelinfo.grid_z = grid_z
kernelinfo.regs_per_thread = regs_per_thread
kernelinfo.sm_per_block = sm_per_block
return stream_id, api_type, start_time_ms, end_time_ms, kernelinfo
def trace2dataframe(trace_file):
"""
read the trace file into dataframe using pandas
"""
# There are max 17 columns in the output csv
col_name = ["Start","Duration","Grid X","Grid Y","Grid Z","Block X","Block Y","Block Z","Registers Per Thread","Static SMem","Dynamic SMem","Size","Throughput","Device","Context","Stream","Name"]
df_trace = pd.read_csv(trace_file, names=col_name, engine='python')
rows_to_skip = 0
# find out the number of rows to skip
for index, row in df_trace.iterrows():
if row['Start'] == 'Start':
rows_to_skip = index
break
# read the input csv again
df_trace = pd.read_csv(trace_file, skiprows=rows_to_skip)
return df_trace
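# --- Illustrative usage (editor's addition) ---
# The trace file is assumed to be an nvprof GPU trace exported as CSV, e.g.:
#   nvprof --print-gpu-trace --csv --log-file trace.csv ./app
#   df = trace2dataframe('trace.csv')
#   streamList = get_stream_info(df)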
def get_stream_info(df_trace):
"""
    read dataframe into stream list which contains the h2d/d2h/kernel start and end time in ms.
"""
streamList = []
# read the number of unique streams
stream_id_list = df_trace['Stream'].unique()
stream_id_list = stream_id_list[~np.isnan(stream_id_list)] # remove nan
num_streams = len(stream_id_list)
for i in xrange(num_streams):
streamList.append(streams())
start_coef, duration_coef = time_coef_ms(df_trace)
ssm_coef, dsm_coef = sm_coef_bytes(df_trace)
# read row by row
for rowID in xrange(1, df_trace.shape[0]):
# extract info from the current row
stream_id, api_type, start_time_ms, end_time_ms, kerninfo = read_row(df_trace.iloc[[rowID]], start_coef, duration_coef, ssm_coef, dsm_coef)
# find out index of the stream
sid, = np.where(stream_id_list==stream_id)
# add the start/end time for different api calls
if api_type == 'h2d':
streamList[sid].h2d.append(transfer(start_time_ms, end_time_ms))
elif api_type == 'd2h':
streamList[sid].d2h.append(transfer(start_time_ms, end_time_ms))
elif api_type == 'kernel':
streamList[sid].kernel.append(transfer(start_time_ms, end_time_ms))
streamList[sid].kernel_info.append(kerninfo) # add the kernel info
else:
print "Unknown. Error."
return streamList
def check_kernel_ovlprate(trace_file):
"""
Read the trace file and figure out the overlapping rate for the two kernel execution.
"""
# read data from the trace file
df_trace = trace2dataframe(trace_file)
# extract stream info
streamList = get_stream_info(df_trace)
# check kernel overlapping
preK_start = streamList[0].kernel[0].start_time_ms
preK_end = streamList[0].kernel[0].end_time_ms
curK_start = streamList[1].kernel[0].start_time_ms
curK_end = streamList[1].kernel[0].end_time_ms
preK_runtime = preK_end - preK_start
curK_runtime = curK_end - curK_start
ovlp_duration = preK_end - curK_start
ovlp_ratio = ovlp_duration / preK_runtime
# if curK_start >= preK_start and curK_start <= preK_end:
# print('concurrent kernel execution :\n\t stream-prev {} ms \n\t stream-cur {} ms'
# '\n\t overlapping {} ms \n\t ovlp ratio (based on prev stream) {}%'\
# .format(preK_runtime, curK_runtime, ovlp_duration, ovlp_ratio))
cke_time_ms = curK_end - preK_start
return ovlp_ratio, cke_time_ms
def get_kernel_time_from_trace(df_trace):
"""
Read kernel time from trace.
"""
# read the number of unique streams
stream_id_list = df_trace['Stream'].unique()
stream_id_list = stream_id_list[~np.isnan(stream_id_list)] # remove nan
start_coef, duration_coef = time_coef_ms(df_trace)
ssm_coef, dsm_coef = sm_coef_bytes(df_trace)
kernel_time_dd = {}
# read row by row
for rowID in xrange(1, df_trace.shape[0]):
# extract info from the current row
stream_id, api_type, start_time_ms, end_time_ms, _ = read_row(df_trace.iloc[[rowID]], start_coef, duration_coef)
# find out index of the stream
sid, = np.where(stream_id_list == stream_id)
sid = int(sid)
# find out the duration for kernel
if api_type == 'kernel':
duration = end_time_ms - start_time_ms
kernel_time_dd[sid] = duration
return kernel_time_dd
def kernel_slowdown(s1_kernel_dd, s2_kernel_dd):
slow_down_ratio_list = []
for key, value in s2_kernel_dd.items():
v_s1 = s1_kernel_dd[0]
slow_down_ratio_list.append(value / float(v_s1))
return slow_down_ratio_list
``` |
{
"source": "3upperm2n/handson_tensorflow",
"score": 3
} |
#### File: handson_tensorflow/06_dnn_plain_tf_api/train.py
```python
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/")
n_epochs = 400
batch_size = 50
learning_rate = 0.01
n_inputs = 28 * 28
n_hidden1 = 300
n_hidden2 = 200
n_outputs = 10
X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")
def neuron_layer(X, n_neurons, name, activation=None):
with tf.name_scope(name):
n_inputs = int(X.get_shape()[1])
stddev = 2 / np.sqrt(n_inputs)
init = tf.truncated_normal((n_inputs, n_neurons), stddev=stddev)
W = tf.Variable(init, name="weights")
b = tf.Variable(tf.zeros([n_neurons]), name="biases")
z = tf.matmul(X,W) + b
if activation == "relu":
return tf.nn.relu(z)
else:
return z
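# --- Editor's note (sketch, assuming the TF 1.x API used in this script) ---
# neuron_layer() above is equivalent to the built-in fully connected layer, e.g.:
#   hidden1 = tf.layers.dense(X, n_hidden1, activation=tf.nn.relu, name="hidden1")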
with tf.name_scope("dnn"):
hidden1 = neuron_layer(X, n_hidden1, "hidden1", activation="relu")
hidden2 = neuron_layer(hidden1, n_hidden2, "hidden2", activation="relu")
logits = neuron_layer(hidden2, n_outputs, "outputs")
with tf.name_scope("loss"):
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(xentropy, name="loss")
with tf.name_scope("train"):
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
training_op = optimizer.minimize(loss)
with tf.name_scope("eval"):
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
for iteration in range(mnist.train.num_examples // batch_size):
X_batch, y_batch = mnist.train.next_batch(batch_size)
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
acc_train = accuracy.eval(feed_dict={X:X_batch,y:y_batch})
acc_test = accuracy.eval(feed_dict={X:mnist.test.images,y:mnist.test.labels})
print(epoch, "Train accuracy:", acc_train, "Test accuracy:", acc_test)
save_path = saver.save(sess, "./my_model_final.ckpt")
``` |
{
"source": "3upperm2n/trans_kernel_model",
"score": 2
} |
#### File: trans_kernel_model/mem_mem/avgblk.py
```python
import pandas as pd
import numpy as np
from math import *
import copy # deep copy objects
from model_param import *
#------------------------------------------------------------------------------
# Figure out when to launch another block for current kernel
#------------------------------------------------------------------------------
def Search_block_start(df_sm_trace, current_kernel_id):
"""
Read the sm_trace table, find out all the active blocks on current sm,
look for the earliest start
"""
df_active = df_sm_trace.loc[df_sm_trace['active'] == 1]
if not df_active.empty:
blk2start = df_active['block_start'].max() # find the closest block
df_active_current_kernel = \
df_active.loc[df_active['kernel_id'] == current_kernel_id]
if not df_active_current_kernel.empty:
# find the closest blk for current kernel
blk2start = df_active_current_kernel['block_start'].max()
return blk2start
else:
# when, on current sm, all the blocks are done/de-activated
# warning!!!
return 0.0
#------------------------------------------------------------------------------
# Figure out which sm to start for current kernel
#------------------------------------------------------------------------------
def find_sm2start(sm_trace_list, kern_start):
sm_num = len(sm_trace_list)
AfterPrevKern = False
empSM = 0
# case 1) there are no trace on each sm
for df_sm in sm_trace_list:
if df_sm.empty:
empSM = empSM + 1 # do nothing
if empSM == sm_num:
return 0, AfterPrevKern
# case 2) there are traces: by the time where the kernel starts,
# all the blocks are done already, use sm 0
max_t = 0
for df_sm in sm_trace_list:
cur_max = df_sm.block_end.max()
if cur_max > max_t:
max_t = cur_max
if max_t <= kern_start:
AfterPrevKern = True
return 0, AfterPrevKern
else:
# case 3) : check currently active blocks
df_sm = sm_trace_list[0]
df_activeblk = df_sm.loc[df_sm['active'] == 1]
min_t = df_activeblk.block_end.min()
target_sm = 0
for i in range(1,sm_num):
df_sm = sm_trace_list[i]
df_activeblk = df_sm.loc[df_sm['active'] == 1]
sm_blk_min = df_activeblk.block_end.min()
if sm_blk_min < min_t:
min_t = sm_blk_min
target_sm = i
return target_sm, AfterPrevKern
#------------------------------------------------------------------------------
# model cke function
#------------------------------------------------------------------------------
def cke_model(Gpu, sms_, sm_trace_, kernels_):
# deep copy the input
# we need to return the resource and trace for each sm after modeling
sms = copy.deepcopy(sms_)
sm_trace = copy.deepcopy(sm_trace_)
kernels = copy.deepcopy(kernels_)
kernel_num = len(kernels)
sm_num = Gpu.sm_num
# go through each kernel
for i in range(kernel_num):
kern = kernels[i] # schedule current kernel on the device
kernel_blocks = int(kern.gridDim) # total block for current kern
kern_start = kern.start_ms
# 1) find the which sm to start
# 2) compute whether kernel_start happens before previous kernel ends or not
sm2start, AfterPrevKern = find_sm2start(sm_trace, kern_start)
#---------------------------------------------------------
# Run after previous kernel
#---------------------------------------------------------
if AfterPrevKern:
# deactivate all the previous active blocks
myid = 0
for df_sm in sm_trace:
df_activeblk = df_sm.loc[df_sm['active'] == 1]
# find the row index of active blocks
for index, row in df_activeblk.iterrows():
sm_trace[myid].loc[index]['active'] = 0 # deactivate
sms[myid].Rm(kern) # free the block resource
myid = myid + 1
#---------------------------------------------------------
# Continue current kernel
#---------------------------------------------------------
for bid in range(kernel_blocks):
sm_id = (bid + sm2start) % sm_num
to_allocate_another_block = check_sm_resource(sms[sm_id], kern)
#----------------------------------
# there is enough resource to host the current block
#----------------------------------
if to_allocate_another_block == True:
# deduct resources on the current sm
sms[sm_id].Allocate_block(kern)
#---------------------------------------
# register the block in the trace table
#---------------------------------------
block_start = None
offset = 0.0
                # Note: only the 1st block will adjust the kern_start
if AfterPrevKern and bid < sm_num:
offset = kern_start
# if current sm trace table is empty, start from kernel_start
# else find the blocks that will end soon, and retire them
if sm_trace[sm_id].empty:
block_start = kern_start # (fixed!)
else:
# read the sm_trace table, find out all the active blocks
# on current sm, look for the earliest start
block_start = Search_block_start(sm_trace[sm_id], i) + offset
#block_end = block_start + avg_blk_time_list[i]
block_end = block_start + kern.avg_blk_time
# add the current block info to the current sm
sm_trace[sm_id] = sm_trace[sm_id].append({'sm_id': sm_id,
'block_id': bid,
'block_start': block_start,
'block_end' : block_end,
'batch_id': sms[sm_id].batch,
'kernel_id': i,
'active': 1}, ignore_index=True)
#-------------------------------------------
# There is no more resources to host the blk, consider SM is full now
# we need to (1) decide how many blks to retire
# (2) when to start current blk
if to_allocate_another_block == False:
# find out the active blocks on current sm
df_sm = sm_trace[sm_id]
df_activeblk = df_sm.loc[df_sm['active'] == 1]
df_loc = df_activeblk.copy(deep=True)
cur_activeblk_num = df_activeblk.shape[0]
for ii in range(cur_activeblk_num):
# find out blocks ending soon
blkend_min = df_loc['block_end'].min()
df_blk2end = df_loc.loc[df_loc['block_end'] == blkend_min]
# retire the blocks
for index, row in df_blk2end.iterrows():
sm_trace[sm_id].loc[index]['active'] = 0
sms[sm_id].Rm(kern) # free the block resource
# enough to allocate a current block
if check_sm_resource(sms[sm_id], kern):
sms[sm_id].Allocate_block(kern)
# when prev blks end, current block starts
block_start = blkend_min
                        # add avgblktime for current kernel
#block_end = block_start + avg_blk_time_list[i]
block_end = block_start + kern.avg_blk_time
break # jump out of the loop
else:
                        # not enough to allocate another block, remove
# Warning: ??? I may just pass
#df_loc = df_sm.loc[df_sm['active'] == 1]
pass
# update the trace table
sm_trace[sm_id] = sm_trace[sm_id].append({'sm_id': sm_id,
'block_id': bid,
'block_start': block_start,
'block_end' : block_end,
'batch_id': sms[sm_id].batch,
'kernel_id': i,
'active': 1}, ignore_index=True)
# end of running blocks for current kernel
#end of kernel iteration
# return the updated sm resource and trace table
return sms, sm_trace
#------------------------------------------------------------------------------
# Find kern time on current sm
#------------------------------------------------------------------------------
def find_kernel_time(df_sm_trace, kern_id):
df_kern = df_sm_trace.loc[df_sm_trace.kernel_id == kern_id]
# min of start time, max of end time
return df_kern.block_start.min(), df_kern.block_end.max()
#------------------------------------------------------------------------------
# Find out kernel runtime by reading the traces from each SM
#------------------------------------------------------------------------------
def Get_KernTime(sm_trace):
kern_dd = {}
kernel_unique_ls = []
for df_sm in sm_trace:
kids = df_sm.kernel_id.unique() # find out all the kernels on current sm
#
# case 1: given the empty dd
if not kern_dd:
for kern_id in kids: # find kernel time for each unique kernel
startT, endT = find_kernel_time(df_sm, kern_id)
kern_dd[kern_id] = [startT, endT]
kernel_unique_ls.append(kern_id)
# case 2: the dd has values
if kern_dd:
for kern_id in kids: # find kernel time for each unique kernel
startT, endT = find_kernel_time(df_sm, kern_id)
if kern_id in kernel_unique_ls:
# compare the min and max for start and end, update
prev_start = kern_dd[kern_id][0]
prev_end = kern_dd[kern_id][1]
cur_start, cur_end = find_kernel_time(df_sm, kern_id)
update = 0
if cur_start < prev_start:
prev_start = cur_start # update
update = update + 1
if cur_end > prev_end:
prev_end = cur_end # update
update = update + 1
if update > 0:
kern_dd[kern_id] = [prev_start, prev_end]
else:
kern_dd[kern_id] = [startT, endT] # add to dd
kernel_unique_ls.append(kern_id)
return kern_dd
#------------------------------------------------------------------------------
# run a single gpu kernel one at a time
#------------------------------------------------------------------------------
def run_gpu_kernel(Gpu, sms_, sm_trace_, kern, kern_id):
sms = copy.deepcopy(sms_)
sm_trace = copy.deepcopy(sm_trace_)
sm_num = Gpu.sm_num
kernel_blocks = int(kern.gridDim) # total block for current kern
kern_start = kern.start_ms
# 1) find the which sm to start
# 2) compute whether kernel_start happens before previous kernel ends or not
sm2start, AfterPrevKern = find_sm2start(sm_trace, kern_start)
#---------------------------------------------------------
# Run after previous kernel
#---------------------------------------------------------
if AfterPrevKern:
# deactivate all the previous active blocks
for df_sm in sm_trace:
df_activeblk = df_sm.loc[df_sm['active'] == 1]
if not df_activeblk.empty:
myid = int(df_activeblk.iloc[0]['sm_id'])
for index, row in df_activeblk.iterrows(): # find the row index of active blocks
sm_trace[myid].loc[index]['active'] = 0 # deactivate
sms[myid].Rm(kern) # free the block resource
#---------------------------------------------------------
# Continue current kernel
#---------------------------------------------------------
for bid in range(kernel_blocks):
sm_id = (bid + sm2start) % sm_num
to_allocate_another_block = check_sm_resource(sms[sm_id], kern)
#----------------------------------
# there is enough resource to host the current block
#----------------------------------
if to_allocate_another_block == True:
sms[sm_id].Allocate_block(kern) # deduct resources on the current sm
#---------------------------------------
# register the block in the trace table
#---------------------------------------
block_start = None
offset = 0.0
            if AfterPrevKern and bid < sm_num: # Note: only the 1st block will adjust the kern_start
offset = kern_start
# if current sm trace table is empty, start from kern_start
# else find the blocks that will end soon, and retire them
if sm_trace[sm_id].empty:
block_start = kern_start
else:
# read the sm_trace table, find out all the active blocks on current sm, look for the earliest start
block_start = Search_block_start(sm_trace[sm_id], kern_id) + offset
block_end = block_start + kern.avg_blk_time
# add the current block info to the current sm
sm_trace[sm_id] = sm_trace[sm_id].append({'sm_id': sm_id,
'block_id': bid,
'block_start': block_start, # add the kern stat
'block_end' : block_end,
'batch_id': sms[sm_id].batch,
'kernel_id': kern_id,
'active': 1}, ignore_index=True)
#-------------------------------------------
# There is no more resources to host the blk, consider SM is full now
# we need to (1) decide how many blks to retire (2) when to start current blk
if to_allocate_another_block == False:
# find out the active blocks on current sm
df_sm = sm_trace[sm_id]
df_activeblk = df_sm.loc[df_sm['active'] == 1]
df_loc = df_activeblk.copy(deep=True)
cur_activeblk_num = df_activeblk.shape[0]
for ii in range(cur_activeblk_num):
# find out blocks ending soon
blkend_min = df_loc['block_end'].min()
df_blk2end = df_loc.loc[df_loc['block_end'] == blkend_min]
# retire the blocks
for index, row in df_blk2end.iterrows():
sm_trace[sm_id].loc[index]['active'] = 0
sms[sm_id].Rm(kern) # free the block resource
# enough to allocate a current block
if check_sm_resource(sms[sm_id], kern):
sms[sm_id].Allocate_block(kern)
block_start = blkend_min # when prev blks end, current block starts
block_end = block_start + kern.avg_blk_time
break # jump out of the loop
else:
                    # not enough to allocate another block, remove
df_loc = df_sm.loc[df_sm['active'] == 1]
# update the trace table
sm_trace[sm_id] = sm_trace[sm_id].append({'sm_id': sm_id,
'block_id': bid,
'block_start': block_start,
'block_end' : block_end,
'batch_id': sms[sm_id].batch,
'kernel_id': kern_id,
'active': 1}, ignore_index=True)
# return the updated sm resource and trace table
return sms, sm_trace
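
#------------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original module)
#------------------------------------------------------------------------------
# Get_KernTime() only needs per-SM trace tables with 'kernel_id', 'block_start'
# and 'block_end' columns, so a tiny hand-built trace is enough to show how the
# per-kernel start/end times are merged across SMs.
if __name__ == '__main__':
    sm0 = pd.DataFrame({'kernel_id': [0, 0],
                        'block_start': [0.0, 1.0],
                        'block_end': [2.0, 3.0]})
    sm1 = pd.DataFrame({'kernel_id': [0],
                        'block_start': [0.5],
                        'block_end': [4.0]})
    # kernel 0 spans 0.0 (earliest block start) to 4.0 (latest block end)
    print(Get_KernTime([sm0, sm1]))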
```
#### File: trans_kernel_model/mem_mem/cke.py
```python
import pandas as pd
import numpy as np
from math import *
import sys
from model_param import *
import avgblk
import copy
#------------------------------------------------------------------------------
# select the first sleep call
#------------------------------------------------------------------------------
def Pick_first_in_sleep(df_all_api):
df_sleep = df_all_api.loc[df_all_api.status == 'sleep']
# when apis are 'done' or 'wake', there are no inactive api
if df_sleep.shape[0] == 0:
return None
else:
count = 0
target_rowid = 0
for index, row in df_sleep.iterrows():
if count == 0: # 1st row
target_rowid = index
break
target_rowid = int(target_rowid)
return target_rowid
#------------------------------------------------------------------------------
# select the first wake call
#------------------------------------------------------------------------------
def Pick_first_in_wake(df_all_api):
df_wake = df_all_api.loc[df_all_api.status == 'wake']
    # when no api is in 'wake' status, there is nothing to pick
if df_wake.shape[0] == 0:
return None
else:
count = 0
target_rowid = 0
for index, row in df_wake.iterrows():
if count == 0: # 1st row
target_rowid = index
break
target_rowid = int(target_rowid)
return target_rowid
#------------------------------------------------------------------------------
# Check whether all the api calls are done
#------------------------------------------------------------------------------
def AllDone(df_all):
    df_haswork = df_all.loc[df_all.status != 'done']
if df_haswork.empty:
return True
else:
return False
#------------------------------------------------------------------------------
# select two api calls to start prediction
#------------------------------------------------------------------------------
def PickTwo(df_all_api):
df_all = df_all_api.copy(deep=True)
# case 1) : at the beginning, all calls are sleep, select the first two
    df_ActiveAndDone = df_all.loc[df_all.status != 'sleep']
all_num = df_all.shape[0]
wake_num = df_all.loc[df_all.status == 'wake'].shape[0]
done_num = df_all.loc[df_all.status == 'done'].shape[0]
sleep_num = df_all.loc[df_all.status == 'sleep'].shape[0]
# case 1) at the very beginning, or there are only sleep and done (no wake api)
if df_ActiveAndDone.empty or wake_num == 0:
# pick the 1st sleep call and wake up
r1 = Pick_first_in_sleep(df_all)
#print('r1: {} '.format(r1))
if r1 is not None:
df_all = SetWake(df_all, r1)
# pick another
r2 = Pick_first_in_sleep(df_all)
if r2 is not None:
df_all = SetWake(df_all, r2)
else:
# case 3) the last api (last sleep)
if sleep_num == 0 and wake_num == 1:
r1 = None
r2 = None
else:
# case 2): during iteration, select the 1st wake, wake up 2nd in sleep
# there is only sleep one
r1 = Pick_first_in_wake(df_all)
r2 = Pick_first_in_sleep(df_all)
if r2 is not None: df_all = SetWake(df_all, r2)
print('row:{} row:{}'.format(r1, r2))
return df_all, r1, r2
#------------------------------------------------------------------------------
# Check whether two api calls are from the same stream
#------------------------------------------------------------------------------
def Check_stream_id(df_all, r1, r2):
r1_stream = df_all['stream_id'][r1]
r2_stream = df_all['stream_id'][r2]
if r1_stream == r2_stream:
return True
else:
return False
#------------------------------------------------------------------------------
# Check overlapping
#------------------------------------------------------------------------------
def Check_ovlp(df_all_api, first, second):
r1 = first
r2 = second
curapi_start = df_all_api.loc[r1]['start']
curapi_end = df_all_api.loc[r1]['end']
nextapi_start = df_all_api.loc[r2]['start']
nextapi_end = df_all_api.loc[r2]['end']
#print('{} {} {}'.format(curapi_start, nextapi_start, curapi_end))
ovlp = False
if curapi_start <= nextapi_start < curapi_end:
ovlp = True
return ovlp
#------------------------------------------------------------------------------
# Find the concurrency starting pos and update the rows up to that point
#------------------------------------------------------------------------------
def Update_before_ovlp(df_all, r1, r2):
df_all_api = df_all.copy(deep=True)
#print('{} {}'.format(r1, r2))
curapi_start = df_all_api.loc[r1]['start']
curapi_end = df_all_api.loc[r1]['end']
curapi = df_all_api.loc[r1]['api_type']
curapi_stream = df_all_api.loc[r1]['stream_id']
nextapi_start = df_all_api.loc[r2]['start']
nextapi_end = df_all_api.loc[r2]['end']
nextapi = df_all_api.loc[r2]['api_type']
nextapi_stream = df_all_api.loc[r2]['stream_id']
no_ovlap_time = nextapi_start - curapi_start
#print('cur start {} next start {}'.format(curapi_start, nextapi_start))
#print no_ovlap_time
#----------------------------
# update r1 with current pos
#----------------------------
df_all_api = UpdateCell(df_all_api, r1, 'current_pos', nextapi_start)
# the call type for r1 is h2d or d2h
if curapi in ['h2d', 'd2h'] :
curr_trans = df_all_api.loc[r1]['bw'] * no_ovlap_time # full bw since no ovlp
curr_tot = df_all_api.loc[r1]['size_kb']
curr_left = curr_tot - curr_trans
# update the bytes_done
df_all_api = UpdateCell(df_all_api, r1, 'bytes_done', curr_trans)
df_all_api = UpdateCell(df_all_api, r1, 'bytes_left', curr_left)
#----------------------------
# update r2 with current pos
#----------------------------
df_all_api = UpdateCell(df_all_api, r2, 'current_pos', nextapi_start)
return df_all_api
#------------------------------------------------------------------------------
# Predict the end time when there is no conflict.
#------------------------------------------------------------------------------
def Predict_noConflict(df_all, first, second):
df_all_api = df_all.copy(deep=True)
target_rows = [first, second]
for r1 in target_rows: # work on the target row
r1_type = df_all_api.loc[r1]['api_type']
cur_pos = df_all_api.loc[r1]['current_pos']
# update the predicted end time based on the api type
if r1_type in ['h2d', 'd2h']:
# check the bytes left and use bw to predict the end time
bw = df_all_api.loc[r1]['bw']
bytesleft = df_all_api.loc[r1]['bytes_left']
pred_time_left = bytesleft / bw
df_all_api = UpdateCell(df_all_api, r1, 'pred_end', cur_pos + pred_time_left)
elif r1_type == 'kern':
# no overlapping, no change to kernel time: curpos + kernel_runtime
kernel_time = df_all_api.loc[r1]['end'] - df_all_api.loc[r1]['start']
df_all_api = UpdateCell(df_all_api, r1, 'pred_end', kernel_time + cur_pos)
else:
sys.stderr.write('Unknown API call.')
return df_all_api
#------------------------------------------------------------------------------
# Predict the end time when there is concurrency for data transfer
#------------------------------------------------------------------------------
def Predict_transCC(df_all, first, second):
df_all_api = df_all.copy(deep=True)
cc = 2.0
row_list = [first, second]
for i in row_list:
# the bandwidth is shared among all the concurrent transfer
bw = df_all_api.loc[i]['bw'] / cc
# predict the transfer time based on the bandwidth
cur_pred_time_left = df_all_api.loc[i]['bytes_left'] / bw
# update the future ending time
df_all_api = UpdateCell(df_all_api, i, 'pred_end',
cur_pred_time_left + df_all_api.loc[i]['current_pos'] )
return df_all_api
#------------------------------------------------------------------------------
# Predict the ending time: based on the concurrency
# 1) if they are both h2d_h2d, d2h_d2h or kern_kern, we need to predict using a different mode
# 2) if they are different apis, there is no interference
#------------------------------------------------------------------------------
def Predict_end(df_all, r1, r2, ways = 1.0):
"""
From the input dataframe, adjust the row info, depending on the concurrency.
"""
df_all_api = df_all.copy(deep=True)
cc = ways # concurrency
r1_apitype = df_all_api.loc[r1]['api_type']
r2_apitype = df_all_api.loc[r2]['api_type']
interference = True if r1_apitype == r2_apitype else False
if interference == False:
df_all_api = Predict_noConflict(df_all_api, r1, r2)
else:
if r1_apitype in ['h2d', 'd2h']: # data transfer model
df_all_api = Predict_transCC(df_all_api, r1, r2)
elif r1_apitype == 'kern': # todo: cke model
pass
else:
sys.stderr.write('Unknown API call.')
return df_all_api
#------------------------------------------------------------------------------
# get the time range from wake api, to check the next concurrent api
#------------------------------------------------------------------------------
def Get_pred_range(df_all):
df_wake = df_all.loc[df_all.status == 'wake']
begT = df_wake.current_pos.min()
endT = df_wake.pred_end.min()
return [begT, endT]
#------------------------------------------------------------------------------
# check concurrency by another cuda stream within a time range
#------------------------------------------------------------------------------
def Check_cc_by_time(df_all, time_range):
df_all_api = df_all.copy(deep=True)
df_wake = df_all_api.loc[df_all_api.status == 'wake']
df_sleep = df_all_api.loc[df_all_api.status == 'sleep']
# find out the stream ids in df_wake
new_stream_ls = []
for x in df_sleep.stream_id.unique():
if x not in df_wake.stream_id.unique():
new_stream_ls.append(x)
has_conc_stream = 1 if new_stream_ls else 0;
#print('has_conc_stream {}'.format(has_conc_stream))
# todo:
# look for streams that start within the time range
extra_cc = 0
if has_conc_stream == 1:
for sid in new_stream_ls:
df_cur = df_sleep.loc[df_sleep.stream_id == sid]
for index, row in df_cur.iterrows():
startT = row.start
if time_range[0] <= startT < time_range[1]: # api in the range
extra_cc = 1
return extra_cc
#------------------------------------------------------------------------------
# Update the ending time: based on the concurrency
#------------------------------------------------------------------------------
def Update_ovlpTrans(df_all, timerange_list, ways = 1.0):
startT = timerange_list[0]
endT = timerange_list[1]
dur = endT - startT
cc = ways
df_all_api = df_all.copy(deep=True)
# since the df_all_api are sorted by start
# we only need to check the wake stream and start from top
for index, row in df_all_api.iterrows():
if row.status == 'wake':
bw = row.bw / cc
bytes_don = row.bytes_done
bytes_lft = row.bytes_left
bytes_tran = dur * bw
bytes_left = row.bytes_left - bytes_tran
done = 0
if abs(bytes_left - 0.0) < 1e-3: # smaller than 1 byte
done = 1
#print index
if done == 1:
# update bytes_done
tot_size = row.size_kb
#print tot_size
df_all_api.set_value(index,'bytes_done', tot_size)
df_all_api.set_value(index,'bytes_left', 0)
df_all_api.set_value(index,'time_left', 0) # no time_left
df_all_api.set_value(index,'current_pos', row.pred_end)
df_all_api.set_value(index,'status', 'done')
else:
                # deduct the bytes, update the current pos
df_all_api.set_value(index,'bytes_done', bytes_don + bytes_tran)
df_all_api.set_value(index,'bytes_left', bytes_lft - bytes_tran)
df_all_api.set_value(index,'current_pos', endT)
df_all_api.set_value(index,'time_left', 0) # clear
df_all_api.set_value(index,'pred_end', 0) # clear
return df_all_api
#------------------------------------------------------------------------------
# For cuda calls with 'done' status, update the timing for that stream
#------------------------------------------------------------------------------
def UpdateStreamTime(df_all_api):
# copy the input
df_all = df_all_api.copy(deep=True)
df_done = df_all.loc[df_all.status == 'done'] # find out which api is done
if df_done.empty:
return df_all
done_streams = df_done.stream_id.unique() # np.array
for x in done_streams:
# read the stream
df_cur = df_all.loc[df_all.stream_id == x] # the api in order
prev_start = 0.0
prev_end = 0.0
prev_pred_end = 0.0
prev_status = ''
prev_newEnd = 0.0
count = 0
for index, row in df_cur.iterrows(): # process each row
# record previous timing and status
if count == 0:
prev_start = row.start
prev_end = row.end
#print('prev_end {}'.format(prev_end))
prev_pred_end = row.pred_end
prev_status = row.status
# read current stat
cur_start = row.start
#print('cur_start {}'.format(cur_start))
cur_end = row.end
cur_pred_end = row.pred_end
cur_status = row.status
#print('count {} : cur_start {} prev_end {}'.format(count, cur_start, prev_end))
if cur_status == 'done':
# if it is done, no need to update, save it for coming row
prev_start = row.start
prev_end = row.end
prev_pred_end = row.pred_end
prev_status = row.status
else:
# adjust offset according to the previous predicted_end
ovhd = cur_start - prev_end
#print('stream {} : cur_start {}'.format(x, cur_start))
if prev_status == 'done':
new_start = prev_pred_end + ovhd # offset with the pred_end
else:
new_start = prev_newEnd + ovhd # with previous new_end
new_end = new_start + (cur_end - cur_start) # new start + duration
# before updating the current record, save the current
prev_start = cur_start
prev_end = cur_end
prev_pred_end = cur_pred_end
prev_status = cur_status
prev_newEnd = new_end # important!
# update the dataframe record
#print index
df_all.set_value(index, 'start', new_start)
df_all.set_value(index, 'end', new_end)
df_all.set_value(index, 'pred_end', new_end)
# update the count for current iter
count = count + 1
# update the end column for rows with 'done' status
df_cur_done = df_cur.loc[df_cur.status == 'done']
for index, row in df_cur_done.iterrows():
df_all.set_value(index, 'end', row.pred_end) # update with pred_end
#----------------------
# end of current stream
#--------------------------------------
#end of all the streams with 'done' call
return df_all
#------------------------------------------------------------------------------
# Find out when to start current stream.
# Read the previous stream trace, 1) when the current h2d exceeds the threshold timing,
# record the current start time, and add the threshold
# 2) if not, the current stream will start from the last h2d end time of the previous one
#------------------------------------------------------------------------------
def find_h2d_start(df_trace, H2D_H2D_OVLP_TH):
h2d_ovlp = 0
h2d_starttime = 0
h2d_endtime = 0
for index, row in df_trace.iterrows():
if row.api_type == 'h2d':
            h2d_duration = row['duration']
h2d_starttime = row['start'] # record the latest h2d
h2d_endtime = row['end'] # record the latest h2d
            if h2d_duration > H2D_H2D_OVLP_TH:
h2d_ovlp = 1
break
if row.api_type == 'kern': # break when the next is kernel
break
stream_start_time = 0.0
# if there is no overlapping for all h2d api,
# the second stream will start from the last h2d ending time
if h2d_ovlp == 0:
#stream_start_time = h2d_starttime
stream_start_time = h2d_endtime
# if there is overlapping, we add the overlapping starting time
# with the overlapping threshold
if h2d_ovlp == 1:
stream_start_time = h2d_starttime + H2D_H2D_OVLP_TH
## warning : add api launch ovhd
stream_start_time += 0.002
return stream_start_time
#------------------------------------------------------------------------------
# Deep copy the timing trace
# for multiple cuda stream case, each trace will be appended to a list
#------------------------------------------------------------------------------
def init_trace_list(df_trace, stream_num = 1, h2d_ovlp_th = 3.158431):
df_cke_list = []
for x in range(stream_num):
df_dup = df_trace.copy(deep=True)
df_dup.stream = x # update the stream id
df_cke_list.append(df_dup)
#--------------------
# set up the trace table by adjusting the starting timing
#--------------------
for i in range(1,stream_num):
# compute the time for the previous data transfer
stream_startTime = find_h2d_start(df_cke_list[i-1], h2d_ovlp_th)
print('stream_startTime : {}'.format(stream_startTime))
df_cke_list[i].start += stream_startTime
df_cke_list[i].end += stream_startTime
return df_cke_list
#------------------------------------------------------------------------------
# Sort api at the 1st time.
# Return the sorted dataframe from the df_cke_list
#------------------------------------------------------------------------------
def init_sort_api_with_extra_cols(df_cke_list):
columns_ = ['start', 'end', 'api_type', 'size_kb', 'stream_id', 'status']
df_all_api = pd.DataFrame(columns=columns_) # init
stream_num = len(df_cke_list)
#-------------------------------
# generate the trace table
#-------------------------------
for i in range(stream_num): # read each stream
stream_id = i
df_current = df_cke_list[i]
rows = df_current.shape[0]
for j in range(rows): # read each row
start_t = df_current['start'][j]
end_t = df_current['end'][j]
api_t = df_current['api_type'][j]
size_kb = df_current['size'][j]
df_all_api = df_all_api.append({'start': start_t, 'end': end_t,
'api_type': api_t, 'stream_id': stream_id, 'size_kb': size_kb,
'status': 'sleep'}, ignore_index = True)
#-------------------------------
# sort by the start column
#-------------------------------
result = df_all_api.sort_values('start', ascending=1)
# add bandwidth column
result['bw'] = 0.0
# compute bandwidth
for index, row in result.iterrows():
if row.size_kb > 0.0:
bw = row.size_kb / (row.end - row.start)
result.loc[index, 'bw'] = bw
#-------------------------------
# add extra columns
#-------------------------------
result['kern_id'] = None
result['bytes_done'] = 0.0
result['bytes_left'] = result['size_kb']
result['current_pos'] = 0.0
#result['time_left'] = 0.0
result['pred_end'] = 0.0
#---------------------------
# update kernel_id
#---------------------------
kid = 0
for index, row in result.iterrows():
if row.api_type == 'kern':
result = UpdateCell(result, index, 'kern_id', kid)
kid+=1
return result
#------------------------------------------------------------------------------
# check concurrency during an interval for wake api calls
#------------------------------------------------------------------------------
def Check_CC(df_wake, begT, endT):
cc = 0.0
cc_rows = []
for index, row in df_wake.iterrows():
mystart = row.start
myend = row.end
if mystart < endT : # if current wake starts before the end time, add cc
cc = cc + 1.0
cc_rows.append(index) # find out the row index
return cc, cc_rows
#------------------------------------------------------------------------------
#
#------------------------------------------------------------------------------
def Update_row_by_cc(df_all, r, cc, timeRange):
df = df_all.copy(deep=True)
begT = timeRange[0]
endT = timeRange[1]
duration = endT - begT
my_type = df.loc[r]['api_type']
my_curpos = df.loc[r]['current_pos']
my_end = df.loc[r]['end']
my_pred_end = df.loc[r]['pred_end']
my_kb = df.loc[r]['size_kb']
my_left_new = 0.0
my_bytes_done_new = 0.0
my_left_time = 0.0
if my_type in ['h2d', 'd2h']:
my_bytes_left = df.loc[r]['bytes_left']
my_bytes_done = df.loc[r]['bytes_done']
my_bw = df.loc[r]['bw'] / cc
bytes_tran = duration * my_bw
my_left_time = my_bytes_left / my_bw
if my_bytes_left < 1e-3:
sys.stderr.write('no bytes left')
# calculate how many bytes left
my_left_new = my_bytes_left - bytes_tran
print('row {}, end {}, my_curpos {}, my_pred_end {}, my_left_new {}'.format(r, my_end, my_curpos, my_pred_end, my_left_new))
if my_left_new < 1e-3:
my_left_new = 0.0
# compute bytes done so far
my_bytes_done_new = my_bytes_done + bytes_tran
df = UpdateCell(df, r, 'current_pos', endT)
if my_type in ['h2d', 'd2h']:
df = UpdateCell(df, r, 'bytes_left', my_left_new)
df = UpdateCell(df, r, 'bytes_done', my_bytes_done_new)
if my_left_new == 0.0:
my_new_end = my_curpos + my_left_time
#print('my_new_end {}'.format(my_new_end))
# WARNING: use the org end time :
# if current call is done by the time minT starts
df= UpdateCell(df, r, 'current_pos', my_new_end)
df= UpdateCell(df, r, 'pred_end', my_new_end) # update
df= UpdateCell(df, r, 'bytes_done', my_kb)
df= UpdateCell(df, r, 'status', 'done')
return df
#------------------------------------------------------------------------------
# Predict new end time based on the concurrency
#------------------------------------------------------------------------------
def Adjust_pred(df_all, cc, cc_row_list):
df = df_all.copy(deep=True)
for rid in cc_row_list:
my_type = GetInfo(df, rid, 'api_type')
my_curpos = GetInfo(df, rid, 'current_pos')
if my_type in ['h2d', 'd2h']:
my_bw = GetInfo(df, rid, 'bw')
my_bw = my_bw / cc
my_bytes_left = GetInfo(df, rid, 'bytes_left')
trans_time = my_bytes_left / my_bw
# new predicted end
my_new_end = my_curpos + trans_time
# update
df= UpdateCell(df, rid, 'pred_end', my_new_end)
return df
#------------------------------------------------------------------------------
# Start the target api, check prev ovlapping, and update the timing accordingly
#
# WARNING:
# 1) if one call is done, the prediction using cc still happens on the other call
# where we probably over-predict the ending time
#------------------------------------------------------------------------------
def MoveCurPos(df_all, r1):
df = df_all.copy(deep=True)
df_wake = df.loc[df.status == 'wake']
#print df_wake
wake_api_num = df_wake.shape[0]
#print wake_api_num
# get the range to check ovlp
begT = df_wake.current_pos.min()
endT = df_wake.pred_end.min()
midT = endT
# check any wake api start between the range
for index, row in df_wake.iterrows():
cur_start = row.start
if begT < cur_start < endT:
midT = cur_start
#print midT
# check concurrency [begT,midT]
cc, cc_rows = Check_CC(df_wake, begT, midT)
print('from {} to {}, cc = {}'.format(begT, midT, cc))
    print(cc_rows)
# predict based on current cc
for r in cc_rows:
df = Update_row_by_cc(df, r, cc, [begT, midT])
if midT < endT:
cc_new, cc_rows_new = Check_CC(df_wake, midT, endT)
print('from {} to {}, cc = {}'.format(midT, endT, cc_new))
        print(cc_rows_new)
# when the concurrency changes: update the pred_end based on current cc
        if cc_new != cc:
df = Adjust_pred(df, cc_new, cc_rows_new)
# find out the new time range to predict
chk_start, chk_end = GetRangeFromWake(df)
print('new pred range from {} to {}'.format(chk_start, chk_end))
# update row during this range
for r in cc_rows:
df = Update_row_by_cc(df, r, cc_new, [chk_start, chk_end])
# check whether any api call has ended
# if so, update timing for all the calls in the stream
df = UpdateStreamTime(df)
return df
#------------------------------------------------------------------------------
# start next api
# 1) if in sleep, wake it up 2) if active, directly return
#------------------------------------------------------------------------------
def start_next_call(df_all, prev_row):
df = df_all.copy(deep=True)
row_id = None
row_stream = None
#df_sleep = df.loc[df.status == 'sleep']
#if df_sleep.shape[0] > 0:
# # pick the 1st one in sleep
# row_id = Pick_first_in_sleep(df)
# df = SetWake(df, row_id)
# row_stream = GetInfo(df, row_id, 'stream_id')
    df_nodone = df.loc[df.status != 'done']
found_prev = False
for index, row in df_nodone.iterrows():
if found_prev:
row_id = index # the row after prev_row
break
if index == prev_row:
found_prev = True
my_status = GetInfo(df, row_id, 'status')
if my_status == 'sleep':
df = SetWake(df, row_id)
row_stream = GetInfo(df, row_id, 'stream_id')
if my_status == 'wake':
row_stream = GetInfo(df, row_id, 'stream_id')
return df, row_id, row_stream
#------------------------------------------------------------------------------
# start next api
# todo: add cases for kernels
#------------------------------------------------------------------------------
def StartNext_byType(df_all, row_list):
df_all_api = df_all.copy(deep=True)
# row r1 and r2 should be wake
r1 = row_list[0]
r2 = row_list[1]
#----------------
# row r2
#----------------
r2_start = df_all_api.loc[r2]['start']
#----------------
# row r1: previous one that in wake
#----------------
r1_type = df_all_api.loc[r1]['api_type']
r1_cur_pos = df_all_api.loc[r1]['current_pos']
r1_end = df_all_api.loc[r1]['end']
r1_left_new = 0.0
r1_bytesdone_new = 0.0
r1_kb = 0.0
# if r1 type is transfer call, we need to update the transfer status
if r1_type in ['h2d', 'd2h']:
r1_bw = df_all_api.loc[r1]['bw']
r1_bytesdone = df_all_api.loc[r1]['bytes_done']
r1_kb = df_all_api.loc[r1]['size_kb']
# compute trans size
duration = r2_start - r1_cur_pos
r1_bytes_tran = duration * r1_bw
# check bytes left
r1_bytes_left = df_all_api.loc[r1]['bytes_left']
#print('bytes left : {}'.format(r1_bytes_left))
if r1_bytes_left < 1e-3:
sys.stderr.write('no bytes left')
# calculate how many bytes left
r1_left_new = r1_bytes_left - r1_bytes_tran
if r1_left_new < 1e-3:
r1_left_new = 0.0
# compute bytes done so far
r1_bytesdone_new = r1_bytesdone + r1_bytes_tran
# update r1 status
if r2_start < r1_end: # r2 starts before r1 ends
df_all_api = UpdateCell(df_all_api, r1, 'current_pos', r2_start)
else: # r2 start after r1 ends
df_all_api = UpdateCell(df_all_api, r1, 'current_pos', r1_end)
if r1_type in ['h2d', 'd2h']:
df_all_api = UpdateCell(df_all_api, r1, 'bytes_left', r1_left_new)
df_all_api = UpdateCell(df_all_api, r1, 'bytes_done', r1_bytesdone_new)
if r1_left_new == 0.0:
# WARNING: use the org end time :
# if current call is done by the time r2 starts
df_all_api = UpdateCell(df_all_api, r1, 'current_pos', r1_end)
df_all_api = UpdateCell(df_all_api, r1, 'pred_end', r1_end) # update
df_all_api = UpdateCell(df_all_api, r1, 'bytes_done', r1_kb)
df_all_api = UpdateCell(df_all_api, r1, 'status', 'done')
# update r2 status: current pos
df_all_api = UpdateCell(df_all_api, r2, 'current_pos', r2_start)
#print('r1 : {}, r2 : {}, r2_start : {}'.format(r1, r2, r2_start))
return df_all_api
#------------------------------------------------------------------------------
# update time between an interval
#------------------------------------------------------------------------------
def update_by_range(df_all, begT, endT, Gpu, SM_resList, SM_traceList, stream_kernel_list):
df = df_all.copy(deep=True)
SMreslist = copy.deepcopy(SM_resList)
SMtracelist = copy.deepcopy(SM_traceList)
    if len(SMreslist) != len(SMtracelist):
        sys.stderr.write('mismatch on sm resource and trace table!')
return df, SMreslist, SMtracelist
sm_num = len(SMreslist)
# find out the wake api during the range
wake_list = GetWakeListByTime(df, begT, endT)
print('wake list {} '.format(wake_list))
#wake_list = GetWakeListBefore(df, endT)
#print('wake list {} '.format(wake_list))
# no wake api
if not wake_list:
return df
# how many h2d ovlp
h2d_list, d2h_list, kern_list = FindOvlp(df, wake_list)
print ('h2d_list : {}'.format(h2d_list))
print ('d2h_list : {}'.format(d2h_list))
print ('kern_list : {}'.format(kern_list))
#--------------------------------
# check whether there is h2d ovlp
#--------------------------------
if h2d_list:
cc = len(h2d_list)
print('cc {} for all the wake h2d list'.format(cc))
for r in h2d_list:
df = Update_trans_bytes(df, r, begT, endT, ways = cc)
#--------------------------------
# check whether there is d2h ovlp
#--------------------------------
if d2h_list:
cc = len(d2h_list)
print('cc {} for all the wake d2h list'.format(cc))
for r in d2h_list:
df = Update_trans_bytes(df, r, begT, endT, ways = cc)
#--------------------------------
# check whether there is kern ovlp
#--------------------------------
if kern_list:
"""
First, we need to check whether the kernel is already running
Then
case 1) there is one kernel, register in the gpu trace table
case 2) if there are more, add them by the kernel starting time, to the
        gpu SM trace table
"""
#
# sort kern rows by the starting time
sorted_kerns = SortKern(df, kern_list)
print('sorted kernel rows: {}'.format(sorted_kerns))
#
# if kernel start is before begT, then it is already running, no need to cnt
kern_nums = len(sorted_kerns)
for i in range(0, kern_nums):
# kernel id label
row = sorted_kerns[i]
kid = GetInfo(df, row, 'kern_id')
Found = FindKernelRecord(SMtracelist, kid)
print('find kernel ? {} : row {}, in SM trace'.format(Found, row))
print('kid = {}'.format(kid))
if not Found:
#
# not Found : this is a new kernel
# find the kernel info and kernel_id to run on gpu
my_kernel_info, kid = GetKernelInfoAndTag(df, row, stream_kernel_list)
Dump_kernel_info(my_kernel_info)
#
# run cke model
SMreslist, SMtracelist = avgblk.run_gpu_kernel(Gpu,
SMreslist, SMtracelist, my_kernel_info, kid)
#sys.stderr.write('kernel model no accomplished yet!')
## find the kernel execution time from the sm trace table
result_kernel_runtime_dd = avgblk.Get_KernTime(SMtracelist)
        print(result_kernel_runtime_dd)
# 4/9
# according to the new kernel time, update the start and end time for the kernel,
# and all the api calls behind
# 4/10
# instead, update the pred_end for these kernels
df = UpdateKernelPred(df, result_kernel_runtime_dd, sorted_kerns)
#print GetInfo(df, 2, 'pred_end')
#print GetInfo(df, 6, 'pred_end')
#sys.stderr.write('kernel model no accomplished yet!')
print('kernel model still need some work!')
#pass
return df, SMreslist, SMtracelist
#------------------------------------------------------------------------------
# check active stream dd and terminate an api that ends soon
#------------------------------------------------------------------------------
def check_activestream_and_update(df_all, activestream_dd, simPos):
df = df_all.copy(deep=True)
as_dd = copy.deepcopy(activestream_dd)
full = True
for key, value in activestream_dd.items():
if value == None:
full = False
break
if not full:
return df, as_dd, simPos
#--------------------------------------------------------------------------
# find out which call to terminate
df_wake = df.loc[df.status == 'wake']
wake_list = FindWakeList(df_wake)
print('check_activestream_and_update : wakelist {}'.format(wake_list))
# sort
df_sorted = df_wake.sort_values(['pred_end'],ascending = True)
#print df_sorted
row2end = df_sorted.iloc[0].name
row2end_stream = df_sorted.iloc[0]['stream_id']
print('row to end : {}, its stream {}'.format(row2end, row2end_stream))
#print type(row2end)
#
# find out the next call after row2nd
row_afterprevcall = Find_nextcall_samestream(df, row2end, row2end_stream)
print('row_afterprevcall {}'.format(row_afterprevcall))
print('-----')
if row_afterprevcall is not None:
nextCall_start = GetInfo(df, row_afterprevcall, 'start')
print('next call after : {}'.format(row_afterprevcall))
else:
# the last call when row_afterprevcall is None
# 1) finish row2end 2) set stream to none
df = FinishLastCall(df, row2end)
as_dd[row2end_stream] = None
simPos = GetInfo(df, row2end, 'end')
#
# move current_pos to row2nd pred_end
row2end_predend = GetInfo(df, row2end, 'pred_end')
for wake_row in wake_list:
            if wake_row != row2end:
local_pos = GetInfo(df, wake_row, 'current_pos')
# no need to update if current pos is ahead of previous row end time
if local_pos < row2end_predend:
df = UpdateCell(df, wake_row, 'current_pos', row2end_predend)
return df, as_dd, simPos
#
# end the target row, update the bytes for other call
# todo: simPos may be ahead of next call start time
df = end_target_row(df, row2end, simPos, nextCall_start)
#----------------------
#----------------------
#
# the start has been shifted right
nextCall_start = GetInfo(df, row_afterprevcall, 'start')
#
# move current_pos to row2nd pred_end
row2end_predend = GetInfo(df, row2end, 'pred_end')
for wake_row in wake_list:
        if wake_row != row2end:
local_pos = GetInfo(df, wake_row, 'current_pos')
#
# no need to update if current pos is ahead of previous row end time
if local_pos < row2end_predend:
df = UpdateCell(df, wake_row, 'current_pos', row2end_predend)
#
# move to row2end_end to nextcall_start
print('currpos {} to next call start {}'.format(row2end_predend, nextCall_start))
df = move_wake_for_coming_call(df, row2end_predend, nextCall_start)
#
# since row2nd is done, we need remove it from activestream pool
#row2end_stream = int(row2end_stream)
#print as_dd[row2end_stream]
as_dd[row2end_stream] = None
#print as_dd
#
# simulation postion = nextCall_start
simPos = nextCall_start
return df, as_dd, simPos
#------------------------------------------------------------------------------
# Move wake calls to the coming api start: no ovlp during the rangeT
# Warning: there are cases where overlapping exists
#------------------------------------------------------------------------------
def move_wake_for_coming_call(df_all, preEndT, curStartT):
df = df_all.copy(deep=True)
wake_list = GetWakeListBefore(df, preEndT)
print('move_wake_for_coming_call, wake list {} '.format(wake_list))
#
dur = curStartT - preEndT
for wake_row in wake_list:
wake_row_api = GetInfo(df, wake_row, 'api_type')
print('wake row {}, pred_end {}'.format(wake_row, GetInfo(df, wake_row, 'pred_end')))
if wake_row_api in ['h2d', 'd2h']:
bw = GetInfo(df, wake_row, 'bw')
bytes_left = GetInfo(df, wake_row, 'bytes_left')
bytes_done = GetInfo(df, wake_row, 'bytes_done')
trans_bytes = dur * bw
bytes_left_new = bytes_left - trans_bytes
bytes_done_new = bytes_done + trans_bytes
#
# no need to update pred_end, since Update_row_h2d assume on ovlp
df = UpdateCell(df, wake_row, 'bytes_left', bytes_left_new)
df = UpdateCell(df, wake_row, 'bytes_done', bytes_done_new)
df = UpdateCell(df, wake_row, 'current_pos', curStartT)
# warning
# check other wake kernels, if the start is behind curStartT still label the current pos
wake_kern_list = GetWakeKernList(df)
for row in wake_kern_list:
df = UpdateCell(df, row, 'current_pos', curStartT)
return df
#------------------------------------------------------------------------------
# finish the target row and update the timing
#------------------------------------------------------------------------------
def end_target_row(df_all, row2nd, simT, curT):
df = df_all.copy(deep=True)
#
# pick max of simT and curT
#time_door = max(simT, curT)
#
# find wake apis before curT
wake_list = GetWakeListBefore(df, curT)
print('simT {}, curT{} '.format(simT, curT))
print('Before time {}, wake list {} '.format(curT, wake_list))
#
# check row2nd api type
mytype = GetInfo(df, row2nd, 'api_type')
if mytype == 'h2d':
#
# how many h2d ovlp during the interval
h2d_list, _, _ = FindOvlp(df, wake_list)
cc = len(h2d_list)
print('h2d cc {}'.format(cc))
#
# finish current row and update the pred time
df = Finish_row_h2d(df, row2nd, simT, ways = cc)
#
# if an api is done, update the timing for the stream
df = UpdateStreamTime(df)
pred_end = GetInfo(df, row2nd, 'pred_end')
if cc > 1.0:
# update the time for other stream
for x in h2d_list:
                if x != row2nd:
# update bytes_left and bytes_done
df = Update_row_h2d(df, x, simT, pred_end, ways = cc)
if mytype == 'd2h':
##
## how many d2h ovlp during the interval
#__, d2h_list, _ = FindOvlp(df, wake_list)
#cc = len(d2h_list)
#print('d2h cc {}'.format(cc))
sys.stderr.write('end_target_row, d2h not implemented')
if mytype == 'kern':
# update end/current_pos with pred_end
mypred = GetInfo(df, row2nd, 'pred_end')
df.set_value(row2nd, 'end', mypred)
df.set_value(row2nd, 'current_pos', mypred)
df.set_value(row2nd, 'status', 'done')
#sys.stderr.write('end_target_row, kern not implemented')
#pass
return df
#------------------------------------------------------------------------------
# check concurrency using current_pos
#------------------------------------------------------------------------------
def Predict_checkCC(df_all, first, second):
df_all_api = df_all.copy(deep=True)
r1 = first
r2 = second
# if r1 current_pos == r2 start, there is overlapping
r1_cur_pos = df_all_api.loc[r1]['current_pos']
r2_start = df_all_api.loc[r2]['start']
conc = 0
if r1_cur_pos == r2_start: # when the two api start at the same time
conc = 1
# when there is overlapping
if conc == 1:
cc = 2.0
        # predict the next
df_all_api = Predict_end(df_all_api, r1, r2, ways = cc)
return df_all_api
#------------------------------------------------------------------------------
# Check the api type or not, return type
#------------------------------------------------------------------------------
def CheckType(df_all, r1, r2):
r1_type = df_all.loc[r1]['api_type']
r2_type = df_all.loc[r2]['api_type']
whichType = None
if r1_type == r2_type:
whichType = r1_type
return whichType
#------------------------------------------------------------------------------
# Update using pred_end when there is no conflict.
#------------------------------------------------------------------------------
def Update_wake_noConflict(df_all, timeRange):
df_all_api = df_all.copy(deep=True)
df_wake = df_all_api.loc[df_all_api.status == 'wake'] # wake apis
startT = timeRange[0]
endT = timeRange[1]
dur = endT - startT
# iterate through each row to update the pred_end
for index, row in df_wake.iterrows():
apitype = row.api_type
if apitype in ['h2d', 'd2h']: # for transfer, we need to update the bytes also
bw = row.bw
bytes_tran = dur * bw
bytes_don = row.bytes_done
bytes_lft = row.bytes_left
bytes_left = row.bytes_left - bytes_tran
done = 0
if abs(bytes_left - 0.0) < 1e-3: # smaller than 1 byte
done = 1
if done == 1:
# update bytes_done
tot_size = row.size_kb
#print tot_size
df_all_api.set_value(index,'bytes_done', tot_size)
df_all_api.set_value(index,'bytes_left', 0)
df_all_api.set_value(index,'time_left', 0) # no time_left
df_all_api.set_value(index,'current_pos', row.pred_end)
df_all_api.set_value(index,'status', 'done')
else:
                # deduct the bytes, update the current pos
df_all_api.set_value(index,'bytes_done', bytes_don + bytes_tran)
df_all_api.set_value(index,'bytes_left', bytes_lft - bytes_tran)
df_all_api.set_value(index,'current_pos', endT)
df_all_api.set_value(index,'time_left', 0) # clear
df_all_api.set_value(index,'pred_end', 0) # clear
elif apitype == 'kern': # update current_pos and status
k_pred_end = row.pred_end
k_start = row.start
k_end = row.end
if k_pred_end > k_end: # there is more work to do
df_all_api.set_value(index, 'current_pos', endT)
df_all_api.set_value(index, 'time_left', k_pred_end - k_end)
else: # the kernel is done
df_all_api.set_value(index, 'current_pos', k_pred_end)
df_all_api.set_value(index, 'status', 'done')
else:
sys.stderr.write('Unknown API call.')
return df_all_api
#------------------------------------------------------------------------------
# Predict the end time when there is memory transfer overlapping.
#------------------------------------------------------------------------------
def Predict_transferOvlp(df_all, first, second, ways = 1.0):
df_all_api = df_all.copy(deep=True)
target_rows = [first, second]
cc = ways
for r1 in target_rows: # work on the target row
r1_type = df_all_api.loc[r1]['api_type']
cur_pos = df_all_api.loc[r1]['current_pos']
# check the bytes left and use bw to predict the end time
bw = df_all_api.loc[r1]['bw'] / cc
bytesleft = df_all_api.loc[r1]['bytes_left']
pred_time_left = bytesleft / bw
df_all_api = UpdateCell(df_all_api, r1, 'pred_end', cur_pos + pred_time_left)
return df_all_api
# Note: this list-based definition shadows the two-argument Predict_transferOvlp above.
def Predict_transferOvlp(df_all, row_list):
df_all_api = df_all.copy(deep=True)
cc = float(len(row_list))
for r1 in row_list: # work on the target row
cur_pos = df_all_api.loc[r1]['current_pos']
# check the bytes left and use bw to predict the end time
bw = df_all_api.loc[r1]['bw'] / cc
bytesleft = df_all_api.loc[r1]['bytes_left']
pred_time_left = bytesleft / bw
df_all_api = UpdateCell(df_all_api, r1, 'pred_end', cur_pos + pred_time_left)
return df_all_api
#------------------------------------------------------------------------------
# Update using pred_end when there is no conflict.
#------------------------------------------------------------------------------
def Update_wake_transferOvlp(df_all, timeRange, ways = 1.0):
df_all_api = df_all.copy(deep=True)
df_wake = df_all_api.loc[df_all_api.status == 'wake'] # wake apis
startT = timeRange[0]
endT = timeRange[1]
dur = endT - startT
cc = ways
# iterate through each row to update the pred_end
for index, row in df_wake.iterrows():
bw = row.bw / cc
bytes_tran = dur * bw
bytes_don = row.bytes_done
bytes_lft = row.bytes_left
bytes_left = row.bytes_left - bytes_tran
done = 0
if abs(bytes_left - 0.0) < 1e-3: # smaller than 1 byte
done = 1
if done == 1:
# update bytes_done
tot_size = row.size_kb
#print tot_size
df_all_api.set_value(index,'bytes_done', tot_size)
df_all_api.set_value(index,'bytes_left', 0)
df_all_api.set_value(index,'time_left', 0) # no time_left
df_all_api.set_value(index,'current_pos', row.pred_end)
df_all_api.set_value(index,'status', 'done')
else:
            # deduct the bytes, update the current pos
df_all_api.set_value(index,'bytes_done', bytes_don + bytes_tran)
df_all_api.set_value(index,'bytes_left', bytes_lft - bytes_tran)
df_all_api.set_value(index,'current_pos', endT)
df_all_api.set_value(index,'time_left', 0) # clear
df_all_api.set_value(index,'pred_end', 0) # clear
return df_all_api
#------------------------------------------------------------------------------
# consider the two kernels are done, update the current pos
#------------------------------------------------------------------------------
def Update_wake_kernOvlp(df_all):
df_all_api = df_all.copy(deep=True)
df_wake = df_all_api.loc[df_all_api.status == 'wake'] # wake apis
df_wake_kern = df_wake.loc[df_wake.api_type== 'kern'] # wake kernels
# iterate through each row to update the pred_end
for index, row in df_wake_kern.iterrows():
df_all_api.set_value(index,'current_pos', row.pred_end)
df_all_api.set_value(index,'status', 'done')
return df_all_api
#------------------------------------------------------------------------------
# For the last api call, update the entire trace table.
#------------------------------------------------------------------------------
def UpdateStream_lastapi(df_all_api):
# copy the input
df_all = df_all_api.copy(deep=True)
df_lastwake = df_all.loc[df_all.status == 'wake'] # find out the last active api
for index, row in df_lastwake.iterrows():
apitype = row.api_type
if apitype in ['h2d', 'd2h']: # there is no overlapping since the last one
bw = row.bw
cur_pos = row.current_pos
bytes_left = row.bytes_left # bytes left to transfer
time_to_finish= bytes_left / bw
pred_end = cur_pos + time_to_finish
# compute the new end : cur_pos + time_to_finish
df_all.set_value(index, 'pred_end', pred_end)
df_all.set_value(index, 'bytes_left', 0)
df_all.set_value(index, 'bytes_done', row.size_kb)
#df_all.set_value(index, 'time_left', 0)
df_all.set_value(index, 'status', 'done')
df_all.set_value(index, 'current_pos', pred_end) # current will be the pred_end
df_all.set_value(index, 'end', pred_end) # end will be the pred_end
return df_all
#------------------------------------------------------------------------------
# Check whether any row is done
#------------------------------------------------------------------------------
def CheckRowDone(df_all, r1, r2):
r1_status = df_all.loc[r1]['status']
r2_status = df_all.loc[r2]['status']
next_iter = False
if r1_status == 'done' or r2_status == 'done':
next_iter = True
return next_iter
# Note: this list-based definition shadows the two-argument CheckRowDone above.
def CheckRowDone(df_all, row_list):
next_iter = False
for r1 in row_list:
r1_status = df_all.loc[r1]['status']
if r1_status == 'done':
next_iter = True
break
done_list = []
for r1 in row_list:
r1_status = df_all.loc[r1]['status']
if r1_status == 'done':
done_list.append(r1)
return next_iter, done_list
#------------------------------------------------------------------------------
# Check whether any row is done
#------------------------------------------------------------------------------
def FindStreamAndKernID(df_all_api, r1):
stream_id = df_all_api.loc[r1]['stream_id']
stream_id = int(stream_id)
df_stream = df_all_api.loc[df_all_api.stream_id == stream_id]
# iterate through each row, count when the index == r1
kernel_id = 0
kcount = 0
for index, row in df_stream.iterrows():
if row.api_type == 'kern':
kcount = kcount + 1
if index == r1:
kernel_id = kcount - 1 # index kernel from 0
kernel_id = int(kernel_id)
return stream_id, kernel_id
#------------------------------------------------------------------------------
# Get the start time for the current row/api call.
#------------------------------------------------------------------------------
def GetStartTime(df_all_api, r1):
return float(df_all_api.loc[r1]['start'])
#------------------------------------------------------------------------------
#
#------------------------------------------------------------------------------
def pick_first_call(df_all_api, mode = 'sleep'):
df_all = df_all_api.copy(deep=True)
df_sleep = df_all.loc[df_all.status == mode]
count = 0
target_rowid = 0
target_stream = 0
for index, row in df_sleep.iterrows():
if count == 0: # 1st row
target_rowid = index
target_stream = row.stream_id
break
return df_all, int(target_rowid), target_stream
#------------------------------------------------------------------------------
# pick an api to start
#------------------------------------------------------------------------------
def pick_base_call(df_all):
df = df_all.copy(deep=True)
total_calls = df.shape[0]
df_sleep = df.loc[df.status == 'sleep']
df_wake = df.loc[df.status == 'wake']
sleep_num = df_sleep.shape[0]
r1 , r1_stream = None, None
if sleep_num == total_calls: # all sleep
df, r1, r1_stream = pick_first_call(df, mode = 'sleep')
if not df_wake.empty:
df, r1, r1_stream = pick_first_call(df, mode = 'wake')
return df, r1, r1_stream
#------------------------------------------------------------------------------
# find unique streams in the dataframe
#------------------------------------------------------------------------------
def find_unique_streams(df_all_api):
df_all = df_all_api.copy(deep=True)
results = list(df_all.stream_id.unique()) # numpy array to list
return results
#------------------------------------------------------------------------------
# Finish current call
#------------------------------------------------------------------------------
def finish_call(df_all, row):
df = df_all.copy(deep=True)
my_type = GetInfo(df, row, 'api_type')
my_pred_end = GetInfo(df, row, 'pred_end')
# if it is transfer api
if my_type in ['h2d', 'd2h']:
# update the bytes
df = UpdateCell(df, row, 'bytes_done', GetInfo(df, row, 'size_kb'))
df = UpdateCell(df, row, 'bytes_left', 0)
# use pred_end to update the end time
#df = UpdateCell(df, row, 'end', my_pred_end)
# move current pos to the end time
df = UpdateCell(df, row, 'current_pos', my_pred_end)
df = UpdateCell(df, row, 'status', 'done')
return df
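
#------------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original module)
#------------------------------------------------------------------------------
# The scheduler in this module drives api calls through a simple status
# lifecycle: 'sleep' -> 'wake' -> 'done'. The selection helpers only need a
# 'status' column, so a toy table is enough to show how they behave.
if __name__ == '__main__':
    toy = pd.DataFrame({'status': ['done', 'sleep', 'wake', 'sleep']})
    print(Pick_first_in_sleep(toy))   # index of the first sleeping call -> 1
    print(Pick_first_in_wake(toy))    # index of the first awake call -> 2
    print(AllDone(toy))               # False: some calls still have work left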
``` |
{
"source": "3usi9/qinmanga-dl",
"score": 3
} |
#### File: 3usi9/qinmanga-dl/main.py
```python
import requests
import urllib
import os
from bs4 import BeautifulSoup
protocol="https://"
base="www.qinmanga.com"
urlp="/comic/"
urlid="25776"
postfix="/"
# def main():
handoutstr = base + urlp + urlid + postfix
handoutstr = protocol + urllib.parse.quote(handoutstr)
print("Handoutstr:",handoutstr);
req = requests.get(handoutstr);
soup = BeautifulSoup(req.text, 'html.parser')
data = soup.find_all('a',attrs={"class":"","target":"_blank"})
print("Title:",soup.title.text)
if not os.path.exists(soup.title.text):
os.mkdir(soup.title.text)
os.chdir(soup.title.text)
for i in data:
if not os.path.exists(i['title']):
os.mkdir(i['title'])
os.chdir(i['title'])
if os.path.exists('_Complete'):
print(i['title'] + " is already downloaded")
os.chdir('..')
continue
tmpreq = requests.get(protocol + base + i['href'])
tmpsoup = BeautifulSoup(tmpreq.text, 'html.parser')
tmpdata = tmpsoup.find_all('amp-img')
cnt = 1
for tmpi in tmpdata:
imgreq = ""
while True:
imgreq = requests.get(tmpi['src'])
if(imgreq.status_code == 200):
break
print("Write:",i['title']," - ",str(cnt)+".jpg")
f=open(str(cnt)+".jpg","wb")
f.write(imgreq.content)
f.close()
cnt = cnt+1
os.system('touch _Complete')
os.chdir('..')
# if(tmpreq.status_code
# if __name__ == '__main__':
# main()
``` |
{
"source": "3v1l91l/genetic_python",
"score": 3
} |
#### File: 3v1l91l/genetic_python/geneticTests.py
```python
import unittest
import genetic
class GeneticTests(unittest.TestCase):
GeneSet = " abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!.,"
def test_get_fitness_empty_guess(self):
expected_fitness = 0
actual_fitness = genetic.get_fitness(guess='', target='Test')
self.assertEqual(actual_fitness, expected_fitness)
def test_get_fitness_empty_target(self):
expected_fitness = 0
actual_fitness = genetic.get_fitness(guess='Test', target='')
self.assertEqual(actual_fitness, expected_fitness)
def test_get_fitness_max_fitness(self):
expected_fitness = len("Test")
actual_fitness = genetic.get_fitness(guess='Test', target='Test')
self.assertEqual(actual_fitness, expected_fitness)
def test_get_fitness_medium_similarity(self):
expected_fitness = 2
actual_fitness = genetic.get_fitness(guess='12st', target='Test')
self.assertEqual(actual_fitness, expected_fitness)
def test_generate_chromosome(self):
target = 'Test'
def fnGetFitness(genes):
return genetic.get_fitness(genes, target)
actual_chromosome = genetic._generate_chromosome(self.GeneSet, len(target), fnGetFitness)
self.assertEqual(len(actual_chromosome.Genes), len(target))
self.assertIsNotNone(actual_chromosome.Fitness)
def test_mutate_keeps_same_length(self):
target = 'Test'
gene_seq = target
def fnGetFitness(genes):
return genetic.get_fitness(genes, target)
actual_chromosome = genetic._mutate(gene_seq, self.GeneSet, fnGetFitness)
self.assertEqual(len(actual_chromosome.Genes), len(target))
def test_get_best_gets_target_gene_chromosome(self):
target = 'Test'
gene_seq = target
def fnGetFitness(genes):
return genetic.get_fitness(genes, target)
actual_chromosome = genetic.get_best(self.GeneSet, len(gene_seq), fnGetFitness, display)
self.assertEqual(actual_chromosome.Genes, target)
def display(chromosome):
return None
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "3v1lW1th1n/caldera",
"score": 2
} |
#### File: app/objects/c_agent.py
```python
from app.objects.base_object import BaseObject
class Agent(BaseObject):
@property
def unique(self):
return self.hash(self.paw)
@property
def display(self):
return dict(paw=self.paw, group=self.group, architecture=self.architecture, platform=self.platform,
server=self.server, location=self.location, pid=self.pid, ppid=self.ppid, trusted=self.trusted,
last_seen=self.last_seen, last_trusted_seen=self.last_trusted_seen, sleep_min=self.sleep_min,
sleep_max=self.sleep_max, executors=self.executors)
def __init__(self, paw, last_seen=None, architecture=None, platform=None, server=None, group=None,
location=None, pid=None, ppid=None, trusted=None, last_trusted_seen=None, sleep_min=None,
sleep_max=None, executors=None):
self.paw = paw
self.group = group
self.architecture = architecture
self.platform = platform
self.server = server
self.location = location
self.pid = pid
self.ppid = ppid
self.trusted = trusted
self.last_seen = last_seen
self.last_trusted_seen = last_trusted_seen
self.sleep_min = sleep_min
self.sleep_max = sleep_max
self.executors = executors
def store(self, ram):
existing = self.retrieve(ram['agents'], self.unique)
if not existing:
ram['agents'].append(self)
return self.retrieve(ram['agents'], self.unique)
else:
existing.update('trusted', self.trusted)
if existing.trusted:
existing.update('last_trusted_seen', self.last_trusted_seen)
existing.update('last_seen', self.last_seen)
existing.update('pid', self.pid)
existing.update('ppid', self.ppid)
existing.update('executors', self.executors)
existing.update('sleep_min', self.sleep_min)
existing.update('sleep_max', self.sleep_max)
existing.update('group', self.group)
return existing
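# --- Hedged usage sketch (illustration only, not part of the caldera source) ---
# 'ram' is assumed to be caldera's in-memory store, a dict holding an 'agents'
# list; the paw and field values below are invented.
if __name__ == '__main__':
    ram = dict(agents=[])
    first = Agent(paw='abc123', group='red', platform='linux', executors=['sh'])
    stored = first.store(ram)                  # no existing agent: appended
    beacon = Agent(paw='abc123', pid=42, trusted=True)
    refreshed = beacon.store(ram)              # existing agent: fields updated in place
    print(len(ram['agents']))                  # expected: 1 (same agent entry)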
``` |
{
"source": "3v1lW1th1n/locationapi-client-libraries",
"score": 2
} |
#### File: python/test/test_geolocation_api.py
```python
from __future__ import absolute_import
import unittest
import unwired
from unwired.api.geolocation_api import GEOLOCATIONApi # noqa: E501
from unwired.rest import ApiException
class TestGEOLOCATIONApi(unittest.TestCase):
"""GEOLOCATIONApi unit test stubs"""
def setUp(self):
self.api = unwired.api.geolocation_api.GEOLOCATIONApi() # noqa: E501
def tearDown(self):
pass
def test_geolocation(self):
"""Test case for geolocation
Geolocation # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
```
#### File: unwired/models/geolocation_response_schema.py
```python
import pprint
import re # noqa: F401
import six
class GeolocationResponseSchema(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'status': 'str',
'message': 'str',
'balance': 'int',
'balance_slots': 'int',
'lat': 'float',
'lon': 'float',
'accuracy': 'int',
'address': 'str',
'address_details': 'AddressDetailsSchema',
'aged': 'int',
'fallback': 'FallbackSchema'
}
attribute_map = {
'status': 'status',
'message': 'message',
'balance': 'balance',
'balance_slots': 'balance_slots',
'lat': 'lat',
'lon': 'lon',
'accuracy': 'accuracy',
'address': 'address',
'address_details': 'address_details',
'aged': 'aged',
'fallback': 'fallback'
}
def __init__(self, status=None, message=None, balance=None, balance_slots=None, lat=None, lon=None, accuracy=None, address=None, address_details=None, aged=None, fallback=None): # noqa: E501
"""GeolocationResponseSchema - a model defined in OpenAPI""" # noqa: E501
self._status = None
self._message = None
self._balance = None
self._balance_slots = None
self._lat = None
self._lon = None
self._accuracy = None
self._address = None
self._address_details = None
self._aged = None
self._fallback = None
self.discriminator = None
if status is not None:
self.status = status
if message is not None:
self.message = message
if balance is not None:
self.balance = balance
if balance_slots is not None:
self.balance_slots = balance_slots
if lat is not None:
self.lat = lat
if lon is not None:
self.lon = lon
if accuracy is not None:
self.accuracy = accuracy
if address is not None:
self.address = address
if address_details is not None:
self.address_details = address_details
if aged is not None:
self.aged = aged
if fallback is not None:
self.fallback = fallback
@property
def status(self):
"""Gets the status of this GeolocationResponseSchema. # noqa: E501
If the request is successful, ok is returned. Otherwise error is returned # noqa: E501
:return: The status of this GeolocationResponseSchema. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this GeolocationResponseSchema.
If the request is successful, ok is returned. Otherwise error is returned # noqa: E501
:param status: The status of this GeolocationResponseSchema. # noqa: E501
:type: str
"""
self._status = status
@property
def message(self):
"""Gets the message of this GeolocationResponseSchema. # noqa: E501
Any additional information from the server is returned here # noqa: E501
:return: The message of this GeolocationResponseSchema. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this GeolocationResponseSchema.
Any additional information from the server is returned here # noqa: E501
:param message: The message of this GeolocationResponseSchema. # noqa: E501
:type: str
"""
self._message = message
@property
def balance(self):
"""Gets the balance of this GeolocationResponseSchema. # noqa: E501
This represents the remaining balance on the API token. Requests that return error are not charged and do not affect balance # noqa: E501
:return: The balance of this GeolocationResponseSchema. # noqa: E501
:rtype: int
"""
return self._balance
@balance.setter
def balance(self, balance):
"""Sets the balance of this GeolocationResponseSchema.
This represents the remaining balance on the API token. Requests that return error are not charged and do not affect balance # noqa: E501
:param balance: The balance of this GeolocationResponseSchema. # noqa: E501
:type: int
"""
self._balance = balance
@property
def balance_slots(self):
"""Gets the balance_slots of this GeolocationResponseSchema. # noqa: E501
This represents the remaining balance of device slots. Requests that return error are not charged and do not affect balance. If -1 is returned, then observe it as an error while calculating slots balance. This element will only exist if you are on a device plan. # noqa: E501
:return: The balance_slots of this GeolocationResponseSchema. # noqa: E501
:rtype: int
"""
return self._balance_slots
@balance_slots.setter
def balance_slots(self, balance_slots):
"""Sets the balance_slots of this GeolocationResponseSchema.
This represents the remaining balance of device slots. Requests that return error are not charged and do not affect balance. If -1 is returned, then observe it as an error while calculating slots balance. This element will only exist if you are on a device plan. # noqa: E501
:param balance_slots: The balance_slots of this GeolocationResponseSchema. # noqa: E501
:type: int
"""
self._balance_slots = balance_slots
@property
def lat(self):
"""Gets the lat of this GeolocationResponseSchema. # noqa: E501
The latitude representing the location # noqa: E501
:return: The lat of this GeolocationResponseSchema. # noqa: E501
:rtype: float
"""
return self._lat
@lat.setter
def lat(self, lat):
"""Sets the lat of this GeolocationResponseSchema.
The latitude representing the location # noqa: E501
:param lat: The lat of this GeolocationResponseSchema. # noqa: E501
:type: float
"""
self._lat = lat
@property
def lon(self):
"""Gets the lon of this GeolocationResponseSchema. # noqa: E501
The longitude representing the location # noqa: E501
:return: The lon of this GeolocationResponseSchema. # noqa: E501
:rtype: float
"""
return self._lon
@lon.setter
def lon(self, lon):
"""Sets the lon of this GeolocationResponseSchema.
The longitude representing the location # noqa: E501
:param lon: The lon of this GeolocationResponseSchema. # noqa: E501
:type: float
"""
self._lon = lon
@property
def accuracy(self):
"""Gets the accuracy of this GeolocationResponseSchema. # noqa: E501
The accuracy of the position is returned in meters # noqa: E501
:return: The accuracy of this GeolocationResponseSchema. # noqa: E501
:rtype: int
"""
return self._accuracy
@accuracy.setter
def accuracy(self, accuracy):
"""Sets the accuracy of this GeolocationResponseSchema.
The accuracy of the position is returned in meters # noqa: E501
:param accuracy: The accuracy of this GeolocationResponseSchema. # noqa: E501
:type: int
"""
self._accuracy = accuracy
@property
def address(self):
"""Gets the address of this GeolocationResponseSchema. # noqa: E501
The physical address of the location # noqa: E501
:return: The address of this GeolocationResponseSchema. # noqa: E501
:rtype: str
"""
return self._address
@address.setter
def address(self, address):
"""Sets the address of this GeolocationResponseSchema.
The physical address of the location # noqa: E501
:param address: The address of this GeolocationResponseSchema. # noqa: E501
:type: str
"""
self._address = address
@property
def address_details(self):
"""Gets the address_details of this GeolocationResponseSchema. # noqa: E501
:return: The address_details of this GeolocationResponseSchema. # noqa: E501
:rtype: AddressDetailsSchema
"""
return self._address_details
@address_details.setter
def address_details(self, address_details):
"""Sets the address_details of this GeolocationResponseSchema.
:param address_details: The address_details of this GeolocationResponseSchema. # noqa: E501
:type: AddressDetailsSchema
"""
self._address_details = address_details
@property
def aged(self):
"""Gets the aged of this GeolocationResponseSchema. # noqa: E501
Shown when the location is based on a single measurement or those older than 90 days or is an LAC fallback # noqa: E501
:return: The aged of this GeolocationResponseSchema. # noqa: E501
:rtype: int
"""
return self._aged
@aged.setter
def aged(self, aged):
"""Sets the aged of this GeolocationResponseSchema.
Shown when the location is based on a single measurement or those older than 90 days or is an LAC fallback # noqa: E501
:param aged: The aged of this GeolocationResponseSchema. # noqa: E501
:type: int
"""
self._aged = aged
@property
def fallback(self):
"""Gets the fallback of this GeolocationResponseSchema. # noqa: E501
:return: The fallback of this GeolocationResponseSchema. # noqa: E501
:rtype: FallbackSchema
"""
return self._fallback
@fallback.setter
def fallback(self, fallback):
"""Sets the fallback of this GeolocationResponseSchema.
:param fallback: The fallback of this GeolocationResponseSchema. # noqa: E501
:type: FallbackSchema
"""
self._fallback = fallback
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GeolocationResponseSchema):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
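# --- Hedged usage sketch (illustration only, not produced by the generator) ---
# The attribute values below are invented; only attributes declared in
# openapi_types are passed to the constructor.
if __name__ == '__main__':
    resp = GeolocationResponseSchema(status='ok', lat=52.52, lon=13.405,
                                     accuracy=25, balance=99)
    print(resp.to_dict())    # plain dict keyed by the attribute names above
    print(resp == GeolocationResponseSchema(status='ok'))   # False: other fields differ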
``` |
{
"source": "3v1lW1th1n/pywbemtools",
"score": 3
} |
#### File: pywbemtools/pywbemcli/_click_extensions.py
```python
from collections import OrderedDict
import click
class PywbemcliGroup(click.Group):
"""
Extend Click Group class to:
1. Order the display of commands within the help.
The commands are ordered in the order that their definitions
appear in the source code for each command group.
This extension has a general name because it may be used for more than
one extension to the Click.Group class.
"""
# Use ordered dictionary to sort commands by their order defined in the
# _cmd_... source file.
def __init__(self, name=None, commands=None, **attrs):
"""
Use OrderedDict to keep order commands inserted into command dict
"""
if commands is None:
commands = OrderedDict()
elif not isinstance(commands, OrderedDict):
commands = OrderedDict(commands)
click.Group.__init__(self, name=name,
commands=commands,
**attrs)
def list_commands(self, ctx):
"""
Replace list_commands to eliminate the sorting
"""
return self.commands.keys()
class PywbemcliTopGroup(click.Group):
"""
Extensions to be used with the top level help (pywbemcli --help)
Extend Click Group class to:
1. Order the display of the commands and command groups in the top level
help output to sort and then put names defined in the predefined_list
at the end of the list of commands/groups. Since the ordering at the top
level cannot be tied to the order in which commands are inserted, we elected
to move the generic ones to the end of the list.
This extension has a general name because it may be used for more than
one extension to the Click.Group class.
"""
def list_commands(self, ctx):
"""
Order commands by sorting and then moving any commands defined in
move_to_end list to the end of the list.
"""
# tuple of commands to move to bottom after sort
move_to_end = ('connection', 'help', 'repl')
cmd_list = sorted(self.commands.keys())
pop_count = 0
# reorder list so the move_to_end list commands are at bottom
for i in range(len(cmd_list)):
if cmd_list[i - pop_count] in move_to_end:
cmd_list.append(cmd_list.pop(i - pop_count))
pop_count += 1
return cmd_list
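# --- Hedged usage sketch (illustration only, not part of pywbemcli) ---
# Stand-alone demonstration of the reordering: names are sorted and the generic
# 'connection', 'help' and 'repl' entries are moved to the end. The command
# names are placeholders built with bare click.Command objects.
if __name__ == '__main__':
    grp = PywbemcliTopGroup(name='top')
    for _name in ('server', 'help', 'class', 'connection', 'instance', 'repl'):
        grp.add_command(click.Command(_name))
    print(grp.list_commands(ctx=None))
    # -> ['class', 'instance', 'server', 'connection', 'help', 'repl']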
```
#### File: pywbemtools/pywbemcli/_cmd_server.py
```python
from __future__ import absolute_import, print_function
import click
from pywbem import ValueMapping, Error
from .pywbemcli import cli
from ._common import CMD_OPTS_TXT, format_table, raise_pywbem_error_exception
from ._click_extensions import PywbemcliGroup
# NOTE: A number of the options use double-dash as the short form. In those
# cases, a third definition of the options without the double-dash defines
# the corresponding option name, ex. 'include_qualifiers'. It should be
# defined with underscore and not dash
@cli.group('server', cls=PywbemcliGroup, options_metavar=CMD_OPTS_TXT)
def server_group():
"""
Command group for WBEM servers.
This command group defines commands to inspect and manage core components
of a WBEM server including server attributes, namespaces, the Interop
namespace, management profiles, and access to profile central instances.
In addition to the command-specific options shown in this help text, the
general options (see 'pywbemcli --help') can also be specified before the
'server' keyword.
"""
pass # pylint: disable=unnecessary-pass
@server_group.command('namespaces', options_metavar=CMD_OPTS_TXT)
@click.pass_obj
def server_namespaces(context, **options):
"""
List the namespaces of the server.
"""
# pylint: disable=too-many-function-args
context.execute_cmd(lambda: cmd_server_namespaces(context, options))
@server_group.command('interop', options_metavar=CMD_OPTS_TXT)
@click.pass_obj
def server_interop(context):
"""
Get the Interop namespace of the server.
"""
# pylint: disable=too-many-function-args
context.execute_cmd(lambda: cmd_server_interop(context))
@server_group.command('brand', options_metavar=CMD_OPTS_TXT)
@click.pass_obj
def server_brand(context):
"""
Get the brand of the server.
Brand information is defined by the server implementor and may or may
not be available. Pywbem attempts to collect the brand information from
multiple sources.
"""
# pylint: disable=too-many-function-args
context.execute_cmd(lambda: cmd_server_brand(context))
@server_group.command('info', options_metavar=CMD_OPTS_TXT)
@click.pass_obj
def server_info(context):
"""
Get information about the server.
The information includes CIM namespaces and server brand.
"""
context.execute_cmd(lambda: cmd_server_info(context))
@server_group.command('profiles', options_metavar=CMD_OPTS_TXT)
@click.option('-o', '--organization', type=str, metavar='ORG-NAME',
required=False,
help='Filter by the defined organization. (ex. -o DMTF)')
@click.option('-p', '--profile', type=str, metavar='PROFILE-NAME',
required=False,
help='Filter by the profile name. (ex. -p Array)')
@click.pass_obj
def server_profiles(context, **options):
"""
List management profiles advertized by the server.
Retrieve the CIM instances representing the WBEM management profiles
advertized by the WBEM server, and display information about each profile.
WBEM management profiles are defined by DMTF and SNIA and define the
management functionality that is available.
The retrieved profiles can be filtered using the --organization and
--profile options.
The output is formatted as a table showing the organization, name, and
version for each profile. The --output-format option is ignored unless it
specifies a table format.
"""
context.execute_cmd(lambda: cmd_server_profiles(context, options))
@server_group.command('centralinsts', options_metavar=CMD_OPTS_TXT)
@click.option('-o', '--organization', type=str, metavar='ORG-NAME',
required=False,
help='Filter by the defined organization. (ex. -o DMTF)')
@click.option('-p', '--profile', type=str, metavar='PROFILE-NAME',
required=False,
help='Filter by the profile name. (ex. -p Array)')
@click.option('--cc', '--central-class', 'central_class', type=str,
metavar='CLASSNAME', required=False,
help='Optional. Required only if the profile supports only the '
'scoping methodology')
@click.option('--sc', '--scoping-class', 'scoping_class', type=str,
metavar='CLASSNAME', required=False,
help='Optional. Required only if the profile supports only the '
'scoping methodology')
@click.option('--sp', '--scoping-path', 'scoping_path', type=str,
metavar='CLASSLIST', required=False, multiple=True,
help='Optional. Required only if the profile supports only the '
'scoping methodology. Multiples allowed')
@click.option('--rd', '--reference-direction', 'reference_direction',
type=click.Choice(['snia', 'dmtf']),
default='dmtf',
show_default=True,
help='Navigation direction for association.')
@click.pass_obj
def centralinsts(context, **options):
"""
List central instances of mgmt profiles on the server.
Retrieve the CIM instances that are central instances of the specified
WBEM management profiles, and display these instances. By default, all
management profiles advertized on the server are used. The profiles
can be filtered by using the --organization and --profile options.
The central instances are determined using all methodologies defined
in DSP1033 V1.1 in the order of GetCentralInstances, central class,
and scoping class methodology.
Profiles that only use the scoping class methodology require the
specification of the --central-class, --scoping-class, and --scoping-path
options because additional information is needed to perform the scoping
class methodology.
The retrieved central instances are displayed along with the organization,
name, and version of the profile they belong to, formatted as a table.
The --output-format general option is ignored unless it specifies a table
format.
"""
context.execute_cmd(lambda: cmd_server_centralinsts(context, options))
# TODO: reactivate and implement this in version 0.6.0
# @server_group.command('test_pull', options_metavar=CMD_OPTS_TXT)
# @click.pass_obj
# def server_test_pull(context):
# """
# Test existence of pull operations.
#
# Test whether the pull WBEMConnection methods (ex. OpenEnumerateInstances)
# exist on the WBEM server.
#
# This command tests all of the pull operations and reports any that
# return a NOT_SUPPORTED response.
# """
# context.execute_cmd(lambda: cmd_server_test_pull(context))
###############################################################
# Server cmds
###############################################################
def cmd_server_test_pull(context):
"""
Test the execution of pull operations against the target server.
Executes pull operations and reports whether pull is supported.
"""
raise click.ClickException('test_pull Not implemented')
def cmd_server_namespaces(context, options):
"""
Display namespaces in the current WBEM server
"""
try:
namespaces = context.wbem_server.namespaces
namespaces.sort()
context.spinner_stop()
# create list for each row
rows = [[ns] for ns in namespaces]
click.echo(format_table(rows, ['Namespace Name'],
title='Server Namespaces:',
table_format=context.output_format))
except Error as er:
raise click.ClickException('{}: {}'.format(er.__class__.__name__, er))
def cmd_server_interop(context):
"""
Display interop namespace in the current WBEM server
"""
try:
interop_ns = context.wbem_server.interop_ns
context.spinner_stop()
rows = [[interop_ns]]
click.echo(format_table(rows, ['Namespace Name'],
title='Server Interop Namespace:',
table_format=context.output_format))
except Error as er:
raise_pywbem_error_exception(er)
def cmd_server_brand(context):
"""
Display the brand information of the current WBEM server
"""
try:
brand = context.wbem_server.brand
context.spinner_stop()
rows = [[brand]]
click.echo(format_table(rows, ['WBEM server brand'],
title='Server brand:',
table_format=context.output_format))
except Error as er:
raise_pywbem_error_exception(er)
def cmd_server_info(context):
"""
Display general overview of info from current WBEM server
"""
try:
# execute the namespaces to force contact with server before
# turning off the spinner.
server = context.wbem_server
server.namespaces # pylint: disable=pointless-statement
context.spinner_stop()
server = context.wbem_server
rows = []
headers = ['Brand', 'Version', 'Interop Namespace', 'Namespaces']
if len(server.namespaces) > 3:
namespaces = '\n'.join(server.namespaces)
else:
namespaces = ', '.join(server.namespaces)
rows.append([server.brand, server.version,
server.interop_ns,
namespaces])
click.echo(format_table(rows, headers,
title='Server General Information',
table_format=context.output_format))
except Error as er:
raise_pywbem_error_exception(er)
def get_profile_info(org_vm, inst):
"""
Get the org, name, and version from the profile instance and
return them as a tuple.
"""
org = org_vm.tovalues(inst['RegisteredOrganization'])
name = inst['RegisteredName']
vers = inst['RegisteredVersion']
return org, name, vers
def cmd_server_profiles(context, options):
"""
Display the management profiles advertized by the current WBEM server
"""
server = context.wbem_server
try:
found_server_profiles = server.get_selected_profiles(
registered_org=options['organization'],
registered_name=options['profile'])
org_vm = ValueMapping.for_property(server,
server.interop_ns,
'CIM_RegisteredProfile',
'RegisteredOrganization')
rows = []
for inst in found_server_profiles:
row = get_profile_info(org_vm, inst)
rows.append(row)
# sort by org
rows.sort(key=lambda x: (x[0], x[1]))
headers = ['Organization', 'Registered Name', 'Version']
click.echo(format_table(rows, headers,
title='Advertised management profiles:',
table_format=context.output_format))
except Error as er:
raise_pywbem_error_exception(er)
def cmd_server_centralinsts(context, options):
"""
Display general information on the central instances of one or more
profiles.
"""
server = context.wbem_server
try:
found_server_profiles = server.get_selected_profiles(
registered_org=options['organization'],
registered_name=options['profile'])
org_vm = ValueMapping.for_property(server,
server.interop_ns,
'CIM_RegisteredProfile',
'RegisteredOrganization')
rows = []
for inst in found_server_profiles:
pi = get_profile_info(org_vm, inst)
row = [":".join(pi)]
try:
ci = server.get_central_instances(
inst.path,
central_class=options['central_class'],
scoping_class=options['scoping_class'],
scoping_path=options['scoping_path'],
reference_direction=options['reference_direction'])
row.append("\n".join([str(p) for p in ci]))
# mark current inst as failed and continue
except Exception as ex: # pylint: disable=broad-except
click.echo('Exception: {} {}'.format(row, ex))
row.append("Failed")
rows.append(row)
# sort by org
rows.sort(key=lambda x: (x[0]))
headers = ['Profile', 'Central Instance paths']
click.echo(format_table(rows,
headers,
title='Advertised Central Instances:',
table_format=context.output_format))
except Error as er:
raise_pywbem_error_exception(er)
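# --- Hedged usage notes (added for illustration) ---
# The command functions above are reached through the pywbemcli CLI, e.g.:
#   pywbemcli -s http://localhost server namespaces
#   pywbemcli -s http://localhost server profiles -o DMTF
#   pywbemcli -s http://localhost server centralinsts -p Array
# The server URL is a placeholder; table output follows --output-format.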
```
#### File: pywbemtools/pywbemcli/_connection_repository.py
```python
from __future__ import absolute_import, print_function
import os
import yaml
import six
from ._pywbem_server import PywbemServer
if six.PY2:
import codecs # pylint: disable=wrong-import-order
DEFAULT_CONNECTIONS_FILE = 'pywbemcli_connection_definitions.yaml'
DEFAULT_CONNECTIONS_PATH = os.path.join(os.getcwd(), DEFAULT_CONNECTIONS_FILE)
class ConnectionRepository(object):
# pylint: disable=useless-object-inheritance
"""
Manage the set of connections defined. The data for the predefined
connection exists on disk between pywbemcli sessions and within an
instance of ConnectionRepository while pywbemcli is running.
"""
# class variables
_pywbemcli_servers = {}
_loaded = False
_connections_file = None
connections_group_name = 'connection_definitions'
default_connection_grp_name = 'default_connection_name'
# default connection name Must be the name of a
# connection in the connections file or None.
default_connection_name = None
# class level variable so
def __init__(self, connections_file=None):
"""
Initialize the object instance if it has not already been initialized
(class level variable is not None) by reading the connection file.
"""
if not ConnectionRepository._loaded:
if connections_file is None:
ConnectionRepository._connections_file = \
DEFAULT_CONNECTIONS_PATH
else:
ConnectionRepository._connections_file = connections_file
self._read_connections_file()
else:
if connections_file is not None and \
connections_file != self._connections_file:
raise ValueError("Cannot change connection file name after"
"initalization original {} new {}".
format(self._connections_file,
connections_file))
@property
def connections_file(self):
"""
Return the current connections file
"""
return self._connections_file
def __repr__(self):
"""
Return a string representation of the
servers dictionary that is suitable for debugging.
The order of items in the result is the preserved order of
adding or deleting items.
The lexical case of the keys in the result is the preserved lexical
case.
"""
# items = [_format("{0!A}: {1!A}", key, value)
# for key, value in self._pywbemcli_servers.iteritems()]
items = []
for key, value in self._pywbemcli_servers.items():
items.append('{}: {}'.format(key, value))
items_str = ', '.join(items)
return "{0.__class__.__name__}({{{1}}}, default_connection {2})]". \
format(self, items_str, self.default_connection_name)
def __contains__(self, key):
return key in self._pywbemcli_servers
def __getitem__(self, key):
return self._pywbemcli_servers[key]
def __delitem__(self, key):
del self._pywbemcli_servers[key]
self._write_file()
def __len__(self):
return len(ConnectionRepository._pywbemcli_servers)
def __iter__(self):
return six.iterkeys(ConnectionRepository._pywbemcli_servers)
def items(self):
"""
Return a list of the items in the server repo
"""
return list(self.__iteritems__())
def keys(self):
"""
Return a copied list of the dictionary keys, in their original case.
"""
return list(self.iterkeys())
def __iteritems__(self): # pylint: disable=no-self-use
return six.iteritems(self._pywbemcli_servers)
def iterkeys(self):
"""
Return an iterator through the dictionary keys in their original
case, preserving the original order of items.
"""
for item in six.iterkeys(self._pywbemcli_servers):
yield item
def iteritems(self):
"""
Return an iterator through the dictionary items, where each item is a
tuple of its original key and its value, preserving the original order
of items.
"""
for item in six.iteritems(self._pywbemcli_servers):
yield item[1]
def _read_connections_file(self):
"""
If there is a file, read it in and install into the dictionary.
"""
if os.path.isfile(self._connections_file):
with self.open_file(self._connections_file, 'r') as _fp:
try:
dict_ = yaml.safe_load(_fp)
# put all the connection definitions into a group
# in the connection file
connections_dict = dict_[
ConnectionRepository.connections_group_name]
ConnectionRepository.default_connection_name = dict_[
ConnectionRepository.default_connection_grp_name]
try:
for name, svr in six.iteritems(connections_dict):
ConnectionRepository._pywbemcli_servers[name] = \
PywbemServer.create(
replace_underscores=True, **svr)
ConnectionRepository._loaded = True
except KeyError as ke:
raise KeyError("Items missing from record %s in "
"connections file %s" %
(ke, self._connections_file))
except ValueError as ve:
raise ValueError("Invalid YAML in connections file %s. "
"Exception %s" %
(self._connections_file, ve))
def add(self, name, svr_definition):
"""
Add a new connection to the connections repository or replace an
existing connection. Users of this method should check before add if
they do not want to replace an existing entry.
"""
assert svr_definition.mock_server is not None # must be empty list
ConnectionRepository._pywbemcli_servers[name] = svr_definition
self._write_file()
def delete(self, name): # pylint: disable=no-self-use
"""Delete a definition from the connections repository"""
del ConnectionRepository._pywbemcli_servers[name]
# remove default_name if it is the one being deleted
if name == self.default_connection_name:
self.default_connection_name = None
self._write_file()
@staticmethod
def open_file(filename, file_mode='w'):
"""
A static convenience function that performs the open of the connection
definitions file correctly for different versions of Python.
This covers the issue where the file should be opened in text mode but
that is done differently in Python 2 and Python 3.
The returned file-like object must be closed by the caller.
Parameters:
filename(:term:`string`):
Name of the file where the recorder output will be written
file_mode(:term:`string`):
Optional file mode. The default is 'w' which overwrites any
existing file. if 'a' is used, the data is appended to any
existing file.
Returns:
File-like object.
"""
if six.PY2:
# Open with codecs to define text mode
return codecs.open(filename, mode=file_mode, encoding='utf-8')
return open(filename, file_mode, encoding='utf8')
def _write_file(self): # pylint: disable=no-self-use
"""
Write the connections file if one has been loaded.
If the dictionary is empty, it attempts to delete the file.
If there is an existing file it is moved to filename.bak and a new
current file written.
"""
conn_dict = {}
if self._pywbemcli_servers:
if ConnectionRepository._pywbemcli_servers:
conn_dict = \
{name: value.to_dict() for name, value in
ConnectionRepository._pywbemcli_servers.items()}
# build dictionary for yaml output
yaml_dict = {ConnectionRepository.connections_group_name: conn_dict,
ConnectionRepository.default_connection_grp_name:
self.default_connection_name}
# Write to tmpfile and if successful create backup file and
# move the tmpfile to be the new connections file contents.
tmpfile = '{}.tmp'.format(self._connections_file)
with self.open_file(tmpfile, 'w') as _fp:
data = yaml.safe_dump(yaml_dict,
encoding=None,
allow_unicode=True,
default_flow_style=False,
indent=4)
data = data.replace('\n\n', '\n') # YAML dump dups newlines
_fp.write(data)
_fp.flush()
# create bak file and then rename tmp file
if os.path.isfile(self._connections_file):
bakfile = '{}.bak'.format(self._connections_file)
if os.path.isfile(bakfile):
os.remove(bakfile)
if os.path.isfile(self._connections_file):
os.rename(self._connections_file, bakfile)
if self._pywbemcli_servers:
os.rename(tmpfile, self._connections_file)
def set_default_connection(self, connection_name):
"""
Set the connection defined by connection_name to be the current
connection in the connections file.
This is accomplished by modifying the "current_connection" entry
and rewriting the file.
"""
if connection_name in self._pywbemcli_servers:
ConnectionRepository.default_connection_name = connection_name
self._write_file()
else:
# TODO: should "Default failed" be part of this message?
raise ValueError('Connection name "{}" does not exist in '
'connection repository {}'
.format(connection_name, self.connections_file))
def get_default_connection_name(self):
"""
Returns the name of the current connection in the connections file.
This may be the name of a connection in the connections file or
None if no connection is defined as the current connection.
"""
return self.default_connection_name
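# --- Hedged usage sketch (illustration only, not part of pywbemcli) ---
# Read-only walk over an (invented) connections file; if the file does not
# exist, the repository simply starts out empty.
if __name__ == '__main__':
    repo = ConnectionRepository('example_connections.yaml')   # hypothetical path
    for conn_name in repo:
        print(conn_name, repo[conn_name])
    print('default connection:', repo.get_default_connection_name())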
```
#### File: pywbemtools/pywbemcli/_pywbemcli_operations.py
```python
from __future__ import absolute_import, print_function
import os
import sys
import traceback
import click
from pywbem import WBEMConnection, MOFParseError
import pywbem_mock
from .config import DEFAULT_MAXPULLCNT
# __all__ = ['PYWBEMCLIConnection', 'PYWBEMCLIFakedConnection']
# pylint: disable=useless-object-inheritance
class PYWBEMCLIConnectionMixin(object):
"""
Mixin class to extend WBEMConnection with a set of methods that use the
iter<...> methods as the basis for getting Instances, etc. but add the
generator processing to retrieve the instances. These can be used within
pywbemcli to allow one method call to act as either a pull or traditional
operation, pushing the differences into this mixin.
These methods do not resolve the core issues between the traditional and
pull operations such as the fact that only the pull operations pass
the FilterQuery parameter.
They are a pywbemcli convenience to simplify the individual action processing
methods to a single call.
"""
def PyWbemcliEnumerateInstancePaths(self, ClassName, namespace=None,
FilterQueryLanguage=None,
FilterQuery=None,
OperationTimeout=None,
ContinueOnError=None,
MaxObjectCount=DEFAULT_MAXPULLCNT,
**extra):
# pylint: disable=unused-argument
# pylint: disable=invalid-name
"""
Execute IterEnumerateInstancePaths and retrieve the instance paths. Returns
the retrieved paths.
Uses the same parameters as the IterEnumerateInstancePaths method.
All exceptions from the underlying command are passed through this
method.
"""
result = [path for path in self.IterEnumerateInstancePaths(
ClassName,
namespace=namespace,
FilterQueryLanguage=FilterQueryLanguage,
FilterQuery=FilterQuery,
OperationTimeout=OperationTimeout,
ContinueOnError=ContinueOnError,
MaxObjectCount=MaxObjectCount)]
return result
def PyWbemcliEnumerateInstances(self, ClassName, namespace=None,
LocalOnly=None,
DeepInheritance=None,
IncludeQualifiers=None,
IncludeClassOrigin=None, PropertyList=None,
FilterQueryLanguage=None, FilterQuery=None,
OperationTimeout=None, ContinueOnError=None,
MaxObjectCount=DEFAULT_MAXPULLCNT,
**extra):
# pylint: disable=unused-argument
# pylint: disable=invalid-name
"""
Execute IterEnumerateInstances and retrieve the instances. Returns
the retrieved instances.
Uses the same parameters as the IterEnumerateInstances method.
All exceptions from the underlying method are passed through this
method.
"""
result = [inst for inst in self.IterEnumerateInstances(
ClassName,
namespace=namespace,
LocalOnly=LocalOnly,
DeepInheritance=DeepInheritance,
IncludeQualifiers=IncludeQualifiers,
IncludeClassOrigin=IncludeClassOrigin,
PropertyList=PropertyList,
FilterQueryLanguage=FilterQueryLanguage,
FilterQuery=FilterQuery,
OperationTimeout=OperationTimeout,
ContinueOnError=ContinueOnError,
MaxObjectCount=MaxObjectCount)]
return result
def PyWbemcliReferenceInstancePaths(self, InstanceName, ResultClass=None,
Role=None,
FilterQueryLanguage=None,
FilterQuery=None,
OperationTimeout=None,
ContinueOnError=None,
MaxObjectCount=DEFAULT_MAXPULLCNT,
**extra):
# pylint: disable=unused-argument
# pylint: disable=invalid-name
"""
Execute IterReferenceInstancePaths and retrieve the paths. Returns
the paths that result from iterating the IterReferenceInstancePaths.
Uses the same parameters as the IterReferenceInstancePaths method.
All exceptions from the underlying method are passed through this
method.
"""
result = [path for path in self.IterReferenceInstancePaths(
InstanceName,
ResultClass=ResultClass,
Role=Role,
FilterQueryLanguage=FilterQueryLanguage,
FilterQuery=FilterQuery,
OperationTimeout=OperationTimeout,
ContinueOnError=ContinueOnError,
MaxObjectCount=MaxObjectCount)]
return result
def PyWbemcliReferenceInstances(self, InstanceName, ResultClass=None,
Role=None, IncludeQualifiers=None,
IncludeClassOrigin=None, PropertyList=None,
FilterQueryLanguage=None, FilterQuery=None,
OperationTimeout=None, ContinueOnError=None,
MaxObjectCount=DEFAULT_MAXPULLCNT,
**extra):
# pylint: disable=unused-argument
# pylint: disable=invalid-name
"""
Execute IterReferenceInstances and retrieve the instances. Returns
the retrieved instances.
Uses the same parameters as the IterReferenceInstances method.
All exceptions from the underlying method are passed through this
method.
"""
result = [inst for inst in self.IterReferenceInstances(
InstanceName,
ResultClass=ResultClass,
Role=Role,
IncludeQualifiers=IncludeQualifiers,
IncludeClassOrigin=IncludeClassOrigin,
PropertyList=PropertyList,
FilterQueryLanguage=FilterQueryLanguage,
FilterQuery=FilterQuery,
OperationTimeout=OperationTimeout,
ContinueOnError=ContinueOnError,
MaxObjectCount=MaxObjectCount)]
return result
def PyWbemcliAssociatorInstancePaths(self, InstanceName, AssocClass=None,
ResultClass=None,
Role=None, ResultRole=None,
FilterQueryLanguage=None,
FilterQuery=None,
OperationTimeout=None,
ContinueOnError=None,
MaxObjectCount=DEFAULT_MAXPULLCNT,
**extra):
# pylint: disable=unused-argument
# pylint: disable=invalid-name
"""
Execute IterAssociatorInstancePaths and retrieve the paths. Returns
the paths that result from iterating the IterAssociatorInstancePaths.
Uses the same parameters as the IterAssociatorInstancePaths method.
All exceptions from the underlying method are passed through this
method.
"""
result = [path for path in self.IterAssociatorInstancePaths(
InstanceName,
AssocClass=AssocClass,
ResultClass=ResultClass,
Role=Role,
ResultRole=ResultRole,
FilterQueryLanguage=FilterQueryLanguage,
FilterQuery=FilterQuery,
OperationTimeout=OperationTimeout,
ContinueOnError=ContinueOnError,
MaxObjectCount=MaxObjectCount)]
return result
def PyWbemcliAssociatorInstances(self, InstanceName, AssocClass=None,
ResultClass=None,
Role=None, ResultRole=None,
IncludeQualifiers=None,
IncludeClassOrigin=None, PropertyList=None,
FilterQueryLanguage=None, FilterQuery=None,
OperationTimeout=None,
ContinueOnError=None,
MaxObjectCount=DEFAULT_MAXPULLCNT,
**extra):
# pylint: disable=unused-argument
# pylint: disable=invalid-name
"""
Execute IterAssociatorInstances and retrieve the instances. Returns
the instances that result from iterating the IterAssociatorInstances.
Uses the same parameters as the IterAssociatorInstances method.
All exceptions from the underlying method are passed through this
method.
"""
result = [inst for inst in self.IterAssociatorInstances(
InstanceName,
AssocClass=AssocClass,
ResultClass=ResultClass,
Role=Role,
ResultRole=ResultRole,
IncludeQualifiers=IncludeQualifiers,
IncludeClassOrigin=IncludeClassOrigin,
PropertyList=PropertyList,
FilterQueryLanguage=FilterQueryLanguage,
FilterQuery=FilterQuery,
OperationTimeout=OperationTimeout,
ContinueOnError=ContinueOnError,
MaxObjectCount=MaxObjectCount)]
return result
def PyWbemcliQueryInstances(self, FilterQueryLanguage, FilterQuery,
namespace=None, ReturnQueryResultClass=None,
OperationTimeout=None, ContinueOnError=None,
MaxObjectCount=DEFAULT_MAXPULLCNT,
**extra):
# pylint: disable=invalid-name
"""
Execute IterQueryInstances and retrieve the instances. Returns
the instances that result from iterating the IterQueryInstances.
Uses the same parameters as the IterQueryInstances method.
All exceptions from the underlying method are passed through this
method.
"""
result = [inst for inst in self.IterQueryInstances(
FilterQueryLanguage,
FilterQuery,
namespace=namespace,
ReturnQueryResultClass=ReturnQueryResultClass,
OperationTimeout=OperationTimeout,
ContinueOnError=ContinueOnError,
MaxObjectCount=MaxObjectCount)]
return result
class BuildRepositoryMixin(object):
# pylint: disable=too-few-public-methods
"""
Builds the mock repository from the definitions in self._mock_server.
Each item in the iterable in self._mock_server must be a file path
identifying a file to be used to prepare for the mock test.
Each file path may be:
a MOF file if the suffix is 'mof'. A MOF file is compiled into the
repository with the compile_mof_file() method, or a Python file if the
suffix is 'py'. A Python file is executed with the CONN, SERVER, and
VERBOSE globals defined.
Returns a variety of errors for file not found, MOF syntax errors, and
python syntax errors.
"""
def build_repository(self, conn, server, file_path_list, verbose):
"""
Build the repository from the file_path list
"""
for file_path in file_path_list:
if not os.path.exists(file_path):
raise IOError('No such file: {}'.format(file_path))
ext = os.path.splitext(file_path)[1]
if ext == '.mof':
try:
# Displays any MOFParseError already
conn.compile_mof_file(file_path)
except MOFParseError:
# Abort the entire pywbemcli command because the
# MOF compilation might have caused inconsistencies in the
# mock repository.
click.echo(
"Mock MOF file '{}' failed compiling (see above)".
format(file_path),
err=True)
raise click.Abort()
else:
assert ext == '.py' # already checked
with open(file_path) as fp:
# May raise IOError
file_source = fp.read()
# the exec includes CONN and VERBOSE
globalparams = {'CONN': conn,
'SERVER': server,
'VERBOSE': verbose}
try:
# Using compile+exec instead of just exec allows
# specifying the file name, causing it to appear in
# any tracebacks.
file_code = compile(file_source, file_path, 'exec')
# pylint: disable=exec-used
exec(file_code, globalparams, None)
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
tb = traceback.format_exception(exc_type, exc_value,
exc_traceback)
# Abort the entire pywbemcli command because the
# script might have caused inconsistencies in the
# Python namespace and in the mock repository.
click.echo(
"Mock Python script '{}' failed:\n{}".
format(file_path, "\n".join(tb)),
err=True)
raise click.Abort()
class PYWBEMCLIConnection(WBEMConnection, PYWBEMCLIConnectionMixin):
"""
PyWBEMCLIConnection subclass adds the methods added by
PYWBEMCLIConnectionMixin
"""
def __init__(self, *args, **kwargs):
"""
ctor passes all input parameters to superclass
"""
super(PYWBEMCLIConnection, self).__init__(*args, **kwargs)
class PYWBEMCLIFakedConnection(pywbem_mock.FakedWBEMConnection,
PYWBEMCLIConnectionMixin,
BuildRepositoryMixin):
"""
PyWBEMCLIFakedConnection subclass adds the methods added by
PYWBEMCLIConnectionMixin
"""
def __init__(self, *args, **kwargs):
"""
ctor passes all input parameters to superclass
"""
super(PYWBEMCLIFakedConnection, self).__init__(*args, **kwargs)
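# --- Hedged usage sketch (illustration only, not part of pywbemcli) ---
# Builds a tiny mock repository directly from pywbem CIM objects (mirroring the
# approach of the mock scripts in the test suite) and then exercises one of the
# PyWbemcli* wrapper methods; the class and property names are invented.
if __name__ == '__main__':
    from pywbem import CIMClass, CIMProperty, CIMQualifier
    conn = PYWBEMCLIFakedConnection(default_namespace='root/cimv2')
    foo = CIMClass(
        'CIM_FooExample',
        properties={'InstanceID': CIMProperty(
            'InstanceID', None, type='string',
            qualifiers={'Key': CIMQualifier('Key', True)})})
    conn.add_cimobjects(foo)
    print(conn.PyWbemcliEnumerateInstances('CIM_FooExample'))   # -> [] (no instances)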
```
#### File: tests/manual/test_pegasus.py
```python
from __future__ import print_function, absolute_import
import unittest
import re
from subprocess import Popen, PIPE
import six
class ClientTest(unittest.TestCase):
"""Top level container. Performs any setup and teardown"""
def setUp(self):
"""Setup the test
"""
self.host = 'http://localhost'
self.verbose = False
class TestsContainer(ClientTest):
"""Container class for all tests"""
def execute_cmd(self, cmd_str): # pylint: disable=no-self-use
"""Execute the command defined by cmd_str and return results."""
if self.verbose:
print('cmd {}'.format(cmd_str))
# Disable python warnings for pywbemcli call.See issue #42
command = 'export PYTHONWARNINGS="" && {}'.format(cmd_str)
proc = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
std_out, std_err = proc.communicate()
exitcode = proc.returncode
if six.PY3:
std_out = std_out.decode()
std_err = std_err.decode()
if self.verbose:
print('rtn {}\n{}\n{}'.format(std_out, std_err, exitcode))
# return tuple of exitcode, stdout, stderr
return exitcode, std_out, std_err
def assert_not_found(self, regex, test_str):
""" Test of find regex on multiline string.
If regex is a list each entry is tested.
"""
if isinstance(regex, list):
for i in regex:
self.assert_not_found(i, test_str)
else:
match = re.search(regex, test_str)
if match:
self.fail('Found in error search regex {}, str {}'
.format(regex, test_str))
def assert_found(self, regex, test_str):
""" Test of find regex on multiline string.
If regex is a list each entry is tested.
"""
if isinstance(regex, list):
for i in regex:
self.assert_found(i, test_str)
else:
match = re.search(regex, test_str)
if match is None:
self.fail('Failed search regex {}, str {}'
.format(regex, test_str))
def assertRegexp(self, regex, test_str):
"""
This function bridges the difference between the unittest assertRegex
(Python 3) and assertRegexpMatches (Python 2) methods.
"""
if six.PY3:
# pylint: disable=no-member
return self.assertRegex(test_str, regex)
# pylint: disable=no-member, deprecated-method
return self.assertRegexpMatches(test_str,
regex) # pylint: disable=no-member
class ClassTests(TestsContainer):
"""Test operations in the class group"""
def class_cmd(self, params):
"""Adds the cmd name prefix and executes"""
cmd = 'pywbemcli -s {} class {}'.format(self.host, params)
exitcode, std_out_str, std_err_str = self.execute_cmd(cmd)
return exitcode, std_out_str, std_err_str
def test_get_simple(self):
"""Test a get of CIM_ManagedElement"""
exitcode, out, err = self.class_cmd('get CIM_ManagedElement')
self.assertEqual(exitcode, 0)
self.assertEqual(err, '', 'Expect no std_err. Found {}'.format(err))
self.assert_found('CIM_ManagedElement ', out)
def test__get_localonly(self):
"""Test class get --local-only"""
exitcode, out, err = self.class_cmd('get CIM_ManagedElement -l')
self.assertEqual(exitcode, 0)
self.assertEqual(err, "")
self.assert_found('CIM_ManagedElement', out)
exitcode, out, err = self.class_cmd(
'get CIM_ManagedElement --local-only')
self.assertEqual(exitcode, 0)
self.assert_found('CIM_ManagedElement', out)
def test_get_no_includequalifiers(self):
""" """
exitcode, out, err = self.class_cmd(
'get CIM_ManagedElement --no-qualifiers')
self.assertEqual(exitcode, 0)
self.assert_found('CIM_ManagedElement', out)
def test_propertylist(self):
"""Test property list on the get"""
exitcode, out, err = self.class_cmd(
'get CIM_ManagedElement -p InstanceID')
self.assertEqual(exitcode, 0)
self.assert_found(['class CIM_ManagedElement', 'InstanceID'], out)
exitcode, out, err = self.class_cmd(
'get CIM_ManagedElement -p InstanceID -p Caption')
self.assertEqual(exitcode, 0)
self.assert_found('class CIM_ManagedElement', out)
self.assert_found('InstanceID', out)
self.assert_found('Caption', out)
exitcode, out, err = self.class_cmd(
'get CIM_ManagedElement -p ""')
self.assertEqual(exitcode, 0)
self.assert_found('class CIM_ManagedElement', out)
self.assert_not_found(['InstanceID', 'Caption'], out)
def test_simple_invoke(self):
"""Execute simple invoke method defined in pegasus"""
exitcode, out, err = self.class_cmd(
'invokemethod Test_IndicationProviderClass '
'SendTestIndicationsCount -p indicationSendCount=0 '
' -n test/TestProvider')
self.assertEqual(exitcode, 0)
# TODO finish this based on the test_ops in the tools directory
# cmd "class get CIM_ManagedElement -c"
# cmd "class get CIM_ManagedElement --include-classorigin"
# cmd "class get CIM_ManagedElement --namespace root/PG_Interop"
# cmd "class get CIM_ManagedElement - root/PG_Interop"
# TODO create tests for qualifier, server
class InstanceTests(TestsContainer):
"""Test operations in the instance group"""
def instance_cmd(self, params):
"""Adds the instance cmd name prefix and executes"""
cmd = 'pywbemcli -s {} instance {}'.format(self.host, params)
exitcode, std_out_str, std_err_str = self.execute_cmd(cmd)
return exitcode, std_out_str, std_err_str
def test_enumerate_simple(self):
""" """
exitcode, out, err = self.instance_cmd('enumerate PyWBEM_Person')
self.assertEqual(exitcode, 0)
self.assert_found('instance of PyWBEM_Person', out)
def test_enumerate_proplist(self):
""" """
exitcode, out, err = self.instance_cmd('enumerate PyWBEM_Person '
'-p Name')
self.assertEqual(exitcode, 0)
self.assert_found(['instance of PyWBEM_Person', 'Name'], out)
self.assert_not_found('CreationClassName', out)
def test_get_simple(self):
"""Execute simple get of known instance """
exitcode, out, err = self.instance_cmd(
'get PyWBEM_Person.CreationClassname=PyWBEM_Person,Name=Bob')
self.assertEqual(exitcode, 0)
self.assert_found('PyWBEM_Person', out)
def test_create_simple(self):
"""
Test create a simple instance. To be complete this must both
create and delete the instance since tests are not ordered and each
test should leave the repository in the same state in which it
was before the test.
"""
exitcode, out, err = self.instance_cmd(
'create PyWBEM_Person --property name=Fred '
'--property CreationClassname=PyWBEM_Person')
self.assertEqual(exitcode, 0)
exitcode, out, err = self.instance_cmd(
'delete PyWBEM_Person.Name=Fred,CreationClassName=PyWBEM_Person')
self.assertEqual(exitcode, 0)
self.assert_found(['Deleted', 'Fred'], out)
def test_create_array_prop(self):
"""Create an instance of an array property"""
exitcode, out, err = self.instance_cmd(
'create pywbem_alltypes --property InstanceId=ArrayBool '
'--property arrayBool=True,False')
self.assertEqual(exitcode, 0, "Failed create test")
exitcode, out, err = self.instance_cmd(
'get pywbem_alltypes.InstanceId=ArrayBool')
self.assert_found(["instance of PyWBEM_AllTypes", 'ArrayBool',
"{True, False}"], out)
exitcode, out, err = self.instance_cmd(
'delete PyWBEM_AllTypes.InstanceId=ArrayBool')
self.assertEqual(exitcode, 0)
self.assert_found(['Deleted', 'ArrayBool'], out)
def test_create_alltypes(self):
"""
Create an instance of a class with all types
"""
exitcode, out, err = self.instance_cmd(
'create PyWBEM_AllTypes --property InstanceId=ScalarTest1 '
'--property scalBool=True '
'--property scalUint8=8 '
'--property scalSint8=-8 '
'--property scalUint32=9999 '
'--property scalSint32=-9999 '
'--property scalUint64=12345678 '
'--property scalSint64=-12345678 '
'--property scalReal32=5678.32 '
'--property scalReal64=345876.3 '
'--property scalDateTime="19991224120000.000000+360" '
'--property scalString="A string value" ')
self.assertEqual(exitcode, 0, 'Expected good response. Rcvd '
' exitcode {}, err {}'.format(exitcode, err))
self.assertEqual(exitcode, 0, 'Create instance of Pywbem_AllTypes '
'failed. exitcode {}, err {}'.format(exitcode, err))
exitcode, out, err = self.instance_cmd(
'delete PyWBEM_AllTypes.InstanceId=ScalarTest1')
self.assertEqual(exitcode, 0)
self.assert_found(['Deleted', 'ScalarTest1'], out)
def test_property_notexist(self):
"""
Validate the error when property does not exist in class
"""
exitcode, out, err = self.instance_cmd(
'create pywbem_alltypes --property InstanceId=ArrayBool '
'--property BlahBool=True,False')
print('err {}'.format(err))
self.assertEqual(exitcode, 1)
def test_references(self):
exitcode, out, err = self.instance_cmd(
'references PyWBEM_Person.CreationClassname=PyWBEM_Person,'
'Name=Bob')
self.assertEqual(exitcode, 0)
self.assert_found('instance of PyWBEM_MemberOfPersonCollection', out)
def test_reference_paths(self):
exitcode, out, err = self.instance_cmd(
'references PyWBEM_Person.CreationClassname=PyWBEM_Person,'
'Name=Bob -o')
self.assertEqual(exitcode, 0)
self.assert_found(':PyWBEM_MemberOfPersonCollection.Member', out)
def test_associators(self):
exitcode, out, err = self.instance_cmd(
'associators PyWBEM_Person.CreationClassname=PyWBEM_Person,'
'Name=Bob')
self.assertEqual(exitcode, 0)
self.assert_found('instance of PyWBEM_PersonCollection', out)
def test_associator_paths(self):
exitcode, out, err = self.instance_cmd(
'associators PyWBEM_Person.CreationClassname=PyWBEM_Person,'
'Name=Bob -o')
self.assertEqual(exitcode, 0)
self.assert_found(':PyWBEM_PersonCollection.InstanceID', out)
if __name__ == '__main__':
unittest.main()
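# --- Hedged usage note (added for illustration) ---
# These manual tests assume a live WBEM server (e.g. OpenPegasus) listening on
# http://localhost with the PyWBEM test schema/providers loaded; they can be
# run directly, for example:
#   python tests/manual/test_pegasus.py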
```
#### File: tests/unit/conftest.py
```python
from __future__ import absolute_import, print_function
import os
import pytest
from pywbemtools.pywbemcli._connection_repository \
import DEFAULT_CONNECTIONS_FILE
SCRIPT_DIR = os.path.dirname(__file__)
TEST_DIR = os.getcwd()
REPO_FILE_PATH = os.path.join(TEST_DIR, DEFAULT_CONNECTIONS_FILE)
# if there is a config file, save to this name during tests
SAVE_FILE = DEFAULT_CONNECTIONS_FILE + '.testsave'
SAVE_FILE_PATH = os.path.join(SCRIPT_DIR, SAVE_FILE)
@pytest.fixture
def repo_file_path():
"""
Fixture to return the file path to the repository file
"""
return REPO_FILE_PATH
@pytest.fixture(scope='session', autouse=True)
def set_connections_file(request):
"""
Fixture to hide any existing connection repository at the beginning of a
session and restore it at the end of the session. This assumes that the
connection repository is in the root directory of pywbemcli which is
logical since that file is defined by the call to pywbemcli in tests.
"""
if os.path.isfile(REPO_FILE_PATH):
os.rename(REPO_FILE_PATH, SAVE_FILE_PATH)
def teardown():
"""
Remove any created repository file and restore saved file. This
should occur as session end.
"""
if os.path.isfile(REPO_FILE_PATH):
os.remove(REPO_FILE_PATH)
if os.path.isfile(SAVE_FILE_PATH):
os.rename(SAVE_FILE_PATH, REPO_FILE_PATH)
request.addfinalizer(teardown)
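# --- Hedged usage sketch (added for illustration) ---
# A test in this directory can request the fixture by name, while the
# session-scoped set_connections_file fixture is applied automatically
# (autouse=True). Example (hypothetical test function):
#
#   def test_repo_location(repo_file_path):
#       assert repo_file_path.endswith(DEFAULT_CONNECTIONS_FILE)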
```
#### File: tests/unit/simple_python_mock_script.py
```python
from pywbem import CIMQualifier, CIMClass, CIMProperty, CIMMethod
# test that GLOBALS exist
assert "CONN" in globals()
assert 'SERVER' in globals()
assert 'VERBOSE' in globals()
def build_classes():
"""
Function that builds a single class, CIM_FooDirLoad, that will be
used as a test class for the mock class tests.
"""
# build the key properties
qkey = {'Key': CIMQualifier('Key', True)}
dkey = {'Description': CIMQualifier('Description', 'blah blah')}
# build the CIMClass with properties and methods.
c = CIMClass(
'CIM_FooDirLoad', qualifiers=dkey,
properties={'InstanceID':
CIMProperty('InstanceID', None, qualifiers=qkey,
type='string', class_origin='CIM_Foo',
propagated=False)},
methods={'Delete': CIMMethod('Delete', 'uint32', qualifiers=dkey,
class_origin='CIM_Foo',
propagated=False),
'Fuzzy': CIMMethod('Fuzzy', 'string', qualifiers=dkey,
class_origin='CIM_Foo',
propagated=False)})
# add the objects to the mock repository
CONN.add_cimobjects(c) # noqa: F821
build_classes()
assert(CONN.GetClass('CIM_FooDirLoad')) # noqa: F821
```
#### File: tests/unit/test_log_option.py
```python
from __future__ import absolute_import, print_function
import os
import pytest
from .cli_test_extensions import CLITestsBase
TEST_DIR = os.path.dirname(__file__)
SIMPLE_MOCK_FILE = 'simple_mock_model.mof'
OK = True
RUN = True
FAIL = False
TEST_CASES = [
# desc - Description of test
# inputs - String, or list of args or dict of 'env', 'args', 'general',
# and 'stdin'. See CLITestsBase.subcmd_test() for
# detailed documentation
# exp_response - Dictionary of expected responses (stdout, stderr, rc) and
# test definition (test: <testname>).
# See CLITestsBase.subcmd_test() for detailed documentation.
# mock - None or name of files (mof or .py),
# condition - If True, the test is executed, Otherwise it is skipped.
['Verify log of class get blah. class get that fails',
{'general': ['-l', 'all=stderr'],
'cmdgrp': 'class',
'args': ['get', 'blah']},
{'stderr': ['-pywbem.api',
'FakedWBEMConnection',
r'GetClass\(ClassName=', r'blah',
'-Exception:'],
'test': 'regex',
'rc': 1},
SIMPLE_MOCK_FILE, OK],
['Verify log of class get command local-only. Test stdout',
{'general': ['-l', 'all=stderr:summary'],
'cmdgrp': 'class',
'args': ['get', 'CIM_Foo_sub2', '--local-only']},
{'stdout': ['class CIM_Foo_sub2 : CIM_Foo {',
'',
' string cimfoo_sub2;',
'',
'};', '', ],
'stderr': ["blahblah"],
'test': 'patterns'},
SIMPLE_MOCK_FILE, OK],
['Verify log of class get command local-only. Test stderr. '
'Cannot test stderr and stdout in same test.',
{'general': ['-l', 'all=stderr:summary'],
'cmdgrp': 'class',
'args': ['get', 'CIM_Foo_sub2', '--local-only']},
{'stderr': [r'-pywbem.api.',
r'FakedWBEMConnection',
r'-Request:',
r'-Return:',
r'GetClass\(CIMClass ', r'CIM_Foo_sub2'],
'test': 'regex'},
SIMPLE_MOCK_FILE, OK],
['Verify log of class get command local-only. Test stderr. '
'Cannot test stderr and stdout in same test.',
{'general': ['-l', 'api=stderr:summary'],
'cmdgrp': 'class',
'args': ['get', 'CIM_Foo_sub2', '--local-only']},
{'stderr': [r'-pywbem.api.',
r'FakedWBEMConnection',
r'-Request:',
r'-Return:',
r'GetClass\(CIMClass ', r'CIM_Foo_sub2'],
'test': 'regex'},
SIMPLE_MOCK_FILE, OK],
['Verify log http class get command local-only. Should be no log '
'because mock does not use http. '
'Cannot test stderr and stdout in same test.',
{'general': ['-l', 'http=stderr:summary'],
'cmdgrp': 'class',
'args': ['get', 'CIM_Foo_sub2', '--local-only']},
{'stderr': [],
'test': 'in'},
SIMPLE_MOCK_FILE, OK],
['Verify log with error in definition. Cannot test stderr and stdout in '
'same test.',
{'general': ['-l', 'httpx=stderr'],
'cmdgrp': 'class',
'args': ['get', 'CIM_Foo_sub2', '--local-only']},
{'stderr': ["Error: Logger configuration error. input: "],
'rc': 1,
'test': 'regex'},
SIMPLE_MOCK_FILE, OK],
['Verify log with error in definition. invalid type',
{'general': ['-l', 'allx=stderr'],
'cmdgrp': 'class',
'args': ['get', 'CIM_Foo_sub2', '--local-only']},
{'stderr': ["Error: Logger configuration error. input: allx=stderr. "
"Exception: Invalid simple logger name:"],
'rc': 1,
'test': 'regex'},
SIMPLE_MOCK_FILE, OK],
['Verify invalid log parameter fails (no value).',
{'general': ['-l'],
'cmdgrp': 'class',
'args': ['get', 'CIM_Foo_sub2', '--local-only']},
{'stderr': ["Usage: pywbemcli [GENERAL-OPTIONS] COMMAND [ARGS]"],
'rc': 2,
'test': 'in'},
SIMPLE_MOCK_FILE, OK],
]
class TestLogOption(CLITestsBase):
"""
Test use of the general log option. This is tested here rather than in the
test_general_opts.py file because it requires execution of a command
to actually use the log and create logs.
"""
@pytest.mark.parametrize(
"desc, inputs, exp_response, mock, condition", TEST_CASES)
def test_log_option(self, desc, inputs, exp_response, mock, condition):
"""
Common test method for those subcommands and options in the
class subcmd that can be tested. This includes:
* Subcommands like help that do not require access to a server
* Subcommands that can be tested with a single execution of a
pywbemcli command.
"""
cmd_grp = inputs['cmdgrp'] if inputs['cmdgrp'] else ''
self.command_test(desc, cmd_grp, inputs, exp_response,
mock, condition)
```
#### File: tests/unit/test_qualdecl_cmds.py
```python
from __future__ import absolute_import, print_function
import os
import pytest
from .cli_test_extensions import CLITestsBase
from .common_options_help_lines import CMD_OPTION_HELP_HELP_LINE, \
CMD_OPTION_NAMESPACE_HELP_LINE, CMD_OPTION_SUMMARY_HELP_LINE
TEST_DIR = os.path.dirname(__file__)
# A mof file that defines basic qualifier decls, classes, and instances
# but not tied to the DMTF classes.
SIMPLE_MOCK_FILE = 'simple_mock_model.mof'
QD_HELP_LINES = [
'Usage: pywbemcli qualifier [COMMAND-OPTIONS] COMMAND [ARGS]...',
'Command group for CIM qualifier declarations.',
CMD_OPTION_HELP_HELP_LINE,
'enumerate List the qualifier declarations in a namespace.',
'get Get a qualifier declaration.',
]
QD_ENUMERATE_HELP_LINES = [
'Usage: pywbemcli qualifier enumerate [COMMAND-OPTIONS]',
'List the qualifier declarations in a namespace.',
CMD_OPTION_NAMESPACE_HELP_LINE,
CMD_OPTION_SUMMARY_HELP_LINE,
CMD_OPTION_HELP_HELP_LINE,
]
QD_GET_HELP_LINES = [
'Usage: pywbemcli qualifier get [COMMAND-OPTIONS] QUALIFIERNAME',
"Get a qualifier declaration.",
CMD_OPTION_NAMESPACE_HELP_LINE,
CMD_OPTION_HELP_HELP_LINE,
]
QD_ENUM_MOCK = """Qualifier Abstract : boolean = false,
Scope(class, association, indication),
Flavor(EnableOverride, Restricted);
Qualifier Aggregate : boolean = false,
Scope(reference),
Flavor(DisableOverride, ToSubclass);
Qualifier Association : boolean = false,
Scope(association),
Flavor(DisableOverride, ToSubclass);
Qualifier Description : string,
Scope(any),
Flavor(EnableOverride, ToSubclass, Translatable);
Qualifier In : boolean = true,
Scope(parameter),
Flavor(DisableOverride, ToSubclass);
Qualifier Indication : boolean = false,
Scope(class, indication),
Flavor(DisableOverride, ToSubclass);
Qualifier Key : boolean = false,
Scope(property, reference),
Flavor(DisableOverride, ToSubclass);
Qualifier Out : boolean = false,
Scope(parameter),
Flavor(DisableOverride, ToSubclass);
Qualifier Override : string,
Scope(property, reference, method),
Flavor(EnableOverride, Restricted);
"""
QD_GET_MOCK = """Qualifier Description : string,
Scope(any),
Flavor(EnableOverride, ToSubclass, Translatable);
"""
# pylint: disable=line-too-long
QD_GET_MOCK_XML = """<QUALIFIER.DECLARATION ISARRAY="false" NAME="Description" OVERRIDABLE="true" TOSUBCLASS="true" TRANSLATABLE="true" TYPE="string">
<SCOPE ASSOCIATION="true" CLASS="true" INDICATION="true" METHOD="true" PARAMETER="true" PROPERTY="true" REFERENCE="true"/>
</QUALIFIER.DECLARATION>
""" # noqa: E501
QD_TBL_OUT = """Qualifier Declarations
+-------------+---------+---------+---------+-------------+-----------------+
| Name | Type | Value | Array | Scopes | Flavors |
+=============+=========+=========+=========+=============+=================+
| Abstract | boolean | False | False | CLASS | EnableOverride |
| | | | | ASSOCIATION | Restricted |
| | | | | INDICATION | |
+-------------+---------+---------+---------+-------------+-----------------+
| Aggregate | boolean | False | False | REFERENCE | DisableOverride |
| | | | | | ToSubclass |
+-------------+---------+---------+---------+-------------+-----------------+
| Association | boolean | False | False | ASSOCIATION | DisableOverride |
| | | | | | ToSubclass |
+-------------+---------+---------+---------+-------------+-----------------+
| Description | string | | False | ANY | EnableOverride |
| | | | | | ToSubclass |
| | | | | | Translatable |
+-------------+---------+---------+---------+-------------+-----------------+
| In | boolean | True | False | PARAMETER | DisableOverride |
| | | | | | ToSubclass |
+-------------+---------+---------+---------+-------------+-----------------+
| Indication | boolean | False | False | CLASS | DisableOverride |
| | | | | INDICATION | ToSubclass |
+-------------+---------+---------+---------+-------------+-----------------+
| Key | boolean | False | False | PROPERTY | DisableOverride |
| | | | | REFERENCE | ToSubclass |
+-------------+---------+---------+---------+-------------+-----------------+
| Out | boolean | False | False | PARAMETER | DisableOverride |
| | | | | | ToSubclass |
+-------------+---------+---------+---------+-------------+-----------------+
| Override | string | | False | PROPERTY | EnableOverride |
| | | | | REFERENCE | Restricted |
| | | | | METHOD | |
+-------------+---------+---------+---------+-------------+-----------------+
"""
QD_TBL_GET_OUT = """Qualifier Declarations
+----------+---------+---------+---------+-------------+----------------+
| Name | Type | Value | Array | Scopes | Flavors |
+==========+=========+=========+=========+=============+================+
| Abstract | boolean | False | False | CLASS | EnableOverride |
| | | | | ASSOCIATION | Restricted |
| | | | | INDICATION | |
+----------+---------+---------+---------+-------------+----------------+
"""
# TODO: Add tests for xml, repr, txt formats.
# The following variables are used to control tests executed during
# development of tests
OK = True # set to OK for tests passed. Set OK = False to execute one test
RUN = True # set RUN condition in test being run
FAIL = False # flag any tests that fail
TEST_CASES = [
# desc - Description of test
# inputs - String, or list of args or dict of 'env', 'args', 'general',
# and 'stdin'. See CLITestsBase.command_test() for
# detailed documentation
# exp_response - Dictionary of expected responses (stdout, stderr, rc) and
# test definition (test: <testname>).
# See CLITestsBase.command_test() for detailed documentation.
# mock - None or name of files (mof or .py),
# condition - If True, the test is executed, Otherwise it is skipped.
['Verify qualifier command --help response',
'--help',
{'stdout': QD_HELP_LINES,
'test': 'innows'},
None, OK],
['Verify qualifier command -h response',
'-h',
{'stdout': QD_HELP_LINES,
'test': 'innows'},
None, OK],
['Verify qualifier command enumerate --help response',
['enumerate', '--help'],
{'stdout': QD_ENUMERATE_HELP_LINES,
'test': 'innows'},
None, OK],
['Verify qualifier command enumerate -h response.',
['enumerate', '-h'],
{'stdout': QD_ENUMERATE_HELP_LINES,
'test': 'innows'},
None, OK],
['Verify qualifier command get --help response.',
['get', '--help'],
{'stdout': QD_GET_HELP_LINES,
'test': 'innows'},
None, OK],
['Verify qualifier command get -h response.',
['get', '-h'],
{'stdout': QD_GET_HELP_LINES,
'test': 'innows'},
None, OK],
['Verify qualifier command enumerate returns qual decls.',
['enumerate'],
{'stdout': QD_ENUM_MOCK,
'test': 'lines'},
SIMPLE_MOCK_FILE, OK],
['Verify qualifier command enumerate with namespace returns qual decls.',
['enumerate', '--namespace', 'root/cimv2'],
{'stdout': QD_ENUM_MOCK,
'test': 'lines'},
SIMPLE_MOCK_FILE, OK],
['Verify qualifier command enumerate summary returns qual decls.',
['enumerate', '--summary'],
{'stdout': ['9', 'CIMQualifierDeclaration'],
'test': 'in'},
SIMPLE_MOCK_FILE, OK],
['Verify qualifier command enumerate summary returns qual decls table',
{'args': ['enumerate', '--summary'],
'general': ['--output-format', 'table']},
{'stdout': ["""Summary of CIMQualifierDeclaration returned
+---------+-------------------------+
| Count | CIM Type |
|---------+-------------------------|
| 9 | CIMQualifierDeclaration |
+---------+-------------------------+
"""],
'test': 'linesnows'},
SIMPLE_MOCK_FILE, OK],
['Verify qualifier command get Description',
['get', 'Description'],
{'stdout': QD_GET_MOCK,
'test': 'lines'},
SIMPLE_MOCK_FILE, OK],
['Verify qualifier command get invalid qual decl name .',
['get', 'NoSuchQualDecl'],
{'stderr': ["Error: CIMError: 6"],
'rc': 1,
'test': 'in'},
SIMPLE_MOCK_FILE, OK],
['Verify qualifier command get Description outputformat xml',
{'args': ['get', 'Description'],
'general': ['--output-format', 'xml']},
{'stdout': QD_GET_MOCK_XML,
'test': 'lines'},
SIMPLE_MOCK_FILE, OK],
['Verify qualifier command -o grid enumerate produces table out',
{'args': ['enumerate'],
'general': ['-o', 'grid']},
{'stdout': QD_TBL_OUT,
'rc': 0,
'test': 'lines'},
SIMPLE_MOCK_FILE, OK],
['Verify qualifier command -o grid get Abstract table out',
{'args': ['get', 'abstract'],
'general': ['-o', 'grid']},
{'stdout': QD_TBL_GET_OUT,
'rc': 0,
'test': 'lines'},
SIMPLE_MOCK_FILE, OK],
['Verify qualifier command enumerate invalid namespace Fails',
['enumerate', '--namespace', 'root/blah'],
{'stderr': ["Error: CIMError: 3", "CIM_ERR_INVALID_NAMESPACE"],
'rc': 1,
'test': 'in'},
SIMPLE_MOCK_FILE, OK],
['Verify qualifier command --timestats gets stats output. Cannot test '
'with lines because execution time is variable.',
{'args': ['get', 'IN'],
'general': ['--timestats']},
{'stdout': ['Qualifier In : boolean = true,',
'Scope(parameter),',
'Count Exc Time ReqLen ReplyLen Operation',
' 1 0',
'0 0 GetQualifier'],
'rc': 0,
'test': 'in'},
SIMPLE_MOCK_FILE, OK],
['Verify qualifier command -o repr get Description produces repr out',
{'args': ['get', 'Description'],
'general': ['-o', 'repr']},
{'stdout': "CIMQualifierDeclaration(name='Description', value=None, "
"type='string', is_array=False, array_size=None, "
"scopes=NocaseDict({'CLASS': False, 'ASSOCIATION': False, "
"'INDICATION': False, 'PROPERTY': False, 'REFERENCE': False, "
"'METHOD': False, 'PARAMETER': False, 'ANY': True}), "
"tosubclass=True, overridable=True, translatable=True, "
"toinstance=None)",
'rc': 0,
'test': 'lines'},
SIMPLE_MOCK_FILE, OK],
]
class TestcmdQualifiers(CLITestsBase):
"""
Test all of the qualifiers command variations.
"""
command_group = 'qualifier'
@pytest.mark.parametrize(
"desc, inputs, exp_response, mock, condition", TEST_CASES)
def test_qualdecl(self, desc, inputs, exp_response, mock, condition):
"""
Common test method for those commands and options in the
qualifier command group that can be tested. This includes:
* Subcommands like help that do not require access to a server
* Subcommands that can be tested with a single execution of a
pywbemcli command.
"""
self.command_test(desc, self.command_group, inputs, exp_response,
mock, condition)
``` |
{
"source": "3v1lW1th1n/stix-shifter",
"score": 2
} |
#### File: modules/aws_security_hub/aws_security_hub_translator.py
```python
from ..base.base_translator import BaseTranslator
from ...json_to_stix.json_to_stix import JSONToStix
from . import query_constructor
from os import path
class Translator(BaseTranslator):
def transform_query(self, data, antlr_parsing_object, data_model_mapper, options, mapping=None):
query_string = query_constructor.translate_pattern(
antlr_parsing_object, data_model_mapper)
return query_string
def __init__(self):
basepath = path.dirname(__file__)
filepath = path.abspath(
path.join(basepath, "json", "to_stix_map.json"))
self.mapping_filepath = filepath
self.result_translator = JSONToStix(filepath)
self.query_translator = self
```
#### File: modules/base/base_data_mapper.py
```python
from abc import ABCMeta, abstractmethod
from os import path
import json
from stix_shifter.stix_translation.src.utils.exceptions import DataMappingException
class BaseDataMapper(object, metaclass=ABCMeta):
def fetch_mapping(self, basepath):
"""
Fetches STIX-to-datasource mapping JSON from the module's from_stix_map.json file
:param basepath: path of data source translation module
:type basepath: str
"""
try:
filepath = path.abspath(
path.join(basepath, "json", "from_stix_map.json"))
map_file = open(filepath).read()
map_data = json.loads(map_file)
return map_data
except Exception as ex:
print('exception in main():', ex)
return {}
def map_field(self, stix_object_name, stix_property_name):
"""
Maps the STIX object:property pair to any matching data source fields.
Mapping is based on a JSON object defined in the data source DataMapper class
:param stix_object_name: STIX object (ie. url)
:type stix_object_name: str
:param stix_property_name: STIX property associated to the object (ie. value)
:type stix_property_name: str
:return: A list of 0 or more data source fields that map to a combination of stix_object_name and stix_property_name
:rtype: list
"""
if stix_object_name in self.map_data and stix_property_name in self.map_data[stix_object_name]["fields"]:
return self.map_data[stix_object_name]["fields"][stix_property_name]
else:
return []
```
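The `map_field` lookup above is easiest to see with a concrete mapping. The following standalone sketch mirrors that logic with a small hypothetical mapping dictionary (the field names here are invented; they are not the shipped `from_stix_map.json` content):
```python
# Standalone sketch of the map_field lookup with a hypothetical mapping.
# A STIX object/property pair indexes into the nested dict; anything that
# is not mapped falls back to an empty list.
map_data = {
    "url": {"fields": {"value": ["request_url"]}},
    "ipv4-addr": {"fields": {"value": ["src_ip", "dst_ip"]}},
}

def map_field(stix_object_name, stix_property_name):
    if stix_object_name in map_data and \
            stix_property_name in map_data[stix_object_name]["fields"]:
        return map_data[stix_object_name]["fields"][stix_property_name]
    return []

print(map_field("ipv4-addr", "value"))    # ['src_ip', 'dst_ip']
print(map_field("ipv4-addr", "unknown"))  # []
```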
#### File: modules/cloudsql/cloudsql_connector.py
```python
from ..base.base_connector import BaseConnector
from .cloudsql_ping import CloudSQLPing
from .cloudsql_query_connector import CloudSQLQueryConnector
from .cloudsql_status_connector import CloudSQLStatusConnector
from .cloudsql_results_connector import CloudSQLResultsConnector
from .cloudsql_delete_connector import CloudSQLDeleteConnector
from ibmcloudsql import SQLQuery
import json
class Connector(BaseConnector):
# TODO: config params passed into constructor instance
def __init__(self, connection, configuration):
auth = configuration.get('auth')
client_info = configuration.get('client_info')
instance_crn = connection.get('instance_crn')
target_cos = connection.get('target_cos')
self.api_client = SQLQuery(auth["bxapikey"], instance_crn, target_cos,
client_info=client_info)
self.api_client.logon()
self.results_connector = CloudSQLResultsConnector(self.api_client)
self.status_connector = CloudSQLStatusConnector(self.api_client)
self.query_connector = CloudSQLQueryConnector(self.api_client)
self.ping_connector = CloudSQLPing(self.api_client)
self.delete_connector = CloudSQLDeleteConnector(self.api_client)
self.is_async = True
```
#### File: stix_shifter/utils/proxy_host.py
```python
from stix_shifter.stix_translation import stix_translation
from stix_shifter.stix_transmission import stix_transmission
from flask import request
import json
class ProxyHost():
def __init__(self):
self.request_args = request.get_json(force=True)
self.connection = self.request_args.get("connection")
self.configuration = self.request_args.get("configuration")
if self.connection:
self.options = self.connection.get("options", {})
else:
self.connection = self.request_args.get("options", {})
def transform_query(self):
query = self.request_args["query"]
translation_module = self.connection['type'].lower()
translation = stix_translation.StixTranslation()
dsl = translation.translate(translation_module, 'query', '{}', query, self.connection)
return json.dumps(dsl['queries'])
def translate_results(self, data_source_identity_object):
data_source_results = self.request_args["results"]
translation_module = self.connection['type'].lower()
translation = stix_translation.StixTranslation()
dsl = translation.translate(translation_module, 'results', data_source_identity_object, data_source_results, self.connection)
return json.dumps(dsl)
def create_query_connection(self):
query = self.request_args["query"]
transmission_module = self.connection['type'].lower()
transmission = stix_transmission.StixTransmission(transmission_module, self.connection, self.configuration)
return json.dumps(transmission.query(query))
def create_status_connection(self):
search_id = self.request_args["search_id"]
transmission_module = self.connection['type'].lower()
transmission = stix_transmission.StixTransmission(transmission_module, self.connection, self.configuration)
return json.dumps(transmission.status(search_id))
def create_results_connection(self):
search_id = self.request_args["search_id"]
offset = self.request_args["offset"]
length = self.request_args["length"]
transmission_module = self.connection['type'].lower()
transmission = stix_transmission.StixTransmission(transmission_module, self.connection, self.configuration)
return json.dumps(transmission.results(search_id, offset, length))
def delete_query_connection(self):
search_id = self.request_args["search_id"]
transmission_module = self.connection['type'].lower()
transmission = stix_transmission.StixTransmission(transmission_module, self.connection, self.configuration)
return json.dumps(transmission.delete(search_id))
def ping(self):
transmission_module = self.connection['type'].lower()
transmission = stix_transmission.StixTransmission(transmission_module, self.connection, self.configuration)
return json.dumps(transmission.ping())
def is_async(self):
transmission_module = self.connection['type'].lower()
transmission = stix_transmission.StixTransmission(transmission_module, self.connection, self.configuration)
return "{}".format(transmission.is_async())
```
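`ProxyHost` reads everything from the active Flask request, so it only makes sense inside a request context. A hypothetical wiring sketch (illustration only, not the actual stix-shifter entry point) could route each proxy endpoint to the matching method:
```python
# Hypothetical Flask wiring for ProxyHost: each endpoint constructs a
# ProxyHost from the incoming POST body and delegates to the matching
# method, which returns a JSON string.
from flask import Flask
from stix_shifter.utils.proxy_host import ProxyHost

app = Flask(__name__)

@app.route('/transform_query', methods=['POST'])
def transform_query():
    return ProxyHost().transform_query()

@app.route('/ping', methods=['POST'])
def ping():
    return ProxyHost().ping()

if __name__ == '__main__':
    app.run(port=5000)
```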
#### File: stix_translation/patterns/test_web_api.py
```python
import unittest
from multiprocessing import Process
import os
from web_api import *
from .helpers.input_file_helpers import *
DEFAULT_LIMIT = 10000
DEFAULT_TIMERANGE = 5
default_timerange_spl = '-' + str(DEFAULT_TIMERANGE) + 'minutes'
class TestRunFlask(unittest.TestCase):
""" Test the Flask server for Analytic Translator
expects input files in test/input_files/ """
@classmethod
def setUpClass(cls):
cls.server = Process(target=run_server)
cls.server.start()
app.testing = True
return "Starting Flask server..."
@classmethod
def tearDownClass(cls):
cls.server.terminate()
cls.server.join()
return "Flask Server shutting down..."
@staticmethod
def success_test_generator(pattern, platform, expected_result):
""" Generates a successful test """
def test(self):
with app.test_client() as client:
resp = client.post(platform, data=pattern, content_type='text/plain')
data = resp.data
print("\n DATA: ", data.decode("utf-8")) # TEST-PRINT
self.assertEqual(TestRunFlask.normalize_spacing(data.decode("utf-8")),
TestRunFlask.normalize_spacing(expected_result))
return test
@staticmethod
def failure_test_generator(pattern, platform):
""" Generates a test for an error """
def test(self):
with self.assertRaises(Exception):
with app.test_client() as client:
resp = client.post(platform, data=pattern, content_type='text/plain')
data = resp.data
print("\n DATA: ", data.decode("utf-8")) # TEST-PRINT
return test
def normalize_spacing(pattern):
""" Normalizes spacing across expected results and actual results,
so you don't need to put newlines/etc to match weird spacing exactly"""
return re.sub(r"\s+", ' ', pattern)
@staticmethod
def input_files():
""" Collects the test files
Each input file is a JSON object which contains
a pattern and the expected results for each platform+language """
input_patterns = {} # The input values and expected results
absolute_path_prefix = os.path.dirname(os.path.realpath(__file__))
# do traversal of input_files
for filename in listdir(path.join(absolute_path_prefix, "input_files")):
name, ext = path.splitext(filename) # reveal pattern name
with open(path.join(absolute_path_prefix, "input_files", filename), "r") as json_data:
# add each file's pattern-dict to collected-dict
input_patterns[name] = json.load(json_data)
return input_patterns
@staticmethod
def generate_tests():
""" Generate the tests """
# For now, supported platforms are combined with data models
# TODO: This will be split when the refactor is merged
platform_map = {
'car-elastic': (DataModels.CAR, SearchPlatforms.ELASTIC),
'car-splunk': (DataModels.CAR, SearchPlatforms.SPLUNK)
}
# for each item in the collected-dict,
# the key is the test file's name,
# the value is inner dict
for k, v in TestRunFlask.input_files().items():
# Get the input test file
test_pattern = v.pop("stix-input")
# Generate a test for each remaining key in the dictionary
for platform, expected_result in v.items():
# each test is named in format: test_stg_md5_hash_car-splunk
# test_name = "test_[GENERATOR]_{}_{}".format(k, platform)
if platform in platform_map: # Some platforms not yet supported
if expected_result is not None:
test_name = "test_stg_{}_{}".format(k, platform)
new_test = TestRunFlask.success_test_generator(
test_pattern, platform, expected_result)
setattr(TestRunFlask, test_name, new_test)
else:
test_name = "test_ftg_{}_{}".format(k, platform)
new_test = TestRunFlask.failure_test_generator(
test_pattern, platform)
setattr(TestRunFlask, test_name, new_test)
TestRunFlask.generate_tests()
``` |
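The `setattr`-based test generation above is a general unittest pattern: build one closure per input case and attach it to the TestCase class so each case is reported as its own test. A minimal standalone sketch of the same idea (with made-up cases, unrelated to the STIX patterns):
```python
# Minimal sketch of dynamically generated unittest methods.
import unittest

class TestGenerated(unittest.TestCase):
    pass

def make_test(value, expected):
    def test(self):
        self.assertEqual(value * 2, expected)
    return test

for name, (value, expected) in {'small': (2, 4), 'big': (10, 20)}.items():
    setattr(TestGenerated, f'test_double_{name}', make_test(value, expected))

if __name__ == '__main__':
    unittest.main()
```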
{
"source": "3verlyn/DL-abstract-argumentation",
"score": 2
} |
#### File: data/classes/ArgumentationFramework.py
```python
import copy
import pickle
from pathlib import Path
from typing import Set
from networkx.classes.digraph import DiGraph
from networkx.classes.function import (
set_edge_attributes,
set_node_attributes,
non_edges,
)
from src.data.scripts.utils import apx2nxgraph, nxgraph2apx
from src.data.solvers.AcceptanceSolver import AcceptanceSolver
class ArgumentationFramework:
"""Argumentation Framework class to compute extensions, determine argument acceptance
and get graph representations"""
graph: DiGraph
@classmethod
def from_pkl(cls, pkl_path: Path):
state_dict = pickle.load(open(pkl_path, "rb"))
# legacy _id
if "_id" in state_dict:
state_dict["id"] = state_dict.pop("_id")
return cls(**state_dict)
@classmethod
def from_apx(cls, apx: str, id=None):
"""
Initialize an AF object from an apx file
"""
graph = apx2nxgraph(apx)
return cls(id, graph)
def __init__(self, id, graph, extensions=None, **kwargs):
self.extensions = extensions if extensions is not None else {}
self.graph = graph
self.representations = {}
self.id = id
def to_apx(self):
return nxgraph2apx(self.graph)
def edge_hamming_distance(self, AF: "ArgumentationFramework"):
edges1 = set(self.graph.edges)
edges2 = set(AF.graph.edges)
return len(edges1.symmetric_difference(edges2))
def get_extensions_containing_s(self, semantic, S: set) -> Set[frozenset]:
extensions = set(
[
extension
for extension in self.extensions[semantic]
if S.issubset(extension)
]
)
return extensions
def get_cred_accepted_args(self, semantic, S: frozenset = None) -> frozenset:
credulous = frozenset()
extensions = (
self.extensions[semantic]
if S is None
else self.get_extensions_containing_s(semantic, S)
)
if len(extensions) > 0:
credulous = frozenset.union(*extensions)
return credulous
def get_scept_accepted_args(self, semantic, S: frozenset = None) -> frozenset:
sceptical = frozenset()
extensions = (
self.extensions[semantic]
if S is None
else self.get_extensions_containing_s(semantic, S)
)
if len(extensions) > 0:
sceptical = frozenset.intersection(*extensions)
return sceptical
@property
def state_dict(self) -> dict:
return self.__dict__.copy()
@property
def num_arguments(self) -> int:
return len(self.graph.nodes)
@property
def num_attacks(self) -> int:
return len(self.graph.edges)
@property
def arguments(self) -> set:
return set(n for n in range(self.num_arguments))
def get_representation(self, type) -> DiGraph:
assert type in ["base", "AGNN", "enforcement", "FM2", "GCN"]
if type not in self.representations:
self.representations[type] = getattr(self, f"get_{type}_representation")()
return self.representations[type]
def get_base_representation(self) -> DiGraph:
graph = copy.deepcopy(self.graph)
set_node_attributes(graph, 0, "node_input")
return graph
def get_AGNN_representation(self) -> DiGraph:
graph = self.get_base_representation()
set_node_attributes(graph, 0, "node_y")
return graph
def get_GCN_representation(self) -> DiGraph:
graph = self.get_AGNN_representation()
set_node_attributes(graph, float(1), "node_x")
return graph
def get_FM2_representation(self) -> DiGraph:
graph = self.get_AGNN_representation()
for node in graph.nodes:
graph.nodes[node]["node_x_in"] = float(graph.in_degree(node))
graph.nodes[node]["node_x_out"] = float(graph.out_degree(node))
return graph
def get_enforcement_representation(self) -> DiGraph:
graph = self.get_base_representation()
set_edge_attributes(graph, 1, "edge_input")
for u, v in non_edges(graph):
graph.add_edge(u, v, edge_input=0)
# self attacks
for n in graph.nodes:
if graph.has_edge(n, n):
graph.edges[n, n]["edge_input"] = 3
else:
graph.add_edge(n, n, edge_input=2)
set_edge_attributes(graph, 0, "edge_y")
return graph
def verify(self, S: frozenset, semantics, solver=None):
if semantics == "ST":
return self.verify_stable(S)
elif semantics == "CO":
return self.verify_complete(S)
elif semantics in ["GR", "PR"]:
return self.verify_solver(S, semantics, solver)
else:
raise Exception("Semantics not known")
def verify_stable(self, S: frozenset):
# "the set of arguments which are not attacked by S and then testing if this set is equal to S"
not_attacked_by_S = self.arguments - self.attacked_by(S)
return S == frozenset(not_attacked_by_S)
def verify_complete(self, S: frozenset):
# "Compute the set of arguments defended by S, the set of arguments not attacked by S and then to test if their intersection is equal to S."
attacked_by_S = self.attacked_by(S)
defended_by_S = set()
for arg in self.arguments:
attackers = set(self.graph.predecessors(arg))
if attackers.issubset(attacked_by_S):
defended_by_S.add(arg)
not_attacked_by_S = self.arguments - attacked_by_S
intersection = defended_by_S.intersection(not_attacked_by_S)
return S == frozenset(intersection)
def verify_solver(self, S: frozenset, semantics, solver: AcceptanceSolver):
return S in solver.solve(self, semantics)
def attacked_by(self, S: frozenset):
attacked_args = set()
for arg in S:
for attacked_arg in self.graph.successors(arg):
attacked_args.add(attacked_arg)
return attacked_args
```
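The credulous/sceptical distinction used by `get_cred_accepted_args` and `get_scept_accepted_args` reduces to a union versus an intersection over extensions. A tiny worked sketch with two hypothetical extensions over arguments 0..2:
```python
# Hypothetical extensions of some semantics over arguments 0..2.
extensions = {frozenset({0, 1}), frozenset({0, 2})}

# Credulously accepted: in at least one extension (union).
credulous = frozenset.union(*extensions)
# Sceptically accepted: in every extension (intersection).
sceptical = frozenset.intersection(*extensions)

print(sorted(credulous))  # [0, 1, 2]
print(sorted(sceptical))  # [0]
```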
#### File: data/generators/GraphGenerator.py
```python
import random
import subprocess
from pathlib import Path
class GraphGenerator:
def __init__(self, dir, min_nodes, max_nodes):
"""
Stores the configuration on the instance because imap does not accept extra variables
"""
self.min_nodes = min_nodes
self.max_nodes = max_nodes
self.dir = dir
self.path = Path(__file__).parent.parent
self.jAFBench = self.path / 'generators/AFBenchGen2/target/jAFBenchGen-2.jar'
self.AFGen_cp = f'{self.path}/generators/AFGenBenchmarkGenerator/target:'
self.probo_cp = f'{self.path}/generators/probo/target/:' \
f'{self.path}/generators/probo/lib/*'
def generate(self, id) -> Path:
timeout = 30 * 60
name, generator = self.random_graph_generator_cmd(id)
cmd = ['timeout', str(timeout)] + generator
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.path)
output, stderr = p.communicate()
if p.returncode != 0:
print(' '.join(cmd))
print(stderr)
return None
file_path = self.dir / '{}.apx'.format(id)
# save output to file when using jAFBench
if name in ['BarabasiAlbert', 'WattsStrogatz', 'ErdosRenyi']:
with open(file_path, 'wb') as f:
f.write(output)
assert file_path.exists()
return file_path
def random_graph_generator_cmd(self, id):
num_nodes = random.randint(self.min_nodes, self.max_nodes)
generators = {
'AFGen': [
'-cp',
self.AFGen_cp,
'AFGen.AFGen',
str(self.dir / str(id)),
str(num_nodes),
str(random.randrange(1, 20) / 100),
str(random.randrange(1, 40) / 100)
],
'BarabasiAlbert': [
'-jar',
str(self.jAFBench),
'-numargs', str(num_nodes - 1),
'-type', 'BarabasiAlbert',
'-BA_WS_probCycles', str(random.randrange(1, 25) / 100)
],
'WattsStrogatz': [
'-jar',
str(self.jAFBench),
'-numargs', str(num_nodes),
'-type', 'WattsStrogatz',
'-BA_WS_probCycles', str(random.randrange(1, 25) / 100),
'-WS_baseDegree', str(random.randrange(2, num_nodes - 1, 2)),
'-WS_beta', str(random.randrange(1, 25) / 100)
],
'ErdosRenyi': [
'-jar',
str(self.jAFBench),
'-numargs', str(num_nodes - 1),
'-type', 'ErdosRenyi',
'-ER_probAttacks', str(random.randrange(25, 50) / 100),
],
'Grounded': [
'-cp',
f'{self.probo_cp}',
'net.sf.probo.generators.GroundedGenerator',
str(self.dir / str(id)),
str(self.max_nodes),
],
'Scc': [
'-cp',
f'{self.probo_cp}',
'net.sf.probo.generators.SccGenerator',
str(self.dir / str(id)),
str(self.max_nodes),
],
'Stable': [
'-cp',
f'{self.probo_cp}',
'net.sf.probo.generators.StableGenerator',
str(self.dir / str(id)),
str(self.max_nodes),
]
}
name, generator = random.choice(list(generators.items()))
return name, ['java'] + generator
```
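A usage sketch for the generator, assuming the bundled generator jars (jAFBenchGen, AFGen, probo) have been built and that the module is importable as `src.data.generators.GraphGenerator` from the project root (the output directory name here is hypothetical); `generate` returns `None` when the chosen generator times out or exits non-zero:
```python
# Usage sketch (hypothetical output directory; requires the generator jars).
from pathlib import Path
from src.data.generators.GraphGenerator import GraphGenerator

out_dir = Path('tmp_afs')
out_dir.mkdir(exist_ok=True)

gen = GraphGenerator(dir=out_dir, min_nodes=10, max_nodes=25)
apx_path = gen.generate(id=0)
if apx_path is not None:
    print(apx_path.read_text()[:200])  # first lines of the generated apx
else:
    print('generation failed or timed out')
```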
#### File: src/models/GNN.py
```python
from collections import OrderedDict
import torch
import torch.nn as nn
from torch_geometric.data.batch import Batch
class GNN(nn.Module):
def __init__(self, mp_steps, **config):
super().__init__()
self.mp_steps = mp_steps
self.update_fns = self.assign_update_fns()
self.readout_fns = self.assign_readout_fns()
def assign_update_fns(self) -> OrderedDict:
raise NotImplementedError
def assign_readout_fns(self) -> dict:
raise NotImplementedError
def forward(self, batch: Batch, output_all_steps=True):
edge_index = batch.edge_index
sections = (
torch.bincount(batch.batch).tolist() if hasattr(batch, "batch") else None
)
hiddens = self.initialize(batch)
del batch
# update attributes with update and aggregation step
outputs = {element: [] for element in self.readout_fns.keys()}
for step in range(self.mp_steps):
hiddens = self.step(edge_index=edge_index, sections=sections, **hiddens)
if not output_all_steps and (step + 1) != self.mp_steps:
continue
for element, readout_fn in self.readout_fns.items():
outputs[element].append(readout_fn(**hiddens))
return outputs
def initialize(self, batch):
hiddens = {}
# initialize attributes through embeddings and initialize lstm states to None
for element in self.embeddings.keys():
embedding = self.embeddings[element](batch[f"{element}_input"])
hiddens.update(
{
f"{element}_input": embedding,
f"{element}_embedding": embedding.clone(),
f"{element}_lstm": None,
}
)
return hiddens
def step(self, edge_index, sections, **hiddens):
"""
Perform a message passing step by propagating information and updating each element
"""
for element, update_fn in self.update_fns.items():
hiddens[f"{element}_embedding"], hiddens[f"{element}_lstm"] = update_fn(
edge_index=edge_index, sections=sections, element=element, **hiddens
)
return hiddens
```
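The forward pass above is essentially "embed, then alternate update and readout for `mp_steps` rounds". A torch-free control-flow sketch with stand-in callables (hypothetical, not real update/readout modules) makes the loop explicit:
```python
# Control-flow sketch of the message-passing loop (plain Python, no torch).
MP_STEPS = 3

def node_update(hidden):
    return hidden + 1          # stand-in for a message-passing update

def node_readout(hidden):
    return hidden * 10         # stand-in for an MLP readout

hidden = 0                     # stand-in for the embedded node features
outputs = {"node": []}
for step in range(MP_STEPS):
    hidden = node_update(hidden)
    outputs["node"].append(node_readout(hidden))

print(outputs)  # {'node': [10, 20, 30]}
```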
#### File: src/sl/dataset.py
```python
import argparse
import pickle
import random
from pathlib import Path
from typing import Tuple
import pandas as pd
import torch
from torch.utils.data import Dataset
from torch_geometric.data.data import Data
from torch_geometric.data.in_memory_dataset import InMemoryDataset
from torch_geometric.utils import from_networkx
from src import config
from src.data.classes.ArgumentationFramework import ArgumentationFramework
class ArgumentationFrameworkDS(Dataset):
"""
A dataset of Argumentation Frameworks from a dataset directory
"""
def __init__(self, name, representation="base"):
self.dir = config.dataset_dir / name / "AFs"
self.AFs = list((self.dir).glob("*.pkl"))
self.representation = representation
def __len__(self) -> int:
return len(self.AFs)
def __getitem__(self, idx) -> Tuple[ArgumentationFramework, Data]:
AF = ArgumentationFramework.from_pkl(self.AFs[idx])
graph = AF.get_representation(self.representation)
data = from_networkx(graph)
data["idx"] = idx
return AF, data
class AcceptanceProblemDS(Dataset):
"""
Dataset of acceptance problems on Argumentation Frameworks from a dataset directory
"""
def __init__(self, name, task, semantics, representation):
self.AF_ds = ArgumentationFrameworkDS(name, representation)
self.name = name
self.representation = representation
self.task = task
self.semantics = semantics
def __len__(self) -> int:
return len(self.AF_ds)
def __getitem__(self, idx: int) -> Data:
AF, data = self.AF_ds[idx]
S = set()
if self.task == "enum":
S = self.sample_set_of_arguments(AF)
Y = AF.get_cred_accepted_args(self.semantics, S=S)
elif self.task == "scept":
Y = AF.get_scept_accepted_args(self.semantics)
else:
Y = AF.get_cred_accepted_args(self.semantics)
data["node_input"][list(S)] = 1
data["node_y"][list(Y)] = 1
data["node_y"] = data["node_y"].float()
return data
def sample_set_of_arguments(self, AF: ArgumentationFramework) -> frozenset:
"""
Generate a datapoint for the enumeration tree search with either an
empty S, a legal S or an illegal S
"""
S = frozenset()
if len(AF.extensions[self.semantics]) == 0:
return S
random_extension = random.choice(list(AF.extensions[self.semantics]))
if len(random_extension) == 0:
return S
# subset of the randomly chosen extension
S = frozenset(
random.sample(random_extension, random.randrange(0, len(random_extension)))
)
type = random.choice(["legal", "illegal"])
if type == "legal":
return S
# an illegal set has none of the extensions as its superset
if type == "illegal":
possible_illegal_additions = AF.arguments - (
S | AF.get_cred_accepted_args(self.semantics, S=S)
)
if len(possible_illegal_additions) != 0:
S = S | frozenset(random.sample(possible_illegal_additions, 1))
return S
class MemoryDS(InMemoryDataset):
"""Dataset base class for creating graph datasets which fit completely
into memory. Saves processed files.
See `here <https://pytorch-geometric.readthedocs.io/en/latest/notes/
create_dataset.html#creating-in-memory-datasets>`__ for the accompanying
tutorial."""
def __init__(self, ds, semantics, task, representation):
self._processed_file_names = [f"{semantics}_{task}_{representation}_data.pt"]
self.ds = ds
root = config.dataset_dir / ds.name
super().__init__(root=root, transform=None, pre_transform=None)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self):
return ["AFs.pkl"]
@property
def processed_file_names(self):
return self._processed_file_names
def download(self):
pass
def process(self):
"""Processes the dataset to the :obj:`self.processed_dir` folder."""
data, slices = self.collate([d for d in self.ds])
torch.save((data, slices), self.processed_paths[0])
``` |
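For the `enum` task, `sample_set_of_arguments` draws either a subset of a real extension ("legal") or that subset plus one argument that is not allowed on top of it ("illegal"). A simplified standalone sketch with hypothetical extensions (the real implementation also consults credulous acceptance with respect to S when picking the illegal addition):
```python
# Simplified sketch of the legal/illegal sampling (hypothetical extensions).
import random

arguments = {0, 1, 2, 3, 4}
extensions = [frozenset({0, 1}), frozenset({0, 2})]

ext = random.choice(extensions)
S = frozenset(random.sample(sorted(ext), random.randrange(0, len(ext))))
if random.choice(["legal", "illegal"]) == "illegal":
    # simplification: treat anything outside the chosen extension as illegal
    candidates = arguments - ext
    if candidates:
        S = S | frozenset(random.sample(sorted(candidates), 1))
print(S)
```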
{
"source": "3vivekb/hail",
"score": 3
} |
#### File: benchmark_hail/create_resources/create_resources.py
```python
import argparse
from ..run.utils import download_data
from .. import init_logging
def main(args_):
parser = argparse.ArgumentParser()
parser.add_argument("--data-dir", "-d",
type=str,
required=True,
help="Data directory.")
parser.add_argument("--group",
type=str,
required=False,
help="Resource group to download.")
args = parser.parse_args(args_)
init_logging()
download_data(args.data_dir, args.group)
```
#### File: ci/ci/utils.py
```python
import string
import secrets
def generate_token(size=12):
assert size > 0
alpha = string.ascii_lowercase
alnum = string.ascii_lowercase + string.digits
return secrets.choice(alpha) + ''.join([secrets.choice(alnum) for _ in range(size - 1)])
def flatten(xxs):
return [x for xs in xxs for x in xs]
```
#### File: ci/test/test_ci.py
```python
import os
import sys
import pytest
from gidgethub import aiohttp as gh_aiohttp
import aiohttp
import subprocess as sp
import asyncio
from hailtop.config import get_deploy_config
pytestmark = pytest.mark.asyncio
deploy_config = get_deploy_config()
org = os.environ['ORGANIZATION']
repo = os.environ['REPO_NAME']
namespace = os.environ['NAMESPACE']
with open('/secret/ci-secrets/user1', 'r') as f:
user1_token = f.read()
with open('/secret/ci-secrets/user2', 'r') as f:
user2_token = f.read()
def wait_for_hello():
wait_cmd = f'python3 wait-for.py 900 {namespace} Service --location gce hello'
result = sp.run(wait_cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)
if result.returncode != 0:
raise Exception(f'hello service was not deployed: {result!r}')
url = deploy_config.url('hello', f'/sha')
sha = sp.check_output(f"curl {url}", shell=True)
return sha
async def wait_for_redeployment(old_sha):
wait_interval = 10
elapsed_time = 0
while elapsed_time < 300:
try:
new_sha = wait_for_hello()
if new_sha != old_sha:
print('hello was redeployed', file=sys.stderr)
return
elapsed_time += wait_interval
except Exception:
pass
await asyncio.sleep(wait_interval)
raise Exception(f'hello service was not redeployed in 300 seconds')
@pytest.fixture
async def gh_client1():
session = aiohttp.ClientSession(
raise_for_status=True,
timeout=aiohttp.ClientTimeout(total=60))
gh_client = gh_aiohttp.GitHubAPI(session, 'test-ci-1', oauth_token=user1_token)
yield gh_client
await session.close()
@pytest.fixture
async def gh_client2():
session = aiohttp.ClientSession(
raise_for_status=True,
timeout=aiohttp.ClientTimeout(total=60))
gh_client = gh_aiohttp.GitHubAPI(session, 'test-ci-2', oauth_token=user2_token)
yield gh_client
await session.close()
async def test_deploy():
wait_for_hello()
# FIXME: This test requires either putting user1 as an authorized user
# or having a fake developer who can authorize the sha
# async def test_pr_merge(gh_client1, gh_client2):
# sha = wait_for_hello()
#
# script = f'''
# git clone https://{user1_token}@github.com/{org}/{repo}.git
# cd {repo}
# git config --global user.email <EMAIL>
# git config --global user.name ci
# git checkout -b benign-changes
# echo "hello" > hello.txt
# git add hello.txt && git commit -m "add hello.txt"
# git push --set-upstream origin benign-changes
# '''
# sp.run(script, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)
#
# data = {
# 'title': 'Benign changes',
# 'head': 'benign-changes',
# 'base': 'master'
# }
#
# result = await gh_client1.post(f'/repos/{org}/{repo}/pulls',
# data=data)
# pull_number = result['number']
#
# await gh_client2.post(f'/repos/{org}/{repo}/pulls/{pull_number}/reviews',
# data={'event': 'APPROVE'})
#
# await wait_for_redeployment(sha)
```
#### File: hail/expr/blockmatrix_type.py
```python
from hail.typecheck import *
from hail.utils.java import jiterable_to_list
from hail.expr.types import dtype, hail_type
class tblockmatrix(object):
@staticmethod
def _from_java(jtbm):
return tblockmatrix(
dtype(jtbm.elementType().toString()),
jiterable_to_list(jtbm.shape()),
jtbm.isRowVector(),
jtbm.blockSize())
@staticmethod
def _from_json(json):
return tblockmatrix(dtype(json['element_type']),
json['shape'],
json['is_row_vector'],
json['block_size'])
@typecheck_method(element_type=hail_type, shape=sequenceof(int), is_row_vector=bool, block_size=int)
def __init__(self, element_type, shape, is_row_vector, block_size):
self.element_type = element_type
self.shape = shape
self.is_row_vector = is_row_vector
self.block_size = block_size
def __eq__(self, other):
return isinstance(other, tblockmatrix) and \
self.element_type == other.element_type and \
self.shape == other.shape and \
self.is_row_vector == other.is_row_vector and \
self.block_size == other.block_size
def __hash__(self):
return 43 + hash(str(self))
def __repr__(self):
return f'tblockmatrix(element_type={self.element_type!r}, shape={self.shape!r}, ' \
f'is_row_vector={self.is_row_vector!r}, block_size={self.block_size!r})'
def __str__(self):
return f'blockmatrix {{element_type: {self.element_type}, shape: {self.shape}, ' \
f'is_row_vector: {self.is_row_vector}, block_size: {self.block_size})'
def pretty(self, indent=0, increment=4):
l = []
l.append(' ' * indent)
l.append('blockmatrix {\n')
indent += increment
l.append(' ' * indent)
l.append('element_type: ')
self.element_type._pretty(l, indent, increment)
l.append(',\n')
l.append(' ' * indent)
l.append(f'shape: [{self.shape}],\n')
l.append(' ' * indent)
l.append('is_row_vector: ')
        l.append(str(self.is_row_vector))
l.append(',\n')
l.append(' ' * indent)
l.append('block_size: ')
        l.append(str(self.block_size))
l.append(',\n')
indent -= increment
l.append(' ' * indent)
l.append('}')
return ''.join(l)
import pprint
_old_printer = pprint.PrettyPrinter
class PrettyPrinter(pprint.PrettyPrinter):
def _format(self, object, stream, indent, allowance, context, level):
if isinstance(object, tblockmatrix):
stream.write(object.pretty(self._indent_per_level))
else:
return _old_printer._format(self, object, stream, indent, allowance, context, level)
pprint.PrettyPrinter = PrettyPrinter # monkey-patch pprint
```
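The module ends by monkey-patching `pprint.PrettyPrinter` so that `pprint.pprint` renders a `tblockmatrix` through its `pretty()` method instead of the default repr. The same dispatch trick in isolation, with a toy class standing in for Hail's type:
```python
# Toy version of the pprint monkey-patch: intercept _format for one class
# and fall back to the stock printer for everything else.
import pprint

class Fancy:
    def pretty(self, indent=0):
        return ' ' * indent + '<fancy!>'

_old_printer = pprint.PrettyPrinter

class PrettyPrinter(pprint.PrettyPrinter):
    def _format(self, object, stream, indent, allowance, context, level):
        if isinstance(object, Fancy):
            stream.write(object.pretty(self._indent_per_level))
        else:
            return _old_printer._format(self, object, stream, indent,
                                        allowance, context, level)

pprint.PrettyPrinter = PrettyPrinter  # same monkey-patch as in the module above

pprint.pprint(Fancy())  # prints ' <fancy!>' via pretty() instead of the default repr
```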
#### File: expr/expressions/indices.py
```python
from hail.typecheck import *
import hail as hl
from typing import List
class Indices(object):
@typecheck_method(source=anytype, axes=setof(str))
def __init__(self, source=None, axes=set()):
self.source = source
self.axes = axes
self._cached_key = None
def __hash__(self):
return 37 + hash((self.source, *self.axes))
def __eq__(self, other):
return isinstance(other, Indices) and self.source is other.source and self.axes == other.axes
def __ne__(self, other):
return not self.__eq__(other)
@staticmethod
def unify(*indices):
axes = set()
src = None
for ind in indices:
if src is None:
src = ind.source
else:
if ind.source is not None and ind.source is not src:
from . import ExpressionException
raise ExpressionException()
axes = axes.union(ind.axes)
return Indices(src, axes)
@property
def protected_key(self) -> List[str]:
if self._cached_key is None:
self._cached_key = self._get_key()
return self._cached_key
else:
return self._cached_key
def _get_key(self):
if self.source is None:
return []
elif isinstance(self.source, hl.Table):
if self == self.source._row_indices:
return list(self.source.key)
else:
return []
else:
assert isinstance(self.source, hl.MatrixTable)
if self == self.source._row_indices:
return list(self.source.row_key)
elif self == self.source._col_indices:
return list(self.source.col_key)
else:
return []
def __str__(self):
return 'Indices(axes={}, source={})'.format(self.axes, self.source)
def __repr__(self):
return 'Indices(axes={}, source={})'.format(repr(self.axes), repr(self.source))
class Aggregation(object):
def __init__(self, *exprs):
self.exprs = exprs
from ..expressions import unify_all
indices, agg = unify_all(*exprs)
self.indices = indices
```
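`Indices.unify` unions the axes of its inputs but refuses to combine indices that come from two different sources. A standalone sketch of that rule, using a trivial stand-in class and a plain `ValueError` instead of Hail's `Indices`/`ExpressionException`:
```python
# Stand-in sketch of the unify() rule: axes are unioned, mixing sources raises.
class Idx:
    def __init__(self, source=None, axes=frozenset()):
        self.source = source
        self.axes = set(axes)

def unify(*indices):
    axes, src = set(), None
    for ind in indices:
        if src is None:
            src = ind.source
        elif ind.source is not None and ind.source is not src:
            raise ValueError("cannot combine expressions from different sources")
        axes |= ind.axes
    return Idx(src, axes)

table = object()  # stand-in for a Table/MatrixTable source
print(sorted(unify(Idx(table, {"row"}), Idx(table, {"column"})).axes))  # ['column', 'row']
```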
#### File: hail/genetics/locus.py
```python
from hail.genetics.reference_genome import ReferenceGenome, reference_genome_type
from hail.typecheck import *
from hail.utils.java import scala_object, Env
import hail as hl
class Locus(object):
"""An object that represents a location in the genome.
Parameters
----------
contig : :obj:`str`
Chromosome identifier.
position : :obj:`int`
Chromosomal position (1-indexed).
reference_genome : :obj:`str` or :class:`.ReferenceGenome`
Reference genome to use.
Note
----
This object refers to the Python value returned by taking or collecting
Hail expressions, e.g. ``mt.locus.take(5)``. This is rare; it is much
more common to manipulate the :class:`.LocusExpression` object, which is
constructed using the following functions:
- :func:`.locus`
- :func:`.parse_locus`
- :func:`.locus_from_global_position`
"""
@typecheck_method(contig=oneof(str, int),
position=int,
reference_genome=reference_genome_type)
def __init__(self, contig, position, reference_genome='default'):
if isinstance(contig, int):
contig = str(contig)
self._contig = contig
self._position = position
self._rg = reference_genome
def __str__(self):
return f'{self._contig}:{self._position}'
def __repr__(self):
return 'Locus(contig=%s, position=%s, reference_genome=%s)' % (self.contig, self.position, self._rg)
def __eq__(self, other):
return (isinstance(other, Locus)
and self._contig == other._contig
and self._position == other._position
and self._rg == other._rg)
def __hash__(self):
return hash(self._contig) ^ hash(self._position) ^ hash(self._rg)
@classmethod
@typecheck_method(string=str,
reference_genome=reference_genome_type)
def parse(cls, string, reference_genome='default'):
"""Parses a locus object from a CHR:POS string.
**Examples**
>>> l1 = hl.Locus.parse('1:101230')
>>> l2 = hl.Locus.parse('X:4201230')
:param str string: String to parse.
:param reference_genome: Reference genome to use. Default is :func:`~hail.default_reference`.
:type reference_genome: :obj:`str` or :class:`.ReferenceGenome`
:rtype: :class:`.Locus`
"""
contig, pos = string.split(':')
if pos.lower() == 'end':
pos = reference_genome.contig_length(contig)
else:
pos = int(pos)
return Locus(contig, pos, reference_genome)
@property
def contig(self):
"""
Chromosome identifier.
:rtype: str
"""
return self._contig
@property
def position(self):
"""
Chromosomal position (1-based).
:rtype: int
"""
return self._position
@property
def reference_genome(self):
"""Reference genome.
:return: :class:`.ReferenceGenome`
"""
return self._rg
```
#### File: hail/ir/base_ir.py
```python
import abc
from hail.utils.java import Env
from .renderer import Renderer, PlainRenderer, Renderable
def _env_bind(env, bindings):
if bindings:
if env:
res = env.copy()
res.update(bindings)
return res
else:
return dict(bindings)
else:
return env
class BaseIR(Renderable):
def __init__(self, *children):
super().__init__()
self._type = None
self.children = children
def __str__(self):
r = PlainRenderer(stop_at_jir=False)
return r(self)
def render_head(self, r: Renderer):
head_str = self.head_str()
if head_str != '':
head_str = f' {head_str}'
trailing_space = ''
if len(self.children) > 0:
trailing_space = ' '
return f'({self._ir_name()}{head_str}{trailing_space}'
def render_tail(self, r: Renderer):
return ')'
def _ir_name(self):
return self.__class__.__name__
def render_children(self, r: Renderer):
return self.children
def head_str(self):
"""String to be added after IR name in serialized representation.
Returns
-------
str
"""
return ''
@abc.abstractmethod
def parse(self, code, ref_map, ir_map):
return
@property
@abc.abstractmethod
def typ(self):
return
def __eq__(self, other):
return isinstance(other, self.__class__) and self.children == other.children and self._eq(other)
def __ne__(self, other):
return not self == other
def _eq(self, other):
"""Compare non-child-BaseIR attributes of the BaseIR.
Parameters
----------
other
BaseIR of the same class.
Returns
-------
bool
"""
return True
def __hash__(self):
return 31 + hash(str(self))
def new_block(self, i: int) -> bool:
return self.renderable_new_block(self.renderable_idx_of_child(i))
@abc.abstractmethod
def renderable_new_block(self, i: int) -> bool:
return self.new_block(i)
@staticmethod
def is_effectful() -> bool:
return False
def bindings(self, i: int, default_value=None):
"""Compute variables bound in child 'i'.
Returns
-------
dict
mapping from bound variables to 'default_value', if provided,
otherwise to their types
"""
return self.renderable_bindings(self.renderable_idx_of_child(i), default_value)
def renderable_bindings(self, i: int, default_value=None):
return {}
def agg_bindings(self, i: int, default_value=None):
return self.renderable_agg_bindings(self.renderable_idx_of_child(i), default_value)
def renderable_agg_bindings(self, i: int, default_value=None):
return {}
def scan_bindings(self, i: int, default_value=None):
return self.renderable_scan_bindings(self.renderable_idx_of_child(i), default_value)
def renderable_scan_bindings(self, i: int, default_value=None):
return {}
def uses_agg_context(self, i: int) -> bool:
return self.renderable_uses_agg_context(self.renderable_idx_of_child(i))
def renderable_uses_agg_context(self, i: int) -> bool:
return False
def uses_scan_context(self, i: int) -> bool:
return self.renderable_uses_scan_context(self.renderable_idx_of_child(i))
def renderable_uses_scan_context(self, i: int) -> bool:
return False
def renderable_idx_of_child(self, i: int) -> int:
return i
# Used as a variable, bound by any node which defines the meaning of
# aggregations (e.g. MatrixMapRows, AggFilter, etc.), and "referenced" by
# any node which performs aggregations (e.g. AggFilter, ApplyAggOp, etc.).
agg_capability = 'agg_capability'
@classmethod
def uses_agg_capability(cls) -> bool:
return False
def child_context_without_bindings(self, i: int, parent_context):
(eval_c, agg_c, scan_c) = parent_context
if self.uses_agg_context(i):
return (agg_c, None, None)
elif self.uses_scan_context(i):
return (scan_c, None, None)
else:
return parent_context
def child_context(self, i: int, parent_context, default_value=None):
return self.renderable_child_context(self.renderable_idx_of_child(i), parent_context, default_value)
def renderable_child_context(self, i: int, parent_context, default_value=None):
base = self.child_context_without_bindings(i, parent_context)
eval_b = self.bindings(i, default_value)
agg_b = self.agg_bindings(i, default_value)
scan_b = self.scan_bindings(i, default_value)
if eval_b or agg_b or scan_b:
(eval_c, agg_c, scan_c) = base
return _env_bind(eval_c, eval_b), _env_bind(agg_c, agg_b), _env_bind(scan_c, scan_b)
else:
return base
@property
def free_vars(self):
return set()
@property
def free_agg_vars(self):
return set()
@property
def free_scan_vars(self):
return set()
class IR(BaseIR):
def __init__(self, *children):
super().__init__(*children)
self._aggregations = None
self._free_vars = None
self._free_agg_vars = None
self._free_scan_vars = None
@property
def aggregations(self):
if self._aggregations is None:
self._aggregations = [agg for child in self.children for agg in child.aggregations]
return self._aggregations
@property
def is_nested_field(self):
return False
def search(self, criteria):
others = [node for child in self.children if isinstance(child, IR) for node in child.search(criteria)]
if criteria(self):
return others + [self]
return others
def copy(self, *args):
raise NotImplementedError("IR has no copy method defined.")
def map_ir(self, f):
new_children = []
for child in self.children:
if isinstance(child, IR):
new_children.append(f(child))
else:
new_children.append(child)
return self.copy(*new_children)
@property
def bound_variables(self):
return {v for child in self.children if isinstance(child, IR) for v in child.bound_variables}
@property
def typ(self):
if self._type is None:
self._compute_type({}, None)
assert self._type is not None, self
return self._type
def renderable_new_block(self, i: int) -> bool:
return False
@abc.abstractmethod
def _compute_type(self, env, agg_env):
raise NotImplementedError(self)
def parse(self, code, ref_map={}, ir_map={}):
return Env.hail().expr.ir.IRParser.parse_value_ir(
code,
{k: t._parsable_string() for k, t in ref_map.items()},
ir_map)
@property
def free_vars(self):
def vars_from_child(i):
if self.uses_agg_context(i):
assert(len(self.children[i].free_agg_vars) == 0)
return set()
if self.uses_scan_context(i):
assert(len(self.children[i].free_scan_vars) == 0)
return set()
return self.children[i].free_vars.difference(self.bindings(i, 0).keys())
if self._free_vars is None:
self._free_vars = {
var for i in range(len(self.children))
for var in vars_from_child(i)}
if self.uses_agg_capability():
self._free_vars.add(BaseIR.agg_capability)
return self._free_vars
@property
def free_agg_vars(self):
def vars_from_child(i):
if self.uses_agg_context(i):
return self.children[i].free_vars
return self.children[i].free_agg_vars.difference(self.agg_bindings(i, 0).keys())
if self._free_agg_vars is None:
self._free_agg_vars = {
var for i in range(len(self.children))
for var in vars_from_child(i)}
return self._free_agg_vars
@property
def free_scan_vars(self):
def vars_from_child(i):
if self.uses_scan_context(i):
return self.children[i].free_vars
return self.children[i].free_scan_vars.difference(self.scan_bindings(i, 0).keys())
if self._free_scan_vars is None:
self._free_scan_vars = {
var for i in range(len(self.children))
for var in vars_from_child(i)}
return self._free_scan_vars
class TableIR(BaseIR):
def __init__(self, *children):
super().__init__(*children)
@abc.abstractmethod
def _compute_type(self):
...
@property
def typ(self):
if self._type is None:
self._compute_type()
assert self._type is not None, self
return self._type
def renderable_new_block(self, i: int) -> bool:
return True
def parse(self, code, ref_map={}, ir_map={}):
return Env.hail().expr.ir.IRParser.parse_table_ir(code, ref_map, ir_map)
global_env = {'global'}
row_env = {'global', 'row'}
class MatrixIR(BaseIR):
def __init__(self, *children):
super().__init__(*children)
@abc.abstractmethod
def _compute_type(self):
...
@property
def typ(self):
if self._type is None:
self._compute_type()
assert self._type is not None, self
return self._type
def renderable_new_block(self, i: int) -> bool:
return True
def parse(self, code, ref_map={}, ir_map={}):
return Env.hail().expr.ir.IRParser.parse_matrix_ir(code, ref_map, ir_map)
global_env = {'global'}
row_env = {'global', 'va'}
col_env = {'global', 'sa'}
entry_env = {'global', 'sa', 'va', 'g'}
class BlockMatrixIR(BaseIR):
def __init__(self, *children):
super().__init__(*children)
@abc.abstractmethod
def _compute_type(self):
...
@property
def typ(self):
if self._type is None:
self._compute_type()
assert self._type is not None, self
return self._type
def renderable_new_block(self, i: int) -> bool:
return True
def parse(self, code, ref_map={}, ir_map={}):
return Env.hail().expr.ir.IRParser.parse_blockmatrix_ir(code, ref_map, ir_map)
class JIRVectorReference(object):
def __init__(self, jid, length, item_type):
self.jid = jid
self.length = length
self.item_type = item_type
def __len__(self):
return self.length
def __del__(self):
try:
Env.hc()._jhc.pyRemoveIrVector(self.jid)
# there is only so much we can do if the attempt to remove the unused IR fails,
# especially since this will often get called during interpreter shutdown.
except Exception:
pass
```
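The free-variable machinery above follows the usual rule: a node's free variables are the union of its children's free variables minus whatever names the node binds for each child. A miniature let-language sketch (hypothetical, not Hail IR) shows that rule in isolation:
```python
# Miniature free-variable computation, mirroring the bindings-subtraction rule.
class Ref:
    def __init__(self, name):
        self.name = name
    def free_vars(self):
        return {self.name}

class Add:
    def __init__(self, left, right):
        self.left, self.right = left, right
    def free_vars(self):
        return self.left.free_vars() | self.right.free_vars()

class Let:
    def __init__(self, name, value, body):
        self.name, self.value, self.body = name, value, body
    def free_vars(self):
        # 'name' is bound only inside the body, not inside the bound value
        return self.value.free_vars() | (self.body.free_vars() - {self.name})

# free vars of: let x = y in (x + z)  ->  {'y', 'z'}
expr = Let("x", Ref("y"), Add(Ref("x"), Ref("z")))
print(sorted(expr.free_vars()))  # ['y', 'z']
```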
#### File: hailtop/auth/auth.py
```python
import aiohttp
from hailtop.config import get_deploy_config
from hailtop.utils import async_to_blocking, request_retry_transient_errors
from .tokens import get_tokens
async def async_get_userinfo(deploy_config=None):
if not deploy_config:
deploy_config = get_deploy_config()
headers = service_auth_headers(deploy_config, 'auth')
userinfo_url = deploy_config.url('auth', '/api/v1alpha/userinfo')
async with aiohttp.ClientSession(
raise_for_status=True, timeout=aiohttp.ClientTimeout(total=5)) as session:
resp = await request_retry_transient_errors(
session, 'GET', userinfo_url, headers=headers)
return await resp.json()
def get_userinfo(deploy_config=None):
return async_to_blocking(async_get_userinfo(deploy_config))
def namespace_auth_headers(deploy_config, ns, authorize_target=True):
tokens = get_tokens()
headers = {}
if authorize_target:
headers['Authorization'] = f'Bearer {tokens.namespace_token_or_error(ns)}'
if deploy_config.location() == 'external' and ns != 'default':
headers['X-Hail-Internal-Authorization'] = f'Bearer {tokens.namespace_token_or_error("default")}'
return headers
def service_auth_headers(deploy_config, service, authorize_target=True):
ns = deploy_config.service_ns(service)
return namespace_auth_headers(deploy_config, ns, authorize_target)
```
#### File: hailctl/batch/batch_cli_utils.py
```python
import aiohttp
def get_batch_if_exists(client, id):
try:
return client.get_batch(id)
except aiohttp.client_exceptions.ClientResponseError as cle:
if cle.code == 404:
return None
raise cle
def get_job_if_exists(client, batch_id, job_id):
try:
return client.get_job(batch_id, job_id)
except aiohttp.client_exceptions.ClientResponseError as cle:
if cle.code == 404:
return None
raise cle
def bool_string_to_bool(bool_string):
if bool_string in ["True", "true", "t"]:
return True
if bool_string in ['False', 'false', 'f']:
return False
raise ValueError("Input could not be resolved to a bool")
```
#### File: hailctl/batch/delete.py
```python
from .batch_cli_utils import get_batch_if_exists
def init_parser(parser):
parser.add_argument('batch_id', type=int, help="ID number of batch to be deleted")
def main(args, pass_through_args, client): # pylint: disable=unused-argument
maybe_batch = get_batch_if_exists(client, args.batch_id)
if maybe_batch is None:
print(f"Batch with batch_id {args.batch_id} not found")
exit(1)
batch = maybe_batch
batch.delete()
print(f"Batch with batch_id {args.batch_id} was deleted successfully")
```
#### File: hailtop/pipeline/utils.py
```python
class PipelineException(Exception):
def __init__(self, msg=''):
self.msg = msg
super(PipelineException, self).__init__(msg)
```
#### File: hail/utils/java.py
```python
import json
import socketserver
import socket
import sys
import re
from threading import Thread
import py4j
import hail
class FatalError(Exception):
""":class:`.FatalError` is an error thrown by Hail method failures"""
class Env:
_jvm = None
_gateway = None
_hail_package = None
_jutils = None
_hc = None
_counter = 0
_seed_generator = None
@staticmethod
def get_uid(base=None):
if base:
str_base = base
else:
str_base = ''
Env._counter += 1
return f"__uid_{str_base}{Env._counter}"
@staticmethod
def jvm():
if not Env._jvm:
Env.hc()
assert Env._jvm is not None
return Env._jvm
@staticmethod
def hail():
if not Env._hail_package:
Env._hail_package = getattr(Env.jvm(), 'is').hail
return Env._hail_package
@staticmethod
def gateway():
if not Env._gateway:
Env.hc()
assert Env._gateway is not None
return Env._gateway
@staticmethod
def jutils():
if not Env._jutils:
Env._jutils = scala_package_object(Env.hail().utils)
return Env._jutils
@staticmethod
def hc():
if not Env._hc:
from hail.context import init
import sys
sys.stderr.write("Initializing Spark and Hail with default parameters...\n")
init()
assert Env._hc is not None
return Env._hc
@staticmethod
def backend():
return Env.hc()._backend
@staticmethod
def spark_backend(op):
b = Env.backend()
if isinstance(b, hail.backend.SparkBackend):
return b
else:
raise NotImplementedError(
f"{b.__class__.__name__} doesn't support {op}, only SparkBackend")
@staticmethod
def fs():
return Env.backend().fs
@staticmethod
def spark_session():
return Env.hc()._spark_session
_dummy_table = None
@staticmethod
def dummy_table():
if Env._dummy_table is None:
import hail
Env._dummy_table = hail.utils.range_table(1, 1).key_by().cache()
return Env._dummy_table
@staticmethod
def set_seed(seed):
Env._seed_generator = hail.utils.HailSeedGenerator(seed)
@staticmethod
def next_seed():
if Env._seed_generator is None:
Env.set_seed(None)
return Env._seed_generator.next_seed()
def jarray(jtype, lst):
jarr = Env.gateway().new_array(jtype, len(lst))
for i, s in enumerate(lst):
jarr[i] = s
return jarr
def scala_object(jpackage, name):
return getattr(getattr(jpackage, name + '$'), 'MODULE$')
def scala_package_object(jpackage):
return scala_object(jpackage, 'package')
def jnone():
return scala_object(Env.jvm().scala, 'None')
def jsome(x):
return Env.jvm().scala.Some(x)
def joption(x):
return jsome(x) if x else jnone()
def from_option(x):
return x.get() if x.isDefined() else None
def jindexed_seq(x):
return Env.jutils().arrayListToISeq(x)
def jset(x):
return Env.jutils().arrayListToSet(x)
def jindexed_seq_args(x):
args = [x] if isinstance(x, str) else x
return jindexed_seq(args)
def jset_args(x):
args = [x] if isinstance(x, str) else x
return jset(args)
def jiterable_to_list(it):
if it is not None:
return list(Env.jutils().iterableToArrayList(it))
else:
return None
_parsable_str = re.compile(r'[\w_]+')
def escape_parsable(s):
if _parsable_str.fullmatch(s):
return s
else:
return '`' + s.encode('unicode_escape').decode('utf-8').replace('`', '\\`') + '`'
def unescape_parsable(s):
return bytes(s.replace('\\`', '`'), 'utf-8').decode('unicode_escape')
def jarray_to_list(a):
return list(a) if a else None
class Log4jLogger:
log_pkg = None
@staticmethod
def get():
if Log4jLogger.log_pkg is None:
Log4jLogger.log_pkg = Env.jutils()
return Log4jLogger.log_pkg
def error(msg):
Log4jLogger.get().error(msg)
def warn(msg):
Log4jLogger.get().warn(msg)
def info(msg):
Log4jLogger.get().info(msg)
def handle_java_exception(f):
def deco(*args, **kwargs):
import pyspark
try:
return f(*args, **kwargs)
except py4j.protocol.Py4JJavaError as e:
s = e.java_exception.toString()
# py4j catches NoSuchElementExceptions to stop array iteration
if s.startswith('java.util.NoSuchElementException'):
raise
tpl = Env.jutils().handleForPython(e.java_exception)
deepest, full = tpl._1(), tpl._2()
raise FatalError('%s\n\nJava stack trace:\n%s\n'
'Hail version: %s\n'
'Error summary: %s' % (deepest, full, hail.__version__, deepest)) from None
except pyspark.sql.utils.CapturedException as e:
raise FatalError('%s\n\nJava stack trace:\n%s\n'
'Hail version: %s\n'
'Error summary: %s' % (e.desc, e.stackTrace, hail.__version__, e.desc)) from None
return deco
_installed = False
_original = None
def install_exception_handler():
global _installed
global _original
if not _installed:
_original = py4j.protocol.get_return_value
_installed = True
# The original `get_return_value` is not patched, it's idempotent.
patched = handle_java_exception(_original)
# only patch the one used in py4j.java_gateway (call Java API)
py4j.java_gateway.get_return_value = patched
def uninstall_exception_handler():
global _installed
global _original
if _installed:
_installed = False
py4j.protocol.get_return_value = _original
class LoggingTCPHandler(socketserver.StreamRequestHandler):
def handle(self):
for line in self.rfile:
sys.stderr.write(line.decode("ISO-8859-1"))
class SimpleServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
daemon_threads = True
allow_reuse_address = True
def __init__(self, server_address, handler_class):
socketserver.TCPServer.__init__(self, server_address, handler_class)
def connect_logger(host, port):
"""
This method starts a simple server which listens on a port for a
client to connect and start writing messages. Whenever a message
is received, it is written to sys.stderr. The server is run in
a daemon thread from the caller, which is killed when the caller
thread dies.
If the socket is in use, then the server tries to listen on the
next port (port + 1). After 25 tries, it gives up.
:param str host: Hostname for server.
:param int port: Port to listen on.
"""
server = None
tries = 0
max_tries = 25
while not server:
try:
server = SimpleServer((host, port), LoggingTCPHandler)
except socket.error:
port += 1
tries += 1
if tries >= max_tries:
sys.stderr.write(
'WARNING: Could not find a free port for logger, maximum retries {} exceeded.'.format(max_tries))
return
t = Thread(target=server.serve_forever, args=())
# The thread should be a daemon so that it shuts down when the parent thread is killed
t.daemon = True
t.start()
Env.jutils().addSocketAppender(host, port)
```
#### File: hail/methods/test_qc.py
```python
import unittest
import hail as hl
import hail.expr.aggregators as agg
from ..helpers import *
setUpModule = startTestHailContext
tearDownModule = stopTestHailContext
class Tests(unittest.TestCase):
def test_sample_qc(self):
data = [
{'v': '1:1:A:T', 's': '1', 'GT': hl.Call([0, 0]), 'GQ': 10, 'DP': 0},
{'v': '1:2:A:T,C', 's': '1', 'GT': hl.Call([1]), 'GQ': 15, 'DP': 5},
{'v': '1:3:A:G,C', 's': '1', 'GT': hl.Call([2, 2]), 'GQ': 10, 'DP': 4},
{'v': '1:4:G:A', 's': '1', 'GT': hl.Call([0, 1]), 'GQ': None, 'DP': 5},
{'v': '1:5:C:CG', 's': '1', 'GT': hl.Call([1, 1]), 'GQ': 20, 'DP': 3},
{'v': '1:6:C:A', 's': '1', 'GT': None, 'GQ': 0, 'DP': None},
]
ht = hl.Table.parallelize(data, hl.dtype('struct{v: str, s: str, GT: call, GQ: int, DP: int}'))
ht = ht.transmute(**hl.parse_variant(ht.v))
mt = ht.to_matrix_table(['locus', 'alleles'], ['s'])
mt = hl.sample_qc(mt, 'sqc')
r = mt.cols().select('sqc').collect()
self.assertAlmostEqual(r[0].sqc.gq_stats.mean, 11)
self.assertAlmostEqual(r[0].sqc.gq_stats.stdev, 6.6332495807)
self.assertAlmostEqual(r[0].sqc.gq_stats.min, 0)
self.assertAlmostEqual(r[0].sqc.gq_stats.max, 20)
self.assertAlmostEqual(r[0].sqc.dp_stats.mean, 3.399999999)
self.assertAlmostEqual(r[0].sqc.dp_stats.stdev, 1.8547236990)
self.assertAlmostEqual(r[0].sqc.dp_stats.min, 0)
self.assertAlmostEqual(r[0].sqc.dp_stats.max, 5)
self.assertAlmostEqual(r[0].sqc.call_rate, 0.8333333333)
self.assertEqual(r[0].sqc.n_called, 5)
self.assertEqual(r[0].sqc.n_not_called, 1)
self.assertEqual(r[0].sqc.n_hom_ref, 1)
self.assertEqual(r[0].sqc.n_het, 1)
self.assertEqual(r[0].sqc.n_hom_var, 3)
self.assertEqual(r[0].sqc.n_insertion, 2)
self.assertEqual(r[0].sqc.n_deletion, 0)
self.assertEqual(r[0].sqc.n_singleton, 3)
self.assertEqual(r[0].sqc.n_transition, 1)
self.assertEqual(r[0].sqc.n_transversion, 3)
self.assertEqual(r[0].sqc.n_star, 0)
self.assertEqual(r[0].sqc.n_non_ref, 4)
self.assertAlmostEqual(r[0].sqc.r_ti_tv, 0.333333333)
self.assertAlmostEqual(r[0].sqc.r_het_hom_var, 0.3333333333)
self.assertAlmostEqual(r[0].sqc.r_insertion_deletion, None)
def test_variant_qc(self):
data = [
{'v': '1:1:A:T', 's': '1', 'GT': hl.Call([0, 0]), 'GQ': 10, 'DP': 0},
{'v': '1:1:A:T', 's': '2', 'GT': hl.Call([1, 1]), 'GQ': 10, 'DP': 5},
{'v': '1:1:A:T', 's': '3', 'GT': hl.Call([0, 1]), 'GQ': 11, 'DP': 100},
{'v': '1:1:A:T', 's': '4', 'GT': None, 'GQ': None, 'DP': 100},
{'v': '1:2:A:T,C', 's': '1', 'GT': hl.Call([1, 2]), 'GQ': 10, 'DP': 5},
{'v': '1:2:A:T,C', 's': '2', 'GT': hl.Call([2, 2]), 'GQ': 10, 'DP': 5},
{'v': '1:2:A:T,C', 's': '3', 'GT': hl.Call([0, 1]), 'GQ': 10, 'DP': 5},
{'v': '1:2:A:T,C', 's': '4', 'GT': hl.Call([1, 1]), 'GQ': 10, 'DP': 5},
]
ht = hl.Table.parallelize(data, hl.dtype('struct{v: str, s: str, GT: call, GQ: int, DP: int}'))
ht = ht.transmute(**hl.parse_variant(ht.v))
mt = ht.to_matrix_table(['locus', 'alleles'], ['s'])
mt = hl.variant_qc(mt, 'vqc')
r = mt.rows().collect()
self.assertEqual(r[0].vqc.AF, [0.5, 0.5])
self.assertEqual(r[0].vqc.AC, [3, 3])
self.assertEqual(r[0].vqc.AN, 6)
self.assertEqual(r[0].vqc.homozygote_count, [1, 1])
self.assertEqual(r[0].vqc.n_called, 3)
self.assertEqual(r[0].vqc.n_not_called, 1)
self.assertEqual(r[0].vqc.call_rate, 0.75)
self.assertEqual(r[0].vqc.n_het, 1)
self.assertEqual(r[0].vqc.n_non_ref, 2)
self.assertEqual(r[0].vqc.het_freq_hwe, 0.6)
self.assertEqual(r[0].vqc.p_value_hwe, 0.7)
self.assertEqual(r[0].vqc.dp_stats.min, 0)
self.assertEqual(r[0].vqc.dp_stats.max, 100)
self.assertEqual(r[0].vqc.dp_stats.mean, 51.25)
self.assertAlmostEqual(r[0].vqc.dp_stats.stdev, 48.782040752719645)
self.assertEqual(r[0].vqc.gq_stats.min, 10)
self.assertEqual(r[0].vqc.gq_stats.max, 11)
self.assertAlmostEqual(r[0].vqc.gq_stats.mean, 10.333333333333334)
self.assertAlmostEqual(r[0].vqc.gq_stats.stdev, 0.47140452079103168)
self.assertEqual(r[1].vqc.AF, [0.125, 0.5, 0.375])
self.assertEqual(r[1].vqc.AC, [1, 4, 3])
self.assertEqual(r[1].vqc.AN, 8)
self.assertEqual(r[1].vqc.homozygote_count, [0, 1, 1])
self.assertEqual(r[1].vqc.n_called, 4)
self.assertEqual(r[1].vqc.n_not_called, 0)
self.assertEqual(r[1].vqc.call_rate, 1.0)
self.assertEqual(r[1].vqc.n_het, 2)
self.assertEqual(r[1].vqc.n_non_ref, 4)
self.assertEqual(r[1].vqc.p_value_hwe, None)
self.assertEqual(r[1].vqc.het_freq_hwe, None)
self.assertEqual(r[1].vqc.dp_stats.min, 5)
self.assertEqual(r[1].vqc.dp_stats.max, 5)
self.assertEqual(r[1].vqc.dp_stats.mean, 5)
self.assertEqual(r[1].vqc.dp_stats.stdev, 0.0)
self.assertEqual(r[1].vqc.gq_stats.min, 10)
self.assertEqual(r[1].vqc.gq_stats.max, 10)
self.assertEqual(r[1].vqc.gq_stats.mean, 10)
self.assertEqual(r[1].vqc.gq_stats.stdev, 0)
def test_concordance(self):
dataset = get_dataset()
glob_conc, cols_conc, rows_conc = hl.concordance(dataset, dataset)
self.assertEqual(sum([sum(glob_conc[i]) for i in range(5)]), dataset.count_rows() * dataset.count_cols())
counts = dataset.aggregate_entries(hl.Struct(n_het=agg.filter(dataset.GT.is_het(), agg.count()),
n_hom_ref=agg.filter(dataset.GT.is_hom_ref(),
agg.count()),
n_hom_var=agg.filter(dataset.GT.is_hom_var(),
agg.count()),
nNoCall=agg.filter(hl.is_missing(dataset.GT),
agg.count())))
self.assertEqual(glob_conc[0][0], 0)
self.assertEqual(glob_conc[1][1], counts.nNoCall)
self.assertEqual(glob_conc[2][2], counts.n_hom_ref)
self.assertEqual(glob_conc[3][3], counts.n_het)
self.assertEqual(glob_conc[4][4], counts.n_hom_var)
[self.assertEqual(glob_conc[i][j], 0) for i in range(5) for j in range(5) if i != j]
self.assertTrue(cols_conc.all(hl.sum(hl.flatten(cols_conc.concordance)) == dataset.count_rows()))
self.assertTrue(rows_conc.all(hl.sum(hl.flatten(rows_conc.concordance)) == dataset.count_cols()))
cols_conc.write('/tmp/foo.kt', overwrite=True)
rows_conc.write('/tmp/foo.kt', overwrite=True)
def test_filter_alleles(self):
# poor man's Gen
paths = [resource('sample.vcf'),
resource('multipleChromosomes.vcf'),
resource('sample2.vcf')]
for path in paths:
ds = hl.import_vcf(path)
self.assertEqual(
hl.filter_alleles(ds, lambda a, i: False).count_rows(), 0)
self.assertEqual(hl.filter_alleles(ds, lambda a, i: True).count_rows(), ds.count_rows())
def test_filter_alleles_hts(self):
# 1 variant: A:T,G
ds = hl.import_vcf(resource('filter_alleles/input.vcf'))
self.assertTrue(
hl.filter_alleles_hts(ds, lambda a, i: a == 'T', subset=True)
.drop('old_alleles', 'old_locus', 'new_to_old', 'old_to_new')
._same(hl.import_vcf(resource('filter_alleles/keep_allele1_subset.vcf'))))
self.assertTrue(
hl.filter_alleles_hts(ds, lambda a, i: a == 'G', subset=True)
.drop('old_alleles', 'old_locus', 'new_to_old', 'old_to_new')
._same(hl.import_vcf(resource('filter_alleles/keep_allele2_subset.vcf')))
)
self.assertTrue(
hl.filter_alleles_hts(ds, lambda a, i: a != 'G', subset=False)
.drop('old_alleles', 'old_locus', 'new_to_old', 'old_to_new')
._same(hl.import_vcf(resource('filter_alleles/keep_allele1_downcode.vcf')))
)
self.assertTrue(
hl.filter_alleles_hts(ds, lambda a, i: a == 'G', subset=False)
.drop('old_alleles', 'old_locus', 'new_to_old', 'old_to_new')
._same(hl.import_vcf(resource('filter_alleles/keep_allele2_downcode.vcf')))
)
def test_sample_and_variant_qc_call_rate(self):
mt = hl.import_vcf(resource('sample.vcf'))
n_rows, n_cols = mt.count()
mt = mt.filter_entries(mt.GQ > 5)
mt = hl.variant_qc(hl.sample_qc(mt))
assert mt.aggregate_cols(hl.agg.all(hl.approx_equal(mt.sample_qc.call_rate, mt.sample_qc.n_called / n_rows)))
assert mt.aggregate_rows(hl.agg.all(hl.approx_equal(mt.variant_qc.call_rate, mt.variant_qc.n_called / n_cols)))
``` |
{
"source": "3w36zj6/tower-battle",
"score": 3
} |
#### File: 3w36zj6/tower-battle/main_window.py
```python
import arcade
import pymunk
import cv2
import random
import timeit
import math
from PIL import Image
import numpy as np
import sys
SCREEN_WIDTH = 1280
SCREEN_HEIGHT = 720
class Camera:
def __init__(self, camera_id):
self.capture = cv2.VideoCapture(camera_id)
self.count = 0
self.position = [SCREEN_WIDTH / 2, 650]
self.angle = 0
def update(self):
self.sprite = arcade.Sprite()
self.sprite.position = self.position
self.sprite.angle = self.angle
ret, frame_image_cv = self.capture.read()
frame_image_cv = cv2.resize(frame_image_cv, (320, 180))
frame_image_cv = cv2.cvtColor(frame_image_cv, cv2.COLOR_RGB2RGBA)
# Convert the frame to HSV
hsv = cv2.cvtColor(frame_image_cv, cv2.COLOR_BGR2HSV)
# Binarization (threshold on the given HSV range, inverted)
bin_img = ~cv2.inRange(hsv, (62, 100, 0), (79, 255, 255))
# Contour extraction
contours = cv2.findContours(
bin_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
)[1]
# Get the contour with the largest area
if not contours:  # nothing visible in the frame
self.sprite.texture = None
return
contour = max(contours, key=lambda x: cv2.contourArea(x))
# Create the mask image
mask = np.zeros_like(bin_img)
cv2.drawContours(mask, [contour], -1, color=255, thickness=-1)
# Use the width and height common to the foreground and background images
w = frame_image_cv.shape[1]
h = frame_image_cv.shape[0]
# Region to composite
fg_roi = frame_image_cv[:h, :w]
bg_roi = np.zeros((h, w, 4), np.uint8)
# Composite the foreground over the background
frame_image_cv = np.where(mask[:h, :w, np.newaxis] == 0, bg_roi, fg_roi)
frame_img_pil = cv2pil(frame_image_cv)
self.sprite.texture = arcade.Texture(
name=f"{self.count}", image=frame_img_pil, hit_box_algorithm="Detailed"
)
def draw(self):
if self.sprite.texture:
self.sprite.draw()
def move_x(self, change_x):
self.position[0] += change_x
self.position[0] = max(self.position[0], 0)
self.position[0] = min(self.position[0], 1280)
def move_y(self, change_y):
self.position[1] += change_y
self.position[1] = max(self.position[1], 650)
def rotate(self, change_angle):
self.angle += change_angle
def get_sprite(self):
self.count += 1
return self.sprite
def cv2pil(image):
# OpenCV image -> PIL image
new_image = image.copy()
if new_image.ndim == 2:  # grayscale
pass
elif new_image.shape[2] == 3:  # color
new_image = cv2.cvtColor(new_image, cv2.COLOR_BGR2RGB)
elif new_image.shape[2] == 4:  # transparency (alpha channel)
new_image = cv2.cvtColor(new_image, cv2.COLOR_BGRA2RGBA)
new_image = Image.fromarray(new_image)
return new_image
class MyGame(arcade.Window):
"""Main application class."""
def __init__(self, camera_id):
super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, "Tower Battle")
self.set_update_rate(1 / 1000)
# Camera
self.camera_id = camera_id
self.camera = Camera(self.camera_id)
self.camera.update()
self.setup()
def setup(self):
self.object_list = arcade.SpriteList()
arcade.set_background_color(arcade.color.SKY_BLUE)
self.draw_time = 0
self.delta_time = 0
self.time = 0
# Player 1's turn
self.player1_turn = True
# Game Over
self.gameover = False
# Scroll
self.view_bottom = 0
# Draw hit box
self.draw_hit_box = False
# Key
self.a_pressed = (
self.d_pressed
) = (
self.up_pressed
) = self.down_pressed = self.left_pressed = self.right_pressed = False
# Pymunk
self.space = pymunk.Space()
self.space.gravity = (0.0, -400.0)
# Terrain
self.terrain = arcade.Sprite(
filename="terrain.png",
center_x=SCREEN_WIDTH / 2,
center_y=SCREEN_HEIGHT / 2,
)
body = pymunk.Body(body_type=pymunk.Body.STATIC)
body.position = self.terrain.position
shape = pymunk.Poly(body, self.terrain.texture.hit_box_points)
shape.friction = 1
shape.elasticity = 0
self.space.add(body, shape)
def on_draw(self):
# Render the screen.
# This command has to happen before we start drawing
arcade.start_render()
draw_start_time = timeit.default_timer()
self.object_list.draw()
"""
# Display timings
output = f"FPS: {1//self.delta_time}"
arcade.draw_text(output, 20, SCREEN_HEIGHT -
20, arcade.color.WHITE, 12)
output = f"Drawing time: {self.draw_time:.3f}"
arcade.draw_text(output, 20, SCREEN_HEIGHT -
40, arcade.color.WHITE, 12)
"""
arcade.draw_text(
f"{2-self.player1_turn}P",
40,
SCREEN_HEIGHT - 60 + self.view_bottom,
arcade.color.WHITE,
40,
)
if self.gameover:
arcade.draw_text(
f"{2-self.player1_turn}P is winner.",
0,
0 + self.view_bottom,
arcade.color.WHITE,
40,
)
# Draw hit box
if self.draw_hit_box:
for obj in self.object_list:
obj.draw_hit_box(arcade.color.RED, 3)
# Camera
self.camera.draw()
# Terrain
self.terrain.draw()
self.draw_time = timeit.default_timer() - draw_start_time
def on_update(self, delta_time):
start_time = timeit.default_timer()
# Check for balls that fall off the screen
for obj in self.object_list:
if obj.pymunk_shape.body.position.y < 0:
# Remove balls from physics space
self.space.remove(obj.pymunk_shape, obj.pymunk_shape.body)
# Remove balls from physics list
obj.remove_from_sprite_lists()
self.gameover = True
self.space.step(1 / 60.0)
# Move sprites to where physics objects are
for obj in self.object_list:
obj.center_x = obj.pymunk_shape.body.position.x
obj.center_y = obj.pymunk_shape.body.position.y
obj.angle = math.degrees(obj.pymunk_shape.body.angle)
# Camera
self.camera.update()
if self.a_pressed:
self.camera.rotate(3)
if self.d_pressed:
self.camera.rotate(-3)
if self.up_pressed:
self.camera.move_y(3)
if self.down_pressed:
self.camera.move_y(-3)
if self.left_pressed:
self.camera.move_x(-3)
if self.right_pressed:
self.camera.move_x(3)
self.view_bottom = self.camera.position[1] - 650
arcade.set_viewport(
0, SCREEN_WIDTH, self.view_bottom, SCREEN_HEIGHT + self.view_bottom
)
self.time = timeit.default_timer() - start_time
self.delta_time = delta_time
def on_key_press(self, key, modifiers):
if key == arcade.key.ESCAPE:
self.setup()
if key == arcade.key.SPACE and not self.gameover:
self.generate_sprite(self.camera.get_sprite())
self.player1_turn = not self.player1_turn
if key == arcade.key.A:
self.a_pressed = True
if key == arcade.key.D:
self.d_pressed = True
if key == arcade.key.UP:
self.up_pressed = True
if key == arcade.key.DOWN:
self.down_pressed = True
if key == arcade.key.LEFT:
self.left_pressed = True
if key == arcade.key.RIGHT:
self.right_pressed = True
if key == arcade.key.F1:
self.draw_hit_box = not self.draw_hit_box
if key == arcade.key.F11:
self.set_fullscreen(not self.fullscreen)
def on_key_release(self, key, modifiers):
if key == arcade.key.A:
self.a_pressed = False
if key == arcade.key.D:
self.d_pressed = False
if key == arcade.key.UP:
self.up_pressed = False
if key == arcade.key.DOWN:
self.down_pressed = False
if key == arcade.key.LEFT:
self.left_pressed = False
if key == arcade.key.RIGHT:
self.right_pressed = False
def generate_sprite(self, sprite):
if not sprite.texture:
return
mass = 0.5
inertia = pymunk.moment_for_poly(mass, sprite.texture.hit_box_points)
body = pymunk.Body(mass, inertia)
body.position = sprite.position
body.angle = math.radians(sprite.angle)
shape = pymunk.Poly(body, sprite.texture.hit_box_points)
shape.friction = 1
shape.elasticity = 0
self.space.add(body, shape)
sprite.pymunk_shape = shape
self.object_list.append(sprite)
if __name__ == "__main__":
MyGame(int(sys.argv[1]) if len(sys.argv) >= 2 else 0)
arcade.run()
``` |
{
"source": "3w3lfin/Siren",
"score": 2
} |
#### File: app/templatetags/upload_tags.py
```python
from django import template
from django.contrib.auth.models import User
register = template.Library()
from django.shortcuts import render, get_object_or_404
from ..models import Analysis, ProjectComment, Module, Project, File, ParamsComment, Param
from ..forms import ProjectEditCommForm, ParamForm2, ModuleParamForm, ParamTextForm, ModuleForm, TextForm, ParamCommForm, ParamForm, ProjectPlanForm
from django.db.models.aggregates import Max
from django.forms import modelformset_factory
from django.forms import ModelForm, Textarea, NumberInput,Select
#render upload div
@register.simple_tag
def upload_js():
return """
<!-- The template to display files available for upload -->
<script id="template-upload" type="text/x-tmpl">
{% for (var i=0, file; file=o.files[i]; i++) { %}
<tr class="template-upload fade">
<td>
<span class="preview"></span>
</td>
<td>
<p class="name">{%=file.name%}</p>
{% if (file.error) { %}
<div><span class="label label-important">{%=locale.fileupload.error%}</span> {%=file.error%}</div>
{% } %}
</td>
<td>
<p class="size">{%=o.formatFileSize(file.size)%}</p>
{% if (!o.files.error) { %}
<div class="progress progress-striped active" role="progressbar" aria-valuemin="0" aria-valuemax="100" aria-valuenow="0"><div class="progress-bar progress-bar-success" style="width:0%;"></div></div>
{% } %}
</td>
<td>
{% if (!o.files.error && !i && !o.options.autoUpload) { %}
<button class="btn btn-primary start">
<i class="glyphicon glyphicon-upload"></i>
<span>{%=locale.fileupload.start%}</span>
</button>
{% } %}
{% if (!i) { %}
<button class="btn btn-warning cancel">
<i class="glyphicon glyphicon-ban-circle"></i>
<span>{%=locale.fileupload.cancel%}</span>
</button>
{% } %}
</td>
</tr>
{% } %}
</script>
<!-- The template to display files available for download -->
<script id="template-download" type="text/x-tmpl">
{% for (var i=0, file; file=o.files[i]; i++) { %}
<tr class="template-download fade">
<td>
<span class="preview">
{% if (file.thumbnailUrl) { %}
<a href="{%=file.url%}" title="{%=file.name%}" download="{%=file.name%}" data-gallery><img src="{%=file.thumbnailUrl%}"></a>
{% } %}
</span>
</td>
<td>
<p class="name">
<a href="{%=file.url%}" title="{%=file.name%}" download="{%=file.name%}" {%=file.thumbnailUrl?'data-gallery':''%}>{%=file.name%}</a>
</p>
{% if (file.error) { %}
<div><span class="label label-important">{%=locale.fileupload.error%}</span> {%=file.error%}</div>
{% } %}
</td>
<td>
<span class="size">{%=o.formatFileSize(file.size)%}</span>
</td>
<td>
<button class="btn btn-danger delete" data-type="{%=file.deleteType%}" data-url="{%=file.deleteUrl%}"{% if (file.deleteWithCredentials) { %} data-xhr-fields='{"withCredentials":true}'{% } %}>
<i class="glyphicon glyphicon-trash"></i>
<span>{%=locale.fileupload.destroy%}</span>
</button>
<input type="checkbox" name="delete" value="1" class="toggle">
</td>
</tr>
{% } %}
</script>
"""
#render plan div
@register.simple_tag
def get_plan_html(plans, ancestor=True):
html = ""
for plan in plans:
#on the first (ancestor) pass, render only top-level comments; children are rendered recursively below
if not (ancestor and plan.child):
plan_class = plan.get_p_class_display()
#plan_header = plan.header if plan.header else ""
if plan.p_class == "P":
plan_span = """
<span onclick="changeClassError({0})" class="{0}_plan1 glyphicon glyphicon-remove glyphicon-right glyphicon-red"></span>
<span onclick="changeClassOk({0})" class="{0}_plan1 glyphicon glyphicon-ok glyphicon-right glyphicon-green"></span>
<span style="display: none;" onclick="changeClassError({0})" class="{0}_plan2 glyphicon glyphicon-repeat glyphicon-right glyphicon-blue"></span>
""".format(plan.id)
else:
plan_span = """
<span style="display: none;" onclick="changeClassError({0})" class="{0}_plan1 glyphicon glyphicon-remove glyphicon-right glyphicon-red"></span>
<span style="display: none;" onclick="changeClassOk({0})" class="{0}_plan1 glyphicon glyphicon-ok glyphicon-right glyphicon-green"></span>
<span onclick="changeClassError({0})" class="{0}_plan2 glyphicon glyphicon-repeat glyphicon-right glyphicon-blue"></span>
""".format(plan.id)
html += """
<li id="plan_{0}" class="placeholder-children col-xs-12" data-id="{0}" data-name="{0}">
<div id="{0}" class="panel no_pad col-xs-12 {1}">
<div class="panel-heading col-xs-12 ">
<div class="panel_left col-xs-12 ">
<div class="col-xs-9 top_m"> {3} </div>
<div class="col-xs-3">
<span onclick="removePlan({0})" class="glyphicon glyphicon-right glyphicon-black glyphicon-trash"></span>
{2}
</div>
</div>
</div>
</div>
<div class="left_plan"></div>
<ol>
""".format(plan.id, plan_class, plan_span, plan.comment)
children = ProjectComment.objects.filter(child = plan)
print(plan.id, plan.child)
if children:
html += get_plan_html(children, False)
html += "</ol> </li> <ol></ol>"
return html
#get people who can see file
@register.simple_tag
def get_obj(file):
obj = User.objects.filter(file_user__file = file, file_user__role = 'X', file_user__is_active = True)
if not obj:
return None
else:
analysts = ""
for entry in obj:
analysts += " "
analysts += entry.username
return analysts
#get files belonging to the group
@register.simple_tag
def get_files(group):
obj = File.objects.filter(file_group__group = group, file_group__is_active = True)
if not obj:
return None
else:
files = ""
for entry in obj:
files += " "
files += entry.user_name + entry.ext
return files
#get creator of group
@register.simple_tag
def get_creator(group):
obj = get_object_or_404(User, group_user__group = group, group_user__role = 'C', group_user__is_active = True)
if not obj:
return None
else:
return obj.username
#get people who can see group
@register.simple_tag
def get_group_analysts(group):
obj = User.objects.filter(group_user__group = group, group_user__role = 'X', group_user__is_active = True)
if not obj:
return None
else:
analysts = ""
for entry in obj:
analysts += " "
analysts += entry.username
return analysts
#get comment form
@register.simple_tag
def get_comm_form(comm):
try:
old_comm = ParamsComment.objects.filter(params = comm).values('params').annotate(max_id=Max('id'))
comm_id = old_comm[0]['max_id']
param_comm = ParamsComment.objects.get(pk = comm_id)
param_comm_from = ParamCommForm(instance = param_comm)
except:
param_comm_from = ParamCommForm()
return param_comm_from
#get module comment form
@register.simple_tag
def get_module_form(mod):
try:
old_comm = ModuleComment.objects.filter(module = mod).values('module').annotate(max_id=Max('id'))
comm_id = old_comm[0]['max_id']
old_comm = ModuleComment.objects.get(pk = comm_id)
new_module_com = ModuleCommentForm(instance = old_comm)
except:
new_module_com = ModuleCommentForm()
return new_module_com
#get module form
@register.simple_tag
def get_init_module_form(service):
new_module = ModuleForm(service)
return new_module
#get project comment form
@register.simple_tag
def get_pro_comment(com):
edit_comm = ProjectEditCommForm(instance = com)
return edit_comm
#get module parameters form
@register.simple_tag
def get_param_module_form(service):
new_module = ModuleParamForm(service)
return new_module
#get parameter form
@register.simple_tag
def get_param_form(param):
if param.par_type == "N":
param_form = ParamForm(instance = param)
else:
param_form = ParamTextForm(instance = param)
return param_form
@register.simple_tag
def get_param_limit_form(param, project_name):
print(".")
#get parameters formset
@register.simple_tag
def get_param_limit_formset(param_group, project_id):
param_formset = modelformset_factory(Param, form=ParamForm2)
param_formset = param_formset(form_kwargs={'project_id': project_id}, queryset=Param.objects.filter(is_active=True, params__name = param_group.name), prefix='param_formset')
ile = param_formset.total_form_count()
return param_formset
#get init script form
@register.simple_tag
def get_init_form(init):
text = init.display_text_file()
form = TextForm(initial={'text': text})
return form
#get module analysis
@register.simple_tag
def get_service(module_id):
module = get_object_or_404(Module, pk = module_id)
analysis = Analysis.objects.filter(module = module)
return analysis
#get service modules
@register.simple_tag
def get_modules(service, project):
modules = Module.objects.filter(service = service, is_active = True, project_module__project = project)
return modules
#sort data
@register.filter
def sort_by(queryset, order):
return queryset.order_by(order)
#set global context
@register.simple_tag(takes_context=True)
def set_global_context(context, key, value):
"""
Sets a value to the global template context, so it can
be accessible across blocks.
Note that the block where the global context variable is set must appear
before the other blocks using the variable IN THE BASE TEMPLATE. The order
of the blocks in the extending template is not important.
Usage::
{% extends 'base.html' %}
{% block first %}
{% set_global_context 'foo' 'bar' %}
{% endblock %}
{% block second %}
{{ foo }}
{% endblock %}
"""
print("set ", key, " ", value)
print(context)
context.dicts[0][key] = value
return ''
``` |
{
"source": "3w3rt0n/RoboTelepresenca",
"score": 3
} |
#### File: RoboTelepresenca/Software/Cliente.py
```python
import socket
import sys
import cv2
import pickle
import numpy as np
import struct
#GUI screen
from PIL import Image
from PIL import ImageTk
import tkinter as tki
import threading
import datetime
import imutils
import cv2
import os
# Definitions
HOST = '127.0.0.1'
PORT = 8083
#Screen class
class Tela:
def __init__(self):
#Network connection
self.cliente = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print('[INFO] Cliente iniciado!')
self.cliente.connect((HOST, PORT))
print('[INFO] Cliente conectado.')
print('[INFO] Endereco do servidor: ' + HOST + ':' + str(PORT) + '.')
self.data = b''
self.payload_size = struct.calcsize("L")
#Initialize tkinter
self.frame = None
self.thread = None
self.stopEvent = None
# initialize the root window and image panel
self.root = tki.Tk()
self.root.resizable(width=False, height=False)
self.root.geometry('800x600')
#self.root.resizable(0, 0)
self.panel = None
# Connection form
# create a button, that when pressed, will take the current
# frame and save it to file
btn = tki.Button(self.root, text="Conectar...", command=self.conectar)
btn.pack(side="right", fill="both", expand="yes", padx=10,pady=10)
# start a thread that constantly pools the video sensor for
# the most recently read frame
self.stopEvent = threading.Event()
self.thread = threading.Thread(target=self.videoLoop, args=())
self.thread.start()
# set a callback to handle when the window is closed
self.root.wm_title("Robo de telepresenca - v1.0.0")
self.root.wm_protocol("WM_DELETE_WINDOW", self.onClose)
def videoLoop(self):
# DISCLAIMER:
# I'm not a GUI developer, nor do I even pretend to be. This
# try/except statement is a pretty ugly hack to get around
# a RunTime error that Tkinter throws due to threading
try:
# keep looping over frames until we are instructed to stop
while not self.stopEvent.is_set():
while True:
while len(self.data) < self.payload_size:
self.data += self.cliente.recv(4096)
self.packed_msg_size = self.data[:self.payload_size]
self.data = self.data[self.payload_size:]
self.msg_size = struct.unpack("L", self.packed_msg_size)[0]
while len(self.data) < self.msg_size:
self.data += self.cliente.recv(4096)
self.frame_data = self.data[:self.msg_size]
self.data = self.data[self.msg_size:]
self.frame=pickle.loads(self.frame_data)
print('[INFO] Resolucao: ' + str(self.frame.size) + 'px.')
# grab the frame from the video stream and resize it to
# have a maximum width of 300 pixels
self.frame = imutils.resize(self.frame, width=300)
# OpenCV represents images in BGR order; however PIL
# represents images in RGB order, so we need to swap
# the channels, then convert to PIL and ImageTk format
image = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
image = Image.fromarray(image)
image = ImageTk.PhotoImage(image)
# if the panel is not None, we need to initialize it
if self.panel is None:
self.panel = tki.Label(image=image)
self.panel.image = image
self.panel.pack(side="left", padx=10, pady=10)
# otherwise, simply update the panel
else:
self.panel.configure(image=image)
self.panel.image = image
except RuntimeError as e:
print("[INFO] caught a RuntimeError")
def conectar(self):
# grab the current timestamp and use it to construct the
# output path
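# NOTE (annotation): self.outputPath is never set in __init__, so this leftover
# snapshot handler will raise AttributeError if the "Conectar..." button is pressed.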
ts = datetime.datetime.now()
filename = "{}.jpg".format(ts.strftime("%Y-%m-%d_%H-%M-%S"))
p = os.path.sep.join((self.outputPath, filename))
# save the file
cv2.imwrite(p, self.frame.copy())
print("[INFO] saved {}".format(filename))
def onClose(self):
# set the stop event, cleanup the camera, and allow the rest of
# the quit process to continue
print("[INFO] closing...")
self.stopEvent.set()
self.root.quit()
self.cliente.close()
# start the app
print("[INFO] starting...")
pba = Tela()
pba.root.mainloop()
``` |
{
"source": "3w3rt0n/ServerIoTPython",
"score": 3
} |
#### File: 3w3rt0n/ServerIoTPython/main.py
```python
import os
import sys
import psycopg2
import urlparse
import pytz
import datetime
import time
from flask import Flask, request, redirect, url_for, current_app, render_template
reload(sys)
sys.setdefaultencoding("utf-8")
#PostgreSQL database connection
urlparse.uses_netloc.append("postgres")
url = urlparse.urlparse(os.environ["DATABASE_URL"])
conn = psycopg2.connect(
database=url.path[1:],
user=url.username,
password=url.password,
host=url.hostname,
port=url.port
)
cur = conn.cursor()
cur2 = conn.cursor()
#Define the application
app = Flask("wtf")
#Send the login page to the browser
@app.route("/")
def indexHTML():
return current_app.send_static_file('login.html')
#Check whether the username and password are correct
@app.route("/login", methods=["POST"])
def login():
cur.execute("SELECT * FROM login")
rows = cur.fetchall()
for row in rows:
if request.form['email'] == row[2] and request.form['pwd'] == row[3]:
#return redirect(url_for('dispositivosHTML'))
cur2.execute("SELECT * FROM dispositivos WHERE idUsuario = " + str(row[0]) + " ORDER BY Id ASC")
rows2 = cur2.fetchall()
respLogado = current_app.make_response(render_template("dispositivos.html", nome = row[1], dispositivos = rows2, pag = "0"))
respLogado.set_cookie('IdUsuario', row[0])
respLogado.set_cookie('Nome', row[1])
return respLogado
return "Email ou senha errado!<br /> <p>Email: {}".format(request.form['email']) + "</p><p>Senha: {}".format(request.form['pwd']) + "</p>"
@app.route("/dispositivos.html")
def dispositivosHTML():
return current_app.send_static_file('dispositivos.html')
#Register a new user
@app.route("/cadastrarLogin.html")
def cadastrarLoginHTML():
return current_app.send_static_file('cadastrarLogin.html')
@app.route("/cadastrarLogin", methods=["POST"])
def cadastrarLoginDB():
cur.execute("INSERT INTO login (nome, email, senha) VALUES(%s, %s, %s)", (request.form['nome'], request.form['email'], request.form['pwd']))
conn.commit()
return "Usuario inserido com sucesso!"
#List registered users
@app.route("/listaLogin")
def listaLoginDB():
cur.execute("SELECT * FROM login")
rows = cur.fetchall()
usuarios = "<ul>"
for row in rows:
usuarios = usuarios + "<li>Nome: " + row[1] + "</li><li>Email: " + row[2] + "</li><li>Senha: " + row[3] + "</li><li>-------</li>"
usuarios += "</ul>"
return "Usuarios cadastrados: " + usuarios
#Register a new device
@app.route("/cadastrarDispositivos.html")
def cadastrarDispositivosHTML():
return current_app.send_static_file('cadastrarDispositivos.html')
#List registered devices
@app.route("/listaDispositivos")
def listaDispositivosDB():
cur.execute("SELECT * FROM dispositivos")
rows = cur.fetchall()
dispositivos = "<ul>"
for row in rows:
dispositivos = dispositivos + "<li>id: " + str(row[0]) + "</li><li>id Usuario: " + str(row[1]) + "</li><li>Dispositivo: " + row[2] + "</li><li>MAC: " + row[3] + "</li><li>Estado: " + str(row[4]) + str(row[5]) + str(row[6]) + str(row[7]) + str(row[8]) + str(row[9]) + str(row[10]) + str(row[11]) + str(row[12]) + str(row[13]) + "</li><li>----</li>"
dispositivos += "</ul>"
return "Dispositivos cadastrados: " + dispositivos
#Register the device in the database
@app.route("/cadastrarDispositivos", methods=["POST"])
def cadastrarDispositivoDB():
#get local time
epoch_time = int(time.time())
dt_formatada = time.strftime('%Y-%m-%d %H:%M', time.localtime(epoch_time))
dt = str(epoch_time)
#dt = datetime_without_tz.strftime('%Y-%m-%d %H:%M')
#------
cur.execute("INSERT INTO dispositivos (idUsuario, dispositivo, mac, a0, d0, d1, d2, d3, d4, d5, d6, d7, d8, atualizacao) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)", ( request.form['idUsuario'], request.form['dispositivo'], request.form['MAC'], request.form['a0'], request.form['d0'], request.form['d1'], request.form['d2'], request.form['d3'], request.form['d4'], request.form['d5'], request.form['d6'], request.form['d7'], request.form['d8'], str(dt)))
conn.commit()
return "Dispositivo inserido com sucesso!"
#Update a field on the device --- under test
#http://site/atualizarDispositivoDB?porta=d0&valor=1&IdDisp=9
@app.route("/atualizarDispositivoDB", methods=["GET"])
def atualizarDispositivoDB():
#get local time
epoch_time = int(time.time())
dt = str(epoch_time)
#---------
SQLcomando = "UPDATE dispositivos SET " + request.args.get('porta') + "=" + request.args.get('valor') + ", atualizacao = '" + str(dt) + "' WHERE Id=" + request.args.get('IdDisp')
cur.execute(SQLcomando)
conn.commit()
IdUsuario = request.cookies.get('IdUsuario')
Nome = request.cookies.get('Nome')
cur2.execute("SELECT * FROM dispositivos WHERE idUsuario = " + IdUsuario + " ORDER BY Id ASC")
rows2 = cur2.fetchall()
return render_template("dispositivos.html", nome = Nome, dispositivos = rows2, pag = request.args.get('IdDisp'))
#=========================================================================================================#
#                                 ESP8266 communication functions                                         #
#=========================================================================================================#
@app.route("/lerBD", methods=["GET"])
def lerBD():
cur.execute("SELECT d0, d1, d2, d3, d4, d5, d6, d7, d8, atualizacao FROM dispositivos WHERE mac = '" + request.args.get('mac') + "'")
row = cur.fetchall()
return str(row[0][0]) + "-" + str(row[0][1]) + "-" + str(row[0][2]) + "-" + str(row[0][3]) + "-" + str(row[0][4]) + "-" + str(row[0][5]) + "-" + str(row[0][6]) + "-" + str(row[0][7]) + "-" + str(row[0][8]) + "/" + str(row[0][9])
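# Response format sketch (annotation): the device parses a plain string shaped like
#   "d0-d1-d2-d3-d4-d5-d6-d7-d8/epoch", e.g. "1-0-0-0-0-0-0-0-0/1530000000",
# i.e. the nine pin states joined by '-', then '/', then the last-update timestamp.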
#=========================================================================================================#
#                                          Test functions                                                 #
#=========================================================================================================#
@app.route("/<name>")
def nome(name):
return "Pagina nao encontrada: {}".format(name)
#http://site/get?nome=ewerton&frase=aeiou
@app.route("/get", methods=["GET"])
def get():
nome = request.args.get('nome')
frase = request.args.get('frase')
return "Nome: " + nome + " - qualquer coisa: " + frase
@app.route("/criarTabelaLogin")
def criarTabelaLogin():
cur.execute("CREATE TABLE login(Id SERIAL PRIMARY KEY, nome VARCHAR(30), email VARCHAR(50), senha VARCHAR(20))")
conn.commit()
return "<p>Criado tabela login</p>"
@app.route("/criarTabelaDispositivos")
def criarTabelaDispositivos():
cur.execute("CREATE TABLE dispositivos(Id SERIAL PRIMARY KEY, idUsuario INTEGER, dispositivo VARCHAR(50), mac VARCHAR(17), a0 INTEGER, d0 INTEGER, d1 INTEGER, d2 INTEGER, d3 INTEGER, d4 INTEGER, d5 INTEGER, d6 INTEGER, d7 INTEGER, d8 INTEGER)")
conn.commit()
return "<p>Criado tabela dispositivos</p>"
@app.route("/deleteTabela/<tabela>")
def deleteTabela(tabela):
SQLcomando = "DROP TABLE " + tabela
cur.execute(SQLcomando)
conn.commit()
return "<p>Tabela {} deletada!</p>".format(tabela)
if __name__ == "__main__":
port = int(os.environ.get("PORT", 5000))
app.run(host='0.0.0.0', port=port)
``` |
{
"source": "3watt/Azure_Kinect_ROS_Driver",
"score": 2
} |
#### File: Azure_Kinect_ROS_Driver/src/parsing_test.py
```python
import json
import base64
import requests
import sys
import csv
import io
import re
import rospy
import cv2
import numpy as np
from std_msgs.msg import Byte, Bool, Int16, String
from sensor_msgs.msg import Image, CameraInfo, CompressedImage
from cv_bridge import CvBridge, CvBridgeError
from geometry_msgs.msg import Pose, Point
import geometry_msgs.msg
import time
def main():
# Publish floor and room number info via topics
# pub = rospy.Publisher("image_topic_2",Image, queue_size=10)
floor_pub = rospy.Publisher("/floor_num", Int16, queue_size=1)
room_pub = rospy.Publisher("/room_num", Int16, queue_size=1)
floor_num = Int16()
room_num = Int16()
rospy.init_node('floor_and_room_publisher', anonymous=True)
rate = rospy.Rate(10)
# Open the JSON file and extract the OCR'd text data into a list.
with io.open('/home/minhye/catkin_ws/src/1.json','r') as f:
json_data = json.load(f)
# 한글 데이터를 불러오기 위해 encoding= utf-8 을 해주었다.
res = json.loads(json_data, encoding="utf-8")
# 추출되는 OCR Text data 를 list 에 넣는다.
list_arr = list()
length = len(res['images'][0]['fields'])
for i in range(length):
list_arr.append(res['images'][0]['fields'][i]['inferText'].encode('utf-8'))
# From the extracted data, parse the apartment building (dong) and room (ho) number.
# Requiring the token to start with a digit restricts the match to the actual
# building/room number rather than similar-looking words (district names such as Gojan-dong, Sa-dong, ...).
# Also, OCR sometimes returns the building and room number written together.
# In that case the order is usually building first, then room, so only the digits are
# extracted and stored in order, and the trailing digits are treated as the room number.
resultlist = []
resultlist_ = []
_room_point = []
room_split = ['0','0','0','0']
floor = None
room = None
for row in list_arr:
if "호" in row:
if row[0].isdigit() :
room_ = filter(str.isdigit,row)
resultlist.append(room_)
# Only handle parcels for people who use our service, as listed in info.csv.
with open('/home/minhye/catkin_ws/src/Azure_Kinect_ROS_Driver/src/info.csv', 'r') as file:
reader = csv.reader(file, delimiter = ',')
num = 0
for row in reader:
_room_point.append(int(row[1]))
num = num + 1
print(_room_point)
print(room_)
search_result = -1
for i in range(num) :
if str(_room_point[i]).find(room_) != -1 :
print("find")
search_result = 0
if search_result == -1 :
print("This parcel is not on our service!!")
# Split into floor and room number.
# Rooms on floor 10 and above (4 digits) and below floor 10 (3 digits) are handled
# separately (e.g. '302' -> floor 3, room 2; '1204' -> floor 12, room 4).
if len(room_) == 3 :
for i in range(3) :
room_split[i+1] = str(room_[i])
elif len(room_) == 4 :
for i in range(4) :
room_split[i] = str(room_[i])
floor = int(room_split[0] + room_split[1])
room = int(room_split[2] + room_split[3])
# topic publish
floor_num.data = int(floor)
room_num.data = int(room)
while not rospy.is_shutdown():
floor_connections = floor_pub.get_num_connections()
room_connections = room_pub.get_num_connections()
if floor_connections > 0 :
floor_pub.publish(floor_num)
if room_connections > 0 :
room_pub.publish(room_num)
break
rate.sleep()
# '-' appears on shipping labels in too many other places, so it probably cannot be used without extra constraints.
# elif "-" in row:
# if row[0].isdigit() :
# resultlist.append(row)
# apt_num = list_arr.index(row)
##########################################################
##########################################################
# Change the save path below to match your environment!!!!!!
##########################################################
##########################################################
# Save only the digits of the apartment building/room number.
with open('/home/minhye/catkin_ws/src/2.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(resultlist)
# Separately extract and save only the floor and room number of the home.
with open('/home/minhye/catkin_ws/src/3.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(resultlist_)
if __name__ == '__main__':
try:
start = main()
except Exception:
pass
``` |
{
"source": "3wayHimself/crypko.py",
"score": 2
} |
#### File: crypko.py/crypko/api.py
```python
import threading
import math
import requests
from .errors import CrypkoApiError, CrypkoNotFoundError
from .blockchain import ContractHandler
from .objects import Crypko
class API:
DOMAIN = 'https://api.crypko.ai/'
PER_PAGE = 12 # Can't change this :(
def __init__(self, address=None, key=None, proxies=None):
if key is not None and address is not None:
self._contract = ContractHandler(key, address)
else:
self._contract = None
self.proxies = proxies
def _search(self, owner_addr=None, page=None, sort=None, category=None, attributes=None, filters=None):
req = requests.get(self.DOMAIN + 'crypkos/search', params={
'ownerAddr': owner_addr,
'page': page,
'sort': sort,
'category': category,
'attributes': attributes,
'filters': filters
}, proxies=self.proxies)
if req.status_code != 200:
raise CrypkoApiError('Got response code {}. ({})'.format(req.status_code, req.text))
return req.json()
@property
def contract(self):
if self._contract is None:
raise CrypkoApiError('Contract not loaded. Did you provide an address and key?')
return self._contract
def get_crypko(self, crypko_id):
res = requests.get('{}crypkos/{}/detail'.format(self.DOMAIN, crypko_id), proxies=self.proxies)
if res.status_code == 404:
raise CrypkoNotFoundError
if res.status_code != 200:
raise CrypkoApiError('Got response code {}. ({})'.format(res.status_code, res.text))
return Crypko(res.json(), self)
def threaded_search(self, callback, threads=8, start=0, results=-1, **kwargs):
if results == 0:
return
start = max(0, start)
page = math.floor(start / self.PER_PAGE) + 1
kwargs.pop('page', None)
if results < 0:
r = self._search(**kwargs)
results = r['totalMatched'] - start + 1
page_end = math.ceil((start + results) / self.PER_PAGE) + 1
def runner():
nonlocal page
while page < page_end:
me_page = page
page += 1
res = self._search(page=me_page, **kwargs)
if not res['crypkos']:
break
for n, i in enumerate(res['crypkos']):
if n + (page - 1) * self.PER_PAGE >= start:
if n + (page - 1) * self.PER_PAGE < start + results:
callback(Crypko(i, self))
pool = []
for _ in range(threads):
pool.append(threading.Thread(target=runner))
pool[-1].start()
for thread in pool:
thread.join()
def search(self, start=0, results=-1, **kwargs):
"""
Search for Crypkos. Will retrieve `results` Crypkos starting at `start`.
If results is negative, we will read until the end of results.
TODO: Document kwargs
"""
if results == 0:
return
start = max(0, start)
page = math.floor(start / self.PER_PAGE) + 1
kwargs.pop('page', None)
result = self._search(page=page, **kwargs)
def iters():
nonlocal result, page
results_sent = 0
while results < 0 or results_sent < results:
if not result['crypkos']:
break
if results_sent == 0:
result['crypkos'] = result['crypkos'][start % self.PER_PAGE:]
for i in result['crypkos']:
if results < 0 or results_sent < results:
yield Crypko(i, self)
results_sent += 1
page += 1
if results < 0 or results_sent < results:
result = self._search(page=page, **kwargs)
return result['totalMatched'], iters()
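# Usage sketch (annotation, not part of the library): read-only queries need no
# address/key, and `search` returns (total_matched, iterator_of_Crypko).
#
#   api = API()
#   total, crypkos = api.search(results=24, filters='cooldown:ur')
#   for c in crypkos:
#       print(c.id)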
```
#### File: crypko/examples/auto_fuse.py
```python
import json
import time
import math
import crypko
from requests.adapters import HTTPAdapter
ADDR = '0xca39e90cec69838e73cc4f24ec5077dac44b47d6'
# ITER_WEIGHT = 1/10
# SIM_THRESHOLD = 12
CAP = 270000
USE_CAP = False
ATTRIBS = 'cooldownready'
FILTERS = None
score_cache = {}
# FILTERS = 'iteration:~3' # Iter 3 or under only
# FILTERS = 'cooldown:ur' # Only ultra rapids
def score(c1, c2):
if USE_CAP and c1.id > CAP and c2.id > CAP:
return 0
if c1.auction.active or c2.auction.active:
return 0
if (c1.id, c2.id) in score_cache:
return score_cache[(c1.id, c2.id)]
similarity = len((*filter(c1.attributes.__contains__, c2.attributes),))
n_sim = sum(x == y for x, y in zip(c1.noise, c2.noise))
# if similarity < SIM_THRESHOLD:
# return 0
# return similarity / (max(c1.iteration, c2.iteration) ** ITER_WEIGHT)
score_cache[(c1.id, c2.id)] = similarity * n_sim
return similarity * n_sim
def legal(c1, c2, cache):
a = [c1.id, c2.id, *cache[c1.id], *cache[c2.id]]
return len(a) == len(set(a))
def load_cache():
try:
with open('cache.json') as file_:
dat = json.load(file_)
except FileNotFoundError:
dat = {}
rtn = {}
for i in dat:
rtn[int(i)] = (*map(int, dat[i]),)
return rtn
def save_cache(dat):
with open('cache.json', 'w') as file_:
json.dump(dat, file_)
def best_pair(crypkos, cooldown=False):
scores = []
cache = load_cache()
for c1 in crypkos:
if c1.id not in cache:
c1.ensure_complete()
cache[c1.id] = (c1.matron.id if c1.matron is not None else time.time(),
c1.sire.id if c1.sire is not None else time.time())
save_cache(cache)
for c2 in crypkos:
if c2.id not in cache:
c2.ensure_complete()
cache[c2.id] = (c2.matron.id if c2.matron is not None else time.time(),
c2.sire.id if c2.sire is not None else time.time())
save_cache(cache)
if not legal(c1, c2, cache):
continue
scores.append((c1, c2, score(c1, c2)))
scores.sort(key=lambda x: x[2], reverse=True)
if scores:
return scores[0][0], scores[0][1]
return None
def purge_attributes(api, attribs):
print(f'==> Collecting crypkos for \'{attribs}\'')
crypkos = [i for i in api.search(owner_addr=ADDR, attributes=attribs)[1] if not i.auction.active]
txs = []
for c in crypkos:
print(f'==> Selling #{c.id}')
tx = c.sell(0.7, 0.4)
print(f' :: TX {tx.hex()}')
txs.append(tx)
print(f'==> Flushing {len(txs)} transactions')
for tx in txs:
print(f' :: TX {tx.hex()}')
api.contract.wait_for_tx(tx)
print(f'==> Done!')
def main():
with open('priv.key') as key_file:
api = crypko.API(ADDR, key_file.read().strip())
batch_size = 48
while True:
purge_attributes(api, 'dark skin')
purge_attributes(api, 'glasses')
print(f'==> Requesting crypkos..')
my_crypkos = [i for i in api.search(owner_addr=ADDR,
attributes=ATTRIBS,
results=12 * 300,
filters=FILTERS)[1] if not i.auction.active]
print(f' :: {len(my_crypkos)} usable')
txs = []
for _ in range(batch_size):
print(f'==> Locating best pair..')
pair = best_pair(my_crypkos)
if pair is None:
print(' :: No legal fuses avaliable. Breaking.')
break
print(f' :: Fusing #{pair[0].id} with #{pair[1].id}..')
try:
tx = pair[0].fuse(pair[1])
print(f' :: Transaction {tx.hex()}')
txs.append(tx)
my_crypkos.remove(pair[0])
my_crypkos.remove(pair[1])
except ValueError as e:
print(f' :: Failed with {e}')
print(f'==> Flushing {len(txs)} transactions')
for tx in txs:
print(f' :: TX {tx.hex()}')
api.contract.wait_for_tx(tx)
print(f' :: Done!')
if __name__ == '__main__':
main()
```
#### File: 3wayHimself/crypko.py/setup.py
```python
import os
from setuptools import setup
version = __import__('crypko').__version__
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as f:
return f.read()
setup(
name='crypko.py',
version=version,
author='Bottersnike',
author_email='<EMAIL>',
description=('A basic wrapper around the Crypko platform.'),
long_description=read('README.rst'),
license='MIT',
install_requires=[
'requests',
'web3'
],
packages=['crypko', 'crypko.examples'],
url='https://gitlab.com/Bottersnike/crypko.py',
project_urls={
"Bug Tracker": "https://gitlab.com/Bottersnike/crypko.py/issues",
"Source Code": "https://gitlab.com/Bottersnike/crypko.py",
},
package_data={
'crypko': ['abi.json'],
},
)
``` |
{
"source": "3-Ways-to-Heck/ctfwriteups",
"score": 4
} |
#### File: cryptography/New-Caesar/new_caesar_annotated.py
```python
import string
LOWERCASE_OFFSET = ord("a") # 97
ALPHABET = string.ascii_lowercase[:16] # first 16 letters of the alphabet, from a - p
def b16_encode(plain):
enc = "" # this is the ciphertext that is generated
for c in plain: # iterate over every character in the plaintext
binary = "{0:08b}".format(ord(c)) # convert the character's ASCII code into an 8-bit binary number (8 digits, so for example, A is 65 in decimal, so its binary variable would be "01000001")
enc += ALPHABET[int(binary[:4], 2)] # take the first 4 digits of the binary number, turn it into decimal, get the letter at that decimal index in the "ALPHABET" array, and add it to the "enc" string
enc += ALPHABET[int(binary[4:], 2)] # do the same for the last 4 digits of the binary variable
return enc
def shift(c, k):
t1 = ord(c) - LOWERCASE_OFFSET # subtracts 97 from the ASCII code of the character c
t2 = ord(k) - LOWERCASE_OFFSET # same thing, but for k, which is just the key variable on line 20
return ALPHABET[(t1 + t2) % len(ALPHABET)] # this is essentially just an implementation of a regular Caesar Cipher
flag = "redacted"
key = "redacted"
assert all([k in ALPHABET for k in key]) # this tells us that every character in key is in the ALPHABET variable, so we can just bruteforce the key
assert len(key) == 1 # awesome! the key is only 1 letter long, so now we know that key is just a letter from a to p
b16 = b16_encode(flag)
enc = ""
for i, c in enumerate(b16):
enc += shift(c, key[i % len(key)]) # since the length of key, len(key), is just 1 (line 22), "key[i % len(key)]" can just be simplified into "key" (because any number mod 1 is just 0); every character of the string b16 is being shifted the same number of positions to the right
print(enc) # enc is the given cipher, "apbopjbobpnjpjnmnnnmnlnbamnpnononpnaaaamnlnkapndnkncamnpapncnbannaapncndnlnpna"
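# --- Solver sketch (annotation, not part of the original challenge script) ---
# Since the key is a single letter of ALPHABET, all 16 candidates can be tried:
# undo the shift, then undo b16_encode by recombining each pair of nibbles.
def b16_decode(enc_text):
    out = ""
    for i in range(0, len(enc_text), 2):
        hi = ALPHABET.index(enc_text[i])      # first letter encodes the high nibble
        lo = ALPHABET.index(enc_text[i + 1])  # second letter encodes the low nibble
        out += chr((hi << 4) | lo)
    return out

def unshift(c, k):
    return ALPHABET[(ord(c) - ord(k)) % len(ALPHABET)]

ciphertext = "apbopjbobpnjpjnmnnnmnlnbamnpnononpnaaaamnlnkapndnkncamnpapncnbannaapncndnlnpna"
for key_guess in ALPHABET:
    candidate = b16_decode("".join(unshift(c, key_guess) for c in ciphertext))
    if all(32 <= ord(ch) < 127 for ch in candidate):  # keep printable guesses only
        print(key_guess, candidate)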
``` |
{
"source": "3-w-c/rstfmt",
"score": 2
} |
#### File: rstfmt/rstfmt/server.py
```python
import argparse
import asyncio
import functools
import logging
import time
from concurrent import futures
import docutils
from aiohttp import web
from . import rst_extras, rstfmt
class ParseError(Exception):
pass
def do_format(width: int, s: str) -> str:
# Unpickling SystemMessage objects is broken for some reason, so raising them directly fails;
# replace them with our own sentinel class.
try:
return rstfmt.format_node(width, rstfmt.parse_string(s))
except docutils.utils.SystemMessage as e:
raise ParseError(str(e))
async def handle(pool: futures.Executor, req: web.Request) -> web.Response:
width = int(req.headers.get("X-Line-Length", 72))
body = await req.text()
t0 = time.perf_counter()
try:
text = await asyncio.get_event_loop().run_in_executor(pool, do_format, width, body)
resp = web.Response(text=text)
except ParseError as e:
logging.warning(f"Failed to parse input: {e}")
resp = web.Response(status=400, reason=str(e))
except Exception as e:
logging.exception("Error while handling request")
resp = web.Response(status=500, reason=str(e))
t1 = time.perf_counter()
dt = int(1000 * (t1 - t0))
print(f"Finished request: {dt:3} ms, {len(body):5} chars")
return resp
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument("--bind-host", default="localhost")
parser.add_argument("--bind-port", type=int, default=5219)
args = parser.parse_args()
rst_extras.register()
with futures.ProcessPoolExecutor() as pool:
app = web.Application()
app.add_routes([web.post("/", functools.partial(handle, pool))])
web.run_app(app, host=args.bind_host, port=args.bind_port)
if __name__ == "__main__":
main()
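# Usage sketch (added for illustration; not part of the original module): with the
# server running, a client can POST raw reStructuredText to "/" and set the wrap
# width through the X-Line-Length header that handle() reads.
#
#   import requests
#   resp = requests.post(
#       "http://localhost:5219/",
#       data="Title\n=====\n\nSome *text* to reformat.",
#       headers={"X-Line-Length": "100"},
#   )
#   print(resp.text)  # the reformatted document, or a 400 response on parse errors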
``` |
{
"source": "3wille/ultimate-poll-bot",
"score": 2
} |
#### File: helper/display/management.py
```python
from .poll import get_poll_text
def get_poll_management_text(session, poll, show_warning=False):
"""Create the management interface for a poll."""
poll_text = get_poll_text(session, poll, show_warning)
return poll_text
```
#### File: pollbot/models/poll.py
```python
from datetime import datetime, timedelta
from sqlalchemy import (
Date,
Column,
func,
ForeignKey,
text,
)
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.types import (
BigInteger,
Boolean,
DateTime,
Integer,
String,
)
from sqlalchemy.orm import relationship
from pollbot.db import base
from pollbot.helper.enums import PollType, UserSorting, OptionSorting
class Poll(base):
"""The model for a Poll."""
__tablename__ = 'poll'
id = Column(Integer, primary_key=True)
uuid = Column(UUID(as_uuid=True), unique=True, nullable=False, server_default=text('gen_random_uuid()'))
created_at = Column(DateTime, server_default=func.now(), nullable=False)
updated_at = Column(DateTime, server_default=func.now(), onupdate=func.now(), nullable=False)
# Options
name = Column(String)
description = Column(String)
locale = Column(String, server_default='english')
poll_type = Column(String, nullable=False)
anonymous = Column(Boolean, nullable=False)
number_of_votes = Column(Integer)
allow_new_options = Column(Boolean, nullable=False, default=False)
option_sorting = Column(String, nullable=False)
user_sorting = Column(String, nullable=False)
results_visible = Column(Boolean, nullable=False, default=True)
show_percentage = Column(Boolean, nullable=False, default=True)
european_date_format = Column(Boolean, nullable=False, default=False)
# Flags
created = Column(Boolean, nullable=False, default=False)
closed = Column(Boolean, nullable=False, default=False)
due_date = Column(DateTime, nullable=True)
next_notification = Column(DateTime, nullable=True)
# Chat state variables
expected_input = Column(String)
in_settings = Column(Boolean, nullable=False, default=False)
current_date = Column(Date, server_default=func.now(), nullable=False)
# OneToOne
user_id = Column(BigInteger, ForeignKey('user.id', ondelete='cascade', name='user'), nullable=False, index=True)
user = relationship('User', foreign_keys='Poll.user_id')
# OneToMany
options = relationship('PollOption', order_by='asc(PollOption.id)', lazy='joined', passive_deletes='all')
votes = relationship('Vote', passive_deletes=True)
references = relationship('Reference', lazy='joined', passive_deletes='all')
def __init__(self, user):
"""Create a new poll."""
self.user = user
self.poll_type = PollType.single_vote.name
self.anonymous = False
self.results_visible = True
self.user_sorting = UserSorting.user_chrono.name
self.option_sorting = OptionSorting.option_chrono.name
def __repr__(self):
"""Print as string."""
return f'Poll with Id: {self.id}, name: {self.name}'
def __str__(self):
"""Print as string."""
return f'Poll with Id: {self.id}, name: {self.name}'
def should_show_result(self):
"""Determine, whether this results of this poll should be shown."""
return self.results_visible or self.closed
def has_date_option(self):
"""Check whether this poll has a date option."""
for option in self.options:
if option.is_date:
return True
return False
def get_formatted_due_date(self):
"""Get the formatted date."""
if self.european_date_format:
return self.due_date.strftime('%d.%m.%Y %H:%M UTC')
return self.due_date.strftime('%Y-%m-%d %H:%M UTC')
def set_due_date(self, date):
"""Set the due date and the next notification."""
now = datetime.now()
self.due_date = date
if now < self.due_date - timedelta(days=7):
self.next_notification = self.due_date - timedelta(days=7)
elif now < self.due_date - timedelta(days=1):
self.next_notification = self.due_date - timedelta(days=1)
elif now < self.due_date - timedelta(hours=6):
self.next_notification = self.due_date - timedelta(hours=6)
else:
self.next_notification = self.due_date
def clone(self, session):
"""Create a clone from the current poll."""
poll = Poll(self.user)
poll.created = True
session.add(poll)
poll.name = self.name
poll.description = self.description
poll.poll_type = self.poll_type
poll.anonymous = self.anonymous
poll.number_of_votes = self.number_of_votes
poll.allow_new_options = self.allow_new_options
poll.option_sorting = self.option_sorting
poll.user_sorting = self.user_sorting
poll.results_visible = self.results_visible
poll.show_percentage = self.show_percentage
from pollbot.models import PollOption
for option in self.options:
new_option = PollOption(poll, option.name)
session.add(new_option)
return poll
```
#### File: pollbot/models/reference.py
```python
from sqlalchemy import (
Column,
func,
ForeignKey,
)
from sqlalchemy.types import (
BigInteger,
Integer,
DateTime,
String,
)
from sqlalchemy.orm import relationship
from pollbot.db import base
class Reference(base):
"""The model for a Reference."""
__tablename__ = 'reference'
id = Column(Integer, primary_key=True)
admin_chat_id = Column(BigInteger)
admin_message_id = Column(BigInteger)
inline_message_id = Column(String)
created_at = Column(DateTime, server_default=func.now(), nullable=False)
updated_at = Column(DateTime, server_default=func.now(), onupdate=func.now(), nullable=False)
# ManyToOne
poll_id = Column(Integer, ForeignKey('poll.id', ondelete='cascade', ), nullable=False, index=True)
poll = relationship('Poll')
def __init__(
self, poll,
inline_message_id=None,
admin_chat_id=None,
admin_message_id=None
):
"""Create a new poll."""
self.poll = poll
self.inline_message_id = inline_message_id
self.admin_chat_id = admin_chat_id
self.admin_message_id = admin_message_id
```
#### File: pollbot/telegram/job.py
```python
from sqlalchemy import func
from sqlalchemy.orm import joinedload
from datetime import datetime, timedelta
from pollbot.i18n import i18n
from pollbot.models import Update, Notification, Poll
from pollbot.helper.session import job_session_wrapper
from pollbot.helper.update import send_updates, window_size, update_poll_messages
@job_session_wrapper()
def message_update_job(context, session):
"""Update all messages if necessary."""
try:
context.job.enabled = False
now = datetime.now()
current_time_window = now - timedelta(seconds=now.second % window_size, microseconds=now.microsecond)
last_time_window = current_time_window - timedelta(seconds=window_size)
one_minute_ago = current_time_window - timedelta(minutes=1)
updates = session.query(Update) \
.filter(Update.updated.is_(False)) \
.filter(Update.time_window <= last_time_window) \
.options(joinedload(Update.poll)) \
.order_by(Update.time_window.desc()) \
.all()
polls_for_refresh = []
for update in updates:
# It might be that there are multiple active updates
            # due to the job timeouts and multiple repetitive votes
# or long update tasks/telegram timeouts
previous_active_updates = session.query(Update) \
.filter(Update.poll == update.poll) \
.filter(Update.updated.is_(False)) \
.all()
            if len(previous_active_updates) > 0:
for previous_update in previous_active_updates:
previous_update.updated = True
polls_for_refresh.append(previous_update.poll_id)
session.commit()
            # If a more recent update has already been updated, ignore the previous updates
elif update.poll_id in polls_for_refresh:
session.refresh(update)
if update.updated:
continue
# Get the update amount of the last minute
updates_in_last_minute = session.query(func.sum(Update.count)) \
.filter(Update.poll == update.poll) \
.filter(Update.time_window >= one_minute_ago) \
.one_or_none()[0]
if updates_in_last_minute is None:
updates_in_last_minute = 0
            # Keep it below 100, because we need a little bit of buffer. Timings aren't always perfect
if updates_in_last_minute < 100:
send_updates(session, context.bot, update.poll, show_warning=True)
session.query(Update) \
.filter(Update.id == update.id) \
.update({
'count': Update.count + 1,
'updated': True,
})
# Let's wait a little longer
else:
pass
finally:
context.job.enabled = True
session.close()
@job_session_wrapper()
def send_notifications(context, session):
"""Notify the users about the poll being closed soon."""
notifications = session.query(Notification) \
.join(Notification.poll) \
.filter(Poll.next_notification <= datetime.now()) \
.all()
for notification in notifications:
poll = notification.poll
locale = poll.locale
time_step = poll.due_date - poll.next_notification
tg_chat = context.bot.get_chat(notification.chat_id)
notification.poll_message_id
if time_step == timedelta(days=1):
poll.next_notification = poll.due_date - timedelta(hours=6)
tg_chat.send_message(
i18n.t('notification.one_day', locale=locale, name=poll.name),
parse_mode='markdown',
reply_to_message_id=notification.poll_message_id,
)
elif time_step == timedelta(hours=6):
poll.next_notification = poll.due_date
tg_chat.send_message(
i18n.t('notification.six_hours', locale=locale, name=poll.name),
parse_mode='markdown',
reply_to_message_id=notification.poll_message_id,
)
elif poll.due_date == poll.next_notification:
update_poll_messages(session, context.bot, poll)
tg_chat.send_message(
i18n.t('notification.closed', locale=locale, name=poll.name),
parse_mode='markdown',
reply_to_message_id=notification.poll_message_id,
)
session.delete(notification)
polls_to_close = session.query(Poll) \
.filter(Poll.due_date <= datetime.now()) \
.filter(Poll.closed.is_(False)) \
.all()
for poll in polls_to_close:
poll.closed = True
update_poll_messages(session, context.bot, poll)
```
#### File: telegram/keyboard/date_picker.py
```python
import calendar
from datetime import date
from telegram import (
InlineKeyboardButton,
)
from pollbot.helper.enums import CallbackType
def get_datepicker_buttons(poll):
"""Get the buttons for the datepicker."""
current_date = poll.current_date
if current_date is None:
        current_date = date.today()
poll.current_date = current_date
buttons = []
ignore_payload = f'{CallbackType.ignore.value}:0:0'
# Add headline
headline = f'{calendar.month_name[current_date.month]} {current_date.year}'
buttons.append([InlineKeyboardButton(headline, callback_data=ignore_payload)])
# Create the week-day column description
row = []
for day in ["Mo", "Tu", "We", "Th", "Fr", "Sa", "Su"]:
row.append(InlineKeyboardButton(day, callback_data=ignore_payload))
buttons.append(row)
# Iterate through all days and create respective buttons
calendar_month = calendar.monthcalendar(current_date.year, current_date.month)
for week in calendar_month:
row = []
for day in week:
            # Format the text. The currently chosen day should be surrounded by brackets, e.g. (26)
            day_text = str(day)
            if day > 0:
                this_date = date(year=current_date.year, month=current_date.month, day=day)
                if this_date == current_date:
                    day_text = f'({day})'
            # Only create real buttons for actual days of the month
            if day == 0:
                row.append(InlineKeyboardButton(" ", callback_data=ignore_payload))
else:
day_date = date(current_date.year, current_date.month, day)
payload = f'{CallbackType.set_date.value}:{poll.id}:{day_date.isoformat()}'
row.append(InlineKeyboardButton(day_text, callback_data=payload))
buttons.append(row)
previous_payload = f'{CallbackType.previous_month.value}:{poll.id}:0'
next_payload = f'{CallbackType.next_month.value}:{poll.id}:0'
buttons.append([
InlineKeyboardButton('<', callback_data=previous_payload),
InlineKeyboardButton('>', callback_data=next_payload),
])
return buttons
``` |
{
"source": "3wnbr1/transit-python2",
"score": 2
} |
#### File: transit-python2/transit/rolling_cache.py
```python
from transit.constants import SUB, MAP_AS_ARR
FIRST_ORD = 48
CACHE_CODE_DIGITS = 44
CACHE_SIZE = CACHE_CODE_DIGITS * CACHE_CODE_DIGITS
MIN_SIZE_CACHEABLE = 4
def is_cache_key(name):
return len(name) and (name[0] == SUB and name != MAP_AS_ARR)
def encode_key(i):
lo = i % CACHE_CODE_DIGITS
hi = i // CACHE_CODE_DIGITS
if hi == 0:
return "^" + chr(lo + FIRST_ORD)
return "^" + chr(hi + FIRST_ORD) + chr(lo + FIRST_ORD)
def decode_key(s):
sz = len(s)
if sz == 2:
return ord(s[1]) - FIRST_ORD
return (ord(s[2]) - FIRST_ORD) + (CACHE_CODE_DIGITS * (ord(s[1]) - FIRST_ORD))
def is_cacheable(string, as_map_key=False):
return string and len(string) >= MIN_SIZE_CACHEABLE and (as_map_key or (string[:2] in ["~#", "~$", "~:"]))
class RollingCache(object):
"""This is the internal cache used by python-transit for cacheing and
expanding map keys during writing and reading. The cache enables transit
to minimize the amount of duplicate data sent over the wire, effectively
compressing down the overall payload size. The cache is not intended to
be used directly.
"""
def __init__(self):
self.key_to_value = {}
self.value_to_key = {}
# if index rolls over... (bug)
def decode(self, name, as_map_key=False):
"""Always returns the name"""
if is_cache_key(name) and (name in self.key_to_value):
return self.key_to_value[name]
return self.encache(name) if is_cacheable(name, as_map_key) else name
def encode(self, name, as_map_key=False):
"""Returns the name the first time and the key after that"""
if name in self.key_to_value:
return self.key_to_value[name]
return self.encache(name) if is_cacheable(name, as_map_key) else name
def size(self):
return len(self.key_to_value)
def is_cache_full(self):
return len(self.key_to_value) > CACHE_SIZE
def encache(self, name):
if self.is_cache_full():
self.clear()
elif name in self.value_to_key:
return self.value_to_key[name]
key = encode_key(len(self.key_to_value))
self.key_to_value[key] = name
self.value_to_key[name] = key
return name
def clear(self):
self.value_to_key = {}
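# Usage sketch (added for illustration; not part of the original module): the
# cache stores the full string on first sight and hands back a short "^<c>" key
# for later occurrences, which is what shrinks repeated map keys on the wire.
#
#   cache = RollingCache()
#   cache.encache("~:keyword")        # returns "~:keyword" and caches it
#   cache.value_to_key["~:keyword"]   # "^0", the key produced by encode_key(0)
#   cache.decode("^0")                # expands back to "~:keyword"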
``` |
{
"source": "3x0d2s/chatbot-fl-2",
"score": 3
} |
#### File: chatbot-fl-2/bot/db_requests.py
```python
import sqlite3
def createBD_FromDump():
    connection = sqlite3.connect('db/db.db')
    with open('db/db_dump.sql', 'r', encoding='UTF-8') as f:
        dump = f.read()
    connection.executescript(dump)
    connection.commit()
    connection.close()
class requestDB:
def __init__(self, database):
"""Подключаемся к БД и сохраняем курсор соединения"""
self.connection = sqlite3.connect(database, check_same_thread=False)
self.cursor = self.connection.cursor()
def add_user(self, user_id):
with self.connection:
return self.cursor.execute("INSERT INTO `users` (`user_id`) VALUES(?) ", (user_id,))
def get_users(self):
with self.connection:
self.cursor.execute("SELECT * FROM `users`")
return self.cursor.fetchall()
def check_user(self, user_id):
with self.connection:
result = self.cursor.execute(
"SELECT DISTINCT 1 FROM `users` WHERE user_id = ? ", (user_id,)).fetchone()
if result == None:
return False
else:
return True
def add_admin(self, user_id):
with self.connection:
return self.cursor.execute("INSERT INTO `admins` (`user_id`) VALUES(?) ", (user_id,))
def get_admins(self):
with self.connection:
self.cursor.execute("SELECT * FROM `admins`")
return self.cursor.fetchall()
def add_manufacturer_to_products_stack(self, user_id, manufacturer):
with self.connection:
return self.cursor.execute("INSERT INTO `products_stack` (`user_id`, `manufacturer`) VALUES(?,?) ", (user_id, manufacturer))
def add_taste_to_products_stack(self, user_id, taste):
with self.connection:
return self.cursor.execute(
"UPDATE `products_stack` SET `taste`=? WHERE user_id=?", (taste, user_id))
def add_puffs_to_products_stack(self, user_id, puffs):
with self.connection:
return self.cursor.execute(
"UPDATE `products_stack` SET `number_of_puffs`=? WHERE user_id=?", (puffs, user_id))
def add_amount_to_products_stack(self, user_id, amount):
with self.connection:
return self.cursor.execute(
"UPDATE `products_stack` SET `amount`=? WHERE user_id=?", (amount, user_id))
def get_manufacturer_from_products_stack(self, user_id):
with self.connection:
self.cursor.execute(
"SELECT manufacturer FROM products_stack WHERE user_id=?", (user_id,))
return self.cursor.fetchone()
def get_taste_from_products_stack(self, user_id):
with self.connection:
self.cursor.execute(
"SELECT taste FROM products_stack WHERE user_id=?", (user_id,))
return self.cursor.fetchone()
def get_puffs_from_products_stack(self, user_id):
with self.connection:
self.cursor.execute(
"SELECT number_of_puffs FROM products_stack WHERE user_id=?", (user_id,))
return self.cursor.fetchone()
def get_amount_from_products_stack(self, user_id):
with self.connection:
self.cursor.execute(
"SELECT amount FROM products_stack WHERE user_id=?", (user_id,))
return self.cursor.fetchone()
def delete_product_object_from_stack(self, user_id):
with self.connection:
return self.cursor.execute("DELETE FROM `products_stack` WHERE user_id=?", (user_id,))
def add_product(self, manufacturer, taste, puffs, amount):
with self.connection:
check = self.cursor.execute(
"SELECT DISTINCT 1 FROM `products` WHERE `manufacturer`=? AND `taste`=? AND `number_of_puffs`=? AND `amount`=?", (manufacturer, taste, puffs, amount)).fetchone()
if check == None:
return self.cursor.execute("INSERT INTO `products` (`manufacturer`, `taste`, `number_of_puffs`, `amount`) VALUES(?,?,?,?) ", (manufacturer, taste, puffs, amount))
else:
return False
def get_product_id(self, manufacturer, taste, puffs):
with self.connection:
self.cursor.execute(
"SELECT id FROM products WHERE `manufacturer`=? AND `taste`=? AND `number_of_puffs`=?", (manufacturer, taste, puffs))
return self.cursor.fetchone()
def add_product_to_shopping_cart(self, user_id, product_id, count):
with self.connection:
check = self.cursor.execute(
"SELECT DISTINCT 1 FROM `shopping_cart` WHERE `user_id`=? AND `product_id`=?", (user_id, product_id)).fetchone()
if check == None:
return self.cursor.execute("INSERT INTO `shopping_cart` (`user_id`, `product_id`, `count`) VALUES(?,?,?) ", (user_id, product_id, count))
else:
count_old = (self.cursor.execute(
"SELECT count FROM `shopping_cart` WHERE user_id=? AND product_id=?", (user_id, product_id)).fetchone())[0]
count_new = count_old + count
return self.cursor.execute("UPDATE `shopping_cart` SET `count`=? WHERE user_id=? AND product_id=?", (count_new, user_id, product_id))
def get_manufacturers(self):
with self.connection:
self.cursor.execute(
"SELECT manufacturer FROM products")
return self.cursor.fetchall()
def get_tastes(self, manufacturer):
with self.connection:
self.cursor.execute(
"SELECT taste FROM products WHERE manufacturer=?", (manufacturer,))
return self.cursor.fetchall()
def get_puffs_and_amount(self, manufacturer, taste):
with self.connection:
self.cursor.execute(
"SELECT number_of_puffs, amount FROM products WHERE manufacturer=? AND taste=?", (manufacturer, taste))
return self.cursor.fetchall()
def get_orders(self, user_id):
with self.connection:
self.cursor.execute(
"SELECT product_id, count FROM shopping_cart WHERE user_id=?", (user_id,))
return self.cursor.fetchall()
def get_amount(self, product_id):
with self.connection:
self.cursor.execute(
"SELECT amount FROM products WHERE id=?", (product_id,))
return self.cursor.fetchone()
def delete_product_from_shopping_cart(self, user_id, product_id):
with self.connection:
return self.cursor.execute("DELETE FROM `shopping_cart` WHERE user_id=? AND product_id=?", (user_id, product_id))
def delete_product_from_shopping_cart_everyone_has(self, product_id):
with self.connection:
return self.cursor.execute("DELETE FROM `shopping_cart` WHERE product_id=?", (product_id,))
def clear_shopping_cart_for_user(self, user_id):
with self.connection:
return self.cursor.execute("DELETE FROM `shopping_cart` WHERE user_id=?", (user_id,))
def delete_product(self, product_id):
with self.connection:
check = self.cursor.execute(
"SELECT DISTINCT 1 FROM `products` WHERE id=?", (product_id,)).fetchone()
if check != None:
return self.cursor.execute(
"DELETE FROM `products` WHERE id=?", (product_id,))
else:
return False
def get_product_by_product_id(self, product_id):
with self.connection:
self.cursor.execute(
"SELECT manufacturer, taste, number_of_puffs, amount FROM products WHERE id=?", (product_id,))
return self.cursor.fetchone()
def add_name_to_castomer(self, user_id, name):
with self.connection:
return self.cursor.execute("INSERT INTO `castomers` (`user_id`, `name`) VALUES(?,?) ", (user_id, name))
def add_phone_to_castomer(self, user_id, phone):
with self.connection:
return self.cursor.execute(
"UPDATE `castomers` SET `phone_number`=? WHERE user_id=?", (phone, user_id))
def add_address_to_castomer(self, user_id, address):
with self.connection:
return self.cursor.execute(
"UPDATE `castomers` SET `address`=? WHERE user_id=?", (address, user_id))
def add_delivery_time_to_castomer(self, user_id, delivery_time):
with self.connection:
return self.cursor.execute(
"UPDATE `castomers` SET `delivery_time`=? WHERE user_id=?", (delivery_time, user_id))
def add_payment_method_to_castomer(self, user_id, payment_method):
with self.connection:
return self.cursor.execute(
"UPDATE `castomers` SET `payment_method`=? WHERE user_id=?", (payment_method, user_id))
def get_castomer(self, user_id):
with self.connection:
self.cursor.execute(
"SELECT * FROM castomers WHERE user_id=?", (user_id,))
return self.cursor.fetchone()
def delete_castomer(self, user_id):
with self.connection:
return self.cursor.execute("DELETE FROM `castomers` WHERE user_id=?", (user_id,))
def close(self):
"""Закрываем соединение с БД"""
self.connection.close()
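# Usage sketch (added for illustration; not part of the original module):
#
#   createBD_FromDump()                    # build the schema from db/db_dump.sql
#   db = requestDB('db/db.db')
#   if not db.check_user(123456):          # 123456 is a made-up Telegram user id
#       db.add_user(123456)
#   print(db.get_users())
#   db.close()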
``` |
{
"source": "3x0d2s/chatbot-for-studying",
"score": 3
} |
#### File: bot/scripts/check_InputData.py
```python
import datetime
def check_date(date: datetime.datetime) -> bool:
"""Проверяет дату на корректность."""
try:
datetime.datetime.strptime(date, '%d.%m.%Y')
return True
except ValueError:
return False
def check_lesson_text(lesson: str) -> bool:
"""Проверяет имя урока на длину."""
if len(lesson) <= 32:
return True
else:
return False
def check_task_text(task: str) -> bool:
"""Проверяет задание на длину."""
if len(task) <= 512:
return True
else:
return False
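# Usage sketch (added for illustration; not part of the original module):
#
#   check_date('31.12.2024')    # True - matches the %d.%m.%Y format used above
#   check_date('2024-13-01')    # False - not a valid date in that format
#   check_lesson_text('Maths')  # True - 32 characters or fewer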
``` |
{
"source": "3x1t1um/SolarSystem",
"score": 3
} |
#### File: 3x1t1um/SolarSystem/main.py
```python
from vpython import *
import platform
import json
import src.informations as infos
import src.planets as planets
import os
import time
class SolarSystem(object):
"""docstring for SolarSystem"""
def __init__(self):
self.mercury_data = json.loads(infos.mercury)
self.venus_data = json.loads(infos.venus)
self.earth_data = json.loads(infos.earth)
self.mars_data = json.loads(infos.mars)
self.jupiter_data = json.loads(infos.jupiter)
self.saturn_data = json.loads(infos.saturn)
self.uranus_data = json.loads(infos.uranus)
self.neptune_data = json.loads(infos.neptune)
self.banner()
self.run()
def banner(self):
if 'Windows' in platform.platform():
os.system('cls')
else:
os.system('clear')
print()
print(""" .::.
.:' .:
,0000000.:' .:'
00000000000 .:'
0000000000000:'
0000000000000 SOLAR SYSTEM
.:0000000000000
.:' 00000000000 by AskaD
.:' .:'0000000'
:' .:'
'::' """)
print()
def run(self):
self.createscene()
self.createsun()
self.mercury = planets.planets().new(scene, self.mercury_data["distance"], self.mercury_data["radius"], self.mercury_data["texture"])
self.venus = planets.planets().new(scene, self.venus_data["distance"], self.venus_data["radius"], self.venus_data["texture"])
self.earth = planets.planets().new(scene, self.earth_data["distance"], self.earth_data["radius"], self.earth_data["texture"])
self.mars = planets.planets().new(scene, self.mars_data["distance"], self.mars_data["radius"], self.mars_data["texture"])
self.jupiter = planets.planets().new(scene, self.jupiter_data["distance"], self.jupiter_data["radius"], self.jupiter_data["texture"])
self.saturn = planets.planets().new(scene, self.saturn_data["distance"], self.saturn_data["radius"], self.saturn_data["texture"])
self.uranus = planets.planets().new(scene, self.uranus_data["distance"], self.uranus_data["radius"], self.uranus_data["texture"])
self.neptune = planets.planets().new(scene, self.neptune_data["distance"], self.neptune_data["radius"], self.neptune_data["texture"])
self.scene.camera.follow(self.sun)
time.sleep(5)
self.rotation()
def rotation(self):
while (True):
self.mercury.rotate(angle=(2*pi/(self.mercury_data["speed"] * 86400)* 3600 / 300), axis=vector(0, 1, 0), origin=self.sun.pos)
self.venus.rotate(angle=(2*pi/(self.venus_data["speed"] * 86400)* 3600 / 300), axis=vector(0, 1, 0), origin=self.sun.pos)
self.earth.rotate(angle=(2*pi/(self.earth_data["speed"] * 86400)* 3600/ 300), axis=vector(0, 1, 0), origin=self.sun.pos)
self.mars.rotate(angle=(2*pi/(self.mars_data["speed"] * 86400)* 3600/ 300), axis=vector(0, 1, 0), origin=self.sun.pos)
self.jupiter.rotate(angle=(2*pi/(self.jupiter_data["speed"] * 86400)* 3600/ 300), axis=vector(0, 1, 0), origin=self.sun.pos)
self.saturn.rotate(angle=(2*pi/(self.saturn_data["speed"] * 86400)* 3600/ 300), axis=vector(0, 1, 0), origin=self.sun.pos)
self.uranus.rotate(angle=(2*pi/(self.uranus_data["speed"] * 86400)* 3600/ 300), axis=vector(0, 1, 0), origin=self.sun.pos)
self.neptune.rotate(angle=(2*pi/(self.neptune_data["speed"] * 86400)* 3600/ 300), axis=vector(0, 1, 0), origin=self.sun.pos)
# to be reviewed at the next update
self.mercury.rotate(angle = radians(57/self.mercury_data["speed"]/300), axis = vector(0, 1, 0))
self.venus.rotate(angle = radians(243/self.venus_data["speed"]/300), axis = vector(0, 1, 0))
self.earth.rotate(angle=radians(self.earth_data["speed"]/self.earth_data["speed"]/300), axis=vector(0, 1, 0))
self.mars.rotate(angle=radians(self.mars_data["speed"]/self.mars_data["speed"]/300), axis=vector(0, 1, 0))
self.jupiter.rotate(angle=radians(self.jupiter_data["speed"]/self.jupiter_data["speed"]/300), axis=vector(0, 1, 0))
self.saturn.rotate(angle=radians(self.saturn_data["speed"]/self.saturn_data["speed"]/300), axis=vector(0, 1, 0))
self.uranus.rotate(angle=radians(self.uranus_data["speed"]/self.uranus_data["speed"]/300), axis=vector(0, 1, 0))
self.neptune.rotate(angle=radians(self.neptune_data["speed"]/self.neptune_data["speed"]/300), axis=vector(0, 1, 0))
# the satellites will arrive later
def createscene(self):
# to be reviewed at the next update
self.scene = scene
self.scene.autoscale = False
self.scene.lights = []
def createsun(self):
self.sun = sphere(canvas=scene, pos=vector(0,0,0), radius=696340*1e-05, texture="./img/sun.jpg", mass=2e30, emissive=True, opacity=1)
self.sunlight = local_light(pos=self.sun.pos, color=color.white)
SolarSystem()
``` |
{
"source": "3x3x3/Presentations",
"score": 2
} |
#### File: Presentations/20210220_simulation_sample/data_handler.py
```python
import threading
import time
import global_def as gd
from db_reader import DbReaderDef, DbReaer
from queue import Queue, Empty
class DataHandlerThd(threading.Thread):
def __init__(self, req_queue: Queue, rcv_queue: Queue, db_host: str, db_port: int, db_user: str, db_pw: str, db_name: str, db_char_set: str = 'utf8'):
threading.Thread.__init__(self)
self._db_host = db_host
self._db_port = db_port
self._db_user = db_user
self._db_pw = db_pw
self._db_name = db_name
self._db_char_set = db_char_set
self._req_queue = req_queue
self._rcv_queue = rcv_queue
self.is_run = False
def _send_err_msg(self, msg: str) -> None:
self._rcv_queue.put({
gd.KEY_NM_EVT: gd.EVT_TYPE_ERR,
gd.KEY_NM_MSG: msg
})
def _read_db(self, req: dict) -> bool:
req_date = int(req.get(gd.KEY_NM_DATE, 0))
tbl_infos = req.get(gd.KEY_NM_TBL_INFOS, None)
if 19900101 > req_date or 30000101 < req_date:
self._send_err_msg('Invalid Date')
return False
if list != type(tbl_infos) or 0 == len(tbl_infos):
self._send_err_msg('Invalid Table Infos1')
return False
db_readers = []
for reader_idx, tbl_info in enumerate(tbl_infos):
tbl_nm = tbl_info.get(gd.KEY_NM_TBL_NM, None)
col_nms = tbl_info.get(gd.KEY_NM_COL_NMS, [])
if tbl_nm is None or 0 == len(col_nms):
self._send_err_msg('Invalid Table Infos2')
return False
db_reader = DbReaer(reader_idx, req_date, tbl_nm, col_nms, self._db_host, self._db_port, self._db_user, self._db_pw, self._db_name, self._db_char_set)
db_readers.append(db_reader)
for db_reader in db_readers:
db_reader.read_thd.start()
is_st_read = False
is_error = False
while not is_st_read:
for db_reader in db_readers:
thd_state: int = db_reader.get_thd_state()
if DbReaderDef.STATE_ERROR == thd_state:
is_st_read = True
is_error = True
break
elif DbReaderDef.STATE_READY == thd_state:
break
else:
is_st_read = True
time.sleep(0.5)
if is_error:
for db_reader in db_readers:
db_reader.set_stop_thd()
time.sleep(1)
self._send_err_msg('Error in DbReaderThd1')
return False
        # Read the first row from each reader to start
empty_reader_idxs = []
for reader_idx, db_reader in enumerate(db_readers):
if not db_reader.read_next_data():
empty_reader_idxs.append(reader_idx)
        # Remove the empty readers from the list
for reader_idx in empty_reader_idxs:
del db_readers[reader_idx]
reader_cnt = len(db_readers)
fin_readers = []
while 0 < reader_cnt:
min_rtime_idx = -1
min_rtime = 9999999999999
find_min_ts = False
is_exist_fin_readers = False
for idx, db_reader in enumerate(db_readers):
row: list = db_reader.last_data
                # When the reader's last row is empty
if row is None:
thd_state = db_reader.get_thd_state()
if DbReaderDef.STATE_WORKING == thd_state:
time.sleep(0.5)
db_reader.read_next_data()
find_min_ts = False
break
elif DbReaderDef.STATE_FINISHED == thd_state:
fin_readers.append(idx)
is_exist_fin_readers = True
continue
elif DbReaderDef.STATE_ERROR == thd_state:
self._send_err_msg('Error in DbReaderThd2')
fin_readers.append(idx)
is_exist_fin_readers = True
continue
pk_rtime = row[0]
if min_rtime > pk_rtime:
min_rtime = pk_rtime
min_rtime_idx = idx
find_min_ts = True
            # If we found the oldest (minimum timestamp) row
if find_min_ts:
target_reader: DbReaer = db_readers[min_rtime_idx]
self._rcv_queue.put({
gd.KEY_NM_EVT: gd.EVT_TYPE_READ_DB,
gd.KEY_NM_IDX: target_reader.reader_idx,
gd.KEY_NM_DATA: target_reader.last_data
})
target_reader.read_next_data()
            # If any readers have finished
if is_exist_fin_readers:
fin_readers.sort(reverse=True)
for fin_reader_idx in fin_readers:
del db_readers[fin_reader_idx]
reader_cnt = len(db_readers)
fin_readers.clear()
self._rcv_queue.put({
gd.KEY_NM_EVT: gd.EVT_TYPE_FIN
})
return True
def run(self):
self.is_run = True
while self.is_run:
try:
req = self._req_queue.get(True, 1)
evt_type = req.get(gd.KEY_NM_EVT)
if gd.EVT_TYPE_READ_DB == evt_type:
print(f'Read DB Start!, data: {req}')
self._read_db(req)
print(f'Read DB End!, data: {req}')
elif gd.EVT_TYPE_FIN == evt_type:
break
except Empty as em:
pass
except Exception as e:
self.is_run = False
break
```
#### File: Presentations/20210220_simulation_sample/main_server.py
```python
import threading
import socket
import socketserver
import configparser
import struct
import msgpack
import datetime
from decimal import Decimal
from queue import Queue, Empty
from data_handler import DataHandlerThd
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
def __init__(self, request, client_address, server):
super().__init__(request, client_address, server)
self.is_run = False
def _request_listener(self, req_queue: Queue):
while self.is_run:
try:
header = self.request.recv(4, socket.MSG_WAITALL)
if header:
body_len = struct.unpack('I', header)[0]
body_len = socket.ntohl(body_len)
body = self.request.recv(body_len, socket.MSG_WAITALL)
req_data = msgpack.unpackb(body, raw=False)
req_queue.put(req_data)
else:
break
except Exception as e:
self.is_run = False
break
@classmethod
def _pack_default(cls, val: any):
if isinstance(val, datetime.date):
return str(val)
elif isinstance(val, Decimal):
return float(val)
raise TypeError('Exception in msg_pack_default')
def _receive_listener(self, rcv_queue: Queue):
while self.is_run:
try:
rcv_data = rcv_queue.get(True, 1)
body = msgpack.packb(rcv_data, use_bin_type=True, default=self._pack_default)
header = struct.pack('I', socket.htonl(len(body)))
                # length of the body (4 bytes, int) + body (bytes)
self.request.sendall(header + body)
except Empty as em:
pass
except Exception as e:
self.is_run = False
break
def handle(self):
self.is_run = True
req_queue = Queue()
rcv_queue = Queue()
config = configparser.ConfigParser()
config.read('config.ini')
db_host = config.get('DB', 'HOST')
db_port = int(config.get('DB', 'PORT'))
db_user = config.get('DB', 'USER')
db_pw = config.get('DB', 'PASSWORD')
db_name = config.get('DB', 'DB_NAME')
db_charset = config.get('DB', 'CHAR_SET')
data_handler_thd = DataHandlerThd(req_queue, rcv_queue, db_host, db_port, db_user, db_pw, db_name, db_charset)
data_handler_thd.start()
req_thd = threading.Thread(target=self._request_listener, args=(req_queue,))
req_thd.start()
rcv_thd = threading.Thread(target=self._receive_listener, args=(rcv_queue,))
rcv_thd.start()
req_thd.join()
rcv_thd.join()
data_handler_thd.is_run = False
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
if __name__ == '__main__':
HOST = ''
PORT = 8765
svr = ThreadedTCPServer((HOST, PORT), ThreadedTCPRequestHandler)
with svr:
ip, port = svr.server_address
server_thread = threading.Thread(target=svr.serve_forever)
server_thread.daemon = True
server_thread.start()
server_thread.join()
svr.shutdown()
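# Client-side sketch of the same framing (added for illustration; the payload
# keys are assumptions, the real ones live in global_def): every message is a
# 4-byte network-order length header followed by a msgpack body.
#
#   body = msgpack.packb({'evt': 'read_db'}, use_bin_type=True)
#   header = struct.pack('I', socket.htonl(len(body)))
#   client_socket.sendall(header + body)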
``` |
{
"source": "3x3x3/websocket_study",
"score": 3
} |
#### File: 3x3x3/websocket_study/echo_client.py
```python
import socket
import random
import base64
import hashlib
import re
MAGIC_NUMBER = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
HOST = "localhost"
PORT = 5858
def main():
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((HOST, PORT))
######################################
# HandShake Start ####################
raw_key = bytes(random.getrandbits(8) for _ in range(16))
sec_ws_key = base64.b64encode(raw_key).decode("utf-8")
req = "GET / HTTP/1.1\r\n" + \
"Upgrade: websocket\r\n" \
f"Host: {HOST}:{PORT}\r\n" \
f"Origin: http://{HOST}:{PORT}\r\n" \
f"Sec-WebSocket-Key: {sec_ws_key}\r\n" \
"Sec-WebSocket-Version: 13\r\n" \
"Connection: upgrade\r\n\r\n"
s.sendall(req.encode("utf-8"))
buffer = bytearray()
while True:
buffer = buffer + bytearray(s.recv(8))
            # the handshake response ends with two CRLFs
if 0 <= buffer.find(b"\r\n\r\n"):
break
regex = re.compile("Sec-WebSocket-Accept: (.+?)\r\n")
re_match = regex.search(buffer.decode("utf-8"))
resp_sec_ws_acpt = re_match.group(1)
chk_sec_ws_acpt = bytes(sec_ws_key + MAGIC_NUMBER, encoding="utf-8")
chk_sec_ws_acpt = base64.b64encode(hashlib.sha1(chk_sec_ws_acpt).digest()).decode("utf-8")
if resp_sec_ws_acpt == chk_sec_ws_acpt:
print("핸드쉐이크 성공 !!!")
else:
print("핸드쉐이크 실패 !!!")
return False
# HandShake End ######################
######################################
        # TODO: send and receive data
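        # Added illustration (not part of the original client): a minimal sketch
        # of sending one masked text frame. RFC 6455 requires client-to-server
        # frames to be masked; the payload must stay under 126 bytes for this
        # short length form.
        payload = "hello".encode("utf-8")
        mask = bytes(random.getrandbits(8) for _ in range(4))
        frame = bytes([0x81, 0x80 | len(payload)]) + mask       # FIN + text opcode, MASK bit + length
        frame += bytes(b ^ mask[i % 4] for i, b in enumerate(payload))
        s.sendall(frame)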
return True
if "__main__" == __name__:
main()
``` |
{
"source": "3xbun/standard-betting",
"score": 3
} |
#### File: 3xbun/standard-betting/app.py
```python
from flask import Flask, render_template, url_for
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html', title="Home")
@app.route('/<game>/<id>')
def result(game, id):
return '{} | Match:{} is Losing'.format(game, id)
``` |
{
"source": "3xistentialcrisis/Blog",
"score": 3
} |
#### File: Blog/tests/test_comments.py
```python
import unittest
from app.models import Comment, Blog, User
from app import db
class CommentModelTest(unittest.TestCase):
def setUp(self):
        self.user_karanja = User(username='karanja', password='<PASSWORD>', email='<EMAIL>')
        self.new_blog = Blog(id=1, title='Test', content='test blog', user_id=self.user_karanja.id)
        self.new_comment = Comment(id=1, comment='Test comment', user=self.user_karanja, blog_id=self.new_blog)
def tearDown(self):
Blog.query.delete()
User.query.delete()
def test_check_instance_variables(self):
self.assertEquals(self.new_comment.comment, 'Test comment')
self.assertEquals(self.new_comment.user, self.user_karanja)
self.assertEquals(self.new_comment.blog_id, self.new_blog)
class CommentModelTest(unittest.TestCase):
def setUp(self):
self.user_ciku = User(username='ciku', password='<PASSWORD>', email='<EMAIL>')
self.new_blog = Blog(id=1, title='Test', content='test blog', user_id=self.user_ciku.id)
self.new_comment = Comment(id=1, comment='test comment', user_id=self.user_ciku.id,
blog_id=self.new_blog.id)
def tearDown(self):
Blog.query.delete()
User.query.delete()
Comment.query.delete()
def test_check_instance_variables(self):
self.assertEquals(self.new_comment.comment, 'test comment')
self.assertEquals(self.new_comment.user_id, self.user_ciku.id)
self.assertEquals(self.new_comment.blog_id, self.new_blog.id)
def test_save_comment(self):
self.new_comment.save()
self.assertTrue(len(Comment.query.all()) > 0)
def test_get_comment(self):
self.new_comment.save()
got_comment = Comment.get_comment(1)
self.assertTrue(got_comment is not None)
``` |
{
"source": "3xistentialcrisis/LegalUpdate",
"score": 3
} |
#### File: LegalUpdate/tests/test_post.py
```python
import unittest
from app.models import Case, Client, Comment
class TestCase(unittest.TestCase):
def setUp(self):
self.client_Stephen = Client(full_name = "<NAME>",
username = "remmi_m",
password = "<PASSWORD>",
email = "<EMAIL>")
self.new_post = Case(post_title = "Sample Title",
post_content = "Hallo Welt! Ich bin hier",
client_id = self.client_Stephen.id)
        self.new_comment = Comment(comment = "Nice job",
                                   case_id = self.new_post.id,
                                   client_id = self.client_Stephen.id)
def tearDown(self):
Case.query.delete()
Client.query.delete()
def test_instance(self):
self.assertTrue(isinstance(self.client_Stephen, Client))
        self.assertTrue(isinstance(self.new_post, Case))
self.assertTrue(isinstance(self.new_comment, Comment))
``` |
{
"source": "3xistentialcrisis/Password",
"score": 4
} |
#### File: 3xistentialcrisis/Password/user-test.py
```python
import unittest
from user import User
class TestUser(unittest.TestCase):
"""
Test Class which defines test cases for the User Class Behaviours
Args:
unittest.TestCase : Test case class used to create test cases
"""
def setUp(self):
"""
This setUp method runs before each test case
"""
#User Object
self.new_user = User("mary","catwoman","<PASSWORD>")
#User Details Array
User.user_details = []
def tearDown(self):
"""
This tearDown method cleans up after each test case is run
"""
#Test Cases
def test_init(self):
"""
This test case tests if the User object is initialised properly
"""
self.assertEqual( self.new_user.fname,"mary")
self.assertEqual( self.new_user.username, "catwoman" )
self.assertEqual( self.new_user.password, "<PASSWORD>" )
def test_save_user(self):
"""
This test case tests if the user object is saved in the user_details array
"""
self.new_user.save_user()
self.assertEqual(len(User.user_details),1)
def test_display_users(self):
"""
This test case tests if the details of all user accounts created is displayed
"""
self.assertEqual(User.display_users(), User.user_details)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "3xistentialcrisis/Pitches",
"score": 3
} |
#### File: Pitches/app/email.py
```python
from flask_mail import Message
from flask import render_template
from . import mail
def mail_message(subject: object, template: object, to: object, kwargs: object) -> object:
sender_email = '<EMAIL>'
email = Message(subject, sender=sender_email, recipients=[to])
    email.body = render_template(template + ".txt", **kwargs)
    email.html = render_template(template + ".html", **kwargs)
mail.send(email)
``` |
{
"source": "3xp1o1t/htb-cli",
"score": 3
} |
#### File: htb-cli/machines/print_machine_table.py
```python
from time import sleep
from contextlib import contextmanager
from rich.live import Live
from rich.table import Table
from rich.align import Align
from rich import box
"""
Related methods for printing tables with machine information.
"""
# Controling animation duration
BEAT_TIME = 0.01
@contextmanager
def beat(length: int = 1) -> None:
"""
beat works as a decorator for 'with'.
"""
yield
sleep(length * BEAT_TIME)
def print_table(console, machine_list, table_title):
"""
    print_table: Draw a table with a simple animation for the machine list
    :param console: Console() from rich used to print the table
    :param machine_list: List of machines from a response
    :param table_title: Title displayed above the table
"""
console.clear()
table = Table(show_footer = True, box = box.ASCII)
table_align = Align.left(table)
column_titles = ['ID', 'Name', 'Os', 'IP', 'Points', 'Difficulty', 'User Owned?', 'System Owned?']
column_styles = ['bright_cyan', 'bright_green', 'bright_yellow', 'bright_magenta', 'deep_sky_blue1', 'salmon1', 'white', 'white']
column_align = ['center', 'left', 'left', 'left', 'center', 'left', 'center', 'center']
total_user_owned = 0
total_root_owned = 0
# vertical_overflow = autoscroll when renderin, better if use show_footer = True
with Live(table_align, console = console, screen = False, refresh_per_second = 12, vertical_overflow = 'visible'):
#list_length: Column_titles = Column_styles = Column alignment - Probably a bad programming practice :(
for value in range(len(column_titles)):
with beat(10):
table.add_column(column_titles[value], footer=column_titles[value], style = column_styles[value], justify = column_align[value], no_wrap = True)
with beat(10):
table.title = table_title
for index in range(len(machine_list['info'])):
user_owned = ":x:"
root_owned = ":x:"
if machine_list['info'][index]['authUserInUserOwns'] is not None:
user_owned = ":white_check_mark:"
total_user_owned += 1
if machine_list['info'][index]['authUserInRootOwns'] is not None:
root_owned = ":white_check_mark:"
total_root_owned += 1
with beat(10):
table.add_row(
str(machine_list['info'][index]['id']),
machine_list['info'][index]['name'],
machine_list['info'][index]['os'],
machine_list['info'][index]['ip'],
str(machine_list['info'][index]['points']),
machine_list['info'][index]['difficultyText'],
user_owned,
root_owned
)
with beat(10):
table.border_style = "bright_cyan"
console.print(f'[+] Total found: {index + 1}\t\t[+] Total User Owned: {total_user_owned}\t\t[+] Total Root Owned: {total_root_owned}', style = 'bold')
```
#### File: 3xp1o1t/htb-cli/static_main_menu.py
```python
from tools.generate_menu import print_menu
from machines.static_machine_menu import machine_menu
from tools.utils import log, read
def default_menu(config, console):
default_options = [
'1. Machines',
'0. Exit'
]
console.clear()
while True:
print_menu(console, default_options, 'Main Menu')
user_input = read("Please, type an option", '1')
if user_input == '1':
machine_menu(config, console)
elif user_input == '0':
console.clear()
return
else:
console.clear()
log('Invalid option, try again!', 'error')
```
#### File: htb-cli/tools/api.py
```python
from requests import post, get
"""
Related methods for submitting requests to the HTB V4 API
"""
def api_get(url: str, endpoint: str, headers: dict) -> list:
"""
api_get: Make a get request to HTB API
:param url: Target url to send request
:param endpoint: API path to a specific resource
:param headers: Headers http
:return: Response result
"""
return get(f"{url}{endpoint}", headers=headers, allow_redirects=False).json()
def api_post(url: str, endpoint: str, headers: dict, data: dict) -> list:
"""
api_post: Send data through http POST to HTB API
:param url: Target url to send request
:param endpoint: API target path
:param headers: Headers http
:param data: Data to send
:return: Response result
"""
return post(f"{url}{endpoint}", headers=headers, data=data)
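# Usage sketch (added for illustration; the endpoint path and bearer-token header
# are assumptions about how the HTB v4 API is typically called, not taken from
# this repository):
#
#   headers = {'Authorization': f'Bearer {token}'}
#   machines = api_get('https://www.hackthebox.com', '/api/v4/machine/list', headers)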
``` |
{
"source": "3Xpl0it3r/6.824",
"score": 3
} |
#### File: 6.824/scripts/dslogs.py
```python
import sys
import shutil
from typing import Optional, List, Tuple, Dict
import typer
from rich import print
from rich.columns import Columns
from rich.console import Console
from rich.traceback import install
# fmt: off
# Mapping from topics to colors
TOPICS = {
"TIMR": "#9a9a99",
"VOTE": "#67a0b2",
"LEAD": "#d0b343",
"TERM": "#70c43f",
"LOG1": "#4878bc",
"LOG2": "#398280",
"CMIT": "#98719f",
"PERS": "#d08341",
"SNAP": "#FD971F",
"DROP": "#ff615c",
"CLNT": "#00813c",
"TEST": "#fe2c79",
"INFO": "#ffffff",
"WARN": "#d08341",
"ERRO": "#fe2626",
"TRCE": "#fe2626",
}
# fmt: on
def list_topics(value: Optional[str]):
if value is None:
return value
topics = value.split(",")
for topic in topics:
if topic not in TOPICS:
raise typer.BadParameter(f"topic {topic} not recognized")
return topics
def main(
file: typer.FileText = typer.Argument(None, help="File to read, stdin otherwise"),
colorize: bool = typer.Option(True, "--no-color"),
n_columns: Optional[int] = typer.Option(None, "--columns", "-c"),
ignore: Optional[str] = typer.Option(None, "--ignore", "-i", callback=list_topics),
just: Optional[str] = typer.Option(None, "--just", "-j", callback=list_topics),
):
topics = list(TOPICS)
# We can take input from a stdin (pipes) or from a file
input_ = file if file else sys.stdin
# Print just some topics or exclude some topics (good for avoiding verbose ones)
if just:
topics = just
if ignore:
topics = [lvl for lvl in topics if lvl not in set(ignore)]
topics = set(topics)
console = Console()
width = console.size.width
panic = False
for line in input_:
try:
time, topic, *msg = line.strip().split(" ")
# To ignore some topics
if topic not in topics:
continue
msg = " ".join(msg)
# Debug calls from the test suite aren't associated with
# any particular peer. Otherwise we can treat second column
# as peer id
if topic != "TEST":
i = int(msg[1])
# Colorize output by using rich syntax when needed
if colorize and topic in TOPICS:
color = TOPICS[topic]
msg = f"[{color}]{msg}[/{color}]"
# Single column printing. Always the case for debug stmts in tests
if n_columns is None or topic == "TEST":
print(time, msg)
# Multi column printing, timing is dropped to maximize horizontal
            # space. Heavy lifting is done through the rich.columns.Columns object
else:
cols = ["" for _ in range(n_columns)]
msg = "" + msg
cols[i] = msg
col_width = int(width / n_columns)
cols = Columns(cols, width=col_width - 1, equal=True, expand=True)
print(cols)
except:
# Code from tests or panics does not follow format
# so we print it as is
if line.startswith("panic"):
panic = True
# Output from tests is usually important so add a
# horizontal line with hashes to make it more obvious
if not panic:
print("#" * console.width)
print(line, end="")
if __name__ == "__main__":
typer.run(main)
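# Usage sketch (added for illustration; the VERBOSE flag is an assumption about
# how the lab's logging is gated): pipe a test run through the script, split the
# output into one column per peer, and hide the noisy timer topic.
#
#   VERBOSE=1 go test -run TestBackup2B -race | python dslogs.py -c 3 -i TIMR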
``` |
{
"source": "3YOURMIND/django-model-sync",
"score": 2
} |
#### File: django-model-sync/django-model-sync/admin.py
```python
from django.conf.urls import url
from django.contrib import admin
from django.http import HttpResponseRedirect
from apps.b3_migration.models.switch import Switch
from apps.b3_organization.models.organization import Organization
def switch_on(modeladmin, request, queryset):
queryset.update(active=True)
def switch_off(modeladmin, request, queryset):
queryset.update(active=False)
switch_on.short_description = "Turn on all selected Switches"
switch_off.short_description = "Turn off all selected Switches"
@admin.register(Switch)
class SwitchAdmin(admin.ModelAdmin):
change_list_template = "b3_migration/switch_changelist.html"
list_display = (
'feature',
'organization',
'site',
'active',
)
list_filter = ('active', )
search_fields = (
'feature',
'organization__site__name',
'organization__showname',
'organization__site__domain',
)
def site(self, obj):
return obj.organization.site
def get_urls(self):
urls = super().get_urls()
my_urls = [
url('create-all/', self.create_all_switches),
]
return my_urls + urls
def create_all_switches(self, request):
for feature in Switch.FEATURE_CHOICES:
for organization in Organization.objects.all():
Switch.objects.get_or_create(
feature=feature[0], organization=organization
)
return HttpResponseRedirect("../")
actions = [switch_on, switch_off]
```
#### File: django-model-sync/django-model-sync/decorators.py
```python
from functools import wraps
from rest_framework import exceptions
from apps.b3_migration.models.switch import Switch
def require_switch(feature_name):
"""
    Decorate any view function or method of class-based views (get(), post(),
    etc.) or anything that gets called from these methods, for example
    get_queryset().
    Raises PermissionDenied(); in the above-mentioned methods DRF gracefully
    handles this exception and returns a 403 Forbidden response with the error
    details.
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
if Switch.is_active(feature_name):
return func(*args, **kwargs)
else:
raise exceptions.PermissionDenied(
f'This API is not available without {feature_name} switch'
)
return wrapper
return decorator
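# Usage sketch (added for illustration; the view and method names are
# assumptions): decorate any DRF view method so it only runs while the
# 'new_checkout' switch is active.
#
#   class CheckoutViewSet(viewsets.ModelViewSet):
#       @require_switch('new_checkout')
#       def list(self, request, *args, **kwargs):
#           return super().list(request, *args, **kwargs)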
```
#### File: django-model-sync/sync/auto_synchronization.py
```python
import structlog as logging
from apps.b3_migration.model_descriptors.utils import get_buddy_class
from apps.b3_migration.sync.utils import sync_source_and_target_models
from apps.b3_migration.sync.auto_synchronization_base import \
AutoSynchronizationBase
logger = logging.getLogger(__name__)
class ModelToModelAutoSynchronizationMixin(AutoSynchronizationBase):
"""
    Mixin added to model classes for auto synchronization. This works for
    models that map on the instance level, not the field level.
HOW TO USE:
1. Have your model inherit from this mixin before models.Model -and
any other parent class that overrides :function: `delete()`
and :function: `save()`- since it needs to have precedence over
such classes
2. Override :function: `get_source_and_target_descriptors()`
3. IF your model requires a special check in :function: `save()` for
whether the instance is being updated or created, implement
:function: `exists_in_db()` that returns a boolean signifying if
the instance exists in the database -and thus is being updated
not created-. Example case would be if you override the pk and thus
            checking the existence of a pk on the current instance would always
be true regardless of whether it is an update or create that is
being performed
PRECAUTION:
> this does not take into account operations that neither call
:function: `delete()` or :function: `save()`
for instance: :function: `bulk_create()` or :function: `update()`
or -obviously- any raw SQL queries...
"""
def _pre_save(self, *args, update=False, target=False, **kwargs):
"""
Pre-save, check if deleted, then don't do anything i.e. don't
allow :function: `_post_save()` to run by returning False
:return bool implying whether or not to run :function: `_post_save()`
"""
# :model: `OrganizationBillingAddress` and
# `OrganizationShippingAddress` are to be deleted from project but
# since they both inherit from :model: `AbstractAddress` we need to
# prevent auto sync for them. The other option would be to augment
# every address model with this mixin, but that seems WET and would
# lead to more changes once these two models are removed from the
# project
# TODO: remove once these models are deleted from the project
if self.__class__.__name__ in [
'OrganizationBillingAddress',
'OrganizationShippingAddress'
]:
return False
if getattr(self, 'deleted_date', False):
return False
return True
def _post_save(self, *args, update=False, target=False, **kwargs):
"""
Overriding :function: `_post_save()` to perform sync between old and
new models.
The model that initiates the saving adds a :kwarg: `target=True` to the
call for :function: `save()` of the target model so as not to loop
infinitely from one model to the other.
Steps:
1. If self is not the target -and thus the initiator-,
call sync function
"""
if not target:
source_descriptor, target_descriptor = \
self.get_source_and_target_descriptors()
sync_source_and_target_models(
self,
source_descriptor,
target_descriptor,
get_buddy_class(target_descriptor),
'AUTO-SYNC',
update=update
)
def _pre_delete(self, *args, target=False, **kwargs):
"""
Overriding :function: `_pre_delete()` to perform sync between old and
new models.
The model that initiates the deletion adds a :kwarg: `target=True` to
the call for :function: `delete()` of the target model so as not to
loop infinitely from one model to the other.
Steps:
if self is the target:
return
else
1. Get source and target descriptors
2. Use descriptors to get target instance
3. Set :kwarg: `target=True`
4. Check if a buddy instance exists - thus a target also exists
5. Delete buddy instance
6. Delete target
:return bool implying whether or not to run :function: `_post_delete()`
"""
if not target:
source_descriptor, target_descriptor = \
self.get_source_and_target_descriptors()
source_related_name_in_buddy = source_descriptor[
'related_name_in_buddy']
if hasattr(self, source_related_name_in_buddy):
buddy_instance = getattr(self, source_related_name_in_buddy)
target_field_name_in_buddy = target_descriptor[
'field_name_in_buddy']
target_instance = getattr(
buddy_instance, target_field_name_in_buddy)
logger.debug(f'AUTO-SYNC: Starting delete for buddy '
f'instance: {buddy_instance}')
buddy_instance.delete()
logger.debug(f'AUTO-SYNC: Completed delete for buddy '
f'instance: {buddy_instance}')
target_instance.delete(*args, target=True, **kwargs)
return True
def _post_delete(self, *args, target=False, **kwargs):
"""
Do nothing after deletion
"""
def get_source_and_target_descriptors(self):
"""
Gets the source and target descriptors - Needed for sync.
:return source_descriptor, target_descriptor:
"""
raise NotImplementedError(
'AutoSynchronizationMixin requires '
'the function get_source_and_target_descriptors() to '
'be implemented'
)
def exists_in_db(self):
"""
Checks if `self` is already in database. This is used with the
AutoSyncMixin for checking on save whether it is an update or a create
"""
return self.__class__.objects.filter(pk=self.pk).exists()
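# Minimal sketch of a model wired up as described in the class docstring
# (added for illustration; the model and descriptor names are assumptions):
#
#   class LegacyAddress(ModelToModelAutoSynchronizationMixin, models.Model):
#       street = models.CharField(max_length=255)
#
#       def get_source_and_target_descriptors(self):
#           # descriptors describing the old/new model pair and their buddy links
#           return LEGACY_ADDRESS_DESCRIPTOR, NEW_ADDRESS_DESCRIPTOR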
```
#### File: django-model-sync/tests/test_switch.py
```python
from django.db.utils import IntegrityError
from apps.b3_migration.factories.switch_factory import SwitchFactory
from apps.b3_organization.models.organization import Organization
from apps.b3_tests.testcases import B3TestCase
class SwitchTests(B3TestCase):
def setUp(self):
super().setUp()
self.organization = Organization.objects.first()
self.organization.showname = '3YOURMIND'
self.organization.save()
self.switch = SwitchFactory(
feature='new_checkout',
organization=self.organization,
)
def test_str(self):
"""Test string representation of Switch"""
self.assertEqual(
self.switch.__str__(),
'3YOURMIND: new_checkout - Active'
)
def test_uniqueness_constraints(self):
"""
Asserts that :field: `organization` and :field: `feature`
are unique together
"""
switch = SwitchFactory.build(
feature='new_checkout',
organization=self.organization,
)
self.assertRaises(
IntegrityError,
switch.save
)
``` |
{
"source": "3YOURMIND/drf-payload-customizer",
"score": 3
} |
#### File: drf_payload_customizer/mixins/payload_no_null_or_none_mixin.py
```python
from collections import OrderedDict
from rest_framework import fields
class PayloadNoNullOrNoneMixin:
"""
    This mixin converts "" => null when rendering Python to JSON, and null => ""
    when accepting data from JSON.
"""
@staticmethod
def _is_charfield(field):
"""
@TODO: Improve this to check fields shadowed by Charfield
"""
return isinstance(field, fields.CharField)
@staticmethod
def _nullify(value):
return None if value == "" else value
@staticmethod
def _blankify(value):
return "" if value is None else value
def _nullify_dict(self, dictionary) -> dict:
nullified_dict = {}
for key, value in dictionary.items():
is_dict = isinstance(value, dict)
nullified_dict[key] = \
self._nullify_dict(value) if is_dict else self._nullify(value)
return nullified_dict
def to_internal_value(self, payload: OrderedDict):
for key, value in payload.items():
is_character_field = self._is_charfield(
self.fields.get(key)
)
payload[key] = (
self._blankify(value) if is_character_field else value
)
return super().to_internal_value(payload)
def to_representation(self, instance) -> dict:
snake_cased = super().to_representation(instance)
return self._nullify_dict(snake_cased)
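# Behaviour sketch (added for illustration; not part of the original module):
# for a serializer with a CharField "nickname", an empty string is rendered as
# null on output, and a null received from the client is turned back into ""
# before validation.
#
#   serializer.to_representation(obj)                 # {'nickname': None} when the field is ""
#   serializer.to_internal_value({'nickname': None})  # None is converted to "" first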
```
#### File: drf_payload_customizer/mixins/payload_transformation_mixin.py
```python
from collections.abc import Mapping
import re
from rest_framework.settings import api_settings
from rest_framework.serializers import ValidationError
from rest_framework.utils.serializer_helpers import ReturnDict
class PayloadTransformationMixin:
"""
This mixin converts python snake_case fields into
camelCase JSON. This works for serialization and deserialization.
"""
@staticmethod
def _do_transform_to_internal_value(camel_key):
snake_cased = re.sub(r'([A-Z]+)([A-Z][a-z])', r'\1_\2', camel_key)
snake_cased = re.sub(r'([a-z\d])([A-Z])', r'\1_\2', snake_cased)
return snake_cased.lower()
def transform_to_internal_value(self, camel_cased):
fields_dict = {}
for camel_key, value in camel_cased.items():
transformed_key = self._do_transform_to_internal_value(camel_key)
fields_dict[transformed_key] = value
return fields_dict
def to_internal_value(self, camel_cased):
if not isinstance(camel_cased, Mapping):
message = self.error_messages['invalid'].format(
datatype=type(camel_cased).__name__
)
raise ValidationError({
api_settings.NON_FIELD_ERRORS_KEY: [message]
}, code='invalid')
return super().to_internal_value(
self.transform_to_internal_value(camel_cased)
)
@staticmethod
def _do_transform_to_representation(snake_key):
first_letter = snake_key[0].lower()
rest = re.sub(r'(?:^|_)(.)', lambda x: x.group(1).upper(),
snake_key)[1:]
return f'{first_letter}{rest}'
def transform_to_representation(self, snake_cased):
fields_dict = {}
transform_needed = getattr(
getattr(self, 'Meta', None), 'PAYLOAD_TRANSFORM_NESTED', False)
for key, value in snake_cased.items():
transformed_key = self._do_transform_to_representation(key)
fields_dict[transformed_key] = value
if transform_needed and isinstance(value, dict):
fields_dict[transformed_key] = \
self.transform_to_representation(value)
return fields_dict
def to_representation(self, instance):
return self.transform_to_representation(
super().to_representation(instance)
)
@property
def errors(self):
ret = self.transform_to_representation(super().errors)
return ReturnDict(ret, serializer=self)
```
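Purely illustrative (not from the repository): a quick check of how the two key-transformation helpers behave on sample keys, which is the conversion the mixin applies to every payload key.
```python
from drf_payload_customizer.mixins.payload_transformation_mixin import \
    PayloadTransformationMixin

# camelCase (including a trailing acronym) -> snake_case on the way in
print(PayloadTransformationMixin._do_transform_to_internal_value('createdAtUTC'))
# created_at_utc

# snake_case -> camelCase on the way out
print(PayloadTransformationMixin._do_transform_to_representation('created_at'))
# createdAt
```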
#### File: test_app/tests/mixins.py
```python
from drf_payload_customizer.mixins.payload_no_null_or_none_mixin import \
PayloadNoNullOrNoneMixin
from drf_payload_customizer.mixins.payload_transformation_mixin import \
PayloadTransformationMixin
from drf_payload_customizer.mixins.payload_translation_mixin import \
PayloadTranslationMixin
class PayloadConverterMixin(PayloadTransformationMixin,
PayloadTranslationMixin,
PayloadNoNullOrNoneMixin):
"""
    Use this mixin in all of our Serializers to convert the JSON into a
    format that is more easily consumed by modern front-ends.
"""
def to_representation(self, instance):
return super().to_representation(instance)
def to_internal_value(self, camel_cased):
return super().to_internal_value(camel_cased)
``` |
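A minimal sketch (not part of the repository) of wiring `PayloadConverterMixin` into a serializer. `ArticleSerializer`, its field, and the import path are assumptions, and the sketch presumes `PayloadTranslationMixin` needs no extra configuration for this simple case; `PAYLOAD_TRANSFORM_NESTED` is the Meta flag read by `PayloadTransformationMixin` above.
```python
from rest_framework import serializers

from test_app.tests.mixins import PayloadConverterMixin  # assumed import path


class ArticleSerializer(PayloadConverterMixin, serializers.Serializer):
    display_title = serializers.CharField(allow_blank=True)

    class Meta:
        # Also camelCase the keys of nested dicts when serializing
        PAYLOAD_TRANSFORM_NESTED = True


# Serializing would emit {"displayTitle": ...}; parsing accepts {"displayTitle": ...}
# and hands {"display_title": ...} to the usual DRF validation machinery.
```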
{
"source": "3ysoftwarehouse/vcd-novo",
"score": 2
} |
#### File: cadastro/forms/cliente.py
```python
from django import forms
from django.utils.translation import ugettext_lazy as _
from djangosige.apps.cadastro.models import Cliente
class ClienteForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request', None)
super(ClienteForm, self).__init__(*args, **kwargs)
self.fields['limite_de_credito'].localize = True
class Meta:
model = Cliente
fields = ('nome_razao_social', 'tipo_pessoa', 'inscricao_municipal',
'limite_de_credito', 'indicador_ie', 'id_estrangeiro', 'informacoes_adicionais',
'nome_pai', 'nome_mae', 'naturalidade', 'numero_dependentes',
'tipo_residencia', 'dt_emissao_rg', 'tempo_residencia',
'dt_admissao', 'cargo', 'principal_renda',
'outra_renda', 'patrimonio')
widgets = {
'nome_razao_social': forms.TextInput(attrs={'class': 'form-control'}),
'tipo_pessoa': forms.RadioSelect(attrs={'class': 'form-control'}),
'limite_de_credito': forms.TextInput(attrs={'class': 'form-control decimal-mask'}),
'indicador_ie': forms.Select(attrs={'class': 'form-control'}),
'inscricao_municipal': forms.TextInput(attrs={'class': 'form-control'}),
'id_estrangeiro': forms.TextInput(attrs={'class': 'form-control'}),
'informacoes_adicionais': forms.Textarea(attrs={'class': 'form-control'}),
'nome_pai': forms.TextInput(attrs={'class': 'form-control'}),
'nome_mae': forms.TextInput(attrs={'class': 'form-control'}),
'naturalidade': forms.TextInput(attrs={'class': 'form-control'}),
'numero_dependentes': forms.NumberInput(attrs={'class': 'form-control'}),
'tipo_residencia': forms.TextInput(attrs={'class': 'form-control'}),
'dt_emissao_rg': forms.DateInput(attrs={'class': 'form-control datepicker'}),
'tempo_residencia': forms.TextInput(attrs={'class': 'form-control'}),
'dt_admissao': forms.DateInput(attrs={'class': 'form-control datepicker'}),
'cargo': forms.TextInput(attrs={'class': 'form-control'}),
'principal_renda': forms.TextInput(attrs={'class': 'form-control decimal-mask'}),
'outra_renda': forms.TextInput(attrs={'class': 'form-control decimal-mask'}),
'patrimonio': forms.TextInput(attrs={'class': 'form-control decimal-mask'}),
}
labels = {
'nome_razao_social': _('Nome / Razão Social'),
'tipo_pessoa': _(''),
'limite_de_credito': _('Limite de Crédito'),
'indicador_ie': _('Indicador da IE do Destinatário'),
'inscricao_municipal': _('Inscrição Municipal'),
'id_estrangeiro': _('Documento legal (Estrangeiro)'),
'nome_pai': _('Nome do Pai'),
'nome_mae': _('Nome da Mãe'),
'naturalidade': _('Naturalidade'),
'numero_dependentes': _('Numero de dependentes'),
'tipo_residencia': _('Tipo de Residência'),
'dt_emissao_rg': _('Data de Emissão'),
'tempo_residencia': _('Tempo de Residência'),
'dt_admissao': _('Data de Admissão'),
'cargo': _('Cargo'),
'principal_renda': _('Principal Renda'),
'outra_renda': _('Outra Renda'),
'patrimonio': _('Patrimônio'),
}
def save(self, commit=True):
instance = super(ClienteForm, self).save(commit=False)
instance.criado_por = self.request.user
instance.emissor = self.request.user
if commit:
instance.save()
return instance
```
#### File: financeiro/models/lancamento.py
```python
from django.db import models
from django.core.validators import MinValueValidator
from decimal import Decimal
from django.core.urlresolvers import reverse_lazy
from django.template.defaultfilters import date
import locale
locale.setlocale(locale.LC_ALL, '')
STATUS_CONTA_SAIDA_ESCOLHAS = (
(u'0', u'Paga'),
(u'1', u'A pagar'),
(u'2', u'Atrasada'),
)
STATUS_CONTA_ENTRADA_ESCOLHAS = (
(u'0', u'Recebida'),
(u'1', u'A receber'),
(u'2', u'Atrasada'),
)
class Lancamento(models.Model):
data_vencimento = models.DateField(null=True, blank=True)
data_pagamento = models.DateField(null=True, blank=True)
descricao = models.CharField(max_length=255)
conta_corrente = models.ForeignKey(
'cadastro.Banco', related_name="conta_corrente_conta", on_delete=models.SET_NULL, null=True, blank=True)
valor_total = models.DecimalField(max_digits=13, decimal_places=2, validators=[
MinValueValidator(Decimal('0.00'))], default=Decimal('0.00'))
abatimento = models.DecimalField(max_digits=13, decimal_places=2, validators=[
MinValueValidator(Decimal('0.00'))], default=Decimal('0.00'))
juros = models.DecimalField(max_digits=13, decimal_places=2, validators=[
MinValueValidator(Decimal('0.00'))], default=Decimal('0.00'))
valor_liquido = models.DecimalField(max_digits=13, decimal_places=2, validators=[
MinValueValidator(Decimal('0.00'))], default=Decimal('0.00'))
movimentar_caixa = models.BooleanField(default=True)
movimento_caixa = models.ForeignKey(
'financeiro.MovimentoCaixa', related_name="movimento_caixa_lancamento", on_delete=models.SET_NULL, null=True, blank=True)
moeda = models.ForeignKey('financeiro.Moeda', related_name="moeda", on_delete=models.SET_NULL, null=True,
blank=True)
class Meta:
verbose_name = "Lançamento"
permissions = (
("view_lancamento", "Can view lancamento"),
)
def format_valor_liquido(self):
return locale.format(u'%.2f', self.valor_liquido, 1)
@property
def format_data_vencimento(self):
return '%s' % date(self.data_vencimento, "d/m/Y")
@property
def format_data_pagamento(self):
return '%s' % date(self.data_pagamento, "d/m/Y")
class Entrada(Lancamento):
cliente = models.ForeignKey('cadastro.Cliente', related_name="conta_cliente",
on_delete=models.SET_NULL, null=True, blank=True)
status = models.CharField(
max_length=1, choices=STATUS_CONTA_ENTRADA_ESCOLHAS, default='1')
grupo_plano = models.ForeignKey(
'financeiro.PlanoContasGrupo', related_name="grupo_plano_recebimento", on_delete=models.SET_NULL, null=True, blank=True)
def get_edit_url(self):
if self.status == '0':
return reverse_lazy('financeiro:editarrecebimentoview', kwargs={'pk': self.id})
else:
return reverse_lazy('financeiro:editarcontareceberview', kwargs={'pk': self.id})
def get_tipo(self):
return 'Entrada'
class Saida(Lancamento):
fornecedor = models.ForeignKey(
'cadastro.Fornecedor', related_name="conta_fornecedor", on_delete=models.SET_NULL, null=True, blank=True)
status = models.CharField(
max_length=1, choices=STATUS_CONTA_SAIDA_ESCOLHAS, default='1')
grupo_plano = models.ForeignKey(
'financeiro.PlanoContasGrupo', related_name="grupo_plano_pagamento", on_delete=models.SET_NULL, null=True, blank=True)
def get_edit_url(self):
if self.status == '0':
return reverse_lazy('financeiro:editarpagamentoview', kwargs={'pk': self.id})
else:
return reverse_lazy('financeiro:editarcontapagarview', kwargs={'pk': self.id})
def get_tipo(self):
return 'Saida'
class MovimentoCaixa(models.Model):
data_movimento = models.DateField(null=True, blank=True)
saldo_inicial = models.DecimalField(
max_digits=13, decimal_places=2, default=Decimal('0.00'))
saldo_final = models.DecimalField(
max_digits=13, decimal_places=2, default=Decimal('0.00'))
entradas = models.DecimalField(max_digits=13, decimal_places=2, validators=[
MinValueValidator(Decimal('0.00'))], default=Decimal('0.00'))
saidas = models.DecimalField(max_digits=13, decimal_places=2, validators=[
MinValueValidator(Decimal('0.00'))], default=Decimal('0.00'))
class Meta:
verbose_name = "Movimento de Caixa"
permissions = (
("acesso_fluxodecaixa", "Pode acessar o Fluxo de Caixa"),
)
@property
def format_data_movimento(self):
return '%s' % date(self.data_movimento, "d/m/Y")
@property
def valor_lucro_prejuizo(self):
return self.saldo_final - self.saldo_inicial
def __unicode__(self):
s = u'Movimento dia %s' % (self.data_movimento)
return s
def __str__(self):
s = u'Movimento dia %s' % (self.data_movimento)
return s
```
#### File: vendas/views/vendas.py
```python
from django.core.mail import EmailMultiAlternatives
from django.core.urlresolvers import reverse_lazy
from django.shortcuts import redirect
from django.http import HttpResponse
from djangosige.apps.base.custom_views import CustomView, CustomCreateView, CustomListView, CustomUpdateView
from djangosige.apps.vendas.forms import OrcamentoVendaForm, PedidoVendaForm, ItensVendaFormSet, PagamentoFormSet, ProspectForm, ContatoProspectForm, PagamentoForm
from djangosige.apps.vendas.models import OrcamentoVenda, PedidoVenda, ItensVenda, Pagamento, Prospect, ContatoProspect
from djangosige.apps.cadastro.models import MinhaEmpresa, Cliente, Categoria, Produto
from djangosige.apps.login.models import Usuario
from djangosige.configs.settings import MEDIA_ROOT, EMAIL_HOST_USER
from geraldo.generators import PDFGenerator
from datetime import datetime
import io
from .report_vendas import VendaReport
class AdicionarProspectView(CustomCreateView):
form_class = ProspectForm
template_name = "vendas/prospect/prospect_add.html"
success_url = reverse_lazy('vendas:listaprospectview')
success_message = "<b>Prospect %(id)s </b>adicionado com sucesso."
permission_codename = 'add_prospect'
def get_success_message(self, cleaned_data):
return self.success_message % dict(cleaned_data, id=self.object.pk)
def get_context_data(self, **kwargs):
context = super(AdicionarProspectView, self).get_context_data(**kwargs)
return self.view_context(context)
def view_context(self, context):
context['title_complete'] = 'ADICIONAR PROSPECT'
context['return_url'] = reverse_lazy('vendas:listaprospectview')
context['editar_view'] = False
return context
def get(self, request, *args, **kwargs):
self.object = None
form = self.get_form(self.form_class)
return self.render_to_response(self.get_context_data(form=form))
def post(self, request, *args, **kwargs):
self.object = None
form = self.get_form(self.form_class)
if form.is_valid():
self.object = form.save(commit=False)
if not request.user.is_superuser:
self.object.emissor = request.user
self.object.save()
if not Cliente.objects.filter(nome_razao_social=self.object.cliente):
cliente = Cliente()
cliente.nome_razao_social = self.object.cliente
if not request.user.is_superuser:
cliente.emissor = request.user
cliente.save()
return self.form_valid(form)
return self.form_invalid(form=form)
class AdicionarContatoProspectView(CustomCreateView):
form_class = ContatoProspectForm
template_name = "vendas/prospect/contatoprospect_add.html"
success_message = "<b>Comentário do Prospect %(id)s </b>adicionado com sucesso."
permission_codename = 'add_contatoprospect'
def get_success_message(self, cleaned_data):
return self.success_message % dict(cleaned_data, id=self.object.pk)
def post(self, request, *args, **kwargs):
self.object = None
form = self.get_form(self.form_class)
if form.is_valid():
self.object = form.save(commit=False)
self.object.prospect = Prospect.objects.get(pk=self.kwargs['pk'])
self.object.emissor = request.user
self.object.save()
return redirect(reverse_lazy('vendas:editarprospectview', kwargs={'pk':self.kwargs['pk']}))
return self.form_invalid(form=form)
class AdicionarVendaView(CustomCreateView):
def get_success_message(self, cleaned_data):
return self.success_message % dict(cleaned_data, id=self.object.pk)
def get_context_data(self, **kwargs):
context = super(AdicionarVendaView, self).get_context_data(**kwargs)
return self.view_context(context)
def get(self, request, form_class, *args, **kwargs):
self.object = None
form = self.get_form(form_class)
form.initial['vendedor'] = request.user.first_name or request.user
form.initial['data_emissao'] = datetime.today().strftime('%d/%m/%Y')
produtos_form = ItensVendaFormSet(prefix='produtos_form')
pagamento_form = PagamentoFormSet(prefix='pagamento_form')
return self.render_to_response(self.get_context_data(form=form,
produtos_form=produtos_form,
pagamento_form=pagamento_form))
def post(self, request, form_class, *args, **kwargs):
self.object = None
        # Strip '.' (thousands separator) from decimal fields, e.g. '1.234,56' -> '1234,56'
req_post = request.POST.copy()
for key in req_post:
if ('desconto' in key or
'quantidade' in key or
'valor' in key or
'frete' in key or
'despesas' in key or
'seguro' in key or
'total' in key):
req_post[key] = req_post[key].replace('.', '')
request.POST = req_post
form = self.get_form(form_class)
produtos_form = ItensVendaFormSet(request.POST, prefix='produtos_form')
pagamento_form = PagamentoFormSet(
request.POST, prefix='pagamento_form')
if (form.is_valid() and produtos_form.is_valid() and pagamento_form.is_valid()):
self.object = form.save(commit=False)
self.object.save()
for pform in produtos_form:
if pform.cleaned_data != {}:
itens_venda_obj = pform.save(commit=False)
itens_venda_obj.venda_id = self.object
itens_venda_obj.calcular_pis_cofins()
itens_venda_obj.save()
pagamento_form.instance = self.object
pagamento_form.save()
return self.form_valid(form)
return self.form_invalid(form=form,
produtos_form=produtos_form,
pagamento_form=pagamento_form)
class AdicionarOrcamentoVendaView(AdicionarVendaView):
form_class = OrcamentoVendaForm
template_name = "vendas/orcamento_venda/orcamento_venda_add.html"
success_url = reverse_lazy('vendas:listaorcamentovendaview')
success_message = "<b>Orçamento de venda %(id)s </b>adicionado com sucesso."
permission_codename = 'add_orcamentovenda'
def view_context(self, context):
context['title_complete'] = 'ADICIONAR COTAÇÃO'
context['return_url'] = reverse_lazy('vendas:listaorcamentovendaview')
return context
def get(self, request, *args, **kwargs):
form_class = self.get_form_class()
return super(AdicionarOrcamentoVendaView, self).get(request, form_class, *args, **kwargs)
def post(self, request, *args, **kwargs):
form_class = self.get_form_class()
return super(AdicionarOrcamentoVendaView, self).post(request, form_class, *args, **kwargs)
class AdicionarPedidoVendaView(AdicionarVendaView):
form_class = PedidoVendaForm
template_name = "vendas/pedido_venda/pedido_venda_add.html"
success_url = reverse_lazy('vendas:listapedidovendaview')
success_message = "<b>Pedido de venda %(id)s </b>adicionado com sucesso."
permission_codename = 'add_pedidovenda'
def view_context(self, context):
context['title_complete'] = 'ADICIONAR PEDIDO DE VENDA'
context['return_url'] = reverse_lazy('vendas:listapedidovendaview')
return context
def get(self, request, *args, **kwargs):
form_class = self.get_form_class()
return super(AdicionarPedidoVendaView, self).get(request, form_class, *args, **kwargs)
def post(self, request, *args, **kwargs):
form_class = self.get_form_class()
return super(AdicionarPedidoVendaView, self).post(request, form_class, *args, **kwargs)
class ProspectListView(CustomListView):
template_name = 'vendas/prospect/prospect_list.html'
model = Prospect
context_object_name = 'all_prospects'
success_url = reverse_lazy('vendas:listaprospectview')
permission_codename = 'view_prospect'
def get_context_data(self, **kwargs):
context = super(ProspectListView, self).get_context_data(**kwargs)
return self.view_context(context)
def view_context(self, context):
context['title_complete'] = 'PROSPECTS'
context['add_url'] = reverse_lazy('vendas:addprospectview')
context['excursoes'] = Categoria.objects.all()
return context
def post(self, request, *args, **kwargs):
if request.POST.get('send_email'):
            # Send emails
to_email = []
for key, value in request.POST.items():
if key[:5] == 'email' and value == "on":
prospect = Prospect.objects.get(pk=key[6:])
to_email.append(prospect.email)
contato = ContatoProspect()
contato.prospect = prospect
contato.tipo_contato = '6'
contato.observacao = 'Email com apresentações enviado pelo sistema.'
contato.emissor = request.user
contato.save()
excursao = Categoria.objects.get(pk=request.POST.get('excursao'))
pacotes = Produto.objects.filter(categoria=excursao)
context_email = {}
#template = render_to_string('template...',context_email)
subject = 'Apresentação de Pacotes - ' + excursao.categoria_desc
from_email = '<EMAIL>'
text_content = excursao.categoria_desc
msg = EmailMultiAlternatives(subject, text_content, from_email, to_email)
#msg.attach_alternative(template, "text/html")
for pacote in pacotes:
for documento in pacote.documentos.all():
msg.attach(documento.arquivo.name, documento.arquivo.read())
msg.send()
return redirect(self.success_url)
if request.POST.get('remove_itens'):
            # Remove selected items
if self.check_user_delete_permission(request, self.model):
for key, value in request.POST.items():
if key[:6] == 'remove' and value == "on":
print(key[7:])
prospect = Prospect.objects.get(pk=key[7:])
prospect.delete()
return redirect(self.success_url)
class VendaListView(CustomListView):
def get_context_data(self, **kwargs):
context = super(VendaListView, self).get_context_data(**kwargs)
return self.view_context(context)
class OrcamentoVendaListView(VendaListView):
template_name = 'vendas/orcamento_venda/orcamento_venda_list.html'
model = OrcamentoVenda
context_object_name = 'all_orcamentos'
success_url = reverse_lazy('vendas:listaorcamentovendaview')
permission_codename = 'view_orcamentovenda'
def view_context(self, context):
context['title_complete'] = 'COTAÇÕES'
context['add_url'] = reverse_lazy('vendas:addorcamentovendaview')
return context
class OrcamentoVendaVencidosListView(OrcamentoVendaListView):
success_url = reverse_lazy('vendas:listaorcamentovendavencidoview')
def view_context(self, context):
context['title_complete'] = 'ORÇAMENTOS DE VENDA VENCIDOS'
context['add_url'] = reverse_lazy('vendas:addorcamentovendaview')
return context
def get_queryset(self):
return OrcamentoVenda.objects.filter(data_vencimento__lte=datetime.now().date(), status='0')
class OrcamentoVendaVencimentoHojeListView(OrcamentoVendaListView):
success_url = reverse_lazy('vendas:listaorcamentovendahojeview')
def view_context(self, context):
context['title_complete'] = 'ORÇAMENTOS DE VENDA COM VENCIMENTO DIA ' + \
datetime.now().date().strftime('%d/%m/%Y')
context['add_url'] = reverse_lazy('vendas:addorcamentovendaview')
return context
def get_queryset(self):
return OrcamentoVenda.objects.filter(data_vencimento=datetime.now().date(), status='0')
class PedidoVendaListView(VendaListView):
template_name = 'vendas/pedido_venda/pedido_venda_list.html'
model = PedidoVenda
context_object_name = 'all_pedidos'
success_url = reverse_lazy('vendas:listapedidovendaview')
permission_codename = 'view_pedidovenda'
def view_context(self, context):
context['title_complete'] = 'PEDIDOS DE VENDA'
context['add_url'] = reverse_lazy('vendas:addpedidovendaview')
return context
class PedidoVendaAtrasadosListView(PedidoVendaListView):
success_url = reverse_lazy('vendas:listapedidovendaatrasadosview')
def view_context(self, context):
context['title_complete'] = 'PEDIDOS DE VENDA ATRASADOS'
context['add_url'] = reverse_lazy('vendas:addpedidovendaview')
return context
def get_queryset(self):
return PedidoVenda.objects.filter(data_entrega__lte=datetime.now().date(), status='0')
class PedidoVendaEntregaHojeListView(PedidoVendaListView):
success_url = reverse_lazy('vendas:listapedidovendahojeview')
def view_context(self, context):
context['title_complete'] = 'PEDIDOS DE VENDA COM ENTREGA DIA ' + \
datetime.now().date().strftime('%d/%m/%Y')
context['add_url'] = reverse_lazy('vendas:addpedidovendaview')
return context
def get_queryset(self):
return PedidoVenda.objects.filter(data_entrega=datetime.now().date(), status='0')
class EditarProspectView(CustomUpdateView):
model = Prospect
form_class = ProspectForm
template_name = "vendas/prospect/prospect_edit.html"
success_url = reverse_lazy('vendas:listaprospectview')
success_message = "<b>Prospect %(id)s </b>editado com sucesso."
permission_codename = 'change_prospect'
def get_success_message(self, cleaned_data):
return self.success_message % dict(cleaned_data, id=self.object.pk)
def get_context_data(self, **kwargs):
context = super(EditarProspectView, self).get_context_data(**kwargs)
return self.view_context(context)
def view_context(self, context):
context['title_complete'] = 'EDITAR PROSPECT N°' + \
str(self.object.id)
context['return_url'] = reverse_lazy('vendas:listaprospectview')
context['add_url'] = reverse_lazy('vendas:addcontatoprospectview', kwargs={'pk':self.object.pk})
context['contatos'] = ContatoProspect.objects.filter(prospect=self.object)
context['editar_view'] = True
return context
def get(self, request, *args, **kwargs):
self.object = self.get_object()
form = self.get_form(self.form_class)
form_contato = ContatoProspectForm()
if not request.user.is_superuser:
form.fields['emissor'].widget.attrs['disabled'] = True
form.fields['passageiro'].widget.attrs['disabled'] = True
form.fields['cliente'].widget.attrs['disabled'] = True
form.fields['escola'].widget.attrs['disabled'] = True
form.fields['email'].widget.attrs['disabled'] = True
form.fields['telefone'].widget.attrs['disabled'] = True
form.fields['observacao'].widget.attrs['disabled'] = True
return self.render_to_response(self.get_context_data(form=form, form_contato=form_contato))
def post(self, request, *args, **kwargs):
self.object = self.get_object()
form = self.get_form(self.form_class)
form_contato = ContatoProspectForm()
if not request.user.is_superuser:
form.fields['emissor'].widget.attrs['disabled'] = True
form.fields['passageiro'].widget.attrs['disabled'] = True
form.fields['cliente'].widget.attrs['disabled'] = True
form.fields['escola'].widget.attrs['disabled'] = True
form.fields['email'].widget.attrs['disabled'] = True
form.fields['telefone'].widget.attrs['disabled'] = True
form.fields['observacao'].widget.attrs['disabled'] = True
if form.is_valid():
self.object = form.save(commit=False)
if not request.user.is_superuser:
self.object.emissor = request.user
self.object.save()
if not Cliente.objects.filter(nome_razao_social=self.object.cliente):
cliente = Cliente()
cliente.nome_razao_social = self.object.cliente
if not request.user.is_superuser:
cliente.emissor = request.user
cliente.save()
return self.form_valid(form)
return self.form_invalid(form=form,form_contato=form_contato)
class ContratarProspectView(CustomView):
permission_codename = ['change_prospect']
def get(self, request, *args, **kwargs):
pk = kwargs.get('pk', None)
if not pk:
return redirect(reverse_lazy('vendas:listaprospectview'))
prospect = Prospect.objects.get(pk=pk)
if prospect.emissor:
return redirect(reverse_lazy('vendas:listaprospectview'))
prospect.emissor = request.user
prospect.save()
return redirect(reverse_lazy('vendas:listaprospectview'))
class EditarVendaView(CustomUpdateView):
def get_success_message(self, cleaned_data):
return self.success_message % dict(cleaned_data, id=self.object.pk)
def get_context_data(self, **kwargs):
context = super(EditarVendaView, self).get_context_data(**kwargs)
return self.view_context(context)
def get(self, request, form_class, *args, **kwargs):
        form = self.get_form(form_class)
form.initial['total_sem_imposto'] = self.object.get_total_sem_imposto()
produtos_form = ItensVendaFormSet(
instance=self.object, prefix='produtos_form')
itens_list = ItensVenda.objects.filter(venda_id=self.object.id)
produtos_form.initial = [{'total_sem_desconto': item.get_total_sem_desconto(),
'total_impostos': item.get_total_impostos(),
'total_com_impostos': item.get_total_com_impostos()} for item in itens_list]
pagamento_form = PagamentoFormSet(
instance=self.object, prefix='pagamento_form')
if ItensVenda.objects.filter(venda_id=self.object.pk).count():
produtos_form.extra = 0
if Pagamento.objects.filter(venda_id=self.object.pk).count():
pagamento_form.extra = 0
return self.render_to_response(self.get_context_data(form=form, produtos_form=produtos_form, pagamento_form=pagamento_form))
def post(self, request, form_class, *args, **kwargs):
        # Strip '.' (thousands separator) from decimal fields
req_post = request.POST.copy()
for key in req_post:
if ('desconto' in key or
'quantidade' in key or
'valor' in key or
'frete' in key or
'despesas' in key or
'seguro' in key or
'total' in key):
req_post[key] = req_post[key].replace('.', '')
request.POST = req_post
form = self.get_form(form_class)
produtos_form = ItensVendaFormSet(
request.POST, prefix='produtos_form', instance=self.object)
pagamento_form = PagamentoFormSet(
request.POST, prefix='pagamento_form', instance=self.object)
if (form.is_valid() and produtos_form.is_valid() and pagamento_form.is_valid()):
self.object = form.save(commit=False)
self.object.save()
for pform in produtos_form:
if pform.cleaned_data != {}:
itens_venda_obj = pform.save(commit=False)
itens_venda_obj.venda_id = self.object
itens_venda_obj.calcular_pis_cofins()
itens_venda_obj.save()
pagamento_form.instance = self.object
pagamento_form.save()
return self.form_valid(form)
return self.form_invalid(form=form,
produtos_form=produtos_form,
pagamento_form=pagamento_form)
class EditarOrcamentoVendaView(EditarVendaView):
form_class = OrcamentoVendaForm
model = OrcamentoVenda
template_name = "vendas/orcamento_venda/orcamento_venda_edit.html"
success_url = reverse_lazy('vendas:listaorcamentovendaview')
success_message = "<b>Orçamento de venda %(id)s </b>editado com sucesso."
permission_codename = 'change_orcamentovenda'
def view_context(self, context):
context['title_complete'] = 'EDITAR ORÇAMENTO DE VENDA N°' + \
str(self.object.id)
context['return_url'] = reverse_lazy('vendas:listaorcamentovendaview')
context['pagamento_form'] = PagamentoForm()
context['pagamentos'] = Pagamento.objects.filter(venda_id=self.object.id)
context['venda_id'] = self.object.id
return context
def get(self, request, *args, **kwargs):
self.object = self.get_object()
form_class = self.get_form_class()
return super(EditarOrcamentoVendaView, self).get(request, form_class, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
form_class = self.get_form_class()
return super(EditarOrcamentoVendaView, self).post(request, form_class, *args, **kwargs)
class EditarPedidoVendaView(EditarVendaView):
form_class = PedidoVendaForm
model = PedidoVenda
template_name = "vendas/pedido_venda/pedido_venda_edit.html"
success_url = reverse_lazy('vendas:listapedidovendaview')
success_message = "<b>Pedido de venda %(id)s </b>editado com sucesso."
permission_codename = 'change_pedidovenda'
def view_context(self, context):
context['title_complete'] = 'EDITAR PEDIDO DE VENDA N°' + \
str(self.object.id)
context['return_url'] = reverse_lazy('vendas:listapedidovendaview')
context['pagamento_form'] = PagamentoForm()
context['pagamentos'] = Pagamento.objects.filter(venda_id=self.object.id)
context['venda_id'] = self.object.id
return context
def get(self, request, *args, **kwargs):
self.object = self.get_object()
form_class = self.get_form_class()
return super(EditarPedidoVendaView, self).get(request, form_class, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
form_class = self.get_form_class()
return super(EditarPedidoVendaView, self).post(request, form_class, *args, **kwargs)
class GerarPedidoVendaView(CustomView):
permission_codename = ['add_pedidovenda', 'change_pedidovenda', ]
def get(self, request, *args, **kwargs):
orcamento_id = kwargs.get('pk', None)
orcamento = OrcamentoVenda.objects.get(id=orcamento_id)
itens_venda = orcamento.itens_venda.all()
pagamentos = orcamento.parcela_pagamento.all()
novo_pedido = PedidoVenda()
for field in orcamento._meta.fields:
setattr(novo_pedido, field.name, getattr(orcamento, field.name))
novo_pedido.venda_ptr = None
novo_pedido.pk = None
novo_pedido.id = None
novo_pedido.status = '0'
        orcamento.status = '1'  # 'Baixado' (quote closed out / converted to an order)
orcamento.save()
novo_pedido.orcamento = orcamento
novo_pedido.save()
for item in itens_venda:
item.pk = None
item.id = None
item.save()
novo_pedido.itens_venda.add(item)
for pagamento in pagamentos:
pagamento.pk = None
pagamento.id = None
pagamento.save()
novo_pedido.parcela_pagamento.add(pagamento)
return redirect(reverse_lazy('vendas:editarpedidovendaview', kwargs={'pk': novo_pedido.id}))
class CancelarOrcamentoVendaView(CustomView):
permission_codename = 'change_orcamentovenda'
def get(self, request, *args, **kwargs):
venda_id = kwargs.get('pk', None)
instance = OrcamentoVenda.objects.get(id=venda_id)
instance.status = '2'
instance.save()
return redirect(reverse_lazy('vendas:editarorcamentovendaview', kwargs={'pk': instance.id}))
class CancelarPedidoVendaView(CustomView):
permission_codename = 'change_pedidovenda'
def get(self, request, *args, **kwargs):
venda_id = kwargs.get('pk', None)
instance = PedidoVenda.objects.get(id=venda_id)
instance.status = '2'
instance.save()
return redirect(reverse_lazy('vendas:editarpedidovendaview', kwargs={'pk': instance.id}))
class GerarCopiaVendaView(CustomView):
def get(self, request, instance, redirect_url, *args, **kwargs):
itens_venda = instance.itens_venda.all()
pagamentos = instance.parcela_pagamento.all()
instance.pk = None
instance.id = None
instance.status = '0'
instance.save()
for item in itens_venda:
item.pk = None
item.id = None
item.save()
instance.itens_venda.add(item)
for pagamento in pagamentos:
pagamento.pk = None
pagamento.id = None
pagamento.save()
instance.parcela_pagamento.add(pagamento)
return redirect(reverse_lazy(redirect_url, kwargs={'pk': instance.id}))
class GerarCopiaOrcamentoVendaView(GerarCopiaVendaView):
permission_codename = 'add_orcamentovenda'
def get(self, request, *args, **kwargs):
venda_id = kwargs.get('pk', None)
instance = OrcamentoVenda.objects.get(id=venda_id)
redirect_url = 'vendas:editarorcamentovendaview'
return super(GerarCopiaOrcamentoVendaView, self).get(request, instance, redirect_url, *args, **kwargs)
class GerarCopiaPedidoVendaView(GerarCopiaVendaView):
permission_codename = 'add_pedidovenda'
def get(self, request, *args, **kwargs):
venda_id = kwargs.get('pk', None)
instance = PedidoVenda.objects.get(id=venda_id)
redirect_url = 'vendas:editarpedidovendaview'
return super(GerarCopiaPedidoVendaView, self).get(request, instance, redirect_url, *args, **kwargs)
class GerarPDFVenda(CustomView):
def gerar_pdf(self, title, venda, user_id):
resp = HttpResponse(content_type='application/pdf')
venda_pdf = io.BytesIO()
venda_report = VendaReport(queryset=[venda, ])
venda_report.title = title
venda_report.band_page_footer = venda_report.banda_foot
try:
usuario = Usuario.objects.get(pk=user_id)
m_empresa = MinhaEmpresa.objects.get(m_usuario=usuario)
flogo = m_empresa.m_empresa.logo_file
logo_path = '{0}{1}'.format(MEDIA_ROOT, flogo.name)
if flogo != 'imagens/logo.png':
venda_report.topo_pagina.inserir_logo(logo_path)
venda_report.band_page_footer.inserir_nome_empresa(
m_empresa.m_empresa.nome_razao_social)
if m_empresa.m_empresa.endereco_padrao:
venda_report.band_page_footer.inserir_endereco_empresa(
m_empresa.m_empresa.endereco_padrao.format_endereco_completo)
if m_empresa.m_empresa.telefone_padrao:
venda_report.band_page_footer.inserir_telefone_empresa(
m_empresa.m_empresa.telefone_padrao.telefone)
except:
pass
venda_report.topo_pagina.inserir_data_emissao(venda.data_emissao)
if isinstance(venda, OrcamentoVenda):
venda_report.topo_pagina.inserir_data_validade(
venda.data_vencimento)
elif isinstance(venda, PedidoVenda):
venda_report.topo_pagina.inserir_data_entrega(venda.data_entrega)
venda_report.band_page_header = venda_report.topo_pagina
if venda.cliente.tipo_pessoa == 'PJ':
venda_report.dados_cliente.inserir_informacoes_pj()
elif venda.cliente.tipo_pessoa == 'PF':
venda_report.dados_cliente.inserir_informacoes_pf()
if venda.cliente.endereco_padrao:
venda_report.dados_cliente.inserir_informacoes_endereco()
if venda.cliente.telefone_padrao:
venda_report.dados_cliente.inserir_informacoes_telefone()
if venda.cliente.email_padrao:
venda_report.dados_cliente.inserir_informacoes_email()
venda_report.band_page_header.child_bands.append(
venda_report.dados_cliente)
venda_report.dados_produtos.band_detail.set_band_height(
len(ItensVenda.objects.filter(venda_id=venda)))
venda_report.banda_produtos.elements.append(
venda_report.dados_produtos)
venda_report.band_page_header.child_bands.append(
venda_report.banda_produtos)
venda_report.band_page_header.child_bands.append(
venda_report.totais_venda)
if venda.cond_pagamento:
venda_report.banda_pagamento.elements.append(
venda_report.dados_pagamento)
venda_report.band_page_header.child_bands.append(
venda_report.banda_pagamento)
venda_report.observacoes.inserir_vendedor()
venda_report.band_page_header.child_bands.append(
venda_report.observacoes)
venda_report.generate_by(PDFGenerator, filename=venda_pdf)
pdf = venda_pdf.getvalue()
resp.write(pdf)
return resp
class GerarPDFOrcamentoVenda(GerarPDFVenda):
permission_codename = 'change_orcamentovenda'
def get(self, request, *args, **kwargs):
venda_id = kwargs.get('pk', None)
if not venda_id:
return HttpResponse('Objeto não encontrado.')
obj = OrcamentoVenda.objects.get(pk=venda_id)
title = 'Orçamento de venda nº {}'.format(venda_id)
return self.gerar_pdf(title, obj, request.user.id)
class GerarPDFPedidoVenda(GerarPDFVenda):
permission_codename = 'change_pedidovenda'
def get(self, request, *args, **kwargs):
venda_id = kwargs.get('pk', None)
if not venda_id:
return HttpResponse('Objeto não encontrado.')
obj = PedidoVenda.objects.get(pk=venda_id)
title = 'Pedido de venda nº {}'.format(venda_id)
return self.gerar_pdf(title, obj, request.user.id)
``` |
{
"source": "3zachm/recursive-reminders",
"score": 2
} |
#### File: 3zachm/recursive-reminders/run.py
```python
import discord
import configparser
import os
import io
import asyncio
import json
import atexit
import time
from discord.errors import Forbidden
from discord.ext import commands
import utils.embed_generator as embeds
import utils.file_manager as files
import utils.log_manager as logs
import utils.request_manager as requests
import utils.commands as cmds
import utils.screen as screen
import utils.eval as evalu
import utils.utils as utils
boot_time = time.time()
config = configparser.ConfigParser()
files.script_dir = os.path.dirname(os.path.realpath(__file__))
files.delete_contents(files.request_dir())
# generate empty config files
files.make_config(files.config_loc())
files.make_prefixes(files.prefix_loc())
files.make_dir(files.request_dir())
# open config file
with open(files.config_loc()) as c:
discord_config = c.read()
config = configparser.RawConfigParser(allow_no_value=True)
config.read_file(io.StringIO(discord_config))
try:
default_prefix = config.get('discord', 'default_prefix')
generate_logs = config.getboolean('python', 'generate_logs')
bot_token = config.get('discord', 'token')
tmp_screen = config.getboolean('python', 'enable_curses')
enable_eval = config.getboolean('python', 'enable_eval')
except (configparser.NoSectionError, configparser.NoOptionError) as e:
print(e)
print("Ensure config file has all entries present. If you recently pulled an update, consider regenerating the config")
quit()
#logging
if generate_logs:
logger = logs.init_logs(files.logs_dir())
# initializes a server in the prefix file
def initPrefix(serverID):
with open(files.prefix_loc(), 'r') as r:
prefixes = json.load(r)
prefixes[str(serverID)] = default_prefix
with open(files.prefix_loc(), 'w') as w:
json.dump(prefixes, w, indent=4)
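# Illustrative note (not in the original source): given initPrefix above and
# get_prefix below, prefixes.json is expected to map guild IDs (string keys)
# to their prefix, e.g.
#   {"123456789012345678": "!", "876543210987654321": "."}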
def get_prefix(bot, message):
if not message.guild:
return ""
with open(files.prefix_loc(), 'r') as r:
prefixes = json.load(r)
try:
pfx = prefixes[str(message.guild.id)]
except KeyError:
initPrefix(message.guild.id)
with open(files.prefix_loc(), 'r') as r:
prefixes = json.load(r)
pfx = prefixes[str(message.guild.id)]
return pfx
bot = commands.Bot(command_prefix = get_prefix)
bot.remove_command('help')
bot.coroutineList = []
bot.reset_warning = False
use_screen = tmp_screen
@bot.event
async def on_ready():
if not hasattr(bot, 'appinfo'):
bot.appinfo = await bot.application_info()
# generate bot owner
files.make_owners(files.owners_loc(), bot)
await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.playing, name="booting..."))
# start periodic presence update
bot.presence_routine = asyncio.create_task(update_presence())
if use_screen:
asyncio.create_task(screen.loop(bot))
else:
print("Screen disabled\n\nRunning...")
@bot.event
async def on_guild_join(guild):
initPrefix(guild.id)
if generate_logs:
logs.log("Joined \"" + guild.name + "\" - " + str(guild.id), logger)
@bot.event
async def on_command_error(ctx, error):
if isinstance(error, commands.errors.CommandNotFound):
return
if isinstance(error, commands.errors.CheckFailure):
return
if isinstance(error, commands.errors.MissingPermissions) and ctx.guild is None:
return
if isinstance(error, commands.errors.MissingRequiredArgument):
if str(ctx.command) == "reminder add":
await ctx.send(embed=embeds.reminder_add_missing(ctx, guild_prefix(ctx), bot))
return
if str(ctx.command) == "reminder stop":
if len(requests.retrieve_list(ctx.author.id, files.request_dir())) == 1:
await reminder_stop(ctx, 1)
return
await ctx.send(embed=embeds.reminder_stop_missing(ctx, guild_prefix(ctx), bot))
return
if str(ctx.command) == "reminder move":
if len(requests.retrieve_list(ctx.author.id, files.request_dir())) == 1:
await reminder_move(ctx, 1)
return
await ctx.send(embed=embeds.reminder_move_missing(ctx, guild_prefix(ctx), bot))
return
# unhandled exception occurred
if isinstance(error, commands.errors.CommandInvokeError):
await ctx.send(embed=embeds.unhandled(ctx, error))
if generate_logs:
logs.exception(ctx, error, logger)
else:
raise error
@bot.event
async def on_message(message):
ctx = await bot.get_context(message)
msg = message.content
if ctx.guild is None and (message.content.startswith('!') or message.content.startswith('.')):
await ctx.send("I am prefix-less in DMs! Simply type the command like `help`")
if bot.user.mentioned_in(message):
if message.mention_everyone:
return
if message.mentions[0] == bot.user:
# get first mention value up to a space
try:
first_mention = msg[0 : msg.index(' ')]
except ValueError:
first_mention = ''
if ctx.guild is None:
embed = embeds.prefix_dms(ctx)
await ctx.send(embed=embed)
# if mention is first and has a space after it
elif first_mention == (f'<@!{bot.user.id}>') and ctx.message.author.guild_permissions.administrator:
index = msg.index(' ')
await prefix(ctx, msg[index + 1 : index + 2]) # i'm bad at python?
else:
pfx = str(get_prefix(bot=bot, message=message))
if pfx == '':
pfx = 'None'
embed = embeds.prefix_current(ctx, guild_prefix(ctx))
await ctx.send(embed=embed)
await bot.process_commands(message)
@bot.command(name="help", help=cmds.help_help, description=cmds.help_args)
async def help(ctx):
await embeds.help(ctx, guild_prefix(ctx), bot)
@bot.command(name="owners", help=cmds.owners_help, description=cmds.owners_args)
async def owners(ctx):
await ctx.send(embed=embeds.owners(ctx))
@bot.group(name="system", aliases=["sys"], invoke_without_command=True)
@commands.check(cmds.owner_check)
async def system(ctx):
pass
@system.command(name="pt")
async def system_pt(ctx):
if cmds.owner_check(ctx):
await ctx.send("You have owner permissions.")
@system.command(name="fstop")
@commands.check(cmds.owner_check)
async def system_fstop(ctx, rq_ID):
requests.remove(files.request_dir(), int(rq_ID), bot.coroutineList)
await ctx.send("Successfully stopped.")
@system.command(name="global_dm")
@commands.check(cmds.owner_check)
async def system_global_dm(ctx, *, dm_msg):
bot.reset_warning = True
bot.presence_routine.cancel()
await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.playing, name="reseting soon!"))
rqs = files.get_json_userids(files.request_dir())
embed = embeds.global_dm_message(ctx, dm_msg)
for uid in rqs:
user = await bot.fetch_user(uid)
try:
await user.send(embed=embed)
except Forbidden:
pass
# modified from Copyright (c) 2021 beeracademy (https://github.com/beeracademy/discord-bot)
# thank you for being smarter than me <3
@system.command(name="evaluate", aliases=["eval"])
@commands.check(cmds.eval_check)
async def evaluate(ctx, *, stmts):
if ctx.message.author.id != <PASSWORD>: # im checking twice idc
return
stmts = stmts.strip().replace("```python", "```").strip("`")
# not like the filesystem or uptime of my pi matters much anyways, would be more concerned about my account
exit_str = ["/", "\\", "..", "5c", "2f", "2e", "bot_token"]
if any(exit_strs in stmts for exit_strs in exit_str):
await ctx.send("```Direct file operation-related characters not allowed```")
return
if not stmts:
await ctx.send("After stripping `'s, stmts can't be empty.")
return
res = await evalu.eval_stmts(stmts, {"bot": bot, "ctx": ctx})
escaped = evalu.code_block_escape(repr(res))
message = f"```python\n{escaped}\n```"
if len(message) > 2000:
prefix = "Truncated result to length 0000:\n"
suffix = "\n```"
message = message.rstrip("`").strip()
new_length = 2000 - len(prefix) - len(suffix)
prefix = prefix.replace("0000", str(new_length))
message = prefix + message[:new_length] + suffix
await ctx.send(message)
@bot.command(name="uptime", help=cmds.uptime_help, description=cmds.uptime_args)
async def uptime(ctx):
await ctx.send("**Uptime**: `{0}`\n**Server**: `{1}`".format(utils.get_uptime(boot_time), utils.get_sysuptime()))
@bot.command(name="ping", aliases=["test"], help=cmds.ping_help, description=cmds.ping_args)
async def ping(ctx):
latency = bot.latency*1000
sent_time = ctx.message.created_at
botmsg = await ctx.send("**Response Latency**: `{0}ms`\n**Discord Latency**: ".format(round(latency, 0)))
botmsg_time = botmsg.created_at
msg_time = float((botmsg_time - sent_time).total_seconds()*1000)
await botmsg.edit(content=botmsg.content + " `{0}ms`".format(round(msg_time, 0)))
@bot.command(name="prefix", help=cmds.prefix_help, description=cmds.prefix_args)
@commands.has_permissions(administrator=True)
async def prefix(ctx, prefix=None):
if prefix is None:
embed = embeds.prefix_current(ctx, guild_prefix(ctx))
elif len(prefix) > 10:
embed = embeds.prefix_length(ctx)
else:
with open(files.prefix_loc(), 'r') as r:
prefixes = json.load(r)
prefixes[str(ctx.guild.id)] = prefix
with open(files.prefix_loc(), 'w') as w:
json.dump(prefixes, w, indent=4)
if prefix == '':
prefix = 'None'
embed = embeds.prefix_change(ctx, prefix)
await ctx.send(embed=embed)
@bot.command(name="invite", help=cmds.invite_help, description=cmds.invite_args)
async def invite(ctx):
await ctx.send(embed=embeds.invite_link(ctx))
@bot.group(name="reminder", aliases=["r"], invoke_without_command=True, help=cmds.reminder_help, description=cmds.reminder_args)
async def reminder(ctx):
embed = embeds.reminder_base(ctx, guild_prefix(ctx), bot)
await ctx.send(embed=embed)
@reminder.command(name="add", aliases=["create"], help=cmds.reminder_add_help, description=cmds.reminder_add_args)
async def reminder_add(ctx, t, *, rqname):
try:
t = int(t)
except ValueError:
await ctx.send("That time doesn't seem like a real integer. Decimals aren't supported at the moment.")
return
if len(rqname) > 50:
embed = embeds.request_length(ctx, "reminder name", "50 characters")
await ctx.send(embed=embed)
elif t > 720: #currently in minutes
embed = embeds.request_length(ctx, "time", "12 hours")
await ctx.send(embed=embed)
elif t == 0:
await ctx.send("You don't need a 0 minute timer :)")
elif t < 0:
await ctx.send("Counting by negatives seems dangerous")
elif len(requests.retrieve_list(ctx.author.id, files.request_dir())) > 9:
embed = embeds.reminder_list_length(ctx)
await ctx.send(embed=embed)
else:
t = int(t) * 60
rq_json = requests.create(ctx, files.request_dir(), rqname, t)
embed = embeds.reminder_set(ctx, guild_prefix(ctx), t, rqname)
await ctx.send(embed=embed)
if bot.reset_warning:
await ctx.send("**Please note that a reset is expected to happen soon! Don't rely on this timer for the next hour or so**" +
"\nThe bot's presence will return to normal when it's been reset")
timer_task = asyncio.create_task(timer(ctx, rq_json), name=ctx.message.id)
bot.coroutineList.append([ctx.message.id, timer_task])
@reminder.command(name="list", help=cmds.reminder_list_help, description=cmds.reminder_list_args)
async def reminder_list(ctx):
requests_list = requests.retrieve_list(ctx.message.author.id, files.request_dir())
await embeds.reminder_list(ctx, requests_list)
@reminder.command(name="stop", aliases=["remove"], help=cmds.reminder_stop_help, description=cmds.reminder_stop_args)
async def reminder_stop(ctx, request):
user = ctx.message.author
try:
request = int(request)
except ValueError:
try:
if str(request) == "all":
rq_list = requests.retrieve_list(ctx.message.author.id, files.request_dir())
if len(rq_list) < 1:
await ctx.send(embed=embeds.reminder_none(ctx, guild_prefix(ctx)))
return
for rq in rq_list:
requests.remove(files.request_dir(), rq['request'], bot.coroutineList)
await ctx.send(embed=embeds.reminder_cancel_all(ctx))
return
except ValueError:
pass
await ctx.send(embed=embeds.reminder_stop_missing(ctx, guild_prefix(ctx), bot))
return
if requests.retrieve_list(user.id, files.request_dir()) == []:
embed=embeds.reminder_none(ctx, guild_prefix(ctx))
else:
try:
rq_json = requests.retrieve_json(user.id, files.request_dir(), request)
requests.remove(files.request_dir(), rq_json['request'], bot.coroutineList)
embed = embeds.reminder_cancel(ctx, rq_json)
except IndexError:
embed = embeds.reminder_cancel_index(ctx, guild_prefix(ctx), request)
await ctx.send(embed=embed)
@reminder.command(name="move", help=cmds.reminder_move_help, description=cmds.reminder_move_args)
async def reminder_move(ctx, request):
try:
request = int(request)
except ValueError:
await ctx.send(embed=embeds.reminder_move_missing(ctx))
return
try:
rq_json = requests.retrieve_json(ctx.author.id, files.request_dir(), request)
if ctx.guild is None:
channel = 1
guild = ""
else:
channel = ctx.channel.id
guild = ctx.guild.name
requests.edit_json_val(rq_json['user'], files.request_dir(), rq_json['request'], 'channel', channel)
requests.edit_json_val(rq_json['user'], files.request_dir(), rq_json['request'], 'source', ctx.message.id)
rq_new_json = requests.edit_json_val(rq_json['user'], files.request_dir(), rq_json['request'], 'guild', guild)
await ctx.send(embed=embeds.reminder_move_success(ctx, rq_new_json))
except IndexError:
await ctx.send(embed=embeds.reminder_cancel_index(ctx, guild_prefix(ctx), request))
@reminder.command(name="wait", help=cmds.reminder_wait_help, description=cmds.reminder_wait_args)
async def reminder_wait(ctx, request = 1):
try:
request = int(request)
except ValueError:
await ctx.send(embed=embeds.reminder_wait_missing(ctx))
return
try:
rq_json = requests.retrieve_json(ctx.author.id, files.request_dir(), request)
if rq_json['wait'] == True:
rq_new_json = requests.edit_json_val(rq_json['user'], files.request_dir(), rq_json['request'], 'wait', False)
embed = embeds.reminder_wait_off(ctx, rq_new_json)
else:
rq_new_json = requests.edit_json_val(rq_json['user'], files.request_dir(), rq_json['request'], 'wait', True)
embed = embeds.reminder_wait_on(ctx, rq_new_json)
await ctx.send(embed=embed)
except IndexError:
await ctx.send(embed=embeds.reminder_cancel_index(ctx, guild_prefix(ctx), request))
async def timer(ctx, rq_json):
t = rq_json["time"]
react = None
while(True):
await asyncio.sleep(t)
rq_json = requests.retrieve_json_id(files.request_dir(), ctx.author.id, rq_json['request'])
if rq_json["wait"] == True:
await asyncio.sleep(rq_json["added"])
if react is not None:
react.cancel()
react = asyncio.create_task(embeds.timer_end(ctx, guild_prefix(ctx), rq_json))
async def update_presence():
while(True):
await asyncio.sleep(120)
rqs_len = len(bot.coroutineList)
await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=str(rqs_len) + " reminders"))
# offhand because discord.py needs get_prefix to have args
def guild_prefix(ctx):
return get_prefix(bot=bot, message=ctx.message)
bot.run(bot_token)
atexit.register(files.delete_contents, files.request_dir())
``` |
{
"source": "3ZadeSSG/TwitterSentimentAnalysisApp",
"score": 3
} |
#### File: TwitterSentimentAnalysisApp/2. Training with preprocessed data/processedDataHelper.py
```python
import numpy as np
def saveNumpyArray(myArray,filename):
np.save(filename,myArray)
print('Saved successfully!')
def loadNumpyArray(filename):
return np.load(filename)
def loadDict(filename):
return np.load(filename,allow_pickle=True).item()
``` |