metadata | text
---|---|
{
"source": "0xConnorRhodes/dotfiles",
"score": 3
} |
#### File: scripts/cutitout_shared/cutitout.py
```python
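# Margin (in seconds) of audio kept on either side of each detected sound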
clip_margin = 0.2
assert clip_margin >= 0.0
# How loud should noise be to be considered a sound?
audio_threshold = 0.02
assert audio_threshold > 0.0 and audio_threshold <= 1.0
# Minimum clip length (in seconds)
# Sounds shorter than that will be considered noise and cut.
min_clip_length = 0.2
assert min_clip_length > 0.0
# Minimum silence length to skip (in seconds)
min_skip_length = 5.0
assert min_skip_length > 2 * clip_margin
import audioop
import subprocess
import sys
def get_audio_streams(filename):
streams = []
probe = subprocess.check_output(
["ffprobe", "-show_streams", filename],
encoding="utf-8",
stderr=subprocess.DEVNULL,
)
for line in probe.split("\n"):
if line == "[STREAM]":
streams.append({})
try:
key, value = line.split("=")
streams[-1][key] = value
except ValueError:
pass
return list(filter(lambda s: s["codec_type"] == "audio", streams))
def print_skips(stream, sample_rate):
clips = []
clip_index = 0
loud_start = -1
# Get 10ms long audio fragments (* 2 because we get 2 bytes)
fragment_length = int(sample_rate * 0.01 * 2)
chunk_data = orig_audio.stdout.read(fragment_length)
while chunk_data:
# With *signed* 16 bit audio, the maximal absolute value is 2^15 = 32768.
volume = audioop.max(chunk_data, 2) / 32768
if loud_start == -1 and volume >= audio_threshold:
loud_start = clip_index
elif loud_start != -1 and volume < audio_threshold:
# Remove sounds that are too short to be important
if clip_index - loud_start > min_clip_length * 100:
clips.append((loud_start, clip_index))
loud_start = -1
chunk_data = orig_audio.stdout.read(fragment_length)
clip_index += 1
# Turn clips into skips
skips = []
last_skip = 0.0
index_to_time = lambda index: index / 100
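# Each index counts one 10 ms chunk, so dividing by 100 converts it to seconds.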
for clip in clips:
clip_start = index_to_time(clip[0])
clip_end = index_to_time(clip[1])
if clip_start - last_skip < min_skip_length:
last_skip = clip_end + clip_margin
else:
skips.append((last_skip + clip_margin, clip_start - clip_margin))
last_skip = clip_end + clip_margin
skips = ["{" + f"{v[0]},{v[1]}" + "}" for v in skips]
print("return {" + ",".join(skips) + "}")
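# The output is a Lua-style table of (start, end) pairs; given the mpv reference
# below, it is presumably consumed by a companion mpv Lua script.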
for filename in sys.argv[1:]:
for stream in get_audio_streams(filename):
index = int(stream["index"])
sample_rate = int(stream["sample_rate"])
orig_audio = subprocess.Popen(
[
"ffmpeg",
"-i",
filename,
# Output only one channel
"-ac",
"1",
# Output raw 16bit samples for fast processing
"-f",
"s16le",
# Open specific audio stream
"-map",
f"0:{index}",
# Only use one core to avoid making mpv lag
"-threads",
"1",
# Pipe to orig_audio
"pipe:1",
],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
)
print_skips(orig_audio.stdout, sample_rate)
# We're only using the first audio stream
break
``` |
{
"source": "0xcro3dile/SubdominSmasher",
"score": 3
} |
#### File: 0xcro3dile/SubdominSmasher/smash.py
```python
import socket
import concurrent.futures
TIMEOUT = 2.0 # You can change the duration
def isHTTPsAlive(childId, host):
try:
isHTTP = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
isHTTP.settimeout(TIMEOUT)
isHTTPS = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
isHTTPS.settimeout(TIMEOUT)
http = isHTTP.connect_ex((host, 80)) == 0
https = isHTTPS.connect_ex((host, 443)) == 0
isHTTP.close()
isHTTPS.close()
except:
(http, https) = (False, False)
if http or https:
print (host)
else:
pass
if __name__ == '__main__':
hostFile = str(input('List of Hosts [ex: hosts.txt] : '))
threads = int(input('Threads [ex: 30] : '))
Hosts = open(hostFile, 'r').read().splitlines()
with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as \
pool_executor:
for host in Hosts:
pool_executor.submit(isHTTPsAlive, 0, host)
``` |
{
"source": "0xcrypto/takeover",
"score": 2
} |
#### File: 0xcrypto/takeover/setup.py
```python
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "takeover.py",
version = "0.0.10",
author = "<NAME>",
author_email = "<EMAIL>",
description = ("This small script tries to detect subdomain takeovers from a list of domains. Fingerprints are taken from https://github.com/EdOverflow/can-i-take-over-xyz."),
license = "WTFPL",
keywords = "subdomain takeover",
url = "http://github.com/0xcrypto/takeover",
packages=['takeover'],
long_description=read('README.rst'),
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Topic :: Security",
"License :: Public Domain",
],
install_requires=[
'requests', 'dnspython', 'discord.py'
],
entry_points = {
'console_scripts': ['takeover=takeover.takeover:main'],
}
)
``` |
{
"source": "0xd0ug/clayPigeons",
"score": 3
} |
#### File: 0xd0ug/clayPigeons/cpType.py
```python
from socket import socket, AF_INET, SOCK_STREAM, SOCK_DGRAM, SOL_SOCKET, SO_REUSEADDR
class ClayPigeon:
@staticmethod
def same(x, y):
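# Normalize both byte strings the same way (latin-1 decode, then default
# UTF-8 re-encode) so probe data and generated signatures compare consistently.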
a = x.decode('latin-1').encode()
b = y.decode('latin-1').encode()
return a == b
def __str__(self):
return self.probe.protocol + "/" + str(self.port) + ':' + ("softmatch " if self.match.softmatch else "match ") + self.probe.probename
def __init__(self, probe, match, port): # Starts and runs listener for clay pigeon
self.match = match
self.port = port
self.probe = probe
self.probeResponse = self.match.example()
firstOpen = True
portString = str(self)
while True:
if probe.protocol == 'TCP':
self.s = socket(AF_INET, SOCK_STREAM)
else:
self.s = socket(AF_INET, SOCK_DGRAM)
self.s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
# Try to get the port open
try:
self.s.bind(('127.0.0.1', self.port)) # Binds only to localhost.
except PermissionError:
# Probably trying to open port <=1024 without root privileges
self.s.close()
print(portString + ": Can't open port (root may be required)?")
break
except OSError:
# Probably opened a duplicate port (either another pigeon or a real service)
self.s.close()
print(portString + ": Port may already be in use.")
break
if firstOpen:
# Print port info if this is the first time through (this loop repeats for each connection)
print(portString)
firstOpen = False
if probe.protocol == 'TCP':
# TCP port means you need to listen, UDP just takes data.
self.s.listen(5)
try:
# Try to receive data from TCP or UDP
if probe.protocol == 'TCP':
connection, address = self.s.accept()
if probe.probename != 'NULL':
data = connection.recv(1536)
print(portString + ": Received", data)
else:
data, address = self.s.recvfrom(1536)
print(portString + ": Received", data, "from", address)
except ConnectionResetError:
connection.close()
try:
self.s.shutdown(1)
except OSError:
pass
self.s.close()
continue
# If this is a null probe or if the input matches the signature, send the response
if probe.probename == 'NULL' or self.same(data, self.probe.probestring):
try:
if probe.protocol == 'TCP':
connection.send(self.probeResponse)
print("*" + portString + ": Response", self.probeResponse)
else:
self.s.sendto(self.probeResponse, address)
print("*" + portString + ": Response", self.probeResponse)
except OSError:
pass
# Clean up by getting anything else from the port.
'''while True:
try:
if probe.protocol == 'TCP':
getInput = connection.recv(1024)
else:
getInput, address = self.s.recvfrom(1536)
except OSError:
break
if not getInput:
break'''
if probe.protocol == 'TCP':
connection.close()
try:
self.s.shutdown(1)
except OSError:
pass
self.s.close()
```
#### File: 0xd0ug/clayPigeons/matchType.py
```python
import exrex
class Match:
def __init__(self, service, pattern, versioninfo, softmatch):
self.service = service
self.pattern = pattern
self.versioninfo = versioninfo
self.softmatch = softmatch
def __str__(self):
if self.softmatch:
x = 'softmatch '
else:
x = 'match '
x += self.service + ' ' + self.versioninfo + ' ' + self.pattern
return x.rstrip('\n')
def example(self):
# Generate a candidate that matches the expected signature
sample = exrex.getone(self.pattern,5)
return bytes(sample, 'latin-1')
```
#### File: 0xd0ug/clayPigeons/probeType.py
```python
from copy import deepcopy
import random
class Probe:
def __init__(self, protocol, probename, probestring, matches=[], ports=[], sslports=[]):
self.protocol = protocol
self.probename = probename
self.probestring = probestring
self.matches = deepcopy(matches)
self.ports = deepcopy(ports)
self.sslports = deepcopy(sslports)
def __str__(self):
x = 'Probe ' + self.protocol + ' ' + self.probename + ' ' + str(self.probestring)
return x
def getRandomMatch(self):
return random.choice(self.matches)
def getRandomPort(self):
# Randomly choose from an expected port
if self.ports != []:
port = random.choice(self.ports)
# And if no ports are specified, just pick one in a high range.
else:
port = random.randint(10000, 11000)
# usedPorts.append(port)
return port
``` |
{
"source": "0xd2e/python_playground",
"score": 3
} |
#### File: python_playground/Daftcode Python Level UP 2018 solutions/solutions.py
```python
import numpy as np
from utils import download_file
from zadanie1.task1 import count_characters
from zadanie2.task2 import primesfrom2to, approximate_prime
from zadanie34.task34_prep import process_text, load_data
from zadanie34.task34_utils import show_short_info
from zadanie34.task34 import simple_bottom_up
def solve_task1():
'''
Solution to a task:
https://github.com/daftcode/python_levelup_2018/tree/master/zadania_rekrutacyjne/Zadanie_1
This function does not return any value, just prints the answer to the screen.
'''
file_url = 'https://github.com/daftcode/python_levelup_2018/raw/master/zadania_rekrutacyjne/Zadanie_1/zadanie_1_words.zip'
file_path = download_file(file_url, True)
char_freq = count_characters(file_path)
if char_freq:
items = zip(char_freq.keys(), char_freq.values())
ans = ['{}{:d}'.format(char, freq) for char, freq in items]
ans = sorted(ans)
print(''.join(ans), end='\n\n')
else:
print('Cannot find the answer', end='\n\n')
def solve_task2():
'''
Solution to a task:
https://github.com/daftcode/python_levelup_2018/tree/master/zadania_rekrutacyjne/Zadanie_2
This function does not return any value, just prints the answer to the screen.
'''
# Numba’s JIT compiler warm up
primesfrom2to(20)
approximate_prime(20)
i = 1234567
n = approximate_prime(i)
primes = primesfrom2to(n)
print('{:,d}-th prime number is {:,d}'.format(i, np.sum(primes[:i])), end='\n\n')
def solve_task3():
'''
Solution to a task:
https://github.com/daftcode/python_levelup_2018/tree/master/zadania_rekrutacyjne/Zadanie_3
This function does not return any value, just prints the answer to the screen.
'''
# Numba’s JIT compiler warm up
simple_bottom_up(3, np.fromiter('123456789', dtype=np.uint8))
url = 'https://raw.githubusercontent.com/daftcode/python_levelup_2018/master/zadania_rekrutacyjne/Zadanie_3/zadanie_3_triangle_small.txt'
filepath = download_file(url, False)
filepath = process_text(filepath)
root_node, nlvls, flat_triangle = load_data(filepath)[:3]
show_short_info(nlvls)
output = simple_bottom_up(nlvls, flat_triangle)
print('The highest sum of nodes:', output + root_node, end='\n\n')
def solve_task4():
'''
Solution to a task:
https://github.com/daftcode/python_levelup_2018/tree/master/zadania_rekrutacyjne/Zadanie_4
This function does not return any value, just prints the answer to the screen.
'''
# Numba’s JIT compiler warm up
simple_bottom_up(3, np.fromiter('123456789', dtype=np.uint8))
url = 'https://raw.githubusercontent.com/daftcode/python_levelup_2018/master/zadania_rekrutacyjne/Zadanie_4/zadanie_4_triangle_big.txt'
filepath = download_file(url, False)
filepath = process_text(filepath)
root_node, nlvls, flat_triangle = load_data(filepath)[:3]
show_short_info(nlvls)
output = simple_bottom_up(nlvls, flat_triangle)
print('The highest sum of nodes:', output + root_node, end='\n\n')
if __name__ == '__main__':
solvers = (solve_task1, solve_task2, solve_task3, solve_task4)
for num, sol in enumerate(solvers):
print('___ Task {:d} ___'.format(num + 1))
sol()
```
#### File: Daftcode Python Level UP 2018 solutions/zadanie1/task1.py
```python
from collections import Counter
from os import path
from string import ascii_lowercase
from zipfile import ZipFile, BadZipFile
def count_characters(filepath):
'''
Inputs:
filepath -- string, path to a zip file with (ascii) text files
Counts total occurences of ascii characters in compressed text files.
Returns a dictionary with ascii characters stored as keys
and corresponding counts stored as values.
'''
char_freq = Counter()
encoding = 'ascii'
try:
assert filepath.endswith('.zip')
filename = path.split(filepath)[1]
with ZipFile(filepath, 'r') as archive:
files = archive.namelist()
print(filename, 'contains', len(files), 'compressed files', end='\n\n')
for f in files:
msg = archive.read(f).decode(encoding).lower()
char_freq.update(msg)
except BadZipFile:
print('Invalid zip file:', filepath)
except (IOError, OSError) as err:
if not path.exists(filepath):
print('File does not exist:', filepath)
else:
print('Cannot open:', filepath)
print(err.strerror if err.strerror else err)
except AssertionError:
print('Expected a path to a .zip file, got:', filepath)
items = zip(char_freq.keys(), char_freq.values())
return {char: freq for char, freq in items if char in ascii_lowercase}
```
#### File: python_playground/Pythonchallenge solutions/pythonchallenge07.py
```python
from os import path
from re import findall
from PIL import Image
from utils import download_file
def solve07():
filepath = download_file(url='http://www.pythonchallenge.com/pc/def/oxygen.png', binf=True)
filename = path.basename(filepath)
try:
if not path.exists(filepath):
raise IOError('File does not exist')
with Image.open(filepath, 'r') as img:
width, height = img.size
template = (
'{:<8}: {}'.format('Filename', filename),
'{:<8}: {}'.format('Format', img.format),
'{:<8}: {}'.format('Mode', img.mode),
'{:<8}: {:d} pixels'.format('Width', width),
'{:<8}: {:d} pixels'.format('Height', height),
'{:<8}: {:,d} pixels'.format('Size', width * height),
'{:<8}: {}'.format('Metadata', img.info)
)
# These values were checked manually in an image editor
left, top, right, bottom = 0, 43, 608, 52
length = 7
width = right - left
height = bottom - top
# Keep only the gray rectangles
pixels = img.crop((left, top, right, bottom)).getdata()
# Keep only one color channel
nums = [px[0] for px in pixels]
# Keep only one number for each rectangle
nums = nums[1:width:length]
except (IOError, OSError) as err:
print('Cannot open:', filepath if filepath else '[not downloaded]')
print(err.strerror if err.strerror else err)
else:
print('\n'.join(template), end='\n\n')
del template, left, top, right, bottom, length, width, height, pixels, img
text_parts = [chr(n) for n in nums]
ans = ''.join(text_parts)
print('Secret message:', ans)
text_parts = findall(r'\d+', ans)
text_parts = [chr(int(i)) for i in text_parts]
ans = ''.join(text_parts)
print('Magic word:', ans)
if __name__ == '__main__':
solve07()
```
#### File: python_playground/Pythonchallenge solutions/pythonchallenge09.py
```python
from numpy import array, int16
import matplotlib.pyplot as plt
def solve09():
first = (
146, 399, 163, 403, 170, 393, 169, 391, 166, 386, 170, 381, 170, 371, 170,
355, 169, 346, 167, 335, 170, 329, 170, 320, 170, 310, 171, 301, 173, 290,
178, 289, 182, 287, 188, 286, 190, 286, 192, 291, 194, 296, 195, 305, 194,
307, 191, 312, 190, 316, 190, 321, 192, 331, 193, 338, 196, 341, 197, 346,
199, 352, 198, 360, 197, 366, 197, 373, 196, 380, 197, 383, 196, 387, 192,
389, 191, 392, 190, 396, 189, 400, 194, 401, 201, 402, 208, 403, 213, 402,
216, 401, 219, 397, 219, 393, 216, 390, 215, 385, 215, 379, 213, 373, 213,
365, 212, 360, 210, 353, 210, 347, 212, 338, 213, 329, 214, 319, 215, 311,
215, 306, 216, 296, 218, 290, 221, 283, 225, 282, 233, 284, 238, 287, 243,
290, 250, 291, 255, 294, 261, 293, 265, 291, 271, 291, 273, 289, 278, 287,
279, 285, 281, 280, 284, 278, 284, 276, 287, 277, 289, 283, 291, 286, 294,
291, 296, 295, 299, 300, 301, 304, 304, 320, 305, 327, 306, 332, 307, 341,
306, 349, 303, 354, 301, 364, 301, 371, 297, 375, 292, 384, 291, 386, 302,
393, 324, 391, 333, 387, 328, 375, 329, 367, 329, 353, 330, 341, 331, 328,
336, 319, 338, 310, 341, 304, 341, 285, 341, 278, 343, 269, 344, 262, 346,
259, 346, 251, 349, 259, 349, 264, 349, 273, 349, 280, 349, 288, 349, 295,
349, 298, 354, 293, 356, 286, 354, 279, 352, 268, 352, 257, 351, 249, 350,
234, 351, 211, 352, 197, 354, 185, 353, 171, 351, 154, 348, 147, 342, 137,
339, 132, 330, 122, 327, 120, 314, 116, 304, 117, 293, 118, 284, 118, 281,
122, 275, 128, 265, 129, 257, 131, 244, 133, 239, 134, 228, 136, 221, 137,
214, 138, 209, 135, 201, 132, 192, 130, 184, 131, 175, 129, 170, 131, 159,
134, 157, 134, 160, 130, 170, 125, 176, 114, 176, 102, 173, 103, 172, 108,
171, 111, 163, 115, 156, 116, 149, 117, 142, 116, 136, 115, 129, 115, 124,
115, 120, 115, 115, 117, 113, 120, 109, 122, 102, 122, 100, 121, 95, 121,
89, 115, 87, 110, 82, 109, 84, 118, 89, 123, 93, 129, 100, 130, 108, 132,
110, 133, 110, 136, 107, 138, 105, 140, 95, 138, 86, 141, 79, 149, 77, 155,
81, 162, 90, 165, 97, 167, 99, 171, 109, 171, 107, 161, 111, 156, 113, 170,
115, 185, 118, 208, 117, 223, 121, 239, 128, 251, 133, 259, 136, 266, 139,
276, 143, 290, 148, 310, 151, 332, 155, 348, 156, 353, 153, 366, 149, 379,
147, 394, 146, 399
)
second = (
156, 141, 165, 135, 169, 131, 176, 130, 187, 134, 191, 140, 191, 146, 186,
150, 179, 155, 175, 157, 168, 157, 163, 157, 159, 157, 158, 164, 159, 175,
159, 181, 157, 191, 154, 197, 153, 205, 153, 210, 152, 212, 147, 215, 146,
218, 143, 220, 132, 220, 125, 217, 119, 209, 116, 196, 115, 185, 114, 172,
114, 167, 112, 161, 109, 165, 107, 170, 99, 171, 97, 167, 89, 164, 81, 162,
77, 155, 81, 148, 87, 140, 96, 138, 105, 141, 110, 136, 111, 126, 113, 129,
118, 117, 128, 114, 137, 115, 146, 114, 155, 115, 158, 121, 157, 128, 156,
134, 157, 136, 156, 136
)
third = first + second
x = array(third[0::2], dtype=int16)
y = array(third[1::2], dtype=int16) * -1
del first, second, third
plot_options = {
'color': 'black',
'alpha': 1,
'linestyle': '',
'marker': '.',
'markersize': 10,
'markerfacecolor': 'black',
'markeredgewidth': 0,
'markeredgecolor': 'black',
'antialiased': False
}
plt.ioff()
plt.figure(num='bull', facecolor='white', frameon=False)
plt.axis('equal')
plt.axis('off')
plt.plot(x, y, **plot_options)
plt.show()
print('Magic word: bull')
if __name__ == '__main__':
solve09()
```
#### File: python_playground/Pythonchallenge solutions/pythonchallenge11.py
```python
# If needed, use username and password from challenge 8
from os import path
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image, ImageOps
from utils import download_file
def solve11():
filepath = download_file(
url='http://www.pythonchallenge.com/pc/return/cave.jpg',
binf=True,
username='huge',
password='<PASSWORD>'
)
filename = path.split(filepath)[1]
try:
if not path.exists(filepath):
raise IOError('File does not exist')
with Image.open(filepath, 'r') as img:
width, height = img.size
template = (
'{:<8}: {}'.format('Filename', filename),
'{:<8}: {}'.format('Format', img.format),
'{:<8}: {}'.format('Mode', img.mode),
'{:<8}: {:,d} pixels'.format('Width', width),
'{:<8}: {:,d} pixels'.format('Height', height),
'{:<8}: {:,d} pixels'.format('Size', width * height)
)
pixels = np.asarray(img, dtype=np.uint8, order='F')
except (IOError, OSError) as err:
print('Cannot open:', filepath if filepath else '[not downloaded]')
print(err.strerror if err.strerror else err)
else:
print('\n'.join(template), end='\n\n')
del template, width, height
plt.ioff()
plt.figure(num=filename, frameon=False, clear=True)
plt.imshow(pixels, interpolation=None, filternorm=1)
plt.show()
plt.ioff()
plt.figure(num=filename, frameon=False, clear=True)
with Image.fromarray(pixels[0::2, 0::2]) as img:
img.paste(ImageOps.invert(img))
img.paste(ImageOps.autocontrast(img))
part = np.asarray(img, dtype=np.uint8, order='F')
plt.subplot(221)
plt.axis('off')
plt.imshow(part)
plt.subplot(222)
plt.axis('off')
plt.imshow(pixels[1::2, 1::2])
plt.subplot(223)
plt.axis('off')
plt.imshow(pixels[0::2, 1::2])
plt.subplot(224)
plt.axis('off')
plt.imshow(pixels[1::2, 0::2])
plt.show()
print('Magic word: evil')
if __name__ == '__main__':
solve11()
```
#### File: python_playground/Python scripts/lottery.py
```python
import numpy as np
NUMBERS = np.arange(1, 51, dtype=np.uint8)
def play_lotto():
"""
Returns a sorted 1D numpy array with 6 unique integer numbers between 1 and 49.
"""
return np.sort(np.random.choice(NUMBERS[:49], size=6, replace=False))
def play_eurojackpot():
"""
Returns a tuple with two sorted 1D numpy arrays:
first with 5 unique integer numbers between 1 and 50,
second with 2 unique integer numbers between 1 and 10.
"""
return (np.sort(np.random.choice(NUMBERS, size=5, replace=False)),
np.sort(np.random.choice(NUMBERS[:10], size=2, replace=False)))
if __name__ == '__main__':
np.random.seed()
np.random.shuffle(NUMBERS)
[print(('{:4d}' * 6).format(*play_lotto())) for _ in range(3)]
``` |
{
"source": "0xd34db33f/AWSBucketDump",
"score": 2
} |
#### File: 0xd34db33f/AWSBucketDump/common.py
```python
import sys
import logging
LOG_FILENAME = 'awsbucketdump.log'
BOLD = ""
END = ""
if sys.platform != 'win32' and sys.stdout.isatty():
BOLD = "\033[1m"
END = "\033[0m"
def pretty_print(string):
"""For *nix systems, augment TTY output. For others, strip such syntax."""
string = string.replace('$BOLD$', BOLD)
string = string.replace('$END$', END)
print(string)
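# Example: pretty_print('$BOLD$found$END$ 3 buckets') renders 'found' in bold on
# a *nix TTY and prints it unstyled everywhere else.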
def log(msg, level=logging.INFO):
"""Add a string to the log file."""
logging.basicConfig(filename=LOG_FILENAME,
format='%(asctime)s:%(levelname)s:%(message)s',
level=logging.INFO)
if level == logging.DEBUG:
logging.debug(msg)
elif level == logging.INFO:
logging.info(msg)
elif level == logging.WARNING:
logging.warning(msg)
elif level == logging.ERROR:
logging.error(msg)
elif level == logging.CRITICAL:
logging.critical(msg)
else:
raise ValueError(str(level))
``` |
{
"source": "0xd3ba/seam-carving",
"score": 4
} |
#### File: seam-carving/utils/energy.py
```python
import numpy as np
from scipy.ndimage import sobel
from scipy.ndimage import laplace
def _apply_sobel(img_matrix):
"""
Input: img_matrix(height, width) with type float32
Convolves the image with sobel mask and returns the magnitude
"""
dx = sobel(img_matrix, 1)
dy = sobel(img_matrix, 0)
grad_mag = np.hypot(dx, dy) # Calculates sqrt(dx^2 + dy^2)
grad_mag *= 255 / grad_mag.max() # Normalize the gradient magnitudes
return grad_mag
def _apply_laplacian(img_matrix):
"""
Input: img_matrix(height, width) with type float32
Convolves the image with Laplacian and returns the result
"""
dx_dy = laplace(img_matrix)
dx_dy *= 255 / dx_dy.max() # Normalize the result
return dx_dy
################################################################
# The energy function to use for calculating the "energies"
# of the given image. Change it accordingly
ENERGY_FUNCTION = _apply_sobel
################################################################
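# A minimal example of swapping the energy function (both are defined above):
# ENERGY_FUNCTION = _apply_laplacian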
def find_energies(img_matrix):
"""
img_matrix: 2D numpy array of shape (height, width), i.e. the image is grayscale
Calculates the "energies", i.e. the digital gradients of the image (basically the edges)
and returns the resulting matrix
"""
energy_mat = ENERGY_FUNCTION(img_matrix)
return energy_mat
```
#### File: seam-carving/utils/grayscale.py
```python
def to_grayscale(image_np):
"""
image_np: numpy array of shape (height, width) or (height, width, channels)
Converts the image to grayscale and returns it
"""
assert len(image_np.shape) >= 2, f"Image must be 2D, provided {len(image_np.shape)}D instead"
# If the number of dimensions are 2, then the image is already in grayscale
if len(image_np.shape) == 2:
return image_np
# Convert it to grayscale using weighted sum of the channel intensities
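# (the weights below are the ITU-R BT.601 luma coefficients: 0.299 R + 0.587 G + 0.114 B)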
return (image_np[:, :, 0]*0.299 +
image_np[:, :, 1]*0.587 +
image_np[:, :, 2]*0.114
)
``` |
{
"source": "0xdabbad00/aws-consolidated-admin",
"score": 2
} |
#### File: lambda/describe_stack/describe_stack.py
```python
import base64
import boto3
import botocore
import os
kms = boto3.client('kms')
KMS_KEY_ID = os.environ['KMS_KEY_ID']
def decrypt(ciphertext):
return kms.decrypt(
CiphertextBlob=base64.b64decode(ciphertext))['Plaintext']
STACK_KEYS = {'StackName', 'StackId', 'StackStatus', 'Parameters',
'Outputs', 'Tags', 'Capabilities', 'NotificationARNs',
'StackStatusReason', 'RoleARN', 'ChangeSetId'}
def lambda_handler(event, context):
sess = boto3.session.Session(
aws_access_key_id=event['Credentials']['AccessKeyId'],
aws_secret_access_key=decrypt(event['Credentials']['SecretAccessKeyCiphertext']),
aws_session_token=event['Credentials']['SessionToken'],
region_name=event['Region'])
cfn = sess.client('cloudformation')
try:
stack_query = event['Stack']['StackId']
except KeyError:
stack_query = event['Stack']['StackName']
try:
resp = cfn.describe_stacks(StackName=stack_query)
except botocore.exceptions.ClientError as e:
if e.message.endswith('does not exist'):
return {
'StackName': event['Stack']['StackName'],
'StackStatus': 'DOES_NOT_EXIST'
}
raise e
resp = resp['Stacks'][0]
return { k: v for k, v in resp.iteritems() if k in STACK_KEYS }
```
#### File: lambda/event_selector_resource/event_selector_resource.py
```python
import boto3
import copy
import cfnresponse
# Lambda custom resource for managing CloudTrail Event Selectors.
# Allows you to use CloudFormation to configure the type of management
# events logged to a trail (ReadOnly, WriteOnly, None, or All) in
# addition to S3 access logging to CloudTrail.
#
# This resource DOES NOT change the read/write selector for management
# events if ManagementEventReadWriteType is undefined. Additionally,
# it will not modify the selection settings for any S3 bucket not
# defined in the resource's properties.
#
# This resource DOES have a race condition; an unintended overwrite of
# the trail's event selector configuration can occur if it changes
# after the existing settings are fetched. For this reason, do not
# invoke this resource twice from the same template.
#
# Usage (in the Resources block of your template):
#
# CloudTrailEventSelectors:
# Type: Custom::CloudTrailEventSelector
# Properties:
# ServiceToken: !GetAtt CloudTrailEventSelectorResourceFn.Arn
# TrailName: !Ref CloudTrail
# ManagementEventReadWriteType: All
# DataEventSelectors:
# All:
# 'AWS::S3::Object':
# - !Sub 'arn:aws:s3:::${CloudTrailBucket}/'
# - !Sub 'arn:aws:s3:::${ConfigBucket}/'
# WriteOnly:
# 'AWS::S3::Object':
# - !Sub 'arn:aws:s3:::${OtherS3Bucket}/'
cloudtrail = boto3.client('cloudtrail')
# Convert the API's representation of event selectors into something
# more manageable by converting lists with unique keys into dicts
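# Sketch of the conversion (values are illustrative, not taken from a real trail):
#   [{'ReadWriteType': 'All', 'IncludeManagementEvents': True,
#     'DataResources': [{'Type': 'AWS::S3::Object', 'Values': ['arn:aws:s3:::bucket/']}]}]
# becomes
#   {'All': {'IncludeManagementEvents': True,
#            'DataResources': {'AWS::S3::Object': {'arn:aws:s3:::bucket/'}}}}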
def parse_event_selectors(source_event_selectors):
event_selectors = {}
for selector in copy.deepcopy(source_event_selectors):
event_selectors[selector.pop('ReadWriteType')] = selector
for data_resource in selector.pop('DataResources'):
type_ = data_resource['Type']
values = data_resource['Values']
selector.setdefault('DataResources', {})[type_] = set(values)
return event_selectors
# Convert from our representation of event selectors back into the
# API's representation
def format_event_selectors(event_selectors):
resp = []
for rwtype, es in event_selectors.iteritems():
es['ReadWriteType'] = rwtype
es['DataResources'] = [
{ 'Type': k, 'Values': list(v) } for k, v in
es.pop('DataResources', {}).iteritems()]
if len(es['DataResources']) > 0 or es['IncludeManagementEvents']:
resp.append(es)
return resp
def mutate_selectors(existing_selectors, defined_selectors, delete=False):
for rwtype, defined_selector in defined_selectors.iteritems():
selector = existing_selectors.setdefault(
rwtype, {'IncludeManagementEvents': False})
for type_, values in defined_selector.iteritems():
resource_values = (selector
.setdefault('DataResources', {})
.setdefault(type_, set()))
if delete:
resource_values.difference_update(set(values))
else:
resource_values.update(set(values))
def mutate_management_event_selector(existing_selectors, mgmt_rw_type):
# Reset all IncludeManagementEvents flags
for _, selector in existing_selectors.iteritems():
selector['IncludeManagementEvents'] = False
existing_selectors.setdefault(
mgmt_rw_type, {})['IncludeManagementEvents'] = True
def modify_event_selectors(request_type, props, old_props=None):
resp = cloudtrail.get_event_selectors(TrailName=props['TrailName'])
selectors = parse_event_selectors(resp['EventSelectors'])
if request_type == 'Delete':
mutate_selectors(selectors, props['DataEventSelectors'], True)
else:
if request_type == 'Update':
mutate_selectors(selectors, old_props['DataEventSelectors'], True)
mutate_selectors(selectors, props['DataEventSelectors'])
try:
mutate_management_event_selector(
selectors, props['ManagementEventReadWriteType'])
except KeyError:
pass
new_selectors = format_event_selectors(selectors)
print new_selectors
cloudtrail.put_event_selectors(
TrailName=props['TrailName'], EventSelectors=new_selectors)
def handler(event, context):
print event
try:
modify_event_selectors(
event['RequestType'],
event['ResourceProperties'],
event.get('OldResourceProperties'))
except Exception as e:
print e.message
response_code = cfnresponse.FAILED
else:
response_code = cfnresponse.SUCCESS
cfnresponse.send(event, context, response_code, {},
event['ResourceProperties']['TrailName'])
```
#### File: lambda/start_workflows/start_workflows.py
```python
import boto3
import json
sfn = boto3.client('stepfunctions')
def lambda_handler(event, context):
output = []
for wf in event['Workflows']:
resp = sfn.start_execution(
stateMachineArn=event['StateMachineArn'],
name=wf['ExecutionName'],
input=json.dumps(wf))
output.append({
'ExecutionArn': resp['executionArn'],
'StartedAt': resp['startDate'].isoformat()
})
return output
``` |
{
"source": "0xdabbad00/aws-remediations",
"score": 2
} |
#### File: src/remediations/aws_ddb_encrypt_table.py
```python
from typing import Any, Dict
from boto3 import Session
from app import Remediation
from app.remediation_base import RemediationBase
@Remediation
class AwsDdbEncryptTable(RemediationBase):
"""Remediation that creates a KMS key and uses it to encrypt DDB table"""
@classmethod
def _id(cls) -> str:
return 'DDB.EncryptTable'
@classmethod
def _parameters(cls) -> Dict[str, str]:
return {}
@classmethod
def _fix(cls, session: Session, resource: Dict[str, Any], parameters: Dict[str, str]) -> None:
session.client('dynamodb').update_table(
TableName=resource['Name'], SSESpecification={
'Enabled': True,
'SSEType': 'KMS'
}
)
```
#### File: src/remediations/aws_guardduty_create_detector.py
```python
from typing import Any, Dict
from boto3 import Session
from app import Remediation
from app.remediation_base import RemediationBase
@Remediation
class AwsGuardDutyCreateDetector(RemediationBase):
"""Remediation that creates a GuardDuty detector if one doesn't exist"""
@classmethod
def _id(cls) -> str:
return 'GuardDuty.CreateDetector'
@classmethod
def _parameters(cls) -> Dict[str, str]:
return {'FindingPublishingFrequency': 'FIFTEEN_MINUTES'}
@classmethod
def _fix(cls, session: Session, resource: Dict[str, Any], parameters: Dict[str, str]) -> None:
session.client("guardduty").create_detector(
Enable=True, FindingPublishingFrequency=parameters['FindingPublishingFrequency']
)
```
#### File: src/remediations/aws_rds_enable_auto_minor_version_upgrade.py
```python
from typing import Any, Dict
from boto3 import Session
from app import Remediation
from app.remediation_base import RemediationBase
@Remediation
class AwsRdsEnableAutoMinorVersionUpgrade(RemediationBase):
"""Remediation that enables Auto Minor Version upgrade for RDS instances"""
@classmethod
def _id(cls) -> str:
return 'RDS.EnableAutoMinorVersionUpgrade'
@classmethod
def _parameters(cls) -> Dict[str, str]:
return {'ApplyImmediately': 'true'}
@classmethod
def _fix(cls, session: Session, resource: Dict[str, Any], parameters: Dict[str, str]) -> None:
session.client('rds').modify_db_instance(
DBInstanceIdentifier=resource['Id'],
AutoMinorVersionUpgrade=True,
ApplyImmediately=parameters['ApplyImmediately'].lower() == 'true'
)
```
#### File: src/remediations/aws_s3_block_bucket_public_access.py
```python
from typing import Any, Dict
from boto3 import Session
from app import Remediation
from app.remediation_base import RemediationBase
@Remediation
class AwsS3BlockBucketPublicAccess(RemediationBase):
"""Remediation that puts an S3 bucket block public access configuration"""
@classmethod
def _id(cls) -> str:
return 'S3.BlockBucketPublicAccess'
@classmethod
def _parameters(cls) -> Dict[str, str]:
return {
'BlockPublicAcls': 'true',
'IgnorePublicAcls': 'true',
'BlockPublicPolicy': 'true',
'RestrictPublicBuckets': 'true'
}
@classmethod
def _fix(cls, session: Session, resource: Dict[str, Any], parameters: Dict[str, str]) -> None:
session.client('s3').put_public_access_block(
Bucket=resource['Name'],
PublicAccessBlockConfiguration={
'BlockPublicAcls': parameters['BlockPublicAcls'].lower() == 'true',
'IgnorePublicAcls': parameters['IgnorePublicAcls'].lower() == 'true',
'BlockPublicPolicy': parameters['BlockPublicPolicy'].lower() == 'true',
'RestrictPublicBuckets': parameters['RestrictPublicBuckets'].lower() == 'true',
},
)
```
#### File: src/remediations/aws_s3_enable_bucket_versioning.py
```python
from typing import Any, Dict
from boto3 import Session
from app import Remediation
from app.remediation_base import RemediationBase
@Remediation
class AwsS3EnableBucketVersioning(RemediationBase):
"""Remediation that enables versioning for an S3 bucket"""
@classmethod
def _id(cls) -> str:
return 'S3.EnableBucketVersioning'
@classmethod
def _parameters(cls) -> Dict[str, str]:
return {}
@classmethod
def _fix(cls, session: Session, resource: Dict[str, Any], parameters: Dict[str, str]) -> None:
session.client('s3').put_bucket_versioning(
Bucket=resource['Name'], VersioningConfiguration={'Status': 'Enabled'}
)
```
#### File: tests/remediations/test_aws_ddb_encrypt_table.py
```python
from unittest import mock, TestCase
from boto3 import Session
from src.remediations.aws_ddb_encrypt_table import AwsDdbEncryptTable
class TestAwsDdbEncryptTable(TestCase):
@mock.patch.object(Session, 'client')
def test_fix(self, mock_session):
mock_client = mock.Mock()
mock_session.return_value = mock_client
resource = {
'Name': 'TestName'
}
AwsDdbEncryptTable()._fix(Session, resource, {})
mock_session.assert_called_once_with('dynamodb')
mock_client.update_table.assert_called_once_with(
TableName='TestName',
SSESpecification={
'Enabled': True,
'SSEType': 'KMS'
}
)
```
#### File: tests/remediations/test_aws_ec2_terminate_instance.py
```python
from unittest import mock, TestCase
from boto3 import Session
from src.remediations.aws_ec2_terminate_instance import AwsEc2TerminateInstance
class TestAwsEc2TerminateInstance(TestCase):
@mock.patch.object(Session, 'client')
def test_fix(self, mock_session):
mock_client = mock.Mock()
mock_session.return_value = mock_client
resource = {
'Id': 'TestInstanceId'
}
AwsEc2TerminateInstance()._fix(Session, resource, {})
mock_session.assert_called_once_with('ec2')
mock_client.terminate_instances.assert_called_once_with(InstanceIds=['TestInstanceId'])
```
#### File: tests/remediations/test_aws_guardduty_create_detector.py
```python
from unittest import mock, TestCase
from boto3 import Session
from src.remediations.aws_guardduty_create_detector import AwsGuardDutyCreateDetector
class TestAwsGuardDutyCreateDetector(TestCase):
@mock.patch.object(Session, 'client')
def test_fix(self, mock_session):
mock_client = mock.Mock()
mock_session.return_value = mock_client
parameters = {
'FindingPublishingFrequency': 'TestFindingPublishingFrequency'
}
mock_client.create_flow_logs.return_value = {}
AwsGuardDutyCreateDetector()._fix(Session, {}, parameters)
mock_session.assert_called_once_with('guardduty')
mock_client.create_detector.assert_called_once_with(
Enable=True,
FindingPublishingFrequency='TestFindingPublishingFrequency'
)
```
#### File: tests/remediations/test_aws_s3_enable_bucket_encryption.py
```python
from unittest import mock, TestCase
from boto3 import Session
from app.exceptions import InvalidParameterException
from remediations.aws_s3_enable_bucket_encryption import AwsS3EnableBucketEncryption
class TestAwsS3EnableBucketEncryption(TestCase):
@mock.patch.object(Session, 'client')
def test_fix_aes256(self, mock_session):
mock_client = mock.Mock()
mock_session.return_value = mock_client
resource = {
'Name': 'TestName'
}
parameters = {
'SSEAlgorithm': 'AES256',
'KMSMasterKeyID': ''
}
AwsS3EnableBucketEncryption()._fix(Session, resource, parameters)
mock_session.assert_called_once_with('s3')
mock_client.put_bucket_encryption.assert_called_with(
Bucket='TestName',
ServerSideEncryptionConfiguration={
'Rules':
[
{
'ApplyServerSideEncryptionByDefault':
{
'SSEAlgorithm': 'AES256'
},
},
],
},
)
@mock.patch.object(Session, 'client')
def test_fix_kms(self, mock_session):
mock_client = mock.Mock()
mock_session.return_value = mock_client
resource = {
'Name': 'TestName'
}
parameters = {
'SSEAlgorithm': 'aws:kms',
'KMSMasterKeyID': '313e6a3d-57c7-4544-ba59-0fecaabaf7b2'
}
AwsS3EnableBucketEncryption()._fix(Session, resource, parameters)
mock_session.assert_called_once_with('s3')
mock_client.put_bucket_encryption.assert_called_with(
Bucket='TestName',
ServerSideEncryptionConfiguration={
'Rules':
[
{
'ApplyServerSideEncryptionByDefault':
{
'SSEAlgorithm': 'aws:kms',
'KMSMasterKeyID': '313e6a3d-57c7-4544-ba59-0fecaabaf7b2'
},
},
],
},
)
@mock.patch.object(Session, 'client')
def test_fix_unknown_algorithm(self, mock_session):
mock_client = mock.Mock()
mock_session.return_value = mock_client
resource = {
'Name': 'TestName'
}
parameters = {
'SSEAlgorithm': 'unknown'
}
self.assertRaises(InvalidParameterException, AwsS3EnableBucketEncryption()._fix, Session, resource, parameters)
mock_session.assert_not_called()
``` |
{
"source": "0xdaedel/shellphish-qemu",
"score": 2
} |
#### File: 0xdaedel/shellphish-qemu/setup.py
```python
import os
import sys
import time
import shutil
import random
import subprocess
import platform
from distutils.errors import LibError
from distutils.util import get_platform
from distutils.command.build import build as _build
from setuptools import setup
from setuptools.command.develop import develop as _develop
BIN_PATH = os.path.join("shellphish_qemu", "bin")
QEMU_REPO_PATH_CGC_BASE = "shellphish-qemu-cgc-base"
QEMU_REPO_PATH_LINUX = "shellphish-qemu-linux"
QEMU_LINUX_TRACER_PATCH = os.path.join("..", "patches", "tracer-qemu.patch")
QEMU_LINUX_UPDATE_PATCH = os.path.join("..", "patches", "ucontext.patch")
QEMU_LINUX_COREDUMP_PATCH = os.path.join("..", "patches", "linux-coredump.patch")
QEMU_CGC_COREDUMP_PATCH = os.path.join("..", "patches", "cgc-coredump.patch")
QEMU_PATH_CGC_TRACER = os.path.join(BIN_PATH, "shellphish-qemu-cgc-tracer")
QEMU_PATH_CGC_NXTRACER = os.path.join(BIN_PATH, "shellphish-qemu-cgc-nxtracer")
QEMU_PATH_CGC_BASE = os.path.join(BIN_PATH, "shellphish-qemu-cgc-base")
QEMU_PATH_LINUX_I386 = os.path.join(BIN_PATH, "shellphish-qemu-linux-i386")
QEMU_PATH_LINUX_X86_64 = os.path.join(BIN_PATH, "shellphish-qemu-linux-x86_64")
QEMU_PATH_LINUX_MIPS = os.path.join(BIN_PATH, "shellphish-qemu-linux-mips")
QEMU_PATH_LINUX_MIPSEL = os.path.join(BIN_PATH, "shellphish-qemu-linux-mipsel")
QEMU_PATH_LINUX_MIPS64 = os.path.join(BIN_PATH, "shellphish-qemu-linux-mips64")
QEMU_PATH_LINUX_PPC = os.path.join(BIN_PATH, "shellphish-qemu-linux-ppc")
QEMU_PATH_LINUX_PPC64 = os.path.join(BIN_PATH, "shellphish-qemu-linux-ppc64")
QEMU_PATH_LINUX_ARM = os.path.join(BIN_PATH, "shellphish-qemu-linux-arm")
QEMU_PATH_LINUX_AARCH64 = os.path.join(BIN_PATH, "shellphish-qemu-linux-aarch64")
ALL_QEMU_BINS = [
QEMU_PATH_CGC_BASE,
QEMU_PATH_CGC_TRACER,
QEMU_PATH_CGC_NXTRACER,
QEMU_PATH_LINUX_I386,
QEMU_PATH_LINUX_X86_64,
QEMU_PATH_LINUX_MIPS,
QEMU_PATH_LINUX_MIPSEL,
QEMU_PATH_LINUX_MIPS64,
QEMU_PATH_LINUX_PPC,
QEMU_PATH_LINUX_PPC64,
QEMU_PATH_LINUX_ARM,
QEMU_PATH_LINUX_AARCH64,
]
def _clone_cgc_qemu():
# grab the CGC repo
if not os.path.exists(QEMU_REPO_PATH_CGC_BASE):
TRACER_QEMU_REPO_CGC = "https://github.com/mechaphish/qemu-cgc"
# the remote may cap the number of concurrent clone workers, so retry a few times
retrieved = False
for _ in range(10):
if subprocess.call(['git', 'clone', '--branch', 'base_cgc', '--depth=1', TRACER_QEMU_REPO_CGC, QEMU_REPO_PATH_CGC_BASE]) == 0:
retrieved = True
break
else:
time.sleep(random.randint(0, 10))
if not retrieved:
raise LibError("Unable to retrieve tracer qemu")
# update tracer qemu for cgc
if subprocess.call(['git', 'pull'], cwd=QEMU_REPO_PATH_CGC_BASE) != 0:
raise LibError("Unable to retrieve cgc base qemu")
def _clone_linux_qemu():
# grab the linux tarball
if not os.path.exists(QEMU_REPO_PATH_LINUX):
TRACER_QEMU_REPO_LINUX = "https://github.com/qemu/qemu.git"
if subprocess.call(['git', 'clone', '--branch', 'v2.3.0', '--depth=1', TRACER_QEMU_REPO_LINUX, QEMU_REPO_PATH_LINUX]) != 0:
raise LibError("Unable to retrieve qemu repository \"%s\"" % TRACER_QEMU_REPO_LINUX)
if subprocess.call(['git', '-C', QEMU_REPO_PATH_LINUX, 'apply', QEMU_LINUX_TRACER_PATCH]) != 0:
raise LibError("Unable to apply tracer patch to qemu")
if subprocess.call(['git', '-C', QEMU_REPO_PATH_LINUX, 'apply', QEMU_LINUX_UPDATE_PATCH]) != 0:
raise LibError("Unable to apply ucontext_t update patch to qemu")
if subprocess.call(['git', '-C', QEMU_REPO_PATH_LINUX, 'apply', QEMU_LINUX_COREDUMP_PATCH]) != 0:
raise LibError("Unable to apply coredump update patch to qemu-linux")
if subprocess.call(['git', '-C', QEMU_REPO_PATH_CGC_BASE, 'apply', QEMU_CGC_COREDUMP_PATCH]) != 0:
raise LibError("Unable to apply coredump update patch to qemu-cgc-base")
def _build_qemus():
if not os.path.exists(BIN_PATH):
try:
os.makedirs(BIN_PATH)
except OSError:
raise LibError("Unable to create bin directory")
print("Configuring CGC tracer qemu...")
if subprocess.call(['make', 'clean'], cwd=QEMU_REPO_PATH_CGC_BASE) != 0:
raise LibError("Unable to clean shellphish-qemu-cgc-tracer")
if subprocess.call(['./cgc_configure_tracer_opt'], cwd=QEMU_REPO_PATH_CGC_BASE) != 0:
raise LibError("Unable to configure shellphish-qemu-cgc-tracer")
print("Building CGC tracer qemu...")
if subprocess.call(['make', '-j4'], cwd=QEMU_REPO_PATH_CGC_BASE) != 0:
raise LibError("Unable to build shellphish-qemu-cgc")
shutil.copyfile(os.path.join(QEMU_REPO_PATH_CGC_BASE, "i386-linux-user", "qemu-i386"), QEMU_PATH_CGC_TRACER)
if subprocess.call(['make', 'clean'], cwd=QEMU_REPO_PATH_CGC_BASE) != 0:
raise LibError("Unable to clean shellphish-qemu-cgc")
print("Configuring CGC nxtracer qemu...")
if subprocess.call(['./cgc_configure_nxtracer_opt'], cwd=QEMU_REPO_PATH_CGC_BASE) != 0:
raise LibError("Unable to configure shellphish-qemu-cgc-nxtracer")
print("Building CGC nxtracer qemu...")
if subprocess.call(['make', '-j4'], cwd=QEMU_REPO_PATH_CGC_BASE) != 0:
raise LibError("Unable to build shellphish-qemu-cgc-nxtracer")
shutil.copyfile(os.path.join(QEMU_REPO_PATH_CGC_BASE, "i386-linux-user", "qemu-i386"), QEMU_PATH_CGC_NXTRACER)
if subprocess.call(['make', 'clean'], cwd=QEMU_REPO_PATH_CGC_BASE) != 0:
raise LibError("Unable to clean shellphish-qemu-cgc")
print("Configuring CGC base qemu...")
if subprocess.call(['./cgc_configure_opt'], cwd=QEMU_REPO_PATH_CGC_BASE) != 0:
raise LibError("Unable to configure shellphish-qemu-cgc-base")
print("Building CGC base qemu...")
if subprocess.call(['make', '-j4'], cwd=QEMU_REPO_PATH_CGC_BASE) != 0:
raise LibError("Unable to build shellphish-qemu-cgc")
shutil.copyfile(os.path.join(QEMU_REPO_PATH_CGC_BASE, "i386-linux-user", "qemu-i386"), QEMU_PATH_CGC_BASE)
print("Configuring Linux qemu...")
if subprocess.call(['./tracer-config'], cwd=QEMU_REPO_PATH_LINUX) != 0:
raise LibError("Unable to configure shellphish-qemu-linux")
print("Building Linux qemu...")
if subprocess.call(['make', '-j4'], cwd=QEMU_REPO_PATH_LINUX) != 0:
raise LibError("Unable to build shellphish-qemu-linux")
shutil.copyfile(os.path.join(QEMU_REPO_PATH_LINUX, "i386-linux-user", "qemu-i386"), QEMU_PATH_LINUX_I386)
shutil.copyfile(os.path.join(QEMU_REPO_PATH_LINUX, "x86_64-linux-user", "qemu-x86_64"), QEMU_PATH_LINUX_X86_64)
shutil.copyfile(os.path.join(QEMU_REPO_PATH_LINUX, "mipsel-linux-user", "qemu-mipsel"), QEMU_PATH_LINUX_MIPSEL)
shutil.copyfile(os.path.join(QEMU_REPO_PATH_LINUX, "mips-linux-user", "qemu-mips"), QEMU_PATH_LINUX_MIPS)
shutil.copyfile(os.path.join(QEMU_REPO_PATH_LINUX, "mips64-linux-user", "qemu-mips64"), QEMU_PATH_LINUX_MIPS64)
shutil.copyfile(os.path.join(QEMU_REPO_PATH_LINUX, "ppc-linux-user", "qemu-ppc"), QEMU_PATH_LINUX_PPC)
shutil.copyfile(os.path.join(QEMU_REPO_PATH_LINUX, "ppc64-linux-user", "qemu-ppc64"), QEMU_PATH_LINUX_PPC64)
shutil.copyfile(os.path.join(QEMU_REPO_PATH_LINUX, "arm-linux-user", "qemu-arm"), QEMU_PATH_LINUX_ARM)
shutil.copyfile(os.path.join(QEMU_REPO_PATH_LINUX, "aarch64-linux-user", "qemu-aarch64"), QEMU_PATH_LINUX_AARCH64)
os.chmod(QEMU_PATH_CGC_BASE, 0o755)
os.chmod(QEMU_PATH_CGC_TRACER, 0o755)
os.chmod(QEMU_PATH_CGC_NXTRACER, 0o755)
os.chmod(QEMU_PATH_LINUX_I386, 0o755)
os.chmod(QEMU_PATH_LINUX_X86_64, 0o755)
os.chmod(QEMU_PATH_LINUX_MIPSEL, 0o755)
os.chmod(QEMU_PATH_LINUX_MIPS, 0o755)
os.chmod(QEMU_PATH_LINUX_MIPS64, 0o755)
os.chmod(QEMU_PATH_LINUX_PPC, 0o755)
os.chmod(QEMU_PATH_LINUX_PPC64, 0o755)
os.chmod(QEMU_PATH_LINUX_ARM, 0o755)
os.chmod(QEMU_PATH_LINUX_AARCH64, 0o755)
try:
cgc_base_ver = subprocess.check_output([QEMU_PATH_CGC_BASE, '-version'])
cgc_tracer_ver = subprocess.check_output([QEMU_PATH_CGC_TRACER, '-version'])
cgc_nxtracer_ver = subprocess.check_output([QEMU_PATH_CGC_NXTRACER, '-version'])
assert b'AFL' not in cgc_base_ver
assert b'AFL' not in cgc_tracer_ver
assert b'AFL' not in cgc_nxtracer_ver
assert b'TRACER' not in cgc_base_ver
assert b'TRACER' in cgc_tracer_ver
assert b'TRACER' in cgc_nxtracer_ver
assert b'enforce NX' not in cgc_base_ver # Playing it safe
assert b'enforce NX' not in cgc_tracer_ver # Playing it safe
assert b'enforce NX' in cgc_nxtracer_ver # Mainly used by Antonio for CI tests
except subprocess.CalledProcessError as e:
raise LibError("Unable to check CGC qemu -version [ {} returned {}, output '{}' ]".format(e.cmd, e.returncode, e.output))
except AssertionError:
raise LibError("Wrong configuration for the CGC qemus! Make sure to clean, and check with -version")
# remove the source directory after building
#shutil.rmtree(QEMU_REPO_PATH_LINUX)
#shutil.rmtree(QEMU_REPO_PATH_CGC)
class build(_build):
def run(self):
self.execute(_clone_cgc_qemu, (), msg="Cloning CGC QEMU")
self.execute(_clone_linux_qemu, (), msg="Cloning Linux QEMU")
self.execute(_build_qemus, (), msg="Building Tracer QEMU")
_build.run(self)
class develop(_develop):
def run(self):
self.execute(_clone_cgc_qemu, (), msg="Cloning CGC QEMU")
self.execute(_clone_linux_qemu, (), msg="Cloning Linux QEMU")
self.execute(_build_qemus, (), msg="Building Tracer QEMU")
_develop.run(self)
if 'bdist_wheel' in sys.argv and '--plat-name' not in sys.argv:
idx = sys.argv.index('bdist_wheel') + 1
sys.argv.insert(idx, '--plat-name')
name = get_platform()
if 'linux' in name:
# linux_* platform tags are disallowed because the python ecosystem is fubar
# linux builds should be built in the centos 5 vm for maximum compatibility
# see https://github.com/pypa/manylinux
# see also https://github.com/angr/angr-dev/blob/master/bdist.sh
sys.argv.insert(idx + 1, 'manylinux1_' + platform.machine())
else:
# https://www.python.org/dev/peps/pep-0425/
sys.argv.insert(idx + 1, name.replace('.', '_').replace('-', '_'))
setup(
name='shellphish-qemu',
version='0.9.10',
description="A pip-installable set of qemus.",
packages=['shellphish_qemu'],
provides=['shellphish_qemu'],
requires=['pkg_resources'],
cmdclass={'build': build, 'develop': develop},
zip_safe=True,
include_package_data=True,
package_data={
'shellphish_qemu': ['bin/*']
}
)
``` |
{
"source": "0xdaem0n/siwe-py",
"score": 2
} |
#### File: siwe-py/siwe/siwe.py
```python
from datetime import datetime
import string
import secrets
from dateutil.parser import isoparse
from dateutil.tz import UTC
from typing import Optional, List, Union
import warnings
import eth_utils
from web3 import Web3, HTTPProvider
import eth_account.messages
from .parsed import RegExpParsedMessage, ABNFParsedMessage
class ValidationError(Exception):
pass
class InvalidSignature(ValidationError):
pass
class ExpiredMessage(ValidationError):
pass
class MalformedSession(ValidationError):
def __init__(self, missing_fields):
self.missing_fields = missing_fields
class SiweMessage:
"""
A class meant to fully encompass a Sign-in with Ethereum (EIP-4361) message. Its utility strictly remains
within formatting and compliance.
"""
domain: str # RFC 4501 dns authority that is requesting the signing.
address: str # Ethereum address performing the signing conformant to capitalization encoded checksum specified
# in EIP-55 where applicable.
statement: Optional[
str
] # Human-readable ASCII assertion that the user will sign, and it must not
# contain `\n`.
uri: str # RFC 3986 URI referring to the resource that is the subject of the signing.
version: str # Current version of the message.
chain_id: int # EIP-155 Chain ID to which the session is bound, and the network where Contract Accounts must be
# resolved.
nonce: Optional[
str
] # Randomized token used to prevent replay attacks, at least 8 alphanumeric characters.
issued_at: str # ISO 8601 datetime string of the current time.
expiration_time: Optional[
str
] # ISO 8601 datetime string that, if present, indicates when the signed
# authentication message is no longer valid.
expiration_time_parsed: Optional[datetime]
not_before: Optional[
datetime
] # ISO 8601 datetime string that, if present, indicates when the signed
# authentication message will become valid.
request_id: Optional[
str
] # System-specific identifier that may be used to uniquely refer to the sign-in
# request.
resources: Optional[
List[str]
] # List of information or references to information the user wishes to have
# resolved as part of authentication by the relying party. They are expressed as RFC 3986 URIs separated by `\n- `.
__slots__ = (
"domain",
"address",
"statement",
"uri",
"version",
"chain_id",
"nonce",
"issued_at",
"expiration_time",
"expiration_time_parsed",
"not_before",
"not_before_parsed",
"request_id",
"resources",
)
def __init__(self, message: Union[str, dict] = None, abnf: bool = True):
if isinstance(message, str):
if abnf:
parsed_message = ABNFParsedMessage(message=message)
else:
parsed_message = RegExpParsedMessage(message=message)
message_dict = parsed_message.__dict__
elif isinstance(message, dict):
message_dict = message
else:
raise TypeError
for key in self.__slots__:
value = message_dict.get(key)
if key == "expiration_time" and value is not None:
self.expiration_time_parsed = isoparse(value)
elif key == "not_before" and value is not None:
self.not_before_parsed = isoparse(value)
setattr(self, key, value)
def prepare_message(self) -> str:
"""
Retrieve an EIP-4361 formatted message for signature.
:return: EIP-4361 formatted message, ready for EIP-191 signing.
"""
header = f"{self.domain} wants you to sign in with your Ethereum account:"
uri_field = f"URI: {self.uri}"
prefix = "\n".join([header, self.address])
version_field = f"Version: {self.version}"
if self.nonce is None:
self.nonce = generate_nonce()
chain_field = f"Chain ID: {self.chain_id or 1}"
nonce_field = f"Nonce: {self.nonce}"
suffix_array = [uri_field, version_field, chain_field, nonce_field]
if self.issued_at is None:
# TODO: Should we default to UTC or settle for local time? UX may be better for local
self.issued_at = datetime.now().astimezone().isoformat()
issued_at_field = f"Issued At: {self.issued_at}"
suffix_array.append(issued_at_field)
if self.expiration_time:
expiration_time_field = f"Expiration Time: {self.expiration_time}"
suffix_array.append(expiration_time_field)
if self.not_before:
not_before_field = f"Not Before: {self.not_before}"
suffix_array.append(not_before_field)
if self.request_id:
request_id_field = f"Request ID: {self.request_id}"
suffix_array.append(request_id_field)
if self.resources:
resources_field = "\n".join(
["Resources:"] + [f"- {resource}" for resource in self.resources]
)
suffix_array.append(resources_field)
suffix = "\n".join(suffix_array)
if self.statement:
prefix = "\n\n".join([prefix, self.statement])
else:
prefix += "\n"
return "\n\n".join([prefix, suffix])
def to_message(self) -> str:
warnings.warn("deprecated, use prepare_message()", DeprecationWarning)
return self.prepare_message()
def sign_message(self) -> str:
warnings.warn("deprecated, use prepare_message", DeprecationWarning)
return self.prepare_message()
def validate(
self, signature: str, *, provider: Optional[HTTPProvider] = None
) -> None:
"""
Validates the integrity of fields of this SiweMessage object by matching its signature.
:param signature: Signature to verify against the prepared message.
:param provider: A Web3 provider able to perform a contract check, this is required if support for Smart
Contract Wallets that implement EIP-1271 is needed.
:return: raises an appropriate Exception if there is a problem validating, otherwise None
"""
message = eth_account.messages.encode_defunct(text=self.prepare_message())
w3 = Web3(provider=provider)
missing = []
if message is None:
missing.append("message")
if self.address is None:
missing.append("address")
if len(missing) > 0:
raise MalformedSession(missing)
try:
address = w3.eth.account.recover_message(message, signature=signature)
except eth_utils.exceptions.ValidationError:
raise InvalidSignature("Message or signature are malformed")
if address != self.address:
raise InvalidSignature("Recovered address does not match message address")
# if not check_contract_wallet_signature(message=self, provider=provider):
# # TODO: Add error context
if (
self.expiration_time_parsed is not None
and datetime.now(UTC) >= self.expiration_time_parsed
):
raise ExpiredMessage
def check_contract_wallet_signature(message: SiweMessage, *, provider: HTTPProvider):
"""
Calls the EIP-1271 method for Smart Contract wallets,
:param message: The EIP-4361 parsed message
:param provider: A Web3 provider able to perform a contract check.
:return: True if the signature is valid per EIP-1271.
"""
raise NotImplementedError(
"siwe does not yet support EIP-1271 method signature verification."
)
alphanumerics = string.ascii_letters + string.digits
def generate_nonce() -> str:
return "".join(secrets.choice(alphanumerics) for _ in range(11))
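# Rough usage sketch (field values below are hypothetical, not part of this module):
#   message = SiweMessage({'domain': 'example.com', 'address': '0x...',
#                          'uri': 'https://example.com/login', 'version': '1', 'chain_id': 1})
#   text = message.prepare_message()   # EIP-4361 string to be signed via EIP-191
#   message.validate(signature)        # raises a ValidationError subclass on failure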
``` |
{
"source": "0xdanelia/scrycall",
"score": 3
} |
#### File: scrycall/source/scryparse.py
```python
import sys, time
import scrycache, scryapi, scryhelp
filecache = []
# get collection of json card objects based on query
def getcards(query, options):
url = scryapi.geturl(query)
return getdata(url, options)
# let any lingering threads finish their job
def waitforthreads():
for cache in filecache:
if cache.writethread != None:
cache.writethread.join()
# extract json from local cache or api call
def getdata(url, options):
global filecache
# don't bother creating a cache object if we don't need to access the cache
if not ('ignore-cache' in options and 'do-not-cache' in options):
cachefile = scrycache.CacheFile(url)
filecache.append(cachefile)
# check for an existing, non-expired cache file
dataloaded = False
if 'ignore-cache' not in options:
if cachefile.exists:
if 'cache-only' in options or not cachefile.isexpired():
try:
data = cachefile.read()
return data
except:
return []
# if data not cached, query the api
if 'cache-only' not in options:
try:
apidata = scryapi.getdata(url)
if 'data' in apidata:
data = apidata['data']
# iterate through multiple calls for large data sets
while apidata['has_more']:
apidata = scryapi.getdata(apidata['next_page'])
data = data + apidata['data']
else:
data = apidata
# write data to local cache for quicker access next time
if 'do-not-cache' not in options:
cachefile.write(data)
return data
except:
return []
return []
# return data[key] if it makes sense to do so
def keyval(data, key, ifnull):
try:
val = None
if key.isdigit():
key = int(key)
val = data[key]
if val != None:
return val
return ifnull
except:
return ifnull
# if data is iteratable, get the list of keys or indexes
def getkeys(data):
try:
if isinstance(data, dict):
keys = list(data.keys())
else:
keys = list(range(len(data)))
return keys
except:
return []
# parse command line for option flags, while everything else becomes the query
def parseargs(args, piped, options):
parsed = []
for i in range(1, len(args)):
arg = args[i]
if arg.startswith('--'):
parseoption(arg, options)
else:
# bash strips quotes from args, but we need them
if ' ' in arg:
arg = addquotes(arg)
parsed.append(arg)
if piped != '':
parsed.append(piped)
return ' '.join(parsed)
# do what the command line option is supposed to do
def parseoption(opt, options):
# print some helpful info
if opt == '--help' or opt == '--h':
scryhelp.printhelp()
elif opt == '--help-format':
scryhelp.printhelpformat()
# format the output and select card properties to print
elif opt.startswith('--format=') or opt.startswith('--f='):
options['format'] = opt[opt.index('=')+1:]
# select what to print if a card property is null or empty
elif opt.startswith('--null=') or opt.startswith('--n='):
options['null'] = opt[opt.index('=')+1:]
options['null-format'] = formatnull(options['null'])
# if True, only check the local cache instead of querying api
elif opt == '--cache-only' or opt == '--c':
options['cache-only'] = True
# if True, ignore existing cache and query for fresh data
elif opt == '--ignore-cache' or opt == '--i':
options['ignore-cache'] = True
# if True, do not write results to local cache
elif opt == '--do-not-cache' or opt == '--d':
options['do-not-cache'] = True
# delete expired cache files
elif opt == '--clean-cache':
scrycache.cleancache()
# delete entire cache
elif opt == '--delete-cache':
scrycache.deletecache()
# something unrecognized
else:
raise Exception('Invalid argument. Try --help for more information.')
# do our best to add quotes back into the query after bash strips them away
def addquotes(arg):
if arg.startswith('!'):
return '!"' + arg[1:] + '"'
elif ':' in arg:
idx = arg.index(':')
if idx < arg.index(' '):
return arg[:idx] + ':"' + arg[idx+1:] + '"'
return '"' + arg + '"'
# parse the custom format string into a list of tokens
def formatstring(formatstring):
tokens = []
string = formatstring
while string != '':
# card properties and special characters begin with '%'
if '%' in string:
p = string.index('%')
if p > 0:
tokens.append(string[:p])
token = string[p:p+2]
if token == '%[':
closebracket = string.find(']', p+3)
if closebracket < 0:
raise Exception('Invalid format. Try --help-format for more information.')
token = string[p:closebracket+1]
string = string[closebracket+1:]
else:
token = tokenreplace(token)
string = string[p+2:]
tokens.append(token)
# all other text will be printed as-is
else:
tokens.append(string)
string = ''
return tokens
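# Illustrative tokenization (traced from the rules above):
#   formatstring('%n costs %m')       -> ['%name', ' costs ', '%mana_cost']
#   formatstring('%[prices;usd] USD') -> ['%[prices;usd]', ' USD']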
# the string used to replace null or empty properties needs formatting too
def formatnull(nullstring):
nulltokens = formatstring(nullstring)
for t in nulltokens:
if t == '%|' or (t.startswith('%[') and '*' in t):
raise Exception('Invalid null format. Try --help-format for more information.')
return nulltokens
# formatstring shortcuts are replaced with the actual json key
def tokenreplace(token):
if token == '%n':
return '%name'
if token == '%m':
return '%mana_cost'
if token == '%c':
return '%cmc'
if token == '%y':
return '%type_line'
if token == '%p':
return '%power'
if token == '%t':
return '%toughness'
if token == '%l':
return '%loyalty'
if token == '%o':
return '%oracle_text'
if token == '%f':
return '%flavor_text'
# %% is used to print a literal %, and %| divides output into columns
if token == '%%' or token == '%|':
return token
raise Exception('Invalid format. Try --help-format for more information.')
# if formatstring contains %[*], print a new string for each card parameter
def iterateformat(card, formatstring, options):
lines = []
# replace '%%' with a unique placeholder that does not appear in formatstring
percentplaceholder = '{PERCENT' + str(time.time()) + '}'
while percentplaceholder in formatstring:
percentplaceholder = '{PERCENT' + str(time.time()) + '}'
# any remaining % will indicate a card property
formatstring = formatstring.replace('%%', percentplaceholder)
# find a %[] that contains a *
toreplace = None
toparse = formatstring
while '%[' in toparse:
startbracket = toparse.index('%[')
endbracket = toparse.index(']', startbracket)
bracketed = toparse[startbracket:endbracket+1]
if '*' in bracketed:
toreplace = bracketed[:bracketed.index('*')+1]
break
toparse = toparse[endbracket+1:]
# return just the one string if no iterating is needed
if toreplace == None:
return [formatstring.replace(percentplaceholder, '%%')]
# jsonparse returns a list of keys when it encounters the *
keys = jsonparse(card, toreplace[2:].split(';'), options, [])
# print the nullstring instead of iterating if card property is null
if len(keys) == 0:
newstring = formatstring.replace(bracketed, options['null'])
lines = lines + iterateformat(card, newstring, options)
else:
# create a line for each key with the '*' replaced by the key
for key in keys:
newstring = formatstring.replace(toreplace, toreplace[:-1] + str(key))
# recursively check the new formatstring for any more '*'s to iterate
lines = lines + iterateformat(card, newstring, options)
return lines
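# Illustrative expansion with hypothetical card data: if card['colors'] is
# ['W', 'U'], then iterateformat(card, '%[colors;*]', options) yields one
# string per element, ['%[colors;0]', '%[colors;1]'], ready for printline().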
# replace any %X card properties in the nullstring for printing
def getifnull(card, options):
return printline(card, options['null-format'], options, '')[0]
# parse %[] for json keys and traverse the card data for the specific value
def jsonparse(card, keys, options, ifnull):
val = card
lastkey = ifnull
for k in range(len(keys)):
key = keys[k]
# print the name of the previous key
if key == '^':
val = lastkey
# print a list of available keys for the current object
elif key == '?':
val = sorted(getkeys(val))
# return list of keys for iteration
elif key == '*':
return getkeys(val)
else:
# print the value of val[key] from the card object
val = keyval(val, key, ifnull)
# if the value is an api web address, query it for more data to traverse
if isinstance(val, str) and val.startswith('https://') and key.endswith('uri'):
lastkey = val
val = getdata(val, options)
continue
lastkey = key
if val == '':
val = ifnull
return val
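# Illustrative traversals (card contents hypothetical):
#   jsonparse(card, ['name'], options, '')            -> the card's name
#   jsonparse(card, ['legalities', '?'], options, '') -> sorted list of legality keys
#   jsonparse(card, ['colors', '*'], options, '')     -> key list used for iteration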
# return the actual text that gets printed to the screen
def printline(card, params, options, ifnull):
toprint = []
cols = []
for param in params:
if param.startswith('%['):
toprint.append(str(jsonparse(card, param[2:-1].split(';'), options, ifnull)))
elif param == '%%':
toprint.append('%')
elif param == '%|':
cols.append(''.join(toprint))
toprint = []
elif param.startswith('%'):
toprint.append(str(keyval(card, param[1:], ifnull)))
else:
toprint.append(param)
# return a list containing each column to be printed
cols.append(''.join(toprint).replace('\n', '\t'))
return cols
``` |
{
"source": "0xDBFB7/covidinator",
"score": 2
} |
#### File: electronics/ngspice/oscillator_designer_2.py
```python
from time import sleep
import numpy as np
from scipy.signal import cheby1
from scipy.signal import find_peaks
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.optimize import minimize,basinhopping
import time
import math
from math import sin, cos, pi, sqrt,tan, atan
from pytexit import py2tex
import skrf as rf
from skrf.media import MLine
import subprocess
import os
import sys
from matplotlib.colors import SymLogNorm
#include store.py
import sys
sys.path.append('/home/arthurdent/covidinator/electronics/')
import store
import ngspyce
source_file = 'oscillator.cir'
ngspyce.source(source_file)
def run_sim(varactor_voltage):
ngspyce.cmd(f'alterparam varactor_bias_voltage = {varactor_voltage}')
ngspyce.cmd('reset')
# ngspyce.cmd('stop after 10000')
step_ps = 1 #not always obeyed - ngspice sets its own timestep.
sim_duration = 100000
n_steps = sim_duration/step_ps
# ngspyce.cmd(" ")
ngspyce.cmd(f'tran {step_ps}p {sim_duration}ps uic')
timesteps = ngspyce.vector('time')
v_collector = ngspyce.vector('v(E1)')
v_base = ngspyce.vector('v(Base)')
varactor_bias = ngspyce.vector('v(Vvaractor)')
output = ngspyce.vector('v(output)')
stable_running_point = -1*len(v_collector)//3
v_collector_trimmed = v_collector[stable_running_point:] # lots of noise on startup. we want to trim that out of the FFT.
spectrum = np.fft.fft(v_collector_trimmed)
spectrum_freqs = np.fft.fftfreq(len(v_collector_trimmed), d=(timesteps[-1]-timesteps[stable_running_point])/len(v_collector_trimmed))
spectrum_begin_indice = np.abs(spectrum_freqs - 100e6).argmin()
spectrum_end_indice = np.abs(spectrum_freqs - 25e9).argmin()
#normalize
spectrum_norm = np.linalg.norm(spectrum[spectrum_begin_indice:spectrum_end_indice].clip(min=0))
if(spectrum_norm):
fft_cleaned = spectrum[spectrum_begin_indice:spectrum_end_indice].clip(min=0)/spectrum_norm
else:
fft_cleaned = np.zeros_like(spectrum[spectrum_begin_indice:spectrum_end_indice])
spectrum_freqs = spectrum_freqs[spectrum_begin_indice:spectrum_end_indice]
fft_cleaned = fft_cleaned[:int(600)] # trim all spectra to the same length 2ps,800, 5ps, 600
spectrum_freqs = spectrum_freqs[:int(600)]
return [np.array(np.abs(fft_cleaned)), np.array(spectrum_freqs), timesteps, v_collector, v_base, varactor_bias, output]
spectra = []
values = []
# values = []
# NUM_SPECTRA = 30
NUM_SPECTRA = 20
for i,v in enumerate(np.linspace(0, 20, NUM_SPECTRA)):
values.append(run_sim(v))
spectrum = values[i][0]
if(not len(spectra)):
spectra = spectrum.reshape(-1, 1)
else:
spectra = np.append(spectra, spectrum.reshape(-1, 1), axis=1)
# spectra = np.append( np.array(spectra), spectrum, axis=0)
# spectra_freqs = np.append( np.array(spectra_freqs), spectrum_freqs, axis=0)
# values = np.append( np.array(values), np.array([v]*len(spectrum)), axis=0)
# print(spectra)
#
spectrum_freqs = values[0][1]
spectra = np.repeat(spectra, repeats=20, axis=1) # make each slice wide enough to be visible
fig,(ax1) = plt.subplots(1,1)
ax1.imshow(np.transpose(spectra), norm=SymLogNorm(0.1))
tick_num = 10
idx = np.round(np.linspace(0, len(spectra) - 1, tick_num)).astype(int)
plt.title("Collector freq. waterfall against varactor voltage")
ax1.set_xticks(idx)
ax1.set_xticklabels(["{:1.1f}e9".format(i/1.0e9) for i in spectrum_freqs[idx]])
# ax1.set_yticks(idx)
ax1.set_yticklabels(["{:1.1f}".format(i) for i in values[0][:][5]])
plt.draw()
plt.pause(0.001)
# np.savetxt("/tmp/data.csv", np.append(np.append(spectra_freqs.reshape(-1, 1), spectra.reshape(-1, 1), axis=1), values.reshape(-1, 1),axis=1) , delimiter=",",fmt='%10.5f')
#os.system('gnuplot plot.gpl')
fig.savefig('/tmp/spectrum.svg')
plt.figure()
for i in values[0], values[-1]:
spectrum = i[0]
spectrum_freqs = i[1]
plt.subplot(2,2,1)
plt.title("Collector freq. spectrum")
plt.plot(spectrum_freqs,spectrum)
plt.ticklabel_format(style='sci', axis='x', scilimits=(9,9))
plt.ylabel("mag")
plt.xlabel("F (GHz)")
plt.subplot(2,2,2)
plt.title("Collector freq. spectrum")
plt.ylabel("V")
plt.xlabel("T (nanoseconds)")
plt.plot(i[2],i[3], label="Collector voltage")
plt.plot(i[2],i[5], label="Varactor voltage")
plt.plot(i[2],i[4], label="Base voltage")
plt.ticklabel_format(style='sci', axis='x', scilimits=(-9,-9))
plt.legend()
plt.subplot(2,2,3)
plt.title("ENHANCE collector waveform.")
plt.ylabel("V")
plt.xlabel("T (nanoseconds)")
plt.plot(i[2][-300:],i[3][-300:])
print(np.max(i[6][-1*(len(i[6])//3):]))#voutput
print(np.min(i[6][-1*(len(i[6])//3):]))
print(np.max(i[3][-300:]))
print(np.min(i[3][-300:]))#vcollector
plt.draw()
plt.pause(0.001)
plt.savefig('/tmp/plots.svg')
for i in values:
#https://www.rfcafe.com/references/electrical/pwr2volts.htm
avg_voltage = np.mean(i[6][-1*(len(i[6])//3):])
rms_voltage = np.sqrt(np.mean((i[6][-1*(len(i[6])//3):] - avg_voltage)**2.0))
    rms_output_power = rms_voltage**2.0 / 50.0  # P = Vrms^2 / R for a 50 ohm load
print("V, P: ",rms_voltage, rms_output_power)
spectrum_file = '/tmp/spectrum.svg'
plot_file = '/tmp/plots.svg'
source_file = 'oscillator_designer_2.py'
SPICE_file = 'oscillator.cir'
# spectrum_3d = '/tmp/3d_spectrum.png'
#save to lab notebook
files = [spectrum_file, source_file, SPICE_file, plot_file]
store.ask(files)
```
#### File: electronics/qucs/parse_result.py
```python
import re
import numpy as np
def parse_file(name):
file = open(name)
# the dict this function returns
data = {}
numpoints = 1
ind = 0
shape = []
variables = {}
for line in file:
#print line
if line.startswith('<'):
if line.startswith('<indep'):
#print line
r = re.match(r'\<(\w+) (\S+) (\d+)\>', line)
g = r.groups()
# there can be several independent variables -> numpoints keeps
# the total number of points
numpoints = numpoints * int(g[2])
name = g[1]
# reserve an array for the values
data[name] = np.zeros(int(g[2]))
ind = 0
# save the simulation points in an array
shape = np.append(shape, int(g[2]))
# save that this variable is independent
variables[name] = 'indep'
if line.startswith('<dep'):
#print line
r = re.match(r'\<dep (\S+)', line)
g = r.groups()
name = g[0]
# reserve a complex matrix to be on the safe side
data[name] = np.zeros(int(numpoints), np.complex128)
ind = 0
# store that this is a dependent variable
variables[name] = 'dep'
else:
jind = line.find('j')
if(jind == -1):
# real number -> just parse it
val = float(line)
else:
# complex number -> break into re/im part
val_re = line[0:jind-1]
sign = line[jind-1]
val_im = sign + line[jind+1:-1]
# and convert it into a complex number
val = complex(float(val_re), float(val_im))
# store the extracted datapoint
data[name][ind] = val
ind = ind + 1
data['variables'] = variables
# reverse the shape variable in order to get the reshape operation (see below)
# correct
shape = shape[::-1]
# here comes the clever trick :-)
# if a dependent variable depends on N > 1 (independent) variables,
# we reshape the vector we have obtained so far into an N-dimensional
# matrix
for key in data['variables']:
temp = data['variables'][key]
if temp == 'dep':
temp_data = data[key]
data[key] = np.reshape(temp_data, shape)
return data
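# Illustrative use (file name and variable names hypothetical):
#   data = parse_file('output.dat')
#   data['acfrequency']   # an independent sweep axis
#   data['Vout_v']        # a dependent variable, reshaped over the sweep axes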
```
#### File: chengxiang/rpi/functions.py
```python
import RPi.GPIO as GPIO
from time import sleep
import code
STEP_PIN = 7
DIR_PIN = 21
LIMIT_PIN = 22
TRIGGER_PIN = 11
ENABLE_PIN = 19 #might be 16
GPIO.setmode(GPIO.BOARD)
DEG_PER_STEP = 18.0
SCREW_PITCH = 0.5 # //mm
MICROSTEPS = 4
distance_per_step = ((DEG_PER_STEP / 360.0) * SCREW_PITCH);
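# With the constants above, one full step advances (18/360) * 0.5 mm = 0.025 mm
# of travel, i.e. 0.00625 mm per microstep at 4x microstepping.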
#
GPIO.setup(STEP_PIN, GPIO.OUT)
GPIO.setup(DIR_PIN, GPIO.OUT)
GPIO.setup(TRIGGER_PIN, GPIO.OUT)
GPIO.setup(ENABLE_PIN, GPIO.OUT)
GPIO.setup(LIMIT_PIN, GPIO.IN)
position = 0.0
def move_absolute(new_position,speed=(500 * 1e-6)):
global position
print("Moving to {} from {}\n".format(new_position, position))
delta = new_position - position
move_relative(delta < 0, abs(delta), speed=speed)
position = new_position
def move_relative(direction, distance, speed=(500 * 1e-6)):
GPIO.output(DIR_PIN, bool(direction))
GPIO.output(ENABLE_PIN, 0)
num_steps = distance/distance_per_step * MICROSTEPS;
# speed = 500 * 1e-6
for i in range(int(num_steps)):
GPIO.output(STEP_PIN, 1)
sleep(speed) #microseconds
GPIO.output(STEP_PIN, 0)
sleep(speed) #microseconds
GPIO.output(ENABLE_PIN, 1)
def home():
while(GPIO.input(LIMIT_PIN)):
move_relative(1,1)
GPIO.output(DIR_PIN, 0)
GPIO.output(ENABLE_PIN, 0)
while(not GPIO.input(LIMIT_PIN)):
GPIO.output(STEP_PIN, 1)
sleep(500 * 1e-6) #microseconds
GPIO.output(STEP_PIN, 0)
sleep(500 * 1e-6) #microseconds
GPIO.output(ENABLE_PIN, 1)
global position
position = 0
def pulse(enabled):
if(enabled):
GPIO.output(TRIGGER_PIN, 1)
sleep(1500 * 1e-6) #microseconds
GPIO.output(TRIGGER_PIN, 0)
```
#### File: runs/phage_experiment_10/phage_subtract_2.py
```python
import numpy as np
import matplotlib.pyplot as plt
from math import pi, sqrt, e, log, isclose, exp
import pickle
import scipy.integrate as integrate
valid_lines = []
with open('spectrum_empty_1.csv', "r") as f:
for line in f:
if(line.count(',') == 15):
valid_lines.append(line)
with open('spectrum_present_1.csv', "r") as f:
for line in f:
if(line.count(',') == 15):
valid_lines.append(line)
with open('spectrum_water_1.csv', "r") as f:
for line in f:
if(line.count(',') == 15):
valid_lines.append(line)
data = np.genfromtxt(valid_lines, delimiter=",", dtype=float, encoding='ascii', skip_header=0)
data = np.array(np.vsplit(data, np.ndarray.flatten(np.argwhere(data[:,0] == 11.95)+1)))[0:-1]
data = np.reshape(data, (3,5))
# data = np.mean(data, axis=1)
near_sensor = 1
far_sensor = 2
def freq_eq(x):
#HMC732 VCO approx equation
return 0.4794e9*x + 5.874e9
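# Quick sanity check of the fit above: freq_eq(0.0) = 5.874 GHz and
# freq_eq(10.0) ~= 10.668 GHz for tuning voltages of 0 V and 10 V respectively.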
freqs = freq_eq(data[0][0][:,0])
# plt.plot(freqs,data[0][1][:,far_sensor]/data[1][:,near_sensor])
# plt.plot(freqs,data[2][:,far_sensor]/data[2][:,near_sensor])
# plt.figure()
plt.plot(freqs,(data[1][0][:,far_sensor]/data[1][0][:,near_sensor])/(data[2][0][:,far_sensor]/data[2][0][:,near_sensor]))
plt.plot(freqs,(data[1][1][:,far_sensor]/data[1][1][:,near_sensor])/(data[2][1][:,far_sensor]/data[2][1][:,near_sensor]))
plt.plot(freqs,(data[1][2][:,far_sensor]/data[1][2][:,near_sensor])/(data[2][2][:,far_sensor]/data[2][2][:,near_sensor]))
plt.plot(freqs,(data[1][3][:,far_sensor]/data[1][3][:,near_sensor])/(data[2][3][:,far_sensor]/data[2][3][:,near_sensor]))
plt.plot(freqs,(data[1][4][:,far_sensor]/data[1][4][:,near_sensor])/(data[2][4][:,far_sensor]/data[2][4][:,near_sensor]))
# plt.plot(freqs, treated_phage[sample_post_treatment][:,near_sensor] - control[sample_post_treatment][:,near_sensor])
# plt.plot(freqs, treated_phage[sample_pre_treatment][:,near_sensor] - control[sample_pre_treatment][:,near_sensor])
# plt.plot(freqs, untreated_phage[sample_pre_treatment][:,near_sensor] - control[sample_pre_treatment][:,near_sensor])
# plt.plot(freqs, treated_phage[sample_post_treatment][:,near_sensor] - control[sample_post_treatment][:,near_sensor])
# plt.plot(freqs, untreated_phage[sample_post_treatment][:,near_sensor] - control[sample_post_treatment][:,near_sensor])
# plt.plot(freqs, treated_phage[sample_pre_treatment][:,near_sensor] - control[sample_pre_treatment][:,near_sensor])
plt.figure()
plt.plot(freqs,data[0][0][:,far_sensor])
plt.plot(freqs,data[1][0][:,far_sensor])
plt.show()
# plt.plot(freqs, (averaged_sic[:,1] - averaged_background[:,1]) / averaged_background[:,1],label="'Near' sensor, $\propto$S$_{11}$")
# plt.plot(freqs, (averaged_sic[:,2] - averaged_background[:,2]) / averaged_background[:,2],label="'Far' sensor, $\propto$S$_{21}$")
# plt.legend()
# plt.savefig("sic_9_1.svg")
# plt.title("Normalized silicon carbide susceptor spectrum")
# plt.ticklabel_format(style='sci', axis='x', scilimits=(9,9))
# plt.xlabel("Frequency (GHz)")
# plt.figure()
# plt.plot(freqs, averaged_sic[:,1],label="SiC susceptor, 'Near' sensor, $\propto$S$_{11}$")
# plt.plot(freqs, averaged_sic[:,2],label="SiC susceptor, 'Far' sensor, $\propto$S$_{21}$")
# plt.plot(freqs, averaged_background[:,1],label="Background, 'Near' sensor, $\propto$S$_{11}$")
# plt.plot(freqs, averaged_background[:,2],label="Background, 'Far' sensor, $\propto$S$_{21}$")
# plt.ticklabel_format(style='sci', axis='x', scilimits=(9,9))
# plt.legend()
# plt.xlabel("Frequency (GHz)")
# plt.ylabel("Voltage")
# plt.title("Raw detector voltage")
# plt.savefig("sic_9_2.svg")
#
# plt.legend()
# plt.show()
```
#### File: software/oscillator_designer/oscillator_designer.py
```python
import qucs.extract
from time import sleep
import numpy as np
from scipy.signal import cheby1
from scipy.signal import find_peaks
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.optimize import minimize,basinhopping
import time
import math
from math import sin, cos, pi, sqrt,tan, atan
from pytexit import py2tex
import skrf as rf
from skrf.media import MLine
import subprocess
import os
import sys
def mW_to_dBm(milliwatts):
return 10.0*math.log10(milliwatts)
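# e.g. mW_to_dBm(1.0) -> 0.0 dBm and mW_to_dBm(100.0) -> 20.0 dBm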
schematic_file = "/home/arthurdent/Projects/covidinator/electronics/qucs/optimize/optimize_filter_1.sch"
net_file = "/mnt/qucs-tmpfs/tmp.net"
net_file_modified = "/mnt/qucs-tmpfs/mod.net"
data_file = "/mnt/qucs-tmpfs/output.dat"
os.system("QUCSDIR=/usr/local/ /usr/local/bin/qucs -n -i " + schematic_file + " -o " + net_file)
np.set_printoptions(precision=4)
def electrical_to_physical_length(electrical_len, trace_width, frequency):
return (effective_wavelength(trace_width, substrate_height, substrate_dielectric_constant, frequency) \
* (electrical_len/(2.0*pi)))
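# NOTE: effective_wavelength(), substrate_height and substrate_dielectric_constant
# are expected to be provided by the surrounding project; they are not defined here.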
def run_sim(x, net_file, data_file):
# electrical_length_ratio = 0.625
# electrical_length_beta = - (electrical_length_alpha*electrical_length_ratio/(electrical_length_ratio-1.0))
# length_1 = electrical_to_physical_length(electrical_length_alpha, width_1, center_frequency)
# length_2 = electrical_to_physical_length(electrical_length_beta, width_2, center_frequency)
# print("Trying lengths: {:.4f} rad ({:.4f} m) | {:.4f} rad ({:.4f} m) ".format(electrical_length_alpha, length_1, beta, length_2))
with open(net_file, 'r') as file:
netlist = file.read()
scale_factors = np.ones_like(x)
scale_factors[0] *= 10.0
for i in range(0,len(x)):
netlist = netlist.replace('var_'+str(i), str(x[i]*scale_factors[i]))
with open(net_file_modified, 'w') as file:
file.write(netlist)
print("---------------- QUCSATOR RUN ----------------")
sim_return = subprocess.run(['qucsator', '-i', net_file_modified, '-o', data_file], stdout = subprocess.DEVNULL, check=True)
print("---------------- QUCSATOR FIN ----------------")
extracted_data = qucs.extract.load_data(data_file)
feedback_voltage = np.array([abs(i) for i in extracted_data.__dict__["Vfb_v"]])
return np.array(extracted_data.__dict__["acfrequency"]), feedback_voltage, np.array(extracted_data.__dict__["phase_shift"]),\
np.array(extracted_data.__dict__["Vout_v"])
#parasitic inductance can vary as required to build the varactor biasing circuit from discretes
# def capacitor_costs(x, coefficient):
# capacitor_indices = [1, 4] #
# return coefficient * sum([i % 0.5 for i in x[capacitor_values]]) # distance from nearest 0.5
def cost_function(x, retained_values, retained_indices, desired_center_frequency, display = False):
start = time.time()
x[retained_indices] = retained_values[retained_indices]
print("Trying: ", x)
frequency, feedback_voltage, phase_shift, output_amplitude = run_sim(x, net_file, data_file)
# feedback_voltage_peak_indices = find_peaks(feedback_voltage)[0]
# if(len(feedback_voltage_peak_indices) < 1):
# print("No peaks found, cost = 10")
# return 10.0
#
# fb_peak_values = feedback_voltage[feedback_voltage_peak_indices]
# sorted_indices = np.argsort(fb_peak_values)[::-1]
# fb_peak_values = fb_peak_values[sorted_indices]
# feedback_voltage_peak_indices = feedback_voltage_peak_indices[sorted_indices]
#
# fb_peak_frequencies = frequency[feedback_voltage_peak_indices]
#
# if(len(feedback_voltage_peak_indices) > 1):
# fb_peak_ratio = fb_peak_values[1]/fb_peak_values[0]
# else:
# fb_peak_ratio = 0.1
#
# phase_at_peak = phase_shift[feedback_voltage_peak_indices][0]
# freq_coeff = 1
# phase_coeff = 1.5
# ratio_coeff = 0.5
# insertion_loss_coeff = 0.2
#
# frequency_cost = freq_coeff * (abs(desired_center_frequency-fb_peak_frequencies[0])/1e9)
# phase_cost = phase_coeff * abs(1.0 - phase_at_peak)
# ratio_cost = ratio_coeff * fb_peak_ratio
# insertion_loss_cost = (1.0 - fb_peak_values[0])*insertion_loss_coeff
# cost = frequency_cost + phase_cost + fb_peak_ratio + insertion_loss_cost
cost = abs(1.0 - phase_shift[np.abs(frequency-desired_center_frequency).argmin()]) + \
1.0/(feedback_voltage[np.abs(frequency-desired_center_frequency).argmin()])
#
# end = time.time()
# if(display):
# print("Cost: {:.4f} (frequency: {:.4f} MHz ({:.4f} MHz desired), phase: {:.4f}, ratio: {:.4f}, |FB|: {:.4f}), took {:.4f} ms"
# .format(cost,fb_peak_frequencies[0]/1e6,
# desired_center_frequency/1e6,
# phase_at_peak,
# fb_peak_ratio, fb_peak_values[0],
# (end - start)*1000.0))
# else:
# print("Cost: ", cost)
return cost
#
# def sweep_cost(x, desired_frequency_range, varactor_capacitance_range):
#
# os.system('clear')
# # sys.stdout.flush()
#
# total_cost = 0
#
# # add "distance from standard value" cost!
#
# for i in range(0, len(varactor_capacitance_range)):
# total_cost += cost_function(x, desired_frequency_range[i], varactor_capacitance_range[i])
# sys.stdout.flush()
#
# print("\n")
# # sys.stdout.flush()
#
# return total_cost
def optimize(bounds, initial_guess, retained_values, retained_indices, desired_frequency, stochastic_iterations = 7, gradient_iterations = 4, polish_iterations = 10):
args = (retained_values, retained_indices, desired_frequency)
minimizer_kwargs = dict(method="L-BFGS-B", bounds=bounds, args=args, options={"disp":True, "maxiter":gradient_iterations})
tubthumper = basinhopping
ideal_values = tubthumper(cost_function, initial_guess, niter=stochastic_iterations, minimizer_kwargs=minimizer_kwargs, disp=True, niter_success=5)["x"]
# you may not like it, but this is the ideal
#then polish
ideal_values = minimize(cost_function, ideal_values, bounds=bounds, method="L-BFGS-B", args=args, options={"disp":True, "maxiter":polish_iterations})["x"]
return ideal_values
# could also run two sims at extremes and average retained inductance values
num_vars = 3
initial_guess = [1]*num_vars
bounds = [(0.2,10)]*num_vars
bounds[1] = (0.3,2) #SMV2019
# bounds[3] = (0.3,2)
# bounds[5] = (0.3,2)
# bounds[6] = (0.3,2)
# bounds[7] = (0.3,2)
# initial_guess[4] = 0.2
# initial_guess[3] = 0.2
#
# retained_values = np.array([])
# retained_indices = []
#
# frequency_sweep = [10e9,9e9,8e9,7e9]
#
# ideal_values = [[]]*len(frequency_sweep)
# ideal_values[0] = initial_guess
# ideal_values.append(initial_guess)
# import cProfile
# cProfile.run('optimize(bounds, initial_guess, retained_values, retained_indices, frequency_sweep[0] )')
# 28 s in load_data,
# 53 s in qucs,
# for i in range(0, len(frequency_sweep)):
# ideal_values[i] = (optimize(bounds, initial_guess, retained_values, retained_indices, frequency_sweep[i]))
#
# ideal_values[i] = np.array(np.round(ideal_values[i],1))
# retained_values = ideal_values[i]
# retained_indices = np.array([0,2])
##Sweep all varactor values, determine points with minimum distance to ideal frequency,
# ideal_values[0] = np.array([0.2, 0.3, 0.9, 1.3, 0.6, 0.3, 1. ])
#
# ideal_values[1] = np.array([0.2, 0.4, 0.9, 0.6, 0.6, 0.9, 1. ])
#
# print(ideal_values)
#
# # print('='*40)
# # print("Solution: ", ideal_value)
# # print('='*40)`
#
# frequencies = []
# phase_shifts = []
#
varactor_values = []
#
# N_interpolations = 2
#
#
# fig, ax1 = plt.subplots()
# ax2 = ax1.twinx()
# for i in range(0, len(frequency_sweep)):
# # freq = desired_frequency_range[0] + ((desired_frequency_range[-1]-desired_frequency_range[0])/N_interpolations)*i
# # varactor_capacitance = varactor_capacitance_range[0] + ((varactor_capacitance_range[-1]-varactor_capacitance_range[0])/N_interpolations)*i
# # varactor_values.append(varactor_capacitance)
# # print(varactor_capacitance)
#
# ideal_value = ideal_values[i]
#
# cost_function(ideal_value, retained_values, retained_indices, frequency_sweep[i], display = True)
#
# frequency, feedback_voltage, phase_shift, output_amplitude = run_sim(ideal_value, net_file, data_file)
# # np.concatenate([frequency,frequencies])
# np.concatenate([phase_shift,phase_shifts])
#
# # np.concatenate([[varactor_capacitance]*len(phase_shift),varactor_values])
# #plt.subplot(2, 1, 1, projection='3d')
# # plt.subplot(2, 1, 1)
# # plt.title("Phase shift (factor of 360 deg, including active device)")
#
# ax1.plot(frequency, phase_shift)
# # plt.xlabel("frequency")
# # plt.subplot(2, 1, 2)
# # plt.title("Feedback voltage")
# # plt.xlabel("frequency")
# ax2.plot(frequency, feedback_voltage)
retained_values = np.array([])
retained_indices = []
# fig, ax1 = plt.subplots()
# ax2 = ax1.twinx()
high_resistance = 500.0
low_resistance = 10.0
vfb = []
freq = []
code = []
# for i in range(0, 2**3):
try:
i=0
values = np.zeros(9)
# values[2] = 0.5
binary_code = [float(i) for i in list(bin(i)[2:])]
binary_code = [0]*(3-len(binary_code))+binary_code
binary_code = binary_code[::-1]
binary_code = np.array(binary_code)
values[3::2] = high_resistance - (binary_code*(high_resistance-low_resistance))
values[4::2] = low_resistance + (binary_code*(high_resistance-low_resistance))
values[1] = 0.3
frequency, feedback_voltage, phase_shift, output_amplitude = run_sim(values, net_file, data_file)
plt.subplot(4,2,1)
plt.title("Phase shift")
plt.plot(frequency, phase_shift)
plt.subplot(4,2,2)
plt.title("Feedback voltage")
plt.plot(frequency, feedback_voltage)
values[1] = 2
frequency, feedback_voltage, phase_shift, output_amplitude = run_sim(values, net_file, data_file)
plt.subplot(4,2,1)
plt.title("Phase shift")
plt.plot(frequency, phase_shift)
plt.subplot(4,2,2)
plt.title("Feedback voltage")
plt.plot(frequency, feedback_voltage)
for v in np.geomspace(0.05, 0.25, 10):
values[1] = v
# cost_function(values, retained_values, retained_indices, frequency_sweep[i], display = True)
frequency, feedback_voltage, phase_shift, output_amplitude = run_sim(values, net_file, data_file)
# np.concatenate([frequency,frequencies])
# np.concatenate([phase_shift,phase_shifts])
vfb.append(feedback_voltage[np.abs(phase_shift-1.0).argmin()])
freq.append(frequency[np.abs(phase_shift-1.0).argmin()])
code.append(i)
varactor_values.append(v)
except:
pass
# ax1.plot([0,frequency[-1]], [1,1], 'k-', lw=2) # line at phase = 1
# ax1.plot(vfb)
# ax2.plot(freq)
plt.subplot(4,2,3)
plt.plot(range(0,len(vfb)),vfb)
plt.title("Feedback voltage")
plt.subplot(4,2,4)
plt.plot(np.array(freq)[np.array(vfb) > 0.1])
plt.title("Frequency sweep")
plt.subplot(4,2,5)
plt.title("PIN switch values")
plt.plot(code)
plt.plot(varactor_values)
plt.savefig("/home/arthurdent/Downloads/export.png")
plt.show()
# for i in np.linspace(0.05, 2, 10):
# peak_freqs, peak_phases, peak_gains = freq_sweep([2.955, 0.45, 0.01, i])
# print(min(peak_phases))
#
# const = 1.0
```
#### File: software/turbidimeter_samples/turbidimeter.py
```python
import cv2
import numpy as np
from matplotlib import pyplot as plt
import serial
from time import sleep
import time
import sys
from scipy.signal import find_peaks
from matplotlib import pyplot as plt
# background_image = cv2.imread('my_photo-1.jpg',0)
# turbid_img = cv2.imread('my_photo-7.jpg',0)
# turbid_img_2 = cv2.imread('my_photo-8.jpg',0)
# clear_img = cv2.imread('my_photo-9.jpg',0)
# clear_img_2 = cv2.imread('my_photo-10.jpg',0)
#https://stackoverflow.com/questions/48482317/slice-an-image-into-tiles-using-numpy/48483743
# print(np.shape(clear_img))
f = open(sys.argv[2], "a")
ser = serial.Serial(sys.argv[1], 115200, timeout=2)
# detector = cv2.QRCodeDetector()
def blockshaped(arr, nrows, ncols):
"""
Return an array of shape (n, nrows, ncols) where
n * nrows * ncols = arr.size
If arr is a 2D array, the returned array should look like n subblocks with
each subblock preserving the "physical" layout of arr.
"""
h, w = arr.shape
    assert h % nrows == 0, "{} rows is not evenly divisible by {}".format(h, nrows)
    assert w % ncols == 0, "{} cols is not evenly divisible by {}".format(w, ncols)
return (arr.reshape(h//nrows, nrows, -1, ncols)
.swapaxes(1,2)
.reshape(-1, nrows, ncols))
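# Illustrative call matching the 1280x720 capture below:
#   blockshaped(np.zeros((720, 1280)), 144, 160).shape == (40, 144, 160)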
while True:
# while(ser.readline() == ""):
# pass
#
for x in range(0, 100):
inp = ser.readline()
while(inp == b''):
inp = ser.readline()
pass
ser.reset_input_buffer()
cap = cv2.VideoCapture(0)
cap.set(3,1280)
cap.set(4,720)
cap.set(cv2.CAP_PROP_EXPOSURE,-4)
ret,img_colored = cap.read()
img = img_colored[:,:,0]
cap.release()
# plt.imshow(img)
chunks = blockshaped(img, 144, 160)
t = []
for j in chunks:
i = np.fft.fft2(j)
t.append(np.linalg.norm(i[:,0:np.shape(i)[1]//4])/np.linalg.norm(i[:,np.shape(i)[1]//4:-1]))
value = np.max(t)
flag = cv2.inRange(img_colored, np.array([0, 0, 127]), np.array([50, 50, 255]))#bgR
plt.imshow(flag, cmap='gray')
# flag = np.mean(np.ndarray.astype(img_colored[600:-1,:,0],np.float) - np.ndarray.astype(img_colored[600:-1,:,1],np.float) )
print(np.linalg.norm(flag))
f.write("{},{},{},{}\n".format(time.time(),int(inp),value,np.linalg.norm(flag)))
f.flush()
print(value)
plt.draw()
plt.pause(0.1)
plt.clf()
# if(not ser.readline() == ""):
# print("next")
# continue
#arg 1 is serial port, arg 2 is file to append to.
# python -u read_turbidimeter.py /dev/ttyUSB5 phage_experiment_3/
# import pygame
# import pygame.camera
# pygame.camera.init()
# cam = pygame.camera.Camera("/dev/video0",(640,480))
# cam.start()
printer = serial.Serial(sys.argv[2], 250000, timeout=100)
# cuvette_no = 8
# input("Press enter when ready")
# while(True):
# while(
#remember: 1 is reversed!
# length =
# plt.plot(data)
# plt.show()
# for k in range(0, 2):
# for i in range(0, cuvette_no):
# input(f"Move to slide{k}, cuvette {i}, press enter when ready.")
# ser.reset_input_buffer()
# ser.readline()
# value = float(ser.readline())
# file.write("{},{},{}\n".format(time.time(), i, value))
# print(value)
# input(f"Move to cuvette {i}, turn light on, press enter when ready.")
# ser.reset_input_buffer()
# ser.readline()
# value = float(ser.readline())
# file.write("{},{},{},1\n".format(time.time(), i, value))
# print(value)
# img = cam.get_image()
# pygame.image.save(img,f"{sys.argv[2]}/{time.time()}-{k}-{i}.jpg")
```
#### File: dispersive_propagation_brillouin_precursors/old/propagation_lib.py
```python
from math import atan,sin, cos, pi, sqrt, e, log, isclose, exp
from scipy.optimize import curve_fit
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors, cm, pyplot as plt
from scipy.constants import epsilon_0, mu_0
import scipy.constants
from fdtd_PCB_extensions.tissue import cole_cole_4, complex_permittivity_to_er_and_sigma, electric_field_penetration_depth, get_tissue_cole_cole_coefficients, tissue_properties
from mpl_toolkits.mplot3d import Axes3D
def average(arr, n):
end = n * int(len(arr)/n)
return np.mean(arr[:end].reshape(-1, n), 1)
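# Illustrative: average(np.arange(10), 2) -> array([0.5, 2.5, 4.5, 6.5, 8.5])
# (trailing samples that do not fill a whole block of n are dropped).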
import unittest
#old, deprecated, see _numerical_optimize.
class propagator:
def __init__(self):
pass
def fourier_transform(self, input_data, sampling_frequency, frequency_scale_factor):
self.input_fft = np.fft.rfft(input_data)
self.a_n_coefficients = self.input_fft.real
self.b_n_coefficients = self.input_fft.imag
frequency_step = (sampling_frequency * frequency_scale_factor)/len(self.a_n_coefficients)
self.times = np.linspace(0, len(self.a_n_coefficients)*(1.0/(sampling_frequency*frequency_scale_factor)), len(self.a_n_coefficients))
self.mode_frequencies = np.linspace(frequency_step, (len(self.a_n_coefficients)/2)*frequency_step, len(self.a_n_coefficients))
def attenuate(self,distance):
attenuation = np.exp(-distance/self.E_penetration_depth)
self.a_n_coefficients*=attenuation
self.b_n_coefficients*=attenuation
def wavenumbers(self):
return self.mode_frequencies*2*pi*np.sqrt(mu_0*self.dielectric_constants*epsilon_0)
def populate_tissue_properties(self, tissue_id):
self.ef, self.sigma, self.deltas, self.alphas, self.taus = get_tissue_cole_cole_coefficients(tissue_id)
self.dielectric_constants = np.ones_like(self.mode_frequencies)
self.E_penetration_depth = np.ones_like(self.mode_frequencies)
for i in range(0, len(self.mode_frequencies)):
self.dielectric_constants[i], self.E_penetration_depth[i] = tissue_properties(self.mode_frequencies[i], self.ef, self.sigma, self.deltas, self.alphas, self.taus)
#
#
#
# a_n, b_n = attenuate(a_n, b_n, 1,0.5)
#
def fourier_sum(self, phase_shift):
# temporal_phase = np.outer(self.mode_frequencies*2*pi,t).T
# np.outer(,t)
# [1]<NAME>, <NAME>, <NAME>. Comments with reply, on \
# “Pulse propagation in a linear, causally dispersive medium” by <NAME>. Proceedings of the IEEE
# 1993;81:631–9. https://doi.org/10.1109/5.219349.
# phase = spatial_phase + temporal_phase + aux_phase
# temporal_phase = 1
# sum = self.a_n_coefficients*np.cos(phase) + self.b_n_coefficients*np.sin(phase)
# output = np.sum(sum, axis=1) #1
# output /= len(self.mode_frequencies)
complex_frequency = self.a_n_coefficients + (1j * self.b_n_coefficients)
complex_frequency *= np.cos(phase_shift) + 1j * np.sin(phase_shift)
output = np.fft.ifft(complex_frequency)
return output.real
def analytic_oscillator_phase(self, beta, resonant_frequency):
return -1.0 * np.arctan((2.0 * beta * 2*pi*self.mode_frequencies) /
((2*pi*resonant_frequency)**2.0 - (2*pi*self.mode_frequencies)**2.0))
#angular frequencies?
#mode_freq is not correct
def analytic_oscillator_scaling(self, beta, resonant_frequency):
p1 = ((2*pi*resonant_frequency)**2.0 - (2*pi*self.mode_frequencies)**2.0)**2.0
p2 = (4.0 * (beta**2.0) * (2*pi*self.mode_frequencies)**2.0)
#numpy uses weird normalization
# undo_normalization = len(self.mode_frequencies ** 2.0)
return 1/np.sqrt(p1+p2)
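    # The two methods above are the steady-state response of a driven, damped
    # harmonic oscillator:
    #   phase(w)     = -atan( 2*beta*w / (w0^2 - w^2) )
    #   amplitude(w) =  1 / sqrt( (w0^2 - w^2)^2 + 4*beta^2*w^2 )
    # with w = 2*pi*f and w0 = 2*pi*resonant_frequency.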
# def dU_dx(U, t, E_field, charge):
# # Here U is a vector such that y=U[0] and z=U[1]. This function should return [y', z']
# k = *8.9e9
# return [U[1], -2*U[1] - 2*U[0] + E_field*charge]
def standard_precursor_burst():
input_frequency = 1000.0e6
sampling_frequency = 1000.0*input_frequency
frequency_scale_factor = 1
times = np.linspace(0, 100*1.0/input_frequency, 100*1000)
# times = np.tile(times, 5)
input_data = np.zeros_like(times, dtype=np.float64)
input_data[:5000] = np.sin(2.0*pi*times*input_frequency)[0:5000]
class test_fourier(unittest.TestCase):
# def test_sine_fourier(self):
#
# sampling_frequency = 1000000
# input_frequency = 1000.0
# frequency_scale_factor = 1.0e6
# times = np.linspace(0, 1.0, sampling_frequency)
# input_data = np.zeros_like(times, dtype=np.float32)
# input_data = np.sin(2.0*pi*times*1000.0)
#
# p = propagator()
# p.fourier_transform(input_data, sampling_frequency, frequency_scale_factor)
#
# # plt.plot(p.mode_frequencies,p.a_n_coefficients)
# # plt.show()
# print(p.a_n_coefficients)
# print(p.mode_frequencies)
# print(times[-1])
def test_fourier_reconstruction(self):
sampling_frequency = 500000.0
input_frequency = 1000.0
frequency_scale_factor = 1
times = np.linspace(0, 1.0, int(sampling_frequency), dtype=np.float64)
times = np.tile(times, 5)
input_data = np.zeros_like(times, dtype=np.float64)
#input_data[:500] = np.sin(2.0*pi*times*input_frequency)[0:500]
#input_data = np.tile(input_data, 2)
input_data = np.sin(2.0*pi*times*input_frequency)
# filename = 'globalsave.pkl'
# try:
# dill.load_session(filename)
# except:
# os.system("rm dumps/*")
# os.system("rm data/*")
# voltages, currents, currents_2 = sim_VSWR(pcb)
# dill.dump_session(filename)
#plt.plot(input_data)
#plt.show()
p = propagator()
p.fourier_transform(input_data, sampling_frequency, frequency_scale_factor)
depth_in_tissue = 0.2
# p.populate_tissue_properties(48)
# spatial_phase = p.wavenumbers() * depth_in_tissue
spatial_phase = np.zeros_like(p.mode_frequencies,dtype=np.float64)
#p.attenuate(depth_in_tissue)
output = p.fourier_sum(spatial_phase)
#assert(output - input == 0)
# plt.plot(p.times, output)
# plt.plot(p.times, input_data)
#Damping ratio: dimensionless number
#Damping constant:
osc_freq = 1000.0
# k = 1e-7
# charge = 1e7 * k
#turn the E field into a force
# p.a_n_coefficients *= charge
# p.b_n_coefficients *= charge
Q_factor = 100.0
# damping_force_coefficient = #N s/m
# mass = 120 * 1.66e-21 #kg
effective_mass = 120 * 1.66e-21 #kg #fix!
# beta = damping_force_coefficient/(2*effective_mass)
# beta = (osc_freq * 2*pi) / (2.0*Q_factor)
beta = 0.00000000000000001
# beta =/
# analytic_oscillator_scaling
print(p.analytic_oscillator_scaling(0.1, 1.0))
#beta is an exponential time constant, units /s.
print(p.mode_frequencies)
scale_factor = p.analytic_oscillator_scaling(beta, osc_freq)
print(scale_factor)
# plt.plot(p.mode_frequencies, p.a_n_coefficients)
plt.plot(p.mode_frequencies, scale_factor*p.a_n_coefficients)
plt.show()
#
p.a_n_coefficients *= scale_factor
p.b_n_coefficients *= scale_factor
osc_phase = p.analytic_oscillator_phase(beta, osc_freq)
oscillator_output = p.fourier_sum(spatial_phase + osc_freq)
#
# # oscillator_output /= 1e-12
#
plt.plot(p.times, oscillator_output)
plt.show()
if __name__ == '__main__':
unittest.main()
```
#### File: subprojects/dispersive_propagation_brillouin_precursors/propagation_simple_sawtooth.py
```python
from math import sin, cos, pi, sqrt, e, log, isclose, exp
from scipy.optimize import curve_fit
from scipy.signal import sawtooth
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors, cm, pyplot as plt
from scipy.constants import epsilon_0, mu_0
import scipy.constants
from fdtd_PCB_extensions.tissue import cole_cole_4, complex_permittivity_to_er_and_sigma, electric_field_penetration_depth
from mpl_toolkits.mplot3d import Axes3D
from scipy.optimize import minimize, basinhopping
from fdtd_PCB_extensions.tissue import cole_cole_4, get_tissue_cole_cole_coefficients
import pickle
import dill
import sys
from fdtd_PCB_extensions import X,Y,Z, normalized_gaussian_pulse, normalized_gaussian_derivative_pulse
import os
import sys
sys.path.append('/home/arthurdent/covidinator/electronics/')
import store
# rm pretty_output_*.csv && python propagation_simple_sawtooth.py && gnuplot plot_spectrum.plt
muscle_id = 48
lung_id = 41
#cole-cole refractive index
ef, sigma, deltas, alphas, taus = get_tissue_cole_cole_coefficients(muscle_id)
c0 = 3e8
# '''
# See "Compilation of the dielectric properties of body tissues at RF and microwave frequencies.", Gabriel 1996
# Equation 3, page 12.
# '''
def normalized_gaussian_pulse(t,fwhm):
#todo: replace the versions in sources.py with this class-agnostic and np version.
sigma = fwhm/2.355
return np.exp(-((t**2.0)/(2.0*(sigma**2.0))))
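# (2.355 ~= 2*sqrt(2*ln 2), the usual FWHM-to-sigma conversion for a Gaussian)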
def propagate(F, n, omega, z, oscillator=True):
frequency_domain = np.fft.fft(F)
#filter
# frequency_domain[(omega/(2.0*pi)) < 1e9] = 0
frequency_domain[(omega/(2.0*pi)) > 300e9] = 0
propagated = frequency_domain * np.exp(-1j*(omega/c0)*n*z)
# watch the sign here - fix if needed
return np.fft.ifft(propagated)
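# Each spectral component above acquires the dispersive phase exp(-1j*k(w)*z)
# with k(w) = (w/c0)*n(w); attenuation enters through the imaginary part of n
# (sign convention as flagged in the comment above).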
sf = 30e9 * 2 * pi
duration = 30e-10
samples = int(np.ceil(duration * sf * 2.0 * 5.0))
times = np.linspace(-duration,duration,samples)
F=np.ones(samples)
omega = 2*pi*np.fft.fftfreq(samples)*(samples/(duration*2))
print(f"Samples: {samples} | < 1e10: {np.count_nonzero(omega < 1e10)}")
x = [sf]
s = np.array(x)
correction_speed = np.sqrt(cole_cole_4(s/(2.0*pi), ef, sigma, deltas, alphas, taus))*c0
n = np.sqrt(cole_cole_4(omega/(2.0*pi), ef, sigma, deltas, alphas, taus)) #precompute cole-cole refractive index
n[omega == 0] = 1
# F[len(F)//2:-1] = 0
# F = sawtooth(times*f)
# output = propagate(F, omega, z)
os.system("rm pretty_output_*.csv")
#"method": "Nelder-Mead",
# output = basinhopping(cost_function, F, minimizer_kwargs={"args":(omega, z)}, disp=True)['x']
# output = F
depths = 30
max_depth = 0.08
# snippet_samples = samples // 2 # samples in the middle
beginning_samples = 0
end_samples = samples
snippet_samples = end_samples - beginning_samples
bigtable = np.zeros(((depths*snippet_samples), 3))
plot_indexes = [29]
angular_f = 10e9 * 2 * pi
ylim = 0.02
# F = np.sin(times*f)
F = normalized_gaussian_pulse(times,1/(2*pi*10e9))
for idx,z in enumerate(np.linspace(0, max_depth, depths)):
output = propagate(F, n, omega, z)
if(idx == 0):
plt.subplot(2,4,1)
plt.plot(times, output,'r')
if(idx in plot_indexes):
plt.subplot(2,4,2)
plt.ylim((-ylim,ylim))
plt.plot(times, output,'r')
bigtable[(idx*snippet_samples):((idx+1)*snippet_samples),0] = times[beginning_samples:end_samples]
bigtable[(idx*snippet_samples):((idx+1)*snippet_samples),1] = z
bigtable[(idx*snippet_samples):((idx+1)*snippet_samples),2] = output[beginning_samples:end_samples]
with open("pretty_output_1.csv", "ab") as fi:
np.savetxt(fi, bigtable[(idx*snippet_samples):((idx+1)*snippet_samples),:], delimiter=",")
fi.write(b"\n\n")
F[:] = 0
F[200*3:800*3] = sawtooth(times[200*3:800*3] * angular_f)
for idx,z in enumerate(np.linspace(0, max_depth, depths)):
output = propagate(F, n, omega, z)
if(idx == 0):
plt.subplot(2,4,3)
plt.plot(times, output,'g')
if(idx in plot_indexes):
plt.subplot(2,4,4)
plt.ylim((-ylim,ylim))
plt.plot(times, output,'g')
bigtable[(idx*snippet_samples):((idx+1)*snippet_samples),0] = times[beginning_samples:end_samples]
bigtable[(idx*snippet_samples):((idx+1)*snippet_samples),1] = z
bigtable[(idx*snippet_samples):((idx+1)*snippet_samples),2] = output[beginning_samples:end_samples]
with open("pretty_output_2.csv", "ab") as fi:
np.savetxt(fi, bigtable[(idx*snippet_samples):((idx+1)*snippet_samples),:], delimiter=",")
fi.write(b"\n\n")
F[:] = 0
F[200*3:800*3] = np.sin(times[200*3:800*3]*angular_f)
# F[200:400] = -np.sin(times[200:400]* 1e9 * 2 * pi)
for idx,z in enumerate(np.linspace(0, max_depth, depths)):
output = propagate(F, n, omega, z)
if(idx == 0):
plt.subplot(2,4,5)
plt.plot(times, output,'b')
if(idx in plot_indexes):
plt.subplot(2,4,6)
plt.ylim((-ylim,ylim))
plt.plot(times, output,'b')
bigtable[(idx*snippet_samples):((idx+1)*snippet_samples),0] = times[beginning_samples:end_samples]
bigtable[(idx*snippet_samples):((idx+1)*snippet_samples),1] = z
bigtable[(idx*snippet_samples):((idx+1)*snippet_samples),2] = output[beginning_samples:end_samples]
with open("pretty_output_3.csv", "ab") as fi:
np.savetxt(fi, bigtable[(idx*snippet_samples):((idx+1)*snippet_samples),:], delimiter=",")
fi.write(b"\n\n")
F[:] = 0
F = np.sin(times*angular_f)
for idx,z in enumerate(np.linspace(0, max_depth, depths)):
output = propagate(F, n, omega, z)
if(idx == 0):
plt.subplot(2,4,7)
plt.plot(times, output,'y')
if(idx in plot_indexes):
plt.subplot(2,4,8)
plt.ylim((-ylim,ylim))
plt.plot(times, output,'y')
bigtable[(idx*snippet_samples):((idx+1)*snippet_samples),0] = times[beginning_samples:end_samples]
bigtable[(idx*snippet_samples):((idx+1)*snippet_samples),1] = z
bigtable[(idx*snippet_samples):((idx+1)*snippet_samples),2] = output[beginning_samples:end_samples]
with open("pretty_output_4.csv", "ab") as fi:
np.savetxt(fi, bigtable[(idx*snippet_samples):((idx+1)*snippet_samples),:], delimiter=",")
fi.write(b"\n\n")
plt.draw()
plt.pause(0.001)
plt.savefig("propagated_waveforms_detail.svg")
os.system("gnuplot plot_spectrum.plt")
os.system("eog propagated_waveforms.svg")
files = ['propagation_simple_sawtooth.py', "propagated_waveforms.svg"]
store.ask(files)
```
#### File: molecular_dynamics/james/james.py
```python
import numpy as np
# np.zeros() needs a shape; the particle count is not defined in this stub,
# so use an empty placeholder (assumption) until the system size is known.
forces = np.zeros(0)
potentials = np.zeros(0)
#
#
# import unittest
#
# class TestStringMethods(unittest.TestCase):
#
# def test_upper(self):
# self.assertEqual('foo'.upper(), 'FOO')
``` |
{
"source": "0xDBFB7/hargrave",
"score": 3
} |
#### File: hargrave_language_bindings/python/hargrave.py
```python
def save_file():
# what if the program exits early? we want to save the output log...
# need some kind of finally check?
files = {'upload_file': open('file.txt','rb')}
values = {'DB': 'photcat', 'OUT': 'csv', 'SHORT': 'short'}
r = requests.post(url, files=files, data=values)
```
#### File: hargrave/hargrave_types/time.py
```python
from hargrave_base.database.db import *
class time:
def html():
pass
def edit_html():
pass
def backend():
pass
def db_columns():
        # `wordColumns` is assumed to be supplied elsewhere in the project.
        return [
            Column('id', DateTime, primary_key=True),
            *(Column(wordCol, Unicode(255)) for wordCol in wordColumns),
        ]
def __init__():
# register_hook
pass
``` |
{
"source": "0xDBFB7/ionprinter",
"score": 3
} |
#### File: simulation/DFTBA/AGM_elliptic.py
```python
import math
MEAN_TOLERANCE = 1.e-10
def ag_mean(a,b,tolerance):
'''
Computes arithmetic-geometric mean of A and B
https://scipython.com/book/chapter-2-the-core-python-language-i/questions/the-arithmetic-geometric-mean/
'''
while abs(a-b) > tolerance:
a, b = (a + b) / 2.0, math.sqrt(a * b)
return b
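# Illustrative: ag_mean(24, 6, 1e-7) converges to ~13.458 in a handful of
# iterations, each replacing (a, b) by their arithmetic and geometric means.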
def first_kind_elliptic_integral(k):
'''
Calculate K(k) - okay?
https://dlmf.nist.gov/19
§19.8(i) Gauss’s Arithmetic-Geometric Mean (AGM)
'''
return (math.pi/2.0)/ag_mean(1,math.sqrt(1.0-(k**2.0)), MEAN_TOLERANCE)
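# Illustrative: first_kind_elliptic_integral(0.0) -> pi/2 ~= 1.5708, since the
# AGM of 1 and 1 is 1.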
def compute_big_M(m):
'''
Prerequisite for E(m)
'''
return (math.pi/(2.0*first_kind_elliptic_integral(m)*math.sqrt(1-m**2.0)))
def compute_delta(m):
'''
Prerequisite for E(m)
'''
return (1.0/math.sqrt(1.0-(m**2.0)))
def differentiate_delta_over_M(m):
'''
Another prerequisite for E(m)
Numerically differentiate the quotient delta/M
'''
dm=1.0e-6
return (((compute_delta(m+dm)/compute_big_M(m+dm))-(compute_delta(m)/compute_big_M(m)))/dm)
def second_kind_elliptic_integral(m):
'''
Calculate E(m)inem
E says blow, we increase emittance
receive remittance
percieve:
society's been enhanced
oh we got them in a trance
science's now advanced
in common parlance:
dance
From @ kurzweg2012
Efficiency could be improved by eliminating duplicate calls of big_M and compute_delta(m)
Currently slightly broken.
'''
return (math.pi/2.0)/((1.0/(compute_big_M(m)*compute_delta(m))) + (m*differentiate_delta_over_M(m)))
class TestAll(unittest.TestCase):
def test_ag_mean(self):
self.assertAlmostEqual(ag_mean(24,6,1e-7), 13.458, places=3)
def test_first_kind_elliptic_integral(self):
self.assertAlmostEqual(first_kind_elliptic_integral(0.5), 1.685751, places=3)
self.assertAlmostEqual(first_kind_elliptic_integral(0), 1.57079632, places=3)
def test_big_M(self):
self.assertAlmostEqual(compute_big_M(0.5), 1.07595, places=3)
def test_differentiate_delta_over_M(self):
self.assertAlmostEqual(differentiate_delta_over_M(0), 1.6, places=3)
self.assertAlmostEqual(differentiate_delta_over_M(0.9), 1.6, places=3)
def test_second_kind_elliptic_integral(self):
self.assertAlmostEqual(second_kind_elliptic_integral(0), 1.5707963, places=3)
self.assertAlmostEqual(second_kind_elliptic_integral(0.9), 1.15, places=3)
```
#### File: simulation/resistance_slicer/resistance.py
```python
import os
import numpy as np
import numpy.linalg as la
import itertools
import utils
import ply
import scipy.spatial.distance as spdist
import stl
import meshcut
from shapely.geometry import Polygon, Point, LineString
import sys
import mayavi.mlab as mlab
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
##
def merge_close_vertices(verts, faces, close_epsilon=1e-5):
"""
Will merge vertices that are closer than close_epsilon.
Warning, this has a O(n^2) memory usage because we compute the full
vert-to-vert distance matrix. If you have a large mesh, might want
to use some kind of spatial search structure like an octree or some fancy
hashing scheme
Returns: new_verts, new_faces
"""
# Pairwise distance between verts
D = spdist.cdist(verts, verts)
# Compute a mapping from old to new : for each input vert, store the index
# of the new vert it will be merged into
    old2new = np.zeros(D.shape[0], dtype=int)
# A mask indicating if a vertex has already been merged into another
    merged_verts = np.zeros(D.shape[0], dtype=bool)
new_verts = []
for i in range(D.shape[0]):
if merged_verts[i]:
continue
else:
# The vertices that will be merged into this one
merged = np.flatnonzero(D[i, :] < close_epsilon)
old2new[merged] = len(new_verts)
new_verts.append(verts[i])
merged_verts[merged] = True
new_verts = np.array(new_verts)
# Recompute face indices to index in new_verts
    new_faces = np.zeros((len(faces), 3), dtype=int)
for i, f in enumerate(faces):
new_faces[i] = (old2new[f[0]], old2new[f[1]], old2new[f[2]])
# again, plot with utils.trimesh3d(new_verts, new_faces)
return new_verts, new_faces
def load_stl(stl_fname):
m = stl.mesh.Mesh.from_file(stl_fname)
# Flatten our vert array to Nx3 and generate corresponding faces array
verts = m.vectors.reshape(-1, 3)
faces = np.arange(len(verts)).reshape(-1, 3)
verts, faces = merge_close_vertices(verts, faces)
return verts, faces
verts, faces = load_stl(sys.argv[1])
mesh = meshcut.TriangleMesh(verts, faces)
for x_slice in np.arange(0,10,0.1):
plane_orig = (x_slice, 0, 0)
plane_norm = (x_slice, 1, 0)
plane = meshcut.Plane(plane_orig, plane_norm)
plane_points = meshcut.cross_section_mesh(mesh, plane)
print(LineString(plane_points))
print(plane_points)
``` |
{
"source": "0xDBFB7/Nyion",
"score": 3
} |
#### File: Nyion/benchmarking/bastardized.py
```python
import numpy
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import math
SIZE_X = 32
SIZE_Y = 32
SIZE_Z = 32
u = numpy.zeros((SIZE_X, SIZE_Y, SIZE_Z))
b = numpy.zeros((SIZE_X, SIZE_Y, SIZE_Z))
f = numpy.zeros((SIZE_X, SIZE_Y, SIZE_Z))
for x in range(10,20):
for y in range(10,20):
u[x,y,8] = 100
b[x,y,8] = 1
def gauss_seidel(U,B,F,theta):
rows = U.shape[0]
cols = U.shape[1]
z_size = U.shape[2]
for x in range(theta, (rows - theta)-1,theta):
for y in range(theta, (cols - theta)-1,theta):
for z in range(theta, (z_size - theta)-1,theta):
if(not B[x,y,z]):
U[x,y,z] = (U[x+theta,y,z] +
U[x-theta,y,z] +
U[x,y+theta,z] +
U[x,y-theta,z] +
U[x,y,z+theta] +
U[x,y,z-theta] + F[x,y,z])/6.0
def jacobi(U,T,B,F,theta):
rows = U.shape[0]
cols = U.shape[1]
z_size = U.shape[2]
for x in range(theta, (rows - theta)-1,theta):
for y in range(theta, (cols - theta)-1,theta):
for z in range(theta, (z_size - theta)-1,theta):
if(not B[x,y,z]):
T[x,y,z] = (U[x+theta,y,z] +
U[x-theta,y,z] +
U[x,y+theta,z] +
U[x,y-theta,z] +
U[x,y,z+theta] +
U[x,y,z-theta] +
U[x,y-theta,z-theta] +
U[x,y+theta,z+theta] + F[x,y,z])/8.0
def restriction(X, theta):
rows = X.shape[0]
cols = X.shape[1]
z_size = X.shape[2]
for x in range(theta, (rows - theta),theta):
for y in range(theta, (cols - theta),theta):
for z in range(theta, (z_size - theta),theta):
sum = 0
for i in range(0,theta):
for j in range(0,theta):
for k in range(0,theta):
sum += X[x+i,y+j,z+k]
if(not b[x,y,z]):
X[x,y,z] = sum
# def residual(U,R,F):
# rows = U.shape[0]
# cols = U.shape[1]
# z_size = U.shape[2]
# theta = 1
# for x in range(theta, (rows - theta),theta):
# for y in range(theta, (cols - theta),theta):
# for z in range(theta, (z_size - theta),theta):
# if(not b[x,y,z]):
# R[x,y,z] = F[x,y,z] + (U[x+1,y,z] + U[x-1,y,z] + U[x,y+1,z] + U[x,y-1,z] + U[x,y,z+1] + U[x,y,z-1] - 6.0*U[x,y,z])
def prolongate(X, theta):
rows = X.shape[0]
cols = X.shape[1]
z_size = X.shape[2]
for x in range(theta, (rows - theta),theta):
for y in range(theta, (cols - theta),theta):
for z in range(theta, (z_size - theta),theta):
V000 = X[x,y,z]
V001 = X[x,y,z+theta]
V010 = X[x,y+theta,z]
V100 = X[x+theta,y,z]
V101 = X[x+theta,y,z+theta]
V110 = X[x+theta,y+theta,z]
V111 = X[x+theta,y+theta,z+theta]
for i in range(0,theta):
for j in range(0,theta):
for k in range(0,theta):
if(not b[x+i,y+j,z+k]):
f_x = float(i)/theta
f_y = float(j)/theta
f_z = float(k)/theta
X[x+i,y+j,z+k] = 0
X[x+i,y+j,z+k] += V000*(1.0-f_x)*(1.0-f_y)*(1.0-f_z)
X[x+i,y+j,z+k] += V001*(1.0-f_x)*(1.0-f_y)*(f_z)
X[x+i,y+j,z+k] += V010*(1.0-f_x)*(f_y)*(1.0-f_z)
X[x+i,y+j,z+k] += V100*(f_x)*(1.0-f_y)*(1.0-f_z)
X[x+i,y+j,z+k] += V101*(f_x)*(1.0-f_y)*(f_z)
X[x+i,y+j,z+k] += V110*(f_x)*(f_y)*(1.0-f_z)
X[x+i,y+j,z+k] += V111*(f_x)*(f_y)*(f_z)
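# The eight V*** corner terms above are standard trilinear interpolation: each
# fine cell gets the coarse corner values weighted by (1-f) or f along each axis.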
# Precondition.
# for i in range(0,10):
# gauss_seidel(u,b,1)
convergence = []
ims = []
t = 0
c1 = 1
pos = 15
while True:
for i in range(0,10):
T1 = u.copy()
jacobi(u,T1,b,f,1)
u = T1.copy()
# Step 1: Residual Calculation.
v1 = u.copy()
T1 = u.copy()
jacobi(u,T1,b,f,1)
u = T1.copy()
# u=T.copy()
r = u - v1
# r = numpy.zeros((SIZE_X, SIZE_Y, SIZE_Z))
# residual(u,r,f)
# Step 2: Restriction.
res = [32,16,8,4,2,1]
v=0
T=0
T = numpy.zeros((SIZE_X, SIZE_Y, SIZE_Z))
v = numpy.zeros((SIZE_X, SIZE_Y, SIZE_Z))
for level in range(0,len(res),1):
resolution = res[level]
print(resolution)
r1 = r.copy()
# plt.subplot(4, 1, 1)
# plt.gca().set_title('Original')
# plt.imshow(r1[:,:,8])
if(res != 1):
restriction(r1,2*resolution)
# plt.subplot(4, 1, 2)
# plt.gca().set_title('Restricted')
# plt.imshow(r1[:,:,8])
for i in range(0,3*int(math.sqrt(res[level]))):
jacobi(v,T,numpy.zeros_like(v),r1,2*resolution)
v=T.copy()
# plt.subplot(4, 1, 3)
# plt.gca().set_title('Smoothed')
# plt.imshow(v[:,:,8])
if(res != 1):
prolongate(v,2*resolution)
# plt.subplot(4, 1, 4)
# plt.gca().set_title('Prolongated')
# plt.imshow(v[:,:,8])
# plt.draw()
# plt.pause(0.001)
# plt.savefig(str(t) + '.png')
#
u = u + (v * (1.0-b))
convergence.append(numpy.linalg.norm(r))
#
plt.subplot(2, 2, 1)
plt.gca().set_title('Potentials')
plt.imshow(u[:,:,8])
plt.subplot(2, 2, 2)
plt.gca().set_title('Residual')
plt.imshow(r[:,:,8])
plt.subplot(2, 2, 3)
plt.gca().set_title('Correction')
plt.imshow(v[:,:,8])
plt.subplot(2, 2, 4)
plt.yscale('log')
plt.gca().set_title('Convergence')
plt.plot(convergence)
t+=1
print("Residual: {} convergence factor: {} Step: {}".format(numpy.linalg.norm(r),numpy.linalg.norm(r)/c1,t))
c1 = numpy.linalg.norm(r)
if(numpy.linalg.norm(r) < 1e-3):
f[25,pos,8] = 10
pos+=1
#
plt.cla()
plt.draw()
plt.pause(0.001)
```
#### File: Nyion/benchmarking/test6.py
```python
import numpy
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import math
SIZE_X = 128
# epsilon_0 = 8.85×10^−12
u = numpy.zeros((SIZE_X))
b = numpy.zeros((SIZE_X))
# for x in range(10,20):
# u[x] = 1.0
# b[x] = 1.0
u[64] = 1.0
b[64] = 1.0
b[0] = 1.0
b[SIZE_X-1] = 1.0
def residual(u,b,f):
rows = u.shape[0]
h = 1.0/(rows-1)
h2 = 1.0/(h*h)
R = numpy.zeros(rows)
for x in range(0,rows-1): #careful with the edge boundaries; they must be included!
if(b[x] == 0):
R[x] = f[x] - (u[x+1] + u[x-1] - 2.0*u[x])
else:
R[x] = 0
return R
# void resid(double **res, double **u, double **rhs, int n)
# Returns minus the residual for the model problem. Input quantities are u[1..n][1..n] and
# rhs[1..n][1..n], while res[1..n][1..n] is returned.
# {
# int i,j;
# double h,h2i;
# h=1.0/(n-1);
# h2i=1.0/(h*h);
# for (j=2;j<n;j++) Interior points.
# for (i=2;i<n;i++)
# res[i][j] = -h2i*(u[i+1][j]+u[i-1][j]+u[i][j+1]+u[i][j-1]- 4.0*u[i][j])+rhs[i][j];
# for (i=1;i<=n;i++) Boundary points.
# res[i][1]=res[i][n]=res[1][i]=res[n][i]=0.0;
# }
# def residual(u, f, b):
# """
# f - A u
# """
# n = len(f)
# r = numpy.zeros(len(u))
# r[1:-1] = f[1:-1] - ((n+1)**2) * (2 * u[1:-1] - u[2:] - u[:-2])
# r[0] = f[0] - ((n+1)**2) * (2 * u[0] - u[1])
# r[-1] = f[-1] - ((n+1)**2) * (2 * u[-1] - u[-2])
# return r
def restriction(X):
rows = X.shape[0]
O = numpy.zeros((int(rows/2)))
for x in range(1,int(rows/2)-2):
O[x] = 0.25*(X[x*2-1] + X[x*2] + X[x*2+1])
return O
def prolongate(X):
rows = X.shape[0]
O = numpy.zeros((rows*2))
for x in range(0,rows-1):
O[2*x] = X[x]
O[2*x+1] = (X[x]+X[x+1])/2.0
return O
def jacobi(u,b,f):
T = u.copy()
for x in range(1, (u.shape[0] - 2)):
if(b[x] == 0):
T[x] = (u[x+1] + u[x-1] + f[x])/2.0
return T
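# Note: this is one Jacobi sweep for the 1D Poisson stencil
# 2*u[x] - u[x-1] - u[x+1] = f[x]; cells flagged in b (boundaries) keep their values.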
def V_cycle(phi,b,f,h):
u = phi.copy()
u1 = phi.copy()
u = jacobi(u,b,f)
r = -residual(u,b,f)
r1 = restriction(r)
v = numpy.zeros_like(r1) #correction
# b1 = restriction(
b1 = numpy.zeros_like(r1)
b1[0] = 1.0
b1[b1.shape[0]-1] = 1.0
print(numpy.linalg.norm(r),h)
if(h == 32):
for i in range(0,100):
v = jacobi(v,b1,r1)
else:
v = V_cycle(v,b1,r1,2*h)
v1 = prolongate(v)
#
# plt.figure()
# plt.subplot(2, 3, 1)
# plt.gca().set_title('Potentials')
# plt.plot(u)
# plt.subplot(2, 3, 2)
# plt.gca().set_title('Root residual')
# plt.plot(r)
#
# plt.subplot(2, 3, 3)
# plt.gca().set_title('Restricted residual')
# plt.plot(r1)
# plt.plot(b1)
# plt.subplot(2, 3, 4)
# plt.gca().set_title('Correction')
# plt.plot(v)
# plt.plot(b1)
# plt.subplot(2, 3, 5)
# plt.gca().set_title('Prolongated correction')
# plt.plot(v1)
#
# plt.draw()
# plt.pause(0.1)
u += v1
return u
# f = numpy.zeros_like(u) #no space charge
# u = V_cycle(u,b,f,1)
# f = numpy.zeros_like(u) #no space charge
# u = V_cycle(u,b,f,1)
# input()
prev_r = 1
while True:
u1 = u.copy()
f = numpy.zeros_like(u) #no space charge
u = V_cycle(u,b,f,1)
r = u - u1.copy()
prev_r = numpy.linalg.norm(r)
# input()
#
#
# prev_r = numpy.linalg.norm(r)
#
#
# plt.subplot(2, 3, 1)
# plt.gca().set_title('Potentials')
# plt.plot(u)
# plt.subplot(2, 3, 2)
# plt.gca().set_title('Root residual')
# plt.plot(r)
#
#
# r1 = restriction(r)
# v = numpy.zeros_like(r1) #correction
# b1 = restriction(b) #no boundaries on inner levels?
# gauss_seidel(v,b1,r1)
# gauss_seidel(v,b1,r1)
#
# v1 = prolongate(v)
#
# plt.subplot(2, 3, 3)
# plt.gca().set_title('Restricted residual')
# plt.plot(r1)
# plt.plot(b1)
# plt.subplot(2, 3, 4)
# plt.gca().set_title('Correction')
# plt.plot(v)
# plt.plot(b1)
# plt.subplot(2, 3, 5)
# plt.gca().set_title('Prolongated correction')
# plt.plot(v1)
#
#
# u += v1*(1.0-b)
#
# plt.draw()
# plt.pause(0.1)
# plt.clf()
# plt.cla()
```
#### File: Nyion/benchmarking/test7.py
```python
import numpy
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import math
SIZE_X = 128
u = numpy.zeros(SIZE_X)
b = numpy.zeros(SIZE_X)
for x in range(40,50):
u[x] = 1
b[x] = 1
# b[8,8] = 1
def gauss_seidel(U,b,theta):
rows = U.shape[0]
cols = U.shape[1]
for x in range(theta, (rows - theta),theta):
for y in range(theta, (cols - theta),theta):
U[x,y] = (U[x+theta,y] +
U[x-theta,y] +
U[x,y+theta] +
U[x,y-theta] + b[x,y])/4.0
def restriction(X):
rows = X.shape[0]
cols = X.shape[1]
O = numpy.zeros((int(rows/2), int(cols/2)))
for x in range(0,int(rows/2)-1):
for y in range(0,int(cols/2)-1):
O[x,y] = X[x*2,y*2]
return O
def prolongate(X):
rows = X.shape[0]
cols = X.shape[1]
O = numpy.zeros((rows*2, cols*2))
for x in range(0,rows-1):
for y in range(0,cols-1):
O[2*x,2*y] = X[x,y]
O[2*x+1,2*y] = 0.5*(X[x,y]+X[x+1,y])
O[2*x,2*y+1] = 0.5*(X[x,y]+X[x,y+1])
O[2*x+1,2*y+1] = 0.25*(X[x,y]+X[x,y+1]+X[x+1,y]+X[x+1,y+1])
# for i in range(0,2):
# for j in range(0,2):
return O
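# prolongate() doubles the grid in each dimension: coarse values are copied at
# even points and the odd points are filled by linear interpolation
# (averages of the neighbouring coarse values).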
# it's not the root residual you idiot
# it's each residual recursively
# def V_Cycle(u,f,h):
# phi = u.copy()
#
# v1 = phi.copy()
# gauss_seidel(phi,f,1)
# r = phi - v1
#
# rhs = restriction(r)
# #
# eps = numpy.zeros_like(rhs)
# #
# if(h == 32):
# for i in range(0,10):
# gauss_seidel(eps,rhs,1)
# else:
# eps = V_Cycle(eps,rhs,2*h)
#
# T = prolongate(eps)
#
# if(h==1):
# print("Residual: {} Step: {}".format(numpy.linalg.norm(r),h))
# plt.subplot(2, 3, 1)
# plt.gca().set_title('Potentials')
# plt.imshow(phi)
#
# plt.subplot(2, 3, 2)
# plt.gca().set_title('Residuals')
# plt.imshow(r)
#
# plt.subplot(2, 3, 3)
# plt.gca().set_title('Restricted')
# plt.imshow(rhs)
#
# plt.subplot(2, 3, 4)
# plt.gca().set_title('Correction')
# plt.imshow(eps)
#
# plt.subplot(2, 3, 5)
# plt.gca().set_title('Prolongated')
# plt.imshow(T)
#
# plt.draw()
# plt.pause(0.1)
# plt.clf()
# plt.cla()
# return phi + T
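# The main loop below calls V_Cycle(), but the definition above is commented
# out, so the script raises a NameError as written. A minimal uncommented
# version, reconstructed from the commented block above (a sketch, not
# necessarily the author's final intent):
def V_Cycle(u, f, h):
    phi = u.copy()
    v1 = phi.copy()
    gauss_seidel(phi, f, 1)           # pre-smooth on the current grid
    r = phi - v1                      # pseudo-residual from the smoothing sweep
    rhs = restriction(r)              # restrict the residual to the coarser grid
    eps = numpy.zeros_like(rhs)
    if h == 32:                       # coarsest level reached: just smooth
        for i in range(0, 10):
            gauss_seidel(eps, rhs, 1)
    else:
        eps = V_Cycle(eps, rhs, 2*h)  # recurse one level coarser
    T = prolongate(eps)               # interpolate the correction back up
    return phi + T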
convergence = []
ims = []
t = 0
c1 = 1
while True:
# u = restriction(b)
# u = restriction(u)
#
# plt.subplot(2, 2, 1)
# plt.gca().set_title('Potentials')
# plt.imshow(b)
#
# plt.subplot(2, 2, 2)
# plt.gca().set_title('Potentials')
# plt.imshow(u)
#
# T = prolongate(u)
#
# plt.subplot(2, 2, 3)
# plt.gca().set_title('Potentials')
# plt.imshow(T)
u = V_Cycle(u,b,1)
#
#
# convergence.append(numpy.linalg.norm(r))
#
#
# u = u + v
#
#
# plt.subplot(2, 2, 2)
# plt.gca().set_title('Residual')
# plt.imshow(r)
# plt.subplot(2, 2, 3)
# plt.gca().set_title('Correction')
# plt.imshow(v)
# plt.subplot(2, 2, 4)
# plt.yscale('log')
# plt.gca().set_title('Convergence')
# plt.plot(convergence)
# # plt.savefig(str(t) + '.png')
# t+=1
# print("Residual: {} convergence factor: {} Step: {}".format(numpy.linalg.norm(r),numpy.linalg.norm(r)/c1,t))
# c1 = numpy.linalg.norm(r)
#
# plt.close()
```
#### File: Nyion/benchmarking/unigrid.py
```python
import numpy
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import math
from itertools import cycle
SIZE_X = 128
I1 = 128
J1 = 128
H = 1/8##?
U = numpy.zeros((I1,J1))
F = numpy.zeros((I1,J1))
B = numpy.zeros((I1,J1))
for I in range(0,I1):
for J in range(0,J1):
# U[I,J] = 10
# U[I+15,J] = -10
# #
# B[I,J] = 1
# B[I+15,J] = 1
U[I,J] = math.cos(3.0*(I+J-2)*H)
for I in range(32,48):
for J in range(32,48):
U[I,J] = 1
B[I,J] = 1
def FND(I3, J3):
return (M1-math.fabs(I-I3))*(M1-math.fabs(J-J3))/(M1*M1)
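# FND() evaluates the "unigrid direction": a bilinear hat function of
# half-width M1 centred on the current coarse point (I, J). I, J and M1 are
# module-level globals set inside the relaxation loops below.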
N=5
prev_E = 1
while True:
#
for root_level in range(0,N+1):
for k in (list(range(0,root_level)) + list(range(root_level,0,-1))): #levels
M1 = 2**(N-k)
print(M1)
for relaxes in range(0,2): #Number of relaxations; 1 usually suffices
E=0
T = U.copy()
for I in range(M1,I1-M1,M1):
for J in range(M1,J1-M1,M1):
submesh_relaxes = 1 #
if(B[I-M1:I+M1,J-M1:J+M1].max() and B[I-M1:I+M1,J-M1:J+M1].sum() != 9 and k < 2): #if there's a boundary nearby, pre-smooth.
for d in range(0,10):
for I3 in range(I-M1+1,I+M1): #fix ranges
for J3 in range(J-M1+1,J+M1):
if(not B[I3,J3]):
U[I3,J3] = ((U[I3,J3-1] + U[I3,J3+1] + U[I3-1,J3] + U[I3+1,J3] + F[I3,J3]) / 4.0)
for d in range(0,submesh_relaxes):
A1=0
R1=0
for I3 in range(I-M1+1,I+M1): #fix ranges
for J3 in range(J-M1+1,J+M1):
D = 4
F[I3,J3] = 0
R = (D*U[I3,J3]) - U[I3,J3-1] - U[I3,J3+1] - U[I3-1,J3] - U[I3+1,J3]
R -= F[I3,J3] #compute residual
A3 = D*FND(I3,J3) - FND(I3,J3+1) - FND(I3,J3-1) - FND(I3+1,J3) - FND(I3-1,J3)
if(not B[I3,J3]):
R1 = R1 + FND(I3,J3)*R
A1 = A1 + FND(I3,J3)*A3
S=R1/A1
E=E+R1*R1
for I3 in range(I-M1+1,I+M1):
for J3 in range(J-M1+1,J+M1):
if(not B[I3,J3]):
T[I3,J3] = U[I3,J3] - 0.8*S*FND(I3,J3)
numpy.copyto(U,T)
E=math.sqrt(E)/M1/H
print(E)
# #FND COMPUTES THE UNIGRID DIRECTIONS
T = U.copy()
for I3 in range(1,I1-1): #fix ranges
for J3 in range(1,J1-1):
if(not B[I3,J3]):
U[I3,J3] = ((U[I3,J3-1] + U[I3,J3+1] + U[I3-1,J3] + U[I3+1,J3] + F[I3,J3]) / 4.0)
T = U - T
plt.subplot(2, 3, 2)
plt.gca().set_title('Potentials')
plt.imshow(U)
plt.subplot(2, 3, 3)
plt.gca().set_title('Boundaries')
plt.imshow(B)
print("Converge: {}".format(E/prev_E))
print("a: {}".format(numpy.linalg.norm(T)))
prev_E = E
#
plt.draw()
plt.pause(0.001)
plt.cla()
```
#### File: SimpleElectroSim/python/electro_sim.py
```python
from shapely.geometry import Point
import numpy as np
import random
import math
from decimal import *
from time import sleep
import copy
try:
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
except:
print "OpenGL wrapper for python not found"
from stl import mesh
#The grid is in m^2. Force is in newtons. Charge is in coulombs.
data_file = open("data.csv","w+")
#display values
timestep = 0.0000001
pixel_size = 10
pixel_spacing = 20
grid_width = 1000
rotation_x_speed = 0
rotation_y_speed = 10
rotation_y = 0
rotation_x = 0
#Physical constants
k_constant = 8990000000.0
electron_charge = 0.000000000000016021766
electron_mass = 0.0000000000000000000000000016726218
aluminum_mass = 0.00000000000000000000000004483263
time = 0
particles = []
glutInit();
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH);
glEnable(GL_MULTISAMPLE);
glutInitWindowSize(1920, 1080);
glutCreateWindow("Solver");
glViewport(0, 0, 1920, 1080);
glClearColor(1.0, 1.0, 1.0, 1.0);
glClearDepth(1.0);
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LEQUAL);
glShadeModel(GL_SMOOTH);
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(60, (1920.0/1080.0), 1, 3000);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glTranslatef(0,0,0);
glRotatef(0, 45, 0.1, 0);
glRotatef(0, 0, 45, 0);
glTranslatef(-grid_width/2,-grid_width/2, -2000);
def vector(a):
if(a > 0):
return 1.0
else:
return -1.0
def draw_square(x,y,z,color):
glPushMatrix();
glTranslatef(x,y,z);
glColor3f(color[0],color[1],color[2]);
glutSolidCube(pixel_size);
glPopMatrix();
def draw_sphere(x,y,z,color):
glPushMatrix();
glTranslatef(x,y,z);
glColor3f(color[0],color[1],color[2]);
glutSolidSphere(pixel_size/2.0,10,10);
glPopMatrix();
glutSwapBuffers();
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glutSwapBuffers();
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
############################################DEFINE THE SIM#########################################
def particle_circle(center=(0, 0, 0), charge=electron_charge,r=1, n=10,velocity=132620500, fixed=False):
global particles
for i in [
(
center[0]+(math.cos(2 * math.pi / n * x) * r), # x
center[1] + (math.sin(2 * math.pi / n * x) * r) # y
) for x in xrange(0, n + 1)]:
particles.append({"charge": 1.0*charge, "mass": electron_mass, "position": [i[0],center[2],i[1]], "velocity": [0,velocity,0], "constrain": [[],[],[]],"color":[0,0,0], "fixed": fixed})
def charged_plate(position=(5.0, 10, 0.001), size=(2,5,5), n=50, charge=(1/2000000.0),velocity=1000):
global particles
for i in range(0,n):
particles.append({"charge": charge/n, "mass": electron_mass, "position": [random.uniform(position[0],position[0]+size[0]),random.uniform(position[1],position[1]+size[1]),\
random.uniform(position[2],position[2]+size[2])], "velocity": [velocity,velocity,0], "fixed": True,"constrain": [],"color":[100,0,0]})
def charged_plate_with_hole(position=(5.0, 10, 0.001), size=(2,5,5), n=50, charge=(1/2000000.0),velocity=1000):
global particles
for i in range(0,n):
particles.append({"charge": charge/n, "mass": electron_mass, "position": [random.uniform(position[0],position[0]+size[0]),random.uniform(position[1],position[1]+size[1]),\
random.uniform(position[2],position[2]+size[2])], "velocity": [velocity,velocity,0], "fixed": True,"constrain": [],"color":[100,0,0]})
# anode = mesh.Mesh.from_file('anode.stl')
#
# def charged_mesh(input_mesh, charge=(1/2000000.0), n=0,velocity=1000):
# global particles
# prev_point = input_mesh.points[0]
# print(len(input_mesh.points))
# print(input_mesh.points.flatten(-1))
# for point in input_mesh.points:
# for i in np.arange(0,point[0],n/len(input_mesh.points)):
# particles.append({"charge": charge/n, "mass": electron_mass, "position": [point[0]-prev_point[0],point[1],point[2]], "velocity": [velocity,velocity,0], "fixed": True,"constrain": [],"color":[100,0,0]})
# prev_point = point
particle_circle(velocity=1326000)
particle_circle(r=5,velocity=1326000)
# charged_mesh(anode)
for i in np.arange(10,30):
particle_circle(center=(0,0,i), r=10,fixed=True,charge=electron_charge)
for i in np.arange(40,60):
particle_circle(center=(0,0,i), r=10,fixed=True,charge=-electron_charge)
for i in np.arange(70,90):
particle_circle(center=(0,0,i), r=10,fixed=True,charge=electron_charge)
# charged_plate()
print(particles)
sleep(3)
#
# particles = [{"charge": 1.0*electron_charge, "mass": electron_mass, "position": [10,1,0], "velocity": [0,0,0]},
# {"charge": 1.0*electron_charge, "mass": electron_mass, "position": [15,1,0], "velocity": [0,0,0]},
# {"charge": 1.0*electron_charge, "mass": electron_mass, "position": [50.3,53,0.3], "velocity": [0,0,0]},
# {"charge": 1.0*electron_charge, "mass": electron_mass, "position": [50.4,54,0.4], "velocity": [-100,0,0]}]
############################################DEFINE THE SIM#########################################
while True:
# glTranslatef(0,0,2000);
# glRotatef(rotation_y, 0, 0.1, 0);
# glRotatef(45, 0.1, 0, 0);
# glTranslatef(-grid_width/2,-grid_width/2, -2000);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
rotation_y = rotation_y_speed*time
print("Time: {}".format(time))
frozen_state = copy.deepcopy(particles)
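# Forces are computed against a deep-copied snapshot of the particle list, so
# the position/velocity updates applied during this frame do not feed back
# into the force calculation for the remaining particles.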
for p1_index,particle in enumerate(frozen_state):
if not particle["fixed"]:
force_x = 0
force_y = 0
force_z = 0
for second_particle in [x for i,x in enumerate(frozen_state) if i!=p1_index]:
magnitude = (k_constant*particle['charge']*second_particle['charge']);
distance_x = (particle["position"][0]-second_particle["position"][0])
distance_y = (particle["position"][1]-second_particle["position"][1])
distance_z = (particle["position"][2]-second_particle["position"][2])
distance = math.sqrt(distance_x**2.0+distance_y**2.0+distance_z**2.0)
e_force = (magnitude/((distance)**2.0));
force_x += e_force*(distance_x/distance)
force_y += e_force*(distance_y/distance)
force_z += e_force*(distance_z/distance)
print("Distance: {}".format(math.sqrt(distance_x**2.0+distance_y**2.0+distance_z**2.0)))
accel_x = force_x/particle['mass']
accel_y = force_y/particle['mass']
accel_z = force_z/particle['mass']
particles[p1_index]["velocity"][0] += accel_x*timestep;
particles[p1_index]["velocity"][1] += accel_y*timestep;
particles[p1_index]["velocity"][2] += accel_z*timestep;
particles[p1_index]["position"][0] += particles[p1_index]["velocity"][0]*timestep
particles[p1_index]["position"][1] += particles[p1_index]["velocity"][1]*timestep
particles[p1_index]["position"][2] += particles[p1_index]["velocity"][2]*timestep
print("-"*20)
print("Particle: {} X: {} Y: {} Z: {}".format(p1_index,frozen_state[p1_index]["position"][0],frozen_state[p1_index]["position"][1],frozen_state[p1_index]["position"][2]))
print("Velocity: X: {} Y: {} Z: {}".format(particles[p1_index]["velocity"][0],particles[p1_index]["velocity"][1],particles[p1_index]["velocity"][2]))
print("Acceleration: X: {} Y: {} Z: {}".format(accel_x,accel_y,accel_z))
print("Force: X: {} Y: {} Z: {}".format(force_x,force_y,force_z))
# data_file.write("{},{},{},{},{},{},{},{},{},{},\n".format(particles[p1_index]["position"][0],particles[p1_index]["position"][1],particles[p1_index]["position"][2]))
print("-"*20)
draw_sphere(particles[p1_index]["position"][0]*pixel_spacing,particles[p1_index]["position"][1]*pixel_spacing,particles[p1_index]["position"][2]*pixel_spacing,particles[p1_index]["color"])
print("Frame complete")
print("#"*20)
glutSwapBuffers();
time += timestep
sleep(0.01)
```
#### File: Nyion/struct/structure_test.py
```python
import numpy
import matplotlib.pyplot as plt
import math
CELL_SIZES = [16,16,16,4096,128,128,128,2097152] #l0 x,y,z,len, l1 x,y,z,len,
def idx(x, y, z, x_len, y_len):
return ((x_len*y_len*z) + (x_len*y) + x);
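# idx() flattens a local (x, y, z) cell coordinate into a 1D offset within a
# block of size x_len * y_len * z_len (x varies fastest, z slowest).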
def is_inside_boundary(world_x,world_y,world_z):
return False
X = 0
Y = 1
Z = 2
LEN = 3
BUFFER_SIZE = 100000
WORLD_X_SIZE = 0.1
WORLD_Y_SIZE = 0.1
WORLD_Z_SIZE = 0.1
MAX_DEPTH = 1
particles = [0]*BUFFER_SIZE
electric_potential = [0]*BUFFER_SIZE
boundary_conditions = [0]*BUFFER_SIZE
refined_indices = [0]*BUFFER_SIZE
space_charge = [0]*BUFFER_SIZE
# if(buffer_end_pointer == 0): # if the root block hasn't been initialized yet.
def define_blocks(buffer_end_pointer):
# called once on startup.
buffer_end_pointer = 0
for depth in range(0,MAX_DEPTH):
world_cell_origin_x = 0
world_cell_origin_y = 0
world_cell_origin_z = 0
xlen = CELL_SIZES[(depth*4)+X]
ylen = CELL_SIZES[(depth*4)+Y]
zlen = CELL_SIZES[(depth*4)+Z]
world_x_scale = WORLD_X_SIZE
world_y_scale = WORLD_Y_SIZE
world_z_scale = WORLD_Z_SIZE
for i in range(0,depth):
world_x_scale /= CELL_SIZES[(i*4)+X]
world_y_scale /= CELL_SIZES[(i*4)+Y]
world_z_scale /= CELL_SIZES[(i*4)+Z]
'''
Initialize block.
'''
for x in range(1, xlen-1):
for y in range(1, ylen-1):
for z in range(1, zlen-1):
world_x = world_cell_origin_x+(x*world_x_scale)
world_y = world_cell_origin_y+(y*world_y_scale)
world_z = world_cell_origin_z+(z*world_z_scale)
boundary_conditions[buffer_end_pointer + idx(x,y,z,xlen,ylen)] = is_inside_boundary(world_x,world_y,world_z)
'''
Refine cells that need to be refined.
'''
for x in range(1, xlen-1): #even the root has ghost points
for y in range(1, ylen-1):
for z in range(1, zlen-1):
index = buffer_end_pointer + idx(x,y,z,xlen,ylen)
if(not (boundary_conditions[index+idx(0,0,0,xlen,ylen)]
== boundary_conditions[index+idx(0,0,1,xlen,ylen)]
== boundary_conditions[index+idx(0,1,0,xlen,ylen)]
== boundary_conditions[index+idx(0,1,1,xlen,ylen)]
== boundary_conditions[index+idx(1,0,0,xlen,ylen)]
== boundary_conditions[index+idx(1,0,1,xlen,ylen)]
== boundary_conditions[index+idx(1,1,0,xlen,ylen)]
== boundary_conditions[index+idx(1,1,1,xlen,ylen)]) or # check if this cell is homogenous; otherwise, it needs to be refined.
not space_charge[index] == 0.0 or
is_inside_special_boundary(world_x,world_y,world_z)):
refined_indices[index] = 0;
buffer_end_pointer += idx(xlen-1,ylen-1,zlen-1,xlen,ylen)
return buffer_end_pointer
def sync_ghosts(depth):
while True:
for x,y,z in bla:
``` |
{
"source": "0xDBFB7/xraylarch",
"score": 2
} |
#### File: larch/xafs/feffit.py
```python
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
from copy import copy, deepcopy
from functools import partial
import numpy as np
from numpy import array, arange, interp, pi, zeros, sqrt, concatenate
from scipy.optimize import leastsq as scipy_leastsq
from lmfit import Parameters, Parameter, Minimizer, fit_report
from lmfit.printfuncs import gformat as gformat
from larch import Group, isNamedClass
from larch.utils.strutils import fix_varname
from ..math import index_of, realimag, complex_phase
from ..fitting import (correlated_values, eval_stderr, ParameterGroup,
group2params, params2group, isParameter)
from .xafsutils import set_xafsGroup
from .xafsft import xftf_fast, xftr_fast, ftwindow
from .sigma2_models import sigma2_correldebye, sigma2_debye
from .feffdat import FeffPathGroup, ff2chi
class TransformGroup(Group):
"""A Group of transform parameters.
The apply() method will return the result of applying the transform,
ready to use in a Fit. This caches the FT windows (k and r windows)
and assumes that once created (not None), these do not need to be
recalculated....
That is: don't simply change the parameters and expect different results.
If you do change parameters, reset kwin / rwin to None to cause them to be
recalculated.
"""
def __init__(self, kmin=0, kmax=20, kweight=2, dk=4, dk2=None,
window='kaiser', nfft=2048, kstep=0.05,
rmin = 0, rmax=10, dr=0, dr2=None, rwindow='hanning',
fitspace='r', wavelet_mask=None, _larch=None, **kws):
Group.__init__(self, **kws)
self.kmin = kmin
self.kmax = kmax
self.kweight = kweight
if 'kw' in kws:
self.kweight = kws['kw']
self.dk = dk
self.dk2 = dk2
self.window = window
self.rmin = rmin
self.rmax = rmax
self.dr = dr
self.dr2 = dr2
if dr2 is None: self.dr2 = self.dr
self.rwindow = rwindow
self.__nfft = 0
self.__kstep = None
self.nfft = nfft
self.kstep = kstep
self.rstep = pi/(self.kstep*self.nfft)
self.fitspace = fitspace
self.wavelet_mask = wavelet_mask
self._cauchymask = None
self._larch = _larch
self.kwin = None
self.rwin = None
self.make_karrays()
def __repr__(self):
return '<FeffitTransform Group: %s>' % self.__name__
def __copy__(self):
return TransformGroup(kmin=self.kmin, kmax=self.kmax,
kweight=self.kweight, dk=self.dk, dk2=self.dk2,
window=self.window, kstep=self.kstep,
rmin=self.rmin, rmax=self.rmax,
dr=self.dr, dr2=self.dr2,
rwindow=self.rwindow, nfft=self.nfft,
fitspace=self.fitspace,
wavelet_mask=self.wavelet_mask,
_larch=self._larch)
def __deepcopy__(self, memo):
return TransformGroup(kmin=self.kmin, kmax=self.kmax,
kweight=self.kweight, dk=self.dk, dk2=self.dk2,
window=self.window, kstep=self.kstep,
rmin=self.rmin, rmax=self.rmax,
dr=self.dr, dr2=self.dr2,
rwindow=self.rwindow, nfft=self.nfft,
fitspace=self.fitspace,
wavelet_mask=self.wavelet_mask,
_larch=self._larch)
def make_karrays(self, k=None, chi=None):
"this should be run in kstep or nfft changes"
if self.kstep == self.__kstep and self.nfft == self.__nfft:
return
self.__kstep = self.kstep
self.__nfft = self.nfft
self.rstep = pi/(self.kstep*self.nfft)
self.k_ = self.kstep * arange(self.nfft, dtype='float64')
self.r_ = self.rstep * arange(self.nfft, dtype='float64')
def _xafsft(self, chi, group=None, rmax_out=10, **kws):
"returns "
for key, val in kws.items():
if key == 'kw':
key = 'kweight'
setattr(self, key, val)
self.make_karrays()
out = self.fftf(chi)
irmax = int(min(self.nfft/2, 1.01 + rmax_out/self.rstep))
group = set_xafsGroup(group, _larch=self._larch)
r = self.rstep * arange(irmax)
mag = sqrt(out.real**2 + out.imag**2)
group.kwin = self.kwin[:len(chi)]
group.r = r[:irmax]
group.chir = out[:irmax]
group.chir_mag = mag[:irmax]
group.chir_pha = complex_phase(out[:irmax])
group.chir_re = out.real[:irmax]
group.chir_im = out.imag[:irmax]
def get_kweight(self):
"if kweight is a list/tuple, use only the first one here"
if isinstance(self.kweight, Iterable):
return self.kweight[0]
return self.kweight
def fftf(self, chi, kweight=None):
""" forward FT -- meant to be used internally.
chi must be on self.k_ grid"""
if self.kstep != self.__kstep or self.nfft != self.__nfft:
self.make_karrays()
if self.kwin is None:
self.kwin = ftwindow(self.k_, xmin=self.kmin, xmax=self.kmax,
dx=self.dk, dx2=self.dk2, window=self.window)
if kweight is None:
kweight = self.get_kweight()
cx = chi * self.kwin[:len(chi)] * self.k_[:len(chi)]**kweight
return xftf_fast(cx, kstep=self.kstep, nfft=self.nfft)
def fftr(self, chir):
" reverse FT -- meant to be used internally"
if self.kstep != self.__kstep or self.nfft != self.__nfft:
self.make_karrays()
if self.rwin is None:
self.rwin = ftwindow(self.r_, xmin=self.rmin, xmax=self.rmax,
dx=self.dr, dx2=self.dr2, window=self.rwindow)
cx = chir * self.rwin[:len(chir)]
return xftr_fast(cx, kstep=self.kstep, nfft=self.nfft)
def make_cwt_arrays(self, nkpts, nrpts):
if self.kstep != self.__kstep or self.nfft != self.__nfft:
self.make_karrays()
if self.kwin is None:
self.kwin = ftwindow(self.k_, xmin=self.kmin, xmax=self.kmax,
dx=self.dk, dx2=self.dk2, window=self.window)
if self._cauchymask is None:
if self.wavelet_mask is not None:
self._cauchymask = self.wavelet_mask
else:
ikmin = int(max(0, 0.01 + self.kmin/self.kstep))
ikmax = int(min(self.nfft/2, 0.01 + self.kmax/self.kstep))
irmin = int(max(0, 0.01 + self.rmin/self.rstep))
irmax = int(min(self.nfft/2, 0.01 + self.rmax/self.rstep))
cm = np.zeros(nrpts*nkpts, dtype='int').reshape(nrpts, nkpts)
cm[irmin:irmax, ikmin:ikmax] = 1
self._cauchymask = cm
self._cauchyslice =(slice(irmin, irmax), slice(ikmin, ikmax))
def cwt(self, chi, rmax=None, kweight=None):
"""cauchy wavelet transform -- meant to be used internally"""
if self.kstep != self.__kstep or self.nfft != self.__nfft:
self.make_karrays()
nkpts = len(chi)
nrpts = int(np.round(self.rmax/self.rstep))
if self.kwin is None:
self.make_cwt_arrays(nkpts, nrpts)
omega = pi*np.arange(self.nfft)/(self.kstep*self.nfft)
if kweight is None:
kweight = self.get_kweight()
if kweight != 0:
chi = chi * self.kwin[:len(chi)] * self.k_[:len(chi)]**kweight
if rmax is not None:
self.rmax = rmax
chix = np.zeros(int(self.nfft/2)) * self.kstep
chix[:nkpts] = chi
chix = chix[:int(self.nfft/2)]
_ffchi = np.fft.fft(chix, n=2*self.nfft)[:self.nfft]
nrpts = int(np.round(self.rmax/self.rstep))
r = self.rstep * arange(nrpts)
r[0] = 1.e-19
alpha = nrpts/(2*r)
self.make_cwt_arrays(nkpts, nrpts)
cauchy_sum = np.log(2*pi) - np.log(1.0+np.arange(nrpts)).sum()
out = np.zeros(nrpts*nkpts, dtype='complex128').reshape(nrpts, nkpts)
for i in range(nrpts):
aom = alpha[i]*omega
filt = cauchy_sum + nrpts*np.log(aom) - aom
out[i, :] = np.fft.ifft(np.exp(filt)*_ffchi, 2*self.nfft)[:nkpts]
return (out*self._cauchymask)[self._cauchyslice]
class FeffitDataSet(Group):
def __init__(self, data=None, paths=None, transform=None,
epsilon_k=None, _larch=None, pathlist=None, **kws):
self._larch = _larch
Group.__init__(self, **kws)
if paths is None and pathlist is not None: # legacy
paths = pathlist
if isinstance(paths, dict):
self.paths = {key: copy(path) for key, path in paths.items()}
elif isinstance(paths, (list, tuple)):
self.paths = {path.label: copy(path) for path in paths}
else:
self.paths = {}
self.data = data
if transform is None:
transform = TransformGroup()
else:
transform = copy(transform)
self.transform = transform
if epsilon_k is not None:
self.data.epsilon_k = epsilon_k
self.model = Group()
self.model.k = None
self.__chi = None
self.__prepared = False
def __repr__(self):
return '<FeffitDataSet Group: %s>' % self.__name__
def __copy__(self):
return FeffitDataSet(data=copy(self.data),
paths=self.paths,
transform=self.transform,
_larch=self._larch)
def __deepcopy__(self, memo):
return FeffitDataSet(data=deepcopy(self.data),
paths=self.paths,
transform=self.transform,
_larch=self._larch)
def prepare_fit(self, params):
"""prepare for fit with this dataset"""
trans = self.transform
trans.make_karrays()
ikmax = int(1.01 + max(self.data.k)/trans.kstep)
# ikmax = index_of(trans.k_, max(self.data.k))
self.model.k = trans.k_[:ikmax]
self.__chi = interp(self.model.k, self.data.k, self.data.chi)
self.n_idp = 1 + 2*(trans.rmax-trans.rmin)*(trans.kmax-trans.kmin)/pi
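# n_idp is the usual Nyquist-based estimate of the number of independent
# points in the chosen fit ranges: N_idp ~ 2*(rmax-rmin)*(kmax-kmin)/pi + 1.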
if getattr(self.data, 'epsilon_k', None) is not None:
eps_k = self.data.epsilon_k
if isinstance(eps_k, np.ndarray):
eps_k = interp(self.model.k, self.data.k, self.data.epsilon_k)
self.set_epsilon_k(eps_k)
else:
self.estimate_noise(chi=self.__chi, rmin=15.0, rmax=30.0)
# uncertainty in chi(k) from autobk or other source
if hasattr(self.data, 'delta_chi'):
if isinstance(self.epsilon_k, (list, tuple)):
eps_ave = 0.
for eps in self.epsilon_k:
eps_ave += eps
self.epsilon_k = eps_ave/len(self.epsilon_k)
_dchi = interp(self.model.k, self.data.k, self.data.delta_chi)
eps_k = np.sqrt(_dchi**2 + self.epsilon_k**2)
self.set_epsilon_k(eps_k)
# for each path in the list of paths, setup the Path Parameters
# to use the current Parameters namespace
for label, path in self.paths.items():
path.create_path_params(params=params)
if path.spline_coefs is None:
path.create_spline_coefs()
self.__prepared = True
def estimate_noise(self, chi=None, rmin=15.0, rmax=30.0, all_kweights=True):
"""estimage noise in a chi spectrum from its high r components"""
trans = self.transform
trans.make_karrays()
if chi is None: chi = self.__chi
save = trans.rmin, trans.rmax, trans.fitspace
all_kweights = all_kweights and isinstance(trans.kweight, Iterable)
if all_kweights:
chir = [trans.fftf(chi, kweight=kw) for kw in trans.kweight]
else:
chir = [trans.fftf(chi)]
irmin = int(0.01 + rmin/trans.rstep)
irmax = int(min(trans.nfft/2, 1.01 + rmax/trans.rstep))
highr = [realimag(chir_[irmin:irmax]) for chir_ in chir]
# get average of window function value; we will scale eps_r by this
kwin_ave = trans.kwin.sum()*trans.kstep/(trans.kmax-trans.kmin)
eps_r = [(sqrt((chi*chi).sum() / len(chi)) / kwin_ave) for chi in highr]
eps_k = []
# use Parseval's theorem to convert epsilon_r to epsilon_k,
# compensating for kweight
if all_kweights:
kweights = trans.kweight[:]
else:
kweights = [trans.kweight]
for i, kw in enumerate(kweights):
w = 2 * kw + 1
scale = sqrt((2*pi*w)/(trans.kstep*(trans.kmax**w - trans.kmin**w)))
eps_k.append(scale*eps_r[i])
trans.rmin, trans.rmax, trans.fitspace = save
## self.n_idp = 1 + 2*(trans.rmax-trans.rmin)*(trans.kmax-trans.kmin)/pi
self.epsilon_k = eps_k
self.epsilon_r = eps_r
if len(eps_r) == 1:
self.epsilon_k = eps_k[0]
self.epsilon_r = eps_r[0]
if isinstance(eps_r, np.ndarray):
self.epsilon_r = eps_r.mean()
def set_epsilon_k(self, eps_k):
"""set epsilon_k and epsilon_r -- ucertainties in chi(k) and chi(R)"""
trans = self.transform
all_kweights = isinstance(trans.kweight, Iterable)
if isinstance(trans.kweight, Iterable):
self.epsilon_k = []
self.epsilon_r = []
for kw in trans.kweight:
w = 2 * kw + 1
scale = 2*sqrt((pi*w)/(trans.kstep*(trans.kmax**w - trans.kmin**w)))
self.epsilon_k.append(eps_k)
eps_r = eps_k / scale
if isinstance(eps_r, np.ndarray): eps_r = eps_r.mean()
self.epsilon_r.append(eps_r)
else:
w = 2 * trans.get_kweight() + 1
scale = 2*sqrt((pi*w)/(trans.kstep*(trans.kmax**w - trans.kmin**w)))
self.epsilon_k = eps_k
eps_r = eps_k / scale
if isinstance(eps_r, np.ndarray): eps_r = eps_r.mean()
self.epsilon_r = eps_r
def _residual(self, paramgroup, data_only=False, **kws):
"""return the residual for this data set
residual = self.transform.apply(data_chi - model_chi)
where model_chi is the result of ff2chi(paths)
"""
if not isNamedClass(self.transform, TransformGroup):
return
if not self.__prepared:
self.prepare_fit(params=group2params(paramgroup))
ff2chi(self.paths, paramgroup=paramgroup, k=self.model.k,
_larch=self._larch, group=self.model)
eps_k = self.epsilon_k
if isinstance(eps_k, np.ndarray):
eps_k[np.where(eps_k<1.e-12)[0]] = 1.e-12
diff = (self.__chi - self.model.chi)
if data_only: # for extracting transformed data separately from residual
diff = self.__chi
trans = self.transform
k = trans.k_[:len(diff)]
all_kweights = isinstance(trans.kweight, Iterable)
if trans.fitspace == 'k':
iqmin = int(max(0, 0.01 + trans.kmin/trans.kstep))
iqmax = int(min(trans.nfft/2, 0.01 + trans.kmax/trans.kstep))
if all_kweights:
out = []
for i, kw in enumerate(trans.kweight):
out.append(((diff/eps_k[i])*k**kw)[iqmin:iqmax])
return np.concatenate(out)
else:
return ((diff/eps_k) * k**trans.kweight)[iqmin:iqmax]
elif trans.fitspace == 'w':
if all_kweights:
out = []
for i, kw in enumerate(trans.kweight):
cwt = trans.cwt(diff/eps_k, kweight=kw)
out.append(realimag(cwt).ravel())
return np.concatenate(out)
else:
cwt = trans.cwt(diff/eps_k, kweight=trans.kweight)
return realimag(cwt).ravel()
else: # 'r' space
out = []
if all_kweights:
chir = [trans.fftf(diff, kweight=kw) for kw in trans.kweight]
eps_r = self.epsilon_r
else:
chir = [trans.fftf(diff)]
eps_r = [self.epsilon_r]
if trans.fitspace == 'r':
irmin = int(max(0, 0.01 + trans.rmin/trans.rstep))
irmax = int(min(trans.nfft/2, 0.01 + trans.rmax/trans.rstep))
for i, chir_ in enumerate(chir):
chir_ = chir_ / (eps_r[i])
out.append(realimag(chir_[irmin:irmax]))
else:
chiq = [trans.fftr(c)/eps for c, eps in zip(chir, eps_r)]
iqmin = int(max(0, 0.01 + trans.kmin/trans.kstep))
iqmax = int(min(trans.nfft/2, 0.01 + trans.kmax/trans.kstep))
for chiq_ in chiq:
out.append( realimag(chiq_[iqmin:iqmax])[::2])
return np.concatenate(out)
def save_ffts(self, rmax_out=10, path_outputs=True):
"save fft outputs"
xft = self.transform._xafsft
xft(self.__chi, group=self.data, rmax_out=rmax_out)
xft(self.model.chi, group=self.model, rmax_out=rmax_out)
if path_outputs:
for p in self.paths.values():
xft(p.chi, group=p, rmax_out=rmax_out)
def feffit_dataset(data=None, paths=None, transform=None,
epsilon_k=None, pathlist=None, _larch=None):
"""create a Feffit Dataset group.
Parameters:
------------
data: group containing experimental EXAFS (needs arrays 'k' and 'chi').
paths: dict of {label: FeffPathGroup}, using FeffPathGroup created by feffpath()
transform: Feffit Transform group.
pathlist: list of FeffPathGroup [deprecated - use 'paths']
epsilon_k: Uncertainty in data (either single value or array of
same length as data.k)
Returns:
----------
a Feffit Dataset group.
"""
return FeffitDataSet(data=data, paths=paths, transform=transform,
pathlist=pathlist, _larch=_larch)
def feffit_transform(_larch=None, **kws):
"""create a feffit transform group
Parameters:
--------------
fitspace: name of FT type for fit ('r').
kmin: starting *k* for FT Window (0).
kmax: ending *k* for FT Window (20).
dk: tapering parameter for FT Window (4).
dk2: second tapering parameter for FT Window (None).
window: name of window type ('kaiser').
nfft: value to use for N_fft (2048).
kstep: value to use for delta_k (0.05).
kweight: exponent for weighting spectra by k^kweight (2).
rmin: starting *R* for Fit Range and/or reverse FT Window (0).
rmax: ending *R* for Fit Range and/or reverse FT Window (10).
dr: tapering parameter for reverse FT Window 0.
rwindow: name of window type for reverse FT Window ('kaiser').
Returns:
----------
a feffit transform group.
"""
return TransformGroup(_larch=_larch, **kws)
def feffit(paramgroup, datasets, rmax_out=10, path_outputs=True, _larch=None, **kws):
"""execute a Feffit fit: a fit of feff paths to a list of datasets
Parameters:
------------
paramgroup: group containing parameters for fit
datasets: Feffit Dataset group or list of Feffit Dataset group.
rmax_out: maximum R value to calculate output arrays.
path_outputs: Flag to set whether all Path outputs should be written.
Returns:
---------
a fit results group. This will contain subgroups of:
datasets: an array of FeffitDataSet groups used in the fit.
params: This will be identical to the input parameter group.
fit: an object which points to the low-level fit.
Statistical parameters will be put into the params group. Each
dataset will have a 'data' and 'model' subgroup, each with arrays:
k wavenumber array of k
chi chi(k).
kwin window Omega(k) (length of input chi(k)).
r uniform array of R, out to rmax_out.
chir complex array of chi(R).
chir_mag magnitude of chi(R).
chir_pha phase of chi(R).
chir_re real part of chi(R).
chir_im imaginary part of chi(R).
"""
work_paramgroup = deepcopy(paramgroup)
params = group2params(work_paramgroup)
def _resid(params, datasets=None, pargroup=None, **kwargs):
""" this is the residual function"""
params2group(params, pargroup)
return concatenate([d._residual(pargroup) for d in datasets])
if isNamedClass(datasets, FeffitDataSet):
datasets = [datasets]
for ds in datasets:
if not isNamedClass(ds, FeffitDataSet):
print( "feffit needs a list of FeffitDataSets")
return
ds.prepare_fit(params=params)
fit = Minimizer(_resid, params,
fcn_kws=dict(datasets=datasets, pargroup=work_paramgroup),
scale_covar=True, **kws)
result = fit.leastsq()
params2group(result.params, work_paramgroup)
dat = concatenate([d._residual(work_paramgroup, data_only=True) for d in datasets])
n_idp = 0
for ds in datasets:
n_idp += ds.n_idp
# here we rescale chi-square and reduced chi-square to n_idp
npts = len(result.residual)
chi_square = result.chisqr * n_idp*1.0 / npts
chi2_reduced = chi_square/(n_idp*1.0 - result.nvarys)
rfactor = (result.residual**2).sum() / (dat**2).sum()
# calculate 'aic', 'bic' rescaled to n_idp
# note that neg2_loglikel is -2*log(likelihood)
neg2_loglikel = n_idp * np.log(chi_square / n_idp)
aic = neg2_loglikel + 2 * result.nvarys
bic = neg2_loglikel + np.log(n_idp) * result.nvarys
# With scale_covar = True, Minimizer() scales the uncertainties
# by reduced chi-square assuming params.nfree is the correct value
# for degrees-of-freedom. But n_idp-params.nvarys is a better measure,
# so we rescale uncertainties here.
covar = getattr(result, 'covar', None)
# print("COVAR " , covar)
if covar is not None:
err_scale = (result.nfree / (n_idp - result.nvarys))
for name in result.var_names:
par = result.params[name]
if isParameter(par) and par.vary:
par.stderr *= sqrt(err_scale)
# next, propagate uncertainties to constraints and path parameters.
result.covar *= err_scale
vsave, vbest = {}, []
# 1. save current params
for vname in result.var_names:
par = result.params[vname]
vsave[vname] = par
vbest.append(par.value)
# 2. get correlated uncertainties, set params accordingly
uvars = correlated_values(vbest, result.covar)
# 3. evaluate constrained params, save stderr
for nam, obj in result.params.items():
eval_stderr(obj, uvars, result.var_names, result.params)
# 4. evaluate path params, save stderr
for ds in datasets:
for label, path in ds.paths.items():
path.store_feffdat()
for pname in ('degen', 's02', 'e0', 'ei',
'deltar', 'sigma2', 'third', 'fourth'):
obj = path.params[path.pathpar_name(pname)]
eval_stderr(obj, uvars, result.var_names, result.params)
# restore saved parameters again
for vname in result.var_names:
# setattr(params, vname, vsave[vname])
params[vname] = vsave[vname]
# clear any errors evaluating uncertainties
if _larch is not None and (len(_larch.error) > 0):
_larch.error = []
# reset the parameters group with the newly updated uncertainties
params2group(result.params, work_paramgroup)
# here we create outputs arrays for chi(k), chi(r):
for ds in datasets:
ds.save_ffts(rmax_out=rmax_out, path_outputs=path_outputs)
out = Group(name='feffit results', datasets=datasets,
paramgroup=work_paramgroup,
fitter=fit, fit_details=result, chi_square=chi_square,
n_independent=n_idp, chi2_reduced=chi2_reduced,
rfactor=rfactor, aic=aic, bic=bic, covar=covar)
for attr in ('params', 'nvarys', 'nfree', 'ndata', 'var_names', 'nfev',
'success', 'errorbars', 'message', 'lmdif_message'):
setattr(out, attr, getattr(result, attr, None))
return out
def feffit_report(result, min_correl=0.1, with_paths=True, _larch=None):
"""return a printable report of fit for feffit
Parameters:
------------
result: Feffit result, output group from feffit()
min_correl: minimum correlation to report [0.1]
with_paths: boolean (True/False) for whether to list all paths [True]
Returns:
---------
printable string of report.
"""
input_ok = False
try:
params = result.params
datasets = result.datasets
input_ok = True
except:
pass
if not input_ok:
print( 'must pass output of feffit()!')
return
path_hashkeys = []
for ds in datasets:
path_hashkeys.extend([p.hashkey for p in ds.paths.values()])
topline = '=================== FEFFIT RESULTS ===================='
header = '[[%s]]'
varformat = ' %12s = %s +/-%s (init= %s)'
fixformat = ' %12s = %s (fixed)'
exprformat = ' %12s = %s +/-%s = \'%s\''
out = [topline, header % 'Statistics']
out.append(' nvarys, npts = %i, %i' % (result.nvarys,
result.ndata))
out.append(' n_independent = %.3f' % (result.n_independent))
out.append(' chi_square = %s' % gformat(result.chi_square))
out.append(' reduced chi_square = %s' % gformat(result.chi2_reduced))
out.append(' r-factor = %s' % gformat(result.rfactor))
out.append(' Akaike info crit = %s' % gformat(result.aic))
out.append(' Bayesian info crit = %s' % gformat(result.bic))
out.append(' ')
if len(datasets) == 1:
out.append(header % 'Data')
else:
out.append(header % 'Datasets (%i)' % len(datasets))
for i, ds in enumerate(datasets):
tr = ds.transform
if len(datasets) > 1:
out.append(' dataset %i:' % (i+1))
if isinstance(tr.kweight, Iterable):
if isinstance(ds.epsilon_k[0], np.ndarray):
msg = []
for eps in ds.epsilon_k:
msg.append('Array(mean=%s, std=%s)' % (gformat(eps.mean()).strip(),
gformat(eps.std()).strip()))
eps_k = ', '.join(msg)
else:
eps_k = ', '.join([gformat(eps).strip() for eps in ds.epsilon_k])
eps_r = ', '.join([gformat(eps).strip() for eps in ds.epsilon_r])
kweigh = ', '.join(['%i' % kwe for kwe in tr.kweight])
else:
if isinstance(ds.epsilon_k, np.ndarray):
eps_k = 'Array(mean=%s, std=%s)' % (gformat(ds.epsilon_k.mean()).strip(),
gformat(ds.epsilon_k.std()).strip())
else:
eps_k = gformat(ds.epsilon_k)
eps_r = gformat(ds.epsilon_r).strip()
kweigh = '%i' % tr.kweight
out.append(' fit space = \'%s\'' % (tr.fitspace))
out.append(' r-range = %.3f, %.3f' % (tr.rmin, tr.rmax))
out.append(' k-range = %.3f, %.3f' % (tr.kmin, tr.kmax))
kwin = ' k window, dk = \'%s\', %.3f' % (tr.window, tr.dk)
if tr.dk2 is not None:
kwin = "%s, %.3f" % (kwin, tr.dk2)
out.append(kwin)
pathfiles = [p.filename for p in ds.paths.values()]
out.append(' paths used in fit = %s' % (repr(pathfiles)))
out.append(' k-weight = %s' % kweigh)
out.append(' epsilon_k = %s' % eps_k)
out.append(' epsilon_r = %s' % eps_r)
out.append(' n_independent = %.3f' % (ds.n_idp))
#
out.append(' ')
out.append(header % 'Variables')
for name, par in params.items():
if any([name.endswith('_%s' % phash) for phash in path_hashkeys]):
continue
if len(name) < 14:
name = (name + ' '*14)[:14]
if isParameter(par):
if par.vary:
stderr = 'unknown'
if par.stderr is not None:
stderr = gformat(par.stderr)
out.append(varformat % (name, gformat(par.value),
stderr, gformat(par.init_value)))
elif par.expr is not None:
stderr = 'unknown'
if par.stderr is not None:
stderr = gformat(par.stderr)
out.append(exprformat % (name, gformat(par.value),
stderr, par.expr))
else:
out.append(fixformat % (name, gformat(par.value)))
covar_vars = result.var_names
if len(covar_vars) > 0:
out.append(' ')
out.append(header % 'Correlations' +
' (unreported correlations are < % .3f)' % min_correl)
correls = {}
for i, name in enumerate(covar_vars):
par = params[name]
if not par.vary:
continue
if hasattr(par, 'correl') and par.correl is not None:
for name2 in covar_vars[i+1:]:
if name != name2 and name2 in par.correl:
correls["%s, %s" % (name, name2)] = par.correl[name2]
sort_correl = sorted(correls.items(), key=lambda it: abs(it[1]))
sort_correl.reverse()
for name, val in sort_correl:
if abs(val) < min_correl:
break
if len(name) < 20:
name = (name + ' '*20)[:20]
out.append(' %s = % .3f' % (name, val))
if with_paths:
out.append(' ')
out.append(header % 'Paths')
for ids, ds in enumerate(datasets):
if len(datasets) > 1:
out.append(' dataset %i:' % (ids+1))
for label, path in ds.paths.items():
out.append('%s\n' % path.report())
out.append('='*len(topline))
return '\n'.join(out)
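# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): one typical way the
# pieces above fit together. The data group, the paths dict and the parameter
# group below are illustrative assumptions, not a tested recipe.
#
#   trans = feffit_transform(kmin=3, kmax=12, dk=4, kweight=2,
#                            rmin=1.4, rmax=3.0, fitspace='r')
#   dset = feffit_dataset(data=data_group,       # group with arrays .k and .chi
#                         paths={'p1': path1},   # FeffPathGroup instances
#                         transform=trans)
#   result = feffit(paramgroup, dset)            # paramgroup holds the fit parameters
#   print(feffit_report(result))
# ---------------------------------------------------------------------------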
```
#### File: xraylarch/tests/test_deconvolution.py
```python
import unittest
import time
import ast
import numpy as np
import os
from sys import version_info
from utils import TestCase
from larch import Interpreter
class TestScripts(TestCase):
'''tests'''
def test_basic_interp(self):
self.runscript('interp.lar', dirname='../examples/basic/')
assert(len(self.session.get_errors()) == 0)
self.isNear("y0[1]", 0.48578, places=3)
self.isNear("y1[1]", 0.81310, places=3)
self.isNear("y2[1]", 0.41532, places=3)
if __name__ == '__main__': # pragma: no cover
for suite in (TestScripts,):
suite = unittest.TestLoader().loadTestsFromTestCase(suite)
unittest.TextTestRunner(verbosity=13).run(suite)
``` |
{
"source": "0xDB/WDOSXUnpacker",
"score": 3
} |
#### File: 0xDB/WDOSXUnpacker/WDOSXUnpacker.py
```python
import sys
import json
from array import array
import math
DEBUG = False
def log(txt):
if (DEBUG):
print txt
def parseArgs():
# check args
if (len(sys.argv) != 3):
print "usage: python %s exe unpack_destination_folder" % (sys.argv[0])
sys.exit(0)
# read exe file
exeFilePath = sys.argv[1]
with open(exeFilePath,"r") as f:
exeFile = f.read()
outPath = sys.argv[2]
return exeFile, outPath
def validateExeFile(exeFile):
# check executable header
if (exeFile[:2] != "MZ"):
print("Error: not an executable file")
sys.exit(1)
# check TIPPACH signature
if (exeFile[0x19:0x20] != "TIPPACH"):
print("Error: TIPPACH signature not found!")
sys.exit(1)
def strToInt(string):
string = string + "\x00"*(4-len(string))
return (ord(string[3])<<24) + (ord(string[2])<<16) + (ord(string[1])<<8) + ord(string[0])
# parse zero delimited string
def parseString(src, offset):
if ("\x00" in src[offset:]):
return src[offset:src.index("\x00",offset)]
return src[offset:]
def strToHex(s):
return ":".join("{:02x}".format(ord(c)) for c in s)
# this algorithm looks ugly because it was reversed from assembly
class WfseUnpacker():
srcBuf = None
srcIndex = 0
dstBuf = None
dstIndex = 0
tagBits = 0x80
dh = 0x80
wbp = 0
def __init__(self, wfseInfo):
self.wfseInfo = wfseInfo
self.srcBuf = array('B', wfseInfo["packedContent"])
self.dstBuf = array('B', "\x00"*wfseInfo["VirtualSize"])
# left shift one byte
def shiftLeft(self, byte, shiftInBit=0):
byte = (byte<<1) + shiftInBit
carry = byte>>8
byte = byte&0xFF
return byte, carry
# left shift two bytes
def shiftLeftDW(self, dw, shiftInBit=0):
lower = dw & 0xFF
upper = dw>>8
lower, carry = self.shiftLeft(lower, shiftInBit)
upper, carry = self.shiftLeft(upper, carry)
dw = (upper<<8) + lower
return dw, carry
# read tagbits
def getBit(self):
# get msb
self.tagBits, nextBit = self.shiftLeft(self.tagBits)
if (self.tagBits == 0):
# get next byte
self.tagBits = self.srcBuf[self.srcIndex]
self.srcIndex += 1
# previous msb becomes new lsb, shift out loaded msb
self.tagBits, nextBit = self.shiftLeft(self.tagBits, nextBit)
return nextBit
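# Tag bits drive the decompressor: in mainLoop() a 0 bit selects a literal
# byte copy and a 1 bit selects a back-reference (inflate). The one-byte tag
# register is refilled from the source stream when it runs empty, using the
# bit shifted in at load time as a sentinel for the refill point.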
# main unpack function.
def unpack(self):
try:
# algo always unpacks 0x1000 Bytes at a time (one page)
numPages = int(math.ceil(float(len(self.dstBuf)) / 0x1000))
for i in range(numPages):
self.tagBits = 0x80
self.mainLoop()
except Exception as e:
print \
"""
\rdecompression error, file was extracted but might be damaged.
\rdebug information:
\r file: %s
\r dstIndex: 0x%x
\r fileSize: 0x%x
\r srcIndex: 0x%x
\r srcSize: 0x%x
""" % (self.wfseInfo["FileName"], self.dstIndex, len(self.dstBuf), self.srcIndex, len(self.srcBuf))
self.wfseInfo["unpackedContent"] = bytearray(self.dstBuf)
# copies one byte from input buffer to output buffer
def copyLiteral(self):
self.dstBuf[self.dstIndex] = self.srcBuf[self.srcIndex]
log("copied literal %02x" % self.dstBuf[self.dstIndex])
self.dstIndex += 1
self.srcIndex += 1
self.dh = 0x80
# copies a given range from dstBuf to dstBuf at current offset
def inflate(self):
# dstIndexOffset:
# holds the offset into dstBuf where
# bytes will be copied from.
#
# numBytesToCopy:
# number of bytes to copy to current position in dstBuf
self.numBytesToCopy = self.readNumber(1)
self.dstIndexOffset = self.readNumber(1)
log("numBytesToCopy: %x" % self.numBytesToCopy)
log("dstIndexOffset: %x" % self.dstIndexOffset)
# shift out msb from dh
self.dh, cf = self.shiftLeft(self.dh)
# compute dstIndexOffset
# don't ask, I just translated asm magic
self.dstIndexOffset = self.dstIndexOffset - 2 - cf
log("dstIndexOffset after sbb: %d" % self.dstIndexOffset)
if (self.dstIndexOffset>=0):
self.dstIndexOffset = self.dstIndexOffset | 0x400
log("dstIndexOffset after or: %d" % self.dstIndexOffset)
while(True):
# shift in tagBits until a 1 is shifted out
self.dstIndexOffset, cf = self.shiftLeftDW(self.dstIndexOffset, self.getBit())
log("in loop: 0x%x" % self.dstIndexOffset)
if (cf == 1):
break
log("exited while loop, dstIndexOffset: %d" % self.dstIndexOffset)
self.dstIndexOffset += 1
if (self.dstIndexOffset >= 0x781):
self.numBytesToCopy += 1
log("set numBytesToCopy to %d" % self.numBytesToCopy)
self.wbp = self.dstIndexOffset
log("set wbp to %d" % self.dstIndexOffset)
# copy bytes
self.dstIndexOffset = self.wbp
copyFromIndex = self.dstIndex-self.dstIndexOffset
for i in range(self.numBytesToCopy):
self.dstBuf[self.dstIndex] = self.dstBuf[copyFromIndex]
log("copied %02x" % self.dstBuf[self.dstIndex])
self.dstIndex += 1
copyFromIndex += 1
log("inflation done.")
# main algorithm loop for 0x1000 byte blocks
def mainLoop(self):
startAtDstIndex = self.dstIndex
doCopyLiteral = True
while(True):
if (doCopyLiteral):
self.copyLiteral()
# end reached (asm decompDone)
positionInPage = (self.dstIndex - startAtDstIndex)
endOfPageReached = positionInPage >= 0x1000
endOfFileReached = self.srcIndex >= len(self.srcBuf)
if (endOfPageReached or endOfFileReached):
return self.dstBuf
# tag bit chooses whether to loop to copy literal or inflate dstBuf
doCopyLiteral = self.getBit() == 0
if (not doCopyLiteral):
self.inflate()
# read number parameter from tagbits
def readNumber(self, startValue):
startValue = startValue*2 + self.getBit()
if (self.getBit() == 1):
return self.readNumber(startValue)
else:
return startValue
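# readNumber() consumes an interleaved variable-length code from the tag-bit
# stream: each data bit is followed by a continuation bit, so small numbers
# cost only a few bits (a gamma-style encoding).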
def parseWdxInfo(exeFile, offset=0x20):
# Format:
# 4B Signature
# 2B Revision
# 1B Flags
# 1B StubClass
# 4B XMemReserve
# 4B XMemAlloc
# 4B Wfse Start
# check length
if (len(exeFile) - offset < 20):
raise Exception("invalid wdxInfo: too short!")
wdxInfo = {
"Signature": exeFile[offset:offset+4],
"Revision": strToInt(exeFile[offset+4:offset+6]),
"Flags": strToHex(exeFile[offset+6:offset+7]),
"StubClass": strToHex(exeFile[offset+7:offset+8]),
"XMemReserve": strToHex(exeFile[offset+8:offset+12]),
"XMemAlloc": strToHex(exeFile[offset+12:offset+16]),
"WfseStart": strToInt(exeFile[offset+16:offset+20])
}
# check signature
if (wdxInfo["Signature"] != "$WdX"):
raise Exception("Error parsing WdxInfo: invalid signature!")
return wdxInfo
def parseWfseInfo(src, offset):
# Format:
# 4B Signature "WFSE"
# 4B Size
# 4B VirtualSize
# 4B Flags
# 1-255B FileName
# ?B uiHeader
# ?B packedContent
# check length
if (len(src) < 17):
raise Exception("Error parsing wfseInfo: too short!")
wfseInfo = {
"offsetInFile": offset,
"Signature": src[offset:offset+4],
"Size": strToInt(src[offset+4:offset+8]),
"VirtualSize": strToInt(src[offset+8:offset+12]),
"Flags": strToHex(src[offset+12:offset+16]),
"FileName": parseString(src,offset+16)
}
if (wfseInfo["Signature"] != "WFSE"):
raise Exception("Error parsing wfseInfo: invalid signature")
# handle wfse content
wfseInfo["uiHeaderSize"] = ((wfseInfo["VirtualSize"] + 0xFFF) / 0x1000 * 4) + 6;
startOfPackedContent = wfseInfo["offsetInFile"] + 16 + len(wfseInfo["FileName"]) + 1 + wfseInfo["uiHeaderSize"]
endOfPackedContent = wfseInfo["offsetInFile"] + wfseInfo["Size"]
wfseInfo["packedContent"] = src[startOfPackedContent:endOfPackedContent]
return wfseInfo
def getAllWfseInfo(exeFile, wdxInfo):
wfseInfoList = []
offset = wdxInfo["WfseStart"]
while(offset < len(exeFile)):
wfseInfo = parseWfseInfo(exeFile, offset)
wfseInfoList += [wfseInfo]
offset += wfseInfo["Size"]
return wfseInfoList
if __name__ == '__main__':
exeFile, outPath = parseArgs()
validateExeFile(exeFile)
wdxInfo = parseWdxInfo(exeFile)
wfseInfoList = getAllWfseInfo(exeFile, wdxInfo)
for wfseInfo in wfseInfoList:
d = WfseUnpacker(wfseInfo)
d.unpack()
# write to file in output folder
filePath = outPath + "/" + wfseInfo["FileName"]
with open(filePath,"w") as f:
f.write(wfseInfo["unpackedContent"])
print "extracted %s (%dB)." % (filePath, wfseInfo["VirtualSize"])
``` |
{
"source": "0xdc/0xdc.io-fe",
"score": 2
} |
#### File: 0xdc.io-fe/frontend/tests.py
```python
from django.test import TestCase, RequestFactory
from django.urls import reverse
from .views import homepage
from django.contrib.auth.models import User
class HomepageTests(TestCase):
# setUp before every test
def setUp(self):
self.factory = RequestFactory()
self.request = self.factory.get(reverse("frontend:index"))
def test_homepage(self):
response = homepage(self.request)
self.assertNotContains(response, "/admin")
# Just for this test, ensure the view uses the correct template
self.assertIn("frontend/index.html", response.template_name)
def test_homepage_as_admin(self):
admin = User.objects.create_superuser('admin', '<EMAIL>', '<PASSWORD>')
self.request.user = admin
response = homepage(self.request)
self.assertContains(response, "/admin")
``` |
{
"source": "0xdc9/Bid-It",
"score": 3
} |
#### File: 0xdc9/Bid-It/API_SWIFT.py
```python
from flask import Flask
from flask import request
from flask import jsonify
import json
import sqlite3
import hashlib
app = Flask(__name__)
db_path = 'bid_it_database.db'
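# Note (inferred from the INSERT in /sign-up and the SELECT in /sign-in): the
# Users table is assumed to have 7 columns, in the order
# (Username, Password, Gender, Email, PhoneNumber, Birthday, ProfilePic).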
# connect db -> return session & ID (SIGN IN)
@app.route("/sign-in", methods=['GET', 'POST'])
def hello():
status = ""
print(request.headers)
if request.method == 'POST':
try:
f = request.get_data()
g = f.decode('utf-8')
parsing_to_json = json.loads(g)
print(parsing_to_json)
username = parsing_to_json['username']
password = hashlib.md5(str("<PASSWORD>" + parsing_to_json['password']).encode()).hexdigest()
if len(username) == 0:
status = "username kosong"
elif len(password) == 0:
status = "password kosong"
else:
# check db by name (does the user exist or not)
conn1 = sqlite3.connect(db_path)
c1 = conn1.cursor()
c1.execute("select * from Users where Username='{a}'".format(a=username))
f1 = c1.fetchone()
if f1 == None:
print("GA ADA DATA")
status = {"code": "username does not exist","username":" ", "gender":" ", "email":" ", "phonenumber":" ", "birthday":" "}
conn1.close()
elif f1 != None:
# check db (name & password)
conn = sqlite3.connect(db_path)
c = conn.cursor()
c.execute("select * from Users where Username='{a}' and Password='{b}'".format(a=username, b=password))
f = c.fetchone()
output = []
if f == None:
status = jsonify(code = "Wrong password")
conn.close()
else:
conn.close()
[output.append(str(x)) for x in f]
print("[+] QUERY OUTPUT")
ret_user = output[0]
ret_gender = output[2]
ret_email = output[3]
ret_phonumm = output[4]
ret_bdate = output[5]
ret_profile_pic = output[6]
#print(output)
print('[+] QUERY OUTPUT END\n')
status = {"code": "success","username":ret_user, "gender":ret_gender, "email":ret_email, "phonenumber":ret_phonumm, "birthday":ret_bdate, "profilepic":ret_profile_pic}
else:
status = jsonify(code = "something went horribly wrong")
except Exception as weloz:
status = jsonify(code = "500 Error: "+str(weloz))
print(weloz)
elif request.method == 'GET':
status = jsonify(code ="HEY! 'GET' IS NOT ALLOWED")
print("[!] DEBUG: ")
#print(status)
return status
# SIGN-UP API
@app.route("/sign-up", methods=['GET', 'POST'])
def meki():
status = ""
print(request.headers)
if request.method == 'POST':
try:
f = request.get_data()
g = f.decode('utf-8')
parsing_to_json = json.loads(g)
print(parsing_to_json)
username = parsing_to_json['Username']
password = hashlib.md5(str( "$<PASSWORD>5"+ parsing_to_json['Password']).encode()).hexdigest()
repass = hashlib.md5(str( "$4LTY3995"+ parsing_to_json['RePassword']).encode()).hexdigest()
birthdate = parsing_to_json['Bday']
email = parsing_to_json['Email']
nohape = parsing_to_json['PhoneNumber']
gender = parsing_to_json['Gender']
print('[+] DEBUG')
print('Gender: '+ gender )
print('Password: '+ password)
print('RePassword: ' + repass)
print("comparison pass with repass: " + str(password == repass))
# validation + insert into db
with open("default.txt", "r") as f:
image_based64 = f.read()
parsin = (username, password, gender, email, nohape, birthdate, image_based64)
queri = "INSERT INTO Users VALUES(?, ?, ?, ?, ?, ?, ?)"
conn = sqlite3.connect(db_path)
cur_nig = conn.cursor()
cur_nig.execute(queri, parsin)
conn.commit()
conn.close()
status = "Setted and ok"
except Exception as weloz:
status = "ERROR " + str(weloz)
#print(status)
elif request.method == 'GET':
status = "HEY! 'GET' IS NOT ALLOWED"
return jsonify(
code = status)
#### FOR THE BID PAGE ####
#notes{
# 1. on page click, select auction
# 2. page loaded -> image becomes a button
# 3. image clicked -> render page auction(select) (might need to set public variable/func for template)
# 4. select ongoing bids (if there are none -> "no user has placed a bid for this auction. go place a bid".
# if there are -> show{
# # AUCTION TEMPLATE ##
# # Other USER BIDS #
# # your bid
# # bid input
# # button place bid
# }
# 5. place bid? -> check whether one already exists: exists -> update; does not exist -> insert -> update -> reload page
# """
# # add bid
# """
# on going bid:
# user_id(fk)
# auction_id(fk)
# placed_bid(int)
# category:
# cat_id(pk)
# cat_type
# auction:
# auction_id(Pk)
# toko_lelang_id(fk)
# cat_id(fk)
# start(date)
# end(date)
# auction_description
# auction_image
# base logic
# 1. not in the table yet? insert it
# 2. already in the table? update it (update where ...)
# }
# }
# LOGIC PLACE BID
# logic 1(cek if there's a bid): select placed_bid from on_going_auction where Username = '<USERNAME> and auction_name=<AUCTION NAME>'
# logic 1: if None = there is no bid yet; if not None = go ahead
# logic 2(there's no bid): insert into on_going_auction VALUES('<AUCTION NAME>', '<USERNAME>', <BID>);
# logic 3(there's bid): update on_going_auction set placed_bid = <INT BID> where auction_name = '<AUCTION NAME>' and Username = '<USERNAME>'
# logic 4(there is already a bid but the new bid is lower -> skip; new bid higher than the stored one -> go ahead)
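# (Alternative sketch, assuming a UNIQUE(auction_name, Username) constraint
# exists on on_going_auction: the whole insert-or-update flow could be one
# SQLite upsert,
#   INSERT INTO on_going_auction VALUES(?, ?, ?)
#     ON CONFLICT(auction_name, Username) DO UPDATE SET placed_bid = excluded.placed_bid;
# the handlers below implement it with explicit SELECT/INSERT/UPDATE steps instead.)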
@app.route("/get-place-bid", methods=['GET', 'POST'])
def get_place_bid():
status = ""
# logic get_data_bidder
# select Username, placed_bid FROM on_going_auction WHERE auction_name = "test1" ORDER by placed_bid DESC LIMIT 3
if request.method == 'POST':
try:
get_param_get = request.get_data()
get_param_get = get_param_get.decode('utf-8')
get_value_param = json.loads(get_param_get)
auction_name = get_value_param['auction']
conn = sqlite3.connect(db_path)
c1 = conn.cursor()
c1.execute("select Username, placed_bid FROM on_going_auction WHERE auction_name = '{a}' ORDER by placed_bid DESC LIMIT 3".format(a=auction_name))
data_auction = c1.fetchall()
conn.close()
print(data_auction)
stat = []
for x in range(0, len(data_auction)):
for j in data_auction[x]:
stat.append(str(j))
print("len status: " + str(len(stat)))
if len(stat) != 0:
status = {"status":stat}
else:
status = {"status":["empty"]}
except Exception as e:
status = "ERROR -> " + str(e) + '\n'
print(status)
return status
@app.route("/place-user-bid", methods=['GET', 'POST'])
def place_bid():
# logic 1: select placed_bid from on_going_auction where auction_name = 'test2' and Username='itachi';
# empty?: insert into on_going_auction VALUES('test2', 'itachi', 1000)
# not empty? check the placed bid. new bid bigger? update on_going_auction set placed_bid = 2000 where auction_name = 'test2' and Username = 'itachi'
status = ""
if request.method == 'POST':
try:
get_parameter_place_user_bid = request.get_data()
get_parameter_place_user_bid = get_parameter_place_user_bid.decode('utf-8')
get_parameter_place_user_bid = json.loads(get_parameter_place_user_bid)
auction_name = get_parameter_place_user_bid['auction']
bid_price = get_parameter_place_user_bid['bid_price']
user_bidder = get_parameter_place_user_bid['username']
# logic 1
conn = sqlite3.connect(db_path)
c1 = conn.cursor()
c1.execute("select placed_bid from on_going_auction where auction_name = '{a}' and Username='{b}'".format(a=auction_name, b=user_bidder))
data_auction = c1.fetchall()
conn.close()
print(data_auction)
print(len(data_auction))
            # no existing bid for this user and auction: insert a new row
            if len(data_auction) == 0:
                print("inserting new bid")
#insert into on_going_auction VALUES('test2', 'itachi', 1000)
value_parse = (auction_name, user_bidder, bid_price)
query_insert = """INSERT INTO on_going_auction VALUES(?, ?, ?)"""
conn = sqlite3.connect(db_path)
                cursor = conn.cursor()
                cursor.execute(query_insert, value_parse)
conn.commit()
conn.close()
status = {"status":["inserted"]}
elif len(data_auction) != 0:
current_bid = data_auction[0][0]
# logic update
if bid_price > current_bid:
conn = sqlite3.connect(db_path)
c1 = conn.cursor()
c1.execute(f"update on_going_auction set placed_bid = {str(bid_price)} where auction_name = '{auction_name}' and Username = '{user_bidder}'")
conn.commit()
conn.close()
status = {"status":["updated"]}
else:
status = {"status":["not updated"]}
except Exception as e:
raise e
return status
'''
1. select auction
2. parse
'''
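# Example client call against the /place-user-bid route above (illustrative only;
# the host and port come from the app.run() call at the bottom of this file, and
# the JSON keys are the ones read by place_bid()):
#
#   import requests
#   resp = requests.post(
#       "http://127.0.0.1:4242/place-user-bid",
#       json={"auction": "test2", "username": "itachi", "bid_price": 2000})
#   print(resp.json())  # {"status": ["inserted"]}, ["updated"] or ["not updated"]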
# LOGIC GET auction
@app.route("/get-auction", methods=['GET', 'POST'])
def get_on_going_auction():
status = ""
out = []
fixed_out = []
if request.method == 'POST':
response = ""
try:
# get auction -> render
f1 = []
# try to get responses
try:
get_json = request.get_data()
get_json = get_json.decode('utf-8')
get_json = json.loads(get_json)
response = get_json['category']
#print(response)
except:
print("no param")
#print(len(f1))
#print(response)
#print(len(response))
            # if/elif chain that returns an auction row for the requested category
if response == "all":
while len(f1) == 0 :
conn1 = sqlite3.connect(db_path)
c1 = conn1.cursor()
# select Auction.Auction_name, Category.cat_type , Auction.Auction_description, Auction.Auction_image, Auction.toko_lelang, Auction.start_date, Auction.end_date from Auction INNER JOIN Category on Auction.cat_id = Category.cat_id WHERE Auction.cat_id = cast(ABS(RANDOM()) % (4 - 1) + 1 as TEXT);
c1.execute("select Auction.Auction_name, Category.cat_type , Auction.Auction_description, Auction.Auction_image, Auction.toko_lelang, Auction.start_date, Auction.end_date from Auction INNER JOIN Category on Auction.cat_id = Category.cat_id;")
#select Auction.Auction_name, Category.cat_type , Auction.Auction_description, Auction.Auction_image, Auction.toko_lelang, Auction.start_date, Auction.end_date from Auction INNER JOIN Category on Auction.cat_id = Category.cat_id WHERE Auction.cat_id = cast(ABS(RANDOM()) % (4 - 1) + 1 as TEXT);")
f1 = c1.fetchall()
#print("DBGJING")
#print(len(f1))
if len(f1) == 0:
conn1.close()
print("empty")
pass
else:
conn1.close()
# for x in f1:
# print("INDEKS#!")
# print(type(x))
# print(type(f1))
#print("")
#print(f1[0])
status = {"status":f1[0]}
#print(type(out))
break
elif response == "fashion":
print("fashion")
while len(f1) == 0 :
conn1 = sqlite3.connect(db_path)
c1 = conn1.cursor()
# select Auction.Auction_name, Category.cat_type , Auction.Auction_description, Auction.Auction_image, Auction.toko_lelang, Auction.start_date, Auction.end_date from Auction INNER JOIN Category on Auction.cat_id = Category.cat_id WHERE Auction.cat_id = cast(ABS(RANDOM()) % (4 - 1) + 1 as TEXT);
#c1.execute("select Auction.Auction_name, Category.cat_type , Auction.Auction_description, Auction.Auction_image, Auction.toko_lelang, Auction.start_date, Auction.end_date from Auction INNER JOIN Category on Auction.cat_id = Category.cat_id;")
c1.execute("select Auction.Auction_name, Category.cat_type , Auction.Auction_description, Auction.Auction_image, Auction.toko_lelang, Auction.start_date, Auction.end_date from Auction INNER JOIN Category on Auction.cat_id = Category.cat_id WHERE Auction.cat_id = '3';")
f1 = c1.fetchall()
#print("DBGJING")
#print(len(f1))
if len(f1) == 0:
conn1.close()
print("empty")
pass
else:
conn1.close()
# for x in f1:
# print("INDEKS#!")
# print(type(x))
# print(type(f1))
#print("")
#print(f1[0])
status = {"status":f1[0]}
#print(type(out))
break
elif response == "game":
print('game')
while len(f1) == 0 :
conn1 = sqlite3.connect(db_path)
c1 = conn1.cursor()
# select Auction.Auction_name, Category.cat_type , Auction.Auction_description, Auction.Auction_image, Auction.toko_lelang, Auction.start_date, Auction.end_date from Auction INNER JOIN Category on Auction.cat_id = Category.cat_id WHERE Auction.cat_id = cast(ABS(RANDOM()) % (4 - 1) + 1 as TEXT);
#c1.execute("select Auction.Auction_name, Category.cat_type , Auction.Auction_description, Auction.Auction_image, Auction.toko_lelang, Auction.start_date, Auction.end_date from Auction INNER JOIN Category on Auction.cat_id = Category.cat_id;")
c1.execute("select Auction.Auction_name, Category.cat_type , Auction.Auction_description, Auction.Auction_image, Auction.toko_lelang, Auction.start_date, Auction.end_date from Auction INNER JOIN Category on Auction.cat_id = Category.cat_id WHERE Auction.cat_id = '1';")
f1 = c1.fetchall()
#print("DBGJING")
#print(len(f1))
if len(f1) == 0:
conn1.close()
print("empty")
pass
else:
conn1.close()
# for x in f1:
# print("INDEKS#!")
# print(type(x))
# print(type(f1))
#print("")
#print(f1[0])
status = {"status":f1[0]}
#print(type(out))
break
elif response == "tech":
print("tech")
while len(f1) == 0 :
conn1 = sqlite3.connect(db_path)
c1 = conn1.cursor()
# select Auction.Auction_name, Category.cat_type , Auction.Auction_description, Auction.Auction_image, Auction.toko_lelang, Auction.start_date, Auction.end_date from Auction INNER JOIN Category on Auction.cat_id = Category.cat_id WHERE Auction.cat_id = cast(ABS(RANDOM()) % (4 - 1) + 1 as TEXT);
#c1.execute("select Auction.Auction_name, Category.cat_type , Auction.Auction_description, Auction.Auction_image, Auction.toko_lelang, Auction.start_date, Auction.end_date from Auction INNER JOIN Category on Auction.cat_id = Category.cat_id;")
c1.execute("select Auction.Auction_name, Category.cat_type , Auction.Auction_description, Auction.Auction_image, Auction.toko_lelang, Auction.start_date, Auction.end_date from Auction INNER JOIN Category on Auction.cat_id = Category.cat_id WHERE Auction.cat_id = '4';")
f1 = c1.fetchall()
#print("DBGJING")
#print(len(f1))
if len(f1) == 0:
conn1.close()
print("empty")
pass
else:
conn1.close()
# for x in f1:
# print("INDEKS#!")
# print(type(x))
# print(type(f1))
#print("")
#print(f1[0])
status = {"status":f1[0]}
#print(type(out))
break
elif response == "automotive":
print('auto')
while len(f1) == 0 :
conn1 = sqlite3.connect(db_path)
c1 = conn1.cursor()
# select Auction.Auction_name, Category.cat_type , Auction.Auction_description, Auction.Auction_image, Auction.toko_lelang, Auction.start_date, Auction.end_date from Auction INNER JOIN Category on Auction.cat_id = Category.cat_id WHERE Auction.cat_id = cast(ABS(RANDOM()) % (4 - 1) + 1 as TEXT);
#c1.execute("select Auction.Auction_name, Category.cat_type , Auction.Auction_description, Auction.Auction_image, Auction.toko_lelang, Auction.start_date, Auction.end_date from Auction INNER JOIN Category on Auction.cat_id = Category.cat_id;")
c1.execute("select Auction.Auction_name, Category.cat_type , Auction.Auction_description, Auction.Auction_image, Auction.toko_lelang, Auction.start_date, Auction.end_date from Auction INNER JOIN Category on Auction.cat_id = Category.cat_id WHERE Auction.cat_id = '2';")
f1 = c1.fetchall()
#print("DBGJING")
#print(len(f1))
if len(f1) == 0:
conn1.close()
print("empty")
pass
else:
conn1.close()
# for x in f1:
# print("INDEKS#!")
# print(type(x))
# print(type(f1))
#print("")
#print(f1[0])
status = {"status":f1[0]}
#print(type(out))
break
except Exception as e:
status = "ERROR" + str(e)
print(status)
else:
return "get not allowed"
#print(status)
return status
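# The five branches above differ only in their WHERE clause, so they could be
# collapsed with a category -> cat_id map. Illustrative sketch only (not wired
# into the route); the cat_id values are the ones hard-coded in the queries above.
_CATEGORY_IDS = {"game": "1", "automotive": "2", "fashion": "3", "tech": "4"}

def _fetch_auction_sketch(category):
    base = ("select Auction.Auction_name, Category.cat_type, Auction.Auction_description, "
            "Auction.Auction_image, Auction.toko_lelang, Auction.start_date, Auction.end_date "
            "from Auction INNER JOIN Category on Auction.cat_id = Category.cat_id")
    conn = sqlite3.connect(db_path)
    cur = conn.cursor()
    if category == "all":
        cur.execute(base)
    else:
        cur.execute(base + " WHERE Auction.cat_id = ?", (_CATEGORY_IDS[category],))
    rows = cur.fetchall()
    conn.close()
    return {"status": rows[0]} if rows else None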
# get on_going_placed_bid
@app.route("/my-bid", methods=['POST'])
def get_my_placed_bid():
status = ""
if request.method == 'POST':
try:
get_param_get = request.get_data()
get_param_get = get_param_get.decode('utf-8')
get_value_param = json.loads(get_param_get)
username = get_value_param['username']
query = f"""SELECT Auction.Auction_name, Auction.end_date from Auction INNER JOIN on_going_auction on on_going_auction.auction_name=Auction.Auction_name INNER JOIN Users on Users.Username = on_going_auction.Username where datetime(substr(Auction.end_date,7,4) || '-' || substr(Auction.end_date,4, 2) || '-' || substr(Auction.end_date,0, 3) || ' ' || substr(Auction.end_date, 12, 8) || ':00' ) > datetime('now') and Users.Username ='{username}' limit 1"""
conn = sqlite3.connect(db_path)
c1 = conn.cursor()
c1.execute(query)
data_auction = c1.fetchall()
conn.close()
print(len(data_auction))
#status = {"status":data_auction}
if len(data_auction) == 0:
status = {"status":["empty"]}
else:
# data to be returned
# auction_name, image, date_start, date_ends, description, top_bidder
auction_name = data_auction[0][0]
print("auction name " + auction_name)
query_again = f"""select Auction.auction_name, Category.cat_type,Auction.Auction_image, on_going_auction.placed_bid, Auction.start_date, Auction.end_date, Auction.Auction_description from on_going_auction INNER join Auction on Auction.Auction_name = on_going_auction.auction_name INNER JOIN Category on Auction.cat_id = Category.cat_id where on_going_auction.auction_name = '{auction_name}' and on_going_auction.Username = '{username}'"""
conn = sqlite3.connect(db_path)
c1 = conn.cursor()
c1.execute(query_again)
data_to_send = c1.fetchall()
conn.close()
data_to_send = data_to_send[0]
fixed_data = []
for x in data_to_send:
fixed_data.append(str(x))
# nama[0], category[1], image[2], placed_bid[3], start_date[4], end_date[5], auction_desc[6]
#status = data_auction
status = {"status":fixed_data}
except Exception as e:
raise e
return status
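# The substr() chain in the query above rewrites the stored end_date, assumed to
# look like 'dd/mm/yyyy HH:MM', into 'yyyy-mm-dd HH:MM:00' so sqlite's datetime()
# can compare it with datetime('now'). Worked example (sqlite substr positions
# are 1-based; substr(x, 0, 3) yields the first two characters):
#
#   '25/12/2021 18:30'
#       -> '2021' || '-' || '12' || '-' || '25' || ' ' || '18:30' || ':00'
#       -> '2021-12-25 18:30:00'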
@app.route("/my-bid-history", methods=['POST'])
def get_my_bid_history():
# query 1 = """SELECT Auction.Auction_name, Auction.end_date from Auction INNER JOIN on_going_auction on on_going_auction.auction_name=Auction.Auction_name INNER JOIN Users on Users.Username = on_going_auction.Username where datetime(substr(Auction.end_date,7,4) || '-' || substr(Auction.end_date,4, 2) || '-' || substr(Auction.end_date,0, 3) || ' ' || substr(Auction.end_date, 12, 8) || ':00' ) < datetime('now') and Users.Username ='{username}' limit 1"""
if request.method == 'POST':
try:
get_param_get = request.get_data()
get_param_get = get_param_get.decode('utf-8')
get_value_param = json.loads(get_param_get)
username = get_value_param['username']
query = f"""SELECT Auction.Auction_name, Auction.end_date from Auction INNER JOIN on_going_auction on on_going_auction.auction_name=Auction.Auction_name INNER JOIN Users on Users.Username = on_going_auction.Username where datetime(substr(Auction.end_date,7,4) || '-' || substr(Auction.end_date,4, 2) || '-' || substr(Auction.end_date,0, 3) || ' ' || substr(Auction.end_date, 12, 8) || ':00' ) < datetime('now') and Users.Username ='{username}' limit 1"""
conn = sqlite3.connect(db_path)
c1 = conn.cursor()
c1.execute(query)
data_auction = c1.fetchall()
conn.close()
print(len(data_auction))
#status = {"status":data_auction}
if len(data_auction) == 0:
status = {"status":["empty"]}
else:
# data to be returned
# auction_name, image, date_start, date_ends, description, top_bidder
auction_name = data_auction[0][0]
print("auction name " + auction_name)
query_again = f"""select Auction.auction_name, Category.cat_type,Auction.Auction_image, on_going_auction.placed_bid, Auction.start_date, Auction.end_date, Auction.Auction_description from on_going_auction INNER join Auction on Auction.Auction_name = on_going_auction.auction_name INNER JOIN Category on Auction.cat_id = Category.cat_id where on_going_auction.auction_name = '{auction_name}' and on_going_auction.Username = '{username}'"""
conn = sqlite3.connect(db_path)
c1 = conn.cursor()
c1.execute(query_again)
data_to_send = c1.fetchall()
conn.close()
data_to_send = data_to_send[0]
fixed_data = []
for x in data_to_send:
fixed_data.append(str(x))
# nama[0], category[1], image[2], placed_bid[3], start_date[4], end_date[5], auction_desc[6]
#status = data_auction
status = {"status":fixed_data}
except Exception as e:
raise e
return status
app.run(host="127.0.0.1", port=4242)
``` |
{
"source": "0xdc/botfriend",
"score": 2
} |
#### File: botfriend/tests/test_bot.py
```python
import datetime
from nose.tools import (
assert_raises,
eq_,
set_trace,
)
from . import DatabaseTest
from bot import Bot
from model import (
InvalidPost,
Post,
_now,
)
class TestBot(DatabaseTest):
def test_publishable_posts(self):
bot = self._bot(config=dict(
state_update_schedule=1,
schedule=1
))
eq_(False, bot.state_updated)
# Since this bot has never posted, publishable_posts returns a
# list containing a single new post.
[new_post] = bot.publishable_posts
assert isinstance(new_post, Post)
eq_(new_post.content, bot.new_posts[0])
# Since this bot has no state update schedule,
# Bot.update_state() was not called.
eq_(False, bot.state_updated)
# Calling publishable_posts returns an empty list, since it's not yet
# time for another post.
eq_([], bot.publishable_posts)
def test_publishable_posts_may_update_state(self):
bot = self._bot(config=dict(state_update_schedule=1, schedule=1))
eq_(True, bot.state_needs_update)
bot.publishable_posts
eq_(True, bot.state_updated)
eq_(False, bot.state_needs_update)
def test_backlog(self):
bot = self._bot()
eq_([], bot.backlog)
item = {"k": "v"}
# By default, items are stored in the backlog as is.
eq_(item, bot.backlog_item(item))
# Backlog items are transparently serialized and deserialized
# to JSON.
bot.extend_backlog([item])
eq_([item], bot.backlog)
bot.clear_backlog()
eq_([], bot.backlog)
def test_publishable_posts_pops_backlog(self):
bot = self._bot()
bot.extend_backlog(["backlog_1", "backlog_2"])
[post1] = bot.publishable_posts
eq_("backlog_1", post1.content)
# There's still a backlog item, but it's not time for another post,
# so publishable_posts doesn't pop it.
eq_(["backlog_2"], bot.backlog)
eq_([], bot.publishable_posts)
def test_publishable_posts_returns_all_scheduled_posts(self):
bot = self._bot()
now = _now()
yesterday = now - datetime.timedelta(days=1)
day_before = now - datetime.timedelta(days=2)
tomorrow = now + datetime.timedelta(days=1)
publish_yesterday = self._post(
bot.model, "yesterday", publish_at=yesterday
)
publish_earlier = self._post(
bot.model, "day before", publish_at=day_before
)
publish_later = self._post(
bot.model, "tomorrow", publish_at=tomorrow
)
# publishable_posts returns all posts that should have been
# published by now.
eq_([publish_earlier, publish_yesterday], bot.publishable_posts)
# Since the scheduled posts weren't _created_ by the
# publishable_posts, they don't go away when you call
# publishable_posts once. They will stick around until they're
# published.
eq_([publish_earlier, publish_yesterday], bot.publishable_posts)
def test_to_post_list(self):
"""Test the method that handles the output of new_post."""
class ModifierBot(Bot):
def object_to_post(self, obj):
return obj + "!"
bot = self._bot(ModifierBot)
m = bot._to_post_list
post = self._post()
# A bot can turn an object (such as a backlog object) into a post
# by creating the Post object, or a list of posts.
eq_([post], m(post))
eq_([post], m([post]))
# A bot can also create a Post by defining object_to_post to
# return a string. publishable_posts takes care of actually
# converting it into a post.
[modified_post] = m("A string")
assert isinstance(modified_post, Post)
eq_("A string!", modified_post.content)
# It's also okay for object_to_post to return the actual Post object.
class PostBot(Bot):
def object_to_post(self, obj):
post, is_new = Post.from_content(self.model, obj)
return post
bot = self._bot(PostBot)
[post] = bot._to_post_list("A string")
        assert isinstance(post, Post)
eq_("A string", post.content)
# Or a list of Post objects.
class PostBot(Bot):
def object_to_post(self, obj):
post, is_new = Post.from_content(self.model, obj)
return [post]
[post] = self._bot(PostBot)._to_post_list("A string")
        assert isinstance(post, Post)
eq_("A string", post.content)
# No other type of value is acceptable.
class PostBot(Bot):
def object_to_list(self, obj):
return dict(value=obj)
assert_raises(
InvalidPost, self._bot(PostBot)._to_post_list, ["A complicated value"]
)
def test_post_can_only_be_scheduled_for_the_future(self):
# You can schedule a post for the future.
class FutureBot(Bot):
def _schedule_posts(self):
tomorrow = _now() + datetime.timedelta(days=1)
post, is_new = Post.from_content(
self.model, "the future!", publish_at=tomorrow
)
return post
bot = self._bot(FutureBot)
eq_(["the future!"], [x.content for x in bot.schedule_posts()])
# But not for the past.
class PastBot(Bot):
def _schedule_posts(self):
yesterday = _now() - datetime.timedelta(days=1)
post, is_new = Post.from_content(
self.model, "the past!", publish_at=yesterday
)
return [post]
bot = self._bot(PastBot)
assert_raises(InvalidPost, bot.schedule_posts)
def test_next_scheduled_post(self):
bot = self._bot()
# If there is no schedule, a Bot will either create a new post
# every time it's invoked (not a good idea), or posts must be
# scheduled in advance using some other mechanism.
bot.schedule = None
eq_(None, bot._next_scheduled_post([]))
# If the schedule is a number, a Bot will create a new post
# every [that number] of minutes.
bot.schedule = 5
delta = bot._next_scheduled_post([])
assert isinstance(delta, datetime.timedelta)
eq_(5*60, delta.seconds)
# If the schedule is a dictionary with values 'mean' and 'stdev',
# a Bot will create posts in a Gaussian distribution with those numbers
# as mean and standard deviation.
bot.schedule = dict(mean=6, stdev=0)
delta = bot._next_scheduled_post([])
assert isinstance(delta, datetime.timedelta)
eq_(6*60, delta.seconds)
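        # A helper with the semantics exercised above might look roughly like the
        # following (illustrative sketch only, not botfriend's actual code):
        #
        #   import random
        #   def schedule_to_delta(schedule):
        #       if schedule is None:
        #           return None
        #       if isinstance(schedule, dict):
        #           minutes = random.gauss(schedule["mean"], schedule["stdev"])
        #       else:
        #           minutes = schedule
        #       return datetime.timedelta(minutes=minutes)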
```
#### File: bots.sample/podcast/__init__.py
```python
from dateutil import parser
from pdb import set_trace
import random
from olipy.ia import Audio
from botfriend.bot import BasicBot
from botfriend.publish.podcast import PodcastPublisher
class PodcastBot(BasicBot):
COLLECTION = "podcasts"
def update_state(self):
# Grab the 100 most recently posted podcasts.
query = Audio.recent("collection:%s" % self.COLLECTION)
max_count = 100
choices = []
a = 0
for audio in query:
choices.append(audio.identifier)
a += 1
if a >= max_count:
break
self.model.json_state = choices
def file(self, item, format_name):
"""Find a file in a specific format."""
for f in item.files:
if f.format == format_name:
return f
return None
def make_post(self, podcast):
"""Convert an Audio object into a post compatible with
the PodcastPublisher.
"""
meta = podcast.metadata
mp3 = self.file(podcast, "VBR MP3")
if not mp3:
# This isn't really a podcast.
return None
title = meta.get('title')
date = parser.parse(
meta.get('date') or meta.get('publicdate')
).strftime("%d %b %Y")
description = meta.get('description', '')
creator = meta.get('creator')
if creator:
byline = " by %s" % creator
else:
byline = ""
detail_url = 'https://archive.org/details/%s' % meta['identifier']
detail_link='<p>Archived at <a href="%s">%s</a>' % (detail_url, detail_url)
template = '<p>Originally published%(byline)s on %(date)s.</p>\n\n%(description)s\n\n%(details)s'
description = template % dict(
details=detail_link,
title=title,
description=description,
date=date,
byline=byline
)
# Create a post compatible with the PodcastPublisher.
return PodcastPublisher.make_post(
self.model, title, mp3.url, description,
media_size=mp3.size, guid=detail_url
)
def new_post(self):
podcast = random.choice(self.model.json_state)
post, is_new = self.make_post(Audio(podcast))
return post
Bot = PodcastBot
``` |
{
"source": "0xdc/cc-djangae-app",
"score": 2
} |
#### File: {{ cookiecutter.name }}/settings/__init__.py
```python
import os
import sys
import string
from random import SystemRandom
from django.core.exceptions import ImproperlyConfigured
from djangae.settings_base import *
def env(env_var):
try:
return os.environ[env_var]
except KeyError:
error_msg = "Set the {} environment variable".format(env_var)
raise ImproperlyConfigured(error_msg)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
try:
SECRET_KEY = env("SECRET_KEY")
except ImproperlyConfigured:
SECRET_FILE = os.path.join(BASE_DIR, 'secretkey.txt')
try:
SECRET_KEY = open(SECRET_FILE).read().strip()
except IOError:
try:
            SECRET_KEY = ''.join(
                SystemRandom().choice(string.ascii_letters + string.digits + string.punctuation)
                for i in range(63))
with open(SECRET_FILE, 'w') as secret:
secret.write(SECRET_KEY)
except IOError:
raise ImproperlyConfigured('Please create the {} file with random characters \
to generate your secret key!'.format(SECRET_FILE))
# By default, run with DEBUG=False
# Debug environments must re-enable debug mode
# An empty string is bool(False)
os.environ.setdefault("DEBUG", "")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(env("DEBUG"))
try:
ALLOWED_HOSTS = env("ALLOWED_HOSTS").split(" ")
except ImproperlyConfigured:
ALLOWED_HOSTS = ["localhost", "127.0.0.1"]
sys.stderr.write("No ALLOWED_HOSTS provided; defaulting to: {}\n".format(", ".join(ALLOWED_HOSTS)))
# Detect proxied SSL header
# https://docs.djangoproject.com/en/1.11/ref/settings/#secure-proxy-ssl-header
os.environ.setdefault("SSL", "")
ssl = bool(env("SSL"))
if ssl:
sys.stderr.write("Enabling SSL proxy header\n")
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
else:
sys.stderr.write("Not enabling SSL proxy header\n")
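# Example environment for these settings (illustrative; the variable names are
# the ones read above):
#
#   export SECRET_KEY="some-long-random-string"
#   export DEBUG="1"                                     # any non-empty value enables debug
#   export ALLOWED_HOSTS="example.com www.example.com"   # space-separated
#   export SSL="1"                                       # any non-empty value trusts the proxy header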
# Application definition
INSTALLED_APPS = [
'djangae',
'django.contrib.auth',
'djangae.contrib.gauth_datastore',
'django.contrib.contenttypes',
'djangae.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'djangae.contrib.security',
]
MIDDLEWARE = [
'djangae.contrib.security.middleware.AppEngineSecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'djangae.contrib.gauth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'session_csrf.CsrfMiddleware',
]
ROOT_URLCONF = '{{ cookiecutter.name }}.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = '{{ cookiecutter.name }}.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'djangae.db.backends.appengine',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'static'
AUTH_USER_MODEL = "gauth_datastore.GaeDatastoreUser"
AUTHENTICATION_BACKENDS = (
'djangae.contrib.gauth_datastore.backends.AppEngineUserAPIBackend',
)
``` |
{
"source": "0xdc/django-aaisp-bandwidth",
"score": 2
} |
#### File: django-aaisp-bandwidth/aaisp/views.py
```python
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from .models import Line
@login_required
def update_lines(request):
for line in Line.objects.all():
line.save_new()
return HttpResponse("OK")
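# A urls.py entry wiring this view up might look like the following (illustrative
# sketch; the path and name are assumptions, not part of this app):
#
#   from django.urls import path
#   from aaisp import views
#
#   urlpatterns = [
#       path("aaisp/update/", views.update_lines, name="aaisp-update-lines"),
#   ]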
``` |
{
"source": "0xdc/estuary",
"score": 2
} |
#### File: estuary/tests/conftest.py
```python
import pytest
from alembic import command
from alembic.config import Config
from starlette.config import environ
from starlette.testclient import TestClient
environ["DATABASE_URL"] = "sqlite:///:memory:"
@pytest.fixture(autouse=True)
def create_test_database():
config = Config("alembic.ini")
command.upgrade(config, "head", "--sql") # offline, for coverage
command.upgrade(config, "head")
yield
command.downgrade(config, "base")
@pytest.fixture()
def client():
from estuary.asgi import app
with TestClient(app) as client:
yield client
``` |
{
"source": "0xdc/fiawec-results-dl",
"score": 3
} |
#### File: fiawec-results-dl/fiawec/event.py
```python
from bs4 import BeautifulSoup as BS
import requests
import os
import urllib
class Event():
url = "http://fiawec.alkamelsystems.com/index.php?season={season}&evvent={event}"
def __init__(self, event, season):
self.event = event
self.season = season
self.url = self.url.format(season=season, event=event)
def __str__(self):
return "{}/{}".format(self.season, self.event)
def pull(self):
if not hasattr(self, "cache"):
self.cache = requests.get(self.url)
return self.cache
def event_results(self, response=None):
if response is None:
response = self.pull()
soup = BS(response.text, "html.parser")
events = soup.find("div", id="resultsTree")
self.results = events.find_all("a")
#return [ x.get("value") for x in objects ]
return self.results
def all(self, results=None):
if results is None:
results = self.event_results()
for result in results:
try:
href = urllib.parse.unquote(result.get("href"))
except TypeError:
continue
basename = os.path.basename(href)
dirname = os.path.dirname(href)
try:
os.makedirs(dirname)
except FileExistsError:
pass
print(self, basename)
if not os.path.exists(href):
url = "http://fiawec.alkamelsystems.com/{}".format(result.get("href"))
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(href, "wb") as f:
for chunk in r:
f.write(chunk)
class NBEvent(Event):
url = "http://fiawec.alkamelsystems.com/noticeBoard.php?season={season}&evvent={event}"
class Committee(Event):
url = "http://fiawec.alkamelsystems.com/committe.php?season={season}"
def __init__(self, season):
self.season = season
self.event = "committee"
self.url = self.url.format(season=season)
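# Example driver (illustrative only; the season and event identifiers below are
# made-up placeholders, not values known to exist on the results site):
#
#   if __name__ == "__main__":
#       event = Event("lemans", "2017")
#       event.all()              # download every linked result file for the event
#       Committee("2017").all()  # grab the committee documents for the season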
``` |
{
"source": "0xdc/sewer",
"score": 3
} |
#### File: sewer/sewer/cli.py
```python
import os
import argparse
from structlog import get_logger
from . import Client
from . import __version__ as sewer_version
def main():
"""
Usage:
1. To get a new certificate:
CLOUDFLARE_EMAIL=<EMAIL> \
CLOUDFLARE_DNS_ZONE_ID=some-zone \
CLOUDFLARE_API_KEY=api-key \
sewer \
--dns cloudflare \
--domain example.com \
"""
# TODO: enable people to specify the location where they want certificate and keys to be stored.
# currently, we store them in the directory from which sewer is ran
parser = argparse.ArgumentParser(
prog='sewer', description="Sewer is a Let's Encrypt(ACME) client.")
parser.add_argument(
"--version",
action='version',
version='%(prog)s {version}'.format(version=sewer_version.__version__),
help="The currently installed sewer version.")
parser.add_argument(
"--account_key",
type=argparse.FileType('r'),
required=False,
help="The path to your letsencrypt/acme account key. \
eg: --account_key /home/myaccount.key")
parser.add_argument(
"--dns",
type=str,
required=True,
        choices=['cloudflare', 'aurora', 'rackspace'],
help="The name of the dns provider that you want to use.")
parser.add_argument(
"--domain",
type=str,
required=True,
help="The domain/subdomain name for which \
you want to get/renew certificate for. \
eg: --domain example.com")
parser.add_argument(
"--alt_domains",
type=str,
required=False,
default=[],
nargs='*',
help="A list of alternative domain/subdomain name/s(if any) for which \
you want to get/renew certificate for. \
eg: --alt_domains www.example.com blog.example.com")
parser.add_argument(
"--bundle_name",
type=str,
required=False,
help="The name to use for certificate \
certificate key and account key. Default is value of domain.")
parser.add_argument(
"--email",
type=str,
required=False,
help="Email to be used for registration and recovery. \
eg: --email <EMAIL>")
args = parser.parse_args()
logger = get_logger(__name__)
dns_provider = args.dns
domain = args.domain
alt_domains = args.alt_domains
account_key = args.account_key
bundle_name = args.bundle_name
email = args.email
if bundle_name:
file_name = bundle_name
else:
file_name = '{0}'.format(domain)
if account_key:
account_key = account_key.read()
else:
try:
with open('{0}.account.key'.format(file_name), 'r') as account_file:
account_key = account_file.read().encode()
except IOError:
account_key = None
if email:
# TODO: move this to a config.py file.
# the cli and the client would both read this urls from that config file
GET_NONCE_URL = "https://acme-v01.api.letsencrypt.org/directory"
ACME_CERTIFICATE_AUTHORITY_URL = "https://acme-v01.api.letsencrypt.org"
else:
GET_NONCE_URL = "https://acme-staging.api.letsencrypt.org/directory"
ACME_CERTIFICATE_AUTHORITY_URL = "https://acme-staging.api.letsencrypt.org"
if dns_provider == 'cloudflare':
from . import CloudFlareDns
dns_class = CloudFlareDns()
logger.info(
'chosen_dns_provider',
message='Using {0} as dns provider.'.format(dns_provider))
elif dns_provider == 'aurora':
from . import AuroraDns
try:
AURORA_API_KEY = os.environ['AURORA_API_KEY']
AURORA_SECRET_KEY = os.environ['AURORA_SECRET_KEY']
dns_class = AuroraDns(
AURORA_API_KEY=AURORA_API_KEY,
AURORA_SECRET_KEY=AURORA_SECRET_KEY)
logger.info(
'chosen_dns_provider',
message='Using {0} as dns provider.'.format(dns_provider))
except KeyError as e:
logger.info("ERROR:: Please supply {0} as an environment variable.".
format(str(e)))
raise
elif dns_provider == 'rackspace':
from . import RackspaceDns
dns_class = RackspaceDns()
logger.info(
'chosen_dns_provider',
message='Using {0} as dns provider.'.format(dns_provider))
else:
raise ValueError(
'The dns provider {0} is not recognised.'.format(dns_provider))
client = Client(
domain_name=domain,
dns_class=dns_class,
domain_alt_names=alt_domains,
registration_recovery_email=email,
account_key=account_key,
GET_NONCE_URL=GET_NONCE_URL,
ACME_CERTIFICATE_AUTHORITY_URL=ACME_CERTIFICATE_AUTHORITY_URL)
certificate_key = client.certificate_key
account_key = client.account_key
# write out account_key in current directory
if not client.PRIOR_REGISTERED:
with open('{0}.account.key'.format(file_name), 'wb') as account_file:
account_file.write(account_key)
logger.info(
"write_account_key",
message='account key succesfully written to current directory.')
message = 'Certificate Succesfully issued. The certificate, certificate key and account key have been saved in the current directory'
certificate = client.cert()
# write out certificate and certificate key in current directory
with open('{0}.crt'.format(file_name), 'wt') as certificate_file:
certificate_file.write(certificate)
with open('{0}.key'.format(file_name), 'wb') as certificate_key_file:
certificate_key_file.write(certificate_key)
logger.info("the_end", message=message)
```
#### File: sewer/dns_providers/auroradns.py
```python
from libcloud.dns.providers import get_driver
from libcloud.dns.types import Provider, RecordType
import tldextract
from structlog import get_logger
from . import common
class AuroraDns(common.BaseDns):
"""
Todo: re-organize this class so that we make it easier to mock things out to
facilitate better tests.
"""
def __init__(self, AURORA_API_KEY, AURORA_SECRET_KEY):
self.AURORA_API_KEY = AURORA_API_KEY
self.AURORA_SECRET_KEY = AURORA_SECRET_KEY
self.dns_provider_name = 'aurora'
self.logger = get_logger(__name__).bind(
dns_provider_name=self.dns_provider_name)
def create_dns_record(self, domain_name, base64_of_acme_keyauthorization):
self.logger.info('create_dns_record')
# delete any prior existing DNS authorizations that may exist already
self.delete_dns_record(
domain_name=domain_name,
base64_of_acme_keyauthorization=base64_of_acme_keyauthorization)
extractedDomain = tldextract.extract(domain_name)
domainSuffix = extractedDomain.domain + '.' + extractedDomain.suffix
        if extractedDomain.subdomain == '':
subDomain = '_acme-challenge'
else:
subDomain = '_acme-challenge.' + extractedDomain.subdomain
cls = get_driver(Provider.AURORADNS)
driver = cls(key=self.AURORA_API_KEY, secret=self.AURORA_SECRET_KEY)
zone = driver.get_zone(domainSuffix)
zone.create_record(
name=subDomain,
type=RecordType.TXT,
data=base64_of_acme_keyauthorization)
return
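    # For example (illustrative): with domain_name = "www.example.com" the code
    # above resolves zone "example.com" and creates a TXT record named
    # "_acme-challenge.www" holding base64_of_acme_keyauthorization; for a bare
    # "example.com" the record is named just "_acme-challenge".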
def delete_dns_record(self, domain_name, base64_of_acme_keyauthorization):
self.logger.info('delete_dns_record')
extractedDomain = tldextract.extract(domain_name)
domainSuffix = extractedDomain.domain + '.' + extractedDomain.suffix
        if extractedDomain.subdomain == '':
subDomain = '_acme-challenge'
else:
subDomain = '_acme-challenge.' + extractedDomain.subdomain
cls = get_driver(Provider.AURORADNS)
driver = cls(key=self.AURORA_API_KEY, secret=self.AURORA_SECRET_KEY)
zone = driver.get_zone(domainSuffix)
records = driver.list_records(zone)
for x in records:
if x.name == subDomain and x.type == 'TXT':
record_id = x.id
self.logger.info('Found record ' + subDomain + '.' +
domainSuffix + ' with id : ' + record_id + '.')
record = driver.get_record(zone_id=zone.id, record_id=record_id)
driver.delete_record(record)
self.logger.info('Deleted record ' + subDomain + '.' +
domainSuffix + ' with id : ' + record_id + '.')
else:
self.logger.info('Record ' + subDomain + '.' + domainSuffix +
' not found. No record to delete.')
return
``` |
{
"source": "0xdc/starly",
"score": 2
} |
#### File: {{ cookiecutter.project }}/tests/conftest.py
```python
import pytest
from alembic import command
from alembic.config import Config
from starlette.testclient import TestClient
from sqlalchemy_utils import database_exists, create_database, drop_database
from {{ cookiecutter.project }} import settings
@pytest.fixture(autouse=True)
def create_test_database():
test_url = str(settings.TEST_DATABASE_URL)
assert not database_exists(test_url), \
'Test database already exists. Aborting tests.'
create_database(test_url)
config = Config("alembic.ini")
try:
command.upgrade(config, "head", "--sql") # offline, for coverage
command.upgrade(config, "head")
yield
command.downgrade(config, "base")
finally:
drop_database(test_url)
@pytest.fixture()
def client():
from {{ cookiecutter.project }}.asgi import app
with TestClient(app) as client:
yield client
``` |
{
"source": "0xdc/wk",
"score": 2
} |
#### File: wk/wk/models.py
```python
from __future__ import unicode_literals
from django.db import models
class WellKnown(models.Model):
key = models.CharField(max_length=255,unique=True)
value = models.TextField()
def __str__(self):
return self.key
```
#### File: wk/wk/views.py
```python
from django.shortcuts import get_object_or_404,render,redirect
from django.http import HttpResponse
try:
from django.contrib.staticfiles.templatetags.staticfiles import static
except ImportError:
from django.templatetags.static import static
from .models import WellKnown
def view(request, tgt):
wk = get_object_or_404(WellKnown, key__exact=tgt)
return HttpResponse(wk.value, content_type="text/plain")
def wkd(request, tgt):
return redirect(
request.build_absolute_uri(
static( '/'.join(['wk','openpgpkey','hu', tgt]) )
)
)
``` |
{
"source": "0xddaa/iddaa",
"score": 2
} |
#### File: idapython/iddaa/elftools.py
```python
from ctypes import *
from struct import unpack
SHN_UNDEF = 0
class SymTypes:
STB_GLOBAL_OBJ = 0x11
STB_GLOBAL_FUNC = 0x12
class SHTypes:
SHT_NULL = 0
SHT_PROGBITS = 1
SHT_SYMTAB = 2
SHT_STRTAB = 3
SHT_RELA = 4
SHT_HASH = 5
SHT_DYNAMIC = 6
SHT_NOTE = 7
SHT_NOBITS = 8
SHT_REL = 9
SHT_SHLIB = 10
SHT_DYNSYM = 11
SHT_NUM = 12
SHT_LOPROC = 0x70000000
SHT_HIPROC = 0x7fffffff
SHT_LOUSER = 0x80000000
SHT_HIUSER = 0xffffffff
class ELFFlags:
ELFCLASS32 = 0x01
ELFCLASS64 = 0x02
EI_CLASS = 0x04
EI_DATA = 0x05
ELFDATA2LSB = 0x01
ELFDATA2MSB = 0x02
EM_386 = 0x03
EM_X86_64 = 0x3e
EM_ARM = 0x28
EM_MIPS = 0x08
EM_SPARCv8p = 0x12
EM_PowerPC = 0x14
EM_ARM64 = 0xb7
class SymFlags:
STB_LOCAL = 0
STB_GLOBAL = 1
STB_WEAK = 2
STT_NOTYPE = 0
STT_OBJECT = 1
STT_FUNC = 2
STT_SECTION = 3
STT_FILE = 4
STT_COMMON = 5
STT_TLS = 6
class Elf32_Ehdr_LSB(LittleEndianStructure):
_fields_ = [
("e_ident", c_ubyte * 16),
("e_type", c_ushort),
("e_machine", c_ushort),
("e_version", c_uint),
("e_entry", c_uint),
("e_phoff", c_uint),
("e_shoff", c_uint),
("e_flags", c_uint),
("e_ehsize", c_ushort),
("e_phentsize", c_ushort),
("e_phnum", c_ushort),
("e_shentsize", c_ushort),
("e_shnum", c_ushort),
("e_shstrndx", c_ushort)
]
class Elf64_Ehdr_LSB(LittleEndianStructure):
_fields_ = [
("e_ident", c_ubyte * 16),
("e_type", c_ushort),
("e_machine", c_ushort),
("e_version", c_uint),
("e_entry", c_ulonglong),
("e_phoff", c_ulonglong),
("e_shoff", c_ulonglong),
("e_flags", c_uint),
("e_ehsize", c_ushort),
("e_phentsize", c_ushort),
("e_phnum", c_ushort),
("e_shentsize", c_ushort),
("e_shnum", c_ushort),
("e_shstrndx", c_ushort)
]
class Elf32_Phdr_LSB(LittleEndianStructure):
_fields_ = [
("p_type", c_uint),
("p_offset", c_uint),
("p_vaddr", c_uint),
("p_paddr", c_uint),
("p_filesz", c_uint),
("p_memsz", c_uint),
("p_flags", c_uint),
("p_align", c_uint)
]
class Elf64_Phdr_LSB(LittleEndianStructure):
_fields_ = [
("p_type", c_uint),
("p_flags", c_uint),
("p_offset", c_ulonglong),
("p_vaddr", c_ulonglong),
("p_paddr", c_ulonglong),
("p_filesz", c_ulonglong),
("p_memsz", c_ulonglong),
("p_align", c_ulonglong)
]
class Elf32_Shdr_LSB(LittleEndianStructure):
_fields_ = [
("sh_name", c_uint),
("sh_type", c_uint),
("sh_flags", c_uint),
("sh_addr", c_uint),
("sh_offset", c_uint),
("sh_size", c_uint),
("sh_link", c_uint),
("sh_info", c_uint),
("sh_addralign", c_uint),
("sh_entsize", c_uint)
]
class Elf64_Shdr_LSB(LittleEndianStructure):
_fields_ = [
("sh_name", c_uint),
("sh_type", c_uint),
("sh_flags", c_ulonglong),
("sh_addr", c_ulonglong),
("sh_offset", c_ulonglong),
("sh_size", c_ulonglong),
("sh_link", c_uint),
("sh_info", c_uint),
("sh_addralign", c_ulonglong),
("sh_entsize", c_ulonglong)
]
class Elf32_Ehdr_MSB(BigEndianStructure):
_fields_ = [
("e_ident", c_ubyte * 16),
("e_type", c_ushort),
("e_machine", c_ushort),
("e_version", c_uint),
("e_entry", c_uint),
("e_phoff", c_uint),
("e_shoff", c_uint),
("e_flags", c_uint),
("e_ehsize", c_ushort),
("e_phentsize", c_ushort),
("e_phnum", c_ushort),
("e_shentsize", c_ushort),
("e_shnum", c_ushort),
("e_shstrndx", c_ushort)
]
class Elf64_Ehdr_MSB(BigEndianStructure):
_fields_ = [
("e_ident", c_ubyte * 16),
("e_type", c_ushort),
("e_machine", c_ushort),
("e_version", c_uint),
("e_entry", c_ulonglong),
("e_phoff", c_ulonglong),
("e_shoff", c_ulonglong),
("e_flags", c_uint),
("e_ehsize", c_ushort),
("e_phentsize", c_ushort),
("e_phnum", c_ushort),
("e_shentsize", c_ushort),
("e_shnum", c_ushort),
("e_shstrndx", c_ushort)
]
class Elf32_Phdr_MSB(BigEndianStructure):
_fields_ = [
("p_type", c_uint),
("p_offset", c_uint),
("p_vaddr", c_uint),
("p_paddr", c_uint),
("p_filesz", c_uint),
("p_memsz", c_uint),
("p_flags", c_uint),
("p_align", c_uint)
]
class Elf64_Phdr_MSB(BigEndianStructure):
_fields_ = [
("p_type", c_uint),
("p_flags", c_uint),
("p_offset", c_ulonglong),
("p_vaddr", c_ulonglong),
("p_paddr", c_ulonglong),
("p_filesz", c_ulonglong),
("p_memsz", c_ulonglong),
("p_align", c_ulonglong)
]
class Elf32_Shdr_MSB(BigEndianStructure):
_fields_ = [
("sh_name", c_uint),
("sh_type", c_uint),
("sh_flags", c_uint),
("sh_addr", c_uint),
("sh_offset", c_uint),
("sh_size", c_uint),
("sh_link", c_uint),
("sh_info", c_uint),
("sh_addralign", c_uint),
("sh_entsize", c_uint)
]
class Elf64_Shdr_MSB(BigEndianStructure):
_fields_ = [
("sh_name", c_uint),
("sh_type", c_uint),
("sh_flags", c_ulonglong),
("sh_addr", c_ulonglong),
("sh_offset", c_ulonglong),
("sh_size", c_ulonglong),
("sh_link", c_uint),
("sh_info", c_uint),
("sh_addralign", c_ulonglong),
("sh_entsize", c_ulonglong)
]
class Elf32_Sym_LSB(LittleEndianStructure):
_fields_ = [
("st_name", c_uint),
("st_value", c_uint),
("st_size", c_uint),
("st_info", c_ubyte),
("st_other", c_ubyte),
("st_shndx", c_ushort)
]
class Elf64_Sym_LSB(LittleEndianStructure):
_fields_ = [
("st_name", c_uint),
("st_info", c_ubyte),
("st_other", c_ubyte),
("st_shndx", c_ushort),
("st_value", c_ulonglong),
("st_size", c_ulonglong)
]
class Elf32_Sym_MSB(BigEndianStructure):
_fields_ = [
("st_name", c_uint),
("st_value", c_uint),
("st_size", c_uint),
("st_info", c_ubyte),
("st_other", c_ubyte),
("st_shndx", c_ushort)
]
class Elf64_Sym_MSB(BigEndianStructure):
_fields_ = [
("st_name", c_uint),
("st_info", c_ubyte),
("st_other", c_ubyte),
("st_shndx", c_ushort),
("st_value", c_ulonglong),
("st_size", c_ulonglong)
]
""" This class parses the ELF """
class ELF:
def __init__(self, binary):
self.binary = bytearray(binary)
self.ElfHeader = None
self.shdr_l = []
self.phdr_l = []
self.syms_l = []
self.e_ident = str(self.binary[:15])
self.ei_data = unpack("<B", self.e_ident[ELFFlags.EI_DATA])[0] # LSB/MSB
self.__setHeaderElf()
self.__setShdr()
self.__setPhdr()
def is_stripped(self):
if not self.get_symtab():
return True
if not self.get_strtab():
return True
return False
def is_static(self):
for sh in self.shdr_l:
            shname = self.get_shstrtab_data()[sh.sh_name:].split('\x00')[0]
if 'dyn' in shname:
return False
return True
def strip_symbols(self):
sh2delete = 2
size2dec = 0
end_shdr = self.ElfHeader.e_shoff + (self.sizeof_sh() * self.ElfHeader.e_shnum)
symtab = self.get_symtab()
strtab = self.get_strtab()
if not symtab or not strtab:
return False
log("Stripping binary...")
if symtab.sh_offset < end_shdr:
size2dec += symtab.sh_size
if strtab.sh_offset < end_shdr:
size2dec += strtab.sh_size
self.ElfHeader.e_shoff -= size2dec
self.ElfHeader.e_shnum -= sh2delete
e_shnum = self.ElfHeader.e_shnum
e_shoff = self.ElfHeader.e_shoff
sz_striped = (e_shoff + (e_shnum * self.sizeof_sh()))
if strtab.sh_offset > symtab.sh_offset:
self.cut_at_offset(strtab.sh_offset, strtab.sh_size)
self.cut_at_offset(symtab.sh_offset, symtab.sh_size)
else:
self.cut_at_offset(symtab.sh_offset, symtab.sh_size)
self.cut_at_offset(strtab.sh_offset, strtab.sh_size)
self.binary = self.binary[0:sz_striped]
self.write(0, self.ElfHeader)
return True
def get_symtab(self):
shstrtab = bytes(self.get_shstrtab_data())
for sh in self.shdr_l:
sh_name = shstrtab[sh.sh_name:].split("\0")[0]
if sh.sh_type == SHTypes.SHT_SYMTAB and \
(sh.sh_name == SHN_UNDEF or sh_name == ".symtab"):
return sh
return None
def get_strtab(self):
shstrtab = bytes(self.get_shstrtab_data())
for sh in self.shdr_l:
sh_name = shstrtab[sh.sh_name:].split("\0")[0]
if sh.sh_type == SHTypes.SHT_STRTAB and \
(sh.sh_name == SHN_UNDEF or sh_name == ".strtab"):
return sh
return None
def getArchMode(self):
if self.ElfHeader.e_ident[ELFFlags.EI_CLASS] == ELFFlags.ELFCLASS32:
return 32
elif self.ElfHeader.e_ident[ELFFlags.EI_CLASS] == ELFFlags.ELFCLASS64:
return 64
else:
log("[Error] ELF.getArchMode() - Bad Arch size")
return None
""" Parse ELF header """
def __setHeaderElf(self):
e_ident = str(self.binary[:15])
ei_class = unpack("<B", e_ident[ELFFlags.EI_CLASS])[0]
ei_data = unpack("<B", e_ident[ELFFlags.EI_DATA])[0]
if ei_class != ELFFlags.ELFCLASS32 and ei_class != ELFFlags.ELFCLASS64:
log("[Error] ELF.__setHeaderElf() - Bad Arch size")
return None
if ei_data != ELFFlags.ELFDATA2LSB and ei_data != ELFFlags.ELFDATA2MSB:
log("[Error] ELF.__setHeaderElf() - Bad architecture endian")
return None
if ei_class == ELFFlags.ELFCLASS32:
if ei_data == ELFFlags.ELFDATA2LSB: self.ElfHeader = Elf32_Ehdr_LSB.from_buffer_copy(self.binary)
elif ei_data == ELFFlags.ELFDATA2MSB: self.ElfHeader = Elf32_Ehdr_MSB.from_buffer_copy(self.binary)
elif ei_class == ELFFlags.ELFCLASS64:
if ei_data == ELFFlags.ELFDATA2LSB: self.ElfHeader = Elf64_Ehdr_LSB.from_buffer_copy(self.binary)
elif ei_data == ELFFlags.ELFDATA2MSB: self.ElfHeader = Elf64_Ehdr_MSB.from_buffer_copy(self.binary)
""" Write the section header to self.binary """
def write_shdr(self):
off = self.ElfHeader.e_shoff
for sh in self.shdr_l:
self.write(off, sh)
            off += sizeof(sh)
""" Parse Section header """
def __setShdr(self):
shdr_num = self.ElfHeader.e_shnum
base = self.binary[self.ElfHeader.e_shoff:]
shdr_l = []
e_ident = str(self.binary[:15])
ei_data = unpack("<B", e_ident[ELFFlags.EI_DATA])[0]
for i in range(shdr_num):
if self.getArchMode() == 32:
if ei_data == ELFFlags.ELFDATA2LSB: shdr = Elf32_Shdr_LSB.from_buffer_copy(base)
elif ei_data == ELFFlags.ELFDATA2MSB: shdr = Elf32_Shdr_MSB.from_buffer_copy(base)
elif self.getArchMode() == 64:
if ei_data == ELFFlags.ELFDATA2LSB: shdr = Elf64_Shdr_LSB.from_buffer_copy(base)
elif ei_data == ELFFlags.ELFDATA2MSB: shdr = Elf64_Shdr_MSB.from_buffer_copy(base)
self.shdr_l.append(shdr)
base = base[self.ElfHeader.e_shentsize:]
string_table = str(self.binary[(self.shdr_l[self.ElfHeader.e_shstrndx].sh_offset):])
for i in range(shdr_num):
self.shdr_l[i].str_name = string_table[self.shdr_l[i].sh_name:].split('\0')[0]
""" Parse Program header """
def __setPhdr(self):
pdhr_num = self.ElfHeader.e_phnum
base = self.binary[self.ElfHeader.e_phoff:]
phdr_l = []
e_ident = str(self.binary[:15])
ei_data = unpack("<B", e_ident[ELFFlags.EI_DATA])[0]
for i in range(pdhr_num):
if self.getArchMode() == 32:
if ei_data == ELFFlags.ELFDATA2LSB: phdr = Elf32_Phdr_LSB.from_buffer_copy(base)
elif ei_data == ELFFlags.ELFDATA2MSB: phdr = Elf32_Phdr_MSB.from_buffer_copy(base)
elif self.getArchMode() == 64:
if ei_data == ELFFlags.ELFDATA2LSB: phdr = Elf64_Phdr_LSB.from_buffer_copy(base)
elif ei_data == ELFFlags.ELFDATA2MSB: phdr = Elf64_Phdr_MSB.from_buffer_copy(base)
self.phdr_l.append(phdr)
base = base[self.ElfHeader.e_phentsize:]
def get_section_id(self, sh_name):
for idx, sh in enumerate(self.shdr_l):
if sh.str_name == sh_name:
return idx
return None
def get_shstrtab_data(self):
sh = self.shdr_l[self.ElfHeader.e_shstrndx]
if sh.sh_type == SHTypes.SHT_STRTAB:
return self.binary[sh.sh_offset:sh.sh_offset+sh.sh_size]
return None
def get_sym_at_offset(self, off):
        if self.getArchMode() == 32:
            if self.ei_data == ELFFlags.ELFDATA2LSB: sym = Elf32_Sym_LSB.from_buffer_copy(self.binary[off:])
            elif self.ei_data == ELFFlags.ELFDATA2MSB: sym = Elf32_Sym_MSB.from_buffer_copy(self.binary[off:])
        elif self.getArchMode() == 64:
            if self.ei_data == ELFFlags.ELFDATA2LSB: sym = Elf64_Sym_LSB.from_buffer_copy(self.binary[off:])
            elif self.ei_data == ELFFlags.ELFDATA2MSB: sym = Elf64_Sym_MSB.from_buffer_copy(self.binary[off:])
return sym
def get_entrypoint(self):
        return self.ElfHeader.e_entry
def sizeof_sh(self):
size = None
if self.getArchMode() == 32:
size = sizeof(Elf32_Shdr_LSB())
elif self.getArchMode() == 64:
size = sizeof(Elf64_Shdr_LSB())
return size
def sizeof_sym(self):
size = None
if self.getArchMode() == 32:
size = sizeof(Elf32_Sym_LSB)
elif self.getArchMode() == 64:
size = sizeof(Elf64_Sym_LSB)
return size
def append_section_header(self, section):
sh = None
if self.getArchMode() == 32:
if self.ei_data == ELFFlags.ELFDATA2LSB: sh = Elf32_Shdr_LSB()
elif self.ei_data == ELFFlags.ELFDATA2MSB: sh = Elf32_Shdr_MSB()
elif self.getArchMode() == 64:
if self.ei_data == ELFFlags.ELFDATA2LSB: sh = Elf64_Shdr_LSB()
elif self.ei_data == ELFFlags.ELFDATA2MSB: sh = Elf64_Shdr_MSB()
sh.sh_name = section["name"]
sh.sh_type = section["type"]
sh.sh_flags = section["flags"]
sh.sh_addr = section["addr"]
sh.sh_offset = section["offset"]
sh.sh_size = section["size"]
sh.sh_link = section["link"]
sh.sh_info = section["info"]
sh.sh_addralign = section["addralign"]
sh.sh_entsize = section["entsize"]
self.binary.extend(sh)
def append_symbol(self, symbol):
if self.getArchMode() == 32:
if self.ei_data == ELFFlags.ELFDATA2LSB: sym = Elf32_Sym_LSB()
elif self.ei_data == ELFFlags.ELFDATA2MSB: sym = Elf32_Sym_MSB()
elif self.getArchMode() == 64:
if self.ei_data == ELFFlags.ELFDATA2LSB: sym = Elf64_Sym_LSB()
elif self.ei_data == ELFFlags.ELFDATA2MSB: sym = Elf64_Sym_MSB()
sym.st_name = symbol["name"]
sym.st_value = symbol["value"]
sym.st_size = symbol["size"]
sym.st_info = symbol["info"]
sym.st_other = symbol["other"]
sym.st_shndx = symbol["shndx"]
self.binary.extend(sym)
def get_binary(self):
return self.binary
def write(self, offset, data):
self.binary[offset:offset+sizeof(data)] = data
def expand_at_offset(self, offset, data):
self.binary = self.binary[:offset] + data + self.binary[offset:]
def cut_at_offset(self, offset, size):
self.binary = self.binary[:offset] + self.binary[offset+size:]
def save(self, output):
with open(output, 'wb') as f:
f.write(self.binary)
class Symbol:
def __init__(self, name, info, value, size, shname, shndx=-1):
self.name = name
self.info = info
self.value = value
self.size = size
self.shname = shname
self.shndx = shndx
def __str__(self):
return "%s;%s;%s;%s;%s" % (self.name, self.value, self.size,
self.info, self.shname)
def log(msg=''):
print("[%s] %s" % ('elftools', msg))
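# Minimal usage sketch for the ELF class above (illustrative; "./a.out" is a
# placeholder path):
#
#   if __name__ == "__main__":
#       with open("./a.out", "rb") as f:
#           elf = ELF(f.read())
#       log("arch: %d-bit" % elf.getArchMode())
#       log("stripped: %s" % elf.is_stripped())
#       if elf.strip_symbols():
#           elf.save("./a.out.stripped")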
``` |
{
"source": "0xdeadbeefJERKY/tuf",
"score": 3
} |
#### File: tuf/evpy/cipher.py
```python
import ctypes
import time
import evp
class CipherError(evp.SSLError):
pass
def _strengthen_password(pw, iv, salt=None):
# add the hash
evp.OpenSSL_add_all_digests()
# build the key buffer
key = ctypes.create_string_buffer(24)
# either take the existing salt or build a new one
if not salt:
salt = ctypes.create_string_buffer(8)
# get the needed entropy, bailing if it doesn't work in
# the first thousand tries
for i in range(1000):
if evp.RAND_bytes(salt, 8): break
else:
raise CipherError("Could not generate enough entropy")
# extract the salt
salt = salt.raw
# get the hash
evp_hash = evp.EVP_get_digestbyname("sha512")
if not evp_hash:
raise CipherError("Could not create hash object")
# fill the key
if not evp.EVP_BytesToKey(evp.EVP_aes_192_cbc(), evp_hash, salt, pw, len(pw), 1000, key, iv):
raise CipherError("Could not strengthen key")
# go home
return salt, key.raw
def encrypt(data, password):
"""Encrypts the given data, raising CipherError on failure.
This uses AES192 to encrypt and strengthens the given
passphrase using SHA512.
Usage:
>>> from evpy import cipher
>>> f = open("test/short.txt", "rb")
>>> data = f.read()
>>> pw = b"<PASSWORD>"
>>> salt, iv, enc = cipher.encrypt(data, pw)
>>> cipher.decrypt(salt, iv, enc, pw) == data
True
"""
# ensure data exists
if not len(data):
raise CipherError("Data must actually exist")
if not len(password):
raise CipherError("Password must actually exist")
# build and initialize the context
ctx = evp.EVP_CIPHER_CTX_new()
if not ctx:
raise CipherError("Could not create context")
evp.EVP_CIPHER_CTX_init(ctx)
# get the cipher object
cipher_object = evp.EVP_aes_192_cbc()
if not cipher_object:
raise CipherError("Could not create cipher object")
# finish the context and cipher object
if not evp.EVP_EncryptInit_ex(ctx, cipher_object, None, None, None):
raise CipherError("Could not finish context")
# build the randomized iv
iv_length = evp.EVP_CIPHER_CTX_iv_length(ctx)
iv = ctypes.create_string_buffer(iv_length)
# get the needed entropy, bailing if it doesn't work in
# the first thousand tries
for i in range(1000):
if evp.RAND_bytes(iv, iv_length): break
else:
raise CipherError("Not enough entropy for IV")
output_iv = iv.raw
# strengthen the password into an honest-to-goodness key
salt, aes_key = _strengthen_password(password, iv)
# initialize the encryption operation
if not evp.EVP_EncryptInit_ex(ctx, None, None, aes_key, iv):
raise CipherError("Could not start encryption operation")
# build the output buffer
buf = ctypes.create_string_buffer(len(data) + 16)
written = ctypes.c_int(0)
final = ctypes.c_int(0)
# update
if not evp.EVP_EncryptUpdate(ctx, buf, ctypes.byref(written), data, len(data)):
raise CipherError("Could not update ciphertext")
output = buf.raw[:written.value]
# finalize
if not evp.EVP_EncryptFinal_ex(ctx, buf, ctypes.byref(final)):
raise CipherError("Could not finalize ciphertext")
output += buf.raw[:final.value]
# ...and go home
return salt, output_iv, output
def decrypt(salt, iv, data, password):
"""Decrypts the given data, raising CipherError on failure.
Usage:
>>> from evpy import cipher
>>> f = open("test/short.txt", "rb")
>>> data = f.read()
>>> pw = b"<PASSWORD>"
>>> salt, iv, enc = cipher.encrypt(data, pw)
>>> cipher.decrypt(salt, iv, enc, pw) == data
True
"""
# ensure inputs are the correct size
if not len(data):
raise CipherError("Data must actually exist")
if not len(password):
raise CipherError("Password must actually exist")
if len(salt) != 8:
raise CipherError("Incorrect salt size")
if len(iv) != 16:
raise CipherError("Incorrect iv size")
# build and initialize the context
ctx = evp.EVP_CIPHER_CTX_new()
if not ctx:
raise CipherError("Could not create context")
evp.EVP_CIPHER_CTX_init(ctx)
# get the cipher object
cipher_object = evp.EVP_aes_192_cbc()
if not cipher_object:
raise CipherError("Could not create cipher object")
# build the key
salt, key = _strengthen_password(password, iv, salt)
# start decrypting the ciphertext
if not evp.EVP_DecryptInit_ex(ctx, cipher_object, None, key, iv):
raise CipherError("Could not open envelope")
# build the output buffers
buf = ctypes.create_string_buffer(len(data) + 16)
written = ctypes.c_int(0)
final = ctypes.c_int(0)
# update
if not evp.EVP_DecryptUpdate(ctx, buf, ctypes.byref(written), data, len(data)):
raise CipherError("Could not update plaintext")
output = buf.raw[:written.value]
# finalize
if not evp.EVP_DecryptFinal_ex(ctx, buf, ctypes.byref(final)):
raise CipherError("Could not finalize decryption")
output += buf.raw[:final.value]
return output
```
#### File: tuf/evpy/envelope.py
```python
import ctypes
import evp
from signature import _string_to_bio
class EnvelopeError(evp.SSLError):
pass
class KeygenError(evp.SSLError):
pass
def _build_dkey_from_file(keyfile):
fp = evp.fopen(keyfile, "r")
if not fp:
raise EnvelopeError("Could not open keyfile")
# get the decryption key
skey = evp.PEM_read_PrivateKey(fp, None, None, None)
if not skey:
evp.fclose(fp)
raise EnvelopeError("Could not read decryption key")
# close the file
evp.fclose(fp)
return skey
def _build_dkey_from_string(key):
bio = _string_to_bio(key)
dkey = evp.PEM_read_bio_PrivateKey(bio, None, None, None)
if not dkey:
raise EnvelopeError("Could not build decryption key from string")
evp.BIO_free(bio)
return dkey
def _build_ekey_from_file(keyfile):
fp = evp.fopen(keyfile, "r")
if not fp:
raise EnvelopeError("Could not open keyfile")
# get the encryption key
ekey = evp.PEM_read_PUBKEY(fp, None, None, None)
if not ekey:
evp.fclose(fp)
raise EnvelopeError("Could not read encryption key")
# close the file
evp.fclose(fp)
return ekey
def _build_ekey_from_string(key):
bio = _string_to_bio(key)
ekey = evp.PEM_read_bio_PUBKEY(bio, None, None, None)
if not ekey:
raise EnvelopeError("Could not create encryption key from string")
evp.BIO_free(bio)
return ekey
def _build_bio():
method = evp.BIO_s_mem()
return evp.BIO_new(method);
def _asn1_hex_to_int(value):
print(value)
return int(''.join(value.split(':')), 16)
def _parse_printed_key(k):
attrs = {}
current = ""
current_attr = ""
for line in k.splitlines()[1:]:
# its a continuation of the current block
if line.startswith(' '):
current += line.strip()
else:
# special case the public exponent
if "publicExponent" in current_attr:
attrs['publicExponent'] = int(current_attr.split()[1])
elif current_attr:
attrs[current_attr] = _asn1_hex_to_int(current)
current_attr = line.strip(':')
current = ""
translator = {'publicExponent': 'e', 'privateExponent': 'd', 'modulus': 'n', 'prime1': 'p', 'prime2': 'q'}
translated_attrs = {}
for key, value in attrs.items():
try:
translated_attrs[translator[key]] = value
        except KeyError: pass
return translated_attrs
def keygen(bitlength=1024, e=65537, pem=True):
key = evp.RSA_generate_key(bitlength, e, None, None)
if not key:
raise EnvelopeError("Could not generate key")
if pem:
private_bio = evp.BIO_new(evp.BIO_s_mem())
if not private_bio:
raise KeygenError("Could not create temporary storage")
public_bio = evp.BIO_new(evp.BIO_s_mem())
if not public_bio:
raise KeygenError("Could not create temporary storage")
private_buf = ctypes.create_string_buffer('', 65537)
if not private_buf:
raise MemoryError("Could not allocate key storage")
public_buf = ctypes.create_string_buffer('', 65537)
if not public_buf:
raise MemoryError("Could not allocate key storage")
if not evp.PEM_write_bio_RSAPrivateKey(private_bio, key, None, None, 0, 0, None):
raise KeygenError("Could not write private key")
if not evp.PEM_write_bio_RSA_PUBKEY(public_bio, key):
raise KeygenError("Could not write public key")
public_len = evp.BIO_read(public_bio, public_buf, 65537)
private_len = evp.BIO_read(private_bio, private_buf, 65537)
evp.BIO_free(public_bio)
evp.BIO_free(private_bio)
return public_buf.value, private_buf.value
else:
# we go through this rigamarole because if there's an engine
# in place it won't populate the RSA key's values properly.
key_bio = evp.BIO_new(evp.BIO_s_mem())
if not key_bio:
raise KeygenError("Could not create temporary storage")
if not evp.RSA_print(key_bio, key, 0):
raise KeygenError("Could not stringify key")
key_buf = ctypes.create_string_buffer('', 65537)
if not key_buf:
raise MemoryError("Could not allocate key storage")
evp.BIO_read(key_bio, key_buf, 65537)
evp.BIO_free(key_bio)
key_string = key_buf.value
return key, _parse_printed_key(key_string)
def encrypt(data, keyfile=None, key=None):
"""Encrypts the given data, raising EnvelopeError on failure.
    This uses AES192 for bulk encryption and RSA, with the given
    public key, to encrypt the AES session key.
Usage:
>>> from evpy import envelope
>>> f = open("test/short.txt", "rb")
>>> data = f.read()
>>> public_key = "test/keys/public1.pem"
>>> private_key = "test/keys/private1.pem"
>>> iv, key, ciphertext = envelope.encrypt(data, public_key)
>>> envelope.decrypt(iv, key, ciphertext, private_key) == data
True
"""
# validate the incoming data
if not data:
raise EnvelopeError("Incoming data must be bytes")
if not len(data):
raise EnvelopeError("Data must actually exist")
# build and initialize the context
ctx = evp.EVP_CIPHER_CTX_new()
if not ctx:
raise EnvelopeError("Could not create context")
evp.EVP_CIPHER_CTX_init(ctx)
# get the key from the keyfile
if key and not keyfile:
ekey = _build_ekey_from_string(key)
elif keyfile and not key:
ekey = _build_ekey_from_file(keyfile)
else:
raise EnvelopeError("Must specify exactly one key or keyfile")
# get the cipher object
cipher_object = evp.EVP_aes_192_cbc()
if not cipher_object:
raise EnvelopeError("Could not create cipher object")
# finish the context and cipher object
if not evp.EVP_EncryptInit_ex(ctx, cipher_object, None, None, None):
raise EnvelopeError("Could not finish context")
# build the randomized iv
iv_length = evp.EVP_CIPHER_CTX_iv_length(ctx)
iv = ctypes.create_string_buffer(iv_length)
for i in range(1000):
if evp.RAND_bytes(iv, iv_length): break
else:
raise EnvelopeError("Could not generate enough entropy for IV")
output_iv = iv.raw
# build the randomized AES key
keysize = evp.EVP_CIPHER_key_length(cipher_object)
aes_key = ctypes.create_string_buffer(keysize)
for i in range(1000):
if evp.RAND_bytes(aes_key, keysize): break
else:
raise EnvelopeError("Could not generate enough entropy for AES key")
# extract the RSA key
rsa_key = evp.EVP_PKEY_get1_RSA(ekey)
if not rsa_key:
raise EnvelopeError("Could not get RSA key")
# encrypt it
buf_size = evp.RSA_size(rsa_key)
if not buf_size:
raise EnvelopeError("Invalid RSA keysize")
encrypted_aes_key = ctypes.create_string_buffer(buf_size)
# RSA_PKCS1_PADDING is defined as 1
written = evp.RSA_public_encrypt(keysize, aes_key, encrypted_aes_key, rsa_key, 1)
if not written:
raise EnvelopeError("Could not encrypt AES key")
output_key = encrypted_aes_key.raw[:written]
# initialize the encryption operation
if not evp.EVP_EncryptInit_ex(ctx, None, None, aes_key, iv):
raise EnvelopeError("Could not start encryption operation")
# build the output buffer
buf = ctypes.create_string_buffer(len(data) + 16)
written = ctypes.c_int(0)
final = ctypes.c_int(0)
# update
if not evp.EVP_EncryptUpdate(ctx, buf, ctypes.byref(written), data, len(data)):
raise EnvelopeError("Could not update ciphertext")
output = buf.raw[:written.value]
# finalize
if not evp.EVP_EncryptFinal_ex(ctx, buf, ctypes.byref(final)):
raise EnvelopeError("Could not finalize ciphertext")
output += buf.raw[:final.value]
# ...and go home
return output_iv, output_key, output
def decrypt(iv, encrypted_aes_key, data, keyfile=None, key=None):
"""Decrypts the given ciphertext, raising EnvelopeError on failure.
Usage:
>>> from evpy import envelope
>>> f = open("test/short.txt", "rb")
>>> data = f.read()
>>> public_key = "test/keys/public1.pem"
>>> private_key = "test/keys/private1.pem"
>>> iv, key, ciphertext = envelope.encrypt(data, public_key)
>>> envelope.decrypt(iv, key, ciphertext, private_key) == data
True
"""
# build and initialize the context
ctx = evp.EVP_CIPHER_CTX_new()
if not ctx:
raise EnvelopeError("Could not create context")
evp.EVP_CIPHER_CTX_init(ctx)
# get the cipher object
cipher_object = evp.EVP_aes_192_cbc()
if not cipher_object:
raise EnvelopeError("Could not create cipher object")
# get the key from the keyfile
if key and not keyfile:
dkey = _build_dkey_from_string(key)
elif keyfile and not key:
dkey = _build_dkey_from_file(keyfile)
else:
raise EnvelopeError("Must specify exactly one key or keyfile")
# open the envelope
if not evp.EVP_OpenInit(ctx, cipher_object, encrypted_aes_key, len(encrypted_aes_key), iv, dkey):
raise EnvelopeError("Could not open envelope")
# build the output buffer
buf = ctypes.create_string_buffer(len(data) + 16)
written = ctypes.c_int(0)
final = ctypes.c_int(0)
# update
if not evp.EVP_DecryptUpdate(ctx, buf, ctypes.byref(written), data, len(data)):
raise EnvelopeError("Could not update envelope")
output = buf.raw[:written.value]
# finalize
if not evp.EVP_DecryptFinal_ex(ctx, buf, ctypes.byref(final)):
raise EnvelopeError("Could not finalize envelope")
output += buf.raw[:final.value]
return output
```
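Putting the pieces together, a minimal round trip with this module looks like the sketch below. It relies only on the behaviour documented above (keygen() returning PEM strings by default, and encrypt()/decrypt() accepting either a keyfile path or an in-memory key) and assumes the underlying OpenSSL bindings are available; the data and bit length are placeholders.
```python
# Sketch of a full envelope round trip using the evpy API shown above.
from evpy import envelope

# keygen() returns (public_pem, private_pem) when pem=True (the default).
public_pem, private_pem = envelope.keygen(bitlength=2048)

data = b"hello, envelope"
# Encrypt with the in-memory public key (key=..., no keyfile).
iv, wrapped_key, ciphertext = envelope.encrypt(data, key=public_pem)

# Decrypt with the matching private key.
plaintext = envelope.decrypt(iv, wrapped_key, ciphertext, key=private_pem)
assert plaintext == data
```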
#### File: tuf/compatibility/socket_create_connection.py
```python
from socket import getaddrinfo, socket, error, SOCK_STREAM
_GLOBAL_DEFAULT_TIMEOUT = object()
def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used. If *source_address* is set it must be a tuple of (host, port)
for the socket to bind as a source address before making the connection.
    A host of '' or port 0 tells the OS to use the default.
"""
host, port = address
err = None
for res in getaddrinfo(host, port, 0, SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket(af, socktype, proto)
if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise error("getaddrinfo returns an empty list")
```
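A short usage sketch for the shim above; the address, payload, and timeout are arbitrary examples.
```python
# Example: open a TCP connection with an explicit timeout, letting
# create_connection() fall back through every address getaddrinfo() returns.
sock = create_connection(("example.org", 80), timeout=5.0)
try:
    sock.sendall(b"HEAD / HTTP/1.0\r\nHost: example.org\r\n\r\n")
    print(sock.recv(64))
finally:
    sock.close()
```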
#### File: tuf/pushtools/push.py
```python
import os
import sys
import optparse
import tuf
import tuf.formats
import tuf.pushtools.pushtoolslib
import tuf.pushtools.transfer.scp
def push(config_filepath):
"""
<Purpose>
Perform a push/transfer of target files to a host. The configuration file
'config_filepath' provides the required settings needed by the transfer
command. In the case of an 'scp' configuration file, the configuration
file would contain 'host', 'user', 'identity file', and 'remote directory'
entries.
<Arguments>
config_filepath:
The push configuration file (i.e., 'push.cfg').
<Exceptions>
tuf.FormatError, if any of the arguments are incorrectly formatted.
tuf.Error, if there was an error while processing the push.
<Side Effects>
The 'config_filepath' file is read and its contents stored, the files
in the targets directory (specified in the config file) are copied,
    and the copied targets transferred to a specified host.
<Returns>
None.
"""
# Do the arguments have the correct format?
# Raise 'tuf.FormatError' if there is a mismatch.
tuf.formats.PATH_SCHEMA.check_match(config_filepath)
# Is the path to the configuration file valid?
if not os.path.isfile(config_filepath):
message = 'The configuration file path is invalid.'
raise tuf.Error(message)
config_filepath = os.path.abspath(config_filepath)
# Retrieve the push configuration settings required by the transfer
# modules. Raise ('tuf.FormatError', 'tuf.Error') if a valid
# configuration file cannot be retrieved.
config_dict = tuf.pushtools.pushtoolslib.read_config_file(config_filepath, 'push')
# Extract the transfer module identified in the configuration file.
transfer_module = config_dict['general']['transfer_module']
# 'scp' is the only transfer module currently supported. Perform
# an scp-transfer of the targets located in the targets directory as
# listed in the configuration file.
if transfer_module == 'scp':
tuf.pushtools.transfer.scp.transfer(config_dict)
else:
message = 'Cannot perform a transfer using '+repr(transfer_module)
raise tuf.Error(message)
def parse_options():
"""
<Purpose>
Parse the command-line options. 'push.py' expects the '--config'
option to be set by the user.
Example:
$ python push.py --config ./push.cfg
The '--config' option accepts a path argument to the push configuration
file (i.e., 'push.cfg'). If the required option is unset, a parser error
is printed and the script exits.
<Arguments>
None.
<Exceptions>
None.
<Side Effects>
None.
<Returns>
The options object returned by the parser's parse_args() method.
"""
usage = 'usage: %prog --config <config path>'
option_parser = optparse.OptionParser(usage=usage)
# Add the options supported by 'push.py' to the option parser.
option_parser.add_option('--config', action='store', type='string',
help='Specify the "push.cfg" configuration file.')
(options, remaining_arguments) = option_parser.parse_args()
# Ensure the '--config' option is set. If the required option is unset,
# option_parser.error() will print an error message and exit.
if options.config is None:
message = '"--config" must be set on the command-line.'
option_parser.error(message)
return options
if __name__ == '__main__':
options = parse_options()
# Perform a 'push' of the target files specified in the configuration file.
try:
push(options.config)
except (tuf.FormatError, tuf.Error), e:
sys.stderr.write('Error: '+str(e)+'\n')
sys.exit(1)
# The 'push' and command-line options were processed successfully.
sys.exit(0)
```
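The script is driven entirely by the configuration file named on the command line. A hypothetical programmatic equivalent of `python push.py --config ./push.cfg` is sketched below; the './push.cfg' path is a placeholder, and its exact contents (host, user, identity file, remote directory, transfer module) are defined by tuf.pushtools.pushtoolslib, which is not shown here.
```python
# Programmatic equivalent of `python push.py --config ./push.cfg`.
# The configuration file must name the 'scp' transfer module and the
# host/user/identity-file/remote-directory settings described above.
import tuf
from tuf.pushtools.push import push

try:
    push('./push.cfg')
except (tuf.FormatError, tuf.Error) as e:
    print('Push failed: %s' % e)
```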
#### File: tuf/repo/signercli.py
```python
import os
import optparse
import getpass
import time
import sys
import logging
import errno
import tuf
import tuf.formats
import tuf.repo.signerlib
import tuf.repo.keystore
import tuf.util
import tuf.log
# See 'log.py' to learn how logging is handled in TUF.
logger = logging.getLogger('tuf.signercli')
json = tuf.util.import_json()
# The maximum number of attempts the user has to enter
# valid input.
MAX_INPUT_ATTEMPTS = 3
def _check_directory(directory):
try:
directory = tuf.repo.signerlib.check_directory(directory)
except (tuf.FormatError, tuf.Error), e:
message = str(e)+'\n'
raise tuf.RepositoryError(message)
return directory
def _get_password(prompt='Password: ', confirm=False):
"""
Return the password entered by the user. If 'confirm'
is True, the user is asked to enter the previously
entered password once again. If they match, the
password is returned to the caller.
"""
while True:
password = getpass.getpass(prompt, sys.stderr)
if not confirm:
return password
password2 = getpass.getpass('Confirm: ', sys.stderr)
if password == password2:
return password
else:
message = 'Mismatch; try again.'
logger.info(message)
def _prompt(message, result_type=str):
"""
Prompt the user for input by printing 'message', converting
the input to 'result_type', and returning the value to the
caller.
"""
return result_type(raw_input(message))
def _get_metadata_directory():
"""
Get the metadata directory from the user. The user
is asked to enter the directory, and if validated, is
returned to the caller. 'tuf.FormatError' is raised
if the directory is not properly formatted, and 'tuf.Error'
if it does not exist.
"""
metadata_directory = _prompt('\nEnter the metadata directory: ', str)
# Raises 'tuf.RepositoryError'.
metadata_directory = _check_directory(metadata_directory)
return metadata_directory
def _list_keyids(keystore_directory, metadata_directory):
"""
List the key files found in 'keystore_directory'.
It is assumed the directory arguments exist and have been validated by
the caller. The keyids are listed without the '.key' extension,
along with their associated roles.
"""
# Determine the 'root.txt' filename. This metadata file is needed
# to extract the keyids belonging to the top-level roles.
filenames = tuf.repo.signerlib.get_metadata_filenames(metadata_directory)
root_filename = filenames['root']
# Load the root metadata file. The loaded object should conform to
# 'tuf.formats.SIGNABLE_SCHEMA'.
metadata_signable = tuf.util.load_json_file(root_filename)
# Ensure the loaded json object is properly formatted.
try:
tuf.formats.check_signable_object_format(metadata_signable)
except tuf.FormatError, e:
message = 'Invalid metadata format: '+repr(root_filename)+'.'
raise tuf.RepositoryError(message)
# Extract the 'signed' role object from 'metadata_signable'.
root_metadata = metadata_signable['signed']
# Extract the 'roles' dict, where the dict keys are top-level roles and dict
# values are dictionaries containing a list of corresponding keyids and a
# threshold.
top_level_keyids = root_metadata['roles']
# Determine the keyids associated with all the targets roles.
try:
targets_keyids = tuf.repo.signerlib.get_target_keyids(metadata_directory)
except tuf.FormatError, e:
raise tuf.RepositoryError('Format error: '+str(e))
# Extract the key files ending in a '.key' extension.
key_paths = []
for filename in os.listdir(keystore_directory):
full_path = os.path.join(keystore_directory, filename)
if filename.endswith('.key') and not os.path.isdir(full_path):
key_paths.append(filename)
# For each keyid listed in the keystore, search 'top_level_keyids'
# and 'targets_keyids' for a possible entry. 'keyids_dict' stores
# the associated roles for each keyid.
keyids_dict = {}
for keyid in key_paths:
# Strip the '.key' extension. These raw keyids are needed to search
# for the roles attached to them in the metadata files.
keyid = keyid[0:keyid.rfind('.key')]
keyids_dict[keyid] = []
# Is 'keyid' listed in any of the top-level roles?
for top_level_role in top_level_keyids:
if keyid in top_level_keyids[top_level_role]['keyids']:
# To avoid a duplicate, ignore the 'targets.txt' role for now.
# 'targets_keyids' will also contain the keyids for this top-level role.
if top_level_role != 'targets':
keyids_dict[keyid].append(top_level_role)
# Is 'keyid' listed in any of the targets roles?
for targets_role, keyids in targets_keyids.items():
if keyid in keyids:
keyids_dict[keyid].append(targets_role)
# Log the keyids without the '.key' extension and the roles
# associated with them.
message = 'Listing the keyids in '+repr(keystore_directory)
logger.info(message)
for keyid in keyids_dict:
message = keyid+' : '+str(keyids_dict[keyid])
logger.info(message)
def _get_keyids(keystore_directory):
"""
Load the keyids in 'keystore_directory'. The keystore
database is populated with the keyids that are found
and successfully loaded. A list containing the keyids
of the loaded keys is returned to the caller. Since the
key files are stored in encrypted form, the user is asked
to enter the password that was used to encrypt the key
file.
"""
# The keyids list containing the keys loaded.
loaded_keyids = []
# Save the 'load_keystore_from_keyfiles' function call.
# Set to improve readability.
load_key = tuf.repo.keystore.load_keystore_from_keyfiles
# Ask the user for the keyid and password. Next, try to load the specified
# keyid/password combination. If loaded, append the loaded key's keyid to
# 'loaded_keyids'. Loop the steps above or exit when the user enters 'quit'.
while True:
keyid_prompt = '\nEnter the keyid or "quit" when done: '
keyid = _prompt(keyid_prompt, str)
if keyid.lower() == 'quit':
break
# Get the password from the user so we can decrypt the key file.
password = _get_password('\nEnter the keyid\'s password: ')
# Try to load the keyfile with the keyid and password credentials.
loaded_keyid = load_key(keystore_directory, [keyid], [password])
# Was 'keyid' loaded?
if keyid not in loaded_keyid:
message = 'Could not load keyid: '+keyid
logger.error(message)
continue
# Append 'keyid' to the loaded list of keyids.
loaded_keyids.append(loaded_keyid[0])
return loaded_keyids
def _get_all_config_keyids(config_filepath, keystore_directory):
"""
Retrieve the contents of the config file and load
the keys for the top-level roles. After this function
returns successfully, all the required roles are loaded
in the keystore. The arguments should be absolute paths.
<Exceptions>
tuf.Error, if the required top-level keys could
not be loaded.
<Returns>
A dictionary containing the keyids for the top-level roles.
loaded_keyids = {'root': [1233d3d, <KEY>, ..],
'release': [sdfsd323, sdsd9090s, ..]
...}
"""
# Save the 'load_keystore_from_keyfiles' function call.
load_key = tuf.repo.keystore.load_keystore_from_keyfiles
# 'tuf.Error' raised if the configuration file cannot be read.
config_dict = tuf.repo.signerlib.read_config_file(config_filepath)
loaded_keyids = {}
# Extract the sections from the config file. We are only
# interested in role sections.
for key, value in config_dict.items():
if key in ['root', 'targets', 'release', 'timestamp']:
# Try to load the keyids for each role.
loaded_keyids[key] = []
for keyid in value['keyids']:
for attempt in range(MAX_INPUT_ATTEMPTS):
message = '\nEnter the password for the '+key+' role ('+keyid+'): '
password = _get_password(message)
loaded_key = load_key(keystore_directory, [keyid], [password])
if not loaded_key or keyid not in loaded_key:
message = 'Could not load keyid: '+keyid
logger.error(message)
continue
loaded_keyids[key].append(keyid)
break
if keyid not in loaded_keyids[key]:
raise tuf.Error('Could not load a required top-level role key')
# Ensure we loaded keys for the required top-level roles.
for key in ['root', 'targets', 'release', 'timestamp']:
if key not in loaded_keyids:
message = 'The configuration file did not contain the required roles'
raise tuf.Error(message)
return loaded_keyids
def _get_role_config_keyids(config_filepath, keystore_directory, role):
"""
Retrieve and load the key(s) for 'role', as listed in the keyids
found in 'config_filepath'. 'config_filepath' and 'keystore_directory'
should be absolute paths.
<Exceptions>
tuf.Error, if the required keys could not be loaded.
"""
# Save the 'load_keystore_from_keyfiles' function call.
load_key = tuf.repo.keystore.load_keystore_from_keyfiles
# 'tuf.Error' raised if the configuration file cannot be read.
config_dict = tuf.repo.signerlib.read_config_file(config_filepath)
role_keyids = []
# Extract the sections from the config file. We are only interested
# in the 'role' section.
for key, value in config_dict.items():
if key == role:
for keyid in value['keyids']:
for attempt in range(MAX_INPUT_ATTEMPTS):
message = '\nEnter the password for the '+key+' role ('+keyid+'): '
password = _get_password(message)
loaded_key = load_key(keystore_directory, [keyid], [password])
if not loaded_key or keyid not in loaded_key:
message = 'Could not load keyid: '+keyid
logger.error(message)
continue
role_keyids.append(keyid)
break
# Ensure we loaded all the keyids.
for keyid in value['keyids']:
if keyid not in role_keyids:
raise tuf.Error('Could not load a required role key')
if not role_keyids:
raise tuf.Error('Could not load the required keys for '+role)
return role_keyids
def _sign_and_write_metadata(metadata, keyids, filename):
"""
Sign 'metadata' and write it to 'filename' (an absolute path),
overwriting the original file if it exists. If any of the
keyids have already signed the file, the old signatures of
those keyids will be replaced.
<Exceptions>
tuf.FormatError, if any of the arguments are incorrectly formatted.
tuf.Error, if an error is encountered.
"""
# Sign the metadata object. The 'signable' object contains the keyids
# used in the signing process, including the signatures generated.
signable = tuf.repo.signerlib.sign_metadata(metadata, keyids, filename)
# Write the 'signable' object to 'filename'. The 'filename' file is
# the final metadata file, such as 'root.txt' and 'targets.txt'.
tuf.repo.signerlib.write_metadata_file(signable, filename)
def _get_metadata_version(metadata_filename):
"""
If 'metadata_filename' exists, load it and extract the current version.
This version number is incremented by one prior to returning. If
'metadata_filename' does not exist, return a version value of 1.
Raise 'tuf.RepositoryError' if 'metadata_filename' cannot be read or
validated.
"""
# If 'metadata_filename' does not exist on the repository, this means
# it will be newly created and thus version 1 of the file.
if not os.path.exists(metadata_filename):
return 1
# Open 'metadata_filename', extract the version number, and return it
# incremented by 1. A metadata's version is used to determine newer metadata
# from older. The client should only accept newer metadata.
try:
signable = tuf.repo.signerlib.read_metadata_file(metadata_filename)
tuf.formats.check_signable_object_format(signable)
except (tuf.FormatError, tuf.Error), e:
message = repr(metadata_filename)+' could not be opened or is invalid.'+\
' Backup or replace it and try again.'
raise tuf.RepositoryError(message)
current_version = signable['signed']['version']
return current_version+1
def _get_metadata_expiration():
"""
Prompt the user for the expiration date of the metadata file.
If the entered date is valid, it is returned unmodified.
<Exceptions>
tuf.RepositoryError, if the entered expiration date is invalid.
"""
message = '\nCurrent time: '+tuf.formats.format_time(time.time())+'.\n'+\
'Enter the expiration date, in UTC, of the metadata file (yyyy-mm-dd HH:MM:SS): '
try:
input_date = _prompt(message, str)
input_date = input_date+' UTC'
expiration_date = tuf.formats.parse_time(input_date)
except (tuf.FormatError, ValueError), e:
raise tuf.RepositoryError('Invalid date entered.')
if expiration_date < time.time():
message = 'The expiration date must occur after the current date.'
raise tuf.RepositoryError(message)
return input_date
def change_password(keystore_directory):
"""
<Purpose>
Change the password for the signing key specified by the user.
All the values required by the user will be interactively
retrieved by this function.
<Arguments>
keystore_directory:
The directory containing the signing keys (i.e., key files ending
in '.key').
<Exceptions>
tuf.RepositoryError, if a bad password was given, the keystore directory
was invalid, or a required key could not be loaded.
<Side Effects>
The key file specified by the user is modified, including the encryption
key.
<Returns>
None.
"""
# Save the 'load_keystore_from_keyfiles' function call.
load_key = tuf.repo.keystore.load_keystore_from_keyfiles
# Verify the 'keystore_directory' argument.
keystore_directory = _check_directory(keystore_directory)
# Retrieve the metadata directory. The 'root.txt' and all the targets
# metadata are needed to extract rolenames and their corresponding
# keyids.
try:
metadata_directory = _get_metadata_directory()
except (tuf.FormatError, tuf.Error), e:
message = str(e)+'\n'
raise tuf.RepositoryError(message)
# List the keyids in the keystore and prompt the user for the keyid they
# wish to modify.
_list_keyids(keystore_directory, metadata_directory)
# Retrieve the keyid from the user.
message = '\nEnter the keyid for the password you wish to change: '
keyid = _prompt(message, str)
# Get the old password from the user.
old_password_prompt = '\nEnter the old password for the keyid: '
old_password = _get_password(old_password_prompt)
# Try to load the keyfile
loaded_keys = load_key(keystore_directory, [keyid], [old_password])
# Was 'keyid' loaded?
if keyid not in loaded_keys:
message = 'Could not load keyid: '+keyid+'\n'
raise tuf.RepositoryError(message)
# Retrieve the new password.
new_password = _get_password('\nNew password: ', confirm=True)
# Now that we have all the required information, try to change the password.
try:
tuf.repo.keystore.change_password(keyid, old_password, new_password)
except (tuf.BadPasswordError, tuf.UnknownKeyError), e:
message = str(e)+'\n'
raise tuf.RepositoryError(message)
# Save the changes.
tuf.repo.keystore.save_keystore_to_keyfiles(keystore_directory)
def generate_rsa_key(keystore_directory):
"""
<Purpose>
Generate an RSA key and save it to the keystore directory.
<Arguments>
keystore_directory:
The directory containing the signing keys (i.e., key files ending
in '.key').
<Exceptions>
tuf.RepositoryError, if the keystore directory is invalid or an rsa key
cannot be generated.
<Side Effects>
An RSA key will be generated and added to tuf.repo.keystore.
The RSA key will be saved to the keystore directory specified
on the command-line.
<Returns>
None.
"""
# Save a reference to the generate_and_save_rsa_key() function.
save_rsa_key = tuf.repo.signerlib.generate_and_save_rsa_key
# Verify the 'keystore_directory' argument.
keystore_directory = _check_directory(keystore_directory)
# Retrieve the number of bits for the RSA key from the user.
rsa_key_bits = _prompt('\nEnter the number of bits for the RSA key: ', int)
# Retrieve the password used to encrypt/decrypt the key file from the user.
message = '\nEnter a password to encrypt the generated RSA key: '
password = _get_password(message, confirm=True)
# Generate the RSA key and save it to 'keystore_directory'.
try:
rsa_key = save_rsa_key(keystore_directory=keystore_directory,
password=password, bits=rsa_key_bits)
logger.info('Generated a new key: '+rsa_key['keyid'])
except (tuf.FormatError, tuf.CryptoError), e:
message = 'The RSA key could not be generated. '+str(e)+'\n'
raise tuf.RepositoryError(message)
def list_signing_keys(keystore_directory):
"""
<Purpose>
Print the key IDs of the signing keys listed in the keystore directory.
    The associated roles of each keyid are also listed.
<Arguments>
keystore_directory:
The directory containing the signing keys (i.e., key files ending
in '.key').
<Exceptions>
tuf.RepositoryError, if the keystore directory is invalid or if the
required metadata files cannot be read.
<Side Effects>
None.
<Returns>
None.
"""
# Verify the 'keystore_directory' argument.
keystore_directory = _check_directory(keystore_directory)
# Retrieve the metadata directory. The 'root.txt' file and all the metadata
# for the targets roles are needed to extract rolenames and their associated
# keyids.
try:
metadata_directory = _get_metadata_directory()
except (tuf.FormatError, tuf.Error), e:
message = str(e)+'\n'
raise tuf.RepositoryError(message)
_list_keyids(keystore_directory, metadata_directory)
def dump_key(keystore_directory):
"""
<Purpose>
Dump the contents of the signing key specified by the user.
This dumped information includes the keytype, signing method,
the public key, and the private key (if requested by the user).
<Arguments>
keystore_directory:
The directory containing the signing keys (i.e., key files ending
in '.key').
<Exceptions>
tuf.RepositoryError, if the keystore directory is invalid, a required
    key cannot be loaded, or the keystore contains an invalid key.
<Side Effects>
The contents of encrypted key files are extracted and printed.
<Returns>
None.
"""
# Save the 'load_keystore_from_keyfiles' function call.
load_key = tuf.repo.keystore.load_keystore_from_keyfiles
# Verify the 'keystore_directory' argument.
keystore_directory = _check_directory(keystore_directory)
# Retrieve the metadata directory. The 'root.txt' and all the targets
# role metadata files are needed to extract rolenames and their corresponding
# keyids.
try:
metadata_directory = _get_metadata_directory()
except (tuf.FormatError, tuf.Error), e:
message = str(e)+'\n'
raise tuf.RepositoryError(message)
# List the keyids found in 'keystore_directory', minus the '.key' extension.
_list_keyids(keystore_directory, metadata_directory)
# Retrieve the keyid and password from the user.
message = '\nEnter the keyid for the signing key you wish to dump: '
keyid = _prompt(message, str)
password = _get_password('\nEnter the password for the keyid: ')
# Try to load the keyfile
loaded_keys = load_key(keystore_directory, [keyid], [password])
# Was 'keyid' loaded?
if keyid not in loaded_keys:
message = 'Could not load keyid: '+keyid+'\n'
raise tuf.RepositoryError(message)
# Get the key object.
key = tuf.repo.keystore.get_key(keyid)
# Ask the user if they would like to print the private key as well.
show_private = False
prompt = 'Should the private key be printed as well?' \
' (if yes, enter \'private\'): '
message = '*WARNING* Printing the private key reveals' \
' sensitive information *WARNING*'
logger.warning(message)
input = _prompt(prompt, str)
if input.lower() == 'private':
show_private = True
# Retrieve the key metadata according to the keytype.
if key['keytype'] == 'rsa':
key_metadata = tuf.rsa_key.create_in_metadata_format(key['keyval'],
private=show_private)
else:
message = 'The keystore contains an invalid key type.'
raise tuf.RepositoryError(message)
# Print the contents of the key metadata.
logger.info(json.dumps(key_metadata, indent=2, sort_keys=True))
def make_root_metadata(keystore_directory):
"""
<Purpose>
Create the 'root.txt' file.
<Arguments>
keystore_directory:
The directory containing the signing keys (i.e., key files ending
in '.key').
<Exceptions>
tuf.RepositoryError, if required directories cannot be validated,
required keys cannot be loaded, or a properly formatted root
metadata file cannot be created.
<Side Effects>
    The contents of an existing root metadata file are overwritten.
<Returns>
None.
"""
# Verify the 'keystore_directory' argument.
keystore_directory = _check_directory(keystore_directory)
# Get the metadata directory and the metadata filenames.
try:
metadata_directory = _get_metadata_directory()
except (tuf.FormatError, tuf.Error), e:
message = str(e)+'\n'
raise tuf.RepositoryError(message)
filenames = tuf.repo.signerlib.get_metadata_filenames(metadata_directory)
root_filename = filenames['root']
# If the metadata file currently exists, extract the version number and
# increment it by 1. Otherwise, set the version to 1. Incrementing
# the version number ensures the newly created metadata file is considered
# newer.
version = _get_metadata_version(root_filename)
# Get the configuration file.
config_filepath = _prompt('\nEnter the configuration file path: ', str)
config_filepath = os.path.abspath(config_filepath)
# Load the keys for the top-level roles.
try:
loaded_keyids = _get_all_config_keyids(config_filepath, keystore_directory)
except (tuf.Error, tuf.FormatError), e:
message = str(e)+'\n'
raise tuf.RepositoryError(message)
root_keyids = loaded_keyids['root']
# Generate the root metadata and write it to 'root.txt'.
try:
tuf.repo.signerlib.build_root_file(config_filepath, root_keyids,
metadata_directory, version)
except (tuf.FormatError, tuf.Error), e:
message = str(e)+'\n'
raise tuf.RepositoryError(message)
def make_targets_metadata(keystore_directory):
"""
<Purpose>
Create the 'targets.txt' metadata file. The targets must exist at the
    same paths they will occupy on the repository. This takes a list of targets.
    Custom metadata is not handled at the moment, and it is allowed to not
    provide keys.
<Arguments>
keystore_directory:
The directory containing the signing keys (i.e., key files ending
in '.key').
<Exceptions>
tuf.RepositoryError, if required directories cannot be validated,
required keys cannot be loaded, or a properly formatted targets
metadata file cannot be created.
<Side Effects>
    The contents of an existing targets metadata file are overwritten.
<Returns>
None.
"""
# Verify the 'keystore_directory' argument.
keystore_directory = _check_directory(keystore_directory)
# Retrieve the target files. The target paths entered by the user should be
# separated by white space. 'targets' is a list of the target path strings
# extracted from user input.
prompt_targets = '\nInput may be a directory, directories, or any '+\
'number of file paths.\nEnter the target files: '
targets_input = _prompt(prompt_targets, str)
targets = targets_input.split()
# Retrieve the metadata directory and the 'targets' filename.
try:
metadata_directory = _get_metadata_directory()
except (tuf.FormatError, tuf.Error), e:
message = str(e)+'\n'
raise tuf.RepositoryError(message)
filenames = tuf.repo.signerlib.get_metadata_filenames(metadata_directory)
targets_filename = filenames['targets']
# If the metadata file currently exists, extract the version number and
# increment it by 1. Otherwise, set the version to 1. Incrementing
# the version number ensures the newly created metadata file is considered
# newer.
version = _get_metadata_version(targets_filename)
# Prompt the user for the metadata file's expiration date.
# Raise 'tuf.RepositoryError' if an invalid date is entered
# by the user.
expiration_date = _get_metadata_expiration()
# Get the configuration file.
config_filepath = _prompt('\nEnter the configuration file path: ', str)
config_filepath = os.path.abspath(config_filepath)
try:
# Retrieve and load the 'targets' signing keys.
targets_keyids = _get_role_config_keyids(config_filepath,
keystore_directory, 'targets')
except (tuf.FormatError, tuf.Error), e:
message = str(e)+'\n'
raise tuf.RepositoryError(message)
try:
# Create, sign, and write the "targets.txt" file.
tuf.repo.signerlib.build_targets_file(targets, targets_keyids,
metadata_directory, version,
expiration_date)
except (tuf.FormatError, tuf.Error), e:
message = str(e)+'\n'
raise tuf.RepositoryError(message)
def make_release_metadata(keystore_directory):
"""
<Purpose>
Create the release metadata file.
    The minimum metadata must exist: root.txt and targets.txt.
<Arguments>
keystore_directory:
The directory containing the signing keys (i.e., key files ending
in '.key').
<Exceptions>
tuf.RepositoryError, if required directories cannot be validated,
required keys cannot be loaded, or a properly formatted release
metadata file cannot be created.
<Side Effects>
    The contents of an existing release metadata file are overwritten.
<Returns>
None.
"""
# Verify the 'keystore_directory' argument.
keystore_directory = _check_directory(keystore_directory)
# Retrieve the metadata directory and the release filename.
try:
metadata_directory = _get_metadata_directory()
except (tuf.FormatError, tuf.Error), e:
message = str(e)+'\n'
raise tuf.RepositoryError(message)
filenames = tuf.repo.signerlib.get_metadata_filenames(metadata_directory)
release_filename = filenames['release']
# If the metadata file currently exists, extract the version number and
# increment it by 1. Otherwise, set the version to 1. Incrementing
# the version number ensures the newly created metadata file is considered
# newer.
version = _get_metadata_version(release_filename)
# Prompt the user for the metadata file's expiration date.
# Raise 'tuf.RepositoryError' if an invalid date is entered
# by the user.
expiration_date = _get_metadata_expiration()
# Get the configuration file.
config_filepath = _prompt('\nEnter the configuration file path: ', str)
config_filepath = os.path.abspath(config_filepath)
# Retrieve and load the 'release' signing keys.
try:
release_keyids = _get_role_config_keyids(config_filepath,
keystore_directory, 'release')
# Generate the release metadata and write it to 'release.txt'
tuf.repo.signerlib.build_release_file(release_keyids, metadata_directory,
version, expiration_date)
except (tuf.FormatError, tuf.Error), e:
message = str(e)+'\n'
raise tuf.RepositoryError(message)
def make_timestamp_metadata(keystore_directory):
"""
<Purpose>
Create the timestamp metadata file. The 'release.txt' file must exist.
<Arguments>
keystore_directory:
The directory containing the signing keys (i.e., key files ending
in '.key').
<Exceptions>
tuf.RepositoryError, if required directories cannot be validated,
required keys cannot be loaded, or a properly formatted timestamp
metadata file cannot be created.
<Side Effects>
    The contents of an existing timestamp metadata file are overwritten.
<Returns>
None.
"""
# Verify the 'keystore_directory' argument.
keystore_directory = _check_directory(keystore_directory)
# Retrieve the metadata directory and the timestamp filename.
try:
metadata_directory = _get_metadata_directory()
except (tuf.FormatError, tuf.Error), e:
message = str(e)+'\n'
raise tuf.RepositoryError(message)
filenames = tuf.repo.signerlib.get_metadata_filenames(metadata_directory)
timestamp_filename = filenames['timestamp']
# If the metadata file currently exists, extract the version number and
# increment it by 1. Otherwise, set the version to 1. Incrementing
# the version number ensures the newly created metadata file is considered
# newer.
version = _get_metadata_version(timestamp_filename)
# Prompt the user for the metadata file's expiration date.
# Raise 'tuf.RepositoryError' if an invalid date is entered
# by the user.
expiration_date = _get_metadata_expiration()
# Get the configuration file.
config_filepath = _prompt('\nEnter the configuration file path: ', str)
config_filepath = os.path.abspath(config_filepath)
# Retrieve and load the 'timestamp' signing keys.
try:
timestamp_keyids = _get_role_config_keyids(config_filepath,
keystore_directory, 'timestamp')
# Generate the timestamp metadata and write it to 'timestamp.txt'
tuf.repo.signerlib.build_timestamp_file(timestamp_keyids, metadata_directory,
version, expiration_date)
except (tuf.FormatError, tuf.Error), e:
message = str(e)+'\n'
raise tuf.RepositoryError(message)
def sign_metadata_file(keystore_directory):
"""
<Purpose>
Sign the metadata file specified by the user.
<Arguments>
keystore_directory:
The directory containing the signing keys (i.e., key files ending
in '.key').
<Exceptions>
tuf.RepositoryError, if required directories cannot be validated,
required keys cannot be loaded, or the specified metadata file
is invalid.
<Side Effects>
    The contents of an existing metadata file are overwritten.
<Returns>
None.
"""
# Verify the 'keystore_directory' argument.
keystore_directory = _check_directory(keystore_directory)
# Retrieve the metadata directory. The 'root.txt' and all the targets
# role metadata files are needed to extract rolenames and their corresponding
# keyids.
try:
metadata_directory = _get_metadata_directory()
except (tuf.FormatError, tuf.Error), e:
message = str(e)+'\n'
raise tuf.RepositoryError(message)
# List the keyids available in the keystore.
_list_keyids(keystore_directory, metadata_directory)
# Retrieve the keyids of the signing keys from the user.
message = 'The keyids that will sign the metadata file must be loaded.'
logger.info(message)
loaded_keyids = _get_keyids(keystore_directory)
if len(loaded_keyids) == 0:
message = 'No keyids were loaded\n'
raise tuf.RepositoryError(message)
# Retrieve the metadata file the user intends to sign.
metadata_filename = _prompt('\nEnter the metadata filename: ', str)
metadata_filename = os.path.abspath(metadata_filename)
if not os.path.isfile(metadata_filename):
message = repr(metadata_filename)+' is an invalid file.\n'
raise tuf.RepositoryError(message)
# Create, sign, and write the metadata file.
metadata = tuf.repo.signerlib.read_metadata_file(metadata_filename)
_sign_and_write_metadata(metadata, loaded_keyids, metadata_filename)
def make_delegation(keystore_directory):
"""
<Purpose>
Create a delegation by updating the 'delegations' field of a parent's
metadata file (targets) and creating the delegated role's metadata file.
The user specifies the delegated role's name and target files.
The parent's metadata file must exist.
<Arguments>
keystore_directory:
The directory containing the signing keys (i.e., key files ending
in '.key').
<Exceptions>
tuf.RepositoryError, if required directories cannot be validated, the
parent role cannot be loaded, the delegated role metadata file
cannot be created, or the parent role metadata file cannot be updated.
<Side Effects>
    The parent targets metadata file is modified: its 'delegations' field
    is added or updated.
<Returns>
None.
"""
# Verify the 'keystore_directory' argument.
keystore_directory = _check_directory(keystore_directory)
# Get the metadata directory.
try:
metadata_directory = _get_metadata_directory()
except (tuf.FormatError, tuf.Error), e:
message = str(e)+'\n'
raise tuf.RepositoryError(message)
# Get the delegated role's target paths, which should be located within
# the repository's targets directory. We need these directory/file paths to
# generate the delegated role's metadata file.
prompt = '\nThe paths entered below should be located within the '+\
'repository\'s targets directory.\nEnter the directory, directories, or '+\
'any number of file paths containing the delegated role\'s target files: '
delegated_targets_input = _prompt(prompt, str)
delegated_targets_input = delegated_targets_input.split()
# Verify the format of the delegated targets specified by the user.
# The paths in 'delegated_targets_input' will be verified in
# in the _make_delegated_metadata() call.
try:
tuf.formats.PATHS_SCHEMA.check_match(delegated_targets_input)
except (tuf.FormatError, tuf.Error), e:
message = str(e)+'\n'
raise tuf.RepositoryError(message)
# Get all the target roles and their respective keyids.
# These keyids will let the user know which roles are currently known.
# signerlib.get_target_keyids() returns a dictionary that has the form:
# {'targets': [keyid1, ...], 'targets/role1': [keyid1, ...] ...}
targets_roles = tuf.repo.signerlib.get_target_keyids(metadata_directory)
# Load the parent role specified by the user. The parent role must be loaded
# so its 'delegations' field can be updated.
parent_role, parent_keyids = _load_parent_role(metadata_directory,
keystore_directory,
targets_roles)
# Load the delegated role specified by the user. The delegated role must be
# loaded so its metadata file can be created.
delegated_role, delegated_keyids = _get_delegated_role(keystore_directory,
metadata_directory)
# Create, sign, and write the delegated role's metadata file.
delegated_paths = _make_delegated_metadata(metadata_directory,
delegated_targets_input,
parent_role, delegated_role,
delegated_keyids)
# Update the parent role's metadata file. The parent role's delegation
# field must be updated with the newly created delegated role.
_update_parent_metadata(metadata_directory, delegated_role, delegated_keyids,
delegated_paths, parent_role, parent_keyids)
def _load_parent_role(metadata_directory, keystore_directory, targets_roles):
"""
Load the parent role specified by the user. The user is presented with a
list of known targets roles and asked to enter the parent role to load.
Ensure the parent role is loaded properly and return a string containing
the parent role's full rolename and a list of keyids belonging to the parent.
"""
# 'load_key' is a reference to the 'load_keystore_from_keyfiles function'.
# Set to improve readability.
load_key = tuf.repo.keystore.load_keystore_from_keyfiles
# Get the parent role. We need to modify the parent role's metadata file.
parent_role = None
# Retrieve the parent role from the user.
for attempt in range(MAX_INPUT_ATTEMPTS):
prompt = '\nChoose and enter the parent role\'s full name: '
parent_role = _prompt(prompt, str)
if parent_role not in targets_roles:
message = 'Invalid role name entered'
logger.info(message)
parent_role = None
continue
else:
break
# Ensure we loaded a valid parent role.
if parent_role is None:
message = 'Could not get a valid parent role.\n'
raise tuf.RepositoryError(message)
# Load the parent's key(s). The key needs to be loaded because
# its metadata file will be modified.
parent_keyids = []
for keyid in targets_roles[parent_role]:
for attempt in range(MAX_INPUT_ATTEMPTS):
prompt = '\nEnter the password for '+parent_role+' ('+keyid+'): '
password = _get_password(prompt)
loaded_keyid = load_key(keystore_directory, [keyid], [password])
if keyid not in loaded_keyid:
message = 'The keyid could not be loaded.'
logger.info(message)
continue
parent_keyids.append(loaded_keyid[0])
break
if keyid not in parent_keyids:
message = 'Could not load the keys for the parent role.\n'
raise tuf.RepositoryError(message)
return parent_role, parent_keyids
def _get_delegated_role(keystore_directory, metadata_directory):
"""
Get the delegated role specified by the user. The user is presented with
a list of keyids available in the keystore and asked to enter the keyid
belonging to the delegated role. Return a string containing
the delegated role's full rolename and its keyids.
"""
# Retrieve the delegated rolename from the user (e.g., 'role1').
delegated_role = _prompt('\nEnter the delegated role\'s name: ', str)
delegated_role = unicode(delegated_role, encoding="utf-8")
# Retrieve the delegated role\'s keyids from the user.
message = 'The keyid of the delegated role must be loaded.'
logger.info(message)
delegated_keyids = _get_keyids(keystore_directory)
# Ensure at least one delegated key was loaded.
if not tuf.formats.THRESHOLD_SCHEMA.matches(len(delegated_keyids)):
message = 'The minimum required threshold of keyids was not loaded.\n'
raise tuf.RepositoryError(message)
return delegated_role, delegated_keyids
def _make_delegated_metadata(metadata_directory, delegated_targets,
parent_role, delegated_role, delegated_keyids):
"""
Create, sign, and write the metadata file for the newly added delegated
role. Determine the target files from the paths in 'delegated_targets'
and the other information needed to generate the targets metadata file for
  'delegated_role'. Return the delegated paths to the caller.
"""
repository_directory, junk = os.path.split(metadata_directory)
# Retrieve the file paths for the delegated targets. Keep track of the valid
# paths in 'delegated_targets', which will be stored in the 'paths' entry
# of the parent's metadata. Directories are preserved in the returned
# 'delegated_paths' list.
delegated_paths = []
delegated_filepaths = []
# The 'delegated_paths' list contains either file paths or the paths of
# directories. A child role may list any target(s) under a directory or sub-
# directory. Replicate directory wildcards using os.path.commonprefix()
# instead of regular expressions, which may be abused by input
# carefully-crafted for this purpose.
for path in delegated_targets:
path = os.path.abspath(path)
relative_path = path[len(repository_directory)+1:]
if os.path.isfile(path):
# The target paths need to be relative to the repository's targets
# directory (e.g., 'targets/role1/target_file.gif').
# [len(repository_directory)+1:] strips the repository path, including
# its trailing path separator.
delegated_filepaths.append(relative_path)
delegated_paths.append(relative_path)
# A directory implies the child role may list any targets under this
# directory.
elif os.path.isdir(path):
for entry in os.listdir(path):
filepath = os.path.join(path, entry)
if os.path.isfile(filepath):
relative_filepath = os.path.join(relative_path, entry)
delegated_filepaths.append(relative_filepath)
for delegated_path in delegated_paths:
if os.path.commonprefix([relative_path, delegated_path]) == delegated_path:
break
# Add the relative path of 'path' to 'delegated_paths'. 'relative_path'
# has not been added to 'delegated_paths', nor a parent directory of it.
else:
delegated_paths.append(relative_path+os.sep)
message = 'There are '+repr(len(delegated_filepaths))+' target paths for '+\
repr(delegated_role)
logger.info(message)
# Create, sign, and write the delegated role's metadata file.
# The first time a parent role creates a delegation, a directory
# containing the parent role's name is created in the metadata
# directory. For example, if the targets roles creates a delegated
# role 'role1', the metadata directory would then contain:
# '{metadata_directory}/targets/role1.txt', where 'role1.txt' is the
# delegated role's metadata file.
# If delegated role 'role1' creates its own delegated role 'role2', the
# metadata directory would then contain:
# '{metadata_directory}/targets/role1/role2.txt'.
# When creating a delegated role, if the parent directory already
# exists, this means a prior delegation has been performed by the parent.
parent_directory = os.path.join(metadata_directory, parent_role)
try:
os.mkdir(parent_directory)
except OSError, e:
if e.errno == errno.EEXIST:
pass
else:
raise
# Prompt the user for the metadata file's expiration date.
# Raise 'tuf.RepositoryError' if an invalid date is entered
# by the user.
expiration_date = _get_metadata_expiration()
# Sign and write the delegated metadata file.
delegated_role_filename = delegated_role+'.txt'
metadata_filename = os.path.join(parent_directory, delegated_role_filename)
repository_directory, junk = os.path.split(metadata_directory)
generate_metadata = tuf.repo.signerlib.generate_targets_metadata
delegated_metadata = generate_metadata(repository_directory, delegated_filepaths,
1, expiration_date)
_sign_and_write_metadata(delegated_metadata, delegated_keyids,
metadata_filename)
return delegated_paths
def _update_parent_metadata(metadata_directory, delegated_role, delegated_keyids,
delegated_paths, parent_role, parent_keyids):
"""
Update the parent role's metadata file. The delegations field of the
metadata file is updated with the key and role information belonging
to the newly added delegated role. Finally, the metadata file
is signed and written to the metadata directory.
"""
# Extract the metadata from the parent role's file.
parent_filename = os.path.join(metadata_directory, parent_role)
parent_filename = parent_filename+'.txt'
parent_signable = tuf.repo.signerlib.read_metadata_file(parent_filename)
parent_metadata = parent_signable['signed']
# Extract the delegations structure if it exists.
delegations = parent_metadata.get('delegations', {})
# Update the keys field.
keys = delegations.get('keys', {})
for delegated_keyid in delegated_keyids:
# Retrieve the key belonging to 'delegated_keyid' from the keystore.
role_key = tuf.repo.keystore.get_key(delegated_keyid)
if role_key['keytype'] == 'rsa':
keyval = role_key['keyval']
keys[delegated_keyid] = tuf.rsa_key.create_in_metadata_format(keyval)
else:
message = 'Invalid keytype encountered: '+delegated_keyid+'\n'
raise tuf.RepositoryError(message)
# Add the full list of keys belonging to 'delegated_role' to the delegations
# field.
delegations['keys'] = keys
# Update the 'roles' field.
roles = delegations.get('roles', [])
threshold = len(delegated_keyids)
delegated_role = parent_role+'/'+delegated_role
relative_paths = []
for path in delegated_paths:
relative_paths.append(os.path.sep.join(path.split(os.path.sep)[1:]))
role_metadata = tuf.formats.make_role_metadata(delegated_keyids, threshold,
name=delegated_role,
paths=relative_paths)
role_index = tuf.repo.signerlib.find_delegated_role(roles, delegated_role)
if role_index is None:
# Append role to the end of the list of delegated roles.
logger.info('Appending role '+delegated_role+' to '+parent_role)
roles.append(role_metadata)
else:
# Update role with the same name.
logger.info('Replacing role '+delegated_role+' in '+parent_role)
roles[role_index] = role_metadata
delegations['roles'] = roles
# Update the larger metadata structure.
parent_metadata['delegations'] = delegations
# Increment the parent role's version.
version = parent_metadata['version']
parent_metadata['version'] = version+1
# Try to write the modified targets file.
parent_signable = tuf.formats.make_signable(parent_metadata)
_sign_and_write_metadata(parent_signable, parent_keyids, parent_filename)
def process_option(options):
"""
<Purpose>
Determine the command-line option chosen by the user and call its
corresponding function. If 'signercli' is invoked with the --genrsakey
command-line option, its corresponding 'generate_rsa_key()' function
is called.
<Arguments>
options:
An optparse OptionValues instance, returned by parser.parse_args().
<Exceptions>
tuf.RepositoryError, raised by one of the supported option
functions.
tuf.Error, if a valid option was not encountered.
<Side Effects>
Files in the repository are either created or modified
depending on the command-line option chosen by the user.
<Returns>
None.
"""
# Determine which option was chosen and call its corresponding
# internal function with the option's keystore directory argument.
if options.genrsakey is not None:
generate_rsa_key(options.genrsakey)
elif options.listkeys is not None:
list_signing_keys(options.listkeys)
elif options.changepass is not None:
change_password(options.changepass)
elif options.dumpkey is not None:
dump_key(options.dumpkey)
elif options.makeroot is not None:
make_root_metadata(options.makeroot)
elif options.maketargets is not None:
make_targets_metadata(options.maketargets)
elif options.makerelease is not None:
make_release_metadata(options.makerelease)
elif options.maketimestamp is not None:
make_timestamp_metadata(options.maketimestamp)
elif options.sign is not None:
sign_metadata_file(options.sign)
elif options.makedelegation is not None:
make_delegation(options.makedelegation)
else:
raise tuf.Error('A valid option was not encountered.\n')
def parse_options():
"""
<Purpose>
Parse the command-line options. 'signercli' expects a single
command-line option and one keystore directory argument.
Example:
$ python signercli.py --genrsakey ./keystore
All supported command-line options expect a single keystore
directory argument. If 'signercli' is invoked with an incorrect
number of command-line options or arguments, a parser error
is printed and the script exits.
<Arguments>
None.
<Exceptions>
None.
<Side Effects>
    A file is created or modified depending on the option
encountered on the command-line.
<Returns>
The options object returned by the parser's parse_args() method.
"""
usage = 'usage: %prog [option] <keystore_directory>'
option_parser = optparse.OptionParser(usage=usage)
# Add the options supported by 'signercli' to the option parser.
option_parser.add_option('--genrsakey', action='store', type='string',
help='Generate an RSA key and save it to '\
'the keystore.')
option_parser.add_option('--listkeys', action='store', type='string',
help='List the key IDs of the signing '\
'keys located in the keystore.')
option_parser.add_option('--changepass', action='store', type='string',
help='Change the password for one of '\
'the signing keys.')
option_parser.add_option('--dumpkey', action='store', type='string',
help='Dump the contents of an encrypted '\
'key file.')
option_parser.add_option('--makeroot', action='store', type='string',
help='Create the Root metadata file '\
'(root.txt).')
option_parser.add_option('--maketargets', action='store', type='string',
help='Create the Targets metadata file '\
'(targets.txt).')
option_parser.add_option('--makerelease', action='store', type='string',
help='Create the Release metadata file '\
'(release.txt).')
option_parser.add_option('--maketimestamp', action='store', type='string',
help='Create the Timestamp metadata file '\
'(timestamp.txt).')
option_parser.add_option('--sign', action='store', type='string',
help='Sign a metadata file.')
option_parser.add_option('--makedelegation', action='store', type='string',
help='Create a delegated role by creating '\
'its metadata file and updating the parent '\
'role\'s metadata file.')
(options, remaining_arguments) = option_parser.parse_args()
# Ensure the script was invoked with the correct number of arguments
# (i.e., one command-line option and a single keystore directory argument).
# Return the options object to the caller to determine the option chosen
# by the user. option_parser.error() will print the argument error message
# and exit.
if len(sys.argv) != 3:
option_parser.error('Expected a single option and one keystore argument.')
return options
if __name__ == '__main__':
options = parse_options()
# Process the command-line option chosen by the user.
# 'tuf.RepositoryError' raised by the option's corresponding
# function if an error occurs. 'tuf.Error' raised if a valid
# option is not provided by the user.
try:
process_option(options)
except (tuf.RepositoryError, tuf.Error), e:
sys.stderr.write('Error: '+str(e))
sys.exit(1)
# The command-line option was processed successfully.
sys.exit(0)
```
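Each command-line option maps one-to-one onto a module-level function that takes the keystore directory and prompts interactively for everything else. A typical bootstrap sequence would therefore be a series of calls rather than a single one, as in the hypothetical sketch below; the './keystore' path is a placeholder, and every step reads passwords, the metadata directory, and a configuration file from stdin.
```python
# Hypothetical bootstrap of a repository's top-level metadata using the
# functions defined above. Every call is interactive (passwords, paths and
# expiration dates are read from stdin), so this is not scriptable as-is.
import tuf.repo.signercli as signercli

keystore = './keystore'
signercli.generate_rsa_key(keystore)         # --genrsakey
signercli.make_root_metadata(keystore)       # --makeroot
signercli.make_targets_metadata(keystore)    # --maketargets
signercli.make_release_metadata(keystore)    # --makerelease
signercli.make_timestamp_metadata(keystore)  # --maketimestamp
```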
#### File: tuf/tests/test_keystore.py
```python
import unittest
import shutil
import os
import logging
import tuf
import tuf.repo.keystore
import tuf.rsa_key
import tuf.formats
import tuf.util
import tuf.log
logger = logging.getLogger('tuf.test_keystore')
# We'll need json module for testing '_encrypt()' and '_decrypt()'
# internal function.
json = tuf.util.import_json()
# Creating a directory string in current directory.
_CURRENT_DIR = os.getcwd()
_DIR = os.path.join(_CURRENT_DIR, 'test_keystore')
# Check if directory '_DIR' exists.
if os.path.exists(_DIR):
msg = ('\''+_DIR+'\' directory already exists,'+
' please change '+'\'_DIR\''+' to something else.')
raise tuf.Error(msg)
KEYSTORE = tuf.repo.keystore
RSAKEYS = []
PASSWDS = []
temp_keys_info = []
temp_keys_vals = []
for i in range(3):
# Populating the original 'RSAKEYS' and 'PASSWDS' lists.
RSAKEYS.append(tuf.rsa_key.generate())
PASSWDS.append('passwd_'+str(i))
# Saving original copies of 'RSAKEYS' and 'PASSWDS' to temp variables
# in order to repopulate them at the start of every test.
temp_keys_info.append(RSAKEYS[i].values())
temp_keys_vals.append(RSAKEYS[i]['keyval'].values())
temp_passwds=list(PASSWDS)
class TestKeystore(unittest.TestCase):
def setUp(self):
# Returning 'RSAKEY' and 'PASSWDS' to original state.
for i in range(len(temp_keys_info)):
RSAKEYS[i]['keytype'] = temp_keys_info[i][0]
RSAKEYS[i]['keyid'] = temp_keys_info[i][1]
RSAKEYS[i]['keyval'] = temp_keys_info[i][2]
RSAKEYS[i]['keyval']['public'] = temp_keys_vals[i][0]
RSAKEYS[i]['keyval']['private'] = temp_keys_vals[i][1]
PASSWDS[i] = temp_passwds[i]
def tearDown(self):
# Empty keystore's databases.
KEYSTORE.clear_keystore()
# Check if directory '_DIR' exists, remove it if it does.
if os.path.exists(_DIR):
shutil.rmtree(_DIR)
def test_clear_keystore(self):
# Populate KEYSTORE's internal databases '_keystore' and '_key_passwords'.
for i in range(3):
KEYSTORE.add_rsakey(RSAKEYS[i], PASSWDS[i], RSAKEYS[i]['keyid'])
# Verify KEYSTORE's internal databases ARE NOT EMPTY.
self.assertTrue(len(KEYSTORE._keystore) > 0)
self.assertTrue(len(KEYSTORE._key_passwords) > 0)
# Clear KEYSTORE's internal databases.
KEYSTORE.clear_keystore()
# Verify KEYSTORE's internal databases ARE EMPTY.
self.assertFalse(len(KEYSTORE._keystore) > 0)
self.assertFalse(len(KEYSTORE._key_passwords) > 0)
def test_add_rsakey(self):
# Passing 2 arguments to the function and verifying that the internal
# databases have been modified.
KEYSTORE.add_rsakey(RSAKEYS[0], PASSWDS[0])
self.assertEqual(RSAKEYS[0], KEYSTORE._keystore[RSAKEYS[0]['keyid']],
'Adding an rsa key dict was unsuccessful.')
self.assertEqual(PASSWDS[0],
KEYSTORE._key_passwords[RSAKEYS[0]['keyid']],
'Adding a password pertaining to \'_keyid\' was unsuccessful.')
# Passing three arguments to the function, i.e. including the 'keyid'.
KEYSTORE.add_rsakey(RSAKEYS[1], PASSWDS[1], RSAKEYS[1]['keyid'])
self.assertEqual(RSAKEYS[1],
KEYSTORE._keystore[RSAKEYS[1]['keyid']],
'Adding an rsa key dict was unsuccessful.')
self.assertEqual(PASSWDS[1],
KEYSTORE._key_passwords[RSAKEYS[1]['keyid']],
'Adding a password pertaining to \'_keyid\' was unsuccessful.')
# Passing a keyid that does not match the keyid in 'rsakey_dict'.
_keyid = 'somedifferentkey123456789'
self.assertRaises(tuf.Error, KEYSTORE.add_rsakey, RSAKEYS[2],
PASSWDS[2], _keyid)
# Passing an existing 'rsakey_dict' object.
self.assertRaises(tuf.KeyAlreadyExistsError, KEYSTORE.add_rsakey,
RSAKEYS[1], PASSWDS[1], RSAKEYS[1]['keyid'])
# Passing an 'rsakey_dict' that does not conform to the 'RSAKEY_SCHEMA'.
del RSAKEYS[2]['keytype']
self.assertRaises(tuf.FormatError, KEYSTORE.add_rsakey,
RSAKEYS[2], PASSWDS[2], RSAKEYS[2]['keyid'])
def test_save_keystore_to_keyfiles(self):
# Extract and store keyids in '_keyids' list.
keyids = []
# Populate KEYSTORE's internal databases '_keystore' and '_key_passwords'.
for i in range(3):
KEYSTORE.add_rsakey(RSAKEYS[i], PASSWDS[i], RSAKEYS[i]['keyid'])
keyids.append(RSAKEYS[i]['keyid'])
# Check if directory '_DIR' exists, remove it if it does.
if os.path.exists(_DIR):
shutil.rmtree(_DIR)
KEYSTORE.save_keystore_to_keyfiles(_DIR)
# Check if directory '_DIR' has been created.
self.assertTrue(os.path.exists(_DIR), 'Creating directory failed.')
# Check that all of the key files were created and that they are not empty.
for keyid in keyids:
key_file = os.path.join(_DIR, str(keyid)+'.key')
# Checks if key file has been created.
self.assertTrue(os.path.exists(key_file), 'Key file does not exist.')
file_stats = os.stat(key_file)
# Checks if key file is not empty.
self.assertTrue(file_stats.st_size > 0)
# Passing an invalid 'directory_name' argument - an integer value.
self.assertRaises(tuf.FormatError, KEYSTORE.save_keystore_to_keyfiles, 222)
def test_load_keystore_from_keyfiles(self):
keyids = []
# Check if '_DIR' directory exists, if not - create it.
if not os.path.exists(_DIR):
# Populate KEYSTORE's internal databases.
for i in range(3):
KEYSTORE.add_rsakey(RSAKEYS[i], PASSWDS[i], RSAKEYS[i]['keyid'])
keyids.append(RSAKEYS[i]['keyid'])
# Create the key files.
KEYSTORE.save_keystore_to_keyfiles(_DIR)
# Clearing internal databases.
KEYSTORE.clear_keystore()
# Test normal conditions where two valid arguments are passed.
loaded_keys = KEYSTORE.load_keystore_from_keyfiles(_DIR, keyids, PASSWDS)
# Loaded keys should all be contained in 'keyids'.
loaded_keys_set = set(loaded_keys)
keyids_set = set(keyids)
intersect = keyids_set.intersection(loaded_keys_set)
self.assertEquals(len(intersect), len(keyids))
for i in range(3):
self.assertEqual(RSAKEYS[i], KEYSTORE._keystore[RSAKEYS[i]['keyid']])
# Clearing internal databases.
KEYSTORE.clear_keystore()
_invalid_dir = os.path.join(_CURRENT_DIR, 'invalid_directory')
# Passing an invalid 'directory_name' argument - a directory that
# does not exist. AS EXPECTED, THIS CALL SHOULDN'T RAISE ANY ERRORS.
KEYSTORE.load_keystore_from_keyfiles(_invalid_dir, keyids, PASSWDS)
# The keystore should not have loaded any keys.
self.assertEqual(0, len(KEYSTORE._keystore))
self.assertEqual(0, len(KEYSTORE._key_passwords))
# Passing nonexistent 'keyids'.
# AS EXPECTED, THIS CALL SHOULDN'T RAISE ANY ERRORS.
invalid_keyids = ['333', '333', '333']
KEYSTORE.load_keystore_from_keyfiles(_DIR, invalid_keyids, PASSWDS)
# The keystore should not have loaded any keys.
self.assertEqual(0, len(KEYSTORE._keystore))
self.assertEqual(0, len(KEYSTORE._key_passwords))
# Passing an invalid 'directory_name' argument - an integer value.
self.assertRaises(tuf.FormatError, KEYSTORE.load_keystore_from_keyfiles,
333, keyids, PASSWDS)
# Passing an invalid 'passwords' argument - a string value.
self.assertRaises(tuf.FormatError, KEYSTORE.load_keystore_from_keyfiles,
_DIR, keyids, '333')
# Passing an invalid 'passwords' argument - an integer value.
self.assertRaises(tuf.FormatError, KEYSTORE.load_keystore_from_keyfiles,
_DIR, keyids, 333)
# Passing an invalid 'keyids' argument - a string value.
self.assertRaises(tuf.FormatError, KEYSTORE.load_keystore_from_keyfiles,
_DIR, '333', PASSWDS)
# Passing an invalid 'keyids' argument - an integer value.
self.assertRaises(tuf.FormatError, KEYSTORE.load_keystore_from_keyfiles,
_DIR, 333, PASSWDS)
def test_change_password(self):
# Populate KEYSTORE's internal databases.
for i in range(2):
KEYSTORE.add_rsakey(RSAKEYS[i], PASSWDS[i], RSAKEYS[i]['keyid'])
# Create a new password.
new_passwd = '<PASSWORD>'
# Change a password - normal case.
KEYSTORE.change_password(RSAKEYS[0]['keyid'], PASSWDS[0], new_passwd)
# Check if password was changed.
self.assertNotEqual(KEYSTORE._key_passwords[RSAKEYS[0]['keyid']],
PASSWDS[0])
self.assertEqual(KEYSTORE._key_passwords[RSAKEYS[0]['keyid']],
new_passwd)
# Passing an invalid keyid, i.e. RSAKEYS[2], which was not loaded into
# the '_keystore'.
self.assertRaises(tuf.UnknownKeyError, KEYSTORE.change_password,
RSAKEYS[2]['keyid'], PASSWDS[1], new_passwd)
# Passing an incorrect old password.
self.assertRaises(tuf.BadPasswordError, KEYSTORE.change_password,
RSAKEYS[1]['keyid'], PASSWDS[2], new_passwd)
def test_get_key(self):
# Populate KEYSTORE's internal databases.
for i in range(2):
KEYSTORE.add_rsakey(RSAKEYS[i], PASSWDS[i], RSAKEYS[i]['keyid'])
# Get a key - normal case.
self.assertEqual(KEYSTORE.get_key(RSAKEYS[0]['keyid']), RSAKEYS[0])
# Passing an invalid keyid.
self.assertRaises(tuf.UnknownKeyError,
KEYSTORE.get_key, RSAKEYS[2]['keyid'])
# Passing an invalid keyid format.
self.assertRaises(tuf.FormatError, KEYSTORE.get_key, 123)
def test_internal_encrypt(self):
# Test for valid arguments to '_encrypt()' and a valid return type.
encrypted_key = KEYSTORE._encrypt(json.dumps(RSAKEYS[0]), PASSWDS[0])
self.assertEqual(type(encrypted_key), str)
# Test for invalid arguments to _encrypt().
self.assertRaises(tuf.CryptoError, KEYSTORE._encrypt, '', PASSWDS[0])
self.assertRaises(tuf.CryptoError, KEYSTORE._encrypt,
json.dumps(RSAKEYS[0]), '')
def test_internal_decrypt(self):
del RSAKEYS[0]['keyid']
tuf.formats.KEY_SCHEMA.check_match(RSAKEYS[0])
# Getting a valid encrypted key using '_encrypt()'.
encrypted_key = KEYSTORE._encrypt(json.dumps(RSAKEYS[0]), PASSWDS[0])
# Decrypting and decoding (using json's loads()) an encrypted file.
tuf.util.load_json_string(KEYSTORE._decrypt(encrypted_key, PASSWDS[0]))
self.assertEqual(RSAKEYS[0], tuf.util.load_json_string(
KEYSTORE._decrypt(encrypted_key, PASSWDS[0])))
# Passing an invalid password to try to decrypt the file.
self.assertRaises(tuf.CryptoError, KEYSTORE._decrypt,
encrypted_key, PASSWDS[1])
# Run the unit tests.
if __name__ == '__main__':
unittest.main()
```
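Distilled from the test cases above, a minimal keystore round trip could look like the sketch below; the module-level API (add_rsakey, save_keystore_to_keyfiles, load_keystore_from_keyfiles, get_key) is assumed to behave exactly as the tests exercise it, and the directory name and password are placeholders.
```python
# Sketch only: save one generated RSA key to disk and load it back.
import tuf.rsa_key
import tuf.repo.keystore as keystore

key = tuf.rsa_key.generate()
keystore.add_rsakey(key, 'example password', key['keyid'])
keystore.save_keystore_to_keyfiles('./example_keystore')   # writes <keyid>.key
keystore.clear_keystore()
keystore.load_keystore_from_keyfiles('./example_keystore',
                                     [key['keyid']], ['example password'])
assert keystore.get_key(key['keyid']) == key
```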
#### File: tuf/tests/test_quickstart.py
```python
import os
import shutil
import unittest
import logging
import tuf
import tuf.log
import tuf.repo.quickstart as quickstart
import tuf.util
import tuf.tests.unittest_toolbox
logger = logging.getLogger('tuf.test_quickstart')
unit_tbox = tuf.tests.unittest_toolbox.Modified_TestCase
logger.info('from test_quickstart')
class TestQuickstart(unit_tbox):
def test_1_get_password(self):
# SETUP
original_getpass = quickstart.getpass.getpass
# A quick test of _get_password.
password = <PASSWORD>()
def _mock_getpass(junk1, junk2, pw = password):
return pw
# Monkey patch getpass.getpass().
quickstart.getpass.getpass = _mock_getpass
# Run _get_password().
self.assertEqual(quickstart._get_password(), password)
# RESTORE
quickstart.getpass.getpass = original_getpass
def test_2_build_repository(self):
# SETUP
original_prompt = quickstart._prompt
original_get_password = quickstart._get_password
# Create the project directories.
repo_dir = os.path.join(os.getcwd(), 'repository')
keystore_dir = os.path.join(os.getcwd(), 'keystore')
client_dir = os.path.join(os.getcwd(), 'client')
proj_files = self.make_temp_directory_with_data_files()
proj_dir = os.path.join(proj_files[0], 'targets')
input_dict = {'expiration':'12/12/2013',
'root':{'threshold':1, 'password':'<PASSWORD>'},
'targets':{'threshold':1, 'password':'<PASSWORD>'},
'release':{'threshold':1, 'password':'<PASSWORD>'},
'timestamp':{'threshold':1, 'password':'<PASSWORD>'}}
def _mock_prompt(message, confirm=False, input_parameters=input_dict):
if message.startswith('\nWhen would you like your '+
'"root.txt" metadata to expire?'):
return input_parameters['expiration']
for role in self.role_list: # role_list=['root', 'targets', ...]
if message.startswith('\nEnter the desired threshold '+
'for the role '+repr(role)):
return input_parameters[role]['threshold']
elif message.startswith('Enter a password for '+repr(role)):
for threshold in range(input_parameters[role]['threshold']):
if message.endswith(repr(role)+' ('+str(threshold+1)+'): '):
return input_parameters[role]['password']
print 'Cannot recognize message: '+message
# Monkey patching quickstart's _prompt() and _get_password.
quickstart._prompt = _mock_prompt
quickstart._get_password = _mock_prompt
def _remove_repository_directories(repo_dir, keystore_dir, client_dir):
"""
quickstart.py creates the 'client', 'keystore', and 'repository'
directories in the current working directory. Remove these
directories after every quickstart.build_repository() call.
"""
try:
shutil.rmtree(repo_dir)
shutil.rmtree(keystore_dir)
shutil.rmtree(client_dir)
except OSError, e:
pass
# TESTS
# TEST: various input parameters.
# Supplying bogus expiration.
input_dict['expiration'] = '5/8/2011'
self.assertRaises(tuf.RepositoryError, quickstart.build_repository,
proj_dir)
# Random string.
input_dict['expiration'] = self.random_string()
self.assertRaises(tuf.RepositoryError, quickstart.build_repository,
proj_dir)
_remove_repository_directories(repo_dir, keystore_dir, client_dir)
# Restore expiration.
input_dict['expiration'] = '10/10/2013'
# Supplying bogus 'root' threshold. Doing this for all roles slows
# the test significantly.
input_dict['root']['threshold'] = self.random_string()
self.assertRaises(tuf.RepositoryError, quickstart.build_repository,
proj_dir)
_remove_repository_directories(repo_dir, keystore_dir, client_dir)
input_dict['root']['threshold'] = 0
self.assertRaises(tuf.RepositoryError, quickstart.build_repository,
proj_dir)
_remove_repository_directories(repo_dir, keystore_dir, client_dir)
# Restore the root threshold.
input_dict['root']['threshold'] = 1
# TEST: normal case.
try:
quickstart.build_repository(proj_dir)
except Exception, e:
raise
# Verify the existence of metadata, target, and keystore files.
meta_dir = os.path.join(repo_dir, 'metadata')
targets_dir = os.path.join(repo_dir, 'targets')
client_current_meta_dir = os.path.join(client_dir, 'metadata', 'current')
client_previous_meta_dir = os.path.join(client_dir, 'metadata', 'previous')
target_files = os.listdir(targets_dir)
# Verify repository, keystore, metadata, and targets directories.
self.assertTrue(os.path.exists(repo_dir))
self.assertTrue(os.path.exists(keystore_dir))
self.assertTrue(os.path.exists(meta_dir))
self.assertTrue(os.path.exists(targets_dir))
self.assertTrue(os.path.exists(client_current_meta_dir))
self.assertTrue(os.path.exists(client_previous_meta_dir))
# Verify that target_files exist.
self.assertTrue(target_files)
for role in self.role_list:
meta_file = role+'.txt'
# Verify metadata file for a 'role'.
self.assertTrue(os.path.isfile(os.path.join(meta_dir, meta_file)))
# Get the metadata.
signable = tuf.util.load_json_file(os.path.join(meta_dir, meta_file))
for signature in range(len(signable['signatures'])):
# Extract a keyid.
keyid = signable['signatures'][signature]['keyid']
key_file = os.path.join(keystore_dir, keyid+'.key')
# Verify existence of a key for the keyid that belongs to the 'role'.
self.assertTrue(os.path.isfile(key_file))
_remove_repository_directories(repo_dir, keystore_dir, client_dir)
# RESTORE
quickstart._prompt = original_prompt
quickstart._get_password = <PASSWORD>
# Run the unit tests.
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "0xDEC0DE/bashdoctest",
"score": 2
} |
#### File: bashdoctest/tests/test_testfile.py
```python
from __future__ import absolute_import
from bashdoctest import Runner
from bashdoctest.validators import (
SubprocessValidator)
def testfile():
tester = Runner()
tester.call_engines['echo'] = SubprocessValidator()
tester.call_engines['python'] = SubprocessValidator()
tester.call_engines['cat'] = SubprocessValidator()
tester.testfile('tests/resources/sample_doc.txt')
```
#### File: bashdoctest/tests/test_validator.py
```python
import os
import click
import pytest
from click.testing import CliRunner
from bashdoctest import Runner
from bashdoctest.validators import (
ClickValidator,
SubprocessValidator,
SkipValidator)
@click.command()
@click.argument('name')
def hello(name):
click.echo('Hello %s!' % name)
@click.command()
@click.argument('name')
def badcmd(name):
raise ValueError("This command doesn't work!")
def test_hello_world():
runner = CliRunner()
result = runner.invoke(hello, ['Peter'])
assert result.exit_code == 0
assert result.output == 'Hello Peter!\n'
def test_string_command():
teststr = '''
.. code-block:: bash
$ hello Polly
Hello Polly!
$ hello Polly Parrot
Usage: hello [OPTIONS] NAME
<BLANKLINE>
Error: Got unexpected extra argument (Parrot)
$ hello '<NAME>'
Hello Polly Parrot!
'''
tester = Runner()
tester.call_engines['hello'] = ClickValidator(hello)
tester.teststring(teststr)
def test_bad_command():
badstr = '''
.. code-block:: bash
$ badcmd Polly # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: This command doesn't work!
'''
tester = Runner()
tester.call_engines['badcmd'] = ClickValidator(badcmd)
tester.teststring(badstr)
def test_validator():
teststr = r'''
.. code-block:: bash
$ hello Polly
Hello Polly!
$ echo 'Pining for the fjords' # doctest: +NORMALIZE_WHITESPACE
Pining for the fjords
Pipes don't work, so we can't redirect this value into a file. But we can
write a file with python:
.. code-block:: bash
$ python -c \
> "with open('tmp.txt', 'w+') as f: f.write('Pushing up daisies')"
$ cat tmp.txt
Pushing up daisies
'''
tester = Runner()
tester.call_engines['hello'] = ClickValidator(hello)
tester.call_engines['echo'] = SubprocessValidator()
tester.call_engines['python'] = SubprocessValidator()
tester.call_engines['cat'] = SubprocessValidator()
tester.teststring(teststr)
badstr = '''
The following block of code should cause an error:
.. code-block:: bash
$ rm tmp.txt
'''
with pytest.raises(ValueError):
tester.teststring(badstr)
os.remove('tmp.txt')
def test_skipper():
skipstr = '''
The following command will be skipped:
.. code-block:: bash
$ aws storage buckets list
'''
tester = Runner()
tester.call_engines['aws'] = SkipValidator()
tester.teststring(skipstr)
noskip = '''
Unrecognized commands will raise an error, even if +SKIP is specified
.. code-block:: bash
$ nmake all # doctest: +SKIP
$ echo 'I made it!'
I made it!
'''
tester.teststring(noskip)
def test_string_failure():
teststr = r'''
Lines failing to match the command's output will raise an error
.. code-block:: bash
$ echo "There, it moved!"
"No it didn't!"
'''
tester = Runner()
tester.call_engines['echo'] = SubprocessValidator()
with pytest.raises(ValueError):
tester.teststring(teststr)
def test_skip():
teststr = r'''
Of course, you can always skip them!
.. code-block:: bash
$ echo "There, it moved!" # doctest: +SKIP
"No it didn't!"
'''
tester = Runner()
tester.call_engines['echo'] = SubprocessValidator()
tester.teststring(teststr)
``` |
{
"source": "0xDEC0DE/spinach",
"score": 3
} |
#### File: spinach/examples/flaskapp.py
```python
from flask import Flask
from spinach.contrib.flask_spinach import Spinach
app = Flask(__name__)
spinach = Spinach(app)
@spinach.task(name='say_hello')
def say_hello():
print('Hello from a task')
@app.route('/')
def home():
spinach.schedule('say_hello')
return 'Hello from HTTP'
```
#### File: spinach/examples/periodic.py
```python
from datetime import timedelta
from spinach import Engine, MemoryBroker
spin = Engine(MemoryBroker())
every_5_sec = timedelta(seconds=5)
@spin.task(name='make_coffee', periodicity=every_5_sec)
def make_coffee():
print("Making coffee...")
print('Starting workers, ^C to quit')
spin.start_workers()
```
#### File: spinach/brokers/base.py
```python
from abc import ABC, abstractmethod
from datetime import datetime, timezone
from logging import getLogger
import platform
import threading
import time
from typing import Optional, Iterable, List, Tuple, Dict, Union
import uuid
from ..job import Job
from ..task import Task
from ..const import WAIT_FOR_EVENT_MAX_SECONDS
logger = getLogger('spinach.broker')
class Broker(ABC):
def __init__(self):
# Event that is set whenever:
# - a job is enqueued in the main queue (to allow to fetch it)
# - a job has been finished (to allow to fetch a new one)
# - a future job is put in the waiting queue (to move it)
# - the broker is stopping
# It allows the Engine to wait for these things.
self._something_happened = threading.Event()
self._namespace = None
self._id = uuid.uuid4()
self._broker_info = {
'id': str(self._id),
'name': platform.node(),
'started_at': int(time.time())
}
def wait_for_event(self):
next_future_job_delta = self.next_future_job_delta
if next_future_job_delta is None:
next_future_job_delta = WAIT_FOR_EVENT_MAX_SECONDS
next_future_periodic_delta = self.next_future_periodic_delta
if next_future_periodic_delta is None:
next_future_periodic_delta = WAIT_FOR_EVENT_MAX_SECONDS
timeout = min(
next_future_job_delta,
next_future_periodic_delta,
WAIT_FOR_EVENT_MAX_SECONDS
)
if self._something_happened.wait(timeout=timeout):
self._something_happened.clear()
def start(self):
"""Start the broker.
Only needed by arbiter.
"""
def stop(self):
"""Stop the broker.
Only needed by arbiter.
"""
self._something_happened.set()
@property
def namespace(self) -> str:
if not self._namespace:
raise RuntimeError('Namespace must be set before using the broker')
return self._namespace
@namespace.setter
def namespace(self, value: str):
if self._namespace:
raise RuntimeError('The namespace can only be set once')
self._namespace = value
self._broker_info['namespace'] = value
def _to_namespaced(self, value: str) -> str:
return '{}/{}'.format(self.namespace, value)
@abstractmethod
def register_periodic_tasks(self, tasks: Iterable[Task]):
"""Register tasks that need to be scheduled periodically."""
@abstractmethod
def set_concurrency_keys(self, tasks: Iterable[Task]):
"""Register concurrency data for Tasks.
Set up anything in the Broker that is required to track
concurrency on Tasks, where a Task defines max_concurrency.
"""
@abstractmethod
def is_queue_empty(self, queue: str) -> bool:
"""Return True if the provided queue is empty."""
@abstractmethod
def inspect_periodic_tasks(self) -> List[Tuple[int, str]]:
"""Get the next periodic task schedule.
Used only for debugging and during tests.
"""
@abstractmethod
def enqueue_jobs(self, jobs: Iterable[Job], from_failure: bool):
"""Enqueue a batch of jobs."""
@abstractmethod
def remove_job_from_running(self, job: Job):
"""Remove a job from the list of running ones."""
@abstractmethod
def get_jobs_from_queue(self, queue: str, max_jobs: int) -> List[Job]:
"""Get jobs from a queue."""
@abstractmethod
def move_future_jobs(self) -> int:
"""Perform periodic management of the broker and the queues.
This method originally only moved future jobs, but it expanded to
perform other actions related to maintenance of brokers' data:
- Moves ready jobs from the future queue to their normal queues
- Enqueue periodic tasks that are due
- Perform broker keepalive
Note: This method may be called very often. In the future it would be
preferable to decouple it from the retrieval of jobs from the queue.
:returns the number of jobs moved
"""
@abstractmethod
def _get_next_future_job(self) -> Optional[Job]:
"""Get the next future job."""
@property
def next_future_job_delta(self) -> Optional[float]:
"""Give the amount of seconds before the next future job is due."""
job = self._get_next_future_job()
if not job:
return None
return (job.at - datetime.now(timezone.utc)).total_seconds()
@property
@abstractmethod
def next_future_periodic_delta(self) -> Optional[float]:
"""Give the amount of seconds before the next periodic task is due."""
@abstractmethod
def flush(self):
"""Delete everything in the namespace."""
@abstractmethod
def get_all_brokers(self) -> List[Dict[str, Union[None, str, int]]]:
"""Return all registered brokers."""
@abstractmethod
def enqueue_jobs_from_dead_broker(self, dead_broker_id: uuid.UUID) -> int:
"""Re-enqueue the jobs that were running on a broker.
Only jobs that can be retried are moved back to the queue; the others
are lost as expected.
Both the current broker and the dead one must use the same namespace.
This method is called automatically on brokers that are identified
as dead by Spinach, but it can also be called from user code.
If someone has a better system to detect dead processes (monitoring,
Consul, etcd...) this method can be called with the ID of the dead
broker to re-enqueue jobs before Spinach notices that the broker is
actually dead, which takes 30 minutes by default.
:param dead_broker_id: UUID of the dead broker.
:return: Number of jobs that were moved back to the queue.
"""
def _get_broker_info(self) -> Dict[str, Union[None, str, int]]:
rv = self._broker_info.copy()
rv['last_seen_at'] = int(time.time())
return rv
def __repr__(self):
return '<{}: {}>'.format(self.__class__.__name__, self._id)
```
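The enqueue_jobs_from_dead_broker docstring above invites calling it from user code when an external system detects a dead worker sooner than Spinach would. A rough sketch of that pattern, assuming a concrete broker instance (the MemoryBroker used in the examples earlier) and treating the namespace and UUID as placeholders supplied by your own setup and monitoring:
```python
# Sketch: manually re-enqueue jobs from a broker known to be dead.
import uuid
from spinach import MemoryBroker

broker = MemoryBroker()
broker.namespace = 'my-app'      # must match the namespace of the dead broker
dead_broker_id = uuid.uuid4()    # placeholder: use the real dead broker's UUID
moved = broker.enqueue_jobs_from_dead_broker(dead_broker_id)
print('{} job(s) moved back to their queues'.format(moved))
```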
#### File: spinach/contrib/datadog.py
```python
from typing import Optional
from spinach import signals
def register_datadog(tracer=None, namespace: Optional[str]=None,
service: str='spinach'):
"""Register the Datadog integration.
:param tracer: optionally use a custom ddtrace Tracer instead of the global
one.
:param namespace: optionally only register the Datadog integration for a
particular Spinach :class:`Engine`
:param service: Datadog service associated with the trace, defaults to
`spinach`
"""
if tracer is None:
from ddtrace import tracer
@signals.job_started.connect_via(namespace)
def job_started(namespace, job, **kwargs):
tracer.trace(
'spinach.task', service=service, span_type='worker',
resource=job.task_name
)
@signals.job_finished.connect_via(namespace)
def job_finished(namespace, job, **kwargs):
root_span = tracer.current_root_span()
for attr in job.__slots__:
root_span.set_tag(attr, getattr(job, attr))
root_span.finish()
@signals.job_failed.connect_via(namespace)
def job_failed(namespace, job, **kwargs):
root_span = tracer.current_root_span()
root_span.set_traceback()
@signals.job_schedule_retry.connect_via(namespace)
def job_schedule_retry(namespace, job, **kwargs):
root_span = tracer.current_root_span()
root_span.set_traceback()
def register_datadog_if_module_patched(module: str, *args, **kwargs) -> bool:
"""Register the datadog integration if ddtrace is already used.
This can be used to enable datadog for Spinach only if datadog
is enabled for Django.
:param module: Name of the module that must already be patched
:return: boolean telling if the integration was registered
"""
try:
from ddtrace.monkey import get_patched_modules
except ImportError:
return False
if module not in get_patched_modules():
return False
register_datadog(*args, **kwargs)
return True
```
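A minimal wiring sketch for register_datadog; the Engine construction follows the examples earlier in this file set, ddtrace is assumed to be installed so the global tracer can be used, and the service name is arbitrary:
```python
# Sketch: enable Datadog tracing for Spinach tasks under a custom service name.
from spinach import Engine, MemoryBroker
from spinach.contrib.datadog import register_datadog

spin = Engine(MemoryBroker())
register_datadog(service='my-workers')   # omit namespace to cover all Engines
```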
#### File: spinach/contrib/sentry_sdk_spinach.py
```python
from sentry_sdk.hub import Hub
from sentry_sdk.integrations import Integration
from spinach import signals
class SpinachIntegration(Integration):
"""Register the Sentry SDK integration.
Exceptions making jobs fail are sent to Sentry and performance
tracing of Spinach tasks is enabled.
:param send_retries: whether to also send to Sentry exceptions resulting
in a job being retried
"""
identifier = 'spinach'
def __init__(self, send_retries: bool=False):
self.send_retries = send_retries
@staticmethod
def setup_once():
signals.job_started.connect(_job_started)
signals.job_finished.connect(_job_finished)
signals.job_failed.connect(_job_failed)
signals.job_schedule_retry.connect(_job_schedule_retry)
def _job_started(namespace, job, **kwargs):
hub = Hub.current
# Scopes are for error reporting
hub.push_scope()
with hub.configure_scope() as scope:
scope.transaction = job.task_name
scope.clear_breadcrumbs()
for attr in job.__slots__:
scope.set_extra(attr, getattr(job, attr))
# Transactions and spans are for tracing
transaction = hub.start_transaction(
op='task',
name=job.task_name
)
# Transactions are meant to be used as context managers,
# but that does not fit the signal-based approach well, so
# __enter__/__exit__ are called manually instead (see _job_finished).
transaction.__enter__()
def _job_finished(namespace, job, **kwargs):
hub = Hub.current
with hub.configure_scope() as scope:
for attr in job.__slots__:
scope.set_extra(attr, getattr(job, attr))
hub.scope.transaction.__exit__(None, None, None)
hub.pop_scope_unsafe()
def _job_failed(namespace, job, **kwargs):
hub = Hub.current
with hub.configure_scope() as scope:
for attr in job.__slots__:
scope.set_extra(attr, getattr(job, attr))
hub.capture_exception()
hub.scope.transaction.set_status("internal_error")
def _job_schedule_retry(namespace, job, **kwargs):
hub = Hub.current
with hub.configure_scope() as scope:
for attr in job.__slots__:
scope.set_extra(attr, getattr(job, attr))
integration = hub.get_integration(SpinachIntegration)
if integration is None:
return
if integration.send_retries:
hub.capture_exception()
```
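The integration above plugs into the regular sentry_sdk initialisation; a sketch with a placeholder DSN:
```python
# Sketch: report failed Spinach jobs to Sentry; retries are not reported here.
import sentry_sdk
from spinach.contrib.sentry_sdk_spinach import SpinachIntegration

sentry_sdk.init(
    dsn='https://<key>@<org>.ingest.sentry.io/<project>',   # placeholder DSN
    integrations=[SpinachIntegration(send_retries=False)],
)
```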
#### File: spinach/spinach/signals.py
```python
from logging import getLogger
import blinker
logger = getLogger(__name__)
__all__ = [
'job_started', 'job_finished', 'job_schedule_retry', 'job_failed',
'worker_started', 'worker_terminated'
]
class SafeNamedSignal(blinker.NamedSignal):
"""Named signal for misbehaving receivers."""
def send(self, *sender, **kwargs):
"""Emit this signal on behalf of `sender`, passing on kwargs.
This is an extension of `Signal.send` that changes one thing:
Exceptions raised in calling a receiver are logged but do not propagate, so the remaining receivers are still called.
"""
if len(sender) == 0:
sender = None
elif len(sender) > 1:
raise TypeError('send() accepts only one positional argument, '
'%s given' % len(sender))
else:
sender = sender[0]
if not self.receivers:
return []
rv = list()
for receiver in self.receivers_for(sender):
try:
rv.append((receiver, receiver(sender, **kwargs)))
except Exception:
logger.exception('Error while dispatching signal "{}" '
'to receiver'.format(self.name))
return rv
def __repr__(self):
return 'SafeNamedSignal "{}"'.format(self.name)
# Added signals must also be documented in doc/user/signals.rst
job_started = SafeNamedSignal('job_started', doc='''\
Sent by a worker when a job starts being executed.
Signal handlers receive:
- `namespace` Spinach namespace
- `job` :class:`Job` being executed
''')
job_finished = SafeNamedSignal('job_finished', doc='''\
Sent by a worker when a job finishes execution.
The signal is sent no matter the outcome, even if the job fails or gets
rescheduled for retry.
Signal handlers receive:
- `namespace` Spinach namespace
- `job` :class:`Job` being executed
''')
job_schedule_retry = SafeNamedSignal('job_schedule_retry', doc='''\
Sent by a worker when a job gets rescheduled for retry.
Signal handlers receive:
- `namespace` Spinach namespace
- `job` :class:`Job` being executed
- `err` exception that made the job retry
''')
job_failed = SafeNamedSignal('job_failed', doc='''\
Sent by a worker when a job failed.
A failed job will not be retried.
Signal handlers receive:
- `namespace` Spinach namespace
- `job` :class:`Job` being executed
- `err` exception that made the job fail
''')
worker_started = SafeNamedSignal('worker_started', doc='''\
Sent by a worker when it starts.
Signal handlers receive:
- `namespace` Spinach namespace
- `worker_name` name of the worker starting
''')
worker_terminated = SafeNamedSignal('worker_terminated', doc='''\
Sent by a worker when it shuts down.
Signal handlers receive:
- `namespace` Spinach namespace
- `worker_name` name of the worker shutting down
''')
```
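Receivers attach with the usual blinker connect API; a sketch of a handler for one of the signals documented above (the handler body is purely illustrative):
```python
# Sketch: log every failed job. The sender is the Spinach namespace and the
# keyword arguments are the ones listed in the signal docstrings above.
from spinach import signals

@signals.job_failed.connect
def log_failed_job(namespace, job, err, **kwargs):
    print('[{}] job {} failed: {}'.format(namespace, job, err))
```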
#### File: spinach/tests/conftest.py
```python
from datetime import datetime, timezone
import pytest
_now = datetime(2017, 9, 2, 8, 50, 56, 482169, timezone.utc)
@pytest.fixture
def patch_now(monkeypatch):
"""Patch datetime.datetime.
It is not possible to patch it like a normal Python object, so the
reference is replaced completely by a custom class.
The test function can get and set the fake time with get_now() and
set_now().
"""
global _now
# Reset the time before each test
_now = datetime(2017, 9, 2, 8, 50, 56, 482169, timezone.utc)
class MyDatetime:
@classmethod
def now(cls, tz=None):
# All code within Spinach shall create TZ aware datetime
assert tz == timezone.utc
return _now
@classmethod
def fromtimestamp(cls, *args, **kwargs):
return datetime.fromtimestamp(*args, **kwargs)
monkeypatch.setattr('spinach.brokers.base.datetime', MyDatetime)
monkeypatch.setattr('spinach.brokers.redis.datetime', MyDatetime)
monkeypatch.setattr('spinach.job.datetime', MyDatetime)
monkeypatch.setattr('spinach.engine.datetime', MyDatetime)
monkeypatch.setattr('spinach.task.datetime', MyDatetime)
def get_now() -> datetime:
return _now
def set_now(now: datetime):
global _now
if now.tzinfo is None:
# Make it a TZ aware datetime here for convenience to avoid overly
# verbose tests
now = now.replace(tzinfo=timezone.utc)
_now = now
```
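A sketch of a test using the fixture above; the import location of get_now/set_now is an assumption (they live next to the fixture in conftest.py), and the test body is illustrative:
```python
# Sketch: freeze and move the fake clock from inside a test.
from datetime import datetime, timezone
from tests.conftest import get_now, set_now   # assumed import path

def test_time_travel(patch_now):
    set_now(datetime(2020, 1, 1))   # naive datetime is made UTC-aware
    assert get_now() == datetime(2020, 1, 1, tzinfo=timezone.utc)
```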
#### File: tests/contrib/test_django.py
```python
import django
import django.conf
from django.core.mail import send_mail
from django.core.management import call_command
from spinach import MemoryBroker
# capsys fixture allows to capture stdout
def test_django_app(capsys):
django.conf.settings.configure(
LOGGING_CONFIG=None,
INSTALLED_APPS=('spinach.contrib.spinachd',),
EMAIL_BACKEND='spinach.contrib.spinachd.mail.BackgroundEmailBackend',
SPINACH_BROKER=MemoryBroker(),
SPINACH_ACTUAL_EMAIL_BACKEND='django.core.mail.backends.'
'console.EmailBackend'
)
django.setup()
from spinach.contrib.spinachd import spin
spin.schedule('spinachd:clear_expired_sessions')
send_mail('Subject', 'Hello from email', '<EMAIL>',
['<EMAIL>'])
call_command('spinach', '--stop-when-queue-empty')
captured = capsys.readouterr()
assert 'Hello from email' in captured.out
``` |
{
"source": "0xdecaff/starknet.py",
"score": 2
} |
#### File: starknet_py/net/client.py
```python
import asyncio
from typing import Optional, List, Dict, Union
from services.external_api.base_client import RetryConfig, BadRequest as BadRequestError
from starkware.starknet.definitions.fields import ContractAddressSalt
from starkware.starknet.services.api.contract_definition import ContractDefinition
from starkware.starknet.services.api.feeder_gateway.feeder_gateway_client import (
FeederGatewayClient,
CastableToHash,
JsonObject,
)
from starkware.starknet.services.api.gateway.gateway_client import GatewayClient
from starkware.starkware_utils.error_handling import StarkErrorCode
from starknet_py.constants import TxStatus, ACCEPTED_STATUSES
from starknet_py.utils.sync import add_sync_methods
from starknet_py.net.models import (
InvokeFunction,
Transaction,
Deploy,
StarknetChainId,
chain_from_network,
)
from starknet_py.net.networks import Network, net_address_from_net
BadRequest = BadRequestError
@add_sync_methods
class Client:
def __init__(
self, net: Network, chain: StarknetChainId = None, n_retries: Optional[int] = 1
):
"""
:param net: Target network for the client. Can be a string with URL or one of ``"mainnet"``, ``"testnet"``
:param chain: Chain used by the network. Required if you use a custom URL for ``net`` param.
:param n_retries: Number of retries client will attempt before failing a request
"""
host = net_address_from_net(net)
retry_config = RetryConfig(n_retries)
feeder_gateway_url = f"{host}/feeder_gateway"
self.chain = chain_from_network(net, chain)
self._feeder_gateway = FeederGatewayClient(
url=feeder_gateway_url, retry_config=retry_config
)
gateway_url = f"{host}/gateway"
self._gateway = GatewayClient(url=gateway_url, retry_config=retry_config)
# View methods
async def get_contract_addresses(self) -> Dict[str, str]:
"""
:return: Dictionary containing all of the contract's addresses
"""
return await self._feeder_gateway.get_contract_addresses()
async def call_contract(
self,
invoke_tx: InvokeFunction,
block_hash: Optional[CastableToHash] = None,
block_number: Optional[int] = None,
) -> List[int]:
"""
Calls the contract with given instance of InvokeTransaction
:param invoke_tx: The invoke transaction
:param block_hash: Block hash to execute the contract at a specific point in time
:param block_number: Block number to execute the contract at
:return: List of integers representing contract's function output (structured like calldata)
"""
response = await self._feeder_gateway.call_contract(
invoke_tx,
block_hash,
block_number,
)
return [int(v, 16) for v in response["result"]]
async def get_block(
self,
block_hash: Optional[CastableToHash] = None,
block_number: Optional[int] = None,
) -> JsonObject:
"""
Retrieve the block's data by its number or hash
:param block_hash: Block's hash
:param block_number: Block's number
:return: Dictionary with block's transactions
"""
return await self._feeder_gateway.get_block(block_hash, block_number)
async def get_code(
self,
contract_address: int,
block_hash: Optional[CastableToHash] = None,
block_number: Optional[int] = None,
) -> dict:
"""
Retrieve contract's bytecode and abi.
:raises BadRequest: when contract is not found
:param contract_address: Address of the contract on Starknet
:param block_hash: Get code at specific block hash
:param block_number: Get code at given block number
:return: JSON representation of compiled: {"bytecode": list, "abi": dict}
"""
code = await self._feeder_gateway.get_code(
contract_address,
block_hash,
block_number,
)
if len(code["bytecode"]) == 0:
raise BadRequest(
200, f"Contract with address {contract_address} was not found."
)
return code
async def get_storage_at(
self,
contract_address: int,
key: int,
block_hash: Optional[CastableToHash] = None,
block_number: Optional[int] = None,
) -> str:
"""
:param contract_address: Contract's address on Starknet
:param key: An address of the storage variable inside of the contract.
Can be retrieved using ``starkware.starknet.public.abi.get_storage_var_address(<name>)``
:param block_hash: Fetches the value of the variable at given block hash
:param block_number: See above, uses block number instead of hash
:return: Storage value of given contract
"""
return await self._feeder_gateway.get_storage_at(
contract_address,
key,
block_hash,
block_number,
)
async def get_transaction_status(
self, tx_hash: Optional[CastableToHash], tx_id: Optional[int] = None
) -> JsonObject:
"""
:param tx_hash: Transaction's hash
:param tx_id: Transaction's index
:return: Dictionary containing tx's status
"""
return await self._feeder_gateway.get_transaction_status(
tx_hash,
tx_id,
)
async def get_transaction(
self, tx_hash: Optional[CastableToHash], tx_id: Optional[int] = None
) -> JsonObject:
"""
:param tx_hash: Transaction's hash
:param tx_id: Transaction's index
:return: Dictionary representing JSON of the transaction on Starknet
"""
return await self._feeder_gateway.get_transaction(
tx_hash,
tx_id,
)
async def get_transaction_receipt(
self, tx_hash: Optional[CastableToHash], tx_id: Optional[int] = None
) -> JsonObject:
"""
:param tx_hash: Transaction's hash
:param tx_id: Transaction's index
:return: Dictionary representing JSON of the transaction's receipt on Starknet
"""
return await self._feeder_gateway.get_transaction_receipt(
tx_hash,
tx_id,
)
async def wait_for_tx(
self,
tx_hash: Optional[CastableToHash],
wait_for_accept: Optional[bool] = False,
check_interval=5,
) -> (int, TxStatus):
"""
Wait for a transaction to be accepted, or at least pending, by polling its status.
:param tx_hash: Transaction's hash
:param wait_for_accept: If true waits for ACCEPTED_ONCHAIN status, otherwise waits for at least PENDING
:param check_interval: Defines interval between checks
:return: tuple(block number, ``starknet.constants.TxStatus``)
"""
if check_interval <= 0:
raise ValueError("check_interval has to bigger than 0.")
first_run = True
while True:
result = await self.get_transaction(tx_hash=tx_hash)
status = TxStatus[result["status"]]
if status in ACCEPTED_STATUSES:
return result["block_number"], status
if status == TxStatus.PENDING:
if not wait_for_accept and "block_number" in result:
return result["block_number"], status
elif status == TxStatus.REJECTED:
raise Exception(f"Transaction [{tx_hash}] was rejected.")
elif status == TxStatus.NOT_RECEIVED:
if not first_run:
raise Exception(f"Transaction [{tx_hash}] was not received.")
elif status != TxStatus.RECEIVED:
raise Exception(f"Unknown status [{status}]")
first_run = False
await asyncio.sleep(check_interval)
# Mutating methods
async def add_transaction(
self, tx: Transaction, token: Optional[str] = None
) -> Dict[str, int]:
"""
:param tx: Transaction object (i.e. InvokeFunction, Deploy).
A subclass of ``starkware.starknet.services.api.gateway.transaction.Transaction``
:param token: Optional token for Starknet API access, appended in a query string
:return: Dictionary with `code`, `transaction_hash`
"""
return await self._gateway.add_transaction(tx, token)
async def deploy(
self,
compiled_contract: Union[ContractDefinition, str],
constructor_calldata: List[int],
salt: Optional[int] = None,
) -> dict:
if isinstance(compiled_contract, str):
compiled_contract = ContractDefinition.loads(compiled_contract)
res = await self.add_transaction(
tx=Deploy(
contract_address_salt=ContractAddressSalt.get_random_value()
if salt is None
else salt,
contract_definition=compiled_contract,
constructor_calldata=constructor_calldata,
)
)
if res["code"] != StarkErrorCode.TRANSACTION_RECEIVED.name:
raise Exception("Transaction not received")
return res
```
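A small asynchronous usage sketch for the Client above; the import path is an assumption that mirrors the AccountClient import used in the e2e test below, and 'testnet' is one of the predefined networks:
```python
# Sketch: query the feeder gateway for the system contract addresses.
import asyncio
from starknet_py.net import Client   # assumed export, mirrors AccountClient

async def main():
    client = Client('testnet')
    addresses = await client.get_contract_addresses()
    print(addresses)

asyncio.run(main())
```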
#### File: e2e/account/account_client_test.py
```python
import os.path
from pathlib import Path
import pytest
from starknet_py.contract import Contract
from starknet_py.net import AccountClient
from starknet_py.net.models import StarknetChainId
directory = os.path.dirname(__file__)
map_source_code = Path(directory, "map.cairo").read_text("utf-8")
@pytest.mark.asyncio
async def test_deploy_account_contract_and_sign_tx():
acc_client = await AccountClient.create_account(
net="http://localhost:5000/", chain=StarknetChainId.TESTNET
)
map_contract = await Contract.deploy(
client=acc_client, compilation_source=map_source_code
)
k, v = 13, 4324
await map_contract.functions["put"].invoke(k, v)
(resp,) = await map_contract.functions["get"].call(k)
assert resp == v
``` |
{
"source": "0xDECEA5ED/solitaire-player",
"score": 4
} |
#### File: pysolvers/solvers/pyramid.py
```python
import collections
import solvers.deck
def card_value(card):
"""Return the card's numeric value according to Pyramid Solitaire rules.
Aces are always 1, Jacks are 11, Queens are 12, and Kings are 13."""
return 1 + "A23456789TJQK".index(solvers.deck.card_rank(card))
def cards_are_removable(card1, card2=None):
"""Return true if the card or cards can be removed together.
Kings can be removed by themselves, and pairs of cards that add to 13."""
values = [card_value(c) if c else 0 for c in [card1, card2]]
return sum(values) == 13
class State:
"""A state in Pyramid Solitaire, represented by a 60-bit integer value.
This class only has static methods, meant to be called on integer values.
The reason is to save as much memory as possible (we'll be creating tens
of millions of these).
It's tempting to represent the state as lists of cards in the tableau,
stock, and waste piles, but it's too slow and memory usage is too high.
The trick to this state representation is that it holds data that refers
to the deck of cards, without containing a reference to the deck. So we
need the deck of cards to understand the state of the game.
Bits 0-51: "deck_flags" - 52 bits representing whether or not each card
in the deck remains in the game.
Bits 52-57: "stock_index" - 6 bits containing a number from 28 to 52,
an index into the deck for the card at the top of the stock
pile. Cards with index higher than this are the remainder of
the stock pile. Cards with index below this (and above 27) are
the cards in the waste pile. Hint for understanding how it
works: incrementing this stock index moves the top card of the
stock pile to the top of the waste pile.
Bits 58-59: 2 bits to indicate how many times the waste pile has been
recycled."""
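# Worked example of the layout above: INITIAL_STATE is
# (28 << 52) | (2**52 - 1), i.e. bits 0-51 are all set (all 52 cards are
# still in play), the stock index field holds 28 (deck[28] is the top of
# the stock pile, so the waste pile is empty), and the cycle field is 0
# (the waste pile has not been recycled yet).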
EMPTY_STOCK = 52
EMPTY_WASTE = 27
INITIAL_STATE = (28 << 52) | ((2**52) - 1)
# bits set on the Nth tableau card and the cards covering it from below
UNCOVERED_MASKS = [
0b1111111111111111111111111111,
0b0111111011111011110111011010,
0b1111110111110111101110110100,
0b0011111001111001110011001000,
0b0111110011110011100110010000,
0b1111100111100111001100100000,
0b0001111000111000110001000000,
0b0011110001110001100010000000,
0b0111100011100011000100000000,
0b1111000111000110001000000000,
0b0000111000011000010000000000,
0b0001110000110000100000000000,
0b0011100001100001000000000000,
0b0111000011000010000000000000,
0b1110000110000100000000000000,
0b0000011000001000000000000000,
0b0000110000010000000000000000,
0b0001100000100000000000000000,
0b0011000001000000000000000000,
0b0110000010000000000000000000,
0b1100000100000000000000000000,
0b0000001000000000000000000000,
0b0000010000000000000000000000,
0b0000100000000000000000000000,
0b0001000000000000000000000000,
0b0010000000000000000000000000,
0b0100000000000000000000000000,
0b1000000000000000000000000000,
]
@staticmethod
def deck_flags(state):
"""Return the state's deck flags."""
return state & 0xFFFFFFFFFFFFF
@staticmethod
def is_tableau_empty(state):
return (state & 0xFFFFFFF) == 0
@staticmethod
def stock_index(state):
"""Return the state's stock index, the top card of the stock pile.
If the stock index is 52, it means the stock pile is empty."""
return (state >> 52) & 0b111111
@staticmethod
def cycle(state):
"""Return the state's cycle, the times the waste pile was recycled."""
return (state >> 58) & 0b11
@staticmethod
def waste_index(state):
"""Return the state's waste index, the top card of the waste pile.
If the waste index is 27, it means the waste pile is empty."""
index = State.stock_index(state) - 1
mask = 1 << index
while index > State.EMPTY_WASTE:
if (state & mask) != 0:
break
mask >>= 1
index -= 1
return index
@staticmethod
def _adjust_stock_index(state):
"""Return the state with its stock index adjusted correctly.
Basically the stock index must point to a card that remains in the
game or else be 52 to indicate the stock pile is empty. This makes sure
every state has a single unique representation - you can't have two
states that are effectively the same but have different stock indexes
because one points to the actual top card and the other points to
some card that no longer remains in the game."""
index = State.stock_index(state)
state = state & 0xC0FFFFFFFFFFFFF # remove the stock index
mask = 1 << index
while index < State.EMPTY_STOCK:
if (state & mask) != 0:
break
mask <<= 1
index += 1
return state | (index << 52)
@staticmethod
def _uncovered_indexes(deck_flags):
"""Return deck indexes of uncovered tableau cards."""
flags = deck_flags & 0xFFFFFFF
def is_uncovered(index):
return (1 << index) == (flags & State.UNCOVERED_MASKS[index])
return [i for i in range(28) if is_uncovered(i)]
@staticmethod
def successors(state, deck):
"""Return a list of successor states to this state.
Actions that can be performed (if applicable):
1. Recycle the waste pile.
2. Draw a card from the stock pile to the waste pile.
3. Remove a King from the tableau.
4. Remove a King from the stock pile.
5. Remove a King from the waste pile.
6. Remove a pair of cards from the tableau.
7. Remove a pair of cards, one each from the tableau and stock pile.
8. Remove a pair of cards, one each from the tableau and waste pile.
9. Remove a pair of cards, one each from the stock and waste piles."""
def remove(deck_flags, *indexes):
"""Remove the cards at the indexes from the deck_flags value."""
for index in indexes:
deck_flags ^= (1 << index)
return deck_flags
results = []
deck_flags = State.deck_flags(state)
uncovered = State._uncovered_indexes(deck_flags)
stock_index = State.stock_index(state)
waste_index = State.waste_index(state)
cycle = State.cycle(state)
def create(deck_flags=deck_flags, stock_index=stock_index, cycle=cycle):
"""Create a new state given the individual parts of the state."""
new_state = (cycle << 58) | (stock_index << 52) | deck_flags
return State._adjust_stock_index(new_state)
is_stock_empty = stock_index == State.EMPTY_STOCK
is_waste_empty = waste_index == State.EMPTY_WASTE
stock_card = deck[stock_index] if not is_stock_empty else None
waste_card = deck[waste_index] if not is_waste_empty else None
has_both = stock_card and waste_card
if not stock_card and cycle < 2:
# 1. recycle the waste pile
results.append(create(stock_index=28, cycle=cycle+1))
if stock_card:
# 2. draw a card from stock to waste
results.append(create(stock_index=stock_index+1))
if stock_card and cards_are_removable(stock_card):
# 4. remove a King from the stock pile
results.append(create(deck_flags=remove(deck_flags, stock_index)))
if waste_card and cards_are_removable(waste_card):
# 5. remove a King from the waste pile
results.append(create(remove(deck_flags, waste_index)))
if has_both and cards_are_removable(stock_card, waste_card):
# 9. remove the cards on the stock and waste piles
results.append(create(remove(deck_flags, stock_index, waste_index)))
for i in uncovered:
if cards_are_removable(deck[i]):
# 3. remove a King from the tableau
results.append(create(remove(deck_flags, i)))
else:
if stock_card and cards_are_removable(deck[i], stock_card):
# 7. remove the cards from the tableau/stock pile
results.append(create(remove(deck_flags, i, stock_index)))
if waste_card and cards_are_removable(deck[i], waste_card):
# 8. remove the cards from the tableau/waste pile
results.append(create(remove(deck_flags, i, waste_index)))
for j in uncovered:
if cards_are_removable(deck[i], deck[j]):
# 6. remove two cards from the tableau
results.append(create(remove(deck_flags, i, j)))
return results
def path(state, seen_states, deck):
"""Return the actions to take to get to this state from the start."""
def is_bit_set(bits, n):
"""Return true if the nth bit of bits is equal to 1."""
return (bits & (1 << n)) != 0
def action(state, next_state):
"""Return the action taken to go from state to next_state."""
diffs = state ^ next_state # XOR to see which bits changed
deck_diff = State.deck_flags(diffs)
cycle_diff = State.cycle(diffs)
if cycle_diff:
return 'Recycle'
elif deck_diff:
cards = [deck[i] for i in range(52) if is_bit_set(deck_diff, i)]
return f"Remove {' and '.join(cards)}"
else:
return 'Draw'
actions = []
while state in seen_states:
prev_state = seen_states[state]
actions.append(action(prev_state, state))
state = prev_state
return list(reversed(actions))
def solve(deck):
"""Return a solution to removing all tableau cards in Pyramid Solitaire."""
fringe = collections.deque()
seen_states = dict()
fringe.append(State.INITIAL_STATE)
while fringe:
state = fringe.popleft()
if State.is_tableau_empty(state):
return path(state, seen_states, deck)
for next_state in State.successors(state, deck):
if next_state not in seen_states:
seen_states[next_state] = state
fringe.append(next_state)
return []
```
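A usage sketch for solve(); the deck format matches what the deck tests below check (52 two-character cards), with deck[0:28] read as the tableau and the remainder as the stock pile. Whether a given deal is solvable is not known up front, and solve() returns an empty list when it is not:
```python
# Sketch: run the solver on an arbitrary (here trivially ordered) deal.
import solvers.pyramid

ranks = 'A23456789TJQK'
deck = [rank + suit for suit in 'cdhs' for rank in ranks]   # 52 cards

actions = solvers.pyramid.solve(deck)
print('\n'.join(actions) if actions else 'No solution for this deal')
```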
#### File: pysolvers/tests/test_deck.py
```python
import unittest
import solvers.deck
ALL_CARDS = (
'Ac', '2c', '3c', '4c', '5c', '6c', '7c', '8c', '9c', 'Tc', 'Jc', 'Qc', 'Kc',
'Ad', '2d', '3d', '4d', '5d', '6d', '7d', '8d', '9d', 'Td', 'Jd', 'Qd', 'Kd',
'Ah', '2h', '3h', '4h', '5h', '6h', '7h', '8h', '9h', 'Th', 'Jh', 'Qh', 'Kh',
'As', '2s', '3s', '4s', '5s', '6s', '7s', '8s', '9s', 'Ts', 'Js', 'Qs', 'Ks'
)
class TestCardAndDeck(unittest.TestCase):
def test_card_ranks(self):
for card in ALL_CARDS:
self.assertEqual(card[0], solvers.deck.card_rank(card))
def test_missing_cards(self):
self.assertEqual(0, len(solvers.deck.missing_cards(ALL_CARDS)))
self.assertEqual(['Ac'], solvers.deck.missing_cards(ALL_CARDS[1:]))
def test_duplicate_cards(self):
self.assertEqual(0, len(solvers.deck.duplicate_cards(ALL_CARDS)))
self.assertEqual(['Ac', 'Ac'], solvers.deck.duplicate_cards(ALL_CARDS + ('Ac',)))
self.assertEqual(['Ac', 'Ac', 'Ac'], solvers.deck.duplicate_cards(ALL_CARDS + ('Ac', 'Ac')))
def test_malformed_cards(self):
self.assertEqual(0, len(solvers.deck.malformed_cards(ALL_CARDS)))
input = ['7S', 'ks', 'KS', 'kS', None, '', 0, 'Ks']
expected = ['7S', 'ks', 'KS', 'kS', None, '', 0]
self.assertEqual(expected, solvers.deck.malformed_cards(input))
def test_is_standard_deck(self):
self.assertTrue(solvers.deck.is_standard_deck(ALL_CARDS))
self.assertFalse(solvers.deck.is_standard_deck(ALL_CARDS[1:]))
self.assertFalse(solvers.deck.is_standard_deck([]))
``` |
{
"source": "0xDeviI/FatEagle",
"score": 2
} |
#### File: 0xDeviI/FatEagle/commandParser.py
```python
from fescripts.libs.PFable import fable
class FE_COMMAND_PARSE:
def isLoading(self,comm):
d = comm.split()
if (len(d) == 2 and d[0].casefold() == "load".casefold()):
if d[1][0:10] == "fescripts/":
return {"Type":"FESCRIPT","Name":d[1]}
else:
return False
return False
def IsSet(self,comm):
if (comm[0:4].casefold() == "set ".casefold()):
return True
else:
return False
def IsShow(self,comm):
d = comm.split()
if (len(d) == 2 and d[0].casefold() == "show".casefold()):
return d[1]
return False
def IsSearch(self,comm):
if (len(comm.split()) == 2 and comm.split()[0].casefold() == "search".casefold()):
return comm.split()[1]
else:
return False
def IsAdd(self,comm):
arged = comm.split()
if (arged[0].casefold() == "add".casefold()):
return arged
else:
return False
def IsShowList(self,comm):
if (comm[0:10].casefold() == "show list ".casefold()):
return comm[10:len(comm)]
else:
return False
def IsMultiFesSet(self,comm):# set IP 127.0.0.1 Osint/WhoIsWho
arged = comm.split()
if (arged[0].casefold() == "mset".casefold()):
return arged
else:
return False
def isFuncCall(self,comm):
splited = comm.split()
if (splited[0].casefold() == "call".casefold() and len(splited) >= 2):
_funName = splited[1]
_args = []
for i in range(2,len(splited)):
_args.append(splited[i])
return [_funName,_args]
else:
return False
def isExec(self,comm):
splited = comm.split()
if (splited[0].casefold() == "exec".casefold() and len(splited) >= 2):
_shell = ""
for i in range(1,len(splited)):
_shell += splited[i] + " "
_shell = _shell[0:len(_shell) - 1]
return _shell
else:
return False
def isFunSearch(self,comm):
if (len(comm.split()) == 2 and comm.split()[0].casefold() == "funsearch".casefold()):
return comm.split()[1]
else:
return False
```
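For quick reference, the return values of the parser methods above for a few representative commands, inferred directly from the code (the fescript path is a placeholder):
```python
# Sketch: representative inputs and what FE_COMMAND_PARSE returns for them.
from commandParser import FE_COMMAND_PARSE

parser = FE_COMMAND_PARSE()
parser.isLoading('load fescripts/SomeScript')
#   -> {'Type': 'FESCRIPT', 'Name': 'fescripts/SomeScript'}
parser.isLoading('load something_else')        # -> False (not under fescripts/)
parser.IsSearch('search whois')                # -> 'whois'
parser.isExec('exec whoami')                   # -> 'whoami'
```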
#### File: 0xDeviI/FatEagle/fEagle.py
```python
import os
import sys
import signal
import header
import commandParser as _command_parser
import fescriptManager
import socket as s
import json
import shutil
import time
from colorama import init
init()
from colorama import Fore,Back
from processManager import _process_mode,FE_PROCESS_MANAGER
from functions.functions import FUNCTIONS
from fescripts.libs.PFable import fable_mode,fable
from colorManager import *
from feConfig import *
_HEADER = header.FE_HEADER()
_COLOR_M = FE_COLOR()
_FUNCTIONS = FUNCTIONS()
_cp = _command_parser.FE_COMMAND_PARSE()
_fsc = fescriptManager.FE_SCRIPT_MANAGER()
_proc = FE_PROCESS_MANAGER()
_in_app = True
__FE_MULTI_FESCRIPT__ = []
def goodBye():
print(Fore.LIGHTBLUE_EX + "\nGoodbye!" + Fore.RESET)
sys.exit(0)
def signal_handler(sig, frame):
goodBye()
signal.signal(signal.SIGINT, signal_handler)
def clearConsole():
os.system('cls' if os.name == 'nt' else 'clear')
def main():
_HEADER.printHeader()
while (_in_app):
try:
command = input(Fore.RESET + "FatEagle ~> " if _fsc.loaded_script == "" else "FatEagle " + Fore.MAGENTA + "fescript" + Fore.RESET + "(" + Fore.YELLOW + _fsc.loaded_script + Fore.RESET + ") ~> ")
except:
goodBye()
if (command != ""):
if (command.casefold() == "myIP".casefold()):
print("Your IP adrress is :",Fore.LIGHTGREEN_EX + s.gethostbyname(s.gethostname()) + Fore.RESET)
elif (command.casefold() == "myHost".casefold()):
print("Your host name is :",Fore.LIGHTGREEN_EX + s.gethostname() + Fore.RESET)
elif (os.name == "nt" and command == "cls"):
clearConsole()
elif (os.name != "nt" and command == "clear"):
clearConsole()
elif (command.casefold() == "banner".casefold()):
_HEADER.printHeader()
elif (_cp.isLoading(command) != False):
f = _cp.isLoading(command)
if (f["Type"] == "FESCRIPT"):
_fsc.loadScript(f["Name"])
elif (command.casefold() == "unload module"):
if (_fsc.loaded_script != ""): _fsc.unloadScript()
elif (command.casefold() == "fesInfo".casefold()):
if (_fsc.loaded_script != ""):
try:
_main_module = locals()[_fsc.loaded_script] = __import__(_fsc.loaded_script.replace("/","."),fromlist=['object'])
eval("_main_module." + pathilize(_fsc.loaded_script) + "().info()")
except:
print(Fore.LIGHTRED_EX + "fescript " + Fore.LIGHTBLUE_EX + pathilize(_fsc.loaded_script) + Fore.LIGHTRED_EX + " does not exist or may it has some errors" + Fore.RESET)
else:
print(Fore.LIGHTRED_EX + "no module loaded." + Fore.RESET)
elif (command.casefold() == "fesHelp".casefold()):
if (_fsc.loaded_script != ""):
try:
_main_module = locals()[_fsc.loaded_script] = __import__(_fsc.loaded_script.replace("/","."),fromlist=['object'])
eval("_main_module." + pathilize(_fsc.loaded_script) + "().help()")
except:
print(Fore.LIGHTRED_EX + "fescript " + Fore.LIGHTBLUE_EX + pathilize(_fsc.loaded_script) + Fore.LIGHTRED_EX + " does not exist or may it has some errors" + Fore.RESET)
else:
print(Fore.LIGHTRED_EX + "no module loaded." + Fore.RESET)
elif (command.casefold() == "fesOptions".casefold()):
if (_fsc.loaded_script != ""):
try:
_main_module = locals()[_fsc.loaded_script] = __import__(_fsc.loaded_script.replace("/","."),fromlist=['object'])
eval("_main_module." + pathilize(_fsc.loaded_script) + "().switchInfo()")
except:
print(Fore.LIGHTRED_EX + "fescript " + Fore.LIGHTBLUE_EX + pathilize(_fsc.loaded_script) + Fore.LIGHTRED_EX + " does not exist or may it has some errors" + Fore.RESET)
else:
print(Fore.LIGHTRED_EX + "no module loaded." + Fore.RESET)
elif (command.casefold() == "fesRequire".casefold()):
if (_fsc.loaded_script != ""):
try:
_main_module = locals()[_fsc.loaded_script] = __import__(_fsc.loaded_script.replace("/","."),fromlist=['object'])
eval("_main_module." + pathilize(_fsc.loaded_script) + "().missedSwitch()")
except:
print(Fore.LIGHTRED_EX + "fescript " + Fore.LIGHTBLUE_EX + pathilize(_fsc.loaded_script) + Fore.LIGHTRED_EX + " does not exist or may it has some errors" + Fore.RESET)
else:
print(Fore.LIGHTRED_EX + "no module loaded." + Fore.RESET)
elif (_cp.IsSet(command) != False):
setValue(command)
elif (_cp.IsShow(command) != False):
showValue(_cp.IsShow(command))
elif (command.casefold() == "fesStart".casefold()):
try:
_main_module = locals()[_fsc.loaded_script] = __import__(_fsc.loaded_script.replace("/","."),fromlist=['object'])
_proc.setPM(_process_mode.IN_SCRIPT)
eval("_main_module." + pathilize(_fsc.loaded_script) + "()._pre_start()")
_proc.setPM(_process_mode.FREE)
except:
print(Fore.LIGHTRED_EX + "fescript " + Fore.LIGHTBLUE_EX + pathilize(_fsc.loaded_script) + Fore.LIGHTRED_EX + " does not exist or may it has some errors" + Fore.RESET)
elif (_cp.IsSearch(command) != False):
_search = _cp.IsSearch(command)
if (_search == "*"): _search = "s"
_file = open("ModulesDB.json","r",encoding="utf8")
_modules = json.load(_file)
cols = ["name","description"]
data = []
for _ in _modules:
if (_search.casefold() in _["MODULE_FNAME"].casefold() or _search.casefold() in _["MODULE_INFO"].casefold()
or _search.casefold() in _["MODULE_DESC"].casefold()
or _search.casefold() in _["MODULE_AUTHOR"].casefold()):
data.append([_["MODULE_FNAME"],_["MODULE_INFO"]])
if (len(data) == 0):
print(Fore.LIGHTRED_EX + "can't find any module named '" + Fore.CYAN + _search + Fore.LIGHTRED_EX + "'.\nif you added new fescript, you may need to update modules database using:" + Fore.YELLOW + "\n update db" + Fore.RESET)
else:
_fable = fable(cols,data,fable_mode.BOLD_COLUMNS)
print(_fable.popData())
elif (command.casefold() == "update mdb".casefold()):
updateModulesDB(True)
elif (command.casefold() == "update fundb".casefold()):
_FUNCTIONS.updateFunctionDB(True)
elif (_cp.isFunSearch(command) != False):
_FUNCTIONS.searchFunc(_cp.isFunSearch(command))
elif (command.casefold() == "exit".casefold()):
print(Fore.LIGHTBLUE_EX + "\nGoodbye!" + Fore.RESET)
sys.exit(0)
elif (_cp.IsAdd(command) != False):
arged = _cp.IsAdd(command)
__fes_dir = getListOfFiles("fescripts/")
for i in range(0,len(__fes_dir)):
__fes_dir[i] = __fes_dir[i].replace("fescripts/","")
_fescript = arged[1]
_list_name = arged[2]
__all_vars = globals().keys()
found = False
for i in __all_vars:
if (_list_name == i):
found = True
break
if (found):
found = False
for i in __fes_dir:
if (_fescript == i):
found = True
break
if (found):
if (_fescript not in eval(_list_name)):
eval(_list_name + ".append(\"" + _fescript + "\")")
else:
print(Fore.LIGHTRED_EX + "fescript '" + Fore.LIGHTBLUE_EX + _fescript + Fore.LIGHTRED_EX + "' is Undefined!" + Fore.RESET)
else:
print(Fore.LIGHTRED_EX + "list '" + Fore.LIGHTBLUE_EX + _list_name + Fore.LIGHTRED_EX + "' is Undefined!" + Fore.RESET)
del __all_vars
del __fes_dir
elif (_cp.IsShowList(command) != False):
_list_name = _cp.IsShowList(command)
__all_vars = globals().keys()
found = False
for i in __all_vars:
if (_list_name == i):
found = True
break
if (found):
eval("print(" + str(_list_name) + ")")
else:
print(Fore.LIGHTRED_EX + "list '" + Fore.LIGHTBLUE_EX + _list_name + Fore.LIGHTRED_EX + "' is Undefined!" + Fore.RESET)
del __all_vars
elif (command.casefold() == "deltemp".casefold()):
clearModuleTemp(True)
elif (command.casefold() == "info __FE_MULTI_FESCRIPT__".casefold()):
for i in __FE_MULTI_FESCRIPT__:
print(i + " : ")
_main_module = locals()[pathilize(i)] = __import__("fescripts." + i.replace("/","."),fromlist=['object'])
_ = eval("_main_module." + pathilize(i) + "().switchInfo()")
del _main_module
elif (command.casefold() == "start __FE_MULTI_FESCRIPT__".casefold()):
for i in __FE_MULTI_FESCRIPT__:
print(i + " : ")
_main_module = locals()[pathilize(i)] = __import__("fescripts." + i.replace("/","."),fromlist=['object'])
_proc.setPM(_process_mode.IN_SCRIPT)
_ = eval("_main_module." + pathilize(i) + "()._pre_start()")
del _main_module
_proc.setPM(_process_mode.FREE)
elif (_cp.IsMultiFesSet(command) != False):
arged = _cp.IsMultiFesSet(command)
_s_name = arged[1]
_s_val = arged[2]
_fes_name = arged[3]
_fsc.loadScript(_fes_name)
setValue("set " + _s_name + " " + _s_val)
elif (command.casefold() == "version".casefold()):
print(_COLOR_M.colorful_str("Fat Eagle V" + VERSION_))
elif (_cp.isFuncCall(command) != False):
funcCall = _cp.isFuncCall(command)
_FUNCTIONS.execute(funcCall[0],funcCall[1])
elif (_cp.isExec(command) != False):
_shell = _cp.isExec(command)
os.system(_shell)
elif (command.casefold() == "fwi".casefold()):
print(Fore.YELLOW + "Fat Eagle is a hacking and cybersecurity framework written in python by " + DEVELOPER + """
you can easily run it everywhere like Windows, Linux, macOS, Android and anywhere else Python can run. With this framework you can access top security tools like exploits, payloads, hash crackers, phishing tools, etc.""" + Fore.RESET)
else:
print(Fore.LIGHTCYAN_EX + command + Fore.LIGHTRED_EX + " is not a valid command." + Fore.RESET)
def showValue(switch):
if (_fsc.loaded_script != ""):
found = False
_main_module = locals()[_fsc.loaded_script] = __import__(_fsc.loaded_script.replace("/","."),fromlist=['object'])
props = eval("_main_module." + pathilize(_fsc.loaded_script) + "().allSwitches()")
del _main_module
for i in props:
if (switch in i):
_main_module = locals()[_fsc.loaded_script] = __import__(_fsc.loaded_script.replace("/","."),fromlist=['object'])
eval("_main_module." + pathilize(_fsc.loaded_script) + "().showSwitch(\"" + switch + "\")")
found = True
del _main_module
break
if (found == False):
print(Fore.LIGHTRED_EX + "switch " + Fore.LIGHTBLUE_EX + switch + Fore.LIGHTRED_EX + " does not exist!" + Fore.RESET)
else:
print(Fore.LIGHTRED_EX + "no module loaded." + Fore.RESET)
def setValue(command):
if (_fsc.loaded_script != ""):
other = command[4:len(command)].split()
found = False
_main_module = locals()[_fsc.loaded_script] = __import__(_fsc.loaded_script.replace("/","."),fromlist=['object'])
props = eval("_main_module." + pathilize(_fsc.loaded_script) + "().allSwitches()")
del _main_module
for i in props:
if (other[0] == i):
_main_module = locals()[_fsc.loaded_script] = __import__(_fsc.loaded_script.replace("/","."),fromlist=['object'])
oper = eval("_main_module." + pathilize(_fsc.loaded_script) + "().setSwitch(\"" + other[0] + "\",\"" + other[1] + "\")")
if (oper == None):
print(Fore.LIGHTBLUE_EX + other[0] + Fore.RESET + " ---> " + Fore.LIGHTGREEN_EX + other[1] + Fore.RESET)
else:
if (oper[0] == False):
print(Fore.RED + oper[1] + Fore.RESET)
found = True
del _main_module
break
if (found == False):
print(Fore.LIGHTRED_EX + "switch " + Fore.LIGHTBLUE_EX + other[0] + Fore.LIGHTRED_EX + " does not exist!" + Fore.RESET)
else:
print(Fore.LIGHTRED_EX + "no module loaded." + Fore.RESET)
def pathilize(_str):
last = ""
for i in _str[::-1]:
if (i == '/'):
break
else:
last += i
return last[::-1]
def getListOfFiles(dirName):
listOfFile = os.listdir(dirName)
allFiles = list()
for entry in listOfFile:
fullPath = os.path.join(dirName, entry)
if os.path.isdir(fullPath):
allFiles = allFiles + getListOfFiles(fullPath)
else:
path = fullPath.replace("\\","/")
if (path[len(path) - 3:len(path)] == ".py" and "libs" not in path and "temp" not in path):
allFiles.append(path[0:len(path) - 3])
return allFiles
def updateModulesDB(userRequest = False):
FeScripts = getListOfFiles("fescripts/")
data = []
for _ in FeScripts:
if ('s' in _):
_main_module = locals()[_] = __import__(_.replace("/","."),fromlist=['object'])
info = eval("_main_module." + pathilize(_) + "()._info()")
desc = eval("_main_module." + pathilize(_) + "()._help()")
author = eval("_main_module." + pathilize(_) + "()._author()")
data.append({"MODULE_FNAME":_,"MODULE_INFO":info.replace("\n", ""),"MODULE_DESC":desc,"MODULE_AUTHOR":author})
del _main_module
_file = open("ModulesDB.json","w",encoding="utf8")
json.dump(data,_file,indent=4, sort_keys=True)
_file.close()
if (userRequest): print(Fore.GREEN + "Modules Database Updated!" + Fore.RESET)
def clearModuleTemp(userRequest = False):
folder = 'fescripts/temp/'
for filename in os.listdir(folder):
file_path = os.path.join(folder, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print(Fore.RED + ('Failed to delete %s. Reason: %s' % (file_path, e)) + Fore.RESET)
if (userRequest): print(Fore.GREEN + "Modules Temp Cleared!" + Fore.RESET)
if (__name__ == "__main__"):
msg = 'Loading Fat Eagle ...'
clearConsole()
sys.stdout.write(msg)
sys.stdout.flush()
time.sleep(2)
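    # erase the loading message in place: '\033[D' moves the cursor one column left, the space overwrites the character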
for _ in range(len(msg)):
time.sleep(0.1)
sys.stdout.write('\033[D \033[D')
sys.stdout.flush()
if (MODULE_DB_UPDATE_ON_START): updateModulesDB()
if (CLEAR_MODULE_TEMPS_ON_START): clearModuleTemp()
_FUNCTIONS.updateFunctionDB()
main()
```
#### File: Cryptography/Hash/Hash6.py
```python
from fescripts.libs.blake2s import Blake2
from fescripts.libs.blake import Blake
from fescripts.libs.md2 import MD2
import hashlib,binascii
import os
import sys
import fescripts.libs.fescripts
from fescripts.libs.PFable import fable,fable_mode
from colorama import init
init()
from colorama import Fore,Back
import signal
import fescripts.libs.ripemd128 as ripe128
from fescripts.libs.alphaHash import *
class Hash6:
#config
SHOW_NOT_FOUND_RES = True
#config
def signal_handler(self,sig, frame):
self._end()
signal.signal(signal.SIGINT, signal_handler)
    _fs = fescripts.libs.fescripts.FE_SCRIPTS("Hash6","a FeScript to bruteforce Hashes","""a FeScript to bruteforce different kinds of hashes like md, sha, blake, ripemd, etc.""",{'Hash': {'Body': '', 'Description': 'hash you want to brute force.', 'Require': True}, 'Wordlist': {'Body': '', 'Description': 'wordlist used to brute force the hash.', 'Require': True}, 'Salt': {'Body': '', 'Description': 'hash salt, if the hash uses one.', 'Require': False}},"0xDeviI")
def __init__(self):
pass
def help(self):
print(self._fs._totalDesc)
def _help(self):
return self._fs._totalDesc
def _author(self):
return self._fs._author
def info(self):
print(self._fs._miniDesc + "\n Author: " + self._fs._author)
def _info(self):
return self._fs._miniDesc
def allRequirement(self):
keys = self._fs._Opt.keys()
allKeys = []
for i in keys:
if (self._fs._Opt[i]['Require'] == True):
allKeys.append(i)
return allKeys
def allSwitches(self):
keys = self._fs._Opt.keys()
allKeys = []
for i in keys:
allKeys.append(i)
return allKeys
def _pre_start(self):
_all_req = self.allRequirement()
found = False
for i in _all_req:
if (self._fs._Opt[i]["Body"] == ""):
found = True
break
if (found):
print(Fore.RED + "All requirement switches not filled!" + Fore.RESET)
else:
self._start()
def showSwitch(self,sw):
print(self._fs._Opt[sw]["Body"])
def _start(self):
print("\nFatEagle Script ' " + Fore.YELLOW + self.__class__.__name__ + Fore.RESET + " '" + Fore.GREEN + " Started!" + Fore.RESET)
# --------------------------------------------> Script Started!
print("""
1) MD2
2) MD4
3) MD5
4) SHA1
5) SHA256
6) SHA384
7) SHA512
8) RIPEMD-128
9) RIPEMD-160
10) BLAKE-224
11) BLAKE-256
12) BLAKE-384
13) BLAKE-512
14) BLAKE2s-224
15) BLAKE2s-256
16) BLAKE2s-384
17) BLAKE2s-512
18) NTLM
19) ALPHA-1
""")
hashType = input("enter hash ID: ")
if (os.path.exists(self._fs._Opt["Wordlist"]["Body"])):
_file = open(self._fs._Opt["Wordlist"]["Body"],encoding="utf8")
ah = AlphaHashV1()
for i in list(_file.readlines()):
i = i.replace("\n","")
if (hashType == "1"):
digest = MD2().hash_digest(i.encode("utf-8"))
if (digest.lower() == self._fs._Opt["Hash"]["Body"].lower()):
print(Fore.LIGHTGREEN_EX + "[+] " + digest + " : " + i + Fore.RESET)
break
elif (self.SHOW_NOT_FOUND_RES):
print(Fore.LIGHTRED_EX + "[!] " + Fore.RESET + digest + " : " + i)
elif (hashType == "2"):
digest = hashlib.new('md4', i.encode('utf-8')).hexdigest()
if (digest.lower() == self._fs._Opt["Hash"]["Body"].lower()):
print(Fore.LIGHTGREEN_EX + "[+] " + digest + " : " + i + Fore.RESET)
break
elif (self.SHOW_NOT_FOUND_RES):
print(Fore.LIGHTRED_EX + "[!] " + Fore.RESET + digest + " : " + i)
elif (hashType == "3"):
digest = hashlib.new('md5', i.encode('utf-8')).hexdigest()
if (digest.lower() == self._fs._Opt["Hash"]["Body"].lower()):
print(Fore.LIGHTGREEN_EX + "[+] " + digest + " : " + i + Fore.RESET)
break
elif (self.SHOW_NOT_FOUND_RES):
print(Fore.LIGHTRED_EX + "[!] " + Fore.RESET + digest + " : " + i)
elif (hashType == "4"):
digest = hashlib.new('sha1', i.encode('utf-8')).hexdigest()
if (digest.lower() == self._fs._Opt["Hash"]["Body"].lower()):
print(Fore.LIGHTGREEN_EX + "[+] " + digest + " : " + i + Fore.RESET)
break
elif (self.SHOW_NOT_FOUND_RES):
print(Fore.LIGHTRED_EX + "[!] " + Fore.RESET + digest + " : " + i)
elif (hashType == "5"):
digest = hashlib.sha256(i.encode('utf-8')).hexdigest()
if (digest.lower() == self._fs._Opt["Hash"]["Body"].lower()):
print(Fore.LIGHTGREEN_EX + "[+] " + digest + " : " + i + Fore.RESET)
break
elif (self.SHOW_NOT_FOUND_RES):
print(Fore.LIGHTRED_EX + "[!] " + Fore.RESET + digest + " : " + i)
elif (hashType == "6"):
digest = hashlib.sha384(i.encode('utf-8')).hexdigest()
if (digest.lower() == self._fs._Opt["Hash"]["Body"].lower()):
print(Fore.LIGHTGREEN_EX + "[+] " + digest + " : " + i + Fore.RESET)
break
elif (self.SHOW_NOT_FOUND_RES):
print(Fore.LIGHTRED_EX + "[!] " + Fore.RESET + digest + " : " + i)
elif (hashType == "7"):
digest = hashlib.sha512(i.encode('utf-8')).hexdigest()
if (digest.lower() == self._fs._Opt["Hash"]["Body"].lower()):
print(Fore.LIGHTGREEN_EX + "[+] " + digest + " : " + i + Fore.RESET)
break
elif (self.SHOW_NOT_FOUND_RES):
print(Fore.LIGHTRED_EX + "[!] " + Fore.RESET + digest + " : " + i)
elif (hashType == "8"):
digest = ripe128.hexstr(ripe128.ripemd128(i.encode("utf8")))
if (digest.lower() == self._fs._Opt["Hash"]["Body"].lower()):
print(Fore.LIGHTGREEN_EX + "[+] " + digest + " : " + i + Fore.RESET)
break
elif (self.SHOW_NOT_FOUND_RES):
print(Fore.LIGHTRED_EX + "[!] " + Fore.RESET + digest + " : " + i)
elif (hashType == "9"):
digest = hashlib.new('ripemd160', i.encode('utf-8')).hexdigest()
if (digest.lower() == self._fs._Opt["Hash"]["Body"].lower()):
print(Fore.LIGHTGREEN_EX + "[+] " + digest + " : " + i + Fore.RESET)
break
elif (self.SHOW_NOT_FOUND_RES):
print(Fore.LIGHTRED_EX + "[!] " + Fore.RESET + digest + " : " + i)
elif (hashType == "10"):
digest = Blake(224).hash_digest(i.encode("utf-8"))
if (digest.lower() == self._fs._Opt["Hash"]["Body"].lower()):
print(Fore.LIGHTGREEN_EX + "[+] " + digest + " : " + i + Fore.RESET)
break
elif (self.SHOW_NOT_FOUND_RES):
print(Fore.LIGHTRED_EX + "[!] " + Fore.RESET + digest + " : " + i)
elif (hashType == "11"):
digest = Blake(256).hash_digest(i.encode("utf-8"))
if (digest.lower() == self._fs._Opt["Hash"]["Body"].lower()):
print(Fore.LIGHTGREEN_EX + "[+] " + digest + " : " + i + Fore.RESET)
break
elif (self.SHOW_NOT_FOUND_RES):
print(Fore.LIGHTRED_EX + "[!] " + Fore.RESET + digest + " : " + i)
elif (hashType == "12"):
digest = Blake(384).hash_digest(i.encode("utf-8"))
if (digest.lower() == self._fs._Opt["Hash"]["Body"].lower()):
print(Fore.LIGHTGREEN_EX + "[+] " + digest + " : " + i + Fore.RESET)
break
elif (self.SHOW_NOT_FOUND_RES):
print(Fore.LIGHTRED_EX + "[!] " + Fore.RESET + digest + " : " + i)
elif (hashType == "13"):
digest = Blake(512).hash_digest(i.encode("utf-8"))
if (digest.lower() == self._fs._Opt["Hash"]["Body"].lower()):
print(Fore.LIGHTGREEN_EX + "[+] " + digest + " : " + i + Fore.RESET)
break
elif (self.SHOW_NOT_FOUND_RES):
print(Fore.LIGHTRED_EX + "[!] " + Fore.RESET + digest + " : " + i)
elif (hashType == "14"):
digest = Blake2(224).hash_digest(i.encode("utf-8"))
if (digest.lower() == self._fs._Opt["Hash"]["Body"].lower()):
print(Fore.LIGHTGREEN_EX + "[+] " + digest + " : " + i + Fore.RESET)
break
elif (self.SHOW_NOT_FOUND_RES):
print(Fore.LIGHTRED_EX + "[!] " + Fore.RESET + digest + " : " + i)
elif (hashType == "15"):
digest = Blake2(256).hash_digest(i.encode("utf-8"))
if (digest.lower() == self._fs._Opt["Hash"]["Body"].lower()):
print(Fore.LIGHTGREEN_EX + "[+] " + digest + " : " + i + Fore.RESET)
break
elif (self.SHOW_NOT_FOUND_RES):
print(Fore.LIGHTRED_EX + "[!] " + Fore.RESET + digest + " : " + i)
elif (hashType == "16"):
digest = Blake2(384).hash_digest(i.encode("utf-8"))
if (digest.lower() == self._fs._Opt["Hash"]["Body"].lower()):
print(Fore.LIGHTGREEN_EX + "[+] " + digest + " : " + i + Fore.RESET)
break
elif (self.SHOW_NOT_FOUND_RES):
print(Fore.LIGHTRED_EX + "[!] " + Fore.RESET + digest + " : " + i)
elif (hashType == "17"):
digest = Blake2(512).hash_digest(i.encode("utf-8"))
if (digest.lower() == self._fs._Opt["Hash"]["Body"].lower()):
print(Fore.LIGHTGREEN_EX + "[+] " + digest + " : " + i + Fore.RESET)
break
elif (self.SHOW_NOT_FOUND_RES):
print(Fore.LIGHTRED_EX + "[!] " + Fore.RESET + digest + " : " + i)
elif (hashType == "18"):
digest = binascii.hexlify(hashlib.new('md4', i.encode('utf-16le')).digest()).decode("utf-8")
if (digest.lower() == self._fs._Opt["Hash"]["Body"].lower()):
print(Fore.LIGHTGREEN_EX + "[+] " + digest + " : " + i + Fore.RESET)
break
elif (self.SHOW_NOT_FOUND_RES):
print(Fore.LIGHTRED_EX + "[!] " + Fore.RESET + digest + " : " + i)
elif (hashType == "19"):
digest = ah.Hash(i)
if (digest == self._fs._Opt["Hash"]["Body"]):
print(Fore.LIGHTGREEN_EX + "[+] " + digest + " : " + i + Fore.RESET)
break
elif (self.SHOW_NOT_FOUND_RES):
print(Fore.LIGHTRED_EX + "[!] " + Fore.RESET + digest + " : " + i)
else:
print(Fore.LIGHTRED_EX + "wordlist not found!" + Fore.RESET)
# --------------------------------------------> Script Stopped!
self._end()
def _end(self):
print("FatEagle Script ' " + Fore.YELLOW + self.__class__.__name__ + Fore.RESET + " '" + Fore.RED + " Stopped!\n\n" + Fore.RESET)
def missedSwitch(self):
fable_data = []
keys = self._fs._Opt.keys()
for i in keys:
if (self._fs._Opt[i].get("Require") == True and self._fs._Opt[i].get("Body") == ""):
fable_data.append([i,self._fs._Opt[i].get("Body"),self._fs._Opt[i].get("Description")])
fabled = fable(["switch name","value","descrption"],fable_data,fable_mode.SLICED)
print(fabled.popData())
def switchInfo(self):
fable_data = []
keys = self._fs._Opt.keys()
for i in keys:
fable_data.append([i,self._fs._Opt[i].get("Body"),str(self._fs._Opt[i].get("Require")),self._fs._Opt[i].get("Description")])
fabled = fable(["switch name","value","required","descrption"],fable_data,fable_mode.SLICED)
print(fabled.popData())
def setSwitch(self,prop,value):
self._fs._Opt[prop]["Body"] = value
```
#### File: fescripts/libs/blake.py
```python
from fescripts.libs.cryptopals_lib import *
class Blake(object):
def __init__(self, version=512):
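        # the round constants below are the leading hexadecimal digits of pi, as used in the BLAKE specification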
self.round_constant1 = [0x243F6A88, 0x85A308D3, 0x13198A2E, 0x03707344,
0xA4093822, 0x299F31D0, 0x082EFA98, 0xEC4E6C89,
0x452821E6, 0x38D01377, 0xBE5466CF, 0x34E90C6C,
0xC0AC29B7, 0xC97C50DD, 0x3F84D5B5, 0xB5470917,]
self.round_constant2 = [0x243F6A8885A308D3, 0x13198A2E03707344, 0xA4093822299F31D0, 0x082EFA98EC4E6C89,
0x452821E638D01377, 0xBE5466CF34E90C6C, 0xC0AC29B7C97C50DD, 0x3F84D5B5B5470917,
0x9216D5D98979FB1B, 0xD1310BA698DFB5AC, 0x2FFD72DBD01ADFB7, 0xB8E1AFED6A267E96,
0xBA7C9045F12C7F99, 0x24A19947B3916CF7, 0x0801F2E2858EFC16, 0x636920D871574E69,]
self.permutations = [
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15],
[14,10, 4, 8, 9,15,13, 6, 1,12, 0, 2,11, 7, 5, 3],
[11, 8,12, 0, 5, 2,15,13,10,14, 3, 6, 7, 1, 9, 4],
[ 7, 9, 3, 1,13,12,11,14, 2, 6, 5,10, 4, 0,15, 8],
[ 9, 0, 5, 7, 2, 4,10,15,14, 1,11,12, 6, 8, 3,13],
[ 2,12, 6,10, 0,11, 8, 3, 4,13, 7, 5,15,14, 1, 9],
[12, 5, 1,15,14,13, 4,10, 0, 7, 6, 3, 9, 2, 8,11],
[13,11, 7,14,12, 1, 3, 9, 5, 0,15, 4, 8, 6, 2,10],
[ 6,15,14, 9,11, 3, 0, 8,12, 2,13, 7, 1, 4,10, 5],
[10, 2, 8, 4, 7, 6, 1, 5,15,11, 9,14, 3,12,13, 0],
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15],
[14,10, 4, 8, 9,15,13, 6, 1,12, 0, 2,11, 7, 5, 3],
[11, 8,12, 0, 5, 2,15,13,10,14, 3, 6, 7, 1, 9, 4],
[ 7, 9, 3, 1,13,12,11,14, 2, 6, 5,10, 4, 0,15, 8],
[ 9, 0, 5, 7, 2, 4,10,15,14, 1,11,12, 6, 8, 3,13],
[ 2,12, 6,10, 0,11, 8, 3, 4,13, 7, 5,15,14, 1, 9],
[12, 5, 1,15,14,13, 4,10, 0, 7, 6, 3, 9, 2, 8,11],
[13,11, 7,14,12, 1, 3, 9, 5, 0,15, 4, 8, 6, 2,10],
[ 6,15,14, 9,11, 3, 0, 8,12, 2,13, 7, 1, 4,10, 5],
[10, 2, 8, 4, 7, 6, 1, 5,15,11, 9,14, 3,12,13, 0],
]
self.salt = [0x00, 0x00, 0x00, 0x00]
self.xor_block = True
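        # when xor_block is False, the counter (current length) words are not XORed into the working state;
        # hash() clears it for a final block that consists entirely of padding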
self.current_length = 0
self.__select_version(version)
def __select_version(self, version):
if version == 224:
self.buffers = [0xC1059ED8, 0x367CD507, 0x3070DD17, 0xF70E5939,
0xFFC00B31, 0x68581511, 0x64F98FA7, 0xBEFA4FA4,]
self.round_constants = self.round_constant1
self.rotations = [16,12,8,7]
self.blocksize = 32
self.rounds = 14
self.padding_end = 0x00
self.output_size = 7
elif version == 256:
self.buffers = [0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,]
self.round_constants = self.round_constant1
self.rotations = [16,12,8,7]
self.blocksize = 32
self.rounds = 14
self.padding_end = 0x01
self.output_size = 8
elif version == 384:
self.buffers = [0xCBBB9D5DC1059ED8, 0x629A292A367CD507, 0x9159015A3070DD17, 0x152FECD8F70E5939,
0x67332667FFC00B31, 0x8EB44A8768581511, 0xDB0C2E0D64F98FA7, 0x47B5481DBEFA4FA4,]
self.round_constants = self.round_constant2
self.rotations = [32,25,16,11]
self.blocksize = 64
self.rounds = 16
self.padding_end = 0x00
self.output_size = 6
elif version == 512:
self.buffers = [0x6A09E667F3BCC908, 0xBB67AE8584CAA73B, 0x3C6EF372FE94F82B, 0xA54FF53A5F1D36F1,
0x510E527FADE682D1, 0x9B05688C2B3E6C1F, 0x1F83D9ABFB41BD6B, 0x5BE0CD19137E2179,]
self.round_constants = self.round_constant2
self.rotations = [32,25,16,11]
self.blocksize = 64
self.rounds = 16
self.padding_end = 0x01
self.output_size = 8
else:
raise ValueError("Invalid Blake Version {}".format(self.version))
def _set_message(self, message):
#Convert to bytes if not already
byte_message = bytearray(message)
#Append 0x80 to the end of the message
byte_message.append(0x80)
#Get Length shifted by 8 and limit to int
self.final_length = len(message) << 3
input_length_data = asint(self.final_length, self.blocksize * 2)
        #Pad the data so that, once the length field is appended, the message is a multiple of (blocksize * 2) bytes
while len(byte_message) % (self.blocksize * 2) != ((self.blocksize * 2) - ((self.blocksize * 2) // 8)):
byte_message.append(0x00)
#Make the last byte of the padding end with a 1 or a 0 depending on the hash version
byte_message[-1] |= self.padding_end
#Append the length data to the message
byte_message += int_to_bytes_length(input_length_data, (self.blocksize * 2) // 8 )
return byte_message
def _chacha_quarter_round(self, a, b, c, d, message, round_num, index):
#Calculate indexes from Permuation table and round_index and offset
message_index = self.permutations[round_num][index]
constant_index = self.permutations[round_num][index+1]
#Modified first part to include message and round xor
a = asint((a + b) + (message[message_index] ^ self.round_constants[constant_index]), self.blocksize)
d = asint(d ^ a, self.blocksize)
d = asint(shift_rotate_right(d, self.rotations[0], self.blocksize), self.blocksize)
c = asint(c + d, self.blocksize)
b = asint(b ^ c, self.blocksize)
b = asint(shift_rotate_right(b, self.rotations[1], self.blocksize), self.blocksize)
#Modified first part to include message and round xor
a = asint((a + b) + (message[constant_index] ^ self.round_constants[message_index]), self.blocksize)
d = asint(d ^ a, self.blocksize)
d = asint(shift_rotate_right(d, self.rotations[2], self.blocksize), self.blocksize)
c = asint(d + c, self.blocksize)
b = asint(b ^ c, self.blocksize)
b = asint(shift_rotate_right(b, self.rotations[3], self.blocksize), self.blocksize)
return [a,b,c,d]
def _compress_chunk(self, chunk):
#Start the compress function
#Create the start of the temp chunks
temp_chunk = bytes_to_intarray(chunk, (self.blocksize //8), byte_order="big")
#print(f"message: {[hex(x) for x in temp_chunk]}")
#Start setting up the temp buffers
temp_buffers = self.buffers[:] + self.round_constants[:8]
for x in range(4):
temp_buffers[8+x] ^= self.salt[x]
#Do not xor currentlength when it is the last block and there is more than one block
if self.xor_block:
temp_buffers[12] ^= asint(self.current_length, self.blocksize)
temp_buffers[13] ^= asint(self.current_length, self.blocksize)
temp_buffers[14] ^= (self.current_length >> self.blocksize)
temp_buffers[15] ^= (self.current_length >> self.blocksize)
'''
Resulting temp_buffers looks like this
|IV |IV |IV |IV |
|IV |IV |IV |IV |
|Const ^ Salt |Const ^ Salt |Const ^ Salt |Const ^ Salt |
|Const ^ len[0] |Const ^ len[0] |Const ^ len[1] |Const ^ len[1] |
'''
#print([hex(x) for x in temp_buffers[12:]], not self.xor_block, hex(self.current_length))
#Do ChaCha rounds with modifications
for index in range(self.rounds):
#Do Each Column
temp_buffers[0], temp_buffers[4], temp_buffers[8], temp_buffers[12] = self._chacha_quarter_round(temp_buffers[0], temp_buffers[4], temp_buffers[8], temp_buffers[12], temp_chunk, index, 0)
temp_buffers[1], temp_buffers[5], temp_buffers[9], temp_buffers[13] = self._chacha_quarter_round(temp_buffers[1], temp_buffers[5], temp_buffers[9], temp_buffers[13], temp_chunk, index, 2)
temp_buffers[2], temp_buffers[6], temp_buffers[10], temp_buffers[14] = self._chacha_quarter_round(temp_buffers[2], temp_buffers[6], temp_buffers[10], temp_buffers[14], temp_chunk, index, 4)
temp_buffers[3], temp_buffers[7], temp_buffers[11], temp_buffers[15] = self._chacha_quarter_round(temp_buffers[3], temp_buffers[7], temp_buffers[11], temp_buffers[15], temp_chunk, index, 6)
#Do Each Diagonal
temp_buffers[0], temp_buffers[5], temp_buffers[10], temp_buffers[15] = self._chacha_quarter_round(temp_buffers[0], temp_buffers[5], temp_buffers[10], temp_buffers[15], temp_chunk, index, 8)
temp_buffers[1], temp_buffers[6], temp_buffers[11], temp_buffers[12] = self._chacha_quarter_round(temp_buffers[1], temp_buffers[6], temp_buffers[11], temp_buffers[12], temp_chunk, index, 10)
temp_buffers[2], temp_buffers[7], temp_buffers[8], temp_buffers[13] = self._chacha_quarter_round(temp_buffers[2], temp_buffers[7], temp_buffers[8], temp_buffers[13], temp_chunk, index, 12)
temp_buffers[3], temp_buffers[4], temp_buffers[9], temp_buffers[14] = self._chacha_quarter_round(temp_buffers[3], temp_buffers[4], temp_buffers[9], temp_buffers[14], temp_chunk, index, 14)
#print(f"After Round {index} {temp_buffers}")
#Update Buffers
for x in range(8):
#print(self.buffers[x], temp_buffers[x], temp_buffers[x+8], self.salt[x % 4])
self.buffers[x] ^= (temp_buffers[x] ^ temp_buffers[x+8] ^ self.salt[x % 4])
#print(self.buffers)
def hash(self, message):
#Setup message with padding and length data
byte_message = self._set_message(message)
        #Operate on each of the chunks
blocks = to_blocks(byte_message, (self.blocksize * 2))
#print(blocks)
for index, chunk in enumerate(blocks):
            #Fix edge case where the padding spills into the next block
if index == len(blocks) - 1:
#Calculate the last block size without padding
mod_num = (self.final_length >> 3) % (self.blocksize * 2)
#print(mod_num, (self.blocksize * 2) - ((self.blocksize * 2) // 8)-1, (self.blocksize * 2))
                #If adding the padding would push an extra block onto the end
                # i.e. if mod_num is between 55 and 64 for the 64-byte block case
if (mod_num > (self.blocksize * 2) - ((self.blocksize * 2) // 8) - 1 and mod_num <= (self.blocksize * 2)):
self.current_length = self.final_length - ((self.blocksize * 2) // 8)
self.xor_block = False
elif mod_num == 0:
self.xor_block = False
else:
self.current_length = self.final_length
            #Fix edge case where the padding spills into the next block
elif (self.current_length + (len(chunk) << 3)) >= self.final_length:
self.current_length = self.final_length
else:
#Update the current_length
self.current_length += (len(chunk) << 3)
#print(self.current_length, self.final_length)
#Compress the message Chunk
self._compress_chunk(chunk)
        #Convert integers to a byte string
output = b""
for x in self.buffers[:self.output_size]:
output += (x).to_bytes((self.blocksize // 8), byteorder='big')
return output
def hash_digest(self, message):
return self.hash(message).hex()
```
#### File: fescripts/libs/md2.py
```python
from fescripts.libs.cryptopals_lib import *
class MD2(object):
def __init__(self):
self.block_size = 16
self.buffer_size = 48
self.round_count = 18
self.buffer = bytearray([0 for _ in range(self.buffer_size)])
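        # 48-byte working state: bytes 0-15 hold the running digest, 16-31 the current block,
        # and 32-47 the block XOR digest (filled in by _hash_message_chunk)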
self.sbox = [41, 46, 67, 201, 162, 216, 124, 1, 61, 54, 84, 161, 236, 240, 6,
19, 98, 167, 5, 243, 192, 199, 115, 140, 152, 147, 43, 217, 188,
76, 130, 202, 30, 155, 87, 60, 253, 212, 224, 22, 103, 66, 111, 24,
138, 23, 229, 18, 190, 78, 196, 214, 218, 158, 222, 73, 160, 251,
245, 142, 187, 47, 238, 122, 169, 104, 121, 145, 21, 178, 7, 63,
148, 194, 16, 137, 11, 34, 95, 33, 128, 127, 93, 154, 90, 144, 50,
39, 53, 62, 204, 231, 191, 247, 151, 3, 255, 25, 48, 179, 72, 165,
181, 209, 215, 94, 146, 42, 172, 86, 170, 198, 79, 184, 56, 210,
150, 164, 125, 182, 118, 252, 107, 226, 156, 116, 4, 241, 69, 157,
112, 89, 100, 113, 135, 32, 134, 91, 207, 101, 230, 45, 168, 2, 27,
96, 37, 173, 174, 176, 185, 246, 28, 70, 97, 105, 52, 64, 126, 15,
85, 71, 163, 35, 221, 81, 175, 58, 195, 92, 249, 206, 186, 197,
234, 38, 44, 83, 13, 110, 133, 40, 132, 9, 211, 223, 205, 244, 65,
129, 77, 82, 106, 220, 55, 200, 108, 193, 171, 250, 36, 225, 123,
8, 12, 189, 177, 74, 120, 136, 149, 139, 227, 99, 232, 109, 233,
203, 213, 254, 59, 0, 29, 57, 242, 239, 183, 14, 102, 88, 208, 228,
166, 119, 114, 248, 235, 117, 75, 10, 49, 68, 80, 180, 143, 237,
31, 26, 219, 153, 141, 51, 159, 17, 131, 20]
def _set_message(self, message):
#Convert to bytes if not already
byte_message = bytearray(message)
#Get Padding Number
padding_number = self.block_size - (len(message) % self.block_size)
#Add the padding number to pad the input to the next block
for _ in range(padding_number):
byte_message.append(padding_number)
#Append Checksum
checksum_byte = 0
checksum = bytearray(0 for _ in range(self.block_size))
# For each Block
for block_num, block in enumerate(to_blocks(byte_message, self.block_size)):
# Calculate checksum of block using each byte of the block
for byte_num, byte in enumerate(block):
checksum_byte = self.sbox[byte ^ checksum_byte]
checksum[byte_num] = checksum_byte
byte_message += checksum
return byte_message
def _hash_message_chunk(self, chunk):
for bit_index, bit in enumerate(chunk):
self.buffer[self.block_size + bit_index] = bit
self.buffer[2 * self.block_size + bit_index] = self.buffer[self.block_size + bit_index] ^ self.buffer[bit_index]
#print(self.buffer)
# Rounds of encryption over the entire array. Current byte XOR'd with the previous (substituted) byte.
hash_byte = 0
for round_num in range(self.round_count):
for bit_index in range(self.buffer_size):
#print(self.buffer)
hash_byte = self.buffer[bit_index] ^ self.sbox[hash_byte]
self.buffer[bit_index] = hash_byte
hash_byte = (hash_byte + round_num) % len(self.sbox)
def hash(self, message):
#Setup message with padding and length data
byte_message = self._set_message(message)
        #Operate on each of the 16-byte chunks
for chunk in to_blocks(byte_message, self.block_size):
self._hash_message_chunk(chunk)
        #Return the first 16 bytes of the state as the digest
return self.buffer[:16]
def hash_digest(self, message):
return self.hash(message).hex()
```
#### File: fescripts/libs/PFable.py
```python
from enum import Enum
class fable_mode(Enum):
SLICED = "sliced"
BOLD_COLUMNS = "boldColumns"
class fable:
v = ""
def __init__(self,colNames,data,f_mode):
error = False
for i in data:
if (len(i) != len(colNames)):
error = True
break
if (error):
print("Data Error <::> len error")
else:
if (f_mode == fable_mode.SLICED):
data.insert(0,colNames)
self.v += self.drawRowLine(colNames,data)
for i in data:
for c in range(0,len(i)):
if (c < len(i) - 1):
if (len(i[c]) < self.getBiggestStringArrayLength(data,c)):
self.v += "| " + self.dataSpaceAdder(i[c],self.getBiggestStringArrayLength(data,c)) + " "
else:
self.v += "| " + i[c] + " "
else:
if (len(i[c]) < self.getBiggestStringArrayLength(data,c)):
self.v += "| " + self.dataSpaceAdder(i[c],self.getBiggestStringArrayLength(data,c)) + " |\n" + self.drawRowLine(colNames,data)
else:
self.v += "| " + i[c] + " |\n" + self.drawRowLine(colNames,data)
elif (f_mode == fable_mode.BOLD_COLUMNS):
data.insert(0,colNames)
self.v += self.drawRowLine(colNames,data)
for i in data:
for c in range(0,len(i)):
if (c < len(i) - 1):
if (len(i[c]) < self.getBiggestStringArrayLength(data,c)):
self.v += "| " + self.dataSpaceAdder(i[c],self.getBiggestStringArrayLength(data,c)) + " "
else:
self.v += "| " + i[c] + " "
else:
if (data[0] == i):
if (len(i[c]) < self.getBiggestStringArrayLength(data,c)):
self.v += "| " + self.dataSpaceAdder(i[c],self.getBiggestStringArrayLength(data,c)) + " |\n" + self.drawRowLine(colNames,data)
else:
self.v += "| " + i[c] + " |\n" + self.drawRowLine(colNames,data)
else:
if (len(i[c]) < self.getBiggestStringArrayLength(data,c)):
self.v += "| " + self.dataSpaceAdder(i[c],self.getBiggestStringArrayLength(data,c)) + " |\n"
else:
self.v += "| " + i[c] + " |\n"
def getBiggestStringArrayLength(self,data,index):
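        # width of the given column: length of the longest string at position `index` across all rows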
big = 0
for i in range(0,len(data)):
for c in range(0,len(data[i])):
if (c == index):
if (len(data[i][c]) >= big):
big = len(data[i][c])
return big
def drawRowLine(self,colNames,data):
v = ""
blens = [0] * len(colNames)
for i in range(0,len(data)):
for c in range(0,len(data[i])):
if (len(data[i][c]) >= blens[c]):
blens[c] = len(data[i][c])
maxLen = 0
for i in blens:
maxLen += i
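        # add room for the '|' separators and the single space of padding on each side of every column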
maxLen += (len(colNames) + 1) + (len(colNames) * 2)
for i in range(0,maxLen):
if (i < maxLen - 1):
v += "-"
else:
v += "-\n"
return v
def popData(self):
return self.v
def dataSpaceAdder(self,data,spaces):
for i in range(spaces - len(data)):
data += " "
return data
``` |
{
"source": "0xdia/Linear-programming",
"score": 3
} |
#### File: Linear-programming/Sudoku solver with the simplex algorithm/Structure.py
```python
class Structure:
def __init__(self, grid):
self.Grid = grid
self.WaitedAnswer = 405
self.MaxValue = 0
self.C = None
self.B = [45 for _ in range(27)]
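        # each of the 9 rows, 9 columns and 9 boxes of a solved grid sums to 1+2+...+9 = 45,
        # so the whole grid sums to 9 * 45 = 405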
self.Table = []
self.empty = []
# getting empty cells, and setting the B vector
x = 0
for i in range(9):
for j in range(9):
if grid[i][j] == 0:
self.empty.append((x, i, j))
x += 1
self.WaitedAnswer -= grid[i][j]
self.B[i] -= grid[i][j]
self.B[j+9] -= grid[i][j]
for i in range(0, 9, 3):
for j in range(0, 9, 3):
for a in range(i, i+3):
for b in range(j, j+3):
self.B[(a//3)*3 + b//3 + 18] -= grid[a][b]
# setting the table
self.Table = [[0 for i in range(len(self.empty))] for _ in range(27)]
# constraints variables
for elem in self.empty:
x, i, j = elem[0], elem[1], elem[2]
self.Table[i][x] = 1
self.Table[j+9][x] = 1
self.Table[(i//3)*3+j//3+18][x] = 1
# add the constraints <= 9
constraints = [ [0 for i in range(len(self.empty))]
for j in range(len(self.empty)) ]
for i in range(len(self.empty)):
constraints[i][i] = 1
self.Table += constraints
# add the constraints >= 1
_constraints = [ [0 for i in range(len(self.empty))]
for j in range(len(self.empty)) ]
for i in range(len(self.empty)):
_constraints[i][i] = 1
self.Table += _constraints
# adding the identity matrix (some slack vars and artificial vars)
identity = [ [0 for i in range(27+2*len(self.empty))]
for j in range(27+2*len(self.empty)) ]
for i in range(27+2*len(self.empty)):
identity[i][i] = 1
for i in range(len(self.Table)):
self.Table[i] += identity[i]
# adding the remaining of slack vars
remaining = [[0 for i in range(len(self.empty))]
for j in range(27+2*len(self.empty))]
for i in range(27+len(self.empty), len(remaining)):
remaining[i][i-27-len(self.empty)] = -1
for i in range(len(self.Table)):
self.Table[i] += remaining[i]
# completing B
self.B = [self.WaitedAnswer] + self.B + [9 for _ in range(len(self.empty))] + [1 for _ in range(len(self.empty))]
# setting C
self.C = [1 for i in range(len(self.empty))]+[0 for i in range(27+3*len(self.empty))]
for i in range(len(self.C)):
if self.C[i] == 0:
print('.', end='')
else:
print(self.C[i], end='')
print()
for i in range(len(self.Table)):
for j in range(len(self.Table[i])):
if self.Table[i][j] == 0:
print('.', end='')
else:
print(self.Table[i][j], end='')
print()
print()
def print_table(self):
print(end=' ')
for c in self.C:
print(f'{c}', end='')
print(f' {- self.MaxValue}')
for i in range(len(self.Table)):
print(f'{str(i).zfill(2)}:', end='')
for e in self.Table[i]:
print(e, end='')
print(end=f' {self.B[i]}\n')
def solve(self):
iteration = 0
while True:
print(f'at {iteration}')
iteration += 1
done = False
for i in range(len(self.B)):
if self.B[i] < 0:
done = True
break
if done:
break
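            # entering variable: the column with the largest positive objective coefficient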
indx_max = self.C.index(max(self.C))
if self.C[indx_max] <= 0:
break
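            # leaving variable: ratio test over the rows with a positive entry in the pivot column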
pivot_indx = -1
for j in range(len(self.Table)):
if self.Table[j][indx_max] <= 0:
continue
if pivot_indx == -1:
pivot_indx = j
continue
if self.B[j]//self.Table[j][indx_max] < self.B[pivot_indx]//self.Table[pivot_indx][indx_max]:
pivot_indx = j
if pivot_indx == -1:
break
            # normalise the pivot row; save the pivot value first, otherwise the in-place
            # division turns the pivot entry into 1 before B gets divided
            pivot_val = self.Table[pivot_indx][indx_max]
            for i in range(len(self.Table[pivot_indx])):
                self.Table[pivot_indx][i] //= pivot_val
            self.B[pivot_indx] //= pivot_val
self.pivoting(pivot_indx, indx_max)
def pivoting(self, pivot_indx, indx_max):
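        # eliminate the pivot column from every other row and from the objective row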
for j in range(len(self.Table)):
if pivot_indx == j:
continue
if self.Table[j][indx_max] == 0:
continue
P = self.Table[j][indx_max]
for k in range(len(self.Table[j])):
self.Table[j][k] -= P * self.Table[pivot_indx][k]
self.B[j] -= P * self.B[pivot_indx]
P = self.C[indx_max]
for k in range(len(self.C)):
self.C[k] -= P * self.Table[pivot_indx][k]
self.MaxValue -= P * self.B[pivot_indx]
def retrive_solution(self):
self.print_grid()
for elem in self.empty:
x, i, j = elem[0], elem[1], elem[2]
for _ in range(len(self.Table)):
if self.Table[_][x] == 1:
self.Grid[i][j] = self.B[_]
break
self.print_grid()
def print_grid(self):
print("*"*35)
for row in self.Grid:
print(row)
print("*"*35)
def check_row(self, i):
Found = [False for _ in range(10)]
for j in range(9):
if Found[self.Grid[i][j]]:
return False
Found[self.Grid[i][j]] = True
return True
def check_col(self, i):
Found = [False for _ in range(10)]
for j in range(9):
            if Found[self.Grid[j][i]]:
                return False
            Found[self.Grid[j][i]] = True
return True
def check_box(self, i, j):
Found = [False for _ in range(10)]
for x in range(i, i+3):
for y in range(j, j+3):
if Found[self.Grid[x][y]]:
return False
Found[self.Grid[x][y]] = True
return True
def verify_correctness(self):
for i in range(9):
for j in range(9):
if self.Grid[i][j]<1 or 9<self.Grid[i][j]:
return False
for i in range(9):
if not self.check_row(i):
return False
for i in range(9):
if not self.check_col(i):
return False
for i in range(0, 9, 3):
for j in range(0, 9, 3):
if not self.check_box(i, j):
return False
return True
def check_solution(self):
if self.verify_correctness():
print("I checked the solution, and it is correct")
else:
print("Not a solution")
``` |
{
"source": "0xdomyz/dwops",
"score": 3
} |
#### File: src/dwopt/dbo.py
```python
import sqlalchemy as alc
import pandas as pd
import numpy as np
import logging
import re
from dwopt._qry import _Qry
from dwopt.set_up import _make_iris_df, _make_mtcars_df
_logger = logging.getLogger(__name__)
def db(eng):
"""The :class:`database operator object <dwopt.dbo._Db>` factory.
Args
-----------
eng: str, or sqlalchemy.engine.Engine
A `sqlalchemy engine url <https://docs.sqlalchemy.org/en/14/
core/engines.html#database-urls>`_, which
combines the user name, password, database names, etc.
Alternatively a Database connection engine to be used.
Use the :func:`dwopt.make_eng` function to make engine.
Returns
-------
dwopt.dbo._Db
The relevant database operator object.
Examples
-------------
Produce a sqlite database operator object:
>>> from dwopt import db
>>> d = db("sqlite://")
>>> d.mtcars()
>>> d.run('select count(1) from mtcars')
count(1)
0 32
Produce a postgre database operator object:
>>> from dwopt import db
>>> url = "postgresql://dwopt_tester:1234@localhost/dwopt_test"
>>> db(url).iris(q=True).len()
150
Produce using engine object:
>>> from dwopt import db, make_eng
>>> eng = make_eng("sqlite://")
>>> db(eng).mtcars(q=1).len()
32
Produce an oracle database operator object:
>>> from dwopt import db, Oc
>>> url = "oracle://scott2:tiger@tnsname"
>>> isinstance(db(url), Oc)
True
"""
if isinstance(eng, str):
eng = alc.create_engine(eng)
else:
if not isinstance(eng, alc.engine.Engine):
raise ValueError("Invalid eng, either engine url or engine")
nme = eng.name
if nme == "postgresql":
return Pg(eng)
elif nme == "sqlite":
return Lt(eng)
elif nme == "oracle":
return Oc(eng)
else:
raise ValueError("Invalid engine, either postgre, sqlite, or oracle")
def Db(eng):
"""Alias for :func:`dwopt.db`"""
return db(eng)
class _Db:
"""
The base database operator class.
See examples for quick-start.
Instantiate the child classes for different databases via one of below ways:
* The factory function: :func:`dwopt.db`.
* The pre-instantiated objects on package import.
* The relevant child classes.
The child classes and the pre-instantiated objects:
========== =================== ========================
Database Child class Pre-instantiated object
========== =================== ========================
Postgre ``dwopt.Pg(eng)`` ``dwopt.pg``
Sqlite ``dwopt.Lt(eng)`` ``dwopt.lt``
Oracle ``dwopt.Oc(eng)`` ``dwopt.oc``
========== =================== ========================
Pre-instantiation uses the default credentials set-up prior by the user
via the :func:`dwopt.save_url` function.
Args
----------
eng: str, or sqlalchemy.engine.Engine
A `sqlalchemy engine url <https://docs.sqlalchemy.org/en/14/
core/engines.html#database-urls>`_, which
combines the user name, password, database names, etc.
Alternatively a Database connection engine to be used.
Use the :func:`dwopt.make_eng` function to make engine.
Attributes
----------
eng: sqlalchemy.engine.Engine
Underlying engine. Details see
`sqlalchemy.engine.Engine <https://docs.sqlalchemy.org/en/14/core/
connections.html#sqlalchemy.engine.Engine>`_
meta: sqlalchemy.schema.MetaData
Underlying metadata. Details see
`sqlalchemy.schema.MetaData <https://docs.sqlalchemy.org/en/14/core/
metadata.html#sqlalchemy.schema.MetaData>`_
Examples
--------
Instantiate and use a Sqlite database operator object via factory:
>>> from dwopt import db
>>> d = db("sqlite://")
>>> d.mtcars()
>>> d.run('select count(1) from mtcars')
count(1)
0 32
Use the pre-instantiated Sqlite database operator object:
>>> from dwopt import lt
>>> lt.iris()
>>> lt.qry('iris').len()
150
Instantiate and use a Postgre database operator object via the class:
>>> from dwopt import Pg
>>> p = Pg("postgresql://dwopt_tester:1234@localhost/dwopt_test")
>>> p.mtcars(q=1).len()
32
"""
def __init__(self, eng):
if isinstance(eng, str):
self.eng = alc.create_engine(eng)
else:
self.eng = eng
self.meta = alc.MetaData()
_nme = self.eng.name
if _nme == "postgresql":
self._dialect = "pg"
elif _nme == "sqlite":
self._dialect = "lt"
elif _nme == "oracle":
self._dialect = "oc"
def _bind_mods(self, sql, mods=None, **kwargs):
"""Apply modification to sql statement
Examples
-----------
import re
def f(sql, i, j):
return re.sub(f":{i}(?=[^a-zA-Z0-9]|$)", str(j), sql)
f("from tbl_:yr_0304", 'yr', 2017)
f(f("from tbl_:yr_:yr1_0304", 'yr', 2017), 'yr1', 2018)
f("from tbl_:yr_mth_tbl", 'yr_mth', 2017)
"""
if mods is None:
mods = kwargs
else:
mods.update(kwargs)
for i, j in mods.items():
sql = re.sub(f":{i}(?=[^a-zA-Z0-9]|$)", str(j), sql)
_logger.debug(f"replaced :{i} by {j}")
return sql
def _guess_dtype(self, dtype):
"""See :meth:`dwopt.dbo._Db.create`"""
if self._dialect == "pg":
if np.issubdtype(dtype, np.int64):
return alc.dialects.postgresql.BIGINT
elif np.issubdtype(dtype, np.float64):
return alc.Float(8)
elif self._dialect == "lt":
if np.issubdtype(dtype, np.float64):
return alc.REAL
elif np.issubdtype(dtype, np.datetime64):
return alc.String
elif self._dialect == "oc":
if np.issubdtype(dtype, np.int64):
return alc.dialects.oracle.NUMBER
elif np.issubdtype(dtype, np.float64):
return alc.Float
elif np.issubdtype(dtype, np.datetime64):
return alc.Date
else:
return alc.String(20)
else:
raise ValueError("invalid dialect, only 'pg', 'lt', or 'oc'")
if np.issubdtype(dtype, np.int64):
return alc.Integer
elif np.issubdtype(dtype, np.float64):
return alc.Float
elif np.issubdtype(dtype, np.datetime64):
return alc.DateTime
else:
return alc.String
def _parse_sch_tbl_nme(self, sch_tbl_nme, split=True):
"""Resolve schema dot table name name into lower case components.
Args
------
sch_tbl_nme: str
Table name in form ``my_schema1.my_table1`` or ``my_table1``.
split: bool
Split form or not.
Returns
----------
str or (str, str, str)
parsed names, all elements can be None.
Examples
---------
>>> import dwopt
>>> d = dwopt.dbo._Db
>>> f = lambda x:d._parse_sch_tbl_nme(d, x, split=True)
>>> g = lambda x:d._parse_sch_tbl_nme(d, x)
>>> for i in ['ab', 'Ab', 'ab.ab', 'Ab.Ab', 'Ab.Ab.Ab', '', None, 3]:
... print(f"{i = }, {f(i) = }, {g(i) = }")
i = 'ab', f(i) = ('ab', None, 'ab'), g(i) = 'ab'
i = 'Ab', f(i) = ('ab', None, 'ab'), g(i) = 'ab'
i = 'ab.ab', f(i) = ('ab.ab', 'ab', 'ab'), g(i) = 'ab.ab'
i = 'Ab.Ab', f(i) = ('ab.ab', 'ab', 'ab'), g(i) = 'ab.ab'
i = 'Ab.Ab.Ab', f(i) = ('ab.ab.ab', 'ab', 'ab.ab'), g(i) = 'ab.ab.ab'
i = '', f(i) = ('', None, ''), g(i) = ''
i = None, f(i) = (None, None, None), g(i) = None
i = 3, f(i) = (None, None, None), g(i) = None
"""
try:
clean = sch_tbl_nme.lower()
items = clean.split(".")
except AttributeError:
sch = None
tbl_nme = None
full_nme = None
else:
n = len(items)
if n == 1:
sch = None
tbl_nme = items[0]
full_nme = tbl_nme
elif n == 2:
sch = items[0]
tbl_nme = items[1]
full_nme = clean
else:
sch = items[0]
tbl_nme = ".".join(items[1:n])
full_nme = clean
if split:
return full_nme, sch, tbl_nme
else:
return full_nme
def _remove_sch_tbl(self, sch_tbl_nme):
"""Remove sch_tbl from meta.
Args
------
sch_tbl_nme: str
Table name in form ``my_schema1.my_table1`` or ``my_table1``.
Examples
-----------
Set-up::
from dwopt import pg
import sqlalchemy as alc
meta = pg.meta
First table entry into meta overwrites second one::
meta.clear()
alc.Table('test', meta, schema='test')
alc.Table('test.test', meta)
meta.tables
meta.clear()
alc.Table('test.test', meta)
alc.Table('test', meta, schema='test')
meta.tables
No schema is entered unless explicitly::
meta.clear()
alc.Table('test.test', meta, schema=None)
meta.clear()
alc.Table('test.test.test', meta)
meta.clear()
alc.Table('test.test', meta, schema='test')
        Items are removed by key, which is not schema-aware::
meta.clear()
alc.Table('test', meta)
alc.Table('test.test', meta)
alc.Table('test.test.test', meta)
meta.tables['test']
meta.tables['test.test']
meta.tables['test.test.test']
meta.tables
"""
if sch_tbl_nme in self.meta.tables:
self.meta.remove(self.meta.tables[sch_tbl_nme])
def _run(self, sql, args=None):
"""Run sql statement with argument passing"""
with self.eng.begin() as c:
_logger.info(f"running:\n{sql}")
if args is not None:
_logger.info(f"{len(args) = }")
r = c.execute(alc.text(sql), args)
else:
r = c.execute(sql)
_logger.info("done")
if r.returns_rows:
return pd.DataFrame(r.all(), columns=r.keys())
def add_pkey(self, sch_tbl_nme, pkey):
"""Make and run an add primary key statement.
Work on postgre and oracle.
Args
----------
sch_tbl_nme: str
Table name in form ``my_schema1.my_table1`` or ``my_table1``.
pkey : str
columns names in form "col1, col2, ...".
Examples
--------
>>> from dwopt import pg
>>> pg.mtcars()
>>> pg.add_pkey('mtcars', 'name')
>>> pg.qry('information_schema.constraint_table_usage').select(
... 'table_name, constraint_name').where(
... "table_schema = 'public'", "table_name = 'mtcars'").run()
table_name constraint_name
0 mtcars mtcars_pkey
"""
sql = f"alter table {sch_tbl_nme} add primary key ({pkey})"
return self.run(sql)
def create(self, sch_tbl_nme, dtypes=None, **kwargs):
"""
Make and run a create table statment.
Args
----------
sch_tbl_nme: str
Table name in form ``my_schema1.my_table1`` or ``my_table1``.
dtypes : {str:str}, optional
Dictionary of column names to data types mappings.
**kwargs :
Convenient way to add mappings.
Keyword to argument mappings will be added to the dtypes
dictionary.
Notes
-----
**Datatypes**
Datatypes vary across databases
(`postgre types <https://www.postgresql.org/docs/current/
datatype.html>`_,
`sqlite types <https://www.sqlite.org/datatype3.html>`_,
`oracle types <https://docs.oracle.com/en/database/oracle/
oracle-database/21/sqlqr/Data-Types.html>`_),
common example below:
========== =========== ======= ============
Type Postgre Sqlite Oracle
========== =========== ======= ============
integer bigint integer number
float float8 real float
string varchar(20) text varchar2(20)
datetime timestamp text date
date date text date
========== =========== ======= ============
**Other statements**
        The ``dtypes`` mappings also allow adding other clauses that form
        part of a create statement
(`sqlite other <https://sqlite.org/lang_createtable.html>`_,
`postgre other <https://www.postgresql.org/docs/current/
sql-createtable.html>`_,
`oracle other <https://docs.oracle.com/en/database/oracle/
oracle-database/21/sqlrf/CREATE-TABLE.html>`_).
For example a primary key constraint.
Examples
--------
>>> from dwopt import lt
>>> lt.drop('test')
>>> lt.create(
... 'test',
... {
... 'id': 'integer'
... ,'score': 'real'
... ,'amt': 'integer'
... ,'cat': 'text'
... ,'time': 'text'
... ,'constraint df_pk': 'primary key (id)'
... })
>>> lt.run("select * from test")
Empty DataFrame
Columns: [id, score, amt, cat, time]
Index: []
>>> lt.drop('test2')
>>> lt.create('test2', id='integer', score='real', cat='text')
>>> lt.run("select * from test2")
Empty DataFrame
Columns: [id, score, cat]
Index: []
"""
if dtypes is None:
dtypes = kwargs
else:
dtypes.update(kwargs)
cls = ""
for col, dtype in dtypes.items():
cls += f"\n ,{col} {dtype}"
self.run(f"create table {sch_tbl_nme}(" f"\n {cls[6:]}" "\n)")
def create_schema(self, sch_nme):
"""Make and run a create schema statement.
Works on postgre.
Args
----------
sch_nme: str
Schema name.
Examples
--------
>>> from dwopt import pg
>>> pg.create_schema('test')
>>> pg.iris('test.iris', q=1).len()
150
"""
try:
self.run(f"create schema {sch_nme}")
except Exception as ex:
if "already exists" in str(ex):
pass
else:
raise (ex)
def cwrite(self, df, sch_tbl_nme):
"""Create table and insert based on dataframe.
* Replace ``.`` by ``_`` in dataframe column names.
        * Data types inferred based on the :meth:`dwopt.dbo._Db.create` method notes.
          Also, date type columns are treated the same as str type columns.
        * For reversibility issues, see the :meth:`dwopt.dbo._Db.write` method notes.
Args
----------
df : pandas.DataFrame
Payload Dataframe with data to insert.
sch_tbl_nme: str
Table name in form ``my_schema1.my_table1`` or ``my_table1``.
Examples
--------
>>> import pandas as pd
>>> from dwopt import lt
>>> tbl = pd.DataFrame({'col1': [1, 2], 'col2': ['a', 'b']})
>>> lt.drop('test')
>>> lt.cwrite(tbl, 'test')
>>> lt.qry('test').run()
col1 col2
0 1 a
1 2 b
Attempt to write a dataframe into database and query back the same dataframe.
>>> from dwopt import pg
>>> from pandas.testing import assert_frame_equal
>>> df = pg.mtcars(q=1).run().sort_values('name').reset_index(drop=True)
>>> pg.drop('mtcars2')
>>> pg.cwrite(df, 'mtcars2')
>>> df_back = pg.qry('mtcars2').run().sort_values('name').reset_index(drop=True)
>>> assert_frame_equal(df_back, df)
"""
df = df.copy()
df.columns = [_.lower().replace(".", "_") for _ in df.columns]
sch_tbl_nme, sch, tbl_nme = self._parse_sch_tbl_nme(sch_tbl_nme)
self._remove_sch_tbl(sch_tbl_nme)
tbl = alc.Table(
tbl_nme,
self.meta,
*[alc.Column(col, self._guess_dtype(df[col].dtype)) for col in df.columns],
schema=sch,
)
_logger.info("creating table via sqlalchemy:")
for col in tbl.columns.items():
_logger.info(f"{col}")
tbl.create(self.eng)
_logger.info("done")
self.write(df, sch_tbl_nme)
def delete(self):
"""WIP"""
raise NotImplementedError
def drop(self, sch_tbl_nme):
"""Drop table if exist.
Args
----------
sch_tbl_nme: str
Table name in form ``my_schema1.my_table1`` or ``my_table1``.
See also
----------
:meth:`dwopt.dbo._Db.exist`
Examples
--------
>>> from dwopt import lt
>>> lt.drop('iris')
>>> lt.iris()
>>> lt.drop('iris')
>>> lt.exist('iris')
False
>>> from dwopt import pg
>>> pg.create_schema('test')
>>> tbl = 'test.iris'
>>> pg.iris(tbl)
>>> pg.exist(tbl)
True
>>> pg.drop(tbl)
>>> pg.exist(tbl)
False
"""
sch_tbl_nme, sch, tbl_nme = self._parse_sch_tbl_nme(sch_tbl_nme)
self._remove_sch_tbl(sch_tbl_nme)
with self.eng.connect() as conn:
_logger.info(f"dropping table via sqlalchemy: {sch_tbl_nme}")
alc.Table(tbl_nme, self.meta, schema=sch).drop(conn, checkfirst=True)
_logger.info("done")
def exist(self, sch_tbl_nme):
"""Check if table exist.
Args
------
sch_tbl_nme: str
Table name in form ``my_schema1.my_table1`` or ``my_table1``.
Returns
----------
bool
Examples
---------
>>> from dwopt import lt
>>> lt.iris()
>>> lt.drop('mtcars')
>>> lt.exist('iris')
True
>>> lt.exist('mtcars')
False
>>> from dwopt import pg as d
>>> d.create_schema('test')
>>> d.iris('test.iris')
>>> d.drop('test.mtcars')
>>> d.exist('test.iris')
True
>>> d.exist('test.mtcars')
False
"""
sch_tbl_nme, sch, tbl_nme = self._parse_sch_tbl_nme(sch_tbl_nme)
self._remove_sch_tbl(sch_tbl_nme)
try:
_logger.info(f"reflecting table via sqlalchemy: {sch_tbl_nme}")
self.meta.reflect(self.eng, schema=sch, only=[tbl_nme])
_logger.info("done")
return True
except Exception as ex:
if "Could not reflect: requested table(s) not available in Engine" in str(
ex
):
_logger.debug(ex)
return False
else:
raise ex
def iris(self, sch_tbl_nme="iris", q=False):
"""Create the iris test table on the database.
Drop and recreate if already exist.
Sourced from `UCI iris <https://archive.ics.uci.edu/ml/datasets/Iris/>`_.
        Args
-------
sch_tbl_nme: str
Table name in form ``my_schema1.my_table1`` or ``my_table1``.
Default ``iris``.
q: bool
Return query object or not. Default False.
Returns
-------
None or dwopt._qry._Qry
Query object with sch_tbl_nme loaded for convenience.
Examples
--------
>>> from dwopt import lt
>>> lt.iris()
>>> lt.run('select count(*) from iris')
count(*)
0 150
>>> from dwopt import lt
>>> lt.iris(q=True).valc('species', 'avg(petal_length)')
species n avg(petal_length)
0 sicolor 50 4.260
1 setosa 50 1.462
2 rginica 50 5.552
>>> from dwopt import pg
>>> pg.create_schema('test')
>>> pg.iris('test.iris', q=1).len()
150
"""
sch_tbl_nme = self._parse_sch_tbl_nme(sch_tbl_nme, split=False)
self.drop(sch_tbl_nme)
self.cwrite(_make_iris_df(), sch_tbl_nme)
if q:
return self.qry(sch_tbl_nme)
def list_cons(self):
"""
List all constraints.
Only works for postgre.
Uses the postgre `information_schema.constraint_table_usage
<https://www.postgresql.org/docs/current/infoschema-
constraint-table-usage.html>`_ table.
Returns
-------
pandas.DataFrame
Examples
----------
>>> from dwopt import pg
>>> pg.mtcars()
>>> pg.add_pkey('mtcars', 'name')
>>> pg.list_cons().loc[
... lambda x:(x.table_schema == 'public') & (x.table_name == 'mtcars'),
... ['table_name', 'constraint_name']
... ].reset_index(drop=True)
table_name constraint_name
0 mtcars mtcars_pkey
"""
if self._dialect == "pg":
sql = "SELECT * FROM information_schema.constraint_table_usage"
return self.run(sql)
else:
raise NotImplementedError
def list_tables(self, owner):
"""
List all tables on database or specified schema.
Args
----------
owner : str
Only applicable for oracle. Name of the schema(owner).
Returns
-------
pandas.DataFrame
Notes
-----
Postgre sql used, `information_schema.tables
<https://www.postgresql.org/docs/current/infoschema-tables.html>`_:
.. code-block:: sql
select
table_catalog,table_schema,table_name
,is_insertable_into,commit_action
from information_schema.tables
where table_schema
not in ('information_schema','pg_catalog')
Sqlite sql used, `sqlite_schema <https://www.sqlite.org/schematab.html>`_:
.. code-block:: sql
select * from sqlite_master
where type ='table'
and name NOT LIKE 'sqlite_%'
Oracle sql used, `all_tab_columns
<https://docs.oracle.com/en/database/oracle/oracle-database/21/
refrn/ALL_TAB_COLUMNS.html>`_:
.. code-block:: sql
select/*+PARALLEL (4)*/ owner,table_name
,max(column_name),min(column_name)
from all_tab_columns
where owner = ':owner'
group by owner,table_name
Examples
-----------
>>> from dwopt import lt
>>> lt.iris()
>>> lt.mtcars()
>>> lt.drop('test')
>>> lt.drop('test2')
>>> lt.list_tables().iloc[:,:-2]
type name tbl_name
0 table iris iris
1 table mtcars mtcars
"""
raise NotImplementedError
def mtcars(self, sch_tbl_nme="mtcars", q=False):
"""Create the mtcars test table on the database.
Drop and recreate if already exist.
Sourced from `R mtcars <https://www.rdocumentation.org/packages/datasets
/versions/3.6.2/topics/mtcars>`_.
        Args
-------
sch_tbl_nme: str
Table name in form ``my_schema1.my_table1`` or ``my_table1``.
Default ``mtcars``.
q: bool
Return query object or not. Default False.
Returns
-------
None or dwopt._qry._Qry
Query object with sch_tbl_nme loaded for convenience.
Examples
--------
>>> from dwopt import lt
>>> lt.mtcars()
>>> lt.run('select count(*) from mtcars')
count(*)
0 32
>>> from dwopt import lt
>>> lt.mtcars(q=True).valc('cyl', 'avg(mpg)')
cyl n avg(mpg)
0 8 14 15.100000
1 4 11 26.663636
2 6 7 19.742857
>>> from dwopt import pg
>>> pg.create_schema('test')
>>> pg.mtcars('test.mtcars', q=1).len()
32
"""
sch_tbl_nme = self._parse_sch_tbl_nme(sch_tbl_nme, split=False)
self.drop(sch_tbl_nme)
self.cwrite(_make_mtcars_df(), sch_tbl_nme)
if q:
return self.qry(sch_tbl_nme)
def qry(self, *args, **kwargs):
"""Make a :class:`query object <dwopt._qry._Qry>`.
Args
----------
*args :
Positional arguments of the :class:`dwopt._qry._Qry`.
**kwargs :
keyword arguments of the :class:`dwopt._qry._Qry`.
Returns
-------
dwopt._qry._Qry
Examples
--------
>>> from dwopt import lt
>>> lt.mtcars()
>>> lt.qry('mtcars').valc('cyl', 'avg(mpg)')
cyl n avg(mpg)
0 8 14 15.100000
1 4 11 26.663636
2 6 7 19.742857
"""
return _Qry(self, *args, **kwargs)
def run(self, sql=None, args=None, pth=None, mods=None, **kwargs):
"""
Run sql statement.
Features:
* Argument binding.
* Text replacement.
* Reading from sql script file.
Args
----------
sql : str, optional
The sql statement to run.
args : dict, or [dict], optional
Dictionary, or list of dictionaries, of argument name str to argument
data object mappings.
These argument data objects are passed via sqlalchemy to the database,
to function as data for the argument names.
See the notes and the examples section for details.
pth : str, optional
Path to sql script, ignored if the sql parameter is not None.
The script can hold a sql statement, for example a lengthy table
creation statement.
mods : dict, optional
Dictionary of modification name str to modification str mappings.
Replaces modification name in the sql by the respective
modification str.
See the notes and the examples section for details.
**kwargs :
Convenient way to add modification mappings.
Keyword to argument mappings will be added to the mods dictionary.
The keyword cannot be one of the positional parameter names.
Returns
-------
pandas.DataFrame or None
Returns a dataframe if the database returns any result.
Returns a dataframe with column names and zero rows if running a query
that returns zero rows.
Returns None otherwise, typically when running a DDL/DML statement.
Notes
-----
**The args and the mods parameter**
An argument name or a modification name is denoted in the sql by prefixing
a series of alphanumeric or underscore symbols with a colon symbol ``:``.
In addition, the end of the series for a modification name must be
followed by a non-alphanumeric symbol or the end of the line. This distinguishes
names such as ``:var`` and ``:var1``.
The args parameter binding is recommended where possible,
while the mods parameter method of text replacement gives
more flexibility when programmatically generating sql statements.
Examples
--------
Run sql:
>>> from dwopt import lt
>>> lt.iris()
>>> lt.run("select * from iris limit 1")
sepal_length sepal_width petal_length petal_width species
0 5.1 3.5 1.4 0.2 setosa
Run sql with argument passing:
>>> from dwopt import lt
>>> lt.iris()
>>> lt.run("select count(1) from iris where species = :x",
... args = {'x':'setosa'})
count(1)
0 50
Run sql with text modification:
>>> from dwopt import lt
>>> lt.iris()
>>> old = 'iris'
>>> new = 'iris2'
>>> lt.run("drop table if exists :var", var=new)
>>> lt.run("create table :x as select * from :y", mods={'x':new, 'y': old})
>>> lt.run("select count(1) from :tbl", tbl=new)
count(1)
0 150
Run from sql script:
>>> from dwopt import pg, make_test_tbl
>>> _ = make_test_tbl(pg)
>>> pg.run(pth = "E:/projects/my_sql_script.sql",
... my_run_dte = '2022-03-03',
... my_label = '20220303',
... threshold = 5)
count
0 137
Above runs the sql stored on ``E:/projects/my_sql_script.sql`` as below:
.. code-block:: sql
drop table if exists monthly_extract_:my_label;
create table monthly_extract_:my_label as
select * from test
where
dte = to_date(':my_run_dte','YYYY-MM-DD')
and score > :threshold;
select count(1) from monthly_extract_:my_label;
"""
if sql is None and pth is not None:
with open(pth) as f:
sql = f.read()
_logger.info(f"sql from:\n{pth}")
if mods is not None or len(kwargs) > 0:
sql = self._bind_mods(sql, mods, **kwargs)
return self._run(sql, args)
def table_cols(self, sch_tbl_nme):
"""
Show information on the specified table's columns.
Notes
-----
Postgre sql used, `information_schema.columns
<https://www.postgresql.org/docs/current/infoschema-columns.html>`_:
.. code-block:: sql
select column_name, data_type
from information_schema.columns
where table_schema = ':schema_nme'
and table_name = ':tbl_nme'
Oracle sql used, `all_tab_columns
<https://docs.oracle.com/en/database/oracle/oracle-database/21/
refrn/ALL_TAB_COLUMNS.html>`_:
.. code-block:: sql
select/*+PARALLEL (4)*/ *
from all_tab_columns
where owner = ':schema_nme'
and table_name = ':tbl_nme'
Parameters
----------
sch_tbl_nme : str
Table name in format: `schema.table`.
Returns
-------
pandas.DataFrame
Examples
-----------
>>> from dwopt import pg
>>> pg.iris()
>>> pg.table_cols('public.iris')
column_name data_type
0 sepal_length real
1 sepal_width real
2 petal_length real
3 petal_width real
4 species character varying
"""
raise NotImplementedError
def table_sizes(self):
"""
List sizes of all tables in the current schema.
Returns
-------
pandas.DataFrame
Notes
-----
Oracle sql used, `user_extents
<https://docs.oracle.com/en/database/oracle/oracle-database/21/refrn/
USER_EXTENTS.html>`_:
.. code-block:: sql
select/*+PARALLEL (4)*/
tablespace_name,segment_type,segment_name
,sum(bytes)/1024/1024 table_size_mb
from user_extents
group by tablespace_name,segment_type,segment_name
"""
raise NotImplementedError
def update(self):
"""WIP"""
raise NotImplementedError
def write(self, df, sch_tbl_nme):
"""Make and run a insert many statement.
**Pre-processing**
* Pandas Datetime64 columns are converted into object columns, and the
``pandas.NaT`` objects are converted into ``None``.
* Pandas Float64 columns are converted into object columns, and the
``pandas.NaN`` objects are converted into ``None``.
This should follow from a :meth:`dwopt.dbo._Db.create` call which sets up
the database table with table name, column names, intended data types,
and constraints.
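A minimal sketch of the pre-processing above for a single column, assuming a
dataframe ``df`` and a column name ``col``:
.. code-block:: python
df[col] = df[col].astype(object).where(~df[col].isna(), None)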
Args
----------
df: pandas.DataFrame
Payload Dataframe with data to insert.
sch_tbl_nme: str
Table name in form ``my_schema1.my_table1`` or ``my_table1``.
Notes
-----
**Reversibility**
Ideally, a python dataframe written to the database should allow the exact same
dataframe to be read back into python. Whether this holds depends on:
* The database.
* The data and object types on the dataframe.
* The data types on the database table.
With the set-up used in the :func:`dwopt.make_test_tbl` function,
the following results are obtained:
* The postgre table is reversible except for row order when selecting from the database.
Example fix/strategy for comparison:
.. code-block:: python
df.sort_values('id').reset_index(drop=True)
* Sqlite stores date/datetime as text, which causes a str type column to
be read back. One strategy is to convert from datetime and NaT to
str and None before insertion, and convert back to date and datetime
when reading.
Example fix/strategy for comparison:
.. code-block:: python
lt.write(
df.assign(
time=lambda x: x.time.astype(str).where(~x.time.isna(), None)),
"test2",
)
tbl = (
db.qry("test2").run()
.assign(
dte=lambda x: x["dte"].apply(
lambda x: datetime.date.fromisoformat(x) if x else None
),
time=lambda x: pd.to_datetime(x.time),
)
)
* Oracle has the same issue as postgre. In addition:
* Both date and datetime are stored in date format, and are read back
as datetime.
* Datetime milliseconds are lost on the database.
* Dates are stored in dd-MMM-yy format on the database.
* Dates passed into a varchar2 type column are stored in dd-MMM-yy format.
Example fix/strategy for comparison:
.. code-block:: python
tbl = db.run("select * from test2 order by id").assign(
dte=lambda x: x["dte"].apply(lambda x: x.date() if x else None)
)
df2 = df.assign(
time=lambda x: x["time"].apply(lambda x: x.replace(microsecond=0))
)
Examples
--------
Write dataframe into a table.
>>> import pandas as pd
>>> from dwopt import lt
>>> tbl = pd.DataFrame({'col1': [1, 2], 'col2': ['a', 'b']})
>>> lt.drop('test')
>>> lt.create('test', col1='int', col2='text')
>>> lt.write(tbl,'test')
>>> lt.run('select * from test')
col1 col2
0 1 a
1 2 b
Attempt to write a dataframe into database and query back the same dataframe.
>>> from dwopt import make_test_tbl
>>> from pandas.testing import assert_frame_equal
>>> pg, df = make_test_tbl('pg')
>>> pg.drop('test')
>>> pg.create(
... "test",
... dtypes={
... "id": "bigint primary key",
... "score": "float8",
... "amt": "bigint",
... "cat": "varchar(20)",
... "dte":"date",
... "time":"timestamp"
... }
... )
>>> pg.write(df, 'test')
>>> df_back = pg.qry('test').run().sort_values('id').reset_index(drop=True)
>>> assert_frame_equal(df_back, df)
"""
L = len(df)
sch_tbl_nme, sch, tbl_nme = self._parse_sch_tbl_nme(sch_tbl_nme)
if L == 0:
return
df = df.copy()
cols = df.columns.tolist()
for col in cols:
if np.issubdtype(df[col].dtype, np.datetime64) or np.issubdtype(
df[col].dtype, np.float64
):
df[col] = df[col].astype(object).where(~df[col].isna(), None)
self._remove_sch_tbl(sch_tbl_nme)
tbl = alc.Table(
tbl_nme, self.meta, *[alc.Column(col) for col in cols], schema=sch
)
_logger.info(f"running:\n{tbl.insert()}")
_ = df.to_dict("records")
_logger.info(f"args len={L}, e.g.\n{_[0]}")
with self.eng.connect() as conn:
conn.execute(
tbl.insert(),
_,
)
_logger.info("done")
def write_nodup(self, tbl, sch_tbl_nme, pkey, where=None):
"""Insert without creating duplicates.
Does the following:
1. Make and run a select statement with the optionally provided
where clause.
2. If step 1 returns any results and the payload table is non-empty,
remove duplicates from the payload table, using the provided primary
key columns to judge duplication (see the sketch after this list).
3. Make an insert statement on the non-duplicated payload data via the
:meth:`dwopt.dbo._Db.write` method.
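A minimal sketch of the de-duplication in step 2, assuming ``tbl`` is the
payload dataframe and ``db_tbl`` holds the existing primary key rows:
.. code-block:: python
dedup_tbl = (
tbl.merge(
db_tbl, how="left", on=pkey, validate="one_to_one", indicator=True
)
.loc[lambda x: x._merge == "left_only", :]
.drop(columns="_merge")
)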
Args
----------
tbl: pandas.DataFrame
Payload Dataframe with data to insert.
sch_tbl_nme: str
Table name in form ``my_schema1.my_table1`` or ``my_table1``.
pkey: [str]
Iterable of column name str.
where: str
where clause in str form. The ``where`` keyword is not needed.
See also
--------
:meth:`dwopt.dbo._Db.write`
Examples
--------
>>> import pandas as pd
>>> from dwopt import lt
>>> tbl = pd.DataFrame({'col1': [1, 2], 'col2': ['a', 'b']})
>>> tbl2 = pd.DataFrame({'col1': [1, 3], 'col2': ['a', 'c']})
>>> lt.drop('test')
>>> lt.create('test', col1='int', col2='text')
>>> lt.write(tbl, 'test')
>>> lt.write_nodup(tbl2, 'test', ['col1'], "col1 < 4")
>>> lt.run("select * from test")
col1 col2
0 1 a
1 2 b
2 3 c
"""
cols = ",".join(pkey)
where_cls = f"\nwhere {where}" if where else ""
sch_tbl_nme = self._parse_sch_tbl_nme(sch_tbl_nme, split=False)
db_tbl = self.run(f"select {cols} from {sch_tbl_nme} {where_cls}")
l_tbl = len(tbl)
l_db_tbl = len(db_tbl)
if l_tbl > 0 and l_db_tbl > 0:
dedup_tbl = (
tbl.merge(
db_tbl, how="left", on=pkey, validate="one_to_one", indicator=True
)
.loc[lambda x: x._merge == "left_only", :]
.drop(columns="_merge")
)
else:
dedup_tbl = tbl
_logger.debug(
f"write nodup: {l_tbl = }, {l_db_tbl = }" f", {len(dedup_tbl) = }"
)
self.write(dedup_tbl, sch_tbl_nme)
class Pg(_Db):
def list_cons(self):
sql = "SELECT * FROM information_schema.constraint_table_usage"
return self.run(sql)
def list_tables(self):
sql = (
"select table_catalog,table_schema,table_name"
"\n ,is_insertable_into,commit_action"
"\nfrom information_schema.tables"
"\nwhere table_schema"
"\n not in ('information_schema','pg_catalog')"
)
return self.run(sql)
def table_cols(self, sch_tbl_nme):
sch_tbl_nme, sch, tbl_nme = self._parse_sch_tbl_nme(sch_tbl_nme)
sql = (
"select column_name, data_type from information_schema.columns"
f"\nwhere table_schema = '{sch}' "
f"\nand table_name = '{tbl_nme}'"
)
return self.run(sql)
class Lt(_Db):
def list_tables(self):
sql = (
"select * from sqlite_master "
"\nwhere type ='table' "
"\nand name NOT LIKE 'sqlite_%' "
)
return self.run(sql)
class Oc(_Db):
def list_tables(self, owner):
sql = (
"select/*+PARALLEL (4)*/ owner,table_name"
"\n ,max(column_name),min(column_name)"
"\nfrom all_tab_columns"
f"\nwhere owner = '{owner.upper()}'"
"\ngroup by owner,table_name"
)
return self.run(sql)
def table_cols(self, sch_tbl_nme):
sch_tbl_nme, sch, tbl_nme = self._parse_sch_tbl_nme(sch_tbl_nme)
sql = (
"select/*+PARALLEL (4)*/ *"
"\nfrom all_tab_columns"
f"\nwhere owner = '{sch.upper()}'"
f"\nand table_name = '{tbl_nme.upper()}'"
)
return self.run(sql)
def table_sizes(self):
sql = (
"select/*+PARALLEL (4)*/"
"\n tablespace_name,segment_type,segment_name"
"\n ,sum(bytes)/1024/1024 table_size_mb"
"\nfrom user_extents"
"\ngroup by tablespace_name,segment_type,segment_name"
)
return self.run(sql)
```
#### File: dwops/tests/test_db_opt.py
```python
from pandas.testing import assert_frame_equal
from dwopt import Pg, Lt, Oc
import pandas as pd
import datetime
import pytest
def assert_frame_equal_reset_index(a, b):
assert_frame_equal(a.reset_index(drop=True), b.reset_index(drop=True))
def test_db_opt_run(test_tbl):
db, df = test_tbl
if isinstance(db, Lt):
exp = df.assign(
dte=lambda x: x.dte.astype(str).where(~x.dte.isna(), None),
time=lambda x: x.time.astype(str).where(~x.time.isna(), None),
).loc[lambda x: x.id <= 9, :]
elif isinstance(db, Pg):
exp = df.loc[lambda x: x.id <= 9, :]
elif isinstance(db, Oc):
exp = df.assign(
dte=lambda x: x["dte"].apply(
lambda x: datetime.datetime.combine(x, datetime.time()) if x else None
),
time=lambda x: x["time"].apply(lambda x: x.replace(microsecond=0)),
).loc[lambda x: x.id <= 9, :]
# import pdb;pdb.set_trace()
else:
raise ValueError
act = db.run("select * from test where id <= 9 order by id")
assert_frame_equal_reset_index(act, exp)
act = db.run("select * from test where id <= :id order by id", args={"id": 9})
assert_frame_equal_reset_index(act, exp)
act = db.run("select * from test where id <= :id order by id", mods={"id": 9})
assert_frame_equal_reset_index(act, exp)
act = db.run("select * from test where id <= :id order by id", id=9)
assert_frame_equal_reset_index(act, exp)
def test_db_opt_create(test_tbl, test_tbl2):
db, df = test_tbl
if isinstance(db, Pg):
db.create(
"test2",
dtypes={
"id": "bigint primary key",
"score": "float8",
"amt": "bigint",
"cat": "varchar(20)",
},
dte="date",
time="timestamp",
)
elif isinstance(db, Lt):
db.create(
"test2",
dtypes={
"id": "integer primary key",
"score": "real",
"amt": "integer",
"cat": "text",
},
dte="text",
time="text",
)
elif isinstance(db, Oc):
db.create(
"test2",
dtypes={
"id": "number primary key",
"score": "float",
"amt": "number",
"cat": "varchar2(20)",
},
dte="date",
time="timestamp",
)
else:
raise ValueError
db.run("insert into test2 select * from test")
act = db.run("select * from test2 order by id")
exp = db.run("select * from test order by id")
assert_frame_equal_reset_index(act, exp)
def test_db_opt_add_pkey(test_tbl, test_tbl2):
db, df = test_tbl
if isinstance(db, Pg):
db.run("create table test2 as select * from test")
db.add_pkey("test2", "id")
elif isinstance(db, Lt):
pass
elif isinstance(db, Oc):
pass
else:
raise ValueError
def test_db_opt_create_schema(test_tbl, test_tbl2):
db, df = test_tbl
if isinstance(db, Pg):
try:
db.run("drop schema test cascade")
except Exception as ex:
if "does not exist" in str(ex):
pass
else:
raise (ex)
db.create_schema("test")
db.run("create table test.test (col int)")
db.run("drop schema test cascade")
elif isinstance(db, Lt):
pass
elif isinstance(db, Oc):
pass
else:
raise ValueError
def test_db_opt_drop(test_tbl, test_tbl2):
db, df = test_tbl
db.run("create table test2 as select * from test")
db.drop("test2")
with pytest.raises(Exception) as e_info:
db.run("select count(1) from test2")
def test_db_opt_write_nodup(test_tbl, test_tbl2):
db, df = test_tbl
db.run("create table test2 as select * from test where 1=2")
if isinstance(db, Pg):
db.write(df, "test2")
db.write_nodup(df, "test2", ["id"])
tbl = db.run("select * from test2 order by id")
assert_frame_equal_reset_index(tbl, df)
elif isinstance(db, Lt):
db.write(
df.assign(time=lambda x: x.time.astype(str).where(~x.time.isna(), None)),
"test2",
)
db.write_nodup(
df.assign(time=lambda x: x.time.astype(str).where(~x.time.isna(), None)),
"test2",
["id"],
)
tbl = (
db.qry("test2")
.run()
.assign(
dte=lambda x: x["dte"].apply(
lambda x: datetime.date.fromisoformat(x) if x else None
),
time=lambda x: pd.to_datetime(x.time),
)
)
assert_frame_equal_reset_index(tbl, df)
elif isinstance(db, Oc):
db.write(
df,
"test2",
)
db.write_nodup(
df,
"test2",
["id"],
)
tbl = db.run("select * from test2 order by id").assign(
dte=lambda x: x["dte"].apply(lambda x: x.date() if x else None)
)
df2 = df.assign(
time=lambda x: x["time"].apply(lambda x: x.replace(microsecond=0))
)
assert_frame_equal_reset_index(tbl, df2)
else:
raise ValueError
def test_db_opt_cwrite(test_tbl, test_tbl2):
db, df = test_tbl
if isinstance(db, Pg):
db.cwrite(df, "test2")
tbl = db.run("select * from test2 order by id").assign(
dte=lambda x: x["dte"].apply(
lambda x: datetime.date.fromisoformat(x) if x else None
)
)
elif isinstance(db, Lt):
db.cwrite(
df.assign(time=lambda x: x.time.astype(str).where(~x.time.isna(), None)),
"test2",
)
tbl = (
db.qry("test2")
.run()
.assign(
dte=lambda x: x["dte"].apply(
lambda x: datetime.date.fromisoformat(x) if x else None
),
time=lambda x: pd.to_datetime(x.time),
)
)
elif isinstance(db, Oc):
db.cwrite(
df,
"test2",
)
tbl = db.run("select * from test2 order by id").assign(
dte=lambda x: x["dte"].apply(
lambda x: datetime.datetime.strptime(x, "%d-%b-%y").date()
if x
else None
)
)
df = df.assign(
time=lambda x: x["time"].apply(lambda x: x.replace(microsecond=0))
)
else:
raise ValueError
assert_frame_equal_reset_index(tbl, df)
```
#### File: dwops/tests/test_set_up.py
```python
import dwopt
from sqlalchemy.engine import Engine
import os
_DB_LST = ["pg", "lt", "oc"]
def test_set_up_save_url_config(creds):
for nme, url in zip(_DB_LST, creds):
dwopt.save_url(nme, url, "config")
assert dwopt.set_up._get_url(nme) == url
def test_set_up_save_url_environ(creds):
for nme, url in zip(_DB_LST, creds):
os.environ[f"dwopt_{nme}"] = url
assert dwopt.set_up._get_url(nme) == url
def test_set_up_save_url_ordering(creds):
for nme, url in zip(_DB_LST, creds):
dwopt.save_url(nme, url + "salt", "config")
os.environ[f"dwopt_{nme}"] = url
assert dwopt.set_up._get_url(nme) == url + "salt"
def test_set_up_make_eng():
act = dwopt.make_eng("sqlite://")
assert isinstance(act, Engine)
``` |
{
"source": "0xdomyz/sklearn_collection",
"score": 4
} |
#### File: sklearn_collection/higher_lvl_func/decorator.py
```python
import time
import functools
def hello(func):
"""make a func that print hello then execute a func
Examples
-------------
>>> def name():
... print("Alice")
...
>>> hello(name)()
Hello
Alice
>>> @hello
... def name2():
... print("Alice")
...
>>> name2()
Hello
Alice
"""
def inner():
print("Hello")
func()
return inner
def measure_time(func):
"""
Examples
-----------
>>> def myFunction(n):
... time.sleep(n)
... print("done")
...
>>> measure_time(myFunction)(0.5)
done
Function took 0.5110921859741211 seconds to run
"""
def wrapper(*arg, **kwargs):
t = time.time()
res = func(*arg, **kwargs)
print("Function took " + str(time.time() - t) + " seconds to run")
return res
return wrapper
def do_twice(function):
"""
Examples
------------
>>> do_twice(lambda x: print(f"do twice {x}"))("input")
do twice input
do twice input
"""
@functools.wraps(function)
def wrapper(*args, **kwargs):
function(*args, **kwargs)
return function(*args, **kwargs)
return wrapper
def timer(func):
"""Print the runtime of the decorated function
Examples
------------
>>> @timer
... def waste_some_time(num_times):
... for _ in range(num_times):
... sum([i**2 for i in range(10000)])
...
>>> waste_some_time(2)
Finished 'waste_some_time' in 0.0056 secs
"""
@functools.wraps(func)
def wrapper_timer(*args, **kwargs):
start_time = time.perf_counter()
value = func(*args, **kwargs)
end_time = time.perf_counter()
run_time = end_time - start_time
print(f"Finished {func.__name__!r} in {run_time:.4f} secs")
return value
return wrapper_timer
def debug(func):
"""Print the function signature and return value when called
Examples
---------
>>> import math
>>> factorial = debug(math.factorial)
>>> def approximate_e(terms=18):
... return sum(1 / factorial(n) for n in range(terms))
...
>>> approximate_e(4)
Calling factorial(0)
-> 1
Calling factorial(1)
-> 1
Calling factorial(2)
-> 2
Calling factorial(3)
-> 6
2.6666666666666665
"""
@functools.wraps(func)
def new_func(*args, **kwargs):
args_repr = [repr(a) for a in args]
kwargs_repr = [f"{k}={v!r}" for k, v in kwargs.items()]
signature = ", ".join(args_repr + kwargs_repr)
print(f"Calling {func.__name__}({signature})")
value = func(*args, **kwargs)
print(f"-> {value!r}")
return value
return new_func
if __name__ == "__main__":
pass
```
#### File: sklearn_collection/higher_lvl_func/overload.py
```python
from functools import singledispatch
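# functools.singledispatch dispatches on the runtime type of the first
# positional argument: `func` below is the generic fallback, and each
# `func.register` variant overrides it for the annotated argument type.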
@singledispatch
def func(input):
print("generic")
@func.register
def _(input: list):
print("list")
@func.register
def _(input: str):
print("str")
if __name__ == "__main__":
func("ab")
func(['a', 'b'])
pass
```
#### File: plot/seaborn_tute/overview.py
```python
import seaborn as sns
from matplotlib import pyplot as plt
import pandas as pd
def displot_cols(data, cols: list[str]=None):
"""
Examples
--------------
tips = sns.load_dataset("tips")
displot_cols(tips)
displot_cols(tips, ["time", "size"])
"""
if cols is None:
cols = data.columns
for col in cols:
sns.displot(data=data,x=col)
def displot_cats(data, cols: list[str]=None):
"""
Examples
--------------
tips = sns.load_dataset("tips")
displot_cats(tips)
displot_cats(tips, ["time", "smoker"])
"""
import itertools
if cols is None:
cols = data.select_dtypes(include=["object", "category"]).columns
for i, j in itertools.combinations(cols,2):
sns.displot(data, x=i, y=j)
if __name__ == "__main__":
# Apply the default theme
sns.set_theme()
tips = sns.load_dataset("tips")
tips.head()
tips.describe(include="all")
tips.select_dtypes(include="number").columns
tips.select_dtypes(exclude="number").columns
sns.pairplot(tips)
displot_cols(tips)
displot_cats(tips)
#series
sns.relplot(data=tips.loc[:,["total_bill"]])
#scatter
sns.relplot(data=tips,x="total_bill", y="tip")
sns.relplot(data=tips,x="total_bill", y="tip",hue="sex")
sns.relplot(data=tips,x="total_bill", y="tip", style="smoker")
sns.relplot(data=tips,x="total_bill", y="tip", col="time")
sns.relplot(data=tips,x="total_bill", y="tip", row="day")
sns.relplot(data=tips,x="total_bill", y="tip",
hue="sex", style="smoker",
col="time", row="day"
)
#scatter with size
sns.relplot(data=tips,x="total_bill", y="tip", size="size")
sns.relplot(data=tips,x="total_bill", y="tip", size="size",
hue="sex", style="smoker",
col="time", row="day"
)
#line
dots = sns.load_dataset("dots")
dots.head()
dots.describe(include="all")
sns.relplot(data=dots, x="time", y="firing_rate")
sns.relplot(data=dots, x="time", y="firing_rate", kind="line")
sns.relplot(data=dots, x="time", y="firing_rate", kind="line",
col="align", hue="choice", style="choice")
sns.relplot(data=dots, x="time", y="firing_rate", kind="line",
col="align", size="choice")
sns.relplot(
data=dots, kind="line",
x="time", y="firing_rate", col="align",
hue="choice", style="choice",
facet_kws=dict(sharex=False),
)
sns.relplot(data=dots, x="time", y="firing_rate", kind="line",
col="align", hue="coherence", style="coherence")
sns.relplot(data=dots, x="time", y="firing_rate", kind="line",
col="align", size="coherence")
sns.relplot(
data=dots, kind="line",
x="time", y="firing_rate", col="align",
hue="choice", size="coherence", style="choice",
facet_kws=dict(sharex=False),
)
#stat error bar
fmri = sns.load_dataset("fmri")
fmri.head()
fmri.describe(include="all")
sns.relplot(
data=fmri,
x="timepoint", y="signal", col="region",
hue="subject", style="event",
)
sns.relplot(
data=fmri, kind="line",
x="timepoint", y="signal", col="region",
hue="subject", style="event",
)
sns.relplot(
data=fmri, kind="line",
x="timepoint", y="signal", col="region",
hue="event", style="event",
)
sns.lmplot(data=tips, x="total_bill", y="tip", col="time", hue="smoker")
#distribution
tips.head()
tips.describe(include="all")
sns.displot(data=tips, x="total_bill", col="time", kde=True)
sns.displot(data=tips, kind="ecdf", x="total_bill", col="time", hue="smoker", rug=True)
#category
sns.catplot(data=tips, kind="swarm", x="day", y="total_bill", hue="smoker")
sns.catplot(data=tips, kind="violin", x="day", y="total_bill", hue="smoker", split=True)
sns.catplot(data=tips, kind="bar", x="day", y="total_bill", hue="smoker")
sns.catplot(data=tips, kind="bar", x="day", y="total_bill", hue="smoker", col="sex")
sns.catplot(data=tips, kind="bar", x="day", y="total_bill")
#composite views
penguins = sns.load_dataset("penguins")
penguins.head()
penguins.describe(include="all")
penguins.select_dtypes(include="number")
for col in penguins.columns:
sns.displot(data=penguins,x=col)
sns.pairplot(data=penguins)
for col in penguins.select_dtypes(include="object"):
sns.pairplot(data=penguins, hue=col)
sns.jointplot(data=penguins, x="flipper_length_mm", y="bill_length_mm", hue="species")
#complex
g = sns.PairGrid(penguins, hue="species", corner=True)
g.map_lower(sns.kdeplot, hue=None, levels=5, color=".2")
g.map_lower(sns.scatterplot, marker="+")
g.map_diag(sns.histplot, element="step", linewidth=0, kde=True)
g.add_legend(frameon=True)
g.legend.set_bbox_to_anchor((.61, .6))
#default vs custom
sns.relplot(
data=penguins,
x="bill_length_mm", y="bill_depth_mm", hue="body_mass_g"
)
sns.set_theme(style="ticks", font_scale=1.25)
g = sns.relplot(
data=penguins,
x="bill_length_mm", y="bill_depth_mm", hue="body_mass_g",
palette="crest", marker="x", s=100,
)
g.set_axis_labels("Bill length (mm)", "Bill depth (mm)", labelpad=10)
g.legend.set_title("Body mass (g)")
g.figure.set_size_inches(6.5, 4.5)
g.ax.margins(.15)
g.despine(trim=True)
```
#### File: sklearn_collection/shell/deploy.py
```python
import subprocess
from contextlib import contextmanager
import os
from pathlib import Path
@contextmanager
def cd(newdir):
"""Temporarily change the working directory; restore the previous one on exit."""
prevdir = os.getcwd()
os.chdir(os.path.expanduser(newdir))
try:
yield
finally:
os.chdir(prevdir)
def run_cmd_on_path(cmd: str, path: Path) -> subprocess.CompletedProcess[str]:
with cd(path):
return subprocess.run(cmd, shell=True, check=True)
def make_dirs_if_not_exist(path: Path, number_of_parents: int = 2):
"""
Examples
-----------
>>> make_dirs_if_not_exist(Path('.') / 'test' / 'test' / 'test')
>>> make_dirs_if_not_exist(Path('.') / 'test' / 'test' / 'test' / 'test')
FileNotFoundError: [WinError 3] ...
"""
for i in reversed(range(number_of_parents)):
if not path.parents[i].exists():
path.parents[i].mkdir()
if not path.exists():
path.mkdir()
if __name__ == "__main__":
root_path = (Path(__file__).parents[2] / "python_shell_example").resolve()
sub_paths = ["f1/sf1", "f2/sf2"]
paths = [root_path / sub_path for sub_path in sub_paths]
repo_name = "python_collection"
repo_address = f"[email protected]:0xdomyz/{repo_name}.git"
for path in paths:
print(f"on path: {path.as_posix()}")
make_dirs_if_not_exist(path)
if not (path / repo_name).exists():
run_cmd_on_path(f"git clone {repo_address}", path)
run_cmd_on_path("git pull origin master", path / repo_name)
run_cmd_on_path("dir", path)
``` |
{
"source": "0xE232FE/cruiser",
"score": 2
} |
#### File: cruiser/bot/configparser.py
```python
import logging
import requests
import yaml
from bot.listeners import TelegramListener, AlertListener
from bot.protocol import SendExpedition
from ogame.game.const import Ship, CoordsType, Resource
from ogame.game.model import Coordinates
from ogame.util import find_unique
def parse_bot_config(config):
""" @return Parameters to initialize OGameBot. """
bot_config = config.get('bot', {})
sleep_min = bot_config.get('sleep_min')
sleep_max = bot_config.get('sleep_max')
min_time_before_attack_to_act = bot_config.get('min_time_before_attack_to_act')
max_time_before_attack_to_act = bot_config.get('max_time_before_attack_to_act')
try_recalling_saved_fleet = bot_config.get('try_recalling_saved_fleet')
max_return_flight_time = bot_config.get('max_return_flight_time')
harvest_expedition_debris = bot_config.get('harvest_expedition_debris')
harvest_speed = bot_config.get('harvest_speed')
return _remove_empty_values({
'sleep_min': sleep_min,
'sleep_max': sleep_max,
'min_time_before_attack_to_act': min_time_before_attack_to_act,
'max_time_before_attack_to_act': max_time_before_attack_to_act,
'try_recalling_saved_fleet': try_recalling_saved_fleet,
'max_return_flight_time': max_return_flight_time,
'harvest_expedition_debris': harvest_expedition_debris,
'harvest_speed': harvest_speed
})
def parse_client_config(config):
""" @return Parameters to initialize OGame client. """
# Parse account information.
account_config = _require('account', config)
username = _require('username', account_config)
password = _require('password', account_config)
universe = _require('universe', account_config)
language = _require('language', account_config)
country = _require('country', account_config)
if isinstance(universe, int): # universe is server number
server_number = universe
else: # universe is server name so we have to find the corresponding number
servers = get_servers(timeout=10)
def get_server_data(data): return data['name'].casefold(), data['language'].casefold()
server = find_unique(
item=(universe.casefold(), language.casefold()),
iterable=servers,
key=get_server_data)
if not server:
raise ValueError(f'Failed to match {universe} ({language}) to any server.')
server_number = server['number']
logging.debug(f'Matched {universe} ({language}) to server {server_number}.')
variations = {"us": "en"}
if language in variations:
locale = f'{variations[language]}_{country}'
else:
locale = f'{language}_{country}'
# Parse client parameters
bot_config = config.get('bot', {})
request_timeout = bot_config.get('request_timeout')
delay_between_requests = bot_config.get('delay_between_requests')
return _remove_empty_values({
'username': username,
'password': password,
'language': language,
'server_number': server_number,
'locale': locale,
'request_timeout': request_timeout,
'delay_between_requests': delay_between_requests
})
def parse_listener_config(config):
""" @return List of listeners. """
bot_config = config.get('bot', {})
listeners_config = config.get('listeners', {})
active_listeners = bot_config.get('listeners', [])
listeners = [_initialize_listener(name, listeners_config.get(name))
for name in active_listeners]
return listeners
def parse_expedition_config(config):
""" @return List of expeditions. """
bot_config = config.get('bot', {})
expeditions_config = config.get('expeditions', {})
active_expeditions = bot_config.get('expeditions', [])
expeditions = [_initialize_expedition(id, expeditions_config.get(id))
for id in active_expeditions]
return expeditions
def get_servers(**kwargs):
""" @return List of all available servers. We use it for matching server name with its number. """
return requests.get('https://lobby.ogame.gameforge.com/api/servers', **kwargs).json()
def load_config(file):
""" Load configuration from yaml file. """
with open(file, 'r') as stream:
return yaml.safe_load(stream)
def _initialize_listener(name, config):
if name == 'telegram':
return TelegramListener(**config)
elif name == 'alert':
return AlertListener(**config)
else:
raise ValueError(f'Unknown listener: {name}')
def _initialize_expedition(id, config):
origin_galaxy, origin_system, origin_position = _require('origin', config)
origin_type_name = config.get('origin_type', 'planet')
origin_type = CoordsType.from_name(origin_type_name)
if not origin_type:
raise ValueError(f'Unknown origin type: {origin_type_name}')
dest_galaxy, dest_system, dest_position = config.get('dest', [origin_galaxy, origin_system, 16])
ships = {}
for ship_name, amount in _require('ships', config).items():
ship = Ship.from_name(ship_name)
if not ship:
raise ValueError(f'Unknown ship: {ship_name}')
ships[ship] = amount
cargo = {}
for resource_name, amount in config.get('cargo', {}).items():
resource = Resource.from_name(resource_name)
if not resource:
raise ValueError(f'Unknown resource: {resource_name}')
cargo[resource] = amount
speed = config.get('speed', 10)
holding_time = config.get('holding_time', 1)
repeat = config.get('repeat', 'forever')
origin = Coordinates(
galaxy=origin_galaxy,
system=origin_system,
position=origin_position,
type=origin_type)
dest = Coordinates(
galaxy=dest_galaxy,
system=dest_system,
position=dest_position,
type=CoordsType.planet)
expedition = SendExpedition(
id=id,
origin=origin,
dest=dest,
ships=ships,
speed=speed,
holding_time=holding_time,
repeat=repeat,
cargo=cargo)
return expedition
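# A hypothetical expedition entry in the yaml config, matching the keys read
# above (the ship name `small_cargo` is an assumption -- names must match the
# Ship enum names):
#
# bot:
#   expeditions: [exp1]
# expeditions:
#   exp1:
#     origin: [1, 205, 8]
#     origin_type: planet
#     ships:
#       small_cargo: 10
#     speed: 10
#     holding_time: 1
#     repeat: forever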
def _require(key, cfg, error_msg=None):
""" Ensures that `key` is in the config `cfg`. """
error_msg = error_msg or f'Missing field `{key}` in the config file.'
val = cfg.get(key)
if not val:
raise ValueError(error_msg)
return val
def _remove_empty_values(dictionary):
""" Remove None values from a dictionary. """
return {k: v for k, v in dictionary.items() if v is not None}
```
#### File: ogame/game/client.py
```python
import functools
import logging
import time
from typing import List, Union, Dict
from urllib.parse import urlparse
import requests
import yaml
from ogame.api.client import OGameAPI
from ogame.game.const import (
Mission,
CoordsType,
Resource,
Ship,
Technology,
CharacterClass
)
from ogame.game.model import (
Coordinates,
FleetEvent,
Planet,
FleetMovement,
Production,
Shipyard,
Research,
Resources,
Movement,
FleetDispatch,
Overview,
Galaxy,
GalaxyPosition
)
from ogame.util import (
join_digits,
parse_html,
extract_numbers,
str2bool,
tuple2timestamp,
find_first_between,
)
class NotLoggedInError(Exception):
pass
class ParseException(Exception):
pass
def keep_session(*, maxtries=1):
def decorator_keep_session(func):
@functools.wraps(func)
def wrapper_keep_session(self, *args, **kwargs):
tries = 0
while True:
try:
return func(self, *args, **kwargs)
except NotLoggedInError:
if tries < maxtries:
self.login()
tries += 1
else:
raise
return wrapper_keep_session
return decorator_keep_session
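# Usage sketch: decorate an OGame method that may raise NotLoggedInError;
# on that exception the wrapper logs in again and retries the call up to
# `maxtries` times (see e.g. `@keep_session()` on _request_game_page below).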
class OGame:
def __init__(self,
username: str,
password: str,
language: str,
server_number: int,
locale: str,
request_timeout: int = 10,
delay_between_requests: int = 0):
self.username = username
self.password = password
self.language = language.casefold()
self.server_number = server_number
self.locale = locale
self.request_timeout = request_timeout
self.delay_between_requests = delay_between_requests
self._session = requests.session()
self._session.headers.update({
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/73.0.3683.103 '
'Safari/537.36'
})
self._account = None
self._server_url = None
self._tech_dictionary = None
self._server_data = None
self._last_request_time = 0
@property
def api(self):
return OGameAPI(
server_number=self.server_number,
server_language=self.language)
@property
def server_data(self):
return self._server_data
def login(self):
# Get game configuration.
configuration = self._get_game_configuration()
game_env_id = configuration['connect']['gameEnvironmentId']
platform_game_id = configuration['connect']['platformGameId']
# Get token.
game_sess = self._get_game_session(game_env_id, platform_game_id)
token = game_sess['token']
# Set token cookie.
requests.utils.add_dict_to_cookiejar(self._session.cookies, {'gf-token-production': token})
# Find server.
accounts = self._get_accounts(token)
self._account = self._find_account(accounts)
if not self._account:
raise ValueError('Invalid server.')
# Login to the server.
login_url = self._get_login_url(token)
login_url = login_url['url']
if not self._login(login_url, token):
raise ValueError('Failed to log in.')
login_url_parsed = urlparse(login_url)
self._server_url = login_url_parsed.netloc
# Initialize tech dictionary from the API. It is used for
# translating ship names while parsing the movement page.
# Note that we assume that the dictionary won't change.
if self._tech_dictionary is None:
self._tech_dictionary = self.api.get_localization()['technologies']
# Cache server data.
if self._server_data is None:
self._server_data = self.api.get_server_data()['server_data']
def get_research(self,
delay: int = None) -> Research:
research_soup = self._get_research(delay=delay)
technology_elements = _find_at_least_one(research_soup, class_='technology')
technologies = {}
production = None
for technology_el in technology_elements:
level_el = _find_exactly_one(technology_el, class_='level')
technology_id = int(technology_el['data-technology'])
technology = Technology.from_id(technology_id)
if not technology:
logging.warning(f'Missing technology (id={technology_id})')
continue
level = int(level_el['data-value'])
bonus = join_digits(level_el['data-bonus'])
status = technology_el['data-status']
if status == 'active':
if production is not None:
logging.warning('Multiple productions encountered.')
else:
prod_start = int(technology_el['data-start'])
prod_end = int(technology_el['data-end'])
production = Production(
o=technology,
start=prod_start,
end=prod_end)
technologies[technology] = level + bonus
return Research(
technology=technologies,
production=production)
def get_shipyard(self,
planet: Union[Planet, int],
delay: int = None) -> Shipyard:
shipyard_soup = self._get_shipyard(planet, delay=delay)
ship_elements = _find_at_least_one(shipyard_soup, class_='technology')
ships = {}
production = None
for ship_el in ship_elements:
amount_el = _find_exactly_one(ship_el, class_='amount')
ship_id = int(ship_el['data-technology'])
ship = Ship.from_id(ship_id)
if not ship:
logging.warning(f'Missing ship (id={ship_id})')
continue
amount = int(amount_el['data-value'])
status = ship_el['data-status']
if status == 'active':
if production is not None:
logging.warning('Multiple productions encountered.')
else:
target_amount_el = _find_exactly_one(ship_el, class_='targetamount')
target_amount = int(target_amount_el['data-value'])
prod_start = int(ship_el['data-start'])
prod_end = int(ship_el['data-end'])
production = Production(
o=ship,
start=prod_start,
end=prod_end,
amount=target_amount - amount)
ships[ship] = amount
return Shipyard(
ships=ships,
production=production)
def get_resources(self,
planet: Union[Planet, int]) -> Resources:
def amount(res): return int(resources[res]['amount'])
def storage(res): return int(resources[res]['storage'])
resources = self._get_resources(
planet=planet,
delay=0)['resources']
amounts = {Resource.metal: amount('metal'),
Resource.crystal: amount('crystal'),
Resource.deuterium: amount('deuterium'),
Resource.energy: amount('energy'),
Resource.dark_matter: amount('darkmatter')}
storage = {Resource.metal: storage('metal'),
Resource.crystal: storage('crystal'),
Resource.deuterium: storage('deuterium')}
return Resources(
amount=amounts,
storage=storage)
def get_overview(self,
delay: int = None) -> Overview:
overview_soup = self._get_overview(delay=delay)
planet_list = overview_soup.find(id='planetList')
smallplanets = planet_list.findAll(class_='smallplanet')
character_class_el = overview_soup.find(id='characterclass').find('div')
character_class = None
if 'miner' in character_class_el['class']:
character_class = CharacterClass.collector
elif 'warrior' in character_class_el['class']:
character_class = CharacterClass.general
elif 'explorer' in character_class_el['class']:
character_class = CharacterClass.discoverer
planets = []
for planet_div in smallplanets:
planet_id = abs(join_digits(planet_div['id']))
planet_name = planet_div.find(class_='planet-name').text.strip()
galaxy, system, position = extract_numbers(planet_div.find(class_='planet-koords').text)
planet_coords = Coordinates(galaxy, system, position, CoordsType.planet)
planet = Planet(
id=planet_id,
name=planet_name,
coords=planet_coords)
planets.append(planet)
moon_el = planet_div.find(class_='moonlink')
if moon_el:
moon_url = moon_el['href']
moon_url_params = urlparse(moon_url).query.split('&')
moon_id = join_digits(next(param for param in moon_url_params if 'cp' in param))
moon_name = moon_el.img['alt']
moon_coords = Coordinates(galaxy, system, position, CoordsType.moon)
moon = Planet(
id=moon_id,
name=moon_name,
coords=moon_coords)
planets.append(moon)
return Overview(
planets=planets,
character_class=character_class)
def get_events(self) -> List[FleetEvent]:
event_list = self._get_event_list(delay=0)
event_elements = event_list.findAll(class_='eventFleet')
events = []
for event_el in event_elements:
if 'partnerInfo' in event_el['class']:
# part of an ACS attack
event_id = next(abs(join_digits(class_)) for class_ in event_el['class'] if 'union' in class_)
else:
event_id = abs(join_digits(event_el['id']))
arrival_time = int(event_el['data-arrival-time'])
return_flight = str2bool(event_el['data-return-flight'])
mission = Mission(int(event_el['data-mission-type']))
origin_galaxy, origin_system, origin_position = extract_numbers(event_el.find(class_='coordsOrigin').text)
origin_type_el = event_el.find(class_='originFleet').find('figure')
origin_type = self._parse_coords_type(origin_type_el)
origin = Coordinates(origin_galaxy, origin_system, origin_position, origin_type)
dest_galaxy, dest_system, dest_position = extract_numbers(event_el.find(class_='destCoords').text)
dest_type_el = event_el.find(class_='destFleet').find('figure')
dest_type = self._parse_coords_type(dest_type_el)
dest = Coordinates(dest_galaxy, dest_system, dest_position, dest_type)
player_id_el = event_el.find('a', class_='sendMail')
player_id = int(player_id_el['data-playerid']) if player_id_el else None
if return_flight:
fleet_movement_el = event_el.find(class_='icon_movement_reserve')
else:
fleet_movement_el = event_el.find(class_='icon_movement')
fleet_movement_tooltip_el = fleet_movement_el.find(class_='tooltip')
if fleet_movement_tooltip_el:
fleet_movement_soup = parse_html(fleet_movement_tooltip_el['title'])
fleet_info_el = fleet_movement_soup.find(class_='fleetinfo')
# Note that cargo parsing is currently not supported.
ships = self._parse_fleet_info(fleet_info_el, has_cargo=False)
else:
ships = None
event = FleetEvent(
id=event_id,
origin=origin,
dest=dest,
arrival_time=arrival_time,
mission=mission,
return_flight=return_flight,
ships=ships,
player_id=player_id)
events.append(event)
return events
def get_fleet_movement(self,
return_fleet: Union[FleetMovement, int] = None,
delay: int = None) -> Movement:
movement_soup = self._get_movement(return_fleet, delay=delay)
movement_el = movement_soup.find(id='movement')
timestamp = int(movement_soup.find('meta', {'name': 'ogame-timestamp'})['content'])
if not movement_el:
# when there is no movement the server redirects to fleet dispatch
slot_elements = movement_soup.find(id='slots').findAll('div', recursive=False)
used_fleet_slots, max_fleet_slots = extract_numbers(slot_elements[0].text)
used_expedition_slots, max_expedition_slots = extract_numbers(slot_elements[1].text)
return Movement(
fleets=[],
used_fleet_slots=used_fleet_slots,
max_fleet_slots=max_fleet_slots,
used_expedition_slots=used_expedition_slots,
max_expedition_slots=max_expedition_slots,
timestamp=timestamp)
else:
fleet_slots_el = movement_el.find(class_='fleetSlots')
expedition_slots_el = movement_el.find(class_='expSlots')
fleet_details_elements = movement_el.findAll(class_='fleetDetails')
used_fleet_slots, max_fleet_slots = extract_numbers(fleet_slots_el.text)
used_expedition_slots, max_expedition_slots = extract_numbers(expedition_slots_el.text)
fleets = []
for fleet_details_el in fleet_details_elements:
fleet_id = abs(join_digits(fleet_details_el['id']))
arrival_time = int(fleet_details_el['data-arrival-time'])
return_flight = str2bool(fleet_details_el['data-return-flight']) or False
mission = Mission(int(fleet_details_el['data-mission-type']))
origin_time = tuple2timestamp(extract_numbers(fleet_details_el.find(class_='origin').img['title']),
tz_offset=self.server_data.timezone_offset)
dest_time = tuple2timestamp(extract_numbers(fleet_details_el.find(class_='destination').img['title']),
tz_offset=self.server_data.timezone_offset)
if return_flight:
flight_duration = origin_time - dest_time
departure_time = dest_time - flight_duration
else:
departure_time = origin_time
end_time = int(fleet_details_el.find('span', class_='openDetails').a['data-end-time'])
reversal_el = fleet_details_el.find('span', class_='reversal')
if mission == Mission.expedition and not return_flight:
if not reversal_el:
# fleet is currently on expedition
holding = True
holding_time = end_time - departure_time
else:
# fleet is flying to expedition
holding = False
flight_duration = end_time - departure_time
holding_time = arrival_time - departure_time - 2 * flight_duration
else:
holding = False
holding_time = 0
origin_galaxy, origin_system, origin_position = extract_numbers(
fleet_details_el.find(class_='originCoords').text)
origin_type_el = fleet_details_el.find(class_='originPlanet').find('figure')
origin_type = self._parse_coords_type(origin_type_el)
origin = Coordinates(origin_galaxy, origin_system, origin_position, origin_type)
dest_galaxy, dest_system, dest_position = extract_numbers(
fleet_details_el.find(class_='destinationCoords').text)
dest_type_el = fleet_details_el.find(class_='destinationPlanet').find('figure')
if dest_type_el:
dest_type = self._parse_coords_type(dest_type_el)
else:
# destination type is a planet by default
dest_type = CoordsType.planet
dest = Coordinates(dest_galaxy, dest_system, dest_position, dest_type)
fleet_info_el = _find_exactly_one(fleet_details_el, class_='fleetinfo')
ships, cargo = self._parse_fleet_info(fleet_info_el)
fleet = FleetMovement(
id=fleet_id,
origin=origin,
dest=dest,
departure_time=departure_time,
arrival_time=arrival_time,
mission=mission,
return_flight=return_flight,
ships=ships,
cargo=cargo,
holding=holding,
holding_time=holding_time)
fleets.append(fleet)
return Movement(
fleets=fleets,
used_fleet_slots=used_fleet_slots,
max_fleet_slots=max_fleet_slots,
used_expedition_slots=used_expedition_slots,
max_expedition_slots=max_expedition_slots,
timestamp=timestamp)
def get_galaxy(self,
galaxy: int,
system: int,
planet: Union[Planet, int] = None,
delay: int = None,
content_only: bool = False) -> Galaxy:
def parse_activity(activity_el):
if activity_el:
if 'minute15' in activity_el['class']:
return '*'
elif 'showMinutes' in activity_el['class']:
activity = join_digits(activity_el.text)
return activity
else:
raise ValueError('Failed to parse activity')
if not content_only:
self._get_galaxy(
planet=planet,
galaxy=galaxy,
system=system,
delay=delay)
galaxy_content = self._get_galaxy_content(
galaxy=galaxy,
system=system,
delay=delay if content_only else 0)
galaxy_soup = parse_html(galaxy_content['galaxy'])
galaxy_rows = galaxy_soup.find_all(class_='row')
positions = []
for position, galaxy_row in enumerate(galaxy_rows, start=1):
planet_el = galaxy_row.find(attrs={'data-planet-id': True})
if not planet_el:
continue # empty position
planet_id = int(planet_el['data-planet-id'])
planet_activity_el = planet_el.find(class_='activity')
planet_activity = parse_activity(planet_activity_el)
planet_el = _find_exactly_one(galaxy_soup, id=f'planet{position}')
planet_name = planet_el.h1.span.text.strip()
planet = Planet(
id=planet_id,
name=planet_name,
coords=Coordinates(galaxy, system, position, CoordsType.planet))
player_el = _find_exactly_one(galaxy_row, class_='playername')
player_link = player_el.find('a')
planet_destroyed = False
if player_link:
player_id = join_digits(player_link['rel'][0])
if player_id == 99999:
planet_destroyed = True
else:
# it is on of our planets
player_id = None
moon_el = galaxy_row.find(attrs={'data-moon-id': True})
if moon_el:
moon_id = moon_el['data-moon-id']
moon_activity_el = moon_el.find(class_='activity')
moon_activity = parse_activity(moon_activity_el)
moon_destroyed = 'moon_c' in moon_el.a.div['class']
moon_el = _find_exactly_one(galaxy_soup, id=f'moon{position}')
moon_name = moon_el.h1.span.text.strip()
moon = Planet(
id=moon_id,
name=moon_name,
coords=Coordinates(galaxy, system, position, CoordsType.moon))
else:
moon = None
moon_activity = None
moon_destroyed = False
debris_el = galaxy_row.find(class_='debrisField')
if debris_el:
debris_el = _find_exactly_one(galaxy_soup, id=f'debris{position}')
metal_el, crystal_el = _find_exactly(debris_el, n=2, class_='debris-content')
metal_amount = join_digits(metal_el.text)
crystal_amount = join_digits(crystal_el.text)
debris = {Resource.metal: metal_amount,
Resource.crystal: crystal_amount}
else:
debris = None
galaxy_position = GalaxyPosition(
planet=planet,
planet_activity=planet_activity,
moon=moon,
moon_activity=moon_activity,
debris=debris,
player_id=player_id,
planet_destroyed=planet_destroyed,
moon_destroyed=moon_destroyed)
positions.append(galaxy_position)
expedition_debris_el = galaxy_soup.find(id='debris16')
expedition_debris = {}
if expedition_debris_el:
metal_el, crystal_el = _find_exactly(expedition_debris_el, n=2, class_='debris-content')
metal_amount = join_digits(metal_el.text)
crystal_amount = join_digits(crystal_el.text)
expedition_debris = {Resource.metal: metal_amount,
Resource.crystal: crystal_amount}
return Galaxy(
positions=positions,
expedition_debris=expedition_debris)
def get_fleet_dispatch(self,
planet: Union[Planet, int],
delay: int = None) -> FleetDispatch:
fleet_dispatch_soup = self._get_fleet_dispatch(planet, delay=delay)
token = find_first_between(str(fleet_dispatch_soup), left='fleetSendingToken = "', right='"')
timestamp = int(fleet_dispatch_soup.find('meta', {'name': 'ogame-timestamp'})['content'])
slot_elements = fleet_dispatch_soup.find(id='slots').findAll('div', recursive=False)
used_fleet_slots, max_fleet_slots = extract_numbers(slot_elements[0].text)
used_expedition_slots, max_expedition_slots = extract_numbers(slot_elements[1].text)
ship_elements = fleet_dispatch_soup.findAll(class_='technology')
ships = {}
for ship_el in ship_elements:
amount_el = _find_exactly_one(ship_el, class_='amount')
ship_id = int(ship_el['data-technology'])
ship = Ship.from_id(ship_id)
if not ship:
logging.warning(f'Missing ship (id={ship_id})')
continue
amount = int(amount_el['data-value'])
ships[ship] = amount
return FleetDispatch(
dispatch_token=token,
ships=ships,
used_fleet_slots=used_fleet_slots,
max_fleet_slots=max_fleet_slots,
used_expedition_slots=used_expedition_slots,
max_expedition_slots=max_expedition_slots,
timestamp=timestamp)
def send_fleet(self, *,
origin: Union[Planet, int],
dest: Union[Planet, Coordinates],
mission: Mission,
ships: Dict[Ship, int],
fleet_speed: int = 10,
resources: Dict[Resource, int] = None,
holding_time: int = None,
token: str = None,
delay: int = None) -> bool:
""" @return: FleetDispatch before sending the fleet. """
if isinstance(dest, Planet):
dest = dest.coords
if not resources:
resources = {}
if mission in [Mission.expedition, Mission.defend]:
holding_time = holding_time or 1
else:
if holding_time is not None:
logging.warning('Setting `holding_time` to 0')
holding_time = 0
if token is None:
token = self.get_fleet_dispatch(origin, delay=delay).dispatch_token
response = self._post_fleet_dispatch(
{'token': token,
'galaxy': dest.galaxy,
'system': dest.system,
'position': dest.position,
'type': dest.type.id,
'metal': resources.get(Resource.metal, 0),
'crystal': resources.get(Resource.crystal, 0),
'deuterium': resources.get(Resource.deuterium, 0),
'prioMetal': 1,
'prioCrystal': 2,
'prioDeuterium': 3,
'mission': mission.id,
'speed': fleet_speed,
'retreatAfterDefenderRetreat': 0,
'union': 0,
'holdingtime': holding_time,
**{f'am{ship.id}': amount for ship, amount in ships.items() if amount > 0}},
delay=delay)
success = response['success']
return success
def _get_overview(self,
planet: Union[Planet, int] = None,
delay: int = None):
if planet is not None and isinstance(planet, Planet):
planet = planet.id
return self._get_game_page(
params={'page': 'ingame',
'component': 'overview',
'cp': planet},
delay=delay)
def _get_research(self,
delay: int = None):
return self._get_game_page(
params={'page': 'ingame',
'component': 'research'},
delay=delay)
def _get_shipyard(self,
planet: Union[Planet, int] = None,
delay: int = None):
if planet is not None and isinstance(planet, Planet):
planet = planet.id
return self._get_game_page(
params={'page': 'ingame',
'component': 'shipyard',
'cp': planet},
delay=delay)
def _get_fleet_dispatch(self,
planet: Union[Planet, int] = None,
delay: int = None):
if planet is not None and isinstance(planet, Planet):
planet = planet.id
return self._get_game_page(
params={'page': 'ingame',
'component': 'fleetdispatch',
'cp': planet},
delay=delay)
def _get_movement(self,
return_fleet: Union[FleetMovement, int] = None,
delay: int = None):
if return_fleet is not None and isinstance(return_fleet, FleetMovement):
return_fleet = return_fleet.id
return self._get_game_page(
params={'page': 'ingame',
'component': 'movement',
'return': return_fleet},
delay=delay)
def _get_galaxy(self,
planet: Union[Planet, int] = None,
galaxy: int = None,
system: int = None,
delay: int = None):
if planet is not None and isinstance(planet, Planet):
planet = planet.id
return self._get_game_page(
params={'page': 'ingame',
'component': 'galaxy',
'cp': planet,
'galaxy': galaxy,
'system': system},
delay=delay)
def _get_galaxy_content(self,
galaxy: int,
system: int,
delay: int = None):
return self._post_game_resource(
resource='json',
params={'page': 'ingame',
'component': 'galaxyContent',
'ajax': 1},
headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'X-Requested-With': 'XMLHttpRequest'},
data={'galaxy': galaxy,
'system': system},
delay=delay)
def _get_resources(self,
planet: Union[Planet, int] = None,
delay: int = None):
if planet is not None and isinstance(planet, Planet):
planet = planet.id
return self._get_game_resource(
resource='json',
params={'page': 'fetchResources',
'cp': planet,
'ajax': 1},
headers={'X-Requested-With': 'XMLHttpRequest'},
delay=delay)
def _get_event_list(self,
delay: int = None):
return self._get_game_resource(
resource='html',
params={'page': 'componentOnly',
'component': 'eventList',
'ajax': 1},
headers={'X-Requested-With': 'XMLHttpRequest'},
delay=delay)
def _post_fleet_dispatch(self,
fleet_dispatch_data,
delay: int = None):
return self._post_game_resource(
resource='json',
params={'page': 'ingame',
'component': 'fleetdispatch',
'action': 'sendFleet',
'ajax': 1,
'asJson': 1},
headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'X-Requested-With': 'XMLHttpRequest'},
data=fleet_dispatch_data,
delay=delay)
def _get_game_configuration(self):
response = self._request(
method='get',
url='https://lobby.ogame.gameforge.com/config/configuration.js',
delay=0)
configuration_raw = response.text
configuration_obj_start = configuration_raw.find('{')
configuration_obj_raw = configuration_raw[configuration_obj_start:]
configuration = yaml.safe_load(configuration_obj_raw)
return configuration
def _get_game_session(self, game_env_id, platform_game_id):
response = self._request(
method='post',
url='https://gameforge.com/api/v1/auth/thin/sessions',
delay=0,
headers={'content-type': 'application/json'},
json={'autoGameAccountCreation': False,
'gameEnvironmentId': game_env_id,
'gfLang': self.language,
'identity': self.username,
'locale': self.locale,
'password': self.password,
'platformGameId': platform_game_id})
game_sess = response.json()
if 'error' in game_sess:
raise ValueError(game_sess['error'])
return game_sess
def _get_login_url(self, token):
response = self._request(
method='get',
url='https://lobby.ogame.gameforge.com/api/users/me/loginLink',
delay=0,
headers={'authorization': f'Bearer {token}'},
data={'id': self._account['id'],
'server[language]': self.language,
'server[number]': self.server_number,
'clickedButton': 'account_list'})
login_url = response.json()
if 'error' in login_url:
raise ValueError(login_url['error'])
return login_url
def _login(self, login_url, token):
self._request(
method='get',
url=login_url,
delay=0,
headers={'authorization': f'Bearer {token}'}
)
for cookie in self._session.cookies:
if cookie.name == 'PHPSESSID':
return True
return False
def _find_account(self, accounts):
for account in accounts:
acc_server_number = account['server']['number']
acc_server_language = account['server']['language'].casefold()
if self.server_number == acc_server_number and self.language == acc_server_language:
return account
def _get_accounts(self, token):
response = self._request(
method='get',
url='https://lobby.ogame.gameforge.com/api/users/me/accounts',
delay=0,
headers={'authorization': f'Bearer {token}'})
accounts = response.json()
if 'error' in accounts:
raise ValueError(accounts['error'])
return accounts
def _get_game_resource(self, resource, **kwargs):
return self._request_game_resource('get', resource, **kwargs)
def _post_game_resource(self, resource, **kwargs):
return self._request_game_resource('post', resource, **kwargs)
def _get_game_page(self, **kwargs):
return self._request_game_page(method='get', **kwargs)
def _post_game_page(self, **kwargs):
return self._request_game_page(method='post', **kwargs)
@keep_session()
def _request_game_page(self, method, **kwargs):
if not self._base_game_url:
raise NotLoggedInError()
response = self._request(
method=method,
url=self._base_game_url,
**kwargs)
soup = parse_html(response.content)
ogame_session = soup.find('meta', {'name': 'ogame-session'})
if not ogame_session:
raise NotLoggedInError()
return soup
@keep_session()
def _request_game_resource(self, method, resource, **kwargs):
if not self._base_game_url:
raise NotLoggedInError()
response = self._request(
method=method,
url=self._base_game_url,
**kwargs)
soup = parse_html(response.content)
# resource can be either a piece of html or json
# so a <head> tag in the html means that we landed on the login page
if soup.find('head'):
raise NotLoggedInError()
if resource == 'html':
return soup
elif resource == 'json':
return response.json()
else:
raise ValueError('unknown resource: ' + str(resource))
@property
def _base_game_url(self):
if self._server_url:
return f'https://{self._server_url}/game/index.php'
def _request(self, method, url, delay=None, **kwargs):
now = time.time()
if delay is None:
delay = self.delay_between_requests
if delay:
resume_time = self._last_request_time + delay
if now < resume_time:
time.sleep(resume_time - now)
timeout = kwargs.pop('timeout', self.request_timeout)
response = self._session.request(method, url, timeout=timeout, **kwargs)
self._last_request_time = time.time()
return response
@staticmethod
def _parse_coords_type(figure_el):
if 'planet' in figure_el['class']:
return CoordsType.planet
elif 'moon' in figure_el['class']:
return CoordsType.moon
elif 'tf' in figure_el['class']:
return CoordsType.debris
else:
raise ValueError('Failed to parse coordinate type.')
def _parse_fleet_info(self, fleet_info_el, has_cargo=True):
def is_resource_cell(cell_index): return cell_index >= len(fleet_info_rows) - 3 # last 3 rows are resources
def get_resource_from_cell(cell_index): return list(Resource)[3 - len(fleet_info_rows) + cell_index]
fleet_info_rows = fleet_info_el.find_all(lambda el: _find_exactly_one(el, raise_exc=False, class_='value'))
ships = {}
cargo = {}
for i, row in enumerate(fleet_info_rows):
name_col, value_col = _find_exactly(row, n=2, name='td')
amount = join_digits(value_col.text)
if has_cargo and is_resource_cell(i):
resource = get_resource_from_cell(i)
cargo[resource] = amount
else:
tech_name = name_col.text.strip()[:-1] # remove colon at the end
tech_id = self._tech_dictionary.get(tech_name)
if not tech_id:
if has_cargo:
raise ParseException(f'Unknown ship (name={tech_name}) found while parsing.')
else:
# We are not sure whether this was a mistake or cargo element so just skip it.
continue
ship = Ship(tech_id)
ships[ship] = amount
if has_cargo:
return ships, cargo
else:
return ships
def _find_exactly_one(root, raise_exc=True, **kwargs):
""" Find exactly one element. """
descendants = _find_exactly(root, n=1, raise_exc=raise_exc, **kwargs)
if raise_exc or descendants:
return descendants[0]
def _find_at_least_one(root, **kwargs):
""" Find at least one element. """
descendants = root.find_all(**kwargs)
if len(descendants) == 0:
raise ParseException(f'Failed to find any descendants of:\n'
f'element: {root.attrs}\n'
f'query: {kwargs}')
return descendants
def _find_exactly(root, n, raise_exc=True, **kwargs):
""" Find exactly `n` elements. By default raise ParseException if exactly `n` elements were not found. """
limit = kwargs.get('limit')
if limit and n > limit:
raise ValueError(f'An exact number of elements (n={n}) will never be matched '
f'because of the limit (limit={limit}).')
query = dict(**kwargs)
# exception will be thrown regardless of the number of elements,
# so don't match more than necessary
query.update({'limit': n + 1})
descendants = root.find_all(**query)
    if len(descendants) != n:
        if raise_exc:
            raise ParseException(f'Failed to find exactly (n={n}) descendant(s) of:\n'
                                 f'element: {root.attrs}\n'
                                 f'query: {kwargs}')
        return None
    return descendants
``` |
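The private helpers above cover the whole lobby login flow: fetch the Gameforge configuration, open a thin auth session, pick the account matching the configured server, request a login link, and follow it until the PHPSESSID cookie appears. A minimal orchestration sketch, assuming the client instance already carries username, password, language and server number; the payload keys marked below are assumptions, not verified against the live API:

```python
# Sketch only: plausible wiring of the login helpers shown above.
def login(client):
    config = client._get_game_configuration()
    game_env_id = config['gameEnvironmentId']          # assumed key
    platform_game_id = config['platformGameId']        # assumed key
    session = client._get_game_session(game_env_id, platform_game_id)
    token = session['token']                           # assumed key
    client._account = client._find_account(client._get_accounts(token))
    login_url = client._get_login_url(token)['url']    # assumed key
    return client._login(login_url, token)
```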
{
"source": "0xE28891/PlexTraktSync",
"score": 2
} |
#### File: 0xE28891/PlexTraktSync/pytrakt_extensions.py
```python
from trakt.core import get
from trakt.tv import TVEpisode
@get
def get_liked_lists():
data = yield 'users/likes/lists'
retVal = []
for lst in data:
thisList = {}
thisList['listname'] = lst['list']['name']
thisList['username'] = lst['list']['user']['ids']['slug']
retVal.append(thisList)
yield retVal
@get
def lookup_table(show):
# returns all seasons and episodes with one single call
data = yield 'shows/{}/seasons?extended=episodes'.format(show.trakt)
retVal = {}
for season in data:
eps = {}
if 'episodes' in season.keys():
for episode in season['episodes']:
eps[episode['number']] = LazyEpisode(show, season['number'], episode['number'], episode['ids'])
retVal[season['number']] = eps
yield retVal
class LazyEpisode():
def __init__(self, show, season, number, ids):
self.show = show
self.season = season
self.number = number
self.ids = ids
self._instance = None
@property
def instance(self):
if self._instance is None:
self._instance = TVEpisode(self.show, self.season, number=self.number, **self.ids)
return self._instance
@get
def allwatched():
# returns a ShowProgress object containing all watched episodes
data = yield 'sync/watched/shows'
yield AllWatchedShows(data)
@get
def watched(show_id):
# returns a ShowProgress object containing the watched states of the passed show
data = yield 'shows/{}/progress/watched?specials=true'.format(show_id)
yield ShowProgress(**data)
@get
def collected(show_id):
# returns a ShowProgress object containing the watched states of the passed show
data = yield 'shows/{}/progress/collection?specials=true'.format(show_id)
#print(data)
yield ShowProgress(**data)
class EpisodeProgress():
def __init__(self, number=0, aired=0, plays=False, completed=False, last_watched_at=None, collected_at=None):
self.number = number
self.aired = aired
self.completed = completed
if plays > 0:
self.completed = True
self.last_watched_at = last_watched_at
self.collected_at = collected_at
#print("Episode {} completed: {}".format(number, completed))
def get_completed(self):
return self.completed
class SeasonProgress():
def __init__(self, number=0, title=None, aired=0, completed=False, episodes=None):
self.number = number
self.aired = aired
self.episodes = {}
for episode in episodes:
prog = EpisodeProgress(**episode)
self.episodes[prog.number] = prog
self.completed = completed == len(episodes)
#print("Season {} completed: {}".format(number, self.completed))
def get_completed(self, episode):
if self.completed:
return True
elif episode not in self.episodes.keys():
return False
return self.episodes[episode].get_completed()
class ShowProgress():
def __init__(self, aired=0, plays=None, completed=False, last_watched_at=None, last_updated_at=None, reset_at=None, show=None, seasons=None, hidden_seasons=None, next_episode=0, last_episode=0, last_collected_at=None):
self.aired = aired
self.last_watched_at = last_watched_at
self.last_updated_at = last_updated_at
self.last_collected_at = last_collected_at
self.reset_at = reset_at
self.hidden_seasons = hidden_seasons
self.next_episode = next_episode
self.last_episode = last_episode
self.trakt = show['ids']['trakt'] if show else None
self.slug = show['ids']['slug'] if show else None
self.seasons = {}
allCompleted = True
for season in seasons:
prog = SeasonProgress(**season)
self.seasons[prog.number] = prog
allCompleted = allCompleted and prog.completed
self.completed = allCompleted if len(seasons) > 0 else False
#print("Series completed: {}".format(self.completed))
def get_completed(self, season, episode):
if self.completed:
return True
elif season not in self.seasons.keys():
return False
return self.seasons[season].get_completed(episode)
class AllWatchedShows():
def __init__(self, shows=None):
self.shows = {}
for show in shows:
prog = ShowProgress(**show)
self.shows[prog.trakt] = prog
def get_completed(self, trakt_id, season, episode):
if trakt_id not in self.shows.keys():
return False
elif season not in self.shows[trakt_id].seasons.keys():
return False
return self.shows[trakt_id].seasons[season].get_completed(episode)
if __name__ == "__main__":
print(get_liked_lists())
``` |
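A short usage sketch for the progress helpers above, assuming the trakt session is already authenticated; the show id is a placeholder:

```python
# Sketch: check whether S01E03 of a show is marked as watched on Trakt.
show_trakt_id = 1390  # placeholder Trakt show id
progress = watched(show_trakt_id)            # ShowProgress object
print(progress.get_completed(1, 3))          # True if S01E03 is watched
everything = allwatched()                    # AllWatchedShows object
print(everything.get_completed(show_trakt_id, 1, 3))
```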
{
"source": "0xean/yearn-exporter",
"score": 3
} |
#### File: yearn-exporter/scripts/historical_treasury_exporter.py
```python
import logging
from datetime import datetime, timezone
from yearn.historical_helper import export_historical, time_tracking
from yearn.utils import closest_block_after_timestamp
from yearn.treasury.treasury import Treasury
logger = logging.getLogger('yearn.historical_treasury_exporter')
def main():
start = datetime.now(tz=timezone.utc)
# end: 2020-02-12 first treasury tx
end = datetime(2020, 7, 21, tzinfo=timezone.utc)
export_historical(
start,
end,
export_chunk,
export_snapshot,
'treasury_assets'
)
def export_chunk(chunk, export_snapshot_func):
treasury = Treasury()
for snapshot in chunk:
ts = snapshot.timestamp()
export_snapshot_func(
{
'treasury': treasury,
'snapshot': snapshot,
'ts': ts,
'exporter_name': 'historical_treasury'
}
)
@time_tracking
def export_snapshot(treasury, snapshot, ts, exporter_name):
block = closest_block_after_timestamp(ts)
assert block is not None, "no block after timestamp found"
treasury.export(block, ts)
logger.info("exported treasury snapshot %s", snapshot)
```
#### File: prices/uniswap/v1.py
```python
from brownie import chain, interface
from brownie.exceptions import ContractNotFound
from cachetools.func import ttl_cache
from yearn.exceptions import UnsupportedNetwork
from yearn.networks import Network
from yearn.prices.constants import usdc
from yearn.utils import Singleton, contract
addresses = {
Network.Mainnet: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95',
}
class UniswapV1(metaclass=Singleton):
def __init__(self):
if chain.id not in addresses:
raise UnsupportedNetwork('uniswap v1 is not supported on this network')
self.factory = contract(addresses[chain.id])
@ttl_cache(ttl=600)
def get_price(self, asset, block=None):
try:
asset = contract(asset)
exchange = interface.UniswapV1Exchange(self.factory.getExchange(asset))
eth_bought = exchange.getTokenToEthInputPrice(10 ** asset.decimals(), block_identifier=block)
exchange = interface.UniswapV1Exchange(self.factory.getExchange(usdc))
usdc_bought = exchange.getEthToTokenInputPrice(eth_bought, block_identifier=block) / 1e6
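            # the quote passes through two hops (token -> ETH -> USDC), each net of
            # Uniswap V1's 0.3% swap fee, so dividing by 0.997**2 below adds the fees
            # back to approximate the fee-free price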
fees = 0.997 ** 2
return usdc_bought / fees
except (ContractNotFound, ValueError) as e:
pass
uniswap_v1 = None
try:
uniswap_v1 = UniswapV1()
except UnsupportedNetwork:
pass
``` |
{
"source": "0xeb/src",
"score": 4
} |
#### File: examples/core/list_function_items.py
```python
import ida_bytes
import ida_kernwin
import ida_funcs
import ida_ua
class logger_t(object):
class section_t(object):
def __init__(self, logger, header):
self.logger = logger
self.logger.log(header)
def __enter__(self):
self.logger.indent += 2
return self
def __exit__(self, tp, value, traceback):
self.logger.indent -= 2
if value:
return False # Re-raise
def __init__(self):
self.indent = 0
def log(self, *args):
print(" " * self.indent + "".join(args))
def log_ea(self, ea):
F = ida_bytes.get_flags(ea)
parts = ["0x%08x" % ea, ": "]
if ida_bytes.is_code(F):
parts.append("instruction (%s)" % ida_ua.print_insn_mnem(ea))
if ida_bytes.is_data(F):
parts.append("data")
if ida_bytes.is_tail(F):
parts.append("tail")
if ida_bytes.is_unknown(F):
parts.append("unknown")
if ida_funcs.get_func(ea) != ida_funcs.get_fchunk(ea):
parts.append(" (in function chunk)")
self.log(*parts)
def main():
# Get current ea
ea = ida_kernwin.get_screen_ea()
pfn = ida_funcs.get_func(ea)
if pfn is None:
print("No function defined at 0x%x" % ea)
return
func_name = ida_funcs.get_func_name(pfn.start_ea)
logger = logger_t()
logger.log("Function %s at 0x%x" % (func_name, ea))
with logger_t.section_t(logger, "Code items:"):
for item in pfn:
logger.log_ea(item)
with logger_t.section_t(logger, "'head' items:"):
for item in pfn.head_items():
logger.log_ea(item)
with logger_t.section_t(logger, "Addresses:"):
for item in pfn.addresses():
logger.log_ea(item)
with logger_t.section_t(logger, "Function chunks:"):
for chunk in ida_funcs.func_tail_iterator_t(pfn):
logger.log("%s chunk: 0x%08x..0x%08x" % (
"Main" if chunk.start_ea == pfn.start_ea else "Tail",
chunk.start_ea,
chunk.end_ea))
if __name__ == '__main__':
main()
```
#### File: examples/hexrays/curpos_details.py
```python
import ida_hexrays
import ida_kernwin
class curpos_details_t(ida_hexrays.Hexrays_Hooks):
def curpos(self, v):
parts = ["cpos={lnnum=%d, x=%d, y=%d}" % (v.cpos.lnnum, v.cpos.x, v.cpos.y)]
uie = ida_kernwin.input_event_t()
if ida_kernwin.get_user_input_event(uie):
kind_str = {
ida_kernwin.iek_shortcut : "shortcut",
ida_kernwin.iek_key_press : "key_press",
ida_kernwin.iek_key_release : "key_release",
ida_kernwin.iek_mouse_button_press : "mouse_button_press",
ida_kernwin.iek_mouse_button_release : "mouse_button_release",
ida_kernwin.iek_mouse_wheel : "mouse_wheel",
}[uie.kind]
#
# Retrieve input kind-specific information
#
if uie.kind == ida_kernwin.iek_shortcut:
payload_str = "shortcut={action_name=%s}" % uie.shortcut.action_name
elif uie.kind in [
ida_kernwin.iek_key_press,
ida_kernwin.iek_key_release]:
payload_str = "keyboard={key=%d, text=%s}" % (uie.keyboard.key, uie.keyboard.text)
else:
payload_str = "mouse={x=%d, y=%d, button=%d}" % (
uie.mouse.x,
uie.mouse.y,
uie.mouse.button)
#
# And while at it, retrieve a few extra bits from the
# source QEvent as well, why not
#
qevent = uie.get_source_QEvent()
qevent_str = str(qevent)
from PyQt5 import QtCore
if qevent.type() in [
QtCore.QEvent.KeyPress,
QtCore.QEvent.KeyRelease]:
qevent_str="{count=%d}" % qevent.count()
elif qevent.type() in [
QtCore.QEvent.MouseButtonPress,
QtCore.QEvent.MouseButtonRelease]:
qevent_str="{globalX=%d, globalY=%d, flags=%s}" % (
qevent.globalX(),
qevent.globalY(),
qevent.flags())
elif qevent.type() == QtCore.QEvent.Wheel:
qevent_str="{angleDelta={x=%s, y=%s}, phase=%s}" % (
qevent.angleDelta().x(),
qevent.angleDelta().y(),
qevent.phase())
#
# If the target QWidget is a scroll area's viewport,
# pick up the parent
#
from PyQt5 import QtWidgets
qwidget = uie.get_target_QWidget()
if qwidget:
parent = qwidget.parentWidget()
if parent and isinstance(parent, QtWidgets.QAbstractScrollArea):
qwidget = parent
parts.append("user_input_event={kind=%s, modifiers=0x%x, target={metaObject={className=%s}, windowTitle=%s}, source=%s, %s, source-as-qevent=%s}" % (
kind_str,
uie.modifiers,
qwidget.metaObject().className(),
qwidget.windowTitle(),
uie.source,
payload_str,
qevent_str))
print("### curpos: %s" % ", ".join(parts))
return 0
curpos_details = curpos_details_t()
curpos_details.hook()
```
#### File: examples/hexrays/vds_hooks.py
```python
from __future__ import print_function
import inspect
import ida_idaapi
import ida_typeinf
import ida_hexrays
class vds_hooks_t(ida_hexrays.Hexrays_Hooks):
def __init__(self):
ida_hexrays.Hexrays_Hooks.__init__(self)
self.display_shortened_cfuncs = False
self.display_vdui_curpos = False
        self.inhibit_log = 0
def _format_lvar(self, v):
parts = []
if v:
if v.name:
parts.append("name=%s" % v.name)
if v.cmt:
parts.append("cmt=%s" % v.cmt)
parts.append("width=%s" % v.width)
parts.append("defblk=%s" % v.defblk)
parts.append("divisor=%s" % v.divisor)
return "{%s}" % ", ".join(parts)
def _format_vdui_curpos(self, v):
return "cpos={lnnum=%d, x=%d, y=%d}" % (v.cpos.lnnum, v.cpos.x, v.cpos.y)
def _format_value(self, v):
if isinstance(v, ida_hexrays.lvar_t):
v = self._format_lvar(v)
elif isinstance(v, ida_hexrays.cfunc_t):
if self.display_shortened_cfuncs:
self.inhibit_log += 1
v = str(v)
if len(v) > 20:
v = v[0:20] + "[...snipped...]"
self.inhibit_log -= 1
else:
v = "<cfunc>" # cannot print contents: we'll end up being called recursively
elif isinstance(v, ida_hexrays.vdui_t) and self.display_vdui_curpos:
v = str(v) + " " + self._format_vdui_curpos(v)
return str(v)
def _log(self):
if self.inhibit_log <= 0:
stack = inspect.stack()
frame, _, _, _, _, _ = stack[1]
args, _, _, values = inspect.getargvalues(frame)
method_name = inspect.getframeinfo(frame)[2]
argstrs = []
for arg in args[1:]:
argstrs.append("%s=%s" % (arg, self._format_value(values[arg])))
print("### %s: %s" % (method_name, ", ".join(argstrs)))
return 0
def flowchart(self, fc):
return self._log()
def stkpnts(self, mba, stkpnts):
return self._log()
def prolog(self, mba, fc, reachable_blocks, decomp_flags):
return self._log()
def microcode(self, mba):
return self._log()
def preoptimized(self, mba):
return self._log()
def locopt(self, mba):
return self._log()
def prealloc(self, mba):
return self._log()
def glbopt(self, mba):
return self._log()
def structural(self, ctrl_graph):
return self._log()
def maturity(self, cfunc, maturity):
return self._log()
def interr(self, code):
return self._log()
def combine(self, blk, insn):
return self._log()
def print_func(self, cfunc, printer):
return self._log()
def func_printed(self, cfunc):
return self._log()
def resolve_stkaddrs(self, mba):
return self._log()
def open_pseudocode(self, vu):
return self._log()
def switch_pseudocode(self, vu):
return self._log()
def refresh_pseudocode(self, vu):
return self._log()
def close_pseudocode(self, vu):
return self._log()
def keyboard(self, vu, key_code, shift_state):
return self._log()
def right_click(self, vu):
return self._log()
def double_click(self, vu, shift_state):
return self._log()
def curpos(self, vu):
return self._log()
def create_hint(self, vu):
return self._log()
def text_ready(self, vu):
return self._log()
def populating_popup(self, widget, popup, vu):
return self._log()
def lvar_name_changed(self, vu, v, name, is_user_name):
return self._log()
def lvar_type_changed(self, vu, v, tif):
return self._log()
def lvar_cmt_changed(self, vu, v, cmt):
return self._log()
def lvar_mapping_changed(self, vu, _from, to):
return self._log()
def cmt_changed(self, cfunc, loc, cmt):
return self._log()
def build_callinfo(self, *args):
return self._log()
vds_hooks = vds_hooks_t()
vds_hooks.hook()
```
#### File: tabular_views/custom/chooser_with_folders.py
```python
import inspect
import ida_kernwin
import ida_dirtree
import ida_netnode
class my_dirspec_t(ida_dirtree.dirspec_t):
def __init__(self, chooser):
ida_dirtree.dirspec_t.__init__(self)
self.chooser = chooser
def log_frame(self):
if self.chooser.dirspec_log:
stack = inspect.stack()
frame, _, _, _, _, _ = stack[1]
args, _, _, values = inspect.getargvalues(frame)
print(">>> %s: args=%s" % (inspect.getframeinfo(frame)[2], [(i, values[i]) for i in args[1:]]))
def get_name(self, inode, flags):
self.log_frame()
def find_inode(index, ordinal, _inode):
if inode == _inode:
return "inode #%d" % inode
return self.chooser._for_each_item(find_inode)
def get_inode(self, dirpath, name):
self.log_frame()
if not name.startswith("inode #"):
return ida_dirtree.direntry_t.BADIDX
return int(name[7:])
def get_size(self, inode):
self.log_frame()
return 1
def get_attrs(self, inode):
self.log_frame()
def rename_inode(self, inode, newname):
self.log_frame()
def set_column0_contents(index, ordinal, _inode):
if inode == _inode:
ordinal = self.chooser._get_ordinal_at(index)
self.chooser.netnode.supset(index, newname, SUPVAL_COL0_DATA_TAG)
return True
return self.chooser._for_each_item(set_column0_contents)
def unlink_inode(self, inode):
self.log_frame()
ALTVAL_NEW_ORDINAL_TAG = 'L'
ALTVAL_ORDINAL_TAG = 'O'
ALTVAL_INODE_TAG = 'I'
SUPVAL_COL0_DATA_TAG = '0'
SUPVAL_COL1_DATA_TAG = '1'
SUPVAL_COL2_DATA_TAG = '2'
class base_idapython_tree_view_t(ida_kernwin.Choose):
def __init__(self, title, nitems=100, dirspec_log=True, flags=0):
flags |= ida_kernwin.CH_NOIDB
flags |= ida_kernwin.CH_MULTI
flags |= ida_kernwin.CH_HAS_DIRTREE
ida_kernwin.Choose.__init__(self,
title,
[
["First",
10
| ida_kernwin.Choose.CHCOL_PLAIN
| ida_kernwin.Choose.CHCOL_DRAGHINT
| ida_kernwin.Choose.CHCOL_INODENAME
],
["Second", 10 | ida_kernwin.Choose.CHCOL_PLAIN],
["Third", 10 | ida_kernwin.Choose.CHCOL_PLAIN],
],
flags=flags)
self.debug_items = False
self.dirspec_log = dirspec_log
self.dirtree = None
self.dirspec = None
self.netnode = ida_netnode.netnode()
self.netnode.create("$ idapython_tree_view %s" % title)
for i in range(nitems):
self._new_item()
def _get_new_ordinal(self):
return self.netnode.altval(0, ALTVAL_NEW_ORDINAL_TAG)
def _set_new_ordinal(self, ordinal):
self.netnode.altset(0, ordinal, ALTVAL_NEW_ORDINAL_TAG)
def _allocate_ordinal(self):
ordinal = self._get_new_ordinal()
self._set_new_ordinal(ordinal + 1)
return ordinal
def _move_items(self, src, dst, sz):
self.netnode.altshift(src, dst, sz, ALTVAL_ORDINAL_TAG)
self.netnode.altshift(src, dst, sz, ALTVAL_INODE_TAG)
self.netnode.supshift(src, dst, sz, SUPVAL_COL0_DATA_TAG)
self.netnode.supshift(src, dst, sz, SUPVAL_COL1_DATA_TAG)
self.netnode.supshift(src, dst, sz, SUPVAL_COL2_DATA_TAG)
def _new_item(self, index=None):
new_ord = self._allocate_ordinal()
new_inode = new_ord + 1000
nitems = self._get_items_count()
if index is None:
index = nitems
else:
assert(index < nitems)
if index < nitems:
self._move_items(index, index + 1, nitems - index)
self.netnode.altset(index, new_ord, ALTVAL_ORDINAL_TAG)
self.netnode.altset(index, new_inode, ALTVAL_INODE_TAG)
return index, new_ord, new_inode
def _dump_items(self):
if self.debug_items:
data = []
def collect(index, ordinal, inode):
data.append([inode] + self._make_item_contents_from_index(index))
self._for_each_item(collect)
import pprint
print(pprint.pformat(data))
def _get_ordinal_at(self, index):
assert(index <= self.netnode.altlast(ALTVAL_ORDINAL_TAG))
return self.netnode.altval(index, ALTVAL_ORDINAL_TAG)
def _get_inode_at(self, index):
assert(index <= self.netnode.altlast(ALTVAL_INODE_TAG))
return self.netnode.altval(index, ALTVAL_INODE_TAG)
def _for_each_item(self, cb):
for i in range(self._get_items_count()):
rc = cb(i, self._get_ordinal_at(i), self._get_inode_at(i))
if rc is not None:
return rc
def _get_items_count(self):
l = self.netnode.altlast(ALTVAL_ORDINAL_TAG)
return 0 if l == ida_netnode.BADNODE else l + 1
def _make_item_contents_from_index(self, index):
ordinal = self._get_ordinal_at(index)
c0 = self.netnode.supstr(index, SUPVAL_COL0_DATA_TAG) or "a%d" % ordinal
c1 = self.netnode.supstr(index, SUPVAL_COL1_DATA_TAG) or "b%d" % ordinal
c2 = self.netnode.supstr(index, SUPVAL_COL2_DATA_TAG) or "c%d" % ordinal
return [c0, c1, c2]
def OnGetLine(self, n):
return self._make_item_contents_from_index(n)
def OnGetSize(self):
return self._get_items_count()
def OnGetDirTree(self):
self.dirspec = my_dirspec_t(self)
self.dirtree = ida_dirtree.dirtree_t(self.dirspec)
def do_link(index, ordinal, inode):
de = ida_dirtree.direntry_t(inode, False)
self.dirtree.link("/%s" % self.dirtree.get_entry_name(de))
self._for_each_item(do_link)
return (self.dirspec, self.dirtree)
def OnIndexToInode(self, n):
return self._get_inode_at(n)
# Helper function, to be called by "On*" event handlers.
# This will print all the arguments that were passed
def _print_prev_frame(self):
import inspect
stack = inspect.stack()
frame, _, _, _, _, _ = stack[1]
args, _, _, values = inspect.getargvalues(frame)
print("EVENT: %s: args=%s" % (
inspect.getframeinfo(frame)[2],
[(i, values[i]) for i in args[1:]]))
def OnSelectionChange(self, sel):
self._print_prev_frame()
def OnSelectLine(self, sel):
self._print_prev_frame()
class idapython_tree_view_t(base_idapython_tree_view_t):
def __init__(self, title, nitems=100, dirspec_log=True, flags=0):
flags |= ida_kernwin.CH_CAN_INS
flags |= ida_kernwin.CH_CAN_DEL
flags |= ida_kernwin.CH_CAN_EDIT
base_idapython_tree_view_t.__init__(self, title, nitems, dirspec_log, flags)
def OnInsertLine(self, sel):
self._print_prev_frame()
# Add item into storage
index = sel[0] if sel else None
prev_inode = self._get_inode_at(index) if index is not None else None
final_index, new_ordinal, new_inode = self._new_item(sel[0] if sel else None)
# Link in the tree (unless an absolute path is provided,
# 'link()' will use the current directory, which is set
# by the 'OnInsertLine' caller.)
dt = self.dirtree
cwd = dt.getcwd()
parent_de = dt.resolve_path(cwd)
wanted_rank = -1
if prev_inode is not None:
wanted_rank = dt.get_rank(parent_de.idx, ida_dirtree.direntry_t(prev_inode, False))
de = ida_dirtree.direntry_t(new_inode, False)
name = dt.get_entry_name(de)
code = dt.link(name)
assert(code == ida_dirtree.DTE_OK)
if wanted_rank >= 0:
assert(ida_dirtree.dirtree_t.isdir(parent_de))
cur_rank = dt.get_rank(parent_de.idx, de)
dt.change_rank(cwd + "/" + name, wanted_rank - cur_rank)
self._dump_items()
return [ida_kernwin.Choose.ALL_CHANGED] + [final_index]
def OnDeleteLine(self, sel):
self._print_prev_frame()
dt = self.dirtree
for index in reversed(sorted(sel)):
# Note: when it comes to deletion of items, the dirtree_t is
# designed in such a way folders contents will be re-computed
# on-demand after the deletion of an inode. Consequently,
# there is no need to perform an unlink() operation here, only
# notify the dirtree that something changed
nitems = self._get_items_count()
assert(index < nitems)
inode = self._get_inode_at(index)
self.netnode.altdel(index, ALTVAL_ORDINAL_TAG)
self.netnode.altdel(index, ALTVAL_INODE_TAG)
self._move_items(index + 1, index, nitems - index + 1)
dt.notify_dirtree(False, inode)
self._dump_items()
return [ida_kernwin.Choose.ALL_CHANGED]
def OnEditLine(self, sel):
self._print_prev_frame()
for idx in sel:
repl = ida_kernwin.ask_str("", 0, "Please enter replacement for index %d" % idx)
if repl:
self.netnode.supset(idx, repl, SUPVAL_COL0_DATA_TAG)
self._dump_items()
return [ida_kernwin.Choose.ALL_CHANGED] + sel
```
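As excerpted, the example only defines the chooser classes; a minimal way to try it from the IDA Python console might look like this (title and item count are arbitrary):

```python
# Sketch: open the folder-capable chooser inside IDA.
c = idapython_tree_view_t("idapython tree view", nitems=20)
c.Show()
```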
#### File: src/pywraps/py_idd.py
```python
NO_PROCESS = 0xFFFFFFFF
NO_THREAD = 0
import types
import _ida_idaapi
import _ida_dbg
import _ida_typeinf
import _ida_name
import _ida_bytes
import _ida_ida
import _ida_idd
import ida_idaapi
import ida_typeinf
dbg_can_query = _ida_dbg.dbg_can_query
# -----------------------------------------------------------------------
class Appcall_array__(object):
"""This class is used with Appcall.array() method"""
def __init__(self, tp):
self.__type = tp
def pack(self, L):
"""Packs a list or tuple into a byref buffer"""
t = type(L)
if not (t == list or t == tuple):
raise ValueError("Either a list or a tuple must be passed")
self.__size = len(L)
if self.__size == 1:
self.__typedobj = Appcall__.typedobj(self.__type + ";")
else:
self.__typedobj = Appcall__.typedobj("%s x[%d];" % (self.__type, self.__size))
# Now store the object in a string buffer
ok, buf = self.__typedobj.store(L)
if ok:
return Appcall__.byref(buf)
else:
return None
def try_to_convert_to_list(self, obj):
"""Is this object a list? We check for the existance of attribute zero and attribute self.size-1"""
if not (hasattr(obj, "0") and hasattr(obj, str(self.__size-1))):
return obj
# at this point, we are sure we have an "idc list"
# let us convert to a Python list
return [getattr(obj, str(x)) for x in range(0, self.__size)]
def unpack(self, buf, as_list=True):
"""Unpacks an array back into a list or an object"""
# take the value from the special ref object
if isinstance(buf, ida_idaapi.PyIdc_cvt_refclass__):
buf = buf.value
# we can only unpack from strings
if type(buf) != bytes:
raise ValueError("Cannot unpack this type!")
# now unpack
ok, obj = self.__typedobj.retrieve(buf)
if not ok:
raise ValueError("Failed while unpacking!")
if not as_list:
return obj
return self.try_to_convert_to_list(obj)
# -----------------------------------------------------------------------
# Wrapper class for the appcall()
class Appcall_callable__(object):
"""
Helper class to issue appcalls using a natural syntax:
appcall.FunctionNameInTheDatabase(arguments, ....)
or
appcall["Function@8"](arguments, ...)
or
f8 = appcall["Function@8"]
f8(arg1, arg2, ...)
or
o = appcall.obj()
i = byref(5)
appcall.funcname(arg1, i, "hello", o)
"""
def __init__(self, ea, tinfo_or_typestr = None, fields = None):
"""Initializes an appcall with a given function ea"""
self.__ea = ea
self.__tif = None
self.__type = None
self.__fields = None
self.__options = None # Appcall options
self.__timeout = None # Appcall timeout
if tinfo_or_typestr:
if isinstance(tinfo_or_typestr, ida_idaapi.string_types):
# a type string? assume (typestr, fields), try to deserialize
tif = ida_typeinf.tinfo_t()
if not tif.deserialize(None, tinfo_or_typestr, fields):
raise ValueError("Could not deserialize type string")
else:
if not isinstance(tinfo_or_typestr, ida_typeinf.tinfo_t):
raise ValueError("Invalid argument 'tinfo_or_typestr'")
tif = tinfo_or_typestr
self.__tif = tif
(self.__type, self.__fields, _) = tif.serialize()
def __get_timeout(self):
return self.__timeout
def __set_timeout(self, v):
self.__timeout = v
timeout = property(__get_timeout, __set_timeout)
"""An Appcall instance can change its timeout value with this attribute"""
def __get_options(self):
return self.__options if self.__options != None else Appcall__.get_appcall_options()
def __set_options(self, v):
if self.timeout:
# If timeout value is set, then put the timeout flag and encode the timeout value
v |= Appcall__.APPCALL_TIMEOUT | (self.timeout << 16)
else:
# Timeout is not set, then clear the timeout flag
v &= ~Appcall__.APPCALL_TIMEOUT
self.__options = v
options = property(__get_options, __set_options)
"""Sets the Appcall options locally to this Appcall instance"""
def __call__(self, *args):
"""Make object callable. We redirect execution to idaapi.appcall()"""
if self.ea is None:
raise ValueError("Object not callable!")
# convert arguments to a list
arg_list = list(args)
# Save appcall options and set new global options
old_opt = Appcall__.get_appcall_options()
Appcall__.set_appcall_options(self.options)
# Do the Appcall (use the wrapped version)
try:
return _ida_idd.appcall(
self.ea,
_ida_dbg.get_current_thread(),
self.type,
self.fields,
arg_list)
finally:
# Restore appcall options
Appcall__.set_appcall_options(old_opt)
def __get_ea(self):
return self.__ea
def __set_ea(self, val):
self.__ea = val
ea = property(__get_ea, __set_ea)
"""Returns or sets the EA associated with this object"""
def __get_tif(self):
return self.__tif
tif = property(__get_tif)
"""Returns the tinfo_t object"""
def __get_size(self):
if self.__type == None:
return -1
r = _ida_typeinf.calc_type_size(None, self.__type)
if not r:
return -1
return r
size = property(__get_size)
"""Returns the size of the type"""
def __get_type(self):
return self.__type
type = property(__get_type)
"""Returns the typestring"""
def __get_fields(self):
return self.__fields
fields = property(__get_fields)
"""Returns the field names"""
def retrieve(self, src=None, flags=0):
"""
Unpacks a typed object from the database if an ea is given or from a string if a string was passed
@param src: the address of the object or a string
@return: Returns a tuple of boolean and object or error number (Bool, Error | Object).
"""
# Nothing passed? Take the address and unpack from the database
if src is None:
src = self.ea
if type(src) == bytes:
return _ida_typeinf.unpack_object_from_bv(None, self.type, self.fields, src, flags)
else:
return _ida_typeinf.unpack_object_from_idb(None, self.type, self.fields, src, flags)
def store(self, obj, dest_ea=None, base_ea=0, flags=0):
"""
Packs an object into a given ea if provided or into a string if no address was passed.
@param obj: The object to pack
@param dest_ea: If packing to idb this will be the store location
@param base_ea: If packing to a buffer, this will be the base that will be used to relocate the pointers
@return:
- If packing to a string then a Tuple(Boolean, packed_string or error code)
- If packing to the database then a return code is returned (0 is success)
"""
# no ea passed? thus pack to a string
if dest_ea is None:
return _ida_typeinf.pack_object_to_bv(obj,
None,
self.type,
self.fields,
base_ea,
flags)
else:
return _ida_typeinf.pack_object_to_idb(obj,
None,
self.type,
self.fields,
dest_ea,
flags)
# -----------------------------------------------------------------------
class Appcall_consts__(object):
"""
Helper class used by Appcall.Consts attribute
It is used to retrieve constants via attribute access
"""
def __init__(self, default=None):
self.__default = default
def __getattr__(self, attr):
v = Appcall__.valueof(attr, self.__default)
if v is None:
raise AttributeError("No constant with name " + attr)
return v
# -----------------------------------------------------------------------
class Appcall__(object):
APPCALL_MANUAL = 0x1
"""
Only set up the appcall, do not run it.
you should call CleanupAppcall() when finished
"""
APPCALL_DEBEV = 0x2
"""
Return debug event information
If this bit is set, exceptions during appcall
will generate idc exceptions with full
information about the exception
"""
APPCALL_TIMEOUT = 0x4
"""
Appcall with timeout
The timeout value in milliseconds is specified
in the high 2 bytes of the 'options' argument:
If timed out, errbuf will contain "timeout".
"""
__name__ = "Appcall__"
def __init__(self):
self.__consts = Appcall_consts__()
def __get_consts(self):
return self.__consts
Consts = property(__get_consts)
"""Use Appcall.Consts.CONST_NAME to access constants"""
@staticmethod
def __name_or_ea(name_or_ea):
"""
Function that accepts a name or an ea and checks if the address is enabled.
        If a name is passed then idaapi.get_name_ea() is applied to resolve it to an address
@return:
- Returns the resolved EA or
- Raises an exception if the address is not enabled
"""
# a string? try to resolve it
if type(name_or_ea) in ida_idaapi.string_types:
ea = _ida_name.get_name_ea(_ida_idaapi.BADADDR, name_or_ea)
else:
ea = name_or_ea
# could not resolve name or invalid address?
if ea == _ida_idaapi.BADADDR or not _ida_bytes.is_mapped(ea):
raise AttributeError("Undefined function " + name_or_ea)
return ea
@staticmethod
def __typedecl_or_tinfo(typedecl_or_tinfo, flags = None):
"""
Function that accepts a tinfo_t object or type declaration as a string
If a type declaration is passed then ida_typeinf.parse_decl() is applied to prepare tinfo_t object
@return:
- Returns the tinfo_t object
- Raises an exception if the declaration cannot be parsed
"""
# a string? try to parse it
if isinstance(typedecl_or_tinfo, ida_idaapi.string_types):
if flags is None:
flags = ida_typeinf.PT_SIL|ida_typeinf.PT_NDC|ida_typeinf.PT_TYP
tif = ida_typeinf.tinfo_t()
if ida_typeinf.parse_decl(tif, None, typedecl_or_tinfo, flags) == None:
raise ValueError("Could not parse type: " + typedecl_or_tinfo)
else:
if not isinstance(typedecl_or_tinfo, ida_typeinf.tinfo_t):
raise ValueError("Invalid argument 'typedecl_or_tinfo'")
tif = typedecl_or_tinfo
return tif
@staticmethod
def proto(name_or_ea, proto_or_tinfo, flags = None):
"""
Allows you to instantiate an appcall (callable object) with the desired prototype
@param name_or_ea: The name of the function (will be resolved with LocByName())
@param proto_or_tinfo: function prototype as a string or type of the function as tinfo_t object
@return:
- On failure it raises an exception if the prototype could not be parsed
or the address is not resolvable
            - Returns a callable Appcall instance with the given prototypes and flags
"""
# resolve and raise exception on error
ea = Appcall__.__name_or_ea(name_or_ea)
# parse the type if it is given as (prototype, flags)
tif = Appcall__.__typedecl_or_tinfo(proto_or_tinfo, flags)
# Return the callable method with type info
return Appcall_callable__(ea, tif)
def __getattr__(self, name_or_ea):
"""Allows you to call functions as if they were member functions (by returning a callable object)"""
# resolve and raise exception on error
ea = self.__name_or_ea(name_or_ea)
if ea == _ida_idaapi.BADADDR:
raise AttributeError("Undefined function " + name)
# Return the callable method
return Appcall_callable__(ea)
def __getitem__(self, idx):
"""
Use self[func_name] syntax if the function name contains invalid characters for an attribute name
        See __getattr__
"""
return self.__getattr__(idx)
@staticmethod
def valueof(name, default=0):
"""
Returns the numeric value of a given name string.
If the name could not be resolved then the default value will be returned
"""
t, v = _ida_name.get_name_value(_ida_idaapi.BADADDR, name)
if t == 0: # NT_NONE
v = default
return v
@staticmethod
def int64(v):
"""Whenever a 64bit number is needed use this method to construct an object"""
return ida_idaapi.PyIdc_cvt_int64__(v)
@staticmethod
def byref(val):
"""
Method to create references to immutable objects
Currently we support references to int/strings
Objects need not be passed by reference (this will be done automatically)
"""
return ida_idaapi.PyIdc_cvt_refclass__(val)
@staticmethod
def buffer(str = None, size = 0, fill="\x00"):
"""
Creates a string buffer. The returned value (r) will be a byref object.
Use r.value to get the contents and r.size to get the buffer's size
"""
if str is None:
str = ""
left = size - len(str)
if left > 0:
str = str + (fill * left)
r = Appcall__.byref(str)
r.size = size
return r
@staticmethod
def obj(**kwds):
"""Returns an empty object or objects with attributes as passed via its keywords arguments"""
return ida_idaapi.object_t(**kwds)
@staticmethod
def cstr(val):
return ida_idaapi.as_cstr(val)
@staticmethod
def UTF16(s):
return ida_idaapi.as_UTF16(s)
unicode = UTF16
@staticmethod
def array(type_name):
"""Defines an array type. Later you need to pack() / unpack()"""
return Appcall_array__(type_name)
@staticmethod
def typedobj(typedecl_or_tinfo, ea=None):
"""
Returns an appcall object for a type (can be given as tinfo_t object or
as a string declaration)
One can then use retrieve() member method
@param ea: Optional parameter that later can be used to retrieve the type
@return: Appcall object or raises ValueError exception
"""
# parse the type if it is given as string
tif = Appcall__.__typedecl_or_tinfo(typedecl_or_tinfo)
# Return the callable method with type info
return Appcall_callable__(ea, tif)
@staticmethod
def set_appcall_options(opt):
"""Method to change the Appcall options globally (not per Appcall)"""
old_opt = Appcall__.get_appcall_options()
_ida_ida.cvar.inf.appcall_options = opt
return old_opt
@staticmethod
def get_appcall_options():
"""Return the global Appcall options"""
return _ida_ida.cvar.inf.appcall_options
@staticmethod
def cleanup_appcall(tid = 0):
"""Equivalent to IDC's CleanupAppcall()"""
return _ida_idd.cleanup_appcall(tid)
Appcall = Appcall__()
#</pycode(py_idd)>
``` |
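Building on the docstrings above, a hedged sketch of driving an appcall against a debugged process; the function name and prototype are placeholders, and a debugger must be attached with the process suspended:

```python
# Sketch only: call a libc function inside the debuggee via Appcall.
my_atoi = Appcall.proto("atoi", "int __cdecl atoi(const char *nptr);")
print(my_atoi("42"))               # executed in the debuggee, should print 42
buf = Appcall.buffer(size=16)      # writable byref buffer for out-parameters
```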
{
"source": "0xecho/botogram",
"score": 2
} |
#### File: botogram/botogram/commands.py
```python
from inspect import Parameter
class Command:
"""Representation of a single command"""
def __init__(self, hook, _bot=None):
# Get some parameters from the hook
self.name = hook._name
self.hidden = hook._hidden
self.order = hook._order
self._hook = hook
self._component_id = hook.component_id
self._bot = _bot
def __reduce__(self):
return rebuild_command, (self._hook,)
def for_bot(self, bot):
"""Get the command instance for a specific bot"""
return self.__class__(self._hook, _bot=bot)
@property
def raw_docstring(self):
"""Get the raw docstring of this command"""
func = self._hook.func
if hasattr(func, "_botogram_help_message"):
if self._bot is not None:
return self._bot._call(func._botogram_help_message,
self._component_id)
else:
return func._botogram_help_message()
elif func.__doc__:
return func.__doc__
return
@property
def docstring(self):
"""Get the docstring of this command"""
docstring = self.raw_docstring
if docstring is None:
return
result = []
for line in self.raw_docstring.split("\n"):
# Remove leading whitespaces
line = line.strip()
            # Allow only a single blank line
if line == "" and len(result) and result[-1] == "":
continue
result.append(line)
# Remove empty lines at the end or at the start of the docstring
for pos in 0, -1:
if result[pos] == "":
result.pop(pos)
return "\n".join(result)
@property
def parameters_list(self):
"""Get the parameters list of this single command"""
if not self._hook._parameters:
return None
params_list = ""
for parameter in self._hook._parameters.values():
params_list += "[" + parameter.name
if parameter.annotation is not Parameter.empty:
params_list += ":" + parameter.annotation.__name__
if parameter.default is not Parameter.empty:
params_list += "=" + str(parameter.default)
params_list += "] "
return params_list.strip()
@property
def summary(self):
"""Get a summary of the command"""
docstring = self.docstring
if docstring is None:
return
return docstring.split("\n", 1)[0]
def rebuild_command(hook):
"""Rebuild a Command after being pickled"""
return Command(hook)
```
#### File: botogram/botogram/inline.py
```python
from . import syntaxes
def process(bot, chains, update):
"""Process an inline update"""
for hook in chains["inline"]:
bot.logger.debug("Processing update #%s with the hook %s..." %
(update.update_id, hook.name))
result = hook.call(bot, update)
        if result == {'ok': True, 'result': True}:
bot.logger.debug("Update #%s was just processed by the %s hook."
% (update.update_id, hook.name))
return
bot.logger.debug("No hook actually processed the #%s update." %
update.update_id)
def inline_feedback_process(bot, chains, update):
"""Process a chosen inline result update"""
for hook in chains["inline_feedback"]:
bot.logger.debug("Processing update #%s with the hook %s..." %
(update.update_id, hook.name))
result = hook.call(bot, update)
        if result == {'ok': True}:
bot.logger.debug("Update #%s was just processed by the %s hook."
% (update.update_id, hook.name))
return
bot.logger.debug("No hook actually processed the #%s update." %
update.update_id)
class InlineInputMessage:
"""A factory for InputMessageContent Telegram objects"""
def __init__(self, text, syntax=None, preview=True):
self.text = text
self.syntax = syntax
self.preview = preview
def _serialize(self):
args = {
"message_text": self.text,
"disable_web_page_preview": not self.preview,
}
syntax = syntaxes.guess_syntax(self.text, self.syntax)
if syntax:
args["parse_mode"] = syntax
return args
class InlineInputLocation:
"""A factory for InputLocationMessageContent Telegram objects"""
def __init__(self, latitude, longitude, live_period=None):
self.latitude = latitude
self.longitude = longitude
self.live_period = live_period
def _serialize(self):
args = {
"latitude": self.latitude,
"longitude": self.longitude,
}
if self.live_period is not None:
args["live_period"] = self.live_period
return args
class InlineInputVenue:
"""A factory for InputVenueMessageContent Telegram objects"""
def __init__(self, latitude, longitude, title, address,
foursquare_id=None, foursquare_type=None):
self.latitude = latitude
self.longitude = longitude
self.title = title
self.address = address
self.foursquare_id = foursquare_id
self.foursquare_type = foursquare_type
def _serialize(self):
args = {
"latitude": self.latitude,
"longitude": self.longitude,
"title": self.title,
"address": self.address,
}
if self.foursquare_id is not None:
args["foursquare_id"] = self.foursquare_id
if self.foursquare_type is not None:
args["foursquare_type"] = self.foursquare_type
return args
class InlineInputContact:
"""A factory for InputContactMessageContent Telegram objects"""
def __init__(self, phone, first_name, last_name=None, vcard=None):
self.phone_number = phone
self.first_name = first_name
self.last_name = last_name
self.vcard = vcard
def _serialize(self):
args = {
"phone_number": self.phone_number,
"first_name": self.first_name,
}
if self.last_name is not None:
args["last_name"] = self.last_name
if self.vcard is not None:
args["vcard"] = self.vcard
return args
```
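For reference, a sketch of what the factories above produce; the exact parse_mode value depends on syntaxes.guess_syntax, so treat it as indicative:

```python
# Sketch: serializing an inline input message for the Telegram API.
content = InlineInputMessage("*hello* world", syntax="markdown", preview=False)
print(content._serialize())
# -> {'message_text': '*hello* world',
#     'disable_web_page_preview': True,
#     'parse_mode': 'Markdown'}    # value assumed; guess_syntax decides
```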
#### File: botogram/objects/inline.py
```python
from .base import BaseObject
from .messages import User, Location
from . import mixins
from .messages import Message
class InlineQuery(BaseObject, mixins.InlineMixin):
required = {
"id": str,
"from": User,
"query": str,
}
optional = {
"location": Location,
"offset": str,
}
replace_keys = {
"from": "sender"
}
def __init__(self, data):
super().__init__(data)
self._switch_pm_text = None
self._switch_pm_parameter = None
def switch_pm(self, text, parameter):
"""Helper to set the switch_pm_text and switch_pm_parameter"""
self._switch_pm_text = text
self._switch_pm_parameter = parameter
class InlineFeedback(BaseObject):
required = {
"result_id": str,
"from": User,
"query": str
}
optional = {
"location": Location,
"inline_message_id": str,
}
replace_keys = {
"from": "sender",
"inline_message_id": "message"
}
def __init__(self, data, api=None):
super().__init__(data, api)
self.message = Message({"inline_message_id": self.message}, api)
```
#### File: botogram/objects/updates.py
```python
from .base import BaseObject, multiple
from .callbacks import CallbackQuery
from .messages import Message
from .polls import Poll
from .inline import InlineQuery, InlineFeedback
class Update(BaseObject):
"""Telegram API representation of an update
https://core.telegram.org/bots/api#update
"""
# Please update the chat method below when adding new types, thanks!
required = {
"update_id": int,
}
optional = {
"message": Message,
"edited_message": Message,
"channel_post": Message,
"edited_channel_post": Message,
"callback_query": CallbackQuery,
"poll": Poll,
"inline_query": InlineQuery,
"chosen_inline_result": InlineFeedback,
}
_check_equality_ = "update_id"
def chat(self):
"""Get the chat related to this update"""
if self.message is not None:
return self.message.chat
if self.edited_message is not None:
return self.edited_message.chat
if self.channel_post is not None:
return self.channel_post.chat
if self.edited_channel_post is not None:
return self.edited_channel_post.chat
if self.callback_query is not None:
return self.callback_query.message.chat
raise NotImplementedError
# Shortcut for the Updates type
Updates = multiple(Update)
``` |
{
"source": "0xedward/no-google",
"score": 3
} |
#### File: no-google/scripts/test_duplicates.py
```python
counts = { }
with open("../pihole-google.txt") as f:
for line in f:
stripline = line.strip() # strip whitespace
myhash = hash(stripline)
if myhash:
if myhash in counts: # duplicate line, inc count
counts[myhash] = counts[myhash]+1
else:
counts[myhash] = 1 # new entry
f.close()
#re-read file, and print out duplicate lines
with open("../pihole-google.txt") as f:
for line in f:
stripline = line.strip()
myhash = hash(stripline)
if myhash:
if counts[myhash]>1:
# print duplicate line and count
assert False, stripline + " occurred more than one time in pihole-google.txt, please remove duplicate domains."
# after printing dup, clear ctr so prints once
counts[myhash] = 0
f.close()
def test_success():
    assert True
``` |
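The two-pass hashing above can be expressed more directly with collections.Counter; a sketch of the same duplicate check (not a drop-in replacement for the test module, just the idea):

```python
# Sketch: report duplicate domains using collections.Counter.
from collections import Counter

with open("../pihole-google.txt") as f:
    counts = Counter(line.strip() for line in f if line.strip())
duplicates = [domain for domain, n in counts.items() if n > 1]
assert not duplicates, f"duplicate domains found: {duplicates}"
```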
{
"source": "0xedward/usort",
"score": 2
} |
#### File: usort/usort/api.py
```python
import sys
import traceback
from functools import partial
from pathlib import Path
from typing import Iterable, Optional, Tuple
from trailrunner import walk, run
from .config import Config
from .sorting import sort_module
from .types import Result
from .util import get_timings, try_parse, timed
__all__ = ["usort_bytes", "usort_string", "usort_path", "usort_stdin"]
def usort_bytes(
data: bytes, config: Config, path: Optional[Path] = None
) -> Tuple[bytes, str]:
"""
Returns (new_bytes, encoding_str) after sorting.
"""
if path is None:
path = Path("<data>")
module = try_parse(data=data, path=path)
with timed(f"sorting {path}"):
new_mod = sort_module(module, config)
return (new_mod.bytes, new_mod.encoding)
def usort_string(data: str, config: Config, path: Optional[Path] = None) -> str:
r"""
Whenever possible use usort_bytes instead.
One does not just .read_text() Python source code. That will use the system
encoding, which if is not utf-8 would be in violation of pep 3120.
There are two additional cases where this function does the wrong thing, but you
won't notice on most modern file contents:
- a string unrepresentable in utf-8, e.g. "\ud800" is a single high surrogate
- a string with a valid pep 263 coding line, other than utf-8
"""
return usort_bytes(data=data.encode(), config=config, path=path)[0].decode()
def usort_file(path: Path, *, write: bool = False) -> Result:
"""
Format a single file and return a Result object.
"""
data: bytes = b""
try:
config = Config.find(path.parent)
data = path.read_bytes()
output, encoding = usort_bytes(data, config, path)
if write:
path.write_bytes(output)
return Result(
path=path,
content=data,
output=output,
encoding=encoding,
timings=get_timings(),
)
except Exception as e:
trace = "".join(traceback.format_exception(*sys.exc_info()))
return Result(
path=path, content=data, error=e, trace=trace, timings=get_timings()
)
def usort_path(path: Path, *, write: bool = False) -> Iterable[Result]:
"""
For a given path, format it, or any .py files in it, and yield Result objects
"""
with timed(f"total for {path}"):
with timed(f"walking {path}"):
paths = walk(path)
fn = partial(usort_file, write=write)
return (v for v in run(paths, fn).values())
def usort_stdin() -> bool:
"""
Read file contents from stdin, format it, and write the resulting file to stdout
In case of error during sorting, no output will be written to stdout, and the
exception will be written to stderr instead.
Returns True if formatting succeeded, otherwise False
"""
if sys.stdin.isatty():
print("Warning: stdin is a tty", file=sys.stderr)
try:
config = Config.find()
data = sys.stdin.read()
result = usort_string(data, config, Path("<stdin>"))
sys.stdout.write(result)
return True
except Exception as e:
sys.stderr.write(repr(e))
return False
```
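A quick sketch of the string-level entry point defined above; the no-argument Config() used here skips pyproject.toml discovery, which real projects would normally get via Config.find():

```python
# Sketch: sort a small snippet in memory with µsort.
from pathlib import Path

from usort.api import usort_string
from usort.config import Config

source = "import sys\nimport os\n"
print(usort_string(source, Config(), Path("example.py")), end="")
# -> import os
#    import sys
```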
#### File: usort/tests/cli.py
```python
import os
import unittest
from contextlib import contextmanager
from pathlib import Path
from textwrap import dedent
from typing import AnyStr, Generator
import volatile
from click.testing import CliRunner
from usort.cli import main
@contextmanager
def chdir(new_dir: str) -> Generator[None, None, None]:
cur_dir = os.getcwd()
os.chdir(new_dir)
try:
yield
finally:
os.chdir(cur_dir)
@contextmanager
def sample_contents(s: AnyStr) -> Generator[str, None, None]:
with volatile.dir() as dtmp:
ptmp = Path(dtmp)
(ptmp / "pyproject.toml").write_text("")
if isinstance(s, bytes):
(ptmp / "sample.py").write_bytes(s)
else:
(ptmp / "sample.py").write_text(s)
yield dtmp
class CliTest(unittest.TestCase):
def test_benchmark(self) -> None:
with sample_contents("import sys\n") as dtmp:
runner = CliRunner()
with chdir(dtmp):
result = runner.invoke(main, ["--benchmark", "check", "."])
self.assertRegex(
result.output,
dedent(
r"""
parsing sample\.py:\s+\d+ µs
sorting sample\.py:\s+\d+ µs
walking \.:\s+\d+ µs
total for \.:\s+\d+ µs
"""
).strip(),
)
self.assertEqual(0, result.exit_code)
def test_check_no_change(self) -> None:
with sample_contents("import sys\n") as dtmp:
runner = CliRunner()
with chdir(dtmp):
result = runner.invoke(main, ["check", "."])
self.assertEqual("", result.output)
self.assertEqual(0, result.exit_code)
def test_check_with_change(self) -> None:
with sample_contents("import sys\nimport os\n") as dtmp:
runner = CliRunner()
with chdir(dtmp):
result = runner.invoke(main, ["check", "."])
self.assertEqual("Would sort sample.py\n", result.output)
self.assertEqual(2, result.exit_code)
def test_diff_no_change(self) -> None:
with sample_contents("import sys\n") as dtmp:
runner = CliRunner()
with chdir(dtmp):
result = runner.invoke(main, ["diff", "."])
self.assertEqual("", result.output)
self.assertEqual(0, result.exit_code)
def test_diff_with_change(self) -> None:
with sample_contents(b"import sys\nimport os\n") as dtmp:
runner = CliRunner()
with chdir(dtmp):
result = runner.invoke(main, ["diff", "."])
self.assertEqual(
"""\
--- a/sample.py
+++ b/sample.py
@@ -1,2 +1,2 @@
+import os
import sys
-import os
""".replace(
"\r", ""
),
result.output,
)
self.assertEqual(result.exit_code, 0)
def test_list_imports(self) -> None:
with sample_contents("import sys\nx = 5\nimport os") as dtmp:
runner = CliRunner()
with chdir(dtmp):
# TODO this takes filenames, not paths...
result = runner.invoke(main, ["list-imports", "sample.py"])
self.assertEqual(
"""\
sample.py 2 blocks:
body[0:1]
Formatted:
[[[
import sys
]]]
body[2:3]
Formatted:
[[[
import os
]]]
""",
result.output.replace("\r\n", "\n"),
)
self.assertEqual(result.exit_code, 0)
def test_format_no_change(self) -> None:
with sample_contents("import sys\n") as dtmp:
runner = CliRunner()
with chdir(dtmp):
result = runner.invoke(main, ["format", "."])
self.assertEqual(result.output, "")
self.assertEqual(result.exit_code, 0)
def test_format_parse_error(self) -> None:
"""Code that has syntax that would never be valid in any version of python"""
with sample_contents("import\n") as dtmp:
runner = CliRunner()
with chdir(dtmp):
result = runner.invoke(main, ["format", "."])
self.assertRegex(
result.output,
r"Error sorting sample\.py: Syntax Error @ 1:7\.",
)
self.assertEqual(result.exit_code, 1)
def test_format_parse_error_conflicting_syntax(self) -> None:
"""Code that contains syntax both <=2.7 and >=3.8 that could never coexist"""
with sample_contents("while (i := foo()):\n print 'i'\n") as dtmp:
runner = CliRunner()
with chdir(dtmp):
result = runner.invoke(main, ["format", "."])
self.assertRegex(
result.output,
r"Error sorting sample\.py: Syntax Error @ 2:11\.",
)
self.assertEqual(result.exit_code, 1)
def test_format_permission_error(self) -> None:
"""File does not have read permissions"""
with sample_contents("print('hello world')\n") as dtmp:
runner = CliRunner()
# make the file unreadable
(Path(dtmp) / "sample.py").chmod(0o000)
with chdir(dtmp):
result = runner.invoke(main, ["format", "."])
# restore permissions so that cleanup can succeed on windows
(Path(dtmp) / "sample.py").chmod(0o644)
self.assertRegex(
result.output,
r"Error sorting sample\.py: .+ Permission denied:",
)
self.assertEqual(result.exit_code, 1)
def test_format_with_change(self) -> None:
with sample_contents("import sys\nimport os\n") as dtmp:
runner = CliRunner()
with chdir(dtmp):
result = runner.invoke(main, ["format", "."])
self.assertEqual(result.output, "Sorted sample.py\n")
self.assertEqual(result.exit_code, 0)
self.assertEqual(
"""\
import os
import sys
""",
(Path(dtmp) / "sample.py").read_text(),
)
def test_format_utf8(self) -> None:
# the string is "µ" as in "µsort"
with sample_contents(
b"""\
import b
import a
s = "\xc2\xb5"
"""
) as dtmp:
runner = CliRunner()
with chdir(dtmp):
result = runner.invoke(main, ["diff", "."])
# Diff output is unicode
self.assertEqual(
result.output,
"""\
--- a/sample.py
+++ b/sample.py
@@ -1,3 +1,3 @@
+import a
import b
-import a
s = "\u00b5"
""",
result.output,
)
with chdir(dtmp):
result = runner.invoke(main, ["format", "."])
self.assertEqual(
b"""\
import a
import b
s = "\xc2\xb5"
""",
(Path(dtmp) / "sample.py").read_bytes(),
)
def test_format_latin_1(self) -> None:
# the string is "µ" as in "µsort"
with sample_contents(
b"""\
# -*- coding: latin-1 -*-
import b
import a
s = "\xb5"
""".replace(
b"\r", b""
) # git on windows might make \r\n
) as dtmp:
runner = CliRunner()
# Diff output is unicode
with chdir(dtmp):
result = runner.invoke(main, ["diff", "."])
self.assertEqual(
result.output,
"""\
--- a/sample.py
+++ b/sample.py
@@ -1,4 +1,4 @@
# -*- coding: latin-1 -*-
+import a
import b
-import a
s = "\u00b5"
""".replace(
"\r", ""
), # git on windows again
result.output,
)
# Format keeps current encoding
with chdir(dtmp):
result = runner.invoke(main, ["format", "."])
self.assertEqual(
b"""\
# -*- coding: latin-1 -*-
import a
import b
s = "\xb5"
""".replace(
b"\r", b""
), # git on windows again
(Path(dtmp) / "sample.py").read_bytes(),
)
```
#### File: usort/tests/translate.py
```python
import unittest
from ..config import Config
from ..sorting import is_sortable_import
from ..translate import import_from_node
from ..util import parse_import
class SortableImportTest(unittest.TestCase):
def test_import_from_node_Import(self) -> None:
imp = import_from_node(parse_import("import a"), Config())
self.assertIsNone(imp.stem)
self.assertEqual("a", imp.items[0].name)
self.assertEqual({"a": "a"}, imp.imported_names)
imp = import_from_node(parse_import("import a, b"), Config())
self.assertIsNone(imp.stem)
self.assertEqual("a", imp.items[0].name)
self.assertEqual("b", imp.items[1].name)
self.assertEqual({"a": "a", "b": "b"}, imp.imported_names)
imp = import_from_node(parse_import("import a as b"), Config())
self.assertIsNone(imp.stem)
self.assertEqual("a", imp.items[0].name)
self.assertEqual("b", imp.items[0].asname)
self.assertEqual({"b": "a"}, imp.imported_names)
imp = import_from_node(parse_import("import os.path"), Config())
self.assertIsNone(imp.stem)
self.assertEqual("os.path", imp.items[0].name)
self.assertEqual({"os": "os"}, imp.imported_names)
imp = import_from_node(parse_import("import IPython.core"), Config())
self.assertEqual("IPython.core", imp.items[0].name)
self.assertEqual({"IPython": "IPython"}, imp.imported_names)
def test_import_from_node_ImportFrom(self) -> None:
imp = import_from_node(parse_import("from a import b"), Config())
self.assertEqual("a", imp.stem)
self.assertEqual("b", imp.items[0].name)
self.assertEqual({"b": "a.b"}, imp.imported_names)
imp = import_from_node(parse_import("from a import b as c"), Config())
self.assertEqual("a", imp.stem)
self.assertEqual("b", imp.items[0].name)
self.assertEqual("c", imp.items[0].asname)
self.assertEqual({"c": "a.b"}, imp.imported_names)
def test_import_from_node_ImportFrom_relative(self) -> None:
imp = import_from_node(parse_import("from .a import b"), Config())
self.assertEqual(".a", imp.stem)
self.assertEqual("b", imp.items[0].name)
self.assertEqual({"b": ".a.b"}, imp.imported_names)
imp = import_from_node(parse_import("from ...a import b"), Config())
self.assertEqual("...a", imp.stem)
self.assertEqual("b", imp.items[0].name)
self.assertEqual({"b": "...a.b"}, imp.imported_names)
imp = import_from_node(parse_import("from . import a"), Config())
self.assertEqual(".", imp.stem)
self.assertEqual("a", imp.items[0].name)
self.assertEqual({"a": ".a"}, imp.imported_names)
imp = import_from_node(parse_import("from .. import a"), Config())
self.assertEqual("..", imp.stem)
self.assertEqual("a", imp.items[0].name)
self.assertEqual({"a": "..a"}, imp.imported_names)
imp = import_from_node(parse_import("from . import a as b"), Config())
self.assertEqual(".", imp.stem)
self.assertEqual("a", imp.items[0].name)
self.assertEqual("b", imp.items[0].asname)
self.assertEqual({"b": ".a"}, imp.imported_names)
class IsSortableTest(unittest.TestCase):
def test_is_sortable(self) -> None:
self.assertTrue(is_sortable_import(parse_import("import a"), Config()))
self.assertTrue(is_sortable_import(parse_import("from a import b"), Config()))
self.assertFalse(
is_sortable_import(parse_import("import a # isort: skip"), Config())
)
``` |
{
"source": "0xelectron/laru",
"score": 4
} |
#### File: 0xelectron/laru/vectors.py
```python
import numbers
import math
import decimal
from decimal import Decimal, getcontext
# set precision of decimal numbers
getcontext().prec = 30
class Vector(object):
"""Vector: A Simple Vector Object
Attributes:
"""
def __init__(self, coordinates):
"""__init__ method
Args:
coordinates (iterable): coordinates of the vector
Raises:
decimal.InvalidOperation: If coordinates values are invalid
TypeError: if coordinates is not iterable
"""
if not coordinates:
raise ValueError
try:
self.coordinates = tuple([Decimal(x) for x in coordinates])
except decimal.InvalidOperation:
raise decimal.InvalidOperation('The coordinates must be a valid number')
except TypeError:
raise TypeError('The coordinates must be iterable')
# dimension of vector
self.dimension = len(self.coordinates)
@property
def mag(self):
""" Decimal: Magnitude of a vector """
return Decimal(math.sqrt(sum([x**2 for x in self.coordinates])))
@property
def norm(self):
""" Vector: Normalization of a vector """
if (self.mag != 0):
return self.__mul__((Decimal(1.0)/self.mag))
else:
raise ZeroDivisionError('Cannot normalize the zero vector')
def dot(self, v):
""" dot: dot product of two vectors
Args:
v: Vector object
Returns:
Decimal
Raises:
TypeError: if v is not a vector
"""
if not isinstance(v, Vector):
raise TypeError('Argument must be a vector')
return Decimal(sum([x * v.coordinates[i] for i, x in enumerate(self.coordinates)]))
def cross(self, v):
""" cross: cross product of two vectors
Args:
v: Vector object
Returns:
Vector
Raises:
TypeError: if v is not a vector
NotImplementedError: If dimension not equal to 3
"""
if not isinstance(v, Vector):
raise TypeError('Argument must be a vector')
if not (self.dimension == v.dimension == 3):
raise NotImplementedError
x1, y1, z1 = self.coordinates
x2, y2, z2 = v.coordinates
# basic cross product formula for 3 dimensional vector
x = y1*z2 - y2*z1
y = -(x1*z2 - x2*z1)
z = x1*y2 - x2*y1
return Vector([x,y,z])
def area_of_parallelogram(self, v):
""" area_of_parallelogram: area of parallelogram spanned by
self and v
Args:
v: Vector object
Returns:
Decimal: area of parallelogram
Raises:
TypeError: if v is not a vector
"""
if not isinstance(v, Vector):
raise TypeError('Argument must be a vector')
return (self.cross(v)).mag
def area_of_triangle(self, v):
""" area_of_triangle: area of triangle spanned by self and v
Args:
v: Vector object
Returns:
Decimal: area of triangle
Raises:
TypeError: if v is not a vector
"""
if not isinstance(v, Vector):
raise TypeError('Argument must be a vector')
return self.area_of_parallelogram(v) / 2
def theta(self, v, in_degrees=False):
""" theta: finds the angle between two vectors
Args:
v: Vector object
in_degrees: boolean
Returns:
Angle between two vectors. Default in radians,
and in degrees if in_degrees is True
Raises:
TypeError: if v is not a vector
Exception: if product of magnitude of two vectors is zero
"""
if not isinstance(v, Vector):
raise TypeError('Argument must be a vector')
mag_product = self.mag * v.mag
if mag_product == 0:
raise Exception('Cannot find angle for a zero vector')
a = self.__clean_angle(self.dot(v)/mag_product)
t = math.acos(a)
if in_degrees:
return math.degrees(t)
return t
def proj_on(self, v):
""" proj_on: finds projection of vector on a given vector v
Args:
v: Vector object
Returns:
Vector
Raises:
TypeError: if v is not a vector
"""
if not isinstance(v, Vector):
raise TypeError('Argument must be a vector')
return self.dot(v.norm) * v.norm
def orthogonal_to(self, v):
""" orthogonal_to: finds a vector to a given vector v
Args:
v: Vector object
Returns:
Vector
Raises:
TypeError: if v is not a vector
"""
if not isinstance(v, Vector):
raise TypeError('Argument must be a vector')
return self.__sub__(self.proj_on(v))
def decompose(self, v):
""" decompose: decomposes the vector(self) into sum of two vectors.
one of which is orthogonal to v and the other is parallel to v.
Args:
v: Vector object
Returns:
tuple of two vectors
Raises:
TypeError: if v is not a vector
"""
if not isinstance(v, Vector):
raise TypeError('Argument must be a vector')
return (self.proj_on(v), self.orthogonal_to(v))
def is_zero(self, tolerance=1e-10):
""" is_zero: checks if vector is zero
Args:
tolerance: tolerance for the number to be zero
Returns:
True: if vector is zero
False: if not
"""
return self.mag < tolerance
def is_orthogonal_to(self, v, tolerance=1e-10):
""" is_orthogonal_to: check if two vectors are orthognal
Args:
v: Vector object
tolerance: tolerance for the number to be orthogonal
Returns:
True: if two vectors are orthogonal to each other
False: if not
Raises:
TypeError: If v is not a vector
"""
if not isinstance(v, Vector):
raise TypeError('Argument must be a vector')
return abs(self.dot(v)) < tolerance
def is_parallel_to(self, v):
""" is_parallel_to: check if two vectors are parallel
Args:
v: Vector object
Returns:
True: if two vectors are parallel to each other
False: if not
Raises:
TypeError: If v is not a vector
"""
if not isinstance(v, Vector):
raise TypeError('Argument must be a vector')
return (self.is_zero() or
v.is_zero() or
self.theta(v) == Decimal(math.pi) or
self.theta(v) == Decimal(0))
def __clean_angle(self, a):
""" __clean_angle: constraint angle between [-1, 1]
Args:
a: angle to be constrained
Returns:
Decimal: constrained number between -1 and 1
Raises:
TypeError: if a is not a number
"""
if not isinstance(a, numbers.Number):
raise TypeError('Invalid number provided')
return Decimal(min(1, max(a, -1)))
def __add__(self, v):
""" __add__: Sum of two Vectors
Args:
v: Vector object
Returns:
Vector object
Raises:
TypeError: if v is not a Vector
"""
if not isinstance(v, Vector):
raise TypeError('You can only add two vectors')
return Vector([x + v.coordinates[i] for i, x in enumerate(self.coordinates)])
# allow in reverse
__radd__ = __add__
def __sub__(self, v):
""" __sub__: Subtration of two Vectors
Args:
v: Vector object
Returns:
Vector object
Raises:
TypeError: if v is not a Vector
"""
if not isinstance(v, Vector):
raise TypeError('You can only subtract two vectors')
return Vector([x - v.coordinates[i] for i, x in enumerate(self.coordinates)])
# allow in reverse
__rsub__ = __sub__
def __mul__(self, v):
""" __mul__: cross product of two vectors or
multiplication of a scalar with a vector
Args:
v: Vector object
Returns:
Vector
Raises:
TypeError: if v is not a vector or a number
"""
if (isinstance(v, numbers.Number)):
return Vector([x * v for x in self.coordinates])
elif (isinstance(v, Vector)):
return self.cross(v)
raise TypeError('Argument must be number or a vector')
# allow in reverse
__rmul__ = __mul__
def __str__(self):
""" returns a string representation of a vector """
return 'Vector: {}'.format(self.coordinates)
# representation == string
__repr__ = __str__
def __eq__(self, v):
""" checks the equality of two vectors """
return self.coordinates == v.coordinates
``` |
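A minimal usage sketch of the `Vector` class above (assuming `vectors.py` is importable; the values and printed results are illustrative):
```python
# Sketch only: assumes vectors.py is on the import path.
from vectors import Vector

v = Vector([3, 4, 0])
w = Vector([1, 0, 0])

print(v.mag)                        # magnitude: 5
print(v.dot(w))                     # dot product: 3
print(v.cross(w))                   # cross product: Vector (0, 0, -4)
print(v.proj_on(w))                 # projection of v onto w: Vector (3, 0, 0)
print(v.theta(w, in_degrees=True))  # angle between v and w, ~53.13 degrees
```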
{
"source": "0xelectron/mhtportal-web",
"score": 2
} |
#### File: mhtportal-web/base/views.py
```python
from django.shortcuts import get_object_or_404
from django_countries import countries
from localflavor.in_.in_states import STATE_CHOICES
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from rest_framework.permissions import IsAuthenticated
from base.models import (Center,
CenterScope,
ScopedCenter,
Address,
Participant,
Profile)
from base.serializers import (AddressSerializer,
CenterSerializer,
CenterScopeSerializer,
ScopedCenterSerializer,
ParticipantSerializer,
ProfileSerializer)
from django.core.cache import cache
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
class MultipleFieldLookupMixin(object):
"""
Apply this mixin to any view or viewset to get multiple field filtering
based on a `lookup_fields` attribute, instead of the default single field filtering.
"""
def get_object(self):
queryset = self.get_queryset() # Get the base queryset
queryset = self.filter_queryset(queryset) # Apply any filter backends
filter = {}
for field in self.lookup_fields:
if self.kwargs[field]: # Ignore empty fields.
filter[field] = self.kwargs[field]
return get_object_or_404(queryset, **filter) # Lookup the object
class MeView(APIView):
"""
Display Profile of current logged in User.
* Requires authentication.
* Only logged in users are able to access this view.
"""
permission_classes = (IsAuthenticated,)
def get(self, request, format=None):
"""
Return Profile of current logged in user.
"""
profile = get_object_or_404(Profile, user=request.user)
ps = ProfileSerializer(profile)
return Response(ps.data)
class CountriesView(APIView):
def get(self, request, format=None):
return Response(dict(countries))
class StatesView(APIView):
def get(self, request, format=None):
return Response(dict(STATE_CHOICES))
class CenterViewSet(ModelViewSet):
"""This endpoint Represents the Centers
It presents the list of Current Centers.
"""
@method_decorator(cache_page(60*60*2))
def list(self, request, *args, **kwargs):
return super(CenterViewSet, self).list(request, *args, **kwargs)
queryset = Center.objects.all()
serializer_class = CenterSerializer
filter_fields = ['id', 'name', 'parent', 'is_displayed']
class CenterScopeViewSet(ModelViewSet):
"""This endpoint Represents the Center Scopes
It presents the list of Current Center Scopes.
"""
queryset = CenterScope.objects.all()
serializer_class = CenterScopeSerializer
class ScopedCenterViewSet(ModelViewSet):
"""This endpoint Represents the Centers with definite Scopes
It presents the list of Current Centers with definite Scopes
"""
queryset = ScopedCenter.objects.all()
serializer_class = ScopedCenterSerializer
class AddressViewSet(ModelViewSet):
"""This endpoint Represents the Event Addresses
It presents the address for the given event
"""
queryset = Address.objects.all()
serializer_class = AddressSerializer
filter_fields = ['id', 'city', 'state', 'country', 'zip_code']
class ParticipantViewSet(ModelViewSet):
"""This endpoint Represents the Participants
It can create/update/retrieve an Participant
It also presents lists of Participants
"""
permission_classes = (IsAuthenticated,)
queryset = Participant.objects.all()
serializer_class = ParticipantSerializer
filter_fields = ['id', 'first_name', 'last_name', 'date_of_birth', 'gender',
'center', 'other_center', 'email']
class ProfileViewSet(ModelViewSet):
"""This endpoint Represents the Profiles in the system
It can create/update/retrieve a Profile
It also presents lists of Profiles
"""
permission_classes = (IsAuthenticated,)
queryset = Profile.objects.all()
serializer_class = ProfileSerializer
filter_fields = ['id', 'user', 'center', 'gender', 'min_age', 'max_age']
```
#### File: mhtportal-web/events/signals.py
```python
import re
import logging
import json
from django.utils import timezone
from django.db.models.signals import (pre_save,
post_save)
from django.dispatch import receiver
from django.conf import settings
from base.models import (CenterScope,
Profile)
from events.models import (Event,
EventParticipant)
from events.tasks import send_sms_async
from django.db import connection
import json
logger = logging.getLogger(__name__)
center_event_poc = [
{
"center_id": 89,
"mobile": 7069605637
},
{
"center_id": 90,
"mobile": "9601673613"
},
{
"center_id": 4,
"mobile": 9998992900
},
{
"center_id": 5,
"mobile": 9904312880
},
{
"center_id": 44,
"mobile": 9327072945
},
{
"center_id": 63,
"mobile": 9327072945
},
{
"center_id": 39,
"mobile": 9824090958
},
{
"center_id": 64,
"mobile": 9327081075
},
{
"center_id": 70,
"mobile": 9509300679
},
{
"center_id": 68,
"mobile": 7698025206
},
{
"center_id": 6,
"mobile": 9898026589
},
{
"center_id": 7,
"mobile": 9537977231
},
{
"center_id": 71,
"mobile": 8087137057
},
{
"center_id": 72,
"mobile": 9314209457
},
{
"center_id": 8,
"mobile": 9974299193
},
{
"center_id": 9,
"mobile": "9879822077"
},
{
"center_id": 10,
"mobile": "7359553949"
},
{
"center_id": 73,
"mobile": "9722324333"
},
{
"center_id": 57,
"mobile": "9428904747"
},
{
"center_id": 75,
"mobile": "9574046081"
},
{
"center_id": 11,
"mobile": "9825795751"
},
{
"center_id": 65,
"mobile": "6353468031"
},
{
"center_id": 77,
"mobile": "9205727818"
},
{
"center_id": 12,
"mobile": "9979109529"
},
{
"center_id": 52,
"mobile": "9904551649 / 7383818547"
},
{
"center_id": 62,
"mobile": "9926910037"
},
{
"center_id": 78,
"mobile": "7737806148"
},
{
"center_id": 13,
"mobile": "9428315109"
},
{
"center_id": 53,
"mobile": "7016861275"
},
{
"center_id": 14,
"mobile": "7984056061"
},
{
"center_id": 66,
"mobile": 8401389698
},
{
"center_id": 15,
"mobile": 9925678854
},
{
"center_id": 16,
"mobile": 8160590472
},
{
"center_id": 17,
"mobile": 9323263232
},
{
"center_id": 18,
"mobile": 9320819111
},
{
"center_id": 19,
"mobile": 9820597129
},
{
"center_id": 20,
"mobile": 7738044390
},
{
"center_id": 21,
"mobile": 9860708203
},
{
"center_id": 22,
"mobile": 9657003381
},
{
"center_id": 79,
"mobile": 9594982002
},
{
"center_id": 80,
"mobile": 9323263232
},
{
"center_id": 23,
"mobile": 8866100217
},
{
"center_id": 24,
"mobile": 9429290795
},
{
"center_id": 81,
"mobile": 9413172239
},
{
"center_id": 82,
"mobile": 8200041637
},
{
"center_id": 25,
"mobile": 9033494114
},
{
"center_id": 27,
"mobile": "8238990150"
},
{
"center_id": 28,
"mobile": "9726272267"
},
{
"center_id": 26,
"mobile": "9726272267"
},
{
"center_id": 83,
"mobile": "9824164941"
},
{
"center_id": 50,
"mobile": "9601301918"
},
{
"center_id": 51,
"mobile": 9924351117
},
{
"center_id": 29,
"mobile": 9898689697
},
{
"center_id": 85,
"mobile": 7359988465
},
{
"center_id": 84,
"mobile": 8758343332
},
{
"center_id": 31,
"mobile": 9904401775
},
{
"center_id": 86,
"mobile": 9898689697
},
{
"center_id": 87,
"mobile": 9537371313
},
{
"center_id": 32,
"mobile": 9998177813
},
{
"center_id": 33,
"mobile": 9825503819
},
{
"center_id": 34,
"mobile": 7405875164
},
{
"center_id": 88,
"mobile": "9427492610 / 7990098414"
},
{
"center_id": 35,
"mobile": 8200758658
},
{
"center_id": 36,
"mobile": "9924347260"
}
]
@receiver(pre_save, sender=Event)
def generate_event_code(sender, instance, **kwargs):
# no need to create if already there. I know there's a better way to
# achieve this.
if instance.event_code:
return
l = len(instance.name)
s = ''
y = instance.year
if (l <= 6):
s += instance.name.upper()
else:
only_alphanum = re.compile(r'[^a-zA-Z0-9]')
words = instance.name.strip().split(' ')
l = len(words)
# strip any non alphanumeric characters
for i in range(l):
words[i] = only_alphanum.sub('', words[i]).upper()
if (l == 1):
s += words[0][:2] + words[0][:-3:-1]
elif (l > 1 and l < 4):
s += ''.join([words[i][:3] for i in range(l)])
else:
for i in range(l):
if (len(s) > 8):
break
s += words[i][:i+1]
fs = '{}-{}'.format(s, y)
events = Event.objects.filter(event_code=fs)
# event code not unique
if events.exists():
similar_events = len(events)
instance.event_code = '{}-{}-{}'.format(s, similar_events+1, y)
else:
instance.event_code = fs
@receiver(pre_save, sender=EventParticipant)
def generate_registration_no(sender, instance, **kwargs):
if settings.REDIS_CLIENT.exists(instance.event.event_code) != 1:
with connection.cursor() as cursor:
max_id_query = '''(select max(split_part(registration_no, '-', 4)::numeric) as max_id
from events_eventparticipant where event_id = %s)'''
cursor.execute(max_id_query, [instance.event.id])
max_id_record = cursor.fetchall()
settings.REDIS_CLIENT.set(instance.event.event_code, max_id_record[0][0] or 0)
if instance.registration_no:
return
ec = instance.event.event_code + '-M-'
# if instance.participant.gender == 'male':
# ec += '-M-'
# else:
# ec += '-F-'
# last_registered = EventParticipant.objects.filter(event=instance.event,
# participant__gender=instance.participant.gender).order_by('id').last()
# if last_registered:
# total_registered = int(last_registered.registration_no.split('-')[-1])
# instance.registration_no = ec + '{}'.format(total_registered+1)
# else:
# instance.registration_no = ec + '1'
instance.registration_no = ec + '{}'.format(settings.REDIS_CLIENT.incr(instance.event.event_code))
@receiver(post_save, sender=EventParticipant)
def send_sms(sender, instance, created, **kwargs):
if created:
born = instance.participant.date_of_birth
today = timezone.now().today()
age = today.year - born.year - ((today.month, today.day) < (born.month, born.day))
is_lmht_or_bmht = (int(age) <= int(CenterScope.objects.filter(gender='').order_by('-max_age').first().max_age))
profile_filter = None
pm = ''
gender = ''
# Get mobile number of coordinator of the center of the current participant
# Profiles don't have gender for lmht and bmht. i.e. profiles are combined for boys and girls for lmht, bmht.
if not is_lmht_or_bmht:
gender = instance.participant.gender
profile_filter = Profile.objects.filter(center=instance.home_center, gender=gender,
min_age__lte=age, max_age__gte=age)
# if age of participant is greater than any of the profiles, send the mobile no. of the profile of
# the current event
if not profile_filter.exists():
profile_filter = Profile.objects.filter(center=instance.home_center, gender=gender,
min_age=instance.event.min_age, max_age=instance.event.max_age)
if profile_filter.exists():
pm = profile_filter.order_by('id').first().mobile
if instance.event.id == 84:
pm = "8200312214"
if instance.event.id == 85:
logger.info(instance.home_center)
pms = [cep["mobile"] for cep in center_event_poc if cep["center_id"] == instance.home_center.id]
if len(pms) > 0:
pm = pms[0]
if instance.event.is_global_poc == True:
pm = instance.event.poc_number
# sms_string = settings.SMS_TEMPLATE.format(instance.registration_no, int(instance.event.fees), pm)
sms_string = "We have received your Registration for Youth Shibir"
# Because the sms vendor auto adds 91 to the number, we'll have to remove ours
# Note: This is a hack and only works for India numbers. Please don't use this in
# production.
mobile = str(instance.participant.mobile)
if ('+' in mobile) or ('91' in mobile[0:3]):
mobile = mobile[3:]
#url = settings.SMS_URL.format(settings.SMS_USER, settings.SMS_PASS, settings.SENDER_ID, mobile, sms_string)
logger.info("Created SMS string {}".format(sms_string))
try:
# pass
send_sms_async.delay('POST', params={'to': [mobile], 'message': sms_string})
except Exception as e:
logger.exception('while sending sms')
```
#### File: mhtportal-web/mhtportal/celery.py
```python
import os
import logging
from celery import Celery
logger = logging.getLogger(__name__)
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mhtportal.settings')
app = Celery('mhtportal')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()
@app.task(bind=True)
def debug_task(self):
logger.info('Request: {0!r}'.format(self.request))
``` |
{
"source": "0xelectron/scrap_acpc",
"score": 3
} |
#### File: 0xelectron/scrap_acpc/scrap_merit_info_acpdc.py
```python
import xlsxwriter
import os
import sys
import re
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep
import logging
import logging.handlers
from datetime import datetime
# logging
LOG_FILENAME = "scrap_merit_info_acpdc.out"
my_logger = logging.getLogger('ScrapMeritInfoLogger')
my_logger.setLevel(logging.DEBUG)
# Add the log message handler to the logger
handler = logging.handlers.RotatingFileHandler(
LOG_FILENAME, maxBytes=10*1024*1024, backupCount=5)
my_logger.addHandler(handler)
my_logger.info('\n\n-----------------------------------------------------')
my_logger.info('Time of Execution: {}'.format(datetime.now()))
my_logger.info('-----------------------------------------------------\n\n')
URL = "http://acpdc.in/result/result_search.asp"
# FILENAMES = []
bad_chars = r'\xa0\t\n\\'
rgx = re.compile('[%s]' % bad_chars)
# scrap merit info from url with roll no. range from l to h
def main(l, h):
out_file = None
driver = None
try:
FILENAME = "{}-{}.xlsx".format(l, h)
my_logger.info("open(out_file): {}".format(FILENAME))
out_file = xlsxwriter.Workbook(FILENAME)
sheet = out_file.add_worksheet()
row = 1
col = 0
sheet.write('A1', 'Merit No')
sheet.write('B1', 'Roll No')
sheet.write('C1', 'Name')
sheet.write('D1', 'Allotted Institute Name')
sheet.write('E1', 'Allotted Course Name')
sheet.write('F1', 'Allotted Category')
sheet.write('G1', 'Basic Category')
sheet.write('H1', 'Allotted Status')
driver = webdriver.Chrome()
driver.get(URL)
for gid in range(l, h):
my_logger.info("Getting merit info for gid: {}".format(gid))
elm = driver.find_element_by_name('inno')
elm.clear()
elm.send_keys(str(gid) + Keys.RETURN)
soup = BeautifulSoup(driver.page_source, 'html.parser')
tables = soup.find_all('table')
if not tables:
driver.back()
continue
rows = tables[0].find_all('tr')
data = [[rgx.sub('', td.text) for td in tr.findAll("td")] for tr in rows]
sheet.write(row, 0, data[1][1])
sheet.write(row, 1, data[1][3])
sheet.write(row, 2, data[2][1])
sheet.write(row, 3, data[3][1])
sheet.write(row, 4, data[4][1])
sheet.write(row, 5, data[5][1])
sheet.write(row, 6, data[5][3])
sheet.write(row, 7, data[6][1])
row += 1
driver.back()
sleep(0.05)
except KeyboardInterrupt:
sys.exit(0)
finally:
my_logger.info("------------------------------------------------------------\n")
if out_file:
out_file.close()
if driver:
driver.close()
if __name__ == '__main__':
for i in range(1000000, 1039490, 10000):
l = i
if (i == 1030000):
h = 1039491
else:
h = i + 10000
my_logger.info("-------------------------------\n")
my_logger.info("l = {}\nh = {}\n".format(l, h))
main(l, h)
```
#### File: 0xelectron/scrap_acpc/scrap_merit_info.py
```python
import xlsxwriter
from openpyxl import load_workbook
import os
import sys
import re
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep
import logging
import logging.handlers
from datetime import datetime
import argparse
# logging
LOG_FILENAME = "scrap_merit_info.out"
my_logger = logging.getLogger('ScrapMeritInfoLogger')
my_logger.setLevel(logging.DEBUG)
# Add the log message handler to the logger
handler = logging.handlers.RotatingFileHandler(
LOG_FILENAME, maxBytes=10*1024*1024, backupCount=5)
my_logger.addHandler(handler)
my_logger.info('\n\n-----------------------------------------------------')
my_logger.info('-----------------------------------------------------')
my_logger.info('Time of Execution: {}'.format(datetime.now()))
URL = "http://www.jacpcldce.ac.in/search/WPQuery_13.asp"
# FILENAMES = []
bad_chars = r'\xa0\t\n\\'
rgx = re.compile('[%s]' % bad_chars)
def main(filename):
in_file = None
out_file = None
driver = None
try:
my_logger.info("open(in_file): {}".format(filename))
in_file = load_workbook(filename)
first_sheet = in_file.get_sheet_names()[0]
worksheet = in_file.get_sheet_by_name(first_sheet)
my_logger.info("open(out_file): {}".format("final_" + filename))
out_file = xlsxwriter.Workbook("final_" + filename)
sheet = out_file.add_worksheet()
row = 1
col = 0
sheet.write('A1', 'Merit No')
sheet.write('B1', 'Gujcet No')
sheet.write('C1', 'Name')
sheet.write('D1', 'Board Name')
sheet.write('E1', 'Board PCM Total')
sheet.write('F1', 'Gujcet PCM Total')
sheet.write('G1', 'PCM Board Percentile (A)')
sheet.write('H1', 'PCM Board Percentile (B)')
sheet.write('I1', 'Merit Mark (A*0.6 + B*0.4)')
sheet.write('J1', 'Remarks')
sheet.write('K1', 'Alloted Institute')
sheet.write('L1', 'Course alloted')
sheet.write('M1', 'Alloted Category')
sheet.write('N1', 'Sub Category')
driver = webdriver.Chrome()
driver.get(URL)
for gids in worksheet.iter_rows():
gid = gids[0].value
my_logger.info("Getting merit info for gid: {}".format(gid))
elm = driver.find_element_by_name('txtGcetNo')
elm.clear()
elm.send_keys(str(gid) + Keys.RETURN)
soup = BeautifulSoup(driver.page_source, 'html.parser')
tables = soup.find_all('table', limit=5)
if len(tables) < 5:
driver.back()
continue
rows = tables[-1].find_all('tr')
data = [[rgx.sub('', td.text) for td in tr.findAll("td")] for tr in rows]
for i in range(14):
sheet.write(row, col, data[i][1])
col += 1
row += 1
col = 0
driver.back()
sleep(0.05)
except KeyboardInterrupt:
sys.exit(0)
finally:
if in_file:
in_file.close()
if out_file:
out_file.close()
if driver:
driver.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Scrap Merit Info from ACPC')
parser.add_argument('--filenames', nargs='*', required=True,
help='filenames for the input')
args = parser.parse_args()
for f in args.filenames:
main(f)
``` |
{
"source": "0xelectron/studious-engine",
"score": 4
} |
#### File: studious-engine/python/fib.py
```python
def fib(n):
if (n == 0 or n == 1):
return 1
return fib(n-1) + fib(n-2)
if __name__ == '__main__':
print(fib(5))
```
#### File: Head First Python and Head First Programming/atheletes data/atheletes_data.py
```python
from athletelist import Athletelist
'''Open the file, read its first line, split it into a list, and return an Athletelist data object.'''
def get_coach_data(filename):
try:
with open(filename) as f:
data = f.readline()
templ = data.strip().split(',')
return(Athletelist(templ.pop(0),templ.pop(0),templ))
except IOError as ex:
print('File error' + str(ex))
return(None)
james = get_coach_data('james.txt')
print(james.name + "'s fastest times are: " + str(james.top3()))
julie = get_coach_data('julie.txt')
print(julie.name + "'s fastest times are: " + str(julie.top3()))
mikey = get_coach_data('mikey.txt')
print(mikey.name + "'s fastest times are: " + str(mikey.top3()))
sarah = get_coach_data('sarah.txt')
print(sarah.name + "'s fastest times are: " + str(sarah.top3()))
```
#### File: Head First Python and Head First Programming/atheletes data/athletemodel.py
```python
import pickle
from athletelist import Athletelist
def get_coach_data(filename):
try:
with open(filename) as d:
data = d.readline()
templ = data.strip().split(',')
return(Athletelist(templ.pop(0),templ.pop(0),templ))
except IOError as err:
print("File Error" + str(err))
return(None)
def put_to_store(files_list):
all_athletes={}
for each_file in files_list:
ath=get_coach_data(each_file)
all_athletes[ath.name]= ath
try:
with open('athletes.pickle','wb') as athf:
pickle.dump(all_athletes,athf)
except IOError as err:
print('File error (put and store): ' + str(err))
return(all_athletes)
def get_from_store():
all_athletes={}
try:
with open('athletes.pickle','rb') as athf:
all_athletes = pickle.load(athf)
except IOError as err:
print('File error (get from store): ' + str(err))
return(all_athletes)
```
#### File: Head First Python and Head First Programming/credit_card program/promotion.py
```python
def discount(price):
return(0.9 * price)
```
#### File: Head First Python and Head First Programming/Quiz_host program/Quiz_host(GUI).pyw
```python
from tkinter import *
app = Tk()
app.title("TVN Game Show")
app.geometry('400x100+200+100')
import pygame.mixer
sounds = pygame.mixer
sounds.init()
correct_s = sounds.Sound("correct.wav")
wrong_s = sounds.Sound("wrong.wav")
number_correct = 0
number_wrong = 0
t1 = IntVar() #create a text variable for b1
t2 = IntVar() #create a text variable for b2
def play_correct_sound():
global number_correct
number_correct = number_correct + 1
correct_s.play()
t1.set(number_correct)
def play_wrong_sound():
global number_wrong
number_wrong = number_wrong + 1
wrong_s.play()
t2.set(number_wrong)
b1 = Button(app,text = 'Correct!',width = 10, command = play_correct_sound)
b1.pack(side = 'left', padx=10,pady=10)
b2 = Button(app,text= 'Wrong!',width=10,command=play_wrong_sound)
b2.pack(side='right',padx=10,pady=10)
msg = Label(app,text = 'Click buttons when you are ready!!',height = 2)
msg.pack(side='top')
l1=Label(app,textvariable = t1,height = 3)
l1.pack(side = 'left')
l2=Label(app,textvariable = t2,height = 3)
l2.pack(side = 'right')
app.mainloop()
``` |
{
"source": "0xen/PICO-GB",
"score": 3
} |
#### File: PICO-GB/Python/obj2ofs.py
```python
import sys
def obj2ofs(filename, section, define):
if len(define) > 0:
print('#define {:s}'.format(define))
detected = False
with open(filename) as f:
line = f.readline()
while line:
decoded_line = [x.strip() for x in line.split(' ')]
if decoded_line[0] == 'A':
detected = (decoded_line[1] == section)
if (detected and decoded_line[0] == 'S'):
print('#define {:s}_ofs 0x{:s}u'.format(decoded_line[1], decoded_line[2][5:]))
line = f.readline()
return
if __name__=='__main__':
obj2ofs(sys.argv[1], sys.argv[2], sys.argv[3])
``` |
{
"source": "0xf0f/codenode",
"score": 2
} |
#### File: cpp/nodes/block.py
```python
from codenode.base import CodeNode
class Block(CodeNode):
def header(self):
yield '{'
def body(self):
yield from self.children
def footer(self):
yield '}'
```
#### File: cpp/nodes/function_definition.py
```python
from codenode.base import CodeNode
class Function(CodeNode):
def __init__(self, return_type, name, *args, **kwargs):
super().__init__()
self.return_type = return_type
self.name = name
self.args = args
self.kwargs = kwargs
def header(self):
arg_string = ', '.join(self.args)
if self.kwargs:
if self.args:
arg_string += ', '
arg_string += ', '.join(
f'{key}={value}' for key, value in self.kwargs.items()
)
yield f'{self.return_type} {self.name}({arg_string})'
yield '{'
def body(self):
yield from self.children
def footer(self):
yield '}'
``` |
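A short usage sketch of the C++ `Function` node above (an assumption here: that `codenode.cpp` re-exports its nodes the way `codenode.python` does in the `generate_tests.py` example below, and that the base `CodeNode` provides `add_child()` and `dumps()` as used there):
```python
# Sketch only: import paths and the exact dumps() output shape are assumptions.
import codenode as cn
import codenode.cpp as cpp

add = cpp.Function('int', 'add', 'int a', 'int b')
add.add_child(cn.Line('return a + b;'))
print(add.dumps())
# Roughly:
# int add(int a, int b)
# {
#     return a + b;
# }
```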
{
"source": "0xf0f/quicktest",
"score": 3
} |
#### File: examples/codenode_example/generate_tests.py
```python
import codenode as cn
import codenode.python as py
import inspect
def generate_class_tests(cls):
test_list_name = f'{cls.__name__.lower()}_tests'
file = cn.File()
file.add_child(
cn.Line('from quicktest import TestList')
)
file.add_child(
py.Comment(f'import {cls.__name__} here')
)
file.add_child(cn.EmptyLines(2))
file.add_child(
cn.Line(
f"{test_list_name} = TestList('{cls.__name__} tests')"
)
)
file.add_child(cn.EmptyLines(2))
for name, method in inspect.getmembers(
cls, predicate=inspect.isroutine
):
test_function = py.Function(f'test_{name}', 'instance')
test_function.add_decorator(f'{test_list_name}.test')
comment = py.Comment(test_function.dumps())
file.add_child(comment)
file.add_child(cn.EmptyLines(1))
run_function = py.Function('run_tests')
run_function.add_child(cn.Line(f'instance = {cls.__name__}()'))
run_function.add_child(cn.Line(f'{test_list_name}.run(instance)'))
file.add_child(run_function)
return file
if __name__ == '__main__':
class TestClass:
def test_method(self):
pass
generated_tests = generate_class_tests(TestClass)
# print results:
print(generated_tests.dumps())
# or to save to a file:
with open('output.py', 'w') as file:
generated_tests.dump(file)
```
#### File: examples/codenode_example/output.py
```python
from quicktest import TestList
# import TestClass here
testclass_tests = TestList('TestClass tests')
# @testclass_tests.test
# def test___delattr__(instance):
# pass
#
# @testclass_tests.test
# def test___dir__(instance):
# pass
#
# @testclass_tests.test
# def test___eq__(instance):
# pass
#
# @testclass_tests.test
# def test___format__(instance):
# pass
#
# @testclass_tests.test
# def test___ge__(instance):
# pass
#
# @testclass_tests.test
# def test___getattribute__(instance):
# pass
#
# @testclass_tests.test
# def test___gt__(instance):
# pass
#
# @testclass_tests.test
# def test___hash__(instance):
# pass
#
# @testclass_tests.test
# def test___init__(instance):
# pass
#
# @testclass_tests.test
# def test___init_subclass__(instance):
# pass
#
# @testclass_tests.test
# def test___le__(instance):
# pass
#
# @testclass_tests.test
# def test___lt__(instance):
# pass
#
# @testclass_tests.test
# def test___ne__(instance):
# pass
#
# @testclass_tests.test
# def test___new__(instance):
# pass
#
# @testclass_tests.test
# def test___reduce__(instance):
# pass
#
# @testclass_tests.test
# def test___reduce_ex__(instance):
# pass
#
# @testclass_tests.test
# def test___repr__(instance):
# pass
#
# @testclass_tests.test
# def test___setattr__(instance):
# pass
#
# @testclass_tests.test
# def test___sizeof__(instance):
# pass
#
# @testclass_tests.test
# def test___str__(instance):
# pass
#
# @testclass_tests.test
# def test___subclasshook__(instance):
# pass
#
# @testclass_tests.test
# def test_test_method(instance):
# pass
#
def run_tests():
instance = TestClass()
testclass_tests.run(instance)
```
#### File: quicktest/quicktest/test_list.py
```python
import sys
import time
from quicktest.test import Test
from quicktest.test_run import TestRun
from typing import Callable, Union
class TestList:
def __init__(self, name='Unnamed Test List'):
self.name = name
self.tests = []
def add_test(self, test: Test):
self.tests.append(test)
def test(self, name_or_method: Union[str, Callable]):
if callable(name_or_method):
new_test = Test()
new_test.name = name_or_method.__name__
new_test.method = name_or_method
self.add_test(new_test)
return name_or_method
elif isinstance(name_or_method, str):
def wrapper(func):
new_test = Test()
new_test.name = name_or_method
new_test.method = func
self.add_test(new_test)
return func
return wrapper
def run(self, *args, out=sys.stdout, **kwargs):
print(
self.name, '-',
time.strftime('%Y-%m-%d %H:%M:%S'),
file=out
)
failed_tests = []
for test in self.tests:
test_run = TestRun(test)
test_run.run(*args, **kwargs)
if test_run.succeeded():
result = 'succeeded'
else:
result = 'failed'
failed_tests.append(test_run)
print('*', end='', file=out)
print(
test.name, '-', result,
# 'with return value', test_run.return_value,
file=out
)
print(
len(self.tests)-len(failed_tests),
'out of', len(self.tests), 'tests succeeded.',
file=out
)
if failed_tests:
print(file=out)
print('Failures:', file=out)
for test_run in failed_tests:
print(test_run.test.name, file=out)
print(test_run.error, file=out)
return bool(failed_tests)
```
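A compact sketch of the two decorator forms accepted by `TestList.test` (bare callable or explicit name string), mirroring the generated `output.py` above:
```python
# Sketch: both usages register a Test; run() forwards its arguments to each test.
from quicktest import TestList

tests = TestList('arithmetic tests')

@tests.test                       # name taken from the function name
def addition(instance):
    assert 1 + 1 == 2

@tests.test('named subtraction')  # explicit name string
def subtraction(instance):
    assert 2 - 1 == 1

tests.run(None)                   # positional args are passed to every test
```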
#### File: quicktest/quicktest/test_run.py
```python
from traceback import format_exc
from quicktest.test import Test
class TestRun:
def __init__(self, test: Test):
self.test = test
self.completed = False
self.return_value = None
self.error = None
def succeeded(self):
return self.completed and not self.error
def run(self, *args, **kwargs):
try:
self.return_value = self.test.method(*args, **kwargs)
except:
self.error = format_exc()
self.completed = True
``` |
{
"source": "0xf0f/sqlite-file-index",
"score": 3
} |
#### File: sqlite-file-index/sqlite_file_index/file_index_node.py
```python
from typing import TYPE_CHECKING
from pathlib import Path
if TYPE_CHECKING:
from .file_index import FileIndex
import sqlite3
class FileIndexNode:
def __init__(self, file_index: 'FileIndex', row: 'sqlite3.Row'):
self.file_index: 'FileIndex' = file_index
self.id = row['id']
self.path = Path(row['path'])
self.parent = row['parent']
def sub_node(self, row):
return FileIndexNode(self.file_index, row)
def search(self, keyword, recursive=False):
if recursive:
items = self.file_index.db.execute(
f'''
with recursive subfolders(_id) as (
values(?)
union all select id from folders, subfolders where parent=_id
)
select * from files where parent in subfolders and path like ?
order by path collate nocase asc;
''',
(self.id, f'%{keyword}%')
)
yield from map(self.sub_node, items)
else:
files = self.file_index.db.execute(
'select * from files where parent=? and '
'path like ? order by path asc',
(self.id, f'%{keyword}%')
)
yield from map(self.sub_node, files)
def iterdir(self, recursive=False):
if recursive:
items = self.file_index.db.execute(
f'''
with recursive subfolders(_id) as (
values(?)
union all select id from folders, subfolders where parent=_id
)
select * from folders where parent in subfolders
union all
select * from files where parent in subfolders
order by path collate nocase asc;
''',
(self.id,)
)
yield from map(self.sub_node, items)
else:
yield from self
def __iter__(self):
items = self.file_index.db.execute(
f'''
select * from folders where parent=?
union all
select * from files where parent=?
order by path collate nocase asc;
''', (self.id, self.id)
)
yield from map(self.sub_node, items)
```
#### File: sqlite-file-index/sqlite_file_index/file_index_task.py
```python
from threading import Thread, Event
from abc import ABC, abstractmethod
import typing
if typing.TYPE_CHECKING:
from .file_index import FileIndex
class FileIndexTask(Thread, ABC):
def __init__(self, index: 'FileIndex'):
super().__init__()
self.index: 'FileIndex' = index
self.pause_flag = Event()
self.pause_flag.set()
def run(self) -> None:
self.on_start()
self.loop()
self.on_finish()
def on_start(self):
pass
def on_finish(self):
pass
def loop(self):
while not self.complete():
self.pause_flag.wait()
self.iterate()
self.pause_flag.wait()
@abstractmethod
def iterate(self):
pass
@abstractmethod
def complete(self) -> bool:
pass
def pause(self):
self.pause_flag.clear()
def resume(self):
self.pause_flag.set()
def is_paused(self):
return not self.pause_flag.is_set()
``` |
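Because `FileIndexTask` is abstract, concrete tasks supply `iterate()` and `complete()`; the loop waits on `pause_flag` around each pass, so `pause()`/`resume()` can be called from another thread. A minimal subclass sketch (the `index` argument and the `paths` work list are assumptions, not part of the file above):
```python
# Sketch only: 'index' is whatever FileIndex instance the task operates on.
from sqlite_file_index.file_index_task import FileIndexTask

class ScanTask(FileIndexTask):
    def __init__(self, index, paths):
        super().__init__(index)
        self.pending = list(paths)

    def iterate(self):
        # One unit of work per loop pass.
        path = self.pending.pop()
        print('would add', path, 'to the index')

    def complete(self) -> bool:
        return not self.pending
```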
{
"source": "0xf0/simpleTelegramPyBot",
"score": 3
} |
#### File: simpleTelegramPyBot/core/commands.py
```python
import re
import config
def start(**kwargs):
if kwargs["name"]:
name = re.sub("<[^>]+>", "", kwargs["name"])
elif kwargs["username"]:
name = re.sub("<[^>]+>", "", kwargs["username"])
else:
name = "Аноним"
text = ["Привет <b>{name}</b>! Вот что я умею:".format(name=name)]
for k, v in config.TELEGRAM_COMMANDS.items():
text.append("/{cmd} - {description}".format(cmd=k, description=v["description"]))
return {"chat_id": kwargs["chat_id"], "text": "\n".join(text), "parse_mode": "HTML"}
```
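The Russian literals above read roughly as "Hello <name>! Here is what I can do:" and "Anonymous". The `start` handler only requires `config.TELEGRAM_COMMANDS` to map command names to dicts carrying a `description`; a hypothetical `config.py` fragment with that shape:
```python
# Hypothetical shape -- only the "description" key is read by start().
TELEGRAM_COMMANDS = {
    "start": {"description": "show this help message"},
    "ping": {"description": "check that the bot is alive"},
}
```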
#### File: simpleTelegramPyBot/core/telegram.py
```python
import requests
import logging
txt = {
"token_not_set": "[-] telegram token not set",
"requests_error": "[-] requests error: {e}",
"method_error": "[-] got an error: {e}",
"method_exception": "[-] got an exception: {e}\n\tdata: {data}"
}
log = logging.getLogger(__name__)
class API(object):
def __init__(self, token=None, **kwargs):
self.__token = token
self.__method = kwargs.get("method", "")
def __request(self, **kwargs):
r = ""
if self.__token:
url = "https://api.telegram.org/bot{token}/{method}".format(token=self.__token, method=self.__method)
try:
r = requests.post(url, data=kwargs, timeout=10.0).json()
except Exception as e:
log.critical(txt["requests_error"].format(e=e))
else:
log.warning(txt["token_not_set"])
return r
def __getattr__(self, attr):
method = ("{method}.{attr}".format(method=self.__method, attr=attr)).lstrip('.')
return API(self.__token, method=method)
def __call__(self, **kwargs):
return self.__request(**kwargs)
@staticmethod
def parse(_m):
if "message" in _m:
m = _m["message"]
first_name = m["from"]["first_name"] if "first_name" in m["from"] else ""
last_name = m["from"]["last_name"] if "last_name" in m["from"] else ""
username = m["from"]["username"] if "username" in m["from"] else ""
data = {"chat_id": m["chat"]["id"],
"name": "{fname} {lname}".format(fname=first_name, lname=last_name),
"username": username,
"user_id": m["from"]["id"],
"type": m["chat"]["type"],
"date": m["date"],
"text": m["text"],
"ok": True}
elif "callback_query" in _m:
m = _m["callback_query"]
first_name = m["from"]["first_name"] if "first_name" in m["from"] else ""
last_name = m["from"]["last_name"] if "last_name" in m["from"] else ""
username = m["from"]["username"] if "username" in m["from"] else ""
data = {
"chat_id": m["from"]["id"],
"name": "{fname} {lname}".format(fname=first_name, lname=last_name),
"username": username,
"user_id": m["from"]["id"],
"data": m["data"],
"ok": True
}
else:
data = _m
return data
@staticmethod
def method_errors(data):
try:
if "ok" in data and data["ok"]:
return False
else:
log.warning(txt["method_error"].format(e=data))
return True
except Exception as e:
log.critical(txt["method_exception"].format(e=e, data=data))
return True
``` |
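A short sketch of driving the dynamic `API` wrapper above: attribute access builds the Telegram method name and the call issues the HTTPS POST (token and chat id are placeholders):
```python
# Sketch only: placeholder token and chat_id.
api = API("123456:PLACEHOLDER-TOKEN")
reply = api.sendMessage(chat_id=123456789, text="Hello!", parse_mode="HTML")
if not API.method_errors(reply):
    print("message sent")
```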
{
"source": "0xF1/django_smtp_auth",
"score": 2
} |
#### File: django_smtp_auth/smtp_auth_backend/backend.py
```python
import smtplib
from django.conf import settings
from django.contrib.auth.models import User
EMAIL_HOST = settings.EMAIL_HOST # Django default setting
EMAIL_USE_TLS = settings.EMAIL_USE_TLS # Django default setting
SMTP_AUTH_HOST = getattr(settings, 'SMTP_AUTH_HOST', None) # smtp_auth setting
SMTP_AUTH_USE_TLS = getattr(settings, 'SMTP_AUTH_USE_TLS', None) # smtp_auth setting
class SMTPBackend(object):
"""
Authenticates against an SMTP server
"""
supports_inactive_user = True
def authenticate(self, username=None, password=None):
#
# SMTP Authentication
#
if SMTP_AUTH_HOST:
srv = smtplib.SMTP(SMTP_AUTH_HOST)
else:
srv = smtplib.SMTP(EMAIL_HOST)
if SMTP_AUTH_USE_TLS is not None:
if SMTP_AUTH_USE_TLS:
srv.starttls()
else:
if EMAIL_USE_TLS:
srv.starttls()
status_code = srv.login(username, password)
srv.quit()
if status_code[0] in [235, 503]:
try:
return User.objects.get(username=username)
except User.DoesNotExist:
return User.objects.create_user(username, username, password)
else:
try:
user = User.objects.get(username=username)
if user.check_password(password):
return user
else:
return None
except User.DoesNotExist:
return None
``` |
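A hedged example of the Django settings this backend reads (the dotted backend path assumes the app is installed as `smtp_auth_backend`; host names are placeholders):
```python
# settings.py sketch -- values are placeholders.
AUTHENTICATION_BACKENDS = [
    'smtp_auth_backend.backend.SMTPBackend',
    'django.contrib.auth.backends.ModelBackend',
]
EMAIL_HOST = 'mail.example.com'      # used when SMTP_AUTH_HOST is not set
EMAIL_USE_TLS = True
SMTP_AUTH_HOST = 'auth.example.com'  # optional override for authentication only
SMTP_AUTH_USE_TLS = True             # optional override for authentication only
```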
{
"source": "0xF1/nessus_tools",
"score": 3
} |
#### File: 0xF1/nessus_tools/software_parser.py
```python
import xml.dom.minidom
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", "--file", action="store", type="string", dest="file", help="Nessus file to parse")
parser.add_option("-o", "--output", action="store", type="string", dest="output", help="output file name")
(menu, args) = parser.parse_args()
report_output = []
def main():
nes_file = menu.file
report = xml.dom.minidom.parse(nes_file)
for host in report.getElementsByTagName('ReportHost'):
client = host.getAttribute("name")
reports = host.childNodes
for el in reports:
if el.nodeType == el.ELEMENT_NODE and el.getAttribute("pluginID") == "20811":
output = get_plugin_output(el)
software = get_software(output)
updates = []
item = {"software": software, "updates": updates, "client": client}
report_output.append(item)
if len(report_output) > 0:
save_csv(report_output)
def save_csv(data):
fh = open(menu.output, "w")
fh.write("Client,Software,Version,Date Installed\n")
for d in data:
for i in d['software']:
if i is not None:
fh.write('"%s","%s","%s","%s"\n' %(d['client'], i[0], i[1], i[2]))
fh.close()
print "Done."
def getText(nodelist):
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
def get_plugin_output(el):
a = el.getElementsByTagName("plugin_output")[0]
return getText(a.childNodes)
def get_software(data):
software = []
lines = data.split("\n")
lines.pop(0)
lines.pop(0)
lines.pop(0)
for line in lines:
if line == "":
break
software.append(extract_meta(line))
return software
def extract_meta(data):
fragments = data.split("[")
name = fragments[0].strip()
version = None
date = None
for frag in fragments:
if frag.startswith("version"):
words = frag.split()
ver = words[1].split("]")
version = ver[0]
if frag.startswith("installed"):
words = frag.split()
ver = words[2].split("]")
date = ver[0]
if version and date:
return (name, version, date)
def get_updates(data):
""" Incomplete"""
updates = []
sections = data.split("The following updates")
lines = sections[1].split("\n")
lines.pop(0)
lines.pop(0)
lines.pop(0)
for line in lines:
updates.append(line)
return updates
if __name__ == "__main__":
main()
``` |
{
"source": "0xf3cd/BTC-Blockchain-Raw-Data-ETL",
"score": 3
} |
#### File: stage1/BTCParser/ScriptSemantic.py
```python
from enum import Enum
from .ScriptContext import ScriptExecutorContext
# Reference: https://en.bitcoin.it/wiki/Script
# Indicate the source of the stack (either MAIN stack or ALT stack).
# This will only be used for transform semantic.
class ScriptSemanticStackSrc(Enum):
MAIN = 0
ALT = 1
# The type of ScriptSemantic.
class ScriptSemanticType(Enum):
BASE_CLASS = 0
TRANSFORM = 1
ABORT = 2
CONDITIONAL_ABORT = 3
BRANCH_BEGIN = 4
BRANCH_ELSE = 5
BRANCH_END = 6
ARITHMETIC_CHECK = 7
CODE_SEPARATOR = 8
CHECK_SIG = 9
CHECK_MULTI_SIG = 10
CHECK_LOCKTIME_VERIFY = 11
CHECK_SEQUENCE_VERIFY = 12
# Base class for a semantic item (a semantic item carries its semantical meaning).
class ScriptSemantic:
def __init__(self, semantic_type=ScriptSemanticType.BASE_CLASS):
self.semantic_type = semantic_type
def get_semantic_type(self):
return self.semantic_type
def take_action(self, exec_ctx: ScriptExecutorContext):
raise NotImplementedError
# Represent transform semantic.
# Steps of the semantic:
# 1. Pop `num_pop` elements from input stack (specified by `in_stack_src`), and form the input list of popped elements.
# 2. Apply function `func` on the input list.
# 3. If `func_has_return` is True, push the return of the `func` back to the output stack (specified by `out_stack_src`).
class ScriptTransformSemantic(ScriptSemantic):
def __init__(self, num_pop, func, in_stack_src=ScriptSemanticStackSrc.MAIN, out_stack_src=ScriptSemanticStackSrc.MAIN):
super(ScriptTransformSemantic, self).__init__(ScriptSemanticType.TRANSFORM)
self.num_pop = num_pop
self.func = func
self.in_stack_src = in_stack_src
self.out_stack_src = out_stack_src
# Represent abort semantic.
class ScriptAbortSemantic(ScriptSemantic):
def __init__(self):
super(ScriptAbortSemantic, self).__init__(ScriptSemanticType.ABORT)
# Represent conditional abort semantic.
class ScriptConditionalAbortSemantic(ScriptSemantic):
def __init__(self, num_pop, func):
super(ScriptConditionalAbortSemantic, self).__init__(ScriptSemanticType.CONDITIONAL_ABORT)
self.num_pop = num_pop
self.func = func
# Represent branch begin semantic.
class ScriptBranchBeginSemantic(ScriptSemantic):
def __init__(self, num_pop, func):
super(ScriptBranchBeginSemantic, self).__init__(ScriptSemanticType.BRANCH_BEGIN)
self.num_pop = num_pop
self.func = func
# Represent branch else semantic.
class ScriptBranchElseSemantic(ScriptSemantic):
def __init__(self):
super(ScriptBranchElseSemantic, self).__init__(ScriptSemanticType.BRANCH_ELSE)
# Represent branch end semantic.
class ScriptBranchEndSemantic(ScriptSemantic):
def __init__(self):
super(ScriptBranchEndSemantic, self).__init__(ScriptSemanticType.BRANCH_END)
# Represent arithmetic check semantic.
class ScriptArithmeticCheckSemantic(ScriptSemantic):
def __init__(self, num_check):
super(ScriptArithmeticCheckSemantic, self).__init__(ScriptSemanticType.ARITHMETIC_CHECK)
self.num_check = num_check # The number of elements to check.
# Represent code separator and signature check semantics.
class ScriptCodeSeparatorSemantic(ScriptSemantic):
def __init__(self):
super(ScriptCodeSeparatorSemantic, self).__init__(ScriptSemanticType.CODE_SEPARATOR)
class ScriptCheckSigSemantic(ScriptSemantic):
def __init__(self):
super(ScriptCheckSigSemantic, self).__init__(ScriptSemanticType.CHECK_SIG)
class ScriptCheckMultiSigSemantic(ScriptSemantic):
def __init__(self):
super(ScriptCheckMultiSigSemantic, self).__init__(ScriptSemanticType.CHECK_MULTI_SIG)
# Represent check * verify semantics.
class ScriptCheckLocktimeVerifySemantic(ScriptSemantic):
def __init__(self):
super(ScriptCheckLocktimeVerifySemantic, self).__init__(ScriptSemanticType.CHECK_LOCKTIME_VERIFY)
class ScriptCheckSequenceVerifySemantic(ScriptSemantic):
def __init__(self):
super(ScriptCheckSequenceVerifySemantic, self).__init__(ScriptSemanticType.CHECK_SEQUENCE_VERIFY)
```
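As a concrete illustration of the transform semantic described above, using the classes defined in this file (the same pattern the token classes attach later):
```python
# Sketch: OP_DUP-style duplication pops one element and pushes it back twice;
# a constant push pops nothing and pushes a literal byte vector.
dup_semantic = ScriptTransformSemantic(1, lambda l: [l[0], l[0]])
push_42 = ScriptTransformSemantic(0, lambda l: [b'\x2a'])
```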
#### File: stage1/BTCParser/ScriptToken.py
```python
import struct
from enum import Enum
from .Tool import int_to_byte_vector, bool_to_byte_vector, byte_vector_to_int, byte_vector_to_bool
from .Tool import hash_ripemd160, hash_sha1, hash_sha256, hash_hash160, hash_hash256
from .ScriptContext import ScriptParserContext
from .ScriptSemantic import ScriptSemanticStackSrc, \
ScriptTransformSemantic, \
ScriptAbortSemantic, \
ScriptConditionalAbortSemantic, \
ScriptBranchBeginSemantic, \
ScriptBranchElseSemantic, \
ScriptBranchEndSemantic, \
ScriptArithmeticCheckSemantic, \
ScriptCodeSeparatorSemantic, \
ScriptCheckSigSemantic, \
ScriptCheckMultiSigSemantic, \
ScriptCheckLocktimeVerifySemantic, \
ScriptCheckSequenceVerifySemantic
# Reference: https://en.bitcoin.it/wiki/Script
# The enumeration of all valid token type.
class ScriptTokenType(Enum):
BASE_CLASS = 0
CONSTANTS = 1
FLOW_CONTROL = 2
STACK = 3
SPLICE = 4
BITWISE_LOGIC = 5
ARITHMETIC = 6
CRYPTO = 7
LOCKTIME = 8
PSEUDO = 9 # Not used
RESERVED = 10
# Base class for a token.
class ScriptToken:
def __init__(self, word, opcode, token_type=ScriptTokenType.BASE_CLASS):
self.word = word
self.opcode = opcode
self.token_type = token_type
self.semantic_list = []
self.data = b'' # Only used for constants.
def __str__(self):
return self.word # f'<Token opcode={self.opcode} word={self.word}>'
def get_token_type(self):
return self.token_type
def add_semantic(self, semantic):
self.semantic_list.append(semantic)
def get_semantic_list(self):
return self.semantic_list
def get_asm(self) -> str:
return self.word
# The mapping table from opcode (integer) to word (string).
OPCODE_TO_WORD_TABLE = {} # To be filled later.
################## Constants ####################
# Reference: https://en.bitcoin.it/wiki/Script#Constants
# Indicate which strategy we should take to get the value of the constant.
class ScriptConstantDataSrc(Enum):
SELF_CONTAINING = 0 # The operation has its default constant value, e.g. OP_1
DATA_FROM_SCRIPT = 1 # The operation will read fixed number of bytes from script, e.g. opcode = 0x40
LEN_FROM_SCRIPT = 2 # The length of data is stored on the script, e.g. OP_PUSHDATA1
# The rule table for each constant token.
# Key: op code
# Value: a dictionary
# - word: the corresponding word to the op code.
# - data_src: from which source can we get the data.
# - value: the way to interpret this field depends on `data_src`,
# for SELF_CONTAINING, it should be the value of the constant, e.g. for OP_1 it should be 1,
# for DATA_FROM_SCRIPT, it is interpreted as the number of bytes we need to read from script,
# for LEN_FROM_SCRIPT, it is interpreted as the number of bytes used to represent the length of data, e.g. for OP_PUSHDATA2 it should be 2.
# - little_endian: whether or not the data is in the format of little endian.
CONSTANT_OP_RULE_TABLE = {
0x00: { 'word': 'OP_0', 'data_src': ScriptConstantDataSrc.SELF_CONTAINING, 'value': int_to_byte_vector(0), 'little_endian': False },
0x4c: { 'word': 'OP_PUSHDATA1', 'data_src': ScriptConstantDataSrc.LEN_FROM_SCRIPT, 'value': 1, 'little_endian': False },
0x4d: { 'word': 'OP_PUSHDATA2', 'data_src': ScriptConstantDataSrc.LEN_FROM_SCRIPT, 'value': 2, 'little_endian': True },
0x4e: { 'word': 'OP_PUSHDATA4', 'data_src': ScriptConstantDataSrc.LEN_FROM_SCRIPT, 'value': 4, 'little_endian': True },
0x4f: { 'word': 'OP_1NEGATE', 'data_src': ScriptConstantDataSrc.SELF_CONTAINING, 'value': int_to_byte_vector(-1), 'little_endian': False },
0x51: { 'word': 'OP_1', 'data_src': ScriptConstantDataSrc.SELF_CONTAINING, 'value': int_to_byte_vector(1), 'little_endian': False },
}
for opcode in range(0x01, 0x4b+1): # 0x01-0x4b, inclusive
# The official word of all these ops is actually 'N/A'.
# Rename words to make them meaningful and differentiable.
CONSTANT_OP_RULE_TABLE[opcode] = \
{ 'word': f'OP_DATA_{opcode}', 'data_src': ScriptConstantDataSrc.DATA_FROM_SCRIPT, 'value': opcode, 'little_endian': False }
for opcode in range(0x52, 0x60+1): # 0x52-0x60, inclusive
n = opcode - 0x50
CONSTANT_OP_RULE_TABLE[opcode] = \
{ 'word': f'OP_{n}', 'data_src': ScriptConstantDataSrc.SELF_CONTAINING, 'value': int_to_byte_vector(n), 'little_endian': False }
CONSTANT_OP_SET = set(CONSTANT_OP_RULE_TABLE.keys())
for opcode in CONSTANT_OP_RULE_TABLE.keys():
OPCODE_TO_WORD_TABLE[opcode] = CONSTANT_OP_RULE_TABLE[opcode]['word']
# Represent a constant token.
class ScriptConstantToken(ScriptToken):
def __init__(self, parse_ctx: ScriptParserContext):
# Read and validate the op code.
cur_index = parse_ctx.cur_index
opcode = parse_ctx.script_bytes[cur_index]
if opcode not in CONSTANT_OP_SET:
raise RuntimeError(f'{opcode} is not a valid opcode for Constants.')
cur_index += 1
# Invoke base class's constructor.
rule = CONSTANT_OP_RULE_TABLE[opcode]
super(ScriptConstantToken, self).__init__(rule['word'], opcode, ScriptTokenType.CONSTANTS)
# Take different actions depending on the data source.
data_src = rule['data_src']
data = None
if data_src == ScriptConstantDataSrc.SELF_CONTAINING: # No need to read from script.
data = rule['value']
elif data_src == ScriptConstantDataSrc.DATA_FROM_SCRIPT: # Read the fixed size data from script.
num_bytes_read = rule['value']
data = parse_ctx.script_bytes[cur_index:cur_index+num_bytes_read]
cur_index += num_bytes_read
else: # Read the data length from script first, and then read the data from script.
assert(data_src == ScriptConstantDataSrc.LEN_FROM_SCRIPT)
# Read the length.
num_data_len_bytes = rule['value']
data_len = None
if num_data_len_bytes == 1:
data_len = parse_ctx.script_bytes[cur_index]
elif num_data_len_bytes == 2:
(data_len,) = struct.unpack('<H', parse_ctx.script_bytes[cur_index:cur_index+2])
else:
assert(num_data_len_bytes == 4)
(data_len,) = struct.unpack('<L', parse_ctx.script_bytes[cur_index:cur_index+4])
cur_index += num_data_len_bytes
# Read the data.
data = parse_ctx.script_bytes[cur_index:cur_index+data_len]
cur_index += data_len
if rule['little_endian']:
data = data[::-1]
# Store the data in the object.
self.data = data
# Set the semantic.
self.add_semantic(
ScriptTransformSemantic(0, lambda l : [data]) # Pop 0 element from stack, but push `data` to the stack.
)
# Update the parsing context.
parse_ctx.cur_index = cur_index
def __str__(self):
return f'{self.word}:{self.data.hex()}'
def get_asm(self) -> str:
return self.data.hex()
#################################################
################# Flow Control ##################
# Reference: https://en.bitcoin.it/wiki/Script#Flow_control
# Key: op code
# Value: the word (name) of the op code
FLOW_CTRL_OP_WORD_TABLE = {
0x61: 'OP_NOP',
0x63: 'OP_IF',
0x64: 'OP_NOTIF',
0x67: 'OP_ELSE',
0x68: 'OP_ENDIF',
0x69: 'OP_VERIFY',
0x6a: 'OP_RETURN',
}
FLOW_CTRL_OP_SEMANTIC_LIST_TABLE = {
0x61: [], # OP_NOP does nothing.
0x63: [ScriptBranchBeginSemantic(1, lambda l : byte_vector_to_bool(l[0]) == True)],
0x64: [ScriptBranchBeginSemantic(1, lambda l : byte_vector_to_bool(l[0]) == False)],
0x67: [ScriptBranchElseSemantic()],
0x68: [ScriptBranchEndSemantic()],
0x69: [ScriptConditionalAbortSemantic(1, lambda l : byte_vector_to_bool(l[0]) == False)], # Abort if the top stack value is not true.
0x6a: [ScriptAbortSemantic()],
}
FLOW_CTRL_OP_SET = set(FLOW_CTRL_OP_WORD_TABLE.keys())
for opcode in FLOW_CTRL_OP_WORD_TABLE.keys():
OPCODE_TO_WORD_TABLE[opcode] = FLOW_CTRL_OP_WORD_TABLE[opcode]
# Represent a flow control op token.
class ScriptFlowCtrlToken(ScriptToken):
def __init__(self, parse_ctx: ScriptParserContext):
# Validate the op code.
opcode = parse_ctx.script_bytes[parse_ctx.cur_index]
if opcode not in FLOW_CTRL_OP_SET:
raise RuntimeError(f'{opcode} is not a valid opcode for Flow Control.')
# Invoke base class's constructor.
super(ScriptFlowCtrlToken, self).__init__(FLOW_CTRL_OP_WORD_TABLE[opcode], opcode, ScriptTokenType.FLOW_CONTROL)
# Set the semantics.
for semantic in FLOW_CTRL_OP_SEMANTIC_LIST_TABLE[opcode]:
self.add_semantic(semantic)
# Update the parsing context.
parse_ctx.cur_index += 1
#################################################
#################### Stack ######################
# Reference: https://en.bitcoin.it/wiki/Script#Stack
# TODO: Check the order! Add the element to input list at the head or at the end?
STACK_OP_INFO_TABLE = {
0x6b: { 'word': 'OP_TOALTSTACK', 'num_pop': 1, 'func': lambda l : l },
0x6c: { 'word': 'OP_FROMALTSTACK', 'num_pop': 1, 'func': lambda l : l },
0x73: { 'word': 'OP_IFDUP', 'num_pop': 1, 'func': lambda l : [l[0], l[0]] if byte_vector_to_bool(l[0]) else l },
0x74: { 'word': 'OP_DEPTH', 'num_pop': -1, 'func': lambda l : l + [int_to_byte_vector(len(l))] }, # TODO: l + [len(l)] or [len(l)] + l?
0x75: { 'word': 'OP_DROP', 'num_pop': 1, 'func': lambda l : [] },
0x76: { 'word': 'OP_DUP', 'num_pop': 1, 'func': lambda l : [l[0], l[0]] },
0x77: { 'word': 'OP_NIP', 'num_pop': 2, 'func': lambda l : [l[1]] },
0x78: { 'word': 'OP_OVER', 'num_pop': 2, 'func': lambda l : [l[0], l[1], l[0]] },
0x79: { 'word': 'OP_PICK', 'num_pop': -1, 'func': lambda l : l + [l[0]] },
0x7a: { 'word': 'OP_ROLL', 'num_pop': -1, 'func': lambda l : l[1:] + [l[0]] },
0x7b: { 'word': 'OP_ROT', 'num_pop': 3, 'func': lambda l : [l[1], l[2], l[0]] },
0x7c: { 'word': 'OP_SWAP', 'num_pop': 2, 'func': lambda l : [l[1], l[0]] },
0x7d: { 'word': 'OP_TUCK', 'num_pop': 2, 'func': lambda l : [l[1], l[0], l[1]] },
0x6d: { 'word': 'OP_2DROP', 'num_pop': 2, 'func': lambda l : [] },
0x6e: { 'word': 'OP_2DUP', 'num_pop': 2, 'func': lambda l : l + l },
0x6f: { 'word': 'OP_3DUP', 'num_pop': 3, 'func': lambda l : l + l },
0x70: { 'word': 'OP_2OVER', 'num_pop': 4, 'func': lambda l : l + l[0:2] },
0x71: { 'word': 'OP_2ROT', 'num_pop': 6, 'func': lambda l : l[2:] + l[0:2] },
0x72: { 'word': 'OP_2SWAP', 'num_pop': 4, 'func': lambda l : l[2:] + l[0:2] },
}
# Build the configure table for stack ops.
STACK_OP_SEMANTIC_TABLE = dict()
for (opcode, info) in STACK_OP_INFO_TABLE.items():
in_stack_src = ScriptSemanticStackSrc.MAIN
out_stack_src = ScriptSemanticStackSrc.MAIN
if info['word'] == 'OP_TOALTSTACK':
out_stack_src = ScriptSemanticStackSrc.ALT
elif info['word'] == 'OP_FROMALTSTACK':
in_stack_src = ScriptSemanticStackSrc.ALT
STACK_OP_SEMANTIC_TABLE[opcode] = \
ScriptTransformSemantic(info['num_pop'], info['func'], in_stack_src=in_stack_src, out_stack_src=out_stack_src)
STACK_OP_SET = set(STACK_OP_INFO_TABLE.keys())
for opcode in STACK_OP_INFO_TABLE.keys():
OPCODE_TO_WORD_TABLE[opcode] = STACK_OP_INFO_TABLE[opcode]['word']
# Represent a stack op token.
class ScriptStackToken(ScriptToken):
def __init__(self, parse_ctx: ScriptParserContext):
# Validate the op code.
opcode = parse_ctx.script_bytes[parse_ctx.cur_index]
if opcode not in STACK_OP_SET:
raise RuntimeError(f'{opcode} is not a valid opcode for Stack.')
# Invoke base class's constructor.
super(ScriptStackToken, self).__init__(STACK_OP_INFO_TABLE[opcode]['word'], opcode, ScriptTokenType.STACK)
# Set the semantic.
self.add_semantic(STACK_OP_SEMANTIC_TABLE[opcode])
# Update the parsing context.
parse_ctx.cur_index += 1
#################################################
#################### Splice #####################
# Reference: https://en.bitcoin.it/wiki/Script#Splice
# From the documentation, it reads:
# "If any opcode marked as disabled is present in a script, it must abort and fail."
SPLICE_OP_SET = set([0x7e, 0x7f, 0x80, 0x81, 0x82])
SPLICE_OP_DISABLED_SET = set([0x7e, 0x7f, 0x80, 0x81]) # 0x82 is the only valid splice op.
SPLICE_OP_INFO_TABLE = {
0x82: { 'word': 'OP_SIZE', 'semantic': ScriptTransformSemantic(1, lambda l : l + [ int_to_byte_vector( len(l[0]) ) ] )}, # TODO: How to understand "string length"?
}
OPCODE_TO_WORD_TABLE[0x7e] = 'OP_CAT'
OPCODE_TO_WORD_TABLE[0x7f] = 'OP_SUBSTR'
OPCODE_TO_WORD_TABLE[0x80] = 'OP_LEFT'
OPCODE_TO_WORD_TABLE[0x81] = 'OP_RIGHT'
OPCODE_TO_WORD_TABLE[0x82] = 'OP_SIZE'
# Represent a splice op token.
class ScriptSpliceToken(ScriptToken):
def __init__(self, parse_ctx: ScriptParserContext):
# Validate the op code.
opcode = parse_ctx.script_bytes[parse_ctx.cur_index]
if opcode not in SPLICE_OP_SET:
raise RuntimeError(f'{opcode} is not a valid opcode for Splice.')
if opcode in SPLICE_OP_DISABLED_SET:
raise RuntimeError(f'{opcode} is a disabled splice op.')
assert(opcode == 0x82)
# Invoke base class's constructor.
super(ScriptSpliceToken, self).__init__(SPLICE_OP_INFO_TABLE[opcode]['word'], opcode, ScriptTokenType.SPLICE)
# Set the semantic.
self.add_semantic(SPLICE_OP_INFO_TABLE[opcode]['semantic'])
# Update the parsing context.
parse_ctx.cur_index += 1
#################################################
################ Bitwise Logic ##################
# Reference: https://en.bitcoin.it/wiki/Script#Bitwise_logic
# From the documentation, it reads:
# "If any opcode marked as disabled is present in a script, it must abort and fail."
BITWISE_OP_SET = set([0x83, 0x84, 0x85, 0x86, 0x87, 0x88])
BITWISE_OP_DISABLED_SET = set([0x83, 0x84, 0x85, 0x86])
BITWISE_OP_INFO_TABLE = {
0x87: { 'word': 'OP_EQUAL', 'semantic_list': [
ScriptTransformSemantic(2, lambda l : [
bool_to_byte_vector(l[0] == l[1])
]),
]},
0x88: { 'word': 'OP_EQUALVERIFY', 'semantic_list': [
ScriptTransformSemantic(2, lambda l : [
bool_to_byte_vector(l[0] == l[1])
]),
ScriptConditionalAbortSemantic(1, lambda l : byte_vector_to_bool(l[0]) == False),
]},
}
OPCODE_TO_WORD_TABLE[0x83] = 'OP_INVERT'
OPCODE_TO_WORD_TABLE[0x84] = 'OP_AND'
OPCODE_TO_WORD_TABLE[0x85] = 'OP_OR'
OPCODE_TO_WORD_TABLE[0x86] = 'OP_XOR'
OPCODE_TO_WORD_TABLE[0x87] = 'OP_EQUAL'
OPCODE_TO_WORD_TABLE[0x88] = 'OP_EQUALVERIFY'
# Represent a bitwise logic token.
class ScriptBitwiseLogicToken(ScriptToken):
def __init__(self, parse_ctx: ScriptParserContext):
# Validate the op code.
opcode = parse_ctx.script_bytes[parse_ctx.cur_index]
if opcode not in BITWISE_OP_SET:
raise RuntimeError(f'{opcode} is not a valid opcode for Bitwise Logic.')
if opcode in BITWISE_OP_DISABLED_SET:
raise RuntimeError(f'{opcode} is a disabled bitwise logic op.')
assert(opcode == 0x87 or opcode == 0x88)
# Invoke base class's constructor.
super(ScriptBitwiseLogicToken, self).__init__(BITWISE_OP_INFO_TABLE[opcode]['word'], opcode, ScriptTokenType.BITWISE_LOGIC)
# Set the semantic.
for semantic in BITWISE_OP_INFO_TABLE[opcode]['semantic_list']:
self.add_semantic(semantic)
# Update the parsing context.
parse_ctx.cur_index += 1
#################################################
################## Arithmetic ###################
# Reference: https://en.bitcoin.it/wiki/Script#Arithmetic
'''
TODO: Ensure the following stuff...
Copied from: https://en.bitcoin.it/wiki/Script#Arithmetic
Note: Arithmetic inputs are limited to signed 32-bit integers, but may overflow their output.
If any input value for any of these commands is longer than 4 bytes, the script must abort and fail.
If any opcode marked as disabled is present in a script - it must also abort and fail.
'''
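# As a rough illustration of the note above (values taken from the quoted wiki text,
# not from this codebase): 0x7fffffff is a valid 4-byte arithmetic input, and OP_ADD
# of two such inputs overflows into a 5-byte result; that result may be left on the
# stack, but feeding it into a later arithmetic op would break the 4-byte input limit
# and the script would abort.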
ARITHMETIC_OP_DISABLED_SET = set([0x8d, 0x8e, 0x95, 0x96, 0x97, 0x98, 0x99])
OPCODE_TO_WORD_TABLE[0x8d] = 'OP_2MUL'
OPCODE_TO_WORD_TABLE[0x8e] = 'OP_2DIV'
OPCODE_TO_WORD_TABLE[0x95] = 'OP_MUL'
OPCODE_TO_WORD_TABLE[0x96] = 'OP_DIV'
OPCODE_TO_WORD_TABLE[0x97] = 'OP_MOD'
OPCODE_TO_WORD_TABLE[0x98] = 'OP_LSHIFT'
OPCODE_TO_WORD_TABLE[0x99] = 'OP_RSHIFT'
ARITHMETIC_OP_INFO_TABLE = {
0x8b: { 'word': 'OP_1ADD', 'semantic_list': [
ScriptArithmeticCheckSemantic(1),
ScriptTransformSemantic(1, lambda l : [
int_to_byte_vector( byte_vector_to_int(l[0]) + 1 )
])
]},
0x8c: { 'word': 'OP_1SUB', 'semantic_list': [
ScriptArithmeticCheckSemantic(1),
ScriptTransformSemantic(1, lambda l : [
int_to_byte_vector( byte_vector_to_int(l[0]) - 1 )
])
]},
0x8f: { 'word': 'OP_NEGATE', 'semantic_list': [
ScriptArithmeticCheckSemantic(1),
ScriptTransformSemantic(1, lambda l : [
int_to_byte_vector( - byte_vector_to_int(l[0]) )
])
]},
0x90: { 'word': 'OP_ABS', 'semantic_list': [
ScriptArithmeticCheckSemantic(1),
ScriptTransformSemantic(1, lambda l : [
int_to_byte_vector( abs( byte_vector_to_int(l[0]) ) )
])
]},
0x91: { 'word': 'OP_NOT', 'semantic_list': [
ScriptArithmeticCheckSemantic(1),
ScriptTransformSemantic(1, lambda l : [
int_to_byte_vector( 1 if byte_vector_to_int(l[0]) == 0 else 0 )
])
]},
0x92: { 'word': 'OP_0NOTEQUAL', 'semantic_list': [
ScriptArithmeticCheckSemantic(1),
ScriptTransformSemantic(1, lambda l : [
int_to_byte_vector( 0 if byte_vector_to_int(l[0]) == 0 else 1 )
])
]},
0x93: { 'word': 'OP_ADD', 'semantic_list': [
ScriptArithmeticCheckSemantic(2),
ScriptTransformSemantic(2, lambda l : [
int_to_byte_vector( byte_vector_to_int(l[0]) + byte_vector_to_int(l[1]) )
])
]},
0x94: { 'word': 'OP_SUB', 'semantic_list': [
ScriptArithmeticCheckSemantic(2),
ScriptTransformSemantic(2, lambda l : [
int_to_byte_vector( byte_vector_to_int(l[0]) - byte_vector_to_int(l[1]) )
])
]},
0x9a: { 'word': 'OP_BOOLAND', 'semantic_list': [
ScriptArithmeticCheckSemantic(2),
ScriptTransformSemantic(2, lambda l : [
bool_to_byte_vector( byte_vector_to_bool(l[0]) and byte_vector_to_bool(l[1]) )
])
]},
0x9b: { 'word': 'OP_BOOLOR', 'semantic_list': [
ScriptArithmeticCheckSemantic(2),
ScriptTransformSemantic(2, lambda l : [
bool_to_byte_vector( byte_vector_to_bool(l[0]) or byte_vector_to_bool(l[1]) )
])
]},
0x9c: { 'word': 'OP_NUMEQUAL', 'semantic_list': [
ScriptArithmeticCheckSemantic(2),
ScriptTransformSemantic(2, lambda l : [
bool_to_byte_vector( byte_vector_to_int(l[0]) == byte_vector_to_int(l[1]) )
])
]},
0x9d: { 'word': 'OP_NUMEQUALVERIFY', 'semantic_list': [
ScriptArithmeticCheckSemantic(2),
ScriptTransformSemantic(2, lambda l : [
bool_to_byte_vector( byte_vector_to_int(l[0]) == byte_vector_to_int(l[1]) )
]),
ScriptConditionalAbortSemantic(1, lambda l : byte_vector_to_bool(l[0]) == False)
]},
0x9e: { 'word': 'OP_NUMNOTEQUAL', 'semantic_list': [
ScriptArithmeticCheckSemantic(2),
ScriptTransformSemantic(2, lambda l : [
bool_to_byte_vector( byte_vector_to_int(l[0]) != byte_vector_to_int(l[1]) )
])
]},
0x9f: { 'word': 'OP_LESSTHAN', 'semantic_list': [
ScriptArithmeticCheckSemantic(2),
ScriptTransformSemantic(2, lambda l : [
bool_to_byte_vector( byte_vector_to_int(l[0]) < byte_vector_to_int(l[1]) )
]),
]},
0xa0: { 'word': 'OP_GREATERTHAN', 'semantic_list': [
ScriptArithmeticCheckSemantic(2),
ScriptTransformSemantic(2, lambda l : [
bool_to_byte_vector( byte_vector_to_int(l[0]) > byte_vector_to_int(l[1]) )
]),
]},
0xa1: { 'word': 'OP_LESSTHANOREQUAL', 'semantic_list': [
ScriptArithmeticCheckSemantic(2),
ScriptTransformSemantic(2, lambda l : [
bool_to_byte_vector( byte_vector_to_int(l[0]) <= byte_vector_to_int(l[1]) )
]),
]},
0xa2: { 'word': 'OP_GREATERTHANOREQUAL', 'semantic_list': [
ScriptArithmeticCheckSemantic(2),
ScriptTransformSemantic(2, lambda l : [
bool_to_byte_vector( byte_vector_to_int(l[0]) >= byte_vector_to_int(l[1]) )
]),
]},
0xa3: { 'word': 'OP_MIN', 'semantic_list': [
ScriptArithmeticCheckSemantic(2),
ScriptTransformSemantic(2, lambda l : [
int_to_byte_vector( min( byte_vector_to_int(l[0]), byte_vector_to_int(l[1]) ) )
]),
]},
0xa4: { 'word': 'OP_MAX', 'semantic_list': [
ScriptArithmeticCheckSemantic(2),
ScriptTransformSemantic(2, lambda l : [
int_to_byte_vector( max( byte_vector_to_int(l[0]), byte_vector_to_int(l[1]) ) )
]),
]},
0xa5: { 'word': 'OP_WITHIN', 'semantic_list': [
ScriptArithmeticCheckSemantic(3),
ScriptTransformSemantic(3, lambda l : [
bool_to_byte_vector(
( byte_vector_to_int(l[0]) >= byte_vector_to_int(l[1]) ) and
( byte_vector_to_int(l[0]) < byte_vector_to_int(l[2]) )
)
]),
]},
}
for opcode in ARITHMETIC_OP_INFO_TABLE.keys():
OPCODE_TO_WORD_TABLE[opcode] = ARITHMETIC_OP_INFO_TABLE[opcode]['word']
ARITHMETIC_OP_SET = set(list(ARITHMETIC_OP_DISABLED_SET) + list(ARITHMETIC_OP_INFO_TABLE.keys()))
# Represent an arithmetic token.
class ScriptArithmeticToken(ScriptToken):
def __init__(self, parse_ctx: ScriptParserContext):
# Validate the op code.
opcode = parse_ctx.script_bytes[parse_ctx.cur_index]
if opcode not in ARITHMETIC_OP_SET:
raise RuntimeError(f'{opcode} is not a valid opcode for Arithmetic.')
if opcode in ARITHMETIC_OP_DISABLED_SET:
raise RuntimeError(f'{opcode} is a disabled Arithmetic op.')
# Invoke base class's constructor.
super(ScriptArithmeticToken, self).__init__(ARITHMETIC_OP_INFO_TABLE[opcode]['word'], opcode, ScriptTokenType.ARITHMETIC)
# Set the semantic.
for semantic in ARITHMETIC_OP_INFO_TABLE[opcode]['semantic_list']:
self.add_semantic(semantic)
# Update the parsing context.
parse_ctx.cur_index += 1
#################################################
#################### Crypto #####################
CRYPTO_OP_INFO_TABLE = {
0xa6: { 'word': 'OP_RIPEMD160', 'semantic_list': [
ScriptTransformSemantic(1, lambda l : [
hash_ripemd160(l[0])
]),
]},
0xa7: { 'word': 'OP_SHA1', 'semantic_list': [
ScriptTransformSemantic(1, lambda l : [
hash_sha1(l[0])
]),
]},
0xa8: { 'word': 'OP_SHA256', 'semantic_list': [
ScriptTransformSemantic(1, lambda l : [
hash_sha256(l[0])
]),
]},
0xa9: { 'word': 'OP_HASH160', 'semantic_list': [
ScriptTransformSemantic(1, lambda l : [
hash_hash160(l[0])
]),
]},
0xaa: { 'word': 'OP_HASH256', 'semantic_list': [
ScriptTransformSemantic(1, lambda l : [
hash_hash256(l[0])
]),
]},
0xab: { 'word': 'OP_CODESEPARATOR', 'semantic_list': [
ScriptCodeSeparatorSemantic(),
]},
0xac: { 'word': 'OP_CHECKSIG', 'semantic_list': [
ScriptCheckSigSemantic(),
]},
0xad: { 'word': 'OP_CHECKSIGVERIFY', 'semantic_list': [
ScriptCheckSigSemantic(),
ScriptConditionalAbortSemantic(1, lambda l : byte_vector_to_bool(l[0]) == False)
]},
0xae: { 'word': 'OP_CHECKMULTISIG', 'semantic_list': [
ScriptCheckMultiSigSemantic(),
]},
0xaf: { 'word': 'OP_CHECKMULTISIGVERIFY', 'semantic_list': [
ScriptCheckMultiSigSemantic(),
ScriptConditionalAbortSemantic(1, lambda l : byte_vector_to_bool(l[0]) == False)
]},
}
for opcode in CRYPTO_OP_INFO_TABLE.keys():
OPCODE_TO_WORD_TABLE[opcode] = CRYPTO_OP_INFO_TABLE[opcode]['word']
CRYPTO_OP_SET = set(CRYPTO_OP_INFO_TABLE.keys())
# Represent a crypto token.
class ScriptCryptoToken(ScriptToken):
def __init__(self, parse_ctx: ScriptParserContext):
# Validate the op code.
opcode = parse_ctx.script_bytes[parse_ctx.cur_index]
if opcode not in CRYPTO_OP_SET:
raise RuntimeError(f'{opcode} is not a valid opcode for Crypto.')
# Invoke base class's constructor.
super(ScriptCryptoToken, self).__init__(CRYPTO_OP_INFO_TABLE[opcode]['word'], opcode, ScriptTokenType.CRYPTO)
# Set the semantic.
for semantic in CRYPTO_OP_INFO_TABLE[opcode]['semantic_list']:
self.add_semantic(semantic)
# Update the parsing context.
parse_ctx.cur_index += 1
#################################################
################### Locktime ####################
LOCKTIME_OP_INFO_TABLE = {
0xb1: { 'word': 'OP_CHECKLOCKTIMEVERIFY', 'semantic_list': [
ScriptCheckLocktimeVerifySemantic(),
]},
0xb2: { 'word': 'OP_CHECKSEQUENCEVERIFY', 'semantic_list': [
ScriptCheckSequenceVerifySemantic(),
]},
}
for opcode in LOCKTIME_OP_INFO_TABLE.keys():
OPCODE_TO_WORD_TABLE[opcode] = LOCKTIME_OP_INFO_TABLE[opcode]['word']
LOCKTIME_OP_SET = set(LOCKTIME_OP_INFO_TABLE.keys())
# Represent a locktime token.
class ScriptLocktimeToken(ScriptToken):
def __init__(self, parse_ctx: ScriptParserContext):
# Validate the op code.
opcode = parse_ctx.script_bytes[parse_ctx.cur_index]
if opcode not in LOCKTIME_OP_SET:
raise RuntimeError(f'{opcode} is not a valid opcode for Locktime.')
# Invoke base class's constructor.
super(ScriptLocktimeToken, self).__init__(LOCKTIME_OP_INFO_TABLE[opcode]['word'], opcode, ScriptTokenType.LOCKTIME)
# Set the semantic.
for semantic in LOCKTIME_OP_INFO_TABLE[opcode]['semantic_list']:
self.add_semantic(semantic)
# Update the parsing context.
parse_ctx.cur_index += 1
#################################################
################# Pseudo-words ##################
PSEUDO_OP_DISABLED_SET = set([0xfd, 0xfe, 0xff])
OPCODE_TO_WORD_TABLE[0xfd] = 'OP_PUBKEYHASH'
OPCODE_TO_WORD_TABLE[0xfe] = 'OP_PUBKEY'
OPCODE_TO_WORD_TABLE[0xff] = 'OP_INVALIDOPCODE'
PSEUDO_OP_SET = PSEUDO_OP_DISABLED_SET
class ScriptPseudoWordToken(ScriptToken):
def __init__(self, parse_ctx: ScriptParserContext):
opcode = parse_ctx.script_bytes[parse_ctx.cur_index]
if opcode not in PSEUDO_OP_SET:
raise RuntimeError(f'{opcode} is not a valid opcode for Pseudo Words.')
raise RuntimeError(f'{opcode} refers to a pseudo word, which is illegal.')
#################################################
################ Reserved-words #################
RESERVED_OP_INFO_TABLE = {
0x50: { 'word': 'OP_RESERVED', 'semantic_list': [
ScriptAbortSemantic(),
]},
0x62: { 'word': 'OP_VER', 'semantic_list': [
ScriptAbortSemantic(),
]},
0x89: { 'word': 'OP_RESERVED1', 'semantic_list': [
ScriptAbortSemantic(),
]},
0x8a: { 'word': 'OP_RESERVED2', 'semantic_list': [
ScriptAbortSemantic(),
]},
0xb0: { 'word': 'OP_NOP1', 'semantic_list': []},
0xb3: { 'word': 'OP_NOP4', 'semantic_list': []},
0xb4: { 'word': 'OP_NOP5', 'semantic_list': []},
0xb5: { 'word': 'OP_NOP6', 'semantic_list': []},
0xb6: { 'word': 'OP_NOP7', 'semantic_list': []},
0xb7: { 'word': 'OP_NOP8', 'semantic_list': []},
0xb8: { 'word': 'OP_NOP9', 'semantic_list': []},
0xb9: { 'word': 'OP_NOP10', 'semantic_list': []},
}
for opcode in RESERVED_OP_INFO_TABLE.keys():
OPCODE_TO_WORD_TABLE[opcode] = RESERVED_OP_INFO_TABLE[opcode]['word']
RESERVED_OP_DISABLED_SET = set([0x65, 0x66])
OPCODE_TO_WORD_TABLE[0x65] = 'OP_VERIF'
OPCODE_TO_WORD_TABLE[0x66] = 'OP_VERNOTIF'
RESERVED_OP_SET = set(list(RESERVED_OP_DISABLED_SET) + list(RESERVED_OP_INFO_TABLE.keys()))
# Represent a reserved word token.
class ScriptReservedToken(ScriptToken):
def __init__(self, parse_ctx: ScriptParserContext):
# Validate the op code.
opcode = parse_ctx.script_bytes[parse_ctx.cur_index]
if opcode not in RESERVED_OP_SET:
raise RuntimeError(f'{opcode} is not a valid opcode for Reserved Words.')
if opcode in RESERVED_OP_DISABLED_SET:
raise RuntimeError(f'{opcode} is a disabled reserved words op.')
# Invoke base class's constructor.
super(ScriptReservedToken, self).__init__(RESERVED_OP_INFO_TABLE[opcode]['word'], opcode, ScriptTokenType.RESERVED)
# Set the semantic.
for semantic in RESERVED_OP_INFO_TABLE[opcode]['semantic_list']:
self.add_semantic(semantic)
# Update the parsing context.
parse_ctx.cur_index += 1
#################################################
# OPCODE_TO_WORD_TABLE has been fully filled up now.
# The mapping table from word (string) to opcode (integer).
WORD_TO_OPCODE_TABLE = {}
for (key, value) in OPCODE_TO_WORD_TABLE.items():
WORD_TO_OPCODE_TABLE[value] = key
# Return the type (enum) of an opcode.
def get_opcode_type(opcode):
if opcode in CONSTANT_OP_SET:
return ScriptTokenType.CONSTANTS
elif opcode in FLOW_CTRL_OP_SET:
return ScriptTokenType.FLOW_CONTROL
elif opcode in STACK_OP_SET:
return ScriptTokenType.STACK
elif opcode in SPLICE_OP_SET:
return ScriptTokenType.SPLICE
elif opcode in BITWISE_OP_SET:
return ScriptTokenType.BITWISE_LOGIC
elif opcode in ARITHMETIC_OP_SET:
return ScriptTokenType.ARITHMETIC
elif opcode in CRYPTO_OP_SET:
return ScriptTokenType.CRYPTO
elif opcode in LOCKTIME_OP_SET:
return ScriptTokenType.LOCKTIME
elif opcode in PSEUDO_OP_SET:
return ScriptTokenType.PSEUDO
elif opcode in RESERVED_OP_SET:
return ScriptTokenType.RESERVED
else:
raise RuntimeError(f'{opcode} does not belong to any token category.')
# Table (mapping table) from token type (enum) to token class.
TOKEN_TYPE_TO_CLASS_TABLE = {
ScriptTokenType.CONSTANTS: ScriptConstantToken,
ScriptTokenType.FLOW_CONTROL: ScriptFlowCtrlToken,
ScriptTokenType.STACK: ScriptStackToken,
ScriptTokenType.SPLICE: ScriptSpliceToken,
ScriptTokenType.BITWISE_LOGIC: ScriptBitwiseLogicToken,
ScriptTokenType.ARITHMETIC: ScriptArithmeticToken,
ScriptTokenType.CRYPTO: ScriptCryptoToken,
ScriptTokenType.LOCKTIME: ScriptLocktimeToken,
ScriptTokenType.PSEUDO: ScriptPseudoWordToken,
ScriptTokenType.RESERVED: ScriptReservedToken,
}
# Return the proper class based on the input opcode.
# i.e. checking which category the operation belongs to.
def get_opcode_class(opcode):
token_type = get_opcode_type(opcode)
return TOKEN_TYPE_TO_CLASS_TABLE[token_type]
# Build a token.
def build_token(parse_ctx: ScriptParserContext) -> ScriptToken:
if parse_ctx.cur_index >= len(parse_ctx.script_bytes):
raise RuntimeError('Invalid index in parse context!')
opcode = parse_ctx.script_bytes[parse_ctx.cur_index]
TokenClass = get_opcode_class(opcode)
token = TokenClass(parse_ctx)
return token
```
#### File: stage1/BTCParser/Tool.py
```python
import struct
import hashlib
import base58
from .Bech32 import encode as segwit_encode
# Read a var int at the index of `start_index` in the block data `blk_data`.
# Return the integer and the number of bytes that we read.
# Reference: https://en.bitcoin.it/wiki/Protocol_documentation#Variable_length_integer
def read_var_int(blk_data: bytes, start_index: int) -> tuple[int, int]:
head = blk_data[start_index]
if head == 0xfd: # uint16_t
(n,) = struct.unpack('<H', blk_data[start_index+1:start_index+3])
return (n, 3)
elif head == 0xfe: # uint32_t
(n,) = struct.unpack('<L', blk_data[start_index+1:start_index+5])
return (n, 5)
elif head == 0xff: # uint64_t
(n,) = struct.unpack('<Q', blk_data[start_index+1:start_index+9])
return (n, 9)
else: # uint8_t
n = head
return (n, 1)
def int_to_var_int(num: int) -> bytes:
    assert(type(num) == int)
    assert(num >= 0)
    # Multi-byte var ints are little-endian, matching read_var_int above.
    if num <= 0xfc:
        return num.to_bytes(1, 'little', signed=False)
    elif num <= 0xffff:
        return b'\xfd' + num.to_bytes(2, 'little', signed=False)
    elif num <= 0xffffffff:
        return b'\xfe' + num.to_bytes(4, 'little', signed=False)
    else:
        return b'\xff' + num.to_bytes(8, 'little', signed=False)
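# A rough round-trip illustration of the two helpers above:
#   read_var_int(b'\x05', 0)          -> (5, 1)     single byte, value < 0xfd
#   read_var_int(b'\xfd\x03\x02', 0)  -> (515, 3)   0xfd prefix + uint16_t little-endian
#   int_to_var_int(515)               -> b'\xfd\x03\x02'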
'''
Copied from: https://en.bitcoin.it/wiki/Script#Stack
This document is for information purposes only. De facto,
Bitcoin script is defined by the code run by the network to check the validity of blocks.
The stacks hold byte vectors. When used as numbers, byte vectors are interpreted as little-endian variable-length integers with the most significant bit determining the sign of the integer.
Thus 0x81 represents -1. 0x80 is another representation of zero (so called negative 0).
Positive 0 is represented by a null-length vector.
Byte vectors are interpreted as Booleans where False is represented by any representation of zero and True is represented by any representation of non-zero.
Leading zeros in an integer and negative zero are allowed in blocks but get rejected by the stricter requirements which standard full nodes put on transactions before retransmitting them.
Byte vectors on the stack are not allowed to be more than 520 bytes long.
Opcodes which take integers and bools off the stack require that they be no more than 4 bytes long, but addition and subtraction can overflow and result in a 5 byte integer being put on the stack.
'''
# The following functions are used to interpret the byte vector on stack,
# and convert the value to byte vector.
def int_to_byte_vector(n):
    assert(type(n) == int)
    if n == 0:
        return b''
    is_neg = n < 0
    abs_n = abs(n)
    bv = bytearray()
    while abs_n > 0:  # Little-endian: least significant byte first.
        bv.append(abs_n & 0xff)
        abs_n >>= 8
    if bv[-1] & 0x80:  # The sign bit position is occupied, so append an extra byte to carry the sign.
        bv.append(0x80 if is_neg else 0x00)
    elif is_neg:
        bv[-1] |= 0x80  # Toggle the most significant bit to mark the number as negative.
    return bytes(bv)
def bool_to_byte_vector(b):
assert(type(b) == bool)
if b:
return b'\x01'
else:
return b''
def byte_vector_to_int(bv):
bv_int = 0
is_neg = False
for i in range(len(bv)-1, -1, -1):
bv_byte = bv[i]
if i == len(bv) - 1:
if bv_byte & 0x80 == 0x80: # If the most significant bit is 1, meaning the number is negative.
bv_byte ^= 0x80 # Toggle the most significant bit.
is_neg = True
bv_int = (bv_int << 8) | bv_byte
if is_neg:
bv_int = -bv_int
return bv_int
def byte_vector_to_bool(bv):
if byte_vector_to_int(bv) == 0:
return False
else:
return True
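# A few worked examples of the encoding described in the quote above:
#   byte_vector_to_int(b'')      -> 0       (positive zero is the empty vector)
#   byte_vector_to_int(b'\x81')  -> -1      (sign bit set in the most significant byte)
#   byte_vector_to_int(b'\x80')  -> 0       ("negative zero")
#   byte_vector_to_bool(b'\x80') -> False   (any representation of zero is False)
#   int_to_byte_vector(-1)       -> b'\x81'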
# Check if the byte vector is a valid arithmetic input (at most 4 bytes, i.e. fits a signed 32-bit integer).
def is_byte_vector_valid_arithmetic_int(bv):
    return len(bv) <= 4
# Hash helper functions.
def hash_ripemd160(bytes_data):
return hashlib.new('ripemd160', bytes_data).digest()
def hash_sha1(bytes_data):
return hashlib.sha1(bytes_data).digest()
def hash_sha256(bytes_data):
return hashlib.sha256(bytes_data).digest()
def hash_hash160(bytes_data):
first_hash = hashlib.sha256(bytes_data).digest()
return hashlib.new('ripemd160', first_hash).digest()
def hash_hash256(bytes_data):
first_hash = hashlib.sha256(bytes_data).digest()
return hashlib.sha256(first_hash).digest()
# Helper functions for extracting the address.
def extract_addr_from_p2pkh(bytes_data: bytes) -> str:
data = b'\x00' + bytes_data
checksum = hash_hash256(data)[0:4]
return base58.b58encode(data + checksum).decode('utf-8')
def extract_addr_from_p2sh(bytes_data: bytes) -> str:
data = b'\x05' + bytes_data
checksum = hash_hash256(data)[0:4]
return base58.b58encode(data + checksum).decode('utf-8')
def extract_addr_from_segwit(version: int, bytes_data: bytes) -> str:
return segwit_encode('bc', version, bytes_data)
```
#### File: stage2/MyToolKit/Subprocess.py
```python
import subprocess
class MySubprocess:
def __init__(self, cmd: 'list[str]'):
self.p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
def get_pid(self) -> int:
return self.p.pid
def send(self, data: str):
self.p.stdin.write(data.encode())
self.p.stdin.flush()
def readline(self, echo=True) -> str:
resp = self.p.stdout.readline().decode('utf-8').strip()
if echo:
print(f'Response: {resp}')
return resp
def wait_for_keyword(self, keyword: str, echo=True):
while True:
resp: str = self.readline(echo=echo)
if keyword in resp:
break
def kill(self):
self.p.kill()
def restart(self, cmd: 'list[str]'):
self.p.kill()
self.p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
```
#### File: Transformer/test/bloomfilter_restore_test.py
```python
import time
import os
import psutil
import subprocess
import random
# Hardcode the paths here.
STAGE1_OUTPUT_PATH: str = '/mnt/m/Stage1Output'
STAGE2_OUTPUT_PATH: str = '/mnt/m/Stage2Output'
BLOOM_FILTER_RESTORE_PATH = os.path.join(STAGE2_OUTPUT_PATH, 'bloomfilter.bin')
with open(BLOOM_FILTER_RESTORE_PATH, 'wb') as f:
f.write(b'')
# Suppose the bloomfilter executable is already there.
p_cmd: list[str] = ['../build/src/bloomfilter']
p_bf = subprocess.Popen(p_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
def send_cmd(data: str):
global p_bf
p_bf.stdin.write(data.encode())
p_bf.stdin.flush()
def readline() -> str:
global p_bf
p_response = p_bf.stdout.readline().decode('utf-8').strip()
return p_response
time.sleep(1)
# Read the stage 1's outputs.
data_fn_list: list[str] = os.listdir(STAGE1_OUTPUT_PATH)
data_fn_list = [fn for fn in data_fn_list if fn.startswith('blk') and fn.endswith('.out')]
random.shuffle(data_fn_list)
NUM_DAT_FILE_PER_FILTER = 128
NUM_TEST_FILE = 32
send_cmd('path\n')
send_cmd(f'{BLOOM_FILTER_RESTORE_PATH}\n')
resp = readline()
print(f'Response: {resp}')
assert(resp == 'OK')
work_list = data_fn_list[:NUM_DAT_FILE_PER_FILTER]
for i, fn in enumerate(work_list):
print('#' * 60)
print(f'Progress: {i+1} / {len(work_list)}')
file_path: str = os.path.join(STAGE1_OUTPUT_PATH, fn)
print(f'Adding information for {fn}')
send_cmd('add\n')
send_cmd(f'{file_path}\n')
resp = readline()
print(f'Response: {resp}')
assert(resp == 'OK')
send_cmd('details\n')
resp = readline()
print(f'Current details: {resp}')
resp = readline()
print(f'Current details: {resp}')
resp = readline()
print(f'Current details: {resp}')
resp = readline()
print(f'Response: {resp}')
assert(resp == 'OK')
mem_used = psutil.Process(p_bf.pid).memory_info().rss
print(f'Memory used: {mem_used/1024/1024} MB')
print('#' * 60)
# Store to file.
print('Storing to file...')
send_cmd('store\n')
resp = readline()
print(f'Response: {resp}')
assert(resp == 'OK')
# Kill the process and restart it again.
p_bf.kill()
p_bf = subprocess.Popen(p_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
send_cmd('path\n')
send_cmd(f'{BLOOM_FILTER_RESTORE_PATH}\n')
resp = readline()
print(f'Response: {resp}')
assert(resp == 'OK')
print('Subprocess restarted.')
time.sleep(1)
# Load from file.
print('Restoring from file...')
send_cmd('load\n')
resp = readline()
print(f'Response: {resp}')
assert(resp == 'OK')
# Test if all previous stuff is restored successfully.
for i, fn in enumerate(work_list):
print('#' * 60)
print(f'Progress: {i+1} / {len(work_list)}')
file_path: str = os.path.join(STAGE1_OUTPUT_PATH, fn)
print('Test if added stuff are in the bitsets.')
send_cmd('test\n')
send_cmd(f'{file_path}\n')
resp = readline()
print(f'Response: {resp}')
(a, b) = resp.split('happend')[-1].split('/')
assert(a.strip() == b.strip())
print('#' * 60)
# Test on other files.
faults_count: int = 0
all_items_count: int = 0
test_list = data_fn_list[NUM_DAT_FILE_PER_FILTER:NUM_DAT_FILE_PER_FILTER+NUM_TEST_FILE]
for i, fn in enumerate(test_list):
print('#' * 60)
print(f'Progress: {i+1} / {len(test_list)}')
file_path: str = os.path.join(STAGE1_OUTPUT_PATH, fn)
print(f'Test bloom filter on {fn}')
send_cmd('test\n')
send_cmd(f'{file_path}\n')
resp = readline()
print(f'Response: {resp}')
(a, b) = resp.split('happend')[-1].split('/')
faults_count += int(a.strip())
all_items_count += int(b.strip())
print(f'Faults / all items: {faults_count} / {all_items_count}, rate: {faults_count/all_items_count}')
print('#' * 60)
print('Test OK!')
p_bf.kill()
```
#### File: Transformer/test/tablequery_test.py
```python
import os
import sys
import random
import secrets
sys.path.append('../../')
from MyToolKit import MySubprocess
# Hardcode the paths here.
STAGE1_OUTPUT_PATH: str = '/mnt/m/Stage1Output'
STAGE2_OUTPUT_PATH: str = '/mnt/m/Stage2Output'
MAPTABLE_PATH: str = os.path.join(STAGE2_OUTPUT_PATH, 'maptable')
chunk_folder_list: list[str] = os.listdir(MAPTABLE_PATH)
chunk_folder_list = [folder for folder in chunk_folder_list if os.path.isdir(os.path.join(MAPTABLE_PATH, folder))]
chunk_folder_list = [folder for folder in chunk_folder_list if folder.startswith('chunk')]
random.shuffle(chunk_folder_list)
proc_cmd: list[str] = ['../build/src/tablequery']
p: MySubprocess = MySubprocess(proc_cmd)
test_chunk_folder_list: list[str] = chunk_folder_list[:2]
add_chunk_folder_list: list[str] = chunk_folder_list[2:]
# Load the chunks.
max_in_mem_chunk = 10
in_mem_chunk_count = 0
p.send('start\n')
p.send(f'{len(add_chunk_folder_list)}\n')
for chunk_folder in add_chunk_folder_list:
chunk_folder_path: str = os.path.join(MAPTABLE_PATH, chunk_folder)
bloom_filter_path: str = os.path.join(chunk_folder_path, 'bloomfilter.bf')
table_path: str = os.path.join(chunk_folder_path, 'orderedtable.ot')
index_path: str = os.path.join(chunk_folder_path, 'index.oti')
in_mem: bool = (secrets.randbelow(100000) % 2 == 0)
if in_mem and in_mem_chunk_count < max_in_mem_chunk: # Adding as an in-mem chunk.
print(f'Adding {chunk_folder} as in-memory chunk.')
p.send('inmem\n')
p.send(f'{bloom_filter_path}\n')
p.send(f'{table_path}\n')
else: # Adding as an on-disk chunk.
print(f'Adding {chunk_folder} as on-disk chunk.')
p.send('ondisk\n')
p.send(f'{bloom_filter_path}\n')
p.send(f'{table_path}\n')
p.send(f'{index_path}\n')
# Wait until the chunk is loaded.
while True:
resp = p.readline()
if 'Adding chunk' in resp and 'done' in resp:
break
p.wait_for_keyword('OK')
def randomly_choose_stage1_out(chunk_folder: str) -> str:
selected_chunk_id: int = int(chunk_folder[5:])
while True:
selected_stage1_no: int = selected_chunk_id * 128 + secrets.randbelow(128)
selected_stage1_fn: str = (5 - len(str(selected_stage1_no))) * '0' + str(selected_stage1_no)
selected_output: str = os.path.join(STAGE1_OUTPUT_PATH, f'blk{selected_stage1_fn}.out')
if os.path.exists(selected_output):
break
return selected_output
# Test one stage 1's transactions.
random.shuffle(add_chunk_folder_list)
selected_output = randomly_choose_stage1_out(add_chunk_folder_list[0])
print(f'Testing on {selected_output}')
p.send('test\n')
p.send(f'{selected_output}\n')
while True:
resp = p.readline()
if resp == 'OK':
break
assert('true' in resp)
test_chunk = randomly_choose_stage1_out(test_chunk_folder_list[0])
print(f'Testing on chunk {test_chunk_folder_list[0]}, {test_chunk}')
p.send('test\n')
p.send(f'{test_chunk}\n')
while True:
resp = p.readline()
if resp == 'OK':
break
assert('false' in resp)
p.kill()
``` |
{
"source": "0xf3cd/Chinese-Resume",
"score": 3
} |
#### File: Chinese-Resume/EngVer/UpdateGoogleDrive.py
```python
import os
from pydrive import auth, drive
def UpdateFile(file_id, src_path):
f = drive.CreateFile({'id': file_id})
f.SetContentFile(src_path)
f.Upload()
def CreateFile(parent_folder_id, file_name, src_path):
f = drive.CreateFile({
'title': file_name,
'parents': [{'id': parent_folder_id}],
})
f.SetContentFile(src_path)
f.Upload()
FOLDER_ID = '10CgNO2GlpE6MgC4B-WWOFdWxPMBMfeIp'
RESUME_PATH = './EngVer.pdf'
DEST_FILE_NAME = 'Resume-NingqiWang.pdf'
g_auth = auth.GoogleAuth()
g_auth.LocalWebserverAuth() # Creates local webserver and auto handles authentication.
drive = drive.GoogleDrive(g_auth)
# Create or update the resume
file_list = drive.ListFile({
'q': f"'{FOLDER_ID}' in parents and trashed=false"
}).GetList()
for f_data in file_list:
f_name = f_data['title']
if f_name == DEST_FILE_NAME:
UpdateFile(f_data['id'], RESUME_PATH)
os._exit(0)
# At this point, we can make sure that the .pdf file has not been uploaded yet
CreateFile(FOLDER_ID, DEST_FILE_NAME, RESUME_PATH)
``` |
{
"source": "0xf4b1/keg",
"score": 3
} |
#### File: keg/keg/blizini.py
```python
from typing import Dict
class BlizIni:
def __init__(self) -> None:
self.items: Dict[str, str] = {}
def read_string(self, text: str) -> None:
for line in text.splitlines():
line = line.strip()
if not line or line.startswith("#"):
continue
key, _, value = line.partition("=")
key = key.strip()
value = value.strip()
if key in self.items:
self.items[key] += "\n" + value
else:
self.items[key] = value
def load(text: str) -> Dict[str, str]:
p = BlizIni()
p.read_string(text)
return p.items
```
#### File: keg/core/keg.py
```python
import os
from ..cdn import LocalCDN
from ..remote.cache import CacheableHttpRemote, CacheableRemote, CacheableRibbitRemote
from .config import KegConfig
from .db import KegDB
from .statecache import StateCache
class Keg:
def __init__(self, path: str) -> None:
self.path = os.path.abspath(path)
self.objects_path = os.path.join(self.path, "objects")
self.fragments_path = os.path.join(self.path, "fragments")
self.response_cache_dir = os.path.join(self.path, "responses")
self.ribbit_cache_dir = os.path.join(self.path, "ribbit")
self.armadillo_dir = os.path.join(self.path, "armadillo")
self.temp_dir = os.path.join(self.path, "tmp")
self.config_path = os.path.join(self.path, "keg.conf")
self.db_path = os.path.join(self.path, "keg.db")
self.state_cache = StateCache(self.response_cache_dir)
self.ribbit_state_cache = StateCache(self.ribbit_cache_dir)
self.initialized = os.path.exists(self.path)
if self.initialized:
self.db = KegDB(self.db_path)
else:
self.db = KegDB(":memory:")
self.config = KegConfig(self.config_path)
self.local_cdn = LocalCDN(
self.objects_path, self.fragments_path, self.armadillo_dir, self.temp_dir
)
def initialize(self) -> bool:
if not os.path.exists(self.path):
reinitialized = True
os.makedirs(self.path)
else:
reinitialized = False
self.config.initialize()
self.db = KegDB(self.db_path)
self.db.create_tables()
return reinitialized
def get_remote(self, remote: str) -> CacheableRemote:
is_ribbit = remote.startswith("ribbit://")
cls = CacheableRibbitRemote if is_ribbit else CacheableHttpRemote
state_cache = self.ribbit_state_cache if is_ribbit else self.state_cache
return cls(
remote, cache_dir=self.response_cache_dir, cache_db=self.db, state_cache=state_cache
)
def clean_remote(self, remote: str) -> str:
"""
Cleans a remote by adding the configured default remote prefix
if it's missing a scheme.
"""
if "://" not in remote:
remote = self.config.default_remote_prefix + remote
return remote
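# Rough usage sketch (the prefix value is hypothetical and comes from
# config.default_remote_prefix): clean_remote("example.com/tpr/product") would become
# default_remote_prefix + "example.com/tpr/product", while an argument that already
# carries a scheme, e.g. "ribbit://us.version.battle.net/v1", is returned unchanged.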
```
#### File: keg/remote/base.py
```python
from typing import List
from .. import psvresponse
class BaseRemote:
def __init__(self, remote: str) -> None:
self.remote = remote
def get_psv(self, name: str):
raise NotImplementedError("This method must be overridden in a subclass")
def get_blobs(self) -> List[psvresponse.Blobs]:
psvfile, _ = self.get_psv("blobs")
return [psvresponse.Blobs(row) for row in psvfile]
def get_bgdl(self) -> List[psvresponse.BGDL]:
psvfile, _ = self.get_psv("bgdl")
return [psvresponse.BGDL(row) for row in psvfile]
def get_cdns(self) -> List[psvresponse.CDNs]:
psvfile, _ = self.get_psv("cdns")
return [psvresponse.CDNs(row) for row in psvfile]
def get_versions(self) -> List[psvresponse.Versions]:
psvfile, _ = self.get_psv("versions")
return [psvresponse.Versions(row) for row in psvfile]
```
#### File: keg/remote/http.py
```python
import json
from datetime import datetime
from hashlib import md5
from typing import Any, Tuple
import requests
from .. import psv
from ..exceptions import NetworkError
from .base import BaseRemote
class StatefulResponse:
def __init__(self, path: str, response: requests.Response) -> None:
self.path = path
self.content = response.content
self.timestamp = int(datetime.utcnow().timestamp())
self.digest = md5(self.content).hexdigest()
if response.status_code != 200:
raise NetworkError(f"Got status code {response.status_code} for {path!r}")
class HttpRemote(BaseRemote):
supports_blobs = True
def get_response(self, path: str) -> StatefulResponse:
url = self.remote + path
return StatefulResponse(path, requests.get(url))
def get_blob(self, name: str) -> Tuple[Any, StatefulResponse]:
resp = self.get_response(f"/blob/{name}")
return json.loads(resp.content.decode()), resp
def get_psv(self, name: str) -> Tuple[psv.PSVFile, StatefulResponse]:
resp = self.get_response(f"/{name}")
return psv.loads(resp.content.decode()), resp
```
#### File: keg/tests/test_armadillo.py
```python
from base64 import b32decode
from hashlib import md5
from keg.armadillo import ARMADILLO_KEY_SIZE, ArmadilloKey, verify_armadillo_key
from . import get_resource
FULL_KEY = b32decode("6Z45YOHAYNS7WSBOJCTUREE5FEM7LO4I")
AK = ArmadilloKey(FULL_KEY[:ARMADILLO_KEY_SIZE])
def test_verify_armadillo_key():
assert verify_armadillo_key(FULL_KEY)
def test_decrypt_buildconfig():
key = "<KEY>"
with get_resource(f"buildconfig/encrypted/{key}", "rb") as f:
encrypted_data = f.read()
decrypted_data = AK.decrypt_object(key, encrypted_data)
assert md5(decrypted_data).hexdigest() == key
```
#### File: keg/tests/test_blte.py
```python
from hashlib import md5
from io import BytesIO
import pytest
from keg import blte
from keg.exceptions import BLTEError
from . import get_resource
def test_verify_good_blte():
key = "<KEY>"
with get_resource(f"blte/{key}", "rb") as fp:
blte.verify_blte_data(fp, key)
def test_verify_blte_extra_data():
key = "<KEY>"
with get_resource(f"blte/{key}", "rb") as fp:
data = fp.read()
fp = BytesIO(data + b"B")
with pytest.raises(BLTEError):
blte.verify_blte_data(fp, key)
def test_blte_encode():
key = "<KEY>"
spec = "b:{22=n,54=z,192=n,24576=n,128=n,16384=n,*=z}"
with get_resource(f"blte/{key}.in", "rb") as fp:
data = fp.read()
data, written, out_key = blte.dumps(data, spec)
assert key == out_key
assert md5(data).hexdigest() == "39c6c6b7b1fecd09a1d6514470988700"
``` |
{
"source": "0xF4D3C0D3/ray-tracer-challenge-with-python",
"score": 3
} |
#### File: src/canvas/canvas.py
```python
import textwrap
import matplotlib.pyplot as plt
import numpy as np
class Canvas(np.ndarray):
"""
Canvas holds (cols, rows, 3) ndarray for drawing various formats.
"""
def __new__(cls, rows, cols):
obj = np.zeros((cols, rows, 3)).view(Canvas)
obj.rows = rows
obj.cols = cols
return obj
def to_matplotlib(self, figsize=(10, 10)):
fig, ax = plt.subplots(figsize=figsize)
ax.imshow(np.flipud(self.clip(0, 1)))
return fig, ax
def to_ppm(self):
header = "P3\n" f"{self.rows} {self.cols}\n" "255\n"
raw_body = (
np.ceil((np.flipud(self) * 255).clip(0, 255))
.astype(int)
.astype(str)
.reshape(self.cols, -1)
)
raw_lines = [" ".join(["".join(cell) for cell in row]) for row in raw_body]
body = "\n".join(["\n".join(textwrap.wrap(line)) for line in raw_lines])
ppm = header + body + "\n"
return ppm
```
#### File: src/intersection/intersection.py
```python
import numpy as np
class Intersection(np.ndarray):
def __new__(cls, ts, mask, obj):
self = ts.T.view(cls)
self.mask = mask.squeeze()
self.obj = obj
return self
def __eq__(self, other):
return np.allclose(self, other)
@property
def count(self):
return self.shape[0]
@property
def hit(self):
return self.min(axis=1)
```
#### File: src/shape/sphere.py
```python
import numpy as np
from src.grid import Point, VectorGrid
from src.intersection import Intersection
from src.material import Material
from src.matrix import Matrix
class Sphere:
def __init__(self, transform=None, material=None):
self.transform = Matrix() if transform is None else transform
self.material = Material() if material is None else material
def __repr__(self):
return (
f"Sphere(transform={repr(self.transform)}, material={repr(self.material)})"
)
def set_transform(self, transform):
return Sphere(transform, self.material)
def set_material(self, material):
return Sphere(self.transform, material)
def normal_at(self, point):
obj_point = self.transform.inv @ point
obj_normal = obj_point - Point(0, 0, 0)
world_normal = VectorGrid(*(self.transform.inv.T @ obj_normal).T[:-1], False)
return world_normal.normalize()
def intersect(self, ray):
ray = ray.transform(self.transform.inv)
sphere_to_ray = ray.origin - Point(0, 0, 0)
a = ray.direction @ ray.direction
b = 2 * (ray.direction @ sphere_to_ray)
c = sphere_to_ray @ sphere_to_ray - 1
discriminant = b ** 2 - 4 * a * c
mask = discriminant >= 0
masked_a = a[mask]
masked_b = b[mask]
masked_discriminant = discriminant[mask]
t1 = (-masked_b - masked_discriminant ** 0.5) / (2 * masked_a)
t2 = (-masked_b + masked_discriminant ** 0.5) / (2 * masked_a)
res = Intersection(np.vstack([t1, t2]), mask, self)
return res
```
#### File: ray-tracer-challenge-with-python/tests/test_canvas.py
```python
import numpy as np
from src.canvas import Canvas
from src.grid import Color
def test_create_canvas():
c = Canvas(10, 20)
assert c.rows == 10
assert c.cols == 20
assert np.all(c == Color(0, 0, 0))
def test_writing_pixel_to_canvas():
c = Canvas(10, 20)
red = Color(1, 0, 0)
c[2, 3] = red
assert red == c[2, 3]
def test_constructing_ppm_header():
c = Canvas(5, 3)
ppm = c.to_ppm()
line_1_3 = "\n".join(ppm.splitlines()[0:3])
assert line_1_3 == ("P3\n" "5 3\n" "255")
def test_constructing_ppm_pixel_data():
c = Canvas(5, 3)
cs = Color([0, 0, 1], [0, 0.5, 0], [1.5, 0, -0.5])
c[(0, 1, 2), (4, 2, 0)] = cs
ppm = c.to_ppm()
lines_4_6 = "\n".join(ppm.splitlines()[3:6])
assert lines_4_6 == (
"255 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n"
"0 0 0 0 0 0 0 128 0 0 0 0 0 0 0\n"
"0 0 0 0 0 0 0 0 0 0 0 0 0 0 255"
)
def test_splitting_long_lines_in_ppm_files():
c = Canvas(10, 2)
c[:2, :10] = Color(1, 0.8, 0.6)
ppm = c.to_ppm()
lines_4_7 = "\n".join(ppm.splitlines()[3:7])
assert lines_4_7 == (
"255 204 153 255 204 153 255 204 153 255 204 153 255 204 153 255 204\n"
"153 255 204 153 255 204 153 255 204 153 255 204 153\n"
"255 204 153 255 204 153 255 204 153 255 204 153 255 204 153 255 204\n"
"153 255 204 153 255 204 153 255 204 153 255 204 153"
)
def test_ppm_files_are_terminated_by_a_newline_character():
c = Canvas(5, 3)
ppm = c.to_ppm()
assert ppm[-1] == "\n"
```
#### File: ray-tracer-challenge-with-python/tests/test_spheres.py
```python
import numpy as np
from src.grid import Point, Vector
from src.light import Ray
from src.material import Material
from src.matrix import Rotation, Scaling, Translation
from src.shape.sphere import Sphere
def test_ray_intersects_sphere_at_two_points():
r = Ray(Point(0, 0, -5), Vector(0, 0, 1))
s = Sphere()
xs = s.intersect(r)
assert xs.count == 1
assert xs == [4, 6]
def test_ray_intersects_sphere_at_tangent():
r = Ray(Point(0, 1, -5), Vector(0, 0, 1))
s = Sphere()
xs = s.intersect(r)
assert xs.count == 1
assert xs == [5, 5]
def test_ray_misses_sphere():
r = Ray(Point(0, 2, -5), Vector(0, 0, 1))
s = Sphere()
xs = s.intersect(r)
assert xs.count == 0
def test_ray_originates_inside_sphere():
r = Ray(Point(0, 0, 0), Vector(0, 0, 1))
s = Sphere()
xs = s.intersect(r)
assert xs.count == 1
assert xs == [-1, 1]
def test_sphere_is_behind_ray():
r = Ray(Point(0, 0, 5), Vector(0, 0, 1))
s = Sphere()
xs = s.intersect(r)
assert xs.count == 1
assert xs == [-6, -4]
def test_intersect_sets_the_object_on_intersection():
r = Ray(Point(0, 0, -5), Vector(0, 0, 1))
s = Sphere()
xs = s.intersect(r)
assert xs.count == 1
assert xs.obj is s
def test_sphere_default_transformation():
s = Sphere()
assert np.allclose(s.transform, np.eye(4))
def test_changing_sphere_transformation():
s = Sphere()
t = Translation(2, 3, 4)
s = s.set_transform(t)
assert np.allclose(s.transform, t)
def test_intersecting_scaled_sphere_with_ray():
r = Ray(Point(0, 0, -5), Vector(0, 0, 1))
s = Sphere()
s = s.set_transform(Scaling(2, 2, 2))
xs = s.intersect(r)
assert xs.count == 1
assert xs == [3, 7]
def test_intersecting_translated_sphere_with_ray():
r = Ray(Point(0, 0, -5), Vector(0, 0, 1))
s = Sphere()
s = s.set_transform(Translation(5, 0, 0))
xs = s.intersect(r)
assert xs.count == 0
def test_normal_on_sphere_at_point_xaxis():
s = Sphere()
n = s.normal_at(Point(1, 0, 0))
assert n == Vector(1, 0, 0)
def test_normal_on_sphere_at_point_yaxis():
s = Sphere()
n = s.normal_at(Point(0, 1, 0))
assert n == Vector(0, 1, 0)
def test_normal_on_sphere_at_point_zaxis():
s = Sphere()
n = s.normal_at(Point(0, 0, 1))
assert n == Vector(0, 0, 1)
def test_normal_on_sphere_at_nonaxial_point():
s = Sphere()
a = 3 ** 0.5 / 3
n = s.normal_at(Point(a, a, a))
assert n == Vector(a, a, a)
def test_normal_is_normalized_vector():
s = Sphere()
a = 3 ** 0.5 / 3
n = s.normal_at(Point(a, a, a))
assert n == n.normalize()
def test_computing_normal_on_translated_sphere():
s = Sphere()
s = s.set_transform(Translation(0, 1, 0))
n = s.normal_at(Point(0, 1.70711, -0.70711))
assert n == Vector(0, 0.70711, -0.70711)
def test_computing_normal_on_transformed_sphere():
s = Sphere()
m = Scaling(1, 0.5, 1) @ Rotation(0, 0, np.pi / 5)
s = s.set_transform(m)
n = s.normal_at(Point(0, 2 ** 0.5 / 2, -(2 ** 0.5) / 2))
assert np.allclose(n, Vector(0, 0.97014, -0.24254), 1e-03, 1e-03)
def test_sphere_has_default_material():
s = Sphere()
m = s.material
assert m == Material()
def test_sphere_may_be_assigned_material():
s = Sphere()
m = Material(ambient=1)
s = s.set_material(m)
assert s.material == m
```
#### File: ray-tracer-challenge-with-python/tests/test_transformations.py
```python
import numpy as np
from src.grid import Point, PointGrid, Vector
from src.matrix import Rotation, Scaling, Shearing, Translation
def test_multiplying_by_translation_matrix():
transform = Translation(5, -3, 2)
p = Point(-3, 4, 5)
assert transform @ p == Point(2, 1, 7)
def test_multiplying_by_inverse_of_translation_matrix():
transform = Translation(5, -3, 2)
inv = transform.inv
p = Point(-3, 4, 5)
assert inv @ p == Point(-8, 7, 3)
def test_translation_dose_not_affect_vectors():
transform = Translation(5, -3, 2)
v = Vector(-3, 4, 5)
assert transform @ v == v
def test_scaling_matrix_applied_to_point():
transform = Scaling(2, 3, 4)
p = Point(-4, 6, 8)
assert transform @ p == Point(-8, 18, 32)
def test_scaling_matrix_applied_to_vector():
transform = Scaling(2, 3, 4)
v = Vector(-4, 6, 8)
assert transform @ v == Vector(-8, 18, 32)
def test_multiplying_by_inverse_of_scaling_matrix():
transform = Scaling(2, 3, 4)
inv = transform.inv
v = Vector(-4, 6, 8)
assert inv @ v == Vector(-2, 2, 2)
def test_reflection_is_scaling_by_negative_value():
transform = Scaling(-1, 1, 1)
p = Point(2, 3, 4)
assert transform @ p == Point(-2, 3, 4)
def test_rotating_point_around_x_axis():
p = Point(0, 1, 0)
half_quarter = Rotation(np.pi / 4, 0, 0)
full_quarter = Rotation(np.pi / 2, 0, 0)
assert half_quarter @ p == Point(0, 2 ** 0.5 / 2, 2 ** 0.5 / 2)
assert full_quarter @ p == Point(0, 0, 1)
def test_invesre_of_x_rotation_rotates_in_opposite_direction():
p = Point(0, 1, 0)
half_quarter = Rotation(np.pi / 4, 0, 0)
inv = half_quarter.inv
assert inv @ p == Point(0, 2 ** 0.5 / 2, -(2 ** 0.5) / 2)
def test_rotating_point_around_y_axis():
p = Point(0, 0, 1)
half_quarter = Rotation(0, np.pi / 4, 0)
full_quarter = Rotation(0, np.pi / 2, 0)
assert half_quarter @ p == Point(2 ** 0.5 / 2, 0, 2 ** 0.5 / 2)
assert full_quarter @ p == Point(1, 0, 0)
def test_rotating_point_around_z_axis():
p = Point(0, 1, 0)
half_quarter = Rotation(0, 0, np.pi / 4)
full_quarter = Rotation(0, 0, np.pi / 2)
assert half_quarter @ p == Point(-(2 ** 0.5) / 2, 2 ** 0.5 / 2, 0)
assert full_quarter @ p == Point(-1, 0, 0)
def test_shearing_transformation_moves_x_in_proportion_to_y():
transform = Shearing(1, 0, 0, 0, 0, 0)
p = Point(2, 3, 4)
assert transform @ p == Point(5, 3, 4)
def test_shearing_transformation_moves_x_in_proportion_to_z():
transform = Shearing(0, 1, 0, 0, 0, 0)
p = Point(2, 3, 4)
assert transform @ p == Point(6, 3, 4)
def test_shearing_transformation_moves_y_in_proportion_to_x():
transform = Shearing(0, 0, 1, 0, 0, 0)
p = Point(2, 3, 4)
assert transform @ p == Point(2, 5, 4)
def test_shearing_transformation_moves_y_in_proportion_to_z():
transform = Shearing(0, 0, 0, 1, 0, 0)
p = Point(2, 3, 4)
assert transform @ p == Point(2, 7, 4)
def test_shearing_transformation_moves_z_in_proportion_to_x():
transform = Shearing(0, 0, 0, 0, 1, 0)
p = Point(2, 3, 4)
assert transform @ p == Point(2, 3, 6)
def test_shearing_transformation_moves_z_in_proportion_to_y():
transform = Shearing(0, 0, 0, 0, 0, 1)
p = Point(2, 3, 4)
assert transform @ p == Point(2, 3, 7)
def test_individual_transformations_are_applied_in_sequence():
p = Point(1, 0, 1)
A = Rotation(np.pi / 2, 0, 0)
B = Scaling(5, 5, 5)
C = Translation(10, 5, 7)
p2 = A @ p
assert p2 == Point(1, -1, 0)
p3 = B @ p2
assert p3 == Point(5, -5, 0)
p4 = C @ p3
assert p4 == Point(15, 0, 7)
def test_chained_transformations_must_be_applied_in_reverse_order():
p = Point(1, 0, 1)
A = Rotation(np.pi / 2, 0, 0)
B = Scaling(5, 5, 5)
C = Translation(10, 5, 7)
T = C @ B @ A
assert T @ p == Point(15, 0, 7)
pg = PointGrid([1, 10], [2, 3], 4)
assert T @ pg == PointGrid(
[15, 15, 60, 60], [-15, -15, -15, -15], [17, 22, 17, 22], False
)
``` |
{
"source": "0xFA7E/identify_files",
"score": 3
} |
#### File: identify_files/data_clean/commands.py
```python
from abc import ABC, abstractmethod
from typing import Optional
from data_clean.stats import Stats
from data_clean.configuration import Config
class Command(ABC):
    def __init__(self, files: list[str], config: Config, stats: Stats, hashes: Optional[list[str]] = None):
self.files = files
self.stats = stats
self.test = config.test
self.verbose = config.verbose
self.debug = config.debug
self.hashes = hashes
super().__init__()
@abstractmethod
def run(self) -> list[str]:
pass
```
#### File: identify_files/data_clean/processing.py
```python
from os.path import isfile, join, isdir
from os import listdir
from typing import Optional
def files_from_dir(directory: str, recursive: bool = False, exclude: Optional[list[str]] = None) -> list[str]:
"""Takes a directory as a string and processes it for files to identify or delete, if recursive is set
it will descend into those as well"""
if not exclude:
exclude = []
files = [join(directory, f) for f in listdir(directory) if isfile(join(directory, f)) and join(directory,f) not in exclude]
if recursive:
# we're descending recursively but not checking for any looping redirects or anything, take care humans
directories = [join(directory, dirs) for dirs in listdir(directory) if isdir(join(directory, dirs)) and join(directory,dirs) not in exclude]
        for subdirectory in directories:
            files.extend(files_from_dir(subdirectory, recursive=True, exclude=exclude))
return files
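# Rough usage sketch (paths are hypothetical): files_from_dir("/cases/evidence",
# recursive=True, exclude=["/cases/evidence/skip.bin"]) returns every file under
# /cases/evidence, descending into sub-directories, minus the excluded path.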
``` |
{
"source": "0xFable/capsule",
"score": 3
} |
#### File: capsule/cmds/execute.py
```python
import os
import pathlib
import sys
from capsule.lib.deployer import Deployer
from terra_sdk.client.lcd import LCDClient
from terra_sdk.core import Coins
import requests
import asyncio
import json
from capsule.abstractions import ACmd
from capsule.lib.logging_handler import LOG
sys.path.append(str(pathlib.Path(__file__).parent.resolve()))
DEFAULT_TESTNET_CHAIN = "bombay-12"
DEFAULT_CLONE_PATH = os.path.expanduser(
os.path.join("~", ".capsule", "localterra-clones"))
class ExecuteCmd(ACmd):
"""
Execute command -- Used to execute actions on MultiChain contracts
"""
CMD_NAME = "execute"
CMD_HELP = "Attempt to execute an action on a given contract address."
CMD_USAGE = """
$ capsule execute --contract <addr> --chain <chain> --msg <msg>"""
CMD_DESCRIPTION = "Helper tool which exposes the ability to prepare and sending ExecuteMsg's on chain specific contract addresses"
def initialise(self):
# Define usage and description
self.parser.usage = self.CMD_USAGE
self.parser.description = self.CMD_DESCRIPTION
# Add any positional or optional arguments here
self.parser.add_argument("-a", "--address",
type=str,
help="(required) Contract Address to perform query on")
# Add any positional or optional arguments here
self.parser.add_argument("-m", "--msg",
type=str,
                                 default="{}",
help="(Optional) The execution message for the contract you are trying to execute an action on. Must be a json-like str")
self.parser.add_argument("-c", "--chain",
type=str,
default="",
help="(Optional) A chain to deploy too. Defaults to localterra")
def run_command(self, args):
"""
"""
LOG.info(f"Performing msg exectution on contract addr {args.address}")
chain_url="https://bombay-lcd.terra.dev"
chain_fcd_url="https://bombay-fcd.terra.dev"
deployer = Deployer(client=LCDClient(
url=chain_url,
chain_id=args.chain or "bombay-12",
gas_prices=Coins(requests.get(f"{chain_fcd_url}/v1/txs/gas_prices").json())))
exe_result = asyncio.run(deployer.execute_contract(args.address, json.loads(args.msg)))
LOG.info(f"Execute Result {exe_result} \n\n Execute Finished.")
```
#### File: capsule/lib/credential_handler.py
```python
import os
from capsule.lib.config_handler import get_config
async def get_mnemonic(strict=False):
"""Attempt to gather a mnemonic from one of the available sources
First, if a mnemonic is defined in the env, use that.
Next, check the config file for the secret
If no mnemonic can be found, optionally raise an Exception
Args:
strict (bool, optional): When set to true, if no mnemonic is found an exception is raised. Defaults to False.
Returns:
str: The mnemonic found either in the env or in the config file
"""
if os.getenv("CAPSULE_MNEMONIC", False):
return os.environ["CAPSULE_MNEMONIC"]
config = await get_config()
if config.get("deploy_info", {}).get("mnemonic", False):
return config.get("deploy_info", {}).get("mnemonic", False)
if strict:
raise Exception("No Mnemonic was found either in the specified config file or in the environment. Strict mode is set to true")
return None
```
#### File: capsule/tests/test_config.py
```python
import os
import pytest
import mock
from capsule.lib.config_handler import DEFAULT_CONFIG_FILE_ENV_VAR, get_config_file, get_config
import asyncio
TEST_CONFIG_FILE_RELATIVE_PATH = "./capsule/lib/settings/config.toml"
TEST_CONFIG_FILE_LOCATION = os.path.abspath(
os.path.expandvars(
os.path.expanduser(TEST_CONFIG_FILE_RELATIVE_PATH)))
class TestConfigHandler():
@mock.patch.dict(os.environ, {DEFAULT_CONFIG_FILE_ENV_VAR: "/Users/notyou"}, clear=True)
def test_config_file_var_gathered_from_env(self):
"""test that ACmd can be inherited from
provided that its minimum attributes and methods
are dealt with.
"""
assert get_config_file() == "/Users/notyou"
def test_config_fails_without_a_provided_path_or_created_default_file(self):
"""test when we try to run get_config without a provided path on
an assumed fresh system that it will fail.
This is expected as when no file is provided and None is found in the env
It will default to ~/.capsule/config.toml
Which shouldn't exist on a fresh system
"""
with pytest.raises(FileNotFoundError):
asyncio.run(get_config())
def test_config_with_specified_path(self):
"""test when we try to run get_config with a provided path
that it will find the file, be able to parse the file
And we can ensure values are within the file.
"""
assert asyncio.run(get_config(TEST_CONFIG_FILE_RELATIVE_PATH))
assert 'networks' in asyncio.run(get_config(TEST_CONFIG_FILE_RELATIVE_PATH))
@mock.patch.dict(os.environ, {DEFAULT_CONFIG_FILE_ENV_VAR: TEST_CONFIG_FILE_LOCATION}, clear=True)
def test_config_gathered_from_env(self):
"""test when a mocked environment variable is setup on the system
this value is read and the function will find the file,
be able to parse the file
And we can ensure values are within the file.
"""
assert asyncio.run(get_config())
assert 'networks' in asyncio.run(get_config())
```
#### File: capsule/tests/test_parser.py
```python
from capsule.abstractions import ACmd
from capsule.cmds.deploy import DeployCmd
from capsule.parser import get_main_parser, get_subcommmand_parser
from capsule.cmds import DeployCmd
import pytest
import argparse
class TestCommandAbstraction():
def test_interface_with_impl_methods(self):
"""test that ACmd can be inherited from
provided that its minimum attributes and methods
are dealt with.
"""
class GoodImpl(ACmd):
CMD_NAME = 'good'
CMD_DESCRIPTION = 'a very useful command'
CMD_HELP = 'help instructions when you get a common problem'
CMD_USAGE = '$ capsule command [options]'
            def run_command(self, args):
                pass
def initialise(self):
pass
main_parser = get_main_parser()
sub_parser = get_subcommmand_parser(main_parser)
GoodImpl.__init__(GoodImpl, sub_parser=sub_parser)
def test_interface_without_std_attributes(self):
"""test to confirm an Attribute error is provided
to the developer if they do not define
each of the required attributes
"""
main_parser = get_main_parser()
sub_parser = get_subcommmand_parser(main_parser)
DeployCmd.__init__(DeployCmd, sub_parser=sub_parser)
class BadImpl(ACmd):
pass
with pytest.raises(AttributeError):
BadImpl.__init__(BadImpl, sub_parser=sub_parser)
def test_interface_without_std_methods(self):
"""test to confirm a NotImplementedError is provided
to the developer if they do not define
each of the required methods
"""
main_parser = get_main_parser()
sub_parser = get_subcommmand_parser(main_parser)
DeployCmd.__init__(DeployCmd, sub_parser=sub_parser)
class BadImpl(ACmd):
CMD_NAME = 'alpha'
CMD_DESCRIPTION = 'its nearly ready for prod!'
CMD_HELP = 'help instructions when you get a common problem'
CMD_USAGE = '$ capsule command [options]'
# Note none of the needed methods are defined
with pytest.raises(NotImplementedError):
BadImpl.__init__(BadImpl, sub_parser=sub_parser)
``` |
{
"source": "0xfab-ri/saleor",
"score": 2
} |
#### File: account/mutations/service_account.py
```python
import graphene
from saleor.core.permissions import get_permissions
from ....account import models
from ...core.enums import PermissionEnum
from ...core.mutations import (
ClearMetaBaseMutation,
ModelDeleteMutation,
ModelMutation,
UpdateMetaBaseMutation,
)
from ...core.types.common import AccountError
class ServiceAccountInput(graphene.InputObjectType):
name = graphene.String(description="Name of the service account.")
is_active = graphene.Boolean(
description="Determine if this service account should be enabled."
)
permissions = graphene.List(
PermissionEnum,
description="List of permission code names to assign to this service account.",
)
class ServiceAccountTokenInput(graphene.InputObjectType):
name = graphene.String(description="Name of the token.", required=False)
service_account = graphene.ID(description="ID of service account.", required=True)
class ServiceAccountTokenCreate(ModelMutation):
auth_token = graphene.types.String(
description="The newly created authentication token."
)
class Arguments:
input = ServiceAccountTokenInput(
required=True, description="Fields required to create a new auth token."
)
class Meta:
description = "Creates a new token."
model = models.ServiceAccountToken
permissions = ("account.manage_service_accounts",)
error_type_class = AccountError
error_type_field = "account_errors"
@classmethod
def perform_mutation(cls, root, info, **data):
instance = cls.get_instance(info, **data)
data = data.get("input")
cleaned_input = cls.clean_input(info, instance, data)
instance = cls.construct_instance(instance, cleaned_input)
cls.clean_instance(instance)
cls.save(info, instance, cleaned_input)
cls._save_m2m(info, instance, cleaned_input)
response = cls.success_response(instance)
response.auth_token = instance.auth_token
return response
class ServiceAccountTokenDelete(ModelDeleteMutation):
class Arguments:
id = graphene.ID(description="ID of an auth token to delete.", required=True)
class Meta:
description = "Deletes an authentication token assigned to service account."
model = models.ServiceAccountToken
permissions = ("account.manage_service_accounts",)
error_type_class = AccountError
error_type_field = "account_errors"
class ServiceAccountCreate(ModelMutation):
auth_token = graphene.types.String(
description="The newly created authentication token."
)
class Arguments:
input = ServiceAccountInput(
required=True,
description="Fields required to create a new service account.",
)
class Meta:
description = "Creates a new service account"
model = models.ServiceAccount
permissions = ("account.manage_service_accounts",)
error_type_class = AccountError
error_type_field = "account_errors"
@classmethod
def clean_input(cls, info, instance, data):
cleaned_input = super().clean_input(info, instance, data)
# clean and prepare permissions
if "permissions" in cleaned_input:
permissions = cleaned_input.pop("permissions")
cleaned_input["permissions"] = get_permissions(permissions)
return cleaned_input
@classmethod
def save(cls, info, instance, cleaned_input):
instance.save()
instance.tokens.create(name="Default")
@classmethod
def success_response(cls, instance):
response = super().success_response(instance)
response.auth_token = instance.tokens.get().auth_token
return response
class ServiceAccountUpdate(ModelMutation):
class Arguments:
id = graphene.ID(
description="ID of a service account to update.", required=True
)
input = ServiceAccountInput(
required=True,
description="Fields required to update an existing service account.",
)
class Meta:
description = "Updates an existing service account"
model = models.ServiceAccount
permissions = ("account.manage_service_accounts",)
error_type_class = AccountError
error_type_field = "account_errors"
@classmethod
def clean_input(cls, info, instance, data):
cleaned_input = super().clean_input(info, instance, data)
# clean and prepare permissions
if "permissions" in cleaned_input:
cleaned_input["permissions"] = get_permissions(cleaned_input["permissions"])
return cleaned_input
class ServiceAccountDelete(ModelDeleteMutation):
class Arguments:
id = graphene.ID(
description="ID of a service account to delete.", required=True
)
class Meta:
description = "Deletes a service account"
model = models.ServiceAccount
permissions = ("account.manage_service_accounts",)
error_type_class = AccountError
error_type_field = "account_errors"
class ServiceAccountUpdatePrivateMeta(UpdateMetaBaseMutation):
class Meta:
description = "Updates private metadata for a service account."
permissions = ("account.manage_service_accounts",)
model = models.ServiceAccount
public = False
error_type_class = AccountError
error_type_field = "account_errors"
class ServiceAccountClearPrivateMeta(ClearMetaBaseMutation):
class Meta:
description = "Clear stored metadata value."
model = models.ServiceAccount
permissions = ("account.manage_service_accounts",)
public = False
error_type_class = AccountError
error_type_field = "account_errors"
``` |
{
"source": "0xfadead/soundcloud-backup",
"score": 3
} |
#### File: 0xfadead/soundcloud-backup/backup.py
```python
import requests
import zipfile
import tempfile
import sys
import argparse
import time
CLIENT_ID = 'JlZIsxg2hY5WnBgtn3jfS0UYCl0K8DOg'
INFO_BASE_URL = 'https://api.soundcloud.com/resolve.json'
TRACKS_BASE_URL = 'https://api.soundcloud.com/users/{:d}/tracks'
LIMIT = 50 # the max number of tracks SoundCloud will return per request
ARCHIVE_SKELETON = '{:s}-{:s}.zip'
# SoundCloud streamable tracks are transcoded
# at 128 kbps and are in mp3 format
STREAM_SKELETON = '{:s}.mp3'
def error(msg):
print(msg, file=sys.stderr)
def json_request(scurl, payload):
try:
r = requests.get(scurl, params=payload)
if r.status_code != requests.codes.ok:
error('Could not reach: {}'.format(str(r.status_code)))
return {}
return r.json()
except requests.exceptions.RequestException as e:
error(e)
return {}
def user_info(scurl):
data = json_request(
INFO_BASE_URL, {
'url': scurl, # encode (?)
'client_id': CLIENT_ID
})
if not bool(data):
return [None for _ in range(4)]
return data.get('id'), data.get('username'), \
data.get('permalink'), data.get('track_count')
def user_tracks(userid, offset):
# todo: downloadable + download_url (?)
target_keys = ('id', 'streamable', 'stream_url', 'permalink', 'title')
data = json_request(
TRACKS_BASE_URL.format(userid),
{'client_id': CLIENT_ID,
'offset': offset})
return [{k: unfiltered.get(k) for k in target_keys}
for unfiltered in data ]
def save_audio_stream(fout, csize, streamurl):
r = requests.get(streamurl,
{'client_id' : CLIENT_ID},
stream=True)
if r.status_code != requests.codes.ok:
error('Could not reach: {}'.format(str(r.status_code)))
return False
for chunk in r.iter_content(chunk_size=csize):
if chunk:
fout.write(chunk)
return True
def main():
# Get command line args.
# Everyone's favorite thing about coding...
parser = argparse.ArgumentParser()
parser.add_argument('url',
type=str,
help="Url of SoundCloud profile you'd like to backup"
)
parser.add_argument('-C', '--client-id',
type=str,
                        help='If you are getting a 429 response the default \
client id is maxed out for the day so you \
can optionally supply a different one.'
)
parser.add_argument('-A', '--name',
type=str,
help="Name of the archive"
)
parser.add_argument('-Z', '--chunk-size',
type=int,
default=1024,
help='The chunk size in which pieces of the mp3 file \
will be saved (default: 1024).'
)
parser.add_argument('-d', '--delay-time',
type=int,
default=0,
help='Specify a delay time (in seconds) between each track download.'
)
args = parser.parse_args()
url = args.url
if ('soundcloud.com' not in url or
not url.startswith('https://')):
print('Please use a valid HTTPS Soundcloud Url')
return
if args.client_id is not None:
global CLIENT_ID
CLIENT_ID = args.client_id
uid, uname, ulink, trackcnt = user_info(url)
if uid is None:
print('Could not locate: {}'.format(url))
return
tracks = []
    for offset in range(0, trackcnt, LIMIT):
tracks += user_tracks(uid, offset)
    if not tracks:
        print('{} has no songs!'.format(url))
return
print('{:d} streamable tracks on {}\'s page'.format(len(tracks), uname))
zipname = (ARCHIVE_SKELETON.format(uname, ulink)
if args.name is None else args.name)
with zipfile.ZipFile(zipname, 'w', zipfile.ZIP_DEFLATED) as archive:
print('Starting download...')
for track in tracks:
if not track['streamable']:
print(' {} is not streamable.'.format(track['title']))
continue
with tempfile.NamedTemporaryFile('wb') as f:
if save_audio_stream(f, args.chunk_size, track['stream_url']):
archive.write(f.name, arcname=STREAM_SKELETON
.format(track['permalink']))
print(' {} has been saved to the archive'.format(track['title']))
else:
print(' Could not download: {}'.format(track['title']))
time.sleep(args.delay_time)
if __name__ == '__main__':
main()
``` |
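For illustration, a hedged sketch of driving the helpers above from another script; the module name `backup` and the profile URL are assumptions, and the hard-coded CLIENT_ID must still be accepted by SoundCloud:

```python
# Hypothetical usage sketch (not part of the original script).
from backup import user_info, user_tracks, LIMIT

uid, uname, ulink, trackcnt = user_info('https://soundcloud.com/some-artist')
if uid is not None:
    tracks = []
    for offset in range(0, trackcnt, LIMIT):
        tracks += user_tracks(uid, offset)
    print('{:d} tracks found for {}'.format(len(tracks), uname))
```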
{
"source": "0xfcmartins/binance-stacking-checker",
"score": 3
} |
#### File: binance-stacking-checker/utils/application.py
```python
import json
from types import SimpleNamespace
API_PUBLIC = "https://www.binance.com/bapi/earn/v1/friendly/pos/union?"
TS_FORMAT = "%d/%m/%Y %H:%M:%S"
LOG_FORMAT = "%d%m%Y"
CONFIG_FILE = '../config/config.json'
INTERVAL = 30
app_config_file = None
class Colors:
ASSET = '\033[96m'
WARNING = '\033[90m'
VALUES = '\033[94m'
TAG = '\033[92m'
END = '\033[0m'
R30 = '\033[48;5;12m'
R60 = '\033[48;5;9m'
R90 = '\033[48;5;76m'
R_BG = '\033[38;5;15m'
def bg_color(text, color):
return "\33[48;5;" + str(color) + "m" + text + "\33[0m"
def duration_color(text, color):
if color == '30':
return bg_color("\33[38;5;" + str(15) + "m " + text + " \33[0m", 12)
elif color == '60':
return bg_color("\33[38;5;" + str(15) + "m " + text + " \33[0m", 9)
elif color == '90':
return bg_color("\33[38;5;" + str(15) + "m " + text + " \33[0m", 76)
else:
return "\33[38;5;" + str(color) + "m" + text + "\33[0m"
def load_config():
    with open(CONFIG_FILE) as config_file:
        config = json.load(config_file)
global app_config_file
app_config_file = json.loads(json.dumps(config), object_hook=lambda d: SimpleNamespace(**d))
print(app_config_file.interval)
def get_conf():
if app_config_file is None:
load_config()
return app_config_file
if __name__ == '__main__':
load_config()
```
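A short, hedged example of reading the parsed settings through `get_conf()`; it assumes `../config/config.json` exists with at least an `interval` field, as implied by the code above:

```python
# Hypothetical usage sketch; the import path mirrors the file location shown above.
from utils.application import get_conf, duration_color

conf = get_conf()                       # lazily loads ../config/config.json on first use
print(conf.interval)                    # attribute access via SimpleNamespace
print(duration_color('30 days', '30'))  # colored label for a 30-day product
```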
#### File: binance-stacking-checker/utils/notifications.py
```python
import requests
import hashlib
from utils.writer import replacer
# noinspection SpellCheckingInspection
telegram_bot = '2113591415:'
telegram_base = 'https://api.telegram.org/bot'
pro_mode = ''
dev_key = 'textbelt'
chat_id = '-'
sent = {}
def notify(asset, duration, phone_number):
global sent
message = '🚨🚨 ' + replacer(asset) + ' 🚨🚨 is OPEN ' + replacer(duration) + ' days on Binance!'
if hashlib.md5(message.encode()).hexdigest() not in sent:
resp = requests.post('https://textbelt.com/text', {
'phone': phone_number,
'message': message + ' OPEN on binance!',
'key': dev_key
})
        if resp.json()['success']:
print('SMS notification sent!')
sent[hashlib.md5(message.encode()).hexdigest()] = True
def telegram(asset, duration):
message = '🚨🚨 ' + replacer(asset) + ' 🚨🚨 is OPEN ' + replacer(duration) + ' days on Binance!'
send_text = telegram_base + telegram_bot + '/sendMessage?chat_id=' + chat_id + '&parse_mode=Markdown&text=' + message
requests.get(send_text)
def on(assets):
message = 'The application is ONLINE, watching for ' + assets
send_text = telegram_base + telegram_bot + '/sendMessage?chat_id=' + chat_id + '&parse_mode=Markdown&text=' + message
requests.get(send_text)
``` |
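A hedged sketch of calling the notifiers above; the asset, duration, and phone number are placeholders, and the truncated bot token and chat id in the module must be completed before the Telegram calls can succeed:

```python
# Hypothetical usage sketch; all values below are placeholders, not real configuration.
from utils.notifications import on, telegram, notify

on('AXS, DOT')                        # announce that the watcher is online
telegram('AXS', '30')                 # Telegram alert for a 30-day staking product
notify('AXS', '30', '+15555555555')   # optional SMS alert via textbelt
```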
{
"source": "0xfdb/vicky",
"score": 3
} |
#### File: vicky/modules/bofh.py
```python
import random
from pathlib import Path
from lib.cog import Cog
from lib.command import Command, command
class Bofh(Cog):
def __init__(self, bot):
super().__init__(bot)
self.path = Path.cwd() / self.settings["excuse_path"]
@command(
aliases=["bofh", "excuse"],
description="f0ur0nes excuse for why shit sucks in any particular instance",
)
def run(self, c: Command):
if self.path.exists():
excuses = self.path.read_text().split("\n")
excuse = random.choice(excuses).strip()
self.sendmsg(excuse)
else:
self.sendmsg("Couldn't find excuses file.")
```
#### File: vicky/modules/kanye.py
```python
from lib.cog import Cog
from lib.command import Command, command
from lib.web import get
class Kanye(Cog):
@command(aliases=["kanye"], description="")
def run(self, c: Command):
req = get(url="https://api.kanye.rest/", json=True)
if req.status == 200:
self.sendmsg(req.data["quote"])
```
#### File: vicky/modules/kodictrl.py
```python
from lib.cog import Cog
from lib.command import Command, command
from kodipydent import Kodi
import json
from lib.paste import pastebin
my_kodi = Kodi('192.168.1.123')
def get_activeplayer(): return my_kodi.Player.GetActivePlayers()
def get_movies(): return my_kodi.VideoLibrary.GetMovies()
def ctrl_play_pause(): my_kodi.Player.PlayPause(1)
def ctrl_play_item(): my_kodi.Player.Open()
def ctrl_stop_item(): my_kodi.Player.Stop(1)
class kodictrl(Cog):
@command(aliases=['kodictrl'], description='abc')
def ctrl_kodi(self, c: Command):
kodi_msg = c.message.split(" ") #commands
nick = c.user.nick
isop = c.user.isop
isowner = c.user.isowner
nick_allowed = False
if isop == True or isowner == True:
nick_allowed = True
print(kodi_msg)
if kodi_msg[0] == 'playpause' and nick_allowed == True: #play/pause
ctrl_play_pause()
self.sendmsg("Play/Pause")
elif kodi_msg[0] == 'play' and nick_allowed == True: #play movie
movie_name = kodi_msg[:0]+kodi_msg[0+1:]
movie_name_search = ""
print(movie_name)
for i in range(len(movie_name)):
movie_name_search += str(movie_name[i]).lower().replace('"', '')
print(movie_name_search)
self.sendmsg("now playing " + str(movie_name_search) + " with movie id " + str(movie_id(movie_name_search)))
my_kodi.Player.Open(item={'movieid':movie_id(movie_name_search)})
elif kodi_msg[0] == 'stop' and nick_allowed == True: #stop
ctrl_stop_item()
self.sendmsg("Stopped Playback")
elif kodi_msg[0] == 'list': #list
#movielistpaste = ""
#self.sendmsg("Grabbing list...")
#movielist_raw = my_kodi.VideoLibrary.GetMovies()
#movielist = movielist_raw['result']['movies']
#print(movielist)
#for i in range(len(movielist)):
# movielistpaste = (movielistpaste + movielist[i]['label'] + "\n")
#self.sendmsg("Done grabbing and formating, sending to pastebin...")
#self.sendmsg(str(pastebin(movielistpaste)).replace("com/", "com/raw/"))
self.sendmsg("Movie List - https://kapi.0xfdb.xyz/movies")
self.sendmsg("TV Show List - https://kapi.0xfdb.xyz/tvshows")
elif kodi_msg[0] == 'playlist': #playlist
playlistmsg = ""
if len(kodi_msg) >= 2: #playlist sub commands
if kodi_msg[1] == 'list': #playlist list
playlist_raw = my_kodi.Playlist.GetItems(1)
playlist = playlist_raw['result']['items']
print("playlist is " + str(playlist))
self.sendmsg("- playlist -")
for i in range(len(playlist)):
print("test " + str(i))
self.sendmsg(str(i + 1) + ". " + playlist[i]['label'])
self.sendmsg("- end of playlist -")
elif kodi_msg[1] == 'add' and nick_allowed == True: #playlist add
if len(kodi_msg) >= 3: #title
movie_name = kodi_msg[:0]+kodi_msg[0+1:]
movie_name = movie_name[:0]+movie_name[0+1:]
movie_name_search = ""
print(movie_name)
for i in range(len(movie_name)):
movie_name_search += str(movie_name[i]).lower().replace('"', '')
self.sendmsg("lol we will try to add " + str(movie_name_search) + " to the playlist")
my_kodi.Playlist.Add(1, item={'movieid':movie_id(movie_name_search)})
self.sendmsg("check if that worked son")
else:
self.sendmsg("no item given")
elif kodi_msg[1] == 'swap' and nick_allowed == True: #playlist swap
if len(kodi_msg) >= 3:
item1 = int(kodi_msg[2]) - 1
item2 = int(kodi_msg[3]) - 1
self.sendmsg("swapping playlist item " + str(kodi_msg[2]) + " with " + str(kodi_msg[3]))
my_kodi.Playlist.Swap(1, item1, item2)
else:
self.sendmsg("not enough arguments")
elif kodi_msg[1] == 'remove' and nick_allowed == True: #playlist remove
if len(kodi_msg) >= 3:
item_rem = int(kodi_msg[2]) - 1
self.sendmsg("removing item " + kodi_msg[2] + " from playlist")
                    my_kodi.Playlist.Remove(1, item_rem)
else:
                    self.sendmsg("no item given")
else: self.sendmsg("i dont know that playlist command")
else:
self.sendmsg("requires more options")
elif kodi_msg[0] == 'help': #help
self.sendmsg("kodictrl - Vicky IRC bot module for controlling live stream for 0xfdb network")
self.sendmsg("USAGE - ;kodictrl option [sub-option] title")
self.sendmsg("Availible Options:")
self.sendmsg(" list Posts list of availible titles")
self.sendmsg(" play title Plays given title")
self.sendmsg(" playpause Pauses/Plays title")
self.sendmsg(" stop Stops title")
self.sendmsg(" playlist Playlist mgmt (requires the following sub-options)")
self.sendmsg(" list Lists Current Playlist")
self.sendmsg(" add title Adds additional title to playlist")
self.sendmsg(" remove x Removes title from place in playlist (BROKEN)")
self.sendmsg(" swap x y Swaps 2 titles places in playlist (BROKEN)")
elif nick_allowed == False:
self.sendmsg( "" + nick + " does not have the required privileges for this command.")
else:
self.sendmsg("Command not recognized. Send ';kodictrl help' for a list of options.")
def movie_id(moviename):
movieid = ""
movielist_raw = my_kodi.VideoLibrary.GetMovies()
movielist = movielist_raw['result']['movies']
for i in range(len(movielist)):
if str(movielist[i]['label']).lower().replace('"', '').replace(" ","") == moviename:
movieid = movielist[i]['movieid']
print("got")
print(str(movielist[i]['label']).lower().replace('"', '').replace(" ",""))
print("searched for")
print(moviename)
return movieid
```
#### File: vicky/modules/youtube.py
```python
import re
from irc.client import Event
from lib.cog import Cog, event
from lib.command import Command, command
from lib.objects import Events
from lib.web import Web, get
class Youtube(Cog):
@event(Events.PubMsg)
def youtube(self, event: Event):
# TODO store videoid's in db
if re.search("youtu(be\.com|\.be)", event.arguments[0]):
ytid = re.search("(?:v=|\.be\/)(.{11})", event.arguments[0])[1]
info = self.lookup(ytid)
if len(info) != 0:
self.sendmsg(
"[ {} — {} ({}) ] - youtu.be".format(
info["title"], info["channel"], info["duration"]
)
)
def lookup(self, videoid: str) -> dict:
searchurl = (
"https://www.googleapis.com/youtube/v3/videos?"
"part=contentDetails,snippet&id={vid}&fields="
"items(contentDetails%2Fduration%2Csnippet(channelTitle%2Ctitle))"
"&key={key}"
)
url = searchurl.format(vid=videoid, key=self.settings["api_key"])
ytjson = get(url, json=True)
if ytjson.data.get("error", False):
return {}
# videoid = ytjson.data["items"][0]["id"]["videoId"]
title = ytjson.data["items"][0]["snippet"]["title"]
channel = ytjson.data["items"][0]["snippet"]["channelTitle"]
_duration = ytjson.data["items"][0]["contentDetails"]["duration"]
duration = _duration.lstrip("PT").lower()
return {"title": title, "channel": channel, "duration": duration}
``` |
{
"source": "0xfede7c8/scrapy-fake-useragent",
"score": 2
} |
#### File: scrapy-fake-useragent/tests/test_retry_middleware.py
```python
import pytest
from scrapy import Request
from scrapy.http import Response
from scrapy.spiders import Spider
from scrapy.utils.test import get_crawler
from twisted.internet.error import DNSLookupError
from scrapy_fake_useragent.middleware import RetryUserAgentMiddleware
@pytest.fixture
def retry_middleware_response(request):
"""
Fixture to simplify creating a crawler
with an activated middleware and going through
the request-response cycle.
Executes process_response() method of the middleware.
"""
settings, status = request.param
crawler = get_crawler(Spider, settings_dict=settings)
spider = crawler._create_spider('foo')
mw = RetryUserAgentMiddleware.from_crawler(crawler)
req = Request('http://www.scrapytest.org/')
rsp = Response(req.url, body=b'', status=status)
yield mw.process_response(req, rsp, spider)
@pytest.fixture
def retry_middleware_exception(request):
"""
Fixture to simplify creating a crawler
with an activated retry middleware and going through
the request-response cycle.
Executes process_exception() method of the middleware.
"""
settings, exception = request.param
crawler = get_crawler(Spider, settings_dict=settings)
spider = crawler._create_spider('foo')
mw = RetryUserAgentMiddleware.from_crawler(crawler)
req = Request('http://www.scrapytest.org/')
yield mw.process_exception(req, exception, spider)
@pytest.mark.parametrize(
'retry_middleware_response',
(({'FAKEUSERAGENT_FALLBACK': 'firefox'}, 503), ),
indirect=True
)
def test_random_ua_set_on_response(retry_middleware_response):
assert 'User-Agent' in retry_middleware_response.headers
@pytest.mark.parametrize(
'retry_middleware_exception',
(({'FAKEUSERAGENT_FALLBACK': 'firefox'},
DNSLookupError('Test exception')), ),
indirect=True
)
def test_random_ua_set_on_exception(retry_middleware_exception):
assert 'User-Agent' in retry_middleware_exception.headers
``` |
{
"source": "0xFEEDC0DE64/homeassistant-core",
"score": 2
} |
#### File: components/goe_charger/select.py
```python
from __future__ import annotations
import logging
from homeassistant.core import HomeAssistant
from homeassistant.config_entries import ConfigEntry
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.update_coordinator import CoordinatorEntity, DataUpdateCoordinator
from homeassistant.components.select import SelectEntity
from .const import DOMAIN
from .common import GoeChargerHub
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities) -> None:
coordinator = hass.data[DOMAIN][config_entry.entry_id]
serial = config_entry.data["serial"]
async_add_entities([
GoeChargerSelect(coordinator, config_entry, "Logic mode", serial, "logic_mode", None, None, "lmo", {
3: "Default",
4: "Awattar",
5: "AutomaticStop"
}),
GoeChargerSelect(coordinator, config_entry, "Unlock setting", serial, "unlock_setting", None, None, "ust", {
0: "Normal",
1: "AutoUnlock",
2: "AlwaysLock",
3: "ForceUnlock"
}),
GoeChargerSelect(coordinator, config_entry, "Access control", serial, "access_control", None, None, "acs", {
0: "Open",
1: "Wait"
}),
GoeChargerSelect(coordinator, config_entry, "Force state", serial, "force_state", None, None, "frc", {
0: "Neutral",
1: "Off",
2: "On"
}),
GoeChargerSelect(coordinator, config_entry, "Phase switch mode", serial, "phase_switch_mode", None, None, "psm",
{
1: "Force_1",
2: "Force_3"
})
])
class GoeChargerSelect(CoordinatorEntity, SelectEntity):
"""Representation of a Sensor."""
def __init__(self, coordinator: DataUpdateCoordinator, config_entry: ConfigEntry, name: str, serial: str,
unique_id: str, unit_of_measurement: str, device_class: str | None, key: str, options: dict[int, str]):
"""Pass coordinator to CoordinatorEntity."""
super().__init__(coordinator)
self._name = name
self._config_entry = config_entry
self._serial = serial
self._unique_id = unique_id
self._unit_of_measurement = unit_of_measurement
self._device_class = device_class
self._key = key
self._options = options
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def unique_id(self):
"""Return the unique id of the device."""
return "goe_charger_" + self._serial + "_" + self._unique_id
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
@property
def device_class(self):
"""Return the device class."""
return self._device_class
@property
def available(self) -> bool:
"""Return True if entity is available."""
return (self.coordinator.data is not None and
self._key in self.coordinator.data and
self.coordinator.data[self._key] is not None)
@property
def current_option(self) -> str | None:
"""The current select option"""
if not self.available:
return None
current_data = self.coordinator.data[self._key]
if current_data in self._options:
return self._options[current_data]
return "Unknown (" + str(current_data) + ")"
@property
def options(self) -> list[str]:
"""A list of available options as strings"""
return list(self._options.values())
async def async_select_option(self, option: str) -> None:
"""Change the selected option."""
key_list = list(self._options.keys())
val_list = list(self._options.values())
index = val_list.index(option)
hub = GoeChargerHub(self._config_entry.data["secure"], self._config_entry.data["host"],
self._config_entry.data["pathprefix"])
await hub.set_data(self.hass, {
self._key: key_list[index]
})
async def async_update(self):
"""Fetch new state data for the sensor.
This is the only method that should fetch new data for Home Assistant.
"""
await self.coordinator.async_request_refresh()
@property
def device_info(self):
"""Get attributes about the device."""
return {
"identifiers": {(DOMAIN, self._serial)}
}
```
#### File: components/goe_charger/sensor.py
```python
from __future__ import annotations
import logging
from typing import Final
from homeassistant.core import HomeAssistant
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (ELECTRIC_POTENTIAL_VOLT, ELECTRIC_CURRENT_AMPERE, POWER_WATT, POWER_KILO_WATT,
FREQUENCY_HERTZ, ENERGY_WATT_HOUR, ENERGY_KILO_WATT_HOUR, TEMP_CELSIUS,
SIGNAL_STRENGTH_DECIBELS, DEVICE_CLASS_CURRENT, DEVICE_CLASS_ENERGY,
DEVICE_CLASS_POWER_FACTOR, DEVICE_CLASS_POWER, DEVICE_CLASS_SIGNAL_STRENGTH,
DEVICE_CLASS_TEMPERATURE, DEVICE_CLASS_VOLTAGE)
from homeassistant.helpers.update_coordinator import CoordinatorEntity, DataUpdateCoordinator
from homeassistant.components.sensor import STATE_CLASS_MEASUREMENT, STATE_CLASS_TOTAL_INCREASING
from homeassistant.components.sensor import SensorEntity
from .const import DOMAIN
POWER_FACTOR: Final = "%"
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities) -> None:
coordinator = hass.data[DOMAIN][config_entry.entry_id]
serial = config_entry.data["serial"]
def car_state_data(data):
car_state_texts = {
0: "Unknown",
1: "Idle",
2: "Charging",
3: "WaitCar",
4: "Complete",
5: "Error"
}
if data["car"] in car_state_texts:
return car_state_texts[data["car"]]
return "Unknown (" + str(data["car"]) + ")"
def error_data(data):
error_texts = {
0: "None",
1: "FiAc",
2: "FiDc",
3: "Phase",
4: "Overvolt",
5: "Overamp",
6: "Diode",
7: "PpInvalid",
8: "GndInvalid",
9: "ContactorStuck",
10: "ContactorMiss",
11: "FiUnknown",
12: "Unknown",
13: "Overtemp",
14: "NoComm",
15: "StatusLockStuckOpen",
16: "StatusLockStuckLocked",
17: "Reserved20",
18: "Reserved21",
19: "Reserved22",
20: "Reserved23",
21: "Reserved24"
}
if data["err"] in error_texts:
return error_texts[data["err"]]
return "Unknown (" + str(data["err"]) + ")"
def model_status_data(data):
model_status_texts = {
0: "NotChargingBecauseNoChargeCtrlData",
1: "NotChargingBecauseOvertemperature",
2: "NotChargingBecauseAccessControlWait",
3: "ChargingBecauseForceStateOn",
4: "NotChargingBecauseForceStateOff",
5: "NotChargingBecauseScheduler",
6: "NotChargingBecauseEnergyLimit",
7: "ChargingBecauseAwattarPriceLow",
8: "ChargingBecauseAutomaticStopTestLadung",
9: "ChargingBecauseAutomaticStopNotEnoughTime",
10: "ChargingBecauseAutomaticStop",
11: "ChargingBecauseAutomaticStopNoClock",
12: "ChargingBecausePvSurplus",
13: "ChargingBecauseFallbackGoEDefault",
14: "ChargingBecauseFallbackGoEScheduler",
15: "ChargingBecauseFallbackDefault",
16: "NotChargingBecauseFallbackGoEAwattar",
17: "NotChargingBecauseFallbackAwattar",
18: "NotChargingBecauseFallbackAutomaticStop",
19: "ChargingBecauseCarCompatibilityKeepAlive",
20: "ChargingBecauseChargePauseNotAllowed",
22: "NotChargingBecauseSimulateUnplugging",
23: "NotChargingBecausePhaseSwitch",
24: "NotChargingBecauseMinPauseDuration"
}
if data["modelStatus"] in model_status_texts:
return model_status_texts[data["modelStatus"]]
return "Unknown (" + str(data["modelStatus"]) + ")"
async_add_entities([
GoeChargerSensor(coordinator, "Voltage L1", serial, "voltage_l1", ELECTRIC_POTENTIAL_VOLT, DEVICE_CLASS_VOLTAGE,
STATE_CLASS_MEASUREMENT, "nrg", lambda data: data["nrg"][0]),
GoeChargerSensor(coordinator, "Voltage L2", serial, "voltage_l2", ELECTRIC_POTENTIAL_VOLT, DEVICE_CLASS_VOLTAGE,
STATE_CLASS_MEASUREMENT, "nrg", lambda data: data["nrg"][1]),
GoeChargerSensor(coordinator, "Voltage L3", serial, "voltage_l3", ELECTRIC_POTENTIAL_VOLT, DEVICE_CLASS_VOLTAGE,
STATE_CLASS_MEASUREMENT, "nrg", lambda data: data["nrg"][2]),
GoeChargerSensor(coordinator, "Voltage N", serial, "voltage_n", ELECTRIC_POTENTIAL_VOLT, DEVICE_CLASS_VOLTAGE,
STATE_CLASS_MEASUREMENT, "nrg", lambda data: data["nrg"][3]),
GoeChargerSensor(coordinator, "Current L1", serial, "current_l1", ELECTRIC_CURRENT_AMPERE, DEVICE_CLASS_CURRENT,
STATE_CLASS_MEASUREMENT, "nrg", lambda data: data["nrg"][4]),
GoeChargerSensor(coordinator, "Current L2", serial, "current_l2", ELECTRIC_CURRENT_AMPERE, DEVICE_CLASS_CURRENT,
STATE_CLASS_MEASUREMENT, "nrg", lambda data: data["nrg"][5]),
GoeChargerSensor(coordinator, "Current L3", serial, "current_l3", ELECTRIC_CURRENT_AMPERE, DEVICE_CLASS_CURRENT,
STATE_CLASS_MEASUREMENT, "nrg", lambda data: data["nrg"][6]),
GoeChargerSensorNative(coordinator, "Power L1", serial, "power_l1", POWER_KILO_WATT, DEVICE_CLASS_POWER,
STATE_CLASS_MEASUREMENT, "nrg", (lambda data: data["nrg"][7] / 1000), POWER_KILO_WATT,
lambda data: data["nrg"][7]),
GoeChargerSensorNative(coordinator, "Power L2", serial, "power_l2", POWER_KILO_WATT, DEVICE_CLASS_POWER,
STATE_CLASS_MEASUREMENT, "nrg", (lambda data: data["nrg"][8] / 1000), POWER_KILO_WATT,
lambda data: data["nrg"][8]),
GoeChargerSensorNative(coordinator, "Power L3", serial, "power_l3", POWER_KILO_WATT, DEVICE_CLASS_POWER,
STATE_CLASS_MEASUREMENT, "nrg", (lambda data: data["nrg"][9] / 1000), POWER_KILO_WATT,
lambda data: data["nrg"][9]),
GoeChargerSensorNative(coordinator, "Power N", serial, "power_n", POWER_KILO_WATT, DEVICE_CLASS_POWER,
STATE_CLASS_MEASUREMENT, "nrg", (lambda data: data["nrg"][10] / 1000), POWER_KILO_WATT,
lambda data: data["nrg"][10]),
GoeChargerSensorNative(coordinator, "Power Total", serial, "power_total", POWER_KILO_WATT, DEVICE_CLASS_POWER,
STATE_CLASS_MEASUREMENT, "nrg", (lambda data: data["nrg"][11] / 1000), POWER_KILO_WATT,
lambda data: data["nrg"][11]),
GoeChargerSensor(coordinator, "Powerfactor L1", serial, "powerfactor_l1", POWER_FACTOR,
DEVICE_CLASS_POWER_FACTOR, STATE_CLASS_MEASUREMENT, "nrg", lambda data: data["nrg"][12]),
GoeChargerSensor(coordinator, "Powerfactor L2", serial, "powerfactor_l2", POWER_FACTOR,
DEVICE_CLASS_POWER_FACTOR, STATE_CLASS_MEASUREMENT, "nrg", lambda data: data["nrg"][13]),
GoeChargerSensor(coordinator, "Powerfactor L3", serial, "powerfactor_l3", POWER_FACTOR,
DEVICE_CLASS_POWER_FACTOR, STATE_CLASS_MEASUREMENT, "nrg", lambda data: data["nrg"][14]),
GoeChargerSensor(coordinator, "Powerfactor N", serial, "powerfactor_n", POWER_FACTOR, DEVICE_CLASS_POWER_FACTOR,
STATE_CLASS_MEASUREMENT, "nrg", lambda data: data["nrg"][15]),
GoeChargerSensor(coordinator, "Frequency", serial, "frequency", FREQUENCY_HERTZ, None, STATE_CLASS_MEASUREMENT,
"fhz", lambda data: data["fhz"]),
GoeChargerSensorNative(coordinator, "Charged", serial, "charged", ENERGY_KILO_WATT_HOUR, DEVICE_CLASS_ENERGY,
STATE_CLASS_TOTAL_INCREASING, "wh", (lambda data: data["wh"] / 1000), POWER_KILO_WATT,
lambda data: data["wh"]),
GoeChargerSensorNative(coordinator, "Charged total", serial, "charged_total", ENERGY_KILO_WATT_HOUR,
DEVICE_CLASS_ENERGY, STATE_CLASS_TOTAL_INCREASING, "eto",
(lambda data: data["eto"] / 1000), POWER_KILO_WATT, lambda data: data["eto"]),
GoeChargerSensor(coordinator, "Temperature 1", serial, "temperature_1", TEMP_CELSIUS, DEVICE_CLASS_TEMPERATURE,
STATE_CLASS_MEASUREMENT, "tma", lambda data: data["tma"][0]),
GoeChargerSensor(coordinator, "Temperature 2", serial, "temperature_2", TEMP_CELSIUS, DEVICE_CLASS_TEMPERATURE,
STATE_CLASS_MEASUREMENT, "tma", lambda data: data["tma"][1]),
GoeChargerSensor(coordinator, "WiFi RSSI", serial, "wifi_rssi", SIGNAL_STRENGTH_DECIBELS,
DEVICE_CLASS_SIGNAL_STRENGTH, STATE_CLASS_MEASUREMENT, "rssi", lambda data: data["rssi"]),
GoeChargerSensor(coordinator, "Cable current limit", serial, "cable_current_limit", ELECTRIC_CURRENT_AMPERE,
DEVICE_CLASS_CURRENT, None, "cbl", lambda data: data["cbl"]),
GoeChargerSensor(coordinator, "Allowed current", serial, "allowed_current", ELECTRIC_CURRENT_AMPERE,
DEVICE_CLASS_CURRENT, None, "acu", lambda data: "" if data["acu"] is None else data["acu"]),
GoeChargerSensor(coordinator, "Car state", serial, "car_state", None, None, None, "car", car_state_data),
GoeChargerSensor(coordinator, "Error", serial, "error", None, None, None, "err", error_data),
GoeChargerSensor(coordinator, "Model status", serial, "model_status", None, None, None, "modelStatus",
model_status_data),
])
class GoeChargerSensor(CoordinatorEntity, SensorEntity):
"""Representation of a Sensor."""
def __init__(self, coordinator: DataUpdateCoordinator, name: str, serial: str, unique_id: str,
unit_of_measurement: str | None, device_class: str | None, state_class: str | None, key: str,
state_cb):
"""Pass coordinator to CoordinatorEntity."""
super().__init__(coordinator)
self._name = name
self._serial = serial
self._unique_id = unique_id
self._unit_of_measurement = unit_of_measurement
self._device_class = device_class
self._state_class = state_class
self._key = key
self._state_cb = state_cb
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def unique_id(self):
"""Return the unique id of the device."""
return "goe_charger_" + self._serial + "_" + self._unique_id
@property
def available(self) -> bool:
"""Return True if entity is available."""
return (self.coordinator.data is not None and
self._key in self.coordinator.data and
self.coordinator.data[self._key] is not None)
@property
def state(self):
"""Return the state of the sensor."""
return None if not self.available else self._state_cb(self.coordinator.data)
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
@property
def device_class(self):
"""Return the device class."""
return self._device_class
@property
def state_class(self):
"""Return the state class."""
return self._state_class
async def async_update(self):
"""Fetch new state data for the sensor.
This is the only method that should fetch new data for Home Assistant.
"""
await self.coordinator.async_request_refresh()
@property
def device_info(self):
"""Get attributes about the device."""
return {
"identifiers": {(DOMAIN, self._serial)}
}
class GoeChargerSensorNative(GoeChargerSensor):
"""Representation of a Sensor with separated native unit/value."""
def __init__(self, coordinator: DataUpdateCoordinator, name: str, serial: str, unique_id: str,
unit_of_measurement: str | None, device_class: str | None, state_class: str | None, key: str, state_cb,
native_unit_of_measurement: str | None, native_state_cb):
"""Pass coordinator to GoeChargerSensor."""
super().__init__(coordinator, name, serial, unique_id, unit_of_measurement, device_class, state_class, key,
state_cb)
self._native_unit_of_measurement = native_unit_of_measurement
self._native_state_cb = native_state_cb
@property
def native_value(self):
"""Return the value reported by the sensor."""
return None if not self.available else self._native_state_cb(self.coordinator.data)
@property
def native_unit_of_measurement(self) -> str | None:
"""Return the native unit of measurement."""
return self._native_unit_of_measurement
``` |
{
"source": "0xff1234/wenshuSpider",
"score": 2
} |
#### File: 0xff1234/wenshuSpider/URLManager.py
```python
import re
from GetAPI import GetAPI
from DataOutput import DataOutput
import threading
class UrlManager(object):
def __init__(self):
# self.docids = set()
        self.lock = threading.Lock() # thread lock
def get_DocID(self, Param, Index, Page, Order, Direction):
p_docid = re.compile(r'"文书ID\\":\\"(.*?)\\"')
data = GetAPI().get_data(Param, Index, Page, Order, Direction)
return p_docid.findall(data)
def store_docids(self, Param, Index, Page, Order, Direction, db):
docids = self.get_DocID(Param, Index, Page, Order, Direction)
        region = Param.split(':')[1] # region
        self.lock.acquire() # acquire the thread lock
        for docid in docids:
            if not db.in_error_list(docid): # skip docids already in the error list
                db.insert_docid(docid, region) # store the docid in the database
self.lock.release()
def get_one_docid(self, db):
        docid = None
        if db.cur.execute('SELECT docid FROM info WHERE status = 0'): # unvisited ids
            docid = db.cur.fetchone()[0]
        elif db.cur.execute('SELECT docid FROM info WHERE status = -1'): # failed ids
            docid = db.cur.fetchone()[0]
        elif db.cur.execute('SELECT docid FROM info WHERE status = 2'): # ids left in the visiting state
            docid = db.cur.fetchone()[0]
        if docid:
            db.change_status(docid, 2) # mark the docid as being visited
return docid
return None
'''
def get_urls(self):
        docids = self.docids.copy() # shallow copy
        self.docids.clear() # keep only one list page's urls at a time to reduce overhead
return docids
'''
``` |
{
"source": "0xff-dev/SutAcmDRA",
"score": 3
} |
#### File: app/api_1_0/views.py
```python
from datetime import datetime
from flask import jsonify
from flask_login import current_user
from . import api
from ..models import User
from .. import dbm
@api.route('/users')
def get_user_data():
    # Do not hand the raw data straight to the client; prepare it on the server side, not on the front end
    # Rank users by the number of problems they have solved
users = User.query.all()
users_name = [user.hdoj_username for user in users]
user_list = []
for user_name in users_name:
        user_list.append(dbm.find_one(user_name)) # only fetch five records
user_list = sorted(user_list, key=lambda x: x['count'], reverse=True)
return jsonify({'users': user_list})
@api.route('/user')
def user_detail():
"""通过api抓取用户的主要信息, ajax在打开页面的时候直接请求"""
user_name = current_user.hdoj_username
user = User.query.filter_by(hdoj_username=user_name).first()
user_info = dbm.find_one_info(user_name)
return jsonify({'date': datetime.now(),
'photo_path': user.photo_path,
'user_info': user_info})
```
#### File: SutAcmDRA/tests/test_user_model.py
```python
import unittest
from app.models import User
class UserModelTestCase(unittest.TestCase):
def test_password_setter(self):
u = User('test_user', '<PASSWORD>', '<PASSWORD>')
self.assertTrue(u.password_hash is not None)
def test_no_password_getter(self):
u = User('test_user', '<PASSWORD>', '<PASSWORD>')
with self.assertRaises(AttributeError):
u.password
def test_password_verify(self):
u = User('test_user', '<PASSWORD>', '<PASSWORD>')
self.assertTrue(u.verify_password('<PASSWORD>'))
self.assertFalse(u.verify_password('<PASSWORD>'))
``` |
{
"source": "0xffea/iota",
"score": 2
} |
#### File: iota/api/app.py
```python
import logging
import os
import pecan
from oslo_config import cfg
from oslo_log import log
from paste import deploy
from werkzeug import serving
LOG = log.getLogger(__name__)
CONF = cfg.CONF
api_server_opts = [
cfg.StrOpt('host',
default='0.0.0.0',
help='The listen IP for the Iota API server.'),
cfg.PortOpt('port',
default=9999,
help='The port number of the Iota API server.'),
]
api_server_opt_group = cfg.OptGroup(name='api',
title='Parameters for the API server')
CONF.register_group(api_server_opt_group)
CONF.register_opts(api_server_opts, api_server_opt_group)
def setup_app():
pecan_config = {
'app': {
'root': 'iota.api.controllers.root.RootController',
'modules': [],
}
}
pecan.configuration.set_config(dict(pecan_config), overwrite=True)
app = pecan.make_app(
pecan_config['app']['root'],
debug=True,
hooks=[],
guess_content_type_from_ext=False
)
return app
def load_app():
return deploy.loadapp('config:/etc/iota/api_paste.ini')
def build_server():
app = load_app()
host, port = CONF.api.host, CONF.api.port
LOG.info('Starting server with PID %s' % os.getpid())
LOG.info('Configuration:')
CONF.log_opt_values(LOG, logging.INFO)
serving.run_simple(host, port, app, 1)
def app_factory(global_config, **local_conf):
return setup_app()
``` |
{
"source": "0xfffangel/SpreadBot",
"score": 2
} |
#### File: 0xfffangel/SpreadBot/main.py
```python
import asyncio
import multidex
import ccxt.async_support as ccxt
import argparse
def get_exchange(exchange):
if exchange == 'binance':
return ccxt.binance({'enableRateLimit': True})
elif exchange == 'bitfinex':
return ccxt.bitfinex({'enableRateLimit': True})
elif exchange == 'kucoin':
return ccxt.kucoin({'enableRateLimit': True})
elif exchange == 'bybit':
return ccxt.bybit({'enableRateLimit': True})
elif exchange == 'ftx':
return ccxt.ftx({'enableRateLimit': True})
else:
raise Exception("Exchange not supported")
def get_dex(dex):
if dex == 'stellaswap':
return multidex.Stellaswap()
elif dex == 'uniswap':
return multidex.Uniswap()
elif dex == 'spookyswap':
return multidex.Spookyswap()
elif dex == 'pancakeswap':
return multidex.Pancakeswap()
elif dex == 'beamswap':
return multidex.Beamswap()
else:
raise Exception("Dex not supported")
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--exchange", help = "Select exchange")
parser.add_argument("-p", "--pair", help = "Select pair")
parser.add_argument("-d", "--dex", help = "Select dex")
parser.add_argument("-t", "--token", help = "Select token")
return parser.parse_args()
async def main():
args = get_arguments()
exchange = get_exchange(args.exchange)
ticker = await exchange.fetch_ticker(args.pair)
print(args.exchange, " ask: ", ticker["ask"])
print(args.exchange, " bid: ", ticker["bid"])
print(args.exchange, " askVolume: ", ticker["askVolume"])
print(args.exchange, " bidVolume: ", ticker["bidVolume"])
print(args.exchange, " last: ", ticker["last"])
await exchange.close()
dex = get_dex(args.dex)
print(args.dex, " reserve_ratio: ", dex.reserve_ratio(args.token))
print(args.dex, " price: ", dex.price(args.token))
print(args.dex, " liquidity: ", dex.liquidity(args.token))
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
``` |
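A hedged sketch of using the exchange helper above outside the CLI; the exchange name and trading pair are examples only, and the file is assumed to be importable as `main`:

```python
# Hypothetical usage sketch (not part of the original entry point).
import asyncio
from main import get_exchange

async def show_spread():
    exchange = get_exchange('binance')
    ticker = await exchange.fetch_ticker('ETH/USDT')
    print('ask:', ticker['ask'], 'bid:', ticker['bid'])
    await exchange.close()

asyncio.run(show_spread())
```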
{
"source": "0xFireball/exascript2",
"score": 2
} |
#### File: exascript2/Tests/test_exec.py
```python
from iptest.assert_util import *
##
## to test how exec related to globals/locals
##
def _contains(large, small):
for (key, value) in small.items():
Assert(large[key] == value)
def _not_contains(dict, *keylist):
for key in keylist:
Assert(not dict.has_key(key))
## exec without in something
x = 1
y = "hello"
_contains(globals(), {"x":1, "y":"hello"})
_contains(locals(), {"x":1, "y":"hello"})
exec "x, y"
_contains(globals(), {"x":1, "y":"hello"})
_contains(locals(), {"x":1, "y":"hello"})
## exec with custom globals
# -- use global x, y; assign
g1 = {'x':2, 'y':'world'}
exec "global x; x, y; x = 4" in g1
_contains(globals(), {"x":1, "y":"hello"})
_contains(locals(), {"x":1, "y":"hello"})
_contains(g1, {"x":4, "y":"world"})
exec "global x; x, y; x = x + 4" in g1
_contains(g1, {"x":8})
# -- declare global
exec "global z" in g1
_not_contains(globals(), 'z')
_not_contains(locals(), 'z')
_not_contains(g1, 'z')
# -- new global
exec "global z; z = -1" in g1
_not_contains(globals(), 'z')
_not_contains(locals(), 'z')
_contains(g1, {'z':-1})
# y is missing in g2
g2 = {'x':3}
try:
exec "x, y" in g2
except NameError: pass
else: Assert(False, "should throw NameError exception")
exec "y = 'ironpython'" in g2
_contains(g2, {"x":3, "y":"ironpython"})
_contains(globals(), {"y":"hello"})
_contains(locals(), {"y":"hello"})
## exec with custom globals, locals
g = {'x': -1, 'y': 'python' }
l = {}
# use global
exec "if x != -1: throw" in g, l
exec "if y != 'python': throw" in g, l
_not_contains(l, 'x', 'y')
# new local
exec "x = 20; z = 2" in g, l
_contains(g, {"x":-1, "y":"python"})
_contains(l, {"x":20, "z":2})
# changes
exec "global y; y = y.upper(); z = -2" in g, l
_contains(g, {'x': -1, 'y': 'PYTHON'})
_contains(l, {'x': 20, 'z': -2})
# new global
exec "global w; w = -2" in g, l
_contains(g, {'x': -1, 'y': 'PYTHON', 'w': -2})
_contains(l, {'x': 20, 'z': -2})
# x in both g and l; use it
exec "global x; x = x - 1" in g, l
_contains(g, {'x': -2, 'y': 'PYTHON', 'w': -2})
_contains(l, {'x': 20, 'z': -2})
exec "x = x + 1" in g, l
_contains(g, {'x': -2, 'y': 'PYTHON', 'w': -2})
_contains(l, {'x': 21, 'z': -2})
## Inside Function: same as last part of previous checks
def InsideFunc():
g = {'x': -1, 'y': 'python' }
l = {}
# use global
exec "if x != -1: throw" in g, l
exec "if y != 'python': throw" in g, l
_not_contains(l, 'x', 'y')
# new local
exec "x = 20; z = 2" in g, l
_contains(g, {"x":-1, "y":"python"})
_contains(l, {"x":20, "z":2})
# changes
exec "global y; y = y.upper(); z = -2" in g, l
_contains(g, {'x': -1, 'y': 'PYTHON'})
_contains(l, {'x': 20, 'z': -2})
# new global
exec "global w; w = -2" in g, l
_contains(g, {'x': -1, 'y': 'PYTHON', 'w': -2})
_contains(l, {'x': 20, 'z': -2})
# x in both g and l; use it
exec "global x; x = x - 1" in g, l
_contains(g, {'x': -2, 'y': 'PYTHON', 'w': -2})
_contains(l, {'x': 20, 'z': -2})
exec "x = x + 1" in g, l
_contains(g, {'x': -2, 'y': 'PYTHON', 'w': -2})
_contains(l, {'x': 21, 'z': -2})
InsideFunc()
unique_global_name = 987654321
class C:
exec 'a = unique_global_name'
exec "if unique_global_name != 987654321: raise AssertionError('cannott see unique_global_name')"
AreEqual(C.a, 987654321)
def f():
exec "if unique_global_name != 987654321: raise AssertionError('cannot see unique_global_name')"
def g(): exec "if unique_global_name != 987654321: raise AssertionError('cannot see unique_global_name')"
g()
f()
# exec tests
# verify passing a bad value throws...
try:
exec(3)
except TypeError: pass
else: Fail("Should already thrown (3)")
# verify exec(...) takes a code object
codeobj = compile ('1+1', '<compiled code>', 'exec')
exec(codeobj)
if is_silverlight==False:
# verify exec(...) takes a file...
fn = path_combine(testpath.temporary_dir, 'testfile.tmp')
write_to_file(fn, "x = [1,2,3,4,5]\nx.reverse()\nAssert(x == [5,4,3,2,1])\n")
f = file(fn, "r")
exec(f)
Assert(x == [5,4,3,2,1])
f.close()
# and now verify it'll take a .NET Stream as well...
if is_cli:
import System
f = System.IO.FileStream(fn, System.IO.FileMode.Open)
exec(f)
f.Close()
# verify that exec'd code has access to existing locals
qqq = 3
exec('qqq+1')
# and not to *non*-existing locals..
del qqq
try: exec('qqq+1')
except NameError: pass
else: Fail("should already thrown (qqq+1)")
exec('qqq+1', {'qqq':99})
# Test passing alternative local and global scopes to exec.
# Explicit global and local scope.
# Functional form of exec.
myloc = {}
myglob = {}
exec("a = 1; global b; b = 1", myglob, myloc)
Assert("a" in myloc)
Assert("a" not in myglob)
Assert("b" in myglob)
Assert("b" not in myloc)
# Statement form of exec.
myloc = {}
myglob = {}
exec "a = 1; global b; b = 1" in myglob, myloc
Assert("a" in myloc)
Assert("a" not in myglob)
Assert("b" in myglob)
Assert("b" not in myloc)
# Explicit global scope implies the same local scope.
# Functional form of exec.
myloc = {}
myglob = {}
exec("a = 1; global b; b = 1", myglob)
Assert("a" in myglob)
Assert("a" not in myloc)
Assert("b" in myglob)
Assert("b" not in myloc)
# Statement form of exec.
myloc = {}
myglob = {}
exec "a = 1; global b; b = 1" in myglob
Assert("a" in myglob)
Assert("a" not in myloc)
Assert("b" in myglob)
Assert("b" not in myloc)
# Testing interesting exec cases
x = "global_x"
def TryExecG(what, glob):
exec what in glob
def TryExecGL(what, glob, loc):
exec what in glob, loc
class Nothing:
pass
def MakeDict(value):
return { 'AreEqual' : AreEqual, 'AssertError' : AssertError, 'x' : value, 'str' : str }
class Mapping:
def __init__(self, value = None):
self.values = MakeDict(value)
def __getitem__(self, item):
return self.values[item]
class MyDict(dict):
def __init__(self, value = None):
self.values = MakeDict(value)
def __getitem__(self, item):
return self.values[item]
TryExecG("AreEqual(x, 'global_x')", None)
TryExecGL("AreEqual(x, 'global_x')", None, None)
AssertError(TypeError, TryExecG, "print x", Nothing())
AssertError(TypeError, TryExecGL, "print x", Nothing(), None)
AssertError(TypeError, TryExecG, "print x", Mapping())
AssertError(TypeError, TryExecGL, "print x", Mapping(), None)
TryExecG("AreEqual(x, 17)", MakeDict(17))
TryExecGL("AreEqual(x, 19)", MakeDict(19), None)
#TryExecG("AreEqual(x, 23)", MyDict(23))
#TryExecGL("AreEqual(x, 29)", MyDict(29), None)
TryExecGL("AreEqual(x, 31)", None, MakeDict(31))
AssertError(TypeError, TryExecGL, "print x", None, Nothing())
TryExecGL("AreEqual(x, 37)", None, Mapping(37))
#TryExecGL("AreEqual(x, 41)", None, MyDict(41))
# Evaluating the "in" expressions in exec statement
def f(l):
l.append("called f")
return {}
l = []
exec "pass" in f(l)
AreEqual(l, ["called f"])
def g(l):
l.append("called g")
return {}
l = []
exec "pass" in f(l), g(l)
AreEqual(l, ["called f", "called g"])
# testing exec accepts \n eolns only
def test_eolns():
def f1(sep): exec 'x = 2$y=4$'.replace('$', sep)
def f2(sep): exec '''x = 3$y = 5$'''.replace('$', sep)
def f3(sep): exec "exec '''x = 3$y = 5$'''".replace('$', sep)
for x in [f1, f2, f3]:
if is_ironpython: #http://ironpython.codeplex.com/workitem/27991
AssertError(SyntaxError, x, '\r\n')
AssertError(SyntaxError, x, '\r')
else:
temp = x('\r\n')
temp = x('\r')
AssertError(SyntaxError, x, '\a')
x('\n')
def test_set_builtins():
g = {}
exec("", g, None)
Assert('__builtins__' in g.keys())
def test_builtins_type():
x, y = {}, {}
exec 'abc = 42' in x, y
AreEqual(type(x['__builtins__']), dict)
def test_exec_locals():
exec """
def body():
AreEqual('anythingatall' in locals(), False)
body()
foozbab = 2
def body():
AreEqual('foozbab' in locals(), False)
body()
"""
run_test(__name__)
```
#### File: exascript2/Tests/test_function.py
```python
from iptest.assert_util import *
def x(a,b,c):
z = 8
if a < b:
return c
elif c < 5 :
return a + b
else:
return z
Assert(x(1,2,10) == 10)
Assert(x(2,1,4) == 3)
Assert(x(1,1,10) == 8)
def f():
pass
f.a = 10
Assert(f.a == 10)
AreEqual(f.__module__, __name__)
def g():
g.a = 20
g()
Assert(g.a == 20)
def foo(): pass
AreEqual(foo.func_code.co_filename.lower().endswith('test_function.py'), True)
AreEqual(foo.func_code.co_firstlineno, 48) # if you added lines to the top of this file you need to update this number.
# Cannot inherit from a function
def CreateSubType(t):
class SubType(t): pass
return SubType
if is_silverlight==False:
AssertErrorWithMatch(TypeError, ".*\n?.* is not an acceptable base type", CreateSubType, type(foo))
else:
try:
CreateSubType(type(foo))
except TypeError, e:
Assert(e.message.find("is not an acceptable base type") != -1)
def a(*args): return args
def b(*args): return a(*args)
AreEqual(b(1,2,3), (1,2,3))
# some coverage for Function3 code
def xwd(a=0,b=1,c=3):
z = 8
if a < b:
return c
elif c < 5 :
return a + b
else:
return z
AreEqual(x,x)
AreEqual(xwd(), 3)
AssertError(TypeError, (lambda:x()))
AreEqual(xwd(2), 3)
AssertError(TypeError, (lambda:x(1)))
AreEqual(xwd(0,5), 3)
AssertError(TypeError, (lambda:x(0,5)))
AreEqual( (x == "not-a-Function3"), False)
def y(a,b,c,d):
return a+b+c+d
def ywd(a=0, b=1, c=2, d=3):
return a+b+c+d
AreEqual(y, y)
AreEqual(ywd(), 6)
AssertError(TypeError, y)
AreEqual(ywd(4), 10)
AssertError(TypeError, y, 4)
AreEqual(ywd(4,5), 14)
AssertError(TypeError, y, 4, 5)
AreEqual(ywd(4,5,6), 18)
AssertError(TypeError, y, 4,5,6)
AreEqual( (y == "not-a-Function4"), False)
def foo(): "hello world"
AreEqual(foo.__doc__, 'hello world')
############# coverage #############
# function5
def f1(a=1, b=2, c=3, d=4, e=5): return a * b * c * d * e
def f2(a, b=2, c=3, d=4, e=5): return a * b * c * d * e
def f3(a, b, c=3, d=4, e=5): return a * b * c * d * e
def f4(a, b, c, d=4, e=5): return a * b * c * d * e
def f5(a, b, c, d, e=5): return a * b * c * d * e
def f6(a, b, c, d, e): return a * b * c * d * e
for f in (f1, f2, f3, f4, f5, f6):
AssertError(TypeError, f, 1, 1, 1, 1, 1, 1) # 6 args
AreEqual(f(10,11,12,13,14), 10 * 11 * 12 * 13 * 14) # 5 args
for f in (f1, f2, f3, f4, f5):
AreEqual(f(10,11,12,13), 10 * 11 * 12 * 13 * 5) # 4 args
for f in (f6,):
AssertError(TypeError, f, 1, 1, 1, 1)
for f in (f1, f2, f3, f4):
AreEqual(f(10,11,12), 10 * 11 * 12 * 4 * 5) # 3 args
for f in (f5, f6):
AssertError(TypeError, f, 1, 1, 1)
for f in (f1, f2, f3):
AreEqual(f(10,11), 10 * 11 * 3 * 4 * 5) # 2 args
for f in (f4, f5, f6):
AssertError(TypeError, f, 1, 1)
for f in (f1, f2):
AreEqual(f(10), 10 * 2 * 3 * 4 * 5) # 1 args
for f in (f3, f4, f5, f6):
AssertError(TypeError, f, 1)
for f in (f1,):
AreEqual(f(), 1 * 2 * 3 * 4 * 5) # no args
for f in (f2, f3, f4, f5, f6):
AssertError(TypeError, f)
# method
class C1:
def f0(self): return 0
def f1(self, a): return 1
def f2(self, a, b): return 2
def f3(self, a, b, c): return 3
def f4(self, a, b, c, d): return 4
def f5(self, a, b, c, d, e): return 5
def f6(self, a, b, c, d, e, f): return 6
def f7(self, a, b, c, d, e, f, g): return 7
class C2: pass
c1, c2 = C1(), C2()
line = ""
for i in range(8):
args = ",".join(['1'] * i)
line += "AreEqual(c1.f%d(%s), %d)\n" % (i, args, i)
line += "AreEqual(C1.f%d(c1,%s), %d)\n" % (i, args, i)
#line += "try: C1.f%d(%s) \nexcept TypeError: pass \nelse: raise AssertionError\n" % (i, args)
#line += "try: C1.f%d(c2, %s) \nexcept TypeError: pass \nelse: raise AssertionError\n" % (i, args)
#print line
exec line
def SetAttrOfInstanceMethod():
C1.f0.attr = 1
AssertError(AttributeError, SetAttrOfInstanceMethod)
C1.f0.im_func.attr = 1
AreEqual(C1.f0.attr, 1)
AreEqual(dir(C1.f0).__contains__("attr"), True)
AreEqual(C1.f0.__module__, __name__)
######################################################################################
from iptest.assert_util import *
def f(x=0, y=10, z=20, *args, **kws):
return (x, y, z), args, kws
Assert(f(10, l=20) == ((10, 10, 20), (), {'l': 20}))
Assert(f(1, *(2,), **{'z':20}) == ((1, 2, 20), (), {}))
Assert(f(*[1,2,3]) == ((1, 2, 3), (), {}))
def a(*args, **kws): return args, kws
def b(*args, **kws):
return a(*args, **kws)
Assert(b(1,2,3, x=10, y=20) == ((1, 2, 3), {'y': 20, 'x': 10}))
def b(*args, **kws):
return a(**kws)
Assert(b(1,2,3, x=10, y=20) == ((), {'y': 20, 'x': 10}))
try:
b(**[])
Assert(False)
except TypeError:
pass
def f(x, *args):
return (x, args)
AreEqual(f(1, *[2]), (1, (2,)))
AreEqual(f(7, *(i for i in range(3))), (7, (0, 1, 2,)))
AreEqual(f(9, *range(11, 13)), (9, (11, 12)))
#verify we can call sorted w/ keyword args
import operator
inventory = [('apple', 3), ('banana', 2), ('pear', 5), ('orange', 1)]
getcount = operator.itemgetter(1)
sorted_inventory = sorted(inventory, key=getcount)
# verify proper handling of keyword args for python functions
def kwfunc(a,b,c): pass
try:
kwfunc(10, 20, b=30)
Assert(False)
except TypeError:
pass
try:
kwfunc(10, None, b=30)
Assert(False)
except TypeError:
pass
try:
kwfunc(10, None, 40, b=30)
Assert(False)
except TypeError:
pass
if is_cli or is_silverlight:
import System
# Test Hashtable and Dictionary on desktop, and just Dictionary in Silverlight
# (Hashtable is not available)
htlist = [System.Collections.Generic.Dictionary[System.Object, System.Object]()]
if not is_silverlight:
htlist += [System.Collections.Hashtable()]
for ht in htlist:
def foo(**kwargs):
return kwargs['key']
ht['key'] = 'xyz'
AreEqual(foo(**ht), 'xyz')
def foo(a,b):
return a-b
AreEqual(foo(b=1, *(2,)), 1)
# kw-args passed to init through method instance
class foo:
def __init__(self, group=None, target=None):
AreEqual(group, None)
AreEqual(target,'baz')
a = foo(target='baz')
foo.__init__(a, target='baz')
# call a params method w/ no params
if is_cli or is_silverlight:
import clr
AreEqual('abc\ndef'.Split()[0], 'abc')
AreEqual('abc\ndef'.Split()[1], 'def')
x = 'a bc def'.Split()
AreEqual(x[0], 'a')
AreEqual(x[1], 'bc')
AreEqual(x[2], '')
AreEqual(x[3], '')
AreEqual(x[4], 'def')
# calling Double.ToString(...) should work - Double is
# an OpsExtensibleType and doesn't define __str__ on this
# overload
AreEqual('1.00', System.Double.ToString(1.0, 'f'))
######################################################################################
# Incorrect number of arguments
def f(a): pass
AssertErrorWithMessage(TypeError, "f() takes exactly 1 argument (0 given)", f)
AssertErrorWithMessage(TypeError, "f() takes exactly 1 argument (3 given)", f, 1, 2, 3)
AssertErrorWithMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", f, dummy=2)
AssertErrorWithMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", f, dummy=2)
#AssertError calls f(*args), which generates a different AST than f(1,2,3)
AssertErrorWithMessage(TypeError, "f() takes exactly 1 argument (0 given)", lambda:f())
AssertErrorWithMessage(TypeError, "f() takes exactly 1 argument (3 given)", lambda:f(1, 2, 3))
AssertErrorWithMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", lambda:f(dummy=2))
AssertErrorWithMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", lambda:f(1, dummy=2))
def f(a,b,c,d,e,f,g,h,i,j): pass
AssertErrorWithMessage(TypeError, "f() takes exactly 10 arguments (0 given)", f)
AssertErrorWithMessage(TypeError, "f() takes exactly 10 arguments (3 given)", f, 1, 2, 3)
AssertErrorWithMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", f, dummy=2)
AssertErrorWithMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", f, dummy=2)
AssertErrorWithMessage(TypeError, "f() takes exactly 10 arguments (0 given)", lambda:f())
AssertErrorWithMessage(TypeError, "f() takes exactly 10 arguments (3 given)", lambda:f(1, 2, 3))
AssertErrorWithMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", lambda:f(dummy=2))
AssertErrorWithMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", lambda:f(1, dummy=2))
def f(a, b=2): pass
AssertErrorWithMessage(TypeError, "f() takes at least 1 argument (0 given)", f)
AssertErrorWithMessage(TypeError, "f() takes at most 2 arguments (3 given)", f, 1, 2, 3)
if is_cpython: #CPython bug 9326
AssertErrorWithMessage(TypeError, "f() takes at least 1 argument (1 given)", f, b=2)
else:
AssertErrorWithMessage(TypeError, "f() takes at least 1 non-keyword argument (0 given)", f, b=2)
AssertErrorWithMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", f, dummy=3)
AssertErrorWithMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", f, b=2, dummy=3)
AssertErrorWithMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", f, 1, dummy=3)
AssertErrorWithMessage(TypeError, "f() takes at least 1 argument (0 given)", lambda:f())
AssertErrorWithMessage(TypeError, "f() takes at most 2 arguments (3 given)", lambda:f(1, 2, 3))
if is_cpython: #CPython bug 9326
AssertErrorWithMessage(TypeError, "f() takes at least 1 argument (1 given)", lambda:f(b=2))
else:
AssertErrorWithMessage(TypeError, "f() takes at least 1 non-keyword argument (0 given)", lambda:f(b=2))
AssertErrorWithMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", lambda:f(dummy=3))
AssertErrorWithMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", lambda:f(b=2, dummy=3))
AssertErrorWithMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", lambda:f(1, dummy=3))
def f(a, *argList): pass
AssertErrorWithMessage(TypeError, "f() takes at least 1 argument (0 given)", f)
AssertErrorWithMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", f, dummy=2)
AssertErrorWithMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", f, 1, dummy=2)
AssertErrorWithMessage(TypeError, "f() takes at least 1 argument (0 given)", lambda:f())
AssertErrorWithMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", lambda:f(dummy=2))
AssertErrorWithMessage(TypeError, "f() got an unexpected keyword argument 'dummy'", lambda:f(1, dummy=2))
def f(a, **keywordDict): pass
AssertErrorWithMessage(TypeError, "f() takes exactly 1 argument (0 given)", f)
AssertErrorWithMessage(TypeError, "f() takes exactly 1 argument (3 given)", f, 1, 2, 3)
if is_cpython: #CPython bug 9326
AssertErrorWithMessage(TypeError, "f() takes exactly 1 argument (0 given)", f, dummy=2)
AssertErrorWithMessage(TypeError, "f() takes exactly 1 argument (0 given)", f, dummy=2, dummy2=3)
else:
AssertErrorWithMessage(TypeError, "f() takes exactly 1 non-keyword argument (0 given)", f, dummy=2)
AssertErrorWithMessage(TypeError, "f() takes exactly 1 non-keyword argument (0 given)", f, dummy=2, dummy2=3)
AssertErrorWithMessage(TypeError, "f() takes exactly 1 argument (0 given)", lambda:f())
AssertErrorWithMessage(TypeError, "f() takes exactly 1 argument (3 given)", lambda:f(1, 2, 3))
if is_cpython: #CPython bug 9326
AssertErrorWithMessage(TypeError, "f() takes exactly 1 argument (0 given)", lambda:f(dummy=2))
AssertErrorWithMessage(TypeError, "f() takes exactly 1 argument (0 given)", lambda:f(dummy=2, dummy2=3))
else:
AssertErrorWithMessage(TypeError, "f() takes exactly 1 non-keyword argument (0 given)", lambda:f(dummy=2))
AssertErrorWithMessage(TypeError, "f() takes exactly 1 non-keyword argument (0 given)", lambda:f(dummy=2, dummy2=3))
AssertErrorWithMessages(TypeError, "abs() takes exactly 1 argument (0 given)",
"abs() takes exactly one argument (0 given)", abs)
AssertErrorWithMessages(TypeError, "abs() takes exactly 1 argument (3 given)",
"abs() takes exactly one argument (3 given)", abs, 1, 2, 3)
AssertErrorWithMessages(TypeError, "abs() got an unexpected keyword argument 'dummy'",
"abs() takes no keyword arguments", abs, dummy=2)
AssertErrorWithMessages(TypeError, "abs() takes exactly 1 argument (2 given)",
"abs() takes no keyword arguments", abs, 1, dummy=2)
AssertErrorWithMessages(TypeError, "abs() takes exactly 1 argument (0 given)",
"abs() takes exactly one argument (0 given)", lambda:abs())
AssertErrorWithMessages(TypeError, "abs() takes exactly 1 argument (3 given)",
"abs() takes exactly one argument (3 given)", lambda:abs(1, 2, 3))
AssertErrorWithMessages(TypeError, "abs() got an unexpected keyword argument 'dummy'",
"abs() takes no keyword arguments", lambda:abs(dummy=2))
AssertErrorWithMessages(TypeError, "abs() takes exactly 1 argument (2 given)",
"abs() takes no keyword arguments", lambda:abs(1, dummy=2))
# list([m]) has one default argument (built-in type)
#AssertErrorWithMessage(TypeError, "list() takes at most 1 argument (2 given)", list, 1, 2)
#AssertErrorWithMessage(TypeError, "'dummy' is an invalid keyword argument for this function", apply, list, [], dict({"dummy":2}))
#======== BUG 697 ===========
#AssertErrorWithMessage(TypeError, "'dummy' is an invalid keyword argument for this function", apply, list, [1], dict({"dummy":2}))
# complex([x,y]) has two default argument (OpsReflectedType type)
#AssertErrorWithMessage(TypeError, "complex() takes at most 2 arguments (3 given)", complex, 1, 2, 3)
#AssertErrorWithMessage(TypeError, "'dummy' is an invalid keyword argument for this function", apply, complex, [], dict({"dummy":2}))
#AssertErrorWithMessage(TypeError, "'dummy' is an invalid keyword argument for this function", apply, complex, [1], dict({"dummy":2}))
# bool([x]) has one default argument (OpsReflectedType and valuetype type)
#AssertErrorWithMessage(TypeError, "bool() takes at most 1 argument (2 given)", bool, 1, 2)
#AssertErrorWithMessage(TypeError, "'dummy' is an invalid keyword argument for this function", apply, bool, [], dict({"dummy":2}))
#AssertErrorWithMessage(TypeError, "'dummy' is an invalid keyword argument for this function", apply, bool, [1], dict({"dummy":2}))
class UserClass(object): pass
AssertErrorWithMessage(TypeError, "object.__new__() takes no parameters", UserClass, 1)
AssertErrorWithMessage(TypeError, "object.__new__() takes no parameters", apply, UserClass, [], dict({"dummy":2}))
class OldStyleClass: pass
AssertErrorWithMessage(TypeError, "this constructor takes no arguments", OldStyleClass, 1)
AssertErrorWithMessage(TypeError, "this constructor takes no arguments", apply, OldStyleClass, [], dict({"dummy":2}))
###############################################################################################
# accepts / returns runtype type checking tests
if is_cli or is_silverlight:
import clr
@clr.accepts(object)
def foo(x):
return x
AreEqual(foo('abc'), 'abc')
AreEqual(foo(2), 2)
AreEqual(foo(2L), 2L)
AreEqual(foo(2.0), 2.0)
AreEqual(foo(True), True)
@clr.accepts(str)
def foo(x):
return x
AreEqual(foo('abc'), 'abc')
AssertError(AssertionError, foo, 2)
AssertError(AssertionError, foo, 2L)
AssertError(AssertionError, foo, 2.0)
AssertError(AssertionError, foo, True)
@clr.accepts(str, bool)
def foo(x, y):
return x, y
AreEqual(foo('abc', True), ('abc', True))
AssertError(AssertionError, foo, ('abc',2))
AssertError(AssertionError, foo, ('abc',2L))
AssertError(AssertionError, foo, ('abc',2.0))
class bar:
@clr.accepts(clr.Self(), str)
def foo(self, x):
return x
a = bar()
AreEqual(a.foo('xyz'), 'xyz')
AssertError(AssertionError, a.foo, 2)
AssertError(AssertionError, a.foo, 2L)
AssertError(AssertionError, a.foo, 2.0)
AssertError(AssertionError, a.foo, True)
@clr.returns(str)
def foo(x):
return x
AreEqual(foo('abc'), 'abc')
AssertError(AssertionError, foo, 2)
AssertError(AssertionError, foo, 2L)
AssertError(AssertionError, foo, 2.0)
AssertError(AssertionError, foo, True)
@clr.accepts(bool)
@clr.returns(str)
def foo(x):
if x: return str(x)
else: return 0
AreEqual(foo(True), 'True')
AssertError(AssertionError, foo, 2)
AssertError(AssertionError, foo, 2)
AssertError(AssertionError, foo, False)
@clr.returns(None)
def foo(): pass
AreEqual(foo(), None)
try:
buffer()
except TypeError, e:
# make sure we get the right type name when calling w/ wrong # of args
AreEqual(str(e)[:8], 'buffer()')
#try:
# list(1,2,3)
#except TypeError, e:
# make sure we get the right type name when calling w/ wrong # of args
# AreEqual(str(e)[:6], 'list()')
# oldinstance
class foo:
def bar(self): pass
def bar1(self, xyz): pass
class foo2: pass
class foo3(object): pass
AssertError(TypeError, foo.bar)
AssertError(TypeError, foo.bar1, None, None)
AssertError(TypeError, foo.bar1, None, 'abc')
AssertError(TypeError, foo.bar1, 'xyz', 'abc')
AssertError(TypeError, foo.bar, foo2())
AssertError(TypeError, foo.bar, foo3())
# usertype
class foo(object):
def bar(self): pass
def bar1(self, xyz): pass
AssertError(TypeError, foo.bar)
AssertError(TypeError, foo.bar1, None, None)
AssertError(TypeError, foo.bar1, None, 'abc')
AssertError(TypeError, foo.bar1, 'xyz', 'abc')
AssertError(TypeError, foo.bar, foo2())
AssertError(TypeError, foo.bar, foo3())
# access a method w/ caller context w/ an args parameter.
def foo(*args):
return hasattr(*args)
AreEqual(foo('', 'index'), True)
# dispatch to a ReflectOptimized method
if is_cli and not is_silverlight:
from iptest.console_util import IronPythonInstance
from System import Environment
from sys import executable
wkdir = testpath.public_testdir
if "-X:LightweightScopes" in Environment.GetCommandLineArgs():
ipi = IronPythonInstance(executable, wkdir, "-X:LightweightScopes", "-X:BasicConsole")
else:
ipi = IronPythonInstance(executable, wkdir, "-X:BasicConsole")
if (ipi.Start()):
result = ipi.ExecuteLine("from iptest.assert_util import *")
result = ipi.ExecuteLine("load_iron_python_test()")
result = ipi.ExecuteLine("from IronPythonTest import DefaultParams")
response = ipi.ExecuteLine("DefaultParams.FuncWithDefaults(1100, z=82)")
AreEqual(response, '1184')
ipi.End()
p = ((1, 2),)
AreEqual(zip(*(p * 10)), [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1), (2, 2, 2, 2, 2, 2, 2, 2, 2, 2)])
AreEqual(zip(*(p * 10)), [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1), (2, 2, 2, 2, 2, 2, 2, 2, 2, 2)])
class A(object): pass
class B(A): pass
#unbound super
for x in [super(B), super(B,None)]:
AreEqual(x.__thisclass__, B)
AreEqual(x.__self__, None)
AreEqual(x.__self_class__, None)
# super w/ both types
x = super(B,B)
AreEqual(x.__thisclass__,B)
AreEqual(x.__self_class__, B)
AreEqual(x.__self__, B)
# super w/ type and instance
b = B()
x = super(B, b)
AreEqual(x.__thisclass__,B)
AreEqual(x.__self_class__, B)
AreEqual(x.__self__, b)
# super w/ mixed types
x = super(A,B)
AreEqual(x.__thisclass__,A)
AreEqual(x.__self_class__, B)
AreEqual(x.__self__, B)
# invalid super cases
try:
x = super(B, 'abc')
AssertUnreachable()
except TypeError:
pass
try:
super(B,A)
AssertUnreachable()
except TypeError:
pass
class A(object):
def __init__(self, name):
self.__name__ = name
def meth(self):
return self.__name__
classmeth = classmethod(meth)
class B(A): pass
b = B('derived')
AreEqual(super(B,b).__thisclass__.__name__, 'B')
AreEqual(super(B,b).__self__.__name__, 'derived')
AreEqual(super(B,b).__self_class__.__name__, 'B')
AreEqual(super(B,b).classmeth(), 'B')
# descriptor super
class A(object):
def meth(self): return 'A'
class B(A):
def meth(self):
return 'B' + self.__super.meth()
B._B__super = super(B)
b = B()
AreEqual(b.meth(), 'BA')
#################################
# class method calls - class method should get
# correct meta class.
class D(object):
@classmethod
def classmeth(cls): pass
AreEqual(D.classmeth.im_class, type)
class MetaType(type): pass
class D(object):
__metaclass__ = MetaType
@classmethod
def classmeth(cls): pass
AreEqual(D.classmeth.im_class, MetaType)
#####################################################################################
from iptest.assert_util import *
if is_cli or is_silverlight:
from _collections import *
else:
from collections import *
global init
def Assert(val):
if val == False:
raise TypeError, "assertion failed"
def runTest(testCase):
global typeMatch
global init
class foo(testCase.subtype):
def __new__(cls, param):
ret = testCase.subtype.__new__(cls, param)
Assert(ret == testCase.newEq)
Assert((ret != testCase.newEq) != True)
return ret
def __init__(self, param):
testCase.subtype.__init__(self, param)
Assert(self == testCase.initEq)
Assert((self != testCase.initEq) != True)
a = foo(testCase.param)
Assert((type(a) == foo) == testCase.match)
class TestCase(object):
__slots__ = ['subtype', 'newEq', 'initEq', 'match', 'param']
def __init__(self, subtype, newEq, initEq, match, param):
self.match = match
self.subtype = subtype
self.newEq = newEq
self.initEq = initEq
self.param = param
cases = [TestCase(int, 2, 2, True, 2),
TestCase(list, [], [2,3,4], True, (2,3,4)),
TestCase(deque, deque(), deque((2,3,4)), True, (2,3,4)),
TestCase(set, set(), set((2,3,4)), True, (2,3,4)),
TestCase(frozenset, frozenset((2,3,4)), frozenset((2,3,4)), True, (2,3,4)),
TestCase(tuple, (2,3,4), (2,3,4), True, (2,3,4)),
TestCase(str, 'abc', 'abc', True, 'abc'),
TestCase(float, 2.3, 2.3, True, 2.3),
TestCase(type, type(object), type(object), False, object),
TestCase(long, 10000000000L, 10000000000L, True, 10000000000L),
#TestCase(complex, complex(2.0, 0), complex(2.0, 0), True, 2.0), # complex is currently a struct w/ no extensible type, we fail here
# TestCase(file, 'abc', True), # ???
]
for case in cases:
runTest(case)
# verify we can call the base init directly
if is_cli and not is_silverlight:
import clr
clr.AddReferenceByPartialName('System.Windows.Forms')
from System.Windows.Forms import *
class MyForm(Form):
def __init__(self, title):
Form.__init__(self)
self.Text = title
a = MyForm('abc')
AreEqual(a.Text, 'abc')
#TestCase(bool, True, True), # not an acceptable base type
def test_func_flags():
def foo0(): pass
def foo1(*args): pass
def foo2(**args): pass
def foo3(*args, **kwargs): pass
def foo4(a): pass
def foo5(a, *args): pass
def foo6(a, **args): pass
def foo7(a, *args, **kwargs): pass
def foo8(a,b,c,d,e,f): pass
def foo9(a,b): pass
AreEqual(foo0.func_code.co_flags & 12, 0)
AreEqual(foo1.func_code.co_flags & 12, 4)
AreEqual(foo2.func_code.co_flags & 12, 8)
AreEqual(foo3.func_code.co_flags & 12, 12)
AreEqual(foo4.func_code.co_flags & 12, 0)
AreEqual(foo5.func_code.co_flags & 12, 4)
AreEqual(foo6.func_code.co_flags & 12, 8)
AreEqual(foo7.func_code.co_flags & 12, 12)
AreEqual(foo8.func_code.co_flags & 12, 0)
AreEqual(foo9.func_code.co_flags & 12, 0)
AreEqual(foo0.func_code.co_argcount, 0)
AreEqual(foo1.func_code.co_argcount, 0)
AreEqual(foo2.func_code.co_argcount, 0)
AreEqual(foo3.func_code.co_argcount, 0)
AreEqual(foo4.func_code.co_argcount, 1)
AreEqual(foo5.func_code.co_argcount, 1)
AreEqual(foo6.func_code.co_argcount, 1)
AreEqual(foo7.func_code.co_argcount, 1)
AreEqual(foo8.func_code.co_argcount, 6)
AreEqual(foo9.func_code.co_argcount, 2)
def test_big_calls():
# check various function call sizes and boundaries
for size in [3,4,5, 7,8,9, 15,16,17, 23, 24, 25, 31,32,33, 47,48,49, 63,64,65, 127, 128, 129, 254, 255, 256, 257, 258, 511,512,513, 1023,1024,1025, 2047, 2048, 2049]:
# w/o defaults
exec 'def f(' + ','.join(['a' + str(i) for i in range(size)]) + '): return ' + ','.join(['a' + str(i) for i in range(size)])
# w/ defaults
exec 'def g(' + ','.join(['a' + str(i) + '=' + str(i) for i in range(size)]) + '): return ' + ','.join(['a' + str(i) for i in range(size)])
if size <= 255 or is_cli:
# CPython allows function definitions > 255, but not calls w/ > 255 params.
exec 'a = f(' + ', '.join([str(x) for x in xrange(size)]) + ')'
AreEqual(a, tuple(xrange(size)))
exec 'a = g()'
AreEqual(a, tuple(xrange(size)))
exec 'a = g(' + ', '.join([str(x) for x in xrange(size)]) + ')'
AreEqual(a, tuple(xrange(size)))
exec 'a = f(*(' + ', '.join([str(x) for x in xrange(size)]) + '))'
AreEqual(a, tuple(xrange(size)))
def test_compile():
x = compile("print 2/3", "<string>", "exec", 8192)
Assert((x.co_flags & 8192) == 8192)
x = compile("2/3", "<string>", "eval", 8192)
AreEqual(eval(x), 2.0 / 3.0)
names = [ "", ".", "1", "\n", " ", "@", "%^",
"a", "A", "Abc", "aBC", "filename.py",
"longlonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglonglong",
"""
stuff
more stuff
last stuff
"""
]
for name in names:
AreEqual(compile("print 2/3", name, "exec", 8192).co_filename,
name)
def test_filename():
c = compile("x = 2", "test", "exec")
AreEqual(c.co_filename, 'test')
def test_name():
def f(): pass
f.__name__ = 'g'
AreEqual(f.__name__, 'g')
Assert(repr(f).startswith('<function g'))
f.func_name = 'x'
AreEqual(f.__name__, 'x')
Assert(repr(f).startswith('<function x'))
def test_argcount():
def foo0(): pass
def foo1(*args): pass
def foo2(**args): pass
def foo3(*args, **kwargs): pass
def foo4(a): pass
def foo5(a, *args): pass
def foo6(a, **args): pass
def foo7(a, *args, **kwargs): pass
def foo8(a,b,c,d,e,f): pass
def foo9(a,b): pass
AreEqual(foo0.func_code.co_argcount, 0)
AreEqual(foo1.func_code.co_argcount, 0)
AreEqual(foo2.func_code.co_argcount, 0)
AreEqual(foo3.func_code.co_argcount, 0)
AreEqual(foo4.func_code.co_argcount, 1)
AreEqual(foo5.func_code.co_argcount, 1)
AreEqual(foo6.func_code.co_argcount, 1)
AreEqual(foo7.func_code.co_argcount, 1)
AreEqual(foo8.func_code.co_argcount, 6)
AreEqual(foo9.func_code.co_argcount, 2)
def test_defaults():
defaults = [None, object, int, [], 3.14, [3.14], (None,), "a string"]
for default in defaults:
def helperFunc(): pass
AreEqual(helperFunc.func_defaults, None)
AreEqual(helperFunc.func_defaults, None)
def helperFunc1(a): pass
AreEqual(helperFunc1.func_defaults, None)
AreEqual(helperFunc1.func_defaults, None)
def helperFunc2(a=default): pass
AreEqual(helperFunc2.func_defaults, (default,))
helperFunc2(a=7)
AreEqual(helperFunc2.func_defaults, (default,))
def helperFunc3(a, b=default, c=[42]): c.append(b)
AreEqual(helperFunc3.func_defaults, (default, [42]))
helperFunc3("stuff")
AreEqual(helperFunc3.func_defaults, (default, [42, default]))
def test_splat_defaults():
def g(a, b, x=None):
return a, b, x
def f(x, *args):
return g(x, *args)
AreEqual(f(1, *(2,)), (1,2,None))
def test_argument_eval_order():
"""Check order of evaluation of function arguments"""
x = [1]
def noop(a, b, c):
pass
noop(x.append(2), x.append(3), x.append(4))
AreEqual(x, [1,2,3,4])
def test_method_attr_access():
class foo(object):
def f(self): pass
abc = 3
method = type(foo.f)
AreEqual(method(foo, 'abc').abc, 3)
@skip("interpreted") # we don't have FuncEnv's in interpret modes so this always returns None
def test_function_closure_negative():
def f(): pass
for assignment_val in [None, 1, "a string"]:
try:
f.func_closure = assignment_val
AssertUnreachable("func_closure is a read-only attribute of functions")
except TypeError, e:
pass
def test_paramless_function_call_error():
def f(): pass
try:
f(*(1, ))
AssertUnreachable()
except TypeError: pass
try:
f(**{'abc':'def'})
AssertUnreachable()
except TypeError: pass
def test_function_closure():
def f(): pass
AreEqual(f.func_closure, None)
def f():
def g(): pass
return g
AreEqual(f().func_closure, None)
def f():
x = 4
def g(): return x
return g
AreEqual(sorted([x.cell_contents for x in f().func_closure]), [4])
def f():
x = 4
def g():
y = 5
def h(): return x,y
return h
return g()
AreEqual(sorted([x.cell_contents for x in f().func_closure]), [4, 5])
# don't use z
def f():
x = 4
def g():
y = 5
z = 7
def h(): return x,y
return h
return g()
AreEqual(sorted([x.cell_contents for x in f().func_closure]), [4, 5])
def f():
x = 4
def g():
y = 5
z = 7
def h(): return x,y,z
return h
return g()
AreEqual(sorted([x.cell_contents for x in f().func_closure]), [4, 5, 7])
def f():
x = 4
a = 9
def g():
y = 5
z = 7
def h(): return x,y
return h
return g()
AreEqual(sorted([x.cell_contents for x in f().func_closure]), [4, 5])
# closure cells are not recreated
callRes = f()
a = sorted([id(x) for x in callRes.func_closure])
b = sorted([id(x) for x in callRes.func_closure])
AreEqual(a, b)
def f():
x = 4
a = 9
def g():
y = 5
z = 7
def h(): return x,y,a,z
return h
return g()
AreEqual(sorted([x.cell_contents for x in f().func_closure]), [4, 5, 7, 9])
AssertError(TypeError, hash, f().func_closure[0])
def f():
x = 5
def g():
return x
return g
def h():
x = 5
def g():
return x
return g
def j():
x = 6
def g():
return x
return g
AreEqual(f().func_closure[0], h().func_closure[0])
Assert(f().func_closure[0] != j().func_closure[0])
# <cell at 45: int object at 44>
Assert(repr(f().func_closure[0]).startswith('<cell at '))
Assert(repr(f().func_closure[0]).find(': int object at ') != -1)
def test_func_code():
def foo(): pass
def assign(): foo.func_code = None
AssertError(TypeError, assign)
def def_func_doc():
foo.func_doc = 'abc'
AreEqual(foo.__doc__, 'abc')
foo.__doc__ = 'def'
AreEqual(foo.func_doc, 'def')
foo.func_doc = None
AreEqual(foo.__doc__, None)
AreEqual(foo.func_doc, None)
def test_func_defaults():
def f(a, b): return (a, b)
f.func_defaults = (1,2)
AreEqual(f(), (1,2))
f.func_defaults = (1,2,3,4)
AreEqual(f(), (3,4))
f.func_defaults = None
AssertError(TypeError, f)
f.func_defaults = (1,2)
AreEqual(f.func_defaults, (1,2))
del f.func_defaults
AreEqual(f.func_defaults, None)
del f.func_defaults
AreEqual(f.func_defaults, None)
def func_with_many_args(one, two, three, four, five, six, seven, eight, nine, ten, eleven=None, twelve=None, thirteen=None, fourteen=None, fifteen=None, sixteen=None, seventeen=None, eighteen=None, nineteen=None):
print 'hello'
func_with_many_args(None, None, None, None, None, None, None, None, None, None)
def test_func_dict():
def f(): pass
f.abc = 123
AreEqual(f.func_dict, {'abc': 123})
f.func_dict = {'def': 'def'}
AreEqual(hasattr(f, 'def'), True)
AreEqual(getattr(f, 'def'), 'def')
f.func_dict = {}
AreEqual(hasattr(f, 'abc'), False)
AreEqual(hasattr(f, 'def'), False)
AssertError(TypeError, lambda : delattr(f, 'func_dict'))
AssertError(TypeError, lambda : delattr(f, '__dict__'))
def test_method():
class C:
def method(self): pass
method = type(C.method)(id, None, 'abc')
AreEqual(method.im_class, 'abc')
class myobj:
def __init__(self, val):
self.val = val
self.called = []
def __hash__(self):
self.called.append('hash')
return hash(self.val)
def __eq__(self, other):
self.called.append('eq')
return self.val == other.val
def __call__(*args): pass
func1, func2 = myobj(2), myobj(2)
inst1, inst2 = myobj(3), myobj(3)
method = type(C().method)
m1 = method(func1, inst1)
m2 = method(func2, inst2)
AreEqual(m1, m2)
Assert('eq' in func1.called)
Assert('eq' in inst1.called)
hash(m1)
Assert('hash' in func1.called)
Assert('hash' in inst1.called)
def test_function_type():
def f1(): pass
def f2(a): pass
def f3(a, b, c): pass
def f4(*a, **b): pass
def decorator(f): return f
@decorator
def f5(a): pass
for x in [ f2, f3, f4, f5]:
AreEqual(type(f1), type(x))
def test_name_mangled_params():
def f1(__a): pass
def f2(__a): return __a
def f3(a, __a): return __a
def f4(_a, __a): return _a + __a
f1("12")
AreEqual(f2("hello"), "hello")
AreEqual(f3("a","b"), "b")
AreEqual(f4("a","b"), "ab")
def test_splat_none():
def f(*args): pass
def g(**kwargs): pass
def h(*args, **kwargs): pass
#CodePlex 20250
AssertErrorWithMessage(TypeError, "f() argument after * must be a sequence, not NoneType",
lambda : f(*None))
AssertErrorWithMessage(TypeError, "g() argument after ** must be a mapping, not NoneType",
lambda : g(**None))
AssertErrorWithMessage(TypeError, "h() argument after ** must be a mapping, not NoneType",
lambda : h(*None, **None))
def test_exec_funccode():
# can't exec a func code w/ parameters
def f(a, b, c): print a, b, c
AssertError(TypeError, lambda : eval(f.func_code))
# can exec *args/**args
def f(*args): pass
exec f.func_code in {}, {}
def f(*args, **kwargs): pass
exec f.func_code in {}, {}
# can't exec function which closes over vars
def f():
x = 2
def g():
print x
return g.func_code
AssertError(TypeError, lambda : eval(f()))
def test_exec_funccode_filename():
import sys
mod = type(sys)('fake_mod_name')
mod.__file__ = 'some file'
exec "def x(): pass" in mod.__dict__
AreEqual(mod.x.func_code.co_filename, '<string>')
# defined globally because unqualified exec isn't allowed in
# a nested function.
def unqualified_exec():
print x
exec ""
def test_func_code_variables():
def CompareCodeVars(code, varnames, names, freevars, cellvars):
AreEqual(code.co_varnames, varnames)
AreEqual(code.co_names, names)
AreEqual(code.co_freevars, freevars)
AreEqual(code.co_cellvars, cellvars)
# simple local
def f():
a = 2
CompareCodeVars(f.func_code, ('a', ), (), (), ())
# closed over var
def f():
a = 2
def g():
print a
return g
CompareCodeVars(f.func_code, ('g', ), (), (), ('a', ))
CompareCodeVars(f().func_code, (), (), ('a', ), ())
# tuple parameters
def f((a, b)): pass
CompareCodeVars(f.func_code, ('.0', 'a', 'b'), (), (), ())
def f((a, b), (c, d)): pass
CompareCodeVars(f.func_code, ('.0', '.1', 'a', 'b', 'c', 'd'), (), (), ())
# explicitly marked global
def f():
global a
a = 2
CompareCodeVars(f.func_code, (), ('a', ), (), ())
# implicit global
def f():
print some_global
CompareCodeVars(f.func_code, (), ('some_global', ), (), ())
# global that's been "closed over"
def f():
global a
a = 2
def g():
print a
return g
CompareCodeVars(f.func_code, ('g', ), ('a', ), (), ())
CompareCodeVars(f().func_code, (), ('a', ), (), ())
# multi-depth closure
def f():
a = 2
def g():
x = a
def h():
y = a
return h
return g
CompareCodeVars(f.func_code, ('g', ), (), (), ('a', ))
CompareCodeVars(f().func_code, ('x', 'h'), (), ('a', ), ())
CompareCodeVars(f()().func_code, ('y', ), (), ('a', ), ())
# multi-depth closure 2
def f():
a = 2
def g():
def h():
y = a
return h
return g
CompareCodeVars(f.func_code, ('g', ), (), (), ('a', ))
CompareCodeVars(f().func_code, ('h', ), (), ('a', ), ())
CompareCodeVars(f()().func_code, ('y', ), (), ('a', ), ())
# closed over parameter
def f(a):
def g():
return a
return g
CompareCodeVars(f.func_code, ('a', 'g'), (), (), ('a', ))
CompareCodeVars(f(42).func_code, (), (), ('a', ), ())
AreEqual(unqualified_exec.func_code.co_names, ('x', ))
def test_delattr():
def f(): pass
f.abc = 42
del f.abc
def g(): f.abc
AssertError(AttributeError, g)
run_test(__name__)
```
#### File: exascript2/Tests/test_ipye.py
```python
from iptest.assert_util import *
skiptest("win32")
import sys
if not is_silverlight:
remove_ironpython_dlls(testpath.public_testdir)
load_iron_python_dll()
# setup Scenario tests in module from EngineTest.cs
# this enables us to see the individual tests that pass / fail
load_iron_python_test()
import IronPython
import IronPythonTest
et = IronPythonTest.EngineTest()
multipleexecskips = [ ]
for s in dir(et):
if s.startswith("Scenario"):
if s in multipleexecskips:
exec '@skip("multiple_execute") \ndef test_Engine_%s(): getattr(et, "%s")()' % (s, s)
else :
exec 'def test_Engine_%s(): getattr(et, "%s")()' % (s, s)
#Rowan Work Item 312902
@disabled("The ProfileDrivenCompilation feature is removed from DLR")
def test_deferred_compilation():
save1 = IronPythonTest.TestHelpers.GetContext().Options.InterpretedMode
save2 = IronPythonTest.TestHelpers.GetContext().Options.ProfileDrivenCompilation
modules = sys.modules.copy()
IronPythonTest.TestHelpers.GetContext().Options.ProfileDrivenCompilation = True # this will enable interpreted mode
Assert(IronPythonTest.TestHelpers.GetContext().Options.InterpretedMode)
try:
# Just import some modules to make sure we can switch to compilation without blowing up
import test_namebinding
import test_function
import test_tcf
finally:
IronPythonTest.TestHelpers.GetContext().Options.InterpretedMode = save1
IronPythonTest.TestHelpers.GetContext().Options.ProfileDrivenCompilation = save2
sys.modules = modules
def CreateOptions():
import sys
import clr
o = IronPython.PythonEngineOptions()
if sys.argv.count('-X:ExceptionDetail') > 0: o.ExceptionDetail = True
return o
def a():
raise System.Exception()
def b():
try:
a()
except System.Exception, e:
raise System.Exception("second", e)
def c():
try:
b()
except System.Exception, e:
x = System.Exception("first", e)
return x
#Rowan Work Item 312902
@skip("silverlight", "multiple_execute")
def test_formatexception():
try:
import Microsoft.Scripting
from IronPython.Hosting import Python
pe = Python.CreateEngine()
service = pe.GetService[Microsoft.Scripting.Hosting.ExceptionOperations]()
AssertError(TypeError, service.FormatException, None)
exc_string = service.FormatException(System.Exception("first",
System.Exception("second",
System.Exception())))
AreEqual(exc_string, 'Traceback (most recent call last):\r\nException: first')
exc_string = service.FormatException(c())
AreEqual(exc_string.count(" File "), 4)
AreEqual(exc_string.count(" line "), 4)
finally:
pass
#Rowan Work Item 31290
@skip("silverlight")
def test_formatexception_showclrexceptions():
import Microsoft.Scripting
from IronPython.Hosting import Python
pe = Python.CreateEngine({'ShowClrExceptions': True})
exc_string = pe.GetService[Microsoft.Scripting.Hosting.ExceptionOperations]().FormatException(System.Exception("first",
System.Exception("second",
System.Exception())))
AreEqual(exc_string, "Traceback (most recent call last):\r\nException: first\r\nCLR Exception: \r\n Exception\r\n: \r\nfirst\r\n Exception\r\n: \r\nsecond\r\n Exception\r\n: \r\nException of type 'System.Exception' was thrown.\r\n")
exc_string = pe.GetService[Microsoft.Scripting.Hosting.ExceptionOperations]().FormatException(c())
AreEqual(exc_string.count(" File "), 4)
AreEqual(exc_string.count(" line "), 4)
Assert(exc_string.endswith("CLR Exception: \r\n Exception\r\n: \r\nfirst\r\n Exception\r\n: \r\nsecond\r\n Exception\r\n: \r\nException of type 'System.Exception' was thrown.\r\n"))
@skip("silverlight", "multiple_execute") #CodePlex 20636 - multi-execute
def test_formatexception_exceptiondetail():
import Microsoft.Scripting
from IronPython.Hosting import Python
pe = Python.CreateEngine({'ExceptionDetail': True})
try:
x = System.Collections.Generic.Dictionary[object, object]()
x[None] = 42
except System.Exception, e:
pass
import re
exc_string = pe.GetService[Microsoft.Scripting.Hosting.ExceptionOperations]().FormatException(System.Exception("first", e))
Assert(exc_string.startswith("first"))
Assert(re.match("first\r\n( at .*ThrowArgumentNullException.*\n)? at .*Insert.*\n( at .*\n)*",exc_string) is not None)
exc_string = pe.GetService[Microsoft.Scripting.Hosting.ExceptionOperations]().FormatException(c())
Assert(exc_string.endswith("Exception: first"))
@skip("silverlight")
def test_engine_access_from_within():
import clr
from Microsoft.Scripting.Hosting import ScriptEngine
pc = clr.GetCurrentRuntime().GetLanguageByName('python')
engine = pc.GetModuleState(clr.GetClrType(ScriptEngine))
Assert(engine is not None)
def test_import_clr():
from IronPython.Hosting import Python
eng = Python.CreateEngine()
mod = Python.ImportModule(eng, 'clr')
Assert('ToString' not in eng.Operations.GetMemberNames(42))
@skip("silverlight")
def test_cp6703():
import clr
clr.AddReference("IronPython")
import IronPython
pe = IronPython.Hosting.Python.CreateEngine()
stuff = '''
import System
a = 2
globals()["b"] = None
globals().Add("c", "blah")
joe = System.Collections.Generic.KeyValuePair[object,object]("d", int(3))
globals().Add(joe)
count = 0
if globals().Contains(System.Collections.Generic.KeyValuePair[object,object]("b", None)): count += 1
if globals().Contains(System.Collections.Generic.KeyValuePair[object,object]("c", "blah")): count += 1
if globals().Contains(System.Collections.Generic.KeyValuePair[object,object]("d", int(3))): count += 1
if globals().Contains(System.Collections.Generic.KeyValuePair[object,object]("d", 3)): count += 1
if globals().Contains(System.Collections.Generic.KeyValuePair[object,object]("d", "3")): count += 1
if globals().Contains(System.Collections.Generic.KeyValuePair[object,object]("a", 2)): count += 1
'''
s = pe.CreateScope()
pe.Execute(stuff, s)
AreEqual(s.count, 6)
def test_cp20594():
import IronPython
AreEqual(IronPython.Runtime.PythonContext.GetIronPythonAssembly("IronPython").split(",", 1)[1],
IronPython.Runtime.PythonContext.GetIronPythonAssembly("IronPython.Modules").split(",", 1)[1])
def test_cp27547():
import clr
clr.AddReference('IronPython')
clr.AddReference('Microsoft.Scripting')
from IronPython.Hosting import Python
from Microsoft.Scripting import SourceCodeKind, ScriptCodeParseResult
engine = Python.CreateEngine()
scope = engine.CreateScope()
text = 'lambda'
source = engine.CreateScriptSourceFromString(text, 'stdin',
SourceCodeKind.InteractiveCode)
result = source.GetCodeProperties()
AreEqual(result, ScriptCodeParseResult.IncompleteToken)
def test_hidden_base():
from IronPythonTest import DerivedFromHiddenBase
a = DerivedFromHiddenBase()
AreEqual(a.Accessible(), 42)
AssertError(AttributeError, lambda: a.Inaccessible)
def test_cp27150():
from IronPythonTest import GenericProperty
from System import DateTime
wrapper = GenericProperty[DateTime]()
def f():
wrapper.Value = None
AssertError(TypeError, f)
#--MAIN------------------------------------------------------------------------
run_test(__name__)
#Make sure this runs last
#test_dispose()
``` |
{
"source": "0xFireball/rclonesync",
"score": 2
} |
#### File: rclonesync/Test/testrcsync.py
```python
version = "V1.2 181001"
# Revision history
# 181001 Add support for path to rclone
# 180729 Rework for rclonesync Path1/Path2 changes. Added optional path to rclonesync.py.
# 180701 New
# Todos
# none
import argparse
import sys
import re
import os.path
import os
import subprocess
import shutil
import filecmp
RCSEXEC = "../rclonesync.py"
LOCALTESTBASE = "./"
TESTDIR = "testdir/"
WORKDIR = "./testwd" + "/"
CONSOLELOGFILE = WORKDIR + "consolelog.txt"
def rcstest():
path1 = path1base + TESTDIR + "path1/"
path2 = path2base + TESTDIR + "path2/"
print ("***** Test case <{}> using Path1 <{}>, Path2 <{}>, <{}>, and <{}>"
.format(testcase, path1, path2, rcsexec, rclone))
TESTCASEROOT = "./tests/" + testcase + "/"
INITIALDIR = TESTCASEROOT + "initial/"
MODFILESDIR = TESTCASEROOT + "modfiles/"
GOLDENDIR = TESTCASEROOT + "golden/"
CHANGECMDS = TESTCASEROOT + "/ChangeCmds.txt" # File of commands for changes from initial setup state for a test
SYNCCMD = TESTCASEROOT + "/SyncCmds.txt" # File of rclonesync (and other) commands
print ("CLEAN UP any remnant test content and SET UP the INITIAL STATE on both Path1 and Path2")
if os.path.exists(WORKDIR):
shutil.rmtree(WORKDIR)
os.mkdir(WORKDIR)
testdirpath1 = TESTDIR + "path1/"
if testdirpath1 in subprocess.check_output([rclone, "lsf", path1base, "-R"]).decode("utf8"):
subprocess.call([rclone, "purge", path1])
# git tends to change file mod dates. For test stability, jam initial dates to a fixed past date.
# test cases that change files (test_changes, for example) will touch specific files to fixed new dates.
subprocess.call("find " + INITIALDIR + r' -type f -exec touch --date="2000-01-01" {} +', shell=True)
subprocess.call([rclone, "copy", INITIALDIR, path1])
subprocess.call([rclone, "sync", path1, path2])
sys.stdout.flush() # Force alignment of stdout and stderr in redirected output file.
print ("\nDO <rclonesync --first-sync> to set LSL files baseline")
subprocess.call([rcsexec, path1, path2, "--first-sync", "--workdir", WORKDIR,
"--no-datetime-log", "--rclone", rclone ])
sys.stdout.flush()
print ("RUN CHANGECMDS to apply changes from test case initial state")
with open(CHANGECMDS) as ifile:
for line in ifile:
line = line[0:line.find('#')].lstrip().rstrip() # Throw away comment and any leading & trailing whitespace.
if len(line) > 0:
if ":MSG:" in line:
print (" {}".format(line))
else:
if ":RCSEXEC:" in line:
line += " --verbose --workdir :WORKDIR: --no-datetime-log --rclone :RCLONE:"
xx = line \
.replace(":TESTCASEROOT:", TESTCASEROOT) \
.replace(":PATH1:", path1) \
.replace(":PATH2:", path2) \
.replace(":RCSEXEC:", rcsexec) \
.replace(":RCLONE:", rclone) \
.replace(":WORKDIR:", WORKDIR)
print (" {}".format(xx))
subprocess.call(xx, shell=True) # using shell=True so that touch commands can have quoted date strings
sys.stdout.flush()
print ("\nRUN SYNCCMDS (console output captured to consolelog.txt)")
with open(CONSOLELOGFILE, "w") as logfile:
with open(SYNCCMD) as ifile:
for line in ifile:
line = line[0:line.find('#')].lstrip().rstrip()
if len(line) > 0:
if ":MSG:" in line:
print (" {}".format(line))
subprocess.call(["echo", line], stdout=logfile, stderr=logfile)
else:
if ":RCSEXEC:" in line:
line += " --verbose --workdir :WORKDIR: --no-datetime-log --rclone :RCLONE:"
xx = line \
.replace(":TESTCASEROOT:", TESTCASEROOT) \
.replace(":PATH1:", path1) \
.replace(":PATH2:", path2) \
.replace(":RCSEXEC:", rcsexec) \
.replace(":RCLONE:", rclone) \
.replace(":WORKDIR:", WORKDIR)
print (" {}".format(xx))
subprocess.call("echo " + xx, stdout=logfile, stderr=logfile, shell=True)
subprocess.call(xx, stdout=logfile, stderr=logfile, shell=True)
sys.stdout.flush()
errcnt = 0
if args.golden:
print ("\nCopying run results to the testcase golden directory")
if os.path.exists(GOLDENDIR):
shutil.rmtree(GOLDENDIR)
shutil.copytree(WORKDIR, GOLDENDIR)
else:
print ("\nCOMPARE RESULTS files to the testcase golden directory")
goldenfiles = os.listdir(GOLDENDIR)
resultsfiles = os.listdir(WORKDIR)
sys.stdout.flush()
print ("----------------------------------------------------------")
if len(goldenfiles) != len(resultsfiles):
print ("MISCOMPARE - Number of Golden and Results files do notmatch:")
print (" Golden count {}: {}".format(len(goldenfiles), goldenfiles))
print (" Results count {}: {}".format(len(resultsfiles), resultsfiles))
else:
print ("Number of results files ({}) match".format(len(goldenfiles)))
for xx in goldenfiles:
if xx not in resultsfiles:
errcnt += 1
print ("File found in Golden but not in Results: <{}>".format(xx))
for xx in resultsfiles:
if xx not in goldenfiles:
errcnt += 1
print ("File found in Results but not in Golden: <{}>".format(xx))
for xx in goldenfiles:
if xx in resultsfiles:
print ("\n----------------------------------------------------------")
if filecmp.cmp (GOLDENDIR + xx, WORKDIR + xx):
print ("Match: <{}>".format(xx))
else:
if xx in resultsfiles:
errcnt += 1
print ("MISCOMPARE < Golden to > Results for: <{}>".format(xx))
sys.stdout.flush()
subprocess.call(["diff", GOLDENDIR + xx, WORKDIR + xx ])
sys.stdout.flush()
print ("\n----------------------------------------------------------")
if args.no_cleanup:
print ("SKIPPING CLEANUP of testdirs and workdir")
else:
print ("CLEANING UP testdirs and workdir")
subprocess.call([rclone, "purge", path1])
subprocess.call([rclone, "purge", path2])
shutil.rmtree(WORKDIR)
if errcnt > 0:
print ("TEST <{}> FAILED WITH {} ERRORS.\n\n".format(testcase, errcnt))
else:
print ("TEST <{}> PASSED\n\n".format(testcase))
sys.stdout.flush()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="rclonesync test engine")
parser.add_argument('Path1',
help="'local' or name of cloud service with ':'")
parser.add_argument('Path2',
help="'local' or name of cloud service with ':'")
parser.add_argument('TestCase',
help="Test case subdir name (beneath ./tests). 'ALL' to run all tests in the tests subdir")
parser.add_argument('-g', '--golden',
help="Capture output and place in testcase golden subdir",
action='store_true')
parser.add_argument('--no-cleanup',
help="Disable cleanup of Path1 and Path2 testdirs. Useful for debug.",
action='store_true')
parser.add_argument('--rclonesync',
help="Full or relative path to rclonesync Python file (default <{}>).".format(RCSEXEC),
default=RCSEXEC)
parser.add_argument('-r','--rclone',
help="Full path to rclone executable (default is rclone in path)",
default="rclone")
parser.add_argument('-V', '--version',
help="Return version number and exit.",
action='version',
version='%(prog)s ' + version)
args = parser.parse_args()
testcase = args.TestCase
rcsexec = args.rclonesync
rclone = args.rclone
try:
clouds = subprocess.check_output([rclone, 'listremotes'])
except subprocess.CalledProcessError as e:
print ("ERROR Can't get list of known remotes. Have you run rclone config?"); exit()
except:
print ("ERROR rclone not installed, or invalid --rclone path?\nError message: {}\n".format(sys.exc_info()[1])); exit()
clouds = str(clouds.decode("utf8")).split()
remoteFormat = re.compile('([\w-]+):(.*)')
if args.Path1 == "local":
path1base = LOCALTESTBASE
else:
out = remoteFormat.match(args.Path1)
if out:
path1base = out.group(1) + ':'
if path1base not in clouds:
print ("ERROR Path1 parameter <{}> not in list of configured remotes: {}".format(path1base, clouds)); exit()
else:
print ("ERROR Path1 parameter <{}> cannot be parsed. ':' missing? Configured remotes: {}".format(args.Path1, clouds)); exit()
if args.Path2 == "local":
path2base = LOCALTESTBASE
else:
out = remoteFormat.match(args.Path2)
if out:
path2base = out.group(1) + ':'
if path2base not in clouds:
print ("ERROR Path2 parameter <{}> not in list of configured remotes: {}".format(path2base, clouds)); exit()
else:
print ("ERROR Path2 parameter <{}> cannot be parsed. ':' missing? Configured remotes: {}".format(args.Path2, clouds)); exit()
if testcase != "ALL":
if os.path.exists("./tests/" + testcase):
rcstest()
else:
print ("ERROR TestCase directory <{}> not found".format(testcase)); exit()
else:
for directory in os.listdir("./tests"):
print ("===================================================================")
testcase = directory
rcstest()
``` |
{
"source": "0xFireball/SoftwareListing",
"score": 3
} |
#### File: SoftwareListing/src/stache.py
```python
import re
class StacheProcessor:
def __init__(self, content):
self.stache = r'(?:{{)\s?(WORD)\s?(?:}})'
self.cont = content
def put(self, name, value):
self.cont = re.sub(self.stache.replace('WORD', name), value, self.cont)
def clean(self, value = ''):
self.cont = re.sub(self.stache.replace('WORD', r'[\w\.]+'), value, self.cont)
def read(self):
return self.cont
``` |
{
"source": "0xflotus/ArchiveBox",
"score": 2
} |
#### File: ArchiveBox/archivebox/index.py
```python
import os
import json
from datetime import datetime
from string import Template
from distutils.dir_util import copy_tree
from config import (
OUTPUT_DIR,
TEMPLATES_DIR,
ANSI,
GIT_SHA,
FOOTER_INFO,
)
from util import (
chmod_file,
derived_link_info,
pretty_path,
check_link_structure,
check_links_structure,
)
### Homepage index for all the links
def write_links_index(out_dir, links):
"""create index.html file for a given list of links"""
check_links_structure(links)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
print('{green}[*] [{}] Updating main index files...{reset}'.format(
datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
**ANSI,
))
write_json_links_index(out_dir, links)
print(' > {}/index.json'.format(pretty_path(out_dir)))
write_html_links_index(out_dir, links)
print(' > {}/index.html'.format(pretty_path(out_dir)))
def write_json_links_index(out_dir, links):
"""write the json link index to a given path"""
check_links_structure(links)
path = os.path.join(out_dir, 'index.json')
index_json = {
'info': 'ArchiveBox Index',
'help': 'https://github.com/pirate/ArchiveBox',
'version': GIT_SHA,
'num_links': len(links),
'updated': str(datetime.now().timestamp()),
'links': links,
}
with open(path, 'w', encoding='utf-8') as f:
json.dump(index_json, f, indent=4, default=str)
chmod_file(path)
def parse_json_links_index(out_dir):
"""load the index in a given directory and merge it with the given link"""
index_path = os.path.join(out_dir, 'index.json')
if os.path.exists(index_path):
with open(index_path, 'r', encoding='utf-8') as f:
links = json.load(f)['links']
check_links_structure(links)
return links
return []
def write_html_links_index(out_dir, links):
"""write the html link index to a given path"""
check_links_structure(links)
path = os.path.join(out_dir, 'index.html')
copy_tree(os.path.join(TEMPLATES_DIR, 'static'), os.path.join(out_dir, 'static'))
with open(os.path.join(out_dir, 'robots.txt'), 'w+') as f:
f.write('User-agent: *\nDisallow: /')
with open(os.path.join(TEMPLATES_DIR, 'index.html'), 'r', encoding='utf-8') as f:
index_html = f.read()
with open(os.path.join(TEMPLATES_DIR, 'index_row.html'), 'r', encoding='utf-8') as f:
link_row_html = f.read()
link_rows = '\n'.join(
Template(link_row_html).substitute(**derived_link_info(link))
for link in links
)
template_vars = {
'num_links': len(links),
'date_updated': datetime.now().strftime('%Y-%m-%d'),
'time_updated': datetime.now().strftime('%Y-%m-%d %H:%M'),
'footer_info': FOOTER_INFO,
'git_sha': GIT_SHA,
'short_git_sha': GIT_SHA[:8],
'rows': link_rows,
}
with open(path, 'w', encoding='utf-8') as f:
f.write(Template(index_html).substitute(**template_vars))
chmod_file(path)
def patch_index_title_hack(link_url, new_title):
"""hack to update just one link's title in the link index json"""
json_path = os.path.join(OUTPUT_DIR, 'index.json')
links = parse_json_links_index(OUTPUT_DIR)
changed = False
for link in links:
if link['url'] == link_url:
link['title'] = new_title
changed = True
break
if changed:
write_json_links_index(OUTPUT_DIR, links)
### Individual link index
def write_link_index(out_dir, link):
link['updated'] = str(datetime.now().timestamp())
write_json_link_index(out_dir, link)
write_html_link_index(out_dir, link)
def write_json_link_index(out_dir, link):
"""write a json file with some info about the link"""
check_link_structure(link)
path = os.path.join(out_dir, 'index.json')
print(' √ index.json')
with open(path, 'w', encoding='utf-8') as f:
json.dump(link, f, indent=4, default=str)
chmod_file(path)
def parse_json_link_index(out_dir):
"""load the json link index from a given directory"""
existing_index = os.path.join(out_dir, 'index.json')
if os.path.exists(existing_index):
with open(existing_index, 'r', encoding='utf-8') as f:
link_json = json.load(f)
check_link_structure(link_json)
return link_json
return {}
def write_html_link_index(out_dir, link):
check_link_structure(link)
with open(os.path.join(TEMPLATES_DIR, 'link_index.html'), 'r', encoding='utf-8') as f:
link_html = f.read()
path = os.path.join(out_dir, 'index.html')
print(' √ index.html')
with open(path, 'w', encoding='utf-8') as f:
f.write(Template(link_html).substitute({
**derived_link_info(link),
# **link['latest'],
}))
chmod_file(path)
``` |
{
"source": "0xflotus/arcs",
"score": 2
} |
#### File: build_defs/emscripten/build_defs.bzl
```python
WasmLibInfo = provider(fields = [
"srcs",
"hdrs",
])
WasmBinInfo = provider(fields = [
"wasm",
])
# Default arguments to use when compiling wasm binaries with Emscripten.
# Additional context-specific args will be added in the cc_wasm_binary rule
# below.
_emscripten_args = [
"em++",
"-std=c++17",
"-Os",
"-s",
"EXPORTED_FUNCTIONS=['_malloc','_free']",
"-s",
"EMIT_EMSCRIPTEN_METADATA",
# For workspace-relative #includes:
"-I",
".",
]
def _collect_deps(srcs, hdrs, deps):
"""Builds depsets out of the given srcs, hdrs and deps."""
src_depset = depset(
srcs,
transitive = [dep[WasmLibInfo].srcs for dep in deps],
)
hdr_depset = depset(
hdrs,
transitive = [dep[WasmLibInfo].hdrs for dep in deps],
)
return src_depset, hdr_depset
def _cc_wasm_binary(ctx):
args = ctx.actions.args()
args.add_all(_emscripten_args)
# For generated #includes.
args.add("-I", ctx.genfiles_dir.path)
# Output a wasm file.
args.add("-o", ctx.outputs.wasm)
# Inputs
srcs, hdrs = _collect_deps(
srcs = ctx.files.srcs,
hdrs = ctx.files.hdrs,
deps = ctx.attr.deps,
)
args.add_all(srcs)
ctx.actions.run(
progress_message = "Compiling C++ to WebAssembly: %s" % ctx.label.name,
inputs = depset(transitive = [srcs, hdrs]),
outputs = [ctx.outputs.wasm],
arguments = [args],
executable = ctx.executable.emsdk_wrapper,
)
return [WasmBinInfo(wasm = ctx.outputs.wasm)]
cc_wasm_binary = rule(
implementation = _cc_wasm_binary,
outputs = {
"wasm": "%{name}.wasm",
},
attrs = {
"srcs": attr.label_list(allow_files = True),
"hdrs": attr.label_list(allow_files = True),
"deps": attr.label_list(providers = [WasmLibInfo]),
"emsdk_wrapper": attr.label(
default = Label("//build_defs/emscripten:emsdk_wrapper"),
executable = True,
cfg = "host",
),
},
doc = "Builds a wasm binary from C++",
)
# cc_wasm_library just collects sources and headers, it doesn't actually build
# anything.
# TODO: make this build some sort of static/dynamic library that we can link
# into the final binary.
def _cc_wasm_library(ctx):
srcs, hdrs = _collect_deps(
srcs = ctx.files.srcs,
hdrs = ctx.files.hdrs,
deps = ctx.attr.deps,
)
return [WasmLibInfo(srcs = srcs, hdrs = hdrs)]
cc_wasm_library = rule(
implementation = _cc_wasm_library,
outputs = {},
attrs = {
"srcs": attr.label_list(allow_files = True),
"hdrs": attr.label_list(allow_files = True),
"deps": attr.label_list(providers = [WasmLibInfo]),
},
doc = """
Just collects .cc and .h files, doesn't actually build anything. Wasm output is
only actually built by the cc_wasm_binary rule.
""",
)
```
#### File: kotlin/js/impl.bzl
```python
load("@io_bazel_rules_kotlin//kotlin/internal:defs.bzl", "KtJsInfo")
def kt_js_import_impl(ctx):
"""Implementation for kt_js_import.
Args:
ctx: rule context
Returns:
Providers for the build rule.
"""
if len(ctx.files.jars) != 1:
fail("a single jar should be supplied, multiple jars not supported")
jar_file = ctx.files.jars[0]
args = ctx.actions.args()
args.add("--jar", jar_file)
args.add("--out_pattern", "\\.js$")
args.add("--out", ctx.outputs.js)
args.add("--aux_pattern", "\\.js\\.map$")
args.add("--aux", ctx.outputs.js_map)
tools, _, input_manifest = ctx.resolve_command(tools = [ctx.attr._importer])
ctx.actions.run(
inputs = [jar_file],
tools = tools,
executable = ctx.executable._importer,
outputs = [
ctx.outputs.js,
ctx.outputs.js_map,
],
arguments = [args],
input_manifests = input_manifest,
)
return [
DefaultInfo(
files = depset([ctx.outputs.js, ctx.outputs.js_map]),
),
KtJsInfo(
js = ctx.outputs.js,
js_map = ctx.outputs.js_map,
jar = jar_file,
srcjar = ctx.files.srcjar[0],
),
]
```
#### File: tools/gcb_badge/main.py
```python
from google.cloud import storage, exceptions
import base64
import json
import os
from string import Template
def copy_badge(bucket_name, obj, new_obj):
client = storage.Client()
try:
bucket = client.bucket(bucket_name)
except exceptions.NotFound:
raise RuntimeError(f"Could not find bucket {bucket_name}")
else:
blob = bucket.get_blob(obj)
if blob is None:
raise RuntimeError(f"Could not find object {obj} in bucket {bucket_name}")
else:
bucket.copy_blob(blob, bucket, new_name=new_obj)
def build_badge(event, context):
"""
Background Cloud Function to be triggered by Pub/Sub.
Updates repository build badge. Triggered by incoming
pubsub messages from Google Cloud Build.
"""
decoded = base64.b64decode(event['data']).decode('utf-8')
data = json.loads(decoded)
bucket = os.environ['BADGES_BUCKET']
try:
repo = data['source']['repoSource']['repoName']
branch = data['source']['repoSource']['branchName']
if repo.startswith('github_'):
# mirrored repo format: (github)_<owner>_<repo>
repo = repo.split('_', 2)[-1]
except KeyError:
# GitHub app sets these values.
repo = data['substitutions']['REPO_NAME']
branch = data['substitutions']['BRANCH_NAME']
finally:
if repo != 'arcs' and branch != 'master':
return
tmpl = os.environ.get('TEMPLATE_PATH',
'builds/${repo}/branches/${branch}.svg')
src = 'badges/{}.svg'.format(data['status'].lower())
dest = Template(tmpl).substitute(repo=repo, branch=branch)
copy_badge(bucket, src, dest)
return
``` |
{
"source": "0xflotus/audius-protocol",
"score": 2
} |
#### File: api/v1/metrics.py
```python
import logging # pylint: disable=C0302
from datetime import datetime
from flask import Flask, Blueprint
from flask_restx import Resource, Namespace, fields, reqparse
from src import api_helpers
from src.api.v1.helpers import make_response, success_response, to_dict
from .models.metrics import route_metric, app_name_metric, app_name
from src.queries.get_route_metrics import get_route_metrics
from src.queries.get_app_name_metrics import get_app_name_metrics
from src.queries.get_app_names import get_app_names
logger = logging.getLogger(__name__)
ns = Namespace('metrics', description='Metrics related operations')
route_metrics_response = make_response("metrics_reponse", ns, fields.List(fields.Nested(route_metric)))
app_name_response = make_response("app_name_response", ns, fields.List(fields.Nested(app_name)))
app_name_metrics_response = make_response("app_name_metrics_response", ns, fields.List(fields.Nested(app_name_metric)))
metrics_route_parser = reqparse.RequestParser()
metrics_route_parser.add_argument('path', required=True)
metrics_route_parser.add_argument('query_string', required=False)
metrics_route_parser.add_argument('start_time', required=True, type=int)
metrics_route_parser.add_argument('limit', required=False, type=int)
metrics_route_parser.add_argument('version', required=False, action='append')
@ns.route("/routes", doc=False)
class RouteMetrics(Resource):
@ns.expect(metrics_route_parser)
@ns.marshal_with(route_metrics_response)
def get(self):
"""Get the route metrics"""
args = metrics_route_parser.parse_args()
if args.get('limit') is None:
args['limit'] = 48
else:
args['limit'] = min(args.get('limit'), 48)
try:
args['start_time'] = datetime.utcfromtimestamp(args['start_time'])
except:
return api_helpers.error_response('Poorly formatted start_time parameter', 400)
route_metrics = get_route_metrics(args)
response = success_response(route_metrics)
return response
metrics_app_name_list_parser = reqparse.RequestParser()
metrics_app_name_list_parser.add_argument('limit', required=False, type=int)
metrics_app_name_list_parser.add_argument('offset', required=False, type=int)
@ns.route("/app_name", doc=False)
class AppNameListMetrics(Resource):
@ns.expect(metrics_app_name_list_parser)
@ns.marshal_with(app_name_response)
def get(self):
"""List all the app names"""
args = metrics_app_name_list_parser.parse_args()
if args.get('limit') is None:
args['limit'] = 100
else:
args['limit'] = min(args.get('limit'), 100)
if args.get('offset') is None:
args['offset'] = 0
app_names = get_app_names(args)
response = success_response(app_names)
return response
metrics_app_name_parser = reqparse.RequestParser()
metrics_app_name_parser.add_argument('start_time', required=True, type=int)
metrics_app_name_parser.add_argument('limit', required=False, type=int)
@ns.route("/app_name/<string:app_name>", doc=False)
class AppNameMetrics(Resource):
@ns.expect(metrics_app_name_parser)
@ns.marshal_with(app_name_metrics_response)
def get(self, app_name):
"""Get the app name metrics"""
args = metrics_app_name_parser.parse_args()
if args.get('limit') is None:
args['limit'] = 48
else:
args['limit'] = min(args.get('limit'), 48)
try:
args['start_time'] = datetime.utcfromtimestamp(args['start_time'])
except:
return api_helpers.error_response('Poorly formatted start_time parameter', 400)
app_name_metrics = get_app_name_metrics(app_name, args)
response = success_response(app_name_metrics)
return response
```
#### File: api/v1/tracks.py
```python
from urllib.parse import urljoin
import logging # pylint: disable=C0302
from flask import redirect
from flask_restx import Resource, Namespace, fields
from src.queries.get_tracks import get_tracks
from src.queries.get_track_user_creator_node import get_track_user_creator_node
from src.api.v1.helpers import abort_not_found, decode_with_abort, \
extend_track, make_response, search_parser, \
trending_parser, success_response
from .models.tracks import track
from src.queries.search_queries import SearchKind, search
from src.queries.get_trending_tracks import get_trending_tracks
from src.utils.config import shared_config
from flask.json import dumps
from src.utils.redis_cache import cache
from src.utils.redis_metrics import record_metrics
logger = logging.getLogger(__name__)
ns = Namespace('tracks', description='Track related operations')
track_response = make_response("track_response", ns, fields.Nested(track))
tracks_response = make_response(
"tracks_response", ns, fields.List(fields.Nested(track)))
@ns.route('/<string:track_id>')
class Track(Resource):
@record_metrics
@ns.doc(
id="""Get Track""",
params={'track_id': 'A Track ID'},
responses={
200: 'Success',
400: 'Bad request',
500: 'Server error'
}
)
@ns.marshal_with(track_response)
@cache(ttl_sec=5)
def get(self, track_id):
"""Fetch a track."""
decoded_id = decode_with_abort(track_id, ns)
args = {"id": [decoded_id], "with_users": True, "filter_deleted": True}
tracks = get_tracks(args)
if not tracks:
abort_not_found(track_id, ns)
single_track = extend_track(tracks[0])
return success_response(single_track)
def transform_stream_cache(stream_url):
return redirect(stream_url)
@ns.route("/<string:track_id>/stream")
class TrackStream(Resource):
@record_metrics
@ns.doc(
id="""Stream Track""",
params={'track_id': 'A Track ID'},
responses={
200: 'Success',
            206: 'Partial content',
400: 'Bad request',
416: 'Content range invalid',
500: 'Server error'
}
)
    @cache(ttl_sec=5, transform=transform_stream_cache)
def get(self, track_id):
"""
Get the track's streamable mp3 file.
This endpoint accepts the Range header for streaming.
https://developer.mozilla.org/en-US/docs/Web/HTTP/Range_requests
"""
decoded_id = decode_with_abort(track_id, ns)
args = {"track_id": decoded_id}
creator_nodes = get_track_user_creator_node(args)
if creator_nodes is None:
abort_not_found(track_id, ns)
creator_nodes = creator_nodes.split(',')
if not creator_nodes:
abort_not_found(track_id, ns)
primary_node = creator_nodes[0]
stream_url = urljoin(primary_node, 'tracks/stream/{}'.format(track_id))
return stream_url
track_search_result = make_response(
"track_search", ns, fields.List(fields.Nested(track)))
@ns.route("/search")
class TrackSearchResult(Resource):
@record_metrics
@ns.doc(
id="""Search Tracks""",
params={'query': 'Search Query'},
responses={
200: 'Success',
400: 'Bad request',
500: 'Server error'
}
)
@ns.marshal_with(track_search_result)
@ns.expect(search_parser)
@cache(ttl_sec=60)
def get(self):
"""Search for a track."""
args = search_parser.parse_args()
query = args["query"]
search_args = {
"query": query,
"kind": SearchKind.tracks.name,
"is_auto_complete": False,
"current_user_id": None,
"with_users": True,
"limit": 10,
"offset": 0
}
response = search(search_args)
tracks = response["tracks"]
tracks = list(map(extend_track, tracks))
return success_response(tracks)
@ns.route("/trending")
class Trending(Resource):
@record_metrics
@ns.doc(
id="""Trending Tracks""",
responses={
200: 'Success',
400: 'Bad request',
500: 'Server error'
}
)
@ns.marshal_with(tracks_response)
@cache(ttl_sec=30 * 60)
def get(self):
"""Gets the top 100 trending (most popular) tracks on Audius"""
args = trending_parser.parse_args()
time = args.get("time") if args.get("time") is not None else 'week'
args = {
'time': time,
'genre': args.get("genre", None),
'with_users': True
}
tracks = get_trending_tracks(args)
tracks = list(map(extend_track, tracks))
return success_response(tracks)
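# Example requests (illustrative only, not part of the original module; paths are
# relative to this namespace, the full prefix depends on where it is mounted):
#
#   GET /<track_id>            -- fetch a single track by its encoded id
#   GET /<track_id>/stream     -- resolve and redirect to the track's mp3 stream
#   GET /search?query=...      -- search for tracks matching the query
#   GET /trending?time=week    -- top 100 trending tracks (time defaults to 'week')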
``` |
{
"source": "0xflotus/autodrome",
"score": 3
} |
#### File: autodrome/envs/env.py
```python
import gym
import math
import numpy as np
from ..simulator import Simulator
from ..policeman import Policeman
class SimulatorEnv(gym.Env):
def __init__(self, simulator: Simulator, map: str):
super().__init__()
self.action_space = gym.spaces.MultiDiscrete(nvec=[3, 3]) # [Left, Straight, Right], [Accelerate, Coast, Brake]
width, height = int(Simulator.Config['r_mode_width']), int(Simulator.Config['r_mode_height'])
self.observation_space = gym.spaces.Box(0, 255, shape=[width, height, 3], dtype=np.uint8) # Raw Screen Pixels
self.map = map
self.simulator = simulator
self.simulator.start()
self.policeman = Policeman(simulator)
self.info = {'map': self.policeman.map, 'world': self.policeman.world}
self.pixels, self.data = None, None
self.viewer = None
def step(self, action: np.ndarray) -> tuple:
self.simulator.control(steer=action[0] - 1, acceleration=action[1] - 1)
self.pixels, self.data = self.simulator.frame(self.data)
if self.data.wearCabin > 0 or self.data.wearChassis > 0:
reward, done = -1, True
else:
reward, done = +1, False
return self.pixels, reward, done, self.info
def reset(self) -> np.array:
self.simulator.command(f'preview {self.map}')
self.data = self.simulator.wait()
self.pixels, self.data = self.simulator.frame(self.data)
if self.data.parkingBrake:
self.simulator.keyboard.type(' ') # Release parking brake
self.simulator.keyboard.type('4') # Switch to bumper camera
return self.pixels
def render(self, mode='human'):
if mode == 'human':
self._render_human()
if mode == 'rgb':
self._render_rgb()
def _render_human(self):
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(600, 600)
self.viewer.set_bounds(-220, +220, -220, +220)
truck = rendering.make_capsule(8, 4)
truck.set_color(0.0, 0.0, 0.0)
self.truck_transform = rendering.Transform()
truck.add_attr(self.truck_transform)
self.viewer.add_geom(truck)
for node in self.policeman.map['nodes'].values():
circle = rendering.make_circle(2)
circle.set_color(0.6, 0.6, 0.6)
dot_transform = rendering.Transform((node['position']['x'], -node['position']['z']))
circle.add_attr(dot_transform)
self.viewer.add_geom(circle)
position, orientation = self.data.worldPlacement.position, self.data.worldPlacement.orientation
self.truck_transform.set_rotation(orientation.heading * math.pi * 2 - math.pi / 2)
self.truck_transform.set_translation(position.x, -position.z)
return self.viewer.render()
def _render_rgb(self):
if self.viewer is None:
from pyglet import window
from gym.envs.classic_control import rendering
self.viewer = rendering.SimpleImageViewer()
width, height = self.observation_space.shape[0] // 2, self.observation_space.shape[1] // 2
self.viewer.window = window.Window(width, height, vsync=False, resizeable=False)
self.viewer.window.set_location(2 * width, height)
if self.pixels is not None:
            self.viewer.imshow(self.pixels[::2, ::2, [2, 1, 0]])
self.pixels = None
return self.viewer.isopen
def close(self):
self.simulator.terminate()
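# Illustrative usage sketch (not part of the original module). The concrete
# simulator class and map name below are placeholders -- any Simulator subclass
# (e.g. the ETS2 wrapper defined elsewhere in the package) and any map exported
# for it would do:
#
#   env = SimulatorEnv(simulator=ETS2(), map='indy500')
#   observation = env.reset()
#   done = False
#   while not done:
#       action = env.action_space.sample()        # random [steer, accelerate]
#       observation, reward, done, info = env.step(action)
#   env.close()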
```
#### File: autodrome/policeman/map.py
```python
import struct
import unittest
import warnings
from pathlib import Path
from pyparsing import Word, Group, Suppress, Regex, Keyword, Forward, Optional, QuotedString, ZeroOrMore, \
ParseException, alphas, alphanums, hexnums, nums, pythonStyleComment
class MapFile(dict):
""" SCS annotated file (.mbd, .base, .aux, .desc) parsed as a hierarchical tree of values, lists & dictionaries """
# region Grammar Definition and Type Constructors
class Grammar:
""" Lexical grammar of text (non-binary) SCS map (.mbd) file """
class Parse:
""" Helper class holding static methods to parse each type """
@staticmethod
def int(toks: list) -> list:
""" Parse ordinary int or big endian hex string as a 8-byte unsigned integer """
if toks[0].startswith('x'):
zeropad = toks[0][1:].ljust(16, '0')
binary = bytes.fromhex(zeropad)
toks[0] = struct.unpack('<Q', binary)[0]
elif toks[0].startswith('i'):
toks[0] = int(toks[0][1:])
else:
toks[0] = int(toks[0])
return toks
@staticmethod
def float(toks: list) -> list:
""" Parse fixed precision float or little endian hex string as a 4-byte float """
if toks[0].startswith('i'):
toks[0] = int(toks[0][1:]) / 256
else:
binary = bytes.fromhex(toks[0][1:])
toks[0] = struct.unpack('>f', binary)[0]
return toks
entry = Forward()
identifier = Word(alphas, alphanums + '_')
tokenValue = QuotedString('"')
token = Keyword('token') + identifier + Suppress(':') + tokenValue
intValue = (Word('x', hexnums) ^ Word('-' + nums)).setParseAction(Parse.int)
int = Regex('[us][1-9]+') + identifier + Suppress(':') + intValue
floatValue = (Word('&', hexnums) ^ Word('i', '-' + nums)).setParseAction(Parse.float)
float = Keyword('float') + identifier + Suppress(':') + floatValue
stringValue = QuotedString('"')
string = Keyword('string') + identifier + Suppress(':') + stringValue
fixed2Values = Group(floatValue + floatValue)
fixed2 = Keyword('fixed2') + identifier + Suppress(':') + fixed2Values
fixed3Values = Group(floatValue + floatValue + floatValue)
fixed3 = Keyword('fixed3') + identifier + Suppress(':') + fixed3Values
float4Value = Group(floatValue + floatValue + floatValue + floatValue)
float4 = Keyword('float4') + identifier + Suppress(':') + float4Value
quaternionValues = Group(floatValue + floatValue + floatValue + floatValue)
quaternion = Keyword('quaternion') + identifier + Suppress(':') + quaternionValues
structMembers = Group(ZeroOrMore(entry))
struct = Keyword('struct') + identifier + Suppress('{') + structMembers + Suppress('}')
arrayFloatItems = Group(ZeroOrMore(floatValue))
arrayFloat = Keyword('array_float') + identifier + Suppress('[') + arrayFloatItems + Suppress(']')
arrayStructItem = Suppress('struct') + Suppress(identifier) + Suppress('{') + structMembers + Suppress('}')
arrayStructItems = Group(ZeroOrMore(arrayStructItem))
arrayStruct = Keyword('array_struct') + identifier + Suppress('[') + arrayStructItems + Suppress(']')
header = Optional(Suppress('SCSAnnotatedFileV1'))
entry << Group(int ^ float ^ string ^ fixed2 ^ fixed3 ^ float4 ^ quaternion ^ token ^ struct ^ arrayFloat ^ arrayStruct)
file = header + ZeroOrMore(entry)
file.ignore(pythonStyleComment)
@classmethod
def tokenize(cls, string: str) -> list:
""" Perform lexical analysis and return the list of discovered tokens """
return cls.file.parseString(string, parseAll=True).asList()
class Reference(str):
""" Placeholder class to keep a cross reference to another entry """
pass
Constructors = {
'u8': int,
'u16': int,
's16': int,
'u32': int,
's32': int,
'u64': int,
's64': int,
'token': Reference,
'float': float,
'string': str,
'fixed2': lambda vals: {'x': vals[0], 'y': vals[1]},
'fixed3': lambda vals: {'x': vals[0], 'y': vals[1], 'z': vals[2]},
'float4': tuple,
'quaternion': lambda vals: {'w': vals[0], 'x': vals[1], 'y': vals[2], 'z': vals[3]},
'array_struct': list,
'array_float': list,
'struct': dict
}
# endregion
def __init__(self, path: Path=None):
""" Read a SCS annotated file and parse it into a hierarchical tree of values, lists & dictionaries """
super().__init__()
self.path = path
if path is None:
return
with path.open('rt') as file:
try:
content = file.read()
tokens = self.Grammar.tokenize(content)
self.parse(tokens)
except ParseException as exc:
                exc.msg = (f"{exc.msg}\n"
                           f"File: \"{path.name}\"\n"
                           f"Entry: \"{exc.line}\"")
raise exc
def __getattr__(self, item) -> object:
""" Provide nice interface to access the map file entries via dot-notation """
return self[item] if item in self else None
def parse(self, tokens: list):
""" Parse a SCS annotated file into a hierarchical tree of values, lists & dictionaries """
def structuralize(tokens: list) -> dict:
structure = {}
for entry in tokens:
type, identifier, value = entry
constructor = self.Constructors[type]
if type == 'array_struct':
members = [dict(structuralize(val)) for val in value]
value = constructor(members)
elif type == 'struct':
members = structuralize(value)
value = constructor(members)
else:
value = constructor(value)
structure[identifier] = value
return structure
self.update(structuralize(tokens))
class Map(dict):
""" SCS map data (*.mbd, *.aux, *.base, *.desc) represented as a cross-referenced dictionary of items and nodes
The SCS map files have to be exported from the ETS2/ATS editor using the `edit_save_text` console command.
The save button or `edit_save` command produce a binary map data that are not currently supported.
"""
def __init__(self, directory: Path):
""" Read a map (.mbd) file and *.aux, *.base, *.desc map files from a directory into memory """
super().__init__()
self.directory = directory
self['nodes'] = {}
self['items'] = []
auxFiles = directory.glob('*.aux')
baseFiles = directory.glob('*.base')
descFiles = directory.glob('*.desc')
mbdFile = directory.parent / (directory.name + '.mbd')
auxs = map(MapFile, auxFiles)
bases = map(MapFile, baseFiles)
descs = map(MapFile, descFiles)
mbd = MapFile(mbdFile)
self.merge(mbd)
for aux, base, desc in zip(auxs, bases, descs):
self.merge(aux)
self.merge(base)
self.merge(desc)
def __getattr__(self, item: object) -> object:
""" Provide nice interface to access the map file entries via dot-notation """
return self[item] if item in self else None
def merge(self, another: MapFile):
""" Merge with another map file and check for duplicate values """
for identifier, value in another.items():
if identifier == 'items':
self['items'].extend(value)
continue
if identifier == 'nodes':
nodes = {node['uid']: node for node in value}
self['nodes'].update(nodes)
continue
if identifier in self:
if self[identifier] != another[identifier]:
message = (f"Duplicate found during merging:\n"
f"File \"{another.path}\"\n"
f"Identifier \"{identifier}\"")
warnings.warn(message, RuntimeWarning)
self[identifier] = value
# region Unit Tests
class TestMapFile(unittest.TestCase):
types = """
SCSAnnotatedFileV1
u8 type_info: 17
u16 right_terrain_size: 500
s32 right_road_height: -33
u64 node0_uid: x7EC4DD7E7A00000
token road_look: "look24"
float right_profile_coef: &3f800000 # 1
string override_template: "none"
fixed3 position: i99088 i-2 i93331 # x:387.063 y:-0.0078125 z:364.574
quaternion rotation: &bf78fd43 &b8d810bb &3e6e00b0 &b7ce87fd # w:-0.972614 x:-0.000103028 y:0.232424 z:-2.46204e-05
"""
def testTypes(self):
tokens = MapFile.Grammar.tokenize(self.types)
correctTokens = [
['u8', 'type_info', 17],
['u16', 'right_terrain_size', 500],
['s32', 'right_road_height', -33],
['u64', 'node0_uid', 526114473086],
['token', 'road_look', 'look24'],
['float', 'right_profile_coef', 1.0],
['string', 'override_template', 'none'],
['fixed3', 'position', [387.0625, -0.0078125, 364.57421875]],
['quaternion', 'rotation', [-0.9726144671440125, -0.00010302798909833655,
0.23242449760437012, -2.462043812556658e-05]]
]
self.assertListEqual(tokens, correctTokens)
tree = MapFile()
tree.parse(tokens)
correctTree = {
'type_info': 17,
'right_terrain_size': 500,
'right_road_height': -33,
'node0_uid': 526114473086,
'road_look': MapFile.Reference('look24'),
'right_profile_coef': 1.0,
'override_template': 'none',
'position': {'x': 387.0625, 'y': -0.0078125, 'z': 364.57421875},
'rotation': {'w': -0.9726144671440125, 'x': -0.00010302798909833655,
'y': 0.23242449760437012, 'z': -2.462043812556658e-05}
}
self.assertDictEqual(tree, correctTree)
struct = """
struct node_item {
u64 uid: x7EC4DD453100000
fixed3 position: i99088 i-2 i93331 # x:387.063 y:-0.0078125 z:364.574
quaternion rotation: &bf78fd43 &b8d810bb &3e6e00b0 &b7ce87fd # w:-0.972614 x:-0.000103028 y:0.232424 z:-2.46204e-05
u64 backward_item_uid: x7EC4DD417500001
u64 forward_item_uid: x7EC4DD707E00001
u32 flags: 1
}
"""
def testStruct(self):
tokens = MapFile.Grammar.tokenize(self.struct)
correctTokens = [
['struct', 'node_item', [
['u64', 'uid', 211625559166],
['fixed3', 'position', [387.0625, -0.0078125, 364.57421875]],
['quaternion', 'rotation',
[-0.9726144671440125, -0.00010302798909833655,
0.23242449760437012, -2.462043812556658e-05]],
['u64', 'backward_item_uid', 1152922008223073406],
['u64', 'forward_item_uid', 1152922047666308222],
['u32', 'flags', 1]]
]
]
self.assertListEqual(tokens, correctTokens)
tree = MapFile()
tree.parse(tokens)
correctTree = {
'node_item': {
'uid': 211625559166,
'position': {'x': 387.0625, 'y': -0.0078125, 'z': 364.57421875},
'rotation': {'w': -0.9726144671440125, 'x': -0.00010302798909833655,
'y': 0.23242449760437012, 'z': -2.462043812556658e-05},
'forward_item_uid': 1152922047666308222,
'backward_item_uid': 1152922008223073406,
'flags': 1
}
}
self.assertDictEqual(tree, correctTree)
arrayFloat = """
array_float minimums [
&43a95780 # 338.684
&c1780000 # -15.5
&4348ae00 # 200.68
&43941300 # 296.148
&41e07800 # 28.0586
]
"""
def testArrayFloat(self):
tokens = MapFile.Grammar.tokenize(self.arrayFloat)
correctTokens = [
['array_float', 'minimums', [338.68359375, -15.5, 200.6796875, 296.1484375, 28.05859375]]
]
self.assertListEqual(tokens, correctTokens)
tree = MapFile()
tree.parse(tokens)
correctTree = {
'minimums': [338.68359375, -15.5, 200.6796875, 296.1484375, 28.05859375]
}
self.assertDictEqual(tree, correctTree)
arrayStruct = """
array_struct right_vegetation [
struct vegetation {
token vegetation: "grass"
u16 density: 4000
u8 hi_poly_distance: 50
u8 scale_type: 0
u16 start: 0
u16 end: 0
}
struct vegetation {
token vegetation: "corn"
u16 density: 8000
u8 hi_poly_distance: 500
u8 scale_type: 0
u16 start: 1
u16 end: 1
}
]
"""
def testArrayStruct(self):
tokens = MapFile.Grammar.tokenize(self.arrayStruct)
correctTokens = [
['array_struct', 'right_vegetation', [
[
['token', 'vegetation', 'grass'],
['u16', 'density', 4000],
['u8', 'hi_poly_distance', 50],
['u8', 'scale_type', 0],
['u16', 'start', 0],
['u16', 'end', 0]
],
[
['token', 'vegetation', 'corn'],
['u16', 'density', 8000],
['u8', 'hi_poly_distance', 500],
['u8', 'scale_type', 0],
['u16', 'start', 1],
['u16', 'end', 1]
]]
]
]
self.assertListEqual(tokens, correctTokens)
tree = MapFile()
tree.parse(tokens)
correctTree = {
'right_vegetation': [
{
'vegetation': 'grass',
'density': 4000,
'hi_poly_distance': 50,
'scale_type': 0,
'start': 0,
'end': 0
},
{
'vegetation': 'corn',
'density': 8000,
'hi_poly_distance': 500,
'scale_type': 0,
'start': 1,
'end': 1
}
]
}
self.assertDictEqual(tree, correctTree)
# endregion
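# Illustrative usage sketch (not part of the original module). The directory
# below is a placeholder -- it must point to map data exported with the
# `edit_save_text` console command, as described in the Map docstring:
#
#   map_data = Map(Path('~/maps/indy500/map/indy500').expanduser())
#   for uid, node in map_data['nodes'].items():
#       position = node['position']               # {'x': ..., 'y': ..., 'z': ...}
#   items = map_data['items']                      # flat list of all map items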
```
#### File: autodrome/simulator/ets2.py
```python
import timeit
import platform
import unittest
from pathlib import Path
from .simulator import Simulator
class ETS2(Simulator):
""" Derived class holding OS dependent paths to Euro Truck Simulator 2 (ETS2) game files """
if platform.system() == 'Darwin':
RootGameFolder = Path('~/Library/Application Support/Steam/steamapps/common/Euro Truck Simulator 2').expanduser()
UserGameFolder = Path('~/Library/Application Support/Euro Truck Simulator 2').expanduser()
GameExecutable = RootGameFolder / 'Euro Truck Simulator 2.app/Contents/MacOS/eurotrucks2'
TelemetryPlugin = Path(__file__).parent / 'telemetry/plugin/libautodrome.so'
if platform.system() == 'Linux':
RootGameFolder = Path('~/.steam/steam/steamapps/common/Euro Truck Simulator 2').expanduser()
        UserGameFolder = Path('~/.local/share/Euro Truck Simulator 2').expanduser()
GameExecutable = RootGameFolder / 'bin/eurotrucks2'
TelemetryPlugin = Path(__file__).parent / 'telemetry/plugin/todo.so'
if platform.system() == 'Windows':
RootGameFolder = Path('C:/Program Files (x86)/Steam/steamapps/common/Euro Truck Simulator 2')
UserGameFolder = Path('~/Documents/Euro Truck Simulator 2').expanduser()
GameExecutable = RootGameFolder / 'bin/eurotrucks2.exe'
TelemetryPlugin = Path(__file__).parent / 'telemetry/plugin/todo.dll'
SteamAppID = 227300
MapsFolder = Path(__file__).parent / '../maps/ets2/'
# region Unit Tests
class TestETS2(unittest.TestCase):
RepeatFPS = 100
MinimumFPS = 20
@unittest.skipUnless(ETS2.RootGameFolder.exists(), "ETS2 not installed")
def test_capture(self):
with ETS2() as ets2:
ets2.command('preview indy500')
seconds = timeit.timeit(lambda: ets2.frame(ets2.telemetry.data()), number=self.RepeatFPS)
self.assertGreater(self.RepeatFPS / seconds, self.MinimumFPS)
# endregion
``` |
{
"source": "0xflotus/Bashfuscator",
"score": 3
} |
#### File: core/engine/random.py
```python
import string
import random
import re
class RandomGen(object):
"""
Wrapper around :py:class:`random.SystemRandom`.
Provided for ease of use and to avoid
having to initialize a SystemRandom object
every time something random is desired.
.. note::
The default character set when generating random variable names
or strings is the alphanumeric charset, or the (almost) full ASCII
charset if :meth:`~RandomGen.setFullAsciiStrings` is called.
"""
randGen = random.SystemRandom()
_generatedVars = set()
_uniqueRandStrs = set()
_randStrCharList = [c for c in string.ascii_letters + string.digits + string.punctuation]
_randStrCharList.remove("'")
_randStrCharList.remove("/")
_reservedVars = {"auto_resume", "BASH", "BASH_ENV", "BASH_VERSINFO", "BASH_VERSION", "CDPATH", "COLUMNS", "COMP_CWORD", "COMP_LINE", "COMP_POINT", "COMPREPLY", "COMP_WORDS", "DIRSTACK", "EUID", "FCEDIT", "FIGNORE", "FUNCNAME", "GLOBIGNORE", "GROUPS", "histchars", "HISTCMD", "HISTCONTROL", "HISTFILE", "HISTFILESIZE", "HISTIGNORE", "HISTSIZE", "HOME", "HOSTFILE", "HOSTNAME", "HOSTTYPE", "IFS", "IGNOREEOF", "INPUTRC", "LANG", "LC_ALL", "LC_COLLATE", "LC_CTYPE", "LC_MESSAGES", "LC_NUMERIC", "LINENO", "LINES", "MACHTYPE", "MAIL", "MAILCHECK", "MAILPATH", "OLDPWD", "OPTARG", "OPTERR", "OPTIND", "OSTYPE", "PATH", "PIPESTATUS", "POSIXLY_CORRECT", "PPID", "PROMPT_COMMAND", "PS1", "PS2", "PS3", "PS4", "PWD", "RANDOM", "REPLY", "SECONDS", "SHELLOPTS", "SHLVL", "TIMEFORMAT", "TMOUT", "UID"}
_boblReservedStrsRegex = re.compile("DATA|END")
_boblSyntaxRegex = re.compile(r":\w+:|\^ \^|\? \?|% %|\* \*|#\d+#|&\d+&|DATA|END")
def __init__(self):
self.sizePref = None
def setFullAsciiStrings(self):
"""
Set the default charset used when generating random
variables and strings to the (almost) full ASCII charset.
Only "'" and "/" are not used.
"""
RandomGen._randStrCharList = [chr(i) for i in range(1, 128)]
RandomGen._randStrCharList.remove("'")
RandomGen._randStrCharList.remove("/")
# TODO: make this functionality local to each RandomGen instance
def forgetUniqueStrs(self):
"""
Clear the sets of previously generated variable names
and strings. Should be called when random variable
names/strings are needed but can have the same name as
previously generated variable names/strings without
causing conflicts.
"""
RandomGen._generatedVars.clear()
RandomGen._uniqueRandStrs.clear()
def randGenNum(self, min, max):
"""
Randomly generate an integer inclusively.
:param min: minimum integer that can be returned
:type min: int
:param max: maximum integer that can be returned
:type max: int
"""
return RandomGen.randGen.randint(min, max)
def randChoice(self, max):
"""
Generate a random choice. Useful when you need to choose
between a set number of choices randomly.
:param max: maximum integer that can be returned
:returns: integer from 0 to max-1 inclusively
"""
return self.randGenNum(0, max - 1)
def probibility(self, prob):
"""
Return True a certain percentage of the time.
:param prob: probability of returning True
:type prob: int
:returns: True prob percent of the time, False otherwise
"""
randNum = self.randGenNum(0, 100)
return randNum <= prob
def randSelect(self, seq):
"""
Randomly select an element from a sequence. If the argument
'seq' is a dict, a randomly selected key will be returned.
:param seq: sequence to randomly select from
:type seq: list
:returns: element from seq if seq is a list, a key if seq
is a dict, or None if seq is empty
"""
if isinstance(seq, dict):
selection = RandomGen.randGen.choice(list(seq.keys()))
elif seq:
selection = RandomGen.randGen.choice(seq)
else:
selection = None
return selection
def randShuffle(self, seq):
"""
Randomly shuffle a sequence in-place.
:param seq: sequence to shuffle randomly
:type seq: list
"""
RandomGen.randGen.shuffle(seq)
def randGenVar(self, minVarLen=None, maxVarLen=None):
"""
Generate a unique randomly named variable. Variable names can
consist of uppercase and lowercase letters, digits, and
underscores, but will always start with a letter or underscore.
        :param minVarLen: minimum length of the generated variable name
        :type minVarLen: int
        :param maxVarLen: maximum length of the generated variable name
        :type maxVarLen: int
        :returns: unique random variable name
.. note::
:meth:`~RandomGen.randUniqueStr` is called under the hood,
therefore the same performance concerns apply.
"""
minVarLen, maxVarLen = self._getSizes(minVarLen, maxVarLen)
randVarCharList = string.ascii_letters + string.digits + "_"
while True:
randomVar = self.randSelect(string.ascii_letters + "_")
randomVar += self.randGenStr(minVarLen, maxVarLen - 1, randVarCharList)
if len(randomVar) == 1 and randomVar.isdigit():
continue
if RandomGen._boblReservedStrsRegex.search(randomVar):
continue
if randomVar not in RandomGen._generatedVars and randomVar not in RandomGen._reservedVars:
break
RandomGen._generatedVars.add(randomVar)
return randomVar
def randUniqueStr(self, minStrLen=None, maxStrLen=None, charList=None, escapeChars="", noBOBL=True):
"""
Generate a random string that is guaranteed to be unique.
:param minStrLen: minimum length of generated string
:type minStrLen: int
:param maxStrLen: maximum length of generated string
:type maxStrLen: int
:param charList: list of characters that will be used when
generating the random string. If it is not specified, the
default character set will be used
:type charList: str or list of chrs
:returns: unique random string
.. note::
Runtime will increase incrementally as more and more unique
strings are generated, unless
:meth:`~RandomGen.forgetUniqueStrs` is called.
"""
minStrLen, maxStrLen = self._getSizes(minStrLen, maxStrLen)
if charList is None:
charList = RandomGen._randStrCharList
commonStrNum = 0
while True:
randStr = self.randGenStr(minStrLen, maxStrLen, charList, escapeChars, noBOBL)
if randStr not in RandomGen._uniqueRandStrs:
break
else:
commonStrNum += 1
# if 5 collisions are generated in a row, chances are that we are reaching the upper bound
# of our keyspace, so make the keyspace bigger so we can keep generating unique strings
if commonStrNum == 5:
minStrLen = maxStrLen
maxStrLen += 1
commonStrNum = 0
RandomGen._uniqueRandStrs.add(randStr)
return randStr
def randGenStr(self, minStrLen=None, maxStrLen=None, charList=None, escapeChars="", noBOBL=True):
"""
Generate a random string. Functions the same as
:meth:`~RandomGen.randUniqueStr`, the only difference being
that the generated string is NOT guaranteed to be unique.
"""
minStrLen, maxStrLen = self._getSizes(minStrLen, maxStrLen)
if charList is None:
charList = RandomGen._randStrCharList
randStrLen = RandomGen.randGen.randint(minStrLen, maxStrLen)
randStr = "".join(self.randSelect(charList) for x in range(randStrLen))
if noBOBL:
while RandomGen._boblSyntaxRegex.search(randStr):
randStr = "".join(self.randSelect(charList) for x in range(randStrLen))
# escape 'escapeChars', making sure that an already escaped char isn't
# accidentally un-escaped by adding an extra '\'
for char in escapeChars:
randStr = re.sub(r"(?<!\\)(\\{2})*(?!\\)" + re.escape(char), "\g<1>\\" + char, randStr)
return randStr
def _getSizes(self, minLen, maxLen):
if minLen is None or maxLen is None:
if self.sizePref == 1:
defaultMinLen = 1
elif self.sizePref == 2:
defaultMinLen = 4
else:
defaultMinLen = 8
defaultMaxLen = defaultMinLen * 2
if minLen is None:
minLen = defaultMinLen
if maxLen is None:
maxLen = defaultMaxLen
return (minLen, maxLen)
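# Illustrative usage sketch (not part of the original module). It exercises only
# the RandomGen API defined above, so it can be run directly without touching
# the rest of the framework.
if __name__ == "__main__":
    rand = RandomGen()
    rand.sizePref = 2                              # medium-length names/strings
    example_var = rand.randGenVar()                # unique, shell-safe variable name
    example_str = rand.randUniqueStr(escapeChars='"')
    print("variable: {0}  string: {1!r}  coin flip: {2}".format(
        example_var, example_str, rand.probibility(50)))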
```
#### File: modules/command_obfuscators/case_swapper.py
```python
from bashfuscator.core.mutators.command_obfuscator import CommandObfuscator
from bashfuscator.core.mutators.command_obfuscator import Stub
class CaseSwapper(CommandObfuscator):
def __init__(self):
super().__init__(
name="Case Swapper",
description="Flips the case of all alpha chars",
sizeRating=1,
timeRating=1,
author="capnspacehook",
reversible=True
)
self.stubs = [
Stub(
name="bash case swap expansion",
sizeRating=1,
timeRating=1,
binariesUsed=[],
fileWrite=False,
escapeQuotes=True,
stub='''? ?VAR1='CMD'* *END0* *:printf:^ ^%s^ ^"${VAR1~~}"* *END0* *'''
)
]
def mutate(self, userCmd):
obCmd = userCmd.swapcase()
return self.deobStub.genStub(obCmd)
```
#### File: modules/command_obfuscators/reverse.py
```python
from bashfuscator.core.mutators.command_obfuscator import CommandObfuscator
from bashfuscator.core.mutators.command_obfuscator import Stub
class Reverse(CommandObfuscator):
def __init__(self):
super().__init__(
name="Reverse",
description="Reverses a command",
sizeRating=1,
timeRating=1,
author="capnspacehook",
reversible=True
)
self.stubs = [
Stub(
name="printf rev",
sizeRating=1,
timeRating=1,
binariesUsed=["rev"],
fileWrite=False,
escapeQuotes=True,
stub="""* *:printf:^ ^%s^ ^'CMD'* *|* *:rev:* *END0* *"""
),
Stub(
name="herestring rev",
sizeRating=1,
timeRating=1,
binariesUsed=["rev"],
fileWrite=False,
escapeQuotes=True,
stub="""* *:rev:^ ^<<<? ?'CMD'* *END0* *"""
)
]
def mutate(self, userCmd):
obCmd = userCmd[::-1]
return self.deobStub.genStub(obCmd)
``` |
{
"source": "0xflotus/cryptosteganography",
"score": 4
} |
#### File: src/cryptosteganography/cli.py
```python
# -*- coding: utf-8 -*-
"""
A python steganography module to store messages and files AES-256 encrypted
inside an image.
"""
import argparse
import getpass
import sys
from exitstatus import ExitStatus
import pkg_resources
import cryptosteganography.utils as utils
__author__ = '<EMAIL>'
def get_parser(parse_this=None) -> argparse.ArgumentParser:
"""Get parser for user command line arguments."""
parser = argparse.ArgumentParser(
prog='cryptosteganography',
description="""
Cryptosteganography is an application to save or retrieve
an encrypted message or encrypted file concealed inside an image.
"""
)
parser.add_argument(
'-v',
'--version',
action='version',
version=pkg_resources.require('cryptosteganography')[0].version
)
subparsers = parser.add_subparsers(help='sub-command help', dest='command')
# Sub parser: Save
parser_save = subparsers.add_parser(
'save',
help='save help'
)
# Original image
parser_save.add_argument(
'-i',
'--input',
dest='input_image_file',
required=True,
help='Input image file.'
)
group_secret = parser_save.add_mutually_exclusive_group(required=True)
# Non binary secret message to hide
group_secret.add_argument(
'-m',
'--message',
dest='message',
help='Your secret message to hide (non binary).'
)
# Binary secret message to hide
group_secret.add_argument(
'-f',
'--file',
dest='message_file',
help='Your secret to hide (Text or any binary file).'
)
# Image containing the secret
parser_save.add_argument(
'-o',
'--output',
dest='output_image_file',
required=True,
help='Output image containing the secret.'
)
# Sub parser: Retrieve
parser_retrieve = subparsers.add_parser(
'retrieve',
help='retrieve help'
)
parser_retrieve.add_argument(
'-i',
'--input',
dest='input_image_file',
required=True,
help='Input image file.'
)
parser_retrieve.add_argument(
'-o',
'--output',
dest='retrieved_file',
help='Output for the binary secret file (Text or any binary file).'
)
return parser
def _save_parse_input(args):
"""Parse input args of save action"""
message = None
error = None
output_image_file = None
if args.message:
message = args.message
elif args.message_file:
message, error = utils.get_data_from_file(args.message_file)
# Validate message
if not message and not error:
error = "Failed: Message can't be empty"
return (message, error, output_image_file)
def _handle_save_action(args) -> ExitStatus:
    """Save secret in file action."""
message, error, output_image_file = _save_parse_input(args)
    # Get password (the string used to derive the encryption key)
password = getpass.getpass('Enter the key password: ').strip()
if len(password) == 0:
error = "Failed: Password can't be empty"
if not error:
output_image_file = utils.get_output_image_filename(args.output_image_file)
# Hide message and save the image
error = utils.save_output_image(
password,
args.input_image_file,
message,
output_image_file
)
if not error:
        print('Output image %s saved successfully' % output_image_file)
return ExitStatus.success
print(error)
return ExitStatus.failure
def _handle_retrieve_action(args) -> ExitStatus:
    """Retrieve secret from file action."""
secret = None
error = None
password = None
# Get password (the string used to derive the encryption key)
    password = getpass.getpass('Enter the key password: ').strip()
if len(password) == 0:
error = "Failed: Password can't be empty"
if not error:
secret, error = utils.get_secret_from_image(password, args.input_image_file)
# Print or save to a file the data
if not error and args.retrieved_file:
secret = utils.save_secret_file(secret, args.retrieved_file)
if not error:
print(secret)
return ExitStatus.success
print(error)
return ExitStatus.failure
def main() -> ExitStatus:
"""
Accept arguments and run the script.
:return:
"""
parser = get_parser()
args = parser.parse_args()
if args.command == 'save':
# Save action
return _handle_save_action(args)
elif args.command == 'retrieve':
# Retrieve action
return _handle_retrieve_action(args)
else:
parser.print_help()
return ExitStatus.failure
def init():
"""
Allow the script to be run standalone
"""
if __name__ == '__main__':
sys.exit(main())
# Run
init()
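# Example command lines (illustrative only, not part of the original module):
#
#   cryptosteganography save -i input.png -m "secret message" -o output.png
#   cryptosteganography save -i input.png -f secret.zip -o output.png
#   cryptosteganography retrieve -i output.png
#   cryptosteganography retrieve -i output.png -o recovered.zip
#
# Every command prompts for the key password used to derive the encryption key.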
``` |
{
"source": "0xflotus/cubefs",
"score": 2
} |
#### File: docker/s3tests/test_copy_object.py
```python
import time
from env import BUCKET
from base import S3TestCase, get_env_s3_client
SOURCE_KEY = "copyTest/key/sourceKey.txt"
TARGET_KEY = "copyTest/key/targetKey.txt"
SOURCE_KEY_DIR = "copyTest/dir/targetDir/"
TARGET_KEY_DIR = "copyTest/dir/sourceDir/"
SOURCE_KEY_WITH_META = "copyTest/key/withMeta/sourceKey.txt"
TARGET_KEY_WITH_META = "copyTest/key/withMeta/targetKey.txt"
SOURCE_KEY_RESET_META = "copyTest/key/reset/sourceKey.txt"
TARGET_KEY_RESET_META = "copyTest/key/reset/targetKey.txt"
SOURCE_KEY_MODIFY_META = "copyTest/key/modify/sourceKey.txt"
META_DATE_KEY_1 = "sourceKeyMetaKey1"
META_DATE_KEY_2 = "sourceKeyMetaKey2"
META_DATE_VALUE_1 = "sourceKeyMetaValue1"
META_DATE_VALUE_2 = "sourceKeyMetaValue2"
META_DATE_VALUE_1_MODIFIED = "sourceKeyMetaValue1Modified"
class CopyObjectTest(S3TestCase):
s3 = get_env_s3_client()
def __init__(self, case):
super(CopyObjectTest, self).__init__(case)
@classmethod
def setUpClass(cls):
"""
Create test data, such as putting object of source keys.
:return:
"""
cls.clear_data()
# create source object info
cls.create_key(key=SOURCE_KEY, content=b'copyTest source key content')
cls.create_key(key=SOURCE_KEY_DIR, content='')
cls.create_key(key=SOURCE_KEY_WITH_META, content=b'copyTest source key with meta data', mete_data=True)
cls.create_key(key=SOURCE_KEY_RESET_META, content=b'copyTest source key for used reset meta data')
@classmethod
def tearDownClass(cls):
"""
Clean temp data, include initialized test data, create middle temp data and result data.
:return:
"""
cls.clear_data()
@classmethod
def create_key(cls, key, content, mete_data=False):
"""
:return:
"""
if mete_data:
metadata = {META_DATE_KEY_1: META_DATE_VALUE_1, META_DATE_KEY_2: META_DATE_VALUE_2}
cls.s3.put_object(Bucket=BUCKET, Key=key, Body=content, Metadata=metadata)
else:
cls.s3.put_object(Bucket=BUCKET, Key=key, Body=content)
@classmethod
def delete_key(cls, key):
"""
:return:
"""
cls.s3.delete_object(Bucket=BUCKET, Key=key)
def __copy_object(self, s_bucket, s_key, t_bucket, t_key, is_dir=False, contain_mete_data=False):
        # sleep one second, otherwise the target key's last-modified time equals the source's
time.sleep(1)
copy_source = {'Bucket': s_bucket, 'Key': s_key}
self.s3.copy_object(CopySource=copy_source, Bucket=t_bucket, Key=t_key)
source_response = self.s3.head_object(Bucket=s_bucket, Key=s_key)
target_response = self.s3.head_object(Bucket=t_bucket, Key=t_key)
self.assertNotEqual(target_response["ETag"], "")
self.assertEqual(target_response["ETag"], source_response["ETag"])
self.assertEqual(target_response["ContentLength"], source_response["ContentLength"])
self.assertGreater(target_response["LastModified"], source_response["LastModified"])
if is_dir:
self.assertEqual(target_response["ContentLength"], 0)
if contain_mete_data:
target_meta_data = target_response["Metadata"]
# target object must have metadata
            # the metadata keys we specified come back lower-cased in the response,
            # so we lower-case our keys before looking them up
self.assertIsNotNone(target_meta_data)
self.assertTrue(META_DATE_KEY_1.lower() in target_meta_data.keys())
self.assertTrue(META_DATE_KEY_2.lower() in target_meta_data.keys())
self.assertEqual(target_meta_data[META_DATE_KEY_1.lower()], META_DATE_VALUE_1)
self.assertEqual(target_meta_data[META_DATE_KEY_2.lower()], META_DATE_VALUE_2)
@classmethod
def clear_data(cls):
cls.delete_key(key=SOURCE_KEY)
cls.delete_key(key=TARGET_KEY)
cls.delete_key(key=SOURCE_KEY_DIR)
cls.delete_key(key=TARGET_KEY_DIR)
cls.delete_key(key=SOURCE_KEY_WITH_META)
cls.delete_key(key=TARGET_KEY_WITH_META)
cls.delete_key(key=SOURCE_KEY_RESET_META)
cls.delete_key(key=TARGET_KEY_RESET_META)
cls.delete_key(key=SOURCE_KEY_MODIFY_META)
def test_copy_common_key(self):
"""
Copy common file, using default value.
:return:
"""
self.__copy_object(s_bucket=BUCKET,
s_key=SOURCE_KEY,
t_bucket=BUCKET,
t_key=TARGET_KEY)
def test_copy_dir(self):
"""
        Copy a directory: the source key is a directory (object content is empty and the key path has the suffix '/').
The target key is a directory too.
:return:
"""
self.__copy_object(s_bucket=BUCKET,
s_key=SOURCE_KEY_DIR,
t_bucket=BUCKET,
t_key=TARGET_KEY_DIR,
is_dir=True)
def test_copy_metadata(self):
"""
Copy source object metadata.
        If the source object has self-defined metadata, the target object inherits it by default.
:return:
"""
self.__copy_object(s_bucket=BUCKET,
s_key=SOURCE_KEY_WITH_META,
t_bucket=BUCKET,
t_key=TARGET_KEY_WITH_META,
contain_mete_data=True)
def test_copy_reset_metadata(self):
"""
Reset target object metadata, no matter whether source object has self_defined metadata.
:return:
"""
source_bucket = BUCKET
target_bucket = BUCKET
source_key = SOURCE_KEY_RESET_META
target_key = TARGET_KEY_RESET_META
        # sleep one second, otherwise the target key's last-modified time equals the source's
time.sleep(1)
copy_source = {'Bucket': source_bucket, 'Key': source_key}
metadata = {META_DATE_KEY_1: META_DATE_VALUE_1, META_DATE_KEY_2: META_DATE_VALUE_2}
self.s3.copy_object(CopySource=copy_source,
Bucket=target_bucket,
Key=target_key,
MetadataDirective="REPLACE",
Metadata=metadata)
source_response = self.s3.head_object(Bucket=source_bucket, Key=source_key)
target_response = self.s3.head_object(Bucket=target_bucket, Key=target_key)
# compare basic info
self.assertNotEqual(target_response["ETag"], "")
self.assertEqual(target_response["ETag"], source_response["ETag"])
self.assertEqual(target_response["ContentLength"], source_response["ContentLength"])
self.assertGreater(target_response["LastModified"], source_response["LastModified"])
# compare metadata
        # the source key carries no metadata,
        # while the target key holds the metadata supplied in the copy request
source_metadata = source_response["Metadata"]
target_metadata = target_response["Metadata"]
self.assertEqual(len(source_metadata), 0)
self.assertEqual(len(target_metadata), 2)
self.assertTrue(META_DATE_KEY_1.lower() in target_metadata.keys())
self.assertTrue(META_DATE_KEY_2.lower() in target_metadata.keys())
self.assertEqual(target_metadata[META_DATE_KEY_1.lower()], META_DATE_VALUE_1)
self.assertEqual(target_metadata[META_DATE_KEY_2.lower()], META_DATE_VALUE_2)
def test_copy_modify_metadata(self):
"""
Modify a object's metadata via specifying the target key has same path with source object,
and specify new metadata value.
:return:
"""
metadata = {META_DATE_KEY_1: META_DATE_VALUE_1}
content = "b'copyTest source key for used modify meta data'"
self.s3.put_object(Bucket=BUCKET, Key=SOURCE_KEY_MODIFY_META, Body=content, Metadata=metadata)
copy_source = {'Bucket': BUCKET, 'Key': SOURCE_KEY_MODIFY_META}
metadata = {META_DATE_KEY_1: META_DATE_VALUE_1_MODIFIED}
self.s3.copy_object(CopySource=copy_source,
Bucket=BUCKET,
Key=SOURCE_KEY_MODIFY_META,
MetadataDirective="REPLACE",
Metadata=metadata)
response = self.s3.head_object(Bucket=BUCKET, Key=SOURCE_KEY_MODIFY_META)
        # the object's metadata should now hold only the modified value
metadata = response["Metadata"]
self.assertEqual(len(metadata), 1)
self.assertTrue(META_DATE_KEY_1.lower() in metadata.keys())
self.assertEqual(metadata[META_DATE_KEY_1.lower()], META_DATE_VALUE_1_MODIFIED)
``` |
{
"source": "0xflotus/CUP",
"score": 2
} |
#### File: jenkinslib/internal/exception.py
```python
class Error(Exception):
"""Base exception of jenkins module."""
pass
class NotRunningOnJenkins(Error):
    """Current environment is not running on a jenkins slave node."""
pass
class BadParam(Error):
"""Inappropriate params."""
pass
class ParamTypeError(TypeError, BadParam):
    """Param type is incorrect."""
pass
class BadValue(ValueError, Error):
    """Value is invalid."""
pass
class RunTimeout(RuntimeError, Error):
"""Run timeout."""
pass
class NotFound(Error):
"""Resource not found."""
pass
class UnknownNode(KeyError, NotFound):
"""Node not found."""
pass
class UnknownJob(KeyError, NotFound):
"""Job not found."""
pass
class UnknownPromotion(KeyError, NotFound):
"""Promotion not found."""
class UnknownQueueItem(KeyError, NotFound):
"""QueueItem not found."""
pass
class NotBuiltYet(KeyError, NotFound):
"""Task still in queue, not built yet."""
pass
class NoBuildData(KeyError, NotFound):
"""Build data not exist."""
pass
class DeletedBuild(NoBuildData):
"""Build data not exist because it is deleted."""
pass
class NoArtifacts(KeyError, NotFound):
"""Artifacts data not exist."""
class JenkinsAPIError(Error):
"""something wrong with jenkins api."""
pass
class UnsupportedAPI(NotFound, JenkinsAPIError):
    """Jenkins api not supported on this jenkins server version."""
pass
class NotStopYet(RuntimeError, Error):
"""Task still running, not stopped yet."""
pass
class ImappropriateMethod(Error):
    """Method is inappropriate."""
pass
class ImappropriateMethodInStaticMode(ImappropriateMethod):
"""Method should not be called in static mode."""
pass
class NotImplementedMethod(NotImplementedError, ImappropriateMethod):
"""Method is not implemented."""
pass
class OSIOError(OSError, IOError, Error):
"""OS or IO errors."""
pass
class RequestError(OSIOError):
"""Something error while access jenkins."""
def __init__(self, url, method=None, status=None, msg=None, err=None, response=None):
self.url = url
self.method = method
self.status = status
self.msg = msg
self.err = err
self.response = response
def __str__(self):
err_msg = ""
if self.msg:
err_msg = ", error: %s" % self.msg
elif self.err:
err_msg = ", error: %s" % self.err
return "request failed. url={url}, method={method}, status={status}{err_msg}".format(
url = self.url,
method=self.method,
status=self.status,
err_msg=err_msg)
class PostRequired(RequestError):
"""Jenkins API requires POST and not GET."""
pass
class InvalidRequestStatus(RequestError):
"""Request status code is invalid."""
pass
class UnauthorizedError(InvalidRequestStatus):
"""Username or password is invalid."""
pass
class NetworkError(OSIOError):
"""Something wrong on network."""
pass
class FtpError(NetworkError):
"""Something wrong with ftp."""
pass
```
#### File: jenkinslib/internal/jenkins.py
```python
import os
import cup
import cup.jenkinslib.internal
from cup.jenkinslib.internal import artifacts
from cup.jenkinslib.internal import base
from cup.jenkinslib.internal import build
from cup.jenkinslib.internal import exception
from cup.jenkinslib.internal import job
from cup.jenkinslib.internal import jobs
from cup.jenkinslib.internal import promotion
from cup.jenkinslib.internal import promotion_build
from cup.jenkinslib.internal import promotions
from cup.jenkinslib.internal import node
from cup.jenkinslib.internal import nodes
from cup.jenkinslib.internal import label
from cup.jenkinslib.internal import requester
class Jenkins(base.JenkinsBase):
"""Represents a jenkins server."""
Job = job.Job
Jobs = jobs.Jobs
Build = build.Build
Promotion = promotion.Promotion
Promotions = promotions.Promotions
PromotionBuild = promotion_build.PromotionBuild
Artifacts = artifacts.Artifacts
FTPArtifacts = artifacts.FTPArtifacts
Node = node.Node
Nodes = nodes.Nodes
Label = label.Label
Requester = requester.Requester
def __init__(self, url, username=None, password=None, static=False):
"""initialize Jenkins object.
Args:
url: url of jenkins server.
username: username to login jenkins.
password: password or API token of username.
static: never update info.
"""
self.username = username
self.password = password
self.requester = self.Requester(username, password)
# host address of ftp server.
# use self.enable_ftp() to set it before connecting to ftp server.
self.ftp_host = None
super(Jenkins, self).__init__(url, poll=static, static=static)
def __str__(self):
return "Jenkins server at %s" % self.url
def get_jenkins_obj(self):
"""get object of current jenkins."""
return self
@property
def jobs(self):
"""get container of all jobs."""
return self.Jobs(self)
def get_job(self, job_name):
"""get job by name."""
return self.jobs[job_name]
def has_job(self, job_name):
"""job exists or not."""
return job_name in self.jobs
def create_job(self, job_name, config):
"""create a new job.
create a new job named 'job_name'.
same as self.jobs['job_name'] = config.
Args:
job_name: name of new job.
config: configure for new job, xml text.
Returns:
new job object.
"""
return self.jobs.create(job_name, config)
def rename_job(self, job_name, new_job_name):
"""rename a job.
Args:
job_name: name of a existing job.
new_job_name: new job name.
Returns:
new job object.
"""
return self.jobs.rename(job_name, new_job_name)
def delete_job(self, job_name):
"""delete a job by name.
Args:
job_name: job name.
"""
del self.jobs[job_name]
def __getitem__(self, job_name):
"""get job by name."""
return self.get_job(job_name)
@property
def nodes(self):
"""get nodes."""
return self.Nodes(self)
def get_node(self, node_name):
"""get node by name."""
return self.nodes[node_name]
def get_label(self, label):
"""get label by name."""
return self.Label(label, self)
def get_create_url(self):
"""url for creating job."""
return "%s/createItem" % self.url
def enable_ftp(self, host, username="", password="", port=0):
"""enable ftp server and set host, username, password, port."""
self.ftp_host = host
self.ftp_username = username
self.ftp_password = password
self.ftp_port = port
@staticmethod
def register_special_jenkins(url, cls):
"""register special jenkins.
Args:
url: url of jenkins server.
cls: special jenkins class.
"""
_url_to_jenkins[url.rstrip("/")] = cls
_name_to_jenkins[cls.__name__] = cls
@staticmethod
def get_jenkins_by_name(name):
"""get special jenkins class by name.
Args:
name: name of jenkins server.
Returns:
special jenkins class or Jenkins.
"""
return _name_to_jenkins.get(name, Jenkins)
@staticmethod
def get_jenkins_by_url(url):
"""get special jenkins class by url.
Args:
url: url of jenkins server.
Returns:
special jenkins class or Jenkins.
"""
return _url_to_jenkins.get(url.rstrip("/"), Jenkins)
_url_to_jenkins = {}
_name_to_jenkins = {}
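# Illustrative usage sketch (not part of the original module). The url and
# credentials below are placeholders:
#
#   jenkins = Jenkins('http://jenkins.example.com:8080',
#                     username='someone', password='api-token-or-password')
#   if jenkins.has_job('some-job'):
#       job = jenkins['some-job']          # same as jenkins.get_job('some-job')
#   for node_name in jenkins.nodes.keys():
#       node = jenkins.get_node(node_name)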
```
#### File: jenkinslib/internal/nodes.py
```python
from cup.jenkinslib.internal import base
from cup.jenkinslib.internal import exception
class Nodes(base.JenkinsBase):
"""A container of Node objects."""
def __init__(self, jenkins):
"""initialize Nodes object."""
url = "%s/computer" % jenkins.url
self.jenkins = jenkins
super(Nodes, self).__init__(url, static=jenkins.is_static)
def get_jenkins_obj(self):
"""get object of current jenkins."""
return self.jenkins
def __str__(self):
return "Nodes @ %s" % self.jenkins.url
def iterkeys(self):
"""get all node name."""
for item in self._data["computer"]:
yield item["displayName"]
def keys(self):
"""get all node name."""
return list(self.iterkeys())
def __contains__(self, node_name):
"""node exists or not."""
return node_name in self.keys()
def __iter__(self):
"""iterator for node names."""
return (node_name for node_name in self.iterkeys())
def iteritems(self):
"""get all nodes."""
for node_name in self.iterkeys():
node_url = self.get_node_url(node_name)
yield node_name, self.jenkins.Node(node_url, node_name, self.jenkins)
def __getitem__(self, node_name):
"""get node by name."""
for key in self.iterkeys():
if key == node_name:
node_url = self.get_node_url(node_name)
return self.jenkins.Node(node_url, node_name, self.jenkins)
raise exception.UnknownNode(node_name)
def __len__(self):
"""get node num."""
return len(self.keys())
def get_node_url(self, node_name):
"""get url of node.
Args:
node_name: node name.
Returns:
node url.
"""
if node_name.lower() == "master":
return "%s/(%s)" % (self.url, node_name)
else:
return "%s/%s" % (self.url, node_name)
```
#### File: cup/net/route.py
```python
__all__ = ['RouteInfo']
import copy
import json
import socket
import struct
class RouteInfo(object):
"""
Handler of Route Info for Linux system, for ipv4 only.
*E.g.*
::
from cup.net import route
ri = route.RouteInfo()
print json.dumps(ri.get_route_by_ip('10.32.19.92'), indent=1)
print json.dumps(ri.get_routes(), indent=1)
*Return*
::
{
"Use": "0",
"Iface": "eth1",
"Metric": "0",
"Destination": "10.0.0.0",
"Mask": "255.0.0.0",
"RefCnt": "0",
"MTU": "0",
"Window": "0",
"Gateway": "10.226.71.1",
"Flags": "0003",
"IRTT": "0"
}
[
{
"Use": "0",
"Iface": "eth1",
"Metric": "0",
"Destination": "10.226.71.0",
"Mask": "255.255.255.0",
"RefCnt": "0",
"MTU": "0",
"Window": "0",
"Gateway": "0.0.0.0",
"Flags": "0001",
"IRTT": "0"
},
{
"Use": "0",
"Iface": "eth1",
"Metric": "0",
"Destination": "169.254.0.0",
"Mask": "255.255.0.0",
"RefCnt": "0",
"MTU": "0",
"Window": "0",
"Gateway": "0.0.0.0",
"Flags": "0001",
"IRTT": "0"
},
{
"Use": "0",
"Iface": "eth1",
"Metric": "0",
"Destination": "192.168.0.0",
"Mask": "255.255.0.0",
"RefCnt": "0",
"MTU": "0",
"Window": "0",
"Gateway": "10.226.71.1",
"Flags": "0003",
"IRTT": "0"
},
{
"Use": "0",
"Iface": "eth1",
"Metric": "0",
"Destination": "172.16.0.0",
"Mask": "255.240.0.0",
"RefCnt": "0",
"MTU": "0",
"Window": "0",
"Gateway": "10.226.71.1",
"Flags": "0003",
"IRTT": "0"
},
{
"Use": "0",
"Iface": "eth1",
"Metric": "0",
"Destination": "10.0.0.0",
"Mask": "255.0.0.0",
"RefCnt": "0",
"MTU": "0",
"Window": "0",
"Gateway": "10.226.71.1",
"Flags": "0003",
"IRTT": "0"
}
]
"""
ROUTE_FILE = '/proc/net/route'
def __init__(self):
self._raw = []
self._init_proc_info()
@staticmethod
def _ip2int(ip):
"""
change ip address to integer
        :param ip: ip address as a dotted-quad string
        :return: ip address as an unsigned integer
"""
return struct.unpack("!I", socket.inet_aton(ip))[0]
@staticmethod
def _int2ip(dec):
"""
change integer to ip address
        :param dec: ip address as an unsigned integer
        :return: ip address as a dotted-quad string
"""
return socket.inet_ntoa(struct.pack("!I", dec))
@staticmethod
def _ip_check(ipaddr):
q = ipaddr.split('.')
ret = filter(
lambda x: x >= 0 and x <= 255,
map(int, filter(lambda x: x.isdigit(), q))
)
return (len(q) == 4) and (len(ret) == 4)
def _init_proc_info(self):
"""
read routeinfo from /proc/net/route, and parse it to dict
this fun will be called when __init__
"""
route_info = []
with open(self.ROUTE_FILE, 'r') as fd:
for line in fd.readlines():
if line.startswith('Iface\t'):
continue
d_item = {}
items = line.split('\t')
if len(items) != 11:
continue
d_item['Iface'] = items[0]
d_item['Destination'] = items[1]
d_item['Gateway'] = items[2]
d_item['Flags'] = items[3]
d_item['RefCnt'] = items[4]
d_item['Use'] = items[5]
d_item['Metric'] = items[6]
d_item['Mask'] = items[7]
d_item['MTU'] = items[8]
d_item['Window'] = items[9]
d_item['IRTT'] = items[10].strip('\n').rstrip(' ')
route_info.append(copy.deepcopy(d_item))
self._raw = copy.deepcopy(route_info)
def _raw2view(self, r):
"""
change raw route_info to be readable
:param r:
raw route_info
:return:
readable route_info
"""
res = copy.deepcopy(r)
res['Destination'] = self._int2ip(
socket.ntohl(int(r['Destination'], 16))
)
res['Gateway'] = self._int2ip(socket.ntohl(int(r['Gateway'], 16)))
res['Mask'] = self._int2ip(socket.ntohl(int(r['Mask'], 16)))
return res
def get_routes(self):
"""
get all the route_info of this host
:return: all the readable route_info of this host
"""
res_l = []
for r in self._raw:
res_l.append(self._raw2view(r))
return res_l
def get_interface_by_ip(self, ip):
"""
get the interface which can reach to the ip
:param ip:
destination ip
:return:
interface name which can reach to the ip.
None if failed.
"""
if self._ip_check(ip) is False:
return None
route_info = self.get_route_by_ip(ip)
if route_info is not None:
return route_info['Iface']
else:
return None
def get_route_by_ip(self, ip):
"""
get the route_info which can reach to the ip address
:param ip:
destination ip address
:return:
route_info in type of dict
"""
if self._ip_check(ip) is False:
return None
i_ip = socket.ntohl(int(self._ip2int(ip)))
raw_route = self._raw
ret = None
for r in raw_route:
if int(r['Destination'], 16) == i_ip & int(r['Mask'], 16):
if ret is None:
ret = r
continue
old = int(ret['Destination'], 16) & int(ret['Mask'], 16)
new = int(r['Destination'], 16) & int(r['Mask'], 16)
if old < new:
ret = r
elif old == new:
if int(ret['Metric']) < int(r['Metric']):
ret = r
return self._raw2view(ret)
def get_interfaces(self):
"""get all the interface of this host"""
itfs = set()
for r in self._raw:
itfs.add(r['Iface'])
return list(itfs)
def _test():
ri = RouteInfo()
# print ri._ip2int('1.0.0.0')
# print ri._raw_info
# print
# print json.dumps(ri.route, indent=1)
print json.dumps(ri.get_route_by_ip('10.32.19.92'), indent=1)
print json.dumps(ri.get_routes(), indent=1)
# print json.dumps(ri.get_routes(), indent=1)
# print ri.get_interfaces()
# print '10.32.19.1:',ri._dot_decimal_to_hex('10.32.19.1')
# print '255.255.255.0:',ri._dot_decimal_to_hex('255.255.255.0')
# print '0113200A:',ri._hex_to_dot_decimal('0113200A')
# print ri._get_net()
# print json.dumps(ri.route,indent=1)
if __name__ == '__main__':
_test()
```
#### File: cup/util/misc.py
```python
import os
import sys
class CAck(object):
"""
ack class
"""
def __init__(self, binit=False):
self._rev = binit
def getack_infobool(self):
"""
get bool info
"""
return self._rev
def setack_infobool(self, binit=False):
"""
set bool info
"""
self._rev = binit
def check_type(param, expect):
"""
check type of the param is as the same as expect's
:raise:
raise TypeError if it's not the same
"""
if type(param) != expect:
raise TypeError('TypeError. Expect:%s, got %s' % (expect, type(param)))
def check_not_none(param):
"""
check param is not None
:raise:
NameError if param is None
"""
if param is None:
raise NameError('The param has not been set before access')
def get_funcname(backstep=0):
"""
get funcname of the current code line
:param backstep:
will go backward (one layer) from the current function call stack
"""
# pylint: disable=W0212
return sys._getframe(
backstep + 1).f_code.co_name
def get_filename(backstep=0):
"""
Get the file name of the current code line.
:param backstep:
will go backward (one layer) from the current function call stack
"""
return os.path.basename(
sys._getframe(backstep + 1).f_code.co_filename) # pylint:disable=W0212
def get_lineno(backstep=0):
"""
Get the line number of the current code line
:param backstep:
will go backward (one layer) from the current function call stack
"""
return sys._getframe(backstep + 1).f_lineno # pylint:disable=W0212
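# Illustrative helper (example only, not part of the original module): one
# common way to combine the helpers above into a call-site tag for log lines.
def _example_callsite_tag():
    """example only: return '<file>:<line> <function>()' of the direct caller"""
    return '%s:%s %s()' % (get_filename(1), get_lineno(1), get_funcname(1))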
# vi:set tw=0 ts=4 sw=4 nowrap fdm=indent
```
#### File: CUP/docs/build_doc.py
```python
import os
import sys
import argparse
_NOW_PATH = os.path.dirname(os.path.abspath(__file__)) + '/'
_TOP_PATH = os.path.abspath(_NOW_PATH + '/../')
# sys.path.insert(0, _NOW_PATH)
sys.path.insert(0, _TOP_PATH)
from cup import version
from cup.shell import oper
class DocGenerator(object):
    def __init__(self, opts):
        """initialize with a dict of sphinx-apidoc command line options"""
self._kv_opts = opts
def build_rst(self, github=True):
"""build rst for cup"""
exclude = ''
if github:
exclude = ' cup/thirdp cup/bidu '
else:
exclude = ' cup/thirdp'
        cmd = 'sphinx-apidoc'
        for key in self._kv_opts:
            cmd = '{0} {1} {2}'.format(cmd, key, self._kv_opts[key])
        # append the package dir and the paths excluded from doc generation
        cmd = '{0} {1}/cup {2}'.format(cmd, _TOP_PATH, exclude)
        print cmd
        shell = oper.ShellExec()
        ret = shell.run(cmd)
if __name__ == '__main__':
    helpinfo = "generate cup html docs"
    parser = argparse.ArgumentParser(description=helpinfo)
    helpinfo = "conf file of sphinx"
    parser.add_argument('-c', '--conf', type=str, help=helpinfo)
    # parse args up front so '--help' and invalid flags are handled before building
    parser.parse_args()
    kvs = {
        '-F': ' ',
        '-o': '{0}/cup'.format(_NOW_PATH),
        '--doc-version': version.VERSION,
        '--doc-author': version.AUTHOR,
        '--ext-todo': ' ',
        '--module-first': ' ',
    }
    gen = DocGenerator(kvs)
    gen.build_rst()
# vi:set tw=0 ts=4 sw=4 nowrap fdm=indent
```
#### File: arrow/common/resource.py
```python
from cup.services import heartbeat
class BufferSerilizer(object):
"""
buffer serializer
"""
def __init__(self, buff):
pass
def serialize(self):
"""serialize the buffer"""
def deserialize(self, buff):
"""deserialize the buffer"""
class AgentResource(heartbeat.LinuxHost):
"""
resource
"""
def __init__(self, init_this_host=False, iface='eth0'):
        super(AgentResource, self).__init__(init_this_host, iface)
# vi:set tw=0 ts=4 sw=4 nowrap fdm=indent
```
#### File: arrow/common/service.py
```python
class ServiceStatus(object):
"""
BaseService for arrow
"""
INITED = 0
RUNNING = 1
STOPPING = 2
STOPPED = 3
def __init__(self):
self._statuslist = [
self.INITED, self.RUNNING, self.STOPPING, self.STOPPED
]
self._status = self.INITED
def set_status(self, status):
"""set status, return true if set successfully"""
if status not in self._statuslist:
return False
else:
self._status = status
return True
def get_status(self):
"""get status"""
return self._status
# vi:set tw=0 ts=4 sw=4 nowrap fdm=indent
```
#### File: arrow/master/control.py
```python
from cup import log
# from cup import net
from cup.services import executor
from cup.net.async import msgcenter
from cup.net.async import msg
from cup.services import heartbeat as hb_service
# from cup.util import conf
# from arrow.master import heartbeat
from arrow.common import settings
# from arrow.common import service
class ControlService(msgcenter.IMessageCenter):
def __init__(self, ip, port, confdict):
"""control service of arrow master"""
# status, 0 inited, 1 running 2 stopping, 3 stopped
msgcenter.IMessageCenter.__init__(self, ip, port)
self._master_ipport = (ip, port)
self._confdict = confdict
self._status = 0
self._type_man = msg.CMsgType()
self._type_man.register_types(settings.MSG_TYPE2NUM)
self._executor = executor.ExecutionService(
self._confdict['control']['queue_exec_thdnum'],
self._confdict['control']['queue_delay_exe_thdnum']
)
self._heartbeat_service = hb_service.HeartbeatService(
self._confdict['control']['judge_agent_dead_in_sec'],
self._confdict['control']['keep_lost']
)
self._msg_recv = 0
def _add_new_agent(self, ipaddr, port, resource=None):
key = '%s:%s' % (ipaddr, port)
# refresh heart for agent(str: 'ip:port')
self._heartbeat_service.refresh(key, resource)
def _on_heartbeat(self, netmsg):
ip_port, _ = netmsg.get_from_addr()
log.info(
'receive heartbeat, msg_len:%d, msg_flag:%d, msg_src:%s, '
'uniqid:%d' %
(
netmsg.get_msg_len(),
netmsg.get_flag(),
str(ip_port),
netmsg.get_uniq_id()
)
)
ack_msg = msg.CNetMsg(is_postmsg=True)
ack_msg.set_from_addr(self._master_ipport, (1, 1))
ipaddr, stub_future = netmsg.get_from_addr()
ack_msg.set_to_addr(ipaddr, stub_future)
ack_msg.set_flag(netmsg.get_flag())
ack_msg.set_msg_type(self._type_man.getnumber_bytype('ACK_HEART_BEAT'))
ack_msg.set_uniq_id(netmsg.get_uniq_id() + 1)
ack_msg.set_body('ACK_HEART_BEAT')
resource = hb_service.LinuxHost(name=str(self._master_ipport))
resource.deserilize(netmsg.get_body())
self._heartbeat_service.refresh(
'%s:%s' % (ip_port[0], ip_port[1]), resource
)
self.post_msg(ack_msg)
return
def _do_heartbeat(self, msg):
pass
def _do_check_dead_agent(self):
lost = self._heartbeat_service.get_lost()
# schedule next handle dead_agent
# status 2 == stopping
if self._status != 2:
self._executor.queue_exec(
settings.ARROW_MASTER_DEFAULT_PARAMS['check_heartbeat_interval'],
self._do_heartbeat,
1,
None
)
else:
log.info(
'ControlService is stopping. Check dead agent service'
'exited'
)
def run(self):
"""run control service"""
self._executor.run()
# call CUP message center to run
msgcenter.IMessageCenter.run(self)
def stop(self):
"""stop control service"""
msgcenter.IMessageCenter.stop(self)
self._executor.stop()
def handle(self, msg):
"""
handle msg
"""
log.debug('to handle msg in the child class')
msg_type = msg.get_msg_type()
src_peer, stub_future = msg.get_from_addr()
# log.debug('got msg from: %s stub_future:%s' % (src_peer, stub_future))
# log.debug('type of msg_type:{0}, settings msg_type:{1}'.format(
# type(msg_type), type(self._type_man.getnumber_bytype('HEART_BEAT'))
# ))
if msg_type == self._type_man.getnumber_bytype('HEART_BEAT'):
self._executor.queue_exec(
self._on_heartbeat,
1,
msg
)
else:
self.default_handle(msg)
# vi:set tw=0 ts=4 sw=4 nowrap fdm=indent
``` |
{
"source": "0xflotus/cutecharts",
"score": 2
} |
#### File: cutecharts/render/engine.py
```python
from typing import Any, Iterable, Iterator, Optional
from cutecharts.globals import CurrentConfig, NotebookType
def _flat(obj: Any):
if isinstance(obj, (list, tuple, set)):
return obj
return (obj,) # tuple
def _expand(dict_generator: Iterator):
return dict(list(dict_generator))
def _clean_dict(mydict: dict):
for key, value in mydict.items():
if value is not None:
if isinstance(value, dict):
value = _expand(_clean_dict(value))
elif isinstance(value, (list, tuple, set)):
value = list(_clean_array(value))
elif isinstance(value, str) and not value:
# delete key with empty string
continue
yield (key, value)
def _clean_array(myarray: Iterable):
for value in myarray:
if isinstance(value, dict):
yield _expand(_clean_dict(value))
elif isinstance(value, (list, tuple, set)):
yield list(_clean_array(value))
else:
yield value
def remove_key_with_none_value(incoming_dict: dict):
if isinstance(incoming_dict, dict):
return _expand(_clean_dict(incoming_dict))
elif incoming_dict:
return incoming_dict
else:
return None
class HTML:
def __init__(self, data: Optional[str] = None):
self.data = data
def _repr_html_(self):
return self.data
def __html__(self):
return self._repr_html_()
class RenderEngine:
def __init__(self):
self.assets_host = ""
self.assets_deps = []
def render(
self, dest: str = "render.html", template_name: str = "basic_local.html"
):
template = CurrentConfig.GLOBAL_ENV.get_template(template_name)
if hasattr(self, "before_render"):
self.before_render()
html = template.render(chart=self)
with open(dest, "w+", encoding="utf8") as f:
f.write(html)
return html
def render_notebook(self, template_name: str = "basic_notebook.html"):
template = CurrentConfig.GLOBAL_ENV.get_template(template_name)
if hasattr(self, "before_render"):
self.before_render()
if CurrentConfig.NOTEBOOK_TYPE == NotebookType.JUPYTER_NOTEBOOK:
return HTML(template.render(chart=self))
def _produce_assets_cfg(self):
local_cfg, notebook_cfg = [], []
for dep in self.assets_deps:
value = CurrentConfig.ASSETS_DEPS_MAP.get(dep)
if not value:
continue
local_cfg.append("{}{}.js".format(self.assets_host, value))
notebook_cfg.append("'{}':'{}{}'".format(dep, self.assets_host, value))
return local_cfg, notebook_cfg
```
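A short usage sketch for the cleaning helpers above; the option names in the dict are made up for illustration:
```python
# a short sketch of remove_key_with_none_value, using invented option names
from cutecharts.render.engine import remove_key_with_none_value

options = {
    "title": "demo",
    "colors": None,                                  # dropped: value is None
    "legend": {"show": True, "position": None},      # nested None is dropped too
    "series": [{"data": [1, 2], "label": ""}],       # empty strings are dropped
}
print(remove_key_with_none_value(options))
# -> {'title': 'demo', 'legend': {'show': True}, 'series': [{'data': [1, 2]}]}
```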
#### File: cutecharts/test/test_engine.py
```python
from nose.tools import assert_equal, assert_in
from cutecharts.charts.basic import BasicChart
from cutecharts.faker import Faker
from cutecharts.globals import AssetsHost
def test_engine_render():
basic = BasicChart()
html = basic.render()
assert_in(AssetsHost.DEFAULT_HOST, html)
assert_in("chartXkcd", html)
def test_engine_render_notebook():
basic = BasicChart()
html = basic.render_notebook().__html__()
assert_in(AssetsHost.DEFAULT_HOST, html)
assert_in("chartXkcd", html)
def test_faker():
attrs = Faker.choose()
values = Faker.values()
assert_equal(len(attrs), len(values))
``` |
{
"source": "0xflotus/DeepMimic",
"score": 3
} |
#### File: DeepMimic/learning/rl_util.py
```python
import numpy as np
def compute_return(rewards, gamma, td_lambda, val_t):
# computes td-lambda return of path
path_len = len(rewards)
assert len(val_t) == path_len + 1
return_t = np.zeros(path_len)
last_val = rewards[-1] + gamma * val_t[-1]
return_t[-1] = last_val
for i in reversed(range(0, path_len - 1)):
curr_r = rewards[i]
next_ret = return_t[i + 1]
curr_val = curr_r + gamma * ((1.0 - td_lambda) * val_t[i + 1] + td_lambda * next_ret)
return_t[i] = curr_val
return return_t
```
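A toy usage sketch for `compute_return`; the import path follows the file header and the numbers are arbitrary:
```python
# a toy sketch, assuming the module is importable as learning.rl_util
import numpy as np
from learning.rl_util import compute_return

rewards = [1.0, 0.0, 1.0]                # per-step rewards of one path
val_t = np.array([0.5, 0.4, 0.6, 0.0])   # value estimates, one per step plus the final state
returns = compute_return(rewards, gamma=0.95, td_lambda=0.9, val_t=val_t)
print(returns)  # TD(lambda) return targets, one per step
```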
#### File: DeepMimic/util/math_util.py
```python
import numpy as np
RAD_TO_DEG = 57.2957795
DEG_TO_RAD = 1.0 / RAD_TO_DEG
INVALID_IDX = -1
def lerp(x, y, t):
return (1 - t) * x + t * y
def log_lerp(x, y, t):
return np.exp(lerp(np.log(x), np.log(y), t))
def flatten(arr_list):
return np.concatenate([np.reshape(a, [-1]) for a in arr_list], axis=0)
def flip_coin(p):
rand_num = np.random.binomial(1, p, 1)
return rand_num[0] == 1
``` |
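A tiny sketch contrasting the two interpolation helpers above; the import path follows the file header and is an assumption:
```python
# lerp interpolates arithmetically, log_lerp geometrically (useful e.g. for learning rates)
import numpy as np
from util.math_util import lerp, log_lerp

print(lerp(1e-4, 1e-2, 0.5))                    # 0.00505 (arithmetic midpoint)
print(np.round(log_lerp(1e-4, 1e-2, 0.5), 6))   # 0.001   (geometric midpoint)
```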
{
"source": "0xflotus/delta",
"score": 2
} |
#### File: delta/models/asr_model.py
```python
import delta.compat as tf
#pylint: disable=import-error,unused-import
from tensorflow.keras import backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Bidirectional
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import TimeDistributed
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Reshape
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Lambda
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Input
from absl import logging
#delta
from delta import utils
from delta.utils.loss.loss_impl import CTCLoss as ctc_loss
from delta.models.base_model import RawModel
from delta.utils.register import registers
#pylint: disable=invalid-name,missing-docstring
@registers.model.register
class CTCAsrModel(RawModel):
'''
CTC ASR Model
reference: https://github.com/holm-aune-bachelor2018/ctc
'''
def __init__(self, config, name=None):
super().__init__(name=name)
self._config = config
logging.info("--- dummy Task to get meta data ---")
logging.info("--- do not care the Task mode here ---")
task = utils.task(config, mode=utils.TRAIN)
logging.info("--- dummy Task to get meta data ---")
logging.flush()
self._feat_shape = task.feat_shape
self._vocab_size = task.vocab_size
self.build()
@property
def feat_shape(self):
assert isinstance(self._feat_shape, (list))
return self._feat_shape
@property
def config(self):
return self._config
def get_loss_fn(self):
return ctc_loss(self._config)
#return utils.loss(self._config)
def ctc_lambda_func(self, args):
y_pred, input_length, labels, label_length = args
return self.get_loss_fn()(
logits=y_pred,
input_length=input_length,
labels=labels,
label_length=label_length,
name='ctc_loss')
#return K.ctc_batch_cost(labels, y_pred, input_length, label_length)
def build(self):
input_tensor = Input(
name='inputs', shape=(None, *self._feat_shape, 1), dtype=tf.float32)
x = input_tensor
x = Conv2D(
filters=32,
kernel_size=(11, 5),
use_bias=True,
activation='relu',
padding='same',
kernel_initializer='he_normal',
name="conv1")(
x)
x = Conv2D(
filters=32,
kernel_size=(11, 5),
use_bias=True,
activation='relu',
padding='same',
kernel_initializer='he_normal',
name="conv2")(
x)
_, _, dim, channels = x.get_shape().as_list()
output_dim = dim * channels
x = Reshape((-1, output_dim))(x)
x = TimeDistributed(Dropout(0.2))(x)
x = Bidirectional(
LSTM(
units=512,
kernel_initializer='glorot_uniform',
bias_initializer='random_normal',
return_sequences=True,
name='lstm'))(
x)
x = TimeDistributed(Dropout(0.2))(x)
x = Bidirectional(
LSTM(
512,
kernel_initializer='glorot_uniform',
bias_initializer='random_normal',
return_sequences=True,
name='lstm1'))(
x)
x = TimeDistributed(Dropout(0.2))(x)
x = Bidirectional(
LSTM(
512,
kernel_initializer='glorot_uniform',
bias_initializer='random_normal',
return_sequences=True,
name='lstm2'))(
x)
x = TimeDistributed(Dropout(0.2))(x)
x = Bidirectional(
LSTM(
512,
kernel_initializer='glorot_uniform',
bias_initializer='random_normal',
return_sequences=True,
name='lstm3'))(
x)
x = TimeDistributed(Dense(1024, activation='relu'))(x)
x = TimeDistributed(Dropout(0.5))(x)
# Output layer with softmax
x = TimeDistributed(Dense(self._vocab_size), name="outputs")(x)
input_length = Input(name='input_length', shape=[], dtype='int64')
labels = Input(name='targets', shape=[None], dtype='int32')
label_length = Input(name='target_length', shape=[], dtype='int64')
loss_out = Lambda(
self.ctc_lambda_func, output_shape=(),
name='ctc')([x, input_length, labels, label_length])
self._model = tf.keras.Model(
inputs=[input_tensor, labels, input_length, label_length],
outputs=[loss_out])
@property
def model(self):
return self._model
def call(self, inputs, **kwargs):
output = self.model(inputs, **kwargs)
return output
@registers.model.register
class CTC5BlstmAsrModel(CTCAsrModel):
'''
CTC ASR Model
reference: https://www.cs.cmu.edu/~ymiao/pub/icassp2016_ctc.pdf
'''
def build(self):
input_tensor = Input(
name='inputs', shape=(None, *self._feat_shape, 1), dtype=tf.float32)
x = input_tensor
_, _, dim, channels = x.get_shape().as_list()
output_dim = dim * channels
x = Reshape((-1, output_dim))(x)
x = Bidirectional(
LSTM(
units=320,
kernel_initializer='glorot_uniform',
bias_initializer='random_normal',
return_sequences=True,
name='lstm'))(
x)
x = Bidirectional(
LSTM(
units=320,
kernel_initializer='glorot_uniform',
bias_initializer='random_normal',
return_sequences=True,
name='lstm1'))(
x)
x = Bidirectional(
LSTM(
units=320,
kernel_initializer='glorot_uniform',
bias_initializer='random_normal',
return_sequences=True,
name='lstm2'))(
x)
x = Bidirectional(
LSTM(
units=320,
kernel_initializer='glorot_uniform',
bias_initializer='random_normal',
return_sequences=True,
name='lstm3'))(
x)
x = Bidirectional(
LSTM(
units=320,
kernel_initializer='glorot_uniform',
bias_initializer='random_normal',
return_sequences=True,
name='lstm4'))(
x)
# Output layer with softmax
x = TimeDistributed(Dense(self._vocab_size))(x)
input_length = Input(name='input_length', shape=[], dtype='int64')
labels = Input(name='targets', shape=[None], dtype='int32')
label_length = Input(name='target_length', shape=[], dtype='int64')
loss_out = Lambda(
self.ctc_lambda_func, output_shape=(),
name='ctc')([x, input_length, labels, label_length])
self._model = tf.keras.Model(
inputs=[input_tensor, labels, input_length, label_length],
outputs=[loss_out])
```
#### File: v1/local/transfer_bert_model.py
```python
import os
import sys
from bert import modeling
import tensorflow as tf
from absl import logging
def transfer_bert_model(bert_model_dir, output_bert_model):
graph = tf.Graph()
max_seq_len = 512
num_labels = 2
use_one_hot_embeddings = False
with graph.as_default():
with tf.Session() as sess:
input_ids = tf.placeholder(tf.int32, (None, None), 'input_ids')
input_mask = tf.placeholder(tf.int32, (None, None), 'input_mask')
segment_ids = tf.placeholder(tf.int32, (None, None), 'segment_ids')
bert_config = modeling.BertConfig.from_json_file(os.path.join(bert_model_dir, 'bert_config.json'))
model = modeling.BertModel(
config=bert_config,
is_training=False,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
all_encoder_layers = model.get_all_encoder_layers()
input_x_bert_cls = model.get_pooled_output()
for idx, layer in enumerate(all_encoder_layers):
layer = tf.identity(layer, "encoder_layers_" + str(idx))
print("layer:", layer)
input_x_bert_cls = tf.identity(input_x_bert_cls, "input_x_bert_cls")
print("input_x_bert_cls", input_x_bert_cls)
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver.restore(sess, bert_model_dir + "/bert_model.ckpt")
saver.save(sess, output_bert_model)
if __name__ == '__main__':
logging.set_verbosity(logging.INFO)
if len(sys.argv) != 3:
logging.error("Usage python {} bert_model_dir output_bert_model".format(sys.argv[0]))
sys.exit(-1)
bert_model_dir = sys.argv[1]
output_bert_model = sys.argv[2]
transfer_bert_model(bert_model_dir, output_bert_model)
```
#### File: delta/utils/run_saved_model.py
```python
import os
from absl import logging
from absl import flags
from absl import app
from delta import utils
from delta.utils.register import registers
from delta.utils.register import import_all_modules_for_register
def main(_):
''' main func '''
FLAGS = app.flags.FLAGS #pylint: disable=invalid-name
logging.info("config: {}".format(FLAGS.config))
logging.info("mode: {}".format(FLAGS.mode))
logging.info("gpu_visible: {}".format(FLAGS.gpu))
assert FLAGS.config, 'pls give a config.yaml'
assert FLAGS.mode, 'pls give mode [eval|infer|eval_and_infer]'
os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu #selects a specific device
#create dataset
mode = utils.INFER if FLAGS.mode == 'infer' else utils.EVAL
# load config
config = utils.load_config(FLAGS.config)
# process config
import_all_modules_for_register()
solver_name = config['solver']['name']
logging.info(f"sovler: {solver_name}")
solver = registers.solver[solver_name](config)
config = solver.config
# Evaluate
evaluate_name = config['serving']['name']
logging.info(f"evaluate: {evaluate_name}")
evaluate = registers.serving[evaluate_name](config, gpu_str=FLAGS.gpu, mode=mode)
if FLAGS.debug:
evaluate.debug()
evaluate.predict()
def define_flags():
''' define flags for evaluator'''
# The GPU devices which are visible for current process
flags.DEFINE_string('gpu', '', 'same to CUDA_VISIBLE_DEVICES')
flags.DEFINE_string('config', None, help='path to yaml config file')
flags.DEFINE_enum('mode', 'eval',['eval', 'infer', 'eval_and_infer'], 'eval or infer')
flags.DEFINE_bool('debug', False, 'debug mode')
# https://github.com/abseil/abseil-py/blob/master/absl/flags/_validators.py#L330
flags.mark_flags_as_required(['config', 'mode'])
if __name__ == '__main__':
logging.set_verbosity(logging.INFO)
define_flags()
app.run(main)
logging.info("OK. Done!")
``` |
{
"source": "0xflotus/dfirtrack",
"score": 2
} |
#### File: exporter/markdown/clean_directory.py
```python
from dfirtrack.settings import MARKDOWN_PATH
from dfirtrack_main.logger.default_logger import debug_logger, info_logger
import os
import shutil
def clean_directory(request_user):
""" function to clean the system path within the markdown directory """
# clean or create markdown directory
if os.path.exists(MARKDOWN_PATH + "/docs/systems/"):
        # remove markdown directory (recursively)
shutil.rmtree(MARKDOWN_PATH + "/docs/systems/")
# recreate markdown directory
os.mkdir(MARKDOWN_PATH + "/docs/systems/")
# call logger
debug_logger(request_user, " SYSTEM_MARKDOWN_ALL_SYSTEMS_DIRECTORY_CLEANED")
else:
# create markdown directory
os.mkdir(MARKDOWN_PATH + "/docs/systems/")
# call logger
info_logger(request_user, " SYSTEM_MARKDOWN_FOLDER_CREATED")
```
#### File: importer/file/csv.py
```python
import csv
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render
from django.utils import timezone
from dfirtrack.config import SYSTEMTAG_HEADLINE as systemtag_headline
from dfirtrack.config import SYSTEMTAG_SUBHEADLINE as systemtag_subheadline
from dfirtrack.config import TAGLIST
from dfirtrack.config import TAGPREFIX
from dfirtrack_main.forms import SystemIpFileImport, SystemTagFileImport
from dfirtrack_main.logger.default_logger import critical_logger, debug_logger, error_logger, warning_logger
from dfirtrack_main.models import Domain, Headline, Ip, Reportitem, System, Systemstatus, Tag, Tagcolor
import ipaddress
from io import TextIOWrapper
@login_required(login_url="/login")
def systems_ips(request):
""" this function parses a csv file and tries to import systems and corresponding ips """
# form was valid to post
if request.method == "POST":
# call logger
debug_logger(str(request.user), " SYSTEM_IP_IMPORTER_BEGIN")
# get text out of file (variable results from request object via file upload field)
systemipcsv = TextIOWrapper(request.FILES['systemipcsv'].file, encoding=request.encoding)
# read rows out of csv
rows = csv.reader(systemipcsv, quotechar="'")
# set row counter (needed for logger)
i = 0
# check for wrong file type
try:
# iterate over rows
for row in rows:
# autoincrement row counter
i += 1
# check for empty rows
try:
# check system column for empty value
if row[0] == '':
warning_logger(str(request.user), " SYSTEM_IP_IMPORTER_SYSTEM_COLUMN " + "row_" + str(i) + ":empty_column")
continue
except IndexError:
warning_logger(str(request.user), " SYSTEM_IP_IMPORTER_ROW row_" + str(i) + ":empty_row")
continue
# check system column for string
if not isinstance(row[0], str):
warning_logger(str(request.user), " SYSTEM_IP_IMPORTER_SYSTEM_COLUMN " + "row_" + str(i) + ":no_string")
continue
# check system column for length of string
if len(row[0]) > 50:
warning_logger(str(request.user), " SYSTEM_IP_IMPORTER_SYSTEM_COLUMN " + "row_" + str(i) + ":long_string")
continue
# check ip column for ip
try:
ipaddress.ip_address(row[1])
except ValueError:
warning_logger(str(request.user), " SYSTEM_IP_IMPORTER_IP_COLUMN " + "row_" + str(i) + ":invalid_ip")
continue
# create ip
ip, created = Ip.objects.get_or_create(ip_ip=row[1])
if created == True:
ip.logger(str(request.user), " SYSTEMS_IP_IMPORTER_IP_CREATED")
# check for existence of system
system = System.objects.filter(system_name = row[0], ip = ip)
if system.count() > 0:
error_logger(str(request.user), " SYSTEM_IP_IMPORTER_SYSTEM_EXISTS " + "row_" + str(i) + ":system_exists|system_name:" + row[0] + "|ip:" + str(row[1]))
continue
# create form with request data
form = SystemIpFileImport(request.POST, request.FILES)
# create system
if form.is_valid():
# don't save form yet
system = form.save(commit=False)
# set system_name
system.system_name = row[0]
# set auto values
system.system_created_by_user_id = request.user
system.system_modified_by_user_id = request.user
system.system_modify_time = timezone.now()
# save object
system.save()
# save manytomany
form.save_m2m()
# save ip for system
system.ip.add(ip)
# call logger
system.logger(str(request.user), ' SYSTEM_IP_IMPORTER_EXECUTED')
# wrong file type
except UnicodeDecodeError:
critical_logger(str(request.user), " SYSTEM_IP_IMPORTER_WRONG_FILE_TYPE")
# call logger
debug_logger(str(request.user), " SYSTEM_IP_IMPORTER_END")
return redirect('/systems')
else:
# show empty form
form = SystemIpFileImport(initial={
'systemstatus': 2,
'analysisstatus': 1,
})
# call logger
debug_logger(str(request.user), " SYSTEM_IP_IMPORTER_ENTERED")
return render(request, 'dfirtrack_main/system/systems_ip_importer.html', {'form': form})
@login_required(login_url="/login")
def systems_tags(request):
""" this function imports a csv file with multiple systems and relevant tags """
"""
the following high-level workflow is done by this function
- remove all tags for systems beginning with 'TAGPREFIX' (if there are any)
- evaluate given CSV line by line (without first row)
- check whether this line has relevant tags (leave loop if not)
- get hostname and convert to lowercase
- get domain and change to empty string if incorrect (either 'NT AUTHORITY' or hostname itself)
- create domain if necessary
- check for existing systems (with this hostname)
- if == 1:
- check for existing domain (for this system)
if domain_of_system == NULL: domain is set to domain from CSV (if there is one)
- if > 1: leave loop because not distinct
- if == 0: create system
- add relevant tags to this system
- check for reportitem headline = SYSTEMTAG_HEADLINE, reportitem_subheadline = SYSTEMTAG_SUBHEADLINE and create if necessary
- fill reportitem_note with markdown table containing with information of report(s)
- logs and messages are written if applicable
- counters are incremented where necessary
"""
# form was valid to post
if request.method == "POST":
# call logger
debug_logger(str(request.user), " SYSTEM_TAG_IMPORTER_BEGIN")
# check TAGLIST (from settings.config) for empty list
if not TAGLIST:
messages.error(request, "No relevant tags defined. Check `TAGLIST` in `dfirtrack.config`!")
# call logger
error_logger(str(request.user), " SYSTEM_TAG_IMPORTER_NO_TAGS_DEFINED.")
return redirect('/systems/')
else:
taglist = TAGLIST
# check TAGPREFIX (from settings.config) for empty string
if TAGPREFIX is "":
messages.error(request, "No prefix string defined. Check `TAGPREFIX` in `dfirtrack.config`!")
# call logger
error_logger(str(request.user), " SYSTEM_TAG_IMPORTER_NO_TAGPREFIX_DEFINED.")
return redirect('/systems/')
# expand the string by an underscore
else:
tagprefix = TAGPREFIX + "_"
# check whether SYSTEMTAG_HEADLINE is defined in `dfirtrack.config`
if systemtag_headline == '':
# call logger
error_logger(str(request.user), " SYSTEMTAG_HEADLINE_VARIABLE_UNDEFINED")
messages.error(request, "The variable SYSTEMTAG_HEADLINE seems to be undefined. Check `dfirtrack.config`!")
# leave importer
return redirect('/systems/')
# check whether SYSTEMTAG_SUBHEADLINE is defined in `dfirtrack.config`
if systemtag_subheadline == '':
# call logger
error_logger(str(request.user), " SYSTEMTAG_SUBHEADLINE_VARIABLE_UNDEFINED")
messages.error(request, "The variable SYSTEMTAG_SUBHEADLINE seems to be undefined. Check `dfirtrack.config`!")
# leave importer
return redirect('/systems/')
# get text out of file (variable results from request object via file upload field)
systemtagcsv = TextIOWrapper(request.FILES['systemtagcsv'].file, encoding=request.encoding)
# read rows out of csv
rows = csv.reader(systemtagcsv)
# create empty list (this list is used to store every line as single dict: {system_name: row}), because if there are multiple rows with the same system they are added to the same reportitem
rowlist = []
""" remove all tags for systems beginning with 'TAGPREFIX' (if there are any) """
# get all systems that have tags beginning with 'TAGPREFIX' | prefixtagsystems -> queryset
prefixtagsystems=System.objects.filter(tag__tag_name__startswith=tagprefix)
# iterate over systems in queryset | prefixtagsystem -> system object
for prefixtagsystem in prefixtagsystems:
# get all tags beginning with 'TAGPREFIX' that belong to the actual system | systemprefixtags -> queryset
systemprefixtags=prefixtagsystem.tag.filter(tag_name__startswith=tagprefix)
# iterate over queryset | systemprefixtag -> tag object
for systemprefixtag in systemprefixtags:
# delete all existing tags (the m2m relationship) beginning with 'TAGPREFIX' for this system (so that removed tags from csv will be removed as well)
systemprefixtag.system_set.remove(prefixtagsystem)
# create headline if it does not exist
headline, created = Headline.objects.get_or_create(headline_name=systemtag_headline)
if created == True:
headline.logger(str(request.user), " SYSTEMS_TAG_IMPORTER_HEADLINE_CREATED")
""" remove all reportitems """
# delete reportitems (so no reportitems with legacy information / tags will be left)
Reportitem.objects.filter(headline = headline, reportitem_subheadline = systemtag_subheadline).delete()
""" prepare and start loop """
# set row_counter (needed for logger)
row_counter = 1
# set systems_created_counter (needed for messages)
systems_created_counter = 0
# set systems_skipped_counter (needed for messages)
systems_skipped_counter = 0
# iterate over rows
for row in rows:
# skip first row (headlines)
if row_counter == 1:
# autoincrement row counter
row_counter += 1
continue
# get system_name and change to lowercase
system_name = row[8].lower()
# get tags from csv
tagcsvstring = row[9]
if tagcsvstring == '':
# autoincrement systems_skipped_counter
systems_skipped_counter += 1
# autoincrement row_counter
row_counter += 1
# leave because systems without tags are not relevant
continue
else:
# convert string (at whitespaces) to list
tagcsvlist = tagcsvstring.split()
# create empty list for mapping
tagaddlist = []
# check for relevant tags and add to list
for tag in taglist:
if tag in tagcsvlist:
tagaddlist.append(tagprefix + tag)
# check if tagaddlist is empty
if not tagaddlist:
# autoincrement systems_skipped_counter
systems_skipped_counter += 1
# autoincrement row_counter
row_counter += 1
# leave because there are no relevant tags
continue
# get domain from csv
domain_name = row[7]
# change domain_name to empty string if incorrect domain_name ('NT AUTHORITY') was provided
if domain_name == 'NT AUTHORITY':
domain_name = ''
# clear domain if domain_name equals system_name
elif domain_name.lower() == system_name:
domain_name = ''
# get or create domain object if some valid name was provided
if domain_name != '':
# create domain
domain, created = Domain.objects.get_or_create(domain_name=domain_name)
# call logger if created
if created == True:
domain.logger(str(request.user), " SYSTEMS_TAG_IMPORTER_DOMAIN_CREATED")
messages.success(request, 'Domain "' + domain.domain_name + '" created.')
else:
# set domain to None to avoid further errors (domain is needed later)
domain = None
# create empty dict
rowdict = {}
# put the actual row to the dict (dict with only ONE key-value-pair)
rowdict[system_name] = row
# append dict to the global list (because if there are multiple rows with the same system, needed for reportitem SYSTEMTAG_SUBHEADLINE)
rowlist.append(rowdict)
# get all systems with this system_name
systemquery = System.objects.filter(system_name=system_name)
""" check how many systems were returned """
# if there is only one system
if len(systemquery) == 1:
# get system object
system = System.objects.get(system_name=system_name)
""" add domain from CSV only if system does not already has a domain """
# check whether system has existing domain and CSV submitted a domain
if system.domain is None and domain is not None:
# if system has no existing domain set domain of system to domain submitted by tag csv
system.domain = domain
system.system_modify_time = timezone.now()
system.system_modified_by_user_id = request.user
system.save()
# call logger
system.logger(str(request.user), " SYSTEMS_TAG_IMPORTER_SYSTEM_DOMAIN_ADDED")
# if there is more than one system
elif len(systemquery) > 1:
# call logger
error_logger(str(request.user), " SYSTEM_TAG_IMPORTER_SYSTEM_EXISTS_MULTIPLE_TIMES " + "row_" + str(row_counter) + ":system_exists_multiple_times|system_name:" + system_name)
messages.error(request, 'System "' + system_name + '" was found multiple times. Nothing was changed for this system.')
# autoincrement row_counter
row_counter += 1
# leave because of no distinct mapping
continue
else:
# create entire new system object
system = System()
system.system_name = system_name
system.systemstatus = Systemstatus.objects.get(systemstatus_name = "Unknown")
#system.analysisstatus = Analysisstatus.objects.get(analysisstatus_name = "Needs anaylsis")
# add domain if submitted
if domain is not None:
system.domain = domain
system.system_modify_time = timezone.now()
system.system_created_by_user_id = request.user
system.system_modified_by_user_id = request.user
system.save()
# autoincrement systems_created_counter
systems_created_counter += 1
# call logger
system.logger(str(request.user), " SYSTEMS_TAG_IMPORTER_SYSTEM_CREATED")
# iterate over tags in tagaddlist
for tag_name in tagaddlist:
# get tagcolor object
tagcolor = Tagcolor.objects.get(tagcolor_name='primary')
# create tag if needed
tag, created = Tag.objects.get_or_create(tag_name=tag_name, tagcolor=tagcolor)
# call logger if created
if created == True:
tag.logger(str(request.user), " SYSTEMS_TAG_IMPORTER_TAG_CREATED")
messages.success(request, 'Tag "' + tag.tag_name + '" created.')
# add tag to system
tag.system_set.add(system)
# call logger
system.logger(str(request.user), " SYSTEMS_TAG_IMPORTER_SYSTEM_MODIFIED")
# create reportitem if it does not exist (get_or_create won't work in this context because of needed user objects for saving)
try:
reportitem = Reportitem.objects.get(system = system, headline = headline, reportitem_subheadline = systemtag_subheadline)
except Reportitem.DoesNotExist:
reportitem = Reportitem()
reportitem.system = system
reportitem.headline = headline
reportitem.reportitem_subheadline = (systemtag_subheadline)
reportitem.reportitem_created_by_user_id = request.user
# create empty list (used to store elements of markdown table)
notelist = []
# put head of markdown table into list
notelist.append("|File|Type|Version|Started|Duration|Lines|Checked|Domain|Host|Tags|Errors|FirstTrace|LastToolUsage|UsageTime|MalwareInstall")
notelist.append("|:---|:---|:---|:---|:---|:---|:---|:---|:---|:---|:---|:---|:---|:---|:---|")
# iterate over entries in list (dictionaries)
for item in rowlist:
# if this single key-value-pair dict contains the system
if system_name in item:
# get row
entry = item[system_name]
# convert row
entry = "|" + "|".join(entry) + "|"
# fill empty fields with '---' (otherwise mkdocs skips these)
entry = entry.replace("||", "| --- |")
# repeat last step to catch empty fields lying next to each other
entry = entry.replace("||", "| --- |")
# put entry to markdown table
notelist.append(entry)
# join list to string with linebreaks
notestring = "\n".join(notelist)
# add changing values (existing reportitem_note will be overwritten)
reportitem.reportitem_note = notestring
reportitem.reportitem_modified_by_user_id = request.user
reportitem.save()
# call logger
reportitem.logger(str(request.user), " SYSTEMS_TAG_IMPORTER_REPORTITEM_CREATED_OR_MODIFIED")
# autoincrement row_counter
row_counter += 1
# call final messages
if systems_created_counter > 0:
if systems_created_counter == 1:
messages.success(request, str(systems_created_counter) + ' system was created.')
else:
messages.success(request, str(systems_created_counter) + ' systems were created.')
if systems_skipped_counter > 0:
if systems_skipped_counter == 1:
messages.warning(request, str(systems_skipped_counter) + ' system was skipped or cleaned (no relevant tags).')
else:
messages.warning(request, str(systems_skipped_counter) + ' systems were skipped or cleaned (no relevant tags).')
# call logger
debug_logger(str(request.user), " SYSTEM_TAG_IMPORTER_END")
return redirect('/systems/')
else:
# show empty form
form = SystemTagFileImport()
# call logger
debug_logger(str(request.user), " SYSTEM_TAG_IMPORTER_ENTERED")
return render(request, 'dfirtrack_main/system/systems_tag_importer.html', {'form': form})
```
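For reference, a hypothetical input row as the tag importer above sees it: the markdown table headline built in the code implies the column order, and only columns 7 (domain), 8 (host) and 9 (tags) are evaluated; all values below are made up:
```python
# hypothetical CSV row as returned by csv.reader (the remaining columns are only
# copied into the generated report table, not evaluated individually)
row = [
    'report.txt', 'EVTX', '1.0', '2019-01-01', '00:05', '1000', 'yes',  # File ... Checked
    'EXAMPLEDOMAIN',      # row[7]: Domain ('NT AUTHORITY' or the hostname itself is discarded)
    'SRV01',              # row[8]: Host (converted to lowercase by the importer)
    'mimikatz psexec',    # row[9]: Tags, whitespace-separated and matched against TAGLIST
    '', '', '', '', '',   # Errors, FirstTrace, LastToolUsage, UsageTime, MalwareInstall
]
```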
#### File: dfirtrack/dfirtrack_main/models.py
```python
from django.contrib.auth.models import User
from django.db import models
import logging
from time import strftime
# initialize logger
stdlogger = logging.getLogger(__name__)
class Analysisstatus(models.Model):
# primary key
analysisstatus_id = models.AutoField(primary_key=True)
# main entity information
analysisstatus_name = models.CharField(max_length=30, unique=True)
analysisstatus_note = models.TextField(blank=True, null=True)
# string representation
def __str__(self):
return self.analysisstatus_name
# define logger
def logger(analysisstatus, request_user, log_text):
stdlogger.info(
request_user +
log_text +
" analysisstatus_id:" + str(analysisstatus.analysisstatus_id) +
"|analysisstatus_name:" + str(analysisstatus.analysisstatus_name) +
"|analysisstatus_note:" + str(analysisstatus.analysisstatus_note)
)
class Analystmemo(models.Model):
# primary key
analystmemo_id = models.AutoField(primary_key=True)
# foreign key(s)
system = models.ForeignKey('System', on_delete=models.CASCADE)
# main entity information
analystmemo_note = models.TextField()
# meta information
analystmemo_create_time = models.DateTimeField(auto_now_add=True)
analystmemo_modify_time = models.DateTimeField(auto_now=True)
analystmemo_created_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='analystmemo_created_by')
analystmemo_modified_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='analystmemo_modified_by')
# string representation
def __str__(self):
return 'Analystmemo %s (%s)' % (str(self.analystmemo_id), self.system)
# define logger
def logger(analystmemo, request_user, log_text):
stdlogger.info(
request_user +
log_text +
" analystmemo_id:" + str(analystmemo.analystmemo_id) +
"|system:" + str(analystmemo.system) +
"|analystmemo_note:" + str(analystmemo.analystmemo_note)
)
class Case(models.Model):
# primary key
case_id = models.AutoField(primary_key=True)
# main entity information
case_name = models.CharField(max_length=50, unique=True)
case_is_incident = models.BooleanField()
# meta information
case_create_time = models.DateTimeField(auto_now_add=True)
case_created_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='case_created_by')
# string representation
def __str__(self):
return self.case_name
# define logger
def logger(case, request_user, log_text):
stdlogger.info(
request_user +
log_text +
" case_id:" + str(case.case_id) +
"|case_name:" + str(case.case_name) +
"|case_is_incident:" + str(case.case_is_incident)
)
class Company(models.Model):
# primary key
company_id = models.AutoField(primary_key=True)
# foreign key(s)
division = models.ForeignKey('Division', on_delete=models.SET_NULL, blank=True, null=True)
# main entity information
company_name = models.CharField(max_length=50, unique=True)
company_note = models.TextField(blank=True, null=True)
# string representation
def __str__(self):
return self.company_name
# define logger
def logger(company, request_user, log_text):
stdlogger.info(
request_user +
log_text +
" company_id:" + str(company.company_id) +
"|division:" + str(company.division) +
"|company_name:" + str(company.company_name) +
"|company_note:" + str(company.company_note)
)
class Contact(models.Model):
# primary key
contact_id = models.AutoField(primary_key=True)
# main entity information
contact_name = models.CharField(max_length=100)
contact_phone = models.CharField(max_length=50, blank=True, null=True)
contact_email = models.CharField(max_length=100, unique=True)
contact_note = models.TextField(blank=True, null=True)
# string representation
def __str__(self):
return self.contact_name
# define logger
def logger(contact, request_user, log_text):
stdlogger.info(
request_user +
log_text +
" contact_id:" + str(contact.contact_id) +
"|contact_name:" + str(contact.contact_name) +
"|contact_phone:" + str(contact.contact_phone) +
"|contact_email:" + str(contact.contact_email) +
"|contact_note:" + str(contact.contact_note)
)
class Division(models.Model):
# primary key
division_id = models.AutoField(primary_key=True)
# main entity information
division_name = models.CharField(max_length=50, unique=True)
division_note = models.TextField(blank=True, null=True)
# string representation
def __str__(self):
return self.division_name
# define logger
def logger(division, request_user, log_text):
stdlogger.info(
request_user +
log_text +
" division_id:" + str(division.division_id) +
"|division_name:" + str(division.division_name) +
"|division_note:" + str(division.division_note)
)
class Domain(models.Model):
# primary key
domain_id = models.AutoField(primary_key=True)
# main entity information
domain_name = models.CharField(max_length=100, unique=True)
domain_note = models.TextField(blank=True, null=True)
# string representation
def __str__(self):
return self.domain_name
# define logger
def logger(domain, request_user, log_text):
stdlogger.info(
request_user +
log_text +
" domain_id:" + str(domain.domain_id) +
"|domain_name:" + str(domain.domain_name) +
"|domain_note:" + str(domain.domain_note)
)
class Entry(models.Model):
# primary key
entry_id = models.AutoField(primary_key=True)
# foreign key(s)
system = models.ForeignKey('System', on_delete=models.CASCADE)
case = models.ForeignKey('Case', on_delete=models.SET_NULL, blank=True, null=True)
# main entity information
entry_time = models.DateTimeField()
entry_sha1 = models.CharField(max_length=40, blank=True, null=True)
entry_date = models.CharField(max_length=10, blank=True, null=True)
entry_utc = models.CharField(max_length=8, blank=True, null=True)
entry_system = models.CharField(max_length=30, blank=True, null=True)
entry_type = models.CharField(max_length=30, blank=True, null=True)
entry_content = models.TextField(blank=True, null=True)
entry_note = models.TextField(blank=True, null=True)
# meta information
entry_create_time = models.DateTimeField(auto_now_add=True)
entry_modify_time = models.DateTimeField(auto_now=True)
entry_api_time = models.DateTimeField(null=True)
entry_created_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='entry_created_by')
entry_modified_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='entry_modified_by')
# define unique together
class Meta:
unique_together = ('system', 'entry_sha1')
# string representation
def __str__(self):
return '%s | %s | %s' % (str(self.entry_id), self.system, self.entry_sha1)
# define logger
def logger(entry, request_user, log_text):
stdlogger.info(
request_user +
log_text +
" entry_id:" + str(entry.entry_id) +
"|system:" + str(entry.system) +
"|entry_sha1:" + str(entry.entry_sha1) +
"|entry_note:" + str(entry.entry_note) +
"|case:" + str(entry.case)
)
class Headline(models.Model):
# primary key
headline_id = models.AutoField(primary_key=True)
# main entity information
headline_name = models.CharField(max_length=100, unique=True)
# string representation
def __str__(self):
return self.headline_name
# define logger
def logger(headline, request_user, log_text):
stdlogger.info(
request_user +
log_text +
" headline_id:" + str(headline.headline_id) +
"|headline_name:" + str(headline.headline_name)
)
class Ip(models.Model):
# primary key
ip_id = models.AutoField(primary_key=True)
# main entity information
ip_ip = models.GenericIPAddressField(unique=True)
# string representation
def __str__(self):
return self.ip_ip
# define logger
def logger(ip, request_user, log_text):
stdlogger.info(
request_user +
log_text +
" ip_id:" + str(ip.ip_id) +
"|ip_ip:" + str(ip.ip_ip)
)
class Location(models.Model):
# primary key
location_id = models.AutoField(primary_key=True)
# main entity information
location_name = models.CharField(max_length=50, unique=True)
location_note = models.TextField(blank=True, null=True)
# string representation
def __str__(self):
return self.location_name
# define logger
def logger(location, request_user, log_text):
stdlogger.info(
request_user +
log_text +
" location_id:" + str(location.location_id) +
"|location_name:" + str(location.location_name) +
"|location_note:" + str(location.location_note)
)
class Os(models.Model):
# primary key
os_id = models.AutoField(primary_key=True)
# main entity information
os_name = models.CharField(max_length=30, unique=True)
# string representation
def __str__(self):
return self.os_name
# define logger
def logger(os, request_user, log_text):
stdlogger.info(
request_user +
log_text +
" os_id:" + str(os.os_id) +
"|os_name:" + str(os.os_name)
)
class Osarch(models.Model):
# primary key
osarch_id = models.AutoField(primary_key=True)
# main entity information
osarch_name = models.CharField(max_length=10, unique=True)
# string representation
def __str__(self):
return self.osarch_name
# define logger
def logger(osarch, request_user, log_text):
stdlogger.info(
request_user +
log_text +
" osarch_id:" + str(osarch.osarch_id) +
"|osarch_name:" + str(osarch.osarch_name)
)
class Osimportname(models.Model):
# primary key
osimportname_id = models.AutoField(primary_key=True)
# foreign key(s)
os = models.ForeignKey('Os', on_delete=models.CASCADE)
# main entity information
osimportname_name = models.CharField(max_length=30, unique=True)
osimportname_importer = models.CharField(max_length=30)
# string representation
def __str__(self):
return '%s (%s)' % (self.osimportname_name, self.os)
# define logger
def logger(osimportname, request_user, log_text):
stdlogger.info(
request_user +
log_text +
" osimportname_id:" + str(osimportname.osimportname_id) +
"|osimportname_name:" + str(osimportname.osimportname_name) +
"|osimportname_importer:" + str(osimportname.osimportname_importer) +
"|os:" + str(osimportname.os)
)
class Reason(models.Model):
# primary key
reason_id = models.AutoField(primary_key=True)
# main entity information
reason_name = models.CharField(max_length=30, unique=True)
reason_note = models.TextField(blank=True, null=True)
# string representation
def __str__(self):
return self.reason_name
# define logger
def logger(reason, request_user, log_text):
stdlogger.info(
request_user +
log_text +
" reason_id:" + str(reason.reason_id) +
"|reason_name:" + str(reason.reason_name) +
"|reason_note:" + str(reason.reason_note)
)
class Recommendation(models.Model):
# primary key
recommendation_id = models.AutoField(primary_key=True)
# main entity information
recommendation_name = models.CharField(max_length=30, unique=True)
recommendation_note = models.TextField(blank=True, null=True)
# string representation
def __str__(self):
return self.recommendation_name
# define logger
def logger(recommendation, request_user, log_text):
stdlogger.info(
request_user +
log_text +
" recommendation_id:" + str(recommendation.recommendation_id) +
"|recommendation_name:" + str(recommendation.recommendation_name) +
"|recommendation_note:" + str(recommendation.recommendation_note)
)
class Reportitem(models.Model):
# primary key
reportitem_id = models.AutoField(primary_key=True)
# foreign key(s)
system = models.ForeignKey('System', on_delete=models.CASCADE)
headline = models.ForeignKey('Headline', on_delete=models.PROTECT)
# main entity information
reportitem_subheadline = models.CharField(max_length=100, blank=True, null=True)
reportitem_note = models.TextField()
# meta information
reportitem_create_time = models.DateTimeField(auto_now_add=True)
reportitem_modify_time = models.DateTimeField(auto_now=True)
reportitem_created_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='reportitem_created_by')
reportitem_modified_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='reportitem_modified_by')
# define unique together
class Meta:
unique_together = (('system', 'headline', 'reportitem_subheadline'),)
# string representation
def __str__(self):
return '%s | %s | %s' % (self.system, self.headline.headline_name, self.reportitem_subheadline)
# define logger
def logger(reportitem, request_user, log_text):
stdlogger.info(
request_user +
log_text +
" reportitem_id:" + str(reportitem.reportitem_id) +
"|system:" + str(reportitem.system) +
"|headline:" + str(reportitem.headline) +
"|reportitem_subheadline:" + str(reportitem.reportitem_subheadline) +
"|reportitem_note:" + str(reportitem.reportitem_note)
)
class Serviceprovider(models.Model):
# primary key
serviceprovider_id = models.AutoField(primary_key=True)
# main entity information
serviceprovider_name = models.CharField(max_length=50, unique=True)
serviceprovider_note = models.TextField(blank=True, null=True)
# string representation
def __str__(self):
return self.serviceprovider_name
# define logger
def logger(serviceprovider, request_user, log_text):
stdlogger.info(
request_user +
log_text +
" serviceprovider_id:" + str(serviceprovider.serviceprovider_id) +
"|serviceprovider_name:" + str(serviceprovider.serviceprovider_name) +
"|serviceprovider_note:" + str(serviceprovider.serviceprovider_note)
)
class System(models.Model):
# primary key
system_id = models.AutoField(primary_key=True)
# foreign key(s)
systemstatus = models.ForeignKey('Systemstatus', on_delete=models.PROTECT)
analysisstatus = models.ForeignKey('Analysisstatus', on_delete=models.PROTECT, blank=True, null=True)
reason = models.ForeignKey('Reason', on_delete=models.PROTECT, blank=True, null=True)
recommendation = models.ForeignKey('Recommendation', on_delete=models.PROTECT, blank=True, null=True)
systemtype = models.ForeignKey('Systemtype', on_delete=models.PROTECT, blank=True, null=True)
ip = models.ManyToManyField('Ip', blank=True)
domain = models.ForeignKey('Domain', on_delete=models.PROTECT, blank=True, null=True)
os = models.ForeignKey('Os', on_delete=models.PROTECT, blank=True, null=True)
osarch = models.ForeignKey('Osarch', on_delete=models.PROTECT, blank=True, null=True)
host_system = models.ForeignKey('self', on_delete=models.PROTECT, blank=True, null=True)
company = models.ManyToManyField('Company', blank=True)
location = models.ForeignKey('Location', on_delete=models.PROTECT, blank=True, null=True)
serviceprovider = models.ForeignKey('Serviceprovider', on_delete=models.PROTECT, blank=True, null=True)
contact = models.ForeignKey('Contact', on_delete=models.PROTECT, blank=True, null=True)
tag = models.ManyToManyField('Tag', blank=True)
case = models.ManyToManyField('Case', blank=True)
# main entity information
system_uuid = models.UUIDField(editable=False, null=True, unique=True)
system_name = models.CharField(max_length=50)
system_dnssuffix = models.CharField(max_length=50, blank=True, null=True)
system_install_time = models.DateTimeField(blank=True, null=True)
system_lastbooted_time = models.DateTimeField(blank=True, null=True)
system_deprecated_time = models.DateTimeField(blank=True, null=True)
system_is_vm = models.NullBooleanField(blank=True, null=True)
# meta information
system_create_time = models.DateTimeField(auto_now_add=True)
system_modify_time = models.DateTimeField()
system_api_time = models.DateTimeField(null=True)
system_created_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='system_created_by')
system_modified_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='system_modified_by')
# define unique together
class Meta:
unique_together = ('system_name', 'domain', 'system_install_time')
# string representation
def __str__(self):
if self.system_install_time == None:
return '[%s] %s' % (str(self.system_id), self.system_name)
else:
installtime = self.system_install_time.strftime('%Y-%m-%d')
return '[%s] %s (%s)' % (str(self.system_id), self.system_name, installtime)
# define logger
def logger(system, request_user, log_text):
"""
        ManyToMany relationships don't get the default 'None' string if they are empty.
        So the default string is set to 'None' explicitly.
        If there are existing entities, their strings are concatenated and separated by commas instead.
"""
# get objects
ips = system.ip.all()
# create empty list
iplist = []
# set default string if there is no object at all
ipstring = 'None'
# iterate over objects
for ip in ips:
# append object to list
iplist.append(ip.ip_ip)
        # join list to comma separated string if there are any objects, else default string will remain
        if iplist:
            ipstring = ','.join(iplist)
if system.system_install_time != None:
# cast datetime object to string
installtime = system.system_install_time.strftime('%Y-%m-%d %H:%M:%S')
else:
# else set default string
installtime = 'None'
if system.system_lastbooted_time != None:
# cast datetime object to string
lastbootedtime = system.system_lastbooted_time.strftime('%Y-%m-%d %H:%M:%S')
else:
# else set default string
lastbootedtime = 'None'
if system.system_deprecated_time != None:
# cast datetime object to string
deprecatedtime = system.system_deprecated_time.strftime('%Y-%m-%d %H:%M:%S')
else:
# else set default string
deprecatedtime = 'None'
# get objects
companys = system.company.all()
# create empty list
companylist = []
# set default string if there is no object at all
companystring = 'None'
# iterate over objects
for company in companys:
# append object to list
companylist.append(company.company_name)
        # join list to comma separated string if there are any objects, else default string will remain
        if companylist:
            companystring = ','.join(companylist)
# get objects
tags = system.tag.all()
# create empty list
taglist = []
# set default string if there is no object at all
tagstring = 'None'
# iterate over objects
for tag in tags:
# append object to list
taglist.append(tag.tag_name)
        # join list to comma separated string if there are any objects, else default string will remain
        if taglist:
            tagstring = ','.join(taglist)
# get objects
cases = system.case.all()
# create empty list
caselist = []
# set default string if there is no object at all
casestring = 'None'
# iterate over objects
for case in cases:
# append object to list
caselist.append(case.case_name)
        # join list to comma separated string if there are any objects, else default string will remain
        if caselist:
            casestring = ','.join(caselist)
# finally write log
stdlogger.info(
request_user +
log_text +
" system_id:" + str(system.system_id) +
"|system_uuid:" + str(system.system_uuid) +
"|system_name:" + str(system) +
"|systemstatus:" + str(system.systemstatus) +
"|analyisstatus:" + str(system.analysisstatus) +
"|reason:" + str(system.reason) +
"|recommendation:" + str(system.recommendation) +
"|systemtype:" + str(system.systemtype) +
"|ip:" + ipstring +
"|domain:" + str(system.domain) +
"|system_dnssuffix:" + str(system.system_dnssuffix) +
"|os:" + str(system.os) +
"|osarch:" + str(system.osarch) +
"|system_install_time:" + installtime +
"|system_lastbooted_time:" + lastbootedtime +
"|system_deprecated_time:" + deprecatedtime +
"|system_is_vm:" + str(system.system_is_vm) +
"|host_system:" + str(system.host_system) +
"|company:" + companystring +
"|location:" + str(system.location) +
"|serviceprovider:" + str(system.serviceprovider) +
"|contact:" + str(system.contact) +
"|tag:" + tagstring +
"|case:" + casestring
)
class Systemstatus(models.Model):
# primary key
systemstatus_id = models.AutoField(primary_key=True)
# main entity information
systemstatus_name = models.CharField(max_length=30, unique=True)
systemstatus_note = models.TextField(blank=True, null=True)
# string representation
def __str__(self):
return self.systemstatus_name
# define logger
def logger(systemstatus, request_user, log_text):
stdlogger.info(
request_user +
log_text +
" systemstatus_id:" + str(systemstatus.systemstatus_id) +
"|systemstatus_name:" + str(systemstatus.systemstatus_name) +
"|systemstatus_note:" + str(systemstatus.systemstatus_note)
)
class Systemtype(models.Model):
# primary key
systemtype_id = models.AutoField(primary_key=True)
# main entity information
systemtype_name = models.CharField(max_length=50, unique=True)
# string representation
def __str__(self):
return self.systemtype_name
# define logger
def logger(systemtype, request_user, log_text):
stdlogger.info(
request_user +
log_text +
" systemtype_id:" + str(systemtype.systemtype_id) +
"|systemtype_name:" + str(systemtype.systemtype_name)
)
class Systemuser(models.Model):
# primary key
systemuser_id = models.AutoField(primary_key=True)
# foreign key(s)
system = models.ForeignKey('System', on_delete=models.CASCADE)
# main entity information
systemuser_name = models.CharField(max_length=50)
systemuser_lastlogon_time = models.DateTimeField(blank=True, null=True)
# define unique together
class Meta:
unique_together = ('system', 'systemuser_name')
# string representation
def __str__(self):
return '%s (%s)' % (self.systemuser_name, self.system)
# define logger
def logger(systemuser, request_user, log_text):
stdlogger.info(
request_user +
log_text +
" systemuser_id:" + str(systemuser.systemuser_id) +
"|system:" + str(systemuser.system) +
"|systemuser_name:" + str(systemuser.systemuser_name) +
"|systemuser_lastlogon_time:" + str(systemuser.systemuser_lastlogon_time)
)
class Tag(models.Model):
# primary key
tag_id = models.AutoField(primary_key=True)
# foreign key(s)
tagcolor = models.ForeignKey('Tagcolor', on_delete=models.PROTECT)
# main entity information
tag_name = models.CharField(max_length=50, unique=True)
tag_note = models.TextField(blank=True, null=True)
# meta information
tag_modified_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='tag_modified_by', blank=True, null=True)
# string representation
def __str__(self):
return self.tag_name
# define logger
def logger(tag, request_user, log_text):
stdlogger.info(
request_user +
log_text +
" tag_id:" + str(tag.tag_id) +
"|tag_name:" + str(tag.tag_name) +
"|tag_note:" + str(tag.tag_note) +
"|tagcolor:" + str(tag.tagcolor)
)
class Tagcolor(models.Model):
# primary key
tagcolor_id = models.AutoField(primary_key=True)
# main entity information
tagcolor_name = models.CharField(max_length=20, unique=True)
# string representation
def __str__(self):
return self.tagcolor_name
# define logger
def logger(tagcolor, request_user, log_text):
stdlogger.info(
request_user +
log_text +
" tagcolor_id:" + str(tagcolor.tagcolor_id) +
"|tagcolor_name:" + str(tagcolor.tagcolor_name)
)
class Task(models.Model):
# primary key
task_id = models.AutoField(primary_key=True)
# foreign key(s)
parent_task = models.ForeignKey('self', on_delete=models.PROTECT, blank=True, null=True)
taskname = models.ForeignKey('Taskname', on_delete=models.PROTECT)
taskpriority = models.ForeignKey('Taskpriority', on_delete=models.PROTECT)
taskstatus = models.ForeignKey('Taskstatus', on_delete=models.PROTECT)
system = models.ForeignKey('System', on_delete=models.CASCADE, blank=True, null=True)
task_assigned_to_user_id = models.ForeignKey(User, on_delete=models.PROTECT, blank=True, null=True, related_name='task_assigned_to')
tag = models.ManyToManyField('Tag', blank=True)
# main entity information
task_note = models.TextField(blank=True, null=True)
task_scheduled_time = models.DateTimeField(blank=True, null=True)
task_started_time = models.DateTimeField(blank=True, null=True)
task_finished_time = models.DateTimeField(blank=True, null=True)
task_due_time = models.DateTimeField(blank=True, null=True)
# meta information
task_create_time = models.DateTimeField(auto_now_add=True)
task_modify_time = models.DateTimeField(auto_now=True)
task_created_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='task_created_by')
task_modified_by_user_id = models.ForeignKey(User, on_delete=models.PROTECT, related_name='task_modified_by')
# string representation
def __str__(self):
return '[%s] %s (%s)' % (self.task_id, self.taskname, self.system)
# define logger
def logger(task, request_user, log_text):
if task.task_scheduled_time != None:
# cast datetime object to string
scheduledtime = task.task_scheduled_time.strftime('%Y-%m-%d %H:%M:%S')
else:
# else set default string
scheduledtime = 'None'
if task.task_started_time != None:
# cast datetime object to string
startedtime = task.task_started_time.strftime('%Y-%m-%d %H:%M:%S')
else:
# else set default string
startedtime = 'None'
if task.task_finished_time != None:
# cast datetime object to string
finishedtime = task.task_finished_time.strftime('%Y-%m-%d %H:%M:%S')
else:
# else set default string
finishedtime = 'None'
if task.task_due_time != None:
# cast datetime object to string
duetime = task.task_due_time.strftime('%Y-%m-%d %H:%M:%S')
# else set default string
else:
duetime = 'None'
# get objects
tags = task.tag.all()
# create empty list
taglist = []
# set default string if there is no object at all
tagstring = 'None'
# iterate over objects
for tag in tags:
# append object to list
taglist.append(tag.tag_name)
        # join list to comma separated string if there are any objects, else default string will remain
        if taglist:
            tagstring = ','.join(taglist)
# finally write log
stdlogger.info(
request_user +
log_text +
" task_id:" + str(task.task_id) +
"|parent_task:" + str(task.parent_task) +
"|taskname:" + str(task.taskname) +
"|taskpriority:" + str(task.taskpriority) +
"|taskstatus:" + str(task.taskstatus) +
"|system:" + str(task.system) +
"|task_assigned_to_user_id:" + str(task.task_assigned_to_user_id) +
"|task_note:" + str(task.task_note) +
"|task_scheduled_time:" + scheduledtime +
"|task_started_time:" + startedtime +
"|task_finished_time:" + finishedtime +
"|task_due_time:" + duetime +
"|tag:" + tagstring
)
class Taskname(models.Model):
# primary key
taskname_id = models.AutoField(primary_key=True)
# main entity information
taskname_name = models.CharField(max_length=50, unique=True)
# string representation
def __str__(self):
return self.taskname_name
# define logger
def logger(taskname, request_user, log_text):
stdlogger.info(
request_user +
log_text +
" taskname_id:" + str(taskname.taskname_id) +
"|taskname_name:" + str(taskname.taskname_name)
)
class Taskpriority(models.Model):
# primary key
taskpriority_id = models.AutoField(primary_key=True)
# main entity information
taskpriority_name = models.CharField(max_length=6, unique=True)
# string representation
def __str__(self):
return self.taskpriority_name
# define logger
def logger(taskpriority, request_user, log_text):
stdlogger.info(
request_user +
log_text +
" taskpriority_id:" + str(taskpriority.taskpriority_id) +
"|taskpriority_name:" + str(taskpriority.taskpriority_name)
)
class Taskstatus(models.Model):
# primary key
taskstatus_id = models.AutoField(primary_key=True)
# main entity information
taskstatus_name = models.CharField(max_length=50, unique=True)
# string representation
def __str__(self):
return self.taskstatus_name
# define logger
def logger(taskstatus, request_user, log_text):
stdlogger.info(
request_user +
log_text +
" taskstatus_id:" + str(taskstatus.taskstatus_id) +
"|taskstatus_name:" + str(taskstatus.taskstatus_name)
)
```
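The per-model `logger` helpers above are meant to be called from the views with the acting user and a short event label, mirroring the `ip.logger(...)` calls in `ips_views.py` below. A minimal sketch (the tag name and event label are illustrative, and a configured Django project is assumed):
```python
# Illustrative only: how a view would typically record a log line via Tag.logger().
from dfirtrack_main.models import Tag

def tags_edit_sketch(request):
    tag = Tag.objects.get(tag_name='phishing')          # illustrative lookup
    # first argument: acting user, second: short event label for the log line
    tag.logger(str(request.user), ' TAG_EDIT_EXECUTED')  # label is hypothetical
```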
#### File: dfirtrack_main/views/ips_views.py
```python
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.views.generic import DetailView, ListView
from dfirtrack_main.forms import IpForm
from dfirtrack_main.logger.default_logger import debug_logger
from dfirtrack_main.models import Ip
class Ips(LoginRequiredMixin, ListView):
login_url = '/login'
model = Ip
template_name = 'dfirtrack_main/ip/ips_list.html'
def get_queryset(self):
debug_logger(str(self.request.user), " IP_ENTERED")
return Ip.objects.order_by('ip_ip')
class IpsDetail(LoginRequiredMixin, DetailView):
login_url = '/login'
model = Ip
template_name = 'dfirtrack_main/ip/ips_detail.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
ip = self.object
ip.logger(str(self.request.user), " IPDETAIL_ENTERED")
return context
@login_required(login_url="/login")
def ips_add(request):
if request.method == 'POST':
form = IpForm(request.POST)
if form.is_valid():
ip = form.save(commit=False)
ip.save()
ip.logger(str(request.user), " IP_ADD_EXECUTED")
messages.success(request, 'IP address added')
return redirect('/ips')
else:
form = IpForm()
debug_logger(str(request.user), " IP_ADD_ENTERED")
return render(request, 'dfirtrack_main/ip/ips_add.html', {'form': form})
@login_required(login_url="/login")
def ips_add_popup(request):
if request.method == 'POST':
form = IpForm(request.POST)
if form.is_valid():
ip = form.save(commit=False)
ip.save()
ip.logger(str(request.user), " IP_ADD_POPUP_EXECUTED")
messages.success(request, 'IP address added')
return HttpResponse('<script type="text/javascript">window.close();</script>')
else:
form = IpForm()
debug_logger(str(request.user), " IP_ADD_POPUP_ENTERED")
return render(request, 'dfirtrack_main/ip/ips_add_popup.html', {'form': form})
@login_required(login_url="/login")
def ips_edit(request, pk):
ip = get_object_or_404(Ip, pk=pk)
if request.method == 'POST':
form = IpForm(request.POST, instance=ip)
if form.is_valid():
ip = form.save(commit=False)
ip.save()
ip.logger(str(request.user), " IP_EDIT_EXECUTED")
messages.success(request, 'IP address edited')
return redirect('/ips')
else:
form = IpForm(instance=ip)
ip.logger(str(request.user), " IP_EDIT_ENTERED")
return render(request, 'dfirtrack_main/ip/ips_edit.html', {'form': form})
``` |
{
"source": "0xflotus/DIY_particle_detector",
"score": 2
} |
#### File: diode_detector/diode_characterisation_and_simulation_plots/CV_diode_plots.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from os import listdir
from matplotlib.collections import PathCollection
from matplotlib.legend_handler import HandlerPathCollection
import decimal as D
mpl.rcParams['font.size']=13 #default font size
# enable to clean the regular micro pattern in the Neff plots; the pattern suggests the data
# is affected by rounding errors introduced by the instrument
FILTER_ROUNDING_ARTEFACTS = False
## constants
e0 = 8.854e-14 #F/cm
eSi = 11.9 #for Si
q = 1.602e-19 # charge in Coulomb = F/V
A = 0.0702 # diode area in cm^2
### helper functions
def oneOverX(x, a, b,c,d,e):
#return a * np.exp(-b * x) + c
return (a/(b * (x**c + d))) + e
def depth(C):
return eSi*e0*A/C
def Neff(oneOverCsq,dV):
oneOverCsq=oneOverCsq #*1e15
# following https://cds.cern.ch/record/1169276/files/04636908.pdf
# unit analysis suggests Neff output is in [1/cm]
dCdV = np.gradient(oneOverCsq,dV) #/1e15
return 2/(q * eSi * e0 * (A**2) * dCdV)
### read measurement series and prepare data
df = pd.DataFrame()
folder = "../data/high_resolution_CV/"
filepaths = [f for f in listdir(folder) if f.endswith('.csv')]
filepaths.sort()
count = 0
for file in filepaths:
with open(folder+file) as f:
head = [next(f) for x in range(15)]
deviceName = head[9].split()[2]
#print(deviceName)
anode = 'Vanode'
# using Decimal here to preserve the original precision of the numbers (reveals rounding errors in the data)
df_new = pd.read_csv(folder+file, skiprows=1407, sep=',' , converters={anode: D.Decimal, 'Cp': D.Decimal}, engine='c', skipinitialspace= True, float_precision='round_trip') #149
#df_new = pd.read_csv(folder+file, skiprows=1407, sep=',' , dtype={1:np.float128,2:np.float128,5:np.float128}, engine='c', skipinitialspace= True, float_precision='round_trip') #149
# dropping large precision numbers for testing (rounding problems of instrument?)
# c.f. patterns in Neff plots
# most of the regular micro pattern in Neff is filtered out by this!
if FILTER_ROUNDING_ARTEFACTS:
df_tmp = df_new
for index, row in df_tmp.iterrows():
s = str(row['Cp'])
# avoid shadowing the built-in len(); s.rindex(s[-1]) + 1 is simply the string length
if len(s) > 11:
df_new = df_new.drop(index)
print(index)
if count ==0:
df['VBias'] = df_new[anode].astype(dtype='float128')
df = df.assign(VBias = np.abs(df.VBias))
count+=1
df[deviceName] = df_new['Cp'].astype(dtype='float128')
# calculate errors
df_new['err']=0
df_new['err'] = df_new['err'].astype(dtype='float128')
df_new.loc[df_new.D <= 0.1, 'err'] = 0.11/100
df_new.loc[df_new.D > 0.1, 'err'] = (0.11 * np.sqrt(1+ (df_new.D**2)))/100
df[deviceName + '_err'] = df_new['err']
# calculate 1/C^2
df[deviceName + '_cc'] = 1.0/(df[deviceName].values * df[deviceName].values)
# <codecell>
def plotNeff(df, columns,colors):
fig = plt.figure()
plot = fig.add_subplot(111)
i =0
for column in df[columns]:
print(column)
# RAW C data to Neff
d_raw = depth(df[column].values)*10000 # in um
neff_raw = Neff(df[column+'_cc'], df.VBias)#df.VBias[1]-df.VBias[0] )#df.VBias.values)
cc_err_max = 1.0/((df[column]*(1+df[column+'_err'].values)) * (df[column]*(1+df[column+'_err'].values)))
cc_err_min = 1.0/((df[column]*(1-df[column+'_err'].values)) * (df[column]*(1-df[column+'_err'].values)))
neff_err_max = Neff(cc_err_max, df.VBias)
neff_err_min = Neff(cc_err_min, df.VBias)
plot.plot(d_raw,neff_err_max, linewidth=0.1,color=colors[i])
plot.plot(d_raw,neff_err_min, linewidth=0.1,color=colors[i])
plot.scatter(x=d_raw, y=neff_raw,s=1.5,marker='d', label=column, color=colors[i])
i+=1
txt = r"\n(errors are smaller than marker symbols)"
plot.set_xlabel('Depletion layer depth ['+u'\u03bc'+'m]') #+ txt )
plot.set_ylabel('Neff [cm$^{-3}$]')
plot.set_yscale('log')
plot.set_xscale('log')
def update1(handle, orig):
handle.update_from(orig)
handle.set_sizes([30])
plot.legend(handler_map={PathCollection : HandlerPathCollection(update_func=update1)},fontsize=10,scatterpoints=1,loc=4)
plot.set_xticklabels(list(map(str, [0.1,1,10,100]))) # WARNING: adapt in case of other changes
fig.tight_layout(pad=0.2)
def plotCV(df,columns,colors):
### plot C-V curve
fig = plt.figure()
plot = fig.add_subplot(111)
i = 0
for column in df[columns]:
plot.errorbar(df.VBias,df[column],yerr=df[column]*df[column+'_err'].values,fmt='s',markeredgewidth=1,markersize=3,label=column,markerfacecolor='none',color='none', markeredgecolor=colors[i])
i +=1
txt = r"\n(errors are smaller than marker symbols)"
plot.set_ylabel('Capacitance [pF]', fontsize=14)
plot.set_xlabel('Reverse bias voltage [|V|]', fontsize=14)#+ txt )
plot.set_yscale('log')
plot.set_xscale('log')
def update2(handle, orig):
handle.update_from(orig)
handle.set_sizes([100])
plot.legend(handler_map={PathCollection : HandlerPathCollection(update_func=update2)},fontsize=10,scatterpoints=5)
plot.grid(which='minor',linewidth=0.5)
plot.grid(which='major',linewidth=1.0)
plot.set_xticklabels(list(map(str, [0.001,0.01,0.1,1,10]))) # WARNING: adapt in case of other changes
plot.set_yticklabels(list(map(str, [0.1,1,10,100,1000]))) # WARNING: adapt in case of other changes
fig.tight_layout(pad=0.2)
def plotDepth(df,columns,colors):
fig = plt.figure()
plot = fig.add_subplot(111)
i = 0
for column in df[columns]:
plot.plot(df.VBias,depth(df[column])*10000,label=column, color=colors[i])
i +=1
plot.set_ylabel('Depletion layer depth [' + u'\u03bc' + "m]", fontsize=14)
plot.set_xlabel('Reverse bias voltage [|V|]', fontsize=14)
plot.get_xaxis().set_minor_locator(mpl.ticker.AutoMinorLocator())
plot.get_yaxis().set_minor_locator(mpl.ticker.AutoMinorLocator())
plot.legend()
plot.grid(True,which='minor',linewidth=0.5)
plot.grid(True,which='major',linewidth=1.0)
plot.set_xlim(-1,25)
fig.tight_layout(pad=0.2)
df = df.replace(-np.inf, np.nan)
df = df.dropna()
# <codecell>
plotNeff(df,['BPX61-1','BPX61-2','BPX61-3','BPW34-1','BPW34-2','BPW34-3','BPW34F-1','BPW34F-2','BPW34F-3','BPW34FA-1'],
['firebrick','red','salmon','olive','yellowgreen','lawngreen','blue','royalblue','dodgerblue','lightblue'])
# <codecell>
plotDepth(df,['BPW34-3','BPX61-2','BPW34F-3'],
['lawngreen','red','dodgerblue'])
# <codecell>
plotCV(df,['BPX61-1','BPX61-2','BPX61-3','BPW34-1','BPW34-2','BPW34-3','BPW34F-1','BPW34F-2','BPW34F-3','BPW34FA-1'],
['firebrick','red','salmon','olive','yellowgreen','lawngreen','blue','royalblue','dodgerblue','lightblue'])
```
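A quick numeric sanity check of the helper formulas above, using the same constants defined at the top of the script (the 10 pF capacitance is an arbitrary illustration, not a measured value):
```python
# Sanity check of the parallel-plate relation used by depth() (illustrative value only).
e0, eSi, A = 8.854e-14, 11.9, 0.0702   # F/cm, relative permittivity of Si, diode area in cm^2
C = 10e-12                             # assume a 10 pF capacitance reading, in farads
d_cm = eSi * e0 * A / C                # depletion layer depth in cm
print('depletion depth ~ %.1f um' % (d_cm * 1e4))   # roughly 74 um for 10 pF
```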
#### File: DIY_particle_detector/data_recording_software/pulse_recorder.py
```python
THL = -300 # default threshold value, can be modified
# decreasing (smaller absolute value) will increase recorded data size considerably
# increasing (larger absolute value) will help in noisy EM environments
SAVE_DATA = True # save recorded pulses in .pkl file for later analysis
DATA_FOLDER = "./data" # folder for saving recorded data files (create folder if missing)
ENABLE_SONIFICATION = False # (requires pyo module, https://github.com/belangeo/pyo)
MIN_ALPHA_PEAK = -1243 # threshold to distinguish between alpha and electron pulses
# as obtained from reference measurements
import sys
import time
from pyqtgraph.Qt import QtCore, QtGui, QtWidgets
import numpy as np
import pyqtgraph as pg
import pyaudio
import pandas as pd
import datetime
import random
from functools import partial
from scipy.signal import argrelextrema
if ENABLE_SONIFICATION: import pyo
RATE = 48000 # audio sampling rate, should stay like this.
# other rates might require new energy calibration
# and will in any case require modification of analysis scripts
FRAME_SIZE = 4096 # waveform frame size in samples; could be modified, but not tested
if ENABLE_SONIFICATION:
s = pyo.Server(duplex=0).boot()
s.deactivateMidi()
s.setOutputDevice(1)
pyo.pa_list_devices()
s.start()
tab_m = pyo.HarmTable([1,0,0,0,0,.3,0,0,0,0,0,.2,0,0,0,0,0,.1,0,0,0,0,.05]).normalize()
tab_p = pyo.HarmTable([1,0,.33,0,.2,0,.143,0,.111])
class Ring:
def __init__(self, fport=250, fmod=100, amp=.3):
self.mod = pyo.Osc(tab_m, freq=fmod, mul=amp)
self.port = pyo.Osc(tab_p, freq=fport, mul=self.mod)
def out(self):
self.port.out()
return self
def sig(self):
return self.port
class Scope(QtGui.QMainWindow):
def __init__(self, parent=None):
global app
QtGui.QMainWindow.__init__(self, parent)
#super(Scope, self).__init__(parent)
#QtGui.QApplication.setGraphicsSystem("raster")
#try:
# self.app = QtGui.QApplication(sys.argv)
#except RuntimeError:
# self.app = QtGui.QApplication.instance()
self.save_data = SAVE_DATA
self.sound = ENABLE_SONIFICATION # trigger sounds for each pulse or not
self.app = app
self.app.aboutToQuit.connect(self.close)
self.pcounter=0
self.creation_time=datetime.datetime.now()
self.df = pd.DataFrame(columns = ['ts','ptype'])
self.ptypes = pd.Series(["alpha", "beta", "betagamma", "x-ray", "muon" ,"unknown"], dtype="category")
self.thl = THL
self.hl = -1243 # green cursor, highlight line for measuring only
self.peaks=[]
self.paused = False
if ENABLE_SONIFICATION:
# setup some wild Karplus-Strong oscillator
self.lf = pyo.Sine(.03, mul=.2, add=1)
self.rg = Ring(fport = [random.choice([62.5,125,187.5,250]) * random.uniform(.99,1.01) for i in range(8)],
fmod = self.lf * [random.choice([25,50,75,100]) * random.uniform(.99,1.01) for i in range(8)],
amp = 0.1)
self.env = pyo.Adsr(attack=0.01, decay=0.1, sustain=0.5, release=1.5, dur=5, mul=0.1)
self.res = pyo.Waveguide(self.rg.sig(), freq=[30.1,60.05,119.7,181,242.5,303.33], dur=30, mul=1*self.env).out()
def audio_callback(in_data, frame_count, time_info, status):
now = time.time()
samples = np.frombuffer(in_data, dtype=np.int16)
peak = samples.min()
if peak < self.thl:
t = datetime.datetime.fromtimestamp(now)  # pd.datetime is a deprecated alias for datetime.datetime
print("* ", t, end="")
pulse = pd.DataFrame()
pulse = pulse.assign(ts=[t])
if peak < MIN_ALPHA_PEAK:
pulse = pulse.assign(ptype=[self.ptypes[0]]) #alpha
print(" alpha ", end="")
else:
pulse = pulse.assign(ptype=[self.ptypes[1]]) #beta/electron
print(" elect ", end="")
if self.sound:
self.lf.setMul(abs(int(peak)/16000))
self.env.dur = abs(int(peak)/500)
self.env.play()
print(self.pcounter, " ", end="")
print(peak)
minima=argrelextrema(samples, np.less)
self.peaks.append(sum(minima[0])/len(minima[0]/2))
self.peaks = self.peaks[-100:] #only keep the last 100 for averaging
pulse = pulse.assign(pulse=[samples])
if self.save_data:
self.df = self.df.append(pulse, ignore_index=True,sort=False)
self.pcounter+=1
# calculate pulse rate in counts per second
dt = (now-self.lastupdate)
if dt <= 0:
dt = 0.000000000001
cps2 = 1.0 / dt
self.lastupdate = now
self.cps = self.cps * 0.9 + cps2 * 0.1 # simple weighted average
tx = 'Mean pulse rate: {cps:.1f} CPS'.format(cps=self.cps )
self.label.setText(tx + ", THL (red): " + str(self.thl) + ", cursor(green): " + str(self.hl) + ", (avg peak: "+str(round(sum(self.peaks)/100,1)) + ")")
self.ydata=np.frombuffer(in_data, dtype=np.int16)
self.frame_counter+=frame_count
if not self.paused:
self.h2.setData(self.ydata)
self.thlp.setData(FRAME_SIZE*[self.thl])
self.hlp.setData(FRAME_SIZE*[self.hl]) #draw green highlight line
return (in_data, pyaudio.paContinue)
#### Create Gui Elements ###########
self.mainbox = QtGui.QWidget()
self.setCentralWidget(self.mainbox)
self.mainbox.setLayout(QtGui.QVBoxLayout())
self.canvas = pg.GraphicsLayoutWidget()
self.mainbox.layout().addWidget(self.canvas)
self.label = QtGui.QLabel()
self.mainbox.layout().addWidget(self.label)
self.otherplot = self.canvas.addPlot()
self.h2 = self.otherplot.plot(pen='y')
self.thlp = self.otherplot.plot(pen='r')
self.hlp = self.otherplot.plot(pen='g')
self.p = pyaudio.PyAudio()
self.stream = self.p.open(format=pyaudio.paInt16, channels=1, rate=RATE, input=True, frames_per_buffer=FRAME_SIZE,stream_callback=audio_callback)
#### Set Data #####################
self.x = np.linspace(0,50., num=100)
self.X,self.Y = np.meshgrid(self.x,self.x)
self.frame_counter = 0
self.cps = 0.
self.lastupdate = time.time()
# keyboard shortcuts
self.sh = QtGui.QShortcut(QtGui.QKeySequence("+"), self, self.thl_down)
self.sh.setContext(QtCore.Qt.ApplicationShortcut)
self.sh2 = QtGui.QShortcut(QtGui.QKeySequence("-"), self, self.thl_up)
self.sh2.setContext(QtCore.Qt.ApplicationShortcut)
self.sh3 = QtGui.QShortcut(QtGui.QKeySequence("7"), self, partial(self.hl_up,1))
self.sh3.setContext(QtCore.Qt.ApplicationShortcut)
self.sh4 = QtGui.QShortcut(QtGui.QKeySequence("1"), self, partial(self.hl_down,1))
self.sh4.setContext(QtCore.Qt.ApplicationShortcut)
self.sh5 = QtGui.QShortcut(QtGui.QKeySequence("8"), self, partial(self.hl_up,10))
self.sh5.setContext(QtCore.Qt.ApplicationShortcut)
self.sh6 = QtGui.QShortcut(QtGui.QKeySequence("2"), self, partial(self.hl_down,10))
self.sh6.setContext(QtCore.Qt.ApplicationShortcut)
self.sh7 = QtGui.QShortcut(QtGui.QKeySequence("9"), self, partial(self.hl_up,100))
self.sh7.setContext(QtCore.Qt.ApplicationShortcut)
self.sh8 = QtGui.QShortcut(QtGui.QKeySequence("3"), self, partial(self.hl_down,100))
self.sh8.setContext(QtCore.Qt.ApplicationShortcut)
self.sh9 = QtGui.QShortcut(QtGui.QKeySequence(" "), self, self.toggle_pause)
self.sh9.setContext(QtCore.Qt.ApplicationShortcut)
#self.sh3 = QtGui.QShortcut(QtGui.QKeySequence("Ctrl+C"), self.close)
#self.sh.setContext(QtCore.Qt.ApplicationShortcut)
#### Start #####################
self.stream.start_stream()
def toggle_pause(self):
self.paused = not self.paused
def thl_up(self):
self.thl+=1
#print(self.thl)
def thl_down(self):
#print(self.thl)
self.thl-=1
def hl_up(self,i):
self.hl+=i
#print(self.hl)
def hl_down(self,i):
#print(self.thl)
self.hl-=i
def close_stream(self):
self.stream.close()
self.stream.stop_stream()
print("Stream closed")
def close(self):
timediff = datetime.datetime.now() - self.creation_time
self.close_stream()
if self.save_data and self.pcounter > 0:
print("Saving data to file...")
#print(self.df.to_string)
td_str = '-'.join(str(timediff).split(':')[:2])
_ = self.df.to_pickle(DATA_FOLDER + self.creation_time.strftime("/pulses_%Y-%m-%d_%H-%M-%S") + "___" + str(self.pcounter) + "___" + td_str + ".pkl")
print("Saving completed.")
print()
print('Number of recorded waveforms:', self.pcounter, "of",self.frame_counter, "total audio frames")
print('at least', len(self.df[self.df['ptype'] == 'alpha']) ,"alphas and")
print('at least', len(self.df[self.df['ptype'] == 'beta']) ,"electrons/betas were detected")
self.p.terminate()
app = QtGui.QApplication([])
app.closeAllWindows()
app.quit()
app.exit()
print('done.')
if __name__ == '__main__':
#app = pg.mkQApp()
# app = QtWidgets.QApplication(sys.argv)
if not QtWidgets.QApplication.instance():
app = QtWidgets.QApplication(sys.argv)
else:
app = QtWidgets.QApplication.instance()
mainWin = Scope()
mainWin.show()
#if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
app.exec_()
if ENABLE_SONIFICATION: s.stop()
``` |
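The recorder persists a pandas DataFrame with the columns `ts`, `ptype` and `pulse` (the raw int16 waveform of one audio frame) via `to_pickle`. A minimal sketch of reading such a file back for offline inspection (the filename is a placeholder following the pattern used in `close()`):
```python
# Minimal sketch: inspect a recorded pulse file (path/filename are placeholders).
import pandas as pd

df = pd.read_pickle('./data/pulses_2019-01-01_12-00-00___42___0-10.pkl')
print(len(df), 'recorded pulses')
print(df['ptype'].value_counts())     # how many were classified as 'alpha' vs 'beta'
waveform = df['pulse'].iloc[0]        # int16 numpy array, one audio frame long
print(waveform.min())                 # the negative peak used for classification
```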
{
"source": "0xflotus/dvc",
"score": 2
} |
#### File: dvc/remote/s3.py
```python
from __future__ import unicode_literals
import os
import threading
try:
import boto3
except ImportError:
boto3 = None
import dvc.logger as logger
from dvc.utils.compat import urlparse
from dvc.progress import progress
from dvc.config import Config
from dvc.remote.base import RemoteBase
from dvc.exceptions import DvcException
class Callback(object):
def __init__(self, name, total):
self.name = name
self.total = total
self.current = 0
self.lock = threading.Lock()
def __call__(self, byts):
with self.lock:
self.current += byts
progress.update_target(self.name, self.current, self.total)
class RemoteS3(RemoteBase):
scheme = "s3"
REGEX = r"^s3://(?P<path>.*)$"
REQUIRES = {"boto3": boto3}
PARAM_CHECKSUM = "etag"
def __init__(self, project, config):
super(RemoteS3, self).__init__(project, config)
storagepath = "s3://{}".format(
config.get(Config.SECTION_AWS_STORAGEPATH, "").lstrip("/")
)
self.url = config.get(Config.SECTION_REMOTE_URL, storagepath)
self.region = os.environ.get("AWS_DEFAULT_REGION") or config.get(
Config.SECTION_AWS_REGION
)
self.profile = os.environ.get("AWS_PROFILE") or config.get(
Config.SECTION_AWS_PROFILE
)
self.endpoint_url = config.get(Config.SECTION_AWS_ENDPOINT_URL)
self.use_ssl = config.get(Config.SECTION_AWS_USE_SSL, True)
shared_creds = config.get(Config.SECTION_AWS_CREDENTIALPATH)
if shared_creds:
os.environ.setdefault("AWS_SHARED_CREDENTIALS_FILE", shared_creds)
parsed = urlparse(self.url)
self.bucket = parsed.netloc
self.prefix = parsed.path.lstrip("/")
self.path_info = {"scheme": self.scheme, "bucket": self.bucket}
@staticmethod
def compat_config(config):
ret = config.copy()
url = "s3://" + ret.pop(Config.SECTION_AWS_STORAGEPATH, "").lstrip("/")
ret[Config.SECTION_REMOTE_URL] = url
return ret
@property
def s3(self):
session = boto3.session.Session(
profile_name=self.profile, region_name=self.region
)
return session.client(
"s3", endpoint_url=self.endpoint_url, use_ssl=self.use_ssl
)
def get_etag(self, bucket, path):
try:
obj = self.s3.head_object(Bucket=bucket, Key=path)
except Exception:
raise DvcException(
"s3://{}/{} does not exist".format(bucket, path)
)
return obj["ETag"].strip('"')
def save_info(self, path_info):
if path_info["scheme"] != "s3":
raise NotImplementedError
return {
self.PARAM_CHECKSUM: self.get_etag(
path_info["bucket"], path_info["path"]
)
}
def copy(self, from_info, to_info, s3=None):
s3 = s3 if s3 else self.s3
source = {"Bucket": from_info["bucket"], "Key": from_info["path"]}
# use the client passed in (or the default created above) instead of building a new one
s3.copy(source, to_info["bucket"], to_info["path"])
def save(self, path_info):
if path_info["scheme"] != "s3":
raise NotImplementedError
etag = self.get_etag(path_info["bucket"], path_info["path"])
path = self.checksum_to_path(etag)
to_info = {"scheme": "s3", "bucket": self.bucket, "path": path}
self.copy(path_info, to_info)
return {self.PARAM_CHECKSUM: etag}
def remove(self, path_info):
if path_info["scheme"] != "s3":
raise NotImplementedError
logger.debug(
"Removing s3://{}/{}".format(
path_info["bucket"], path_info["path"]
)
)
self.s3.delete_object(
Bucket=path_info["bucket"], Key=path_info["path"]
)
def _list_paths(self, bucket, prefix):
s3 = self.s3
kwargs = {"Bucket": bucket, "Prefix": prefix}
while True:
# NOTE: list_objects_v2() is 90% faster than head_object [1]
#
# [1] https://www.peterbe.com/plog/
# fastest-way-to-find-out-if-a-file-exists-in-s3
resp = s3.list_objects_v2(**kwargs)
contents = resp.get("Contents", None)
if not contents:
break
for obj in contents:
yield obj["Key"]
token = resp.get("NextContinuationToken", None)
if not token:
break
kwargs["ContinuationToken"] = token
def list_cache_paths(self):
return self._list_paths(self.bucket, self.prefix)
def exists(self, path_info):
assert not isinstance(path_info, list)
assert path_info["scheme"] == "s3"
paths = self._list_paths(path_info["bucket"], path_info["path"])
return any(path_info["path"] == path for path in paths)
def upload(self, from_infos, to_infos, names=None):
names = self._verify_path_args(to_infos, from_infos, names)
s3 = self.s3
for from_info, to_info, name in zip(from_infos, to_infos, names):
if to_info["scheme"] != "s3":
raise NotImplementedError
if from_info["scheme"] != "local":
raise NotImplementedError
logger.debug(
"Uploading '{}' to '{}/{}'".format(
from_info["path"], to_info["bucket"], to_info["path"]
)
)
if not name:
name = os.path.basename(from_info["path"])
total = os.path.getsize(from_info["path"])
cb = Callback(name, total)
try:
s3.upload_file(
from_info["path"],
to_info["bucket"],
to_info["path"],
Callback=cb,
)
except Exception:
msg = "failed to upload '{}'".format(from_info["path"])
logger.error(msg)
continue
progress.finish_target(name)
def download(
self,
from_infos,
to_infos,
no_progress_bar=False,
names=None,
resume=False,
):
names = self._verify_path_args(from_infos, to_infos, names)
s3 = self.s3
for to_info, from_info, name in zip(to_infos, from_infos, names):
if from_info["scheme"] != "s3":
raise NotImplementedError
if to_info["scheme"] == "s3":
self.copy(from_info, to_info, s3=s3)
continue
if to_info["scheme"] != "local":
raise NotImplementedError
msg = "Downloading '{}/{}' to '{}'".format(
from_info["bucket"], from_info["path"], to_info["path"]
)
logger.debug(msg)
tmp_file = self.tmp_file(to_info["path"])
if not name:
name = os.path.basename(to_info["path"])
self._makedirs(to_info["path"])
try:
if no_progress_bar:
cb = None
else:
total = s3.head_object(
Bucket=from_info["bucket"], Key=from_info["path"]
)["ContentLength"]
cb = Callback(name, total)
s3.download_file(
from_info["bucket"],
from_info["path"],
tmp_file,
Callback=cb,
)
except Exception:
msg = "failed to download '{}/{}'".format(
from_info["bucket"], from_info["path"]
)
logger.error(msg)
continue
os.rename(tmp_file, to_info["path"])
if not no_progress_bar:
progress.finish_target(name)
```
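`_list_paths` above pages through `list_objects_v2` results with a continuation token. The same pattern, written as a standalone boto3 snippet (bucket and prefix are placeholders):
```python
# Standalone sketch of the list_objects_v2 pagination used by _list_paths
# (bucket/prefix are placeholders).
import boto3

s3 = boto3.client('s3')
kwargs = {'Bucket': 'my-bucket', 'Prefix': 'some/prefix/'}
while True:
    resp = s3.list_objects_v2(**kwargs)
    for obj in resp.get('Contents', []):
        print(obj['Key'])
    token = resp.get('NextContinuationToken')
    if not token:
        break
    kwargs['ContinuationToken'] = token
```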
#### File: dvc/utils/compat.py
```python
import sys
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = _ver[0] == 2
#: Python 3.x?
is_py3 = _ver[0] == 3
# NOTE: cast_bytes_py2 is taken from https://github.com/ipython/ipython_genutils
# simplified version of ipython_genutils/encoding.py
DEFAULT_ENCODING = sys.getdefaultencoding()
def no_code(x, encoding=None):
return x
def encode(u, encoding=None):
encoding = encoding or DEFAULT_ENCODING
return u.encode(encoding, "replace")
def cast_bytes(s, encoding=None):
if not isinstance(s, bytes):
return encode(s, encoding)
return s
if is_py2:
from urlparse import urlparse, urljoin # noqa: F401
from StringIO import StringIO # noqa: F401
from BaseHTTPServer import HTTPServer # noqa: F401
from SimpleHTTPServer import SimpleHTTPRequestHandler # noqa: F401
import ConfigParser # noqa: F401
from io import open # noqa: F401
builtin_str = str # noqa: F821
bytes = str # noqa: F821
str = unicode # noqa: F821
basestring = basestring # noqa: F821
numeric_types = (int, long, float) # noqa: F821
integer_types = (int, long) # noqa: F821
input = raw_input # noqa: F821
cast_bytes_py2 = cast_bytes
elif is_py3:
from urllib.parse import urlparse, urljoin # noqa: F401
from io import StringIO # noqa: F401
from http.server import ( # noqa: F401
HTTPServer, # noqa: F401
SimpleHTTPRequestHandler, # noqa: F401
) # noqa: F401
import configparser as ConfigParser # noqa: F401
builtin_str = str # noqa: F821
str = str # noqa: F821
bytes = bytes # noqa: F821
basestring = (str, bytes) # noqa: F821
numeric_types = (int, float) # noqa: F821
integer_types = (int,) # noqa: F821
input = input # noqa: F821
open = open # noqa: F821
cast_bytes_py2 = no_code
``` |
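Callers are expected to import the aliased names from this module instead of branching on the interpreter version themselves. A small usage sketch:
```python
# Usage sketch: consume the shim instead of checking sys.version_info at call sites.
from dvc.utils.compat import str, urlparse, StringIO

parsed = urlparse('s3://bucket/path/to/cache')
print(parsed.scheme, parsed.netloc, parsed.path)   # s3 bucket /path/to/cache
buf = StringIO()
buf.write(str('behaves the same under Python 2 and 3'))
```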
{
"source": "0xflotus/EasyRec",
"score": 2
} |
#### File: python/test/hpo_test.py
```python
import json
import logging
import os
import time
import numpy as np
import tensorflow as tf
from easy_rec.python.utils import config_util
from easy_rec.python.utils import hpo_util
from easy_rec.python.utils import test_utils
if tf.__version__ >= '2.0':
gfile = tf.compat.v1.gfile
from tensorflow.core.protobuf import config_pb2
ConfigProto = config_pb2.ConfigProto
GPUOptions = config_pb2.GPUOptions
else:
gfile = tf.gfile
GPUOptions = tf.GPUOptions
ConfigProto = tf.ConfigProto
class HPOTest(tf.test.TestCase):
def __init__(self, methodName='HPOTest'):
super(HPOTest, self).__init__(methodName=methodName)
self._metric_data_path = 'data/test/hpo_test/eval_val/*.tfevents.*'
def test_get_metric(self):
vals = hpo_util.get_all_eval_result(self._metric_data_path)
logging.info('eval result num = %d' % len(vals))
logging.info('eval result[0] = %s' % json.dumps(vals[0]))
def load_config(self, config_path):
with gfile.GFile(config_path, 'r') as fin:
return json.load(fin)['param']
def test_save_eval_metrics(self):
test_dir = test_utils.get_tmp_dir()
tmp_file = os.path.join(test_dir,
'easy_rec_hpo_test_%d.metric' % time.time())
hpo_util.save_eval_metrics('data/test/hpo_test/', tmp_file, False)
test_utils.clean_up(test_dir)
def test_edit_config(self):
tmp_file = 'samples/model_config/deepfm_multi_cls_on_avazu_ctr.config'
tmp_config = config_util.get_configs_from_pipeline_file(tmp_file)
tmp_file = 'samples/hpo/hpo_param.json'
tmp_config = config_util.edit_config(tmp_config, self.load_config(tmp_file))
assert tmp_config.feature_config.features[0].embedding_dim == 120
def test_edit_config_v2(self):
tmp_file = 'samples/model_config/deepfm_multi_cls_on_avazu_ctr.config'
tmp_config = config_util.get_configs_from_pipeline_file(tmp_file)
tmp_file = 'samples/hpo/hpo_param_v2.json'
tmp_config = config_util.edit_config(tmp_config, self.load_config(tmp_file))
for tmp_fea in tmp_config.feature_configs:
if tmp_fea.input_names[0] == 'site_id':
assert tmp_fea.embedding_dim == 32
else:
assert tmp_fea.embedding_dim == 16
def test_edit_config_v3(self):
tmp_file = 'samples/model_config/deepfm_multi_cls_on_avazu_ctr.config'
tmp_config = config_util.get_configs_from_pipeline_file(tmp_file)
tmp_file = 'samples/hpo/hpo_param_v3.json'
tmp_config = config_util.edit_config(tmp_config, self.load_config(tmp_file))
for i, tmp_fea in enumerate(tmp_config.feature_configs):
if i >= 10 and i < 20:
assert tmp_fea.embedding_dim == 37
else:
assert tmp_fea.embedding_dim == 16
def test_edit_config_v4(self):
tmp_file = 'samples/model_config/deepfm_multi_cls_on_avazu_ctr.config'
tmp_config = config_util.get_configs_from_pipeline_file(tmp_file)
tmp_file = 'samples/hpo/hpo_param_v4.json'
tmp_config = config_util.edit_config(tmp_config, self.load_config(tmp_file))
for i, tmp_fea in enumerate(tmp_config.feature_configs):
if i < 15:
assert tmp_fea.embedding_dim == 37
else:
assert tmp_fea.embedding_dim == 16
def test_edit_config_v5(self):
tmp_file = 'samples/model_config/deepfm_multi_cls_on_avazu_ctr.config'
tmp_config = config_util.get_configs_from_pipeline_file(tmp_file)
tmp_file = 'samples/hpo/hpo_param_v5.json'
tmp_config = config_util.edit_config(tmp_config, self.load_config(tmp_file))
for i, tmp_fea in enumerate(tmp_config.feature_configs):
if i >= 5:
assert tmp_fea.embedding_dim == 37
else:
assert tmp_fea.embedding_dim == 16
def test_edit_config_v51(self):
tmp_file = 'samples/model_config/deepfm_multi_cls_on_avazu_ctr.config'
tmp_config = config_util.get_configs_from_pipeline_file(tmp_file)
tmp_file = 'samples/hpo/hpo_param_v51.json'
tmp_config = config_util.edit_config(tmp_config, self.load_config(tmp_file))
for i, tmp_fea in enumerate(tmp_config.feature_configs):
if i == 5:
assert tmp_fea.embedding_dim == 37
def test_edit_config_v6(self):
tmp_file = 'samples/model_config/deepfm_multi_cls_on_avazu_ctr.config'
tmp_config = config_util.get_configs_from_pipeline_file(tmp_file)
tmp_file = 'samples/hpo/hpo_param_v6.json'
tmp_config = config_util.edit_config(tmp_config, self.load_config(tmp_file))
for i, tmp_fea in enumerate(tmp_config.feature_configs):
if tmp_fea.input_names[0] >= 'site':
assert tmp_fea.embedding_dim == 32, 'input_name = %s %d' % (
tmp_fea.input_names[0], tmp_fea.embedding_dim)
else:
assert tmp_fea.embedding_dim == 16
def test_edit_config_v7(self):
tmp_file = 'samples/model_config/deepfm_multi_cls_on_avazu_ctr.config'
tmp_config = config_util.get_configs_from_pipeline_file(tmp_file)
tmp_file = 'samples/hpo/hpo_param_v7.json'
tmp_config = config_util.edit_config(tmp_config, self.load_config(tmp_file))
for i, tmp_fea in enumerate(tmp_config.feature_configs):
if tmp_fea.input_names[0] == 'c21':
assert len(tmp_fea.boundaries) == 4 and np.abs(tmp_fea.boundaries[0] -
10.0) < 1e-5
def test_edit_config_v71(self):
tmp_file = 'samples/model_config/deepfm_multi_cls_on_avazu_ctr.config'
tmp_config = config_util.get_configs_from_pipeline_file(tmp_file)
tmp_file = 'samples/hpo/hpo_param_v71.json'
tmp_config = config_util.edit_config(tmp_config, self.load_config(tmp_file))
for i, tmp_fea in enumerate(tmp_config.feature_configs):
if tmp_fea.input_names[0] == 'c21':
assert len(tmp_fea.boundaries) == 4 and np.abs(tmp_fea.boundaries[0] -
10.0) < 1e-5
def test_edit_config_v8(self):
tmp_file = 'samples/model_config/deepfm_multi_cls_on_avazu_ctr.config'
tmp_config = config_util.get_configs_from_pipeline_file(tmp_file)
tmp_file = 'samples/hpo/hpo_param_v8.json'
tmp_config = config_util.edit_config(tmp_config, self.load_config(tmp_file))
for i, tmp_fea in enumerate(tmp_config.feature_configs):
if tmp_fea.input_names[0] == 'c21':
assert len(tmp_fea.boundaries) == 4 and np.abs(tmp_fea.boundaries[0] -
4.0) < 1e-5
assert tmp_fea.embedding_dim == 32
def test_edit_config_v81(self):
tmp_file = 'samples/model_config/deepfm_multi_cls_on_avazu_ctr.config'
tmp_config = config_util.get_configs_from_pipeline_file(tmp_file)
tmp_file = 'samples/hpo/hpo_param_v81.json'
tmp_config = config_util.edit_config(tmp_config, self.load_config(tmp_file))
for i, tmp_fea in enumerate(tmp_config.feature_configs):
if tmp_fea.feature_type == tmp_fea.RawFeature:
assert tmp_fea.embedding_dim == 24
def test_edit_config_v9(self):
tmp_file = 'samples/model_config/deepfm_multi_cls_on_avazu_ctr.config'
tmp_config = config_util.get_configs_from_pipeline_file(tmp_file)
tmp_file = 'samples/hpo/hpo_param_v9.json'
tmp_config = config_util.edit_config(tmp_config, self.load_config(tmp_file))
assert tmp_config.train_config.fine_tune_checkpoint == \
'oss://easy-rec/test/experiment/ctr_v93/model.ckpt-1000'
def test_edit_config_v10(self):
tmp_file = 'samples/model_config/deepfm_multi_cls_on_avazu_ctr.config'
tmp_config = config_util.get_configs_from_pipeline_file(tmp_file)
tmp_file = 'samples/hpo/hpo_param_v10.json'
tmp_config = config_util.edit_config(tmp_config, self.load_config(tmp_file))
for i, tmp_fea in enumerate(tmp_config.feature_configs):
if tmp_fea.input_names[0] == 'c21':
assert len(tmp_fea.boundaries) == 4 and np.abs(tmp_fea.boundaries[0] -
4.0) < 1e-5
assert tmp_fea.embedding_dim == 32
def test_edit_config_v11(self):
tmp_file = 'samples/model_config/deepfm_multi_cls_on_avazu_ctr.config'
tmp_config = config_util.get_configs_from_pipeline_file(tmp_file)
tmp_file = 'samples/hpo/hpo_param_v11.json'
tmp_config = config_util.edit_config(tmp_config, self.load_config(tmp_file))
for i, tmp_fea in enumerate(tmp_config.feature_configs):
if tmp_fea.input_names[0] == 'c21':
assert len(tmp_fea.boundaries) == 4 and np.abs(tmp_fea.boundaries[0] -
10.0) < 1e-5
def test_edit_config_v12(self):
tmp_file = 'samples/model_config/deepfm_multi_cls_on_avazu_ctr.config'
tmp_config = config_util.get_configs_from_pipeline_file(tmp_file)
tmp_file = 'samples/hpo/hpo_param_v12.json'
tmp_config = config_util.edit_config(tmp_config, self.load_config(tmp_file))
for i, tmp_fea in enumerate(tmp_config.feature_configs):
if tmp_fea.input_names[0] == 'c21':
assert len(tmp_fea.boundaries) == 25
assert np.abs(tmp_fea.boundaries[1] - 21.0) < 1e-5
def test_save_eval_metrics_with_env(self):
os.environ['TF_CONFIG'] = """
{ "cluster": {
"worker": ["127.0.0.1:2020"],
"chief": ["127.0.0.1:2021"]
},
"task": {"type": "chief", "index": 0}
}
"""
test_dir = test_utils.get_tmp_dir()
tmp_file = os.path.join(test_dir,
'easy_rec_hpo_test_%d.metric' % time.time())
hpo_util.save_eval_metrics('data/test/hpo_test/', tmp_file, False)
test_utils.clean_up(test_dir)
if __name__ == '__main__':
tf.test.main()
``` |
{
"source": "0xflotus/flow",
"score": 2
} |
#### File: thrift/test/test.py
```python
from __future__ import print_function
from itertools import chain
import json
import logging
import multiprocessing
import argparse
import os
import sys
import crossrunner
from crossrunner.compat import path_join
ROOT_DIR = os.path.dirname(os.path.realpath(os.path.dirname(__file__)))
TEST_DIR_RELATIVE = 'test'
TEST_DIR = path_join(ROOT_DIR, TEST_DIR_RELATIVE)
FEATURE_DIR_RELATIVE = path_join(TEST_DIR_RELATIVE, 'features')
CONFIG_FILE = 'tests.json'
def run_cross_tests(server_match, client_match, jobs, skip_known_failures, retry_count, regex):
logger = multiprocessing.get_logger()
logger.debug('Collecting tests')
with open(path_join(TEST_DIR, CONFIG_FILE), 'r') as fp:
j = json.load(fp)
tests = crossrunner.collect_cross_tests(j, server_match, client_match, regex)
if not tests:
print('No test found that matches the criteria', file=sys.stderr)
print(' servers: %s' % server_match, file=sys.stderr)
print(' clients: %s' % client_match, file=sys.stderr)
return False
if skip_known_failures:
logger.debug('Skipping known failures')
known = crossrunner.load_known_failures(TEST_DIR)
tests = list(filter(lambda t: crossrunner.test_name(**t) not in known, tests))
dispatcher = crossrunner.TestDispatcher(TEST_DIR, ROOT_DIR, TEST_DIR_RELATIVE, jobs)
logger.debug('Executing %d tests' % len(tests))
try:
for r in [dispatcher.dispatch(test, retry_count) for test in tests]:
r.wait()
logger.debug('Waiting for completion')
return dispatcher.wait()
except (KeyboardInterrupt, SystemExit):
logger.debug('Interrupted, shutting down')
dispatcher.terminate()
return False
def run_feature_tests(server_match, feature_match, jobs, skip_known_failures, retry_count, regex):
basedir = path_join(ROOT_DIR, FEATURE_DIR_RELATIVE)
logger = multiprocessing.get_logger()
logger.debug('Collecting tests')
with open(path_join(TEST_DIR, CONFIG_FILE), 'r') as fp:
j = json.load(fp)
with open(path_join(basedir, CONFIG_FILE), 'r') as fp:
j2 = json.load(fp)
tests = crossrunner.collect_feature_tests(j, j2, server_match, feature_match, regex)
if not tests:
print('No test found that matches the criteria', file=sys.stderr)
print(' servers: %s' % server_match, file=sys.stderr)
print(' features: %s' % feature_match, file=sys.stderr)
return False
if skip_known_failures:
logger.debug('Skipping known failures')
known = crossrunner.load_known_failures(basedir)
tests = list(filter(lambda t: crossrunner.test_name(**t) not in known, tests))
dispatcher = crossrunner.TestDispatcher(TEST_DIR, ROOT_DIR, FEATURE_DIR_RELATIVE, jobs)
logger.debug('Executing %d tests' % len(tests))
try:
for r in [dispatcher.dispatch(test, retry_count) for test in tests]:
r.wait()
logger.debug('Waiting for completion')
return dispatcher.wait()
except (KeyboardInterrupt, SystemExit):
logger.debug('Interrupted, shutting down')
dispatcher.terminate()
return False
def default_concurrency():
try:
return int(os.environ.get('THRIFT_CROSSTEST_CONCURRENCY'))
except (TypeError, ValueError):
# Since much time is spent sleeping, use many threads
return int(multiprocessing.cpu_count() * 1.25) + 1
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--server', default='', nargs='*',
help='list of servers to test')
parser.add_argument('--client', default='', nargs='*',
help='list of clients to test')
parser.add_argument('-F', '--features', nargs='*', default=None,
help='run server feature tests instead of cross language tests')
parser.add_argument('-R', '--regex', help='test name pattern to run')
parser.add_argument('-s', '--skip-known-failures', action='store_true', dest='skip_known_failures',
help='do not execute tests that are known to fail')
parser.add_argument('-r', '--retry-count', type=int,
default=0, help='maximum retry on failure')
parser.add_argument('-j', '--jobs', type=int,
default=default_concurrency(),
help='number of concurrent test executions')
g = parser.add_argument_group(title='Advanced')
g.add_argument('-v', '--verbose', action='store_const',
dest='log_level', const=logging.DEBUG, default=logging.WARNING,
help='show debug output for test runner')
g.add_argument('-P', '--print-expected-failures', choices=['merge', 'overwrite'],
dest='print_failures',
help="generate expected failures based on last result and print to stdout")
g.add_argument('-U', '--update-expected-failures', choices=['merge', 'overwrite'],
dest='update_failures',
help="generate expected failures based on last result and save to default file location")
options = parser.parse_args(argv)
logger = multiprocessing.log_to_stderr()
logger.setLevel(options.log_level)
if options.features is not None and options.client:
print('Cannot specify both --features and --client ', file=sys.stderr)
return 1
# Allow multiple args separated with ',' for backward compatibility
server_match = list(chain(*[x.split(',') for x in options.server]))
client_match = list(chain(*[x.split(',') for x in options.client]))
if options.update_failures or options.print_failures:
dire = path_join(ROOT_DIR, FEATURE_DIR_RELATIVE) if options.features is not None else TEST_DIR
res = crossrunner.generate_known_failures(
dire, options.update_failures == 'overwrite',
options.update_failures, options.print_failures)
elif options.features is not None:
features = options.features or ['.*']
res = run_feature_tests(server_match, features, options.jobs, options.skip_known_failures, options.retry_count, options.regex)
else:
res = run_cross_tests(server_match, client_match, options.jobs, options.skip_known_failures, options.retry_count, options.regex)
return 0 if res else 1
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
``` |
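`main()` takes an argv list, so the runner can also be driven programmatically, which is equivalent to calling the script from the command line. The language names are illustrative and must match entries in `test/tests.json`:
```python
# Programmatic invocation sketch; assumes main() from the file above is in scope.
# Equivalent to: python test/test.py --server py --client cpp -j 4 --retry-count 1
import sys

sys.exit(main(['--server', 'py', '--client', 'cpp', '-j', '4', '--retry-count', '1']))
```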
{
"source": "0xflotus/frash",
"score": 3
} |
#### File: frash/frash/util.py
```python
import sys
def normalize_to_hex(input):
if input in [".0", "0."]:
return "0x0"
elif (
("e" in input or "E" in input)
and (float(input) >= 1 or float(input) == 0)
and not ("e0" in input or "E0" in input)
):
try:
return hex(int(float(input)))
except ValueError as ve:
print(ve)
sys.exit(-1)
else:
try:
return hex(int(input))
except ValueError:
try:
output = float.hex(float(input))
out_arr = output.split("p")
out_arr[0] = out_arr[0].rstrip("0")
out_arr[1] = out_arr[1].lstrip("+")
output = out_arr[0] + "p" + out_arr[1]
return output
except ValueError as ve:
print(ve)
sys.exit(-1)
def from_hex(input):
try:
output = float.fromhex(input)
return f"{output:f}".rstrip("0").rstrip(".")
except ValueError as ve:
print(ve)
sys.exit(-1)
except OverflowError as oe:
print(oe)
sys.exit(-1)
def from_oct(input):
try:
return str(int(input, 0o10))
except ValueError as ve:
print(ve)
sys.exit(-1)
def normalize_to_oct(input):
try:
return oct(int(input))
except ValueError as ve:
print(ve)
sys.exit(-1)
``` |
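Expected behaviour of the helpers above, verified against CPython's `float.hex`/`float.fromhex` semantics:
```python
# Assumes the functions above are in scope.
print(normalize_to_hex('255'))    # 0xff      -- integer path
print(normalize_to_hex('2.5'))    # 0x1.4p1   -- float path, mantissa zeros and '+' trimmed
print(from_hex('0x1.8p1'))        # 3         -- trailing zeros and dot stripped
print(from_oct('17'))             # 15        -- octal string to decimal
```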
{
"source": "0xflotus/gotop",
"score": 3
} |
#### File: gotop/fonts/gen-braille.py
```python
def show_char(i):
bit = [ '--', '--', '--', '--', '--', '--', '--', '--' ]
for n in range(0, 8):
if i & (1 << n):
bit[n] = '##'
print('%')
print('// Character {}'.format(256+i))
print('Bitmap: -------- \\')
print(' -{}--{}- \\'.format(bit[0], bit[3]))
print(' -{}--{}- \\'.format(bit[0], bit[3]))
print(' -------- \\')
print(' -------- \\')
print(' -{}--{}- \\'.format(bit[1], bit[4]))
print(' -{}--{}- \\'.format(bit[1], bit[4]))
print(' -------- \\')
print(' -------- \\')
print(' -{}--{}- \\'.format(bit[2], bit[5]))
print(' -{}--{}- \\'.format(bit[2], bit[5]))
print(' -------- \\')
print(' -------- \\')
print(' -{}--{}- \\'.format(bit[6], bit[7]))
print(' -{}--{}- \\'.format(bit[6], bit[7]))
print(' --------')
print('Unicode: [{:08x}];'.format(0x2800 + i))
if __name__ == '__main__':
for i in range(0, 256):
show_char(i)
``` |
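The generated bitmaps follow the Unicode braille convention: bit n of `i` raises dot n+1, and the resulting glyph lives at code point U+2800 + i. A two-line check of that mapping:
```python
i = 0b00000101          # dots 1 and 3 raised
print(hex(0x2800 + i))  # 0x2805
print(chr(0x2800 + i))  # '⠅' (BRAILLE PATTERN DOTS-13)
```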
{
"source": "0xflotus/graphql-compiler",
"score": 3
} |
#### File: graphql_compiler/compiler/compiler_entities.py
```python
from abc import ABCMeta, abstractmethod
from graphql import is_type
import six
@six.python_2_unicode_compatible
@six.add_metaclass(ABCMeta)
class CompilerEntity(object):
"""An abstract compiler entity. Can represent things like basic blocks and expressions."""
__slots__ = ('_print_args', '_print_kwargs')
def __init__(self, *args, **kwargs):
"""Construct a new CompilerEntity."""
self._print_args = args
self._print_kwargs = kwargs
@abstractmethod
def validate(self):
"""Ensure that the CompilerEntity is valid."""
raise NotImplementedError()
def __str__(self):
"""Return a human-readable unicode representation of this CompilerEntity."""
printed_args = []
if self._print_args:
printed_args.append('{args}')
if self._print_kwargs:
printed_args.append('{kwargs}')
template = u'{cls_name}(' + u', '.join(printed_args) + u')'
return template.format(cls_name=type(self).__name__,
args=self._print_args,
kwargs=self._print_kwargs)
def __repr__(self):
"""Return a human-readable str representation of the CompilerEntity object."""
return self.__str__()
# pylint: disable=protected-access
def __eq__(self, other):
"""Return True if the CompilerEntity objects are equal, and False otherwise."""
if type(self) != type(other):
return False
if len(self._print_args) != len(other._print_args):
return False
# The args sometimes contain GraphQL type objects, which unfortunately do not define "==".
# We have to split them out and compare them using "is_same_type()" instead.
for self_arg, other_arg in six.moves.zip(self._print_args, other._print_args):
if is_type(self_arg):
if not self_arg.is_same_type(other_arg):
return False
else:
if self_arg != other_arg:
return False
return self._print_kwargs == other._print_kwargs
# pylint: enable=protected-access
def __ne__(self, other):
"""Check another object for non-equality against this one."""
return not self.__eq__(other)
@abstractmethod
def to_gremlin(self):
"""Return the Gremlin unicode string representation of this object."""
raise NotImplementedError()
@six.add_metaclass(ABCMeta)
class Expression(CompilerEntity):
"""An expression that produces a value in the GraphQL compiler."""
__slots__ = ()
def visit_and_update(self, visitor_fn):
"""Create an updated version (if needed) of the Expression via the visitor pattern.
Args:
visitor_fn: function that takes an Expression argument, and returns an Expression.
This function is recursively called on all child Expressions that may
exist within this expression. If the visitor_fn does not return the
exact same object that was passed in, this is interpreted as an update
request, and the visit_and_update() method will return a new Expression
with the given update applied. No Expressions are mutated in-place.
Returns:
- If the visitor_fn does not request any updates (by always returning the exact same
object it was called with), this method returns 'self'.
- Otherwise, this method returns a new Expression object that reflects the updates
requested by the visitor_fn.
"""
# Most Expressions simply visit themselves.
# Any Expressions that contain Expressions will override this method.
return visitor_fn(self)
@six.add_metaclass(ABCMeta)
class BasicBlock(CompilerEntity):
"""A basic operation block of the GraphQL compiler."""
__slots__ = ()
def visit_and_update_expressions(self, visitor_fn):
"""Create an updated version (if needed) of the BasicBlock via the visitor pattern.
Args:
visitor_fn: function that takes an Expression argument, and returns an Expression.
This function is recursively called on all child Expressions that may
exist within this BasicBlock. If the visitor_fn does not return the
exact same object that was passed in, this is interpreted as an update
request, and the visit_and_update() method will return a new BasicBlock
with the given update applied. No Expressions or BasicBlocks are
mutated in-place.
Returns:
- If the visitor_fn does not request any updates (by always returning the exact same
object it was called with), this method returns 'self'.
- Otherwise, this method returns a new BasicBlock object that reflects the updates
requested by the visitor_fn.
"""
# Most BasicBlocks do not contain expressions, and immediately return 'self'.
# Any BasicBlocks that contain Expressions will override this method.
return self
@six.add_metaclass(ABCMeta)
class MarkerBlock(BasicBlock):
"""A block that is used to mark that a context-affecting operation with no output happened."""
__slots__ = ()
def to_gremlin(self):
"""Return the Gremlin representation of the block, which should almost always be empty.
The effect of MarkerBlocks is applied during optimization and code generation steps.
"""
return u''
```
#### File: graphql_compiler/compiler/emit_sql.py
```python
from collections import namedtuple
from sqlalchemy import Column, bindparam, select
from sqlalchemy.sql import expression as sql_expressions
from sqlalchemy.sql.elements import BindParameter, and_
from . import sql_context_helpers
from ..compiler import expressions
from ..compiler.ir_lowering_sql import constants
# The compilation context holds state that changes during compilation as the tree is traversed
CompilationContext = namedtuple('CompilationContext', (
# 'query_path_to_selectable': Dict[Tuple[str, ...], Selectable], mapping from each
# query_path to the Selectable located at that query_path.
'query_path_to_selectable',
# 'query_path_to_location_info': Dict[Tuple[str, ...], LocationInfo], inverse mapping from
# each query_path to the LocationInfo located at that query_path
'query_path_to_location_info',
# 'query_path_to_output_fields': Dict[Tuple[str, ...], Dict[str, Tuple[str, type, bool]]]
# mapping from each query path to a mapping from field alias to the field name, type, and
# renamed status. This tuple is used to construct the query outputs, and track when a name
# changes due to collapsing into a CTE.
'query_path_to_output_fields',
# 'query_path_to_filters': Dict[Tuple[str, ...], List[Filter]], mapping from each query_path
# to the Filter blocks that apply to that query path
'query_path_to_filters',
# 'query_path_to_node': Dict[Tuple[str, ...], SqlNode], mapping from each
# query_path to the SqlNode located at that query_path.
'query_path_to_node',
# 'compiler_metadata': SqlMetadata, SQLAlchemy metadata about Table objects, and
# further backend specific configuration.
'compiler_metadata',
))
def emit_code_from_ir(sql_query_tree, compiler_metadata):
"""Return a SQLAlchemy Query from a passed SqlQueryTree.
Args:
sql_query_tree: SqlQueryTree, tree representation of the query to emit.
compiler_metadata: SqlMetadata, SQLAlchemy specific metadata.
Returns:
SQLAlchemy Query
"""
context = CompilationContext(
query_path_to_selectable=dict(),
query_path_to_location_info=sql_query_tree.query_path_to_location_info,
query_path_to_output_fields=sql_query_tree.query_path_to_output_fields,
query_path_to_filters=sql_query_tree.query_path_to_filters,
query_path_to_node=sql_query_tree.query_path_to_node,
compiler_metadata=compiler_metadata,
)
return _query_tree_to_query(sql_query_tree.root, context)
def _query_tree_to_query(node, context):
"""Convert this node into its corresponding SQL representation.
Args:
node: SqlNode, the node to convert to SQL.
context: CompilationContext, compilation specific metadata
Returns:
Query, the compiled SQL query
"""
_create_table_and_update_context(node, context)
return _create_query(node, context)
def _create_table_and_update_context(node, context):
"""Create an aliased table for a SqlNode.
Updates the relevant Selectable global context.
Args:
node: SqlNode, the current node.
context: CompilationContext, global compilation state and metadata.
Returns:
Table, the newly aliased SQLAlchemy table.
"""
schema_type_name = sql_context_helpers.get_schema_type_name(node, context)
table = context.compiler_metadata.get_table(schema_type_name).alias()
context.query_path_to_selectable[node.query_path] = table
return table
def _create_query(node, context):
"""Create a query from a SqlNode.
Args:
node: SqlNode, the current node.
context: CompilationContext, global compilation state and metadata.
Returns:
Selectable, selectable of the generated query.
"""
visited_nodes = [node]
output_columns = _get_output_columns(visited_nodes, context)
filters = _get_filters(visited_nodes, context)
selectable = sql_context_helpers.get_node_selectable(node, context)
query = select(output_columns).select_from(selectable).where(and_(*filters))
return query
def _get_output_columns(nodes, context):
"""Get the output columns for a list of SqlNodes.
Args:
nodes: List[SqlNode], the nodes to get output columns from.
context: CompilationContext, global compilation state and metadata.
Returns:
List[Column], list of SqlAlchemy Columns to output for this query.
"""
columns = []
for node in nodes:
for sql_output in sql_context_helpers.get_outputs(node, context):
field_name = sql_output.field_name
column = sql_context_helpers.get_column(field_name, node, context)
column = column.label(sql_output.output_name)
columns.append(column)
return columns
def _get_filters(nodes, context):
"""Get filters to apply to a list of SqlNodes.
Args:
nodes: List[SqlNode], the SqlNodes to get filters for.
context: CompilationContext, global compilation state and metadata.
Returns:
List[Expression], list of SQLAlchemy expressions.
"""
filters = []
for node in nodes:
for filter_block in sql_context_helpers.get_filters(node, context):
filter_sql_expression = _transform_filter_to_sql(filter_block, node, context)
filters.append(filter_sql_expression)
return filters
def _transform_filter_to_sql(filter_block, node, context):
"""Transform a Filter block to its corresponding SQLAlchemy expression.
Args:
filter_block: Filter, the Filter block to transform.
node: SqlNode, the node Filter block applies to.
context: CompilationContext, global compilation state and metadata.
Returns:
Expression, SQLAlchemy expression equivalent to the Filter.predicate expression.
"""
expression = filter_block.predicate
return _expression_to_sql(expression, node, context)
def _expression_to_sql(expression, node, context):
"""Recursively transform a Filter block predicate to its SQLAlchemy expression representation.
Args:
expression: expression, the compiler expression to transform.
node: SqlNode, the SqlNode the expression applies to.
context: CompilationContext, global compilation state and metadata.
Returns:
Expression, SQLAlchemy Expression equivalent to the passed compiler expression.
"""
_expression_transformers = {
expressions.LocalField: _transform_local_field_to_expression,
expressions.Variable: _transform_variable_to_expression,
expressions.Literal: _transform_literal_to_expression,
expressions.BinaryComposition: _transform_binary_composition_to_expression,
}
expression_type = type(expression)
if expression_type not in _expression_transformers:
raise NotImplementedError(
u'Unsupported compiler expression "{}" of type "{}" cannot be converted to SQL '
u'expression.'.format(expression, type(expression)))
return _expression_transformers[expression_type](expression, node, context)
def _transform_binary_composition_to_expression(expression, node, context):
"""Transform a BinaryComposition compiler expression into a SQLAlchemy expression.
Recursively calls _expression_to_sql to convert its left and right sub-expressions.
Args:
expression: expression, BinaryComposition compiler expression.
node: SqlNode, the SqlNode the expression applies to.
context: CompilationContext, global compilation state and metadata.
Returns:
Expression, SQLAlchemy expression.
"""
if expression.operator not in constants.SUPPORTED_OPERATORS:
raise NotImplementedError(
u'Filter operation "{}" is not supported by the SQL backend.'.format(
expression.operator))
sql_operator = constants.SUPPORTED_OPERATORS[expression.operator]
left = _expression_to_sql(expression.left, node, context)
right = _expression_to_sql(expression.right, node, context)
if sql_operator.cardinality == constants.CARDINALITY_UNARY:
left, right = _get_column_and_bindparam(left, right, sql_operator)
clause = getattr(left, sql_operator.name)(right)
return clause
elif sql_operator.cardinality == constants.CARDINALITY_BINARY:
clause = getattr(sql_expressions, sql_operator.name)(left, right)
return clause
elif sql_operator.cardinality == constants.CARDINALITY_LIST_VALUED:
left, right = _get_column_and_bindparam(left, right, sql_operator)
# ensure that SQLAlchemy treats the right bind parameter as list valued
right.expanding = True
clause = getattr(left, sql_operator.name)(right)
return clause
raise AssertionError(u'Unreachable, operator cardinality {} for compiler expression {} is '
u'unknown'.format(sql_operator.cardinality, expression))
def _get_column_and_bindparam(left, right, operator):
"""Return left and right expressions in (Column, BindParameter) order."""
if not isinstance(left, Column):
left, right = right, left
if not isinstance(left, Column):
raise AssertionError(
u'SQLAlchemy operator {} expects Column as left side the of expression, got {} '
u'of type {} instead.'.format(operator, left, type(left)))
if not isinstance(right, BindParameter):
raise AssertionError(
u'SQLAlchemy operator {} expects BindParameter as the right side of the expression, '
u'got {} of type {} instead.'.format(operator, right, type(right)))
return left, right
def _transform_literal_to_expression(expression, node, context):
"""Transform a Literal compiler expression into its SQLAlchemy expression representation.
Args:
expression: expression, Literal compiler expression.
node: SqlNode, the SqlNode the expression applies to.
context: CompilationContext, global compilation state and metadata.
Returns:
Expression, SQLAlchemy expression.
"""
return expression.value
def _transform_variable_to_expression(expression, node, context):
"""Transform a Variable compiler expression into its SQLAlchemy expression representation.
Args:
expression: expression, Variable compiler expression.
node: SqlNode, the SqlNode the expression applies to.
context: CompilationContext, global compilation state and metadata.
Returns:
Expression, SQLAlchemy expression.
"""
variable_name = expression.variable_name
if not variable_name.startswith(u'$'):
raise AssertionError(u'Unexpectedly received variable name {} that is not '
u'prefixed with "$"'.format(variable_name))
return bindparam(variable_name[1:])
def _transform_local_field_to_expression(expression, node, context):
"""Transform a LocalField compiler expression into its SQLAlchemy expression representation.
Args:
expression: expression, LocalField compiler expression.
node: SqlNode, the SqlNode the expression applies to.
context: CompilationContext, global compilation state and metadata.
Returns:
Expression, SQLAlchemy expression.
"""
column_name = expression.field_name
column = sql_context_helpers.get_column(column_name, node, context)
return column
```
#### File: graphql_compiler/compiler/ir_lowering_common.py
```python
import six
from .blocks import (
ConstructResult, EndOptional, Filter, Fold, MarkLocation, Recurse, Traverse, Unfold
)
from .expressions import (
BinaryComposition, ContextField, ContextFieldExistence, FalseLiteral, NullLiteral, TrueLiteral
)
from .helpers import validate_safe_string
def merge_consecutive_filter_clauses(ir_blocks):
"""Merge consecutive Filter(x), Filter(y) blocks into Filter(x && y) block."""
if not ir_blocks:
return ir_blocks
new_ir_blocks = [ir_blocks[0]]
for block in ir_blocks[1:]:
last_block = new_ir_blocks[-1]
if isinstance(last_block, Filter) and isinstance(block, Filter):
new_ir_blocks[-1] = Filter(
BinaryComposition(u'&&', last_block.predicate, block.predicate))
else:
new_ir_blocks.append(block)
return new_ir_blocks
class OutputContextVertex(ContextField):
"""An expression referring to a vertex location for output from the global context."""
def validate(self):
"""Validate that the OutputContextVertex is correctly representable."""
super(OutputContextVertex, self).validate()
if self.location.field is not None:
raise ValueError(u'Expected location at a vertex, but got: {}'.format(self.location))
def to_match(self):
"""Return a unicode object with the MATCH representation of this expression."""
self.validate()
mark_name, field_name = self.location.get_location_name()
validate_safe_string(mark_name)
if field_name is not None:
raise AssertionError(u'Vertex location has non-None field_name: '
u'{} {}'.format(field_name, self.location))
return mark_name
def lower_context_field_existence(ir_blocks, query_metadata_table):
"""Lower ContextFieldExistence expressions into lower-level expressions."""
def regular_visitor_fn(expression):
"""Expression visitor function that rewrites ContextFieldExistence expressions."""
if not isinstance(expression, ContextFieldExistence):
return expression
location_type = query_metadata_table.get_location_info(expression.location).type
# Since this function is only used in blocks that aren't ConstructResult,
# the location check is performed using a regular ContextField expression.
return BinaryComposition(
u'!=',
ContextField(expression.location, location_type),
NullLiteral)
def construct_result_visitor_fn(expression):
"""Expression visitor function that rewrites ContextFieldExistence expressions."""
if not isinstance(expression, ContextFieldExistence):
return expression
location_type = query_metadata_table.get_location_info(expression.location).type
# Since this function is only used in ConstructResult blocks,
# the location check is performed using the special OutputContextVertex expression.
return BinaryComposition(
u'!=',
OutputContextVertex(expression.location, location_type),
NullLiteral)
new_ir_blocks = []
for block in ir_blocks:
new_block = None
if isinstance(block, ConstructResult):
new_block = block.visit_and_update_expressions(construct_result_visitor_fn)
else:
new_block = block.visit_and_update_expressions(regular_visitor_fn)
new_ir_blocks.append(new_block)
return new_ir_blocks
def optimize_boolean_expression_comparisons(ir_blocks):
"""Optimize comparisons of a boolean binary comparison expression against a boolean literal.
Rewriting example:
BinaryComposition(
'=',
BinaryComposition('!=', something, NullLiteral),
False)
The above is rewritten into:
BinaryComposition('=', something, NullLiteral)
Args:
ir_blocks: list of basic block objects
Returns:
a new list of basic block objects, with the optimization applied
"""
operator_inverses = {
u'=': u'!=',
u'!=': u'=',
}
def visitor_fn(expression):
"""Expression visitor function that performs the above rewriting."""
if not isinstance(expression, BinaryComposition):
return expression
left_is_binary_composition = isinstance(expression.left, BinaryComposition)
right_is_binary_composition = isinstance(expression.right, BinaryComposition)
if not left_is_binary_composition and not right_is_binary_composition:
# Nothing to rewrite, return the expression as-is.
return expression
identity_literal = None # The boolean literal for which we just use the inner expression.
inverse_literal = None # The boolean literal for which we negate the inner expression.
if expression.operator == u'=':
identity_literal = TrueLiteral
inverse_literal = FalseLiteral
elif expression.operator == u'!=':
identity_literal = FalseLiteral
inverse_literal = TrueLiteral
else:
return expression
expression_to_rewrite = None
if expression.left == identity_literal and right_is_binary_composition:
return expression.right
elif expression.right == identity_literal and left_is_binary_composition:
return expression.left
elif expression.left == inverse_literal and right_is_binary_composition:
expression_to_rewrite = expression.right
elif expression.right == inverse_literal and left_is_binary_composition:
expression_to_rewrite = expression.left
if expression_to_rewrite is None:
# We couldn't find anything to rewrite, return the expression as-is.
return expression
elif expression_to_rewrite.operator not in operator_inverses:
# We can't rewrite the inner expression since we don't know its inverse operator.
return expression
else:
return BinaryComposition(
operator_inverses[expression_to_rewrite.operator],
expression_to_rewrite.left,
expression_to_rewrite.right)
new_ir_blocks = []
for block in ir_blocks:
new_block = block.visit_and_update_expressions(visitor_fn)
new_ir_blocks.append(new_block)
return new_ir_blocks
def extract_folds_from_ir_blocks(ir_blocks):
"""Extract all @fold data from the IR blocks, and cut the folded IR blocks out of the IR.
Args:
ir_blocks: list of IR blocks to extract fold data from
Returns:
tuple (folds, remaining_ir_blocks):
- folds: dict of FoldScopeLocation -> list of IR blocks corresponding to that @fold scope.
The list does not contain Fold or Unfold blocks.
- remaining_ir_blocks: list of IR blocks that were not part of a Fold-Unfold section.
"""
folds = dict()
remaining_ir_blocks = []
current_folded_blocks = []
in_fold_location = None
for block in ir_blocks:
if isinstance(block, Fold):
if in_fold_location is not None:
raise AssertionError(u'in_fold_location was not None at a Fold block: {} {} '
u'{}'.format(current_folded_blocks, remaining_ir_blocks,
ir_blocks))
in_fold_location = block.fold_scope_location
elif isinstance(block, Unfold):
if in_fold_location is None:
raise AssertionError(u'in_fold_location was None at an Unfold block: {} {} '
u'{}'.format(current_folded_blocks, remaining_ir_blocks,
ir_blocks))
folds[in_fold_location] = current_folded_blocks
current_folded_blocks = []
in_fold_location = None
else:
if in_fold_location is not None:
current_folded_blocks.append(block)
else:
remaining_ir_blocks.append(block)
return folds, remaining_ir_blocks
def extract_optional_location_root_info(ir_blocks):
"""Construct a mapping from locations within @optional to their correspoding optional Traverse.
Args:
ir_blocks: list of IR blocks to extract optional data from
Returns:
tuple (complex_optional_roots, location_to_optional_roots):
complex_optional_roots: list of @optional locations (location immediately preceding
an @optional Traverse) that expand vertex fields
location_to_optional_roots: dict mapping from location -> optional_roots where location is
within some number of @optionals and optional_roots is a list
of optional root locations preceding the successive @optional
scopes within which the location resides
"""
complex_optional_roots = []
location_to_optional_roots = dict()
# These are both stacks that perform depth-first search on the tree of @optional edges.
# At any given location they contain
# - in_optional_root_locations: all the optional root locations
# - encountered_traverse_within_optional: whether the optional is complex or not
# in order that they appear on the path from the root to that location.
in_optional_root_locations = []
encountered_traverse_within_optional = []
# Blocks within folded scopes should not be taken into account in this function.
_, non_folded_ir_blocks = extract_folds_from_ir_blocks(ir_blocks)
preceding_location = None
for current_block in non_folded_ir_blocks:
if len(in_optional_root_locations) > 0 and isinstance(current_block, (Traverse, Recurse)):
encountered_traverse_within_optional[-1] = True
if isinstance(current_block, Traverse) and current_block.optional:
if preceding_location is None:
raise AssertionError(u'No MarkLocation found before an optional Traverse: {} {}'
.format(current_block, non_folded_ir_blocks))
in_optional_root_locations.append(preceding_location)
encountered_traverse_within_optional.append(False)
elif isinstance(current_block, EndOptional):
if len(in_optional_root_locations) == 0:
raise AssertionError(u'in_optional_root_locations was empty at an EndOptional '
u'block: {}'.format(ir_blocks))
if encountered_traverse_within_optional[-1]:
complex_optional_roots.append(in_optional_root_locations[-1])
in_optional_root_locations.pop()
encountered_traverse_within_optional.pop()
elif isinstance(current_block, MarkLocation):
preceding_location = current_block.location
if len(in_optional_root_locations) != 0:
# in_optional_root_locations will not be empty if and only if we are within an
# @optional scope. In this case, we add the current location to the dictionary
# mapping it to the sequence of optionals locations leading up to it.
optional_root_locations_stack = tuple(in_optional_root_locations)
location_to_optional_roots[current_block.location] = optional_root_locations_stack
else:
# No locations need to be marked, and no optional scopes begin or end here.
pass
return complex_optional_roots, location_to_optional_roots
def extract_simple_optional_location_info(
ir_blocks, complex_optional_roots, location_to_optional_roots):
"""Construct a map from simple optional locations to their inner location and traversed edge.
Args:
ir_blocks: list of IR blocks to extract optional data from
complex_optional_roots: list of @optional locations (location immediately preceding
an @optional traverse) that expand vertex fields
location_to_optional_roots: dict mapping from location -> optional_roots where location is
within some number of @optionals and optional_roots is a list
of optional root locations preceding the successive @optional
scopes within which the location resides
Returns:
dict mapping from simple_optional_root_location -> dict containing keys
- 'inner_location_name': Location object corresponding to the unique MarkLocation present
within a simple optional (one that does not expand vertex fields)
scope
- 'edge_field': string representing the optional edge being traversed
where simple_optional_root_to_inner_location is the location preceding the @optional scope
"""
# Simple optional roots are a subset of location_to_optional_roots.values() (all optional roots)
# We filter out the ones that are also present in complex_optional_roots.
location_to_preceding_optional_root_iteritems = six.iteritems({
location: optional_root_locations_stack[-1]
for location, optional_root_locations_stack in six.iteritems(location_to_optional_roots)
})
simple_optional_root_to_inner_location = {
optional_root_location: inner_location
for inner_location, optional_root_location in location_to_preceding_optional_root_iteritems
if optional_root_location not in complex_optional_roots
}
simple_optional_root_locations = set(simple_optional_root_to_inner_location.keys())
# Blocks within folded scopes should not be taken into account in this function.
_, non_folded_ir_blocks = extract_folds_from_ir_blocks(ir_blocks)
simple_optional_root_info = {}
preceding_location = None
for current_block in non_folded_ir_blocks:
if isinstance(current_block, MarkLocation):
preceding_location = current_block.location
elif isinstance(current_block, Traverse) and current_block.optional:
if preceding_location in simple_optional_root_locations:
# The current optional Traverse is "simple"
# i.e. it does not contain any Traverses within.
inner_location = simple_optional_root_to_inner_location[preceding_location]
inner_location_name, _ = inner_location.get_location_name()
simple_optional_info_dict = {
'inner_location_name': inner_location_name,
'edge_field': current_block.get_field_name(),
}
simple_optional_root_info[preceding_location] = simple_optional_info_dict
return simple_optional_root_info
def remove_end_optionals(ir_blocks):
"""Return a list of IR blocks as a copy of the original, with EndOptional blocks removed."""
new_ir_blocks = []
for block in ir_blocks:
if not isinstance(block, EndOptional):
new_ir_blocks.append(block)
return new_ir_blocks
```
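A minimal usage sketch of `merge_consecutive_filter_clauses`, assuming `Filter`, `BinaryComposition`, `LocalField`, and `Literal` are importable from the compiler package as in the module's own imports; the field names and literal values below are invented for illustration, not taken from the repository.
```python
# Hedged sketch -- not part of the original module.
from graphql_compiler.compiler.blocks import Filter
from graphql_compiler.compiler.expressions import BinaryComposition, Literal, LocalField
from graphql_compiler.compiler.ir_lowering_common import merge_consecutive_filter_clauses

name_filter = Filter(BinaryComposition(u'=', LocalField('name'), Literal('Hedwig')))
color_filter = Filter(BinaryComposition(u'=', LocalField('color'), Literal('white')))

# Two consecutive Filter blocks collapse into one whose predicate is their '&&' conjunction.
merged = merge_consecutive_filter_clauses([name_filter, color_filter])
assert len(merged) == 1
assert merged[0].predicate.operator == u'&&'
```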
#### File: compiler/ir_lowering_match/utils.py
```python
from collections import namedtuple
import itertools
import six
from ..blocks import Filter
from ..expressions import (
BinaryComposition, Expression, GlobalContextField, Literal, LocalField, NullLiteral,
TrueLiteral, UnaryTransformation, ZeroLiteral
)
from ..helpers import get_only_element_from_collection, is_vertex_field_name
def convert_coerce_type_to_instanceof_filter(coerce_type_block):
"""Create an "INSTANCEOF" Filter block from a CoerceType block."""
coerce_type_target = get_only_element_from_collection(coerce_type_block.target_class)
# INSTANCEOF requires the target class to be passed in as a string,
# so we make the target class a string literal.
new_predicate = BinaryComposition(
u'INSTANCEOF', LocalField('@this'), Literal(coerce_type_target))
return Filter(new_predicate)
def convert_coerce_type_and_add_to_where_block(coerce_type_block, where_block):
"""Create an "INSTANCEOF" Filter from a CoerceType, adding to an existing Filter if any."""
instanceof_filter = convert_coerce_type_to_instanceof_filter(coerce_type_block)
if where_block:
# There was already a Filter block -- we'll merge the two predicates together.
return Filter(BinaryComposition(u'&&', instanceof_filter.predicate, where_block.predicate))
else:
return instanceof_filter
def expression_list_to_conjunction(expression_list):
"""Convert a list of expressions to an Expression that is the conjunction of all of them."""
if not isinstance(expression_list, list):
raise AssertionError(u'Expected `list`, Received {}.'.format(expression_list))
if len(expression_list) == 0:
return TrueLiteral
if not isinstance(expression_list[0], Expression):
raise AssertionError(u'Non-Expression object {} found in expression_list'
.format(expression_list[0]))
if len(expression_list) == 1:
return expression_list[0]
else:
return BinaryComposition(u'&&',
expression_list_to_conjunction(expression_list[1:]),
expression_list[0])
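# Added note (not in the original module): for example, a list [e1, e2, e3] is folded into
# BinaryComposition(u'&&', BinaryComposition(u'&&', e3, e2), e1) -- the conjunction of all three.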
class BetweenClause(Expression):
"""A `BETWEEN` Expression, constraining a field value to lie within a lower and upper bound."""
def __init__(self, field, lower_bound, upper_bound):
"""Construct an expression that is true when the field value is within the given bounds.
Args:
field: LocalField Expression, denoting the field in consideration
lower_bound: lower bound constraint for given field
upper_bound: upper bound constraint for given field
Returns:
a new BetweenClause object
"""
super(BetweenClause, self).__init__(field, lower_bound, upper_bound)
self.field = field
self.lower_bound = lower_bound
self.upper_bound = upper_bound
self.validate()
def validate(self):
"""Validate that the Between Expression is correctly representable."""
if not isinstance(self.field, LocalField):
raise TypeError(u'Expected LocalField field, got: {} {}'.format(
type(self.field).__name__, self.field))
if not isinstance(self.lower_bound, Expression):
raise TypeError(u'Expected Expression lower_bound, got: {} {}'.format(
type(self.lower_bound).__name__, self.lower_bound))
if not isinstance(self.upper_bound, Expression):
raise TypeError(u'Expected Expression upper_bound, got: {} {}'.format(
type(self.upper_bound).__name__, self.upper_bound))
def visit_and_update(self, visitor_fn):
"""Create an updated version (if needed) of BetweenClause via the visitor pattern."""
new_lower_bound = self.lower_bound.visit_and_update(visitor_fn)
new_upper_bound = self.upper_bound.visit_and_update(visitor_fn)
if new_lower_bound is not self.lower_bound or new_upper_bound is not self.upper_bound:
return visitor_fn(BetweenClause(self.field, new_lower_bound, new_upper_bound))
else:
return visitor_fn(self)
def to_match(self):
"""Return a unicode object with the MATCH representation of this BetweenClause."""
template = u'({field_name} BETWEEN {lower_bound} AND {upper_bound})'
return template.format(
field_name=self.field.to_match(),
lower_bound=self.lower_bound.to_match(),
upper_bound=self.upper_bound.to_match())
def to_gremlin(self):
"""Must never be called."""
raise NotImplementedError()
def filter_edge_field_non_existence(edge_expression):
"""Return an Expression that is True iff the specified edge (edge_expression) does not exist."""
# When an edge does not exist at a given vertex, OrientDB represents that in one of two ways:
# - the edge's field does not exist (is null) on the vertex document, or
# - the edge's field does exist, but is an empty list.
# We check both of these possibilities.
if not isinstance(edge_expression, (LocalField, GlobalContextField)):
raise AssertionError(u'Received invalid edge_expression {} of type {}. '
u'Expected LocalField or GlobalContextField.'
.format(edge_expression, type(edge_expression).__name__))
if isinstance(edge_expression, LocalField):
if not is_vertex_field_name(edge_expression.field_name):
raise AssertionError(u'Received LocalField edge_expression {} with non-edge field_name '
u'{}.'.format(edge_expression, edge_expression.field_name))
field_null_check = BinaryComposition(u'=', edge_expression, NullLiteral)
local_field_size = UnaryTransformation(u'size', edge_expression)
field_size_check = BinaryComposition(u'=', local_field_size, ZeroLiteral)
return BinaryComposition(u'||', field_null_check, field_size_check)
def _filter_orientdb_simple_optional_edge(
query_metadata_table, optional_edge_location, inner_location_name):
"""Return an Expression that is False for rows that don't follow the @optional specification.
OrientDB does not filter correctly within optionals. Namely, a result where the optional edge
DOES EXIST will be returned regardless of whether the inner filter is satisfied.
To mitigate this, we add a final filter to reject such results.
A valid result must satisfy either of the following:
- The location within the optional exists (the filter will have been applied in this case)
- The optional edge field does not exist at the root location of the optional traverse
So, if the inner location within the optional was never visited, it must be the case that
the corresponding edge field does not exist at all.
Example:
A MATCH traversal which starts at location `Animal___1`, and follows the optional edge
`out_Animal_ParentOf` to the location `Animal__out_Animal_ParentOf___1`
results in the following filtering Expression:
(
(
(Animal___1.out_Animal_ParentOf IS null)
OR
(Animal___1.out_Animal_ParentOf.size() = 0)
)
OR
(Animal__out_Animal_ParentOf___1 IS NOT null)
)
Here, the `optional_edge_location` is `Animal___1.out_Animal_ParentOf`.
Args:
query_metadata_table: QueryMetadataTable object containing all metadata collected during
query processing, including location metadata (e.g. which locations
are folded or optional).
optional_edge_location: Location object representing the optional edge field
inner_location_name: string representing location within the corresponding optional traverse
Returns:
Expression that evaluates to False for rows that do not follow the @optional specification
"""
inner_local_field = LocalField(inner_location_name)
inner_location_existence = BinaryComposition(u'!=', inner_local_field, NullLiteral)
# The optional_edge_location here is actually referring to the edge field itself.
# This is definitely non-standard, but required to get the proper semantics.
# To get its type, we construct the location of the vertex field on the other side of the edge.
vertex_location = (
optional_edge_location.at_vertex().navigate_to_subpath(optional_edge_location.field)
)
location_type = query_metadata_table.get_location_info(vertex_location).type
edge_context_field = GlobalContextField(optional_edge_location, location_type)
edge_field_non_existence = filter_edge_field_non_existence(edge_context_field)
return BinaryComposition(u'||', edge_field_non_existence, inner_location_existence)
def construct_where_filter_predicate(query_metadata_table, simple_optional_root_info):
"""Return an Expression that is True if and only if each simple optional filter is True.
Construct filters for each simple optional, that are True if and only if `edge_field` does
not exist in the `simple_optional_root_location` OR the `inner_location` is not defined.
Return an Expression that evaluates to True if and only if *all* of the aforementioned filters
evaluate to True (conjunction).
Args:
query_metadata_table: QueryMetadataTable object containing all metadata collected during
query processing, including location metadata (e.g. which locations
are folded or optional).
simple_optional_root_info: dict mapping from simple_optional_root_location -> dict
containing keys
- 'inner_location_name': Location object corresponding to the
unique MarkLocation present within a
simple @optional (one that does not
expand vertex fields) scope
- 'edge_field': string representing the optional edge being
traversed
where simple_optional_root_to_inner_location is the location
preceding the @optional scope
Returns:
a new Expression object
"""
inner_location_name_to_where_filter = {}
for root_location, root_info_dict in six.iteritems(simple_optional_root_info):
inner_location_name = root_info_dict['inner_location_name']
edge_field = root_info_dict['edge_field']
optional_edge_location = root_location.navigate_to_field(edge_field)
optional_edge_where_filter = _filter_orientdb_simple_optional_edge(
query_metadata_table, optional_edge_location, inner_location_name)
inner_location_name_to_where_filter[inner_location_name] = optional_edge_where_filter
# Sort expressions by inner_location_name to obtain deterministic order
where_filter_expressions = [
inner_location_name_to_where_filter[key]
for key in sorted(inner_location_name_to_where_filter.keys())
]
return expression_list_to_conjunction(where_filter_expressions)
###
# A CompoundMatchQuery is a representation of several MatchQuery objects containing
# - match_queries: a list of MatchQuery objects
CompoundMatchQuery = namedtuple('CompoundMatchQuery', ('match_queries',))
class OptionalTraversalTree(object):
def __init__(self, complex_optional_roots):
"""Initialize empty tree of optional root Locations (elements of complex_optional_roots).
This object constructs a tree of complex optional roots. These are locations preceding an
@optional traverse that expand vertex fields within. Simple @optional traverses, i.e.
ones that do not expand vertex fields within them, are excluded.
Args:
complex_optional_roots: list of @optional locations (location preceding an @optional
traverse) that expand vertex fields within
"""
self._location_to_children = {
optional_root_location: set()
for optional_root_location in complex_optional_roots
}
self._root_location = None
self._location_to_children[self._root_location] = set()
def insert(self, optional_root_locations_path):
"""Insert a path of optional Locations into the tree.
Each OptionalTraversalTree object contains child Location objects as keys mapping to
other OptionalTraversalTree objects.
Args:
optional_root_locations_path: list of optional root Locations all except the last
of which must be present in complex_optional_roots
"""
encountered_simple_optional = False
parent_location = self._root_location
for optional_root_location in optional_root_locations_path:
if encountered_simple_optional:
raise AssertionError(u'Encountered simple optional root location {} in path, but '
u'further locations are present. This should not happen: {}'
.format(optional_root_location, optional_root_locations_path))
if optional_root_location not in self._location_to_children:
# Simple optionals are ignored.
# There should be no complex optionals after a simple optional.
encountered_simple_optional = True
else:
self._location_to_children[parent_location].add(optional_root_location)
parent_location = optional_root_location
def get_all_rooted_subtrees_as_lists(self, start_location=None):
"""Return a list of all rooted subtrees (each as a list of Location objects)."""
if start_location is not None and start_location not in self._location_to_children:
raise AssertionError(u'Received invalid start_location {} that was not present '
u'in the tree. Present root locations of complex @optional '
u'queries (ones that expand vertex fields within) are: {}'
.format(start_location, self._location_to_children.keys()))
if start_location is None:
start_location = self._root_location
if len(self._location_to_children[start_location]) == 0:
# Node with no children only returns a singleton list containing the null set.
return [[]]
current_children = sorted(self._location_to_children[start_location])
# Recursively find all rooted subtrees of each of the children of the current node.
location_to_list_of_subtrees = {
location: list(self.get_all_rooted_subtrees_as_lists(location))
for location in current_children
}
# All subsets of direct child Location objects
all_location_subsets = [
list(subset)
for subset in itertools.chain(*[
itertools.combinations(current_children, x)
for x in range(0, len(current_children) + 1)
])
]
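# Illustrative note (added): with direct children {A, B}, all_location_subsets is
# [[], [A], [B], [A, B]] -- every subset of the children, including the empty one.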
# For every possible subset of the children, and every combination of the chosen
# subtrees within, create a list of subtree Location lists.
new_subtrees_as_lists = []
for location_subset in all_location_subsets:
all_child_subtree_possibilities = [
location_to_list_of_subtrees[location]
for location in location_subset
]
all_child_subtree_combinations = itertools.product(*all_child_subtree_possibilities)
for child_subtree_combination in all_child_subtree_combinations:
merged_child_subtree_combination = list(itertools.chain(*child_subtree_combination))
new_subtree_as_list = location_subset + merged_child_subtree_combination
new_subtrees_as_lists.append(new_subtree_as_list)
return new_subtrees_as_lists
def construct_optional_traversal_tree(complex_optional_roots, location_to_optional_roots):
"""Return a tree of complex optional root locations.
Args:
complex_optional_roots: list of @optional locations (location immediately preceding
an @optional Traverse) that expand vertex fields
location_to_optional_roots: dict mapping from location -> optional_roots where location is
within some number of @optionals and optional_roots is a list
of optional root locations preceding the successive @optional
scopes within which the location resides
Returns:
OptionalTraversalTree object representing the tree of complex optional roots
"""
tree = OptionalTraversalTree(complex_optional_roots)
for optional_root_locations_stack in six.itervalues(location_to_optional_roots):
tree.insert(list(optional_root_locations_stack))
return tree
```
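A rough illustration of `BetweenClause`, with a hypothetical field name and bounds; `LocalField` and `Literal` are the expression classes this module already imports, and the import paths below assume the usual `graphql_compiler.compiler` package layout.
```python
# Hedged sketch -- the exact MATCH output depends on the expression classes' to_match() methods.
from graphql_compiler.compiler.expressions import Literal, LocalField
from graphql_compiler.compiler.ir_lowering_match.utils import BetweenClause

between = BetweenClause(LocalField('net_worth'), Literal(0), Literal(100))
print(between.to_match())  # expected to look like: (net_worth BETWEEN 0 AND 100)
```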
#### File: compiler/ir_lowering_sql/sql_tree.py
```python
class SqlQueryTree(object):
def __init__(self, root, query_path_to_location_info,
query_path_to_output_fields, query_path_to_filters, query_path_to_node):
"""Wrap a SqlNode root with additional location_info metadata."""
self.root = root
self.query_path_to_location_info = query_path_to_location_info
self.query_path_to_output_fields = query_path_to_output_fields
self.query_path_to_filters = query_path_to_filters
self.query_path_to_node = query_path_to_node
class SqlNode(object):
"""Representation of a SQL Query as a tree."""
def __init__(self, block, query_path):
"""Create a new SqlNode wrapping a QueryRoot block at a query_path."""
self.query_path = query_path
self.block = block
def __str__(self):
"""Return a string representation of a SqlNode."""
return u'SqlNode({})'.format(self.query_path)
def __repr__(self):
"""Return the repr of a SqlNode."""
return self.__str__()
```
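A tiny sketch of the wrapper classes above; the block and query path values are placeholders rather than the output of a real compilation.
```python
# Hedged sketch -- a real SqlNode wraps a QueryRoot block emitted by the compiler frontend.
node = SqlNode(block=None, query_path=('Animal',))
print(node)  # SqlNode(('Animal',))
```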
#### File: compiler/workarounds/orientdb_class_with_while.py
```python
from ..blocks import Recurse
from ..ir_lowering_match.utils import convert_coerce_type_and_add_to_where_block
def workaround_type_coercions_in_recursions(match_query):
"""Lower CoerceType blocks into Filter blocks within Recurse steps."""
# This step is required to work around an OrientDB bug that causes queries with both
# "while:" and "class:" in the same query location to fail to parse correctly.
#
# This bug is reported upstream: https://github.com/orientechnologies/orientdb/issues/8129
#
# Instead of "class:", we use "INSTANCEOF" in the "where:" clause to get correct behavior.
# However, we don't want to switch all coercions to this format, since the "class:" clause
# provides valuable info to the MATCH query scheduler about how to schedule efficiently.
new_match_traversals = []
for current_traversal in match_query.match_traversals:
new_traversal = []
for match_step in current_traversal:
new_match_step = match_step
has_coerce_type = match_step.coerce_type_block is not None
has_recurse_root = isinstance(match_step.root_block, Recurse)
if has_coerce_type and has_recurse_root:
new_where_block = convert_coerce_type_and_add_to_where_block(
match_step.coerce_type_block, match_step.where_block)
new_match_step = match_step._replace(coerce_type_block=None,
where_block=new_where_block)
new_traversal.append(new_match_step)
new_match_traversals.append(new_traversal)
return match_query._replace(match_traversals=new_match_traversals)
```
#### File: compiler/workarounds/orientdb_query_execution.py
```python
from ..blocks import CoerceType, QueryRoot, Recurse, Traverse
from ..expressions import ContextField, ContextFieldExistence
from ..helpers import get_only_element_from_collection
from ..ir_lowering_match.utils import convert_coerce_type_and_add_to_where_block
def _is_local_filter(filter_block):
"""Return True if the Filter block references no non-local fields, and False otherwise."""
# We need the "result" value of this function to be mutated within the "visitor_fn".
# Since we support both Python 2 and Python 3, we can't use the "nonlocal" keyword here:
# https://www.python.org/dev/peps/pep-3104/
# Instead, we use a dict to store the value we need mutated, since the "visitor_fn"
# can mutate state in the parent scope, but not rebind variables in it without "nonlocal".
# TODO(predrag): Revisit this if we drop support for Python 2.
result = {
'is_local_filter': True
}
filter_predicate = filter_block.predicate
def visitor_fn(expression):
"""Expression visitor function that looks for uses of non-local fields."""
non_local_expression_types = (ContextField, ContextFieldExistence)
if isinstance(expression, non_local_expression_types):
result['is_local_filter'] = False
# Don't change the expression.
return expression
filter_predicate.visit_and_update(visitor_fn)
return result['is_local_filter']
def _classify_query_locations(match_query):
"""Classify query locations into three groups: preferred, eligible, ineligible.
- Ineligible locations are ones that cannot be the starting point of query execution.
These include locations within recursions, locations that are the target of
an optional traversal, and locations with an associated "where:" clause containing a non-local filter.
- Preferred locations are ones that are eligible to be the starting point, and also have
an associated "where:" clause that references no non-local fields -- only local fields,
literals, and variables.
- Eligible locations are all locations that do not fall into either of these two categories.
Args:
match_query: MatchQuery object describing the query being analyzed for optimization
Returns:
tuple (preferred, eligible, ineligible) where each element is a set of Location objects.
The three sets are disjoint.
"""
preferred_locations = set()
eligible_locations = set()
ineligible_locations = set()
# Any query must have at least one traversal with at least one step.
# The first step in this traversal must be a QueryRoot.
first_match_step = match_query.match_traversals[0][0]
if not isinstance(first_match_step.root_block, QueryRoot):
raise AssertionError(u'First step of first traversal unexpectedly was not QueryRoot: '
u'{} {}'.format(first_match_step, match_query))
# The first step in the first traversal cannot possibly be inside an optional, recursion,
# or fold. Its location is always an eligible start location for a query.
# We need to determine whether it is merely eligible, or actually a preferred location.
if first_match_step.where_block is not None:
if _is_local_filter(first_match_step.where_block):
preferred_locations.add(first_match_step.as_block.location)
else:
# TODO(predrag): Fix once we have a proper fix for tag-and-filter in the same scope.
# Either the locally-scoped tag will have to generate a LocalField
# instead of a ContextField, or we'll have to rework the local filter
# detection code in this module.
raise AssertionError(u'The first step of the first traversal somehow had a non-local '
u'filter. This should not be possible, since there is nowhere '
u'for the tagged value to have come from. Values: {} {}'
.format(first_match_step, match_query))
else:
eligible_locations.add(first_match_step.as_block.location)
# This loop will repeat the analysis of the first step of the first traversal.
# QueryRoots other than the first are required to always be at a location whose status
# (preferred / eligible / ineligible) is already known. Since we already processed
# the first QueryRoot above, the rest of the loop can assume all QueryRoots are like that.
for current_traversal in match_query.match_traversals:
for match_step in current_traversal:
current_step_location = match_step.as_block.location
if isinstance(match_step.root_block, QueryRoot):
already_encountered_location = any((
current_step_location in preferred_locations,
current_step_location in eligible_locations,
current_step_location in ineligible_locations,
))
if not already_encountered_location:
raise AssertionError(u'Unexpectedly encountered a location in QueryRoot whose '
u'status has not been determined: {} {} {}'
.format(current_step_location, match_step, match_query))
at_eligible_or_preferred_location = (
current_step_location in preferred_locations or
current_step_location in eligible_locations)
# This location has already been encountered and processed.
# Other than setting the "at_eligible_or_preferred_location" state for the sake of
# the following MATCH steps, there is nothing further to be done.
continue
elif isinstance(match_step.root_block, Recurse):
# All Recurse blocks cause locations within to be ineligible.
at_eligible_or_preferred_location = False
elif isinstance(match_step.root_block, Traverse):
# Optional Traverse blocks cause locations within to be ineligible.
# Non-optional Traverse blocks do not change the eligibility of locations within:
# if the pre-Traverse location was eligible, so will the location within,
# and if it was not eligible, neither will the location within.
if match_step.root_block.optional:
at_eligible_or_preferred_location = False
else:
raise AssertionError(u'Unreachable condition reached: {} {} {}'
.format(match_step.root_block, match_step, match_query))
if not at_eligible_or_preferred_location:
ineligible_locations.add(current_step_location)
elif match_step.where_block is not None:
if _is_local_filter(match_step.where_block):
# This location has a local filter, and is not otherwise ineligible (it's not
# in a recursion etc.). Therefore, it's a preferred query start location.
preferred_locations.add(current_step_location)
else:
# Locations with non-local filters are never eligible locations, since they
# depend on another location being executed before them.
ineligible_locations.add(current_step_location)
else:
# No local filtering (i.e. not preferred), but also not ineligible. Eligible it is.
eligible_locations.add(current_step_location)
return preferred_locations, eligible_locations, ineligible_locations
def _calculate_type_bound_at_step(match_step):
"""Return the GraphQL type bound at the given step, or None if no bound is given."""
current_type_bounds = []
if isinstance(match_step.root_block, QueryRoot):
# The QueryRoot start class is a type bound.
current_type_bounds.extend(match_step.root_block.start_class)
if match_step.coerce_type_block is not None:
# The CoerceType target class is also a type bound.
current_type_bounds.extend(match_step.coerce_type_block.target_class)
if current_type_bounds:
# A type bound exists. Assert that there is exactly one bound, defined in precisely one way.
return get_only_element_from_collection(current_type_bounds)
else:
# No type bound exists at this MATCH step.
return None
def _assert_type_bounds_are_not_conflicting(current_type_bound, previous_type_bound,
location, match_query):
"""Ensure that the two bounds either are an exact match, or one of them is None."""
if all((current_type_bound is not None,
previous_type_bound is not None,
current_type_bound != previous_type_bound)):
raise AssertionError(
u'Conflicting type bounds calculated at location {}: {} vs {} '
u'for query {}'.format(location, previous_type_bound, current_type_bound, match_query))
def _expose_only_preferred_locations(match_query, location_types, coerced_locations,
preferred_locations, eligible_locations):
"""Return a MATCH query where only preferred locations are valid as query start locations."""
preferred_location_types = dict()
eligible_location_types = dict()
new_match_traversals = []
for current_traversal in match_query.match_traversals:
new_traversal = []
for match_step in current_traversal:
new_step = match_step
current_step_location = match_step.as_block.location
if current_step_location in preferred_locations:
# This location is preferred. We have to make sure that at least one occurrence
# of this location in the MATCH query has an associated "class:" clause,
# which would be generated by a type bound at the corresponding MATCH step.
current_type_bound = _calculate_type_bound_at_step(match_step)
previous_type_bound = preferred_location_types.get(current_step_location, None)
if previous_type_bound is not None:
# The location is already valid. If so, make sure that this step either does
# not have any type bounds (e.g. via QueryRoot or CoerceType blocks),
# or has type bounds that match the previously-decided type bound.
_assert_type_bounds_are_not_conflicting(
current_type_bound, previous_type_bound, current_step_location, match_query)
else:
# The location is not yet known to be valid. If it does not have
# a type bound in this MATCH step, add a type coercion to the type
# registered in "location_types".
if current_type_bound is None:
current_type_bound = location_types[current_step_location].name
new_step = match_step._replace(
coerce_type_block=CoerceType({current_type_bound}))
preferred_location_types[current_step_location] = current_type_bound
elif current_step_location in eligible_locations:
# This location is eligible, but not preferred. We have to make sure
# none of the MATCH steps with this location have type bounds, and therefore
# will not produce a corresponding "class:" clause in the resulting MATCH query.
current_type_bound = _calculate_type_bound_at_step(match_step)
previous_type_bound = eligible_location_types.get(current_step_location, None)
if current_type_bound is not None:
# There is a type bound here that we need to neutralize.
_assert_type_bounds_are_not_conflicting(
current_type_bound, previous_type_bound, current_step_location, match_query)
# Record the deduced type bound, so that if we encounter this location again,
# we ensure that we again infer the same type bound.
eligible_location_types[current_step_location] = current_type_bound
if (current_step_location not in coerced_locations or
previous_type_bound is not None):
# The type bound here is already implied by the GraphQL query structure,
# or has already been applied at a previous occurrence of this location.
# We can simply delete the QueryRoot / CoerceType blocks that impart it.
if isinstance(match_step.root_block, QueryRoot):
new_root_block = None
else:
new_root_block = match_step.root_block
new_step = match_step._replace(
root_block=new_root_block, coerce_type_block=None)
else:
# The type bound here is not already implied by the GraphQL query structure.
# This should only be possible via a CoerceType block. Lower this CoerceType
# block into a Filter with INSTANCEOF to ensure the resulting query has the
# same semantics, while making the location invalid as a query start point.
if (isinstance(match_step.root_block, QueryRoot) or
match_step.coerce_type_block is None):
raise AssertionError(u'Unexpected MATCH step applying a type bound not '
u'already implied by the GraphQL query structure: '
u'{} {}'.format(match_step, match_query))
new_where_block = convert_coerce_type_and_add_to_where_block(
match_step.coerce_type_block, match_step.where_block)
new_step = match_step._replace(
coerce_type_block=None, where_block=new_where_block)
else:
# There is no type bound that OrientDB can find defined at this location.
# No action is necessary.
pass
else:
# This location is neither preferred nor eligible.
# No action is necessary at this location.
pass
new_traversal.append(new_step)
new_match_traversals.append(new_traversal)
return match_query._replace(match_traversals=new_match_traversals)
def _expose_all_eligible_locations(match_query, location_types, eligible_locations):
"""Return a MATCH query where all eligible locations are valid as query start locations."""
eligible_location_types = dict()
new_match_traversals = []
for current_traversal in match_query.match_traversals:
new_traversal = []
for match_step in current_traversal:
new_step = match_step
current_step_location = match_step.as_block.location
if current_step_location in eligible_locations:
# This location is eligible. We need to make sure it has an associated type bound,
# so that it produces a "class:" clause that will make it a valid query start
# location. It either already has such a type bound, or we can use the type
# implied by the GraphQL query structure to add one.
current_type_bound = _calculate_type_bound_at_step(match_step)
previous_type_bound = eligible_location_types.get(current_step_location, None)
if current_type_bound is None:
current_type_bound = location_types[current_step_location].name
new_coerce_type_block = CoerceType({current_type_bound})
new_step = match_step._replace(coerce_type_block=new_coerce_type_block)
else:
# There is a type bound here. We simply ensure that the bound is not conflicting
# with any other type bound at a different MATCH step with the same location.
_assert_type_bounds_are_not_conflicting(
current_type_bound, previous_type_bound, current_step_location, match_query)
# Record the deduced type bound, so that if we encounter this location again,
# we ensure that we again infer the same type bound.
eligible_location_types[current_step_location] = current_type_bound
else:
# This function may only be called if there are no preferred locations. Since this
# location cannot be preferred, and is not eligible, it must be ineligible.
# No action is necessary in this case.
pass
new_traversal.append(new_step)
new_match_traversals.append(new_traversal)
return match_query._replace(match_traversals=new_match_traversals)
def expose_ideal_query_execution_start_points(compound_match_query, location_types,
coerced_locations):
"""Ensure that OrientDB only considers desirable query start points in query planning."""
new_queries = []
for match_query in compound_match_query.match_queries:
location_classification = _classify_query_locations(match_query)
preferred_locations, eligible_locations, _ = location_classification
if preferred_locations:
# Convert all eligible locations into non-eligible ones, by removing
# their "class:" clause. The "class:" clause is provided either by having
# a QueryRoot block or a CoerceType block in the MatchStep corresponding
# to the location. We remove it by converting the class check into
# an "INSTANCEOF" Filter block, which OrientDB is unable to optimize away.
new_query = _expose_only_preferred_locations(
match_query, location_types, coerced_locations,
preferred_locations, eligible_locations)
elif eligible_locations:
# Make sure that all eligible locations have a "class:" clause by adding
# a CoerceType block that is a no-op as guaranteed by the schema. This merely
# ensures that OrientDB is able to use each of these locations as a query start point,
# and will choose the one whose class is of lowest cardinality.
new_query = _expose_all_eligible_locations(
match_query, location_types, eligible_locations)
else:
raise AssertionError(u'This query has no preferred or eligible query start locations. '
u'This is almost certainly a bug: {}'.format(match_query))
new_queries.append(new_query)
return compound_match_query._replace(match_queries=new_queries)
```
#### File: graphql_compiler/query_formatting/graphql_formatting.py
```python
from graphql import parse
from graphql.language.printer import PrintingVisitor, join, wrap
from graphql.language.visitor import visit
import six
from ..schema import DIRECTIVES
def pretty_print_graphql(query, use_four_spaces=True):
"""Take a GraphQL query, pretty print it, and return it."""
# Use our custom visitor, which fixes directive argument order
# to get the canonical representation
output = visit(parse(query), CustomPrintingVisitor())
# Using four spaces for indentation makes it easier to edit in
# Python source files.
if use_four_spaces:
return fix_indentation_depth(output)
return output
DIRECTIVES_BY_NAME = {d.name: d for d in DIRECTIVES}
class CustomPrintingVisitor(PrintingVisitor):
# Directives are easier to read if their arguments appear in the order in
# which we defined them in the schema. For example, @filter directives are
# much easier to read if the operation comes before the values. The
# arguments of the directives specified in the schema are defined as
# OrderedDicts which allows us to sort the provided arguments to match.
def leave_Directive(self, node, *args):
"""Call when exiting a directive node in the ast."""
name_to_arg_value = {
# Taking [0] is ok here because the GraphQL parser checks for the
# existence of ':' in directive arguments.
arg.split(':', 1)[0]: arg
for arg in node.arguments
}
ordered_args = node.arguments
directive = DIRECTIVES_BY_NAME.get(node.name)
if directive:
sorted_args = []
encountered_argument_names = set()
# Iterate through all defined arguments in the directive schema.
for defined_arg_name in six.iterkeys(directive.args):
if defined_arg_name in name_to_arg_value:
# The argument was present in the query, print it in the correct order.
encountered_argument_names.add(defined_arg_name)
sorted_args.append(name_to_arg_value[defined_arg_name])
# Get all the arguments that weren't defined in the directive schema.
# They will be printed after all the arguments that were in the schema.
unsorted_args = [
value
for name, value in six.iteritems(name_to_arg_value)
if name not in encountered_argument_names
]
ordered_args = sorted_args + unsorted_args
return '@' + node.name + wrap('(', join(ordered_args, ', '), ')')
def fix_indentation_depth(query):
"""Make indentation use 4 spaces, rather than the 2 spaces GraphQL normally uses."""
lines = query.split('\n')
final_lines = []
for line in lines:
consecutive_spaces = 0
for char in line:
if char == ' ':
consecutive_spaces += 1
else:
break
if consecutive_spaces % 2 != 0:
raise AssertionError(u'Indentation was not a multiple of two: '
u'{}'.format(consecutive_spaces))
final_lines.append((' ' * consecutive_spaces) + line[consecutive_spaces:])
return '\n'.join(final_lines)
```
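A quick usage example for `pretty_print_graphql`; the `Animal` type and the directive argument values are illustrative, while `@filter` and `@output` are the compiler directives whose argument order the custom visitor restores.
```python
# Hedged sketch -- assumes the package is installed as graphql_compiler.
from graphql_compiler.query_formatting.graphql_formatting import pretty_print_graphql

query = '''{
    Animal @filter(op_name: "name_or_alias", value: ["$wanted"]) {
        name @output(out_name: "animal_name")
    }
}'''
print(pretty_print_graphql(query))  # re-parsed, directive args reordered, indented with 4 spaces
```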
#### File: graphql_compiler/query_formatting/match_formatting.py
```python
import datetime
import json
import arrow
from graphql import GraphQLBoolean, GraphQLFloat, GraphQLID, GraphQLInt, GraphQLList, GraphQLString
import six
from ..compiler import MATCH_LANGUAGE
from ..compiler.helpers import strip_non_null_from_type
from ..exceptions import GraphQLInvalidArgumentError
from ..schema import GraphQLDate, GraphQLDateTime, GraphQLDecimal
from .representations import coerce_to_decimal, represent_float_as_str, type_check_and_str
def _safe_match_string(value):
"""Sanitize and represent a string argument in MATCH."""
if not isinstance(value, six.string_types):
if isinstance(value, bytes): # should only happen in py3
value = value.decode('utf-8')
else:
raise GraphQLInvalidArgumentError(u'Attempting to convert a non-string into a string: '
u'{}'.format(value))
# Using JSON encoding means that all unicode literals and special chars
# (e.g. newlines and backslashes) are replaced by appropriate escape sequences.
# JSON has the same escaping rules as MATCH / SQL, so no further escaping is necessary.
return json.dumps(value)
def _safe_match_date_and_datetime(graphql_type, expected_python_types, value):
"""Represent date and datetime objects as MATCH strings."""
# Python datetime.datetime is a subclass of datetime.date,
# but in this case, the two are not interchangeable.
# Rather than using isinstance, we will therefore check for exact type equality.
value_type = type(value)
if not any(value_type == x for x in expected_python_types):
raise GraphQLInvalidArgumentError(u'Expected value to be exactly one of '
u'python types {}, but was {}: '
u'{}'.format(expected_python_types, value_type, value))
# The serialize() method of GraphQLDate and GraphQLDateTime produces the correct
# ISO-8601 format that MATCH expects. We then simply represent it as a regular string.
try:
serialized_value = graphql_type.serialize(value)
except ValueError as e:
raise GraphQLInvalidArgumentError(e)
return _safe_match_string(serialized_value)
def _safe_match_decimal(value):
"""Represent decimal objects as MATCH strings."""
decimal_value = coerce_to_decimal(value)
return 'decimal(' + _safe_match_string(str(decimal_value)) + ')'
def _safe_match_list(inner_type, argument_value):
"""Represent the list of "inner_type" objects in MATCH form."""
stripped_type = strip_non_null_from_type(inner_type)
if isinstance(stripped_type, GraphQLList):
raise GraphQLInvalidArgumentError(u'MATCH does not currently support nested lists, '
u'but inner type was {}: '
u'{}'.format(inner_type, argument_value))
if not isinstance(argument_value, list):
raise GraphQLInvalidArgumentError(u'Attempting to represent a non-list as a list: '
u'{}'.format(argument_value))
components = (
_safe_match_argument(stripped_type, x)
for x in argument_value
)
return u'[' + u','.join(components) + u']'
def _safe_match_argument(expected_type, argument_value):
"""Return a MATCH (SQL) string representing the given argument value."""
if GraphQLString.is_same_type(expected_type):
return _safe_match_string(argument_value)
elif GraphQLID.is_same_type(expected_type):
# IDs can be strings or numbers, but the GraphQL library coerces them to strings.
# We will follow suit and treat them as strings.
if not isinstance(argument_value, six.string_types):
if isinstance(argument_value, bytes): # should only happen in py3
argument_value = argument_value.decode('utf-8')
else:
argument_value = six.text_type(argument_value)
return _safe_match_string(argument_value)
elif GraphQLFloat.is_same_type(expected_type):
return represent_float_as_str(argument_value)
elif GraphQLInt.is_same_type(expected_type):
# Special case: in Python, isinstance(True, int) returns True.
# Safeguard against this with an explicit check against bool type.
if isinstance(argument_value, bool):
raise GraphQLInvalidArgumentError(u'Attempting to represent a non-int as an int: '
u'{}'.format(argument_value))
return type_check_and_str(int, argument_value)
elif GraphQLBoolean.is_same_type(expected_type):
return type_check_and_str(bool, argument_value)
elif GraphQLDecimal.is_same_type(expected_type):
return _safe_match_decimal(argument_value)
elif GraphQLDate.is_same_type(expected_type):
return _safe_match_date_and_datetime(expected_type, (datetime.date,), argument_value)
elif GraphQLDateTime.is_same_type(expected_type):
return _safe_match_date_and_datetime(expected_type,
(datetime.datetime, arrow.Arrow), argument_value)
elif isinstance(expected_type, GraphQLList):
return _safe_match_list(expected_type.of_type, argument_value)
else:
raise AssertionError(u'Could not safely represent the requested GraphQL type: '
u'{} {}'.format(expected_type, argument_value))
######
# Public API
######
def insert_arguments_into_match_query(compilation_result, arguments):
"""Insert the arguments into the compiled MATCH query to form a complete query.
Args:
compilation_result: a CompilationResult object derived from the GraphQL compiler
arguments: dict, mapping argument name to its value, for every parameter the query expects.
Returns:
string, a MATCH query with inserted argument data
"""
if compilation_result.language != MATCH_LANGUAGE:
raise AssertionError(u'Unexpected query output language: {}'.format(compilation_result))
base_query = compilation_result.query
argument_types = compilation_result.input_metadata
# The arguments are assumed to have already been validated against the query.
sanitized_arguments = {
key: _safe_match_argument(argument_types[key], value)
for key, value in six.iteritems(arguments)
}
return base_query.format(**sanitized_arguments)
######
```
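For intuition, a short sketch of what the helpers above produce; `_safe_match_argument` is module-private, so this is illustrative rather than public API usage, and the argument values are made up.
```python
# Hedged sketch (assumes _safe_match_argument is imported from the module above).
from graphql import GraphQLList, GraphQLString

print(_safe_match_argument(GraphQLString, u'Peter'))                   # "Peter"
print(_safe_match_argument(GraphQLList(GraphQLString), [u'a', u'b']))  # ["a","b"]
```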
#### File: graphql_compiler/query_formatting/representations.py
```python
import decimal
from ..exceptions import GraphQLInvalidArgumentError
def represent_float_as_str(value):
"""Represent a float as a string without losing precision."""
# In Python 2, calling str() on a float object loses precision:
#
# In [1]: 1.23456789012345678
# Out[1]: 1.2345678901234567
#
# In [2]: 1.2345678901234567
# Out[2]: 1.2345678901234567
#
# In [3]: str(1.2345678901234567)
# Out[3]: '1.23456789012'
#
# The best way to ensure precision is not lost is to convert to string via Decimal:
# https://github.com/mogui/pyorient/pull/226/files
if not isinstance(value, float):
raise GraphQLInvalidArgumentError(u'Attempting to represent a non-float as a float: '
u'{}'.format(value))
with decimal.localcontext() as ctx:
ctx.prec = 20 # floats are max 80-bits wide = 20 significant digits
return u'{:f}'.format(decimal.Decimal(value))
def type_check_and_str(python_type, value):
"""Type-check the value, and then just return str(value)."""
if not isinstance(value, python_type):
raise GraphQLInvalidArgumentError(u'Attempting to represent a non-{type} as a {type}: '
u'{value}'.format(type=python_type, value=value))
return str(value)
def coerce_to_decimal(value):
"""Attempt to coerce the value to a Decimal, or raise an error if unable to do so."""
if isinstance(value, decimal.Decimal):
return value
else:
try:
return decimal.Decimal(value)
except decimal.InvalidOperation as e:
raise GraphQLInvalidArgumentError(e)
```
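A brief behavior sketch for these helpers, with invented values and the functions assumed importable from the module above.
```python
# Hedged sketch.
from decimal import Decimal

assert type_check_and_str(int, 42) == '42'
assert coerce_to_decimal('2.75') == Decimal('2.75')
assert coerce_to_decimal(Decimal('1.5')) == Decimal('1.5')
# represent_float_as_str(1.5) formats via Decimal to avoid Python 2 str() precision loss;
# passing a non-float raises GraphQLInvalidArgumentError.
```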
#### File: tests/integration_tests/integration_test_helpers.py
```python
from decimal import Decimal
import six
from ... import graphql_to_match, graphql_to_sql
from ...compiler.ir_lowering_sql.metadata import SqlMetadata
def sort_db_results(results):
"""Deterministically sort DB results.
Args:
results: List[Dict], results from a DB.
Returns:
List[Dict], sorted DB results.
"""
sort_order = []
if len(results) > 0:
sort_order = sorted(six.iterkeys(results[0]))
def sort_key(result):
"""Convert None/Not None to avoid comparisons of None to a non-None type."""
return tuple((result[col] is not None, result[col]) for col in sort_order)
return sorted(results, key=sort_key)
def try_convert_decimal_to_string(value):
"""Return Decimals as string if value is a Decimal, return value otherwise."""
if isinstance(value, list):
return [try_convert_decimal_to_string(subvalue) for subvalue in value]
if isinstance(value, Decimal):
return str(value)
return value
def compile_and_run_match_query(schema, graphql_query, parameters, graph_client):
"""Compiles and runs a MATCH query against the supplied graph client."""
# MATCH code emitted by the compiler expects Decimals to be passed in as strings
converted_parameters = {
name: try_convert_decimal_to_string(value)
for name, value in six.iteritems(parameters)
}
compilation_result = graphql_to_match(schema, graphql_query, converted_parameters)
query = compilation_result.query
results = [row.oRecordData for row in graph_client.command(query)]
return results
def compile_and_run_sql_query(schema, graphql_query, parameters, engine, metadata):
"""Compiles and runs a SQL query against the supplied SQL backend."""
dialect_name = engine.dialect.name
sql_metadata = SqlMetadata(dialect_name, metadata)
compilation_result = graphql_to_sql(schema, graphql_query, parameters, sql_metadata, None)
query = compilation_result.query
results = []
connection = engine.connect()
with connection.begin() as trans:
for result in connection.execute(query):
results.append(dict(result))
trans.rollback()
return results
```
#### File: tests/test_data_tools/graph.py
```python
from pyorient import OrientDB
from pyorient.constants import DB_TYPE_GRAPH
from pyorient.ogm import Config, Graph
ORIENTDB_SERVER = 'localhost'
ORIENTDB_PORT = 2424
ORIENTDB_USER = 'root'
ORIENTDB_PASSWORD = '<PASSWORD>'
def get_orientdb_url(database_name):
"""Return an OrientDB path for the specified database on the ORIENTDB_SERVER."""
template = 'memory://{}/{}'
return template.format(ORIENTDB_SERVER, database_name)
def get_test_graph(graph_name, load_schema_func, generate_data_func):
"""Generate the test database and return the pyorient client."""
url = get_orientdb_url(graph_name)
config = Config.from_url(url, ORIENTDB_USER, ORIENTDB_PASSWORD, initial_drop=True)
Graph(config, strict=True)
client = OrientDB('localhost', ORIENTDB_PORT)
client.connect(ORIENTDB_USER, ORIENTDB_PASSWORD)
client.db_open(graph_name, ORIENTDB_USER, ORIENTDB_PASSWORD, db_type=DB_TYPE_GRAPH)
load_schema_func(client)
generate_data_func(client)
return client
``` |
{
"source": "0xflotus/grapl",
"score": 2
} |
#### File: etc/local_grapl/grapl_provision.py
```python
import json
import threading
import time
import os
from typing import Any, Dict
from hashlib import sha256, pbkdf2_hmac
from hmac import compare_digest
from random import uniform
import botocore
import boto3
import pydgraph
from uuid import uuid4
from grapl_analyzerlib.grapl_client import MasterGraphClient
from grapl_analyzerlib.schemas import (
AssetSchema,
ProcessSchema,
FileSchema,
IpConnectionSchema,
IpAddressSchema,
IpPortSchema,
NetworkConnectionSchema,
ProcessInboundConnectionSchema,
ProcessOutboundConnectionSchema,
)
from grapl_analyzerlib.schemas.lens_node_schema import LensSchema
from grapl_analyzerlib.schemas.risk_node_schema import RiskSchema
from grapl_analyzerlib.schemas.schema_builder import ManyToMany
def create_secret(secretsmanager):
secretsmanager.create_secret(
Name="JWT_SECRET_ID", SecretString=str(uuid4()),
)
def set_schema(client, schema) -> None:
op = pydgraph.Operation(schema=schema)
client.alter(op)
def drop_all(client) -> None:
op = pydgraph.Operation(drop_all=True)
client.alter(op)
def format_schemas(schema_defs) -> str:
schemas = "\n\n".join([schema.to_schema_str() for schema in schema_defs])
types = "\n\n".join([schema.generate_type() for schema in schema_defs])
return "\n".join(
[" # Type Definitions", types, "\n # Schema Definitions", schemas,]
)
def get_type_dict(client, type_name) -> Dict[str, Any]:
query = f"""
schema(type: {type_name}) {{
type
index
}}
"""
txn = client.txn(read_only=True)
try:
res = json.loads(txn.query(query).json)
finally:
txn.discard()
type_dict = {}
for d in res["types"][0]["fields"]:
if d["name"][0] == "~":
name = f"<{d['name']}>"
else:
name = d["name"]
type_dict[name] = d["type"]
return type_dict
def update_reverse_edges(client, schema):
type_dicts = {}
rev_edges = set()
for edge in schema.forward_edges:
edge_n = edge[0]
edge_t = edge[1]._inner_type.self_type()
if edge_t == "Any":
continue
rev_edges.add(("<~" + edge_n + ">", edge_t))
if not type_dicts.get(edge_t):
type_dicts[edge_t] = get_type_dict(client, edge_t)
if not rev_edges:
return
for (rev_edge_n, rev_edge_t) in rev_edges:
type_dicts[rev_edge_t][rev_edge_n] = "uid"
type_strs = ""
for t in type_dicts.items():
type_name = t[0]
type_d = t[1]
predicates = []
for predicate_name, predicate_type in type_d.items():
predicates.append(f"\t{predicate_name}: {predicate_type}")
predicates = "\n".join(predicates)
type_str = f"""
type {type_name} {{
{predicates}
}}
"""
type_strs += "\n"
type_strs += type_str
op = pydgraph.Operation(schema=type_strs)
client.alter(op)
def provision_mg(mclient) -> None:
# drop_all(mclient)
# drop_all(___local_dg_provision_client)
schemas = (
AssetSchema(),
ProcessSchema(),
FileSchema(),
IpConnectionSchema(),
IpAddressSchema(),
IpPortSchema(),
NetworkConnectionSchema(),
ProcessInboundConnectionSchema(),
ProcessOutboundConnectionSchema(),
)
mg_schemas = [
s.with_forward_edge("risks", ManyToMany(RiskSchema), "risky_nodes")
for s in schemas
]
mg_schemas.append(RiskSchema())
mg_schemas.append(LensSchema())
mg_schema_str = format_schemas(mg_schemas)
set_schema(mclient, mg_schema_str)
BUCKET_PREFIX = "local-grapl"
services = (
"sysmon-graph-generator",
"generic-graph-generator",
"node-identifier",
"graph-merger",
"analyzer-dispatcher",
"analyzer-executor",
"engagement-creator",
)
buckets = (
BUCKET_PREFIX + "-sysmon-log-bucket",
BUCKET_PREFIX + "-unid-subgraphs-generated-bucket",
BUCKET_PREFIX + "-subgraphs-generated-bucket",
BUCKET_PREFIX + "-subgraphs-merged-bucket",
BUCKET_PREFIX + "-analyzer-dispatched-bucket",
BUCKET_PREFIX + "-analyzers-bucket",
BUCKET_PREFIX + "-analyzer-matched-subgraphs-bucket",
BUCKET_PREFIX + "-model-plugins-bucket",
)
def provision_sqs(sqs, service_name: str) -> None:
redrive_queue = sqs.create_queue(
QueueName="grapl-%s-retry-queue" % service_name,
Attributes={"MessageRetentionPeriod": "86400"},
)
redrive_url = redrive_queue["QueueUrl"]
print(f"Provisioned {service_name} retry queue at " + redrive_url)
redrive_arn = sqs.get_queue_attributes(
QueueUrl=redrive_url, AttributeNames=["QueueArn"]
)["Attributes"]["QueueArn"]
redrive_policy = {
"deadLetterTargetArn": redrive_arn,
"maxReceiveCount": "10",
}
queue = sqs.create_queue(QueueName="grapl-%s-queue" % service_name,)
sqs.set_queue_attributes(
QueueUrl=queue["QueueUrl"],
Attributes={"RedrivePolicy": json.dumps(redrive_policy)},
)
print(f"Provisioned {service_name} queue at " + queue["QueueUrl"])
sqs.purge_queue(QueueUrl=queue["QueueUrl"])
sqs.purge_queue(QueueUrl=redrive_queue["QueueUrl"])
def provision_bucket(s3, bucket_name: str) -> None:
s3.create_bucket(Bucket=bucket_name)
print(bucket_name)
def bucket_provision_loop() -> None:
s3_succ = {bucket for bucket in buckets}
s3 = None
for i in range(0, 150):
try:
s3 = s3 or boto3.client(
"s3",
endpoint_url="http://s3:9000",
aws_access_key_id="minioadmin",
aws_secret_access_key="minioadmin",
)
except Exception as e:
if i > 10:
print("failed to connect to sqs or s3", e)
continue
for bucket in buckets:
if bucket in s3_succ:
try:
provision_bucket(s3, bucket)
s3_succ.discard(bucket)
except Exception as e:
if "BucketAlreadyOwnedByYou" in str(e):
s3_succ.discard(bucket)
continue
if i > 10:
print(e)
time.sleep(1)
if not s3_succ:
return
raise Exception("Failed to provision s3")
def hash_password(cleartext, salt) -> str:
hashed = sha256(cleartext).digest()
return pbkdf2_hmac("sha256", hashed, salt, 512000).hex()
def create_user(username, cleartext):
assert cleartext
dynamodb = boto3.resource(
"dynamodb",
region_name="us-west-2",
endpoint_url="http://dynamodb:8000",
aws_access_key_id="dummy_cred_aws_access_key_id",
aws_secret_access_key="dummy_cred_aws_secret_access_key",
)
table = dynamodb.Table("local-grapl-user_auth_table")
    # We hash before calling 'hash_password' because the frontend will also perform
    # client-side hashing
cleartext += "f1dafbdcab924862a198deaa5b6bae29aef7f2a442f841da975f1c515529d254"
cleartext += username
hashed = sha256(cleartext.encode("utf8")).hexdigest()
for i in range(0, 5000):
hashed = sha256(hashed.encode("utf8")).hexdigest()
salt = os.urandom(16)
password = hash_password(hashed.encode("utf8"), salt)
table.put_item(Item={"username": username, "salt": salt, "password": password})
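# Summary of the chain above (descriptive only): the cleartext is peppered with
# a fixed constant plus the username, SHA-256 hashed and then re-hashed 5000
# times to mirror the frontend's client-side hashing, and finally stored after
# a salted PBKDF2-HMAC-SHA256 (512000 iterations) via hash_password.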
def sqs_provision_loop() -> None:
sqs_succ = {service for service in services}
sqs = None
for i in range(0, 150):
try:
sqs = sqs or boto3.client(
"sqs",
region_name="us-east-1",
endpoint_url="http://sqs.us-east-1.amazonaws.com:9324",
aws_access_key_id="dummy_cred_aws_access_key_id",
aws_secret_access_key="dummy_cred_aws_secret_access_key",
)
except Exception as e:
print("failed to connect to sqs or s3", e)
time.sleep(1)
continue
for service in services:
if service in sqs_succ:
try:
provision_sqs(sqs, service)
sqs_succ.discard(service)
except Exception as e:
if i > 10:
print(e)
time.sleep(1)
if not sqs_succ:
return
raise Exception("Failed to provision sqs")
if __name__ == "__main__":
time.sleep(5)
local_dg_provision_client = MasterGraphClient()
print("Provisioning graph database")
for i in range(0, 150):
try:
drop_all(local_dg_provision_client)
break
except Exception as e:
time.sleep(2)
print("Failed to drop", e)
mg_succ = False
sqs_t = threading.Thread(target=sqs_provision_loop)
s3_t = threading.Thread(target=bucket_provision_loop)
sqs_t.start()
s3_t.start()
for i in range(0, 150):
try:
if not mg_succ:
time.sleep(1)
provision_mg(local_dg_provision_client,)
mg_succ = True
break
except Exception as e:
if i > 10:
print("mg provision failed with: ", e)
sqs_t.join(timeout=300)
s3_t.join(timeout=300)
for i in range(0, 150):
try:
client = boto3.client(
service_name="secretsmanager",
region_name="us-east-1",
endpoint_url="http://secretsmanager.us-east-1.amazonaws.com:4566",
aws_access_key_id="dummy_cred_aws_access_key_id",
aws_secret_access_key="dummy_cred_aws_secret_access_key",
)
create_secret(client)
break
except botocore.exceptions.ClientError as e:
if "ResourceExistsException" in e.__class__.__name__:
break
if i >= 50:
print(e)
except Exception as e:
if i >= 50:
print(e)
time.sleep(1)
print("Completed provisioning")
for i in range(0, 150):
try:
create_user("grapluser", "graplpassword")
break
except Exception as e:
if i >= 50:
print(e)
time.sleep(1)
```
#### File: analyzer_executor/src/analyzer-executor.py
```python
import base64
import hashlib
import inspect
import json
import logging
import os
import random
import sys
import time
import traceback
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from multiprocessing import Process, Pipe
from multiprocessing.connection import Connection
from multiprocessing.pool import ThreadPool
from pathlib import Path
from typing import Any, Optional, Tuple, List, Dict, Type, Set, Iterator
import boto3
import botocore.exceptions
import redis
from grapl_analyzerlib.analyzer import Analyzer
from grapl_analyzerlib.execution import ExecutionHit, ExecutionComplete, ExecutionFailed
from grapl_analyzerlib.nodes.any_node import NodeView
from grapl_analyzerlib.nodes.queryable import (
Queryable,
traverse_query_iter,
generate_query,
)
from grapl_analyzerlib.nodes.subgraph_view import SubgraphView
from grapl_analyzerlib.nodes.viewable import Viewable
from grapl_analyzerlib.plugin_retriever import load_plugins
from pydgraph import DgraphClientStub, DgraphClient
sys.path.append("/tmp/")
IS_LOCAL = bool(os.environ.get("IS_LOCAL", False))
IS_RETRY = os.environ["IS_RETRY"]
GRAPL_LOG_LEVEL = os.getenv("GRAPL_LOG_LEVEL")
LEVEL = "ERROR" if GRAPL_LOG_LEVEL is None else GRAPL_LOG_LEVEL
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(LEVEL)
LOGGER.addHandler(logging.StreamHandler(stream=sys.stdout))
try:
directory = Path("/tmp/model_plugins/")
directory.mkdir(parents=True, exist_ok=True)
except Exception as e:
    LOGGER.error("Failed to create directory: %s", e)
class NopCache(object):
def set(self, key, value):
pass
def get(self, key):
return False
if IS_LOCAL:
message_cache = NopCache()
hit_cache = NopCache()
else:
MESSAGECACHE_ADDR = os.environ["MESSAGECACHE_ADDR"]
MESSAGECACHE_PORT = int(os.environ["MESSAGECACHE_PORT"])
HITCACHE_ADDR = os.environ["HITCACHE_ADDR"]
HITCACHE_PORT = os.environ["HITCACHE_PORT"]
message_cache = redis.Redis(host=MESSAGECACHE_ADDR, port=MESSAGECACHE_PORT, db=0)
hit_cache = redis.Redis(host=HITCACHE_ADDR, port=int(HITCACHE_PORT), db=0)
def parse_s3_event(s3, event) -> str:
bucket = event["s3"]["bucket"]["name"]
key = event["s3"]["object"]["key"]
return download_s3_file(s3, bucket, key)
def download_s3_file(s3, bucket: str, key: str) -> str:
obj = s3.Object(bucket, key)
return obj.get()["Body"].read()
def is_analyzer(analyzer_name, analyzer_cls):
if analyzer_name == "Analyzer": # This is the base class
return False
return (
hasattr(analyzer_cls, "get_queries")
and hasattr(analyzer_cls, "build")
and hasattr(analyzer_cls, "on_response")
)
def get_analyzer_objects(dgraph_client: DgraphClient) -> Dict[str, Analyzer]:
clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)
return {
an[0]: an[1].build(dgraph_client)
for an in clsmembers
if is_analyzer(an[0], an[1])
}
def check_caches(
file_hash: str, msg_id: str, node_key: str, analyzer_name: str
) -> bool:
if check_msg_cache(file_hash, node_key, msg_id):
LOGGER.debug("cache hit - already processed")
return True
if check_hit_cache(analyzer_name, node_key):
LOGGER.debug("cache hit - already matched")
return True
return False
def handle_result_graphs(analyzer, result_graphs, sender):
LOGGER.info(f"Re" f"sult graph: {type(analyzer)} {result_graphs[0]}")
for result_graph in result_graphs:
try:
analyzer.on_response(result_graph, sender)
except Exception as e:
LOGGER.error(f"Analyzer {analyzer} failed with {e}")
sender.send(ExecutionFailed)
raise e
def get_analyzer_view_types(query: Queryable) -> Set[Type[Viewable]]:
query_types = set()
for node in traverse_query_iter(query):
query_types.add(node.view_type)
return query_types
def exec_analyzers(
dg_client,
file: str,
msg_id: str,
nodes: List[NodeView],
analyzers: Dict[str, Analyzer],
sender: Any,
):
if not analyzers:
LOGGER.warning("Received empty dict of analyzers")
return
if not nodes:
LOGGER.warning("Received empty array of nodes")
result_name_to_analyzer = {}
query_str = ""
for node in nodes:
querymap = defaultdict(list)
for an_name, analyzer in analyzers.items():
if check_caches(file, msg_id, node.node_key, an_name):
continue
analyzer = analyzer # type: Analyzer
queries = analyzer.get_queries()
if isinstance(queries, list) or isinstance(queries, tuple):
querymap[an_name].extend(queries)
else:
querymap[an_name].append(queries)
for an_name, queries in querymap.items():
analyzer = analyzers[an_name]
for i, query in enumerate(queries):
analyzer_query_types = get_analyzer_view_types(query)
if node.node.get_node_type() + "View" not in [
n.__name__ for n in analyzer_query_types
]:
continue
r = str(random.randint(10, 100))
result_name = f"{an_name}u{int(node.uid, 16)}i{i}r{r}".strip().lower()
result_name_to_analyzer[result_name] = (
an_name,
analyzer,
query.view_type,
)
query_str += "\n"
query_str += generate_query(
query_name=result_name,
binding_modifier=result_name,
root=query,
contains_node_key=node.node_key,
)
if not query_str:
LOGGER.warning("No nodes to query")
return
txn = dg_client.txn(read_only=True)
try:
response = json.loads(txn.query(query_str).json)
finally:
txn.discard()
analyzer_to_results = defaultdict(list)
for result_name, results in response.items():
for result in results:
analyzer_meta = result_name_to_analyzer[
result_name
] # type: Tuple[str, Analyzer, Type[Viewable]]
an_name, analyzer, view_type = (
analyzer_meta[0],
analyzer_meta[1],
analyzer_meta[2],
)
result_graph = view_type.from_dict(dg_client, result)
response_ty = inspect.getfullargspec(analyzer.on_response).annotations.get(
"response"
)
if response_ty == NodeView:
LOGGER.warning("Analyzer on_response is expecting a NodeView")
result_graph = NodeView.from_view(result_graph)
analyzer_to_results[an_name].append(result_graph)
with ThreadPoolExecutor(max_workers=6) as executor:
for an_name, result_graphs in analyzer_to_results.items():
analyzer = analyzers[an_name]
executor.submit(handle_result_graphs, analyzer, result_graphs, sender)
executor.shutdown(wait=True)
def chunker(seq, size):
return [seq[pos : pos + size] for pos in range(0, len(seq), size)]
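# e.g. chunker([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]]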
def mg_alphas() -> Iterator[Tuple[str, int]]:
mg_alphas = os.environ["MG_ALPHAS"].split(",")
for mg_alpha in mg_alphas:
host, port = mg_alpha.split(":")
yield host, int(port)
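# e.g. with MG_ALPHAS="alpha0:9080,alpha1:9080" this yields ("alpha0", 9080)
# and ("alpha1", 9080).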
def execute_file(name: str, file: str, graph: SubgraphView, sender, msg_id):
try:
pool = ThreadPool(processes=4)
exec(file, globals())
client_stubs = (
DgraphClientStub(f"{host}:{port}") for host, port in mg_alphas()
)
client = DgraphClient(*client_stubs)
analyzers = get_analyzer_objects(client)
if not analyzers:
LOGGER.warning(f"Got no analyzers for file: {name}")
LOGGER.info(f"Executing analyzers: {[an for an in analyzers.keys()]}")
chunk_size = 100
if IS_RETRY == "True":
chunk_size = 10
for nodes in chunker([n for n in graph.node_iter()], chunk_size):
LOGGER.info(f"Querying {len(nodes)} nodes")
def exec_analyzer(nodes, sender):
try:
exec_analyzers(client, file, msg_id, nodes, analyzers, sender)
return nodes
except Exception as e:
LOGGER.error(traceback.format_exc())
LOGGER.error(f"Execution of {name} failed with {e} {e.args}")
sender.send(ExecutionFailed())
raise
exec_analyzer(nodes, sender)
pool.apply_async(exec_analyzer, args=(nodes, sender))
pool.close()
pool.join()
sender.send(ExecutionComplete())
except Exception as e:
LOGGER.error(traceback.format_exc())
LOGGER.error(f"Execution of {name} failed with {e} {e.args}")
sender.send(ExecutionFailed())
raise
def emit_event(s3, event: ExecutionHit) -> None:
LOGGER.info(f"emitting event for: {event.analyzer_name, event.nodes}")
event_s = json.dumps(
{
"nodes": json.loads(event.nodes),
"edges": json.loads(event.edges),
"analyzer_name": event.analyzer_name,
"risk_score": event.risk_score,
"lenses": event.lenses,
"risky_node_keys": event.risky_node_keys,
}
)
event_hash = hashlib.sha256(event_s.encode())
key = base64.urlsafe_b64encode(event_hash.digest()).decode("utf-8")
obj = s3.Object(
f"{os.environ['BUCKET_PREFIX']}-analyzer-matched-subgraphs-bucket", key
)
obj.put(Body=event_s)
if IS_LOCAL:
sqs = boto3.client(
"sqs",
region_name="us-east-1",
endpoint_url="http://sqs.us-east-1.amazonaws.com:9324",
aws_access_key_id="dummy_cred_aws_access_key_id",
aws_secret_access_key="dummy_cred_aws_secret_access_key",
)
send_s3_event(
sqs,
"http://sqs.us-east-1.amazonaws.com:9324/queue/grapl-engagement-creator-queue",
"local-grapl-analyzer-matched-subgraphs-bucket",
key,
)
def check_msg_cache(file: str, node_key: str, msg_id: str) -> bool:
to_hash = str(file) + str(node_key) + str(msg_id)
event_hash = hashlib.sha256(to_hash.encode()).hexdigest()
return bool(message_cache.get(event_hash))
def update_msg_cache(file: str, node_key: str, msg_id: str) -> None:
to_hash = str(file) + str(node_key) + str(msg_id)
event_hash = hashlib.sha256(to_hash.encode()).hexdigest()
message_cache.set(event_hash, "1")
def check_hit_cache(file: str, node_key: str) -> bool:
to_hash = str(file) + str(node_key)
event_hash = hashlib.sha256(to_hash.encode()).hexdigest()
return bool(hit_cache.get(event_hash))
def update_hit_cache(file: str, node_key: str) -> None:
to_hash = str(file) + str(node_key)
event_hash = hashlib.sha256(to_hash.encode()).hexdigest()
hit_cache.set(event_hash, "1")
def lambda_handler(events: Any, context: Any) -> None:
# Parse sns message
LOGGER.debug(f"handling events: {events} context: {context}")
client_stubs = (DgraphClientStub(f"{host}:{port}") for host, port in mg_alphas())
client = DgraphClient(*client_stubs)
s3 = get_s3_client()
load_plugins(os.environ["BUCKET_PREFIX"], s3, os.path.abspath("/tmp/"))
for event in events["Records"]:
if not IS_LOCAL:
event = json.loads(event["body"])["Records"][0]
data = parse_s3_event(s3, event)
message = json.loads(data)
LOGGER.info(f'Executing Analyzer: {message["key"]}')
analyzer = download_s3_file(
s3, f"{os.environ['BUCKET_PREFIX']}-analyzers-bucket", message["key"]
)
analyzer_name = message["key"].split("/")[-2]
subgraph = SubgraphView.from_proto(client, bytes(message["subgraph"]))
# TODO: Validate signature of S3 file
LOGGER.info(f"event {event}")
rx, tx = Pipe(duplex=False) # type: Tuple[Connection, Connection]
p = Process(
target=execute_file, args=(analyzer_name, analyzer, subgraph, tx, "")
)
p.start()
t = 0
while True:
p_res = rx.poll(timeout=5)
if not p_res:
t += 1
LOGGER.info(
f"Polled {analyzer_name} for {t * 5} seconds without result"
)
continue
result = rx.recv() # type: Optional[Any]
if isinstance(result, ExecutionComplete):
LOGGER.info("execution complete")
break
# emit any hits to an S3 bucket
if isinstance(result, ExecutionHit):
LOGGER.info(
f"emitting event for {analyzer_name} {result.analyzer_name} {result.root_node_key}"
)
emit_event(s3, result)
update_msg_cache(analyzer, result.root_node_key, message["key"])
update_hit_cache(analyzer_name, result.root_node_key)
assert not isinstance(
result, ExecutionFailed
), f"Analyzer {analyzer_name} failed."
p.join()
### LOCAL HANDLER
def into_sqs_message(bucket: str, key: str) -> str:
return json.dumps(
{
"Records": [
{
"eventTime": datetime.utcnow().isoformat(),
"principalId": {"principalId": None,},
"requestParameters": {"sourceIpAddress": None,},
"responseElements": {},
"s3": {
"schemaVersion": None,
"configurationId": None,
"bucket": {
"name": bucket,
"ownerIdentity": {"principalId": None,},
},
"object": {
"key": key,
"size": 0,
"urlDecodedKey": None,
"versionId": None,
"eTag": None,
"sequencer": None,
},
},
}
]
}
)
def send_s3_event(
sqs_client: Any, queue_url: str, output_bucket: str, output_path: str,
):
sqs_client.send_message(
QueueUrl=queue_url,
MessageBody=into_sqs_message(bucket=output_bucket, key=output_path,),
)
def get_s3_client():
if IS_LOCAL:
return boto3.resource(
"s3",
endpoint_url="http://s3:9000",
aws_access_key_id="minioadmin",
aws_secret_access_key="minioadmin",
)
else:
return boto3.resource("s3")
if IS_LOCAL:
while True:
try:
sqs = boto3.client(
"sqs",
region_name="us-east-1",
endpoint_url="http://sqs.us-east-1.amazonaws.com:9324",
aws_access_key_id="dummy_cred_aws_access_key_id",
aws_secret_access_key="dummy_cred_aws_secret_access_key",
)
alive = False
while not alive:
try:
if "QueueUrls" not in sqs.list_queues(
QueueNamePrefix="grapl-analyzer-executor-queue"
):
LOGGER.info(
"Waiting for grapl-analyzer-executor-queue to be created"
)
time.sleep(2)
continue
except (
botocore.exceptions.BotoCoreError,
botocore.exceptions.ClientError,
botocore.parsers.ResponseParserError,
):
LOGGER.info("Waiting for SQS to become available")
time.sleep(2)
continue
alive = True
res = sqs.receive_message(
QueueUrl="http://sqs.us-east-1.amazonaws.com:9324/queue/grapl-analyzer-executor-queue",
WaitTimeSeconds=3,
MaxNumberOfMessages=10,
)
messages = res.get("Messages", [])
if not messages:
LOGGER.warning("queue was empty")
s3_events = [
(json.loads(msg["Body"]), msg["ReceiptHandle"]) for msg in messages
]
for s3_event, receipt_handle in s3_events:
lambda_handler(s3_event, {})
sqs.delete_message(
QueueUrl="http://sqs.us-east-1.amazonaws.com:9324/queue/grapl-analyzer-executor-queue",
ReceiptHandle=receipt_handle,
)
except Exception as e:
LOGGER.error(traceback.format_exc())
time.sleep(2)
```
#### File: grapl_analyzerlib/nodes/asset_node.py
```python
from typing import *
from pydgraph import DgraphClient
from grapl_analyzerlib.nodes.comparators import Cmp, StrCmp, _str_cmps, PropertyFilter
from grapl_analyzerlib.nodes.queryable import Queryable, NQ
from grapl_analyzerlib.nodes.types import PropertyT, Property
from grapl_analyzerlib.nodes.viewable import (
EdgeViewT,
ForwardEdgeView,
Viewable,
ReverseEdgeView,
)
IAssetQuery = TypeVar("IAssetQuery", bound="AssetQuery")
IAssetView = TypeVar("IAssetView", bound="AssetView")
class AssetQuery(Queryable["AssetView"]):
def __init__(self) -> None:
super(AssetQuery, self).__init__(AssetView)
self._hostname = [] # type: List[List[Cmp[str]]]
self._asset_processes = None # type: Optional[IProcessQuery]
self._files_on_asset = None # type: Optional[IFileQuery]
def _get_reverse_edges(self) -> Mapping[str, Tuple["Queryable", str]]:
return {}
def _get_unique_predicate(self) -> Optional[Tuple[str, "PropertyT"]]:
return None
def _get_node_type_name(self) -> str:
return "Asset"
def _get_property_filters(self) -> Mapping[str, "PropertyFilter[Property]"]:
props = {
"hostname": self._hostname,
}
return {p[0]: p[1] for p in props.items() if p[1]}
def _get_forward_edges(self) -> Mapping[str, "Queryable[Viewable]"]:
f_edges = {
"asset_processes": self._asset_processes,
"files_on_asset": self._files_on_asset,
}
# This is right, Mypy just doesn't recognize it as such
return {k: v for k, v in f_edges.items() if v is not None}
def with_processes(
self: "NQ", process_query: Optional["IProcessQuery"] = None
) -> "NQ":
process = process_query or ProcessQuery() # type: ProcessQuery
process._process_asset = cast(AssetQuery, self)
cast(AssetQuery, self)._asset_processes = process
return self
def with_hostname(
self: "NQ",
eq: Optional[StrCmp] = None,
contains: Optional[StrCmp] = None,
ends_with: Optional[StrCmp] = None,
starts_with: Optional[StrCmp] = None,
regexp: Optional[StrCmp] = None,
distance: Optional[Tuple[StrCmp, int]] = None,
) -> "NQ":
self._hostname.extend(
_str_cmps(
"hostname",
eq=eq,
contains=contains,
ends_with=ends_with,
starts_with=starts_with,
regexp=regexp,
distance=distance,
)
)
return self
class AssetView(Viewable):
"""
.. list-table::
:header-rows: 1
* - Predicate
- Type
- Description
* - node_key
- string
- A unique identifier for this node.
* - hostname
- string
- The hostname of this asset.
* - asset_processes
- List[:doc:`/nodes/process`]
- Processes associated with this asset.
"""
def __init__(
self,
dgraph_client: DgraphClient,
node_key: str,
uid: str,
node_type: Optional[str] = None,
hostname: Optional[str] = None,
asset_processes: Optional[List["ProcessView"]] = None,
**kwargs,
):
super(AssetView, self).__init__(
dgraph_client=dgraph_client, node_key=node_key, uid=uid
)
self.dgraph_client = dgraph_client
self.node_key = node_key
self.uid = uid
self.hostname = hostname
self.asset_processes = asset_processes
self.kwargs = kwargs
def get_node_type(self) -> str:
return "Asset"
def get_hostname(self) -> Optional[str]:
if not self.hostname:
self.hostname = cast(Optional[str], self.fetch_property("hostname", str))
return self.hostname
@staticmethod
def _get_property_types() -> Mapping[str, "PropertyT"]:
return {
"hostname": str,
}
@staticmethod
def _get_reverse_edge_types() -> Mapping[str, Tuple["EdgeViewT", str]]:
return {}
def _get_reverse_edges(self) -> "Mapping[str, ReverseEdgeView]":
return {}
@staticmethod
def _get_forward_edge_types() -> Mapping[str, "EdgeViewT"]:
f_edges = {} # type: Dict[str, Optional["EdgeViewT"]]
return cast(
Mapping[str, "EdgeViewT"], {fe[0]: fe[1] for fe in f_edges.items() if fe[1]}
)
def _get_forward_edges(self) -> "Mapping[str, ForwardEdgeView]":
f_edges = {} # type: Dict[str, Optional[ForwardEdgeView]]
return cast(
"Mapping[str, ForwardEdgeView]",
{fe[0]: fe[1] for fe in f_edges.items() if fe[1]},
)
def _get_properties(self, fetch: bool = False) -> Mapping[str, Union[str, int]]:
props = {
"hostname": self.hostname,
}
return {p[0]: p[1] for p in props.items() if p[1] is not None}
from grapl_analyzerlib.nodes.process_node import (
IProcessQuery,
IProcessView,
ProcessQuery,
)
from grapl_analyzerlib.nodes.file_node import IFileQuery, IFileView
from grapl_analyzerlib.nodes.process_node import ProcessView
```
#### File: python/grapl-dgraph-ttl/app.py
```python
import datetime
import json
import os
from typing import Dict, Iterable, Iterator, Optional, Tuple, Union
from chalice import Chalice
from grapl_analyzerlib.grapl_client import (
GraphClient,
LocalMasterGraphClient,
MasterGraphClient,
)
IS_LOCAL = bool(os.environ.get("IS_LOCAL", False))
GRAPL_DGRAPH_TTL_S = int(os.environ.get("GRAPL_DGRAPH_TTL_S", "-1"))
GRAPL_LOG_LEVEL = os.environ.get("GRAPL_LOG_LEVEL", "ERROR")
GRAPL_TTL_DELETE_BATCH_SIZE = int(os.environ.get("GRAPL_TTL_DELETE_BATCH_SIZE", "1000"))
app = Chalice(app_name="grapl-dgraph-ttl")
app.log.setLevel(GRAPL_LOG_LEVEL)
def query_batch(
client: GraphClient,
batch_size: int,
ttl_cutoff_ms: int,
last_uid: Optional[str] = None,
) -> Iterable[Dict[str, Union[Dict, str]]]:
after = "" if last_uid is None else f", after: {last_uid}"
paging = f"first: {batch_size}{after}"
query = f"""
{{
q(func: le(last_index_time, {ttl_cutoff_ms}), {paging}) {{
uid,
expand(_all_) {{ uid }}
}}
}}
"""
txn = client.txn()
try:
app.log.debug(f"retrieving batch: {query}")
batch = txn.query(query)
app.log.debug(f"retrieved batch: {batch.json}")
return json.loads(batch.json)["q"]
finally:
txn.discard()
def calculate_ttl_cutoff_ms(now: datetime.datetime, ttl_s: int) -> int:
delta = datetime.timedelta(seconds=ttl_s)
cutoff = now - delta
return int(cutoff.timestamp() * 1000)
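# Illustrative: with ttl_s = 86400 the cutoff is exactly one day before `now`,
# expressed in epoch milliseconds.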
def expired_entities(
client: GraphClient, now: datetime.datetime, ttl_s: int, batch_size: int
) -> Iterator[Iterable[Dict[str, Union[Dict, str]]]]:
ttl_cutoff_ms = calculate_ttl_cutoff_ms(now, ttl_s)
app.log.info(f"Pruning entities last indexed before {ttl_cutoff_ms}")
last_uid = None
while 1:
results = query_batch(client, batch_size, ttl_cutoff_ms, last_uid)
if len(results) > 0:
last_uid = results[-1]["uid"]
yield results
if len(results) < batch_size:
break # this was the last page of results
def nodes(entities: Iterable[Dict[str, Union[Dict, str]]]) -> Iterator[str]:
for entity in entities:
yield entity["uid"]
def edges(
entities: Iterable[Dict[str, Union[Dict, str]]]
) -> Iterator[Tuple[str, str, str]]:
for entity in entities:
uid = entity["uid"]
for key, value in entity.items():
if isinstance(value, list):
for v in value:
if isinstance(v, dict):
if len(v.keys()) == 1 and "uid" in v.keys():
yield (uid, key, v["uid"])
def delete_nodes(client: GraphClient, nodes: Iterator[str]) -> int:
del_ = [{"uid": uid} for uid in nodes]
txn = client.txn()
try:
mut = txn.create_mutation(del_obj=del_)
app.log.debug(f"deleting nodes: {mut}")
txn.mutate(mutation=mut, commit_now=True)
app.log.debug(f"deleted nodes: {json.dumps(del_)}")
return len(del_)
finally:
txn.discard()
def delete_edges(client: GraphClient, edges: Iterator[Tuple[str, str, str]]) -> int:
del_ = [
create_edge_obj(src_uid, predicate, dest_uid)
for src_uid, predicate, dest_uid in edges
]
txn = client.txn()
try:
mut = txn.create_mutation(del_obj=del_)
app.log.debug(f"deleting edges: {mut}")
txn.mutate(mutation=mut, commit_now=True)
app.log.debug(f"deleted edges: {json.dumps(del_)}")
return len(del_)
finally:
txn.discard()
def create_edge_obj(
src_uid: str, predicate: str, dest_uid: str
) -> Dict[str, Union[Dict, str]]:
if predicate.startswith("~"): # this is a reverse edge
return {"uid": dest_uid, predicate.lstrip("~"): {"uid": src_uid}}
else: # this is a forward edge
return {"uid": src_uid, predicate: {"uid": dest_uid}}
@app.lambda_function(name="prune_expired_subgraphs")
def prune_expired_subgraphs(event, lambda_context) -> None:
if GRAPL_DGRAPH_TTL_S > 0:
client = LocalMasterGraphClient() if IS_LOCAL else MasterGraphClient()
node_count = 0
edge_count = 0
for entities in expired_entities(
client,
now=datetime.datetime.utcnow(),
ttl_s=GRAPL_DGRAPH_TTL_S,
batch_size=GRAPL_TTL_DELETE_BATCH_SIZE,
):
edge_count += delete_edges(client, edges(entities))
node_count += delete_nodes(client, nodes(entities))
app.log.info(f"Pruned {node_count} nodes and {edge_count} edges")
else:
app.log.warn("GRAPL_DGRAPH_TTL_S is not set, exiting.")
if IS_LOCAL:
import time
while 1:
time.sleep(60)
prune_expired_subgraphs(None, None)
``` |
{
"source": "0xflotus/instagram-scraper",
"score": 3
} |
#### File: igramscraper/model/like.py
```python
class Like:
def _initPropertiesCustom(self,value, prop):
if prop == 'id':
self.identifier = value
if prop == 'username':
self.username = value
```
#### File: igramscraper/model/user_stories.py
```python
from .initializer_model import InitializerModel
class UserStories(InitializerModel):
def __init__(self,owner = None, stories = []):
self.owner = owner
self.stories = stories
``` |
{
"source": "0xflotus/ip2d-py",
"score": 3
} |
#### File: ip2d-py/ip2dpy/__main__.py
```python
from ipy2d import *
from ip2dpy import __version__
import argparse
def main():
parser = argparse.ArgumentParser(description="Convert some IPs to integers")
parser.add_argument(
"-v", action="version", version=f"{__version__}", help="IP to convert"
)
parser.add_argument("ip", help="IP to convert")
parser.add_argument("-i", action="store_true", help="Integer to convert")
parser.add_argument("--hex", action="store_true", help="IPv6 Mode")
parser.add_argument("-c", action="store_true", help="Compress IPv6 addresses")
parser.add_argument(
"-o", type=str, required=False, help="Output format: (b)in, (o)ct or (h)ex"
)
parser.add_argument("-P", action="store_true", help="Output format without prefix")
args = parser.parse_args()
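    # Descriptive note: when converting from an integer (-i), each branch below
    # tries to parse the input as decimal, then hexadecimal (base 16), then
    # octal (base 8).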
try:
if args.hex:
if args.i and args.c:
try:
print(to_6(int(args.ip), compressed=True))
except ValueError:
try:
print(to_6(int(args.ip, 0x10), compressed=True))
except ValueError:
print(to_6(int(args.ip, 0o10), compressed=True))
elif args.i:
try:
print(to_6(int(args.ip)))
except ValueError:
try:
print(to_6(int(args.ip, 0x10)))
except ValueError:
print(to_6(int(args.ip, 0o10)))
else:
output = from_6(args.ip)
if args.o in ["o", "oct"]:
output = (
oct(from_6(args.ip))
if not args.P
else oct(from_6(args.ip))[0x02:]
)
elif args.o in ["h", "x", "hex"]:
output = (
hex(from_6(args.ip))
if not args.P
else hex(from_6(args.ip))[0x02:]
)
elif args.o in ["b", "bin"]:
output = (
bin(from_6(args.ip))
if not args.P
else bin(from_6(args.ip))[0x02:]
)
print(output)
else:
if args.i:
try:
print(to_4(int(args.ip)))
except ValueError:
try:
print(to_4(int(args.ip, 0x10)))
except ValueError:
print(to_4(int(args.ip, 0o10)))
else:
output = from_4(args.ip)
if args.o in ["o", "oct"]:
output = (
oct(from_4(args.ip))
if not args.P
else oct(from_4(args.ip))[0x02:]
)
elif args.o in ["h", "x", "hex"]:
output = (
hex(from_4(args.ip))
if not args.P
else hex(from_4(args.ip))[0x02:]
)
elif args.o in ["b", "bin"]:
output = (
bin(from_4(args.ip))
if not args.P
else bin(from_4(args.ip))[0x02:]
)
print(output)
except IndexError:
parser.print_help()
if __name__ == "__main__":
main()
``` |
{
"source": "0xflotus/ippic",
"score": 3
} |
#### File: ippic/ippic/ipv4.py
```python
import netaddr, sys, os
from PIL import Image
from .util import *
def _ipv4(ip, debug=False):
octets = ip.split(".")
colors = [convert_term_to_rgb(int(i)) for i in octets]
width = 1024
height = 1024
im = Image.new(mode="RGB", size=(width, height), color="#ffffff")
pixels = im.load()
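    # Descriptive note: each of the four octets is rendered as one 512x512
    # quadrant (top-left, top-right, bottom-left, bottom-right in order).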
for x in range(width):
for y in range(height):
if x < 0x200 and y < 0x200:
pixels[x, y] = colors[0]
elif x < 0x400 and y < 0x200:
pixels[x, y] = colors[1]
elif x < 0x200 and y < 0x400:
pixels[x, y] = colors[2]
elif x < 0x400 and y < 0x400:
pixels[x, y] = colors[3]
else:
pixels[x, y] = (0xFF, 0xFF, 0xFF)
if debug:
im.show()
im.save(os.getcwd() + "/ip.png")
sys.exit(0)
``` |
{
"source": "0xflotus/maildown",
"score": 2
} |
#### File: maildown/tests/test_renderer.py
```python
import mock
from maildown import renderer
import mistune
import pygments
from pygments import lexers
from pygments.formatters import html
import premailer
import jinja2
def test_highlight_renderer(monkeypatch):
monkeypatch.setattr(mistune, "escape", mock.MagicMock())
monkeypatch.setattr(lexers, "get_lexer_by_name", mock.MagicMock())
monkeypatch.setattr(html, "HtmlFormatter", mock.MagicMock())
monkeypatch.setattr(pygments, "highlight", mock.MagicMock())
lexers.get_lexer_by_name.return_value = True
html.HtmlFormatter.return_value = {}
r = renderer.HighlightRenderer()
r.block_code("code")
mistune.escape.assert_called_with("code")
r.block_code("code", "python")
lexers.get_lexer_by_name.assert_called_with("python", stripall=True)
pygments.highlight.assert_called_with("code", True, {})
def test_generate_content(monkeypatch):
monkeypatch.setattr(mistune, "Markdown", mock.MagicMock())
monkeypatch.setattr(premailer, "transform", mock.MagicMock())
monkeypatch.setattr(renderer, "HighlightRenderer", mock.MagicMock())
monkeypatch.setattr(jinja2, "Template", mock.MagicMock())
renderer.HighlightRenderer.return_value = 1
premailer.transform.return_value = ""
jinja2.Template.render.return_value = ""
renderer.generate_content("")
mistune.Markdown.assert_called_with(renderer=1)
``` |
{
"source": "0xflotus/many_requests",
"score": 3
} |
#### File: many_requests/many_requests/common.py
```python
from collections.abc import Iterable
N_WORKERS_DEFAULT = 15
N_CONNECTIONS_DEFAULT = 10
class BadResponse(Exception):
"""BadResponse exception. Contains the `response`, `reason` and `attempt_num` as data if supplied."""
def __init__(self, description, response=None, reason=None, attempt_num=None):
self.description = description
self.response = response
self.reason = reason
self.retry_num = attempt_num
def __repr__(self):
return f"BadResponse('{self.description}')"
def is_collection(var):
"""Test if iterable but not a string"""
return isinstance(var, Iterable) and not isinstance(var, str)
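# e.g. is_collection([1, 2]) -> True, is_collection((1, 2)) -> True,
# is_collection("abc") -> False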
``` |