from .ltiecsdockerspawner import * |
# Exercise for reference:
# Create a script that generates and prints a list of numbers from 1 to 20. Please do not create the list manually.
# Answer:
my_list = range(1,21)
print(list(my_list))
# Explanation:
# Explanation: range() is a Python built-in that generates a sequence of integers, but it returns a range object rather than a list; use list() to convert the range object into an actual list. |
import numpy as np # Matrix package
import pandas as pd # DataFrame package
import support # For error handling
import matplotlib.pyplot as plt # Plotting package
import matplotlib.patches as mpatches # Legend in plot
import sys # For aborting scripts
import math # For floor
import user_setup
from PIL import Image # For saving as TIFF
from io import BytesIO # For saving as TIFF
#-----------------------------------------------------------------------------------
######## Function for making list of color codes
## Takes optional color scheme as input
## Returns vector of colors to be used in plot
def adjust_color(color, amount=0.5):
"""
Lightens the given color by multiplying (1-luminosity) by the given amount.
Input can be matplotlib color string, hex string, or RGB tuple.
Examples:
>> adjust_color('g', 0.3)
>> adjust_color('#F034A3', 0.6)
>> adjust_color((.3,.55,.1), 0.5)
"""
import matplotlib.colors as mc
import colorsys
try:
c = mc.cnames[color]
except:
c = color
c = colorsys.rgb_to_hls(*mc.to_rgb(c))
return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])
def get_colors(df, cycles=None, color=None, color_scheme=None):
if not isinstance(df, pd.DataFrame): # If df is not dataframe, assumes df is list
new_df = pd.DataFrame()
new_df['cycle'] = df
df = new_df
color_list = [] # This will be the return variable containing all colors to be plotted.
if color is not None: # If single color is specified, color list will be a list of the same color for every cycle.
for i in range(0, len(cycles)):
color_list.append(color)
return color_list
# If color is qualitative, will use tab10 as default: https://matplotlib.org/examples/color/colormaps_reference.html
if color_scheme == 'Qualitative':
try:
color_list = user_setup.colors_qual
#color_list = eval('plt.cm.'+user_setup.colors_qual+'(range(0,8,1))') # Qualitative colors set by user (up to 8 colors).
except:
color_list = plt.cm.tab10(np.linspace(0, 1, 10))
return color_list
    if cycles is None: # If no cycles are defined, will plot all cycles
        last_cycle = df['cycle'].to_numpy().astype(int)[-1] # Converts cycle column to int and gets the last element (last cycle nr).
        cycles = range(0, last_cycle, 1) # Creates list from 0 to last cycle, increment 1
color_min = 90 # Minimum color (if zero, first color is almost white).
color_max = 290 # Maximum color, 300 looks nice.
color_nr = color_min # Color for each plot (used in loop below)
try:
color_iter = int(
(color_max - color_min) / (len(cycles) - 1)) # If e.g. 3 cycles, want 300-100 = 200, 200/2=100 iteration.
except: # Need this exception to not divide by zero if plotting only one cycle.
color_iter = 0 # If only one cycle is plotted, will not need to change color.
color_nr = (color_min+color_max)/2 # Color is set to average of minimum and maximum.
try:
for i in range(0, len(cycles)):
color_list.append(getattr(plt.cm, color_scheme)(color_nr)) # Setting color for given cycle
color_nr = color_nr + color_iter # Next color or color gradient
except:
color_min = 0.3
color_max = 1.3
color_nr = color_min
try:
color_iter = (
(color_max - color_min) / (
len(cycles) - 1)) # If e.g. 3 cycles, want 0.5, 1, 1,5 so iteration is 0.5 from color_min
except: # Need this exception to not divide by zero if plotting only one cycle.
color_iter = 0 # If only one cycle is plotted, will not need to change color.
color_nr = 1 # Color is set to original color.
for i in range(0, len(cycles)):
color_list.append(adjust_color(color_scheme, color_nr))
color_nr = color_nr + color_iter
return color_list
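# Minimal usage sketch (illustrative, not part of the original module; assumes the local imports
# such as support and user_setup are importable and matplotlib/pandas are installed):
if __name__ == '__main__':
    _demo_cycles = [0, 1, 2]
    print(get_colors(_demo_cycles, cycles=_demo_cycles, color_scheme='Blues'))  # three shades of the 'Blues' colormap
    print(get_colors(_demo_cycles, cycles=_demo_cycles, color='red'))           # the same single color repeated per cycle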
#-----------------------------------------------------------------------------------
def default_color_gradients (nr):
return {
1 : 'Blues',
2 : 'Oranges',
3 : 'Greens',
4 : 'Greys',
5 : 'Purples',
6 : 'Reds',
7 : 'YlOrBr',
8 : 'GnBu',
9 : 'spring',
10: 'cool',
11: 'viridis',
12: 'plasma'
}[nr]
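# Example (illustrative): default_color_gradients(1) returns 'Blues'; get_colors then resolves
# that name to a matplotlib colormap via getattr(plt.cm, 'Blues').
assert default_color_gradients(1) == 'Blues'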
#-----------------------------------------------------------------------------------
def set_plot_specs(**kwargs):
try:
x1 = kwargs['x1']
get_labels(x1)
except:
support.error_message('Not recognizable x variable (error in set_plot_specs - get_labels)')
try:
y1 = kwargs['y1']
get_labels(y1)
except:
support.error_message('Not recognizable y variable (error in set_plot_specs - get_labels)')
try:
xlabel = kwargs['xlabel']
except:
xlabel = None
try:
ylabel = kwargs['ylabel']
except:
ylabel = None
try:
xlim = kwargs['xlim']
except:
xlim = None
try:
ylim = kwargs['ylim']
except:
ylim = None
try:
xticks = kwargs['xticks']
except:
xticks = None
try:
yticks = kwargs['yticks']
except:
yticks = None
try:
type = kwargs['type']
except:
type = 'scatter'
try:
markersize = kwargs['markersize']
except:
markersize = 2
try:
legend = kwargs['legend']
except:
try:
legend = kwargs['autolegend'] # autolegend variable used by auto_plot if no legend is specified.
except:
legend = None
try:
legend_loc = kwargs['legend_loc']
except:
legend_loc = 1
legend_color_list = [] # list of colors for each legend entry.
try:
custom_code = kwargs['custom_code']
except:
custom_code = None
try:
custom_code_first = kwargs['custom_code_first']
except:
custom_code_first = None
try:
save_path_png = kwargs['save_path']
except:
try:
save_path_png = str(user_setup.plots) + '\\' + kwargs['save_as']
except:
save_path_png = None
try:
save_path_tiff = kwargs['save_path']
except:
try:
save_path_tiff = str(user_setup.plots) + '\\' + kwargs['save_as_tiff']
except:
save_path_tiff = None
return (x1,y1, xlabel, ylabel, xlim, ylim, xticks, yticks, type, markersize, legend, legend_loc, legend_color_list, custom_code, custom_code_first, save_path_png, save_path_tiff)
#-----------------------------------------------------------------------------------
def set_pickle_specs (legend_color_list, **kwargs):
pickle_name = kwargs['pickle1']
df = pd.read_pickle(pickle_name) # Reads pickle as strings
try:
cycles = kwargs['cycles1']
except:
        last_cycle = math.floor(df['cycle'].to_numpy().astype(float)[-1]) # Converts cycle column to float, then rounds the last element (last cycle) down to an int.
        cycles = range(0, last_cycle+1, 1) # Creates list from 0 to last cycle, increment 1. Need +1 to include the actual last cycle.
    if cycles is None: # In the case that cycles1 = None from user.
        last_cycle = math.floor(df['cycle'].to_numpy().astype(float)[-1]) # Converts cycle column to float, then rounds the last element (last cycle) down to an int.
        cycles = range(0, last_cycle+1, 1) # Creates list from 0 to last cycle, increment 1. Need +1 to include the actual last cycle.
try:
color_scheme = kwargs['color_scheme1']
except:
color_scheme = default_color_gradients(1)
try:
color = kwargs['color1']
except:
        if kwargs['x1'] not in ('cap_incr_spec', 'Ew', 'discharge_incr_spec', 'charge_incr_spec', 'potential_diff_cap', 'cap_incr_spec_nonzero', 'charge_incr_spec_nonzero', 'discharge_incr_spec_nonzero'):
try:
color = user_setup.colors_qual[0] # Default color is first color in user defined colors.
except:
color = plt.get_cmap("tab10")(0) # Default color is first color in tab10 colors (blue).
else:
color = None
color_list = get_colors(df, cycles, color, color_scheme) # Choose color maps from: https://matplotlib.org/examples/color/colormaps_reference.html
try:
legend_color_type = kwargs['legend_color_type']
except:
legend_color_type = None
if legend_color_type == 'individual_cycles':
legend_color_list = color_list # Sets color for legend to be each cycle that is plotted. Only one cell supported at the moment.
else:
legend_color_list.append(color_list[round(len(color_list) / 2)]) # Sets color for legend to be middle color if multiple cycles.
try:
marker = kwargs['marker1']
except:
marker = 'o'
try:
markerfill = kwargs['markerfill1']
except:
markerfill = 1
try:
linestyle = kwargs['linestyle1']
except:
linestyle = '-' # default linestyle is solid line
return (pickle_name, df, cycles, color, color_list, legend_color_list, marker, markerfill, linestyle)
#-----------------------------------------------------------------------------------
def add_limits(xlim, ylim): # Specifies x and y limits
if xlim != None: # If not specified, will plot default
try:
plt.xlim(xlim) # Specifies x limits (min, max) from user
except:
print ('Not recognisable x limits. \n Format: xlim = [min, max] \n Example: xlim = [0, 50]')
if ylim != None:
try:
plt.ylim(ylim) # Specifies y limits (min, max) from user
except:
print ('Not recognisable y limits. \n Format: ylim = [min, max] \n Example: ylim = [0.1, 2.2]')
return
#-----------------------------------------------------------------------------------
def add_ticks(xticks, yticks): # Specifies x and y ticks
if xticks != None: # If not specified, will plot default
try:
plt.xticks(xticks) # Specifies x ticks from user
except:
print ('Not recognisable x ticks. \n Format: xticks = [value1,value2,...] \n Example: xticks=[25,50,75,100]')
if yticks != None:
try:
plt.yticks(yticks) # Specifies y ticks from user
except:
print ('Not recognisable y ticks. \n Format: yticks = [value1, value2, value3, ...] \n Example: yticks = [0, 0.5, 1, 1.5]')
return
#-----------------------------------------------------------------------------------
def add_labels(x1, y1, xlabel, ylabel): # Add label to plot
if xlabel == None:
plt.xlabel(get_labels(x1)) # Adds default label corresponding to variable
else:
plt.xlabel(xlabel)
if ylabel == None:
plt.ylabel(get_labels(y1)) # Adds default label corresponding to variable
else:
plt.ylabel(ylabel)
return
#-----------------------------------------------------------------------------------
def add_legend(legend, colorlist, legend_loc=1): # Legend guide: https://matplotlib.org/users/legend_guide.html
if legend == None:
return
patches=[]
for i in range(0, len(legend)): # Iterates through each legend entry
if legend[i] != None:
patches.append(mpatches.Patch(color=colorlist[i], label=legend[i])) #Sets legend color and text
plt.legend(handles=patches, loc=legend_loc) # Add all the specified legends to the plot.
return
#-----------------------------------------------------------------------------------
def add_custom (custom_code): # Executes string in custom_code as code. Multiple lines separated by \n. Ex: custom_code='plt.text(50,1,\'Awesome\') \nplt.text(100,1,\'Awesomer\')'
if custom_code!= None:
try:
exec(custom_code)
except:
print('Not recognisable custom code. Needs to be one string, where multiple lines are separated by \\n.')
return
#-----------------------------------------------------------------------------------
def SavePlot(save_path_png, save_path_tiff): # Save high resolution by saving displayed plot as .eps and use that in latex, or as png by using save_path variable
if save_path_png!=None: # Saves plot as png, if save_path variable is used.
plt.savefig((save_path_png+'.png'), format='png', dpi=1000) # > 300 DPI is recommended by NTNU in master theses.
if save_path_tiff!=None: # Saves plot as tiff, if save_path variable is used.
png1 = BytesIO() # from https://stackoverflow.com/questions/37945495/python-matplotlib-save-as-tiff
plt.savefig(png1, format='png', dpi=600) # first save as png
png2 = Image.open(png1) # load this image into PIL
png2.save((save_path_tiff + '.tiff')) # save as TIFF
png1.close()
return
#-----------------------------------------------------------------------------------
def AddPickleToPlot (df, cycles, x1, y1, color_list, type, marker, markerfill, markersize, linestyle, custom_code_first=None):
    for i in range(0, len(cycles)): # NOTE: When plotting capacity vs cycle, it will only iterate once (different type of "cycle variable")
df_cycle_x = df[df['cycle'].astype(float) == cycles[i]] # Make new data frame for given cycle
add_custom(custom_code_first) # Executes string in custom_code_first as code. Used if custom code must be executed before plotting. Multiple lines separated by \n. Ex: custom_code='plt.text(50,1,\'Awesome\') \nplt.text(100,1,\'Awesomer\')'
if type == 'scatter':
try:
if markerfill != 1:
plt.scatter(df_cycle_x[x1].astype(float), df_cycle_x[y1].astype(float), marker=marker,facecolors=markerfill, s=markersize, edgecolors=np.array(color_list[i])) # s = size
else:
plt.scatter(df_cycle_x[x1].astype(float), df_cycle_x[y1].astype(float), marker=marker, facecolors=markerfill, s=markersize, c=np.array(color_list[i])) # s = size
except:
if markerfill != 1:
plt.scatter(df_cycle_x[x1].astype(float), df_cycle_x[y1].astype(float), marker=marker,facecolors=markerfill, s=markersize, edgecolors=color_list[i]) # s = size
else:
plt.scatter(df_cycle_x[x1].astype(float), df_cycle_x[y1].astype(float),marker=marker, facecolors=markerfill,s=markersize,c=color_list[i]) # s = size
elif type == 'line':
x_mask = np.isfinite(df_cycle_x[x1].astype(float))
#y_mask = np.isfinite(df_cycle_x[y1].astype(float))
plt.plot(df_cycle_x[x1].astype(float)[x_mask], df_cycle_x[y1].astype(float)[x_mask], c=np.array(color_list[i]), linestyle=linestyle)
#plt.plot(df_cycle_x[x1].astype(float), df_cycle_x[y1].astype(float), c=np.array(color_list[i]))
else:
print('Type variable might be wrong, plotting as scatter plot')
plt.scatter(df_cycle_x[x1].astype(float), df_cycle_x[y1].astype(float), marker=marker,facecolors=markerfill, s=markersize,
c=np.array(color_list[i])) # s = size
return
#-----------------------------------------------------------------------------------
def set_next_pickle (nr, **kwargs):
try:
kwargs['pickle'+str(nr-1)] = kwargs['pickle'+str(nr)]
except:
try:
kwargs['pickle' + str(nr - 1)] = kwargs['override']
except:
sys.exit(0)
try:
kwargs['x' + str(nr - 1)] = kwargs['x' + str(nr)] # Looks for x2 etc, will be plotted on same axis as before.
except:
kwargs['x' + str(nr - 1)] = kwargs['x1']
try:
kwargs['y' + str(nr - 1)] = kwargs['y' + str(nr)] # Looks for y2 etc, will be plotted on same axis as before.
except:
kwargs['y' + str(nr - 1)] = kwargs['y1']
try:
kwargs['cycles'+str(nr - 1)] = kwargs['cycles'+str(nr)]
except:
kwargs['cycles'+str(nr - 1)] = None
try:
kwargs['color_scheme'+str(nr - 1)] = kwargs['color_scheme'+str(nr)]
except:
kwargs['color_scheme'+str(nr - 1)] = default_color_gradients(nr)
try:
kwargs['color'+str(nr - 1)] = kwargs['color'+str(nr)]
except:
        if kwargs['x1'] not in ('cap_incr_spec', 'Ew', 'charge_incr_spec', 'discharge_incr_spec', 'potential_diff_cap', 'cap_incr_spec_nonzero', 'charge_incr_spec_nonzero', 'discharge_incr_spec_nonzero'):
kwargs['color'+str(nr - 1)] = plt.get_cmap("tab10")(nr-1)
else:
kwargs['color'+str(nr - 1)] = None
try:
kwargs['marker'+str(nr - 1)] = kwargs['marker'+str(nr)]
except:
kwargs['marker'+str(nr - 1)] = 'o'
try:
kwargs['markerfill'+str(nr - 1)] = kwargs['markerfill'+str(nr)]
except:
kwargs['markerfill'+str(nr - 1)] = 1
try:
kwargs['linestyle'+str(nr - 1)] = kwargs['linestyle'+str(nr)]
except:
kwargs['linestyle'+str(nr - 1)] = '-'
return (kwargs['pickle'+str(nr-1)],kwargs['x' + str(nr - 1)],kwargs['y' + str(nr - 1)],kwargs['cycles'+str(nr-1)], kwargs['color'+str(nr-1)], kwargs['color_scheme'+str(nr-1)], kwargs['marker'+str(nr-1)], kwargs['markerfill'+str(nr-1)], kwargs['linestyle'+str(nr-1)])
#-----------------------------------------------------------------------------------
def plot_plot(x1, y1, xlabel, ylabel, xlim, ylim, xticks, yticks, legend_list, legend_color_list, legend_loc, custom_code, save_path_png, save_path_tiff):
add_limits(xlim, ylim) # Changes x and y limits (min, max), if specified.
add_ticks(xticks, yticks) # Changes x and y ticks (min, max), if specified.
add_labels(x1, y1, xlabel, ylabel) # Adds label, either default to variable or specified
add_legend(legend_list, legend_color_list, legend_loc) # Adding legend(s) to plot.
add_custom(custom_code) # Executes string in custom_code as code. Multiple lines separated by \n. Ex: custom_code='plt.text(50,1,\'Awesome\') \nplt.text(100,1,\'Awesomer\')'
SavePlot(save_path_png, save_path_tiff) # Save high resolution by saving displayed plot as .eps and use that in latex, or as png by using save_path variable
plt.show()
return
#-----------------------------------------------------------------------------------
def get_labels(x): # Takes in variable as input and returns the corresponding string
return {
'time': 'Time (s)',
'time_hour' : 'Time (h)',
'potential': 'Potential (V)',
'potential_diff_cap': 'Potential (V)',
'energy_char': 'Charge energy (Wh)',
'energy_dis': 'Discharge energy (Wh)',
'capacitance_char': 'Charge capacitance (F)',
'capacitance_dis': 'Discharge capacitance (F)',
'current': 'Current (mA)',
'current_uA': 'Current ($\mu$A)',
'charge_incr': 'Charge capacity (mAh)',
'discharge_incr': 'Discharge capacity (mAh)',
'cap_incr': 'Capacity (mAh)',
'discharge_incr_spec': 'Discharge capacity (mAh/g)',
'discharge_incr_spec_nonzero': 'Discharge capacity (mAh/g)',
'charge_incr_spec': 'Charge capacity (mAh/g)',
'charge_incr_spec_nonzero': 'Charge capacity (mAh/g)',
'cap_incr_spec': 'Capacity (mAh/g)',
'cap_incr_spec_nonzero': 'Capacity (mAh/g)',
'diff_cap': 'Differential capacity (mAh/g/V)',
'cycle_nr': 'Cycles',
'discharge_spec': 'Discharge capacity (mAh/g)',
'charge_spec': 'Charge capacity (mAh/g)',
'Ew' : 'Ew (V)',
'Ec' : 'Ec (V)',
'Ec_inv' : 'Ec (V)',
'Ew-Ec' : 'Ew-Ec (V)',
'Re_Z' : 'Re (Z)',
'-Im_Z': '-Im (Z)'
}[x] |
import pytest
@pytest.mark.parametrize(
'compressed, seed, privhex, privwif, address',
[
(False, 'qwe', '1814825e69d2e72eabfbec9c0168f5689dcc26509aa2a8590d859a90402f0495', b'5Hztg9Lf6fPida3GtdxhzmC6gTh98oQ6dGPotiFWMBSauUioQxj', b'1HXSiiE8wqH7rtqXW3duSWLBjm4v8XZNX8'),
(True, 'qwe', '1814825e69d2e72eabfbec9c0168f5689dcc26509aa2a8590d859a90402f0495', b'Kx2X5mom9zTGkQq38v8swx3z5ApAuRnwq4wfyF52Y55veC7Ce5dz', b'1A3XHZzcxp3bC62T21DQhXZuA6GdzVYxP9'),
(False, '12345', '28820488de48082a13c570e68e1295e0207c6ef826a685c220d10fd6d8b95d49', b'5J88JQkRPEffAwVL73kwtDzGFqtBFiCsFXajzb9ytmCZbs4VSUY', b'1MN1fFX2xmKS1qZXyhw5EUpS9Laa2HaeYX'),
(True, '12345', '28820488de48082a13c570e68e1295e0207c6ef826a685c220d10fd6d8b95d49', b'KxaTDqped9KdUsW3KhAyF6KkLWktFvsNo7yvmBke7U62tWmMs8dk', b'1sW6JDNWppzUjQr8jjQ9KJmVx92ooKEd6'),
]
)
def test(compressed, seed, privhex, privwif, address):
from crypto import seed2privkey, privkey2privwif, privkey2addr
privkey = seed2privkey(seed)
assert privkey.hex() == privhex
assert privkey2privwif(privkey, compressed) == privwif
assert privkey2addr(privkey, compressed) == address
"""
this test is already covered by the main test() above
def test_bin2privkey():
import axolotl_curve25519 as curve
from crypto import seed2privkey, seed2bin
seed = 'my test seed'
assert seed2privkey(seed) == curve.generatePrivateKey(seed2bin(seed))
"""
|
# coding=utf-8
from train.optimizer.optimizer import BaseOptimizer
import torch
class Adam(BaseOptimizer):
"""Adam optimizer
"""
def __init__(self, **kwargs):
super(Adam, self).__init__(**kwargs)
def get_optimizer(self, weight_params):
return torch.optim.Adam(weight_params,lr = self.lr)
|
from hirefire.procs.celery import CeleryProc
class WorkerProc(CeleryProc):
name = 'worker'
queues = ['celery']
inspect_statuses = ['active', 'reserved', 'scheduled']
|
#!/usr/bin/env python3
import web3
import json
import time
import requests
from web3 import Web3
from setup import WS_ARBITRUM_ALCHEMY, WS_MAINNET_INFURA, MY_TELEGRAM_ID, send_message, BONDING_MANAGER_PROXY, BONDING_MANAGER_ABI, ROUND_MANAGER_PROXY, ROUND_MANAGER_ABI, TICKET_BROKER_PROXY
w3 = Web3(Web3.WebsocketProvider(WS_ARBITRUM_ALCHEMY))
w3m = Web3(Web3.WebsocketProvider(WS_MAINNET_INFURA))
###
# Variables
###
poll_interval = 300
###
# Contracts, Filters & Classes
###
bonding_manager_proxy = w3.eth.contract(address=BONDING_MANAGER_PROXY, abi=json.loads(BONDING_MANAGER_ABI))
round_manager_proxy = w3.eth.contract(address=ROUND_MANAGER_PROXY, abi=json.loads(ROUND_MANAGER_ABI))
class Transcoder:
    # Class attributes; default to True so that a script crash/restart does not trigger invalid warnings
rewardCalled = True
isActive = True
    def __init__(self, address, subscriber=None):
        self.address = address
        # Avoid a shared mutable default argument; each instance gets its own subscriber list
        self.subscriber = subscriber if subscriber is not None else []
class ErrorMessage:
# For debugging purposes - avoid sending a telegram message of the error for every poll_interval
ErrorSent = False
###
# Functions
###
def update_transcoder_instances():
"""Reads the subscription file and updates the transcoder dict.
"""
with open("transcoder_subscriptions.json", "r") as f:
ts = json.load(f)
# Without resetting the dict (and losing updated variables like .rewardCalled), remove the transcoders that are no longer in the subscriber list
noLongerInList = list(set(transcoder.keys()).difference(ts.keys()))
for addr in noLongerInList:
del transcoder[addr]
for address, subscriber in ts.items():
if address not in transcoder.keys():
transcoder[address] = Transcoder(address, subscriber)
else:
setattr(transcoder[address], "subscriber", subscriber)
def get_active_transcoders():
"""Gets all the active transcoders in the Livepeer Pool
"""
first = bonding_manager_proxy.functions.getFirstTranscoderInPool().call()
transcoderPoolSize = bonding_manager_proxy.functions.getTranscoderPoolSize().call()
activeTranscoders = [first]
for i in range(0, transcoderPoolSize-1):
activeTranscoders.append(bonding_manager_proxy.functions.getNextTranscoderInPool(activeTranscoders[i]).call())
return activeTranscoders
def process_round():
"""After a new round has begun, check for missed reward calls.
Also checks if the transcoder became active/inactive.
"""
activeTranscoders = get_active_transcoders()
for address in transcoder:
if (transcoder[address].rewardCalled == False and transcoder[address].isActive == True):
for chat_id in transcoder[address].subscriber:
send_message("NO REWARDS CLAIMED - Orchestrator {} did not claim the rewards in the last round! You did not get any rewards for your stake.".format(address[:8]+"..."), chat_id)
time.sleep(1.5)
transcoder[address].rewardCalled = False
if address not in activeTranscoders:
transcoder[address].isActive = False
for chat_id in transcoder[address].subscriber:
send_message("WARNING - Orchestrator {} is no longer in the active orchestrator set! You will no longer receive rewards for your stake.".format(address[:8]+"..."), chat_id)
time.sleep(1.5)
elif transcoder[address].isActive == False and address in activeTranscoders:
transcoder[address].isActive = True
for chat_id in transcoder[address].subscriber:
send_message("Orchestrator {} is back in the active orchestrator set! You will get notified if and when rewards are called.".format(address[:8]+"..."), chat_id)
time.sleep(1.5)
def check_rewardCut_changes(fromBlock, toBlock):
"""Checks for changes in the reward & fee cut values between fromBlock and toBlock.
If an event exists, get the caller of the event and check if it is in the subscription list.
Get the new and old fee and reward cut values and check if either one changed.
Send notification to the subscribers.
"""
rewardCut_filter = w3.eth.filter({
"fromBlock": fromBlock,
"toBlock": toBlock,
"address": BONDING_MANAGER_PROXY,
"topics": ['0x7346854431dbb3eb8e373c604abf89e90f4865b8447e1e2834d7b3e4677bf544'],
})
for event in rewardCut_filter.get_all_entries():
caller = w3.toChecksumAddress("0x" + event["topics"][1].hex()[26:])
if caller in transcoder.keys():
rewardCut = w3.toInt(hexstr=event["data"][2:][:64])
feeShare = w3.toInt(hexstr=event["data"][2:][64:])
roundNr = round_manager_proxy.functions.currentRound().call()
previousData = bonding_manager_proxy.functions.getTranscoderEarningsPoolForRound(caller, roundNr).call()
pRewardCut = previousData[1]
pFeeShare = previousData[2]
tx = event["transactionHash"].hex()
if rewardCut != pRewardCut or feeShare != pFeeShare:
message = "REWARD AND/OR FEE CUT CHANGE - for Orchestrator {caller}!\n\n" \
"New values:\nReward cut = {rewardCut} (old: {pRewardCut})\n" \
"Fee cut = {feeCut} (old: {pFeeCut})\n" \
"[Transaction link](https://arbiscan.io/tx/{tx})".format(
caller = caller[:8]+"...", rewardCut = str(rewardCut/10**4)+"%",
pRewardCut = str(pRewardCut/10**4)+"%", feeCut = str(100-(feeShare/10**4))+"%",
pFeeCut = str(100-(pFeeShare/10**4))+"%", tx = tx)
for chat_id in transcoder[caller].subscriber:
send_message(message, chat_id)
time.sleep(1.5)
def check_rewardCall(fromBlock, toBlock):
"""Checks for reward transactions between blockOld and block.
If an event exists, get the caller of the event and check if it is in the subscription list.
Sends notification to the subscribers and sets the rewardCalled attribute for the caller to true.
"""
rewardCall_filter = w3.eth.filter({
"fromBlock": fromBlock,
"toBlock": toBlock,
"address": BONDING_MANAGER_PROXY,
"topics": ['0x619caafabdd75649b302ba8419e48cccf64f37f1983ac4727cfb38b57703ffc9'],
})
for event in rewardCall_filter.get_all_entries():
caller = w3.toChecksumAddress("0x" + event["topics"][1].hex()[26:])
if caller in transcoder.keys() and transcoder[caller].rewardCalled == False:
tokens = round(w3.toInt(hexstr=event["data"])/10**18,2)
roundNr = round_manager_proxy.functions.currentRound().call()
data = bonding_manager_proxy.functions.getTranscoderEarningsPoolForRound(caller, roundNr).call()
totalStake = round(data[0]/10**18)
rewardCut = data[1]/10**4
rewardCutTokens = round(tokens*(rewardCut/10**2),2)
tx = event["transactionHash"].hex()
message = "Rewards claimed for round {roundNr} -> Orchestrator {caller} received {tokens} LPT " \
"for a total stake of {totalStake} LPT (keeping {rewardCutTokens} LPT due to its {rewardCut} reward cut)\n" \
"[Transaction link](https://arbiscan.io/tx/{tx})".format(
roundNr = roundNr, caller = caller[:8]+"...", tokens = tokens, totalStake = totalStake,
rewardCutTokens = rewardCutTokens, rewardCut = str(rewardCut)+"%", tx = tx)
for chat_id in transcoder[caller].subscriber:
send_message(message, chat_id)
time.sleep(1.5)
transcoder[caller].rewardCalled = True
def check_rewardCall_status(block):
"""Sends a notification if a transcoder didn't call reward yet but is in the active set
"""
for address in transcoder.keys():
if transcoder[address].rewardCalled == False and transcoder[address].isActive == True:
for chat_id in transcoder[address].subscriber:
send_message("WARNING - Orchestrator {} did not yet claim rewards at block {} of 5760 in the current round!".format(address[:8]+"...", str(block%5760)), chat_id)
time.sleep(1.5)
def check_ticketRedemption(fromBlock, toBlock):
"""Checks for ticket redemptions between blockOld and block.
If an event exists, get the caller of the event and check if it is in the subscription list.
Sends notification to the subscribers.
"""
ticket_filter = w3.eth.filter({
"fromBlock": fromBlock,
"toBlock": toBlock,
"address": TICKET_BROKER_PROXY,
"topics": ['0x8b87351a208c06e3ceee59d80725fd77a23b4129e1b51ca231fc89b40712649c']})
for event in ticket_filter.get_all_entries():
caller = w3.toChecksumAddress("0x" + event["topics"][2].hex()[26:])
if caller in transcoder.keys():
ticketValue = round(w3.toInt(hexstr=event["data"])/10**18, 4)
feeShare = bonding_manager_proxy.functions.getTranscoder(caller).call()[2]/10**6
ticketShare = round(ticketValue*feeShare, 4)
tx = event["transactionHash"].hex()
message = "Orchestrator {caller} redeemed a winning ticket worth {ticketValue} ETH!\n\n" \
"Its delegators share {ticketShare} ETH due to its {feeCut}% fee cut.\n" \
"[Transaction link](https://arbiscan.io/tx/{tx})".format(
caller = caller[:8]+"...", ticketValue = ticketValue, ticketShare = ticketShare,
feeCut = round((1-feeShare)*100), tx = tx)
for chat_id in transcoder[caller].subscriber:
send_message(message, chat_id)
time.sleep(1.5)
def check_round_change(fromBlock, toBlock):
"""Checks for round initilized txs between blockOld and block.
If an event exists, get the blocknumber of this tx and the round number
"""
round_filter = w3.eth.filter({
"fromBlock": fromBlock,
"toBlock": toBlock,
"address": ROUND_MANAGER_PROXY,
"topics": ['0x22f2fc17c5daf07db2379b3a03a8ef20a183f761097a58fce219c8a14619e786'],
})
result = round_filter.get_all_entries()
if result:
return result[0]["blockNumber"], w3.toInt(result[0]["topics"][1])
else:
return None, None
###
# Loop
###
transcoder = {}
# Just for debugging: Avoid sending the same telegram exception message every polling interval
latestError = 0
def main():
global latestError
# Mainnet
with open('mainnet_block_records.txt', 'r') as fh:
mainnetBlockOld = int(fh.readlines()[-1])
# Arbitrum
with open('arbitrum_block_records.txt', 'r') as fh:
arbitrumBlockOld = int(fh.readlines()[-1])
# Rounds
with open('roundNr_records.txt', 'r') as fh:
roundNrOld = int(fh.readlines()[-1])
while True:
try:
arbitrumBlock = w3.eth.blockNumber
mainnetBlock = w3m.eth.blockNumber
update_transcoder_instances()
roundStartBlock, roundNr = check_round_change(arbitrumBlockOld, arbitrumBlock)
if roundStartBlock and roundNr > roundNrOld:
# In this case, process the previous round from blockOld to the first block of the new round
check_rewardCut_changes(arbitrumBlockOld, roundStartBlock)
check_rewardCall(arbitrumBlockOld, roundStartBlock)
check_ticketRedemption(arbitrumBlockOld, roundStartBlock)
# Set the blockOld to last processed blocknumber
                mainnetBlockOld = roundNr*5760 # this can be calculated based on the previous round
arbitrumBlockOld = roundStartBlock
roundNrOld = roundNr # otherwise we process the same round again and again due to the above assignment
                # Write the processed block numbers to both files - in case we need to restart the script
with open('mainnet_block_records.txt', 'a') as fh:
fh.write(str(mainnetBlockOld) + "\n")
with open('arbitrum_block_records.txt', 'a') as fh:
fh.write(str(arbitrumBlockOld) + "\n")
with open('roundNr_records.txt', 'a') as fh:
fh.write(str(roundNrOld) + "\n")
process_round()
print("processed round at block {}".format(str(arbitrumBlockOld)))
# Run checks once there are at least 500 new blocks since last check
if mainnetBlock > mainnetBlockOld + 500:
check_rewardCut_changes(arbitrumBlockOld, arbitrumBlock)
check_rewardCall(arbitrumBlockOld, arbitrumBlock)
check_rewardCall_status(mainnetBlock) #we only use the block for the notification, no query necessary
check_ticketRedemption(arbitrumBlockOld, arbitrumBlock)
# Set the blockOld to last processed blocknumber
mainnetBlockOld = mainnetBlock
arbitrumBlockOld = arbitrumBlock
# Write to processed blocks file
with open('mainnet_block_records.txt', 'a') as fh:
fh.write(str(mainnetBlockOld) + "\n")
with open('arbitrum_block_records.txt', 'a') as fh:
fh.write(str(arbitrumBlockOld) + "\n")
print("Processed until: " + str(arbitrumBlockOld))
except Exception as ex:
print(ex)
            # Only send a telegram message if it's a different error
if str(ex) != latestError:
send_message(ex, MY_TELEGRAM_ID)
latestError = str(ex)
time.sleep(poll_interval)
if __name__ == '__main__':
main()
|
import numpy as np
import os
import torch.utils.data as data
from utils import to_device, get_device, load_img_as_np_array
class RootDataset(data.Dataset):
"""
Only implements some basic preparations to put images into the neural net.
Any subclass has to take care of loading images and calculating all the
input for TextSnake, like center line, root polygons etc. as well as
applying augmentation.
"""
def __getitem__(self, index):
raise NotImplementedError()
def __init__(self):
super().__init__()
def get_training_data(self, img_and_masks):
"""
Prepares meta data the network needs for training.
:param img_and_masks: dictionary with input image and one mask per
TextSnake input
"""
img_height, img_width, _ = img_and_masks['img'].shape
# to pytorch channel sequence
img_and_masks['img'] = img_and_masks['img'].transpose(2, 0, 1)
# All input images are uint8. Do some type conversions
# to match expected model input:
# Image: float32
# Root mask: uint8, 0 or 1
# Center line mask: uint8, 0 or 1
# Radius map: float32
# Sin map: float32, -1.0 to 1.0
# Cos map: float32, -1.0 to 1.0
for mask in [img_and_masks['roots'], img_and_masks['centerlines']]:
if mask.max() > 1:
# Store result of array division in int array
# without type conversions.
# See https://github.com/numpy/numpy/issues/17221
np.divide(mask, 255, out=mask, casting='unsafe')
img_and_masks['img'] = img_and_masks['img'].astype(np.float32)
img_and_masks['radii'] = img_and_masks['radii'].astype(np.float32)
# Map [0, 255] int to [-1, 1] float
for key in ['sin', 'cos']:
map = img_and_masks[key].astype(np.float32)
map -= 255 / 2 # [0, 255] -> [-127.5, 127.5]
map /= 255 / 2 # [-127.5, 127.5] -> [-1, 1]
img_and_masks[key] = map
return (img_and_masks['img'],
img_and_masks['roots'],
img_and_masks['centerlines'],
img_and_masks['radii'],
img_and_masks['sin'],
img_and_masks['cos']
)
def get_test_data(self, image, image_id, image_path):
# TODO
H, W, _ = image.shape
if self.transform:
# TODO mean and stds
# image, polygons = self.transform(image)
pass
# to pytorch channel sequence
image = image.transpose(2, 0, 1)
meta = {
'image_id': image_id,
'image_path': image_path,
'Height': H,
'Width': W
}
return image, meta
def __len__(self):
raise NotImplementedError()
class Eco2018(RootDataset):
"""
Iterable to be passed into PyTorch's data.DataLoader.
This class loads the images and masks, and extracts root polygons from the
binary annotation mask (we must feed these into the net). Additional stuff
like image type conversions etc. is handled by the baseclass.
Input is expected to follow this structure:
./data
\ Eco2018
        \ annotations
\ training
\ roots
\ ...
\ validation
\ ...
\ images
\ training
\ validation
Eco2018 differs from Total-Text, because the input for TextSnake, like
center lines, is already present as additional image masks.
"""
def __init__(
self,
data_root='data/Eco2018',
is_training=True,
transformations=None):
super().__init__()
self.data_root = data_root
self.is_training = is_training
self.transformations = transformations
self._annotation_names = ['roots', 'centerlines', 'radii', 'sin', 'cos']
self.image_root = os.path.join(data_root, 'images',
'training' if is_training else 'validation')
self.annotation_root = os.path.join(data_root, 'annotations',
'training' if is_training else 'validation')
self.image_list = os.listdir(self.image_root)
# One list per image with names of root mask, center line mask, etc.
self.annotation_lists = {
key: [
img_name.replace('-', f'-{key}-') for img_name in
self.image_list
] for key in self._annotation_names}
def __getitem__(self, item):
image_id = self.image_list[item]
image_path = os.path.join(self.image_root, image_id)
# Read image data
image = load_img_as_np_array(image_path)
# Read annotations and build a dict with them
img_and_masks = {'img': image}
for annotation_name in self._annotation_names:
annotation_id = self.annotation_lists[annotation_name][item]
annotation_path = os.path.join(self.annotation_root,
annotation_name, annotation_id)
img_and_masks[annotation_name] = load_img_as_np_array(annotation_path)
# Apply augmentations to image and masks
if self.transformations:
img_and_masks = self.transformations(img_and_masks)
# image, tr_mask, tcl_mask, radius_map, sin_map, cos_map
return self.get_training_data(img_and_masks)
def __len__(self):
return len(self.image_list)
class DeviceLoader:
"""
Thin wrapper around a PyTorch DataLoader moving
data to the GPU (if available).
"""
def __init__(self, data_loader):
self._loader = data_loader
self._device = get_device()
def __iter__(self):
"""Move the current batch to device and return it."""
for batch in self._loader:
yield to_device(batch, self._device)
def __len__(self):
return len(self._loader)
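# Usage sketch (illustrative, not part of the original module; assumes the directory layout
# described in the Eco2018 docstring exists under data/Eco2018 and that utils.get_device /
# to_device behave as imported above; the batch size is an arbitrary example):
if __name__ == '__main__':
    train_set = Eco2018(data_root='data/Eco2018', is_training=True)
    train_loader = DeviceLoader(data.DataLoader(train_set, batch_size=4, shuffle=True))
    for img, roots, centerlines, radii, sin_map, cos_map in train_loader:
        print(img.shape)  # e.g. (4, 3, H, W) after the channel transpose in get_training_data
        break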
|
import re
import argparse
import itertools
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from modules.parser import default_parser
marker_re = re.compile(r'^\(([0-9]+)x([0-9]+)\)')
def len_decode(line, v2=False):
total_len, idx = 0, 0
while (idx < len(line)):
assert line[idx] != ')'
if line[idx] == '(':
match = marker_re.match(line[idx:])
num_chars = int(match.group(1))
num_repeates = int(match.group(2))
idx += len(match.group(0))
if v2:
len_chars_to_repeat = len_decode(line[idx:idx+num_chars], True)
else:
len_chars_to_repeat = len(line[idx:idx+num_chars])
total_len += num_repeates * len_chars_to_repeat
idx += num_chars
else:
total_len += 1
idx += 1
return total_len
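# Sanity check (the sample string is assumed from the decompression puzzle this script targets):
# with v1 markers 'X(8x2)(3x3)ABCY' expands to 18 characters, with nested v2 markers to 20.
assert len_decode('X(8x2)(3x3)ABCY') == 18
assert len_decode('X(8x2)(3x3)ABCY', v2=True) == 20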
def main():
with open(args.input_file, 'r') as infile:
for line in infile:
            print(len_decode(line.rstrip()))
            print(len_decode(line.rstrip(), True))
if __name__ == "__main__":
parser = argparse.ArgumentParser(parents=[default_parser()])
args = parser.parse_args()
main()
|
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataflow pipeline to find all the names associated with an email, and all the emails associated
with a name, and output the results to two tables.
"""
from __future__ import absolute_import
import argparse
import json
import logging
import apache_beam as beam
from apache_beam.io.gcp.internal.clients import bigquery
def create_set(values):
res = set()
for elt in values:
if elt is not None and not elt == set([None]):
res = res.union(elt)
return res
def mf1(k_v):
return {'from_name': k_v[0], 'emails': list(k_v[1]) if k_v[1] else []}
def mf2(k_v):
return {'from_email': k_v[0], 'names': list(k_v[1]) if k_v[1] else []}
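# Quick illustration of the two format helpers (example name and address are assumptions):
assert mf1(('Jane Doe', {'jane@example.com'})) == {'from_name': 'Jane Doe', 'emails': ['jane@example.com']}
assert mf2(('jane@example.com', {'Jane Doe'})) == {'from_email': 'jane@example.com', 'names': ['Jane Doe']}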
def get_emails(input_data):
"""..."""
return (
input_data
| 'emails per name' >> beam.FlatMap(
lambda row: [(row['from_name'], set([row['from_email']]))] if row['from_name'] and '@' in row['from_email'] else [])
| 'name emails' >> beam.CombinePerKey(create_set)
| 'format1' >>
beam.Map(mf1)
# beam.Map(lambda k_v: {
# 'from_name': k_v[0], 'emails': list(k_v[1]) if k_v[1] else []
# })
)
def get_names(input_data):
"""..."""
return (
input_data
| 'names per email' >> beam.FlatMap(
lambda row: [(row['from_email'], set([row['from_name']]))] if row['from_email'] and '@' in row['from_email'] else [])
| 'email names' >> beam.CombinePerKey(create_set)
| 'format2' >>
beam.Map(mf2)
# beam.Map(lambda k_v: {
# 'from_email': k_v[0], 'names': list(k_v[1]) if k_v[1] else []
# })
)
def run(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument(
'--input',
default='aju-vtests2:mail_archives.ingestion_test',
help=(
'Input BigQuery table to process specified as: '
'PROJECT:DATASET.TABLE or DATASET.TABLE.'))
parser.add_argument(
'--output1',
required=True,
help=(
'Output BigQuery table for results specified as: '
'PROJECT:DATASET.TABLE or DATASET.TABLE.'))
parser.add_argument(
'--output2',
required=True,
help=(
'Output BigQuery table for results specified as: '
'PROJECT:DATASET.TABLE or DATASET.TABLE.'))
parser.add_argument(
'--gcs_location',
required=False,
help=('GCS Location to store files to load '
'data into Bigquery'))
known_args, pipeline_args = parser.parse_known_args(argv)
table_schema1 = bigquery.TableSchema()
field_schema = bigquery.TableFieldSchema()
field_schema.name = 'from_name'
field_schema.type = 'string'
field_schema.mode = 'required'
table_schema1.fields.append(field_schema)
# repeated field
field_schema = bigquery.TableFieldSchema()
field_schema.name = 'emails'
field_schema.type = 'string'
field_schema.mode = 'repeated'
table_schema1.fields.append(field_schema)
table_schema2 = bigquery.TableSchema()
field_schema = bigquery.TableFieldSchema()
field_schema.name = 'from_email'
field_schema.type = 'string'
field_schema.mode = 'required'
table_schema2.fields.append(field_schema)
# repeated field
field_schema = bigquery.TableFieldSchema()
field_schema.name = 'names'
field_schema.type = 'string'
field_schema.mode = 'repeated'
table_schema2.fields.append(field_schema)
with beam.Pipeline(argv=pipeline_args) as p:
# Read the table rows into a PCollection.
rows = p | 'read' >> beam.io.ReadFromBigQuery(table=known_args.input)
emails_per_name = get_emails(rows)
names_per_email = get_names(rows)
# Write the output using a "Write" transform that has side effects.
# pylint: disable=expression-not-assigned
emails_per_name | 'Write1' >> beam.io.WriteToBigQuery(
known_args.output1,
# schema='from_name:STRING, emails:STRING',
schema = table_schema1,
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE)
names_per_email | 'Write2' >> beam.io.WriteToBigQuery(
known_args.output2,
schema = table_schema2,
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE)
# Run the pipeline (all operations are deferred until run() is called).
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run()
# example invocation. If output tables of the same name already exist, they will be dropped
# and overwritten. You can use the 'DirectRunner' also, if you set
# GOOGLE_APPLICATION_CREDENTIALS locally.
# python names_emails.py \
# --region $REGION \
# --input '[PROJECT ID]:mail_archives.names_emails' \
# --output1 '[PROJECT ID]:mail_archives.emails_name_test2' \
# --output2 '[PROJECT ID]:mail_archives.names_email_test2' \
# --runner DataflowRunner \
# --project $PROJECT \
# --temp_location gs://$BUCKET/tmp/
|
"""empty message
Revision ID: 638d823a6ee4
Revises: cba1420943e0
Create Date: 2018-02-25 02:13:57.105607
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '638d823a6ee4'
down_revision = 'cba1420943e0'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('notes', sa.Column('created_at', sa.DateTime(), nullable=False))
op.add_column('notes', sa.Column('updated_at', sa.DateTime(), nullable=False))
op.add_column('users', sa.Column('created_at', sa.DateTime(), nullable=False))
op.add_column('users', sa.Column('updated_at', sa.DateTime(), nullable=False))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'updated_at')
op.drop_column('users', 'created_at')
op.drop_column('notes', 'updated_at')
op.drop_column('notes', 'created_at')
# ### end Alembic commands ###
|
import basic
while True:
text = input('basic> ')
result, error = basic.run(text)
if error:
print(error.as_string())
else:
print(result)
|
import os
DX = (-1, -1, -1, 0, 0, 0, 1, 1, 1)
DY = (-1, 0, 1, -1, 0, 1, -1, 0, 1)
def read_input(file_name='input.txt'):
file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), file_name)
with open(file_path, 'r') as f:
lines = [x.strip() for x in f.readlines()]
ie_algo = lines[0]
return lines[2:], ie_algo
def add_padding(image: list, ch, size=2):
padding_top_down = ch * (len(image[0]) + size * 2)
padding_left_right = ch * size
padded = [padding_top_down for _ in range(size)]
for line in image:
padded.append(f'{padding_left_right}{line}{padding_left_right}')
for _ in range(size):
padded.append(padding_top_down)
return padded
def get_index(image, r, c):
binary_str = ''
for index in range(len(DX)):
x = r + DX[index]
y = c + DY[index]
binary_str += '0' if image[x][y] == '.' else '1'
return int(binary_str, 2)
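# Worked example (the 3x3 neighbourhood below is an assumed illustration, not puzzle input):
# rows '...', '#..', '.#.' centred at (1, 1) read as binary 000100010, i.e. index 34.
assert get_index(['...', '#..', '.#.'], 1, 1) == 34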
def count_lit_pixels(image):
lit_count = 0
for row in image:
for col in row:
if col == '#':
lit_count += 1
return lit_count
def enhance_image(image, ie_algo):
enhanced_image = []
for row in range(1, len(image) - 1):
pixel_row = ''
for col in range(1, len(image[row]) - 1):
index = get_index(image, row, col)
pixel_row += ie_algo[index]
enhanced_image.append(pixel_row)
# print(f"Dimensions of enhanced image: {len(enhanced_image)} X {len(enhanced_image[0])}")
return enhanced_image
def main():
image, ie_algo = read_input('input.txt')
print(f"Image dimensions: {len(image)} X {len(image[0])}")
assert len(ie_algo) == 512
# Part one
image_new = image
for i in range(2):
border = '.' if i % 2 == 0 else '#'
image_new = add_padding(image_new, border)
image_new = enhance_image(image_new, ie_algo)
lit_count = count_lit_pixels(image_new)
print("After 2 iterations")
print(f"Dimensions of enhanced image: {len(image_new)} X {len(image_new[0])}")
print(f"Lit pixels: {lit_count}")
# Part two: 2/50 iterations already done in part 1
for i in range(48):
border = '.' if i % 2 == 0 else '#'
image_new = add_padding(image_new, border)
image_new = enhance_image(image_new, ie_algo)
lit_count = count_lit_pixels(image_new)
print("After 50 iterations")
print(f"Dimensions of enhanced image: {len(image_new)} X {len(image_new[0])}")
print(f"Lit pixels: {lit_count}")
# Dump image to file
file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'image.txt')
with open(file_path, 'w') as f:
for row in image_new:
f.write(row)
f.write('\n')
if __name__ == '__main__':
main()
|
import numpy as np
### PHYSICAL CONSTANTS
m_n = 1.674927471e-27 # kg
h_eV = 4.135667662e-15 # eV*s
h_J = 6.626070040e-34 # J*s
hbar_eV = h_eV/(2*np.pi)
hbar_J = h_J/(2*np.pi)
eV = 1.602176565e-19 # J
k_B_meV_per_K = 8.617333262e-02 # meV/K
#------------------------------------------------------------------------------
### Utilities
def constant_normed(start, stop):
return 1.0 / (stop - start)
#------------------------------------------------------------------------------
def energy_lambda_symetric_nrange(elow, eup, lam, dlam, ne, nlam):
"""
Spans energy and lambda arrays for calculations
Parameters
----------
elow : float
lower energy boundary
eup : float
upper energy boundary
lam : float
center wavelength
dlam : float
relative one sided width of the triangular wavelength distr.
ne : int
number of energy points
nlam : int
number of lambda points
Returns
-------
ee : numpy.ndarray
dim. (ne, nlam) energy array
ll : numpy.ndarray
dim. (ne, nlam) wavelength array
"""
l = lam * np.linspace(1-dlam, 1+dlam, nlam)
e = np.linspace(elow, eup, ne)
return np.meshgrid(l, e)
#------------------------------------------------------------------------------
def energy_lambda_nrange(eup, lam, dlam, ne, nlam):
"""
    Spans energy and lambda arrays for calculations, where the lower
    energy boundary is given by the neutron wavelength
Parameters
----------
eup : float
upper energy boundary
lam : float
center wavelength
dlam : float
relative one sided width of the triangular wavelength distr.
ne : int
number of energy points
nlam : int
number of lambda points
Returns
-------
ee : numpy.ndarray
dim. (ne, nlam) energy array
ll : numpy.ndarray
dim. (ne, nlam) wavelength array
"""
l = lam * np.linspace(1-dlam, 1+dlam, nlam)
ll = np.tile(l,(ne,1))
a = -0.99999 * energy_from_lambda(l)
ee = np.linspace(a, eup, ne)
assert ee.shape == ll.shape
return ee, ll
#------------------------------------------------------------------------------
def energy_lambda_symetric_drange(elow, eup, lam, dlam, step_e, step_lam):
"""
Spans energy and lambda arrays for calculations
Parameters
----------
elow : float
lower energy boundary
eup : float
upper energy boundary
lam : float
center wavelength
dlam : float
relative one sided width of the triangular wavelength distr.
step_e : float
step between energy points
step_lam : int
step between lambda points
Returns
-------
ee : numpy.ndarray
dim. (ne, nlam) energy array
ll : numpy.ndarray
dim. (ne, nlam) wavelength array
"""
return energy_lambda_symetric_nrange(
elow,
eup,
lam,
dlam,
ne=int((eup - elow)/step_e) + 1,
nlam=int(2*dlam * lam / step_lam) + 1
)
# l = lam * np.arange(1-dlam, 1+dlam+(step_lam/lam/2), step_lam/lam)
# e = np.linspace(elow, eup + step_e/2, step_e)
# return np.meshgrid(l, e)
#------------------------------------------------------------------------------
def energy_lambda_drange(eup, lam, dlam, step_e, step_lam):
"""
Spans energy and lambda arrays for calculations, where lower
energy boundary is given by neutron wavelength
Parameters
----------
eup : float
upper energy boundary
lam : float
center wavelength
dlam : float
relative one sided width of the triangular wavelength distr.
step_e : float
step between energy points
step_lam : int
step between lambda points
Returns
-------
ee : numpy.ndarray
dim. (ne, nlam) energy array
ll : numpy.ndarray
dim. (ne, nlam) wavelength array
"""
return energy_lambda_nrange(
eup,
lam,
dlam,
ne=int((eup - -0.99999 * energy_from_lambda(lam))/step_e) + 1,
nlam=int(2*dlam * lam / step_lam) + 1
)
#------------------------------------------------------------------------------
### CONVERSION FUNCTIONS
def energy_from_lambda(lam):
"""Takes lambda in angstroem and gives energy in meV"""
if hasattr(lam, "__len__"):
return np.array([h_J**2/(2*m_n*(l*1e-10)**2)/eV*1e3 for l in lam])
return h_J**2/(2*m_n*(lam*1e-10)**2)/eV*1e3
#------------------------------------------------------------------------------
def wavelength_from_energy(energy):
"""
Calculates the wavelength of a neutron from its energy
Parameters
----------
energy : float, ndarray
neutron energy in meV
Return
------
wavelength : float, ndarray
wavelength of neutron in angstroem
"""
return 9.045 / np.sqrt(energy)
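# Round-trip sanity sketch (illustrative values, not part of the original module): a 1.8 AA
# neutron carries roughly 25.25 meV, consistent with E[meV] ~ 81.8 / lambda[AA]^2, and
# converting back recovers ~1.8 AA.
if __name__ == "__main__":
    print(energy_from_lambda(1.8))        # ~25.25 meV
    print(wavelength_from_energy(25.25))  # ~1.8 AA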
#------------------------------------------------------------------------------
def detector_efficiency(energy, lam, on):
"""
Efficiency of the CASCADE detector depending on the initial wavelength and
energy transfer.
Parameters
----------
energy : float, ndarray
energy transfer
lam : float, ndarray
initial wavelength of the neutron
on : bool, int
True, 1 for efficiency included
        False, 0 for efficiency neglected
Returns
-------
: float, ndarray
efficiency factor of the cascade detector
"""
return (6*(0.0233+0.079*(0.5*np.log10(81.82) - 0.5*np.log10(81.92/lam**2 + energy)))-1) * on + 1
#------------------------------------------------------------------------------
def velocity_from_lambda(lam):
"""Takes lambda in angstroem and gives the velocity in m/s"""
return h_J/(m_n*lam*1e-10)
#------------------------------------------------------------------------------
def MIEZE_DeltaFreq_from_time(miezetime, lsd, lam):
"""
Calculates the MIEZE frequency (omega_B-omega_A) for operation with pi-flip in Hz.
Takes miezetime in s, sample detector distance lsd in m and the wavelength lam in AA
"""
return miezetime*m_n*velocity_from_lambda(lam)**3/(4*np.pi*hbar_J*lsd)
#------------------------------------------------------------------------------
### explicit MIEZE phase calculation
def MIEZE_phase(energy, freq, lsd, lam):
"""
Explicit MIEZE phase applying a pi-flip
    energy - in meV, frequency freq - in Hz, sample detector distance lsd - in m, wavelength lam - in AA
"""
vel = velocity_from_lambda(lam)
return 4.0*np.pi*lsd*freq*(1/vel-1/(np.sqrt(vel**2+2/m_n*energy*eV*1e-3)))
#------------------------------------------------------------------------------
### TRIANGULAR WAVELENGTH DISTRIBUTION OF A VELOCITY SELECTOR
def triangle_distribution(x, m, FWHM):
"""
Triangular wavelength distribution.
Takes lambda x, center lambda m, and FWHM lam_width
"""
l = m-m*FWHM
r = m+m*FWHM
# if l<=x and x<=m:
# return ((x-l)/(m-l))/(m-l)
# elif m<=x and x<=r:
# return (1-(x-m)/(r-m))/(m-l)
# else:
# return 0
left_side = np.where(np.logical_and(l <= x, x <= m), (x-l)/(m-l)/(m-l), 0.0)
right_side = np.where(np.logical_and(m <= x, x <= r), (1-(x-m)/(r-m))/(m-l), 0.0)
return left_side + right_side
#------------------------------------------------------------------------------
def bose_factor(e, T):
"""
Parameters
----------
e : float
Energy value of the excitation [meV]
T : float
        Temperature of the sample [K]
Returns
-------
n_B : float
Bose factor
"""
return np.exp(-abs(e)/ k_B_meV_per_K / T) |
# There is no need to change any code in the common functions.
# Each project defines its own validation function and calls it before
# each common function.
def f1():
print('f1')
def f2():
print('f2')
def f3():
print('f3')
def f4():
print('f4')
# Project 1
def proj1_validation():
print('project 1 validation')
proj1_validation()
f1()
proj1_validation()
f2()
proj1_validation()
f4()
# Project 2
def proj2_validation():
print('project 2 validation')
proj2_validation()
f2()
proj2_validation()
f3()
proj2_validation()
f4()
# Project 3
def proj3_validation():
print('project 3 validation')
proj3_validation()
f2()
proj3_validation()
f3()
|
import collections
class FifoDeque(collections.deque):
'''|
| collections.deque, a double-ended queue type that also ensures O(1) performance when
| used as a FIFO (using its append and popleft methods)
|________'''
pop = collections.deque.popleft
#-----------------------------------------------------------------------------------------------
class Order(object):
'''|
| Used to represent an active order in the book. Its data fields capture the information parsed from the input.
| The same class is used for limit and iceberg orders. Therefore, there are 2 additional variables used, i.e.,
| isIceberg denotes an iceberg order and reloadPeakSize is a variable used to keep the value of the visible part
| (and to reload it after an execution) of an iceberg order. In case of iceberg orders, peakSize is used to keep track
| on the remaining quantity of the order, which is visualized when printing the book state (the same as the limit orders).
|________'''
def __init__(self, isBuy, orderId, price, quantity, peakSize, isIceberg = None):
self.isBuy = isBuy # True for buy orders
self.isIceberg = peakSize < quantity if isIceberg == None else isIceberg
self.orderId = orderId # Unique number identifying an order
self.price = price # Unsigned integer representing scaled decimal (with maximum 6 d.p. and not exceeding 10000.0)
self.quantity = quantity # Total quantity
self.peakSize = peakSize # Visible quantity
def show(self):
'''|
| Used when visualizing the Book state (command s)
|________'''
bstr = 'B' if self.isBuy else 'S' # Ask/Bid
istr = '*' if self.isIceberg else ''
        # TODO: price/price_scale; some alignment...
        print(bstr, self.price, self.orderId, self.quantity, istr)
        # print(bstr, self.price, self.orderId, self.peakSize, self.quantity, istr)
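# Usage sketch (illustrative, not part of the original module): FifoDeque dequeues in FIFO order
# through its popleft alias, and an Order whose peakSize is smaller than its quantity is flagged
# as an iceberg order.
if __name__ == '__main__':
    q = FifoDeque([1, 2])
    q.append(3)
    assert q.pop() == 1                 # popleft alias -> FIFO order
    o = Order(isBuy=True, orderId=1, price=10000, quantity=50000, peakSize=10000)
    assert o.isIceberg                  # peakSize < quantity marks it as an iceberg order
    o.show()                            # prints: B 10000 1 50000 *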
|
from jina.drivers.querylang.queryset.lookup import QuerySet, Q
from tests import random_docs
def test_docs_filter():
s = random_docs(10)
ss = QuerySet(s).filter(tags__id__lt=5, tags__id__gt=3)
ssr = list(ss)
assert len(ssr) == 1
for d in ssr:
assert (3 < d.tags['id'] < 5)
def test_docs_filter_equal():
s = random_docs(10)
ss = QuerySet(s).filter(tags__id=4)
ssr = list(ss)
assert len(ssr) == 1
for d in ssr:
assert int(d.tags['id']) == 4
assert len(d.chunks) == 5
def test_nested_chunks_filter():
s = random_docs(10)
ss = QuerySet(s).filter(Q(chunks__filter=Q(tags__id__lt=35, tags__id__gt=33)))
ssr = list(ss)
assert len(ssr) == 1
for d in ssr:
assert len(d.chunks) == 5
|
from discord.ext import commands
|
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 18 23:48:52 2021
@author: James
"""
# import packages
import requests
from bs4 import BeautifulSoup
import time
import re
import pandas as pd
import json
# Scratch test without functions
'''
url = 'https://www.ptt.cc/bbs/Steam/index.html'
keyword = '心得'
previousPage = 5
# def 1
# Fetch the page
r = requests.get(url)
soup = BeautifulSoup(r.text, "html.parser")
# over18
# ref https://ithelp.ithome.com.tw/articles/10202493
if len(soup.select('div.over18-button-container')) > 0:
btn18 = soup.select('div.over18-button-container')
btn18[0]
r = requests.Session()
payload = {
'from':'/bbs/' + str(config_data["board"]) + '/index.html',
'yes':'yes'
}
rpost = r.post('https://www.ptt.cc/ask/over18?from=%2Fbbs%2F' + str(config_data["board"]) + '%2Findex.html', payload)
rget = r.get('https://www.ptt.cc/bbs/Gossiping/index.html')
print(rget.text)
# Pretty-print the page structure
print(soup.prettify())
# Find URL, title, and date
cell = soup.select('div.r-ent')
hrefList = []
titleList = []
dateList = []
for item in cell:
try:
# href
href = 'https://www.ptt.cc' + item.select_one('a').get('href')
# title & date
titletext = item.text
title = re.split('\n', titletext)[3].strip()
date = re.split('\n', titletext)[14].strip()
        # Look for the keyword
if keyword in title:
hrefList.append(href)
titleList.append(title)
dateList.append(date)
print(title, date)
except AttributeError:
# deleted article
titletext = item.text
deleteTitle = re.split('\n', titletext)
deleted = [x.strip() for x in deleteTitle if '(本文已被刪除)' in x]
deletedDate = deleteTitle[11]
print(deleted, deletedDate)
if len(titleList) == 0:
print('no article')
else:
print("that's all")
# Move to the previous page for further searching
btn = soup.select('div.btn-group > a')
prev_page_href = btn[3]['href']
prev_page_url = 'https://www.ptt.cc' + prev_page_href
'''
# load json config
with open('pttCrawler_config.json', encoding = 'utf-8') as jsonfile:
config_data = json.load(jsonfile)
board = config_data['board']
keyword = config_data['keyword']
previousPage = config_data['previousPage']
url = 'https://www.ptt.cc/bbs/{}/index.html'.format(board)
# define config
# board = 'Steam'
# url = 'https://www.ptt.cc/bbs/{}/index.html'.format(board)
# keyword = '限免'
# previousPage = 10
def over18(board):
# over18
# if len(soup.select('div.over18-button-container')) > 0:
r = requests.Session()
payload = {
'from':'/bbs/' + board + '/index.html',
'yes':'yes'
}
rpost = r.post('https://www.ptt.cc/ask/over18?from=%2Fbbs%2F' + board + '%2Findex.html', payload)
rget = r.get('https://www.ptt.cc/bbs/{}/index.html'.format(board))
return rget
# def get_url: collect the URLs on the current page whose titles contain the keyword
def get_url(url, keyword):
print('\nthis page url is', url)
    # fetch the page
r = requests.get(url)
soup = BeautifulSoup(r.text, "html.parser")
if len(soup.select('div.over18-button-container')) > 0:
r = over18(board)
soup = BeautifulSoup(r.text, "html.parser")
    # pretty-print the page structure
# print(soup.prettify())
    # find URLs, titles, and dates
cell = soup.select('div.r-ent')
hrefList = []
titleList = []
dateList = []
for item in cell:
try:
# href
href = 'https://www.ptt.cc' + item.select_one('a').get('href')
# title & date
titletext = item.text
title = re.split('\n', titletext)[3].strip()
date = re.split('\n', titletext)[14].strip()
            # match the keyword
if keyword in title:
hrefList.append(href)
titleList.append(title)
dateList.append(date)
print(title, date)
except AttributeError:
# deleted article
titletext = item.text
deleteTitle = re.split('\n', titletext)
deleted = [x.strip() for x in deleteTitle if '(本文已被刪除)' in x]
deletedDate = deleteTitle[11]
# print(deleted, deletedDate)
if len(titleList) == 0:
        print('no "{}" article on this page'.format(keyword))
    # move to the previous page
btn = soup.select('div.btn-group > a')
prev_page_href = btn[3]['href']
prev_page_url = 'https://www.ptt.cc' + prev_page_href
return hrefList, titleList, dateList, prev_page_url
# def prev_page_search: search backwards over the previous pages
def prev_page_search(url, keyword, previousPage):
print('search start', time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
hrefList = []
titleList = []
dateList = []
for page in range(0, previousPage):
href, title, date, prev_page_url = get_url(url, keyword)
hrefList.append(href)
titleList.append(title)
dateList.append(date)
url = prev_page_url
time.sleep(5)
print('\nsearch end', time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
return hrefList, titleList, dateList
hrefList, titleList, dateList = prev_page_search(url, keyword, previousPage)
# flatten the result lists and convert them into a DataFrame
flat_hrefList = [item for sublist in hrefList for item in sublist]
flat_titleList = [item for sublist in titleList for item in sublist]
flat_dateList = [item for sublist in dateList for item in sublist]
df_output = pd.DataFrame(list(zip(flat_hrefList, flat_titleList, flat_dateList)), columns = ['網址', '標題', '日期'])
df_output.to_csv('PTT_{}版_關鍵字_{}_網址搜尋.csv'.format(board, keyword), index = False, encoding = 'big5')
|
from .base import ApiBase
import requests
class Labels(ApiBase):
__module__ = 'trello'
def __init__(self, apikey, token=None):
self._apikey = apikey
self._token = token
def get(self, idLabel, fields=None):
resp = requests.get(f"https://trello.com/1/labels/{idLabel}", params={"key": self._apikey, "token": self._token, "fields": fields}, data=None)
return self.raise_or_json(resp)
def get_board(self, idLabel, fields=None):
resp = requests.get(f"https://trello.com/1/labels/{idLabel}/board", params={"key": self._apikey, "token": self._token, "fields": fields}, data=None)
return self.raise_or_json(resp)
def get_board_field(self, field, idLabel):
resp = requests.get(f"https://trello.com/1/labels/{idLabel}/board/{field}", params={"key": self._apikey, "token": self._token}, data=None)
return self.raise_or_json(resp)
def update(self, idLabel, name=None, color=None):
resp = requests.put(f"https://trello.com/1/labels/{idLabel}", params={"key": self._apikey, "token": self._token}, data={"name": name, "color": color})
return self.raise_or_json(resp)
def update_color(self, idLabel, value):
resp = requests.put(f"https://trello.com/1/labels/{idLabel}/color", params={"key": self._apikey, "token": self._token}, data={"value": value})
return self.raise_or_json(resp)
def update_name(self, idLabel, value):
resp = requests.put(f"https://trello.com/1/labels/{idLabel}/name", params={"key": self._apikey, "token": self._token}, data={"value": value})
return self.raise_or_json(resp)
def new(self, name, color, idBoard):
resp = requests.post("https://trello.com/1/labels", params={"key": self._apikey, "token": self._token}, data={"name": name, "color": color, "idBoard": idBoard})
return self.raise_or_json(resp)
def delete(self, idLabel):
resp = requests.delete(f"https://trello.com/1/labels/{idLabel}", params={"key": self._apikey, "token": self._token}, data=None)
return self.raise_or_json(resp)
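# Hedged usage sketch (not part of the original file): raise_or_json() is inherited from
# ApiBase and is assumed to return the decoded JSON body on success. The key, token and
# label id below are placeholders; running this would issue a real request to the Trello API.
if __name__ == '__main__':
    labels = Labels('YOUR_API_KEY', token='YOUR_TOKEN')
    label = labels.get('someLabelId', fields='name,color')  # hypothetical label id
    print(label)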
|
from rclpy.node import Node
# ms5837 provides access to the BlueRobotics MS5837 Python library, which must be installed
from sensor_barometer import ms5837
from sensor_interfaces.msg import Barometer
import time
import re, uuid
class BarometerDataPublisher(Node):
# Initializer
def __init__(self):
super().__init__('BarometerDataPublisher')
self.publisher_ = self.create_publisher(Barometer, 'barometer_data', 10) # Creates a publisher over the topic barometer_data
read_period = 2 # Does a reading every 2 seconds
self.timer = self.create_timer(read_period, self.barometer_read_and_publish)
self.sensor = ms5837.MS5837_30BA()
# self.sensor.setFluidDensity() # Configuring fluid density for fresh or saltwater. Defaulting to fresh water
if not self.sensor.init():
# If sensor can not be detected
print("Sensor could not be initialized")
exit(1)
def barometer_read_and_publish(self):
# Custom barometer message to publish. Can be found in the sensor_interfaces.
msg = Barometer()
# Adding a way to read the time
tim = time.localtime()
msg.local_time = time.strftime("%H:%M",tim)
# Getting the mac address of the system
msg.mac = ':'.join(re.findall('..','%012x' % uuid.getnode()))
# Reading barometer and loading data into custom message
if self.sensor.read():
msg.depth = self.sensor.depth() # Depth in meters using the fluid density (kg/m^3) configured by setFluidDensity()
msg.pressure_mbar = self.sensor.pressure() # Default is mbar (no arguments)
msg.pressure_psi = self.sensor.pressure(ms5837.UNITS_psi) # Request psi
else:
print("Sensor read failed!")
exit(1)
# Publishing message and logging data sent over the topic /barometer_data
self.publisher_.publish(msg)
self.get_logger().info('Mac: %s Depth: %0.2f m\tP: %0.1f mbar %0.3f psi %s' % (msg.mac,
msg.depth,
msg.pressure_mbar,
msg.pressure_psi,
msg.local_time))
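# Minimal entry-point sketch (an assumption, not part of the original node file):
# the usual rclpy pattern for spinning a node such as BarometerDataPublisher.
def main(args=None):
    import rclpy  # imported here because the original file only imports rclpy.node
    rclpy.init(args=args)
    node = BarometerDataPublisher()
    try:
        rclpy.spin(node)  # keeps the 2-second read-and-publish timer running
    except KeyboardInterrupt:
        pass
    finally:
        node.destroy_node()
        rclpy.shutdown()
if __name__ == '__main__':
    main()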
|
# Lint as: python3
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared type definition for symbolic bindings.
For example, in:
fn [M: u32, N: u32] f(x: bits[M], y: bits[N]) -> bits[N] {
...
}
The symbolic bindings that instantiate this function might be:
(('M', 42), ('N', 64))
"""
from typing import Tuple
SymbolicBindings = Tuple[Tuple[str, int], ...]
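# Illustrative usage note (not part of the original file): the instantiation from the
# docstring above would be written with this alias as
#   bindings: SymbolicBindings = (('M', 42), ('N', 64))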
|
import logging
from stock.core.product import SKU, Category, Product
from stock.core.shelve import RestockThreshold, ProductAmount, Shelve
class RegisterShelve:
def __call__(
self,
product_sku: SKU,
product_category: Category,
shelve_restock_threshold: RestockThreshold,
shelve_stock_amount: ProductAmount
) -> Shelve:
product = Product(product_sku, product_category)
shelve = Shelve(product, shelve_restock_threshold, shelve_stock_amount)
logging.debug("RegisterShelve.__call__:Completed")
return shelve
|
# Given two integers, a dividend and a divisor, divide the two numbers
# without using multiplication, division, or the mod operator.
# Return the quotient of dividend divided by divisor.
# Integer division should truncate toward zero (drop the fractional part),
# e.g. truncate(8.345) = 8 and truncate(-2.7335) = -2
# Example 1:
# Input: dividend = 10, divisor = 3
# Output: 3
# Explanation: 10/3 = truncate(3.33333..) = truncate(3) = 3
# Example 2:
# Input: dividend = 7, divisor = -3
# Output: -2
# Explanation: 7/-3 = truncate(-2.33333..) = -2
# Constraints:
# Both the dividend and the divisor are 32-bit signed integers.
# The divisor is never 0.
# Assume the environment can only store 32-bit signed integers, i.e. the range [-2^31, 2^31 - 1].
# If the division result overflows, return 2^31 - 1.
class Solution:
def divide(self, dividend: int, divisor: int) -> int:
if dividend == 0:
return 0
        res = 0  # accumulated quotient (sum of the shifted multiples)
        # Determine whether the result is negative:
        # the XOR of the two operands is negative iff their signs differ
        negative = (dividend ^ divisor) < 0
        # work with absolute values
        dividend = abs(dividend)
        divisor = abs(divisor)
if dividend < divisor:
return 0
        while True:
            k = divisor  # keep a working copy of the divisor
            i = 0  # number of left shifts
            while (k << 1) < dividend:
                k = k << 1
                i += 1
            res += (1 << i)  # add 2**i, the power of two by which the divisor was multiplied
            dividend -= k
            if dividend < divisor:  # the remainder can no longer cover a whole divisor
                if negative:
                    return -res
                else:
                    # handle overflow
                    if res > (2**31 - 1):
                        return 2**31 - 1
                    else:
                        return res
if __name__ == "__main__":
S = Solution()
print(S.divide(-2147483648, -1))
print(S.divide(10, 3))
print(S.divide(-7, 2))
print(S.divide(-10, -1)) |
from xml.etree import ElementTree as ET
class XMLParser:
@staticmethod
def get_root(data):
tree = ET.parse(data)
return tree.getroot()
@staticmethod
def get_node(parent, pattern=''):
return parent.find('.//{}'.format(pattern))
@staticmethod
def get_text(node):
return node.text
@staticmethod
def get_attr(node, pattern):
return node.attrib.get(pattern)
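# Hedged usage sketch (not part of the original file): XMLParser is a thin wrapper around
# ElementTree, so any XML document works; the file name and tag names below are made up.
if __name__ == '__main__':
    root = XMLParser.get_root('example.xml')   # parse a file (or file-like object)
    node = XMLParser.get_node(root, 'title')   # first <title> element anywhere below the root
    if node is not None:
        print(XMLParser.get_text(node))          # element text
        print(XMLParser.get_attr(node, 'lang'))  # attribute lookup, None if missing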
|
# -*- coding: utf-8 -*-
# CreateTime: 2019-07-02 11:46:34
from base import BaseTestCase
import json
from app.module import db, User
def add_user(username, email):
user = User(username=username, email=email)
db.session.add(user)
db.session.commit()
return user
class TestUsersService(BaseTestCase):
def test_users(self):
"/ping 服务"
response = self.client.get('/ping')
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 200)
self.assertIn('pong', data['message'])
self.assertIn('success', data['status'])
def test_add_users(self):
"添加用户"
with self.client:
response = self.client.post(
'/users',
data=json.dumps(dict(username='test_python', email='[email protected]')),
content_type='application/json')
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 201)
self.assertIn('[email protected]', data['message'])
self.assertEqual('success', data['status'])
def test_add_user_invalid_json(self):
"user,email为空"
with self.client:
response = self.client.post(
'/users',
data=json.dumps(dict()),
content_type='application/json')
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 400)
self.assertIn('Invalid payload', data['message'])
self.assertEqual('fail', data['status'])
def test_add_user_invalid_json_keys(self):
"user,email参数错误"
with self.client:
response = self.client.post(
'/users',
data=json.dumps(dict(email='[email protected]')),
content_type='application/json'
)
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 400)
self.assertIn('Invalid payload', data['message'])
self.assertEqual('fail', data['status'])
with self.client:
response = self.client.post(
'/users',
data=json.dumps(dict(email='test_python')),
content_type='application/json'
)
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 400)
self.assertIn('Invalid payload', data['message'])
self.assertEqual('fail', data['status'])
def test_get_user(self):
"获取单个用户信息"
user = add_user('python', '[email protected]')
with self.client:
response = self.client.get('/users/{}'.format(user.id))
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 200)
self.assertEqual('success', data['status'])
self.assertIn('created_at', data['data'])
self.assertEqual('python', data['data']['username'])
self.assertEqual('[email protected]', data['data']['email'])
def test_get_user_no_id(self):
"获取用户信息,用户id必须为int类型"
with self.client:
response = self.client.get('/users/xxxx')
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 400)
self.assertIn('Param id error', data['message'])
self.assertEqual('fail', data['status'])
def test_get_user_incorrect_id(self):
"获取用户信息,用户id不存在"
with self.client:
response = self.client.get('/users/-1')
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 404)
self.assertIn('User not exists', data['message'])
self.assertEqual('fail', data['status'])
def test_all_users(self):
"获取所有用户信息"
add_user('lucy', '[email protected]')
add_user('lilei', '[email protected]')
with self.client:
response = self.client.get('/users')
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 200)
self.assertEqual('success', data['status'])
self.assertEqual(2, len(data['data']['users'])) |
import tensorflow as tf
def dense_encoder(inputs,
latent_size=8, activation=tf.nn.leaky_relu,
return_gaussian=False,
is_training=True, reuse=tf.AUTO_REUSE, scope='encoder'):
"""
Encoder using a dense neural network
"""
with tf.variable_scope(scope, [inputs], reuse=reuse) as sc:
x = tf.layers.flatten(inputs)
net = tf.layers.dense(x, 200, activation=tf.nn.leaky_relu)
net = tf.layers.dense(net, 200, activation=tf.nn.leaky_relu)
mu = tf.layers.dense(net, latent_size, activation=None)
if return_gaussian:
log_sigma = tf.layers.dense(net, latent_size, activation=None)
return (mu, log_sigma)
else:
return mu
def dense_decoder(code,
output_size=28,
output_channels=1,
activation=tf.nn.leaky_relu,
is_training=True, reuse=tf.AUTO_REUSE, scope='decoder'):
"""
Decoder using a dense neural network
"""
nx = output_size*output_size
with tf.variable_scope(scope, [code], reuse=reuse) as sc:
net = tf.layers.dense(code, 200, activation=tf.nn.leaky_relu)
net = tf.layers.dense(net, 200, activation=tf.nn.leaky_relu)
net = tf.layers.dense(net, nx, activation=None)
net = tf.reshape(net, (-1, output_size, output_size, output_channels))
return net
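# Hedged wiring sketch (not part of the original file): how the two functions above could be
# combined into a plain autoencoder graph in TF1-style graph mode. The placeholder shape
# assumes 28x28x1 inputs, matching the decoder defaults.
if __name__ == '__main__':
    images = tf.placeholder(tf.float32, [None, 28, 28, 1], name='images')
    code = dense_encoder(images, latent_size=8)        # (batch, 8) latent codes
    recon = dense_decoder(code, output_size=28)        # (batch, 28, 28, 1) reconstructions
    loss = tf.reduce_mean(tf.square(recon - images))   # simple reconstruction loss
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)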
|
import attr
from ndk.construct import Construct
from ndk.directives import *
from ndk.options import contact as options
@attr.s
class ContactDirective(Construct):
__object_type__ = 'contact'
contact_name = PrimaryKey()
alias = StringField()
contactgroups = OneToMany('ContactGroup')
minimum_importance = IntegerField()
host_notifications_enabled = BooleanField(required=True)
service_notifications_enabled = BooleanField(required=True)
host_notifications_period = OneToOne(
'TimePeriod', required=True)
service_notifications_period = OneToOne(
'TimePeriod', required=True)
host_notifications_options = ChoiceField(
options.HostNotifications, required=True)
service_notifications_options = ChoiceField(
options.ServiceNotifications, required=True)
host_notification_commands = OneToOne('Command', required=True)
service_notification_commands = OneToOne('Command', required=True)
email = StringField()
pager = StringField()
addressx = StringField()
can_submit_commands = BooleanField()
retain_status_information = BooleanField()
retain_nonstatus_information = BooleanField()
@property
def pk(self):
return self.contact_name
|
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
def plot_all(events):
df = pd.DataFrame(events)
def bar_chart(df, type):
df = df[df.type == type]
df = df[['days_to_birth','type']]
df = df.assign(Bin=lambda x: pd.cut(x.days_to_birth, bins=10, precision=0))
df = df[['Bin','type']]
df = df.rename(columns={"type": type, 'Bin': 'days_to_birth'})
df.groupby(['days_to_birth']).count().plot(kind='bar')
#fig, ax = plt.subplots()
#groupby = df.groupby(['days_to_birth']).count().plot(kind='bar', ax=ax)
#groupby = df.groupby(['Bin']).count().plot(kind='bar', ax=ax)
# ticks = ax.set_xticks(ax.get_xticks()[::100])
bar_chart(df, 'diagnosis')
bar_chart(df, 'observation')
bar_chart(df, 'sample')
bar_chart(df, 'treatment')
df = pd.DataFrame(events)
fig, ax = plt.subplots()
df[df.observation_type == 'weight_ohsu' ].plot(x ='days_to_birth', y='measurement', kind = 'scatter', title='weight', c='case_submitter_id', colormap='viridis', ax=ax)
df[df.observation_type == 'height_ohsu' ].plot(x ='days_to_birth', y='measurement', kind = 'scatter', title='height', c='case_submitter_id', colormap='viridis' )
df[df.observation_type == 'glycemic_lab_tests' ].plot(x ='days_to_birth', y='measurement', kind = 'scatter', title='glycemic', c='case_submitter_id', colormap='viridis' )
df[df.observation_type == 'lesion_size' ].plot(x ='days_to_birth', y='measurement', kind = 'scatter', title='lesion (axis1)', c='case_submitter_id', colormap='viridis' )
df[(df.observation_type == 'biomarker_measurement_ohsu') | (df.observation_type == 'biomarker_measurement_manually_entered')].plot(x ='days_to_birth', y='measurement', kind = 'scatter', title='biomarker', c='case_submitter_id', colormap='viridis' )
def plot_events_summary(events, case_submitter_id=None):
sns.set(style="ticks", color_codes=True)
# all events
if not case_submitter_id:
df = pd.DataFrame(events)
else:
df = pd.DataFrame([e for e in events if e.case_submitter_id == case_submitter_id])
# count by type
groups = df.groupby(['days_to_birth','type']).size().to_frame(name='size').reset_index()
# make days_to_birth positive so it graphs left -> right
groups['days_to_birth'] = groups['days_to_birth'].apply(lambda x: x*-1)
chart = sns.scatterplot(x="days_to_birth", y="size", data=groups, hue="type" )
legend = chart.legend(loc='center left', bbox_to_anchor=(1.25, 0.5), ncol=1)
def plot_events_details(events):
sns.set(style="ticks", color_codes=True)
# all events
# count by type
df = pd.DataFrame(events)
groups = df.groupby(['days_to_birth','type']).size().to_frame(name='count').reset_index()
# make days_to_birth positive so it graphs left -> right
groups['days_to_birth'] = groups['days_to_birth'].apply(lambda x: x*-1)
g = sns.FacetGrid(groups, col="type", height=4,col_wrap=3)
g.map(plt.scatter, "days_to_birth",'count');
|
"""
上传头像的练习
客户端需要将自己的头像上传给服务端
在服务端以当前日期保存为jpg格式
提示:图片如果大的话,需要一条一条读取发送
"""
from socket import *
import time
tcp_socket = socket()
tcp_socket.bind(("0.0.0.0", 6955))
tcp_socket.listen(5)
while True:
print("等待客户端链接")
connfd, addr = tcp_socket.accept() # 创建新的套接字
print("连接上客户端:", addr)
file_name = time.localtime()[:3]
file = open("%s-%s-%s.jpeg" % file_name, "wb") # 没必要用ab,wb就行
while True:
data = connfd.recv(1024)
        if not data:  # client closed the connection
connfd.close()
file.close()
break
file.write(data)
    # close the socket
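# Hedged client-side sketch (an assumption, not part of the original exercise): a client
# would connect, stream the image in chunks, then close the socket so the server sees
# EOF (empty recv) and finishes writing the file.
#
# from socket import socket
# client = socket()
# client.connect(("127.0.0.1", 6955))
# with open("avatar.jpeg", "rb") as f:
#     while True:
#         chunk = f.read(1024)
#         if not chunk:
#             break
#         client.sendall(chunk)
# client.close()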
|
import os
import threading
import subprocess
import time
import gc
from PyQt5.QtCore import Qt, QRegExp, QUrl, QThread, pyqtSignal
from PyQt5.QtGui import QIcon, QPixmap, QDesktopServices
from PyQt5.QtWidgets import QMainWindow, QWidget, QDialog, QDialogButtonBox, QLabel, QTextBrowser
from TwitchLinkConfig import Config
from Services.Twitch import TwitchGqlModels
from Services.TwitchLinkUiLoader import uiLoader as UiFiles
from Services.Twitch.TwitchPlaybackAccessTokens import *
from Services.TwitchLinkUtils import Utils
from Services.TwitchLinkAdManager import AdManager
from Services.TwitchLinkTranslator import translator, T
from Auth.TwitchUserAuth import BrowserNotFoundError, BrowserNotLoadableError
from Engines.TwitchLinkPopcornEngine import TwitchDownloader as TwitchDownloaderPopcornEngine
from Engines.TwitchLinkBiscuitEngine import TwitchDownloader as TwitchDownloaderBiscuitEngine
class Ui:
def __init__(self, db):
self.db = db
def Loading(self):
return self.setup(Loading(self.db), forceAllLabelFonts=True)
def Settings(self):
return self.setup(Settings(self.db), forceAllLabelFonts=True)
def Login(self):
return self.setup(Login(self.db), forceAllLabelFonts=True)
def About(self):
return self.setup(About(self.db))
def TermsOfService(self):
return self.setup(TermsOfService(self.db))
def MainMenu(self):
return self.setup(MainMenu(self.db))
def Search(self, mode):
return self.setup(Search(self.db, mode))
def VideoFrame(self, data):
return self.setup(VideoFrame(self.db, data))
def VideoBox(self, window, video):
return self.setup(VideoBox(self.db, window, video))
def VideoList(self, mode, data):
return self.setup(VideoList(self.db, mode, data), forceSize=False, forceAllLabelFonts=True)
def DownloadMenu(self, video, vod):
return self.setup(DownloadMenu(self.db, video, vod), forceAllLabelFonts=True)
def Download(self):
return self.setup(Download(self.db), forceAllLabelFonts=True)
def setup(self, ui, forceSize=True, forceAllLabelFonts=False):
if isinstance(ui, QDialog):
ui.setWindowIcon(QIcon(Config.ICON_IMAGE))
title = ui.windowTitle()
if title == "":
ui.setWindowTitle(T("#PROGRAM_NAME"))
else:
ui.setWindowTitle("{} - {}".format(T("#PROGRAM_NAME"), T(title)))
ui.setWindowFlag(Qt.WindowContextHelpButtonHint, False)
ui = self.setAds(ui)
if forceSize:
if isinstance(ui, QDialog):
if Config.SHOW_ADS:
ui.setFixedSize(ui.size())
else:
ui.setFixedSize(ui.width() - ui.adSize[0], ui.height() - ui.adSize[1])
ui.setFont(translator.getFont())
for widget in ui.findChildren(QTextBrowser):
widget.setFont(translator.getDocFont(widget.font()))
if forceAllLabelFonts:
for widget in ui.findChildren(QLabel):
widget.setFont(translator.getFont(widget.font()))
return ui
def setAds(self, ui):
        for ad in ui.findChildren(QLabel, QRegExp(r"^ad_\d+$")):
if Config.SHOW_ADS:
ad.parent().layout().addWidget(AdManager(ad.width(), ad.height()))
ad.setParent(None)
else:
ad.parent().setParent(None)
return ui
class Loading(QMainWindow, UiFiles.loading):
def __init__(self, db):
super().__init__()
self.db = db
self.setupUi(self)
self.setWindowFlag(Qt.FramelessWindowHint)
self.setWindowIcon(QIcon(Config.ICON_IMAGE))
self.programLogo.setContentsMargins(10, 10, 10, 10)
self.programLogo.setPixmap(Utils.Image(Config.LOGO_IMAGE))
class Settings(QDialog, UiFiles.settings):
adSize = [0, 0]
def __init__(self, db):
super().__init__()
self.db = db
self.setupUi(self)
self.enableMp4.setChecked(self.db.settings.enableMp4)
self.autoClose.setChecked(self.db.settings.autoClose)
self.popcornEngine.setChecked(self.db.settings.engineType == "popcorn")
self.biscuitEngine.setChecked(self.db.settings.engineType == "biscuit")
self.downloadEngineInfo.clicked.connect(self.showDownloadEngineInfo)
self.streamFilename.setText(self.db.templates.streamFilename)
self.videoFilename.setText(self.db.templates.videoFilename)
self.clipFilename.setText(self.db.templates.clipFilename)
self.streamTemplateInfo.clicked.connect(self.showStreamTemplateInfo)
self.videoTemplateInfo.clicked.connect(self.showVideoTemplateInfo)
self.clipTemplateInfo.clicked.connect(self.showClipTemplateInfo)
self.bookmarkList.addItems(self.db.settings.bookmarks)
self.newBookmark.returnPressed.connect(self.addBookmark)
self.addBookmarkButton.clicked.connect(self.addBookmark)
self.insertBookmarkButton.clicked.connect(self.insertBookmark)
self.removeBookmarkButton.clicked.connect(self.removeBookmark)
self.currentTempDirectory.setText(self.db.temp.tempDirectory)
self.currentTempDirectory.setCursorPosition(0)
self.tempDirectoryInfo.clicked.connect(self.showTempDirectoryInfo)
self.searchTempDirectory.clicked.connect(self.askTempDirectory)
self.language.addItems(list(translator.LANGUAGES.values()))
self.language.setCurrentIndex(list(translator.LANGUAGES.keys()).index(self.db.localization.language))
self.language.currentIndexChanged.connect(self.setLanguage)
self.timezone.addItems(self.db.localization.TIMEZONE)
self.timezone.setCurrentIndex(self.db.localization.timezoneNo)
self.timezone.currentIndexChanged.connect(self.setTimezone)
self.popcornEngineInfo.setHtml(Utils.getDocs(self.db.localization.language, "PopcornEngine"))
self.biscuitEngineInfo.setHtml(Utils.getDocs(self.db.localization.language, "BiscuitEngine"))
self.popcornFastDownload.setChecked(self.db.engines.popcorn.fastDownload)
self.popcornUpdateTracking.setChecked(self.db.engines.popcorn.updateTracking)
self.popcornNormal.setChecked(not self.db.engines.popcorn.updateTracking)
if self.db.downloading:
self.engineArea.setEnabled(False)
self.languageArea.setEnabled(False)
self.timezoneArea.setEnabled(False)
self.popcornSettingsArea.setEnabled(False)
self.popcornDownloadModeArea.setEnabled(False)
self.buttonBox.button(QDialogButtonBox.Reset).setEnabled(False)
else:
self.restrictedLabel.hide()
self.buttonBox.accepted.connect(self.saveSettings)
self.buttonBox.button(QDialogButtonBox.Reset).clicked.connect(self.resetSettings)
def keyPressEvent(self, event):
if ((not event.modifiers() and event.key() == Qt.Key_Return) or (event.modifiers() == Qt.KeypadModifier and event.key() == Qt.Key_Enter)):
event.accept()
else:
super().keyPressEvent(event)
def showDownloadEngineInfo(self):
info = "#Select your download engine.\nThese settings apply only to video downloads.\nThe live download is automatically fixed with [Biscuit Engine].\nThe clips are downloaded without an engine."
Utils.info("notification", info)
def showStreamTemplateInfo(self):
info = "#{type} : File Type (Stream)\n{id} : Stream ID (XXXXXXXXXX)\n{title} : Title\n{game} : Category\n{channel} : Channel\n{started_at} : Started At (XXXX-XX-XX XX:XX:XX)\n{date} : Started Date (XXXX-XX-XX)\n{time} : Started Time (XX:XX:XX)"
Utils.info("#Stream Filename Template Variables", info, noFormat=True)
def showVideoTemplateInfo(self):
info = "#{type} : File Type (Video)\n{id} : Video ID (XXXXXXXXXX)\n{title} : Title\n{game} : Category\n{channel} : Channel\n{duration} : Duration\n{published_at} : Published At (XXXX-XX-XX XX:XX:XX)\n{date} : Published Date (XXXX-XX-XX)\n{time} : Published Time (XX:XX:XX)\n{views} : Views"
Utils.info("#Video Filename Template Variables", info, noFormat=True)
def showClipTemplateInfo(self):
info = "#{type} : File Type (Clip)\n{id} : Clip ID (XXXXXXXXXX)\n{title} : Title\n{game} : Category\n{slug} : Slug (HappySlugExampleHelloTwitch)\n{channel} : Channel\n{creator} : Creator\n{duration} : Duration\n{created_at} : Created At (XXXX-XX-XX XX:XX:XX)\n{date} : Created Date (XXXX-XX-XX)\n{time} : Created Time (XX:XX:XX)\n{views} : Views"
Utils.info("#Clip Filename Template Variables", info, noFormat=True)
def addBookmark(self):
bookmark = self.newBookmark.text().strip().lower()
if bookmark == "":
return
if len(self.bookmarkList.findItems(bookmark, Qt.MatchFixedString)) == 0:
self.bookmarkList.addItem(bookmark)
self.newBookmark.clear()
else:
Utils.info("warning", "#Bookmark already exists.")
def insertBookmark(self):
bookmark = self.newBookmark.text().strip().lower()
if bookmark == "":
return
if len(self.bookmarkList.findItems(bookmark, Qt.MatchFixedString)) == 0:
self.bookmarkList.insertItem(self.bookmarkList.currentRow(), bookmark)
self.newBookmark.clear()
else:
Utils.info("warning", "#Bookmark already exists.")
def removeBookmark(self):
self.bookmarkList.takeItem(self.bookmarkList.currentRow())
def showTempDirectoryInfo(self):
Utils.info("notification", "#A place to store various temporary data.\nAllocate it to a disk with a lot of free space.\n\nIt is mainly used for temporary storage of video data in [Popcorn Engine].\n[Popcorn Engine] processes download and encoding separately, which requires additional free space as much as the size of the video to be downloaded.\nWhen the download is complete or canceled, all temporary data will be deleted.\n[Biscuit Engine] does not require this free space.")
def askTempDirectory(self):
newDirectory = Utils.askFileDirectory(self, self.db.temp.tempDirectory)
if newDirectory != "":
self.db.setTempDirectory(newDirectory)
self.currentTempDirectory.setText(newDirectory)
self.currentTempDirectory.setCursorPosition(0)
def setLanguage(self, index):
self.saveSettings()
self.db.setLanguage(index)
def setTimezone(self, index):
self.saveSettings()
self.db.setTimezone(index)
def saveSettings(self):
enableMp4 = self.enableMp4.isChecked()
autoClose = self.autoClose.isChecked()
if self.popcornEngine.isChecked():
engineType = "popcorn"
else:
engineType = "biscuit"
streamFilename = self.streamFilename.text()
videoFilename = self.videoFilename.text()
clipFilename = self.clipFilename.text()
bookmarks = []
for index in range(self.bookmarkList.count()):
bookmarks.append(self.bookmarkList.item(index).text())
popcornFastDownload = self.popcornFastDownload.isChecked()
popcornUpdateTracking = self.popcornUpdateTracking.isChecked()
self.db.setGeneralSettings(enableMp4, autoClose, engineType, bookmarks)
self.db.setFilenameSettings(streamFilename, videoFilename, clipFilename)
self.db.setPopcornEngineSettings(popcornFastDownload, popcornUpdateTracking)
self.db.setBiscuitEngineSettings()
def resetSettings(self):
if Utils.ask("reset-settings", "#This will reset all settings, including logs.\nWould you like to continue?"):
self.db.resetSettings()
class Login(QDialog, UiFiles.login):
adSize = [0, 0]
def __init__(self, db):
super().__init__()
self.db = db
self.setupUi(self)
self.setWindowFlag(Qt.WindowCloseButtonHint, False)
try:
self.db.account.user.reloadUser(self.db.api)
except:
pass
if self.db.account.user.connected:
self.accountMenu.setCurrentIndex(1)
self.profile_image.setPixmap(Utils.Image(self.db.account.user.data.profileImageURL, Config.PROFILE_IMAGE))
self.account.setText(self.db.account.user.data.displayName)
self.logoutButton.clicked.connect(self.tryLogout)
else:
self.accountMenu.setCurrentIndex(0)
self.loginInfo.hide()
self.loginButton.clicked.connect(self.tryLogin)
self.loginButton.setAutoDefault(True)
def tryLogin(self):
self.setEnabled(False)
self.loginInfo.show()
self.loginButton.setText(T("#Logging in..."))
self.loginThread = LoginThread(self.db)
self.loginThread.loginResult.connect(self.loginResult)
self.loginThread.start()
def loginResult(self, result):
self.setEnabled(True)
self.loginInfo.hide()
self.loginButton.setText(T("login"))
if result == "Succeed":
Utils.info("login", "#Login complete.")
self.close()
elif result == "BrowserNotFound":
Utils.info("error", "#Chrome browser or Edge browser is required to proceed.")
elif result == "BrowserNotLoadable":
Utils.info("error", "#Unable to load Chrome browser or Edge browser.\nIf the error persists, try Run as administrator.")
else:
Utils.info("error", "#Login failed.")
def tryLogout(self):
if Utils.ask("logout", "#Are you sure you want to log out?"):
self.db.account.user.logout()
self.db.saveDB()
Utils.info("logout", "#Logout complete.")
self.close()
def closeEvent(self, event):
if not self.isEnabled():
event.ignore()
class LoginThread(QThread):
loginResult = pyqtSignal(str)
def __init__(self, db):
super().__init__()
self.db = db
def run(self):
try:
self.db.account.user.login(self.db.api, Config.APPDATA_PATH + "/webdrivers")
self.db.saveDB()
self.loginResult.emit("Succeed")
except BrowserNotFoundError:
self.loginResult.emit("BrowserNotFound")
except BrowserNotLoadableError:
self.loginResult.emit("BrowserNotLoadable")
except:
self.loginResult.emit("Error")
class About(QDialog, UiFiles.about):
adSize = [0, 0]
def __init__(self, db):
super().__init__()
self.db = db
self.setupUi(self)
self.versionLabel.setText("v {}".format(Config.VERSION))
self.homepageButton.clicked.connect(self.openHomepage)
self.donateButton.clicked.connect(self.openDonate)
def openHomepage(self):
QDesktopServices.openUrl(QUrl(Config.HOMEPAGE_URL + "?lang=" + self.db.localization.language))
def openDonate(self):
QDesktopServices.openUrl(QUrl(Config.HOMEPAGE_URL + "/donate?lang=" + self.db.localization.language))
class TermsOfService(QDialog, UiFiles.termsOfService):
adSize = [0, 0]
def __init__(self, db):
super().__init__()
self.db = db
self.setupUi(self)
self.selectLanguage.clicked.connect(self.db.mainWindow.openSettings)
self.textBrowser.setHtml(Utils.getDocs(self.db.localization.language, "TermsOfService"))
if self.db.setup.termsOfServiceAgreedTime == None:
self.setWindowFlag(Qt.WindowCloseButtonHint, False)
self.agreed.hide()
self.agree.stateChanged.connect(self.checkAgree)
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
self.buttonBox.accepted.connect(self.db.agreeTermsOfService)
self.buttonBox.rejected.connect(self.db.forceClose)
else:
self.agree.hide()
self.agreed.setText(T("#Agreed at {time}", time=str(self.db.setup.termsOfServiceAgreedTime).split(".")[0]))
self.buttonBox.setStandardButtons(QDialogButtonBox.Ok)
def checkAgree(self):
if self.agree.isChecked():
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(True)
else:
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
class MainMenu(QWidget, UiFiles.mainMenu):
adSize = [100, 50]
def __init__(self, db):
super().__init__()
self.db = db
self.setupUi(self)
self.programLogo.setContentsMargins(10, 10, 10, 10)
self.programLogo.setPixmap(Utils.Image(Config.LOGO_IMAGE))
self.channel_id.clicked.connect(lambda: self.db.mainWindow.startSearch("channel_id"))
self.video_id.clicked.connect(lambda: self.db.mainWindow.startSearch("video_id"))
self.video_url.clicked.connect(lambda: self.db.mainWindow.startSearch("video_url"))
class Search(QDialog, UiFiles.search):
adSize = [0, 0]
def __init__(self, db, mode):
super().__init__()
self.db = db
self.setupUi(self)
self.setup(mode)
def setup(self, mode):
if mode == "channel_id":
self.window_title.setText(T("#Search by Channel ID"))
if len(self.db.settings.bookmarks) == 0:
self.queryArea.setCurrentIndex(0)
else:
self.queryComboBox.addItems(self.db.settings.bookmarks)
self.queryArea.setCurrentIndex(1)
elif mode == "video_id":
self.window_title.setText(T("#Search by Video / Clip ID"))
self.queryArea.setCurrentIndex(0)
else:
self.window_title.setText(T("#Search by Channel / Video / Clip Link"))
self.queryArea.setCurrentIndex(0)
class VideoFrame(QWidget, UiFiles.videoFrame):
thumbnailImageLoaded = pyqtSignal(QPixmap)
categoryImageLoaded = pyqtSignal(QPixmap)
def __init__(self, db, data):
super().__init__()
self.db = db
self.setupUi(self)
self.videoType = type(data)
self.thumbnail_image.setPixmap(QPixmap(Config.THUMBNAIL_IMAGE))
self.category_image.setPixmap(QPixmap(Config.CATEGORY_IMAGE))
self.thumbnailImageLoaded.connect(self.setThumbnailImage)
self.categoryImageLoaded.connect(self.setCategoryImage)
if self.videoType == TwitchGqlModels.Stream:
self.stream = data
self.setStreamInfo()
elif self.videoType == TwitchGqlModels.Video:
self.video = data
self.setVideoInfo()
else:
self.clip = data
self.setClipInfo()
def setStreamInfo(self):
self.title.setText(self.stream.title)
self.title.setToolTip(self.stream.title)
self.info_1.setText(self.stream.game.name)
self.info_2.setText(str(self.stream.createdAt.toUTC(self.db.localization.timezone)))
self.more.clicked.connect(self.showStreamInfo)
self.loadThumbnailImage(self.stream.previewImageURL)
self.loadCategoryImage(self.stream.game.boxArtURL)
def setVideoInfo(self):
self.title.setText(self.video.title)
self.title.setToolTip(self.video.title)
self.info_1.setText(str(self.video.publishedAt.toUTC(self.db.localization.timezone)))
self.info_2.setText(str(self.video.lengthSeconds))
self.more.clicked.connect(self.showVideoInfo)
self.loadThumbnailImage(self.video.previewThumbnailURL)
self.loadCategoryImage(self.video.game.boxArtURL)
def setClipInfo(self):
self.title.setText(self.clip.title)
self.title.setToolTip(self.clip.title)
self.info_1.setText(str(self.clip.createdAt.toUTC(self.db.localization.timezone)))
self.info_2.setText(str(self.clip.durationSeconds))
self.more.clicked.connect(self.showClipInfo)
self.loadThumbnailImage(self.clip.thumbnailURL)
self.loadCategoryImage(self.clip.game.boxArtURL)
def setThumbnailImage(self, pixmap):
self.thumbnail_image.setPixmap(pixmap)
def setCategoryImage(self, pixmap):
self.category_image.setPixmap(pixmap)
def loadThumbnailImage(self, url):
threading.Thread(target=self.thumbnailImageThread, args=(url,)).start()
def loadCategoryImage(self, url):
threading.Thread(target=self.categoryImageThread, args=(url,)).start()
def thumbnailImageThread(self, url):
image = Utils.Image(url, Config.THUMBNAIL_IMAGE)
try:
self.thumbnailImageLoaded.emit(image)
except:
pass
def categoryImageThread(self, url):
image = Utils.Image(url, Config.CATEGORY_IMAGE)
try:
self.categoryImageLoaded.emit(image)
except:
pass
def showStreamInfo(self):
kwargs = {
"channel": self.stream.broadcaster.formattedName(),
"title": self.stream.title,
"game": self.stream.game.displayName,
"startedAt": self.stream.createdAt.toUTC(self.db.localization.timezone),
"viewer": self.stream.viewersCount
}
Utils.info("#Stream Information", "#Channel : {channel}\nTitle : {title}\nCategory : {game}\nStarted At : {startedAt}\nViewer Count : {viewer}", **kwargs)
def showVideoInfo(self):
kwargs = {
"channel": self.video.owner.formattedName(),
"title": self.video.title,
"game": self.video.game.displayName,
"duration": self.video.lengthSeconds,
"publishedAt": self.video.publishedAt.toUTC(self.db.localization.timezone),
"view": self.video.viewCount
}
Utils.info("#Video Information", "#Channel : {channel}\nTitle : {title}\nCategory : {game}\nDuration : {duration}\nPublished At : {publishedAt}\nView Count : {view}", **kwargs)
def showClipInfo(self):
kwargs = {
"channel": self.clip.broadcaster.formattedName(),
"title": self.clip.title,
"game": self.clip.game.displayName,
"creator": self.clip.curator.formattedName(),
"duration": self.clip.durationSeconds,
"createdAt": self.clip.createdAt.toUTC(self.db.localization.timezone),
"view": self.clip.viewCount
}
Utils.info("#Clip Information", "#Channel : {channel}\nTitle : {title}\nCategory : {game}\nCreator : {creator}\nDuration : {duration}\nCreated At : {createdAt}\nView Count : {view}", **kwargs)
class VideoBox(QWidget, UiFiles.videoBox):
def __init__(self, db, window, data):
super().__init__()
self.db = db
self.setupUi(self)
self.window = window
self.data = data
self.videoFrameArea.layout().addWidget(self.db.ui.VideoFrame(self.data))
self.videoType = type(self.data)
if self.videoType == TwitchGqlModels.Video:
self.downloadButton.clicked.connect(self.downloadVideo)
else:
self.downloadButton.clicked.connect(self.downloadClip)
def checkVideo(self):
try:
video = TwitchVod(self.data.id, self.db.account.user)
except TokenError:
try:
self.db.account.user.reloadUser(self.db.api)
Utils.info("authentication-error", "#An authentication error has occurred.\nIf the error persists, please log in again.")
except:
Utils.info("login-expired", "#Your login has expired.\nIf you do not log in again, the downloader will operate in a logged out state.")
return False
except:
Utils.info("download-failed", "#A network error has occurred.")
return False
if video.found == False:
Utils.info("unable-to-download", "#Video not found. Deleted or temporary error.")
return False
elif video.found == VodRestricted:
if self.db.account.user.connected:
advice = T("#Unable to find subscription in your account.\nSubscribe to this streamer or log in with another account.")
else:
advice = T("#You need to log in to download subscriber-only videos.")
Utils.info("unable-to-download", "#This video is for subscribers only.\n{advice}", advice=advice)
return False
return video
def checkClip(self):
try:
clip = TwitchClip(self.data.slug, self.db.account.user)
except TokenError:
try:
self.db.account.user.reloadUser(self.db.api)
Utils.info("authentication-error", "#An authentication error has occurred.\nIf the error persists, please log in again.")
except:
Utils.info("login-expired", "#Your login has expired.\nIf you do not log in again, the downloader will operate in a logged out state.")
return False
except:
Utils.info("download-failed", "#A network error has occurred.")
return False
if clip.found == False:
Utils.info("unable-to-download", "#Clip not found. Deleted or temporary error.")
return False
return clip
def downloadVideo(self):
self.downloadButton.setText(T("#Loading..."))
self.downloadButton.repaint()
videoToken = self.checkVideo()
self.downloadButton.setText(T("download"))
if videoToken == False:
return
self.db.setupDownload(self.data, videoToken)
downloadMenu = self.db.ui.DownloadMenu(self.data, videoToken)
if downloadMenu.exec():
self.window.close()
else:
self.db.cancelDownload()
del downloadMenu
gc.collect()
def downloadClip(self):
self.downloadButton.setText(T("#Loading..."))
self.downloadButton.repaint()
clipToken = self.checkClip()
self.downloadButton.setText(T("download"))
if clipToken == False:
return
self.db.setupDownload(self.data, clipToken)
downloadMenu = self.db.ui.DownloadMenu(self.data, clipToken)
if downloadMenu.exec():
if self.db.fileDownload["downloadType"] == "clip":
self.window.setLoading(T("#Downloading..."), 0)
self.db.downloadClip(self.window.progressSignal)
self.window.setLoading(False)
else:
self.window.close()
else:
self.db.cancelDownload()
del downloadMenu
gc.collect()
class VideoList(QDialog, UiFiles.videoList):
progressSignal = pyqtSignal(str, int)
windowSize = {
"small": {
# Original Size : 530 * 760
"window": (580, 800),
"adSize": (0, 50),
"previewSize": (480, 270),
"layoutColumn": 1,
"showAdSize": (320, 100)
},
"medium": {
# Original Size : 1000 * 940
"window": (938, 980),
"adSize": (0, 50),
"previewSize": (800, 450),
"layoutColumn": 2,
"showAdSize": (300, 250)
},
"large": {
# Original Size : 1300 * 940
"window": (1400, 980),
"adSize": (0, 50),
"previewSize": (800, 450),
"layoutColumn": 3,
"showAdSize": (300, 250)
}
}
class SizeMode:
def __init__(self, name, data):
self.sizeName = name
self.windowSize = data["window"]
self.adSize = data["adSize"]
self.previewSize = data["previewSize"]
self.layoutColumn = data["layoutColumn"]
self.showAdSize = data["showAdSize"]
DATA_LOAD_POSITION = 100
searchTypes = [
("past-broadcasts", "ARCHIVE"),
("highlights", "HIGHLIGHT"),
("clips", None),
("uploads", "UPLOAD"),
("past-premiers", "PAST_PREMIERE"),
("all-videos", None)
]
sortList = [
("date", "TIME"),
("popular", "VIEWS")
]
filterList = [
("24h", "LAST_DAY"),
("7d", "LAST_WEEK"),
("30d", "LAST_MONTH"),
("all", "ALL_TIME")
]
def __init__(self, db, mode, data):
super().__init__()
self.db = db
self.setupUi(self)
self.db.cancelDownload()
self.sizeMode = self.SizeMode(self.db.temp.videoListWindowSize, self.windowSize[self.db.temp.videoListWindowSize])
self.setup(mode, data)
self.smallWindow.clicked.connect(lambda: self.setSizeMode("small"))
self.mediumWindow.clicked.connect(lambda: self.setSizeMode("medium"))
self.largeWindow.clicked.connect(lambda: self.setSizeMode("large"))
self.reloadSize()
def setSizeMode(self, mode):
if self.sizeMode.sizeName != mode:
self.sizeMode = self.SizeMode(mode, self.windowSize[mode])
self.db.setVideoListWindowSize(mode)
self.reloadSize()
def reloadSize(self):
if Config.SHOW_ADS:
self.setFixedSize(self.sizeMode.windowSize[0], self.sizeMode.windowSize[1])
else:
self.setFixedSize(self.sizeMode.windowSize[0] - self.sizeMode.adSize[0], self.sizeMode.windowSize[1] - self.sizeMode.adSize[1])
self.preview_image.setFixedSize(self.sizeMode.previewSize[0], self.sizeMode.previewSize[1])
self.reloadSizeButtons()
self.reloadVideoLayout()
def reloadSizeButtons(self):
if self.sizeMode.sizeName == "small":
self.smallWindow.setEnabled(False)
self.mediumWindow.setEnabled(True)
self.largeWindow.setEnabled(True)
elif self.sizeMode.sizeName == "medium":
self.smallWindow.setEnabled(True)
self.mediumWindow.setEnabled(False)
self.largeWindow.setEnabled(True)
else:
self.smallWindow.setEnabled(True)
self.mediumWindow.setEnabled(True)
self.largeWindow.setEnabled(False)
def setup(self, mode, data):
if mode == "channel_id":
self.window_title.setText(T("#{channel}'s channel", channel=data["channel"].displayName))
self.setChannel(data["channel"])
self.searchType.addItems(list(map(lambda item: T(item[0]), self.searchTypes)))
self.searchType.setCurrentIndex(0)
self.searchType.currentIndexChanged.connect(self.loadSortOrFilter)
self.sortOrFilter.currentIndexChanged.connect(self.setSearchOptions)
self.loadSortOrFilter(0)
elif mode == "video_id":
self.tabWidget.setTabEnabled(0, False)
self.window_title.setText(T("#Video ID : {id}", id=data["video"].id))
self.controlArea.hide()
self.noResultsLabel.hide()
self.clearVideoList()
self.setVideoList([data["video"]])
else:
self.tabWidget.setTabEnabled(0, False)
self.window_title.setText(T("#Clip ID : {id}", id=data["clip"].slug))
self.controlArea.hide()
self.noResultsLabel.hide()
self.clearVideoList()
self.setVideoList([data["clip"]])
self.scrollArea.verticalScrollBar().valueChanged.connect(self.searchMoreVideos)
self.progressSignal.connect(self.setLoading)
self.setLoading(False)
def loadSortOrFilter(self, index):
self.sortOrFilter.clear()
if self.searchTypes[index][0] == "clips":
self.sortOrFilter.addItems(list(map(lambda item: T(item[0]), self.filterList)))
else:
self.sortOrFilter.addItems(list(map(lambda item: T(item[0]), self.sortList)))
self.sortOrFilter.setCurrentIndex(0)
def setSearchOptions(self, index):
if index == -1:
return
searchType = self.searchTypes[self.searchType.currentIndex()][0]
self.channelVideosLabel.setText(T("#{channel}'s {searchType}", channel=self.channel.displayName, searchType=T(searchType)))
self.searchVideos()
def searchVideos(self, cursor=""):
self.setLoading(T("#Loading..."))
self.statusArea.repaint()
if self.searchTypes[self.searchType.currentIndex()][0] == "clips":
filter = self.filterList[self.sortOrFilter.currentIndex()][1]
try:
self.searchResult = self.db.api.getChannelClips(self.channel.login, filter, Config.DATA_LOAD_LIMIT, cursor)
except:
Utils.info("error", "#A network error has occurred.")
return
else:
type = self.searchTypes[self.searchType.currentIndex()][1]
sort = self.sortList[self.sortOrFilter.currentIndex()][1]
try:
self.searchResult = self.db.api.getChannelVideos(self.channel.login, type, sort, Config.DATA_LOAD_LIMIT, cursor)
except:
Utils.info("error", "#A network error has occurred.")
return
if cursor == "":
self.clearVideoList()
if len(self.searchResult.data) == 0:
self.videoArea.hide()
self.noResultsLabel.show()
else:
self.videoArea.show()
self.noResultsLabel.hide()
self.setVideoList(self.searchResult.data)
self.setLoading(False)
def searchMoreVideos(self, value):
if self.searchResult.hasNextPage:
if (self.scrollArea.verticalScrollBar().maximum() - value) < self.DATA_LOAD_POSITION:
self.searchVideos(self.searchResult.cursor)
def setChannel(self, channel):
self.channel = channel
if channel.stream == None:
self.liveLabel.setText(T("offline"))
self.viewer_count.hide()
self.preview_image.setPixmap(Utils.Image(channel.offlineImageURL, Config.OFFLINE_IMAGE))
self.infoArea.hide()
self.liveDownload.hide()
else:
stream = channel.stream
self.liveLabel.setText(T("live"))
self.viewer_count.setText(T("#{viewer} viewers", viewer=stream.viewersCount))
self.preview_image.setPixmap(Utils.Image(stream.previewImageURL, Config.THUMBNAIL_IMAGE))
self.category_image.setPixmap(Utils.Image(stream.game.boxArtURL, Config.CATEGORY_IMAGE))
self.title.setText(stream.title)
self.category.setText(stream.game.displayName)
self.started_at.setText(str(stream.createdAt.toUTC(self.db.localization.timezone)))
self.liveDownload.clicked.connect(self.downloadStream)
self.profile_image.setPixmap(Utils.Image(channel.profileImageURL, Config.PROFILE_IMAGE))
self.display_name.setText(channel.displayName)
self.description.setText(channel.description)
self.followers.setText(T("#{followers} followers", followers=channel.followers))
if channel.isPartner:
broadcasterType = "#Partner Streamer"
elif channel.isAffiliate:
broadcasterType = "#Affiliate Streamer"
else:
broadcasterType = "#Streamer"
self.broadcaster_type.setText(T(broadcasterType))
def setVideoList(self, videoList):
for data in videoList:
self.addLayoutWidget(self.db.ui.VideoBox(self, data))
def clearVideoList(self):
self.clearVideoLayout()
self.layoutWidgets = []
def addLayoutWidget(self, widget):
self.layoutWidgets.append(widget)
self.setLayoutWidget(len(self.layoutWidgets) - 1)
if len(self.layoutWidgets) % 6 == 0:
if Config.SHOW_ADS:
self.addLayoutWidget(AdManager(self.sizeMode.showAdSize[0], self.sizeMode.showAdSize[1]))
def setLayoutWidget(self, index):
self.videoArea.layout().addWidget(self.layoutWidgets[index], index // self.sizeMode.layoutColumn, index % self.sizeMode.layoutColumn)
def reloadVideoLayout(self):
self.clearVideoLayout()
for index in range(len(self.layoutWidgets)):
self.setLayoutWidget(index)
def clearVideoLayout(self):
self.scrollArea.verticalScrollBar().setValue(0)
layout = self.videoArea.layout()
for index in range(layout.count()):
layout.itemAt(0).widget().setParent(None)
def checkStream(self):
try:
stream = TwitchStream(self.channel.login, self.db.account.user)
except TokenError:
try:
self.db.account.user.reloadUser(self.db.api)
Utils.info("authentication-error", "#An authentication error has occurred.\nIf the error persists, please log in again.")
except:
Utils.info("login-expired", "#Your login has expired.\nIf you do not log in again, the downloader will operate in a logged out state.")
return False
except:
Utils.info("download-failed", "{error}\n{reason}", error=T("#A network error has occurred."), reason=T("#Temporary Error or Restricted Content"))
return False
if stream.found == False:
Utils.info("unable-to-download", "#Channel not found. Deleted or temporary error.")
elif stream.found == ChannelIsOffline:
Utils.info("unable-to-download", "#Stream not found. Terminated or temporary error.")
return False
if stream.hideAds == False:
if self.db.account.user.connected:
if not Utils.ask("warning", "#Twitch's ads can't be blocked during the stream because your account doesn't have any subscription to this channel.\nProceed?"):
return False
else:
if not Utils.ask("warning", "#To block Twitch ads during the stream, you need to log in with your subscribed account.\nYou are currently not logged in and cannot block Twitch ads during the stream.\nProceed?"):
return False
return stream
def downloadStream(self):
self.liveDownload.setText(T("#Loading..."))
self.liveDownload.repaint()
streamToken = self.checkStream()
self.liveDownload.setText(T("live-download"))
if streamToken == False:
return
self.db.setupDownload(self.channel.stream, streamToken)
downloadMenu = self.db.ui.DownloadMenu(self.channel.stream, streamToken)
if downloadMenu.exec():
self.close()
else:
self.db.cancelDownload()
del downloadMenu
gc.collect()
def setLoading(self, loading, progress=None):
if loading == False:
self.statusArea.hide()
else:
self.statusLabel.setText(loading)
if progress == None:
self.loadingProgress.hide()
else:
self.loadingProgress.setValue(progress)
self.loadingProgress.show()
self.statusArea.show()
class DownloadMenu(QDialog, UiFiles.downloadMenu):
adSize = [0, 50]
def __init__(self, db, dataModel, accessToken):
super().__init__()
self.db = db
self.setupUi(self)
self.dataModel = dataModel
self.accessToken = accessToken
self.videoFrameArea.layout().addWidget(self.db.ui.VideoFrame(dataModel))
self.loadOptions()
def keyPressEvent(self, event):
if ((not event.modifiers() and event.key() == Qt.Key_Return) or (event.modifiers() == Qt.KeypadModifier and event.key() == Qt.Key_Enter)):
event.accept()
else:
super().keyPressEvent(event)
def loadOptions(self):
self.window_title.setText(T("#Download {type}", type=T(self.accessToken.dataType)))
self.currentDirectory.setText(self.db.fileDownload["saveDirectory"] + "/" + self.db.fileDownload["fileName"])
self.currentDirectory.setCursorPosition(0)
self.searchDirectory.clicked.connect(self.askSaveDirectory)
self.resolution.addItems(self.accessToken.getResolutions())
self.resolution.setCurrentIndex(0)
self.resolution.currentIndexChanged.connect(self.db.setDownloadResolution)
if self.accessToken.dataType == "stream":
self.cropArea.hide()
self.settings.clicked.connect(self.openSettings)
elif self.accessToken.dataType == "video":
self.setupCropArea()
self.reloadCropArea()
h, m, s = self.cropRange
self.endSpinH.setValue(h)
self.endSpinM.setValue(m)
self.endSpinS.setValue(s)
self.settings.clicked.connect(self.openSettings)
else:
self.cropArea.hide()
self.settings.hide()
def setupCropArea(self):
self.cropArea.setTitle(T("#Crop / Total Length : {duration}", duration=self.dataModel.lengthSeconds))
self.startCheckBox.stateChanged.connect(self.reloadCropBar)
self.endCheckBox.stateChanged.connect(self.reloadCropBar)
h, m, s = str(self.dataModel.lengthSeconds).split(":")
self.cropRange = [int(h), int(m), int(s)]
self.startSpinH.valueChanged.connect(self.reloadCropRange)
self.startSpinM.valueChanged.connect(self.reloadCropRange)
self.startSpinS.valueChanged.connect(self.reloadCropRange)
self.endSpinH.valueChanged.connect(self.reloadCropRange)
self.endSpinM.valueChanged.connect(self.reloadCropRange)
self.endSpinS.valueChanged.connect(self.reloadCropRange)
def reloadCropRange(self):
h, m, s = self.cropRange
self.startSpinH.setRange(0, h)
if self.startSpinH.value() == h:
self.startSpinM.setRange(0, m)
else:
self.startSpinM.setRange(0, 59)
if self.startSpinM.value() == m:
self.startSpinS.setRange(0, s)
else:
self.startSpinS.setRange(0, 59)
self.endSpinH.setRange(0, h)
if self.endSpinH.value() == h:
self.endSpinM.setRange(0, m)
else:
self.endSpinM.setRange(0, 59)
if self.endSpinM.value() == m:
self.endSpinS.setRange(0, s)
else:
self.endSpinS.setRange(0, 59)
def reloadCropBar(self):
self.startTimeBar.setEnabled(not self.startCheckBox.isChecked())
self.endTimeBar.setEnabled(not self.endCheckBox.isChecked())
def reloadCropArea(self):
self.reloadCropRange()
self.reloadCropBar()
if self.db.settings.engineType == "popcorn":
self.cropArea.setEnabled(False)
self.cropInfoLabel.setText(T("#Cropping is only supported in [Biscuit Engine]."))
else:
self.cropArea.setEnabled(True)
self.cropInfoLabel.setText(T("#The crop is based on the nearest point in the crop range that can be processed."))
def askSaveDirectory(self):
directory = self.db.fileDownload["saveDirectory"] + "/" + self.db.fileDownload["fileName"]
if self.accessToken.dataType == "clip":
filters = T("#mp4 file (*.mp4)")
initialFilter = "mp4"
else:
if self.db.settings.enableMp4:
filters = T("#ts file (recommended) (*.ts);;mp4 file (*.mp4)")
else:
filters = T("#ts file (*.ts)")
initialFilter = "ts"
newDirectory = Utils.askSaveDirectory(self, directory, filters, initialFilter)
if newDirectory != "":
self.db.setFileSaveDirectory(newDirectory)
self.currentDirectory.setText(newDirectory)
self.currentDirectory.setCursorPosition(0)
def accept(self):
if self.accessToken.dataType == "video" and self.db.settings.engineType == "biscuit":
if not self.validateCropRange():
return
if Utils.checkFileExists(self.db.fileDownload["saveDirectory"] + "/" + self.db.fileDownload["fileName"]):
if not Utils.ask("overwrite", "#A file with the same name already exists.\nOverwrite?"):
return
super().accept()
def validateCropRange(self):
if self.saveCropRange():
return True
else:
Utils.info("warning", "#The end point of the cropping range is equal to or earlier than the start point.")
return False
def saveCropRange(self):
if self.startCheckBox.isChecked():
start = None
else:
start = self.startSpinH.value() * 3600 + self.startSpinM.value() * 60 + self.startSpinS.value()
if self.endCheckBox.isChecked():
end = None
else:
end = self.endSpinH.value() * 3600 + self.endSpinM.value() * 60 + self.endSpinS.value()
if start != None and end != None:
if start >= end:
return False
self.db.setCropRange(start, end)
return True
def openSettings(self):
self.db.mainWindow.openSettings()
if self.accessToken.dataType == "video":
self.reloadCropArea()
class Download(QWidget, UiFiles.download):
adSize = [300, 100]
def __init__(self, db):
super().__init__()
self.db = db
self.setupUi(self)
if self.db.fileDownload["downloadType"] == "stream":
self.accessToken = self.db.fileDownload["stream"]
else:
self.accessToken = self.db.fileDownload["video"]
self.category_image.setPixmap(Utils.Image(self.accessToken.game.boxArtURL, Config.CATEGORY_IMAGE))
self.category.setText(self.accessToken.game.displayName)
self.title.setText(T("#Title : {title}", title=self.accessToken.title))
self.title.setToolTip(self.accessToken.title)
if self.db.fileDownload["downloadType"] == "stream":
self.thumbnail_image.setPixmap(Utils.Image(self.accessToken.previewImageURL, Config.THUMBNAIL_IMAGE))
self.user_name.setText(T("#Channel : {channel}", channel=self.accessToken.broadcaster.displayName))
self.date.setText(T("#Date : {date}", date=self.accessToken.createdAt.toUTC(self.db.localization.timezone)))
self.duration.setText(T("#Duration : {duration}", duration=T("unknown")))
self.view_count.setText(T("#Viewer Count : {viewer}", viewer=self.accessToken.viewersCount))
else:
self.thumbnail_image.setPixmap(Utils.Image(self.accessToken.previewThumbnailURL, Config.THUMBNAIL_IMAGE))
self.user_name.setText(T("#Channel : {channel}", channel=self.accessToken.owner.displayName))
self.date.setText(T("#Date : {date}", date=self.accessToken.publishedAt.toUTC(self.db.localization.timezone)))
start, end = self.db.fileDownload["range"]
            if start is None and end is None:
                self.duration.setText(T("#Duration : {duration}", duration=self.accessToken.lengthSeconds))
            else:
                if start is None:
                    start = T("#From start")
                else:
                    start = self.getTimeString(start)
                if end is None:
                    end = T("#To end")
                else:
                    end = self.getTimeString(end)
self.duration.setText(T("#Duration : {duration} / Crop : {start} ~ {end}", duration=self.accessToken.lengthSeconds, start=start, end=end))
self.view_count.setText(T("#View Count : {view}", view=self.accessToken.viewCount))
self.resolution.setText(T("#Resolution : {resolution}", resolution=self.db.fileDownload["resolution"]))
self.file.setText(T("#File : {file}", file=self.db.fileDownload["saveDirectory"] + "/" + self.db.fileDownload["fileName"]))
self.file.setToolTip(self.db.fileDownload["saveDirectory"] + "/" + self.db.fileDownload["fileName"])
self.cancelCompleteButtonArea.setCurrentIndex(0)
self.cancelButton.clicked.connect(self.cancelDownload)
self.completeButton.clicked.connect(self.db.mainWindow.startMainMenu)
self.openFolderButton.clicked.connect(self.openFolder)
self.openFileButton.clicked.connect(self.openFile)
self.openFileButton.hide()
self.status.setText(T("#Preparing..."))
if self.db.fileDownload["downloadType"] == "stream":
self.downloadProgressBar.setRange(0, 0)
else:
self.liveLabel.hide()
self.downloader = Downloader(self.db)
self.downloader.streamProgress.connect(self.streamProgress)
self.downloader.videoProgress.connect(self.videoProgress)
self.downloader.downloadComplete.connect(self.downloadComplete)
self.downloader.errorOccurred.connect(self.errorOccurred)
self.db.setDownloadingState(True)
self.downloader.start()
def getTimeString(self, totalSeconds):
        h = str(totalSeconds // 3600).zfill(2)
        m = str(totalSeconds % 3600 // 60).zfill(2)
        s = str(totalSeconds % 60).zfill(2)
        return h + ":" + m + ":" + s
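        # Informal check of the formatting above: getTimeString(3661) -> "01:01:01",
        # getTimeString(59) -> "00:00:59"; hour values wider than two digits are kept as-is.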
def streamProgress(self, complete, duration):
if complete:
self.downloadProgressBar.setRange(0, 100)
self.encodingProgressBar.setRange(0, 100)
self.downloadProgressBar.setValue(100)
self.encodingProgressBar.setValue(100)
else:
self.status.setText(T("#Downloading Live Stream..."))
self.currentDuration.setText(duration)
def videoProgress(self, downloadProgress, encodingProgress, duration):
if downloadProgress == -1:
self.status.setText(T("#Waiting for download... / Checking for additional files (5 minutes)"))
self.downloadProgressBar.setRange(0, 0)
else:
self.status.setText(T("#Downloading..."))
self.downloadProgressBar.setRange(0, 100)
self.downloadProgressBar.setValue(downloadProgress)
self.encodingProgressBar.setValue(encodingProgress)
self.currentDuration.setText(duration)
def cancelDownload(self):
if Utils.ask("cancel-download", "#Are you sure you want to cancel the download?"):
self.cancelButton.setText(T("#Canceling..."))
self.repaint()
            if not self.downloader.cancelDownload():
Utils.info("notification", "#The download has already been completed.")
def downloadComplete(self, complete):
if complete:
self.window_title.setText(T("download-complete"))
self.status.setText(T("download-complete"))
self.openFileButton.show()
else:
self.window_title.setText(T("download-canceled"))
self.status.setText(T("download-canceled"))
self.cancelCompleteButtonArea.setCurrentIndex(1)
self.db.setDownloadingState(False)
if complete:
if self.db.settings.autoClose:
self.db.forceClose()
else:
Utils.info("download-complete", "#Download completed.")
def errorOccurred(self):
Utils.info("error", "#An error occurred while downloading.")
def openFolder(self):
try:
#print(self.db.fileDownload["saveDirectory"])
subprocess.call(['open', self.db.fileDownload["saveDirectory"]])
#os.startfile(self.db.fileDownload["saveDirectory"])
except:
Utils.info("error", "#Folder not found.\nIt has been moved, renamed or deleted.")
def openFile(self):
try:
#os.startfile(self.db.fileDownload["saveDirectory"] + "/" + self.db.fileDownload["fileName"])
subprocess.call(['open', self.db.fileDownload["saveDirectory"] + "/" + self.db.fileDownload["fileName"]])
except:
Utils.info("error", "#File not found.\nIt has been moved, renamed or deleted.")
class Downloader(QThread):
streamProgress = pyqtSignal(bool, str)
videoProgress = pyqtSignal(int, int, str)
downloadComplete = pyqtSignal(bool)
errorOccurred = pyqtSignal()
def __init__(self, db):
super().__init__()
self.db = db
if self.db.fileDownload["downloadType"] == "stream":
self.url = self.db.fileDownload["streamData"].resolution(self.db.fileDownload["resolution"]).url
else:
self.url = self.db.fileDownload["videoData"].resolution(self.db.fileDownload["resolution"]).url
self.fileName = self.db.fileDownload["saveDirectory"] + "/" + self.db.fileDownload["fileName"]
def run(self):
self.startDownload()
def startDownload(self):
# "/ffmpeg.exe" is for Windows OS
# ffmpeg = Config.DEPENDENCIES_ROOT + "/ffmpeg.exe"
ffmpeg = Config.DEPENDENCIES_ROOT + "/ffmpeg"
try:
if self.db.fileDownload["downloadType"] == "video" and self.db.settings.engineType == "popcorn":
data_path = self.db.temp.tempDirectory + "/" + self.db.fileDownload["video"].id
#print(data_path)
Utils.createDirectory(data_path)
self.downloader = TwitchDownloaderPopcornEngine(ffmpeg, self.url, self.fileName, data_path, self.db.engines.popcorn.fastDownload, self.db.engines.popcorn.updateTracking)
else:
self.downloader = TwitchDownloaderBiscuitEngine(ffmpeg, self.db.fileDownload["downloadType"], self.url, self.fileName)
if self.db.fileDownload["downloadType"] == "video":
start, end = self.db.fileDownload["range"]
self.downloader.setRange(start, end)
except:
self.videoProgress.emit(100, 100, "{time} / {time}".format(time=T("unknown")))
self.downloadComplete.emit(False)
self.errorOccurred.emit()
return
self.downloader.download()
if self.db.fileDownload["downloadType"] == "stream":
            while not self.downloader.done:
self.streamProgress.emit(False, self.downloader.timeProgress)
time.sleep(1)
self.streamProgress.emit(True, self.downloader.timeProgress)
if self.downloader.canceled:
self.downloadComplete.emit(False)
if self.downloader.error:
self.errorOccurred.emit()
else:
self.downloadComplete.emit(True)
else:
            while not self.downloader.done:
                downloadProgress = int((self.downloader.fileProgress / self.downloader.totalFiles) * 100)
                encodingProgress = int((Utils.getTotalSeconds(self.downloader.timeProgress) / self.downloader.totalSeconds) * 100)
duration = self.downloader.timeProgress + " / " + self.downloader.totalTime
if self.db.settings.engineType == "popcorn":
if self.downloader.waiting:
self.videoProgress.emit(-1, encodingProgress, duration)
else:
self.videoProgress.emit(downloadProgress, encodingProgress, duration)
else:
self.videoProgress.emit(downloadProgress, encodingProgress, duration)
time.sleep(1)
if self.downloader.canceled:
self.videoProgress.emit(100, 100, self.downloader.timeProgress + " / " + self.downloader.timeProgress)
self.downloadComplete.emit(False)
if self.downloader.error:
self.errorOccurred.emit()
else:
self.videoProgress.emit(100, 100, self.downloader.totalTime + " / " + self.downloader.totalTime)
self.downloadComplete.emit(True)
def cancelDownload(self):
if self.downloader.done:
return False
else:
self.downloader.cancelDownload()
return True |
from django.shortcuts import get_object_or_404, render
from django.template import loader
from django.http import HttpResponse
from django.conf import settings
from .models import Node
import string
from random import randint,choice
from passlib.context import CryptContext
from esxicfg.forms import RequestedConfigForm
def index(request):
template = loader.get_template('esxicfg/mainpage.html')
context = {
'SITE_FQDN': settings.SITE_FQDN
}
return HttpResponse(template.render(context, request))
def buildconfig(request):
if request.method == "POST":
form = RequestedConfigForm(request.POST)
if form.is_valid():
template = loader.get_template('esxicfg/success.html')
allchar = string.ascii_letters + string.punctuation + string.digits
password = "".join(choice(allchar) for x in range(randint(8, 12)))
myctx = CryptContext(schemes=["md5_crypt"])
password_hash = myctx.hash(password)
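            # Note: md5_crypt produces a "$1$..." crypt-style hash; the assumption here is that
            # this pre-hashed format is what the generated ESXi kickstart consumes (e.g. a
            # "rootpw --iscrypted" directive in the kscfg template).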
newNode = Node(password_hash=password_hash, network_autoconfig=request.POST['NetworkMode'], network_vlan=request.POST['NetworkVLAN'], network_manualip=request.POST['NetworkIP'], network_manualnm=request.POST['NetworkNM'], network_manualgw=request.POST['NetworkGW'], ssh_config=request.POST['SSHmode'])
newNode.save()
context = {
'SITE_FQDN': settings.SITE_FQDN,
'password': password,
'node_id': newNode.id
}
return HttpResponse(template.render(context, request))
        else:
            template = loader.get_template('esxicfg/mainpage.html')
            context = {
                'error_message': 'Invalid options, please try again.'
            }
            return HttpResponse(template.render(context, request))
    else:
        template = loader.get_template('esxicfg/mainpage.html')
        context = {
            'error_message': 'Form input is required.'
        }
        return HttpResponse(template.render(context, request))
def ipxe(request, node_id):
template = loader.get_template('esxicfg/ipxe-65.txt')
context = {
'SITE_FQDN': settings.SITE_FQDN,
'node_id': node_id
}
return HttpResponse(template.render(context, request), content_type="text/plain")
def bootcfg(request, node_id):
template = loader.get_template('esxicfg/bootcfg-65.txt')
context = {
'SITE_FQDN': settings.SITE_FQDN,
'node_id': node_id
}
return HttpResponse(template.render(context, request), content_type="text/plain")
def kscfg(request, node_id):
template = loader.get_template('esxicfg/kscfg-65.txt')
    data = get_object_or_404(Node, id=node_id)
    context = {
        'SITE_FQDN': settings.SITE_FQDN,
        'node_id': node_id,
        'data': data
    }
    return HttpResponse(template.render(context, request), content_type="text/plain")
|
import pycurl
import cStringIO
from urllib import urlencode
import re
import math
SERVER = "http://localhost:8081"
def makeCurlObject():
c = pycurl.Curl()
c.setopt(pycurl.COOKIEFILE, 'cookie.txt')
c.setopt(pycurl.FOLLOWLOCATION, 1)
return c
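# Note: each simulated client below reuses a single Curl handle, so libcurl's cookie engine
# (enabled by COOKIEFILE above) keeps session cookies in memory between requests -- that is
# what lets a simulated student stay logged in across the calls that follow.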
def sendRequest(curlObject, path, postfields=""):
buf = cStringIO.StringIO()
c = curlObject
c.setopt(c.WRITEFUNCTION, buf.write)
c.setopt(c.URL, SERVER+path)
c.setopt(c.POSTFIELDS, postfields)
result = c.perform()
result = buf.getvalue()
buf.close()
return result
def testPage(curlObject, path, expectedText, message, postfields=""):
print "Testing for .....",expectedText," path = ",path,
result = sendRequest(curlObject, path, postfields)
    if result.find(expectedText) == -1:
print result
print "expectedText:",expectedText
raise Exception(message)
print "PASSED"
return result
def testPageRex(curlObject, path, expectedRE, message, postfields=""):
print "Testing for .....",expectedRE," path = ",path,
result = sendRequest(curlObject, path, postfields)
if re.search(expectedRE, result) == None:
print result
raise Exception(message)
print "PASSED"
return result
NumberOfStudents = 10
studentArr = [makeCurlObject() for i in range(NumberOfStudents)]
#student
for i,student in enumerate(studentArr):
studentid = str(i+1)
testPage(student,'/login', "<title>Menu - KhanAcademy</title>","Login failed", 'username=s%s'%studentid)
testPage(student,'/englishessay/', "Here is a list of your previous assignments","EnglishEssay failed")
#teacher
admin = makeCurlObject()
testPage(admin,'/englishessay/admin', "<title>Assignments - KhanAcademy</title>","Admin login failed", 'password=x')
newass = {'assignmentid':'new','oper':'add','title':'My holiday','description':'My holiday','duration':'15'}
testPage(admin,'/englishessay/admineditassignment', "My holiday","New assignment failed", urlencode(newass))
# teacher activates essay
testPage(admin,'/englishessay/adminopassignment?assignmentid=1&oper=busy', """<button onclick="javascript:document.location='adminopassignment?assignmentid=1&oper=marking'">Mark</button>""","Set to busy failed")
# student sees and submits an essay
for i,student in enumerate(studentArr):
studentid = str(i+1)
testPage(student,'/englishessay/', '''<form action="submitAssignment" method="post">''',"EnglishEssay not ready for submission")
s1ass = {'assignmentid':'1','essay_text':'Student %s essay' % studentid,'bsubmit':'Save'}
testPage(student, '/englishessay/submitAssignment',"","Submitting assignment failed", urlencode(s1ass))
# teacher sees the submitted essays
testPage(admin, '/englishessay/adminessayresults?assignmentid=1&complete=0', '<td>Student 1 essay', 'Student essay missing')
# teacher set marking mode
testPage(admin,'/englishessay/adminopassignment?assignmentid=1&oper=marking', """<button class="btn" onclick="javascript:document.location='adminopassignment?assignmentid=1&oper=complete'">Complete</button>""","Set to marking failed")
#students mark
## work out how many iterations
repetitions = 3
N = NumberOfStudents
maxCombinations = math.factorial(N)/math.factorial(N-2)/math.factorial(2)
if maxCombinations< N*repetitions:
repetitions = int(math.floor(maxCombinations / N))
for i,student in enumerate(studentArr):
# s1 marks
studentid = str(i+1)
result = testPage(student,'/englishessay/', '''Student Marking: s%s''' % studentid,"EnglishEssay not ready for marking for s%s" % studentid)
for r in range(repetitions):
# check that student does not get own essay
if result.find("Student %s essay" % studentid) != -1:
raise Exception("Student %s got own essay to mark" % studentid)
m = re.search("""<input type="hidden" name="essay1_id" value="(\d+)"/>""", result)
essay1 = m.groups()[0]
m = re.search("""<input type="hidden" name="essay2_id" value="(\d+)"/>""", result)
essay2 = m.groups()[0]
m = re.search("""<input type="hidden" name="essayeval_id" value="(\d+)"/>""", result)
essayevalid = m.groups()[0]
s1mark = {'essayeval_id':essayevalid,'essay1_id':str(essay1),'essay2_id':str(essay2), 'scorerange':'0.0','pcomment1':'pcomment%s'% essay1,'ccomment1':'ccomment%s'%essay1,'ccomment2':'ccomment%s'%essay2,'pcomment2':'pcomment%s'%essay2,'bsubmit':'Next >','pcountdown1':'0','pcountdown2':'0','ccountdown1':'0','ccountdown2':'0'}
if r == repetitions-1:
expectedText = "s%s - Done marking" % studentid
else:
expectedText = "Student Marking: s%s" % studentid
result = testPage(student, '/englishessay/evalEssay',expectedText,"Submitting marking failed", urlencode(s1mark))
# teacher set marking mode
testPage(admin,'/englishessay/adminopassignment?assignmentid=1&oper=complete', """<button class="btn" onclick="document.location='adminessayresults?assignmentid=1&complete=1'">View</button> ""","Set to complete failed")
# Student view result before grading
for i,student in enumerate(studentArr):
studentid = str(i+1)
testPageRex(student,'/englishessay/', '''<td>None</td>([^<]*)<td>Student %s''' % studentid,"Student essay not correct after marking s%s" % studentid)
testPage(student, '/englishessay/viewessay?essayid=%s' % studentid,"<b>Grade</b> : None<br/>","Submitting view essay mark failed")
## teacher grades
calcmarks = {"lowgrade":40, "highgrade":80, "assignmentid":1}
testPage(admin,'/englishessay/adminsubmitmarks', """<td>80.0</td>""","Set to complete failed", urlencode(calcmarks))
print "Tests done"
|
def levenshtein(str1, str2):
if str1 is None:
raise TypeError("str1 is None!")
if str2 is None:
raise TypeError("str2 is None!")
if str1 == str2:
        return 0
if len(str1) == 0:
return len(str2)
if len(str2) == 0:
return len(str1)
v0 = [0] * (len(str2) + 1)
v1 = [0] * (len(str2) + 1)
for i in range(len(v0)):
v0[i] = i
for i in range(len(str1)):
v1[0] = i + 1
for j in range(len(str2)):
cost = 1
if str1[i] == str2[j]:
cost = 0
v1[j + 1] = min(v1[j] + 1, v0[j + 1] + 1, v0[j] + cost)
v0, v1 = v1, v0
return v0[len(str2)]
def levenshtein_ratio(str1, str2):
ratio = 1.0*levenshtein(str1, str2) / max(len(str1), len(str2))
return 1.0 - ratio
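# Informal usage check (values computed by hand):
#   levenshtein("kitten", "sitting")        -> 3
#   levenshtein_ratio("kitten", "sitting")  -> 1 - 3/7 ~= 0.571
#   levenshtein_ratio("abc", "abc")         -> 1.0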
|
__author__ = "Cameron Summers"
# -*- coding: utf-8 -*-
"""
Utilities for downloading projects from Tidepool API
Reference: https://developer.tidepool.org/tidepool-api/index/
"""
import os
import datetime as dt
import sys
import requests
import logging
from data_science_tidepool_api_python.util import DATESTAMP_FORMAT
logger = logging.getLogger(__name__)
def read_auth_csv(path_to_csv):
"""
Read csv file and read top line containing: username,password
Args:
path_to_csv:
Returns:
(username, password)
"""
with open(path_to_csv, "r") as file_to_read:
(username, password) = file_to_read.readline().split(",")
return username, password
class TidepoolAPI(object):
"""
Class representing a user with a Tidepool account.
# TODO: Add checks and enforcement for order of events
# TODO: Add helper functions for getting earlier/latest data
"""
def __init__(self, username, password):
self.login_url = "https://api.tidepool.org/auth/login"
self.user_data_url = "https://api.tidepool.org/data/{user_id}"
self.logout_url = "https://api.tidepool.org/auth/logout"
self.users_sharing_to_url = "https://api.tidepool.org/metadata/users/{user_id}/users"
self.users_sharing_with_url = "https://api.tidepool.org/access/groups/{user_id}"
self.invitations_url = "https://api.tidepool.org/confirm/invitations/{user_id}"
self.accept_invitations_url = "https://api.tidepool.org/confirm/accept/invite/{observer_id}/{user_id}"
self.user_notes_url = "https://api.tidepool.org/message/notes/{user_id}"
self.username = username
self.password = password
self._login_user_id = None
self._login_headers = None
def _check_login(func):
"""
Decorator for enforcing login.
"""
def is_logged_in(self, *args, **kwargs):
if self._login_headers is None or self._login_user_id is None:
raise Exception("Not logged in.")
return func(self, *args, **kwargs)
return is_logged_in
def _check_http_error(func):
"""
Decorator to batch handle failed http requests.
"""
def response_is_ok(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except requests.HTTPError as e:
logger.info("Failed request. HTTPError: {}".format(e))
return response_is_ok
def login(self):
"""
Login to Tidepool API
"""
        login_response = requests.post(self.login_url, auth=(self.username, self.password))
        login_response.raise_for_status()
        xtoken = login_response.headers["x-tidepool-session-token"]
        user_id_master = login_response.json()["userid"]
self._login_user_id = user_id_master
self._login_headers = {
"x-tidepool-session-token": xtoken,
"Content-Type": "application/json"
}
@_check_http_error
@_check_login
def logout(self):
"""
Logout of Tidepool API
Args:
auth:
Returns:
"""
logout_response = requests.post(self.logout_url, auth=(self.username, self.password))
logout_response.raise_for_status()
@_check_login
def get_pending_observer_invitations(self):
"""
Get pending invitations that have been sent to an observer.
Args:
user_id_observer:
headers:
Returns:
list of invitation json objects
"""
try:
invitations_url = self.invitations_url.format(**{"user_id": self._login_user_id})
invitations_response = requests.get(invitations_url, headers=self._login_headers)
invitations_response.raise_for_status()
pending_invitations_json = invitations_response.json()
except requests.HTTPError:
pending_invitations_json = []
return pending_invitations_json
@_check_login
def accept_observer_invitations(self):
"""
Get pending invitations sent to an observer and accept them.
Args:
user_id_observer:
headers:
Returns:
(list, list)
pending
"""
pending_invitations_json = self.get_pending_observer_invitations()
total_invitations = len(pending_invitations_json)
logger.info("Num pending invitations {}".format(total_invitations))
invitation_accept_failed = []
for i, invitation in enumerate(pending_invitations_json):
try:
share_key = invitation["key"]
user_id = invitation["creatorId"]
accept_url = self.accept_invitations_url.format(**{"observer_id": self._login_user_id, "user_id": user_id})
accept_response = requests.put(accept_url, headers=self._login_headers, json={"key": share_key})
accept_response.raise_for_status()
except requests.HTTPError as e:
invitation_accept_failed.append((e, invitation))
if i % 20 == 0:
num_failed = len(invitation_accept_failed)
logger.info("Accepted {}. Failed {}. Out of {}".format(i - num_failed, num_failed, total_invitations))
return pending_invitations_json, invitation_accept_failed
@_check_http_error
@_check_login
def get_user_event_data(self, start_date, end_date, observed_user_id=None):
"""
Get health event data for user. TODO: Make more flexible
Args:
start_date (dt.datetime): Start date of data, inclusive
end_date (dt.datetime): End date of data, inclusive of entire day
observed_user_id (str): Optional id of observed user if login id is clinician/study
Returns:
list: List of events as objects
"""
user_id = self._login_user_id
if observed_user_id:
user_id = observed_user_id
start_date_str, end_date_str = self.get_date_filter_string(start_date, end_date)
user_data_base_url = self.user_data_url.format(**{"user_id": user_id})
user_data_url = "{url_base}?startDate={start_date}&endDate={end_date}&dexcom=true&medtronic=true&carelink=true".format(**{
"url_base": user_data_base_url,
"end_date": end_date_str,
"start_date": start_date_str,
})
data_response = requests.get(user_data_url, headers=self._login_headers)
data_response.raise_for_status()
user_event_data = data_response.json()
return user_event_data
@_check_http_error
@_check_login
def get_users_sharing_to(self):
"""
Get a list of users the login id is sharing data to. The login id is typically
a patient and the user list is clinicians or studies.
Returns:
list: List of users as objects
"""
user_metadata_url = self.users_sharing_to_url.format(**{
"user_id": self._login_user_id
})
metadata_response = requests.get(user_metadata_url, headers=self._login_headers)
metadata_response.raise_for_status()
users_sharing_to = metadata_response.json()
return users_sharing_to
@_check_http_error
@_check_login
def get_users_sharing_with(self):
"""
Get a list of users the login id is observing. The login id is typically the
clinician or study and the user list is patients.
Returns:
list: List of users as objects
"""
users_sharing_with_url = self.users_sharing_with_url.format(**{
"user_id": self._login_user_id
})
users_sharing_with_response = requests.get(users_sharing_with_url, headers=self._login_headers)
users_sharing_with_response.raise_for_status()
users_sharing_with_json = users_sharing_with_response.json()
return users_sharing_with_json
@_check_http_error
@_check_login
def get_notes(self, start_date, end_date, observed_user_id=None):
"""
Get notes for a user.
"""
user_id = self._login_user_id
if observed_user_id:
user_id = observed_user_id
start_date_str, end_date_str = self.get_date_filter_string(start_date, end_date)
base_notes_url = self.user_notes_url.format(**{"user_id": user_id})
notes_url = "{url_base}?startDate={start_date}&endDate={end_date}".format(
**{
"url_base": base_notes_url,
"end_date": end_date_str,
"start_date": start_date_str,
})
notes_response = requests.get(notes_url, headers=self._login_headers)
notes_response.raise_for_status()
notes_data = notes_response.json()
return notes_data
def get_date_filter_string(self, start_date, end_date):
"""
Get string representations for date filters.
Args:
start_date dt.DateTime: start date
end_date dt.Datetime: end date
Returns:
(str, str): start and end date strings
"""
start_date_str = start_date.strftime(DATESTAMP_FORMAT) + "T00:00:00.000Z"
end_date_str = end_date.strftime(DATESTAMP_FORMAT) + "T23:59:59.999Z"
return start_date_str, end_date_str
@_check_login
def get_login_user_id(self):
"""
Get the id of the user logged in.
Returns:
str: user id
"""
return self._login_user_id
def accept_pending_share_invitations(account_username, account_password):
"""
Accept all invitations for an observer account (e.g. study). This is a common operation
so generalizing it here.
Args:
account_username (str):
account_password (str):
"""
tp_api = TidepoolAPI(account_username, account_password)
tp_api.login()
invitations, failed_accept_invitations = tp_api.accept_observer_invitations()
if invitations is not None:
logger.info(account_username)
logger.info("\tNum Invitations {},".format(len(invitations)))
logger.info("\tNum Failed Acceptance {}".format(len(failed_accept_invitations)))
else:
logger.info("No invitations for {}".format(account_username))
tp_api.logout()
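# Minimal usage sketch of TidepoolAPI (credentials and dates below are placeholders, not real values):
#
#   tp_api = TidepoolAPI("[email protected]", "password")
#   tp_api.login()
#   events = tp_api.get_user_event_data(dt.datetime(2021, 1, 1), dt.datetime(2021, 1, 31))
#   tp_api.logout()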
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetServiceResult',
'AwaitableGetServiceResult',
'get_service',
'get_service_output',
]
@pulumi.output_type
class GetServiceResult:
def __init__(__self__, producer_project_id=None, service_name=None):
if producer_project_id and not isinstance(producer_project_id, str):
raise TypeError("Expected argument 'producer_project_id' to be a str")
pulumi.set(__self__, "producer_project_id", producer_project_id)
if service_name and not isinstance(service_name, str):
raise TypeError("Expected argument 'service_name' to be a str")
pulumi.set(__self__, "service_name", service_name)
@property
@pulumi.getter(name="producerProjectId")
def producer_project_id(self) -> str:
"""
ID of the project that produces and owns this service.
"""
return pulumi.get(self, "producer_project_id")
@property
@pulumi.getter(name="serviceName")
def service_name(self) -> str:
"""
The name of the service. See the [overview](/service-management/overview) for naming requirements.
"""
return pulumi.get(self, "service_name")
class AwaitableGetServiceResult(GetServiceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetServiceResult(
producer_project_id=self.producer_project_id,
service_name=self.service_name)
def get_service(service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetServiceResult:
"""
Gets a managed service. Authentication is required unless the service is public.
"""
__args__ = dict()
__args__['serviceName'] = service_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('google-native:servicemanagement/v1:getService', __args__, opts=opts, typ=GetServiceResult).value
return AwaitableGetServiceResult(
producer_project_id=__ret__.producer_project_id,
service_name=__ret__.service_name)
@_utilities.lift_output_func(get_service)
def get_service_output(service_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetServiceResult]:
"""
Gets a managed service. Authentication is required unless the service is public.
"""
...
|
from __future__ import print_function
# python
import datetime
import logging
import os
import traceback
from abc import ABCMeta
import jwt
# django
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect
from django.template import loader
# DRF
from rest_framework import permissions
from rest_framework.views import APIView
# halolib
from .const import HTTPChoice
from .logs import log_json
from .ssm import set_app_param_config
from .util import Util
# aws
# other
# Create your views here.
logger = logging.getLogger(__name__)
class AbsBaseLink(APIView):
__metaclass__ = ABCMeta
"""
View to list all users in the system.
* Requires token authentication.
* Only admin users are able to access this view.
"""
# authentication_classes = (authentication.TokenAuthentication,)
# permission_classes = (permissions.IsAdminUser,permissions.IsAuthenticatedOrReadOnly)
permission_classes = (permissions.AllowAny,)
def __init__(self, **kwargs):
super(AbsBaseLink, self).__init__(**kwargs)
def do_process(self, request, typer, vars, format=None):
"""
:param request:
:param typer:
:param vars:
:param format:
:return:
"""
now = datetime.datetime.now()
self.req_context = Util.get_req_context(request)
self.correlate_id = self.req_context["x-correlation-id"]
self.user_agent = self.req_context["x-user-agent"]
error_message = None
error = None
orig_log_level = 0
if Util.isDebugEnabled(self.req_context, request):
orig_log_level = logger.getEffectiveLevel()
logger.setLevel(logging.DEBUG)
logger.debug("DebugEnabled - in debug mode",
extra=log_json(self.req_context, Util.get_req_params(request)))
logger.debug("headers", extra=log_json(self.req_context, Util.get_headers(request)))
logger.debug("environ", extra=log_json(self.req_context, os.environ))
if settings.HALO_HOST is None and 'HTTP_HOST' in request.META:
settings.HALO_HOST = request.META['HTTP_HOST']
set_app_param_config(settings.AWS_REGION, settings.HALO_HOST)
try:
ret = self.process(request,typer,vars)
total = datetime.datetime.now() - now
logger.info("performance_data", extra=log_json(self.req_context,
{"type": "LAMBDA", "milliseconds": int(total.total_seconds() * 1000)}))
return ret
except Exception as e:
error = e
error_message = str(error)
e.stack = traceback.format_exc()
logger.error(error_message, extra=log_json(self.req_context, Util.get_req_params(request), e))
#exc_type, exc_obj, exc_tb = sys.exc_info()
#fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
#logger.debug('An error occured in '+str(fname)+' lineno: '+str(exc_tb.tb_lineno)+' exc_type '+str(exc_type)+' '+e.message)
finally:
self.process_finally(request, orig_log_level)
total = datetime.datetime.now() - now
logger.info("error performance_data", extra=log_json(self.req_context,
{"type": "LAMBDA", "milliseconds": int(total.total_seconds() * 1000)}))
error_code, json_error = Util.json_error_response(self.req_context, settings.ERR_MSG_CLASS, error)
if settings.FRONT_WEB:
return HttpResponseRedirect("/" + str(error_code))
return HttpResponse(json_error, status=error_code, content_type='application/json')
def process_finally(self, request, orig_log_level):
"""
:param request:
:param orig_log_level:
"""
if Util.isDebugEnabled(self.req_context, request):
if logger.getEffectiveLevel() != orig_log_level:
logger.setLevel(orig_log_level)
logger.info("process_finally - back to orig:" + str(orig_log_level),
extra=log_json(self.req_context))
def get(self, request, format=None):
"""
:param request:
:param format:
:return:
"""
vars = {}
return self.do_process(request, HTTPChoice.get, vars, format)
def post(self, request, format=None):
"""
:param request:
:param format:
:return:
"""
vars = {}
return self.do_process(request, HTTPChoice.post, vars, format)
def put(self, request, format=None):
"""
:param request:
:param format:
:return:
"""
vars = {}
return self.do_process(request, HTTPChoice.put, vars, format)
def patch(self, request, format=None):
"""
:param request:
:param format:
:return:
"""
vars = {}
return self.do_process(request, HTTPChoice.patch, vars, format)
def delete(self, request, format=None):
"""
:param request:
:param format:
:return:
"""
vars = {}
return self.do_process(request, HTTPChoice.delete, vars, format)
def process(self,request,typer,vars):
"""
Return a list of all users.
:param request:
:param typer:
:param vars:
:return:
"""
if typer == HTTPChoice.get:
return self.process_get(request,vars)
if typer == HTTPChoice.post:
return self.process_post(request,vars)
if typer == HTTPChoice.put:
return self.process_put(request,vars)
if typer == HTTPChoice.patch:
return self.process_patch(request, vars)
if typer == HTTPChoice.delete:
return self.process_delete(request,vars)
return HttpResponse('this is a '+str(typer)+' on '+self.get_view_name())
def process_get(self,request,vars):
"""
:param request:
:param vars:
:return:
"""
return HttpResponse('this is process get on '+self.get_view_name())
def process_post(self,request,vars):
"""
:param request:
:param vars:
:return:
"""
return HttpResponse('this is process post on '+self.get_view_name())
def process_put(self,request,vars):
"""
:param request:
:param vars:
:return:
"""
return HttpResponse('this is process put on '+self.get_view_name())
def process_patch(self, request, vars):
"""
:param request:
:param vars:
:return:
"""
return HttpResponse('this is process patch on ' + self.get_view_name())
def process_delete(self,request,vars):
return HttpResponse('this is process delete on '+self.get_view_name())
def get_the_template(self, request,html):
"""
:param request:
:param html:
:return:
"""
return loader.get_template(html)
def get_template(self, request):
"""
get the proper template
:param request:
:return:
"""
if Util.mobile(request):
t = loader.get_template(self.the_html)
the_mobile_web = self.the_tag
else:
t = loader.get_template(self.other_html)
the_mobile_web = self.other_tag
return t, the_mobile_web
def get_client_ip(self,request):
"""
:param request:
:return:
"""
ip = request.META.get('REMOTE_ADDR')
logger.debug("get_client_ip: " + str(ip), extra=log_json(self.req_context))
return ip
def get_jwt(self, request):
"""
:param request:
:return:
"""
ip = self.get_client_ip(request)
encoded_token = jwt.encode({'ip': ip}, settings.SECRET_JWT_KEY, algorithm ='HS256')
return encoded_token
def check_jwt(self, request):#return true if token matches
ip = self.get_client_ip(request)
encoded_token = request.GET.get('jwt',None)
if not encoded_token:
return False
        decoded_token = jwt.decode(encoded_token, settings.SECRET_JWT_KEY, algorithms=['HS256'])
return ip == decoded_token['ip']
def get_jwt_str(self, request):
return '&jwt=' + self.get_jwt(request).decode()
from .mixin import PerfMixin
class PerfLink(PerfMixin, AbsBaseLink):
permission_classes = (permissions.AllowAny,)
##################################### test ##########################
from .mixin import TestMixin
class TestLink(TestMixin, AbsBaseLink):
permission_classes = (permissions.AllowAny,)
|
def up(config, conn, semester, course):
with conn.cursor() as cursor:
cursor.execute("ALTER TABLE gradeable_component_data ADD COLUMN gcd_verifier_id VARCHAR(255)")
cursor.execute("ALTER TABLE gradeable_component_data ADD COLUMN gcd_verify_time TIMESTAMP")
cursor.execute("ALTER TABLE gradeable_component_data ADD CONSTRAINT gradeable_component_data_verifier_id_fkey FOREIGN KEY (gcd_verifier_id) REFERENCES users(user_id)")
def down(config, conn, semester, course):
with conn.cursor() as cursor:
cursor.execute("ALTER TABLE ONLY gradeable_component_data DROP COLUMN gcd_verifier_id")
cursor.execute("ALTER TABLE ONLY gradeable_component_data DROP COLUMN gcd_verify_time")
|
import json
import os
import subprocess
import boto3
import spotipy
from sklearn.neighbors import NearestNeighbors
from spotipy.oauth2 import SpotifyClientCredentials
from infiniteremixer.data.createdataset import _create_data_pipeline
from infiniteremixer.remix.generateremix import _create_objects
from infiniteremixer.segmentation.segmentextractor import SegmentExtractor
from infiniteremixer.utils.io import load_from_pickle, save_to_pickle, write_wav
# PATHS
AUDIOS_PATH = "results/audios"
SEGMENTS_PATH = "results/segmented"
DATASET_PATH = "results/dataset"
NEAREST_NEIGHBOUR_PATH = "results/model/model.pkl"
REMIX_PATH = "results/remix/remix.wav"
SAMPLE_RATE = 22050
# PARAMETERS FOR GENERATION
JUMP_RATE = 0.005
NUM_BEATS = 60
# SPOTIFY CREDENTIALS (set based on the darko api and db)
os.environ["SPOTIPY_CLIENT_ID"] = "3e97af2ef40f4abdb8804a4cf480dee2"
os.environ["SPOTIPY_CLIENT_SECRET"] = "59ac0311c2cb463bb035c67258aa4ac1"
sp = spotipy.Spotify(auth_manager=SpotifyClientCredentials())
# AWS CONFIG
boto_session = boto3.session.Session(profile_name="mfa")
sqs = boto_session.client("sqs")
# THIS WILL BE CHANGED BY THE CLOUDFORMATION OUTPUT SQS URL
QUEUE_URL = "https://sqs.us-east-1.amazonaws.com/315217542954/testQueue"
def parse_spotify_url(url):
"""
Parse the provided Spotify playlist URL and determine if it is a playlist, track or album.
:param url: URL to be parsed
:return tuple indicating the type and id of the item
"""
parsed_url = url.replace("https://open.spotify.com/", "")
item_type = parsed_url.split("/")[0]
item_id = parsed_url.split("/")[1]
return item_type, item_id
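# For example, parse_spotify_url("https://open.spotify.com/track/abc123") returns
# ("track", "abc123"); note that any "?si=..." query string stays attached to the id.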
# ****************************************************************
# Simulate sending from api
message = json.dumps(
{
"playlist": "https://open.spotify.com/playlist/4ooacn2V0kwAcWkxDosuYr?si=64adb5ad66194816"
}
)
response = sqs.send_message(
QueueUrl=QUEUE_URL,
MessageBody=message,
)
# ****************************************************************
# Retrieve message
response = sqs.receive_message(
QueueUrl=QUEUE_URL,
MaxNumberOfMessages=1,
)
# Download songs given the URL
# Delete message after processing
message = response.get("Messages")
print(message)
if message is not None:
# Get the playlist uri
playlist_uri = json.loads(message[0].get("Body")).get("playlist")
print(playlist_uri)
# Download the songs
output = subprocess.run(
["spotify_dl", "-l", playlist_uri, "-o", AUDIOS_PATH],
stdout=subprocess.PIPE,
text=True,
)
print(output.returncode)
_, item_id = parse_spotify_url(playlist_uri)
playlist_name = sp.playlist(playlist_id=item_id, fields="name").get("name")
# Clean the files that aren't mp3 files
PLAYLIST_TRACKS_PATH = f"{AUDIOS_PATH}/{playlist_name}"
not_audio_files = [
i for i in os.listdir(PLAYLIST_TRACKS_PATH) if not i.endswith("mp3")
]
for f in not_audio_files:
output = subprocess.run(
["rm", f"{PLAYLIST_TRACKS_PATH}/{f}"], stdout=subprocess.PIPE, text=True
)
# Segment
segment_extractor = SegmentExtractor(SAMPLE_RATE)
segment_extractor.create_and_save_segments(
PLAYLIST_TRACKS_PATH,
SEGMENTS_PATH,
)
    # Extract and aggregate features
data_pipeline = _create_data_pipeline()
data_pipeline.process(
SEGMENTS_PATH,
DATASET_PATH,
)
# Fit nearest neighbors
dataset = load_from_pickle(f"{DATASET_PATH}/dataset.pkl")
print(f"Loaded dataset from {DATASET_PATH}/dataset.pkl")
print(f"Dataset array has shape {dataset.shape}")
nearest_neighbour = NearestNeighbors()
nearest_neighbour.fit(dataset)
print("Created nearest neighbour")
save_to_pickle(NEAREST_NEIGHBOUR_PATH, nearest_neighbour)
print(f"Saved nearest neighbour model to {NEAREST_NEIGHBOUR_PATH}")
# Generate remix
remixer, chunk_merger = _create_objects(JUMP_RATE, NUM_BEATS)
remix = remixer.generate_remix()
print(f"Generated remix with {NUM_BEATS} beats")
audio_remix = chunk_merger.concatenate(remix.file_paths)
print(f"Merged beats together")
write_wav(REMIX_PATH, audio_remix, SAMPLE_RATE)
print(f"Saved new remix to {SAMPLE_RATE}")
|
import os
from PIL import Image
import numpy as np
pngs=[]
labels=[]
count = 0
for filename in os.listdir("./pymol/"):
if filename.endswith(".png"):
img = Image.open("./pymol/"+filename)
arr = np.array(img)
pngs.append(arr)
labels.append(int(filename[0]))
labels=np.array(labels)
def OneHot(T):
targetType=list(set(T.flatten()))
    oneHotT=np.zeros((T.shape[0], len(targetType)))
for i in range(T.shape[0]):
oneHotT[i,int(T[i])]=1
return oneHotT
x_train=np.array(pngs)
t_train=OneHot(labels)
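# Informal check of OneHot: for labels np.array([0, 2, 1]) it returns
# [[1, 0, 0],
#  [0, 0, 1],
#  [0, 1, 0]]
# (it assumes the labels are integers in the range 0..K-1).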
|
# coding: utf-8
import asyncio
import tkinter as tk
import tkinter.ttk as ttk
from tkinter import filedialog
import ffmpeg_helper
import subtitle_helper
from utils import async_callback, get_ext
class STSearchApp(tk.Tk):
def __init__(self, interval=1/60):
super().__init__()
self.loop = asyncio.get_event_loop()
self.tasks = []
self.protocol('WM_DELETE_WINDOW', self.close)
self.loop.create_task(self.updater(interval))
self.subtitles = []
self.create_widgets()
self.cancel_event = asyncio.Event()
async def updater(self, interval):
while True:
self.update()
await asyncio.sleep(interval)
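    # The updater task above is the glue between tkinter and asyncio: instead of calling
    # tk.mainloop(), the asyncio event loop drives the GUI by calling self.update() roughly
    # 1/interval times per second, so the awaitable ffmpeg/subtitle helpers can run in between.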
def cancel(self):
self.cancel_event.set()
self.cancel_event = asyncio.Event()
@async_callback
async def ask_extract_files(self):
files = filedialog.askopenfilenames()
if files and len(files):
for name in files:
print('loading ' + name + '...')
tmp = await ffmpeg_helper.load_subtitles(file_input=name)
for subtitle in tmp:
self.subtitles.append(subtitle_helper.Subtitle(
start=subtitle.start,
end=subtitle.end,
text=subtitle.text,
video=name
))
print('loaded {} subtitles'.format(len(self.subtitles)))
def clear_subtitles(self):
self.subtitles = []
def search(self):
# TODO: make search support multiple words
self.audio = []
self.results_box.delete(0, tk.END)
self.results = list(subtitle_helper.search(self.subtitles, self.search_box.get()))
for result in self.results:
self.results_box.insert(tk.END, subtitle_helper.to_string(result))
self.audio.append(False)
@async_callback
async def load_audio(self):
idx = self.results_box.index(tk.ACTIVE)
if not self.audio[idx]:
r = self.results[idx]
start, end, video = r.start, r.end, r.video
self.audio[idx], _ = await ffmpeg_helper.trim(start, end, file_input=video, format=get_ext(video), format_out='matroska')
print('loaded ' + video)
@async_callback
async def play_audio(self):
idx = self.results_box.index(tk.ACTIVE)
if not self.audio[idx]:
print('Audio not loaded yet!')
return
sound, err = await ffmpeg_helper.trim(self.start_scale.get(), self.end_scale.get(), input=self.audio[idx], format='matroska', format_out='wav')
await ffmpeg_helper.play_sound(input=sound, format='wav', event=self.cancel_event)
@async_callback
async def export_audio(self):
idx = self.results_box.index(tk.ACTIVE)
if not self.audio[idx]:
print('Audio not loaded yet!')
return
out, err = await ffmpeg_helper.trim(self.start_scale.get(), self.end_scale.get(), input=self.audio[idx], format='matroska')
n = filedialog.asksaveasfilename(filetypes=(('.mka files', '*.mka'),))
if n is not None and len(n) > 0:
if not n.endswith('.mka'):
n = n + '.mka'
with open(n, 'wb') as f:
f.write(out)
def create_extract_tab(self):
tab = tk.Frame(master=self, bd=1, relief=tk.GROOVE)
tab.grid()
ttk.Label(master=tab, text='Extract').grid()
e = ttk.Button(master=tab, text='Extract from files', command=self.ask_extract_files)
e.grid(row=1, column=1, padx=10, pady=10)
c = ttk.Button(master=tab, text='Clear', command=self.clear_subtitles)
c.grid(row=1, column=2, padx=10, pady=10)
return tab
def create_search_tab(self):
tab = tk.Frame(master=self, bd=1, relief=tk.GROOVE)
tab.grid()
ttk.Label(master=tab, text='Search').grid()
self.search_box = ttk.Entry(master=tab)
self.search_box.grid(row=1, column=1, padx=5, pady=10)
b = ttk.Button(master=tab, text='Go', command=self.search)
b.grid(row=1, column=2, padx=5, pady=10)
self.results_box = tk.Listbox(master=tab)
self.results_box.grid(row=2, column=1, padx=5, pady=10)
return tab
def create_audio_tab(self):
tab = tk.Frame(master=self, bd=1, relief=tk.GROOVE)
tab.grid()
ttk.Label(master=tab, text='Clip').grid()
ttk.Button(master=tab, text='Load audio segment', command=self.load_audio).grid(row=1, column=1, padx=10, pady=10)
ttk.Button(master=tab, text='Play clip', command=self.play_audio).grid(row=2, column=1, padx=10, pady=10)
ttk.Button(master=tab, text='Stop', command=self.cancel).grid(row=3, column=1, padx=10, pady=10)
ttk.Button(master=tab, text='Export clip', command=self.export_audio).grid(row=4, column=1, padx=10, pady=10)
self.start_scale = ttk.Scale(master=tab)
self.start_scale.grid(row=5, column=1, padx=10, pady=10)
self.end_scale = ttk.Scale(master=tab)
self.end_scale.grid(row=6, column=1, padx=10, pady=10)
return tab
def create_widgets(self):
self.create_extract_tab().grid(row=0, column=0, padx=10, pady=10)
self.create_search_tab().grid(row=1, column=0, padx=10, pady=10)
self.create_audio_tab().grid(row=0, rowspan=2, column=1, padx=10, pady=10)
def close(self):
self.loop.stop()
self.destroy()
def update(self):
super().update()
if self.results_box.size() > 0:
idx = self.results_box.index(tk.ACTIVE)
if idx >= 0 and len(self.results) > idx:
s = self.results[idx]
self.start_scale.config(from_=0, to_=s.end-s.start)
self.end_scale.config(from_=0, to_=s.end-s.start)
if __name__ == '__main__':
app = STSearchApp()
app.loop.run_forever()
app.loop.close() |
from parser import DocumentParser
from store import DocumentStoreFactory, TermStoreFactory
from index import IndexFactory
from parser import TextParser
url1 = "https://stackoverflow.com/questions/9626535/get-domain-name-from-url"
text1 = "Extracting domain from URL in python"
text2 = "How to Get Domain Name from URL String domain in Python"
text3 = "How to automatically extract domain from URL through conf files at search-time"
url3 = "https://answers.splunk.com/answers/188774/how-to-automatically-extract-domain-from-url-throu.html"
url2 = "https://ashiknesin.com"
doc1 = DocumentParser.parse_document(url1, text1)
doc2 = DocumentParser.parse_document(url2, text2)
doc3 = DocumentParser.parse_document(url3, text3)
doc_store = DocumentStoreFactory.get_store()
print(doc_store._data)
index = IndexFactory.get_or_create_index("default")
index.add_document(doc1)
index.add_document(doc2)
index.add_document(doc3)
index.display()
from query import QueryEvaluator
qeval = QueryEvaluator(IndexFactory, TermStoreFactory)
query = "extracting domain"
docs = qeval.evaluate_phrase(query)
print("PHRASE QUERY RESULT")
print(docs)
|
from setuptools import setup
from setuptools import find_packages
setup(
name='image_quality',
version='0.1.0',
description='image quality assessment',
url='https://github.com/welcotravel/image-quality-assessment',
author='victusfate',
author_email='[email protected]',
license='Apache License 2.0',
packages=find_packages(),
install_requires = [
'tensorflow==2.4.*',
'nose==1.3.*',
'scikit-learn==0.24.*',
'Pillow==7.2.*',
'Flask==1.0.*',
'python-consul==1.1.*',
'requests>=2.22.0,<3.0.0'
],
    dependency_links = [
        'git+https://github.com/victusfate/rsyslog_cee.git@main#egg=rsyslog_cee',
        'git+https://github.com/victusfate/bandolier.git@main#egg=bandolier'
    ],
zip_safe=False
)
|
from summary import FrequencySummarizer
import urllib.request, re, html, sys, getopt
summarize= FrequencySummarizer()
def main(argv):
username = ''
date = ''
try:
opts, args = getopt.getopt(argv,"hu:d:",["username=","date="])
except getopt.GetoptError:
print("test.py -u <inputfile> -d <outputfile>")
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print("test.py -u <inputfile> -d <outputfile>")
sys.exit()
elif opt in ("-u", "--username"):
username = arg
elif opt in ("-d", "--date"):
date = arg
return username,date
def countDot(word):
dot=word.count(".")
if dot >= 6:
return 2
return 1
user=""
prev=""
if __name__ == "__main__":
user,prev=main(sys.argv[1:])
if user=="" or prev=="":
user="sbyudhoyono"
prev=" 12 31"
url = "https://tweets-grabber.000webhostapp.com/grab.php?username="+user
x = urllib.request.urlopen(url)
raw_data = x.read()
twit=str(raw_data).split("<br>")
tweets=[]
for tw in twit[2:]:
tw=re.sub(r'\\x[a-zA-Z0-9]{2}','',tw)
if "followe" in tw:
continue
tw=tw.replace("\\n"," ")
rawa=tw.split("||")
if rawa[0]==prev:
tweets.append(rawa[1])
dummy4=". ".join(tweets)
dummy4=re.sub(r'\.+','.',dummy4)
dummy4=html.unescape(dummy4)
print(dummy4)
summ=summarize.summarize(dummy4,countDot(dummy4))
print(summ)
|
import numpy as np
import pyroms
import netCDF4
from mpl_toolkits.basemap import pyproj
from pyroms_toolbox.Grid_HYCOM import Grid_HYCOM
def get_nc_Grid_HYCOM(grdfile, name='GLBa0.08_NEP'):
"""
grd = get_nc_Grid_HYCOM(grdfile)
Load grid object for HYCOM_GLBa0.08_NEP
"""
nc = netCDF4.Dataset(grdfile)
lon = nc.variables['lon'][:]
lat = nc.variables['lat'][:]
depth = nc.variables['z'][:]
var = nc.variables['temp'][0,:,1:-1,1:-1]
nc.close()
lon_t = lon[1:-1,1:-1]
lat_t = lat[1:-1,1:-1]
lon_vert = 0.5 * (lon[:,1:] + lon[:,:-1])
lon_vert = 0.5 * (lon_vert[1:,:] + lon_vert[:-1,:])
lat_vert = 0.5 * (lat[1:,:] + lat[:-1,:])
lat_vert = 0.5 * (lat_vert[:,1:] + lat_vert[:,:-1])
mask_t = np.array(~var[:].mask, dtype='int')
z_t = np.tile(depth,(mask_t.shape[2],mask_t.shape[1],1)).T
depth_bnds = np.zeros(len(depth)+1)
for i in range(1,len(depth)):
depth_bnds[i] = 0.5 * (depth[i-1] + depth[i])
depth_bnds[-1] = 5750
bottom = pyroms.utility.get_bottom(var[::-1,:,:], mask_t[0], spval=var.fill_value)
nlev = len(depth)
bottom = (nlev-1) - bottom
h = np.zeros(mask_t[0,:].shape)
for i in range(mask_t[0,:].shape[1]):
for j in range(mask_t[0,:].shape[0]):
if mask_t[0,j,i] == 1:
h[j,i] = depth_bnds[bottom[j,i]+1]
geod = pyproj.Geod(ellps='WGS84')
az_forward, az_back, dx = geod.inv(lon_vert[:,:-1], lat_vert[:,:-1], lon_vert[:,1:], lat_vert[:,1:])
angle = 0.5 * (az_forward[1:,:] + az_forward[:-1,:])
angle = (90 - angle) * np.pi/180.
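    # az_forward is the geodetic azimuth (degrees clockwise from north) along each grid row;
    # (90 - az) * pi/180 converts it to radians measured counter-clockwise from east
    # (the usual math convention for grid rotation angles).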
return Grid_HYCOM(lon_t, lat_t, lon_vert, lat_vert, mask_t, z_t, h, angle, name)
|
"""
MIT License
Copyright (c) 2021 AlphaMusicRoBo
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import asyncio
from pyrogram import Client, idle, filters
import os
from config import Config
from utils import um, USERNAME, FFMPEG_PROCESSES
from pyrogram.raw import functions, types
import os
import sys
from threading import Thread
from signal import SIGINT
import subprocess
CHAT=Config.CHAT
AlphaMusic = Client(
"AlphaMusic",
Config.API_ID,
Config.API_HASH,
bot_token=Config.BOT_TOKEN,
plugins=dict(root="plugins")
)
if not os.path.isdir("./downloads"):
os.makedirs("./downloads")
def stop_and_restart():
AlphaMusic.stop()
os.system("git pull")
os.execl(sys.executable, sys.executable, *sys.argv)
AlphaMusic.start()
@AlphaMusic.on_message(filters.command(["restart", f"restart@{USERNAME}"]) & filters.user(Config.ADMINS) & (filters.chat(CHAT) | filters.private))
async def restart(client, message):
    await message.reply_text("🔄 Wait, updating and restarting the bot...")
await asyncio.sleep(3)
try:
await message.delete()
except:
pass
process = FFMPEG_PROCESSES.get(CHAT)
if process:
try:
process.send_signal(SIGINT)
except subprocess.TimeoutExpired:
process.kill()
except Exception as e:
print(e)
pass
FFMPEG_PROCESSES[CHAT] = ""
Thread(
target=stop_and_restart
).start()
AlphaMusic.send(
functions.bots.SetBotCommands(
commands=[
            types.BotCommand(
                command="start",
                description="Check if the bot is alive"
            ),
            types.BotCommand(
                command="help",
                description="Show the help message"
            ),
            types.BotCommand(
                command="play",
                description="Play a song from YouTube / an audio file"
            ),
            types.BotCommand(
                command="dplay",
                description="Play a song from Deezer"
            ),
            types.BotCommand(
                command="player",
                description="Show the currently playing song with controls"
            ),
            types.BotCommand(
                command="playlist",
                description="Show the playlist"
            ),
            types.BotCommand(
                command="skip",
                description="Skip the current song"
            ),
            types.BotCommand(
                command="join",
                description="Join the VC"
            ),
            types.BotCommand(
                command="leave",
                description="Leave the VC"
            ),
            types.BotCommand(
                command="vc",
                description="Check whether the VC is joined"
            ),
            types.BotCommand(
                command="stop",
                description="Stop playback"
            ),
            types.BotCommand(
                command="radio",
                description="Start radio / live stream"
            ),
            types.BotCommand(
                command="stopradio",
                description="Stop the radio / live stream"
            ),
            types.BotCommand(
                command="replay",
                description="Replay from the beginning"
            ),
            types.BotCommand(
                command="clean",
                description="Clean up RAW files"
            ),
            types.BotCommand(
                command="pause",
                description="Pause the song"
            ),
            types.BotCommand(
                command="resume",
                description="Resume the paused song"
            ),
            types.BotCommand(
                command="mute",
                description="Mute in VC"
            ),
            types.BotCommand(
                command="volume",
                description="Set the volume between 0-200"
            ),
            types.BotCommand(
                command="unmute",
                description="Unmute in VC"
            ),
            types.BotCommand(
                command="restart",
                description="Update and restart the bot"
            )
]
)
)
idle()
AlphaMusic.stop()
|
import matplotlib
matplotlib.use('Agg')
from argparse import ArgumentParser
import matplotlib.pyplot as plt
import numpy as np
from multihist import Hist1d
from blueice.inference import bestfit_scipy
from LowER.sciencerun import SR1
import LowER.stats as lstat
from LowER.signals import SolarAxion
from tqdm import tqdm
def bestfit():
"""Second example in osg_tutorial"""
# this is setting up LowER likelihood
# there is no partitioning here -- just a single SR1 likelihood
print("Setting up the likelihood...")
sr1 = SR1()
# axion signal model. this is ABC-only
A = SolarAxion(1e-3, gae=3.5e-12)
# initialize the likelihood
lf = lstat.sciencerun_likelihood(sr1, A)
print("Simulating dataset")
# simulate a fake dataset
data = lf.base_model.simulate()
# feed the data to the likelihood
lf.set_data(data)
# now do a fit.
    # we use a convenience function that gets the best fit, the null fit, and the likelihood ratio between the two
llr, bestfit, nullfit = lstat.ll_ratio(lf, 'solar_axion')
    # get significance of this sim using Wilks' theorem
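    # (assumes llr is the usual test statistic q0 = -2*ln(L_null/L_best); for a single free
    # parameter, Wilks' theorem then gives significance ~ sqrt(q0))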
print("Background model rejected at %0.2f sigma" % (llr**0.5))
# create histogram objects from best/null fit results
bestfit_hist = lstat.get_fit(lf, bestfit)
nullfit_hist = lstat.get_fit(lf, nullfit)
# plot the data, best-fit, and null-fit
h = Hist1d(data['ces'], bins=np.linspace(0, 30, 31))
f = plt.figure()
h.plot(errors=True, label='sim data')
bestfit_hist.plot(label='best-fit')
nullfit_hist.plot(label='null-fit')
plt.xlim(0, 30)
plt.ylim(0, max(h.histogram) + 20)
plt.xlabel("Energy [keV]")
plt.ylabel('events/keV')
plt.legend()
plt.title("Simulating axions on OSG")
plt.savefig('my_axion_fit.png', dpi=300)
def upper_limit(index=None):
"""Second example in osg_tutorial
:arg index: If passed, tags an index onto the output filename
"""
# this is setting up LowER likelihood
# there is no partitioning here -- just a single SR1 likelihood
print("Setting up the likelihood...")
sr1 = SR1()
# remove a few sources, this helps the sims go faster
for source in ['kr83m', 'xe131m', 'i125', 'dec']:
sr1.remove_source(source)
# axion signal model. this is ABC-only
A = SolarAxion(1e-3, gae=3.5e-12)
# initialize the likelihood
lf = lstat.sciencerun_likelihood(sr1, A)
nsims = 20
limits = np.zeros(nsims)
print("Simulating datasets")
for i in tqdm(range(nsims)):
# simulate a fake dataset, with axion signal = 0
# for a background-only simulation
data = lf.base_model.simulate({'solar_axion': 0})
# feed the data to the likelihood
lf.set_data(data)
        # get upper limit on the axion rate multiplier parameter. this assumes Wilks' theorem.
limits[i] = lf.one_parameter_interval('solar_axion_rate_multiplier', 100, bestfit_routine=bestfit_scipy,
minimize_kwargs=lstat.minimize_kwargs)
# convert limits back to limit on the axion coupling
glimits = A.convert_limit(limits)
output_file = "limits.npy"
if index is not None:
output_file = output_file.replace('.npy', '_%d.npy' % index)
np.save(output_file, glimits)
print("Output saved to %s" % output_file)
def main():
parser = ArgumentParser()
parser.add_argument('example', type=int)
parser.add_argument('--index', type=int)
args = parser.parse_args()
if args.example == 1:
bestfit()
elif args.example == 2:
upper_limit(args.index)
if __name__ == "__main__":
main()
|
import time
import picamerax
import picamerax.array
with picamerax.PiCamera() as camera:
with picamerax.array.PiYUVArray(camera) as stream:
camera.resolution = (100, 100)
camera.start_preview()
time.sleep(2)
camera.capture(stream, 'yuv')
# Show size of YUV data
print(stream.array.shape)
# Show size of RGB converted data
print(stream.rgb_array.shape)
|
#!/usr/bin/env python3
import numpy as np
from itypes import Path, File
import torch
print()
def test_read(file, **kwargs):
data = File(f"../data/{file}").read(**kwargs)
cmd = f"File('../data/{file}').read({kwargs}):"
print(f"{cmd:80s} shape={data.shape}, dtype={data.dtype}, min={data.min()}, max={data.max()}, device={data.device}")
def test_read_write(file, **kwargs):
data = File(f"../data/{file}").read(**kwargs)
cmd = f"File('../data/{file}').read({kwargs}):"
print(f"{cmd:80s} shape={data.shape}, dtype={data.dtype}, min={data.min()}, max={data.max()}, device={data.device}")
cmd = f"After write:"
File(f"out_read_write_torch/{file}").write(data)
data = File(f"out_read_write_torch/{file}").read(device=kwargs["device"])
print(f"{cmd:80s} shape={data.shape}, dtype={data.dtype}, min={data.min()}, max={data.max()}, device={data.device}")
test_read('test-rgb.png', device=torch.device("cpu"))
test_read('test-rgb.png', device=torch.device("cuda:0"))
print()
test_read_write('test-rgb.png', device=torch.device("cpu"))
test_read_write('test-rgb.png', device=torch.device("cuda:0"))
print()
Path("out_read_write_torch").remove() |
from PyQt5 import QtWidgets, QtCore
from PyQt5.QtCore import *
from PyQt5.QtWidgets import QSlider,QLabel
class MySlider(QSlider):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.updateBoth=None
def setRangeAndLinkLabel(self, sliderMax, minRange, maxRange, label:QLabel):
self.setTickInterval(1)
self.setRange(0,sliderMax)
        self.setValue(sliderMax // 2)
self.sliderMax=sliderMax
self.minRange=minRange
self.maxRange=maxRange
self.label=label
self.valueChanged.connect(self.updateLabel)
self.updateLabel()
#def setSuddenUpdate
def getContinuousValue(self):
return self.minRange+(self.value()/self.sliderMax)*(self.maxRange-self.minRange)
def updateLabel(self):
v=self.getContinuousValue()
self.label.setText('{0:.3f}'.format(v))
if(self.updateBoth is not None):
self.updateBoth()
def setContinuousValue(self,v):
self.valueChanged.disconnect(self.updateLabel)
tmp=int((v-self.minRange)/(self.maxRange-self.minRange)*self.sliderMax)
tmp=max(0,min(tmp,self.sliderMax))
self.setValue(tmp)
v=self.getContinuousValue()
self.label.setText('{0:.3f}'.format(v))
self.valueChanged.connect(self.updateLabel)
class MySliderForRadius(QSlider):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def setRangeAndLinkLabel(self, sliderMax, minRange, maxRange, label:QLabel):
self.setTickInterval(1)
self.setRange(0,sliderMax)
        self.setValue(sliderMax // 2)  # QSlider.setValue() expects an int
self.sliderMax=sliderMax
self.minRange=minRange
self.maxRange=maxRange
self.label=label
self.valueChanged.connect(self.updateLabel)
self.updateLabel()
def getContinuousValue(self):
return self.minRange+(self.value()/self.sliderMax)*(self.maxRange-self.minRange)
def updateLabel(self):
v=self.getContinuousValue()
self.label.setText('{0:.3f}'.format(v))
def setContinuousValue(self,v):
tmp=int((v-self.minRange)/(self.maxRange-self.minRange)*self.sliderMax)
tmp=max(0,min(tmp,self.sliderMax))
self.setValue(tmp)
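# Illustrative usage sketch (not part of the original module); assumes a
# running QApplication and an existing QLabel named `label`:
#
#   slider = MySlider(QtCore.Qt.Horizontal)
#   slider.setRangeAndLinkLabel(sliderMax=100, minRange=0.0, maxRange=2.0, label=label)
#   slider.setContinuousValue(1.25)      # stored internally as integer position 62
#   slider.getContinuousValue()          # -> 1.24 (quantized to 1/100 of the range)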
|
from .biotables_tailor import (aa2codon_table, codon2aa_table, default_cai_table,
default_tai_tuller, sense_codons_list)
from random import choice
from math import log,exp
def analyzeCodons(seq, positions=None, data_table=default_cai_table):
'''
    given a sequence, it returns a list with two elements: [list_of_codons, list_of_codons_cai]
    seq (str):
        input DNA sequence
    positions (list, optional):
        codon start positions to analyze; defaults to every third index of seq
Usage:
--
>>> analyzeCodons("ATGCAGTAGCAGTGCAAGTTG")
[['atg', 'cag', 'tag', 'cag', 'tgc', 'aag', 'ttg'], [1, 1, 1, 1, 1, 0.253, 0.02]]
>>> analyzeCodons("ATGCAGTAGCAGTGCAAGTTG",[0,3,6])
[['atg', 'cag', 'tag'], [1, 1, 1]]
'''
    if positions is None:
positions = range(0, len(seq), 3)
seq = seq.lower()
codons = []
codons_cai = []
for i in positions:
codon = seq[i:i + 3]
codons.append(codon)
if codon in data_table:
codons_cai.append(data_table[codon])
else:
codons_cai.append("NA")
return [codons, codons_cai]
def get_alternate_codons(codon, data=default_tai_tuller, dist=0):
"""
returns a alternate codon to codon
data:
dictionary with a map between codons and tAI
dist:
0 --> only synonymous codon
1-3 --> only codon with 1-3 nt difference from original
"""
if dist == 0:
# return only syn codon
return [(syn_cod, data[syn_cod])
for syn_cod in aa2codon_table[codon2aa_table[codon]]
if syn_cod != codon]
else:
def diff(str1, str2):
nbr = 0
for i in range(len(str1)):
if str1[i] != str2[i]:
nbr += 1
return nbr
# return syn codon and codon 1 nt away
return [(alt_cod, data[alt_cod]) for alt_cod in sense_codons_list
if (alt_cod != codon and diff(codon, alt_cod) <= dist)]
def randomMutation(nucleotide):
possible_mut = list(set('atcg') - set(nucleotide))
return choice(possible_mut)
def analyze_cai(seq,cai_table = default_cai_table):
seq = seq.lower()
score = 0
len_sq = 0
for i in range(0, len(seq), 3):
if seq[i:i + 3] in cai_table:
score += log(cai_table[seq[i:i + 3]])
len_sq += 1
score /= len_sq
return exp(score) |
# -*- coding: utf-8 -*-
"""
This module contains the Model class which is used to make querys on the
database.
:license: MIT, see LICENSE for details.
"""
import warnings
from cuttlepool import CuttlePool
LEGAL_COMPARISONS = [
'=',
'<=>',
'>',
'>=',
'<',
'<=',
'!=',
'<>',
]
LEGAL_CONDITIONS = [
'AND',
'&&',
'OR',
'||',
'XOR'
]
class Model(object):
"""
``Model`` represents a table. It is used for querying the database. It is
meant to be subclassed to create tables.
:param obj transaction: A ``Transaction`` object which will bundle all
executed SQL statements into one transaction.
:param bool validate_columns: Requires a validation check on all query
methods that pass columns as parameters.
If raise_error_on_validation is false, no
error will be raised, but the query method
will not modify ``query`` or ``values`` on
the object. Defaults to ``True``.
:param bool raise_error_on_validation: Requires that an error is raised
when a column fails validation.
Defaults to ``True``. If
validate_columns is false, no error
will be raised.
:raises TypeError: Error caused by instantiating Model.
"""
def __init__(self, transaction=None, validate_columns=True, raise_error_on_validation=True):
#: Holds the connection to the database.
self._connection = None
#: Holds a cursor to the database.
self._cursor = None
#: Holds query to be executed as a list of strings.
self._query = []
#: Holds values to be inserted into query when executed.
self._values = []
self._transaction = transaction
self.validate_columns = validate_columns
self.raise_error_on_validation = raise_error_on_validation
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def __iter__(self):
return self.cursor.__iter__()
@property
def name(self):
"""
Returns the table name which can be used for writing queries.
"""
return type(self).__name__.lower()
@property
def connection_arguments(self):
"""
Returns the connection arguments used by the underlying connection
driver.
"""
return self._pool.connection_arguments
@property
def connection(self):
"""
Returns a connection to the database. Gets a connection from the
connection pool if it doesn't already have one.
:note: Use :func:`~cuttle.home.Model.close` to close the connection.
:func:`~cuttle.home.Model.close` is not necessary if using the
``Model`` object as a context manager.
"""
try:
self._connection.ping()
except:
self._connection = self._pool.get_connection()
return self._connection
@property
def cursor(self):
"""
Returns a cursor to the database. The cursor must be closed explicitly
before a new one will be made.
:note: A connection will automatically be made to the database before
creating a cursor.
"""
if self._transaction is not None:
return self._transaction._cursor
if self._cursor is None or self._cursor.connection is None:
self._cursor = self.connection.cursor()
return self._cursor
@property
def query(self):
"""
Returns the current query string.
"""
return ' '.join(self._query)
@property
def values(self):
"""
Returns the values as a tuple.
"""
return tuple(self._values)
@property
def seq_of_values(self):
"""
Returns a sequence of values as tuples.
"""
return [tuple(v) for v in self._values]
@classmethod
def _configure(cls, sql_type, **kwargs):
"""
Configures the Model class to connect to the database.
:param str sql_type: The SQL implementation to use.
:param \**kwargs: Connection arguments to be used by the underlying
connection object.
:raises ValueError: If improper sql_type parameter.
"""
cls._sql_type = sql_type.lower()
if cls._sql_type == 'mysql':
import pymysql
connect = pymysql.connect
# add ping method to pool
class Pool(CuttlePool):
def normalize_connection(self, connection):
connection.cursorclass = pymysql.cursors.Cursor
def ping(self, connection):
try:
connection.ping()
except Exception:
pass
return connection.open
else:
msg = "Please choose a valid sql extension"
raise ValueError(msg)
cls._pool = Pool(connect, **kwargs)
def _create_table(self):
"""
Generates table schema.
:raises AttributeError: If table has multiple primary keys.
"""
create_tbl = []
create_tbl.append(
'CREATE TABLE IF NOT EXISTS {} (\n'.format(self.name))
for column in self.columns:
create_tbl.append(column._column_schema())
create_tbl[-1] = create_tbl[-1].replace(',', '')
create_tbl.append(')')
self.append_query(''.join(create_tbl))
return self
def select(self, *args):
"""
Adds a SELECT query on the table associated with the model. If no
arguments are supplied, all rows will be returned.
:param \*args: Columns to select for as strings. If no columns
provided, all columns will be selected.
"""
if args:
args = self.columns_lower(*args)
if self.check_columns(*args):
q = ['SELECT']
if args:
q.append(', '.join([c for c in args]))
else:
q.append('*')
q.append('FROM {}'.format(self.name))
self.append_query(' '.join(q))
return self
def insert(self, columns=[], values=[]):
"""
Adds an INSERT query on the table associated with the model.
:param list columns: The columns to insert values into.
:param list values: Values to be inserted into the table. They must be
in the same order as the columns. Also accepts a
list of lists/tuples which would be used with
:func:`~cuttle.model.Model.executemany`.
"""
if columns:
columns = self.columns_lower(*tuple(columns))
if self.check_columns(*columns):
q = ['INSERT INTO {}'.format(self.name)]
c = '({})'.format(', '.join(columns))
q.append(c)
q.append('VALUES')
holder = '({})'.format(
', '.join(['%s' for __ in range(len(columns))]))
q.append(holder)
self.append_query(' '.join(q))
self.extend_values(values)
return self
def update(self, **kwargs):
"""
Adds an UPDATE query on the table associated with the model.
:param dict \**kwargs: The values to be updated in the table where the
key is the column.
:raises ValueError: If no column value pairs passed in.
"""
if not kwargs:
raise ValueError('column value pairs required to update table')
kwargs = self.columns_lower(**kwargs)
if self.check_columns(*tuple(key for key in kwargs.keys())):
columns, values = [], []
for key, value in kwargs.items():
columns.append(key)
values.append(value)
q = ['UPDATE {} SET'.format(self.name)]
q.append(', '.join(['{}=%s'.format(column) for column in columns]))
self.append_query(' '.join(q))
self.extend_values(values)
return self
def delete(self):
"""
Adds a DELETE query on the table associated with the model.
"""
self.append_query('DELETE FROM {}'.format(self.name))
return self
def where(self, condition='AND', comparison='=', **kwargs):
"""
        Adds a WHERE clause to the query. Column-value pairs are compared using ``comparison`` and joined with ``condition``.
:param str condition: The conditional operator to use in the WHERE
clause.
:param str comparison: The comparison operator to use in the WHERE
clause.
:param \**kwargs: Key value pairs where the keys are the columns of the
table.
:raises ValueError: If condition or comparison operator is invalid. If
no column value pairs passed in.
"""
if not kwargs:
raise ValueError('column value pairs required for WHERE clause')
condition = condition.upper()
comparison = comparison.upper()
if condition not in LEGAL_CONDITIONS or comparison not in LEGAL_COMPARISONS:
raise ValueError(
'The conditional or comparison operator is not legal.')
kwargs = self.columns_lower(**kwargs)
if self.check_columns(*tuple(key for key in kwargs.keys())):
columns, values = [], []
for key, value in kwargs.items():
columns.append(key)
values.append(value)
q = []
if 'WHERE' in self.query:
q.append(condition)
else:
q.append('WHERE')
q.append(' {} '.format(condition).join(['{}{}%s'.format(column, comparison)
for column in columns]))
self.append_query(' '.join(q))
self.extend_values(values)
return self
def execute(self, commit=False):
"""
        Executes the query and returns the results (if any).
:param bool commit: Will commit the executed statement if ``True``.
Defaults to ``False``.
:returns: The result of ``cursor.execute()``.
"""
result = self.cursor.execute(self.query, self.values)
self.reset_query()
if commit:
self.commit()
return result
def executemany(self, commit=False):
"""
Executes the query with multiple values and returns the results (if any).
:param bool commit: Will commit the executed statement if ``True``.
Defaults to ``False``.
        :returns: The result of ``cursor.executemany()``.
"""
result = self.cursor.executemany(self.query, self.seq_of_values)
self.reset_query()
if commit:
self.commit()
return result
def fetchone(self):
"""
Fetches the next row.
"""
return self.cursor.fetchone()
def fetchmany(self, size=None):
"""
Fetches ``size`` number of rows or all if ``size`` is ``None``.
:param int size: The number of rows to fetch. Defaults to ``None``.
"""
return self.cursor.fetchmany(size)
def fetchall(self):
"""
Fetches all the rows in the cursor.
"""
return self.cursor.fetchall()
def commit(self):
"""
Commits changes.
"""
self.connection.commit()
def rollback(self):
"""
Rolls back the current transaction.
"""
self.connection.rollback()
def append_query(self, query):
"""
Appends query string to current _query attribute.
:param str query: A SQL query string.
"""
self._query.append(query)
def extend_values(self, values):
"""
Extends _values with input values.
:param list values: Values to be inserted into the query.
"""
self._values.extend(values)
def columns_lower(self, *args, **kwargs):
"""
Converts columns to lowercase. Accepts both args and kwargs, but args
take precedence for conversion. If both are passed as arguments, only
converted args will be returned.
:param \*args: Column names.
:param \**kwargs: Pairs where the key is the column name.
:raises ValueError: If no argument(s) are passed to the function.
"""
if args:
return tuple(arg.lower() for arg in args)
elif kwargs:
return {key.lower(): value for key, value in kwargs.items()}
else:
raise ValueError("columns_lower must receive input of either args "
"or kwargs")
def check_columns(self, *args):
"""
Ensures columns exist on model before creating query string. Failing to
check columns can result in sql injection.
:param \*args: Columns to be checked against model.
:raises ValueError: If parameters are not columns on model.
"""
column_names = set(col._attributes['name'].lower()
for col in self.columns)
failed_columns = set(arg.lower() for arg in args) - column_names
if self.validate_columns and failed_columns:
msg = ('Columns {} were not found on the Model. Be wary of SQL '
'injection.').format(failed_columns)
if self.raise_error_on_validation:
raise ValueError(msg)
else:
warnings.warn(msg)
return False
return True
def reset_query(self):
"""
Resets query and values property on model.
"""
self._query = []
self._values = []
def close(self):
"""
Closes the database connection and cursor, if any.
:note: If model is instantiated outside of a with block, it is
recommended to explicitly call ``close()``.
"""
self._close_connection()
def _close_cursor(self):
"""
Close the cursor, if any.
"""
try:
self._cursor.close()
except Exception:
pass
finally:
self._cursor = None
def _close_connection(self):
"""
Close the connection, if any.
"""
self._close_cursor()
try:
self._connection.close()
except Exception:
pass
finally:
self._connection = None
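# Illustrative usage sketch (not part of the original module). Assumes a
# hypothetical subclass ``User`` whose ``columns`` define ``id``, ``name`` and
# ``email``, and a pool configured beforehand via
# ``Model._configure('mysql', host=..., user=..., ...)``:
#
#   with User() as user:
#       user.select('name', 'email').where(comparison='>', id=10).execute()
#       rows = user.fetchall()
#
#   with User() as user:
#       user.insert(['name', 'email'], ['Alice', 'alice@example.com']).execute(commit=True)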
|
import numpy as np
def is_multilabel(df, col):
'''check if a column contains multilabels
Args:
df (pd.DataFrame): Dataframe to use for filtering
col (str): column to check for multilabel
Returns:
bool: True if column contains multilabel, False if not
'''
if isinstance(df[col].iloc[0], (list, np.ndarray)):
return True
else:
return False
def remove_empty(df, col='anno_data'):
'''Remove images with empty columns in specified column
Args:
df (pd.DataFrame): Dataframe to use for filtering
        col (str): column to search for empty entries
Returns:
pd.DataFrame
'''
return df[df[col].notnull()]
def unique_labels(df, col='anno_lbl'):
'''Get unique dataset labels.
Args:
df (pd.DataFrame): dataframe to analyze
col (str): Column containing list of labels
    Returns:
        list: unique class names present in the dataset
'''
if is_multilabel(df=df, col=col):
return np.unique(df[col].map(list).sum())
return list(df[col].unique())
def selection_mask(labels, df, col='anno_lbl'):
    '''Get mask for labels in dataset
    Args:
        labels (list): labels to look for
        df (pd.DataFrame): dataset to mask
        col (str): Column containing list of labels
    Returns:
        pd.Series: boolean mask defining which rows contain one of the
            provided labels
'''
if not isinstance(labels, (list, np.ndarray)):
labels = [labels]
if is_multilabel(df, col):
return df[col].apply(lambda x:
bool(sum([l in list(x) for l in labels])))
return df[col].apply(lambda x: x in labels)
def label_selection(labels, df, col='anno_lbl'):
'''Get entries with a selection of labels from the dataframe
Args:
labels (list): list of labels to select
df (pd.DataFrame): Frame to apply label selection
col (str): Column containing list of labels
Returns:
pd.DataFrame: dataframe with label selection
'''
return df[selection_mask(df=df, labels=labels, col=col)]
def ignore_labels(labels, df, col='anno_lbl'):
    ''' Remove dataframe entries where the given labels occur
Args:
labels (list): list of labels to ignore
df (pd.DataFrame): Frame to apply label ignore
col (str): Column containing list of labels
Returns:
pd.DataFrame: dataframe with label selection
'''
return df[~selection_mask(df=df, labels=labels, col=col)]
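# Illustrative usage sketch (not part of the original module); the dataframe
# below is a hypothetical multilabel example:
#
#   import pandas as pd
#   df = pd.DataFrame({'anno_lbl': [['car', 'person'], ['tree'], ['car']]})
#   unique_labels(df)              # unique class names: 'car', 'person', 'tree'
#   label_selection(['car'], df)   # keeps rows 0 and 2
#   ignore_labels(['car'], df)     # keeps row 1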
def img_selection(imgs, df, invers=False):
'''Get entries with a selection of labels from the dataframe
Args:
imgs (list): list of imgs to select
        invers (bool): if True, invert the selection (keep images not in imgs)
df (pd.DataFrame): Frame to apply image selection
Returns:
pd.DataFrame: dataframe with image selection
'''
selection_mask = df.img_path.isin(imgs)
if invers:
selection_mask = ~selection_mask
return df[selection_mask] |
import importlib
import os
from ase.io import read
import mala
from mala.common.parameters import ParametersBase
import numpy as np
import pytest
from mala.datahandling.data_repo import data_repo_path
data_path = os.path.join(os.path.join(data_repo_path, "Be2"), "training_data")
# This test checks whether MALA interfaces to other codes, mainly the ASE
# calculator, still work.
# For the ASE calculator test, it's enough when the energies are roughly the
# same.
accuracy_coarse = 10
class TestInterfaces:
"""Tests MALA interfaces."""
def test_json(self):
"""
Test whether MALA JSON interface is still working.
Please note that this does not test whether all parameters are
correctly serializable, only the interface itself.
"""
params = mala.Parameters()
        # Change a few parameters to see if anything is actually happening.
params.manual_seed = 2022
params.network.layer_sizes = [100, 100, 100]
params.network.layer_activations = ['test', 'test']
params.descriptors.rcutfac = 4.67637
# Save, load, compare.
params.save("interface_test.json")
new_params = params.load_from_file("interface_test.json")
for v in vars(params):
if isinstance(getattr(params, v), ParametersBase):
v_old = getattr(params, v)
v_new = getattr(new_params, v)
for subv in vars(v_old):
assert (getattr(v_new, subv) == getattr(v_old, subv))
else:
assert (getattr(new_params, v) == getattr(params, v))
@pytest.mark.skipif(importlib.util.find_spec("lammps") is None,
reason="LAMMPS is currently not part of the pipeline.")
def test_ase_calculator(self):
"""
Test whether the ASE calculator class can still be used.
        This test covers serial execution and energy calculation only.
Forces are still an experimental feature, so they are not included
here.
"""
####################
# PARAMETERS
####################
test_parameters = mala.Parameters()
test_parameters.data.data_splitting_type = "by_snapshot"
test_parameters.data.input_rescaling_type = "feature-wise-standard"
test_parameters.data.output_rescaling_type = "normal"
test_parameters.network.layer_activations = ["ReLU"]
test_parameters.running.max_number_epochs = 100
test_parameters.running.mini_batch_size = 40
test_parameters.running.learning_rate = 0.00001
test_parameters.running.trainingtype = "Adam"
test_parameters.targets.target_type = "LDOS"
test_parameters.targets.ldos_gridsize = 11
test_parameters.targets.ldos_gridspacing_ev = 2.5
test_parameters.targets.ldos_gridoffset_ev = -5
test_parameters.running.inference_data_grid = [18, 18, 27]
test_parameters.descriptors.descriptor_type = "SNAP"
test_parameters.descriptors.twojmax = 10
test_parameters.descriptors.rcutfac = 4.67637
test_parameters.targets.pseudopotential_path = os.path.join(
data_repo_path,
"Be2")
####################
# DATA
####################
data_handler = mala.DataHandler(test_parameters)
data_handler.add_snapshot("Be_snapshot1.in.npy", data_path,
"Be_snapshot1.out.npy", data_path, "tr")
data_handler.add_snapshot("Be_snapshot2.in.npy", data_path,
"Be_snapshot2.out.npy", data_path, "va")
data_handler.prepare_data()
####################
# NETWORK SETUP AND TRAINING.
####################
test_parameters.network.layer_sizes = [data_handler.get_input_dimension(),
100,
data_handler.get_output_dimension()]
# Setup network and trainer.
test_network = mala.Network(test_parameters)
test_trainer = mala.Trainer(test_parameters, test_network, data_handler)
test_trainer.train_network()
####################
# INTERFACING.
####################
# Set up the ASE objects.
atoms = read(os.path.join(data_path, "Be_snapshot1.out"))
calculator = mala.MALA(test_parameters, test_network,
data_handler,
reference_data=
["qe.out",
os.path.join(data_path,
"Be_snapshot1.out")])
total_energy_dft_calculation = calculator.data_handler.\
target_calculator.total_energy_dft_calculation
calculator.calculate(atoms, properties=["energy"])
assert np.isclose(total_energy_dft_calculation,
calculator.results["energy"],
atol=accuracy_coarse)
|
"""Testing utilities
"""
from textwrap import dedent
from xml.etree import ElementTree
import time
import datetime
import calendar
def to_utc(a_datetime):
timestamp = time.mktime(a_datetime.timetuple())
return datetime.datetime.utcfromtimestamp(timestamp)
def to_rfc3339(a_datetime):
utc_dt = to_utc(a_datetime)
ms = utc_dt.microsecond / 10000
return utc_dt.strftime('%Y-%m-%dT%H:%M:%S') + '.%02dZ' % ms
class SpreadsheetFeed(object):
"""A helper class for constructing XML spreadsheet feed responses.
:param updated_dt: The datetime at which there was an update to some
element of the spreadsheet feed.
:param dev_email: The @developer.gserviceaccount.com address used to
access the spreadsheets API.
"""
SPREADSHEET_FEED = dedent("""
<?xml version="1.0" encoding="UTF-8"?>
<ns0:feed xmlns:ns0="http://www.w3.org/2005/Atom"
xmlns:ns1="http://a9.com/-/spec/opensearchrss/1.0/">
<ns0:id>https://spreadsheets.google.com/feeds/spreadsheets/private/full</ns0:id>
<ns0:updated>{updated}</ns0:updated>
<ns0:category scheme="http://schemas.google.com/spreadsheets/2006"
term="http://schemas.google.com/spreadsheets/2006#spreadsheet" />
<ns0:title type="text">Available Spreadsheets - {dev_email}</ns0:title>
<ns0:link href="http://docs.google.com" rel="alternate" type="text/html" />
<ns0:link
href="https://spreadsheets.google.com/feeds/spreadsheets/private/full"
rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" />
<ns0:link
href="https://spreadsheets.google.com/feeds/spreadsheets/private/full"
rel="self" type="application/atom+xml" />
<ns1:totalResults>{num_results}</ns1:totalResults>
<ns1:startIndex>1</ns1:startIndex>
{entries}
</ns0:feed>
""").strip('\n')
ENTRY = dedent("""
<ns0:entry>
<ns0:id>https://spreadsheets.google.com/feeds/spreadsheets/private/full/{key}</ns0:id>
<ns0:updated>{updated}</ns0:updated>
<ns0:category scheme="http://schemas.google.com/spreadsheets/2006"
term="http://schemas.google.com/spreadsheets/2006#spreadsheet" />
<ns0:title type="text">{title}</ns0:title>
<ns0:content type="text">{title}</ns0:content>
<ns0:link
href="https://spreadsheets.google.com/feeds/worksheets/{key}/private/full"
rel="http://schemas.google.com/spreadsheets/2006#worksheetsfeed"
type="application/atom+xml" />
<ns0:link href="https://docs.google.com/spreadsheets/d/{key}/edit"
rel="alternate" type="text/html" />
<ns0:link
href="https://spreadsheets.google.com/feeds/spreadsheets/private/full/{key}"
rel="self" type="application/atom+xml" />
<ns0:author>
<ns0:name>{name}</ns0:name>
<ns0:email>{email}</ns0:email>
</ns0:author>
</ns0:entry>
""").strip('\n')
def __init__(self, updated_dt, dev_email):
self.updated = to_rfc3339(updated_dt)
self.dev_email = dev_email
self.entries = []
def add_entry(self, sheet_key, sheet_title, sheet_owner_name,
sheet_owner_email, updated_dt):
"""Adds a spreadsheet entry to the feed.
:param sheet_key: The unique spreadsheet key consisting of 44 Base64
characters.
:param sheet_title: The title of the sheet.
:param sheet_owner_name: The name of the sheet owner. This will be the
full name attached to the Google account.
:param sheet_owner_email: The email of the sheet owner. This will be
the email address attached to the Google account (will probably
end in @gmail.com).
:param updated_dt: The datetime at which the spreadsheet was last
updated.
"""
self.entries.append({
'key': sheet_key,
'title': sheet_title,
'updated': to_rfc3339(updated_dt),
'name': sheet_owner_name,
'email': sheet_owner_email,
})
def to_xml(self):
return ElementTree.fromstring(str(self))
def __str__(self):
entry_strs = [self.ENTRY.format(**entry_dict)
for entry_dict in self.entries]
return self.SPREADSHEET_FEED.format(**{
'updated': self.updated,
'dev_email': self.dev_email,
'num_results': len(self.entries),
'entries': '\n\n'.join(entry_strs),
})
class WorksheetFeed(object):
"""A helper class for constructing XML worksheet feed responses.
:param updated_dt: The datetime at which there was an update to some
element of the spreadsheet feed.
:param user_name: The name associated with the
@developer.gserviceaccount.com account used to access the spreadsheets
API.
:param user_email: The @developer.gserviceaccount.com address used to
access the spreadsheets API.
:param title: The title of the spreadsheet.
:param key: The unique spreadsheet key consisting of 44 Base64 characters.
"""
WORKSHEET_FEED = dedent("""
<?xml version="1.0" encoding="UTF-8"?>
<ns0:feed xmlns:ns0="http://www.w3.org/2005/Atom" xmlns:ns1="http://a9.com/-/spec/opensearchrss/1.0/" xmlns:ns2="http://schemas.google.com/spreadsheets/2006">
<ns0:id>https://spreadsheets.google.com/feeds/worksheets/{key}/private/full</ns0:id>
<ns0:updated>{updated}</ns0:updated>
<ns0:category scheme="http://schemas.google.com/spreadsheets/2006" term="http://schemas.google.com/spreadsheets/2006#worksheet" />
<ns0:title type="text">{title}</ns0:title>
<ns0:link href="https://docs.google.com/spreadsheets/d/{key}/edit" rel="alternate" type="application/atom+xml" />
<ns0:link href="https://spreadsheets.google.com/feeds/worksheets/{key}/private/full" rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml" />
<ns0:link href="https://spreadsheets.google.com/feeds/worksheets/{key}/private/full" rel="http://schemas.google.com/g/2005#post" type="application/atom+xml" />
<ns0:link href="https://spreadsheets.google.com/feeds/worksheets/{key}/private/full" rel="self" type="application/atom+xml" />
<ns0:author>
<ns0:name>{name}</ns0:name>
<ns0:email>{email}</ns0:email>
</ns0:author>
<ns1:totalResults>{num_results}</ns1:totalResults>
<ns1:startIndex>1</ns1:startIndex>
{entries}
</ns0:feed>
""").strip('\n')
ENTRY = dedent("""
<ns0:entry>
<ns0:id>https://spreadsheets.google.com/feeds/worksheets/{key}/private/full/{ws_key}</ns0:id>
<ns0:updated>{updated}</ns0:updated>
<ns0:category
scheme="http://schemas.google.com/spreadsheets/2006"
term="http://schemas.google.com/spreadsheets/2006#worksheet" />
<ns0:title type="text">{ws_title}</ns0:title>
<ns0:content type="text">{ws_title}</ns0:content>
<ns0:link
href="https://spreadsheets.google.com/feeds/list/{key}/{ws_key}/private/full"
rel="http://schemas.google.com/spreadsheets/2006#listfeed"
type="application/atom+xml" />
<ns0:link
href="https://spreadsheets.google.com/feeds/cells/{key}/{ws_key}/private/full"
rel="http://schemas.google.com/spreadsheets/2006#cellsfeed"
type="application/atom+xml" />
<ns0:link
href="https://docs.google.com/spreadsheets/d/{key}/gviz/tq?gid={ws_id}"
rel="http://schemas.google.com/visualization/2008#visualizationApi"
type="application/atom+xml" />
<ns0:link
href="https://docs.google.com/spreadsheets/d/{key}/export?gid={ws_id}&format=csv"
rel="http://schemas.google.com/spreadsheets/2006#exportcsv"
type="text/csv" />
<ns0:link
href="https://spreadsheets.google.com/feeds/worksheets/{key}/private/full/{ws_key}"
rel="self" type="application/atom+xml" />
<ns0:link
href="https://spreadsheets.google.com/feeds/worksheets/{key}/private/full/{ws_key}/{ws_version}"
rel="edit" type="application/atom+xml" />
<ns2:colCount>{num_cols}</ns2:colCount>
<ns2:rowCount>{num_rows}</ns2:rowCount>
</ns0:entry>
""").strip('\n')
def __init__(self, updated_dt, user_name, user_email, title, key):
self.updated = to_rfc3339(updated_dt)
self.user_name = user_name
self.user_email = user_email
self.title = title
self.key = key
self.entries = []
def add_entry(self, ws_key, ws_title, ws_id, ws_version, num_cols,
num_rows, updated_dt):
"""Adds a worksheet entry to the feed.
:param ws_key: The worksheet identifier consisting of 7 Base64
characters.
:param ws_title: The worksheet title.
:param ws_id: The numeric worksheet identifier.
:param ws_version: The current worksheet version identifier consisting
of 5-7 (?) Base64 characters.
:param num_cols: The number of columns in the worksheet.
:param num_rows: The number of rows in the worksheet.
:param updated_dt: The datetime at which the worksheet was last
updated.
"""
self.entries.append({
'key': self.key,
'ws_key': ws_key,
'ws_title': ws_title,
'ws_id': ws_id,
'ws_version': ws_version,
'num_cols': num_cols,
'num_rows': num_rows,
'updated': to_rfc3339(updated_dt),
})
def to_xml(self):
return ElementTree.fromstring(str(self))
def __str__(self):
entry_strs = [self.ENTRY.format(**entry_dict)
for entry_dict in self.entries]
return self.WORKSHEET_FEED.format(**{
'updated': self.updated,
'name': self.user_name,
'email': self.user_email,
'title': self.title,
'key': self.key,
'num_results': len(self.entries),
'entries': '\n\n'.join(entry_strs),
})
|
import os
# Note, it's very important that keys read from the environment have the same name as in the config
class Config(object):
pass
class ProdConfig(Config):
ENV = 'production'
class DevConfig(Config):
ENV = 'development'
DEBUG = True
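# Illustrative sketch (not part of the original config module): one pattern
# consistent with the note above is overriding config keys from identically
# named environment variables inside the app factory, e.g.:
#
#   app.config.from_object(DevConfig)
#   for key in ('ENV', 'DEBUG'):
#       if key in os.environ:
#           app.config[key] = os.environ[key]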
|
import unittest
class TruthTest(unittest.TestCase):
def testAssertTrue(self):
self.assertTrue(True)
def testAssertFalse(self):
self.assertFalse(False)
|
import unittest
from billy.sunlightparsers import MemberParser
from billybot.message_handler import ContactQueryMessageHandler
from billybot.query_handler import MemberQuery
class TestMemberQuery(unittest.TestCase):
def test_narrow_results_by_keyword(self):
data = MemberParser.find_members('D', '02052')
query = MemberQuery(ContactQueryMessageHandler)
query._query_results = data
matches = query._narrow_results('Elizabeth')
self.assertEqual(len(matches), 1)
        self.assertEqual(matches[0][0], "Sen. Elizabeth Warren (D-MA)")
def test_validate_results(self):
data = MemberParser.find_members('D', '02052')
query = MemberQuery(ContactQueryMessageHandler)
query._query_results = data
valid, found = query._validate_results()
self.assertTrue(valid)
self.assertFalse(found)
def test_initialize_results_without_zipcode(self):
query = MemberQuery(ContactQueryMessageHandler)
query._initialize_results('Elizabeth')
self.assertTrue(len(query._query_results) > 1)
self.assertTrue(all(['Elizabeth' in member[0]
for member in query._query_results]))
def test_initialize_results_with_zipcode(self):
query = MemberQuery(ContactQueryMessageHandler)
query._initialize_results('Elizabeth 02052')
self.assertEqual(len(query._query_results), 1)
def test_extract_results(self):
query = MemberQuery(ContactQueryMessageHandler)
query._initialize_results('Elizabeth 02052')
self.assertEqual(query.member_summary, None)
query._extract_results()
self.assertEqual(query.member_summary, "Sen. Elizabeth Warren (D-MA)")
self.assertTrue(type(query.member_data) == dict)
|
"""
The I_wedge measure, as proposed by Griffith et al.
"""
from __future__ import division
from .pid import BasePID
from .. import Distribution
from ..algorithms import insert_meet
from ..multivariate import coinformation
def i_wedge(d, inputs, output):
"""
Compute I_wedge(inputs : output) = I(meet(inputs) : output)
Parameters
----------
d : Distribution
The distribution to compute i_wedge for.
inputs : iterable of iterables
The input variables.
output : iterable
The output variable.
Returns
-------
iwedge : float
The value of I_wedge.
"""
d = d.coalesce(inputs+(output,))
d = Distribution(d.outcomes, d.pmf, sample_space=d.outcomes)
d = insert_meet(d, -1, d.rvs[:-1])
return coinformation(d, [d.rvs[-2], d.rvs[-1]])
class PID_GK(BasePID):
"""
    The Griffith et al. partial information decomposition.
This PID is known to produce negative partial information values.
"""
_name = "I_GK"
_measure = staticmethod(i_wedge)
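# Illustrative usage sketch (not part of the original module); assumes the
# surrounding `dit` package, using the two-input AND gate as an example:
#
#   from dit import Distribution
#   d = Distribution(['000', '010', '100', '111'], [1 / 4] * 4)
#   i_wedge(d, inputs=((0,), (1,)), output=(2,))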
|
from allennlp.common import Params
from allennlp.models import Model, archive_model, load_archive
from allennlp.data import Vocabulary, Dataset, DataIterator, DatasetReader, Tokenizer, TokenIndexer
import argparse
import sys,os
from src.rte.mithun.log import setup_custom_logger
from types import *
from src.scripts.rte.da.train_da import train_da
from src.scripts.rte.da.eval_da import eval_model
from src.scripts.rte.da.train_da import train_model_uofa_version
from src.scripts.rte.da.eval_da import convert_fnc_to_fever_and_annotate
from rte.parikh.reader_uofa import FEVERReaderUofa
from tqdm import tqdm
from rte.mithun.trainer import UofaTrainTest
from retrieval.fever_doc_db import FeverDocDB
from subprocess import call
"""takes a data set and a dictionary of features and generate features based on the requirement.
EG: take claim evidence and create smartner based replaced text
Eg: take claim evidence and create feature vectors for word overlap
Parameters
----------
"""
#todo: eventually when you merge hand crafted features + text based features, you will have to make both the functions return the same thing
def generate_features(zipped_annotated_data,feature,feature_details,reader,mithun_logger,objUofaTrainTest,dataset,length_data):
mithun_logger.info(f"got inside generate_features")
mithun_logger.info(f"value of feature is:{feature}")
mithun_logger.info(f"value of dataset is:{dataset}")
instances = []
for index, (he, be, hl, bl, hw, bw, ht, hd, hfc) in enumerate(zipped_annotated_data):
new_label =""
label = hfc
if(dataset == "fnc"):
if (label == "unrelated"):
continue
else:
if (label == 'discuss'):
new_label = "NOT ENOUGH INFO"
if (label == 'agree'):
new_label = "SUPPORTS"
if (label == 'disagree'):
new_label = "REFUTES"
else :
new_label=label
he_split = he.split(" ")
be_split = be.split(" ")
hl_split = hl.split(" ")
bl_split = bl.split(" ")
hw_split = hw.split(" ")
bw_split = bw.split(" ")
premise_ann=""
hypothesis_ann=""
if (feature=="plain_NER"):
premise_ann, hypothesis_ann = objUofaTrainTest.convert_NER_form_per_sent_plain_NER(he_split, be_split, hl_split,
bl_split, hw_split, bw_split,mithun_logger)
else:
if (feature == "smart_NER"):
premise_ann, hypothesis_ann, found_intersection = objUofaTrainTest.convert_SMARTNER_form_per_sent(he_split,
be_split,
hl_split,
bl_split,hw_split, bw_split,mithun_logger)
# mithun_logger.info(f"value of old label is:{label}")
# mithun_logger.info(f"value of new label is:{new_label}")
# mithun_logger.info(f"value of claim before annotation is:{hw}")
# mithun_logger.info(f"value of evidence before anntoation is is:{bw}")
# mithun_logger.info(f"value of premise_ann is:{premise_ann}")
# mithun_logger.info(f"value of hypothesis_ann is:{hypothesis_ann}")
# mithun_logger.debug(f"value of old label is:{label}")
# mithun_logger.debug(f"value of new label is:{new_label}")
# mithun_logger.debug(f"value of claim before annotation is:{hw}")
# mithun_logger.debug(f"value of evidence before anntoation is is:{bw}")
# mithun_logger.debug(f"value of premise_ann is:{premise_ann}")
# mithun_logger.debug(f"value of hypothesis_ann is:{hypothesis_ann}")
#todo: fixe me. not able to cleanly retrieve boolean values from the config file
# person_c1 = feature_details.pop('person_c1', {})
# lower_case_tokens= feature_details.pop('lower_case_tokens', {})
# update_embeddings= feature_details.pop('update_embeddings', {})
# assert type(person_c1) is str
# assert type(lower_case_tokens) is bool
# assert type(update_embeddings) is bool
#
# if(lower_case_tokens):
# premise_ann=premise_ann.lower(),
# hypothesis_ann=hypothesis_ann.lower()
# mithun_logger.debug(f"value of premise_ann after lower case token is:{premise_ann}")
# mithun_logger.debug(f"value of label after lower case token is:{hypothesis_ann}")
instances.append(reader.text_to_instance(premise_ann, hypothesis_ann, new_label))
if len(instances)==0:
mithun_logger.error("No instances were read from the given filepath {}. ""Is the path correct?")
sys.exit(1)
mithun_logger.info(f"type of instances is :{type(instances)}")
return Dataset(instances)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-p','--param_path',
type=str,
help='path to parameter file describing the model to be trained')
parser.add_argument('-o', '--overrides',
type=str,
default="",
help='a HOCON structure used to override the experiment configuration')
args = parser.parse_args()
'''All of this must be done in this file run_fact_verify.py
1.1 Get list of data sets
1.2 get list of runs (eg: train,dev)
    1.3 for zip (each data-run combination)
    Step2:
    - decide what kind of data it is, e.g. fnc or fever
- extract corresponding data related details from config file Eg: path to annotated folder
- find is it dev or train that must be run
- if dev, extract trained model path
- if train , nothing
- create a logger
- what kinda classifier to run?
3. read data (with input/data folder path from 2.1)
4. create features
4.1 get corresponding details for features from config file
4.2 create features (based on output from 4.1)
8.1 call the corresponding function with input (features) and trained model (if applicable)- return results
9. print results
'''
params = Params.from_file(args.param_path)
uofa_params = params.pop('uofa_params', {})
datasets_to_work_on = uofa_params.pop('datasets_to_work_on', {})
list_of_runs = uofa_params.pop('list_of_runs', {})
assert len(datasets_to_work_on) == len(list_of_runs)
path_to_trained_models_folder = uofa_params.pop('path_to_trained_models_folder', {})
cuda_device = uofa_params.pop('cuda_device', {})
random_seed = uofa_params.pop('random_seed', {})
assert type(path_to_trained_models_folder) is not Params
assert type(cuda_device) is not Params
assert type(random_seed) is not Params
# step 2.1- create a logger
logger_details = uofa_params.pop('logger_details', {})
print(f"value of logger_details is {logger_details}")
print(type(logger_details))
assert type(logger_details) is Params
logger_mode = logger_details.pop('logger_mode', {})
assert type(logger_mode) is not Params
mithun_logger = setup_custom_logger('root', logger_mode, "general_log.txt")
    # All config values used only once are moved outside the for loop. This has to be done because the allennlp pop function clears out the dictionary once it is read.
path_to_saved_db = uofa_params.pop("path_to_saved_db")
# step 4 - generate features
features = uofa_params.pop("features", {})
assert type(features) is not Params
type_of_classifier = uofa_params.pop("type_of_classifier", {})
assert type(type_of_classifier) is str
name_of_trained_model_to_use = uofa_params.pop('name_of_trained_model_to_use', {})
mithun_logger.info((f"value of name_of_trained_model_to_use is: {name_of_trained_model_to_use}"))
assert type(name_of_trained_model_to_use) is str
serialization_dir_base = uofa_params.pop("serialization_dir", {})
    assert type(serialization_dir_base) is str
for (dataset, run_name) in (zip(datasets_to_work_on, list_of_runs)):
#Step 2.2- get relevant config details from config file
mithun_logger.info((f"value of dataset is: {dataset}"))
mithun_logger.info((f"value of run_name is: {run_name}"))
fds= dataset + "_dataset_details"
mithun_logger.info((f"value of fds is: {fds}"))
dataset_details = uofa_params.pop(fds, {})
mithun_logger.info((f"value of dataset_details is: {dataset_details}") )
assert type(dataset_details) is Params
frn= run_name + "_partition_details"
mithun_logger.info((f"value of frn is: {frn}"))
data_partition_details = dataset_details.pop(frn, {})
mithun_logger.info((f"value of data_partition_details is: {data_partition_details}"))
assert type(data_partition_details) is Params
path_to_pyproc_annotated_data_folder = data_partition_details.pop('path_to_pyproc_annotated_data_folder', {})
mithun_logger.info(
(f"value of path_to_pyproc_annotated_data_folder is: {path_to_pyproc_annotated_data_folder}"))
assert type(path_to_pyproc_annotated_data_folder) is not Params
slice_percent = data_partition_details.pop("slice_percent", {})
mithun_logger.info(
(f"value of slice_percent is: {slice_percent}"))
assert type(slice_percent) is int
serialization_dir= serialization_dir_base+dataset+"_"+run_name + "_"+str(slice_percent)
#remove the log folder if it exists.
remove = "rm -rf " + serialization_dir
os.system(remove)
#create the folder.
create = "mkdir -p " + serialization_dir
os.system(create)
mithun_logger.info(
(f"just finished creating a serialization_dir with path:{serialization_dir}"))
# Step 2.6 - find is it dev or train that must be run
# - if dev, extract trained model path
# - if train , nothing
# update: the feverdatareader we are using from the fever code needs the name of trained model. EVen for training. wtf..
# update: so moved it to outside this for loop, since we are accessing it only once using uofa_params.pop anyway
#step 3 -read data
objUofaTrainTest = UofaTrainTest()
if (run_name == "annotation" and dataset == "fnc"):
path_to_trained_models=path_to_trained_models_folder+ name_of_trained_model_to_use
convert_fnc_to_fever_and_annotate(FeverDocDB, path_to_trained_models, mithun_logger,cuda_device,path_to_pyproc_annotated_data_folder)
db = FeverDocDB(path_to_saved_db)
archive = load_archive(path_to_trained_models_folder + name_of_trained_model_to_use, cuda_device)
config = archive.config
ds_params = config["dataset_reader"]
model = archive.model
model.eval()
mithun_logger.info(f"going to initiate FEVERReaderUofa.")
fever_reader = FEVERReaderUofa(db,
sentence_level=ds_params.pop("sentence_level", False),
wiki_tokenizer=Tokenizer.from_params(ds_params.pop('wiki_tokenizer', {})),
claim_tokenizer=Tokenizer.from_params(ds_params.pop('claim_tokenizer', {})),
token_indexers=TokenIndexer.dict_from_params(ds_params.pop('token_indexers', {})))
cwd=os.getcwd()
mithun_logger.info(f"going to start reading data.")
zipped_annotated_data,length_data = fever_reader.read(mithun_logger, cwd+path_to_pyproc_annotated_data_folder)
mithun_logger.info(f"done with reading data. going to generate features.")
data = None
for feature in features:
# todo: right now there is only one feature, NER ONE, so you will get away with data inside this for loop. However, need to dynamically add features
fdl= feature + "_details"
mithun_logger.info(f"value of fdl is:{fdl}")
mithun_logger.info(f"value of feature is:{feature}")
            feature_details = uofa_params.pop(fdl, {})  # use the computed key, not the literal string "fdl"
data=generate_features(zipped_annotated_data, feature, feature_details, fever_reader, mithun_logger,objUofaTrainTest,dataset,length_data)
if(type_of_classifier=="decomp_attention"):
mithun_logger.info(f"found that the type_of_classifier is decomp attention")
if(run_name== "train"):
mithun_logger.info(f"found that the run_name is train. Going to get into is train_model_uofa_version attention")
train_model_uofa_version(params, cuda_device, serialization_dir, slice_percent , mithun_logger,data)
else:
if(run_name== "dev"):
eval_model(data,mithun_logger,path_to_trained_models_folder,name_of_trained_model_to_use,cuda_device)
|
# GameCredits network and client specific constants
# For communicating with other clients
CLIENT_DEFAULT_PORT = 40002
# For client RPC calls
CLIENT_DEFAULT_RPC_PORT = 40001
# Reward halving every xx blocks
SUBSIDY_HALVING_INTERVAL = 840000
# GameCredits network magic number
MAGIC_NUMBER = 0xFBA4C795
# Used to form pay-to-pubkey addresses,
# when hashed using this prefix addresses start with uppercase G
PAY_TO_PUBKEY_VERSION_PREFIX = 38
# Used to form pay to script addresses,
# when hashed using this prefix addresses start with 3
PAY_TO_SCRIPT_VERSION_PREFIX = 5
# Max possible difficulty for the Proof of Work algorithm
MAX_DIFFICULTY = int("0x00000000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", 16)
|
# Copyright 2021 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import six
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
from scp import SCPClient
from oslo_log import log as logging
from oslo_utils import units
LOG = logging.getLogger(__name__)
class Tools(object):
def time_str_to_timestamp(self, time_str, time_pattern):
""" Time str to time stamp conversion
"""
time_stamp = ''
if time_str:
time_array = time.strptime(time_str, time_pattern)
time_stamp = int(time.mktime(time_array) * units.k)
return time_stamp
def timestamp_to_time_str(self, time_stamp, time_pattern):
""" Time stamp to time str conversion
"""
time_str = ''
if time_stamp:
time_stamp = time_stamp / units.k
time_array = time.localtime(time_stamp)
time_str = time.strftime(time_pattern, time_array)
return time_str
@staticmethod
def change_capacity_to_bytes(unit):
unit = unit.upper()
if unit == 'TB':
res = units.Ti
elif unit == 'GB':
res = units.Gi
elif unit == 'MB':
res = units.Mi
elif unit == 'KB':
res = units.Ki
else:
res = 1
return int(res)
@staticmethod
def get_capacity_size(value):
capacity = 0
if value and value != '' and value != '-' and value != '0B':
if value.isdigit():
capacity = float(value)
else:
unit = value[-2:]
capacity = float(value[:-2]) * int(
Tools.change_capacity_to_bytes(unit))
return capacity
@staticmethod
def split_value_map_list(value_info, map_list, is_alert=False, split=":"):
detail_array = value_info.split('\r\n')
value_map = {}
temp_key = ''
for detail in detail_array:
if detail:
string_info = detail.split(split + " ")
key = string_info[0].replace(' ', '')
value = ''
if len(string_info) > 1:
for string in string_info[1:]:
value = string.replace('""', '')
value_map[key] = value
if is_alert and key and len(string_info) > 1:
temp_key = key
continue
if is_alert and temp_key and 'entries' not in detail:
if len(string_info) > 1:
value_map[temp_key] += string_info[1]
elif len(string_info) == 1:
value_map[temp_key] += string_info[0]
else:
if value_map != {}:
map_list.append(value_map)
value_map = {}
if value_map != {}:
map_list.append(value_map)
return map_list
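    # Illustrative example (not part of the original class): for hypothetical
    # CLI output with blank-line separated blocks such as
    #     "Name: pool0\r\nSize: 10GB\r\n\r\nName: pool1\r\nSize: 20GB"
    # calling Tools.split_value_map_list(output, []) returns
    #     [{'Name': 'pool0', 'Size': '10GB'}, {'Name': 'pool1', 'Size': '20GB'}]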
@staticmethod
def remove_file_with_same_type(file_name, file_path):
file_type = '%s_%s_%s' % (file_name.split('_')[0],
file_name.split('_')[1],
file_name.split('_')[2])
path_dir = os.listdir(file_path)
for file in path_dir:
if file_type in file:
local_file = '%s%s' % (file_path, file)
os.remove(local_file)
@staticmethod
def get_remote_file_to_xml(ssh, file, local_path, remote_path):
root_node = None
local_file = '%s%s' % (local_path, file)
try:
scp_client = SCPClient(ssh.get_transport(),
socket_timeout=15.0)
remote_file = '%s%s' % (remote_path, file)
scp_client.get(remote_file, local_path)
root_node = open(local_file).read()
root_node = ET.fromstring(root_node)
except Exception as e:
err_msg = "Failed to copy statics file: %s" % \
(six.text_type(e))
LOG.error(err_msg)
finally:
if os.path.exists(local_file):
Tools.remove_file_with_same_type(file, local_path)
return root_node
|
from stft import stft_framework
from stqft import stqft_framework
from frontend import frontend, signal, transform
from tests import *
frontend.enableInteractive()
TOPIC = "speech"
# speechSignal = '../dataset/zero/4a1e736b_nohash_2.wav' #male clear
# speechSignal = '../dataset/zero/0fa1e7a9_nohash_1.wav' #male noise
# speechSignal = '../dataset/zero/7ea032f3_nohash_3.wav' #male questionary
# speechSignal = '../dataset/zero/8e05039f_nohash_4.wav' #female clear
speechSignal = '../dataset/zero/4634529e_nohash_2.wav' #female noise
windowLength = 2**10
overlapFactor=0.5
windowType='hanning'
print("Initializing Signal")
y = signal(samplingRate=16000, signalType='file', path=speechSignal)
y.show(subplot=[1,3,1])
print("Processing STFT")
stft = transform(stft_framework)
y_hat_stft, f ,t = stft.forward(y, nSamplesWindow=windowLength, overlapFactor=overlapFactor, windowType=windowType)
y_hat_stft_p, f_p, t_p = stft.postProcess(y_hat_stft, f ,t, scale='mel', fmax=4000)
stft.show(y_hat_stft_p, f_p, t_p, subplot=[1,3,2])
print("Processing STQFT")
stqft = transform(stqft_framework, suppressPrint=True, minRotation=0.2)
y_hat_stqft, f, t = stqft.forward(y, nSamplesWindow=windowLength, overlapFactor=overlapFactor, windowType=windowType)
y_hat_stqft_p, f_p, t_p = stqft.postProcess(y_hat_stqft, f ,t, scale='mel', fmax=4000)
stqft.show(y_hat_stqft_p, f_p, t_p, subplot=[1,3,3])
# grader_inst = grader()
# y_hat_diff = grader_inst.correlate2d(y_hat_stft_p, y_hat_stqft_p)
# grader_inst.show(y_hat_diff, f_p, t=t_p, subplot=[1,4,4])
print("Showing all figures")
frontend.primeTime() # Show all with blocking |
#!/bin/env python3
try:
invalid = 10 / 0
except ZeroDivisionError as e:
print('catch except: ', e)
finally:
print('finally...')
try:
invalid = 10 / 1
except ZeroDivisionError as e:
print('catch except: ', e)
finally:
print('finally...') |
from async.http.constants import *
|
import os
BASE_DIR = os.path.dirname(__file__)
SAML_PATH = os.path.join(BASE_DIR, 'saml')
TEMPLATE_PATH = os.path.join(BASE_DIR, 'templates')
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 12 15:03:12 2018
@author: jgoldstein
read in decklist and make Deck objects
"""
import os
import io
from collections import defaultdict
from glob import glob
class Deck:
def __init__(self, from_dicts=None, min_main=60, max_side=15):
self._maindeck = {}
self._sideboard = {}
self.min_main = min_main
self.max_side = max_side
if from_dicts is not None:
self.set_decklist(*from_dicts)
@classmethod
def from_file(cls, filename):
"""
Read in decklist from the given filename
Parameters
----------
filename: str
path to the decklist file
"""
# create dictionaries that set the value of any key to 0 by default
maindeck = defaultdict(int)
sideboard = defaultdict(int)
with open(filename, 'r') as f:
# start by reading maindeck, flip to sideboard
# if we encounter a newline or the word sideboard
read_sideboard = False
for line in f:
if line == '\n' or 'sideboard' in line.lower():
read_sideboard = True
continue
# read number of copies and cardname
vals = line.split(maxsplit=1)
try:
if vals[0][-1] == 'x':
num = int(vals[0][:-1])
else:
num = int(vals[0])
except ValueError:
raise IOError('Could not read number of cards in line: {!r}'.format(line))
cardname = vals[1].strip()
if read_sideboard:
sideboard[cardname] += num
else:
maindeck[cardname] += num
        return cls(from_dicts=(maindeck, sideboard))
def set_decklist(self, maindeck, sideboard):
"""
Set decklist directly with dictionaries
        Parameters
        ----------
maindeck: dict
dictionary with maindeck cards
sideboard: dict
dictionary with sideboard cards
"""
num_main = sum(maindeck.values())
num_side = sum(sideboard.values())
if num_main < self.min_main:
            raise ValueError('Maindeck of {} cards does not meet minimum of {}'.format(num_main, self.min_main))
if num_side > self.max_side:
raise ValueError('Sideboard of {} cards exceeds maximum of {}'.format(num_side, self.max_side))
self._maindeck = maindeck
self._sideboard = sideboard
def __str__(self):
s = io.StringIO()
print('Mainboard', file=s)
for card in self._maindeck:
print('{} {}'.format(self._maindeck[card], card), file=s)
        print('\nSideboard', file=s)
for card in self._sideboard:
print('{} {}'.format(self._sideboard[card], card), file=s)
return s.getvalue()
def save_decklist(self, savefile):
"""
Save decklist to a file
Parameters
----------
savefile: str
Path to the file to save in
"""
with open(savefile, 'w') as f:
for card in self._maindeck:
f.write('{} {}\n'.format(self._maindeck[card], card))
f.write('\n')
for card in self._sideboard:
f.write('{} {}\n'.format(self._sideboard[card], card))
@classmethod
def read_folder(cls, folder, ext='.dck'):
"""
Read in all decklists from a folder
Parameters
----------
folder: str
Path to the directory with decklist files
ext: str
default = '.dck'
Extension of decklist files
Returns
-------
list
list with Decks read from folder
"""
deck_files = glob(os.path.join(folder, '*'+ext))
return [cls.from_file(f) for f in deck_files]
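# Illustrative usage sketch (not part of the original module); the decklist
# paths are hypothetical:
#
#   deck = Deck.from_file('decks/burn.dck')
#   print(deck)                          # prints maindeck and sideboard
#   deck.save_decklist('decks/burn_copy.dck')
#   decks = Deck.read_folder('decks')    # all *.dck files in the folder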
|
"""
Desafio 015 (Challenge 015)
Problem: Write a program that asks for the number of km driven
         by a rented car and the number of days it was rented.
         Calculate the price to pay, knowing that the car costs
         R$60 per day and R$0.15 per km driven.
Solution:
"""
dia = int(input('Days rented: '))
km = int(input('Km driven: '))
valor = (dia * 60) + (km * 0.15)
print('Total to pay: R${:.2f}'.format(valor))
|
import logging
from django.http import HttpResponse
dblog = logging.getLogger('dblog.' + __name__)
def test_dblog(request):
dblog.info('Testing dblog in view.')
return HttpResponse('OK', content_type='text/plain')
|
#!/usr/bin/env python3
"""
Shooting Stars
This program lights up the LEDs on arm 1 one at a time and
then fades them. Then arm 2. Then arm 3.
....................
Functions:
- shooting_star_1: lights up the LEDs on arm 1 one at a time and
then fades them.
- shooting_star_2: lights up the LEDs on arm 2 one at a time and
then fades them.
- shooting_star_3: lights up the LEDs on arm 3 one at a time and
then fades them.
....................
Requirements:
PyGlow.py (many thanks to benleb for this program)
bfp_piglow_modules.py
You will have these files if you downloaded the entire repository.
....................
Author: Paul Ryan
This program was written on a Raspberry Pi using the Geany IDE.
"""
########################################################################
# Import modules #
########################################################################
import logging
from time import sleep
from PyGlow import PyGlow
from bfp_piglow_modules import print_header
from bfp_piglow_modules import check_log_directory
from bfp_piglow_modules import delete_empty_logs
from bfp_piglow_modules import stop
########################################################################
# Initialize #
########################################################################
PYGLOW = PyGlow()
PYGLOW.all(0)
########################################################################
# Functions #
########################################################################
def shooting_star_1():
"""
Turn on Arm 1 LEDS and fade
"""
LOGGER.debug("Shooting Star 1")
sleep_speed = 0.01
# Turn on A1L1
PYGLOW.led(1, 60)
sleep(sleep_speed)
# Turn on A1L2
PYGLOW.led(2, 60)
sleep(sleep_speed)
# Fade A1L1
PYGLOW.led(1, 50)
sleep(sleep_speed)
# Turn on A1L3
PYGLOW.led(3, 60)
sleep(sleep_speed)
# Fade A1L1 - 2
PYGLOW.led(1, 40)
sleep(sleep_speed)
PYGLOW.led(2, 50)
sleep(sleep_speed)
# Turn on A1L4
PYGLOW.led(4, 60)
sleep(sleep_speed)
# Fade A1L1 - 3
PYGLOW.led(1, 30)
sleep(sleep_speed)
PYGLOW.led(2, 40)
sleep(sleep_speed)
PYGLOW.led(3, 50)
sleep(sleep_speed)
# Turn on A1L5
PYGLOW.led(5, 60)
sleep(sleep_speed)
# Fade A1L1 - 4
PYGLOW.led(1, 20)
sleep(sleep_speed)
PYGLOW.led(2, 30)
sleep(sleep_speed)
PYGLOW.led(3, 40)
sleep(sleep_speed)
PYGLOW.led(4, 50)
sleep(sleep_speed)
# Turn on A1L6
PYGLOW.led(6, 60)
sleep(sleep_speed)
# Fade A1L1 - 5
PYGLOW.led(1, 10)
sleep(sleep_speed)
PYGLOW.led(2, 20)
sleep(sleep_speed)
PYGLOW.led(3, 30)
sleep(sleep_speed)
PYGLOW.led(4, 40)
sleep(sleep_speed)
PYGLOW.led(5, 50)
sleep(sleep_speed)
# Fade A1L1 - 6
PYGLOW.led(1, 0)
sleep(sleep_speed)
PYGLOW.led(2, 10)
sleep(sleep_speed)
PYGLOW.led(3, 20)
sleep(sleep_speed)
PYGLOW.led(4, 30)
sleep(sleep_speed)
PYGLOW.led(5, 40)
sleep(sleep_speed)
PYGLOW.led(6, 50)
sleep(sleep_speed)
# Fade A1L2 - 6
PYGLOW.led(2, 0)
sleep(sleep_speed)
PYGLOW.led(3, 10)
sleep(sleep_speed)
PYGLOW.led(4, 20)
sleep(sleep_speed)
PYGLOW.led(5, 30)
sleep(sleep_speed)
PYGLOW.led(6, 40)
sleep(sleep_speed)
    # Fade A1L3 - 6
PYGLOW.led(3, 0)
sleep(sleep_speed)
PYGLOW.led(4, 10)
sleep(sleep_speed)
PYGLOW.led(5, 20)
sleep(sleep_speed)
PYGLOW.led(6, 30)
sleep(sleep_speed)
    # Fade A1L4 - 6
PYGLOW.led(4, 0)
sleep(sleep_speed)
PYGLOW.led(5, 10)
sleep(sleep_speed)
PYGLOW.led(6, 20)
sleep(sleep_speed)
    # Fade A1L5 - 6
PYGLOW.led(5, 0)
sleep(sleep_speed)
PYGLOW.led(6, 10)
sleep(sleep_speed)
    # Fade A1L6
PYGLOW.led(6, 0)
sleep(sleep_speed)
sleep(2)
def shooting_star_2():
"""
Turn on Arm 2 LEDS and fade
"""
LOGGER.debug("Shooting Star 2")
sleep_speed = 0.01
# Turn on A2L7
PYGLOW.led(7, 60)
sleep(sleep_speed)
# Turn on A2L8
PYGLOW.led(8, 60)
sleep(sleep_speed)
# Fade A2L7
PYGLOW.led(7, 50)
sleep(sleep_speed)
# Turn on A2L9
PYGLOW.led(9, 60)
sleep(sleep_speed)
# Fade A2L7 - 8
PYGLOW.led(7, 40)
sleep(sleep_speed)
PYGLOW.led(8, 50)
sleep(sleep_speed)
# Turn on A2L10
PYGLOW.led(10, 60)
sleep(sleep_speed)
# Fade A2L7 - 9
PYGLOW.led(7, 30)
sleep(sleep_speed)
PYGLOW.led(8, 40)
sleep(sleep_speed)
PYGLOW.led(9, 50)
sleep(sleep_speed)
# Turn on A2L11
PYGLOW.led(11, 60)
sleep(sleep_speed)
# Fade A2L7 - 10
PYGLOW.led(7, 20)
sleep(sleep_speed)
PYGLOW.led(8, 30)
sleep(sleep_speed)
PYGLOW.led(9, 40)
sleep(sleep_speed)
PYGLOW.led(10, 50)
sleep(sleep_speed)
# Turn on A2L12
PYGLOW.led(12, 60)
sleep(sleep_speed)
# Fade A2L7 - 11
PYGLOW.led(7, 10)
sleep(sleep_speed)
PYGLOW.led(8, 20)
sleep(sleep_speed)
PYGLOW.led(9, 30)
sleep(sleep_speed)
PYGLOW.led(10, 40)
sleep(sleep_speed)
PYGLOW.led(11, 50)
sleep(sleep_speed)
# Fade A2L7 - 12
PYGLOW.led(7, 0)
sleep(sleep_speed)
PYGLOW.led(8, 10)
sleep(sleep_speed)
PYGLOW.led(9, 20)
sleep(sleep_speed)
PYGLOW.led(10, 30)
sleep(sleep_speed)
PYGLOW.led(11, 40)
sleep(sleep_speed)
PYGLOW.led(12, 50)
sleep(sleep_speed)
# Fade A2L8 - 12
PYGLOW.led(8, 0)
sleep(sleep_speed)
PYGLOW.led(9, 10)
sleep(sleep_speed)
PYGLOW.led(10, 20)
sleep(sleep_speed)
PYGLOW.led(11, 30)
sleep(sleep_speed)
PYGLOW.led(12, 40)
sleep(sleep_speed)
# Fade A2L9 - 12
PYGLOW.led(9, 0)
sleep(sleep_speed)
PYGLOW.led(10, 10)
sleep(sleep_speed)
PYGLOW.led(11, 20)
sleep(sleep_speed)
PYGLOW.led(12, 30)
sleep(sleep_speed)
# Fade A2L10 - 12
PYGLOW.led(10, 0)
sleep(sleep_speed)
PYGLOW.led(11, 10)
sleep(sleep_speed)
PYGLOW.led(12, 20)
sleep(sleep_speed)
# Fade A2L11 - 12
PYGLOW.led(11, 0)
sleep(sleep_speed)
PYGLOW.led(12, 10)
sleep(sleep_speed)
# Fade A2L12
PYGLOW.led(12, 0)
sleep(sleep_speed)
sleep(2)
def shooting_star_3():
"""
Turn on Arm 3 LEDS and fade
"""
LOGGER.debug("Shooting Star 3")
sleep_speed = 0.01
# Turn on A3L13
PYGLOW.led(13, 60)
sleep(sleep_speed)
# Turn on A3L14
PYGLOW.led(14, 60)
sleep(sleep_speed)
# Fade A3L13
PYGLOW.led(13, 50)
sleep(sleep_speed)
# Turn on A3L15
PYGLOW.led(15, 60)
sleep(sleep_speed)
# Fade A3L13 - 14
PYGLOW.led(13, 40)
sleep(sleep_speed)
PYGLOW.led(14, 50)
sleep(sleep_speed)
# Turn on A3L16
PYGLOW.led(16, 60)
sleep(sleep_speed)
# Fade A3L13 - 15
PYGLOW.led(13, 30)
sleep(sleep_speed)
PYGLOW.led(14, 40)
sleep(sleep_speed)
PYGLOW.led(15, 50)
sleep(sleep_speed)
# Turn on A3L17
PYGLOW.led(17, 60)
sleep(sleep_speed)
# Fade A3L13 - 16
PYGLOW.led(13, 20)
sleep(sleep_speed)
PYGLOW.led(14, 30)
sleep(sleep_speed)
PYGLOW.led(15, 40)
sleep(sleep_speed)
PYGLOW.led(16, 50)
sleep(sleep_speed)
# Turn on A3L18
PYGLOW.led(18, 60)
sleep(sleep_speed)
# Fade A3L13 - 17
PYGLOW.led(13, 10)
sleep(sleep_speed)
PYGLOW.led(14, 20)
sleep(sleep_speed)
PYGLOW.led(15, 30)
sleep(sleep_speed)
PYGLOW.led(16, 40)
sleep(sleep_speed)
PYGLOW.led(17, 50)
sleep(sleep_speed)
# Fade A3L13 - 18
PYGLOW.led(13, 0)
sleep(sleep_speed)
PYGLOW.led(14, 10)
sleep(sleep_speed)
PYGLOW.led(15, 20)
sleep(sleep_speed)
PYGLOW.led(16, 30)
sleep(sleep_speed)
PYGLOW.led(17, 40)
sleep(sleep_speed)
PYGLOW.led(18, 50)
sleep(sleep_speed)
# Fade A3L14 - 18
PYGLOW.led(14, 0)
sleep(sleep_speed)
PYGLOW.led(15, 10)
sleep(sleep_speed)
PYGLOW.led(16, 20)
sleep(sleep_speed)
PYGLOW.led(17, 30)
sleep(sleep_speed)
PYGLOW.led(18, 40)
sleep(sleep_speed)
# Fade A3L15 - 18
PYGLOW.led(15, 0)
sleep(sleep_speed)
PYGLOW.led(16, 10)
sleep(sleep_speed)
PYGLOW.led(17, 20)
sleep(sleep_speed)
PYGLOW.led(18, 30)
sleep(sleep_speed)
# Fade A3L16 - 18
PYGLOW.led(16, 0)
sleep(sleep_speed)
PYGLOW.led(17, 10)
sleep(sleep_speed)
PYGLOW.led(18, 20)
sleep(sleep_speed)
# Fade A3L17 - 18
PYGLOW.led(17, 0)
sleep(sleep_speed)
PYGLOW.led(18, 10)
sleep(sleep_speed)
# Fade A3L18
PYGLOW.led(18, 0)
sleep(sleep_speed)
sleep(2)
def main():
"""
This is the main function.
"""
LOGGER.debug("START")
# 1, 2, and 3
shooting_star_1()
shooting_star_2()
shooting_star_3()
# 2, 3, 1
shooting_star_2()
shooting_star_3()
shooting_star_1()
# 3, 1, 2
shooting_star_3()
shooting_star_1()
shooting_star_2()
# 1, 3, 2
shooting_star_1()
shooting_star_3()
shooting_star_2()
# 3, 2, 1
shooting_star_3()
shooting_star_2()
shooting_star_1()
# 2, 1, 3
shooting_star_2()
shooting_star_1()
shooting_star_3()
LOGGER.debug("END")
delete_empty_logs(LOG)
stop()
if __name__ == '__main__':
try:
        # STEP01: Check if Log directory exists.
check_log_directory()
# STEP02: Enable logging
LOG = 'Logs/shooting_stars.log'
LOG_FORMAT = '%(asctime)s %(name)s: %(funcName)s: \
%(levelname)s: %(message)s'
LOGGER = logging.getLogger(__name__)
# Nothing will log unless logging level is changed to DEBUG
LOGGER.setLevel(logging.ERROR)
FORMATTER = logging.Formatter(fmt=LOG_FORMAT,
datefmt='%m/%d/%y %I:%M:%S %p:')
FILE_HANDLER = logging.FileHandler(LOG, 'w')
FILE_HANDLER.setFormatter(FORMATTER)
LOGGER.addHandler(FILE_HANDLER)
# STEP03: Print header
print_header()
# STEP04: Print instructions in white text
print("\033[1;37;40mPress Ctrl-C to stop the program.")
# STEP05: Run the main function
main()
except KeyboardInterrupt:
delete_empty_logs(LOG)
stop()
|
# -*- coding:utf-8 -*-
import time
from splitter import Corpus
__author__ = u'gree-gorey'
def main():
t1 = time.time()
# new_corpus = Corpus(u'/home/gree-gorey/CorpusTemp/')
new_corpus = Corpus(u'/home/gree-gorey/CorpusClean/')
# new_corpus = Corpus(u'/home/gree-gorey/stupid/')
# new_corpus = Corpus(u'/home/gree-gorey/tested_tested/')
for text in new_corpus.texts(u'txt'):
# text.mystem_analyzer()
text.normalize(mode=u'write')
text.treetagger_analyzer()
text.write_pos_ann()
t2 = time.time()
print t2 - t1
if __name__ == '__main__':
main()
|
import os
from celery import Celery
os.environ.setdefault('DJANGO_SETTINGS_MODULE','webs.settings')
app = Celery('webs')
app.config_from_object('django.conf:settings',namespace='CELERY')
app.autodiscover_tasks() |
from collections.abc import MutableMapping
import pandas as pd
import regex as re
class LexiconsDict(MutableMapping):
def __init__(self, *args, **kwargs):
self.store = dict()
self.update(dict(*args, **kwargs))
def __getitem__(self, key):
# e.g. gender_adj_basic (we don't store those directly, but under
# 'gender' as a data frame -- for memory efficiency and this also
# allows to directly access that dataframe through lexicons['gender'])
# e.g. to check what properties it supports, or query it directly
if key not in self.store and "_" in key and \
key.split("_")[0] in self.store and \
type(self.store[key.split("_")[0]]) == pd.DataFrame:
parts = key.split("_")
term = parts[0]
df = self.store[term]
# TODO: for now we assume the sets of possible values are different
# for each of the properties
val2property = {}
for p in df.columns:
for val in df[p].unique():
if type(val) == int:
continue
val2property[val] = p
try:
# if this remains None then we return index (term)
form_to_ret = None
for pval in parts:
if pval == term:
continue
if pval == "all":
# don't filter anything, return all terms
break
# if there is a RANK in the df then allow to query
# for top x terms, e.g. names_top100
top_match = re.match(r'^top([0-9]+)$', pval)
# when another column's value is to be returned instead of
# the index (e.g. another form of a verb)
if pval.upper() in df.columns:
form_to_ret = pval.upper()
elif top_match:
if "RANK" in df.columns:
rank = int(top_match.group(1))
df = df[df["RANK"] <= rank]
else:
prop = val2property[pval]
df = df[df[prop] == pval]
except Exception:
raise KeyError
# indexed by "TERM" (see read_csv_terms in fill_the_lexicon)
if not form_to_ret:
return list(df.index)
else:
return list(df[form_to_ret])
return self.store[key]
def __setitem__(self, key, value):
self.store[key] = value
def __delitem__(self, key):
del self.store[key]
def __iter__(self):
return iter(self.store)
def __len__(self):
return len(self.store)
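# Illustrative usage sketch (hypothetical data, not part of this module): assuming
# a pandas DataFrame of names indexed by TERM with a RANK column has been stored
# under the key "names", composite keys are resolved by __getitem__ as follows:
#
#     lexicons = LexiconsDict()
#     lexicons["names"] = names_df    # plain keys return the stored DataFrame
#     lexicons["names_top100"]        # list of terms whose RANK <= 100
#     lexicons["names_all"]           # list of every term in the frame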
|
#!/usr/bin/python
# This Python script will read the contents of the vault_password file and its output will be fed back to
# the caller, which is Ansible Vault.
# When editing this file, please make sure you are using "LF" as line ending instead of the usual "CRLF",
# otherwise running ansible-vault from Docker container will fail!
# In case you're using Visual Studio Code, please check this SO article for instructions:
# https://stackoverflow.com/a/39532890.
# Read vault_password file using read mode.
file = open("/opt/ansible-vault-password/vault_password", "r")
# Print the contents of the file to the console
print file.read() |
import torch
from catalyst.utils import metrics
def test_hitrate():
"""
Tests for catalyst.utils.metrics.hitrate metric.
"""
assert (
metrics.hitrate(torch.tensor([[1], [2], [3]]), torch.tensor([2, 3, 4]))
== 0
)
assert (
metrics.hitrate(torch.tensor([[1], [2], [3]]), torch.tensor([1, 2, 3]))
== 1
)
assert (
metrics.hitrate(
torch.tensor([[2, 0], [1, 3], [4, 6]]), torch.tensor([2, 3, 6])
)
== 1
)
assert (
metrics.hitrate(
torch.tensor([[1, 2], [3, 4], [5, 6], [7, 8]]),
torch.tensor([1, 3, 1, 2]),
)
== 0.5
)
assert (
metrics.hitrate(
torch.tensor([[2, 1], [4, 3], [6, 5], [8, 7]]),
torch.tensor([1, 3, 1, 2]),
)
== 0.5
)
assert (
metrics.hitrate(
torch.tensor([[4, 3], [6, 5], [8, 7], [2, 1]]),
torch.tensor([3, 1, 2, 1]),
)
== 0.5
)
assert (
metrics.hitrate(
torch.tensor([[2, 1], [4, 3], [6, 5], [8, 7]]),
torch.tensor([3, 5, 7, 9]),
)
== 0
)
|
import jax
import jax.numpy as jnp
import numpy as np
from absl.testing import absltest, parameterized # pylint: disable=no-name-in-module
from jax import test_util as jtu
from jax.config import config
from jax.experimental.sparse.ops import COO, CSR
from spax import ops
from spax.test_utils import random_uniform
# pylint: disable=undefined-variable
config.parse_flags_with_absl()
ALL_TYPES = (CSR, COO, jnp.ndarray)
class SparseOpsTest(jtu.JaxTestCase):
@parameterized.named_parameters(
jtu.cases_from_list(
{
"testcase_name": "_{}_{}".format(
jtu.format_shape_dtype_string(shape, dtype),
sparse_type.__name__,
),
"shape": shape,
"dtype": dtype,
"sparse_type": sparse_type,
}
for sparse_type in (COO, CSR)
for shape in ((7, 11), (1, 13), (13, 1))
for dtype in (np.float32, np.float64)
)
)
def test_to_dense(self, sparse_type, shape, dtype):
mat = random_uniform(jax.random.PRNGKey(0), shape, dtype=dtype, fmt=jnp.ndarray)
sp = sparse_type.fromdense(mat)
redense = ops.to_dense(sp)
self.assertAllClose(redense, mat)
@parameterized.named_parameters(
jtu.cases_from_list(
{
"testcase_name": "_{}_{}".format(
jtu.format_shape_dtype_string(shape, dtype),
sparse_type.__name__,
),
"shape": shape,
"dtype": dtype,
"sparse_type": sparse_type,
}
for sparse_type in (COO, CSR, jnp.ndarray)
for shape in ((7, 11), (1, 13), (13, 1))
for dtype in (np.float32, np.float64)
)
)
def test_add_sparse(self, sparse_type, shape, dtype):
k0, k1 = jax.random.split(jax.random.PRNGKey(0))
mat0 = random_uniform(k0, shape, dtype=dtype, fmt=sparse_type)
mat1 = random_uniform(k1, shape, dtype=dtype, fmt=sparse_type)
actual = ops.to_dense(ops.add(mat0, mat1))
expected = ops.to_dense(mat0) + ops.to_dense(mat1)
self.assertAllClose(actual, expected)
@parameterized.named_parameters(
jtu.cases_from_list(
{
"testcase_name": "_{}_{}_r{}".format(
jtu.format_shape_dtype_string(shape, dtype),
sparse_type.__name__,
other_rank,
),
"shape": shape,
"dtype": dtype,
"sparse_type": sparse_type,
"other_rank": other_rank,
}
for sparse_type in (COO, CSR, jnp.ndarray)
for shape in ((7, 11), (1, 13), (13, 1))
for dtype in (np.float32, np.float64)
for other_rank in (0, 1, 2, 3)
)
)
def test_add_array(self, sparse_type, shape, other_rank, dtype):
k0, k1 = jax.random.split(jax.random.PRNGKey(0))
mat = random_uniform(k0, shape, dtype=dtype, fmt=sparse_type)
if other_rank > len(shape):
shape = tuple(range(2, 2 + len(shape) - other_rank)) + shape
else:
shape = shape[-other_rank:]
v = jax.random.uniform(k1, shape=shape, dtype=dtype)
actual = ops.to_dense(ops.add(mat, v))
expected = ops.to_dense(mat) + v
self.assertAllClose(actual, expected)
@parameterized.named_parameters(
jtu.cases_from_list(
{
"testcase_name": "_{}_{}_r{}".format(
jtu.format_shape_dtype_string(shape, dtype),
sparse_type.__name__,
other_rank,
),
"shape": shape,
"dtype": dtype,
"sparse_type": sparse_type,
"other_rank": other_rank,
}
for sparse_type in (COO, CSR, jnp.ndarray)
for shape in ((7, 11), (1, 13), (13, 1))
for dtype in (np.float32, np.float64)
for other_rank in (0, 1, 2, 3)
)
)
def test_mul_array(self, sparse_type, shape, other_rank, dtype):
k0, k1 = jax.random.split(jax.random.PRNGKey(0))
mat = random_uniform(k0, shape, dtype=dtype, fmt=sparse_type)
if other_rank > len(shape):
shape = tuple(range(2, 2 + len(shape) - other_rank)) + shape
else:
shape = shape[-other_rank:]
v = jax.random.uniform(k1, shape=shape, dtype=dtype)
        # assumes spax exposes an element-wise ops.mul alongside ops.add
        actual = ops.to_dense(ops.mul(mat, v))
        expected = ops.to_dense(mat) * v
self.assertAllClose(actual, expected)
@parameterized.named_parameters(
jtu.cases_from_list(
{
"testcase_name": "_{}_{}".format(
jtu.format_shape_dtype_string((nx, ny, nh), dtype),
sparse_type.__name__,
),
"nx": nx,
"ny": ny,
"nh": nh,
"dtype": dtype,
"sparse_type": sparse_type,
}
for sparse_type in (COO, CSR, jnp.ndarray)
for dtype in (np.float32, np.float64)
for nx in (5,)
for ny in (7,)
for nh in (11,)
)
)
def test_masked_matmul(self, nx, ny, nh, dtype, sparse_type):
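        # masked_matmul(mat, x, y) is expected to return the values of (x @ y)
        # evaluated only at mat's stored (non-zero) positions; with_data then
        # rebuilds a matrix with mat's sparsity pattern and those values
        # (inferred from the dense reference computation below).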
keys = jax.random.split(jax.random.PRNGKey(0), 3)
mat = random_uniform(keys[0], (nx, ny), dtype=dtype, fmt=sparse_type)
x = jax.random.uniform(keys[1], (nx, nh), dtype=dtype)
y = jax.random.uniform(keys[2], (nh, ny), dtype=dtype)
actual = ops.to_dense(ops.with_data(mat, ops.masked_matmul(mat, x, y)))
xt = x @ y
expected = jnp.where(ops.to_dense(mat) != 0.0, xt, jnp.zeros_like(xt))
self.assertAllClose(actual, expected)
@parameterized.named_parameters(
jtu.cases_from_list(
{
"testcase_name": "_{}_{}".format(
jtu.format_shape_dtype_string((nx, ny, nh), dtype),
sparse_type.__name__,
),
"nx": nx,
"ny": ny,
"nh": nh,
"dtype": dtype,
"sparse_type": sparse_type,
}
for sparse_type in (COO, CSR, jnp.ndarray)
for dtype in (np.float32, np.float64)
for nx in (5,)
for ny in (7,)
for nh in (11,)
)
)
def test_masked_inner(self, nx, ny, nh, dtype, sparse_type):
keys = jax.random.split(jax.random.PRNGKey(0), 3)
mat = random_uniform(keys[0], (nx, ny), dtype=dtype, fmt=sparse_type)
x = jax.random.uniform(keys[1], (nh, nx), dtype=dtype)
y = jax.random.uniform(keys[2], (nh, ny), dtype=dtype)
actual = ops.to_dense(ops.with_data(mat, ops.masked_inner(mat, x, y)))
xt = x.T @ y
expected = jnp.where(ops.to_dense(mat) != 0.0, xt, jnp.zeros_like(xt))
self.assertAllClose(actual, expected)
@parameterized.named_parameters(
jtu.cases_from_list(
{
"testcase_name": "_{}_{}".format(
jtu.format_shape_dtype_string((nx, ny), dtype),
sparse_type.__name__,
),
"nx": nx,
"ny": ny,
"dtype": dtype,
"sparse_type": sparse_type,
}
for sparse_type in (COO, CSR, jnp.ndarray)
for dtype in (np.float32, np.float64)
for nx in (5,)
for ny in (7,)
)
)
def test_masked_outer(self, nx, ny, dtype, sparse_type):
keys = jax.random.split(jax.random.PRNGKey(0), 3)
mat = random_uniform(keys[0], (nx, ny), dtype=dtype, fmt=sparse_type)
x = jax.random.uniform(keys[1], (nx,), dtype=dtype)
y = jax.random.uniform(keys[2], (ny,), dtype=dtype)
actual = ops.to_dense(ops.with_data(mat, ops.masked_outer(mat, x, y)))
xt = jnp.outer(x, y)
expected = jnp.where(ops.to_dense(mat) != 0.0, xt, jnp.zeros_like(xt))
self.assertAllClose(actual, expected)
@parameterized.named_parameters(
jtu.cases_from_list(
{
"testcase_name": "_{}_{}".format(
jtu.format_shape_dtype_string((size, size), dtype),
sparse_type.__name__,
),
"size": size,
"dtype": dtype,
"sparse_type": sparse_type,
}
for sparse_type in (COO, CSR, jnp.ndarray)
for dtype in (np.float32, np.float64)
for size in (5, 7)
)
)
def test_symmetrize(self, size, dtype, sparse_type):
mat = random_uniform(
jax.random.PRNGKey(0), (size, size), dtype=dtype, fmt=sparse_type
)
actual = ops.symmetrize(mat)
expected = ops.to_dense(mat)
expected = (expected + expected.T) / 2
self.assertAllClose(ops.to_dense(actual), expected)
@parameterized.named_parameters(
jtu.cases_from_list(
{
"testcase_name": f"_{sparse_type.__name__}_{axis}",
"sparse_type": sparse_type,
"axis": axis,
}
for sparse_type in (COO, jnp.ndarray)
for axis in (0, 1, -1)
)
)
def test_boolean_mask(self, sparse_type, axis):
shape = (7, 11)
dtype = jnp.float32
k0, k1 = jax.random.split(jax.random.PRNGKey(0), 2)
mat = random_uniform(k0, shape, dtype=dtype, fmt=sparse_type)
mask = jax.random.uniform(k1, (shape[axis],)) > 0.5
expected = ops.to_dense(mat)
if axis == 0:
expected = expected[mask]
else:
expected = expected[:, mask]
actual = ops.to_dense(ops.boolean_mask(mat, mask, axis=axis))
self.assertAllClose(actual, expected)
@parameterized.named_parameters(
jtu.cases_from_list(
{
"testcase_name": f"_{sparse_type.__name__}_{axis}",
"sparse_type": sparse_type,
"axis": axis,
}
for sparse_type in (COO, jnp.ndarray)
for axis in (0, 1, -1)
)
)
def test_gather(self, sparse_type, axis):
shape = (7, 11)
dtype = jnp.float32
k0, k1 = jax.random.split(jax.random.PRNGKey(0), 2)
mat = random_uniform(k0, shape, dtype=dtype, fmt=sparse_type)
mask = jax.random.uniform(k1, (shape[axis],)) > 0.5
(indices,) = jnp.where(mask)
del mask
expected = ops.to_dense(mat)
if axis == 0:
expected = expected[indices]
else:
expected = expected[:, indices]
actual = ops.to_dense(ops.gather(mat, indices, axis=axis))
self.assertAllClose(actual, expected)
@parameterized.named_parameters(
jtu.cases_from_list(
{
"testcase_name": f"_{sparse_type.__name__}_{axis}",
"sparse_type": sparse_type,
"axis": axis,
}
for sparse_type in (COO, CSR, jnp.ndarray)
for axis in (0, 1, -1)
)
)
def test_sum(self, sparse_type, axis):
shape = (7, 11)
dtype = jnp.float32
mat = random_uniform(jax.random.PRNGKey(0), shape, dtype=dtype, fmt=sparse_type)
expected = ops.sum(mat, axis=axis)
actual = ops.to_dense(mat).sum(axis=axis)
self.assertAllClose(actual, expected)
@parameterized.named_parameters(
jtu.cases_from_list(
{
"testcase_name": f"_{sparse_type.__name__}_{axis}",
"sparse_type": sparse_type,
"axis": axis,
}
for sparse_type in (COO, jnp.ndarray)
for axis in (0, 1, -1)
)
)
def test_max(self, sparse_type, axis):
shape = (7, 11)
dtype = jnp.float32
mat = random_uniform(jax.random.PRNGKey(0), shape, dtype=dtype, fmt=sparse_type)
expected = ops.max(mat, axis=axis)
actual = ops.to_dense(mat).max(axis=axis)
self.assertAllClose(actual, expected)
@parameterized.named_parameters(
jtu.cases_from_list(
{
"testcase_name": f"_{sparse_type.__name__}_{axis}_{ord}",
"sparse_type": sparse_type,
"axis": axis,
"ord": ord,
}
for sparse_type in (COO, jnp.ndarray)
for axis in (0, 1, -1)
for ord in (1, 2, jnp.inf)
)
)
def test_norm(
self,
sparse_type,
ord,
axis, # pylint: disable=redefined-builtin
):
shape = (7, 11)
dtype = jnp.float32
mat = random_uniform(jax.random.PRNGKey(0), shape, dtype=dtype, fmt=sparse_type)
mat = ops.map_data(mat, lambda d: d - 0.5) # make sure we have some negatives
expected = ops.norm(mat, ord=ord, axis=axis)
actual = jnp.linalg.norm(ops.to_dense(mat), ord=ord, axis=axis)
self.assertAllClose(actual, expected)
@parameterized.named_parameters(
jtu.cases_from_list(
{
"testcase_name": "_{}_{}".format(
jtu.format_shape_dtype_string((nx, ny, nh), dtype),
sparse_type.__name__,
),
"nx": nx,
"ny": ny,
"nh": nh,
"dtype": dtype,
"sparse_type": sparse_type,
}
for sparse_type in (COO, CSR, jnp.ndarray)
for dtype in (np.float32, np.float64)
for nx in (5,)
for ny in (7,)
for nh in (11,)
)
)
def test_masked_outer_rank2(self, nh, nx, ny, dtype, sparse_type):
keys = jax.random.split(jax.random.PRNGKey(0), 3)
mat = random_uniform(keys[0], (nx, ny), dtype=dtype, fmt=sparse_type)
x = jax.random.uniform(keys[1], (nx, nh), dtype=dtype)
y = jax.random.uniform(keys[2], (ny, nh), dtype=dtype)
actual = ops.to_dense(ops.with_data(mat, ops.masked_outer(mat, x, y)))
xt = x @ y.T
expected = jnp.where(ops.to_dense(mat) == 0, jnp.zeros_like(xt), xt)
self.assertAllClose(actual, expected)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
|
"""
Handle splitting of IPA graphemes into chunks, so that
diacritics go with the non-diacritic characters they modify.
"""
from typing import List, Optional, Tuple, Callable
def split_by_phonetes(some_text: str) -> List[str]:
"""
Splits text in the International Phonetic Alphabet by
phones. This is also called tokenization.
Note: it does not recognize affricates, unless a tie-bar
is provided.
"""
return parse_start(some_text)
def parse_start(some_text: str) -> List[str]:
"""
Start parsing some IPA text, in order to
chunk it into phonemes.
:param some_text: some text in IPA
:return: the same text but split into a list
"""
return split_by_phonetes_prepostdiacrtic(some_text)
def split_by_phonetes_prediacritic(text: str) -> List[str]:
"""
Handle situations where the diacritic character occurs
before the main character.
Handle strings like "ⁿd".
If it doesn't find a main character with a diacritic before
it, it will look for a diacritic after the main character.
    :param text: text that may contain prediacritics
:return: a list of IPA characters split
"""
result: Optional[Tuple[str, str]] = prediacritic_parser_function(text)
if result is None:
return split_by_phonetes_postdiacrtic(text)
return [result[0]] + parse_start(result[1])
def split_by_phonetes_prepostdiacrtic(text: str) -> List[str]:
"""
Handle "ⁿdʰ", "ⁿdʷʰ" and other text strings
where a phoneme is represented in IPA by
a segmental preceded and followed by at least
one diacritic
"""
result: Optional[Tuple[str, str]] = prepostdiacritic_parser_function(text)
if result is None:
return split_by_phonetes_prediacritic(text)
(chunk, rest) = result
return [chunk] + parse_start(rest)
def split_by_phonetes_postdiacrtic(text: str) -> List[str]:
"""
Try to split IPA text into a list of IPA text, each element
representing phonemes. Handle "dʰ", etc.
:param text: a string of IPA text not yet split into phonemes
:return: a list of phonemes represented by IPA text
"""
result: Optional[Tuple[str, str]] = postdiacritic_parser_function(text)
if result is None:
return split_by_phonetes_nondiacrtic(text)
(chunk, rest) = result
return [chunk] + parse_start(rest)
def split_by_phonetes_nondiacrtic(text: str) -> List[str]:
"""
Handle "d", "t", etc. and situations where there is no diacritic.
:param text: text containing IPA text
:return: a list of phonemes
"""
result: Optional[Tuple[str, str]] = nondiacritic_parser_function(text)
if result is None:
return [text] # stop parsing!
(chunk, rest) = result
return [chunk] + parse_start(rest)
def nondiacritic_parser_function(text: str) -> Optional[Tuple[str, str]]:
"""
    Parse the part that does not contain diacritics (except for a possible tie-bar).
    :param text: text containing IPA
    :return: a tuple with the parsed part first, and the
part not yet parsed after.
"""
if len(text) > 0 and is_segmental(text[0]):
if is_tie_bar_at(1, text):
return text[:3], text[3:]
return text[:1], text[1:]
return None
def is_consonant_at(index: int, some_text: str) -> bool:
"""
Whether the character in the string at a certain place,
represents a consonant.
:param index: an index within the range of 0, and the length of the string argument
:param some_text: a text string
:return: true if it is a consonant
"""
return is_such_at(is_consonant, index, some_text)
def is_consonant(a_char: str) -> bool:
"""
Whether a character is one that is used in the
International Phonetic Alphabet to represent a
consonant.
"""
return elem_w(consonants)(a_char)
def is_segmental_at(index: int, some_text: str) -> bool:
"""
Whether a character in some text, at a specific place
within the text is a "segmental" (i.e. not a diacritic or modifier).
"""
return is_such_at(is_segmental, index, some_text)
def is_segmental(a_char: str) -> bool:
"""
Whether a character is one that is used in the
International Phonetic Alphabet to represent something
that is not a diacritic, and can stand on its own.
This means characters that can represent a
consonant or vowel.
"""
return elem_w(strict_segmentals)(a_char)
def is_exponential_after_at(index: int, some_text: str) -> bool:
"""
Whether a character is a diacritic that can go after
the main character.
:param index: a number indicating where the character is in the text
:param some_text: the text that contains the character
:return: true if the character can be a diacritic after the main character
"""
return is_such_at(is_exponential_after, index, some_text)
def is_tie_bar_at(index: int, some_text: str) -> bool:
"""
Whether a character at a certain place in a string,
is the tie-bar diacritic.
:param index: a number telling which character in the string
:param some_text: the string (some text)
:return: true if it is a tie-bar
"""
return is_such_at(is_tie_bar, index, some_text)
def is_such_at(func: Callable[[str], bool], index: int, text: str) -> bool:
"""
    Whether the character at a given index in a string is of a certain class.
:param func: a function
:param index: a number indicating which character in the text
:param text: a string
:return: whether it is true
"""
return index < len(text) and func(text[index])
def is_exponential_after(a_char: str) -> bool:
"""
Whether a character is a superscript character, that
often goes after a full character to modify the full
character's meaning.
For example in the International Phonetic Alphabet,
a superscript `h` causes the phoneme represented by the
previous character to
be aspirated.
"""
return elem_w(exponentials_after)(a_char)
def is_exponential_before(a_char: str) -> bool:
"""
    Whether a character is a diacritic that can go
before a main character.
"""
return elem_w(exponentials_before)(a_char)
def is_tie_bar(a_character: str) -> bool:
"""
Whether a character is used to tie two characters in the
international phonetic alphabet together. The tie bar is
usually used to indicate an affricate, or double-articulation.
"""
return a_character in ["͜", "͡"]
def elem_w(string_list: List[str]) -> Callable[[str], bool]:
"""
Create a function that sees whether
a character is equal to (the first character in) an element
in a list of text
"""
return lambda x: x in map(lambda y: y[0], string_list)
def prediacritic_parser_function(text: str) -> Optional[Tuple[str, str]]:
"""
Gets a pre-diacritic exponential with a segmental,
the segmental may have a tie bar.
If it has a tie-bar the character after the tie-bar
is also included. These
are returned in the first part of the tuple.
the text not yet parsed is in the second part
of the tuple.
"""
if (
not len(text) == 0
and is_exponential_before(text[0])
and is_segmental_at(1, text)
):
if is_tie_bar_at(2, text):
# include tie bar and character after it.
return text[:4], text[4:]
return text[:2], text[2:]
return None
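# For example (illustrative, following the rules above):
#   prediacritic_parser_function("ⁿdu") -> ("ⁿd", "u")
#   prediacritic_parser_function("tu")  -> None  (no prediacritic present)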
def prepostdiacritic_parser_function(text: str) -> Optional[Tuple[str, str]]:
"""
Parse text that contains IPA text, can parse the next phoneme
even if it contains diacritics before and after the main character.
:param text: text to parse
:return: a tuple or nothing. The first part of the tuple is a parsed
phoneme, the second part is the part of the text not parsed yet
"""
preresult: Optional[Tuple[str, str]] = prediacritic_parser_function(text)
if preresult is None:
return None
else:
(prepart, middle) = preresult
if is_exponential_after_at(0, middle):
length_of_first: int = len(prepart)
segmental: str = prepart[(length_of_first - 1) :]
postresult: Optional[Tuple[str, str]] = postdiacritic_parser_function(
segmental + middle
)
if postresult is None:
return None
else:
(postpart, rest) = postresult
return prepart + postpart[1:], rest
else:
return None
def postdiacritic_parser_function(text: str) -> Optional[Tuple[str, str]]:
"""
Parse IPA text that can contain a diacritic after.
:param text: text to attempt to parse
:return: nothing if it was not parsable, otherwise a tuple with what
was parsed first (IPA text representing a phoneme), and the part not
parsed yet after.
"""
if is_segmental_at(0, text) and is_exponential_after_at(1, text):
number_of_postdiacritics: int = count_post_diacritics_in_a_row(text, 1)
chunk_length: int = number_of_postdiacritics + 1
return text[:chunk_length], text[chunk_length:]
elif (
is_segmental_at(0, text)
and is_tie_bar_at(1, text)
and is_exponential_after_at(2, text)
):
number_of_postdiacritics: int = count_post_diacritics_in_a_row(text, 3)
chunk_length: int = number_of_postdiacritics + 3
return text[:chunk_length], text[chunk_length:]
else:
return None
def count_post_diacritics_in_a_row(some_text: str, start_index: int) -> int:
"""
Count how many superscript characters occur one after another, at a
specific place in a text (that could modify a previous character).
"""
if is_exponential_after_at(start_index, some_text):
return 1 + count_post_diacritics_in_a_row(some_text, (start_index + 1))
else:
return 0
def is_exponential(character: str) -> bool:
"""
Whether an IPA character is written above the base line
and to the right of the previous character,
like how exponents of a power are written
in mathematical notation.
"""
return character in exponentials
plosivePulmonic: List[str] = [
"p",
"b",
"t",
"d",
"ʈ",
"ɖ",
"c",
"ɟ",
"k",
"g",
"q",
"ɢ",
"ʔ",
]
nasalPulmonic: List[str] = ["m", "ɱ", "n", "ɳ", "ɲ", "ŋ", "ɴ"]
trillPulmonic: List[str] = ["ʙ", "r", "ʀ"]
tapOrFlapPulmonic: List[str] = ["ⱱ", "ɾ", "ɽ"]
fricativePulmonic: List[str] = [
"ɸ",
"β",
"f",
"v",
"θ",
"ð",
"s",
"z",
"ʃ",
"ʒ",
"ʂ",
"ʐ",
"ç",
"ʝ",
"x",
"ɣ",
"χ",
"ʁ",
"ħ",
"ʕ",
"h",
"ɦ",
]
lateral_fricative_pulmonic: List[str] = ["ɬ", "ɮ"]
approximant_pulmonic: List[str] = ["ʋ", "ɹ", "ɻ", "j", "ɰ"]
lateral_approximant_pulmonic: List[str] = ["l", "ɭ", "ʎ", "ʟ"]
consonants_pulmonic: List[str] = (
plosivePulmonic
+ nasalPulmonic
+ trillPulmonic
+ tapOrFlapPulmonic
+ fricativePulmonic
+ lateral_fricative_pulmonic
+ approximant_pulmonic
+ lateral_approximant_pulmonic
)
consonants_nonpulmonic: List[str] = [
"ʘ",
"ɓ", # Bilabial
"ǀ", # Dental
"ɗ", # Dental/alveolar
"ǃ", # (Post)alveolar
"ʄ",
"ǂ",
"ɠ",
"ǁ",
"ʛ",
]
other_symbols: List[str] = [
"ʍ",
"ɕ",
"w",
"ʑ",
"ɥ",
"ɺ",
"ʜ",
"ɧ",
"ʢ",
"ʡ",
]
consonants: List[str] = consonants_pulmonic + consonants_nonpulmonic + other_symbols
vowels: List[str] = [
"i",
"y",
"ɨ",
"ʉ",
"ɯ",
"u", # Close
"ɪ",
"ʏ",
"ʊ", # Close-mid
"e",
"ø",
"ɘ",
"ɵ",
"ɤ",
"o", # Open-mid
"ə",
"ɛ",
"œ",
"ɜ",
"ɞ",
"ʌ",
"ɔ", # Open-mid
"æ",
"ɐ",
"a",
"ɶ",
"ɑ",
"ɒ", # Open
]
strict_segmentals: List[str] = consonants + vowels
""" IPA text that is not a semantic modifier to what is before or after it.
This includes vowels, and consonants. It excludes all diacritics.
"""
diacritics_and_suprasegmentals: List[str] = [
"̥", # Voiceless
"̊", # Voiceless (diacritic placed above symbol with descender)
"̤", # Breathy voiced
# End of first row.
"̬", # Voiced
"̰", # Creaky voiced
"̺", # Apical
# End of second row.
"ʰ", # Aspirated
"̼", # Linguolabial
"̻", # Laminal
# End of third row.
"̹", # More rounded
"ʷ", # Labialised
"̃", # Nasalised
# End of fourth row.
"̜", # Less rounded
"ʲ", # Palatalised
"ⁿ", # Pre/post nasalised
"̟", # Advanced
"ˠ", # Velarised
"ˡ", # Lateral release
"̠", # Retracted
"ˤ", # Pharyngealised
"̚", # No audible release
"̈", # Centralised
"̽", # Mid centralised
"̝", # Raised
"̩", # Syllabic
"̞", # Lowered
"̯", # Non-syllabic
"̘", # Advanced tongue root
"˞", # Rhoticity
"̙", # Retracted tongue root
"ʼ", # Ejective
"̍", # Syllabic (diacritic placed above)
"̪", # Dental
"̣", # Closer variety/Fricative
"̇", # Palatalization/Centralization
]
exponentials_before: List[str] = ["ⁿ"]
exponentials_after: List[str] = diacritics_and_suprasegmentals + ["ː", "ˑ", "̆"]
exponentials: List[str] = exponentials_before + exponentials_after
# To do: find a more suitable name than exponentials.
# They only look like exponentials if you consider how they
# look similar to mathematical notation for exponentials.
# Really, they should be named something different.
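# Minimal usage sketch (illustrative only). Note that, as written, parse_start
# returns the unparsed remainder as the final list element, so a fully consumed
# string ends with an empty string.
if __name__ == "__main__":
    print(split_by_phonetes("tʰu"))   # -> ['tʰ', 'u', '']  (trailing '' is the remainder)
    print(split_by_phonetes("ⁿdu"))   # -> ['ⁿd', 'u', '']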
|
def fatorial(n, show=False):
"""
    -> Calculates the factorial of a number.
    :param n: the number whose factorial will be computed.
    :param show: (optional) whether or not to show the calculation.
    :return: the factorial of the number n.
"""
f = 1
for c in range(n, 0, -1):
if show:
print(c, end='')
if c > 1:
print(' x ', end='')
else:
print(' = ', end='')
f *= c
return f
n = int(input('Enter a value: '))
while True:
    r = input('Do you want to see the factorial calculation? [Y/N]: ').upper()
    if r == 'Y':
        print(fatorial(n, show=True))
        break
    if r == 'N':
        print(fatorial(n))
        break
    print('ERROR! Please enter Y or N.')
print('DONE!')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
CLI for the celltrack application. It is useful mainly for adding ndpi files to a common xlsx file
"""
from loguru import logger
import sys
import click
from pathlib import Path
import ast
# print("start")
# from . import image
# print("start 5")
# print("start 6")
from celltrack import celltrack_app
from celltrack import app_tools
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
# print("Running __main__.py")
# @batch_detect.command(context_settings=CONTEXT_SETTINGS)
# @click.argument("image_stack_dir", type=click.Path(exists=True))
# @click.argument("working_dir", type=click.Path())
# @click.option("--create-icon", is_flag=True,
# help="Create desktop icon"
# )
@click.group(context_settings=CONTEXT_SETTINGS, invoke_without_command=True)
@click.option(
"--log-level",
"-ll",
# type=,
help="Set logging level",
default="INFO",
)
@click.pass_context
def run(ctx, log_level, *args, **kwargs):
logger.debug("CLI run...")
if log_level is not None:
logger.debug(f"changing log level to {log_level}")
# try:
# log_level = int(log_level)
# except ValueError as e:
# log_level = log_level.upper()
# logger.remove()
# i = logger.add(sys.stderr, level=log_level, colorize=True)
# logger.debug("log level changed")
if ctx.invoked_subcommand is None:
# click.echo('I was invoked without subcommand')
logger.debug("invoke subcommand gui")
ctx.invoke(gui, *args, **kwargs)
# a.main()
else:
logger.debug(f"invoke subcommand {ctx.invoked_subcommand} {args} {kwargs}")
# Invoked automatically
@run.command(context_settings=CONTEXT_SETTINGS, help="Set persistent values")
@click.option(
"--common-spreadsheet-file",
help="Set path for common spreadsheet file.",
type=click.Path(),
)
def set(common_spreadsheet_file=None):
mainapp = celltrack_app.CellTrack()
if common_spreadsheet_file is not None:
mainapp.set_common_spreadsheet_file(path=common_spreadsheet_file)
logger.info(f"Common spreadsheet file path is : {common_spreadsheet_file}")
print(f"Common spreadsheet file path is : {common_spreadsheet_file}")
# def print_params(params):
# algorithm.Scaffan().parameters.
# params.
@run.command(context_settings=CONTEXT_SETTINGS)
@click.option(
"--params",
"-p",
multiple=True,
default="",
nargs=2,
help='Set parameter. First argument is path to parameter separated by ";". Second is the value.'
"python -m celltrack gui -p Processing,Show True",
)
@click.option("--print-params", "-pp", is_flag=True, help="Print parameters")
def gui(params, print_params):
mainapp = celltrack_app.CellTrack()
app_tools.set_parameters_by_path(mainapp.parameters, params)
if print_params:
import pprint
pprint.pprint(app_tools.params_and_values(mainapp.parameters))
exit()
# for param in params:
# mainapp.set_parameter(param[0], value=ast.literal_eval(param[1]))
# mainapp.parameters.param(*param[0].split(";")).setValue(ast.literal_eval(param[1]))
mainapp.start_gui()
@run.command(
context_settings=CONTEXT_SETTINGS, help="Create an icon on Windows platform"
)
def install():
import platform
    print(platform.system())
if platform.system() == "Windows":
logger.info("Creating icon")
logger.warning("TODO")
from .app_tools import create_icon
import pathlib
logo_fn2 = pathlib.Path(__file__).parent / pathlib.Path("celltrack_icon512.ico")
create_icon(
"CellTrack", logo_fn2, conda_env_name="celltrack", package_name="celltrack"
)
@run.command(context_settings=CONTEXT_SETTINGS)
@click.option(
"--input-path",
"-i",
type=click.Path(exists=True),
help="Path to input directory with video files.",
default=None,
)
@click.option(
"--common-xlsx",
"-o",
type=click.Path(),
help="Path to common xlsx file.",
default=None,
)
@click.option(
"--params",
"-p",
multiple=True,
default="",
nargs=2,
help='Set parameter. First argument is path to parameter separated by ";". Second is the value.'
"python -m celltrack gui -p Processing,Show True",
)
@click.option("--print-params", "-pp", is_flag=True, help="Print parameters")
def nogui(input_path, common_xlsx, params, print_params):
print("nogui")
logger.debug(
f"input path={input_path}, output_path={common_xlsx}, params={params}"
)
mainapp = celltrack_app.CellTrack()
logger.debug(f"Celltrack created")
app_tools.set_parameters_by_path(mainapp.parameters, params)
if print_params:
import pprint
pprint.pprint(app_tools.params_and_values(mainapp.parameters))
exit()
# for param in params:
# logger.debug(f"param={param}")
# mainapp.parameters.param(*param[0].split(";")).setValue(ast.literal_eval(param[1]))
logger.debug(f"common xlsx: {common_xlsx}")
if common_xlsx is not None:
mainapp.set_common_spreadsheet_file(common_xlsx)
# logger.debug(f"common xlsx: {mainapp.report.com}")
logger.debug(f"before input file: {input_path}")
if input_path is not None:
logger.debug(f"Setting new input file from CLI: {input_path}")
mainapp.set_input_file(input_path)
mainapp.run()
|
# -*- coding: utf-8 -*-
"""
jishaku.inspections test
~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2021 Devon (Gorialis) R
:license: MIT, see LICENSE for more details.
"""
import collections # for __iadd__ test
import nextcord
import pytest
from utils import run_async
from jishaku.repl.inspections import all_inspections
@pytest.mark.parametrize(
"target",
[
4,
nextcord.Client, # cover type subclasses
tuple, # cover many-subclass truncation
[False, 1, "2", 3.0], # cover content types
collections.Counter, # cover inplace operators
run_async # cover current-working-directory inspections
]
)
def test_object_inspection(target):
for _, _ in all_inspections(target):
pass
|
#!/usr/bin/env python3
import torch
from ..._utils.approximation_methods import approximation_parameters
from ..._utils.attribution import LayerAttribution, GradientAttribution
from ..._utils.batching import _batched_operator
from ..._utils.common import (
_reshape_and_sum,
_format_input_baseline,
_validate_input,
_format_additional_forward_args,
_format_attributions,
_expand_additional_forward_args,
_expand_target,
)
from ..._utils.gradient import compute_layer_gradients_and_eval
class InternalInfluence(LayerAttribution, GradientAttribution):
def __init__(self, forward_func, layer, device_ids=None):
r"""
Args:
forward_func (callable): The forward function of the model or any
modification of it
layer (torch.nn.Module): Layer for which attributions are computed.
Output size of attribute matches this layer's input or
output dimensions, depending on whether we attribute to
the inputs or outputs of the layer, corresponding to
attribution of each neuron in the input or output of
this layer.
device_ids (list(int)): Device ID list, necessary only if forward_func
applies a DataParallel model. This allows reconstruction of
intermediate outputs from batched results across devices.
If forward_func is given as the DataParallel model itself,
then it is not necessary to provide this argument.
"""
LayerAttribution.__init__(self, forward_func, layer, device_ids)
GradientAttribution.__init__(self, forward_func)
def attribute(
self,
inputs,
baselines=None,
target=None,
additional_forward_args=None,
n_steps=50,
method="gausslegendre",
internal_batch_size=None,
attribute_to_layer_input=False,
):
r"""
Computes internal influence by approximating the integral of gradients
for a particular layer along the path from a baseline input to the
given input.
If no baseline is provided, the default baseline is the zero tensor.
More details on this approach can be found here:
https://arxiv.org/pdf/1802.03788.pdf
Note that this method is similar to applying integrated gradients and
taking the layer as input, integrating the gradient of the layer with
respect to the output.
Args:
inputs (tensor or tuple of tensors): Input for which internal
influence is computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
            baselines (scalar, tensor, tuple of scalars or tensors, optional):
Baselines define a starting point from which integral
is computed and can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or the first
dimension is one and the remaining dimensions match
with inputs.
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In the cases when `baselines` is not provided, we internally
use zero scalar corresponding to each input tensor.
Default: None
target (int, tuple, tensor or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (tuple, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples. It will be
repeated for each of `n_steps` along the integrated
path. For all other types, the given argument is used
for all forward evaluations.
Note that attributions are not computed with respect
to these arguments.
Default: None
n_steps (int, optional): The number of steps used by the approximation
method. Default: 50.
method (string, optional): Method for approximating the integral,
one of `riemann_right`, `riemann_left`, `riemann_middle`,
`riemann_trapezoid` or `gausslegendre`.
Default: `gausslegendre` if no method is provided.
internal_batch_size (int, optional): Divides total #steps * #examples
data points into chunks of size internal_batch_size,
which are computed (forward / backward passes)
sequentially.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain internal_batch_size / num_devices examples.
If internal_batch_size is None, then all evaluations
are processed in one batch.
Default: None
attribute_to_layer_input (bool, optional): Indicates whether to
compute the attribution with respect to the layer input
or output. If `attribute_to_layer_input` is set to True
then the attributions will be computed with respect to
layer inputs, otherwise it will be computed with respect
to layer outputs.
Note that currently it is assumed that either the input
or the output of internal layer, depending on whether we
attribute to the input or output, is a single tensor.
Support for multiple tensors will be added later.
Default: False
Returns:
*tensor* or tuple of *tensors* of **attributions**:
- **attributions** (*tensor* or tuple of *tensors*):
Internal influence of each neuron in given
layer output. Attributions will always be the same size
as the output or input of the given layer depending on
whether `attribute_to_layer_input` is set to `False` or
                        `True` respectively.
Attributions are returned in a tuple based on whether
the layer inputs / outputs are contained in a tuple
from a forward hook. For standard modules, inputs of
a single tensor are usually wrapped in a tuple, while
outputs of a single tensor are not.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> # It contains an attribute conv1, which is an instance of nn.conv2d,
>>> # and the output of this layer has dimensions Nx12x32x32.
>>> net = ImageClassifier()
>>> layer_int_inf = InternalInfluence(net, net.conv1)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # Computes layer internal influence.
>>> # attribution size matches layer output, Nx12x32x32
>>> attribution = layer_int_inf.attribute(input)
"""
inputs, baselines = _format_input_baseline(inputs, baselines)
_validate_input(inputs, baselines, n_steps, method)
# Retrieve step size and scaling factor for specified approximation method
step_sizes_func, alphas_func = approximation_parameters(method)
step_sizes, alphas = step_sizes_func(n_steps), alphas_func(n_steps)
# Compute scaled inputs from baseline to final input.
scaled_features_tpl = tuple(
torch.cat(
[baseline + alpha * (input - baseline) for alpha in alphas], dim=0
).requires_grad_()
for input, baseline in zip(inputs, baselines)
)
additional_forward_args = _format_additional_forward_args(
additional_forward_args
)
# apply number of steps to additional forward args
# currently, number of steps is applied only to additional forward arguments
# that are nd-tensors. It is assumed that the first dimension is
# the number of batches.
# dim -> (bsz * #steps x additional_forward_args[0].shape[1:], ...)
input_additional_args = (
_expand_additional_forward_args(additional_forward_args, n_steps)
if additional_forward_args is not None
else None
)
expanded_target = _expand_target(target, n_steps)
# Returns gradient of output with respect to hidden layer.
layer_gradients, _, is_layer_tuple = _batched_operator(
compute_layer_gradients_and_eval,
scaled_features_tpl,
input_additional_args,
internal_batch_size=internal_batch_size,
forward_fn=self.forward_func,
layer=self.layer,
target_ind=expanded_target,
device_ids=self.device_ids,
attribute_to_layer_input=attribute_to_layer_input,
)
# flattening grads so that we can multiply it with step-size
        # calling contiguous to avoid `memory hole` problems
scaled_grads = tuple(
layer_grad.contiguous().view(n_steps, -1)
* torch.tensor(step_sizes).view(n_steps, 1).to(layer_grad.device)
for layer_grad in layer_gradients
)
# aggregates across all steps for each tensor in the input tuple
attrs = tuple(
_reshape_and_sum(
scaled_grad, n_steps, inputs[0].shape[0], layer_grad.shape[1:]
)
for scaled_grad, layer_grad in zip(scaled_grads, layer_gradients)
)
return _format_attributions(is_layer_tuple, attrs)
|
'''
2-D Convolutional Neural Networks using TensorFlow library for Kaggle competition.
Target competition on Kaggle: https://www.kaggle.com/c/digit-recognizer
Author: Taegyun Jeon
Project: https://github.com/tgjeon/cnnForMnist
Train instances: 42000 number images with vector format (1 number = 1 x 784)
Test instances: 20000 number images with vector format (1 number = 1 x 784)
'''
import numpy as np
import pandas as pd
import tensorflow as tf
# Parameters
LEARNING_RATE = 0.001
TRAINING_EPOCHS = 3000
BATCH_SIZE = 100
DISPLAY_STEP = 10
DROPOUT_CONV = 0.8
DROPOUT_HIDDEN = 0.6
VALIDATION_SIZE = 2000 # Set to 0 to train on all available data
# Weight initialization
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
# Weight initialization (Xavier's init)
def weight_xavier_init(n_inputs, n_outputs, uniform=True):
if uniform:
init_range = tf.sqrt(6.0 / (n_inputs + n_outputs))
return tf.random_uniform_initializer(-init_range, init_range)
else:
stddev = tf.sqrt(3.0 / (n_inputs + n_outputs))
return tf.truncated_normal_initializer(stddev=stddev)
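# Note on the initializer above: the uniform branch is Glorot/Xavier initialization,
# sampling from [-sqrt(6/(n_inputs+n_outputs)), +sqrt(6/(n_inputs+n_outputs))] to keep
# activation variance roughly constant across layers; the normal branch approximates
# it with a truncated normal of stddev sqrt(3/(n_inputs+n_outputs)).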
# Bias initialization
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
# 2D convolution
def conv2d(X, W):
return tf.nn.conv2d(X, W, strides=[1, 1, 1, 1], padding='SAME')
# Max Pooling
def max_pool_2x2(X):
return tf.nn.max_pool(X, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# Serve data by batches
def next_batch(batch_size):
global train_images
global train_labels
global index_in_epoch
global epochs_completed
start = index_in_epoch
index_in_epoch += batch_size
    # when all training data has already been used, it is reordered randomly
if index_in_epoch > num_examples:
# finished epoch
epochs_completed += 1
# shuffle the data
perm = np.arange(num_examples)
np.random.shuffle(perm)
train_images = train_images[perm]
train_labels = train_labels[perm]
# start next epoch
start = 0
index_in_epoch = batch_size
assert batch_size <= num_examples
end = index_in_epoch
return train_images[start:end], train_labels[start:end]
# Convert class labels from scalars to one-hot vectors
# 0 => [1 0 0 0 0 0 0 0 0 0]
# 1 => [0 1 0 0 0 0 0 0 0 0]
def dense_to_one_hot(labels_dense, num_classes):
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
'''
Preprocessing for MNIST dataset
'''
# Read MNIST data set (Train data from CSV file)
data = pd.read_csv('./input/train.csv')
# Extracting images and labels from given data
# For images
images = data.iloc[:,1:].values
images = images.astype(np.float)
# Normalize from [0:255] => [0.0:1.0]
images = np.multiply(images, 1.0 / 255.0)
image_size = images.shape[1]
image_width = image_height = np.ceil(np.sqrt(image_size)).astype(np.uint8)
# For labels
labels_flat = data[[0]].values.ravel()
labels_count = np.unique(labels_flat).shape[0]
labels = dense_to_one_hot(labels_flat, labels_count)
labels = labels.astype(np.uint8)
# Split data into training & validation
validation_images = images[:VALIDATION_SIZE]
validation_labels = labels[:VALIDATION_SIZE]
train_images = images[VALIDATION_SIZE:]
train_labels = labels[VALIDATION_SIZE:]
'''
Create model with 2D CNN
'''
# Create Input and Output
X = tf.placeholder('float', shape=[None, image_size]) # mnist data image of shape 28*28=784
Y_gt = tf.placeholder('float', shape=[None, labels_count]) # 0-9 digits recognition => 10 classes
drop_conv = tf.placeholder('float')
drop_hidden = tf.placeholder('float')
# Model Parameters
W1 = tf.get_variable("W1", shape=[5, 5, 1, 32], initializer=weight_xavier_init(5*5*1, 32))
W2 = tf.get_variable("W2", shape=[5, 5, 32, 64], initializer=weight_xavier_init(5*5*32, 64))
W3_FC1 = tf.get_variable("W3_FC1", shape=[64*7*7, 1024], initializer=weight_xavier_init(64*7*7, 1024))
W4_FC2 = tf.get_variable("W4_FC2", shape=[1024, labels_count], initializer=weight_xavier_init(1024, labels_count))
#W1 = weight_variable([5, 5, 1, 32]) # 5x5x1 conv, 32 outputs
#W2 = weight_variable([5, 5, 32, 64]) # 5x5x32 conv, 64 outputs
#W3_FC1 = weight_variable([64 * 7 * 7, 1024]) # FC: 64x7x7 inputs, 1024 outputs
#W4_FC2 = weight_variable([1024, labels_count]) # FC: 1024 inputs, 10 outputs (labels)
B1 = bias_variable([32])
B2 = bias_variable([64])
B3_FC1 = bias_variable([1024])
B4_FC2 = bias_variable([labels_count])
# CNN model
X1 = tf.reshape(X, [-1,image_width , image_height,1]) # shape=(?, 28, 28, 1)
# Layer 1
l1_conv = tf.nn.relu(conv2d(X1, W1) + B1) # shape=(?, 28, 28, 32)
l1_pool = max_pool_2x2(l1_conv) # shape=(?, 14, 14, 32)
l1_drop = tf.nn.dropout(l1_pool, drop_conv)
# Layer 2
l2_conv = tf.nn.relu(conv2d(l1_drop, W2)+ B2) # shape=(?, 14, 14, 64)
l2_pool = max_pool_2x2(l2_conv) # shape=(?, 7, 7, 64)
l2_drop = tf.nn.dropout(l2_pool, drop_conv)
# Layer 3 - FC1
l3_flat = tf.reshape(l2_drop, [-1, W3_FC1.get_shape().as_list()[0]]) # shape=(?, 1024)
l3_feed = tf.nn.relu(tf.matmul(l3_flat, W3_FC1)+ B3_FC1)
l3_drop = tf.nn.dropout(l3_feed, drop_hidden)
# Layer 4 - FC2
Y_pred = tf.nn.softmax(tf.matmul(l3_drop, W4_FC2)+ B4_FC2) # shape=(?, 10)
# Cost function and training
cost = -tf.reduce_sum(Y_gt*tf.log(Y_pred))
regularizer = (tf.nn.l2_loss(W3_FC1) + tf.nn.l2_loss(B3_FC1) + tf.nn.l2_loss(W4_FC2) + tf.nn.l2_loss(B4_FC2))
cost += 5e-4 * regularizer
#train_op = tf.train.AdamOptimizer(LEARNING_RATE).minimize(cost)
train_op = tf.train.RMSPropOptimizer(LEARNING_RATE, 0.9).minimize(cost)
correct_predict = tf.equal(tf.argmax(Y_pred, 1), tf.argmax(Y_gt, 1))
accuracy = tf.reduce_mean(tf.cast(correct_predict, 'float'))
predict = tf.argmax(Y_pred, 1)
'''
TensorFlow Session
'''
epochs_completed = 0
index_in_epoch = 0
num_examples = train_images.shape[0]
# start TensorFlow session
init = tf.initialize_all_variables()
sess = tf.InteractiveSession()
sess.run(init)
# visualisation variables
train_accuracies = []
validation_accuracies = []
DISPLAY_STEP=1
for i in range(TRAINING_EPOCHS):
#get new batch
batch_xs, batch_ys = next_batch(BATCH_SIZE)
# check progress on every 1st,2nd,...,10th,20th,...,100th... step
if i%DISPLAY_STEP == 0 or (i+1) == TRAINING_EPOCHS:
train_accuracy = accuracy.eval(feed_dict={X:batch_xs,
Y_gt: batch_ys,
drop_conv: DROPOUT_CONV,
drop_hidden: DROPOUT_HIDDEN})
if(VALIDATION_SIZE):
validation_accuracy = accuracy.eval(feed_dict={ X: validation_images[0:BATCH_SIZE],
Y_gt: validation_labels[0:BATCH_SIZE],
drop_conv: DROPOUT_CONV, drop_hidden: DROPOUT_HIDDEN})
print('training_accuracy / validation_accuracy => %.2f / %.2f for step %d'%(train_accuracy, validation_accuracy, i))
validation_accuracies.append(validation_accuracy)
else:
print('training_accuracy => %.4f for step %d'%(train_accuracy, i))
train_accuracies.append(train_accuracy)
# increase DISPLAY_STEP
if i%(DISPLAY_STEP*10) == 0 and i:
DISPLAY_STEP *= 10
# train on batch
sess.run(train_op, feed_dict={X: batch_xs, Y_gt: batch_ys, drop_conv: DROPOUT_CONV, drop_hidden: DROPOUT_HIDDEN})
# check final accuracy on validation set
if(VALIDATION_SIZE):
validation_accuracy = accuracy.eval(feed_dict={X: validation_images,
Y_gt: validation_labels,
drop_conv: DROPOUT_CONV, drop_hidden: DROPOUT_HIDDEN})
print('validation_accuracy => %.4f'%validation_accuracy)
# read test data from CSV file
test_images = pd.read_csv('./input/test.csv').values
test_images = test_images.astype(np.float)
# convert from [0:255] => [0.0:1.0]
test_images = np.multiply(test_images, 1.0 / 255.0)
print('test_images({0[0]},{0[1]})'.format(test_images.shape))
# predict test set
#predicted_lables = predict.eval(feed_dict={X: test_images, keep_prob: 1.0})
# using batches is more resource efficient
predicted_lables = np.zeros(test_images.shape[0])
for i in range(0,test_images.shape[0]//BATCH_SIZE):
predicted_lables[i*BATCH_SIZE : (i+1)*BATCH_SIZE] = predict.eval(feed_dict={X: test_images[i*BATCH_SIZE : (i+1)*BATCH_SIZE], drop_conv: 1.0, drop_hidden: 1.0})
# save results
np.savetxt('submission.csv',
np.c_[range(1,len(test_images)+1),predicted_lables],
delimiter=',',
header = 'ImageId,Label',
comments = '',
fmt='%d')
sess.close()
|
import glob
import os
import secrets
from django.contrib.auth import get_user_model
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.management.base import (
BaseCommand,
CommandError,
)
from gifz_api.gifs.models import GIFEntry
class Command(BaseCommand):
help = 'Populate gifs database from directory'
def add_arguments(self, parser):
parser.add_argument('dir_path', nargs='+', type=str)
def handle(self, *args, **options):
if GIFEntry.objects.count() > 1000:
            self.stdout.write('You already have more than 1000 gifs in db.')
return
dir_path = options['dir_path'][0]
if os.path.exists(dir_path):
for filename in glob.glob(os.path.join(dir_path, '*.gif')):
with open(filename, 'rb') as f:
f.seek(0)
inmemory_file = SimpleUploadedFile(filename, f.read())
gif_entry = GIFEntry(
title=secrets.token_hex(nbytes=16),
author=get_user_model().objects.get(username='admin'),
gif_file=inmemory_file,
)
gif_entry.save()
gif_entry.tags.add('funny', 'animals', 'dogs')
inmemory_file.close()
self.stdout.write('Gifs created.')
else:
            raise CommandError('Directory does not exist.')
|
# Copyright 2021 Mobvoi Inc. All Rights Reserved.
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from wenet(https://github.com/wenet-e2e/wenet)
import sys
from pathlib import Path
from typing import List
import numpy as np
import paddle
from paddlespeech.s2t.utils import text_grid
from paddlespeech.s2t.utils import utility
from paddlespeech.s2t.utils.log import Log
logger = Log(__name__).getlog()
__all__ = ["forced_align", "remove_duplicates_and_blank", "insert_blank"]
def remove_duplicates_and_blank(hyp: List[int], blank_id=0) -> List[int]:
"""ctc alignment to ctc label ids.
"abaa-acee-" -> "abaace"
Args:
hyp (List[int]): hypotheses ids, (L)
blank_id (int, optional): blank id. Defaults to 0.
Returns:
        List[int]: remove duplicate ids, then remove blank id.
"""
new_hyp: List[int] = []
cur = 0
while cur < len(hyp):
# add non-blank into new_hyp
if hyp[cur] != blank_id:
new_hyp.append(hyp[cur])
# skip repeat label
prev = cur
while cur < len(hyp) and hyp[cur] == hyp[prev]:
cur += 1
return new_hyp
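# Illustrative usage (a sketch added for clarity; the id mapping a=1, b=2, c=3, e=5 and blank=0
# is assumed only for this example and is not part of the module):
#   remove_duplicates_and_blank([1, 2, 1, 1, 0, 1, 3, 5, 5, 0], blank_id=0)
#   -> [1, 2, 1, 1, 3, 5]            # "abaa-acee-" collapses to "abaace"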
def insert_blank(label: np.ndarray, blank_id: int=0) -> np.ndarray:
"""Insert blank token between every two label token.
"abcdefg" -> "-a-b-c-d-e-f-g-"
Args:
label ([np.ndarray]): label ids, List[int], (L).
blank_id (int, optional): blank id. Defaults to 0.
Returns:
[np.ndarray]: (2L+1).
"""
label = np.expand_dims(label, 1) #[L, 1]
blanks = np.zeros((label.shape[0], 1), dtype=np.int64) + blank_id
label = np.concatenate([blanks, label], axis=1) #[L, 2]
label = label.reshape(-1) #[2L], -l-l-l
label = np.append(label, label[0]) #[2L + 1], -l-l-l-
return label
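# Illustrative usage (a sketch added for clarity, assuming blank_id=0):
#   insert_blank(np.array([1, 2, 3]))
#   -> array([0, 1, 0, 2, 0, 3, 0])  # "abc" becomes "-a-b-c-", i.e. length 2L+1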
def forced_align(ctc_probs: paddle.Tensor, y: paddle.Tensor,
blank_id=0) -> List[int]:
"""ctc forced alignment.
https://distill.pub/2017/ctc/
Args:
ctc_probs (paddle.Tensor): hidden state sequence, 2d tensor (T, D)
y (paddle.Tensor): label id sequence tensor, 1d tensor (L)
blank_id (int): blank symbol index
Returns:
List[int]: best alignment result, (T).
"""
y_insert_blank = insert_blank(y, blank_id) #(2L+1)
log_alpha = paddle.zeros(
(ctc_probs.shape[0], len(y_insert_blank))) #(T, 2L+1)
log_alpha = log_alpha - float('inf') # log of zero
# TODO(Hui Zhang): zeros not support paddle.int16
# self.__setitem_varbase__(item, value) When assign a value to a paddle.Tensor, the data type of the paddle.Tensor not support int16
state_path = (paddle.zeros(
(ctc_probs.shape[0], len(y_insert_blank)), dtype=paddle.int32) - 1
) # state path, Tuple((T, 2L+1))
# init start state
# TODO(Hui Zhang): VarBase.__getitem__() not support np.int64
log_alpha[0, 0] = ctc_probs[0][int(y_insert_blank[0])] # State-b, Sb
log_alpha[0, 1] = ctc_probs[0][int(y_insert_blank[1])] # State-nb, Snb
for t in range(1, ctc_probs.shape[0]): # T
for s in range(len(y_insert_blank)): # 2L+1
if y_insert_blank[s] == blank_id or s < 2 or y_insert_blank[
s] == y_insert_blank[s - 2]:
candidates = paddle.to_tensor(
[log_alpha[t - 1, s], log_alpha[t - 1, s - 1]])
prev_state = [s, s - 1]
else:
candidates = paddle.to_tensor([
log_alpha[t - 1, s],
log_alpha[t - 1, s - 1],
log_alpha[t - 1, s - 2],
])
prev_state = [s, s - 1, s - 2]
# TODO(Hui Zhang): VarBase.__getitem__() not support np.int64
log_alpha[t, s] = paddle.max(candidates) + ctc_probs[t][int(
y_insert_blank[s])]
state_path[t, s] = prev_state[paddle.argmax(candidates)]
# TODO(Hui Zhang): zeros not support paddle.int16
# self.__setitem_varbase__(item, value) When assign a value to a paddle.Tensor, the data type of the paddle.Tensor not support int16
state_seq = -1 * paddle.ones((ctc_probs.shape[0], 1), dtype=paddle.int32)
candidates = paddle.to_tensor([
log_alpha[-1, len(y_insert_blank) - 1], # Sb
log_alpha[-1, len(y_insert_blank) - 2] # Snb
])
prev_state = [len(y_insert_blank) - 1, len(y_insert_blank) - 2]
state_seq[-1] = prev_state[paddle.argmax(candidates)]
for t in range(ctc_probs.shape[0] - 2, -1, -1):
state_seq[t] = state_path[t + 1, state_seq[t + 1, 0]]
output_alignment = []
for t in range(0, ctc_probs.shape[0]):
output_alignment.append(y_insert_blank[state_seq[t, 0]])
return output_alignment
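# Shape sketch (illustrative, not taken from the original module): for y = [7, 12] the blank-
# interleaved label is [0, 7, 0, 12, 0], and with T = 7 frames a valid best alignment could be
#   forced_align(ctc_probs, paddle.to_tensor([7, 12]))  ->  [0, 7, 7, 0, 0, 12, 0]
# i.e. one label id (or blank) per frame, walking monotonically through the interleaved labels.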
def ctc_align(config, model, dataloader, batch_size, stride_ms, token_dict,
result_file):
"""ctc alignment.
Args:
config (cfgNode): config
model (nn.Layer): U2 Model.
dataloader (io.DataLoader): dataloader.
batch_size (int): decoding batchsize.
stride_ms (int): audio feature stride in ms unit.
token_dict (List[str]): vocab list, e.g. ['blank', 'unk', 'a', 'b', '<eos>'].
result_file (str): alignment output file, e.g. /path/to/xxx.align.
"""
if batch_size > 1:
logger.fatal('alignment mode must be running with batch_size == 1')
sys.exit(1)
assert result_file and result_file.endswith('.align')
model.eval()
# conv subsampling rate
subsample = utility.get_subsample(config)
logger.info(f"Align Total Examples: {len(dataloader.dataset)}")
with open(result_file, 'w') as fout:
# one example in batch
for i, batch in enumerate(dataloader):
key, feat, feats_length, target, target_length = batch
# 1. Encoder
encoder_out, encoder_mask = model._forward_encoder(
feat, feats_length) # (B, maxlen, encoder_dim)
maxlen = encoder_out.shape[1]
ctc_probs = model.ctc.log_softmax(
encoder_out) # (1, maxlen, vocab_size)
# 2. alignment
ctc_probs = ctc_probs.squeeze(0)
target = target.squeeze(0)
alignment = forced_align(ctc_probs, target)
logger.info(f"align ids: {key[0]} {alignment}")
fout.write('{} {}\n'.format(key[0], alignment))
# 3. gen praat
# segment alignment
align_segs = text_grid.segment_alignment(alignment)
logger.info(f"align tokens: {key[0]}, {align_segs}")
# IntervalTier, List["start end token\n"]
tierformat = text_grid.align_to_tierformat(align_segs, subsample,
token_dict)
# write tier
align_output_path = Path(result_file).parent / "align"
align_output_path.mkdir(parents=True, exist_ok=True)
tier_path = align_output_path / (key[0] + ".tier")
with tier_path.open('w') as f:
f.writelines(tierformat)
# write textgrid
textgrid_path = align_output_path / (key[0] + ".TextGrid")
second_per_frame = 1. / (1000. /
stride_ms) # 25ms window, 10ms stride
second_per_example = (
len(alignment) + 1) * subsample * second_per_frame
text_grid.generate_textgrid(
maxtime=second_per_example,
intervals=tierformat,
output=str(textgrid_path))
|
# Parsing
from .tiling_parser import *
# Global Placement
from .diffusion_placer import *
from .simulated_annealing_placer import *
from .complete_bipartite_placer import *
|
frase = 'Curso em Video Python'
print(frase.capitalize())
|
#!/usr/bin/python3
'''
############################################################################
->Authors:
    -Montiel Martinez Luis Javier
    -Rodríguez Dávalos Carolina
->Created: 01/11/2020
->Description: Analysis and solution of the 'students and the advisor' problem
############################################################################
'''
import threading
import random
import time
def alumnos(id):
    #Number of questions this student will ask, between 1 and 10
    num_preguntas = random.randint(1, 10)
    while num_preguntas > 0:
        #The student tries to get a chair
        sillas.acquire()
        print('---->Student %d got a chair' %id)
        #This list tracks which students are occupying chairs
        alumnos_en_silla.append(id)
        mutex_primer_alumno.acquire()
        #If this is the first student to get a chair, wake up the professor
        if len(alumnos_en_silla) == 1:
            profe_dormido.release()
        mutex_primer_alumno.release()
        #Decrease the student's number of remaining questions
        num_preguntas = num_preguntas - 1
    print('-------->Student %d has NO more questions and leaves' %id)
def profe():
    while True:
        #If there are students occupying chairs, answer a question
        if len(alumnos_en_silla) > 0:
            print('------>Answering a question...')
            time.sleep(random.random())
            alumno_id = alumnos_en_silla.pop()
            print('->Question of student %d answered' %alumno_id)
            sillas.release()
            print('Student %d left the chair' %alumno_id)
        #Otherwise, go to sleep
        else:
            print('->Professor resting')
            profe_dormido.acquire()
            print('->Professor awake')
#Maximum number of students
num_alumno = 10
#Maximum number of chairs in the office
num_sillas = 2
#List of students currently seated
alumnos_en_silla = []
#Creating semaphores
sillas = threading.Semaphore(num_sillas)
profe_dormido = threading.Semaphore(0)
mutex_primer_alumno = threading.Semaphore(1)
#Creating threads
threading.Thread(target=profe).start()
for alumno_id in range(num_alumno):
threading.Thread(target=alumnos,args=[alumno_id]).start()
|
import sys
import numpy as np
import tensorflow as tf
from cfgs import *
from data import *
from baseline.baseline import *
print("Setting: %s"%(sys.argv[1]))
setting = sys.argv[1]
if setting == "additive_1x2_uniform":
cfg = additive_1x2_uniform_config.cfg
Generator = uniform_01_generator.Generator
elif setting == "additive_1x2_uniform_416_47":
cfg = additive_1x2_uniform_416_47_config.cfg
Generator = uniform_416_47_generator.Generator
elif setting == "additive_1x2_uniform_04_03":
cfg = additive_1x2_uniform_04_03_config.cfg
Generator = uniform_04_03_generator.Generator
elif setting == "additive_1x2_uniform_triangle":
cfg = additive_1x2_uniform_triangle_config.cfg
Generator = uniform_triangle_01_generator.Generator
elif setting == "additive_1x10_uniform":
cfg = additive_1x10_uniform_config.cfg
Generator = uniform_01_generator.Generator
elif setting == "unit_1x2_uniform":
cfg = unit_1x2_uniform_config.cfg
Generator = uniform_01_generator.Generator
elif setting == "unit_1x2_uniform_23":
cfg = unit_1x2_uniform_23_config.cfg
Generator = uniform_23_generator.Generator
else:
print("None selected")
sys.exit(0)
np.random.seed(cfg.test.seed)
generator = Generator(cfg, 'test')
data = np.array([ next(generator.gen_func) for _ in range(cfg.test.num_batches)])
data = data.reshape(-1, cfg.num_items)
print(OptRevOneBidder(cfg, data).opt_rev())
|
# -*- coding: utf-8 -*-
import scrapy
import re
import json
from locations.items import GeojsonPointItem
class BannerHealthSpider(scrapy.Spider):
name = "bannerhealth"
item_attributes = {'brand': "Banner Health"}
allowed_domains = ['bannerhealth.com']
start_urls = (
'https://www.bannerhealth.com/locations?PageNo=ALL',
)
def parse(self, response):
urls = response.xpath('//div[@class="location-link"][2]/a/@href').extract()
for url in urls:
yield scrapy.Request(response.urljoin(url), callback=self.parse_location)
def parse_location(self, response):
try:
locs = response.xpath('//div[@class="text-card-location-image-content"]/p[1]/text()[2]').extract_first()
city, state_postalcode = locs.split(',')
state_postalcode = state_postalcode.strip()
jsondata = json.loads(response.xpath('//div[@data-js="map_canvas-v2"]/@data-map-config').extract_first())
data = jsondata["markerList"]
name = re.search(r'.+/(.+)', response.xpath('//meta[@property="og:url"]/@content').extract_first()).group(1)
ref = re.search(r'.+/(.+)', response.url).group(1)
if " " in state_postalcode:
state, postcode = state_postalcode.split(" ")
state = state.strip()
postcode = postcode.strip()
else:
state = state_postalcode
postcode = None
for locations in data:
location = json.dumps(locations)
location_data = json.loads(location)
properties = {
'ref': ref,
'name': name,
'addr_full': response.xpath('//div[@class="text-card-location-image-content"]/p[1]/text()').extract_first(),
'city': city,
'state': state,
'postcode': postcode,
'phone': response.xpath('//li[@class="text-card-location-image-content-action-list-item"][1]/a/text()').extract_first().strip(),
'lat': float(location_data["Latitude"]),
'lon': float(location_data["Longitude"]),
'website': response.url
}
yield GeojsonPointItem(**properties)
        except Exception:
            # some location pages do not match the expected markup; skip them
            pass
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2016-09-09 19:01:07
# @Author : kkopite ([email protected])
# @Link : kkopitehong.info
# @Version : 1.0
__author__ = 'kkopite'
' url handlers '
import re, time, json, logging, hashlib, base64, asyncio
import markdown2
from aiohttp import web
from coroweb import get, post
from apis import Page,APIValueError, APIResourceNotFoundError, APIPermissionError
from models import User, Comment, Blog, next_id
from config import configs
COOKIE_NAME = 'awesession'
_COOKIE_KEY = configs.session.secret
#return values are handed to the interceptor (response_factory) for processing
_RE_EMAIL = re.compile(r'^[a-z0-9\.\-\_]+\@[a-z0-9\-\_]+(\.[a-z0-9\-\_]+){1,4}$')
_RE_SHA1 = re.compile(r'^[0-9a-f]{40}$')
def check_admin(request):
if request.__user__ is None or not request.__user__.admin:
        raise APIPermissionError()
def get_page_index(page_str):
p = 1
try:
p = int(page_str)
except ValueError as e:
pass
if p < 1:
p = 1
return p
#just a string conversion with no I/O, so it does not need to be async
def user2cookie(user,max_age):
'''
Generate cookie str by user.
'''
#build cookie string by: id-expires-sha1
expires = str(int(time.time() + max_age))
s = '%s-%s-%s-%s' % (user.id,user.passwd,expires,_COOKIE_KEY)
L = [user.id,expires,hashlib.sha1(s.encode('utf-8')).hexdigest()]
return '-'.join(L)
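# Illustrative example of the generated cookie (the values below are invented for this sketch):
#   "001499000000000abcd-1500086400-9f86d081884c7d659a2feaa0c55ad015..."
# i.e. "<uid>-<expires>-<sha1>", where sha1 = SHA1("<uid>-<passwd>-<expires>-<_COOKIE_KEY>"),
# so cookie2user below can recompute the digest and reject tampered cookies.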
#cookie: uid-expires-sha1 (three parts)
#needs a database lookup, hence async
async def cookie2user(cookie_str):
if not cookie_str:
return None
try:
L = cookie_str.split('-')
if len(L) != 3:
return None
uid,expires,sha1 = L
        #expired
if int(expires) < time.time():
return None
user = await User.find(uid)
s = '%s-%s-%s-%s' % (uid,user.passwd,expires,_COOKIE_KEY)
        #check for a forged cookie: the sha1 digest is derived server-side from the user info plus a secret key, so it is hard to fake
if sha1 != hashlib.sha1(s.encode('utf-8')).hexdigest():
logging.info('invalid sha1')
return None
user.passwd = '******'
return user
except Exception as e:
logging.exception(e)
return None
def text2html(text):
lines = map(lambda s: '<p>%s</p>' % s.replace('&', '&').replace('<', '<').replace('>', '>'), filter(lambda s: s.strip() != '', text.split('\n')))
return ''.join(lines)
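# Example (illustrative): text2html('Hello & <world>') -> '<p>Hello &amp; &lt;world&gt;</p>'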
#-------------------------- user-facing pages ----------------------------------------
#home page
@get('/')
async def index(*,page='1'):
page_index = get_page_index(page)
num = await Blog.findNumber('count(id)')
p = Page(num,page_index)
if num == 0:
blogs = []
else:
blogs = await Blog.findAll(orderBy='created_at desc',limit=(p.offset,p.limit))
# return dict(blogs=blogs)
return {
'__template__': 'blogs.html',
'blogs': blogs,
        'page' : p #note: the template needs the Page object itself to work out whether previous/next pages exist
}
#registration page
@get('/register')
async def register():
return {
'__template__':'register.html'
}
#sign-in page
@get('/signin')
async def signin():
return {
'__template__':'signin.html'
}
#sign-in (authentication) action
@post('/api/authenticate')
async def authenticate(*,email,passwd):
if not email:
raise APIValueError('email','Invalid email')
if not passwd:
raise APIValueError('passwd','Invalid password.')
users = await User.findAll('email=?',[email])
if len(users) == 0:
raise APIValueError('email','Email not exist')
user = users[0]
sha1 = hashlib.sha1()
#check passwd
sha1.update(user.id.encode('utf-8'))
sha1.update(b':')
sha1.update(passwd.encode('utf-8'))
if user.passwd != sha1.hexdigest():
raise APIValueError('passwd','Invalid password')
#authenticate ok set cookie:
r = web.Response()
logging.info('%s,%s,%s' % (user.id,user.passwd,user.email))
r.set_cookie(COOKIE_NAME,user2cookie(user,86400),max_age=86400,httponly=True)
user.passwd = '******'
r.content_type = 'application/json'
r.body = json.dumps(user,ensure_ascii=False).encode('utf-8')
return r
#sign-out
@get('/signout')
async def signout(request):
referer = request.headers.get('Referer')
r = web.HTTPFound(referer or '/')
r.set_cookie(COOKIE_NAME,'-deleted-',max_age=0,httponly=True)
logging.info('user sign out.')
return r
#blog detail page
@get('/blog/{id}')
async def get_blog(id):
blog = await Blog.find(id)
comments = await Comment.findAll('blog_id=?',[id],orderBy='created_at desc')
for c in comments:
c.html_content = text2html(c.content)
    #markdown2 converts the markdown content straight to HTML
blog.html_content = markdown2.markdown(blog.content)
return {
'__template__':'blog.html',
'blog':blog,
'comments':comments
}
#get a single blog by id
@get('/api/blogs/{id}')
async def api_get_blogs(*,id):
blog = await Blog.find(id)
return blog
#--------------------------------- management pages --------------------------------------------
@get('/manage/')
def manage():
return 'redirect:/manage/comments'
@get('/manage/comments')
def manage_comments(*, page='1'):
return {
'__template__': 'manage_comments.html',
'page_index': get_page_index(page)
}
@get('/manage/blogs')
def manage_blogs(*,page='1'):
return {
'__template__':'manage_blogs.html',
'page_index':get_page_index(page)
}
@get('/manage/blogs/create')
async def manage_create_blog():
return {
'__template__':'manage_edit_blog.html',
'id':'',
'action':'/api/blogs'
}
@get('/manage/blogs/edit')
async def manage_edit_blog(*,id):
return {
'__template__':'manage_edit_blog.html',
'id':id,
'action':'/api/blogs/%s' % id
}
@get('/manage/users')
def manage_users(*, page='1'):
return {
'__template__': 'manage_users.html',
'page_index': get_page_index(page)
}
#--------------------------------- backend API --------------------------------------------
#list blogs
#http://127.0.0.1:9000/api/blogs?page=1
@get('/api/blogs')
async def api_blog(*,page='1'):
page_index = get_page_index(page)
num = await Blog.findNumber('count(id)')
p = Page(num,page_index)
if num == 0:
return dict(page = p,blogs = ())
blogs = await Blog.findAll(orderBy='created_at desc',limit=(p.offset,p.limit))
return dict(page=p,blogs=blogs)
#create a blog
@post('/api/blogs')
async def api_create_blog(request,*,name,summary,content):
check_admin(request)
    if not name or not name.strip():
        raise APIValueError('name','name cannot be empty')
    if not summary or not summary.strip():
        raise APIValueError('summary','summary cannot be empty')
    if not content or not content.strip():
        raise APIValueError('content','content cannot be empty')
blog = Blog(user_id=request.__user__.id, user_name=request.__user__.name, user_image=request.__user__.image, name=name.strip(), summary=summary.strip(), content=content.strip())
await blog.save()
return blog
#update a blog
@post('/api/blogs/{id}')
async def api_update_blog(request,*,id,name,summary,content):
check_admin(request)
    #note: look the record up via Blog (not User)
blog = await Blog.find(id)
    if not name or not name.strip():
        raise APIValueError('name','name cannot be empty')
    if not summary or not summary.strip():
        raise APIValueError('summary','summary cannot be empty')
    if not content or not content.strip():
        raise APIValueError('content','content cannot be empty')
blog.name = name.strip()
blog.summary = summary.strip()
blog.content = content.strip()
await blog.update()
return blog
#delete a blog
@post('/api/blogs/{id}/delete')
async def api_delete_blog(id,request):
check_admin(request)
    #note: make sure the id route parameter is actually bound here
blog = await Blog.find(id)
await blog.remove()
return dict(id=id)
#list comments
#http://127.0.0.1:9000/api/comments?page=1,2,3,....
@get('/api/comments')
async def api_get_comments(*,page='1'):
page_index = get_page_index(page)
num = await Comment.findNumber('count(id)')
p = Page(num,page_index)
if num == 0:
return dict(page=p,comment=())
comments = await Comment.findAll(orderBy='created_at desc',limit=(p.offset,p.limit))
return dict(page=p,comments=comments)
#create a comment
@post('/api/blogs/{id}/comments')
async def api_create_comments(id,request,*,content):
user = request.__user__
if user is None:
        raise APIPermissionError('please sign in first.')
    if not content or not content.strip():
        raise APIValueError('content','content cannot be empty')
blog = await Blog.find(id)
if blog is None:
        #ensure every stored comment belongs to an existing blog
raise APIResourceNotFoundError('Blog')
comment = Comment(blog_id=blog.id,user_id=request.__user__.id, user_name=request.__user__.name,user_image=request.__user__.image,content=content.strip())
await comment.save()
return comment
#delete a comment
@post('/api/comments/{id}/delete')
async def api_delete_comment(id,request):
check_admin(request)
c = await Comment.find(id)
if c is None:
raise APIResourceNotFoundError('comment is not exist')
await c.remove()
return dict(id=id)
#register a new user
@post('/api/users')
async def api_register_user(*,email,name,passwd):
if not name or not name.strip():
raise APIValueError('name')
if not email or not _RE_EMAIL.match(email):
raise APIValueError('email')
if not passwd or not _RE_SHA1.match(passwd):
raise APIValueError('passwd')
users = await User.findAll('email=?',[email])
if len(users) > 0:
raise APIValueError('register:failed','email','Email is already in use')
uid = next_id()
sha1_passwd = '%s:%s' % (uid,passwd)
    # Note: the password sent by the client is already a 40-character SHA1 hash, so the server never sees the original password.
user = User(id=uid,name=name.strip(),email=email,passwd=hashlib.sha1(sha1_passwd.encode('utf-8')).hexdigest(),image='http://www.gravatar.com/avatar/%s?d=mm&s=120' % hashlib.md5(email.encode('utf-8')).hexdigest())
await user.save()
# make session cookie:
r = web.Response()
r.set_cookie(COOKIE_NAME,user2cookie(user,86400),max_age=86400,httponly=True)
user.passwd = '******'
r.content_type = 'application/json'
r.body = json.dumps(user,ensure_ascii=False).encode('utf-8')
return r
#list users
#http://127.0.0.1:9000/api/users
@get('/api/users')
async def api_get_users():
users = await User.findAll(orderBy='created_at')
for u in users:
u.passwd = '******'
    #return a dict: 'users' -> list of users
return dict(users=users)
#--------------------------------- backend API --------------------------------------------
|
from elasticsearch import Elasticsearch
# Connect to database
client = Elasticsearch("http://username:[email protected]:9200")
# Create index
client.indices.create('index_name')
# Insert information
client.create(index='index_name', id=1, body={
'sku': 134218478,
'name': 'Rb-01 - Robô Aspirador De Pó Fast Clean Bivolt - Mondial',
'description': 'Use a tecnologia a seu favor para aproveitar a vida longe da faxina. Conheça mais essa facilidade para o seu lar e deixe tuuuudo limpinho :)',
'category': 'eletroportáteis'
})
# Query information
p = client.get(index='index_name', id=1)
print(p)
# Destroy index
client.indices.delete('index_name') |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from numpy.testing import assert_allclose
from gammapy.astro.population.velocity import (
FaucherKaspi2006VelocityBimodal,
FaucherKaspi2006VelocityMaxwellian,
Paczynski1990Velocity,
)
test_cases = [
{
"class": FaucherKaspi2006VelocityMaxwellian,
"x": [1, 10],
"y": [4.28745276e-08, 4.28443169e-06],
},
{
"class": FaucherKaspi2006VelocityBimodal,
"x": [1, 10],
"y": [1.754811e-07, 1.751425e-05],
},
{"class": Paczynski1990Velocity, "x": [1, 10], "y": [0.00227363, 0.00227219]},
]
@pytest.mark.parametrize("case", test_cases, ids=lambda _: _["class"].__name__)
def test_velocity_model(case):
model = case["class"]()
y = model(case["x"])
assert_allclose(y, case["y"], rtol=1e-5)
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2008, 2019 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
Implementation of `copyTransactionsFrom`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
import tempfile
from ZODB.loglevels import TRACE
from ZODB.blob import is_blob_record
from ZODB.utils import cp as copy_blob
from ZODB.utils import readable_tid_repr
from ZODB.POSException import POSKeyError
from relstorage._compat import perf_counter
from relstorage._util import byte_display
logger = logging.getLogger(__name__)
class Copy(object):
__slots__ = (
'blobhelper',
'tpc',
'restore',
)
def __init__(self, blobhelper, tpc, restore):
self.blobhelper = blobhelper
self.tpc = tpc
self.restore = restore
def copyTransactionsFrom(self, other):
logger.info("Counting the transactions to copy.")
other_it = other.iterator()
logger.debug("Opened the other iterator: %s", other_it)
num_txns, other_it = self.__get_num_txns_to_copy(other, other_it)
logger.info("Copying %d transactions", num_txns)
progress = _ProgressLogger(num_txns, other, self.__copy_transaction)
try:
for trans in other_it:
progress(trans)
finally:
try:
close = other_it.close
except AttributeError:
pass
else:
close()
now = perf_counter()
logger.info(
"Copied transactions: %s",
progress.display_at(now))
def __copy_transaction(self, other, trans):
# Originally adapted from ZODB.blob.BlobStorageMixin
tpc = self.tpc
num_txn_records = 0
txn_data_size = 0
num_blobs = 0
tmp_blobs_to_rm = []
tpc.tpc_begin(trans, trans.tid, trans.status)
for record in trans:
num_txn_records += 1
if record.data:
txn_data_size += len(record.data)
blobfile = None
if is_blob_record(record.data):
try:
blobfile = other.openCommittedBlobFile(
record.oid, record.tid)
except POSKeyError:
logger.exception("Failed to open blob to copy")
if blobfile is not None:
fd, name = tempfile.mkstemp(
suffix='.tmp',
dir=self.blobhelper.temporaryDirectory()
)
tmp_blobs_to_rm.append(name)
logger.log(
TRACE,
"Copying %s to temporary blob file %s for upload",
blobfile, name)
with os.fdopen(fd, 'wb') as target:
# If we don't get the length, ``copy_blob`` will.
old_pos = blobfile.tell()
blobfile.seek(0, 2)
length = blobfile.tell()
blobfile.seek(old_pos)
copy_blob(blobfile, target, length)
txn_data_size += length
blobfile.close()
self.restore.restoreBlob(record.oid, record.tid, record.data,
name, record.data_txn, trans)
else:
self.restore.restore(record.oid, record.tid, record.data,
'', record.data_txn, trans)
tpc.tpc_vote(trans)
tpc.tpc_finish(trans)
num_blobs = len(tmp_blobs_to_rm)
if num_blobs:
for tmp_blob in tmp_blobs_to_rm:
logger.log(TRACE, "Removing temporary blob file %s", tmp_blob)
try:
os.unlink(tmp_blob)
except OSError:
pass
return num_txn_records, txn_data_size, num_blobs
def __get_num_txns_to_copy(self, other, other_it):
try:
num_txns = len(other_it)
if num_txns == 0:
# Hmm, that shouldn't really be right, should it?
# Try the other path.
raise TypeError()
except TypeError:
logger.debug("Iterator %s doesn't support len()", other_it)
num_txns = 0
for _ in other_it:
num_txns += 1
other_it.close()
other_it = other.iterator()
return num_txns, other_it
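# Illustrative usage of Copy (a sketch; `blobhelper`, `tpc` and `restore` stand for whatever
# collaborators the enclosing storage provides -- they are assumptions of this example):
#   copier = Copy(blobhelper, tpc, restore)
#   copier.copyTransactionsFrom(other_storage)   # e.g. an existing storage opened read-only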
class _ProgressLogger(object):
# Time in seconds between major progress logging.
# (minor progress logging occurs every ``log_count`` commits)
log_interval = 60
# Number of transactions to copy before checking if we should perform a major
# log.
log_count = 100
# Number of transactions to copy before performing a minor log.
minor_log_count = 25
minor_log_interval = 15
minor_log_tx_record_count = 100
minor_log_tx_size = 100 * 1024
minor_log_copy_time_threshold = 1.0
class _IntervalStats(object):
__slots__ = (
'begin_time',
'txns_copied',
'total_size',
)
def __init__(self, begin_time):
self.begin_time = begin_time
self.txns_copied = 0
self.total_size = 0
def display_at(self, now, total_num_txns, include_elapsed=False):
pct_complete = '%1.2f%%' % (self.txns_copied * 100.0 / total_num_txns)
elapsed_total = now - self.begin_time
if elapsed_total:
rate_mb = self.total_size / elapsed_total
rate_tx = self.txns_copied / elapsed_total
else:
rate_mb = rate_tx = 0.0
rate_mb_str = byte_display(rate_mb)
rate_tx_str = '%1.3f' % rate_tx
result = "%d/%d,%7s, %6s/s %6s TX/s, %s" % (
self.txns_copied, total_num_txns, pct_complete,
rate_mb_str, rate_tx_str,
byte_display(self.total_size),
)
if include_elapsed:
result += ' %4.1f minutes' % (elapsed_total / 60.0)
return result
def __init__(self, num_txns, other_storage, copy):
self.num_txns = num_txns
begin_time = perf_counter()
self._entire_stats = self._IntervalStats(begin_time)
self._interval_stats = self._IntervalStats(begin_time)
self.log_at = begin_time + self.log_interval
self.minor_log_at = begin_time + self.minor_log_interval
self.debug_enabled = logger.isEnabledFor(logging.DEBUG)
self._other_storage = other_storage
self._copy = copy
def display_at(self, now):
return self._entire_stats.display_at(now, self.num_txns, True)
def __call__(self, trans):
begin_copy = perf_counter()
result = self._copy(self._other_storage, trans)
now = perf_counter()
self._copied(now, now - begin_copy, trans, result)
def _copied(self, now, copy_duration, trans, copy_result):
entire_stats = self._entire_stats
interval_stats = self._interval_stats
entire_stats.txns_copied += 1
interval_stats.txns_copied += 1
total_txns_copied = self._entire_stats.txns_copied
txn_byte_size = copy_result[1]
entire_stats.total_size += txn_byte_size
interval_stats.total_size += txn_byte_size
if self.debug_enabled:
num_txn_records, txn_byte_size, _num_txn_blobs = copy_result
if (total_txns_copied % self.minor_log_count == 0 and now >= self.minor_log_at) \
or txn_byte_size >= self.minor_log_tx_size \
or num_txn_records >= self.minor_log_tx_record_count \
or copy_duration >= self.minor_log_copy_time_threshold:
self.minor_log_at = now + self.minor_log_interval
logger.debug(
"Copied %s in %1.4fs",
self.__transaction_display(trans, copy_result),
copy_duration
)
if total_txns_copied % self.log_count and now >= self.log_at:
self.log_at = now + self.log_interval
self.__major_log(
now,
self.__transaction_display(trans, copy_result))
self._interval_stats = self._IntervalStats(now)
def __major_log(self, now, transaction_display):
logger.info(
"Copied %s | %60s | (%s)",
transaction_display,
self._interval_stats.display_at(now, self.num_txns),
self._entire_stats.display_at(now, self.num_txns, True)
)
def __transaction_display(self, trans, copy_result):
num_txn_records, txn_byte_size, num_txn_blobs = copy_result
return 'transaction %s <%4d records, %3d blobs, %9s>' % (
readable_tid_repr(trans.tid),
num_txn_records, num_txn_blobs, byte_display(txn_byte_size)
)
|
#!/usr/bin/env python3
import click
import cmocean
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import rc
import numpy as np
import pandas as pd
prop1range = [-0.01, 1.0] # max_pair_distance
prop2range = [0.0, 800.0] # ML
num_ch4_a3 = 2.69015E-05 # from methane-comparison.xlsx
fsl = fs = 8
# rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
font = {'family':'sans-serif',
'sans-serif':['Helvetica'],
'weight' : 'bold',
'size' : 8}
matplotlib.rc('font', **font)
# rc('text', usetex=True)
@click.command()
@click.argument('csv-path', type=click.File())
def figure_ml_vs_max_pair_distance(csv_path):
fig = plt.figure(figsize=(4.6, 4.6))
cm = matplotlib.cm.get_cmap("viridis")
# cm = cmocean.cm.thermal
points = pd.read_csv(csv_path)
points['ch4_uc'] = points.absolute_volumetric_loading * (num_ch4_a3 * points.a * points.b * points.c)
ax = fig.subplots(ncols=1)
ax.set_xlim(prop1range[0], prop1range[1])
ax.set_ylim(prop2range[0], prop2range[1])
ax.set_xticks(prop1range[1] * np.array([0.0, 0.25, 0.5, 0.75, 1.0]))
ax.set_yticks(prop2range[1] * np.array([0.0, 0.25, 0.5, 0.75, 1.0]))
ax.tick_params(axis='x', which='major', labelsize=fs)
ax.tick_params(axis='y', which='major', labelsize=fs)
ax.grid(which='major', axis='both', linestyle='-', color='0.9', zorder=0)
# ax.grid(which='minor', axis='both', linestyle='-', color='0.9', zorder=0)
sc = ax.scatter(points.max_pair_distance, points.absolute_volumetric_loading, zorder=2,
alpha=0.6, s=points.a, edgecolors=None, linewidths=0, c=points.ch4_uc.round(),
# c=np.log(points.epsilon_density),
# norm=matplotlib.colors.LogNorm(vmin=points.epsilon_density.min(), vmax=points.epsilon_density.max()),
cmap=cm)
ax.axvline(3**0.5 / 2, 0, 1, lw=1, linestyle="--", color="0.5", label="Max pair distance max", zorder=1)
ax.set_xlabel('Max Pair Distance', fontsize=fsl)
ax.set_ylabel('Methane Loading [V/V]', fontsize=fsl)
# fig.subplots_adjust(wspace=0.05, hspace=0.05)
output_path = "figure.png"
fig.savefig(output_path, dpi=1200, bbox_inches='tight')
plt.close(fig)
if __name__ == '__main__':
figure_ml_vs_max_pair_distance()
|
from questions_three.scaffolds.common import skip
from .test import test
def funcname(f):
return f.__name__.replace("_", " ")
def identify(thing):
attrs = dir(thing)
if "func" in attrs and "args" in attrs:
return "%s with %s" % (funcname(thing.func), thing.args)
return funcname(thing)
def do_and_check(*, do, checks):
"""
Perform one action, followed by a series of checks, each as its own test
do (function): Execute this
checks (sequence of functions): Execute each of these as its own test
"""
err_from_do = None
try:
do()
except Exception as e:
err_from_do = e
for check in checks:
with test("%s and %s" % (identify(do), identify(check))):
if err_from_do:
skip('Skipping check because "do" function failed')
else:
check()
if err_from_do:
raise err_from_do
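# Illustrative usage (a sketch; `submit_order`, `check_status` and `check_body` are
# hypothetical functions, not part of this package):
#   do_and_check(do=submit_order, checks=[check_status, check_body])
#   # runs "submit order and check status" and "submit order and check body" as two tests;
#   # if `do` raises, both checks are skipped and the original error is re-raised afterwards.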
|
import mimetypes
print(mimetypes.guess_extension(mimetypes.guess_type('./something.tar.xz')[0]))
|
import argparse
import glob
import os
import numpy as np
import pandas as pd
from pathlib import Path
import seaborn as sns;
sns.set()
import matplotlib.pyplot as plt
from collections import defaultdict
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
###
# Extract CSV of Tensorflow summaries
# https://stackoverflow.com/a/52095336/11702735
###
FOLDER_NAME = 'csv'
METHOD_DICT = {
"CPE_Reward_Weighted_Sequential_Doubly_Robust": "SWDR",
"CPE_Reward_MAGIC": "MAGIC",
}
def tabulate_events(dpath, subpath, tags):
summary_iterators = [EventAccumulator(os.path.join(dpath, dname, subpath)).Reload()
for dname in os.listdir(dpath) if dname != FOLDER_NAME]
out = defaultdict(list)
steps = []
for tag in tags:
steps = [e.step for e in summary_iterators[0].Scalars(tag)]
for events in zip(*[acc.Scalars(tag) for acc in summary_iterators]):
assert len(set(e.step for e in events)) == 1
out[tag].append([e.value for e in events])
return out, steps
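# Shape sketch (illustrative): with three run directories and a tag logged at steps [0, 10, 20],
# `steps` is [0, 10, 20] and `out[tag]` has three entries, each holding the values of all three
# runs at the corresponding step, e.g. out[tag][1] == [run_a@10, run_b@10, run_c@10].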
def to_csv(dpath, subpath, input_tags):
    dirs = [dname for dname in os.listdir(dpath) if dname != FOLDER_NAME]  # keep columns aligned with tabulate_events
d, steps = tabulate_events(dpath, subpath, input_tags)
tags, values = zip(*d.items())
np_values = np.array(values)
for index, tag in enumerate(tags):
df = pd.DataFrame(np_values[index], index=steps, columns=dirs)
df.to_csv(get_file_path(dpath, tag), index_label="step")
def get_file_path(dpath, tag):
file_name = tag.replace("/", "_") + '.csv'
folder_path = os.path.join(dpath, 'csv')
if not os.path.exists(folder_path):
os.makedirs(folder_path)
return os.path.join(folder_path, file_name)
def plot_csv(dpath):
folder_path = os.path.join(dpath, 'csv')
dfs = []
for filename in glob.glob(F"{folder_path}/*.csv"):
method = Path(filename).stem
df = pd.read_csv(filename)
df.rename(columns={'step': 'Step'}, inplace=True)
df.set_index('Step', inplace=True)
df = df.stack().reset_index().rename(columns={'level_1': 'run', 0: 'Value'})
df["Method"] = METHOD_DICT[method] if method in METHOD_DICT.keys() else method
dfs.append(df)
dados = pd.concat(dfs)
ax = sns.lineplot(x="Step", y="Value", hue="Method", data=dados)
ax.axhline(1, ls='--', c='red', alpha=0.5)
fig_filename = F"{folder_path}/CPE_Reward.png"
plt.savefig(fig_filename)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--path", type=str, help="main path for tensorboard files", default=os.getcwd())
parser.add_argument("--subpath", type=str, help="subpath path for tensorboard files", default="outputs")
parser.add_argument("--tags", type=str, help="Tags to use",
default=['CPE/Reward/Weighted_Sequential_Doubly_Robust', 'CPE/Reward/MAGIC'])
args = parser.parse_args()
path = Path(args.path)
to_csv(path, args.subpath, args.tags)
plot_csv(path)
|
# coding=utf-8
import re
import datetime
from sqlalchemy.orm.query import Query
from .utils import *
from .finance_tables import *
FUNDAMENTAL_RESULT_LIMIT = 10000
def get_tables_from_sql(sql):
m = re.findall(
r'cash_flow_statement_day|balance_sheet_day|financial_indicator_day|'
r'income_statement_day|stock_valuation|bank_indicator_acc|'
r'security_indicator_acc|insurance_indicator_acc',
sql
)
return list(set(m))
def get_table_class(tablename):
for t in (BalanceSheetDay, CashFlowStatementDay, FinancialIndicatorDay,
IncomeStatementDay, StockValuation, BankIndicatorAcc, SecurityIndicatorAcc,
InsuranceIndicatorAcc):
if t.__tablename__ == tablename:
return t
def get_stat_date_column(cls, only_year=False):
    if only_year:
        # tables that only support per-year queries have no `day` column
        return cls.statDate
    else:
        # the valuation table has no statDate column
        return getattr(cls, 'statDate', cls.day)
def get_fundamentals_sql(query_object, date=None, statDate=None):
from .calendar_service import CalendarService
if not isinstance(query_object, Query):
raise AssertionError(
"query_object must be a sqlalchemy's Query object."
" But what passed in was: " + str(type(query_object))
)
stat_date = statDate
assert (not date) ^ (not stat_date), "(statDate, date) only one param is required"
if query_object.limit_value:
limit = min(FUNDAMENTAL_RESULT_LIMIT, query_object.limit_value)
else:
limit = FUNDAMENTAL_RESULT_LIMIT
offset = query_object.offset_value
query_object = query_object.limit(None).offset(None)
tablenames = get_tables_from_sql(str(query_object.statement))
tables = [get_table_class(name) for name in tablenames]
by_year = False
# if date:
# date = CalendarService.get_previous_trade_date(date)
only_year = bool({
"bank_indicator_acc",
"security_indicator_acc",
"insurance_indicator_acc"
} & set(tablenames))
if only_year:
if date:
date = None
stat_date = str(datetime.date.min)
elif stat_date:
if isinstance(stat_date, (str, six.string_types)):
stat_date = stat_date.lower()
if 'q' in stat_date:
stat_date = '0001-01-01'
else:
stat_date = '{}-12-31'.format(int(stat_date))
elif isinstance(stat_date, int):
stat_date = '{}-12-31'.format(stat_date)
stat_date = to_date(stat_date)
else:
today = datetime.date.today()
yesteryear = today.year - 1
stat_date = datetime.date(yesteryear, 12, 31)
elif stat_date:
if isinstance(stat_date, (str, six.string_types)):
stat_date = stat_date.lower()
if 'q' in stat_date:
stat_date = (stat_date.replace('q1', '-03-31')
.replace('q2', '-06-30')
.replace('q3', '-09-30')
.replace('q4', '-12-31'))
else:
year = int(stat_date)
by_year = True
stat_date = '%s-12-31' % year
elif isinstance(stat_date, int):
year = int(stat_date)
by_year = True
stat_date = '%s-12-31' % year
stat_date = to_date(stat_date)
    # the most recent trading day that is not later than stat_date
trade_day_not_after_stat_date = None
for table in tables:
if date:
query_object = query_object.filter(table.day == date)
else:
if hasattr(table, 'statDate'):
query_object = query_object.filter(table.statDate == stat_date)
else:
                # the valuation table has no data on non-trading days,
                # so if stat_date is not a trading day, fall back to the previous trading day
assert table is StockValuation
if trade_day_not_after_stat_date is None:
trade_day_not_after_stat_date = CalendarService.get_previous_trade_date(stat_date)
query_object = query_object.filter(table.day == trade_day_not_after_stat_date)
    # join the tables on code
for table in tables[1:]:
query_object = query_object.filter(table.code == tables[0].code)
    # restore offset and limit
query_object = query_object.limit(limit).offset(offset)
    # compile the query object into raw SQL
sql = compile_query(query_object)
if stat_date:
if by_year:
sql = sql.replace('balance_sheet_day', 'balance_sheet')\
.replace('financial_indicator_day', 'financial_indicator_acc')\
.replace('income_statement_day', 'income_statement_acc')\
.replace('cash_flow_statement_day', 'cash_flow_statement_acc')
else:
for t in ('balance_sheet_day', 'financial_indicator_day', 'income_statement_day',
'cash_flow_statement_day'):
sql = sql.replace(t, t[:-4])
sql = re.sub(r'(cash_flow_statement|balance_sheet|income_statement|financial_indicator|'
r'financial_indicator_acc|income_statement_acc|cash_flow_statement_acc)\.`?day`?\b',
r'\1.statDate', sql)
return sql
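# Examples of the statDate normalisation above (illustrative):
#   '2015q1' -> '2015-03-31',  '2015q3' -> '2015-09-30',  2015 or '2015' -> '2015-12-31' (by_year=True)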
def fundamentals_redundant_continuously_query_to_sql(query, trade_day):
    '''
    Generate SQL from the given query object and start time.
    trade_day is the list of trading days to query.
    '''
from .fundamentals_tables_gen import (
BalanceSheet, IncomeStatement, CashFlowStatement, FinancialIndicator,
BankIndicatorAcc, SecurityIndicatorAcc, InsuranceIndicatorAcc, StockValuation)
if query.limit_value:
limit = min(FUNDAMENTAL_RESULT_LIMIT, query.limit_value)
else:
limit = FUNDAMENTAL_RESULT_LIMIT
offset = query.offset_value
query = query.limit(None).offset(None)
def get_table_class(tablename):
for t in (BalanceSheet, CashFlowStatement, FinancialIndicator,
IncomeStatement, StockValuation, BankIndicatorAcc, SecurityIndicatorAcc,
InsuranceIndicatorAcc):
if t.__tablename__ == tablename:
return t
def get_tables_from_sql(sql):
m = re.findall(
r'cash_flow_statement_day|balance_sheet_day|financial_indicator_day|'
r'income_statement_day|stock_valuation|bank_indicator_acc|security_indicator_acc|'
r'insurance_indicator_acc', sql)
return list(set(m))
    # get the table classes referenced by the query object
tablenames = get_tables_from_sql(str(query.statement))
tables = [get_table_class(name) for name in tablenames]
query = query.filter(StockValuation.day.in_(trade_day))
    # filter on the code and day columns of the stock_valuation table
for table in tables:
if table is not StockValuation:
query = query.filter(StockValuation.code == table.code)
if hasattr(table, 'day'):
query = query.filter(StockValuation.day == table.day)
else:
query = query.filter(StockValuation.day == table.statDate)
    # join the tables on code
for table in tables[1:]:
query = query.filter(table.code == tables[0].code)
    # restore offset and limit
query = query.limit(limit).offset(offset)
# query = query.subquery()
sql = compile_query(query)
    # always select code and day so they can be used as the panel index
sql = sql.replace(
'SELECT ',
'SELECT DISTINCT stock_valuation.day AS day, stock_valuation.code as code, '
)
return sql
def get_continuously_query_to_sql(query, trade_day):
    '''
    Generate SQL from the given query object and start time.
    trade_day is the list of trading days to query.
    '''
if query.limit_value:
limit = min(FUNDAMENTAL_RESULT_LIMIT, query.limit_value)
else:
limit = FUNDAMENTAL_RESULT_LIMIT
offset = query.offset_value
query = query.limit(None).offset(None)
def get_table_class(tablename):
for t in (BalanceSheet, CashFlowStatement, FinancialIndicator,
IncomeStatementDay, StockValuation, BankIndicatorAcc, SecurityIndicatorAcc,
InsuranceIndicatorAcc):
if t.__tablename__ == tablename:
return t
def get_tables_from_sql(sql):
m = re.findall(
r'cash_flow_statement|balance_sheet|financial_indicator|'
r'income_statement|stock_valuation|bank_indicator_acc|security_indicator_acc|'
r'insurance_indicator_acc', sql)
return list(set(m))
    # get the table classes referenced by the query object
tablenames = get_tables_from_sql(str(query.statement))
tables = [get_table_class(name) for name in tablenames]
query = query.filter(StockValuation.day.in_(trade_day))
    # filter on the code and day columns of the stock_valuation table
for table in tables:
if table is StockValuation:
query = query.filter(StockValuation.code == table.code)
query = query.filter(StockValuation.day >= table.periodStart)
query = query.filter(StockValuation.day <= table.periodEnd)
    # join the tables on code
for table in tables[1:]:
query = query.filter(table.code == tables[0].code)
    # restore offset and limit
query = query.limit(limit).offset(offset)
sql = compile_query(query)
    # always select code and day so they can be used as the panel index
sql = sql.replace('SELECT ', 'SELECT DISTINCT stock_valuation.day AS day,stock_valuation.code as code, ')
return sql
balance = balance_sheet = BalanceSheetDay
income = income_statement = IncomeStatementDay
cash_flow = cash_flow_statement = CashFlowStatementDay
indicator = financial_indicator = FinancialIndicatorDay
bank_indicator = bank_indicator_acc = BankIndicatorAcc
security_indicator = security_indicator_acc = SecurityIndicatorAcc
insurance_indicator = insurance_indicator_acc = InsuranceIndicatorAcc
valuation = stock_valuation = StockValuation
__all__ = [
"query",
"balance",
"income",
"cash_flow",
"indicator",
"bank_indicator",
"security_indicator",
"insurance_indicator",
"valuation"
]
|
"""
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import ABC
from abc import abstractmethod
from enum import Enum
from typing import List, Tuple, Any
class Dtype(Enum):
FLOAT = 'float'
INTEGER = 'int'
class BaseLayerAttributes(ABC):
"""
This class stores base useful for some algorithms attributes
of modules/layers.
"""
class MultipleInputLayerAttributes(BaseLayerAttributes):
"""
Represents a layer with multiple inputs.
"""
def __init__(self,
axis: int):
self.axis = axis
def __eq__(self, other: Any):
return isinstance(other, MultipleInputLayerAttributes) \
and self.axis == other.axis
class WeightedLayerAttributes(BaseLayerAttributes):
"""
Represents a layer with weights.
"""
def __init__(self, weight_requires_grad: bool, dtype: Dtype = Dtype.FLOAT):
self.weight_requires_grad = weight_requires_grad
self.dtype = dtype
def __eq__(self, other: Any):
return isinstance(other, WeightedLayerAttributes) \
and self.weight_requires_grad == other.weight_requires_grad
@abstractmethod
def get_weight_shape(self) -> List[int]:
pass
def get_num_filters(self) -> int:
weight_shape = self.get_weight_shape()
return weight_shape[self.get_target_dim_for_compression()]
@abstractmethod
def get_target_dim_for_compression(self) -> int:
pass
class GenericWeightedLayerAttributes(WeightedLayerAttributes):
"""
Represents a weighted layer for which there is no information ahead of time
of the exact meaning of the weight indices.
"""
def __init__(self, weight_requires_grad: bool, weight_shape: List[int],
filter_dimension_idx: int = 0):
super().__init__(weight_requires_grad)
self.weight_shape = weight_shape
self.filter_dimension_idx = filter_dimension_idx
def get_weight_shape(self) -> List[int]:
return self.weight_shape
def get_target_dim_for_compression(self) -> int:
return 0
class LinearLayerAttributes(WeightedLayerAttributes):
def __init__(self,
weight_requires_grad: bool,
in_features: int,
out_features: int):
super().__init__(weight_requires_grad)
self.in_features = in_features
self.out_features = out_features
def get_weight_shape(self) -> List[int]:
return [self.out_features, self.in_features]
def get_target_dim_for_compression(self) -> int:
return 0
class ConvolutionLayerAttributes(WeightedLayerAttributes):
"""
This class stores attributes of convolution modules/layers
that are useful for some algorithms.
"""
def __init__(self,
weight_requires_grad: bool,
in_channels: int,
out_channels: int,
kernel_size: Tuple[int, ...],
stride: Tuple[int, ...],
groups: int,
transpose: bool,
padding_values: Tuple[int, ...]):
super().__init__(weight_requires_grad)
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.groups = groups
self.transpose = transpose
self.padding_values = padding_values
def __eq__(self, other: Any):
return isinstance(other, ConvolutionLayerAttributes) \
and super().__eq__(other) \
and self.in_channels == other.in_channels \
and self.out_channels == other.out_channels \
and self.kernel_size == other.kernel_size \
and self.stride == other.stride \
and self.groups == other.groups \
and self.transpose == other.transpose
def get_weight_shape(self) -> List[int]:
if not self.transpose:
return [self.out_channels, self.in_channels // self.groups, *self.kernel_size]
return [self.in_channels, self.out_channels // self.groups, *self.kernel_size]
def get_target_dim_for_compression(self) -> int:
# Always quantize per each "out" channel
if self.transpose:
return 1
return 0
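# Worked example (illustrative, not part of the original module): a regular convolution with
# in_channels=16, out_channels=32, kernel_size=(3, 3), groups=1, transpose=False reports
# get_weight_shape() == [32, 16, 3, 3] and is compressed along dim 0 (its "out" channels),
# while the transposed variant stores weights as [16, 32, 3, 3] and uses dim 1 instead.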
class GroupNormLayerAttributes(WeightedLayerAttributes):
"""
This class stores attributes of group normalization modules/layers
that are useful for some algorithms.
"""
def __init__(self,
weight_requires_grad: bool,
num_channels: int,
num_groups: int):
super().__init__(weight_requires_grad)
self.num_channels = num_channels
self.num_groups = num_groups
def __eq__(self, other: Any):
return isinstance(other, GroupNormLayerAttributes) \
and super().__eq__(other) \
and self.num_channels == other.num_channels \
and self.num_groups == other.num_groups
def get_weight_shape(self) -> List[int]:
return [self.num_channels]
def get_target_dim_for_compression(self) -> int:
return 0
class ReshapeLayerAttributes(BaseLayerAttributes):
"""
This class stores attributes of reshape modules/layers
that are useful for some algorithms.
"""
def __init__(self,
input_shape: List[int],
output_shape: List[int]):
self.input_shape = input_shape
self.output_shape = output_shape
|
import numpy as np
import shapely
from shapely.geometry import Polygon, MultiPoint # Polygon
def calculate_iou(pred, gt):
_pred = np.array(pred).reshape(4, 2)
pred_poly = Polygon(_pred).convex_hull
# print(Polygon(_pred).convex_hull) # you can print to see if this is the case
_gt = np.array(gt).reshape(4, 2)
gt_poly = Polygon(_gt).convex_hull
# print(Polygon(_gt).convex_hull)
# Merge two box coordinates to become 8*2
union_poly = np.concatenate((_pred, _gt))
# print(MultiPoint(union_poly).convex_hull)
# If the two quadrilaterals do not intersect
if not pred_poly.intersects(gt_poly):
iou = 0
else:
try:
inter_area = pred_poly.intersection(
gt_poly).area # intersection area
# print(inter_area)
union_area = MultiPoint(union_poly).convex_hull.area
# print(union_area)
            if union_area == 0:
                iou = 0
            else:
                iou = float(inter_area) / union_area
except shapely.geos.TopologicalError:
print('shapely.geos.TopologicalError occured, iou set to 0')
iou = 0
return iou
# line1 = [196, 194, 287, 201, 289, 267, 196, 263]
# line2 = [199, 194, 287, 201, 289, 267, 196, 263]
# line1=[2,0,2,2,0,0,0,0] #One-dimensional array representation of the coordinates of the four points of the quadrilateral, [x,y,x,y....]
# line2=[1,1,4,1,4,4,1,4]
# print(calculate_iou(line1, line2))
gt = np.array([
[196, 194, 287, 201, 289, 267, 196, 263],
[291, 207, 419, 211, 420, 271, 294, 266],
[425, 220, 471, 220, 472, 273, 425, 271],
[475, 219, 561, 219, 562, 275, 477, 274],
[567, 224, 642, 228, 644, 279, 567, 277],
])
# print(gt.shape[0])
# QUAD format [x1,y1,x2,y2,x3,y3,x4,y4]
pred = np.array([
[199, 194, 287, 201, 289, 267, 196, 263],
[200, 194, 287, 201, 289, 267, 199, 263],
[291, 207, 419, 211, 420, 271, 294, 266],
[425, 220, 471, 220, 472, 273, 425, 271],
[475, 219, 561, 219, 562, 275, 477, 274],
[567, 224, 642, 228, 644, 279, 567, 277],
[765, 305, 785, 305, 788, 336, 764, 337],
[786, 307, 800, 309, 799, 332, 789, 331],
[763, 376, 799, 375, 799, 408, 766, 407],
[779, 414, 799, 413, 799, 438, 783, 43],
])
print(pred.shape[0])
def compute_iou(preds,gt):
# iou = np.array([])
iou = []
for idx_gt in range(gt.shape[0]):
for idx_pred in range(preds.shape[0]):
temp_iou = np.array([calculate_iou(gt[idx_gt],preds[idx_pred])])
iou.append(temp_iou)
# print(temp_iou)
# np.concatenate(iou,temp_iou)
    # rows follow the outer loop (gt boxes), columns the inner loop (predicted boxes)
    iou = np.array(iou).reshape(gt.shape[0], preds.shape[0])
return iou
# gt = np.array([199, 194, 287, 201, 289, 267, 196, 263])
print(compute_iou(pred,gt))
# print(np.arange(pred.shape[0]*gt.shape[0]).reshape(pred.shape[0],gt.shape[0]) ) |