| column | type | range / values |
|---|---|---|
| blob_id | string | length 40 to 40 |
| directory_id | string | length 40 to 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 to 40 |
| detected_licenses | list | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 to 40 |
| revision_id | string | length 40 to 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, nullable |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | list | length 1 to 1 |
| author_id | string | length 1 to 132 |
7824f1a34399e412abf45a090606e6f9607837a1 | 0eaab1305900d8e70dd746d676126d1667d9c314 | /scripts/cached_credentials.py | d2360fae08a54472419d0ad570e1d841faa8c3e2 | [
"Apache-2.0"
] | permissive | scudette/winreg-kb | 89ffc7f63c2630b266bed41d1c66dff64fd1d32d | f81b8bcaef8365d0c52bf3c87af2bccb4274bece | refs/heads/master | 2020-06-08T20:51:37.427445 | 2019-06-14T06:47:16 | 2019-06-14T06:47:16 | 193,304,780 | 1 | 0 | null | 2019-06-23T04:07:02 | 2019-06-23T04:07:02 | null | UTF-8 | Python | false | false | 2,497 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to extract cached credentials."""
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import logging
import sys
from dfvfs.helpers import command_line as dfvfs_command_line
from winregrc import cached_credentials
from winregrc import collector
from winregrc import output_writers
def Main():
"""The main program function.
Returns:
bool: True if successful or False if not.
"""
argument_parser = argparse.ArgumentParser(description=(
'Extracts the cached credentials from a SECURITY Registry file.'))
argument_parser.add_argument(
'-d', '--debug', dest='debug', action='store_true', default=False, help=(
'enable debug output.'))
argument_parser.add_argument(
'source', nargs='?', action='store', metavar='PATH', default=None, help=(
'path of the volume containing C:\\Windows, the filename of '
'a storage media image containing the C:\\Windows directory, '
'or the path of a SECURITY Registry file.'))
options = argument_parser.parse_args()
if not options.source:
print('Source value is missing.')
print('')
argument_parser.print_help()
print('')
return False
logging.basicConfig(
level=logging.INFO, format='[%(levelname)s] %(message)s')
output_writer = output_writers.StdoutOutputWriter()
if not output_writer.Open():
print('Unable to open output writer.')
print('')
return False
volume_scanner_mediator = dfvfs_command_line.CLIVolumeScannerMediator()
registry_collector = collector.WindowsRegistryCollector(
mediator=volume_scanner_mediator)
if not registry_collector.ScanForWindowsVolume(options.source):
print('Unable to retrieve the Windows Registry from: {0:s}.'.format(
options.source))
print('')
return False
if registry_collector.IsSingleFileRegistry():
    print('Both SECURITY and SYSTEM Registry files are required.')
print('')
return False
# TODO: map collector to available Registry keys.
collector_object = cached_credentials.CachedCredentialsKeyCollector(
debug=options.debug, output_writer=output_writer)
result = collector_object.Collect(registry_collector.registry)
if not result:
print('No Cache key found.')
else:
output_writer.WriteText('\n')
output_writer.Close()
return True
if __name__ == '__main__':
if not Main():
sys.exit(1)
else:
sys.exit(0)
| [
"[email protected]"
] | |
e1be6cb81e9d66c660fafd0db382193191ef9e47 | 6b2db6fca8f31c4e6c96e68cf11e5ca3ce7e8a9b | /src/drawPerceptionAnaTimeSeries.py | 32382b65cd5bd0c0a5f3649d575b1d52a7cebdd3 | [
"MIT"
] | permissive | ningtangla/escapeFromMultipleSuspectors | e04da12488be9767c5b6511355c167fdcf18e723 | e6dcb0f7f9371b7ca6cca8779f69f18095092140 | refs/heads/master | 2022-05-03T05:25:21.556950 | 2022-04-20T13:51:53 | 2022-04-20T13:51:53 | 190,686,484 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,552 | py | from collections import OrderedDict
import pandas as pd
from matplotlib import pyplot as plt
import itertools as it
import os
import trajectoriesSaveLoad as tsl
class Readcsv:
def __init__(self, getCSVSavePathByCondition, columnNames):
self.getCSVSavePathByCondition = getCSVSavePathByCondition
self.columnNames = columnNames
def __call__(self, condition):
getCSVSavePath = self.getCSVSavePathByCondition(tsl.readParametersFromDf(condition))
CSVSavePath = getCSVSavePath({})
results = pd.read_csv(CSVSavePath, header = None, skiprows = [0], names = self.columnNames)#, header = None)
mean = results.mean()
return mean
def main():
manipulatedVariables = OrderedDict()
manipulatedVariables['alphaForStateWidening'] = [0.25]
#manipulatedVariables['attentionType'] = ['idealObserver']#, 'hybrid4']
manipulatedVariables['attentionType'] = ['hybrid4']#, 'preAttention']
#manipulatedVariables['attentionType'] = ['preAttention', 'attention4', 'hybrid4', 'idealObserver']#, 'attention3', 'hybrid3']
#manipulatedVariables['attentionType'] = ['preAttentionMem0.65', 'preAttentionMem0.25', 'preAttentionPre0.5', 'preAttentionPre4.5', 'preAttention']
manipulatedVariables['measure'] = ['attentionNumber']
#manipulatedVariables['measure'] = ['identity']
manipulatedVariables['chasingSubtlety'] = [3.3]
manipulatedVariables['CForStateWidening'] = [2]
#manipulatedVariables['minAttentionDistance'] = [8.5, 12.5]#[18.0, 40.0]
manipulatedVariables['minAttentionDistance'] = [10.0]#[5.0, 10.0, 20.0, 40.0]
manipulatedVariables['rangeAttention'] = [10.0]#[5.0, 10.0, 20.0, 40.0]#, 6.2, 6.3]
manipulatedVariables['cBase'] = [44]
manipulatedVariables['numTrees'] = [1]
manipulatedVariables['numSimulationTimes'] = [1]
manipulatedVariables['actionRatio'] = [0.2]
manipulatedVariables['burnTime'] = [0]
productedValues = it.product(*[[(key, value) for value in values] for key, values in manipulatedVariables.items()])
parametersAllCondtion = [dict(list(specificValueParameter)) for specificValueParameter in productedValues]
DIRNAME = os.path.dirname(__file__)
trajectoryDirectory = os.path.join(DIRNAME, '..', 'data', 'mcts',
'trajectories')
if not os.path.exists(trajectoryDirectory):
os.makedirs(trajectoryDirectory)
measurementEscapeExtension = '.csv'
getCSVSavePathByCondition = lambda condition: tsl.GetSavePath(trajectoryDirectory, measurementEscapeExtension, condition)
#columnNames = [500.0, 11.0, 3.3, 1.83, 0.92, 0.31, 0.001]
columnNames = list(range(250))
readcsv = Readcsv(getCSVSavePathByCondition, columnNames)
precisionToSubtletyDict={500.0:0, 50.0:5, 11.0:30, 3.3:60, 1.83:90, 0.92:120, 0.31:150, 0.001: 180}
levelNames = list(manipulatedVariables.keys())
levelValues = list(manipulatedVariables.values())
modelIndex = pd.MultiIndex.from_product(levelValues, names=levelNames)
toSplitFrame = pd.DataFrame(index = modelIndex)
modelResultDf = toSplitFrame.groupby(levelNames).apply(readcsv)
toDropLevels = ['alphaForStateWidening', 'CForStateWidening', 'cBase', 'numTrees', 'numSimulationTimes', 'actionRatio', 'burnTime', 'measure', 'chasingSubtlety']
modelResultDf.index = modelResultDf.index.droplevel(toDropLevels)
fig = plt.figure()
numColumns = len(manipulatedVariables['minAttentionDistance'])
numRows = len(manipulatedVariables['rangeAttention'])
plotCounter = 1
for key, group in modelResultDf.groupby(['rangeAttention', 'minAttentionDistance']):
#columnNamesAsSubtlety = [precisionToSubtletyDict[precision] for precision in group.columns]
#group.columns = columnNamesAsSubtlety
group = group.stack()
group.index.names = ['attentionType', 'minAttentionDistance', 'rangeAttention', 'time']
group.index = group.index.droplevel(['minAttentionDistance', 'rangeAttention'])
group = group.to_frame()
group.columns = ['model']
axForDraw = fig.add_subplot(numRows, numColumns, plotCounter)
if (plotCounter) % max(numColumns, 2) == 1:
axForDraw.set_ylabel(str(key[0]))
if plotCounter <= numColumns:
axForDraw.set_title(str(key[1]))
for attentionType, grp in group.groupby('attentionType'):
grp.index = grp.index.droplevel('attentionType')
#if str(attentionType) == manipulatedVariables['attentionType'][-1]:
# grp['human'] = [0.6, 0.37, 0.24]
# grp['human'] = [0.6, 0.48, 0.37, 0.25, 0.24, 0.42, 0.51]
# grp.plot.line(ax = axForDraw, y = 'human', label = 'human', ylim = (0, 0.7), marker = 'o', rot = 0 )
grp.plot.line(ax = axForDraw, y = 'model', label = str(attentionType), ylim = (0, 4.1), marker = 'o', ms = 3, rot = 0 )
plotCounter = plotCounter + 1
plt.suptitle('Measurement = Perception Rate')
#plt.suptitle('Measurement = Action Deviation')
#plt.suptitle('Measurement = Velocity Diff')
#plt.suptitle('Measurement = Escape rate')
#fig.text(x = 0.5, y = 0.92, s = 'Min Attention Distance', ha = 'center', va = 'center')
fig.text(x = 0.05, y = 0.5, s = 'Attention Range', ha = 'center', va = 'center', rotation=90)
fig.text(x = 0.05, y = 0.5, s = 'Number of Simulations', ha = 'center', va = 'center', rotation=90)
plt.show()
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
489bda8d4a39af6af95efc8ecc8a858a81b01e02 | e9d7689655887232b652ef369c7eaf3a1ef06955 | /In vivo Multi/intan_test_optotag.py | 74fa683bcf65aa8362441996dda07acdf632d494 | [] | no_license | Gilles-D/main | 81ac13cdb1614eb0c82afb3d0e847a30b78cad30 | f3714d2cbe4aae22ab36f4f94c94067159270820 | refs/heads/master | 2023-08-31T06:20:48.554237 | 2023-08-30T20:33:27 | 2023-08-30T20:33:27 | 222,518,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,900 | py | # -*- coding: utf-8 -*-
#! /bin/env python
"""
Created on Tue Feb 28 16:06:17 2023
@author: Gilles.DELBECQ
"""
import sys, struct, math, os, time
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as sp
from intanutil.read_header import read_header
from intanutil.get_bytes_per_data_block import get_bytes_per_data_block
from intanutil.read_one_data_block import read_one_data_block
from intanutil.notch_filter import notch_filter
from intanutil.data_to_result import data_to_result
import pandas as pd
def read_data(filename):
"""Reads Intan Technologies RHD2000 data file generated by evaluation board GUI.
Data are returned in a dictionary, for future extensibility.
"""
from intanutil.read_header import read_header
from intanutil.get_bytes_per_data_block import get_bytes_per_data_block
from intanutil.read_one_data_block import read_one_data_block
from intanutil.notch_filter import notch_filter
from intanutil.data_to_result import data_to_result
tic = time.time()
fid = open(filename, 'rb')
filesize = os.path.getsize(filename)
header = read_header(fid)
print('Found {} amplifier channel{}.'.format(header['num_amplifier_channels'], plural(header['num_amplifier_channels'])))
print('Found {} auxiliary input channel{}.'.format(header['num_aux_input_channels'], plural(header['num_aux_input_channels'])))
print('Found {} supply voltage channel{}.'.format(header['num_supply_voltage_channels'], plural(header['num_supply_voltage_channels'])))
print('Found {} board ADC channel{}.'.format(header['num_board_adc_channels'], plural(header['num_board_adc_channels'])))
print('Found {} board digital input channel{}.'.format(header['num_board_dig_in_channels'], plural(header['num_board_dig_in_channels'])))
print('Found {} board digital output channel{}.'.format(header['num_board_dig_out_channels'], plural(header['num_board_dig_out_channels'])))
print('Found {} temperature sensors channel{}.'.format(header['num_temp_sensor_channels'], plural(header['num_temp_sensor_channels'])))
print('')
# Determine how many samples the data file contains.
bytes_per_block = get_bytes_per_data_block(header)
# How many data blocks remain in this file?
data_present = False
bytes_remaining = filesize - fid.tell()
if bytes_remaining > 0:
data_present = True
if bytes_remaining % bytes_per_block != 0:
raise Exception('Something is wrong with file size : should have a whole number of data blocks')
num_data_blocks = int(bytes_remaining / bytes_per_block)
num_amplifier_samples = header['num_samples_per_data_block'] * num_data_blocks
num_aux_input_samples = int((header['num_samples_per_data_block'] / 4) * num_data_blocks)
num_supply_voltage_samples = 1 * num_data_blocks
num_board_adc_samples = header['num_samples_per_data_block'] * num_data_blocks
num_board_dig_in_samples = header['num_samples_per_data_block'] * num_data_blocks
num_board_dig_out_samples = header['num_samples_per_data_block'] * num_data_blocks
record_time = num_amplifier_samples / header['sample_rate']
if data_present:
print('File contains {:0.3f} seconds of data. Amplifiers were sampled at {:0.2f} kS/s.'.format(record_time, header['sample_rate'] / 1000))
else:
print('Header file contains no data. Amplifiers were sampled at {:0.2f} kS/s.'.format(header['sample_rate'] / 1000))
if data_present:
# Pre-allocate memory for data.
print('')
print('Allocating memory for data...')
data = {}
if (header['version']['major'] == 1 and header['version']['minor'] >= 2) or (header['version']['major'] > 1):
data['t_amplifier'] = np.zeros(num_amplifier_samples, dtype=np.int_)
else:
data['t_amplifier'] = np.zeros(num_amplifier_samples, dtype=np.uint)
data['amplifier_data'] = np.zeros([header['num_amplifier_channels'], num_amplifier_samples], dtype=np.uint)
data['aux_input_data'] = np.zeros([header['num_aux_input_channels'], num_aux_input_samples], dtype=np.uint)
data['supply_voltage_data'] = np.zeros([header['num_supply_voltage_channels'], num_supply_voltage_samples], dtype=np.uint)
data['temp_sensor_data'] = np.zeros([header['num_temp_sensor_channels'], num_supply_voltage_samples], dtype=np.uint)
data['board_adc_data'] = np.zeros([header['num_board_adc_channels'], num_board_adc_samples], dtype=np.uint)
# by default, this script interprets digital events (digital inputs and outputs) as booleans
# if unsigned int values are preferred(0 for False, 1 for True), replace the 'dtype=np.bool_' argument with 'dtype=np.uint' as shown
# the commented line below illustrates this for digital input data; the same can be done for digital out
#data['board_dig_in_data'] = np.zeros([header['num_board_dig_in_channels'], num_board_dig_in_samples], dtype=np.uint)
data['board_dig_in_data'] = np.zeros([header['num_board_dig_in_channels'], num_board_dig_in_samples], dtype=np.bool_)
data['board_dig_in_raw'] = np.zeros(num_board_dig_in_samples, dtype=np.uint)
data['board_dig_out_data'] = np.zeros([header['num_board_dig_out_channels'], num_board_dig_out_samples], dtype=np.bool_)
data['board_dig_out_raw'] = np.zeros(num_board_dig_out_samples, dtype=np.uint)
# Read sampled data from file.
print('Reading data from file...')
# Initialize indices used in looping
indices = {}
indices['amplifier'] = 0
indices['aux_input'] = 0
indices['supply_voltage'] = 0
indices['board_adc'] = 0
indices['board_dig_in'] = 0
indices['board_dig_out'] = 0
print_increment = 10
percent_done = print_increment
for i in range(num_data_blocks):
read_one_data_block(data, header, indices, fid)
# Increment indices
indices['amplifier'] += header['num_samples_per_data_block']
indices['aux_input'] += int(header['num_samples_per_data_block'] / 4)
indices['supply_voltage'] += 1
indices['board_adc'] += header['num_samples_per_data_block']
indices['board_dig_in'] += header['num_samples_per_data_block']
indices['board_dig_out'] += header['num_samples_per_data_block']
fraction_done = 100 * (1.0 * i / num_data_blocks)
if fraction_done >= percent_done:
print('{}% done...'.format(percent_done))
percent_done = percent_done + print_increment
# Make sure we have read exactly the right amount of data.
bytes_remaining = filesize - fid.tell()
if bytes_remaining != 0: raise Exception('Error: End of file not reached.')
# Close data file.
fid.close()
if (data_present):
print('Parsing data...')
# Extract digital input channels to separate variables.
for i in range(header['num_board_dig_in_channels']):
data['board_dig_in_data'][i, :] = np.not_equal(np.bitwise_and(data['board_dig_in_raw'], (1 << header['board_dig_in_channels'][i]['native_order'])), 0)
# Extract digital output channels to separate variables.
for i in range(header['num_board_dig_out_channels']):
data['board_dig_out_data'][i, :] = np.not_equal(np.bitwise_and(data['board_dig_out_raw'], (1 << header['board_dig_out_channels'][i]['native_order'])), 0)
# Scale voltage levels appropriately.
data['amplifier_data'] = np.multiply(0.195, (data['amplifier_data'].astype(np.int32) - 32768)) # units = microvolts
data['aux_input_data'] = np.multiply(37.4e-6, data['aux_input_data']) # units = volts
data['supply_voltage_data'] = np.multiply(74.8e-6, data['supply_voltage_data']) # units = volts
if header['eval_board_mode'] == 1:
data['board_adc_data'] = np.multiply(152.59e-6, (data['board_adc_data'].astype(np.int32) - 32768)) # units = volts
elif header['eval_board_mode'] == 13:
data['board_adc_data'] = np.multiply(312.5e-6, (data['board_adc_data'].astype(np.int32) - 32768)) # units = volts
else:
data['board_adc_data'] = np.multiply(50.354e-6, data['board_adc_data']) # units = volts
data['temp_sensor_data'] = np.multiply(0.01, data['temp_sensor_data']) # units = deg C
# Check for gaps in timestamps.
num_gaps = np.sum(np.not_equal(data['t_amplifier'][1:]-data['t_amplifier'][:-1], 1))
if num_gaps == 0:
print('No missing timestamps in data.')
else:
print('Warning: {0} gaps in timestamp data found. Time scale will not be uniform!'.format(num_gaps))
# Scale time steps (units = seconds).
data['t_amplifier'] = data['t_amplifier'] / header['sample_rate']
data['t_aux_input'] = data['t_amplifier'][range(0, len(data['t_amplifier']), 4)]
data['t_supply_voltage'] = data['t_amplifier'][range(0, len(data['t_amplifier']), header['num_samples_per_data_block'])]
data['t_board_adc'] = data['t_amplifier']
data['t_dig'] = data['t_amplifier']
data['t_temp_sensor'] = data['t_supply_voltage']
# If the software notch filter was selected during the recording, apply the
# same notch filter to amplifier data here.
if header['notch_filter_frequency'] > 0 and header['version']['major'] < 3:
print('Applying notch filter...')
print_increment = 10
percent_done = print_increment
for i in range(header['num_amplifier_channels']):
data['amplifier_data'][i,:] = notch_filter(data['amplifier_data'][i,:], header['sample_rate'], header['notch_filter_frequency'], 10)
fraction_done = 100 * (i / header['num_amplifier_channels'])
if fraction_done >= percent_done:
print('{}% done...'.format(percent_done))
percent_done += print_increment
else:
data = [];
# Move variables to result struct.
result = data_to_result(header, data, data_present)
print('Done! Elapsed time: {0:0.1f} seconds'.format(time.time() - tic))
return result
def plural(n):
"""Utility function to optionally pluralize words based on the value of n.
"""
if n == 1:
return ''
else:
return 's'
def filter_signal(signal, order=4, sample_rate=20000, freq_low=300, freq_high=3000, axis=0):
"""
From Théo G.
Filtering with scipy
inputs raw signal (array)
returns filtered signal (array)
"""
import scipy.signal
Wn = [freq_low / (sample_rate / 2), freq_high / (sample_rate / 2)]
sos_coeff = scipy.signal.iirfilter(order, Wn, btype="band", ftype="butter", output="sos")
filtered_signal = scipy.signal.sosfiltfilt(sos_coeff, signal, axis=axis)
return filtered_signal
"""
Load intan file
"""
path=r"//equipe2-nas1/Gilles.DELBECQ/Data/ePhy/Février2023/Test_Gustave/raw/raw intan/Test_Gustave_16_03_230316_165717/merged.rhd"
savepath=r'\\equipe2-nas1\Gilles.DELBECQ\Data\ePhy\Février2023\Test_Gustave\figures_optotag'
save_prefix='16_03_optotag_5ms_2Hz'
reader=read_data(path)
sampling_rate = reader['frequency_parameters']['amplifier_sample_rate']
time_vector=reader['t_amplifier']
signal=reader['amplifier_data']
dig_inputs=reader['board_dig_in_data'][1]
"""
Get the indexes of the stim onsets (rising edges of the digital input)
"""
stim_idx=[]
for i in range(len(dig_inputs)):
if dig_inputs[i] and (i==0 or not dig_inputs[i-1]):
stim_idx.append(i)
"""
Filtering
"""
filtered_signals=[]
for i in range(len(signal)):
filtered_signal=filter_signal(signal[int(i),:])
filtered_signals.append(filtered_signal)
# plt.figure()
# # plt.plot(time_vector,signal[0,:])
# plt.plot(time_vector,filtered_signal)
# plt.title(rf'channel {int(i)}')
filtered_signals = np.array(filtered_signals)
median = np.median(filtered_signals, axis=0)#compute median on all
cmr_signals = filtered_signals-median #compute common ref removal median on all
"""
Spike detection
"""
# Noise parameters
std_threshold = 4 #Times the std
noise_window = 2 #window for the noise calculation in sec
distance = 50 # distance between 2 spikes
waveform_window=10 # in ms
thresholds,spikes_list,spikes_list_y,spikes_times_list,wfs,waveforms=[],[],[],[],[],[]
for signal in cmr_signals:
# Threshold calculation
noise = signal[0:int(noise_window*sampling_rate)] #noise window taken from individual channel signal
threshold = np.median(noise)+std_threshold*np.std(noise) #threshold calculation for the channel
thresholds.append(threshold) #append it to the list regrouping threshold for each channel
#Detect the spike indexes
spike_idx, _ = sp.find_peaks(-signal,height=threshold,distance=distance)
#Convert to spike times
spike_times = spike_idx*1./sampling_rate
#Get spikes peak
spike_y = signal[spike_idx]
#Append spikes times to the list of all channels spikes
spikes_list.append(spike_idx)
spikes_times_list.append(spike_times)
spikes_list_y.append(spike_y)
# if Waveforms == True :
# wfs = extract_spike_waveform(signal,spike_idx)
# waveforms.append(wfs)
for index,i in np.ndenumerate(waveforms):
plt.figure()
# plt.title(rf'waveform_chan_{selected_chan[index[0]]}')
time_axis=np.array(range(int(-(waveform_window/1000)*20000/2),int(waveform_window/1000*20000/2)))/20000*1000
for j in i:
plt.plot(j*1000)
# plt.savefig(rf'{save_path}\waveform_chan_{selected_chan[index[0]]}.svg')
"""
Raster plot
"""
for Channel_to_analyze in range(16):
event_indices=spikes_list[Channel_to_analyze]
signal=cmr_signals[Channel_to_analyze]
    # Window duration before and after each stimulation (in number of points)
pre_stim_window_size = int(10*sampling_rate/1000)
post_stim_window_size = int(50*sampling_rate/1000)
    # Duration of each window (in number of points)
window_size = int(pre_stim_window_size + post_stim_window_size)
    # Create the 2D array to store the signal windows
num_stimulations = len(stim_idx)
num_events = len(event_indices)
signal_windows = np.zeros((num_events, num_stimulations, window_size))
num_windows = num_stimulations
event_times = spikes_times_list
stim_times = np.array(stim_idx)/sampling_rate*1000
plt.figure()
plt.plot(time_vector[500:100000],signal[500:100000])
# for spike in event_indices/sampling_rate:
# plt.axvline(spike,color='red')
"""
#Raster plot global
plt.figure()
plt.eventplot(event_times, colors='k')
"""
    # Peak indices
peak_indices = spikes_list[Channel_to_analyze]
    # Total number of windows
num_windows = len(stim_idx)
    # Create a matrix of zeros to store the signal windows
signal_windows = np.zeros((num_windows, pre_stim_window_size + post_stim_window_size))
    # Fill the matrix with the signal windows
for i, stim_index in enumerate(stim_idx):
signal_windows[i, :] = signal[stim_index - pre_stim_window_size : stim_index + post_stim_window_size]
    # Create a matrix of zeros to store the events
event_matrix = np.zeros_like(signal_windows)
    # Find the peak indices within each signal window
for i in range(num_windows):
peak_indices_in_window = peak_indices[(peak_indices >= stim_idx[i] - pre_stim_window_size) & (peak_indices < stim_idx[i] + post_stim_window_size)]
if len(peak_indices_in_window) >0:
event_matrix[i, peak_indices_in_window - (stim_idx[i] - pre_stim_window_size)] = 1
from itertools import groupby
    # array of (row, column) indices where events occurred
arr = np.argwhere(event_matrix == 1)
    # Group the second-column values by the first-column values
grouped_array = []
for i in np.unique(arr[:, 0]):
grouped_array.append(list(arr[arr[:, 0] == i, 1]))
    # Create a new 2D array with the grouped second-column values
new_array = np.array([np.array(x) for x in grouped_array])
    # Display the raster plot
plt.figure()
plt.eventplot(np.array(new_array), colors='k')
    # Add a vertical line for each stimulation
# plt.axvline(200, color='r', linestyle='--')
plt.axvspan(200,300,color='blue',alpha=0.2)
    # Add a label for the y axis
plt.ylabel('Stimulation')
plt.title(rf'Channel : {Channel_to_analyze}')
plt.savefig(rf"{savepath}/{save_prefix}_Channel_{Channel_to_analyze}.png")
test= pd.DataFrame(grouped_array)
test.to_excel(rf"{savepath}/{save_prefix}_Channel_{Channel_to_analyze}.xlsx")
num_events = int(np.sum(event_matrix))
event_times = np.where(event_matrix == 1)
# events_to_plot=event_times[1]
events_to_plot=np.where((event_times[1] >= 195) & (event_times[1] <= 205), np.nan, event_times[1])
events_to_plot=np.where((events_to_plot >= 295) & (events_to_plot <= 305), np.nan, events_to_plot)
bins_in_ms=0.5
plt.figure()
plt.hist(events_to_plot, bins=int(1200/bins_in_ms/sampling_rate*1000))
plt.axvspan(200,300,color='blue',alpha=0.2)
plt.xlim(0,500)
plt.title(rf'Channel {Channel_to_analyze} bins = {bins_in_ms}ms')
plt.savefig(rf"{savepath}/{save_prefix}_Hist_Channel_{Channel_to_analyze}_no_artefact.svg")
plt.savefig(rf"{savepath}/{save_prefix}_Hist_Channel_{Channel_to_analyze}_no_artefact.png")
| [
"[email protected]"
] | |
23b9d302ab52d35d1cb3fdd270c785503d99aacb | b3b68efa404a7034f0d5a1c10b281ef721f8321a | /Scripts/simulation/interactions/base/cheat_interaction.py | 90cea49d59271ca03aa011bf3d7fd934e0685f7e | [
"Apache-2.0"
] | permissive | velocist/TS4CheatsInfo | 62195f3333076c148b2a59f926c9fb5202f1c6fb | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | refs/heads/main | 2023-03-08T01:57:39.879485 | 2021-02-13T21:27:38 | 2021-02-13T21:27:38 | 337,543,310 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,778 | py | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\interactions\base\cheat_interaction.py
# Compiled at: 2020-10-09 00:03:45
# Size of source mod 2**32: 6769 bytes
from date_and_time import create_time_span
from distributor.shared_messages import IconInfoData
from interactions.aop import AffordanceObjectPair
from interactions.base.immediate_interaction import ImmediateSuperInteraction
from interactions.base.picker_interaction import PickerSuperInteraction
from interactions.interaction_finisher import FinishingType
from scheduler import AlarmData
from sims4.tuning.tunable import Tunable
from sims4.utils import flexmethod
from singletons import DEFAULT
from situations.service_npcs.service_npc_manager import ServiceNpcSituationCreationParams
from statistics.skill import Skill
from ui.ui_dialog_generic import UiDialogTextInputOkCancel
from ui.ui_dialog_picker import ObjectPickerRow
import services, sims4
TEXT_INPUT_SKILL_LEVEL = 'skill_level'
class CheatSetSkillSuperInteraction(PickerSuperInteraction):
INSTANCE_TUNABLES = {'skill_level_dialog':UiDialogTextInputOkCancel.TunableFactory(description="\n The dialog that is displayed (and asks for the user to enter\n the skill level).\n \n An additional token is passed in: the selected stat's name. \n ",
text_inputs=(
TEXT_INPUT_SKILL_LEVEL,)),
'set_almost_level_up':Tunable(description='\n True means this interaction will set the skill to the value\n that almost level up the skill level passed in. False means it\n will set the skill directly to the level',
tunable_type=bool,
default=False)}
def _run_interaction_gen(self, timeline):
self._show_picker_dialog((self.target), target_sim=(self.target))
return True
if False:
yield None
@flexmethod
def picker_rows_gen(cls, inst, target, context, **kwargs):
skill_manager = services.get_instance_manager(sims4.resources.Types.STATISTIC)
for skill in skill_manager.get_ordered_types(only_subclasses_of=Skill):
if not skill.can_add(target):
continue
row = ObjectPickerRow(name=(skill.stat_name), icon=(skill.icon),
row_description=(skill.skill_description(context.sim)),
tag=skill)
yield row
def on_choice_selected(self, choice_tag, **kwargs):
if choice_tag is None:
return
skill = choice_tag
sim = self.target
def on_response(level_dialog):
if not level_dialog.accepted:
self.cancel((FinishingType.DIALOG), cancel_reason_msg='Set Skill level dialog timed out from client.')
return
level = level_dialog.text_input_responses.get(TEXT_INPUT_SKILL_LEVEL)
if not level:
self.cancel((FinishingType.DIALOG), cancel_reason_msg='Empty skill level returned from client.')
return
try:
level = int(level)
except:
self.cancel((FinishingType.DIALOG), cancel_reason_msg='Invalid skill level returned from client.')
return
else:
tracker = sim.get_tracker(skill)
stat = tracker.get_statistic(skill, add=True)
if stat is None:
self.cancel((FinishingType.FAILED_TESTS), cancel_reason_msg='Unable to add Skill due to entitlement restriction.')
return
if self.set_almost_level_up:
skill_value = stat.get_skill_value_for_level(level) - 50
tracker.set_value(skill, skill_value)
else:
tracker.set_user_value(skill, level)
dialog = self.skill_level_dialog(sim, self.get_resolver())
dialog.show_dialog(on_response=on_response, additional_tokens=(skill.stat_name,), icon_override=IconInfoData(icon_resource=(skill.icon)))
class CheatRequestServiceNpcSuperInteraction(ImmediateSuperInteraction):
def __init__(self, aop, context, service_tuning=None, **kwargs):
(super().__init__)(aop, context, **kwargs)
self._service_tuning = service_tuning
def _run_interaction_gen(self, timeline):
sim = self.sim
end_time = services.time_service().sim_now + create_time_span(hours=8)
fake_alarm_data = AlarmData(None, end_time, None, False)
default_user_specified_data_id = self._service_tuning.get_default_user_specified_data_id()
creation_data = ServiceNpcSituationCreationParams((sim.household), (self._service_tuning), user_specified_data_id=default_user_specified_data_id, is_recurring=False)
services.current_zone().service_npc_service._send_service_npc(None, fake_alarm_data, creation_data)
return True
if False:
yield None
@flexmethod
def _get_name(cls, inst, target=DEFAULT, context=DEFAULT, service_tuning=None, outfit_index=None, **interaction_parameters):
if inst is not None:
            return inst._service_tuning.display_name
return service_tuning.display_name
@classmethod
def potential_interactions(cls, target, context, **kwargs):
for service_tuning_type in services.service_npc_manager().types:
service_tuning = services.service_npc_manager().get(service_tuning_type)
yield AffordanceObjectPair(cls, target, cls, None, service_tuning=service_tuning, **kwargs) | [
"[email protected]"
] | |
8b1c66b8b3b792914470aa94f0f354eefa4daa9e | b033000ca119f3fd9feaa64cebd6e60b1d85839a | /src/dotpaths/cfc_utils.py | 19dcca11906c6e1f21482644b4ab61f30b8bc87e | [
"MIT"
] | permissive | KamasamaK/sublimetext-cfml | 93c8e6858d08b3b8e85c1d23ef77587f3ce06f63 | 3730fca4679a5294234dfb3207eb41a4b313b87e | refs/heads/master | 2021-09-03T07:13:20.218103 | 2017-11-30T17:57:30 | 2017-11-30T17:57:30 | 97,273,191 | 0 | 1 | null | 2017-07-14T21:13:24 | 2017-07-14T21:13:24 | null | UTF-8 | Python | false | false | 10,298 | py | import re
import sublime
from os.path import dirname
from ..component_index import component_index
from .. import utils
component_name_re = r"""
["']component["']\s*,\s*(?:class\s*=\s*)?["']([$_\w.]+)["']
"""
component_name_re = re.compile(component_name_re, re.I | re.X)
cfc_dot_path_re = re.compile(r"^[\w\-_][\w.\-_]+$")
component_selector = "meta.function-call.support.createcomponent.cfml"
constructor_selector = "meta.instance.constructor.cfml"
def get_component_name(source_string):
cn = re.search(component_name_re, source_string)
if cn:
return cn.group(1)
return None
def is_cfc_dot_path(source_string):
dp = re.search(cfc_dot_path_re, source_string)
if dp:
return True
return False
def is_possible_cfc_instance(dot_context):
if dot_context[0].is_function:
return False
if len(dot_context) == 1:
return True
if len(dot_context) == 2 and dot_context[1].name == "variables":
return True
return False
def get_folder_cfc_path(cfml_view, cfc_path):
folder_mapping = get_folder_mapping(cfml_view)
if folder_mapping:
folder_cfc_path = folder_mapping
if len(cfc_path) > 0:
folder_cfc_path += "." + cfc_path
return folder_cfc_path
return None
def get_folder_mapping(cfml_view):
"""
Checks current file to see if it is inside of a mapped folder
and returns the dot path to the file's containing folder.
For example, if 'C:/projects/project/model/' is mapped to '/model',
and the current file is 'C:/projects/project/model/services/myservice.cfc'
then this function will return 'model.services'
"""
if not cfml_view.file_path:
return None
mappings = component_index.get_project_data(cfml_view.project_name).get("mappings", [])
for mapping in mappings:
normalized_mapping = utils.normalize_mapping(mapping, dirname(cfml_view.project_name))
if not cfml_view.file_path.startswith(normalized_mapping["path"]):
continue
mapped_path = normalized_mapping["mapping"] + cfml_view.file_path.replace(normalized_mapping["path"], "")
path_parts = mapped_path.split("/")[1:-1]
dot_path = ".".join(path_parts)
if len(dot_path) > 0:
return dot_path
return None
def find_cfc(cfml_view, position):
"""
returns cfc_path, file_path, dot_path, function_name, regions
"""
if cfml_view.view.match_selector(position, "entity.other.inherited-class.cfml"):
r = utils.get_scope_region_containing_point(cfml_view.view, position, "entity.other.inherited-class.cfml")
cfc_path = cfml_view.view.substr(r)
file_path, dot_path = get_cfc_file_info(cfml_view, cfc_path)
return cfc_path, file_path, dot_path, None, [r]
if cfml_view.view.match_selector(position, component_selector):
r = utils.get_scope_region_containing_point(cfml_view.view, position, component_selector)
cfc_path = get_component_name(cfml_view.view.substr(r))
file_path, dot_path = get_cfc_file_info(cfml_view, cfc_path)
return cfc_path, file_path, dot_path, None, [r]
if cfml_view.view.match_selector(position, constructor_selector):
r = utils.get_scope_region_containing_point(cfml_view.view, position, constructor_selector)
cfc_path = cfml_view.view.substr(r)[4:].split("(")[0]
file_path, dot_path = get_cfc_file_info(cfml_view, cfc_path)
return cfc_path, file_path, dot_path, None, [r]
if cfml_view.view.match_selector(position, "string.quoted.single.cfml, string.quoted.double.cfml"):
r = cfml_view.view.extract_scope(position)
cfc_path = cfml_view.view.substr(r)
if cfc_path[0] in ["\"", "'"]:
cfc_path = cfc_path[1:-1]
if is_cfc_dot_path(cfc_path):
file_path, dot_path = get_cfc_file_info(cfml_view, cfc_path)
if file_path:
return cfc_path, file_path, dot_path, None, [r]
if cfml_view.view.match_selector(position, "meta.function-call.method"):
function_name, function_name_region, function_args_region = cfml_view.get_function_call(position)
funct_region = sublime.Region(function_name_region.begin(), function_args_region.end())
if cfml_view.view.substr(function_name_region.begin() - 1) == ".":
dot_context = cfml_view.get_dot_context(function_name_region.begin() - 1)
if len(dot_context):
if cfml_view.view.match_selector(dot_context[-1].name_region.begin(), "meta.instance.constructor.cfml"):
r = utils.get_scope_region_containing_point(cfml_view.view, dot_context[-1].name_region.begin(), "meta.instance.constructor.cfml")
cfc_path = cfml_view.view.substr(r)[4:].split("(")[0]
file_path, dot_path = get_cfc_file_info(cfml_view, cfc_path)
return cfc_path, file_path, dot_path, function_name, [r, funct_region]
if cfml_view.view.match_selector(dot_context[-1].name_region.begin(), "meta.function-call.support.createcomponent.cfml"):
r = utils.get_scope_region_containing_point(cfml_view.view, dot_context[-1].name_region.begin(), "meta.function-call.support.createcomponent.cfml")
cfc_path = get_component_name(cfml_view.view.substr(r))
file_path, dot_path = get_cfc_file_info(cfml_view, cfc_path)
return cfc_path, file_path, dot_path, function_name, [r, funct_region]
if is_possible_cfc_instance(dot_context):
cfc_path, file_path, dot_path, temp, regions = find_cfc_by_var_assignment(cfml_view, position, dot_context[0].name)
if regions:
regions.append(dot_context[0].name_region)
regions.append(funct_region)
return cfc_path, file_path, dot_path, function_name, regions
if cfml_view.view.match_selector(position, "variable.other, meta.property.cfml"):
var_region = cfml_view.view.word(position)
dot_context = cfml_view.get_dot_context(var_region.begin() - 1)
if (len(dot_context) == 0
or (len(dot_context) == 1 and dot_context[0].name == "variables")):
cfc_path, file_path, dot_path, temp, regions = find_cfc_by_var_assignment(cfml_view, position, cfml_view.view.substr(var_region).lower())
if regions:
regions.append(var_region)
return cfc_path, file_path, dot_path, temp, regions
return None, None, None, None, None
def find_cfc_by_var_assignment(cfml_view, position, var_name):
empty_tuple = None, None, None, None, None
if not utils.get_setting("instantiated_component_completions"):
return empty_tuple
var_assignment_region = cfml_view.find_variable_assignment(position, var_name)
if not var_assignment_region:
return empty_tuple
s = component_selector + "," + constructor_selector
if not cfml_view.view.match_selector(var_assignment_region.end(), s):
return empty_tuple
# try to determine component
cfc_path, file_path, dot_path, function_name, regions = find_cfc(cfml_view, var_assignment_region.end())
if cfc_path is None:
return empty_tuple
regions.append(var_assignment_region)
next_pt = utils.get_next_character(cfml_view.view, regions[0].end())
if cfml_view.view.substr(next_pt) != ".":
return cfc_path, file_path, dot_path, function_name, regions
# if next char is a `.`, try to determine if what follows is init method
if not cfml_view.view.match_selector(next_pt + 1, "meta.function-call.method"):
return empty_tuple
function_name, function_name_region, function_args_region = cfml_view.get_function_call(next_pt + 1)
if not function_name:
return empty_tuple
metadata = component_index.get_extended_metadata_by_file_path(cfml_view.project_name, file_path)
if metadata["initmethod"] is None:
if function_name != 'init':
return empty_tuple
else:
if metadata["initmethod"] != function_name:
return empty_tuple
next_pt = utils.get_next_character(cfml_view.view, function_args_region.end())
if cfml_view.view.substr(next_pt) == ".":
return empty_tuple
return cfc_path, file_path, dot_path, function_name, regions
def get_cfc_file_info(cfml_view, cfc_path):
if not cfc_path:
return None, None
cfc_dot_path = component_index.get_file_path_by_dot_path(cfml_view.project_name, cfc_path.lower())
if not cfc_dot_path:
folder_cfc_path = get_folder_cfc_path(cfml_view, cfc_path)
if folder_cfc_path:
cfc_dot_path = component_index.get_file_path_by_dot_path(cfml_view.project_name, folder_cfc_path.lower())
if cfc_dot_path:
return cfc_dot_path["file_path"], cfc_dot_path["dot_path"]
# at this point, we know the cfc dot path is not one that is indexed in the model index
# but we might be able to find it via mappings
if cfml_view.project_name:
mappings = cfml_view.view.window().project_data().get("mappings", [])
mapped_cfc_path = "/" + cfc_path.lower().replace(".", "/") + ".cfc"
for mapping in mappings:
normalized_mapping = utils.normalize_mapping(mapping, cfml_view.project_name)
if mapped_cfc_path.lower().startswith(normalized_mapping["mapping"]):
relative_path = mapped_cfc_path.replace(normalized_mapping["mapping"], "")[1:]
relative_path, path_exists = utils.get_verified_path(normalized_mapping["path"], relative_path)
if path_exists:
full_file_path = normalized_mapping["path"] + "/" + relative_path
return full_file_path, None
# last option is to do a relative search from the directory of the current file
if cfml_view.file_path:
directory = "/".join(cfml_view.file_path.split("/")[:-1])
relative_path, path_exists = utils.get_verified_path(directory, cfc_path.lower().replace(".", "/") + ".cfc")
if path_exists:
full_file_path = directory + "/" + relative_path
return full_file_path, None
return None, None
| [
"[email protected]"
] | |
1a6d7326648830c51c9d6ddcfd52f90ae9a5f1ed | 0fccee4c738449f5e0a8f52ea5acabf51db0e910 | /genfragments/ThirteenTeV/PythiaChargedResonance_WG/PythiaChargedResonance_WGToLNuG_M4000_width0p01_13TeV-pythia8_cff.py | 45ca4f0a00d2ad1221666d6f875dca26a7287b5c | [] | no_license | cms-sw/genproductions | f308ffaf3586c19b29853db40e6d662e937940ff | dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4 | refs/heads/master | 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 | Python | UTF-8 | Python | false | false | 1,131 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
comEnergy = cms.double(13000.0),
filterEfficiency = cms.untracked.double(1),
maxEventsToPrint = cms.untracked.int32(0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
pythiaPylistVerbosity = cms.untracked.int32(0),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
"37:onMode = off",
"37:addChannel = 1 0.00001 101 24 22",
"37:onIfMatch = 24 22",
"37:m0 = 4000",
"37:doForceWidth = on",
"37:mWidth = 0.400000",
"24:onMode = off",
"24:onIfAny = 11 13 15",
"Higgs:useBSM = on",
"HiggsBSM:ffbar2H+- = on"),
parameterSets = cms.vstring(
"pythia8CommonSettings",
"pythia8CUEP8M1Settings",
"processParameters")
)
)
| [
"shubhanshu.chauhan.cern.ch"
] | shubhanshu.chauhan.cern.ch |
b538521c1a4e868a82ca658bfa9726213a686173 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /W7S25BPmjEMSzpnaB_4.py | 1710d798d37e7f63b51f71720d88fb6747d84732 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py |
def bonacci(sum_num, index):
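    # Returns the index-th term (1-based) of the sum_num-bonacci sequence,
    # where each term is the sum of the previous sum_num terms, seeded with
    # sum_num - 1 zeros followed by a single 1.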
list = []
for i in range(1, sum_num):
list.append(0)
list.append(1)
for i in range(sum_num , index ):
num = 0
for j in range(1, sum_num + 1):
num += list[i - j]
list.append(num)
return list[index - 1]
| [
"[email protected]"
] | |
cc2e37ddd71fb1e52708459d304b69b5718bc428 | 8d55d41a4f5c0b89331cac714c1525e9581d9720 | /PyCommon/modules/VirtualPhysics/setup_mac.py | df7c84427729204e8ca11d58188d86486b5080f3 | [
"Apache-2.0"
] | permissive | hpgit/HumanFoot | 8cf35ceeeb35a0371e03eaf19d6da58dc01487eb | f9a1a341b7c43747bddcd5584b8c98a0d1ac2973 | refs/heads/master | 2022-04-13T23:38:19.072203 | 2019-12-06T06:36:10 | 2019-12-06T06:36:10 | 41,348,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | from distutils.core import setup, Extension
import sys
py_major_ver = sys.version_info[0]
boost_lib = 'boost_python'
if py_major_ver == 3:
boost_lib = boost_lib + '3'
def moduleSetup(moduleName):
moduleToSetup = Extension(moduleName,
include_dirs = ['../usr/include/'],
#extra_compile_args=['-fopenmp'],
# extra_link_args=['-lgomp'],
libraries = [boost_lib, 'vpLib'],
library_dirs = ['../usr/lib'],
sources = ['pyV'+moduleName[1:]+'.cpp'])
setup (name = moduleName,
version = '0.1',
description = moduleName,
ext_modules = [moduleToSetup])
moduleSetup('vpWorld')
moduleSetup('vpBody')
| [
"[email protected]"
] | |
f4199947212a6889d65b9832b7341e5f36dafffd | 39405775f9ba7b9325a1348c8172f98ad24d442c | /user_portrait/user_portrait/cron/info_consume/propagate/propagate_time_weibo.py | 6e2575bd9086745a5444b63f5d897d7058f14b32 | [] | no_license | yuyuqi/info_consume | cfef72d697b929eb69244bd54b335cf46c4252ff | c984fb3b7235bae02cd5ba8f04f7ffb41ba0b910 | refs/heads/master | 2021-01-14T12:22:25.929097 | 2016-11-11T08:18:38 | 2016-11-11T08:18:38 | 66,696,804 | 0 | 0 | null | 2016-08-27T04:56:46 | 2016-08-27T04:56:46 | null | UTF-8 | Python | false | false | 834 | py | # -*- coding: utf-8 -*-
from config import db
class PropagateTimeWeibos(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
topic = db.Column(db.String(20))
end = db.Column(db.BigInteger(10, unsigned=True))
range = db.Column(db.BigInteger(10, unsigned=True))
mtype = db.Column(db.Integer(1, unsigned=True))
limit = db.Column(db.BigInteger(10, unsigned=True))
weibos = db.Column(db.Text) # weibos=[weibos]
def __init__(self, topic, end, range, mtype, limit, weibos):
self.topic = topic
self.end = end
self.range = range
self.mtype = mtype
self.limit = limit
self.weibos = weibos
@classmethod
def _name(cls):
return u'PropagateTimeWeibos'
if __name__ == '__main__':
db.create_all() | [
"[email protected]"
] | |
b5126f5c82e7deb25eaa06a3bf6f79162e98be49 | 9c8fdfa389eaaf2df4c8ba0e3072d94671b5a622 | /0547. Friend Circles.py | 0a8f07d64c078d5a21a44614caac046e77dc1720 | [] | no_license | aidardarmesh/leetcode2 | 41b64695afa850f9cc7847158abb6f2e8dc9abcd | 4cf03307c5caeccaa87ccce249322bd02397f489 | refs/heads/master | 2023-02-27T11:22:09.803298 | 2021-02-07T06:47:35 | 2021-02-07T06:47:35 | 264,491,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 758 | py | from typing import *
class Solution:
def findCircleNum(self, M: List[List[int]]) -> int:
n = len(M)
self.circles = n
uf = {i:i for i in range(n)}
def find(v):
if uf[v] == v:
return v
uf[v] = find(uf[v])
return uf[v]
def union(v, u):
v_root = find(v)
u_root = find(u)
if v_root != u_root:
self.circles -= 1
uf[v_root] = u_root
for i in range(n):
for j in range(n):
if M[i][j]:
if find(i) != find(j):
union(i,j)
return self.circles
| [
"[email protected]"
] | |
728690b3b498ad97e32534f2b46aa151a0ac28a4 | bee9d96912078d68877aa53e0c96537677ec3e6a | /peakpo/control/cakemakecontroller.py | 1003e9635181ea50fa417f7c3418c3b18cf82084 | [
"Apache-2.0"
] | permissive | SHDShim/PeakPo | ce0a637b6307787dd84fd3dcb3415e752d180c32 | 4c522e147e7715bceba218de58ee185cccd2055e | refs/heads/master | 2022-06-26T11:26:45.097828 | 2022-06-19T22:03:24 | 2022-06-19T22:03:24 | 94,345,216 | 17 | 3 | null | null | null | null | UTF-8 | Python | false | false | 573 | py | import os
from PyQt5 import QtWidgets
from utils import undo_button_press, dialog_savefile, writechi
class CakemakeController(object):
def __init__(self, model, widget):
self.model = model
self.widget = widget
def read_settings(self):
self.model.diff_img.set_calibration(self.model.poni)
self.model.diff_img.set_mask((self.widget.spinBox_MaskMin.value(),
self.widget.spinBox_MaskMax.value()))
def cook(self):
self.read_settings()
self.model.diff_img.integrate_to_cake()
| [
"[email protected]"
] | |
aa9336da4d4fa8ada35ebde970a9ccf696c296fb | b2826350dab8935de4c54092fac2090ec9b562bc | /prog_1d_hubbard.py | 1ba9a833239abe660c4ca0920ebd3f3c417466c4 | [
"MIT"
] | permissive | ryuikaneko/exactly_solvable_models | 73c8bea988279a3ea595d067986a131494cd0d2b | d6f7617ae979b2ca7cbb1e9c1c562fae780b6956 | refs/heads/master | 2022-01-29T10:12:43.789610 | 2022-01-12T02:20:31 | 2022-01-12T02:20:31 | 205,546,034 | 15 | 2 | null | null | null | null | UTF-8 | Python | false | false | 709 | py | #!/usr/bin/env python
from __future__ import print_function
import numpy as np
import scipy as scipy
import scipy.integrate as integrate
import scipy.special
if __name__ == "__main__":
Umax = 101
dU = 0.1
for intU in range(Umax):
U = dU*intU
# f[U_] = -4 Integrate[ BesselJ[0, x] BesselJ[1, x]/x/(1 + Exp[0.5 U*x]), {x, 0, Infinity}]
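        # In LaTeX form this is the Lieb-Wu ground-state energy per site of
        # the half-filled 1D Hubbard model (hopping t = 1):
        #   E(U) = -4 \int_0^\infty J_0(x) J_1(x) / ( x (1 + e^{U x / 2}) ) dx,
        # which tends to -4/pi as U -> 0 (checked at the end of the script).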
ene = integrate.quad(lambda x, c : \
-4.0 * scipy.special.j0(x) * scipy.special.j1(x) \
/ x / (1+np.exp(0.5*c*x)) \
, 0.0, np.inf, args=U \
, epsabs=1e-11, epsrel=1e-11, limit=10000)
print("{0:.16f} {1:.16f} {2:.16f}".format(U,ene[0],ene[1]))
print("# exact (4/pi):",0.0,4.0/np.pi)
| [
"[email protected]"
] | |
f75acb67e52cb8ec24de9fe04efc2181e8dae4b9 | 9d0195aa83cc594a8c61f334b90375961e62d4fe | /JTTest/SL7/CMSSW_10_2_15/src/dataRunA/nano1606.py | 224e1d24dc5734464eade6e5711ee8dbc5f0ffb1 | [] | no_license | rsk146/CMS | 4e49592fc64f6438051544c5de18598db36ed985 | 5f8dab8c59ae556598b9747b52b88205fffc4dbe | refs/heads/master | 2022-12-01T03:57:12.126113 | 2020-08-04T03:29:27 | 2020-08-04T03:29:27 | 284,863,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,293 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: nanoAOD_jetToolbox_cff -s NANO --data --eventcontent NANOAOD --datatier NANOAOD --no_exec --conditions 102X_dataRun2_Sep2018Rereco_v1 --era Run2_2018,run2_nanoAOD_102Xv1 --customise_commands=process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False))) --customise JMEAnalysis/JetToolbox/nanoAOD_jetToolbox_cff.nanoJTB_customizeMC --filein /users/h2/rsk146/JTTest/SL7/CMSSW_10_6_12/src/ttbarCutTest/dataReprocessing/0004A5E9-9F18-6B42-B31D-4206406CE423.root --fileout file:jetToolbox_nano_datatest.root
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('NANO',eras.Run2_2018,eras.run2_nanoAOD_102Xv1)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('PhysicsTools.NanoAOD.nano_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:root://cms-xrd-global.cern.ch//store/data/Run2018A/EGamma/MINIAOD/17Sep2018-v2/100000/29F6835B-E804-9247-B8B2-2A78E410E681.root'),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('nanoAOD_jetToolbox_cff nevts:1'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.NANOAODoutput = cms.OutputModule("NanoAODOutputModule",
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(9),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('NANOAOD'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:jetToolbox_nano_datatest1606.root'),
outputCommands = process.NANOAODEventContent.outputCommands
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '102X_dataRun2_Sep2018Rereco_v1', '')
# Path and EndPath definitions
process.nanoAOD_step = cms.Path(process.nanoSequence)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.NANOAODoutput_step = cms.EndPath(process.NANOAODoutput)
# Schedule definition
process.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step,process.NANOAODoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# customisation of the process.
# Automatic addition of the customisation function from PhysicsTools.NanoAOD.nano_cff
from PhysicsTools.NanoAOD.nano_cff import nanoAOD_customizeData
#call to customisation function nanoAOD_customizeData imported from PhysicsTools.NanoAOD.nano_cff
process = nanoAOD_customizeData(process)
# Automatic addition of the customisation function from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff import nanoJTB_customizeMC
#call to customisation function nanoJTB_customizeMC imported from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
process = nanoJTB_customizeMC(process)
# End of customisation functions
# Customisation from command line
process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False)))
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion | [
"[email protected]"
] | |
eea817d60254307a68dbd9b0943756d17da0c46b | 3f100a1002a1f8ed453c8b81a9b403444d77b4c6 | /while_loops/trial.py | 0bf2cb0e9f1ac677e22c4333bf6b64a527e9b842 | [] | no_license | Kimuda/Phillip_Python | c19c85a43c5a13760239e4e94c08436c99787ebf | 59d56a0d45839656eb15dbe288bdb0d18cb7df2b | refs/heads/master | 2016-09-09T22:19:02.347744 | 2015-05-01T10:56:49 | 2015-05-01T10:56:49 | 32,330,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,027 | py | 1. The program will continue producing output until it is interrupted.
2. A line that increments i by 1 needs to be put at the end of, but inside of
the while loop.
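A minimal sketch of that fix (the loop variable and bound here are assumed,
since the original program isn't shown):
--- fixed_loop.py ---
i = 0
while i < 10:
    print i
    i = i + 1
--- end fixed_loop.py ---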
3. --- smallest.py ---
smallest = "strings are always larger than integers"
print "Enter a sequence of numbers:"
line = raw_input("> ")
while line != "":
if int(line) < smallest:
smallest = int(line)
line = raw_input("> ")
print smallest
--- end smallest.py ---
4. --- average.py ---
print "Enter a sequence of numbers:"
total = 0
count = 0
line = raw_input("> ")
while line != "":
total += int(line)
count += 1
line = raw_input("> ")
print float(total)/count
--- end average.py ---
5. --- average_loop.py ---
answer = 'y'
while answer == 'y':
print "Enter a sequence of numbers:"
total = 0
count = 0
line = raw_input("> ")
while line != "":
total += int(line)
count += 1
line = raw_input("> ")
print float(total)/count
answer = raw_input('Would you like to calculate the average of another sequence (y/n): ')
--- end average_loop.py ---
| [
"[email protected]"
] | |
5cb365c88d11e4906a757b2995cf93d0128fcde3 | a4e5a695e749e49953d173e7ac2aeaf372d0513d | /flask/myapp.py | 33873b70b27a80cd5f95b83bd1af3eeb4cebe55f | [] | no_license | mamueller/nix_examples | 7e79194a479e8a73490f22d9087bc11a834f9c66 | 98fd728d0bba83d9c7a8151e724ef06aadd6cb95 | refs/heads/master | 2021-06-12T11:02:02.224015 | 2021-04-09T18:44:51 | 2021-04-09T18:44:51 | 167,869,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | #! /usr/bin/env python
import subprocess
from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello, Nix!"
def run():
res=subprocess.run(['qemu-x86_64','-h'])
print(res)
app.run(host="0.0.0.0")
if __name__ == "__main__":
run()
| [
"[email protected]"
] | |
de950a06dbd6d91de3321c096458f9b7b203f8b5 | b102b0312b3b5391652a117ecf9f09f5059af485 | /server/mysite/monitorlist.py | de84bb2417b2835174296bd7a5a466e969ec40be | [
"MIT"
] | permissive | zhangjiongcn/BR | 06772079e050ceb99f3d8a93b09c8d0cd234d780 | c08d8acd26c58caf5d939892a9cdf3d3bb3ffff1 | refs/heads/master | 2021-04-29T20:24:05.395174 | 2018-02-16T08:54:36 | 2018-02-16T08:54:36 | 121,595,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,540 | py | from django.http import HttpResponse
import redis
def index(request):
pool = redis.ConnectionPool(host="redis.devops.pousheng.com", port=6379, password="redis")
r = redis.Redis(connection_pool=pool)
try:
gettimelist = r.time()
gettime = gettimelist[0]
svrdic=r.hgetall("prd")
resp_str = "["
for key,value in svrdic.items():
svrname = str(key.decode('utf-8'))
svroptiondic = eval(str(value.decode('utf-8')))
resp_str = resp_str + "{"
# servername
resp_str = resp_str + "\"svrname\":\""+svrname+"\","
# cpu
# if svroptiondic.get("cpu") != None :
# resp_str = resp_str + "\"cpu\":" + str(svroptiondic['cpu'])+","
# sqlserver block
# if svroptiondic.get("block") != None :
# resp_str = resp_str + "\"block\":"+str(svroptiondic['block'])+","
# sqlserver opentran
# if svroptiondic.get("opentran") != None :
# resp_str = resp_str + "\"opentran\":"+str(svroptiondic['opentran'])+","
# memory
# if svroptiondic.get("mem") != None :
# memdic = eval(str(svroptiondic['mem']))
# resp_str = resp_str + "\"mem\":{\"memtotal\":"+str(memdic["memtotal"])+",\"memfree\":"+str(memdic["memfree"])+",},"
# # disks
# if svroptiondic.get("disks") != None :
# resp_str = resp_str + "\"disks\":"+str(svroptiondic['disks'])+","
# uptime
diffuptime = None
if svroptiondic.get("uptime") != None :
difftime = gettime - svroptiondic['uptime']
diffuptime = difftime
if diffuptime >6 :
resp_str = resp_str + "\"dt\":"+str(difftime)+","
else:
resp_str = resp_str + "\"dt\":\"\","
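            # Status codes used below: 0 = ok, 1 = warning, 2 = critical; the
            # same codes shifted by +10 mark hosts whose last report is stale
            # (heartbeat older than 6 seconds).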
# cpu 2
if svroptiondic.get("cpu") != None :
cpu2 = svroptiondic['cpu']
if diffuptime != None and diffuptime>6:
if cpu2 < 50 :
resp_str = resp_str + "\"c\":[10," + str(cpu2)+"],"
elif cpu2 >=50 and cpu2 < 85 :
resp_str = resp_str + "\"c\":[11," + str(cpu2)+"],"
else:
resp_str = resp_str + "\"c\":[12," + str(cpu2)+"],"
else:
if cpu2 < 50 :
resp_str = resp_str + "\"c\":[0," + str(cpu2)+"],"
elif cpu2 >=50 and cpu2 < 85 :
resp_str = resp_str + "\"c\":[1," + str(cpu2)+"],"
else:
resp_str = resp_str + "\"c\":[2," + str(cpu2)+"],"
# sqlserver block 2
if svroptiondic.get("block") != None :
block2 = svroptiondic['block']
if diffuptime != None and diffuptime>6:
if block2 == 0 :
resp_str = resp_str + "\"b\":[10," + str(block2)+"],"
elif block2 > 0 and block2 <= 10 :
resp_str = resp_str + "\"b\":[11," + str(block2)+"],"
else:
resp_str = resp_str + "\"b\":[12," + str(block2)+"],"
else:
if block2 == 0 :
resp_str = resp_str + "\"b\":[0," + str(block2)+"],"
elif block2 > 0 and block2 <= 10 :
resp_str = resp_str + "\"b\":[1," + str(block2)+"],"
elif block2 == -1 :
resp_str = resp_str + "\"b\":[12," + str(block2)+"],"
else:
resp_str = resp_str + "\"b\":[2," + str(block2)+"],"
# sqlserver opentran 2
if svroptiondic.get("opentran") != None :
opentran2 = svroptiondic['opentran']
if diffuptime != None and diffuptime>6:
if opentran2 == 0 :
resp_str = resp_str + "\"o\":[10," + str(opentran2)+"],"
elif opentran2 > 0 and opentran2 <= 20 :
resp_str = resp_str + "\"o\":[11," + str(opentran2)+"],"
else:
resp_str = resp_str + "\"o\":[12," + str(opentran2)+"],"
else:
if opentran2 == 0 :
resp_str = resp_str + "\"o\":[0," + str(opentran2)+"],"
elif opentran2 > 0 and opentran2 <= 20 :
resp_str = resp_str + "\"o\":[1," + str(opentran2)+"],"
elif opentran2 == -1:
resp_str = resp_str + "\"o\":[12," + str(opentran2)+"],"
else:
resp_str = resp_str + "\"o\":[2," + str(opentran2)+"],"
# memory 2
if svroptiondic.get("mem") != None :
memdic2 = eval(str(svroptiondic['mem']))
memtotal2 = memdic2["memtotal"]
memfree2 = memdic2["memfree"]
                memfreepercent3 = memfree2 / memtotal2 * 100
                # memfreepercent2 is on a 0-100 scale, so the thresholds below
                # are whole percents (5% / 3%), paired with the absolute
                # fallbacks of 5 GB / 3 GB of free memory.
                memfreepercent2 = round(memfreepercent3, 3)
                memfree3 = round(memfree2/1024/1024/1024, 3)
                if diffuptime != None and diffuptime>6:
                    if memfreepercent2 > 5 or memfree2 > 5368709120 :
                        resp_str = resp_str + "\"m\":[10,\"" + str(memfreepercent2)+"%/"+str(memfree3)+"GB\"],"
                    elif memfreepercent2 > 3 or memfree2 >3221225472 :
                        resp_str = resp_str + "\"m\":[11,\"" + str(memfreepercent2)+"%/"+str(memfree3)+"GB\"],"
                    else:
                        resp_str = resp_str + "\"m\":[12,\"" + str(memfreepercent2)+"%/"+str(memfree3)+"GB\"],"
                else:
                    if memfreepercent2 > 5 or memfree2 > 5368709120 :
                        resp_str = resp_str + "\"m\":[0,\"" + str(memfreepercent2)+"%/"+str(memfree3)+"GB\"],"
                    elif memfreepercent2 > 3 or memfree2 >3221225472 :
                        resp_str = resp_str + "\"m\":[1,\"" + str(memfreepercent2)+"%/"+str(memfree3)+"GB\"],"
                    else:
                        resp_str = resp_str + "\"m\":[2,\"" + str(memfreepercent2)+"%/"+str(memfree3)+"GB\"],"
# disks 2
if svroptiondic.get("disks") != None :
disks2list = svroptiondic['disks']
disk2minpercent = 0
disk2minfree = 0
disk2red=0
disk2yellow=0
diskfor=0
for disk2obj in disks2list :
disk2label = disk2obj['disklabel']
disk2diskfree = disk2obj['diskfree']
disk3diskfree = round(disk2diskfree/1024/1024/1024,3)
disk2freepercent = round(disk2obj['diskfreepercent'],3)
if diskfor ==0 :
disk2minfree = disk3diskfree
disk2minpercent = disk2freepercent
else:
if disk3diskfree<disk2minfree:
disk2minfree = disk3diskfree
if disk2freepercent<disk2minpercent:
disk2minpercent = disk2freepercent
if disk2freepercent<8 and disk2diskfree < 10737418240 :
if disk2red == 0:
disk2red = 1
elif disk2freepercent<15 and disk2diskfree < 16106127360 :
if disk2yellow == 0:
disk2yellow = 1
diskfor = diskfor +1
if diffuptime != None and diffuptime>6:
if disk2red == 1:
resp_str = resp_str + "\"di\":[12,\"" + str(disk2minpercent)+"%/"+str(disk2minfree)+"GB\"],"
elif disk2yellow == 1:
resp_str = resp_str + "\"di\":[11,\"" + str(disk2minpercent)+"%/"+str(disk2minfree)+"GB\"],"
else:
resp_str = resp_str + "\"di\":[10,\"" + str(disk2minpercent)+"%/"+str(disk2minfree)+"GB\"],"
else:
if disk2red == 1:
resp_str = resp_str + "\"di\":[2,\"" + str(disk2minpercent)+"%/"+str(disk2minfree)+"GB\"],"
elif disk2yellow == 1:
resp_str = resp_str + "\"di\":[1,\"" + str(disk2minpercent)+"%/"+str(disk2minfree)+"GB\"],"
else:
resp_str = resp_str + "\"di\":[0,\"" + str(disk2minpercent)+"%/"+str(disk2minfree)+"GB\"],"
# resp_str = resp_str + "\"di\":"+str(disk2freepercent)+","
# resp_str = resp_str + "\"disks\":"+str(svroptiondic['disks'])+","
resp_str = resp_str + "},"
resp_str = resp_str + "]"
except Exception as err:
resp_str = "["
resp_str += "{\"svrname\":\"test01\",\"cpu\":86,\"block\":10,\"opentran\":12,},"
resp_str += "{\"svrname\":\"test02\",\"cpu\":61,\"block\":10,\"opentran\":12,},"
resp_str += "{\"svrname\":\"test03\",\"cpu\":20,\"block\":10,\"opentran\":12,},"
resp_str += "]"
return HttpResponse(resp_str)
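
# Sketch of a sturdier alternative (assumes an `import json` at the top of the
# module, which this file does not currently have):
#
#     payload = [{"svrname": key.decode("utf-8"), ...} for key in svrdic]
#     return HttpResponse(json.dumps(payload))
#
# The hand-built resp_str above emits trailing commas, which only lenient
# parsers will accept.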
| [
"[email protected]"
] | |
6fdb094e7f8936d65474156243043ac349a04633 | c6bfc62c5d90a8eaa7dcb0f4149b48a18dec3dc1 | /model.py | 503d31ccb1cd347ef5807b9bd8ced9f9f9543e4d | [] | no_license | PabloRR100/Bert_Sentiment_Multilingual | 4e2e897a7638ca097043980f2bf00336b7f87673 | d19b0ebca0c890313b25b3c8fbd3ac165f94856c | refs/heads/master | 2023-07-12T17:34:46.746524 | 2020-05-13T10:23:01 | 2020-05-13T10:23:01 | 261,702,895 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,423 | py |
import torch
import torch.nn as nn
import config
import transformers
"""
EXTENDES TRANSFORMERS BERT MODEL
"""
class BERTBaseUncased(nn.Module):
def __init__(self):
super(BERTBaseUncased, self).__init__()
self.bert = transformers.BertModel.from_pretrained(config.BERT_PATH)
self.bert_drop = nn.Dropout(0.3)
self.out = nn.Linear(2*768, 1)
def forward(self, ids, mask, token_type_ids):
o1, _ = self.bert(
ids,
attention_mask=mask,
token_type_ids=token_type_ids
)
mean_pool = torch.mean(o1,1)
max_pool,_ = torch.max(o1,1)
cat = torch.cat((mean_pool, max_pool),dim=1)
bo = self.bert_drop(cat)
output = self.out(bo)
return output
class DistilBERTBaseUncased(nn.Module):
def __init__(self):
super(DistilBERTBaseUncased, self).__init__()
self.bert = transformers.DistilBertModel.from_pretrained(config.DISTILBERT_PATH)
self.bert_drop = nn.Dropout(0.3)
self.out = nn.Linear(2*768, 1)
def forward(self, ids, mask, token_type_ids):
o1, _ = self.bert(
ids,
attention_mask=mask
)
mean_pool = torch.mean(o1,1)
max_pool,_ = torch.max(o1,1)
cat = torch.cat((mean_pool, max_pool),dim=1)
bo = self.bert_drop(cat)
output = self.out(bo)
return output | [
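
# A minimal usage sketch (shapes are illustrative; config.BERT_PATH must point
# at a compatible checkpoint):
#
#     model = BERTBaseUncased()
#     ids = torch.zeros((2, 128), dtype=torch.long)
#     mask = torch.ones((2, 128), dtype=torch.long)
#     token_type_ids = torch.zeros((2, 128), dtype=torch.long)
#     logits = model(ids, mask, token_type_ids)   # shape (2, 1)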
"[email protected]"
] | |
9a9875f9ef74c131bf2885171b2c1645e587bd42 | 9d64a438cdfe4f3feb54f2f0dc7431139c4b9fb9 | /trendmicro_apex/icon_trendmicro_apex/actions/search_agents/action.py | 82238c4c95b6add16427768df0993ff18d46a01e | [
"MIT"
] | permissive | PhilippBehmer/insightconnect-plugins | 5ad86faaccc86f2f4ed98f7e5d518e74dddb7b91 | 9195ddffc575bbca758180473d2eb392e7db517c | refs/heads/master | 2021-07-25T02:13:08.184301 | 2021-01-19T22:51:35 | 2021-01-19T22:51:35 | 239,746,770 | 0 | 0 | MIT | 2020-02-11T11:34:52 | 2020-02-11T11:34:51 | null | UTF-8 | Python | false | false | 1,935 | py | import komand
from .schema import SearchAgentsInput, SearchAgentsOutput, Input, Output, Component
# Custom imports below
import re
import urllib.parse
class SearchAgents(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name='search_agents',
description=Component.DESCRIPTION,
input=SearchAgentsInput(),
output=SearchAgentsOutput())
def run(self, params={}):
agent_ids = params.get(Input.AGENT_IDS)
query_params = {}
quoted_param = ""
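        # Classify each supplied identifier by shape: GUID-style entity ID,
        # IPv4 address, MAC address, or (fallback) host name.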
        # Entity IDs are standard GUIDs (8-4-4-4-12 hex groups).
        entity_pattern = re.compile(r'^[0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}$')
        ip_address_pattern = re.compile(r'^(?:\d{1,3}\.){3}\d{1,3}$')
        # MAC addresses are six hex octets, colon- or dash-separated.
        mac_address_pattern = re.compile(r'^[0-9a-fA-F]{2}([-:][0-9a-fA-F]{2}){5}$')
if agent_ids:
for agent_id in agent_ids:
if re.match(mac_address_pattern, agent_id):
query_params["mac_address"] = agent_id
elif re.match(ip_address_pattern, agent_id):
query_params["ip_address"] = agent_id
elif re.match(entity_pattern, agent_id):
query_params["entity_id"] = agent_id
else:
query_params["host_name"] = agent_id
        # urlencode percent-encodes the values while leaving '=' and '&' intact;
        # quoting the pre-joined string would mangle the separators.
        quoted_param = urllib.parse.urlencode(query_params)
if quoted_param:
quoted_param = "?" + quoted_param
agents = self.connection.api.execute(
"get",
"/WebApp/API/AgentResource/ProductAgents" + quoted_param,
""
)
if agents.get("result_code", 0) == 1:
f = agents.get("result_content")
self.logger.info(f"result_content: {f}")
return {
Output.SEARCH_AGENT_RESPONSE: agents.get("result_content")
}
return {
Output.SEARCH_AGENT_RESPONSE: []
}
| [
"[email protected]"
] | |
1ea8d734403be436ba043166a9c312562de2a51a | 48517a9b7ec7b0f0bf0a03291b7d1e3def751c0a | /Choose Your Own Colors/horizontal_striper_15.py | 9e0f6da63d3a326564cc317ef111c0c8348888c7 | [
"MIT"
] | permissive | Breakfast-for-Pigeons/Unicorn-HAT | 1ae033bf11c05b9cc739b1eacfc77665506e0bc8 | 9ff1388ee627a8e81f361929e9e9b708db4e2832 | refs/heads/master | 2021-06-06T12:22:48.162031 | 2020-10-22T17:31:51 | 2020-10-22T17:31:51 | 74,648,524 | 1 | 0 | null | 2018-10-02T17:37:31 | 2016-11-24T07:28:23 | Python | UTF-8 | Python | false | false | 2,042 | py | #!/usr/bin/python3
"""
Horizontal Striper 15 - Choose Your Own Colors
With the Raspberry Pi oriented with the GPIO pins at the top, this
program stripes from the top to the bottom and alternates from left to
right and right to left.
This is exactly the same as Horizontal Striper 7 except the color order
is reversed.
....................
Functions:
- horizontal_striper_15: Gets x and y coordinates and sends them to the
striping function
....................
Author: Paul Ryan
This program was written on a Raspberry Pi using the Geany IDE.
"""
########################################################################
# Import modules #
########################################################################
from bfp_unicornhat import print_header
from bfp_unicornhat import stop
from bfp_unicornhat import stripe_horizontally_rev_alt_2
########################################################################
# Import variables #
########################################################################
from bfp_unicornhat import X_COORDINATES
from bfp_unicornhat import Y_COORDINATES
########################################################################
# Functions #
########################################################################
def horizontal_striper_15():
"""
Sends x and y coordinates to the striper function
"""
x_coordinate_list = X_COORDINATES
y_coordinate_list = Y_COORDINATES[::-1]
stripe_horizontally_rev_alt_2(x_coordinate_list, y_coordinate_list)
if __name__ == '__main__':
try:
# STEP01: Print header
print_header()
# STEP02: Print instructions in white text
print("\033[1;37;40mPress Ctrl-C to stop the program.")
# STEP03:
horizontal_striper_15()
# STEP04: Exit the program.
stop()
except KeyboardInterrupt:
stop()
| [
"[email protected]"
] | |
9a2bfa2e085d4bdbbcd7e45e25f31886e442b9b4 | 3cd19164c17d9793ea506369454b8bacd5cebfa9 | /Backend/py2club/bin/pyjwt | 56637acb75b755885e5c894368fe1121d7d3a106 | [] | no_license | Myxg/BadmintonClubSystem | 337a17728122ab929d37e7f2732850beb49d8be0 | 1c51236098ab3770cadd925212f9d3978ed83c2a | refs/heads/master | 2022-12-27T10:13:55.129630 | 2020-09-16T10:21:36 | 2020-09-16T10:21:36 | 295,998,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | #!/home/ubuntu/BadmintonClubSystem/Backend/py2club/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from jwt.__main__ import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
cf8d0b5cf4e4a0107de6cd9c133e7f4aa366db30 | 95deb106d41a4612628c50568b2e6107e0a6773d | /applications/admin/handlers/role.py | bbeb9f28634de63f2130e98cf28534b2ae5137dc | [
"BSD-3-Clause"
] | permissive | sjl421/py_admin | a57b0d2449a1beabef5ccffb88bd510a38db8013 | 2fb404af8b8435d247eb23c13386ae8deb88d144 | refs/heads/master | 2020-03-18T21:04:26.213089 | 2018-05-22T06:11:15 | 2018-05-22T06:11:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,344 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""URL处理器
[description]
"""
import json
import tornado
from applications.core.settings_manager import settings
from applications.core.logger.client import SysLogger
from applications.core.cache import sys_config
from applications.core.decorators import required_permissions
from ..models import Role
from ..models import AdminMenu
from .common import CommonHandler
class RoleHandler(CommonHandler):
"""docstring for Passport"""
@tornado.web.authenticated
@required_permissions('admin:role:index')
def get(self, *args, **kwargs):
params = {
}
self.render('role/index.html', **params)
@tornado.web.authenticated
@required_permissions('admin:role:delete')
def delete(self, *args, **kwargs):
"""删除角色
"""
uuid = self.get_argument('uuid', None)
# 超级管理员角色 默认角色
user_role_li = [settings.SUPER_ROLE_ID,'6b0642103a1749949a07f4139574ead9']
if uuid in user_role_li:
return self.error('角色不允许删除')
Role.Q.filter(Role.uuid==uuid).delete()
Role.session.commit()
return self.success()
class RoleListHandler(CommonHandler):
"""用户组列表"""
@tornado.web.authenticated
@required_permissions('admin:role:index')
def get(self, *args, **kwargs):
limit = self.get_argument('limit', 10)
page = self.get_argument('page', 1)
pagelist_obj = Role.Q.filter().paginate(page=page, per_page=limit)
if pagelist_obj is None:
            return self.error('No data available')
total = pagelist_obj.total
page = pagelist_obj.page
items = pagelist_obj.items
params = {
'count': total,
'uri': self.request.uri,
'path': self.request.path,
'data': [role.as_dict() for role in items],
}
return self.success(**params)
class RoleAddHandler(CommonHandler):
"""用户组添加功能"""
@tornado.web.authenticated
@required_permissions('admin:role:add')
def post(self, *args, **kwargs):
rolename = self.get_argument('rolename', None)
uuid = self.get_argument('uuid', None)
status = self.get_argument('status', 1)
if not rolename:
            return self.error('Group name cannot be empty')
count = Role.Q.filter(Role.rolename==rolename).count()
if count>0:
            return self.error('Name is already taken')
role = {
'rolename':rolename,
'status': status,
}
role = Role(**role)
Role.session.add(role)
Role.session.commit()
return self.success()
class RoleEditHandler(CommonHandler):
"""用户组增删查改功能"""
@tornado.web.authenticated
@required_permissions('admin:role:edit')
def get(self, *args, **kwargs):
uuid = self.get_argument('uuid', None)
role = Role.Q.filter(Role.uuid==uuid).first()
menu_list = AdminMenu.children(status=1)
data_info = role.as_dict()
try:
data_info['permission'] = json.loads(role.permission)
except Exception as e:
data_info['permission'] = []
params = {
'role': role,
'menu_list': menu_list,
'data_info': data_info,
}
self.render('role/edit.html', **params)
@tornado.web.authenticated
@required_permissions('admin:role:edit')
def post(self, *args, **kwargs):
rolename = self.get_argument('rolename', None)
uuid = self.get_argument('uuid', None)
sort = self.get_argument('sort', None)
status = self.get_argument('status', 0)
permission = self.get_body_arguments('permission[]')
role = {
'status': status,
}
if rolename:
role['rolename'] = rolename
count = Role.Q.filter(Role.uuid!=uuid).filter(Role.rolename==rolename).count()
if count>0:
                return self.error('Name is already taken')
if sort:
role['sort'] = sort
if permission:
role['permission'] = json.dumps(permission)
Role.Q.filter(Role.uuid==uuid).update(role)
Role.session.commit()
return self.success(data=role)
| [
"[email protected]"
] | |
426ade794f90ab2e86de0d16b927af50151d2053 | bc441bb06b8948288f110af63feda4e798f30225 | /monitor_sdk/model/container/workload_pb2.py | 676d445506d633237c03b3b1996dd2305b993b3c | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 12,632 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: workload.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from monitor_sdk.model.container import container_pb2 as monitor__sdk_dot_model_dot_container_dot_container__pb2
from monitor_sdk.model.container import volume_pb2 as monitor__sdk_dot_model_dot_container_dot_volume__pb2
from monitor_sdk.model.container import deployment_strategy_pb2 as monitor__sdk_dot_model_dot_container_dot_deployment__strategy__pb2
from monitor_sdk.model.container import local_object_reference_pb2 as monitor__sdk_dot_model_dot_container_dot_local__object__reference__pb2
from monitor_sdk.model.container import deployment_status_pb2 as monitor__sdk_dot_model_dot_container_dot_deployment__status__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='workload.proto',
package='container',
syntax='proto3',
serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'),
serialized_pb=_b('\n\x0eworkload.proto\x12\tcontainer\x1a+monitor_sdk/model/container/container.proto\x1a(monitor_sdk/model/container/volume.proto\x1a\x35monitor_sdk/model/container/deployment_strategy.proto\x1a\x38monitor_sdk/model/container/local_object_reference.proto\x1a\x33monitor_sdk/model/container/deployment_status.proto\x1a\x1cgoogle/protobuf/struct.proto\"\xe3\x04\n\x08Workload\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0c\n\x04kind\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\x11\n\tnamespace\x18\x04 \x01(\t\x12\x14\n\x0cresourceName\x18\x05 \x01(\t\x12.\n\ncontainers\x18\x06 \x03(\x0b\x32\x1a.container.ContainerConfig\x12\x10\n\x08replicas\x18\x07 \x01(\x05\x12\"\n\x07volumes\x18\x08 \x03(\x0b\x32\x11.container.Volume\x12,\n\x0b\x61nnotations\x18\t \x01(\x0b\x32\x17.google.protobuf.Struct\x12\'\n\x06labels\x18\n \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x11\n\tdnsPolicy\x18\x0b \x01(\t\x12\x15\n\rrestartPolicy\x18\x0c \x01(\t\x12\x39\n\x12\x64\x65ploymentStrategy\x18\r \x01(\x0b\x32\x1d.container.DeploymentStrategy\x12\x39\n\x10imagePullSecrets\x18\x0e \x03(\x0b\x32\x1f.container.LocalObjectReference\x12\x35\n\x10\x64\x65ploymentStatus\x18\x0f \x01(\x0b\x32\x1b.container.DeploymentStatus\x12\x14\n\x0cresourceSpec\x18\x10 \x01(\t\x12\x0f\n\x07\x63reator\x18\x11 \x01(\t\x12\x19\n\x11\x63reationTimestamp\x18\x12 \x01(\t\x12\r\n\x05state\x18\x13 \x01(\t\x12\x19\n\x11transitionMessage\x18\x14 \x01(\tBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\x06proto3')
,
dependencies=[monitor__sdk_dot_model_dot_container_dot_container__pb2.DESCRIPTOR,monitor__sdk_dot_model_dot_container_dot_volume__pb2.DESCRIPTOR,monitor__sdk_dot_model_dot_container_dot_deployment__strategy__pb2.DESCRIPTOR,monitor__sdk_dot_model_dot_container_dot_local__object__reference__pb2.DESCRIPTOR,monitor__sdk_dot_model_dot_container_dot_deployment__status__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
_WORKLOAD = _descriptor.Descriptor(
name='Workload',
full_name='container.Workload',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='instanceId', full_name='container.Workload.instanceId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='kind', full_name='container.Workload.kind', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='container.Workload.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='namespace', full_name='container.Workload.namespace', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resourceName', full_name='container.Workload.resourceName', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='containers', full_name='container.Workload.containers', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='replicas', full_name='container.Workload.replicas', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='volumes', full_name='container.Workload.volumes', index=7,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='annotations', full_name='container.Workload.annotations', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='labels', full_name='container.Workload.labels', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dnsPolicy', full_name='container.Workload.dnsPolicy', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='restartPolicy', full_name='container.Workload.restartPolicy', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='deploymentStrategy', full_name='container.Workload.deploymentStrategy', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='imagePullSecrets', full_name='container.Workload.imagePullSecrets', index=13,
number=14, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='deploymentStatus', full_name='container.Workload.deploymentStatus', index=14,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resourceSpec', full_name='container.Workload.resourceSpec', index=15,
number=16, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='creator', full_name='container.Workload.creator', index=16,
number=17, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='creationTimestamp', full_name='container.Workload.creationTimestamp', index=17,
number=18, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='state', full_name='container.Workload.state', index=18,
number=19, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='transitionMessage', full_name='container.Workload.transitionMessage', index=19,
number=20, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=313,
serialized_end=924,
)
_WORKLOAD.fields_by_name['containers'].message_type = monitor__sdk_dot_model_dot_container_dot_container__pb2._CONTAINERCONFIG
_WORKLOAD.fields_by_name['volumes'].message_type = monitor__sdk_dot_model_dot_container_dot_volume__pb2._VOLUME
_WORKLOAD.fields_by_name['annotations'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_WORKLOAD.fields_by_name['labels'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_WORKLOAD.fields_by_name['deploymentStrategy'].message_type = monitor__sdk_dot_model_dot_container_dot_deployment__strategy__pb2._DEPLOYMENTSTRATEGY
_WORKLOAD.fields_by_name['imagePullSecrets'].message_type = monitor__sdk_dot_model_dot_container_dot_local__object__reference__pb2._LOCALOBJECTREFERENCE
_WORKLOAD.fields_by_name['deploymentStatus'].message_type = monitor__sdk_dot_model_dot_container_dot_deployment__status__pb2._DEPLOYMENTSTATUS
DESCRIPTOR.message_types_by_name['Workload'] = _WORKLOAD
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Workload = _reflection.GeneratedProtocolMessageType('Workload', (_message.Message,), {
'DESCRIPTOR' : _WORKLOAD,
'__module__' : 'workload_pb2'
# @@protoc_insertion_point(class_scope:container.Workload)
})
_sym_db.RegisterMessage(Workload)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
] | |
f4cf7019f39021a4b77232222532df88fce1f0ff | e1c7d4257b7e8ccdd369b12a3cb6c07e7b46df85 | /tools/zpycompletion | 64f55f2ab6dfb05cf738656662d96813f57b5eaa | [] | no_license | Trietptm-on-Coding-Algorithms/workshop | c461d6dd78ce80850a1a238a19509a5f30559c5b | 5a26fc86c737dc5f236983c6dbf53318b0a559aa | refs/heads/master | 2021-01-12T19:24:31.472826 | 2016-05-26T13:21:45 | 2016-05-26T13:21:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,993 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# zpycompletion
#
# Copyright 2015 Spencer McIntyre <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import argparse
import contextlib
import datetime
import imp
import os
import pwd
import sys
import jinja2
__all__ = ['make_completion_from_argparse']
__version__ = '1.1'
# script.arguments
# script.author
# script.c_year
# script.name
# script.subparser_cmds
# script.subparsers
# version
ZSH_COMPLETION_TEMPLATE = """\
#compdef {{ script.name }}
# ------------------------------------------------------------------------------
# Copyright (c) {{ script.c_year }} {{ script.author }}
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the project nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL ZSH-USERS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ------------------------------------------------------------------------------
# Description
# -----------
#
# Completion script for {{ script.name }}.
#
# ------------------------------------------------------------------------------
# Authors
# -------
#
# * {{ script.author }}
#
# ------------------------------------------------------------------------------
# Generated with zpycompletion v{{ version }}
{% for subparser_cmd in script.subparser_cmds %}
{{ subparser_cmd.name }}() {
_arguments \\
{% for line in subparser_cmd.arguments %}
{{ line }}
{% endfor %}
}
{% endfor %}
{% for subparser in script.subparsers %}
{{ subparser.name }}() {
local -a _subparser_cmds
_subparser_cmds=(
{% for action in subparser.actions %}
"{{ action.name }}:{{ action.help }}"
{% endfor %}
)
if (( CURRENT == 1 )); then
_describe "commands" _subparser_cmds
else
local curcontext="$curcontext"
cmd="${${_subparser_cmds[(r)$words[1]:*]%%:*}}"
if (( $#cmd )); then
if (( $+functions[{{ subparser.name }}_cmd_$cmd] )); then
{{ subparser.name }}_cmd_$cmd CURRENT
else
_files
fi
else
_message "unknown command: $words[1]"
fi
fi
}
{% endfor %}
_arguments \\
{% for line in script.arguments %}
{{ line }}
{% endfor %}
"""
def _actions_sort(action):
if len(action.option_strings):
return sorted(action.option_strings)[-1]
return ''
def _argument_action(action):
zarg_action = ''
if action.choices:
zarg_action = ':(' + ' '.join(action.choices) + ')'
elif isinstance(action.type, argparse.FileType):
zarg_action = ':_files'
return zarg_action
class _ZshCompletion(object):
def __init__(self, parser):
self.parser = parser
self.arguments = []
self.subparsers = []
self.subparser_cmds = []
actions = sorted(self.parser._actions, key=_actions_sort)
self.arguments = self._parse_actions(actions)
def _parse_actions(self, actions, subparser_name=None):
lines = []
subparser = None
positional = 1
for action in actions:
if isinstance(action, argparse._SubParsersAction):
subparser = action
continue
if isinstance(action, argparse._HelpAction):
lines.append("{-h,--help}\"[show help text]\"")
continue
if len(action.option_strings) == 0: # positional
if isinstance(action.nargs, int) and action.nargs > 1:
for _ in range(positional, (positional + action.nargs)):
lines.append("\"{0}::{1}{2}\"".format(positional, action.dest, _argument_action(action)))
positional += 1
continue
elif action.nargs in (argparse.REMAINDER, '*', '+'):
line = '"*'
elif action.nargs in (None, 1, '?'):
line = '"' + str(positional)
positional += 1
line += '::' + action.dest
else:
if len(action.option_strings) == 1:
options = '"' + action.option_strings[0]
else:
options = '{' + ','.join(action.option_strings) + '}"'
if isinstance(action, argparse._StoreAction):
line = options + "[{0}]:{1}".format(action.help, action.dest.replace('_', ' '))
elif isinstance(action, argparse._VersionAction):
line = options + '[show version information]'
elif isinstance(action, argparse._StoreConstAction):
line = options + '[' + action.help + ']'
else:
continue
line += _argument_action(action) + '"'
lines.append(line)
if subparser:
subp_actions = map(lambda a: dict(name=a.dest, help=a.help), subparser._choices_actions)
subp_dest = ('action' if subparser.dest == argparse.SUPPRESS else subparser.dest)
subp_name = (subparser_name or '_subparser') + '_' + subp_dest
self.subparsers.append(dict(name=subp_name, actions=subp_actions))
lines.append("\"*::{0}:{1}\"".format(subp_dest, subp_name))
for key, value in subparser._name_parser_map.items():
subp_cmd_name = "{0}_cmd_{1}".format(subp_name, key)
subp_cmd_arguments = self._parse_actions(value._actions, subparser_name=subp_name)
self.subparser_cmds.append(dict(name=subp_cmd_name, arguments=subp_cmd_arguments))
for i in range(len(lines) - 1):
lines[i] = lines[i] + ' \\'
return lines
def make_completion_from_argparse(parser, input_prompt=True):
"""
Create a zsh completion file from a :py:class:`argparse.ArgumentParser`
instance.
:param parser: The parser instance to build completions for.
:type parser: :py:class:`argparse.ArgumentParser`
:param bool input_prompt: Whether to prompt for user input or not.
"""
script = {}
script['author'] = pwd.getpwuid(os.getuid()).pw_gecos
if input_prompt:
script['author'] = input("[?] author ({0}): ".format(script['author'])) or script['author']
script['c_year'] = datetime.date.today().year
if input_prompt:
script['c_year'] = input("[?] copyright year ({0}): ".format(script['c_year'])) or script['c_year']
script['name'] = parser.prog
if input_prompt:
script['name'] = input("[?] script name ({0}): ".format(script['name'])) or script['name']
zsh_comp = _ZshCompletion(parser)
script['arguments'] = zsh_comp.arguments
script['subparsers'] = zsh_comp.subparsers
script['subparser_cmds'] = zsh_comp.subparser_cmds
env = jinja2.Environment(trim_blocks=True)
template = env.from_string(ZSH_COMPLETION_TEMPLATE)
with open('_' + script['name'], 'w') as file_h:
file_h.write(template.render(script=script, version=__version__))
print('[*] completion saved as _' + script['name'])
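
# Example usage (hypothetical build_parser() returning an ArgumentParser):
#
#     parser = build_parser()
#     make_completion_from_argparse(parser, input_prompt=False)
#
# which writes a zsh completion file named _<prog> into the current directory.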
class _FakeArgparse(object):
def __init__(self):
self.parser = None
def __getattr__(self, name):
if name == 'ArgumentParser':
return self._hook
return getattr(argparse, name)
def _hook(self, *args, **kwargs):
self.parser = argparse.ArgumentParser(*args, **kwargs)
return self.parser
@contextlib.contextmanager
def muted_std_streams():
stdout = sys.stdout
stderr = sys.stderr
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
try:
yield
except Exception:
raise
finally:
sys.stdout = stdout
sys.stderr = stderr
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--method', dest='target_function', default='main', help='the function which creates the ArgumentParser instance')
parser.add_argument('script', help='the python script to load from')
arguments = parser.parse_args()
script = os.path.abspath(arguments.script)
if not os.path.isfile(script):
print('[-] invalid script file: ' + script)
return
script_path, script_name = os.path.split(script)
sys.path.append(script_path)
script_import_name = script_name
if script_import_name.endswith('.py'):
script_import_name = script_name[:-3]
sys.dont_write_bytecode = True
sys.modules['argparse'] = _FakeArgparse()
print('[*] importing: ' + script_import_name)
try:
module = imp.load_source(script_import_name, script)
except SyntaxError:
print('[!] failed to import the python file')
return
if not hasattr(module, arguments.target_function):
print("[-] the specified script has no {0}() function".format(arguments.target_function))
print('[-] can not automatically get the parser instance')
return
sys.argv = [script_name, '--help']
try:
with muted_std_streams():
getattr(module, arguments.target_function)()
except SystemExit:
pass
if not sys.modules['argparse'].parser:
print("[-] no parser was created in {0}.main()".format(script_name))
return
make_completion_from_argparse(sys.modules['argparse'].parser)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | ||
12c2fb913ef27cff8bbefcf384d426de3192582e | 9da8754002fa402ad8e6f25659978bd269bbcec8 | /src/314B/test_cdf_314B.py | e9e2e5ce0864198663b5fa16bab21ce3f1026005 | [
"MIT"
] | permissive | kopok2/CodeforcesSolutionsPython | a00f706dbf368ba0846c8ae86d4145b5dd3e1613 | 35bec0dbcff47765b123b5fe60476014376153df | refs/heads/master | 2023-02-02T03:08:22.097651 | 2020-12-17T22:00:50 | 2020-12-17T22:00:50 | 196,035,812 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | import unittest
from unittest.mock import patch
from cdf_314B import CodeforcesTask314BSolution
class TestCDF314B(unittest.TestCase):
def test_314B_acceptance_1(self):
mock_input = ['10 3', 'abab', 'bab']
expected = '3'
with patch('builtins.input', side_effect=mock_input):
Solution = CodeforcesTask314BSolution()
Solution.read_input()
Solution.process_task()
actual = Solution.get_result()
self.assertEqual(expected, actual)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
f2cc5f6f8b3ac1a429f71c5a1e78bcae39d6be7a | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /all-gists/2604992/snippet.py | d1ee8b056d79d916da3e98173cd2729fc7778229 | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 485 | py | # Print each permutation.
def perm(l, n, str_a):
if len(str_a) == n:
print str_a
else:
for c in l:
perm(l, n, str_a+c)
# Return a list of permutations.
def perm2(l, n, str_a, perm_a):
if len(str_a) == n:
return [str_a] + perm_a
else:
new_perm_a = perm_a
for c in l:
new_perm_a = perm2(l, n, str_a+c, new_perm_a)
return new_perm_a
perm(['a','b','c'], 3, "")
print perm2(['a','b','c'], 3, "", []) | [
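# Both enumerate all 3**3 = 27 length-3 strings over {'a', 'b', 'c'}: perm
# prints them one per line, perm2 collects and returns them as a list.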
"[email protected]"
] | |
19a76e26092ec389793026a5b6b3a32e09ff7ac3 | ac1d3a1ff9e4ccac37c1ae206f3c12021e56e420 | /code/figures/MBL_xan_titration.py | d3cb715bf2652b4a6ed9ccd6e0df1e2db8f0fb2c | [
"MIT",
"LicenseRef-scancode-proprietary-license",
"CC-BY-4.0"
] | permissive | RPGroup-PBoC/mwc_activation | 4bcdeb2271047df4c1fe8243de2eade709012d0a | 6ef3f02a53ecd80877082006ecc4b8fe4204c1d6 | refs/heads/master | 2023-07-22T22:09:25.442468 | 2020-02-27T18:59:17 | 2020-02-27T18:59:17 | 167,025,404 | 0 | 0 | MIT | 2023-07-06T21:20:04 | 2019-01-22T16:16:05 | Jupyter Notebook | UTF-8 | Python | false | false | 5,692 | py | #%%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import joypy
import glob
import sys
import scipy.stats
sys.path.insert(0, '../../')
import act.viz
import act.flow
colors = act.viz.pub_style()
# %%
# Parse all of hte files.
files = glob.glob('../../data/flow/csv/*.csv')
dfs = []
for f in files:
if '2018' in f:
date, _, strain, conc = f.split('/')[-1].split('_')
conc = float(conc.split('mg')[0]) / 1000
promoter = 'wt'
elif '_XAN' in f:
date = '20180628'
strain, conc, _ = f.split('/')[-1].split('_')
conc = float(conc.split('mg')[0])
promoter = 'wt'
elif '27yfp' in f:
date, _, _, strain, _, conc = f.split('/')[-1].split('_')
if strain == 'dilution':
strain = 'delAB'
elif strain == 'delta':
strain = 'delAB_delta'
elif strain == 'auto':
strain = 'delAB_auto'
conc = float(conc.split('mg')[0])
promoter = 'wt'
elif '28yfp' in f:
date, _, _, strain, _, conc = f.split('/')[-1].split('_')
if strain == 'dilution':
strain = 'delAB'
elif strain == 'delta':
strain = 'delAB_delta'
elif strain == 'auto':
strain = 'delAB_auto'
conc = float(conc.split('mg')[0])
promoter = 'proximal'
_df = pd.read_csv(f)
gated = act.flow.gaussian_gate(_df, 0.4)
gated = gated[gated['gate']==1]
keys = _df.keys()
if 'GFP-A' in keys:
intensity = _df['GFP-A']
elif 'GFP' in keys:
intensity = _df['GFP']
elif 'FITC-H' in keys:
intensity = _df['FITC-H']
_df['intensity'] = np.log(intensity)
_df['date'] = date
_df['xan_mgml'] = conc
_df['strain'] = strain.lower()
_df['promoter'] = promoter
_df = _df[['intensity', 'date', 'xan_mgml', 'strain', 'promoter']]
dfs.append(_df)
intensity_df = pd.concat(dfs, sort=False)
# %%
wt = intensity_df[(intensity_df['strain']=='wt') & (intensity_df['promoter']=='wt') & (intensity_df['intensity'] > -2) & (intensity_df['intensity'] < 10)]
delta = intensity_df[(intensity_df['strain']=='delta') & (intensity_df['promoter']=='wt') & (intensity_df['intensity'] > -2) & (intensity_df['intensity'] < 10)]
delAB_wt = intensity_df[(intensity_df['date']=='20190205') & (intensity_df['strain']=='delab') & (intensity_df['promoter']=='wt') & (intensity_df['intensity'] > 5) & (intensity_df['intensity'] < 12)]
delAB_prox = intensity_df[(intensity_df['date']=='20190205') & (intensity_df['strain']=='delab') & (intensity_df['promoter']=='proximal')]
delAB_wt_delta = intensity_df[(intensity_df['strain']=='delab_delta') & (intensity_df['promoter']=='wt') & (intensity_df['intensity'] > -5) & (intensity_df['intensity'] < 15)]
delAB_prox_delta = intensity_df[(intensity_df['strain']=='delab_delta') & (intensity_df['promoter']=='proximal')]
# Rescale all of the delAB properly.
delAB_wt_delta_mean = delAB_wt_delta['intensity'].mean()
delAB_prox_delta_mean = delAB_prox_delta['intensity'].mean()
delAB_wt['norm_int'] = delAB_wt['intensity'].values / delAB_wt_delta_mean
delAB_prox['norm_int'] = delAB_prox['intensity'].values / delAB_prox_delta_mean
# %%
_ = joypy.joyplot(delta, column='intensity', by='xan_mgml', color=colors[1],
figsize=(3, 4))
plt.savefig('../../figs/delta_xan_titration.pdf', bbox_inches='tight')
#%%
_ = joypy.joyplot(wt, column='intensity', by='xan_mgml', color=colors[0],
figsize=(3, 4))
plt.savefig('../../figs/wt_xan_titration.pdf', bbox_inches='tight')
#%%
_ = joypy.joyplot(delAB_wt, column='intensity', by='xan_mgml', color=colors[2],
figsize=(3, 4))
plt.savefig('../../figs/delAB_xan_titration.pdf', bbox_inches='tight')
# %%
# Isolate the two concentrations
wt = delAB_wt[(delAB_wt['xan_mgml']==0) | (delAB_wt['xan_mgml']==4)]
prox = delAB_prox[(delAB_prox['xan_mgml']==0) | (delAB_prox['xan_mgml']==4)]
fig, ax = plt.subplots(1, 1, figsize=(5.5, 2))
_ = joypy.joyplot(wt, by='xan_mgml', column='norm_int', color=colors[1], ax=ax)
_ = joypy.joyplot(prox, by='xan_mgml', column='norm_int', ax=ax, color=colors[0])
#%%
fig, ax = plt.subplots(1, 2, figsize=(6, 2), sharex=True)
for a in ax:
a.tick_params(labelsize=8)
a.tick_params(labelsize=8)
a.yaxis.set_ticks([])
# Compute the KDEs
x_range = np.linspace(0.5, 2, 500)
wt_low_kde = scipy.stats.gaussian_kde(wt_low['norm_int'])(x_range)
wt_high_kde = scipy.stats.gaussian_kde(wt_high['norm_int'])(x_range)
prox_low_kde = scipy.stats.gaussian_kde(prox_low['norm_int'].dropna())(x_range)
prox_high_kde = scipy.stats.gaussian_kde(prox_high['norm_int'].dropna())(x_range)
ax[0].fill_between(x_range, wt_low_kde, color=colors[1], label='0 mg/mL', alpha=0.25)
ax[0].plot(x_range, wt_low_kde, '-', color=colors[1], lw=1, label='__nolegend__')
ax[0].fill_between(x_range, wt_high_kde, color=colors[0], label='4 mg/mL', alpha=0.25)
ax[0].plot(x_range, wt_high_kde, '-',color=colors[0], lw=1, label='__nolegend__')
ax[1].fill_between(x_range, prox_low_kde, color=colors[1], label='0 mg/mL', alpha=0.25)
ax[1].plot(x_range,prox_low_kde, '-', color=colors[1], lw=1, label='__nolegend__')
ax[1].fill_between(x_range, prox_high_kde, color=colors[0], label='4 mg/mL', alpha=0.25)
ax[1].plot(x_range, prox_high_kde, '--', color=colors[0], lw=1, label='__nolegend__')
ax[1].set_xlabel('fold-change in expression', fontsize=8)
ax[0].set_ylabel('density', fontsize=8)
ax[1].set_ylabel('density', fontsize=8)
for a in ax:
leg = a.legend(title='xanthosine', fontsize=8)
leg.get_title().set_fontsize(8)
plt.savefig('../../figs/wt_prox_comparison.svg', bbox_inches='tight')
# %%
# %%
| [
"[email protected]"
] | |
0a85d36cc1a0d025855da19e08d0826209a0d735 | 508321d683975b2339e5292202f3b7a51bfbe22d | /Userset.vim/ftplugin/python/CompletePack/PySide2/QtWidgets/QStyleOption.py | c82ecadb7fa2cdc8b2a6f3c10eb0f9bc1b529d32 | [] | no_license | cundesi/vimSetSa | 4947d97bcfe89e27fd2727423112bb37aac402e2 | 0d3f9e5724b471ab21aa1199cc3b4676e30f8aab | refs/heads/master | 2020-03-28T05:54:44.721896 | 2018-08-31T07:23:41 | 2018-08-31T07:23:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,232 | py | # encoding: utf-8
# module PySide2.QtWidgets
# from C:\Program Files\Autodesk\Maya2017\Python\lib\site-packages\PySide2\QtWidgets.pyd
# by generator 1.145
# no doc
# imports
import PySide2.QtCore as __PySide2_QtCore
import PySide2.QtGui as __PySide2_QtGui
import Shiboken as __Shiboken
class QStyleOption(__Shiboken.Object):
# no doc
def init(self, *args, **kwargs): # real signature unknown
pass
def initFrom(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
direction = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
fontMetrics = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
palette = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
rect = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
state = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
styleObject = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
type = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
version = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
OptionType = None # (!) real value is ''
SO_Button = None # (!) real value is ''
SO_ComboBox = None # (!) real value is ''
SO_Complex = None # (!) real value is ''
SO_ComplexCustomBase = None # (!) real value is ''
SO_CustomBase = None # (!) real value is ''
SO_Default = None # (!) real value is ''
SO_DockWidget = None # (!) real value is ''
SO_FocusRect = None # (!) real value is ''
SO_Frame = None # (!) real value is ''
SO_GraphicsItem = None # (!) real value is ''
SO_GroupBox = None # (!) real value is ''
SO_Header = None # (!) real value is ''
SO_MenuItem = None # (!) real value is ''
SO_ProgressBar = None # (!) real value is ''
SO_RubberBand = None # (!) real value is ''
SO_SizeGrip = None # (!) real value is ''
SO_Slider = None # (!) real value is ''
SO_SpinBox = None # (!) real value is ''
SO_Tab = None # (!) real value is ''
SO_TabBarBase = None # (!) real value is ''
SO_TabWidgetFrame = None # (!) real value is ''
SO_TitleBar = None # (!) real value is ''
SO_ToolBar = None # (!) real value is ''
SO_ToolBox = None # (!) real value is ''
SO_ToolButton = None # (!) real value is ''
SO_ViewItem = None # (!) real value is ''
StyleOptionType = None # (!) real value is ''
StyleOptionVersion = None # (!) real value is ''
Type = None # (!) real value is ''
Version = None # (!) real value is ''
| [
"[email protected]"
] | |
2d955a76eb32edaf404ea4ca61f1fa9696e17582 | 9852cee063eb438227abf75fa4aa06c0d2d7a23a | /hacktivitycon/web/ladybug.py | 6c45caddae6a0597e627efbc95dec881fc2a2349 | [] | no_license | arwildo/ctf | 227a6038b4d068f33134153534942e91ec5f36f8 | 623ac5cf7f177870d837ae298310bbd244a1db56 | refs/heads/master | 2021-09-08T16:50:32.890102 | 2021-08-28T04:13:37 | 2021-08-28T04:13:37 | 248,462,299 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | #!/usr/bin/python3
import requests
url = 'http://two.jh2i.com:50018/mail.php'
myobj = {'name': 'fsdkfkjs', 'email': 'dfjskjsf', 'subject': 'dfdfs', 'message': 'dfssf'}
x = requests.post(url, data = myobj)
print(x.text)
| [
"[email protected]"
] | |
534ff2ff2d4c67fe84121d7af419371c58a918e2 | 1078c61f2c6d9fe220117d4c0fbbb09f1a67f84c | /paws/lib/python2.7/site-packages/euca2ools-3.4.1_2_g6b3f62f2-py2.7.egg/EGG-INFO/scripts/eulb-attach-lb-to-subnets | fcf3618ac8aa00e3864a9b452656c3acae5f986c | [
"MIT"
] | permissive | cirobessa/receitas-aws | c21cc5aa95f3e8befb95e49028bf3ffab666015c | b4f496050f951c6ae0c5fa12e132c39315deb493 | refs/heads/master | 2021-05-18T06:50:34.798771 | 2020-03-31T02:59:47 | 2020-03-31T02:59:47 | 251,164,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | #!/media/ciro/LOCALDRV/A_DESENVOLVIMENTO/AWS/receitas/paws/bin/python -tt
import euca2ools.commands.elasticloadbalancing.attachloadbalancertosubnets
if __name__ == '__main__':
euca2ools.commands.elasticloadbalancing.attachloadbalancertosubnets.AttachLoadBalancerToSubnets.run()
| [
"[email protected]"
] | ||
310fbda3e7edaa5707602795633ce2c70e3ef932 | 75cd5cb3ad898548b3be1863de233c2ad01334d2 | /python/Drone_utils.py | 42cee4cff8311d96cd44869e30d0f3e7d7763e01 | [] | no_license | bacisback/tfm | 25e6fa61c202b679e64c10f18062717539a4432d | 626ef8065ea150de882442f523c9fbb0af5c8428 | refs/heads/master | 2023-07-17T06:22:03.869401 | 2021-09-03T11:18:12 | 2021-09-03T11:18:12 | 349,205,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,989 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 29 17:40:46 2021
@author: e321075
"""
import numpy as np
from PIL import Image
import pandas as pd
import os
#############################
# global variables #
#############################
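# Maps semantic-drone class names (string keys) and Cityscapes-style class ids
# (integer keys) onto the training label ids used when encoding masks below.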
label_translation= {"unlabeled":0,
1:1,#road
"paved-area":2, #sidewalk
"roof":3,#building
"wall":3, #wall
"door": 3,
"window": 4,
"fence": 4,
5:3,#billboard
"fence-pole":6, #pole
7:7, #trafic light
8:5, #trafic sign
"dirt":8, #vegetation
"grass":8,#terrain
"water":8,#terrain
"rocks":8,#terrain
"pool":8,#terrain
"vegetation":8,#terrain
"tree":8,
"bald-tree":8,
11:9,#sky
"person":10,# pedestrian
13:10,# rider
"car":11, #car
15:12, #truck
16:12, #bus
17:12, #train
18:11, #moto
"bicycle":11, #bike
20:1, #roadmarks
21:0, #unknown
}
root_dir = "./../semantic_drone_dataset/"
training_set = "training_set/"
training_dir = os.path.join(root_dir, training_set)
img_dir = os.path.join(training_dir, "images/")
gt_dir = os.path.join(training_dir, "gt/semantic/")
class_dict = pd.read_csv(os.path.join(gt_dir, "class_dict.csv"))
class_label_dict = {tuple(class_dict.iloc[i,1:].values): class_dict.iloc[i,0] for i in range(len(class_dict))}
label_dir = os.path.join(gt_dir, "label_images/")
np_labels = os.path.join(gt_dir, "np_labels/")
train_label_file = os.path.join(root_dir, "train.csv") # train file
csv_file = open(train_label_file, "w")
csv_file.write("img,label\n")
for idx, img in enumerate(os.listdir(img_dir)):
img_name = os.path.join(img_dir, img)
label_name = os.path.join(label_dir, img[:-3]+"png")
image = Image.open(label_name)
labels = np.asarray(image.convert("RGB"))
height, weight, _ = labels.shape
label = np.zeros((height,weight))
for h in range(height):
for w in range(weight):
            try:
                label[h, w] = label_translation[class_label_dict[tuple(labels[h, w, :])]]
            except KeyError:
                # Unknown colour / unmapped class -> unlabeled (0).
                label[h, w] = 0
label_name = os.path.join(np_labels, img[:-4]+".npy")
np.save(label_name, label)
csv_file.write("{},{}\n".format(img_name, label_name))
csv_file.close()
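
# Note: the per-pixel Python loop above is slow for large images. A vectorized
# sketch (assuming the same class_label_dict / label_translation tables):
#
# def encode_labels(rgb):
#     rgb = rgb.astype(np.int32)
#     packed = rgb[..., 0] * 65536 + rgb[..., 1] * 256 + rgb[..., 2]
#     out = np.zeros(packed.shape, dtype=np.int64)
#     for (r, g, b), name in class_label_dict.items():
#         out[packed == r * 65536 + g * 256 + b] = label_translation.get(name, 0)
#     return out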
| [
"[email protected]"
] | |
3e6f34481593e6d3a94d3f6cd125b50d1b4b91db | b001b44c95f4a7c5574385baa4fe72c5f3d02236 | /home/migrations/0014_auto_20181104_1611.py | 4acbda228febaf659b1e14a983a45d508d1d4673 | [
"MIT"
] | permissive | gjergjk71/Attendence | 3ae9b9f3cb3c4e0bfe2addf4124b7612a78f1533 | 9e9370125bfc4958de02171a3ae4c8e16bf10913 | refs/heads/master | 2020-04-14T08:46:58.649674 | 2019-01-01T14:27:29 | 2019-01-01T14:27:29 | 163,743,067 | 0 | 0 | MIT | 2019-01-01T14:34:28 | 2019-01-01T14:34:28 | null | UTF-8 | Python | false | false | 2,087 | py | # Generated by Django 2.1.3 on 2018-11-04 10:41
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('home', '0013_auto_20181104_1610'),
]
operations = [
migrations.AlterField(
model_name='semester_1',
name='professerr_name',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='home.Facultys'),
),
migrations.AlterField(
model_name='semester_2',
name='professerr_name',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='home.Facultys'),
),
migrations.AlterField(
model_name='semester_3',
name='professerr_name',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='home.Facultys'),
),
migrations.AlterField(
model_name='semester_4',
name='professerr_name',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='home.Facultys'),
),
migrations.AlterField(
model_name='semester_5',
name='professerr_name',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='home.Facultys'),
),
migrations.AlterField(
model_name='semester_6',
name='professerr_name',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='home.Facultys'),
),
migrations.AlterField(
model_name='semester_7',
name='professerr_name',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='home.Facultys'),
),
migrations.AlterField(
model_name='semester_8',
name='professerr_name',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='home.Facultys'),
),
]
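    # The eight AlterField operations above differ only in model_name. An
    # equivalent hand-written form (a sketch -- auto-generated migrations are
    # normally left as-is) could build them in a comprehension:
    #
    # operations = [
    #     migrations.AlterField(
    #         model_name='semester_{}'.format(i),
    #         name='professerr_name',
    #         field=models.ForeignKey(default=0,
    #             on_delete=django.db.models.deletion.CASCADE, to='home.Facultys'),
    #     )
    #     for i in range(1, 9)
    # ]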
| [
"[email protected]"
] | |
52d152cd936a19bcfaff23e213b40de5abd66f4d | e4e4c60ffa509f257afc915d4c6cd32c0cb7098c | /.history/app_20200916151418.py | ce395406ad7ab0b64c3691b8145d6d189f1fbc90 | [] | no_license | QianyueMa/Google-Health-Search-Project | 01dbd597780158f50eebfba2a228b505f8169726 | 6ef6b270dc7ab0826ad4f0338c9cd95d3571e19a | refs/heads/master | 2022-12-19T03:55:10.328167 | 2020-10-02T12:54:27 | 2020-10-02T12:54:27 | 296,495,736 | 0 | 0 | null | 2020-09-18T02:44:12 | 2020-09-18T02:44:11 | null | UTF-8 | Python | false | false | 3,775 | py | import numpy as np
import os
import requests
import json
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine
import pandas.io.sql as pdsql
from config import pg_user, pg_password, db_name
from flask import Flask, jsonify, render_template, abort, redirect
#################################################
# Database Setup
##################################################
connection_string = f"{pg_user}:{pg_password}@localhost:5432/{db_name}"
engine = create_engine(f'postgresql://{connection_string}')
# checking the table names
engine.table_names()
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
#################################################
# Flask Routes
#################################################
@app.route("/")
def home():
return render_template("index.html")
@app.route("/comparison")
def comparison():
return render_template("comparison.html")
@app.route('/searchbyyear')
def searchbyyear():
sqlStatement = """
SELECT year, SUM ("Cancer" + "cardiovascular" + "stroke" + "depression" + "rehab" + "vaccine" + "diarrhea" + "obesity" + "diabetes") AS Searches
FROM search_condition
GROUP BY year
ORDER BY year;
"""
df = pdsql.read_sql(sqlStatement, engine)
df.set_index('year', inplace=True)
df = df.to_json(orient='table')
result = json.loads(df)
return jsonify(result)
@app.route('/searchyearandcondition')
def searchyearandcondition():
sqlStatement = """
SELECT year, SUM ("Cancer") AS Cancer,SUM ("cardiovascular") As Cardiovascular,SUM ("stroke") As Stroke,SUM ("depression") As Depression,SUM ("rehab") AS Rehab,SUM ("vaccine") AS Vaccine, SUM ("diarrhea") AS Diarrhea, SUM("obesity") AS Obesity, SUM ("diabetes") AS Diabetes
FROM search_condition
GROUP BY year
ORDER BY year;
"""
df = pdsql.read_sql(sqlStatement, engine)
df.set_index('year', inplace=True)
df = df.to_json(orient='table')
result = json.loads(df)
return jsonify(result)
@app.route('/searchbystate')
def searchbystate():
sqlStatement = """
SELECT l.location, l.latitude, l.longitude, SUM (s."Cancer" + s."cardiovascular" + s."stroke" + s."depression" + s."rehab" + s."vaccine" + s."diarrhea" + s."obesity" + s."diabetes") AS Searches
FROM location l
INNER JOIN search_condition s on s.location_id = l.location_id
GROUP BY l.location, l.latitude, l.longitude
ORDER BY location;
"""
df = pdsql.read_sql(sqlStatement, engine)
df.set_index('location', inplace=True)
df = df.to_json(orient='table')
result = json.loads(df)
return jsonify(result)
@app.route('/bylocationandyear')
def bylocationandyear():
sqlStatement = """
SELECT l.location, l.latitude, l.longitude,s.year, SUM (s."Cancer" + s."cardiovascular" + s."stroke" + s."depression" + s."rehab" + s."vaccine" + s."diarrhea" + s."obesity" + s."diabetes") AS Searches
FROM location l
INNER JOIN search_condition s on s.location_id = l.location_id
GROUP BY l.location, l.latitude, l.longitude,s.year
ORDER BY year;
"""
df = pdsql.read_sql(sqlStatement, engine)
df.set_index('year', inplace=True)
df = df.to_json(orient='table')
result = json.loads(df)
return jsonify(result)
@app.route('/casesleadingdeath')
def casesleadingdeath():
sqlStatement = """
SELECT * FROM leading_causes_of_death;
"""
df = pdsql.read_sql(sqlStatement, engine)
df.set_index('year', inplace=True)
df = df.to_json(orient='table')
result = json.loads(df)
return jsonify(result)
if __name__ == '__main__':
app.run(debug=True)
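# Example client call (a sketch; assumes the app is running locally on Flask's
# default port 5000):
#
#   import requests
#   resp = requests.get("http://127.0.0.1:5000/searchbyyear")
#   rows = resp.json()["data"]  # orient="table" nests the records under "data"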
| [
"[email protected]"
] | |
dd3a8b4df0a8549639ca2811e00c55d75a191693 | 1c63089e6efa2e63948075cdfad69ded88b7d40e | /symphony/cli/pyinventory/graphql/service_endpoint_role_enum.py | cb96e2664531afd15454ef3508c35251b2889523 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | aharonnovo/magma | 4c305b3b0ea414a0355a7dc8fdcc7ed7e1be436e | d96a272ee58fea436cc94990c7ad05c4edc76341 | refs/heads/master | 2021-08-01T05:38:35.882598 | 2020-04-02T15:40:13 | 2020-04-02T15:41:55 | 252,531,569 | 0 | 0 | NOASSERTION | 2020-04-02T18:11:35 | 2020-04-02T18:11:34 | null | UTF-8 | Python | false | false | 313 | py | #!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from enum import Enum
class ServiceEndpointRole(Enum):
CONSUMER = "CONSUMER"
PROVIDER = "PROVIDER"
MISSING_ENUM = ""
@classmethod
def _missing_(cls, value: str) -> "ServiceEndpointRole":
return cls.MISSING_ENUM
| [
"[email protected]"
] | |
0deb5d5658c60f9a95c30297977be47460475bd9 | f4c4546f21046ddfd109a0dd34005ac4872f123d | /oldboy/day2/基础数据类型4.py | c5fe2670b3666f8eb90c4ca3640d26fd759070c6 | [] | no_license | wuzhisheng/Python | ebbeacc81881b65d6085b8f4bf10e15a25549ab0 | 84930c2eb5fb0397d546819effef0d879055e2c8 | refs/heads/master | 2022-01-17T14:57:08.926481 | 2021-12-31T09:52:01 | 2021-12-31T09:52:01 | 146,844,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,770 | py | ''''''
'''
字符串:str
存储少量数据类型
python中:凡是用单/双引号引起来的数据就是字符串。
str = 'wuzhisheng 吴志盛'
str1 = "wuzhisheng 吴志盛"
wu='''
'''
# """ """或''' ''' 保留格式
字符串可以用 + 或 *
+ 将字符串进行拼接
w = 'wu'
z = 'zhi'
# 加可以
print(w + z)
print(w * 3)
切片:
w="wwww"
print(w[::2])
'''
# capitalize(): upper-case the first letter, lower-case the rest **
s1 = 'oldBoy'
s2 = s1.capitalize()
print(s2)

# swapcase(): swap upper and lower case **
s1 = 'oldBoy'
print(s1.swapcase())

# title(): upper-case the first letter of each word; words are separated by non-letters **
s1 = 'alex oldboy*tai2tian'
print(s1.title())

# upper(): convert everything to upper case *****
# lower(): convert everything to lower case
# s1 = 'oldBoy'
# print(s1.upper())
# # typical use: case-insensitive verification codes
# username = input("username: ")
# password = input("password: ")
# code = 'QweR'.upper()
# your_code = input('verification code: ').upper()
# if code == your_code:
#     if username == 'alex' and password == '123':
#         print('login successful')
#     else:
#         print('verification failed')
# strip(): by default removes spaces, newlines and tabs from both ends *****
# a specific character (or set of characters) to strip can also be given
# lstrip() / rstrip() strip the left / right side only
name = ' alex '
print(name.strip())  # stripping does not change the original string
# to keep the stripped value permanently, store the result back in a variable
name1 = name.strip()
n1 = ' :aldydtojhgld'
print(n1.strip('ald'))
print(n1.strip(':'))  # note: the leading space stops ':' from being stripped
# startswith / endswith ***** test whether a string starts / ends with a substring
s1 = 'oldBoy'
print(s1.startswith('o'))
print(s1.startswith('old'))
print(s1.startswith('B', 3))  # 3 = position where the check starts
# s1.endswith(...) takes the same arguments
s1 = 'oldBoyB'
# find(): return the index of a substring, or -1 when it is not found *****
# index(): same, but raises an error when the substring is not found
print(s1.index('B'))
print(s1.index('B', 4))  # 4 = position where the search starts
# print(s1.index('A'))  # not found: raises ValueError
# print(s1.find('A'))   # not found: returns -1
i = s1.index('B')
print(i, type(i))
# split(): str ---> list, splitting on whitespace by default *****
# the pieces are returned as a list
s1 = 'wu zhi sheng'
s2 = 'wu,zhi,sheng'
s3 = 'wuzhisheng'
print(s1.split())
print(s2.split(','))
print(s3.split('w', 1))  # splitting on a leading 'w' leaves an empty first element
# replace(): substitute substrings *****
s1 = 'alex是一个sg,alex确实是,alex'
print(s1.replace('alex', 'oldboy'))
print(s1.replace('alex', 'oldboy', 2))  # 2 = maximum number of replacements
print(s1.replace('wu', 'oldboy'))  # substring absent: prints the original, no error
# format(): three usages
# 1. positional placeholders, filled in order
msg = '我们{},今年{},性别{}'.format('wu', 25, '男')
print(msg)
# 2. numbered placeholders (an index may repeat)
msg = '我们{0},今年{1},性别{0}'.format('wu', 25, '男')
print(msg)
# 3. keyword placeholders
msg = '我们{name},今年{age},性别{sex}'.format(name='wu', sex='男', age=25)
print(msg)
# count(): number of occurrences of a substring
s1 = 'oldBoyB'
print(s1.count('B'))
# len(): length of the string
print(len(s1))
name = 'wuzhi123'
print(name.isalnum())  # is the string made up of letters and/or digits only?
print(name.isalpha())  # is the string made up of letters only?
print(name.isdigit())  # is the string made up of digits only?
# join(): build a new string by joining the items of a sequence with a separator
lst = ['alex', 'alex3714']
ret = '|'.join(lst)
print(ret)
sep = '-'  # renamed from str to avoid shadowing the built-in
seq = ("a", "b", "c")
print(sep.join(seq))
# eval(): strips the quotes and converts the text to the corresponding data type
l = '[1,2,3,4,[5,6,7,8,9]]'
d = "{'a':123,'b':456,'c':789}"
t = '([1,3,5],[5,6,7,8,9],[123,456,789])'
print(type(l))
print(type(eval(l)))
print(eval(l))
print(type(d))
print(type(eval(d)))
print(eval(d))
print(type(t))
print(type(eval(t)))
print(eval(t))
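# eval() will execute arbitrary expressions, so it is unsafe on untrusted
# input. A safer alternative for plain literals (an added note, not part of
# the original lesson) is ast.literal_eval:
import ast
print(ast.literal_eval('[1,2,3,4,[5,6,7,8,9]]'))  # parses literals only, never runs code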
# while: open-ended looping, driven by a counter or condition
s1 = 'oldboy'
index = 0
while index < len(s1):
    print(s1[index])
    index += 1

# for: bounded looping, iterating over an existing object
'''
A for loop can be combined with else, and with break and continue.
'''
s1 = 'oldboy'
for i in s1:
    print(i)
    # break
    # continue
else:
    pass
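# for/else illustrated (a small added example): the else branch runs only when
# the loop completes without hitting break.
for ch in 'oldboy':
    if ch == 'z':
        break
else:
    print('no "z" found')  # printed, because break never fired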
| [
"[email protected]"
] | |
71b4904a308e77ad43f1976e2817ec36814cab43 | 4644eb637a27eb7a546ee0b0887a174f7d4c13d6 | /old/test/das_memcache_t.py | f137479d19a8ab168f7662a474e36ae144de4a5c | [] | no_license | dmwm/DAS | 4c33ce63156bbe7f87703532e04321320fd6d66d | a2dddea2c2cb18287d607a3f8da0c6364f9f70d7 | refs/heads/master | 2021-08-08T17:40:27.066179 | 2021-05-27T18:47:21 | 2021-05-27T18:47:21 | 4,300,629 | 7 | 5 | null | 2021-05-27T18:47:22 | 2012-05-11T19:07:55 | Python | UTF-8 | Python | false | false | 3,977 | py | #!/usr/bin/env python
#pylint: disable-msg=C0301,C0103
"""
Unit test for DAS cache module
"""
import unittest
import time
from DAS.utils.utils import genkey
from DAS.utils.das_config import das_readconfig
from DAS.utils.logger import DASLogger
from DAS.core.das_memcache import DASMemcache
try:
# with python 2.5
import hashlib
except:
# prior python 2.5
import md5
class testDASMemcache(unittest.TestCase):
"""
A test class for the DAS cache module
"""
def setUp(self):
"""
set up DAS core module
"""
debug = 0
config = das_readconfig()
logger = DASLogger(verbose=debug, stdout=debug)
config['logger'] = logger
config['verbose'] = debug
self.memcache = DASMemcache(config)
def test_key(self):
"""test DAS cache key generator"""
query = "find site where site=T2_UK"
result = genkey(query)
try:
hash = hashlib.md5()
except:
# prior python 2.5
hash = md5.new()
hash.update(query)
expect = hash.hexdigest()
self.assertEqual(expect, result)
def test_result(self):
"""test DAS memcache result method"""
self.memcache.delete_cache()
query = "find site where site=T2_UK"
expire = 60
expect = [0,1,2,3,4,5,6,7,8,9]
expect = self.memcache.update_cache(query, expect, expire)
expect = [i for i in expect]
result = [i for i in self.memcache.get_from_cache(query)]
result.sort()
self.assertEqual(expect, result)
def test_pagintation(self):
"""test DAS memcache result method with pagination"""
self.memcache.delete_cache()
query = "find site where site=T2_UK"
expire = 60
expect = [0,1,2,3,4,5,6,7,8,9]
expect = self.memcache.update_cache(query, expect, expire)
expect = [i for i in expect]
idx = 1
limit = 3
result = [i for i in self.memcache.get_from_cache(query, idx, limit)]
result.sort()
self.assertEqual(expect[idx:limit+1], result)
def test_sorting(self):
"""test DAS memcache result method with sorting"""
self.memcache.delete_cache()
query = "find site where site=T2_UK"
expire = 60
data = [
{'id':0, 'data':'a', 'run':1},
{'id':1, 'data':'b', 'run':3},
{'id':2, 'data':'c', 'run':2},
]
gen = self.memcache.update_cache(query, data, expire)
res = [i for i in gen]
skey = 'run'
order = 'desc'
result = [i for i in \
self.memcache.get_from_cache(query, skey=skey, order=order)]
expect = [
{'id':1, 'data':'b', 'run':3},
{'id':2, 'data':'c', 'run':2},
{'id':0, 'data':'a', 'run':1},
]
self.assertEqual(expect, result)
skey = 'run'
order = 'asc'
result = [i for i in \
self.memcache.get_from_cache(query, skey=skey, order=order)]
expect = [
{'id':0, 'data':'a', 'run':1},
{'id':2, 'data':'c', 'run':2},
{'id':1, 'data':'b', 'run':3},
]
self.assertEqual(expect, result)
def test_incache(self):
"""test DAS memcache incache method"""
self.memcache.delete_cache()
query = "find site where site=T2_UK"
expire = 1
expect = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
expect = self.memcache.update_cache(query, expect, expire)
expect = [i for i in expect]
result = self.memcache.incache(query)
self.assertEqual(1, result)
time.sleep(2)
result = self.memcache.incache(query)
self.assertEqual(0, result)
#
# main
#
if __name__ == '__main__':
unittest.main()
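# Usage note (an addition): unittest.main() also accepts a single test on the
# command line, e.g.
#
#   python das_memcache_t.py testDASMemcache.test_result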
| [
"metson@4525493e-7705-40b1-a816-d608a930855b"
] | metson@4525493e-7705-40b1-a816-d608a930855b |
c2f66ca6d5d76b2c7657e6035b85941cfe6b9f61 | db303c68682dfd18965a04026ff14e15c1ba6120 | /ch04/ans35.py | b9577362e4fe622f0c34d0265e8758e7446b7b73 | [] | no_license | Toshiyana/nlp100v2020 | 1a89f164de0c720da6d42c19b3fa60f8013d662c | 37d4d208d5d527d163356793b630f36eb7595779 | refs/heads/master | 2023-07-15T15:01:28.454515 | 2021-08-21T13:20:03 | 2021-08-21T13:20:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 931 | py | from collections import defaultdict
def parse_mecab(block):
    res = []
    for line in block.split('\n'):
        if line == '':
            return res
        (surface, attr) = line.split('\t')
        attr = attr.split(',')
        lineDict = {
            'surface': surface,
            'base': attr[6],
            'pos': attr[0],
            'pos1': attr[1]
        }
        res.append(lineDict)
    return res  # fallback for blocks without a trailing empty line
def extract_words(block):
return [b['base'] + '_' + b['pos'] + '_' + b['pos1'] for b in block]
filename = 'ch04/neko.txt.mecab'
with open(filename, mode='rt', encoding='utf-8') as f:
blocks = f.read().split('EOS\n')
blocks = list(filter(lambda x: x != '', blocks))
blocks = [parse_mecab(block) for block in blocks]
words = [extract_words(block) for block in blocks]
d = defaultdict(int)
for word in words:
for w in word:
d[w] += 1
ans = sorted(d.items(), key=lambda x: x[1], reverse=True)
print(ans)
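# The defaultdict tally above can also be written with collections.Counter
# (an equivalent sketch; most_common() gives the same descending order):
from collections import Counter
counter = Counter(w for word in words for w in word)
print(counter.most_common())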
| [
"[email protected]"
] | |
fa97372f1c7c1784d5b67306cc72c036f1556e99 | bb048a319e732cb7c059b5fb877b834aab7525c5 | /dcbots/bench/sqltypes.py | b6544393cc54f81cbb49f1292c8f0a71288a7a8f | [] | no_license | dodysw/dodysw-svn | 9ec38513b7be8ec87d4ae8f6510bc7624e4ce4c0 | 1f784dae610b1052bf4cd96f5d470e56f0a08528 | refs/heads/master | 2021-01-22T02:34:00.193636 | 2016-09-10T17:02:22 | 2016-09-10T17:02:22 | 25,316,568 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,340 | py | import time
import MySQLdb as dbm
def measure(f):
    # NB: time.clock() was removed in Python 3.8; this Python 2 script would
    # use time.perf_counter() on Python 3
    start = time.clock()
    f()
    print time.clock()-start
def init():
global tth_rows, cur
MYSQL_CONNECT_PARAM = dict(host='localhost', user='whatsnew_dcbot', passwd='', db='whatsnew_dcbot')
conn = dbm.connect(**MYSQL_CONNECT_PARAM)
cur = conn.cursor()
sql = "select tth from shares limit 20000"
cur.execute(sql)
tth_rows = cur.fetchall()
def normal():
for (tth,) in tth_rows:
sql = "insert into shares (tth, share_path, filename, extension, size, last_nick, first_found_time, last_found_time, dc_file_type) values (%s,'kucing', 'kucing', 'kcg', 0, 'kucing', 1, 1, 10)"
try:
cur.execute(sql, tth)
except dbm.IntegrityError:
pass
def fast1():
sql = "insert ignore into shares (tth, share_path, filename, extension, size, last_nick, first_found_time, last_found_time, dc_file_type) values (%s,'kucing', 'kucing', 'kcg', 0, 'kucing', 1, 1, 10)"
cur.executemany(sql, [r[0] for r in tth_rows])
def try1():
for (tth,) in tth_rows:
if cur.execute("select 1 from shares where tth=%s", tth) == 0:
sql = "insert into shares (tth, share_path, filename, extension, size, last_nick, first_found_time, last_found_time, dc_file_type) values (%s,'kucing', 'kucing', 'kcg', 0, 'kucing', 1, 1, 10)"
cur.execute(sql, tth)
def try2():
# create heap table
cur.execute("create temporary table tth_list (tth char(39)) engine=MEMORY")
# populate the heap
cur.executemany("insert into tth_list (tth) values (%s)", [r[0] for r in tth_rows])
# join with shares, and get list of new tth
if cur.execute("select tth_list.tth from tth_list left join shares on tth_list.tth = shares.tth where shares.tth is null"):
print 'at least 1 is new'
sql = "insert into shares (tth, share_path, filename, extension, size, last_nick, first_found_time, last_found_time, dc_file_type) values (%s,'kucing', 'kucing', 'kcg', 0, 'kucing', 1, 1, 10)"
cur.executemany(sql, [r[0] for r in cur])
else:
print 'all tth are not new'
    #~ cur.execute("drop table tth_list")  # the temporary table created above is tth_list
if __name__ == '__main__':
init()
measure(normal)
measure(fast1)
measure(try1)
measure(try2) | [
"[email protected]"
] | |
55c0387190029797fc40a4a5795409b09aa8c92e | f98de2db6b24d30d64f1145c7d8da4a40385a87f | /packages/grid_control/backends/aspect_cancel.py | 5b86166d12d61c4cbe5cccbb139d0d6dfe99df5a | [] | no_license | greyxray/grid-control | f9f453491fe7bc506d4cfc240afaa364ba9db84b | ed10fdb6ff604006a5d52dcd43c2e55c9e962c0a | refs/heads/master | 2020-04-15T13:15:21.103357 | 2019-01-08T18:23:07 | 2019-01-08T18:23:07 | 164,709,043 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,028 | py | # | Copyright 2016 Karlsruhe Institute of Technology
# |
# | Licensed under the Apache License, Version 2.0 (the "License");
# | you may not use this file except in compliance with the License.
# | You may obtain a copy of the License at
# |
# | http://www.apache.org/licenses/LICENSE-2.0
# |
# | Unless required by applicable law or agreed to in writing, software
# | distributed under the License is distributed on an "AS IS" BASIS,
# | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# | See the License for the specific language governing permissions and
# | limitations under the License.
import time
from grid_control import utils
from grid_control.backends.backend_tools import BackendExecutor, ProcessCreatorAppendArguments
from grid_control.utils.activity import Activity
from hpfwk import AbstractError
from python_compat import identity, lmap
class CancelJobs(BackendExecutor):
def execute(self, wmsIDs, wmsName): # yields list of (wmsID,)
raise AbstractError
class CancelJobsWithProcess(CancelJobs):
def __init__(self, config, proc_factory):
CancelJobs.__init__(self, config)
self._timeout = config.getTime('cancel timeout', 60, onChange = None)
self._errormsg = 'Job cancel command returned with exit code %(proc_status)s'
self._proc_factory = proc_factory
def _parse(self, wmsIDs, proc): # yield list of (wmsID,)
raise AbstractError
def execute(self, wmsIDs, wmsName):
proc = self._proc_factory.create_proc(wmsIDs)
for result in self._parse(wmsIDs, proc):
if not utils.abort():
yield result
if proc.status(timeout = 0, terminate = True) != 0:
self._handleError(proc)
def _handleError(self, proc):
self._filter_proc_log(proc, self._errormsg)
class CancelJobsWithProcessBlind(CancelJobsWithProcess):
def __init__(self, config, cmd, args = None, fmt = identity, unknownID = None):
proc_factory = ProcessCreatorAppendArguments(config, cmd, args, fmt)
CancelJobsWithProcess.__init__(self, config, proc_factory)
self._blacklist = None
if unknownID is not None:
self._blacklist = [unknownID]
def _parse(self, wmsIDs, proc): # yield list of (wmsID,)
proc.status(self._timeout, terminate = True)
return lmap(lambda wmsID: (wmsID,), wmsIDs)
def _handleError(self, proc):
self._filter_proc_log(proc, self._errormsg, blacklist = self._blacklist, log_empty = False)
class CancelAndPurgeJobs(CancelJobs):
def __init__(self, config, cancel_executor, purge_executor):
CancelJobs.__init__(self, config)
(self._cancel_executor, self._purge_executor) = (cancel_executor, purge_executor)
def setup(self, log):
CancelJobs.setup(self, log)
self._cancel_executor.setup(log)
self._purge_executor.setup(log)
def execute(self, wmsIDs, wmsName): # yields list of (wmsID,)
marked_wmsIDs = lmap(lambda result: result[0], self._cancel_executor.execute(wmsIDs, wmsName))
time.sleep(5)
activity = Activity('Purging jobs')
for result in self._purge_executor.execute(marked_wmsIDs, wmsName):
yield result
activity.finish()
| [
"[email protected]"
] | |
1cec7bbfb2bf32ef41170fcc4fcf729d5ebf3d4c | 9d29ca19feddfb774e990ccef6903206ecdb4ea1 | /src/comparable/sim_calculator.py | 0ec1b3506959ed19ff36479fad85deb95fb454e8 | [] | no_license | rasoolims/ImageTranslate | 180f5d6c310f7eb028bc3246e12ff7a5ab7b4fa8 | 51593a845a95fa3d05fc722a7c6a33077ee267be | refs/heads/master | 2023-06-23T14:21:13.985028 | 2022-09-29T18:57:06 | 2022-09-29T18:57:06 | 250,050,377 | 5 | 1 | null | 2023-06-12T21:28:44 | 2020-03-25T17:49:40 | Python | UTF-8 | Python | false | false | 9,318 | py | from collections import defaultdict
from optparse import OptionParser
import torch
import torch.nn as nn
from apex import amp
from torch.nn.utils.rnn import pad_sequence
replacements = {"۰": "0", "۱": "1", "۲": "2", "۳": "3", "۴": "4", "۵": "5", "۶": "6", "۷": "7", "۸": "8", "۹": "9",
"٫": ".", "૦": "0", "०": "0", "૧": "1", "१": "1", "૨": "2", "२": "2", "૩": "3", "३": "3", "૪": "4",
"४": "4", "૫": "5", "५": "5", "૬": "6", "६": "6", "૭": "7", "७": "7", "૮": "8", "८": "8", "૯": "9",
"९": "9"}
tok_replacements = {}
def digit_replace(tok, convert_numbers=False):
if not convert_numbers:
return tok
if tok in tok_replacements:
return tok_replacements[tok]
new_tok = "".join(map(lambda char: replacements[char] if char in replacements else char, list(tok)))
tok_replacements[tok] = new_tok
return new_tok
def get_option_parser():
parser = OptionParser()
parser.add_option("--src", dest="src_file", metavar="FILE", default=None)
parser.add_option("--dst", dest="dst_file", metavar="FILE", default=None)
parser.add_option("--src-embed", dest="src_embed", metavar="FILE", default=None)
parser.add_option("--dst-embed", dest="dst_embed", metavar="FILE", default=None)
parser.add_option("--dict", dest="src2dst_dict", metavar="FILE", default=None)
parser.add_option("--output", dest="output_file", metavar="FILE", default=None)
parser.add_option("--batch", dest="batch", help="Batch size", type="int", default=40000)
parser.add_option("--fp16", action="store_true", dest="fp16", default=False)
parser.add_option("--convert", action="store_true", dest="convert_numbers", default=False)
return parser
class SimModel(nn.Module):
def __init__(self, src_vectors, dst_vectors):
super(SimModel, self).__init__()
self.src_embed = nn.Embedding(src_vectors.size(0), src_vectors.size(1), _weight=src_vectors)
self.dst_embed = nn.Embedding(dst_vectors.size(0), dst_vectors.size(1), _weight=dst_vectors)
def forward(self, src_batch, dst_batch, match_vectors, digit_mask):
try:
src_embed = self.src_embed(src_batch)
dst_embed = self.dst_embed(dst_batch)
src_pad = (src_batch == 0).unsqueeze(-1).float()
dst_pad = (dst_batch == 0).unsqueeze(-1).float()
mm = torch.bmm(src_embed, dst_embed.transpose(1, 2))
pad_mm = (torch.bmm(src_pad, dst_pad.transpose(1, 2)) == 1)
            mm.masked_fill_(pad_mm, 0)  # boolean-mask indexing copies, so .fill_ on mm[pad_mm] would not modify mm
max_cos = torch.max(mm, dim=-1)[0]
max_cos = torch.max(max_cos, match_vectors) # Incorporating dictionary information.
max_cos = torch.min(max_cos, digit_mask)
return torch.sum(max_cos, dim=-1)
except RuntimeError as err:
return torch.zeros(src_batch.size(0)).fill_(-1)
get_id = lambda x, dct: dct[x] if x in dct else (dct[x.lower()] if x.lower() in dct else None)
get_ids = lambda line, dct: list(filter(lambda x: x is not None, map(lambda x: get_id(x, dct), line.split(" "))))
get_src_id = lambda x, dct: dct[x] if x in dct else (dct[x.lower()] if x.lower() in dct else 0)
get_src_ids = lambda line, dct: list(
filter(lambda x: x is not None, map(lambda x: get_src_id(x, dct), line.split(" "))))
def read_dict(dict_file):
src2dst_dict = defaultdict(set)
if dict_file is None:
return src2dst_dict
with open(dict_file, "r") as r:
for line in r:
spl = line.strip().split("\t")
if len(spl) != 2: continue
src2dst_dict[spl[0].lower()].add(spl[1].lower())
return src2dst_dict
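# Expected dictionary format (an illustration): one tab-separated pair per
# line, source word then target word, e.g.
#
#   dog<TAB>chien
#   house<TAB>maison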
def build_batches(src_file, dst_file, src_embed_dict, dst_embed_dict, src2dst_dict, batch=40000):
current_src_batch, current_dst_batch, dict_match_vectors, digit_masks, num_tok = [], [], [], [], 0
with open(src_file, "r") as src_r, open(dst_file, "r") as dst_r:
for src_line, dst_line in zip(src_r, dst_r):
src_words = src_line.lower().strip().split(" ")
dst_words = dst_line.lower().strip().split(" ")
dict_match_vector = [0] * len(src_words)
digit_src = list(map(lambda x: digit_replace(x, options.convert_numbers), src_words))
digit_dst = list(map(lambda x: digit_replace(x, options.convert_numbers), dst_words))
is_digit_src = list(map(lambda x: x.replace('.', '', 1).isdigit(), digit_src))
is_digit_dst = list(map(lambda x: x.replace('.', '', 1).isdigit(), digit_dst))
digit_mask = [1.0] * len(src_words)
for i, w in enumerate(src_words):
if is_digit_src[i]:
digit_mask[i] = -100
for j, t in enumerate(dst_words):
if t in src2dst_dict[w] or t == w:
dict_match_vector[i] = 1.0
digit_mask[i] = 1.0
break
if (is_digit_src[i] and is_digit_dst[j]) and digit_src[i] == digit_dst[j]:
digit_mask[i] = 1.0
dict_match_vector = torch.Tensor(dict_match_vector)
dict_match_vectors.append(dict_match_vector)
digit_mask = torch.Tensor(digit_mask)
digit_masks.append(digit_mask)
src_ids = torch.LongTensor(get_src_ids(src_line.strip(), src_embed_dict))
dst_ids = torch.LongTensor(get_ids(dst_line.strip(), dst_embed_dict))
current_src_batch.append(src_ids)
current_dst_batch.append(dst_ids)
num_tok += len(src_ids) + len(dst_line)
if num_tok >= batch:
src_batch = pad_sequence(current_src_batch, batch_first=True, padding_value=0)
dst_batch = pad_sequence(current_dst_batch, batch_first=True, padding_value=0)
dict_match_vectors = pad_sequence(dict_match_vectors, batch_first=True, padding_value=0)
digit_masks = pad_sequence(digit_masks, batch_first=True,
padding_value=1) # Padding is one for digit mask
yield src_batch, dst_batch, dict_match_vectors, digit_masks
current_src_batch, current_dst_batch, dict_match_vectors, digit_masks, num_tok = [], [], [], [], 0
if num_tok > 0:
src_batch = pad_sequence(current_src_batch, batch_first=True, padding_value=0)
dst_batch = pad_sequence(current_dst_batch, batch_first=True, padding_value=0)
dict_match_vectors = pad_sequence(dict_match_vectors, batch_first=True, padding_value=0)
digit_masks = pad_sequence(digit_masks, batch_first=True, padding_value=1) # Padding is one for digit mask
yield src_batch, dst_batch, dict_match_vectors, digit_masks
if __name__ == "__main__":
parser = get_option_parser()
(options, args) = parser.parse_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
src2dst_dict = read_dict(options.src2dst_dict)
print("Reading src embedding")
src_vectors = []
src_embed_dict = {}
vec_length = 150
with open(options.src_embed, "r") as r:
for line in r:
spl = line.strip().split(" ")
if len(spl) < 3: continue
v = list(map(lambda x: float(x), spl[1:]))
            if len(v) != vec_length: continue  # drop malformed rows (vec_length is 150 here)
src_vectors.append(torch.Tensor(v))
vec_length = len(src_vectors[-1])
src_embed_dict[spl[0]] = len(src_embed_dict) + 1
src_vectors.insert(0, torch.Tensor([1e-4] * vec_length))
src_vectors = torch.stack(src_vectors)
src_norm = torch.norm(src_vectors, dim=-1, p=2).unsqueeze(-1) + 1e-4
    src_vectors = torch.div(src_vectors, src_norm)  # row-normalize so bmm yields cosine similarities (mirrors the dst branch below)
print("Reading dst embedding")
dst_vectors = []
dst_embed_dict = {}
with open(options.dst_embed, "r") as r:
for line in r:
spl = line.strip().split(" ")
if len(spl) < 3: continue
dst_vectors.append(torch.Tensor(list(map(lambda x: float(x), spl[1:]))))
dst_embed_dict[spl[0]] = len(dst_embed_dict) + 1
dst_vectors.insert(0, torch.Tensor([1e-4] * vec_length))
dst_vectors = torch.stack(dst_vectors)
dst_norm = torch.norm(dst_vectors, dim=-1, p=2).unsqueeze(-1) + 1e-4
dst_vectors = torch.div(dst_vectors, dst_norm)
sim_model = SimModel(src_vectors=src_vectors, dst_vectors=dst_vectors)
if options.fp16:
sim_model.to(device)
sim_model = amp.initialize(sim_model, opt_level="O2")
with torch.no_grad(), open(options.output_file, "w") as ow:
for i, (src_batch, dst_batch, dict_match_batch, digit_mask) in enumerate(
build_batches(options.src_file, options.dst_file, src_embed_dict, dst_embed_dict, src2dst_dict,
options.batch)):
src_batch = src_batch.to(device)
dst_batch = dst_batch.to(device)
dict_match_batch = dict_match_batch.to(device)
digit_mask = digit_mask.to(device)
sims = sim_model(src_batch, dst_batch, dict_match_batch, digit_mask)
sims_txt = "\n".join(list(map(lambda x: str(float(x)), sims)))
ow.write(sims_txt)
ow.write("\n")
print(i, end="\r")
print("\nDone!")
| [
"[email protected]"
] | |
6eb50de935128c4b95da982aeecb7b0ac8837c6e | f121695e2dff353607fa47fb42482470e03bbf8a | /capitulo_05-Instrucoes_if/hello_admin.py | 498fd1d357d23865008f7fd5357e38db07827afc | [] | no_license | ranog/python_work | 76cbcf784c86fae4482be5383223e4b0a34f4130 | 47c442a90dcf32d5aef70858693a772a3c76a7ac | refs/heads/master | 2022-12-22T11:02:26.482059 | 2021-04-17T01:12:22 | 2021-04-17T01:12:22 | 233,634,221 | 2 | 1 | null | 2022-12-08T07:38:43 | 2020-01-13T15:58:46 | Python | UTF-8 | Python | false | false | 4,523 | py | #! /usr/bin/env python3
"""
NOME
hello_admin.py - FAÇA VOCÊ MESMO
SINOPSES
chmod +x hello_admin.py
./hello_admin.py
- Precisamos encontrar alguns usuários!
- Breno: Usuário está disponível.
- Felipe: Usuário está disponível.
- João: Usuário está disponível.
- ISAAC: Usuário indisponível, fornecer um novo nome por favor.
- eRicK: Usuário indisponível, fornecer um novo nome por favor.
1st
2nd
3rd
4th
5th
6th
7th
8th
9th
DESCRIÇÃO
FAÇA VOCÊ MESMO
5.8 – Olá admin: Crie uma lista com cinco ou mais nomes de usuários,
incluindo o nome 'admin'. Suponha que você esteja escrevendo um código que
exibirá uma saudação a cada usuário depois que eles fizerem login em um site.
Percorra a lista com um laço e mostre uma saudação para cada usuário:
• Se o nome do usuário for 'admin', mostre uma saudação especial, por exemplo,
Olá admin, gostaria de ver um relatório de status?
• Caso contrário, mostre uma saudação genérica, como Olá Eric, obrigado por
fazer login novamente.
5.9 – Sem usuários: Acrescente um teste if em hello_admin.py para garantir
que a lista de usuários não esteja vazia.
• Se a lista estiver vazia, mostre a mensagem Precisamos encontrar alguns
usuários!
• Remova todos os nomes de usuário de sua lista e certifique-se de que a
mensagem correta seja exibida.
5.10 – Verificando nomes de usuários: Faça o seguinte para criar um programa
que simule o modo como os sites garantem que todos tenham um nome de usuário
único.
• Crie uma lista chamada current_users com cinco ou mais nomes de usuários.
• Crie outra lista chamada new_users com cinco nomes de usuários. Garanta
que um ou dois dos novos usuários também estejam na lista current_users.
• Percorra a lista new_users com um laço para ver se cada novo nome de
usuário já foi usado. Em caso afirmativo, mostre uma mensagem informando
que a pessoa deverá fornecer um novo nome. Se um nome de usuário não foi
usado, apresente uma mensagem dizendo que o nome do usuário está disponível.
• Certifique-se de que sua comparação não levará em conta as diferenças
entre letras maiúsculas e minúsculas. Se 'John' foi usado, 'JOHN' não deverá
ser aceito.
5.11 – Números ordinais: Números ordinais indicam sua posição em uma lista,
por exemplo, 1st ou 2nd, em inglês. A maioria dos números ordinais nessa
língua termina com th, exceto 1, 2 e 3.
• Armazene os números de 1 a 9 em uma lista.
• Percorra a lista com um laço.
• Use uma cadeia if-elif-else no laço para exibir a terminação apropriada
para cada número ordinal. Sua saída deverá conter "1st 2nd 3rd 4th 5th
6th 7th 8th 9th", e cada resultado deve estar em uma linha separada.
----------------------------------------------------------------------
HISTÓRICO
20202210: João Paulo, outubro de 2020.
- FAÇA VOCÊ MESMO - Pag. 127-128;
- 5.8 – Olá admin - Pag. 127-128.
20202310: João Paulo, outubro de 2020.
- 5.9 – Sem usuários - Pag. 128;
- 5.10 – Verificando nomes de usuários - 128;
- 5.11 – Números ordinais - Pag. 128.
"""
# users = ['maria', 'joão', 'josé', 'joana', 'manoel', 'admin']
users = []

if users:
    for user in users:
        if user == 'admin':
            print("\n- Hello " + user + ", would you like to see a status report?")
        else:
            print("\n- Hello " + user.title() + ", thank you for logging in again.")
else:
    print("\n- We need to find some users!")

# lower-case both sides so the comparison is fully case insensitive
# ('ISAAC' matches 'isaac', and 'John' would match 'JOHN')
current_users = ['alexandre', 'eduardo', 'henrique', 'augusto', 'erick', 'isaac']
new_users = ['Breno', 'Felipe', 'João', 'ISAAC', 'eRicK']

current_users_lower = [user.lower() for user in current_users]
for new_user in new_users:
    if new_user.lower() in current_users_lower:
        print("\n- " + new_user + ": username unavailable, please provide a new name.")
    else:
        print("\n- " + new_user + ": username is available.")

numbers = list(range(1, 10))
for number in numbers:
    if number == 1:
        print("\n" + str(number) + "st")
    elif number == 2:
        print(str(number) + "nd")
    elif number == 3:
        print(str(number) + "rd")
    else:
        print(str(number) + "th")
| [
"[email protected]"
] | |
02e8402c5117c2b27d6c879a64f03f2ff0da8a59 | 1afae73794c6d4874360c7a12e4d2f2dbe62ca56 | /gilda/tests/test_process.py | 10e7d813d8f9ab94695884c5b91a62d83ae79e63 | [
"BSD-2-Clause",
"BSD-2-Clause-Views"
] | permissive | steppi/gilda | ce2dc1d1d56363c6543b6076a8605d32baca0f60 | 4927469e5f9a4ca20a056f617c126fe6a4bf3b34 | refs/heads/master | 2021-11-19T12:52:13.465346 | 2021-09-25T00:40:17 | 2021-09-25T00:40:17 | 194,151,959 | 0 | 0 | null | 2019-06-27T19:28:27 | 2019-06-27T19:28:26 | null | UTF-8 | Python | false | false | 638 | py | from gilda.process import depluralize, replace_greek_spelled_out
def test_depluralize():
assert depluralize('BRAF') == ('BRAF', 'non_plural')
assert depluralize('apoptosis') == ('apoptosis', 'non_plural')
assert depluralize('mosquitoes') == ('mosquito', 'plural_oes')
assert depluralize('antibodies') == ('antibody', 'plural_ies')
assert depluralize('branches') == ('branch', 'plural_es')
assert depluralize('CDs') == ('CD', 'plural_caps_s')
assert depluralize('receptors') == ('receptor', 'plural_s')
def test_greek():
assert replace_greek_spelled_out('interferon-γ') == \
'interferon-gamma'
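# Usage note (an addition): run these checks directly with pytest, e.g.
#
#   pytest gilda/tests/test_process.py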
| [
"[email protected]"
] | |
f5a4068248717cdecf969c101345a43b1250ad3f | dd5d05ecb3e6752941dd5c72ca955307fca8ec14 | /d11/d11p2.py | 0b9004cb76477d36074131ce8fe0ef87a6d26774 | [] | no_license | jabadia/adventOfCode2020 | 81e7166cb2646f83da10d70bbaf4c31026ce7938 | 587001526b90bc6eed703e273b2d6cbec7e9b151 | refs/heads/main | 2023-02-05T15:39:22.123879 | 2020-12-28T08:52:54 | 2020-12-28T08:52:54 | 317,472,068 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,140 | py | import time
from collections import defaultdict
from utils.test_case import TestCase
from d11_input import INPUT
TEST_CASES = [
TestCase("""
L.LL.LL.LL
LLLLLLL.LL
L.L.L..L..
LLLL.LL.LL
L.LL.LL.LL
L.LLLLL.LL
..L.L.....
LLLLLLLLLL
L.LLLLLL.L
L.LLLLL.LL
""", 26),
]
def find_nearest_from(ferry, pos, delta):
rows = len(ferry)
cols = len(ferry[0])
neighbour = (pos[0] + delta[0], pos[1] + delta[1])
while 0 <= neighbour[0] < rows and 0 <= neighbour[1] < cols and ferry[neighbour[0]][neighbour[1]] == '.':
neighbour = (neighbour[0] + delta[0], neighbour[1] + delta[1])
if 0 <= neighbour[0] < rows and 0 <= neighbour[1] < cols:
return neighbour
else:
return None
def find_neighbours(ferry):
neighbours = {}
rows = len(ferry)
cols = len(ferry[0])
for row in range(rows):
for col in range(cols):
# search for nearest neighbour in each direction
if ferry[row][col] == '.':
continue
key = (row, col)
neighbours[key] = list(filter(None, [
find_nearest_from(ferry, (row, col), delta)
for delta in [(1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0), (-1, -1), (0, -1), (1, -1)]
]))
return neighbours
def visible_seats(ferry, neighbours, row, col):
return sum(1 for i, j in neighbours[(row, col)] if ferry[i][j] == '#')
def next_generation(ferry, neighbours):
rows = len(ferry)
cols = len(ferry[0])
next_ferry = [['.'] * cols for _ in range(rows)]
for row in range(rows):
for col in range(cols):
if ferry[row][col] == 'L':
next_ferry[row][col] = '#' if visible_seats(ferry, neighbours, row, col) == 0 else 'L'
elif ferry[row][col] == '#':
next_ferry[row][col] = '#' if visible_seats(ferry, neighbours, row, col) < 5 else 'L'
return next_ferry
def test_visible(ferry, row, col, expected_visible):
neighbours = find_neighbours(ferry)
actual_visible = visible_seats(ferry, neighbours, row, col)
assert actual_visible == expected_visible
print('ok')
test_visible([
".......#.",
"...#.....",
".#.......",
".........",
"..#L....#",
"....#....",
".........",
"#........",
"...#.....",
], 4, 3, 8)
test_visible([
".............",
".L.L.#.#.#.#.",
".............",
], 1, 1, 0)
test_visible([
".##.##.",
"#.#.#.#",
"##...##",
"...L...",
"##...##",
"#.#.#.#",
".##.##.",
], 3, 3, 0)
def solve(input):
ferry = [list(row) for row in input.strip().split('\n')]
iteration = 0
neighbours = find_neighbours(ferry)
while True:
next_ferry = next_generation(ferry, neighbours)
if next_ferry == ferry:
return sum(row.count('#') for row in ferry)
ferry = next_ferry
# print(iteration)
iteration += 1
if __name__ == '__main__':
for case in TEST_CASES:
result = solve(case.case)
case.check(result)
t0 = time.time()
print(solve(INPUT))
t1 = time.time()
print(f"{(t1 - t0) * 1000:0.1f} ms")
| [
"[email protected]"
] | |
5e365d856993c7130f5b9560833669d748b12ddf | d60ee49abaee6c74c5b777f8f112a7f75f71f029 | /genome/variants2/active_driver/convert_ptm.py | 8bc23a1001aa4e3e7c10a5588f7623f8747323f3 | [] | no_license | ak352/melanomics | 41530f623b4bfdbd5c7b952debcb47622d1a8e88 | fc5e6fdb1499616fb25a8dc05259add8a65aeca0 | refs/heads/master | 2020-12-24T16:14:42.271416 | 2015-08-06T12:48:52 | 2015-08-06T12:48:52 | 18,439,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,484 | py | import sys
import re
def get_genes(gene_file):
genes = {}
for gene in gene_file:
gene = gene.strip('\n').split('\t')
transcript = gene[0]
genename = gene[1]
genes[transcript] = genename
return genes
def get_gene_intersection(data_file, gene_list, out_file):
# open files
data = open(data_file, 'r')
g = open(gene_list, 'r')
out = open(out_file, 'w')
logfile = out_file + ".log"
log = open(logfile, "w")
sys.stderr.write("Input: %s\n"% data_file)
sys.stderr.write("Output: %s\n"% out_file)
sys.stderr.write("Logfile: %s\n"% logfile)
genes = get_genes(g)
num_not_found = 0
out.write(next(data))
for line in data:
line = line.strip('\n').split('\t')
idname = line[0]
#print line
#print idname
if idname not in genes:
log.write(idname + " NOT in genes\n")
num_not_found += 1
else:
            idname = genes[idname][2:-2]  # trims two wrapper characters on each side (presumably a leading '["' and trailing '"]' in the gene list)
line[0] = idname
line = "\t".join(line)
out.write(line + "\n")
sys.stderr.write("Number of transcripts not found in RefSeq = %d\n"% num_not_found)
# close files
data.close()
g.close()
out.close()
if __name__ == '__main__':
# python script.py myData.txt gene_list.txt gene_intersection.txt
data = sys.argv[1]
genes = sys.argv[2]
out = sys.argv[3]
get_gene_intersection(data, genes, out)
| [
"[email protected]"
] | |
89af33c9dbbe0218a856138862c6a6083250c2ff | c96901e702b0c5f84170f95ed28263528a590e99 | /trials/trial_25_seaborn_heatmap.py | 3a115563ffc9b2f2f86649deb3e2621612ade7df | [] | no_license | CovertLab/SingleCellSequencing | d31c1898f07707e524bff24e02448f3b9798476d | 244dbe0757ffde813d683fa2f0fa68d125735685 | refs/heads/master | 2021-06-07T04:27:25.723887 | 2016-09-27T22:17:36 | 2016-09-27T22:17:36 | 27,788,049 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,537 | py |
"""
Analysis!
Cluster the time traces and then compare the gene expression for each cluster
"""
"""
Import python packages
"""
import HTSeq
import time
import collections
import itertools
import os
import subprocess
import collections
import datetime
import yaml
import fnmatch
import shlex
import numpy
import scipy
import scipy.io as sio
import pyensembl
import h5py
import pandas as pd
import numpy as np
import matplotlib as mpl
import cPickle as pickle
import matplotlib.pyplot as plt
import matplotlib as mpl
import scipy.cluster.hierarchy as sch
import rpy2
import rpy2.robjects.numpy2ri
import seaborn as sns
from rpy2.robjects.packages import importr
rpy2.robjects.numpy2ri.activate()
import rpy2.robjects as ro
from rpy2.robjects import pandas2ri
from rpy2.robjects import r
from rpy2 import robjects as ro
from dba import dba
from dba import align_to
from rpy2.robjects.vectors import DataFrame as RDataFrame
from rpy2 import rinterface
from rpy2.robjects import conversion
@conversion.py2ro.register(pd.DataFrame)
def py2ro_pandasdataframe(obj):
ri_dataf = conversion.py2ri(obj)
# cast down to an R list (goes through a different code path
# in the DataFrame constructor, avoiding `str(k)`)
ri_list = rinterface.SexpVector(ri_dataf)
return RDataFrame(ri_list)
def zero_crossing(data, offset):
new_data = data - offset
zc = np.where(np.diff(np.signbit(new_data)))[0]
return zc[0]
mpl.use("Agg")
mpl.rcParams['pdf.fonttype'] = 42
# mpl.style.use('ggplot')
R = rpy2.robjects.r
DTW = importr('dtw')
DTWclust = importr('dtwclust')
scde = importr("scde")
# Load data sets in R
r("""load("/scratch/PI/mcovert/dvanva/sequencing/all_cells_scde_fit_linear.RData")""")
r("""load("/scratch/PI/mcovert/dvanva/sequencing/counts_data.RData")""")
# Load pickle file with cell objects
direc = '/scratch/PI/mcovert/dvanva/sequencing/'
all_cell_file = 'all_cells_qc_complete.pkl'
all_cells_total = pickle.load(open(os.path.join(direc,all_cell_file)))
# Determine which genes to look at
# list_of_genes = ["Nfkbia", "Nfkbie", "Nfkbiz", "Nlrp3", "Tnfaip2", "Tnfaip3"]
# list_of_genes = ["Cxcl2", "Cxcl3", "Cxcl10", "Ccl3", "Ccl4", "Ccl5", "Ccl20", "Tnf", "Tnfsf9", "Il1a", "Il1b", "Il1f6", "Il6", "Il27", "Il1f9", "Lif", "Csf3"]
# list_of_genes = ["Hmox1", "Prdx1", "Hdc", "Ptgs2", "Irg1"]
# list_of_genes = ["Plaur", "Sqstm1", "Clec4e", "Sdc4", "Procr", "Slpi", "Plk2", "Saa3", "Slc7a11", "Cish", "Gp49a", "Hcar2", "Gpr84", "Malt1"]
# list_of_genes = ['Dnmt3b', 'Tecta', 'Tm6sf2', 'Bricd5', 'Prdm12', 'Prdm13', 'Adora2a', 'Ccdc162', 'Gm5283', 'Gm11400', 'Olfr536', 'Gm13145', 'Gm13333', 'Zfp661', 'Angptl3', 'Sipa1l3', 'Scn1a', 'Sprr2d', 'Il17rc', 'Zglp1', 'Akr1cl', 'Map1a', 'Trim14', 'Adgrg6', 'Gm13991', 'Dhrs11', 'Gm21834', 'Iqca', 'Gm2007', 'Slc39a8', 'Gng7', 'AL663030.1', 'Nphp4', 'Nod1', 'Emc9', 'Akr1b7', 'Il33', 'Mmp14', 'Zfyve1', 'Cetn4', '2610305D13Rik', 'Mettl25', 'Ric8b', 'Mterf2', 'Zfp850', 'Clec4a4', 'Saa3', 'Hist1h4n', 'Gm11007', 'Cntrob', 'Atp7b', 'Mtl5', '1700061G19Rik', 'Coro2b', '1700030J22Rik', 'Gm8898', 'Tmem86b', 'Car9', 'Gm5157', 'Gm15539', 'Arhgef18', 'Slc13a3', 'Dclk1', 'Ager', 'Actr3b', 'Zfp41', 'Fzd8', '4930524J08Rik', 'Zic5', 'Trem1', 'Ppp1r32', 'Stk36', 'Gnao1', 'Tmem239', 'Polm', 'Fgf21', 'Gprasp2', 'Tesk1', 'Athl1', 'Kptn']
# list_of_genes = ['Mmp3', 'Ccl5', 'Gpr137c', 'Efna5', 'Tiam1', 'D2hgdh', 'Nod2', 'Gm14440', 'Pla2r1', 'Serpinb9g', 'Hic2', 'Cdkl4', 'Slc18b1', 'H2-M2', 'Klhdc1', 'Iqcb1', 'Sh3bp2', 'Ifit3', 'Cmpk2', 'Adamts10', 'Sirt5', 'Plekhg2', 'Cxcl10', 'Gm13051', 'Tppp3', 'Krt24', 'Lamb3', 'Serpind1', 'Pars2', 'Spopl', 'Rsad2', 'Tnfsf4', 'Gm12728', 'Siglece', '4930432K21Rik', 'Vmn1r32', 'Fbxw10', 'Ngb', 'Bdkrb1', 'B3galt2']
list_of_genes = ["Ccl3"]
"""
Analyze all the time points
"""
times_to_analyze = [0, 75, 150, 300]
cluster_list = {}
cluster_name_dict = {'0':{}, '75':{}, '150':{}, '300':{}}
for time_point in times_to_analyze:
print "Analyzing " + str(time_point) + " minute time point"
all_cells = []
cell_names = []
longest_time = 0
number_of_cells = 0
for cell in all_cells_total:
if cell.time_point == time_point and cell.condition == 'Stim':
number_of_cells += 1
longest_time = np.amax([longest_time, cell.NFkB_dynamics.shape[0]])
all_cells += [cell]
cell_names += [cell.id]
dynamics_matrix = np.zeros((number_of_cells,longest_time), dtype = 'float32')
"""
Fill up the dynamics heat map matrix
"""
cell_counter = 0
for cell in all_cells:
dynam = cell.NFkB_dynamics
dynamics_matrix[cell_counter,0:dynam.shape[0]] = dynam
cell_counter += 1
"""
Perform hierarchical clustering of the dynamics
"""
distance_matrix_dynamics = np.zeros((number_of_cells, number_of_cells))
if time_point != 0:
dynamics_load = np.load('/home/dvanva/SingleCellSequencing/' + str(time_point)+'_dynamics_distance_matrix_kshape.npz')
distance_matrix_dynamics = dynamics_load["distance_matrix"]
Y_dynamics = sch.linkage(distance_matrix_dynamics, method = 'ward')
ind_dynamics = sch.fcluster(Y_dynamics,0.5*np.amax(Y_dynamics[:,2]),'distance')
if time_point == 0:
cluster_list[str(time_point)] = np.arange(1,2)
else:
cluster_list[str(time_point)] = np.arange(np.amin(ind_dynamics), np.amax(ind_dynamics)+1)
if time_point == 0:
for j in xrange(number_of_cells):
all_cells[j].clusterID = 1
else:
for j in xrange(number_of_cells):
all_cells[j].clusterID = ind_dynamics[j]
cluster_dict = {}
for cell in all_cells:
cluster_dict[cell.id] = str(cell.clusterID)
for cluster in cluster_list[str(time_point)]:
cluster_name_dict[str(time_point)][str(cluster)] = []
for cell in all_cells:
if cell.clusterID == cluster:
cluster_name_dict[str(time_point)][str(cluster)] += [cell.id]
"""
Compute posterior FPM distribution for a given gene
"""
plt.clf()
# fig, axes = plt.subplots(len(list_of_genes)/4,4, figsize = (60,60))
counter = 0
for gene in list_of_genes:
print gene
cluster_1_mean = []
cluster_2_mean = []
cluster_3_mean = []
cluster_1_low = []
cluster_2_low = []
cluster_3_low = []
cluster_1_high = []
cluster_2_high = []
cluster_3_high = []
for time_point in times_to_analyze:
gene_name = """'""" + gene + """'"""
r("o.prior = scde.expression.prior(models = o.ifm, counts = counts_data_int, length.out = 400, max.value = 10, show.plot = FALSE )")
r("""gene_counts = counts_data_int[c(""" + gene_name + ""","mt-Atp8"),]""")
ratio_list = []
post_list = []
for cluster in cluster_list[str(time_point)]:
if time_point == 0:
list_of_cells_r = ro.vectors.StrVector(cluster_name_dict[str(time_point)][str(cluster)])
r("list_of_cells = " + list_of_cells_r.r_repr())
r("""joint_posterior = scde.posteriors(models = o.ifm[list_of_cells,], gene_counts, o.prior, n.cores = 4)""")
r("prior = o.prior")
r("jp_0 = joint_posterior[" + gene_name + ",]")
r("jp_0 = t(jp_0)")
r("ratio = scde:::calculate.ratio.posterior(jp_0, jp_0, prior = o.prior, n.cores = 2)")
ratios = 10 ** np.float32(pandas2ri.ri2py(r("colnames(ratio)")))
post = np.float32(pandas2ri.ri2py(r("ratio")))
ratio_list += [ratios]
post_list += [post]
else:
list_of_cells_r = ro.vectors.StrVector(cluster_name_dict[str(time_point)][str(cluster)])
r("list_of_cells = " + list_of_cells_r.r_repr())
r("""joint_posterior = scde.posteriors(models = o.ifm[list_of_cells,], gene_counts, o.prior, n.cores = 4)""")
r("jp = joint_posterior[" + gene_name + ",]")
r("jp = t(jp)")
r("ratio = scde:::calculate.ratio.posterior(jp, jp_0, o.prior, n.cores = 2)")
ratios = 10 ** np.float32(pandas2ri.ri2py(r("colnames(ratio)")))
post = np.float32(pandas2ri.ri2py(r("ratio")))
ratio_list += [ratios]
post_list += [post]
# Give the clusters the proper order
if time_point == 150:
ratio_list_new = []
post_list_new = []
ratio_list_new += [ratio_list[2]]
ratio_list_new += [ratio_list[0]]
ratio_list_new += [ratio_list[1]]
post_list_new += [post_list[2]]
post_list_new += [post_list[0]]
post_list_new += [post_list[1]]
ratio_list = ratio_list_new
post_list = post_list_new
if time_point == 0:
ratio = ratio_list[0]
post = post_list[0]
ratios = [ratio[np.argmax(post)]]
cumsum = np.cumsum(post)
err_low = [ratio[zero_crossing(cumsum, 0.16)]]
err_high = [ratio[zero_crossing(cumsum, 0.84)]]
cluster_1_mean += ratios
cluster_2_mean += ratios
cluster_3_mean += ratios
cluster_1_low += err_low
cluster_2_low += err_low
cluster_3_low += err_low
cluster_1_high += err_high
cluster_2_high += err_high
cluster_3_high += err_high
if time_point == 75:
ratio= ratio_list[0]
post = post_list[0]
ratios = [ratio[np.argmax(post)]]
cumsum = np.cumsum(post)
err_low = [ratio[zero_crossing(cumsum, 0.16)]]
err_high = [ratio[zero_crossing(cumsum, 0.84)]]
cluster_1_mean += ratios
cluster_3_mean += ratios
cluster_1_low += err_low
cluster_3_low += err_low
cluster_1_high += err_high
cluster_3_high += err_high
ratio = ratio_list[1]
post = post_list[1]
ratios = [ratio[np.argmax(post)]]
cumsum = np.cumsum(post)
err_low = [ratio[zero_crossing(cumsum, 0.16)]]
err_high = [ratio[zero_crossing(cumsum, 0.84)]]
cluster_2_mean += ratios
cluster_2_low += err_low
cluster_2_high += err_high
if time_point == 150:
ratio = ratio_list[0]
post = post_list[0]
ratios = [ratio[np.argmax(post)]]
cumsum = np.cumsum(post)
err_low = [ratio[zero_crossing(cumsum, 0.16)]]
err_high = [ratio[zero_crossing(cumsum, 0.84)]]
cluster_1_mean += ratios
cluster_1_low += err_low
cluster_1_high += err_high
ratio = ratio_list[1]
post = post_list[1]
ratios = [ratio[np.argmax(post)]]
cumsum = np.cumsum(post)
err_low = [ratio[zero_crossing(cumsum, 0.16)]]
err_high = [ratio[zero_crossing(cumsum, 0.84)]]
cluster_2_mean += ratios
cluster_2_low += err_low
cluster_2_high += err_high
ratio = ratio_list[2]
post = post_list[2]
ratios = [ratio[np.argmax(post)]]
cumsum = np.cumsum(post)
err_low = [ratio[zero_crossing(cumsum, 0.16)]]
err_high = [ratio[zero_crossing(cumsum, 0.84)]]
cluster_3_mean += ratios
cluster_3_low += err_low
cluster_3_high += err_high
if time_point == 300:
ratio = ratio_list[0]
post = post_list[0]
ratios = [ratio[np.argmax(post)]]
cumsum = np.cumsum(post)
err_low = [ratio[zero_crossing(cumsum, 0.16)]]
err_high = [ratio[zero_crossing(cumsum, 0.84)]]
cluster_1_mean += ratios
cluster_1_low += err_low
cluster_1_high += err_high
ratio = ratio_list[1]
post = post_list[1]
ratios = [ratio[np.argmax(post)]]
cumsum = np.cumsum(post)
err_low = [ratio[zero_crossing(cumsum, 0.16)]]
err_high = [ratio[zero_crossing(cumsum, 0.84)]]
cluster_2_mean += ratios
cluster_2_low += err_low
cluster_2_high += err_high
ratio = ratio_list[2]
post = post_list[2]
ratios = [ratio[np.argmax(post)]]
cumsum = np.cumsum(post)
err_low = [ratio[zero_crossing(cumsum, 0.16)]]
err_high = [ratio[zero_crossing(cumsum, 0.84)]]
cluster_3_mean += ratios
cluster_3_low += err_low
cluster_3_high += err_high
"""
Plot posteriors
"""
colors = ['g', 'r', 'b']
cluster_1_low = np.array(cluster_1_low)
cluster_2_low = np.array(cluster_2_low)
cluster_3_low = np.array(cluster_3_low)
cluster_1_high = np.array(cluster_1_high)
cluster_2_high = np.array(cluster_2_high)
cluster_3_high = np.array(cluster_3_high)
cluster_1_mean = np.array(cluster_1_mean)
cluster_2_mean = np.array(cluster_2_mean)
cluster_3_mean = np.array(cluster_3_mean)
cluster_means = np.zeros((3,4), dtype = 'float32')
cluster_means[0,:] = cluster_2_mean
cluster_means[1,:] = cluster_3_mean
cluster_means[2,:] = cluster_1_mean
# print [np.abs(cluster_3_low - cluster_3_mean), np.abs(cluster_3_high - cluster_3_mean)]
# max_val = np.amax(np.array([np.amax(cluster_1_high), np.amax(cluster_2_high), np.amax(cluster_3_high)]))
# axes.flatten()[counter].errorbar(times_to_analyze, cluster_1_mean, yerr = [np.abs(cluster_1_low - cluster_1_mean), np.abs(cluster_1_high - cluster_1_mean)], fmt = '-o', color = colors[0], ecolor = colors[0], linewidth = 2, label = 'Cluster 1')
# axes.flatten()[counter].errorbar(times_to_analyze, cluster_2_mean, yerr = [np.abs(cluster_2_low - cluster_2_mean), np.abs(cluster_2_high - cluster_2_mean)], fmt = '-o', color = colors[1], ecolor = colors[1], linewidth = 2, label = 'Cluster 2')
# axes.flatten()[counter].errorbar(times_to_analyze, cluster_3_mean, yerr = [np.abs(cluster_3_low - cluster_3_mean), np.abs(cluster_3_high - cluster_3_mean)], fmt = '-o', color = colors[2], ecolor = colors[2], linewidth = 2, label = 'Cluster 3')
# axes.flatten()[counter].set_xlabel('Time (minutes)', fontsize = 16)
# axes.flatten()[counter].set_ylabel('Fold change', fontsize = 16)
# axes.flatten()[counter].set_title(gene, fontsize = 16)
# axes.flatten()[counter].set_ylim([0,np.ceil(1.05*max_val)])
# axes.flatten()[counter].set_yticks([0,np.ceil(1.05*max_val)])
# axes.flatten()[counter].set_xlim([0, 1.05*np.amax(times_to_analyze)])
# axes.flatten()[counter].set_xticks(times_to_analyze)
ax = sns.heatmap(cluster_means, linewidths = 3, cmap = sns.light_palette("muted purple", input = "xkcd", as_cmap = True), vmin = 1, vmax = np.amax(cluster_means.flatten()), xticklabels = False, yticklabels = [2,3,1])
ax.set_title(gene)
counter += 1
# fig.tight_layout()
file_name = "trial_25_heatmap.pdf"
plt.savefig("plots/" + file_name)
| [
"[email protected]"
] | |
bd36975c81170ebc8231899dace68949b8c3af3b | c4a57dced2f1ed5fd5bac6de620e993a6250ca97 | /huaxin/huaxin_ui/ui_ios_xjb_2_0/setting_trade_password_page.py | ad3b0792d49495208856a117a3e182156aa9fd78 | [] | no_license | wanglili1703/firewill | f1b287b90afddfe4f31ec063ff0bd5802068be4f | 1996f4c01b22b9aec3ae1e243d683af626eb76b8 | refs/heads/master | 2020-05-24T07:51:12.612678 | 2019-05-17T07:38:08 | 2019-05-17T07:38:08 | 187,169,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,762 | py | # coding=utf-8
from _common.page_object import PageObject
import huaxin_ui.ui_ios_xjb_2_0.security_center_page
from _common.xjb_decorator import robot_log
FIND_TRADE_PASSWORD = "accId_UIAElement_找回交易密码"
MODIFY_TRADE_PASSWORD = "accId_UIAElement_修改交易密码"
CURRENT_TRADE_PASSWORD = "accId_UIATextField_(tradePwdTextField)"
CURRENT_TRADE_PASSWORD_CONFIRM = "accId_UIAButton_下一步"
SETTING_TRADE_PASSWORD = "accId_UIATextField_(tradePwdTextField)"
SETTING_TRADE_PASSWORD_AGAIN = "accId_UIATextField_(tradePwdTextField)"
SETTING_TRADE_PASSWORD_AGAIN_CONFIRM = "accId_UIAButton_下一步"
ID_FACE = "accId_UIAButton_(imageButton)"
ID_BACK = "xpath_//android.widget.ImageView[@resource-id='com.shhxzq.xjb:id/pickBack']"
FROM_PHONE_PICTURE = "accId_UIAButton_从相册选择"
RECENTLY = "accId_UIATableCell_屏幕快照"
# ID_FACE_PICTURE = "xpathIOS_UIACollectionCell_/AppiumAUT/UIAApplication/UIAWindow/UIACollectionView/UIACollectionCell[contains(@name, '月')]"
ID_FACE_PICTURE = "axis_IOS_月"
ID_FACE_PICTURE_CONFIRM = "axis_IOS_选取"
# ID_BAC_PICTURE = "xpathIOS_UIACollectionCell_/AppiumAUT/UIAApplication/UIAWindow/UIACollectionView/UIACollectionCell[contains(@name, '月')]"
ID_CONFIRM = "accId_UIAButton_确认"
SETTING_TRADE_PASSWORD_CONFIRM = "accId_UIAButton_确定提交"
SETTING_TRADE_PASSWORD_DONE = "accId_UIAButton_确认"
current_page = []
class SettingTradePasswordPage(PageObject):
def __init__(self, web_driver):
super(SettingTradePasswordPage, self).__init__(web_driver)
self.elements_exist(*current_page)
@robot_log
def modify_trade_password(self, trade_password_old, trade_password_new):
self.perform_actions(
MODIFY_TRADE_PASSWORD,
CURRENT_TRADE_PASSWORD, trade_password_old,
# CURRENT_TRADE_PASSWORD_CONFIRM,
SETTING_TRADE_PASSWORD, trade_password_new,
SETTING_TRADE_PASSWORD_AGAIN, trade_password_new,
# SETTING_TRADE_PASSWORD_AGAIN_CONFIRM,
)
# page = huaxin_ui.ui_ios_xjb_2_0.security_center_page.SecurityCenterPage(self.web_driver)
page = self
return page
@robot_log
def find_trade_password(self):
self.perform_actions(
FIND_TRADE_PASSWORD,
ID_FACE,
FROM_PHONE_PICTURE,
RECENTLY,
ID_FACE_PICTURE,
ID_FACE_PICTURE_CONFIRM,
# ID_BACK,
# FROM_PHONE_PICTURE,
# RECENTLY,
# ID_BAC_PICTURE,
ID_CONFIRM,
SETTING_TRADE_PASSWORD_CONFIRM,
SETTING_TRADE_PASSWORD_DONE,
)
page = huaxin_ui.ui_ios_xjb_2_0.security_center_page.SecurityCenterPage(self.web_driver)
return page
| [
"[email protected]"
] | |
e01ad7691ea38c36d638bae31a9495c9d187f087 | cf55e0e3ac8f340b379bb211c9f40a71930a4891 | /src/app_v1/order_paid.py | 8561892863f309ffc3634b0fafdf09563c70ae50 | [] | no_license | jack139/pretty | 666af96fd412c9fba1b4180dc14b2dcb77ae677d | 5641341d23191cd3dcc050524cf862c5e1dde2c7 | refs/heads/master | 2021-01-19T23:02:25.805841 | 2017-08-24T07:19:56 | 2017-08-24T07:19:56 | 101,262,255 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,894 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import web
import time, json
from config import setting
import app_helper
db = setting.db_web
# payment-completed callback
url = ('/app/v1/order_paid')
class handler:
@app_helper.check_sign(['app_id','dev_id','ver_code','tick','session','order_trade_id','data'])
def POST(self, version='v1'):
web.header('Content-Type', 'application/json')
param = web.input(app_id='', dev_id='', ver_code='', session='', order_trade_id='', data='', tick='')
if '' in (param.app_id, param.dev_id, param.ver_code, param.order_trade_id, param.session, param.tick):
return json.dumps({'ret' : -2, 'msg' : '参数错误'})
        # check that the session is logged in
uname = app_helper.app_logged(param.session)
if uname is None:
return json.dumps({'ret' : -4, 'msg' : '无效的session'})
#--------------------------------------------------
        # update the recharge order's status
r2 = db.order_recharge.find_one_and_update(
            {'recharge_id' : param.order_trade_id}, # the actual recharge order id
{
'$set' : {'status':'PREPAY'},
'$push' : {'order_paid_data':param.data},
},
)
if r2 is None:
return json.dumps({'ret' : -3, 'msg' : '未找到订单'})
        # for IAP orders, verify the payment with the receipt data; backrun checks it asynchronously
        if r2['pay_type']=='iap':
            if r2['status']=='DUE': # only push while status is DUE, to prevent duplicate payments  2017-06-21, gt
                app_helper.event_push_notify('iap', param.data, param.order_trade_id)
            else:
                print 'Error: suspected duplicate payment', param.order_trade_id
        ret_data = {
            "order_trade_id" : param.order_trade_id,
            "due" : r2['due'],     # amount due, in fen (1/100 CNY)
            "paid" : r2['due'],    # amount actually paid
            "status" : "PENDING",  # order status: PAID / PENDING
        }
        # respond
return json.dumps({
'ret' : 0,
'data' : ret_data,
})
'''
IAP verification result (sample):
{
u'status': 0,
u'environment': u'Sandbox',
u'receipt': {
u'download_id': 0,
u'adam_id': 0,
u'request_date': u'2017-06-19 03:19:56 Etc/GMT',
u'app_item_id': 0,
u'original_purchase_date_pst': u'2013-08-01 00:00:00 America/Los_Angeles',
u'version_external_identifier': 0,
u'receipt_creation_date': u'2017-06-17 07:27:09 Etc/GMT',
u'in_app': [
{
u'is_trial_period': u'false',
u'purchase_date_pst': u'2017-06-17 00:27:09 America/Los_Angeles',
u'product_id': u'com.006.pay',
u'original_transaction_id': u'1000000307992143',
u'original_purchase_date_pst': u'2017-06-17 00:27:09 America/Los_Angeles',
u'original_purchase_date': u'2017-06-17 07:27:09 Etc/GMT',
u'original_purchase_date_ms': u'1497684429000',
u'purchase_date': u'2017-06-17 07:27:09 Etc/GMT',
u'purchase_date_ms': u'1497684429000',
u'transaction_id': u'1000000307992143',
u'quantity': u'1'
}
],
u'original_purchase_date_ms': u'1375340400000',
u'original_application_version': u'1.0',
u'original_purchase_date': u'2013-08-01 07:00:00 Etc/GMT',
u'request_date_ms': u'1497842396474',
u'bundle_id': u'com.nuoyin.app',
u'receipt_creation_date_pst': u'2017-06-17 00:27:09 America/Los_Angeles',
u'application_version': u'1.0',
u'request_date_pst': u'2017-06-18 20:19:56 America/Los_Angeles',
u'receipt_creation_date_ms': u'1497684429000',
u'receipt_type': u'ProductionSandbox'
}
}
''' | [
"[email protected]"
] | |
8eb81c367c3e97cfc7fc8836fe6fe26710691f9e | 9f2f386a692a6ddeb7670812d1395a0b0009dad9 | /python/paddle/fluid/tests/unittests/test_tensor_uva.py | 4af04b8f6d41e2801fb1bba280f648c81bd3c2ae | [
"Apache-2.0"
] | permissive | sandyhouse/Paddle | 2f866bf1993a036564986e5140e69e77674b8ff5 | 86e0b07fe7ee6442ccda0aa234bd690a3be2cffa | refs/heads/develop | 2023-08-16T22:59:28.165742 | 2022-06-03T05:23:39 | 2022-06-03T05:23:39 | 181,423,712 | 0 | 7 | Apache-2.0 | 2022-08-15T08:46:04 | 2019-04-15T06:15:22 | C++ | UTF-8 | Python | false | false | 2,195 | py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import unittest
import numpy as np
from paddle.fluid import core
from paddle.fluid.framework import _test_eager_guard, _in_legacy_dygraph
class TestTensorCopyFrom(unittest.TestCase):
def func_main(self):
if paddle.fluid.core.is_compiled_with_cuda():
place = paddle.CPUPlace()
np_value = np.random.random(size=[10, 30]).astype('float32')
tensor = paddle.to_tensor(np_value, place=place)
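            # _uva() maps the host allocation into CUDA unified virtual
            # addressing, making the tensor GPU-addressable without an
            # explicit host-to-device copy -- hence the GPU-place check below.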
tensor._uva()
self.assertTrue(tensor.place.is_gpu_place())
def test_main(self):
with _test_eager_guard():
self.func_main()
self.func_main()
class TestUVATensorFromNumpy(unittest.TestCase):
def func_uva_tensor_creation(self):
if paddle.fluid.core.is_compiled_with_cuda():
dtype_list = [
"int32", "int64", "float32", "float64", "float16", "int8",
"int16", "bool"
]
for dtype in dtype_list:
data = np.random.randint(10, size=[4, 5]).astype(dtype)
if _in_legacy_dygraph():
tensor = paddle.fluid.core.to_uva_tensor(data, 0)
else:
tensor = core.eager.to_uva_tensor(data, 0)
self.assertTrue(tensor.place.is_gpu_place())
self.assertTrue(np.allclose(tensor.numpy(), data))
def test_uva_tensor_creation(self):
with _test_eager_guard():
self.func_uva_tensor_creation()
self.func_uva_tensor_creation()
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
415cedb3d3fa0bda509665ff0b6bda231b44e591 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/infra/rsequipmentflashconfigpol.py | 5ba1c06c67cbdddd0e6be74145506f655f54e2e0 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,367 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RsEquipmentFlashConfigPol(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = NamedSourceRelationMeta("cobra.model.infra.RsEquipmentFlashConfigPol", "cobra.model.equipment.FlashConfigPol")
meta.targetNameProps["name"] = "tnEquipmentFlashConfigPolName"
meta.cardinality = SourceRelationMeta.N_TO_ONE
meta.moClassName = "infraRsEquipmentFlashConfigPol"
meta.rnFormat = "rsequipmentFlashConfigPol"
meta.category = MoCategory.RELATIONSHIP_TO_LOCAL
meta.label = "Flash Configuration Policy"
meta.writeAccessMask = 0x80100000000001
meta.readAccessMask = 0x80100000000001
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.tag.Tag")
meta.childClasses.add("cobra.model.fault.Counts")
meta.childClasses.add("cobra.model.health.Inst")
meta.childClasses.add("cobra.model.fault.Inst")
meta.childClasses.add("cobra.model.aaa.RbacAnnotation")
meta.childClasses.add("cobra.model.tag.Annotation")
meta.childNamesAndRnPrefix.append(("cobra.model.tag.Annotation", "annotationKey-"))
meta.childNamesAndRnPrefix.append(("cobra.model.aaa.RbacAnnotation", "rbacDom-"))
meta.childNamesAndRnPrefix.append(("cobra.model.tag.Tag", "tagKey-"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Inst", "fault-"))
meta.parentClasses.add("cobra.model.infra.AccNodePGrp")
meta.superClasses.add("cobra.model.reln.Inst")
meta.superClasses.add("cobra.model.reln.To")
meta.superClasses.add("cobra.model.pol.NToRef")
meta.rnPrefixes = [
('rsequipmentFlashConfigPol', False),
]
prop = PropMeta("str", "annotation", "annotation", 53109, PropCategory.REGULAR)
prop.label = "Annotation. Suggested format orchestrator:value"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("annotation", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "extMngdBy", "extMngdBy", 53110, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "undefined"
prop._addConstant("msc", "msc", 1)
prop._addConstant("undefined", "undefined", 0)
meta.props.add("extMngdBy", prop)
prop = PropMeta("str", "forceResolve", "forceResolve", 107, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = True
prop.defaultValueStr = "yes"
prop._addConstant("no", None, False)
prop._addConstant("yes", None, True)
meta.props.add("forceResolve", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "monPolDn", "monPolDn", 53108, PropCategory.REGULAR)
prop.label = "Monitoring policy attached to this observable object"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("monPolDn", prop)
prop = PropMeta("str", "rType", "rType", 106, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "mo"
prop._addConstant("local", "local", 3)
prop._addConstant("mo", "mo", 1)
prop._addConstant("service", "service", 2)
meta.props.add("rType", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "state", "state", 103, PropCategory.REGULAR)
prop.label = "State"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "unformed"
prop._addConstant("cardinality-violation", "cardinality-violation", 5)
prop._addConstant("formed", "formed", 1)
prop._addConstant("invalid-target", "invalid-target", 4)
prop._addConstant("missing-target", "missing-target", 2)
prop._addConstant("unformed", "unformed", 0)
meta.props.add("state", prop)
prop = PropMeta("str", "stateQual", "stateQual", 104, PropCategory.REGULAR)
prop.label = "State Qualifier"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "none"
prop._addConstant("default-target", "default-target", 2)
prop._addConstant("mismatch-target", "mismatch-target", 1)
prop._addConstant("none", "none", 0)
meta.props.add("stateQual", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "tCl", "tCl", 53105, PropCategory.REGULAR)
prop.label = "Target-class"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 15570
prop.defaultValueStr = "equipmentFlashConfigPol"
prop._addConstant("equipmentFlashConfigPol", None, 15570)
prop._addConstant("unspecified", "unspecified", 0)
meta.props.add("tCl", prop)
prop = PropMeta("str", "tContextDn", "tContextDn", 4990, PropCategory.REGULAR)
prop.label = "Target-context"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("tContextDn", prop)
prop = PropMeta("str", "tDn", "tDn", 100, PropCategory.REGULAR)
prop.label = "Target-dn"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("tDn", prop)
prop = PropMeta("str", "tRn", "tRn", 4989, PropCategory.REGULAR)
prop.label = "Target-rn"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("tRn", prop)
prop = PropMeta("str", "tType", "tType", 4988, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "name"
prop._addConstant("all", "all", 2)
prop._addConstant("mo", "mo", 1)
prop._addConstant("name", "name", 0)
meta.props.add("tType", prop)
prop = PropMeta("str", "tnEquipmentFlashConfigPolName", "tnEquipmentFlashConfigPolName", 53104, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("tnEquipmentFlashConfigPolName", prop)
prop = PropMeta("str", "uid", "uid", 8, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("uid", prop)
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
a3c1db4dbe9cbb4c1e4b32666a85445f6a052213 | 6b597d4968b7fe790743b8b8c005f10c0d7dca14 | /aliyun-python-sdk-sas/aliyunsdksas/request/v20181203/DescribeWebLockConfigListRequest.py | a488efdb3c8a9a2cd17583b1e7a1b7a27fc24020 | [
"Apache-2.0"
] | permissive | tyh001/aliyun-openapi-python-sdk | b325348df187755a7afca059402e6936d3f6489b | 3de91944d67a893422a1d1bd0fc124ff457a33c1 | refs/heads/master | 2023-04-16T15:09:56.132243 | 2021-04-25T03:31:25 | 2021-04-25T03:31:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,454 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeWebLockConfigListRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Sas', '2018-12-03', 'DescribeWebLockConfigList','sas')
self.set_method('POST')
def get_Uuid(self):
return self.get_query_params().get('Uuid')
def set_Uuid(self,Uuid):
self.add_query_param('Uuid',Uuid)
def get_SourceIp(self):
return self.get_query_params().get('SourceIp')
def set_SourceIp(self,SourceIp):
self.add_query_param('SourceIp',SourceIp)
def get_Lang(self):
return self.get_query_params().get('Lang')
def set_Lang(self,Lang):
self.add_query_param('Lang',Lang) | [
"[email protected]"
] | |
f47f0e566dc3c61d9cdc0fb73ae153e1f08d47fc | 6c00499dfe1501294ac56b0d1607fb942aafc2ee | /eventregistry/tests/TestERQueryArticle.py | bdd55097b88d584354831bd931f97c34064372e3 | [
"MIT"
] | permissive | EventRegistry/event-registry-python | dd692729cb5c505e421d4b771804e712e5b6442b | bf3ce144fa61cc195840591bae5ca88b31ca9139 | refs/heads/master | 2023-07-06T11:04:41.033864 | 2023-06-23T08:40:31 | 2023-06-23T08:40:31 | 40,995,963 | 176 | 48 | MIT | 2020-10-21T09:17:06 | 2015-08-18T20:29:23 | Python | UTF-8 | Python | false | false | 2,330 | py | import unittest
from eventregistry import *
from eventregistry.tests.DataValidator import DataValidator
class TestQueryArticle(DataValidator):
def createQuery(self):
q = QueryArticles(conceptUri = self.er.getConceptUri("Obama"))
q.setRequestedResult(RequestArticlesUriWgtList(count = 100))
res = self.er.execQuery(q)
q = QueryArticle([uri for uri in EventRegistry.getUriFromUriWgt(res["uriWgtList"]["results"]) if uri.endswith("TEMP") == False][:10])
return q
def testArticleList(self):
q = self.createQuery()
q.setRequestedResult(RequestArticleInfo(returnInfo = self.returnInfo))
res = self.er.execQuery(q)
self.assertEqual(len(res), 10, "Expected to get a list of 10 articles")
for article in list(res.values()):
self.ensureValidArticle(article["info"], "articleList")
uris = [article.get("info").get("uri") for article in list(res.values())]
urls = [article.get("info").get("url") for article in list(res.values())]
uniqueUrls = list(set(urls))
mapper = ArticleMapper(self.er)
mappedUris = []
for url in uniqueUrls:
# getArticleUri returns a list, so we extend the list of items
urls = mapper.getArticleUri(url)
if urls:
mappedUris.append(urls)
if mappedUris == []:
return
q = QueryArticle.queryByUri(mappedUris)
q.setRequestedResult(RequestArticleInfo(returnInfo = self.returnInfo))
res = self.er.execQuery(q)
for article in list(res.values()):
# it's possible that the article was removed from ER
if "error" in article:
continue
self.ensureValidArticle(article["info"], "articleList")
q = QueryArticle.queryByUri(uris)
q.setRequestedResult(RequestArticleInfo(returnInfo = self.returnInfo))
res = self.er.execQuery(q)
self.assertEqual(len(res), 10, "Expected to get a list of 10 articles when searching by uris")
for article in list(res.values()):
self.ensureValidArticle(article["info"], "articleList")
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(TestQueryArticle)
unittest.TextTestRunner(verbosity=3).run(suite)
| [
"[email protected]"
] | |
0e6eb8dd980dea3dca19b288358cbfacba99fa4e | 78d35bb7876a3460d4398e1cb3554b06e36c720a | /sdk/formrecognizer/azure-ai-formrecognizer/samples/v3.2-beta/async_samples/sample_manage_models_async.py | e4a2f1a079c6b24be6a7fd47b513e0d7ea6e2b56 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | catchsrinivas/azure-sdk-for-python | e35f59b60318a31b3c940a7a3a07b61b28118aa5 | 596227a7738a5342274486e30489239d539b11d1 | refs/heads/main | 2023-08-27T09:08:07.986249 | 2021-11-11T11:13:35 | 2021-11-11T11:13:35 | 427,045,896 | 0 | 0 | MIT | 2021-11-11T15:14:31 | 2021-11-11T15:14:31 | null | UTF-8 | Python | false | false | 3,366 | py | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_manage_models_async.py
DESCRIPTION:
This sample demonstrates how to manage the models on your account. To learn
how to build a model, look at sample_build_model_async.py.
USAGE:
python sample_manage_models_async.py
Set the environment variables with your own values before running the sample:
1) AZURE_FORM_RECOGNIZER_ENDPOINT - the endpoint to your Cognitive Services resource.
2) AZURE_FORM_RECOGNIZER_KEY - your Form Recognizer API key
3) CONTAINER_SAS_URL - The shared access signature (SAS) Url of your Azure Blob Storage container
"""
import os
import asyncio
async def sample_manage_models_async():
from azure.core.credentials import AzureKeyCredential
from azure.core.exceptions import ResourceNotFoundError
from azure.ai.formrecognizer.aio import DocumentModelAdministrationClient
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
container_sas_url = os.environ["CONTAINER_SAS_URL"]
# [START get_account_info_async]
document_model_admin_client = DocumentModelAdministrationClient(endpoint=endpoint, credential=AzureKeyCredential(key))
async with document_model_admin_client:
account_info = await document_model_admin_client.get_account_info()
print("Our account has {} custom models, and we can have at most {} custom models\n".format(
account_info.model_count, account_info.model_limit
))
# [END get_account_info_async]
# Next, we get a paged list of all of our custom models
# [START list_models_async]
models = document_model_admin_client.list_models()
print("We have the following 'ready' models with IDs and descriptions:")
async for model in models:
print("{} | {}".format(model.model_id, model.description))
# [END list_models_async]
# let's build a model to use for this sample
poller = await document_model_admin_client.begin_build_model(container_sas_url, description="model for sample")
model = await poller.result()
# [START get_model_async]
my_model = await document_model_admin_client.get_model(model_id=model.model_id)
print("\nModel ID: {}".format(my_model.model_id))
print("Description: {}".format(my_model.description))
print("Model created on: {}".format(my_model.created_on))
# [END get_model_async]
# Finally, we will delete this model by ID
# [START delete_model_async]
await document_model_admin_client.delete_model(model_id=my_model.model_id)
try:
await document_model_admin_client.get_model(model_id=my_model.model_id)
except ResourceNotFoundError:
print("Successfully deleted model with ID {}".format(my_model.model_id))
# [END delete_model_async]
async def main():
await sample_manage_models_async()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| [
"[email protected]"
] | |
a182ce756249ca3cacb5023e1cfa2f9c7184bdc4 | d47436cad4e2b08ee0e58a157a0860f496037df7 | /sApp/migrations/0001_initial.py | 8aae841cbabb0695048dac5b91cf44de9cebbf73 | [] | no_license | rushabhgediya38/Django-BeautifulSoup | ff8a9d5514bf0e3b7ecc2e26dadde605c39c9b71 | 11029364c161d6a63f74b17a5aa04b45a43b3140 | refs/heads/master | 2023-05-06T10:03:56.474083 | 2021-05-25T13:49:13 | 2021-05-25T13:49:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 707 | py | # Generated by Django 3.2.3 on 2021-05-24 11:27
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='sData',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=400)),
('author', models.CharField(max_length=400)),
('created', models.DateTimeField()),
('modified', models.DateTimeField()),
('SITE_URL', models.URLField()),
],
),
]
| [
"[email protected]"
] | |
86e188f2e4988ed3ff9dbc179788f4eaa24c9c97 | f1b86f057b7982163055e36cc97ff1532d3afb46 | /encode_decode/auto_encode.py | e6c13f11080263f3e9ff30b6dd02fad224ede17e | [] | no_license | bigboyooo/ner-bilstm-dnn | 0a29b2268ce48960ee222863673d7fd5785ff54b | 7afd569dd59706e16007eeb50f2bc2049bc33c80 | refs/heads/master | 2020-06-01T22:29:28.019022 | 2018-06-21T07:23:56 | 2018-06-21T07:23:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,158 | py | #coding="utf8"
import os,sys,re,traceback,json
import pandas as pd
import numpy as np
import tensorflow as tf
#import sklearn.preprocessing as prep
#from tensorflow.examples.tutorials.mnist import input_data
import sys
def _path(filepath):
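    # NOTE: assumes a module-level CURPATH constant that this file does not
    # define.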
return os.path.join(CURPATH, filepath)
def xavier_init(fan_in, fan_out, constant=1):
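    """Glorot/Xavier uniform initializer.

    Samples U(-b, b) with b = constant * sqrt(6 / (fan_in + fan_out)), which
    keeps activation variances roughly constant across layers.
    """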
low = -constant * np.sqrt(6.0 /(fan_in +fan_out))
high = constant * np.sqrt(6.0 /(fan_in +fan_out))
return tf.random_uniform((fan_in, fan_out), minval = low, maxval = high, dtype = tf.float32)
class AdditiveGaussianNoiseAutoEncoder(object):
def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus, optimizer=tf.train.AdamOptimizer(), scale=0.1, sess = ""):
print('\n> cls AdditiveGaussianNoiseAutoEncoder instance')
"""
        !attention: these parameters are used during training
"""
self.n_samples = 200000
self.training_epochs = 100
self.batch_size = 32
        self.n_input = 100  # NOTE: immediately overwritten by the n_input constructor argument below
self.display_step = 1
"""
        !attention: these parameters define the model
"""
self.n_input = n_input
self.n_hidden = n_hidden
self.transfer = transfer_function
self.scale = tf.placeholder(tf.float32)
self.training_scale = scale
network_weights = self._initialize_weights()
self.weights = network_weights
self.x = tf.placeholder(tf.float32, [None, self.n_input])
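        # Encoder: corrupt the input with additive Gaussian noise
        # (scale * N(0, 1)) before the affine transform and nonlinearity;
        # the corruption is what makes this a *denoising* autoencoder.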
self.hidden = self.transfer(
tf.add(
tf.matmul(
self.x + scale * tf.random_normal((self.n_input,)),
self.weights['w1']),
self.weights['b1']
)
)
self.reconstruction = tf.add(
tf.matmul(self.hidden, self.weights['w2']),
self.weights['b2']
)
        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))  # squared reconstruction error: 0.5 * sum((x_hat - x)^2)
self.optimizer = optimizer.minimize(self.cost)
init = tf.global_variables_initializer()
self.sess = sess
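        # NOTE: the caller is expected to pass in a live tf.Session and run
        # the initializer itself; the in-class session setup below was
        # disabled.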
#self.sess = tf.Session()
#self.sess.run(init)
#self.save_graph_meta()
def save_graph_meta(self):
tf.add_to_collection('x', self.x)
tf.add_to_collection('scale', self.scale)
tf.add_to_collection('cost', self.cost)
tf.add_to_collection('reconstruction', self.reconstruction)
def _initialize_weights(self):
all_weights = dict()
all_weights['w1'] = tf.Variable(xavier_init(self.n_input, self.n_hidden))
all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype = tf.float32))
all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))
all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32))
return all_weights
def partial_fit(self,X):
cost,opt = self.sess.run((self.cost , self.optimizer), feed_dict={self.x:X, self.scale: self.training_scale})
return cost
def partial_predict(self,X):
_y = self.sess.run((self.reconstruction), feed_dict={self.x:X, self.scale: self.training_scale})
return _y
def calc_total_cost(self,X):
return self.sess.run(self.cost,feed_dict = {self.x:X, self.scale: self.training_scale})
def transform(self, X):
return self.sess.run(self.hidden, feed_dict={self.x:X, self.scale: self.training_scale})
def generate(self, hidden=None):
        if hidden is None:
            hidden = np.random.normal(size=self.n_hidden)  # was size=self.weights["b1"], a tf.Variable that numpy cannot use as a size
return self.sess.run(self.reconstruction, feed_dict={self.hidden:hidden})
def reconstruct(self, X):
        return self.sess.run(self.reconstruction, feed_dict = {self.x:X, self.scale:self.training_scale})
def getWeights(self):
return self.sess.run(self.weights['w1'])
def getBiases(self):
return self.sess.run(self.weights['b1'])
#mnist = input_data.read_data_sets('MNIST_dat', one_hot = True)
def standard_scale(X_train, X_test):
preprocessor = prep.StandardScaler().fit(X_train)
    X_train = preprocessor.transform(X_train)
    X_test = preprocessor.transform(X_test)
return X_train, X_test
def get_random_block_from_data(data, batch_size):
    start_index = np.random.randint(0, len(data) - batch_size)
    return data[start_index:(start_index + batch_size)]
def generat_batch(data_generate, n):
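    # NOTE: relies on module-level `batch_size` and `n_input` globals that
    # this file no longer defines (see the commented-out main below); the
    # `data_generate` parameter also shadows the module-level function of
    # the same name.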
_lst = []
for i in range(n):
_arr = data_generate.__next__()
_lst.append(_arr)
_out = np.array(_lst).reshape(batch_size*n, n_input)
#print(_out.shape)
assert _out.shape == (batch_size*n,n_input)
return _out
def gen_w2v():
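    # wd2vec is assumed to come from an external word2vec wrapper module;
    # its import is not included in this file.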
w2v = wd2vec()
return w2v
def data_generate(w2v, df=None):  # df is unused; the commented-out main below calls this with one argument
word2vec, model = w2v.spot_vec()
lines = w2v.load_txt(w2v.txtfilepath, False)
_random_lst = np.random.permutation(len(lines))
_arrlst = []
cnt = 0
for _id in _random_lst:
words = lines[_id].split(" ")
for word in words:
try:
                _word_arr = word2vec[word] # returns one (100,)-shaped array
_arrlst.append(_word_arr)
except KeyError:
#print("\n> the word", word, " is not in the vocab")
continue
cnt+=1
if cnt % batch_size == 0:
_arr = np.array(_arrlst)
_arrlst = []
cnt = 0
yield _arr
print("\n> all data read finish")
"""
def main():
#--- get data
w2v = gen_w2v()
dg = data_generate(w2v)
print("\n> print the df for test: ")
print(dg.__next__())
print(dg.__next__().shape)
print("\n> now we going to train the model")
X_train = dg
X_test = data_generate(w2v)
autoencoder = AdditiveGaussianNoiseAutoEncoder(n_input = 8,
n_hidden = 32,
transfer_function = tf.nn.softplus,
optimizer = tf.train.AdamOptimizer(learning_rate = 0.001),
scale = 0.01,
)
saver = tf.train.Saver(max_to_keep=1)
    model_save_path = os.path.join(CUR_PATH,'data/auto_encode.ckpt') # model checkpoint location
for epoch in range(training_epochs):
print("\n> this is epoch", epoch)
X_train = dg
X_test = data_generate(w2v)
avg_cost = 0
total_batch = int(n_samples // batch_size)
for i in range(total_batch):
if i % 1000 == 1:
print("\n> Epoch:", '%04d' % (epoch +1), "batch ", '%04d' % (i+1))
if i % 5000 == 1:
_y = autoencoder.partial_predict(batch_xs)
print("\n> Epoch:", '%04d' % (epoch +1), "batch ", '%04d' % (i+1), "_y predict:", _y)
batch_xs = generat_batch(X_train, 10)
cost = autoencoder.partial_fit(batch_xs)
avg_cost += cost / n_samples * batch_size
if epoch % display_step == 0:
save_path = saver.save(autoencoder.sess, model_save_path, global_step=(epoch+1))
print("Epoch:", '%04d' % (epoch +1), "cost=", "{:.9f}".format(avg_cost))
print("Total cost: " + str(autoencoder.calc_total_cost(generat_batch(X_test, 60))))
"""
if __name__ == "__main__":
pass
print("\n> autoencoder class")
| [
"[email protected]"
] | |
3c0ecf9821207af1edc05f87c1d8183a22298461 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03597/s115505711.py | 8d17bc3edb536d7cdb0c0d3fde8bd26535f38a0f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 82 | py | def abc():
n = int(input())
a = int(input())
print(n * n - a)
abc()
| [
"[email protected]"
] | |
0b9f0c6a5191efbdccbdafb1b015451146020a38 | 9cbd523cdedc727f62c887612e8ae2c25c909964 | /tests/lib/steps/check_TID_027.py | 83b46b0a4fa54c98997731f5cfb1487b519482f6 | [] | no_license | louiscklaw/QA_test_scripts | 8a71d0bed99fae3b0dac4cd9414b3e34dcf5beed | 58b73594332053272d8dce2c812c93297259c782 | refs/heads/master | 2023-01-27T15:48:29.477848 | 2020-12-06T10:05:19 | 2020-12-06T10:05:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | import random
from pprint import pprint
from config import *
from time import sleep
from assert_check_point import assertCheckPoint
import restaurant_manage.password
def run_check(json_metadata, browser, password_to_bruce):
TEST_ERR_MSG='test failed at TID_027'
assertCheckPoint(browser, 'TID_027_1', TEST_ERR_MSG)
restaurant_mgmt_po = restaurant_manage.password.Main(browser)
restaurant_mgmt_po.inputPassword(password_to_bruce)
restaurant_mgmt_po.tapLogin()
assertCheckPoint(browser, 'TID_027_2_{}'.format(password_to_bruce), TEST_ERR_MSG)
json_metadata['TID_027'] = 'passed'
| [
"[email protected]"
] | |
3c1e3a8b843b126307b8f88b9fbc6404661d686d | e641bd95bff4a447e25235c265a58df8e7e57c84 | /build/lacros/test_runner.py | 40919e21025951ca2975f2309bd63dd9ada087a0 | [
"BSD-3-Clause"
] | permissive | zaourzag/chromium | e50cb6553b4f30e42f452e666885d511f53604da | 2370de33e232b282bd45faa084e5a8660cb396ed | refs/heads/master | 2023-01-02T08:48:14.707555 | 2020-11-13T13:47:30 | 2020-11-13T13:47:30 | 312,600,463 | 0 | 0 | BSD-3-Clause | 2022-12-23T17:01:30 | 2020-11-13T14:39:10 | null | UTF-8 | Python | false | false | 16,453 | py | #!/usr/bin/env vpython
#
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script facilitates running tests for lacros on Linux.
In order to run lacros tests on Linux, please first follow bit.ly/3juQVNJ
to setup build directory with the lacros-chrome-on-linux build configuration,
and corresponding test targets are built successfully.
* Example usages:
./build/lacros/test_runner.py test out/lacros/url_unittests
./build/lacros/test_runner.py test out/lacros/browser_tests
The commands above run url_unittests and browser_tests respectively, and more
specifically, url_unittests is executed directly while browser_tests is
executed with the latest version of prebuilt ash-chrome, and the behavior is
controlled by |_TARGETS_REQUIRE_ASH_CHROME|, and it's worth noting that the
list is maintained manually, so if you see something is wrong, please upload a
CL to fix it.
./build/lacros/test_runner.py test out/lacros/browser_tests \\
--gtest_filter=BrowserTest.Title
The above command only runs 'BrowserTest.Title', and any argument accepted by
the underlying test binary can be specified in the command.
./build/lacros/test_runner.py test out/lacros/browser_tests \\
--ash-chrome-version=793554
The above command runs tests with a given version of ash-chrome, which is
useful to reproduce test failures, the version corresponds to the commit
position of commits on the master branch, and a list of prebuilt versions can
be found at: gs://ash-chromium-on-linux-prebuilts/x86_64.
./testing/xvfb.py ./build/lacros/test_runner.py test out/lacros/browser_tests
The above command starts ash-chrome with xvfb instead of an X11 window, and
it's useful when running tests without a display attached, such as sshing.
"""
import argparse
import os
import logging
import re
import shutil
import signal
import subprocess
import sys
import tempfile
import time
import zipfile
_SRC_ROOT = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir))
sys.path.append(os.path.join(_SRC_ROOT, 'third_party', 'depot_tools'))
# Base GS URL to store prebuilt ash-chrome.
_GS_URL_BASE = 'gs://ash-chromium-on-linux-prebuilts/x86_64'
# Latest file version.
_GS_URL_LATEST_FILE = _GS_URL_BASE + '/latest/ash-chromium.txt'
# GS path to the zipped ash-chrome build with any given version.
_GS_ASH_CHROME_PATH = 'ash-chromium.zip'
# Directory to cache downloaded ash-chrome versions to avoid re-downloading.
_PREBUILT_ASH_CHROME_DIR = os.path.join(os.path.dirname(__file__),
'prebuilt_ash_chrome')
# Number of seconds to wait for ash-chrome to start.
ASH_CHROME_TIMEOUT_SECONDS = 10
# List of targets that require ash-chrome as a Wayland server in order to run.
_TARGETS_REQUIRE_ASH_CHROME = [
'app_shell_unittests',
'aura_unittests',
'browser_tests',
'components_unittests',
'compositor_unittests',
'content_unittests',
'dbus_unittests',
'extensions_unittests',
'message_center_unittests',
'snapshot_unittests',
'sync_integration_tests',
'unit_tests',
'views_unittests',
'wm_unittests',
    # regex patterns.
'.*_browsertests',
'.*interactive_ui_tests'
]
def _GetAshChromeDirPath(version):
"""Returns a path to the dir storing the downloaded version of ash-chrome."""
return os.path.join(_PREBUILT_ASH_CHROME_DIR, version)
def _remove_unused_ash_chrome_versions(version_to_skip):
"""Removes unused ash-chrome versions to save disk space.
Currently, when an ash-chrome zip is downloaded and unpacked, the atime/mtime
of the dir and the files are NOW instead of the time when they were built, but
  there is no guarantee that this will always be the behavior in the future, so avoid
removing the current version just in case.
Args:
version_to_skip (str): the version to skip removing regardless of its age.
"""
days = 7
expiration_duration = 60 * 60 * 24 * days
for f in os.listdir(_PREBUILT_ASH_CHROME_DIR):
if f == version_to_skip:
continue
p = os.path.join(_PREBUILT_ASH_CHROME_DIR, f)
if os.path.isfile(p):
# The prebuilt ash-chrome dir is NOT supposed to contain any files, remove
# them to keep the directory clean.
os.remove(p)
continue
chrome_path = os.path.join(p, 'chrome')
if not os.path.exists(chrome_path):
chrome_path = p
age = time.time() - os.path.getatime(chrome_path)
if age > expiration_duration:
logging.info(
'Removing ash-chrome: "%s" as it hasn\'t been used in the '
'past %d days', p, days)
shutil.rmtree(p)
def _GsutilCopyWithRetry(gs_path, local_name, retry_times=3):
"""Gsutil copy with retry.
Args:
gs_path: The gs path for remote location.
local_name: The local file name.
retry_times: The total try times if the gsutil call fails.
Raises:
RuntimeError: If failed to download the specified version, for example,
if the version is not present on gcs.
"""
import download_from_google_storage
gsutil = download_from_google_storage.Gsutil(
download_from_google_storage.GSUTIL_DEFAULT_PATH)
exit_code = 1
retry = 0
while exit_code and retry < retry_times:
retry += 1
exit_code = gsutil.call('cp', gs_path, local_name)
if exit_code:
raise RuntimeError('Failed to download: "%s"' % gs_path)
def _DownloadAshChromeIfNecessary(version):
"""Download a given version of ash-chrome if not already exists.
Args:
version: A string representing the version, such as "793554".
Raises:
RuntimeError: If failed to download the specified version, for example,
if the version is not present on gcs.
"""
def IsAshChromeDirValid(ash_chrome_dir):
# This function assumes that once 'chrome' is present, other dependencies
# will be present as well, it's not always true, for example, if the test
# runner process gets killed in the middle of unzipping (~2 seconds), but
# it's unlikely for the assumption to break in practice.
return os.path.isdir(ash_chrome_dir) and os.path.isfile(
os.path.join(ash_chrome_dir, 'chrome'))
ash_chrome_dir = _GetAshChromeDirPath(version)
if IsAshChromeDirValid(ash_chrome_dir):
return
shutil.rmtree(ash_chrome_dir, ignore_errors=True)
os.makedirs(ash_chrome_dir)
with tempfile.NamedTemporaryFile() as tmp:
logging.info('Ash-chrome version: %s', version)
gs_path = _GS_URL_BASE + '/' + version + '/' + _GS_ASH_CHROME_PATH
_GsutilCopyWithRetry(gs_path, tmp.name)
# https://bugs.python.org/issue15795. ZipFile doesn't preserve permissions.
# And in order to workaround the issue, this function is created and used
# instead of ZipFile.extractall().
# The solution is copied from:
# https://stackoverflow.com/questions/42326428/zipfile-in-python-file-permission
def ExtractFile(zf, info, extract_dir):
zf.extract(info.filename, path=extract_dir)
perm = info.external_attr >> 16
os.chmod(os.path.join(extract_dir, info.filename), perm)
with zipfile.ZipFile(tmp.name, 'r') as zf:
      # Extract all files instead of just the 'chrome' binary because 'chrome' needs
# other resources and libraries to run.
for info in zf.infolist():
ExtractFile(zf, info, ash_chrome_dir)
_remove_unused_ash_chrome_versions(version)
def _GetLatestVersionOfAshChrome():
"""Returns the latest version of uploaded ash-chrome."""
with tempfile.NamedTemporaryFile() as tmp:
_GsutilCopyWithRetry(_GS_URL_LATEST_FILE, tmp.name)
with open(tmp.name, 'r') as f:
return f.read().strip()
def _WaitForAshChromeToStart(tmp_xdg_dir, lacros_mojo_socket_file,
is_lacros_chrome_browsertests):
"""Waits for Ash-Chrome to be up and running and returns a boolean indicator.
Determine whether ash-chrome is up and running by checking whether two files
(lock file + socket) have been created in the |XDG_RUNTIME_DIR| and the lacros
mojo socket file has been created if running lacros_chrome_browsertests.
TODO(crbug.com/1107966): Figure out a more reliable hook to determine the
status of ash-chrome, likely through mojo connection.
Args:
tmp_xdg_dir (str): Path to the XDG_RUNTIME_DIR.
lacros_mojo_socket_file (str): Path to the lacros mojo socket file.
is_lacros_chrome_browsertests (bool): is running lacros_chrome_browsertests.
Returns:
A boolean indicating whether Ash-chrome is up and running.
"""
def IsAshChromeReady(tmp_xdg_dir, lacros_mojo_socket_file,
is_lacros_chrome_browsertests):
return (len(os.listdir(tmp_xdg_dir)) >= 2
and (not is_lacros_chrome_browsertests
or os.path.exists(lacros_mojo_socket_file)))
time_counter = 0
while not IsAshChromeReady(tmp_xdg_dir, lacros_mojo_socket_file,
is_lacros_chrome_browsertests):
time.sleep(0.5)
time_counter += 0.5
if time_counter > ASH_CHROME_TIMEOUT_SECONDS:
break
return IsAshChromeReady(tmp_xdg_dir, lacros_mojo_socket_file,
is_lacros_chrome_browsertests)
def _RunTestWithAshChrome(args, forward_args):
"""Runs tests with ash-chrome.
Args:
args (dict): Args for this script.
forward_args (dict): Args to be forwarded to the test command.
"""
if args.ash_chrome_path:
ash_chrome_file = args.ash_chrome_path
else:
ash_chrome_version = (args.ash_chrome_version
or _GetLatestVersionOfAshChrome())
_DownloadAshChromeIfNecessary(ash_chrome_version)
logging.info('Ash-chrome version: %s', ash_chrome_version)
ash_chrome_file = os.path.join(_GetAshChromeDirPath(ash_chrome_version),
'chrome')
try:
# Starts Ash-Chrome.
tmp_xdg_dir_name = tempfile.mkdtemp()
tmp_ash_data_dir_name = tempfile.mkdtemp()
# Please refer to below file for how mojo connection is set up in testing.
# //chrome/browser/chromeos/crosapi/test_mojo_connection_manager.h
lacros_mojo_socket_file = '%s/lacros.sock' % tmp_ash_data_dir_name
lacros_mojo_socket_arg = ('--lacros-mojo-socket-for-testing=%s' %
lacros_mojo_socket_file)
is_lacros_chrome_browsertests = (os.path.basename(
args.command) == 'lacros_chrome_browsertests')
ash_process = None
ash_env = os.environ.copy()
ash_env['XDG_RUNTIME_DIR'] = tmp_xdg_dir_name
ash_cmd = [
ash_chrome_file,
'--user-data-dir=%s' % tmp_ash_data_dir_name,
'--enable-wayland-server',
'--no-startup-window',
]
if is_lacros_chrome_browsertests:
ash_cmd.append(lacros_mojo_socket_arg)
ash_process_has_started = False
total_tries = 3
num_tries = 0
while not ash_process_has_started and num_tries < total_tries:
num_tries += 1
ash_process = subprocess.Popen(ash_cmd, env=ash_env)
ash_process_has_started = _WaitForAshChromeToStart(
tmp_xdg_dir_name, lacros_mojo_socket_file,
is_lacros_chrome_browsertests)
if ash_process_has_started:
break
logging.warning('Starting ash-chrome timed out after %ds',
ASH_CHROME_TIMEOUT_SECONDS)
logging.warning('Printing the output of "ps aux" for debugging:')
subprocess.call(['ps', 'aux'])
if ash_process and ash_process.poll() is None:
ash_process.kill()
if not ash_process_has_started:
raise RuntimeError('Timed out waiting for ash-chrome to start')
# Starts tests.
if is_lacros_chrome_browsertests:
forward_args.append(lacros_mojo_socket_arg)
reason_of_jobs_1 = (
'multiple clients crosapi is not supported yet (crbug.com/1124490), '
'lacros_chrome_browsertests has to run tests serially')
if any('--test-launcher-jobs' in arg for arg in forward_args):
raise RuntimeError(
'Specifying "--test-launcher-jobs" is not allowed because %s. '
'Please remove it and this script will automatically append '
'"--test-launcher-jobs=1"' % reason_of_jobs_1)
# TODO(crbug.com/1124490): Run lacros_chrome_browsertests in parallel once
# the bug is fixed.
logging.warning('Appending "--test-launcher-jobs=1" because %s',
reason_of_jobs_1)
forward_args.append('--test-launcher-jobs=1')
test_env = os.environ.copy()
test_env['EGL_PLATFORM'] = 'surfaceless'
test_env['XDG_RUNTIME_DIR'] = tmp_xdg_dir_name
test_process = subprocess.Popen([args.command] + forward_args, env=test_env)
return test_process.wait()
finally:
if ash_process and ash_process.poll() is None:
ash_process.terminate()
# Allow process to do cleanup and exit gracefully before killing.
time.sleep(0.5)
ash_process.kill()
shutil.rmtree(tmp_xdg_dir_name, ignore_errors=True)
shutil.rmtree(tmp_ash_data_dir_name, ignore_errors=True)
def _RunTestDirectly(args, forward_args):
"""Runs tests by invoking the test command directly.
args (dict): Args for this script.
forward_args (dict): Args to be forwarded to the test command.
"""
try:
p = None
p = subprocess.Popen([args.command] + forward_args)
return p.wait()
finally:
if p and p.poll() is None:
p.terminate()
time.sleep(0.5)
p.kill()
def _HandleSignal(sig, _):
"""Handles received signals to make sure spawned test process are killed.
sig (int): An integer representing the received signal, for example SIGTERM.
"""
logging.warning('Received signal: %d, killing spawned processes', sig)
# Don't do any cleanup here, instead, leave it to the finally blocks.
# Assumption is based on https://docs.python.org/3/library/sys.html#sys.exit:
# cleanup actions specified by finally clauses of try statements are honored.
# https://tldp.org/LDP/abs/html/exitcodes.html:
# Exit code 128+n -> Fatal error signal "n".
sys.exit(128 + sig)
def _RunTest(args, forward_args):
"""Runs tests with given args.
args (dict): Args for this script.
forward_args (dict): Args to be forwarded to the test command.
Raises:
RuntimeError: If the given test binary doesn't exist or the test runner
doesn't know how to run it.
"""
if not os.path.isfile(args.command):
raise RuntimeError('Specified test command: "%s" doesn\'t exist' %
args.command)
# |_TARGETS_REQUIRE_ASH_CHROME| may not always be accurate as it is updated
# with a best effort only, therefore, allow the invoker to override the
# behavior with a specified ash-chrome version, which makes sure that
# automated CI/CQ builders would always work correctly.
requires_ash_chrome = any(
re.match(t, os.path.basename(args.command))
for t in _TARGETS_REQUIRE_ASH_CHROME)
if not requires_ash_chrome and not args.ash_chrome_version:
return _RunTestDirectly(args, forward_args)
return _RunTestWithAshChrome(args, forward_args)
def Main():
for sig in (signal.SIGTERM, signal.SIGINT):
signal.signal(sig, _HandleSignal)
logging.basicConfig(level=logging.INFO)
arg_parser = argparse.ArgumentParser()
arg_parser.usage = __doc__
subparsers = arg_parser.add_subparsers()
test_parser = subparsers.add_parser('test', help='Run tests')
test_parser.set_defaults(func=_RunTest)
test_parser.add_argument(
'command',
help='A single command to invoke the tests, for example: '
'"./url_unittests". Any argument unknown to this test runner script will '
'be forwarded to the command, for example: "--gtest_filter=Suite.Test"')
version_group = test_parser.add_mutually_exclusive_group()
version_group.add_argument(
'--ash-chrome-version',
type=str,
help='Version of an prebuilt ash-chrome to use for testing, for example: '
'"793554", and the version corresponds to the commit position of commits '
'on the main branch. If not specified, will use the latest version '
'available')
version_group.add_argument(
'--ash-chrome-path',
type=str,
help='Path to an locally built ash-chrome to use for testing.')
args = arg_parser.parse_known_args()
return args[0].func(args[0], args[1])
if __name__ == '__main__':
sys.exit(Main())
| [
"[email protected]"
] | |
d68475d087c681885a302ce09839ee0363db00a4 | 0122d6ff2fdab185480ca06ba37e743c8e899e26 | /test/test_api_list_gateway_response.py | b41732009e203a02e3f75b9f21c2ae4ff86e5112 | [] | no_license | jcu-eresearch/pyLorawanServer | c0564a4946627f71b1cdba114fe24c0475059f59 | 277b99736194b1f1ae47526c1deaee3f7f88c299 | refs/heads/master | 2020-04-03T14:38:03.334371 | 2018-10-30T05:29:27 | 2018-10-30T05:29:27 | 155,328,867 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,090 | py | # coding: utf-8
"""
LoRa App Server REST API
For more information about the usage of the LoRa App Server (REST) API, see [https://docs.loraserver.io/lora-app-server/api/](https://docs.loraserver.io/lora-app-server/api/). # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import lorawan_server
from lorawan_server.models.api_list_gateway_response import ApiListGatewayResponse # noqa: E501
from lorawan_server.rest import ApiException
class TestApiListGatewayResponse(unittest.TestCase):
"""ApiListGatewayResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testApiListGatewayResponse(self):
"""Test ApiListGatewayResponse"""
# FIXME: construct object with mandatory attributes with example values
        # model = lorawan_server.models.api_list_gateway_response.ApiListGatewayResponse()  # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
a701b7b61f73127bef8278cfaa98dcf0c501b42d | 0e4168c4d129b7219cd4048102409ab69c543376 | /scripts/joinSamples.py | 4ea8442b37d446e6dad8984cbac3b1c31589947a | [] | no_license | saumyaphor4252/PlayWithDatacards | 867f47486cd51091b7b6ee99bc3f860486266117 | 2b25d49073b76a6c030cdac5577def3024dd219f | refs/heads/master | 2022-03-06T05:44:21.039024 | 2017-10-31T11:08:43 | 2017-10-31T11:08:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | #
# join samples
#
# join YY and XX into VV
#
# joinSamples.update({'VV' : ['YY','XX']})
joinSamples.update({'Higgs' : ['qqH','ggH','ZH','WH']})
joinSamples.update({'VVV-WZ-ZZ-Vg' : ['VV','VVV','Vg','VgS']})
#joinSamples.update({'WW' : ['ggWW','WW']})
joinSamples.update({'Top' : ['TopPt0','TopPt1']})
joinSamples.update({'DY' : ['DYee','DYmm','DYTT']})
#joinSamples.update({'all' : ['VV','VVV','Vg','VgS','ggWW','WW','TopPt0','TopPt1','DYee','DYmm']})
| [
"[email protected]"
] | |
d3fa277d14aebc7a28b2c6ef51d67f48fb198690 | 17e60f61fc82e7369802a1c597b58b0206ad9bec | /lib/resIpOSF1.py | ed540226dbdb369c1350048d478951dab3b4d1ee | [] | no_license | SLB-DeN/opensvc | 5e06d42947f51662fa16203a00670a88b9e1fea9 | 75baeb19e0d26d5e150e770aef4d615c2327f32e | refs/heads/master | 2021-05-17T05:35:18.585791 | 2020-03-19T15:20:05 | 2020-03-19T15:20:05 | 250,651,667 | 1 | 0 | null | 2020-03-27T21:29:22 | 2020-03-27T21:29:22 | null | UTF-8 | Python | false | false | 919 | py | import resIp as Res
import rcExceptions as ex
from rcUtilitiesOSF1 import check_ping
from rcUtilities import to_cidr, to_dotted
class Ip(Res.Ip):
def check_ping(self, count=1, timeout=5):
self.log.info("checking %s availability"%self.addr)
return check_ping(self.addr, count=count, timeout=timeout)
def arp_announce(self):
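        # ARP announcement is intentionally a no-op on OSF1/Tru64.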
return
def startip_cmd(self):
if ':' in self.addr:
cmd = ['ifconfig', self.ipdev, 'inet6', '/'.join([self.addr, to_cidr(self.mask)]), 'add']
else:
cmd = ['ifconfig', self.ipdev, 'inet', 'alias', self.addr, 'netmask', to_dotted(self.mask)]
return self.vcall(cmd)
def stopip_cmd(self):
if ':' in self.addr:
cmd = ['ifconfig', self.ipdev, 'inet6', self.addr, 'delete']
else:
cmd = ['ifconfig', self.ipdev, 'inet', '-alias', self.addr]
return self.vcall(cmd)
| [
"[email protected]"
] | |
bf5ec6948feea77173ec09f78b4b41b6d0a71eaa | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/nlp/MT5_ID4146_for_PyTorch/transformers/src/transformers/models/albert/__init__.py | 0a61f5995a78963a71d334ac03c89ac5cdcdbb54 | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 4,368 | py | # flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...file_utils import (
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
if is_sentencepiece_available():
_import_structure["tokenization_albert"] = ["AlbertTokenizer"]
if is_tokenizers_available():
_import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
if is_torch_available():
_import_structure["modeling_albert"] = [
"ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"AlbertForMaskedLM",
"AlbertForMultipleChoice",
"AlbertForPreTraining",
"AlbertForQuestionAnswering",
"AlbertForSequenceClassification",
"AlbertForTokenClassification",
"AlbertModel",
"AlbertPreTrainedModel",
"load_tf_weights_in_albert",
]
if is_tf_available():
_import_structure["modeling_tf_albert"] = [
"TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAlbertForMaskedLM",
"TFAlbertForMultipleChoice",
"TFAlbertForPreTraining",
"TFAlbertForQuestionAnswering",
"TFAlbertForSequenceClassification",
"TFAlbertForTokenClassification",
"TFAlbertMainLayer",
"TFAlbertModel",
"TFAlbertPreTrainedModel",
]
if is_flax_available():
_import_structure["modeling_flax_albert"] = [
"FlaxAlbertForMaskedLM",
"FlaxAlbertForMultipleChoice",
"FlaxAlbertForPreTraining",
"FlaxAlbertForQuestionAnswering",
"FlaxAlbertForSequenceClassification",
"FlaxAlbertForTokenClassification",
"FlaxAlbertModel",
"FlaxAlbertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
if is_tokenizers_available():
from .tokenization_albert_fast import AlbertTokenizerFast
if is_torch_available():
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
if is_tf_available():
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
if is_flax_available():
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
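    # Not type checking: defer all heavy imports by replacing this module
    # with a _LazyModule proxy that resolves attributes from
    # _import_structure on first access.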
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| [
"[email protected]"
] | |
62af2b892b48fc4ea07ca654b78b182a5488e8be | 52e814745700b54e4b35e783386ad5f796def1e9 | /colour/models/rgb/dataset/smptec_rgb.py | 975aa35b67e90bc77dac406f33783516a44fb3cb | [
"BSD-3-Clause"
] | permissive | scoopxyz/colour | e9c6502f67ff0774ab77f3c2f622b5973f5a9196 | b1d82af250122f82919b4c54d06fdf72c069c5af | refs/heads/develop | 2020-12-30T19:57:48.884001 | 2016-12-28T12:42:44 | 2016-12-28T12:42:44 | 68,670,983 | 0 | 0 | null | 2016-09-20T03:38:17 | 2016-09-20T03:38:17 | null | UTF-8 | Python | false | false | 2,618 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
SMPTE-C RGB Colourspace
=======================
Defines the *SMPTE-C RGB* colourspace:
- :attr:`SMPTE_C_RGB_COLOURSPACE`.
See Also
--------
`RGB Colourspaces Jupyter Notebook
<http://nbviewer.jupyter.org/github/colour-science/colour-notebooks/\
blob/master/notebooks/models/rgb.ipynb>`_
References
----------
.. [1] Society of Motion Picture and Television Engineers. (2004). SMPTE C
Color Monitor Colorimetry. In RP 145:2004 (Vol. RP 145:200).
doi:10.5594/S9781614821649
"""
from __future__ import division, unicode_literals
import numpy as np
from functools import partial
from colour.colorimetry import ILLUMINANTS
from colour.models.rgb import (
RGB_Colourspace,
gamma_function,
normalised_primary_matrix)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2016 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = ['SMPTE_C_RGB_PRIMARIES',
'SMPTE_C_RGB_ILLUMINANT',
'SMPTE_C_RGB_WHITEPOINT',
'SMPTE_C_RGB_TO_XYZ_MATRIX',
'XYZ_TO_SMPTE_C_RGB_MATRIX',
'SMPTE_C_RGB_COLOURSPACE']
SMPTE_C_RGB_PRIMARIES = np.array(
[[0.630, 0.340],
[0.310, 0.595],
[0.155, 0.070]])
"""
*SMPTE-C RGB* colourspace primaries.
SMPTE_C_RGB_PRIMARIES : ndarray, (3, 2)
"""
SMPTE_C_RGB_ILLUMINANT = 'D65'
"""
*SMPTE-C RGB* colourspace whitepoint name as illuminant.
SMPTE_C_RGB_ILLUMINANT : unicode
"""
SMPTE_C_RGB_WHITEPOINT = ILLUMINANTS.get(
'CIE 1931 2 Degree Standard Observer').get(SMPTE_C_RGB_ILLUMINANT)
"""
*SMPTE-C RGB* colourspace whitepoint.
SMPTE_C_RGB_WHITEPOINT : ndarray
"""
SMPTE_C_RGB_TO_XYZ_MATRIX = normalised_primary_matrix(
SMPTE_C_RGB_PRIMARIES, SMPTE_C_RGB_WHITEPOINT)
"""
*SMPTE-C RGB* colourspace to *CIE XYZ* tristimulus values matrix.
SMPTE_C_RGB_TO_XYZ_MATRIX : array_like, (3, 3)
"""
XYZ_TO_SMPTE_C_RGB_MATRIX = np.linalg.inv(SMPTE_C_RGB_TO_XYZ_MATRIX)
"""
*CIE XYZ* tristimulus values to *SMPTE-C RGB* colourspace matrix.
XYZ_TO_SMPTE_C_RGB_MATRIX : array_like, (3, 3)
"""
SMPTE_C_RGB_COLOURSPACE = RGB_Colourspace(
'SMPTE-C RGB',
SMPTE_C_RGB_PRIMARIES,
SMPTE_C_RGB_WHITEPOINT,
SMPTE_C_RGB_ILLUMINANT,
SMPTE_C_RGB_TO_XYZ_MATRIX,
XYZ_TO_SMPTE_C_RGB_MATRIX,
partial(gamma_function, exponent=1 / 2.2),
partial(gamma_function, exponent=2.2))
"""
*SMPTE-C RGB* colourspace.
SMPTE_C_RGB_COLOURSPACE : RGB_Colourspace
"""
| [
"[email protected]"
] | |
a121ce2c0c6215da8a3ea58cd38153ea6a7642af | 7a2342efcfb0a9880c30c26e8b45bf954b701ac1 | /tensorflow/contrib/learn/python/learn/preprocessing/tests/categorical_test.py | 0a6603c79b78a6f9bfe53d502e7a4da5ba17401a | [
"Apache-2.0"
] | permissive | danielhardej/tensorflow | 0012157e89629c19cd7fcdab613ab609b05b0294 | 1c7d02cd21f6182f959ae66d3487ff55daa83f6a | refs/heads/master | 2023-04-15T13:05:46.374708 | 2016-07-13T22:30:26 | 2016-07-13T22:30:26 | 63,285,754 | 2 | 0 | Apache-2.0 | 2023-04-07T11:36:20 | 2016-07-13T23:15:10 | C++ | UTF-8 | Python | false | false | 2,243 | py | # encoding: utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Categorical tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.preprocessing import categorical
class CategoricalTest(tf.test.TestCase):
"""Categorical tests."""
def testSingleCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(min_frequency=1)
x = cat_processor.fit_transform([["0"], [1], [float("nan")], ["C"], ["C"],
[1], ["0"], [np.nan], [3]])
self.assertAllEqual(list(x), [[2], [1], [0], [3], [3], [1], [2], [0], [0]])
def testSingleCategoricalProcessorPandasSingleDF(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
cat_processor = categorical.CategoricalProcessor()
data = pd.DataFrame({"Gender": ["Male", "Female", "Male"]})
x = list(cat_processor.fit_transform(data))
self.assertAllEqual(list(x), [[1], [2], [1]])
def testMultiCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(min_frequency=0,
share=False)
x = cat_processor.fit_transform([["0", "Male"], [1, "Female"], ["3", "Male"]
])
self.assertAllEqual(list(x), [[1, 1], [2, 2], [3, 1]])
if __name__ == "__main__":
tf.test.main()
| [
"[email protected]"
] | |
ccf90dacc8518550b9d77f0e2efe79c948778c82 | 2333e07c173acc09bae62d9e94c1fe114afe96f2 | /day02/titanic/knn.py | 8f5d31fb1325ce3ea2ccc69928274853507c369e | [] | no_license | nojongmun/python_tensor | a61bf447bae253fe99d1d8297240922a12c1007f | 80422d9be2b687e5453d4c551988625a7ec3e248 | refs/heads/master | 2022-08-26T01:17:20.628581 | 2020-05-26T13:28:51 | 2020-05-26T13:28:51 | 256,535,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69 | py | """
KNN(KNeighbors)
Classifier vs. regressor
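"""
# A minimal, illustrative sketch of the classifier/regressor distinction
# named above. Assumptions (not part of this file): scikit-learn is
# installed, and the toy [pclass, age] rows below are hypothetical
# stand-ins for real Titanic features.
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
X = [[3, 22.0], [1, 38.0], [3, 26.0], [1, 35.0]]
y_label = [0, 1, 1, 1]  # discrete survival labels -> classifier
y_fare = [7.25, 71.28, 7.93, 53.10]  # continuous fares -> regressor
clf = KNeighborsClassifier(n_neighbors=3).fit(X, y_label)
reg = KNeighborsRegressor(n_neighbors=3).fit(X, y_fare)
print(clf.predict([[2, 30.0]]), reg.predict([[2, 30.0]]))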
""" | [
"[email protected]"
] | |
5466508a5f10a2490145c71a4d00799ff8fa23b2 | 1a59a9076c1e9f1eb98e24ff41a4c1c95e2b353e | /xcp2k/classes/_print51.py | 5960da1d1f2942edcf03cd540cb901434df30894 | [] | no_license | Roolthasiva/xcp2k | 66b2f30ebeae1a946b81f71d22f97ea4076e11dc | fc3b5885503c6f6dc549efeb4f89f61c8b6b8242 | refs/heads/master | 2022-12-23T06:03:14.033521 | 2020-10-07T08:01:48 | 2020-10-07T08:01:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | from xcp2k.inputsection import InputSection
from xcp2k.classes._program_run_info36 import _program_run_info36
class _print51(InputSection):
def __init__(self):
InputSection.__init__(self)
self.PROGRAM_RUN_INFO = _program_run_info36()
self._name = "PRINT"
self._subsections = {'PROGRAM_RUN_INFO': 'PROGRAM_RUN_INFO'}
| [
"[email protected]"
] | |
5228f70187f2fb73f3f1637fd851a5a7bc4cc9b0 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02850/s373081081.py | b381794c0d3494f7275436bc03b2bca28c761d40 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | n = int(input())
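# Greedy edge colouring of a tree (AtCoder "Coloring Edges on Tree"-style
# problem): the minimum number of colours equals the maximum vertex degree,
# and a traversal can give each edge the smallest colour unused at its endpoint.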
g = [[] for _ in range(n)]
inv = [0] * n
for i in range(n - 1):
a, b = map(int, input().split())
g[a - 1].append((b - 1, i))
inv[a - 1] += 1
g[b - 1].append((a - 1, i))
inv[b - 1] += 1
k = max(inv)
print(k)
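# Iterative DFS from vertex 0; d[v] stores the colour of the edge joining
# v to its parent, so each child edge skips that colour.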
s = [0]
d = [-1] * n
d[0] = -2  # sentinel: the root has no parent edge
ans = [0] * (n - 1)
while s:
p = s.pop()
c = 1
for node, idx in g[p]:
if d[node] == -1:
if c == d[p]:
c += 1
d[node] = c
ans[idx] = c
c += 1
s.append(node)
for x in ans:
print(x)
| [
"[email protected]"
] | |
4f92f5233c910ebb8db129825cec1fe0afd083df | 0a7711063b30b1566ade3cc07f105292e32fe6d6 | /fabfile.py | 07b2332cef37e71892c58c0032d7fa0bfe7a0502 | [] | no_license | huokedu/dynamic-scrapy | e150a1fc6894e39d6bae37c602a592d57cd22c51 | 31a47e9810f2039cfe33653e09d7d03242764723 | refs/heads/master | 2021-01-17T21:33:17.810250 | 2013-10-05T17:28:19 | 2013-10-05T17:28:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | true | false | 3,512 | py | """Management utilities."""
from fabric.contrib.console import confirm
from fabric.api import abort, env, local, settings, task
########## GLOBALS
env.run = 'heroku run python manage.py'
HEROKU_ADDONS = (
'cloudamqp:lemur',
'heroku-postgresql:dev',
'scheduler:standard',
'memcachier:dev',
'newrelic:standard',
'pgbackups:auto-month',
'sentry:developer',
)
HEROKU_CONFIGS = (
'DJANGO_SETTINGS_MODULE=scrapy_test.settings.prod',
'SECRET_KEY=to(rkb!6lj3bwbz&qs2go0@)1ctjcx43lm6lerci#s_vpg*%mr',
'AWS_ACCESS_KEY_ID=xxx',
'AWS_SECRET_ACCESS_KEY=xxx',
'AWS_STORAGE_BUCKET_NAME=xxx',
)
########## END GLOBALS
########## HELPERS
def cont(cmd, message):
"""Given a command, ``cmd``, and a message, ``message``, allow a user to
either continue or break execution if errors occur while executing ``cmd``.
:param str cmd: The command to execute on the local system.
:param str message: The message to display to the user on failure.
.. note::
``message`` should be phrased in the form of a question, as if ``cmd``'s
execution fails, we'll ask the user to press 'y' or 'n' to continue or
cancel execution, respectively.
Usage::
cont('heroku run ...', "Couldn't complete %s. Continue anyway?" % cmd)
"""
with settings(warn_only=True):
result = local(cmd, capture=True)
if message and result.failed and not confirm(message):
abort('Stopped execution per user request.')
########## END HELPERS
########## DATABASE MANAGEMENT
@task
def syncdb():
"""Run a syncdb."""
local('%(run)s syncdb --noinput' % env)
@task
def migrate(app=None):
"""Apply one (or more) migrations. If no app is specified, fabric will
attempt to run a site-wide migration.
:param str app: Django app name to migrate.
"""
if app:
local('%s migrate %s --noinput' % (env.run, app))
else:
local('%(run)s migrate --noinput' % env)
########## END DATABASE MANAGEMENT
########## FILE MANAGEMENT
@task
def collectstatic():
"""Collect all static files, and copy them to S3 for production usage."""
local('%(run)s collectstatic --noinput' % env)
########## END FILE MANAGEMENT
########## HEROKU MANAGEMENT
@task
def bootstrap():
"""Bootstrap your new application with Heroku, preparing it for a production
deployment. This will:
- Create a new Heroku application.
- Install all ``HEROKU_ADDONS``.
- Sync the database.
- Apply all database migrations.
- Initialize New Relic's monitoring add-on.
"""
cont('heroku create', "Couldn't create the Heroku app, continue anyway?")
for addon in HEROKU_ADDONS:
cont('heroku addons:add %s' % addon,
"Couldn't add %s to your Heroku app, continue anyway?" % addon)
for config in HEROKU_CONFIGS:
cont('heroku config:add %s' % config,
"Couldn't add %s to your Heroku app, continue anyway?" % config)
cont('git push heroku master',
"Couldn't push your application to Heroku, continue anyway?")
syncdb()
migrate()
cont('%(run)s newrelic-admin validate-config - stdout' % env,
"Couldn't initialize New Relic, continue anyway?")
@task
def destroy():
"""Destroy this Heroku application. Wipe it from existance.
.. note::
This really will completely destroy your application. Think twice.
"""
local('heroku apps:destroy')
########## END HEROKU MANAGEMENT
| [
"[email protected]"
] | |
001ee8c250001a08a7d5f7293b5801b8d90f2704 | 7250ce4b0f8c0bbff563e22243750bd89bc4a1f8 | /source/interprocedural_analyses/taint/test/integration/via_type_of.py | 5c5b05667e533cc4c1f9f0187df6d2ea54ec20a3 | [
"MIT"
] | permissive | geekmc/pyre-check | 717a02b71b9537852e494507c70b91e7e98a8c22 | 592ad6dee657e48746ed2d352c6f9269b6ff9c61 | refs/heads/main | 2023-07-07T15:48:20.915843 | 2021-08-10T03:22:53 | 2021-08-10T03:23:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 923 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import enum
from builtins import _test_sink, _test_source
def return_via_parameter_type(parameter):
return 0
def test_strings():
return return_via_parameter_type("A")
def test_numerals():
return return_via_parameter_type(1)
def test_lists():
return return_via_parameter_type(["a", "b"])
def meta(parameter):
return return_via_parameter_type(parameter)
def test_via_type_of_does_not_propagate():
return meta("Name")
def tito(parameter, other):
pass
def test_tito():
a = tito(_test_source(), [1, 2])
return a
def sink_via_type_of(x, y):
pass
def test_sink(element):
return sink_via_type_of(element, 1)
def test_backwards_tito(parameter):
return tito(parameter, "by_backwards")
| [
"[email protected]"
] | |
b3f6e06ec271a24acb23628e036addff72b6bd18 | ec46c70a721f16031a784f54f522656fb43dfc9f | /venv/lib/python3.6/site-packages/xmlschema/validators/elements.py | 2fd9e70cc618a388925fd3b40bb08fc2b89878e9 | [] | no_license | kardelen-karatas/django-importXML | c6a62942b740697d3647ec0bc1ed9c078e751159 | b169966627bd54b684aaedd5fd6c0d7be551b973 | refs/heads/master | 2022-12-10T00:38:40.578278 | 2020-04-15T10:34:36 | 2020-04-15T10:34:36 | 125,032,574 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,086 | py | # -*- coding: utf-8 -*-
#
# Copyright (c), 2016-2018, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <[email protected]>
#
"""
This module contains classes for XML Schema elements, complex types and model groups.
"""
from collections import Sequence
from ..compat import unicode_type
from ..exceptions import XMLSchemaAttributeError
from ..etree import etree_element
from ..converters import ElementData
from ..qnames import (
XSD_GROUP_TAG, XSD_SEQUENCE_TAG, XSD_ALL_TAG, XSD_CHOICE_TAG, XSD_ATTRIBUTE_GROUP_TAG,
XSD_COMPLEX_TYPE_TAG, XSD_SIMPLE_TYPE_TAG, XSD_ALTERNATIVE_TAG, XSD_ELEMENT_TAG, XSD_ANY_TYPE,
XSD_UNIQUE_TAG, XSD_KEY_TAG, XSD_KEYREF_TAG, XSI_NIL, XSI_TYPE, reference_to_qname, get_qname
)
from ..xpath import ElementPathMixin
from .exceptions import (
XMLSchemaValidationError, XMLSchemaParseError, XMLSchemaChildrenValidationError
)
from .parseutils import check_type, get_xsd_attribute, get_xsd_bool_attribute, get_xsd_derivation_attribute
from .xsdbase import XsdAnnotated, ParticleMixin, ValidatorMixin
from .simple_types import XsdSimpleType
from .complex_types import XsdComplexType
from .constraints import XsdUnique, XsdKey, XsdKeyref
XSD_MODEL_GROUP_TAGS = {XSD_GROUP_TAG, XSD_SEQUENCE_TAG, XSD_ALL_TAG, XSD_CHOICE_TAG}
class XsdElement(Sequence, XsdAnnotated, ValidatorMixin, ParticleMixin, ElementPathMixin):
"""
Class for XSD 1.0 'element' declarations.
<element
abstract = boolean : false
block = (#all | List of (extension | restriction | substitution))
default = string
final = (#all | List of (extension | restriction))
fixed = string
form = (qualified | unqualified)
id = ID
maxOccurs = (nonNegativeInteger | unbounded) : 1
minOccurs = nonNegativeInteger : 1
name = NCName
nillable = boolean : false
ref = QName
substitutionGroup = QName
type = QName
{any attributes with non-schema namespace . . .}>
Content: (annotation?, ((simpleType | complexType)?, (unique | key | keyref)*))
</element>
"""
def __init__(self, elem, schema, name=None, is_global=False):
super(XsdElement, self).__init__(elem, schema, name, is_global)
if not hasattr(self, 'type'):
raise XMLSchemaAttributeError("undefined 'type' attribute for %r." % self)
if not hasattr(self, 'qualified'):
raise XMLSchemaAttributeError("undefined 'qualified' attribute for %r." % self)
def __getitem__(self, i):
try:
elements = [e for e in self.type.content_type.iter_elements()]
except AttributeError:
raise IndexError('child index out of range')
return elements[i]
def __iter__(self):
try:
for xsd_element in self.type.content_type.iter_elements():
yield xsd_element
except (TypeError, AttributeError):
return
def __reversed__(self):
return reversed([e for e in self.type.content_type.iter_elements()])
def __len__(self):
try:
return len([e for e in self.type.content_type.iter_elements()])
except AttributeError:
return 0
def __setattr__(self, name, value):
if name == "type":
check_type(value, XsdSimpleType, XsdComplexType, type(None))
try:
self.attributes = value.attributes
except AttributeError:
self.attributes = self.schema.BUILDERS.attribute_group_class(
etree_element(XSD_ATTRIBUTE_GROUP_TAG), schema=self.schema
)
super(XsdElement, self).__setattr__(name, value)
def _parse(self):
XsdAnnotated._parse(self)
self._parse_attributes()
index = self._parse_type()
if self.type is None:
self.type = self.maps.lookup_type(XSD_ANY_TYPE)
self._parse_constraints(index)
self._parse_substitution_group()
def _parse_attributes(self):
self._parse_particle()
self.name = None
if self.default is not None and self.fixed is not None:
self._parse_error("'default' and 'fixed' attributes are mutually exclusive", self)
self._parse_properties('abstract', 'block', 'final', 'form', 'nillable')
# Parse element attributes
try:
element_name = reference_to_qname(self.elem.attrib['ref'], self.namespaces)
except KeyError:
# No 'ref' attribute ==> 'name' attribute required.
self.qualified = self.elem.get('form', self.schema.element_form_default) == 'qualified'
try:
if self.is_global or self.qualified:
self.name = get_qname(self.target_namespace, self.elem.attrib['name'])
else:
self.name = self.elem.attrib['name']
except KeyError:
self._parse_error("missing both 'name' and 'ref' attributes.")
if self.is_global:
if 'minOccurs' in self.elem.attrib:
self._parse_error("attribute 'minOccurs' not allowed for a global element.")
if 'maxOccurs' in self.elem.attrib:
self._parse_error("attribute 'maxOccurs' not allowed for a global element.")
else:
# Reference to a global element
if self.is_global:
self._parse_error("an element reference can't be global.")
for attribute in ('name', 'type', 'nillable', 'default', 'fixed', 'form', 'block'):
if attribute in self.elem.attrib:
self._parse_error("attribute %r is not allowed when element reference is used." % attribute)
xsd_element = self.maps.lookup_element(element_name)
self.name = xsd_element.name
self.type = xsd_element.type
self.qualified = xsd_element.qualified
def _parse_type(self):
if self.ref:
if self._parse_component(self.elem, required=False, strict=False) is not None:
self._parse_error("element reference declaration can't has children.")
elif 'type' in self.elem.attrib:
type_qname = reference_to_qname(self.elem.attrib['type'], self.namespaces)
try:
self.type = self.maps.lookup_type(type_qname)
except KeyError:
self._parse_error('unknown type %r' % self.elem.attrib['type'])
self.type = self.maps.lookup_type(XSD_ANY_TYPE)
else:
child = self._parse_component(self.elem, required=False, strict=False)
if child is not None:
if child.tag == XSD_COMPLEX_TYPE_TAG:
self.type = self.schema.BUILDERS.complex_type_class(child, self.schema)
elif child.tag == XSD_SIMPLE_TYPE_TAG:
self.type = self.schema.BUILDERS.simple_type_factory(child, self.schema)
return 1
else:
self.type = None
return 0
def _parse_constraints(self, index=0):
self.constraints = {}
for child in self._iterparse_components(self.elem, start=index):
if child.tag == XSD_UNIQUE_TAG:
constraint = XsdUnique(child, self.schema, parent=self)
elif child.tag == XSD_KEY_TAG:
constraint = XsdKey(child, self.schema, parent=self)
elif child.tag == XSD_KEYREF_TAG:
constraint = XsdKeyref(child, self.schema, parent=self)
else:
raise XMLSchemaParseError("unexpected child element %r:" % child, self)
try:
if child != self.maps.constraints[constraint.name]:
self._parse_error("duplicated identity constraint %r:" % constraint.name, child)
except KeyError:
self.maps.constraints[constraint.name] = child
finally:
self.constraints[constraint.name] = constraint
def _parse_substitution_group(self):
substitution_group = self.substitution_group
if substitution_group is None:
return
if not self.is_global:
self._parse_error("'substitutionGroup' attribute in a local element declaration")
qname = reference_to_qname(substitution_group, self.namespaces)
if qname[0] != '{':
qname = get_qname(self.target_namespace, qname)
try:
head_element = self.maps.lookup_element(qname)
except KeyError:
self._parse_error("unknown substitutionGroup %r" % substitution_group)
else:
final = head_element.final
if final is None:
final = self.schema.final_default
if final == '#all' or 'extension' in final and 'restriction' in final:
self._parse_error("head element %r cannot be substituted." % head_element)
elif self.type == head_element.type or self.type.name == XSD_ANY_TYPE:
pass
elif 'extension' in final and not self.type.is_derived(head_element.type, 'extension'):
self._parse_error(
"%r type is not of the same or an extension of the head element %r type."
% (self, head_element)
)
elif 'restriction' in final and not self.type.is_derived(head_element.type, 'restriction'):
self._parse_error(
"%r type is not of the same or a restriction of the head element %r type."
% (self, head_element)
)
elif not self.type.is_derived(head_element.type):
self._parse_error(
"%r type is not of the same or a derivation of the head element %r type."
% (self, head_element)
)
def _validation_error(self, error, validation, obj=None):
if not isinstance(error, XMLSchemaValidationError):
error = XMLSchemaValidationError(self, obj, reason=unicode_type(error))
if error.schema_elem is None:
if self.type.name is not None and self.target_namespace == self.type.target_namespace:
error.schema_elem = self.type.elem
else:
error.schema_elem = self.elem
return super(XsdElement, self)._validation_error(error, validation)
@property
def built(self):
return self.type.is_global or self.type.built
@property
def validation_attempted(self):
if self.built:
return 'full'
else:
return self.type.validation_attempted
@property
def admitted_tags(self):
return {XSD_ELEMENT_TAG}
@property
def ref(self):
return self.elem.get('ref')
@property
def abstract(self):
return get_xsd_bool_attribute(self.elem, 'abstract', default=False)
@property
def block(self):
return get_xsd_derivation_attribute(self.elem, 'block', ('extension', 'restriction', 'substitution'))
@property
def default(self):
return self.elem.get('default')
@property
def final(self):
return get_xsd_derivation_attribute(self.elem, 'final', ('extension', 'restriction'))
@property
def fixed(self):
return self.elem.get('fixed')
@property
def form(self):
return get_xsd_attribute(self.elem, 'form', ('qualified', 'unqualified'), default=None)
@property
def nillable(self):
return get_xsd_bool_attribute(self.elem, 'nillable', default=False)
@property
def substitution_group(self):
return self.elem.get('substitutionGroup')
def iter_components(self, xsd_classes=None):
if xsd_classes is None:
yield self
for obj in self.constraints.values():
yield obj
else:
if isinstance(self, xsd_classes):
yield self
for obj in self.constraints.values():
if isinstance(obj, xsd_classes):
yield obj
if self.ref is None and not self.type.is_global:
for obj in self.type.iter_components(xsd_classes):
yield obj
def match(self, name):
return self.name == name or not self.qualified and self.local_name == name
def iter_decode(self, elem, validation='lax', **kwargs):
"""
Generator method for decoding elements. A data structure is returned, possibly
preceded by a sequence of validation or decode errors.
"""
try:
converter = kwargs['converter']
except KeyError:
converter = kwargs['converter'] = self.schema.get_converter(
namespaces=kwargs.get('namespaces'),
dict_class=kwargs.get('dict_class'),
list_class=kwargs.get('list_class'),
)
use_defaults = kwargs.get('use_defaults', False)
# Get the instance type: xsi:type or the schema's declaration
if XSI_TYPE in elem.attrib:
type_ = self.maps.lookup_type(reference_to_qname(elem.attrib[XSI_TYPE], self.namespaces))
else:
type_ = self.type
# Check the xsi:nil attribute of the instance
if validation != 'skip' and XSI_NIL in elem.attrib:
if self.nillable:
try:
if get_xsd_bool_attribute(elem, XSI_NIL):
self._validation_error('xsi:nil="true" but the element is not empty.', validation, elem)
except TypeError:
self._validation_error("xsi:nil attribute must has a boolean value.", validation, elem)
else:
self._validation_error("element is not nillable.", validation, elem)
if type_.is_complex():
if use_defaults and type_.has_simple_content():
kwargs['default'] = self.default
for result in type_.iter_decode(elem, validation, **kwargs):
if isinstance(result, XMLSchemaValidationError):
yield self._validation_error(result, validation, elem)
else:
yield converter.element_decode(ElementData(elem.tag, *result), self)
del result
else:
# simpleType
if not elem.attrib:
attributes = None
else:
# Decode with an empty XsdAttributeGroup validator (only XML and XSD default attrs)
for result in self.attributes.iter_decode(elem.attrib, validation, **kwargs):
if isinstance(result, XMLSchemaValidationError):
yield self._validation_error(result, validation, elem)
else:
attributes = result
break
else:
attributes = None
if len(elem) and validation != 'skip':
yield self._validation_error("a simpleType element can't has child elements.", validation, elem)
text = elem.text
if not text and use_defaults:
default = self.default
if default is not None:
text = default
if text is None:
yield None
else:
for result in type_.iter_decode(text, validation, **kwargs):
if isinstance(result, XMLSchemaValidationError):
yield self._validation_error(result, validation, elem)
else:
yield converter.element_decode(ElementData(elem.tag, result, None, attributes), self)
del result
if validation != 'skip':
for constraint in self.constraints.values():
for error in constraint(elem):
yield self._validation_error(error, validation)
def iter_encode(self, data, validation='lax', **kwargs):
element_encode_hook = kwargs.get('element_encode_hook')
if element_encode_hook is None:
element_encode_hook = self.schema.get_converter().element_encode
kwargs['element_encode_hook'] = element_encode_hook
_etree_element = kwargs.get('etree_element') or etree_element
level = kwargs.pop('level', 0)
indent = kwargs.get('indent', None)
tail = (u'\n' + u' ' * indent * level) if indent is not None else None
element_data, errors = element_encode_hook(data, self, validation)
if validation != 'skip':
for e in errors:
yield self._validation_error(e, validation)
if self.type.is_complex():
for result in self.type.iter_encode(element_data, validation, level=level + 1, **kwargs):
if isinstance(result, XMLSchemaValidationError):
yield self._validation_error(result, validation, data)
else:
elem = _etree_element(self.name, attrib=dict(result.attributes))
elem.text = result.text
elem.extend(result.content)
elem.tail = tail
yield elem
else:
# Encode a simpleType
if element_data.attributes:
yield self._validation_error("a simpleType element can't has attributes.", validation, data)
if element_data.content:
yield self._validation_error("a simpleType element can't has child elements.", validation, data)
if element_data.text is None:
elem = _etree_element(self.name, attrib={})
elem.text = None
elem.tail = tail
yield elem
else:
for result in self.type.iter_encode(element_data.text, validation, **kwargs):
if isinstance(result, XMLSchemaValidationError):
yield self._validation_error(result, validation, data)
else:
elem = _etree_element(self.name, attrib={})
elem.text = result
elem.tail = tail
yield elem
break
del element_data
def iter_decode_children(self, elem, index=0, validation='lax'):
model_occurs = 0
while True:
try:
tag = elem[index].tag
except TypeError:
# elem is a lxml.etree.Element and elem[index] is a <class 'lxml.etree._Comment'>:
# in this case elem[index].tag is a <cyfunction Comment>, not a string. So
# decode nothing and move on to the next child.
pass
except IndexError:
if validation != 'skip' and model_occurs == 0 and self.min_occurs > 0:
error = XMLSchemaChildrenValidationError(self, elem, index, self.prefixed_name)
yield self._validation_error(error, validation)
else:
yield index
return
else:
if tag == self.name:
yield self, elem[index]
elif not self.qualified and tag == get_qname(self.target_namespace, self.name):
yield self, elem[index]
elif self.name in self.maps.substitution_groups:
for e in self.schema.substitution_groups[self.name]:
if tag == e.name:
yield e, elem[index]
break
else:
if validation != 'skip' and model_occurs == 0 and self.min_occurs > 0:
error = XMLSchemaChildrenValidationError(self, elem, index, self.prefixed_name)
yield self._validation_error(error, validation)
else:
yield index
return
else:
if validation != 'skip' and model_occurs == 0 and self.min_occurs > 0:
error = XMLSchemaChildrenValidationError(self, elem, index, self.prefixed_name)
yield self._validation_error(error, validation)
else:
yield index
return
index += 1
model_occurs += 1
if self.max_occurs is not None and model_occurs >= self.max_occurs:
yield index
return
def get_attribute(self, name):
if name[0] != '{':
return self.type.attributes[get_qname(self.type.target_namespace, name)]
return self.type.attributes[name]
def iter(self, tag=None):
if tag is None or self.name == tag:
yield self
try:
for xsd_element in self.type.content_type.iter_elements():
if xsd_element.ref is None:
for e in xsd_element.iter(tag):
yield e
elif tag is None or xsd_element.name == tag:
yield xsd_element
except (TypeError, AttributeError):
return
def iterchildren(self, tag=None):
try:
for xsd_element in self.type.content_type.iter_elements():
if tag is None or xsd_element.match(tag):
yield xsd_element
except (TypeError, AttributeError):
return
class Xsd11Element(XsdElement):
"""
Class for XSD 1.1 'element' declarations.
<element
abstract = boolean : false
block = (#all | List of (extension | restriction | substitution))
default = string
final = (#all | List of (extension | restriction))
fixed = string
form = (qualified | unqualified)
id = ID
maxOccurs = (nonNegativeInteger | unbounded) : 1
minOccurs = nonNegativeInteger : 1
name = NCName
nillable = boolean : false
ref = QName
substitutionGroup = List of QName
targetNamespace = anyURI
type = QName
{any attributes with non-schema namespace . . .}>
Content: (annotation?, ((simpleType | complexType)?, alternative*, (unique | key | keyref)*))
</element>
"""
def _parse(self):
XsdAnnotated._parse(self)
self._parse_attributes()
index = self._parse_type()
index = self._parse_alternatives(index)
if self.type is None:
if not self.alternatives:
self.type = self.maps.lookup_type(XSD_ANY_TYPE)
elif self.alternatives:
self._parse_error("types alternatives incompatible with type specification.")
self._parse_constraints(index)
self._parse_substitution_group()
def _parse_alternatives(self, index=0):
self.alternatives = []
for child in self._iterparse_components(self.elem, start=index):
if child.tag == XSD_ALTERNATIVE_TAG:
self.alternatives.append(XsdAlternative(child, self.schema))
index += 1
else:
break
return index
@property
def target_namespace(self):
try:
return self.elem.attrib['targetNamespace']
except KeyError:
return self.schema.target_namespace
class XsdAlternative(XsdAnnotated):
"""
<alternative
id = ID
test = an XPath expression
type = QName
xpathDefaultNamespace = (anyURI | (##defaultNamespace | ##targetNamespace | ##local))
{any attributes with non-schema namespace . . .}>
Content: (annotation?, (simpleType | complexType)?)
</alternative>
"""
@property
def admitted_tags(self):
return {XSD_ELEMENT_TAG}
| [
"[email protected]"
] | |
57213ef2b1e90bae101fffe7e7eb0c0cfa88899b | e3f8a3631b05347614645807ec04f834f30d3361 | /mysite/myapp/urls.py | d509527fb94ec17f6719cd9a13ab69163abc3d67 | [
"MIT"
] | permissive | CSUChico-CINS465/CINS465-S19-Examples | 0acd7e8c46d6a197e63ff23a5198ba5c93d9cf3d | 2a9e88e1cc5ec937562211680760243d0f16efcf | refs/heads/master | 2021-11-16T15:06:28.924984 | 2021-06-16T16:33:37 | 2021-06-16T16:33:37 | 166,880,285 | 0 | 1 | MIT | 2021-09-22T17:49:06 | 2019-01-21T21:02:57 | Python | UTF-8 | Python | false | false | 383 | py | from django.urls import path
from django.contrib.auth import views as auth_views
from . import views
urlpatterns = [
path('', views.index),
path('login/', auth_views.LoginView.as_view()),
path('logout/', views.logout_view),
path('register/', views.register),
path('comment/<int:sugg>/', views.comment_view),
path('suggestions/', views.suggestions_json),
]
| [
"[email protected]"
] | |
66fc09278c9d017d0fc259a174d156f1030566ff | 12f83344cdfe561db39ad9106dbf263ccd919f7e | /Projects/miami_metro/debra/search_views.py | 77ead296336719b4b2451479030e5c53308017f5 | [] | no_license | TopWebGhost/Angular-Influencer | ebcd28f83a77a92d240c41f11d82927b98bcea9e | 2f15c4ddd8bbb112c407d222ae48746b626c674f | refs/heads/master | 2021-01-19T10:45:47.039673 | 2016-12-05T01:59:26 | 2016-12-05T01:59:26 | 82,214,998 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83,630 | py | import json
import datetime
import time
import urllib
import logging
import sys
import itertools
from collections import defaultdict, Counter
from django.conf import settings
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.core.mail import mail_admins
from django.template.loader import render_to_string
from django.http import (HttpResponseForbidden, HttpResponse,\
HttpResponseBadRequest)
from django.db.models import Q, Count, F
from django.core.serializers.json import DjangoJSONEncoder
from django.core.cache import cache, get_cache
from django.http import Http404
from debra.es_requests import make_es_get_request
from debra.models import (
Influencer, BrandMentions, Brands, User,
InfluencerJobMapping, SearchQueryArchive, PostAnalyticsCollection,
PostAnalytics, ROIPredictionReport, InfluencerAnalytics,
InfluencerAnalyticsCollection, InfluencerBrandUserMapping,
InfluencersGroup, Platform,)
from debra.constants import (ELASTICSEARCH_URL, ELASTICSEARCH_INDEX, NUM_OF_IMAGES_PER_BOX)
from debra.constants import STRIPE_COLLECTION_PLANS, STRIPE_EMAIL_PLANS
from debra.decorators import (
user_is_brand_user, public_influencer_view, user_is_brand_user_json,
login_required_json)
from debra.serializers import unescape
from debra import constants
from debra import feeds_helpers
from debra import search_helpers
from debra import mongo_utils
from debra import account_helpers
from debra.helpers import get_or_set_cache
from xpathscraper import utils
log = logging.getLogger('debra.search_views')
mc_cache = get_cache('memcached')
redis_cache = get_cache('redis')
@login_required_json
@user_is_brand_user_json
def blogger_search_json_v3(request):
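"""
Main AJAX endpoint for the influencer search page: runs the
Elasticsearch-backed query, enforces per-brand query limits, and
decorates each result with the brand's tags and custom data.
"""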
def postprocessing(search_result, brand, search_query):
_t0 = time.time()
# brand_tag_ids = list(base_brand.influencer_groups.exclude(
# archived=True
# ).filter(creator_brand=brand, system_collection=False).values_list(
# 'id', flat=True))
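# Batch all tag-membership lookups into one Redis round-trip: the first
# SDIFF yields the brand's non-system tag ids, the remaining ones yield
# each influencer's non-system tag ids.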
pipe = settings.REDIS_CLIENT.pipeline()
pipe.sdiff('btags_{}'.format(brand.id), 'systags')
for res in search_result["results"]:
pipe.sdiff('itags_{}'.format(res['id']), 'systags')
pipe_data = pipe.execute()
brand_tag_ids, inf_tag_ids = map(int, pipe_data[0]), {
int(res['id']): map(int, [t for t in tag_ids if t and t != 'None'])
for res, tag_ids in zip(search_result['results'], pipe_data[1:])
if res['id']
}
all_tag_ids = set([
tag_id for tag_id in itertools.chain(*inf_tag_ids.values())
if tag_id in brand_tag_ids
])
tag_names = {
int(key[3:]): val
for key, val in redis_cache.get_many([
'ig_{}'.format(tag_id) for tag_id in all_tag_ids
]).items()
}
for res in search_result['results']:
res['collections_in'] = {
tag_id: tag_names.get(tag_id)
for tag_id in inf_tag_ids.get(int(res['id']), [])
if tag_names.get(tag_id)
}
print '* Filtering tag ids took {}'.format(
datetime.timedelta(seconds=time.time() - _t0))
if brand and brand.flag_bloggers_custom_data_enabled and search_query.get('search_method') == 'r29':
from debra.models import (InfluencerBrandMapping,
SiteConfiguration)
from debra.constants import SITE_CONFIGURATION_ID
from debra.serializers import InfluencerBrandMappingSerializer
brand_mappings = {
m.influencer_id: m
for m in InfluencerBrandMapping.objects.filter(
influencer_id__in=[x['id'] for x in search_result['results']],
brand_id=brand.id
).prefetch_related('influencer__demographics_locality')
}
metadata = SiteConfiguration.objects.get(
id=SITE_CONFIGURATION_ID).blogger_custom_data_json
for res in search_result['results']:
mp = brand_mappings.get(int(res['id']))
from djangorestframework_camel_case.util import camelize
res['brand_custom_data'] = camelize(InfluencerBrandMappingSerializer(
mp, context={'metadata': metadata}).data) if mp else None
search_result["query_limited"] = query_limiter
if query_limiter:
if brand.num_querys_remaining:
brand.num_querys_remaining -= 1
brand.save()
request.session.modified = True
search_result["remaining"] = brand.num_querys_remaining
search_result["remaining_debug"] = settings.DEBUG
only_setup_params = {}
t0 = time.time()
t1 = time.time()
print
print "BSJ start",
mongo_utils.track_visit(request)
try:
search_query = json.loads(request.body)
except ValueError:
search_query = {}
brand = request.visitor["brand"]
base_brand = request.visitor["base_brand"]
q_from_request = search_helpers.query_from_request(request)
# flag for caching separately ES page of initial data for trial and complete pages
trial_str = 'full' if bool(brand.flag_show_dummy_data if brand else False) else 'trial'
# first prettify the query for mandrill, intercom, and slack
try:
only_setup_params = search_helpers.find_non_default_query(search_query)
if only_setup_params is None or only_setup_params == [{}]:
only_setup_params = {}
query_formatted = search_helpers.format_query_for_displaying(only_setup_params)
print "only_setup_params = [%r] query_formatted = [%r]" % (only_setup_params, query_formatted)
if len(only_setup_params) == 0:
cached_empty_query = cache.get('only_setup_params_influencer_%s' % trial_str)
if cached_empty_query:
print('Returning cached data about initial page of influencers. No ES query was made.')
postprocessing(cached_empty_query, brand, q_from_request)
if request.is_ajax():
data_json = json.dumps(cached_empty_query, cls=DjangoJSONEncoder)
return HttpResponse(data_json, content_type="application/json")
else:
data_json = json.dumps(cached_empty_query, cls=DjangoJSONEncoder, indent=4)
return HttpResponse("<body><pre>{}</pre></body>".format(data_json))
except:
a = json.dumps(search_query, sort_keys=True, indent=4, separators=(',', ': '))
query_formatted = 'Problem in formatting %r' % a
pass
account_helpers.intercom_track_event(request, "brand-search-query", {
'query': query_formatted,
})
mongo_utils.track_query(
"brand-search-query",
query_formatted, {"user_id": request.visitor["auth_user"].id})
print time.time() - t1
print "Verification",
t1 = time.time()
if not base_brand or not base_brand.is_subscribed:
return HttpResponseForbidden()
# disable_query_limit = request.user.is_superuser or request.user.is_staff
disable_query_limit = True
if not disable_query_limit:
query_limiter = True
else:
query_limiter = False
print time.time() - t1
print "Favs",
t1 = time.time()
if base_brand:
user = User.objects.get(id=request.user.id)
if base_brand.flag_trial_on and not account_helpers.internal_user(user):
slack_msg = "\n**************\nBrand = " + base_brand.domain_name + " User: " + request.user.email + "\n" + query_formatted
account_helpers.send_msg_to_slack.apply_async(['brands-trial-activity', slack_msg],
queue='celery')
if base_brand.is_subscribed:
query_limiter = False
base_brand.saved_queries.create(query=json.dumps(search_query),
user=request.user)
# Primitive Rate-Limiting
if not settings.DEBUG and not account_helpers.internal_user(user) and (
# TODO: contains on dates? possibly a bug
base_brand.saved_queries.filter(timestamp__contains=datetime.date.today()).count() > 2000 or
base_brand.blacklisted is True
):
return HttpResponseForbidden("limit", content_type="application/json")
print time.time() - t1
print "Query limiter",
t1 = time.time()
if query_limiter and brand.num_querys_remaining == 0:
data = {}
data["remaining"] = brand.num_querys_remaining
data["remaining_debug"] = settings.DEBUG
data = json.dumps(data, cls=DjangoJSONEncoder)
return HttpResponse(data, content_type="application/json")
if query_limiter:
items_per_page = 20
else:
items_per_page = 30
search_query = q_from_request
if search_query.get('sub_tab') == 'instagram_search':
search_query['filters']['post_platform'] = ['Instagram']
elif search_query.get('sub_tab') == 'main_search':
search_query['no_artificial_blogs'] = True
if search_query.get('search_method') == 'r29':
search_query['filters']['tags'].append(constants.R29_CUSTOM_DATA_TAG_ID)
else:
search_query['filters']['exclude_tags'] = [constants.R29_CUSTOM_DATA_TAG_ID]
if True:
# this uses the newer version of our code to use Elastic Search in a more structured way
# this is the only front-end facing view that uses the new code
# all other methods use the old search logic
# our goal is to use the old search logic everywhere first to make sure we're consistent first
# and then we want to re-factor everything.
search_result = search_helpers.search_influencers_v3(
search_query, items_per_page, request=request)
post_time = time.time()
if only_setup_params == {}:
search_result['total_influencers'] = search_result['total_influencers'] + 40000
cache.set('only_setup_params_influencer_%s' % trial_str, search_result)
# we can use ES for this
# search_helpers.set_influencer_collections(
# search_result["results"], brand_id=brand.id)
# search_helpers.set_mailed_to_influencer(
# search_result["results"], brand_id=brand.id)
# search_helpers.set_influencer_invited_to(
# search_result["results"], brand_id=brand.id)
# _t0 = time.time()
# # brand_tag_ids = list(base_brand.influencer_groups.exclude(
# # archived=True
# # ).filter(creator_brand=brand, system_collection=False).values_list(
# # 'id', flat=True))
# pipe = settings.REDIS_CLIENT.pipeline()
# pipe.sdiff('btags_{}'.format(brand.id), 'systags')
# for res in search_result["results"]:
# pipe.sdiff('itags_{}'.format(res['id']), 'systags')
# pipe_data = pipe.execute()
# brand_tag_ids, inf_tag_ids = map(int, pipe_data[0]), {
# int(res['id']): map(int, tag_ids)
# for res, tag_ids in zip(search_result['results'], pipe_data[1:])
# }
# all_tag_ids = set([
# tag_id for tag_id in itertools.chain(*inf_tag_ids.values())
# if tag_id in brand_tag_ids
# ])
# tag_names = {
# int(key[3:]): val
# for key, val in redis_cache.get_many([
# 'ig_{}'.format(tag_id) for tag_id in all_tag_ids
# ]).items()
# }
# for res in search_result['results']:
# res['collections_in'] = {
# tag_id: tag_names.get(tag_id)
# for tag_id in inf_tag_ids.get(int(res['id']), [])
# if tag_names.get(tag_id)
# }
# for res in search_result["results"]:
# res['collections_in'] = {
# tag_id: name
# for tag_id, name in res.get('collections_in', {}).items()
# if tag_id in brand_tag_ids
# }
# print '* Filtering tag ids took {}'.format(
# datetime.timedelta(seconds=time.time() - _t0))
# search_helpers.set_influencer_analytics_collections(
# search_result["results"], brand_id=brand.id)
# search_helpers.set_brand_notes(
# search_result["results"], user_id=request.user.id)
print 'Post-processing time', time.time() - post_time
mongo_utils.influencers_appeared_on_search(
[x["id"] for x in search_result["results"]])
# search_result["query_limited"] = query_limiter
# if brand and brand.flag_bloggers_custom_data_enabled and search_query.get('search_method') == 'r29':
# from debra.models import (InfluencerBrandMapping,
# SiteConfiguration)
# from debra.constants import SITE_CONFIGURATION_ID
# from debra.serializers import InfluencerBrandMappingSerializer
# brand_mappings = {
# m.influencer_id: m
# for m in InfluencerBrandMapping.objects.filter(
# influencer_id__in=[x['id'] for x in search_result['results']],
# brand_id=brand.id
# ).prefetch_related('influencer__demographics_locality')
# }
# metadata = SiteConfiguration.objects.get(
# id=SITE_CONFIGURATION_ID).blogger_custom_data_json
# for res in search_result['results']:
# mp = brand_mappings.get(int(res['id']))
# from djangorestframework_camel_case.util import camelize
# res['brand_custom_data'] = camelize(InfluencerBrandMappingSerializer(
# mp, context={'metadata': metadata}).data) if mp else None
else:
# remove this later on
search_result = search_helpers.search_influencers_old(search_query, 60)
print "SEARCH_RESULT: %s" % search_result
print [x["id"] for x in search_result["results"]]
# if query_limiter:
# if brand.num_querys_remaining:
# brand.num_querys_remaining -= 1
# brand.save()
# request.session.modified = True
# search_result["remaining"] = brand.num_querys_remaining
# search_result["remaining_debug"] = settings.DEBUG
postprocessing(search_result, brand, search_query)
print time.time() - t1
print "BSJ end, total in: ", time.time() - t0
print
# @todo: it's from some other branch, so not used for now
# search_result['engagement_to_followers_ratio_overall'] = Platform.engagement_to_followers_ratio_overall(
# 'All')
# if only_setup_params == {}:
# search_result['total_influencers'] = search_result['total_influencers'] + 40000
# cache.set('only_setup_params_influencer_%s' % trial_str, search_result)
print 'TOTAL', search_result['total_influencers']
if request.is_ajax():
data_json = json.dumps(search_result, cls=DjangoJSONEncoder)
#print "DATA_JSON: %s" % data_json
return HttpResponse(data_json, content_type="application/json")
else:
data_json = json.dumps(search_result, cls=DjangoJSONEncoder, indent=4)
return HttpResponse("<body><pre>{}</pre></body>".format(data_json))
@login_required
@user_is_brand_user
def blogger_search(request):
"""
returns rendered bloggers search page
"""
return redirect(reverse('debra.search_views.main_search'))
@login_required
@user_is_brand_user
def main_search(request):
mongo_utils.track_visit(request)
brand = request.visitor["base_brand"]
if brand and brand.is_subscribed:
plan_name = brand.stripe_plan
else:
return redirect('/')
# saved_queries_list = search_helpers.get_brand_saved_queries_list(brand)
# groups_list = search_helpers.get_brand_groups_list(
# request.visitor["brand"], request.visitor["base_brand"])
# tags_list = search_helpers.get_brand_tags_list(
# request.visitor["brand"], request.visitor["base_brand"])
context = {
'search_page': True,
'type': 'all',
'selected_tab': 'search',
'sub_page': 'main_search',
'shelf_user': request.visitor["user"],
'debug': settings.DEBUG,
# 'show_select': show_select,
# 'groups_list': json.dumps(groups_list),
# 'saved_queries_list': json.dumps(saved_queries_list),
'tag_id': request.GET.get('tag_id'),
'saved_search': request.GET.get('saved_search'),
}
context.update(
search_helpers.prepare_filter_params(context, plan_name=plan_name))
return render(request, 'pages/search/main.html', context)
@login_required
@user_is_brand_user
def posts_search_json(request):
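"""
AJAX endpoint for post search: dispatches to the matching feed helper
(tweets/pins/photos/blog posts) based on the 'filter' key in the body.
"""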
mongo_utils.track_visit(request)
base_brand = request.visitor["base_brand"]
if base_brand:
user = User.objects.get(id=request.user.id)
if not settings.DEBUG and not account_helpers.internal_user(user) and (
# TODO: contains on dates? possibly a bug
base_brand.saved_queries.filter(timestamp__contains=datetime.date.today()).count() > 2000 or
base_brand.blacklisted is True
):
return HttpResponseForbidden("limit", content_type="application/json")
try:
search_query = json.loads(urllib.unquote(request.body))
except:
search_query = {}
content_filter = search_query.get('filter')
if content_filter == 'tweets':
data = feeds_helpers.twitter_feed_json(request)
elif content_filter == 'pins':
data = feeds_helpers.pinterest_feed_json(request)
elif content_filter == 'photos':
data = feeds_helpers.instagram_feed_json(request)
else:
data = feeds_helpers.blog_feed_json_dashboard(request)
if request.is_ajax():
#data["results"].sort(key=lambda x: x.get("create_date", x["id"]))
data = json.dumps(data, cls=DjangoJSONEncoder)
return HttpResponse(data, content_type="application/json")
else:
return HttpResponse("<body></body>" % data)
@login_required
@user_is_brand_user
def posts_search(request):
"""
obsolete
"""
mongo_utils.track_visit(request)
context = {
'search_page': True,
'selected_tab': 'search_posts',
'shelf_user': request.user.userprofile,
'debug': settings.DEBUG,
}
return render(request, 'pages/search/posts.html', context)
# NON RENDERING METHODS
def blogger_posts_json(request, influencer_id):
"""
This view serves the AJAX request for the influencer's posts on the influencer's PROFILE.
params: influencer_id -- id of the desired influencer.
"""
from debra.models import BrandJobPost
from debra.search_helpers import find_non_default_query
use_query = False
if request.GET.get('campaign_posts_query'):
campaign = BrandJobPost.objects.get(
id=request.GET.get('campaign_posts_query'))
if campaign.posts_saved_search:
use_query = True
es_query = campaign.posts_saved_search.query
else:
es_query = '{}'
else:
try:
es_query = json.loads(urllib.unquote(request.GET.get('q', '{}')))
except:
es_query = {}
else:
es_query = find_non_default_query(es_query)
if es_query is None or es_query == [{}]:
es_query = {}
if es_query:
use_query = True
# posts_per_page = 60
posts_per_page = 12
data = {} # resulting dict
# if no cached data, then obtaining it from ES and DB
if not data:
# jsonifying request
if not isinstance(es_query, dict):
try:
es_query = json.loads(es_query)
except ValueError:
mail_admins(
"Request jsonify problem",
"es_query: {}\n, request: {}".format(es_query, request)
)
es_query = {}
data = {}
es_query['page'] = 1
if not use_query:
es_query['default_posts'] = "profile"
# getting posts, posts_sponsored, photos for influencer and adding it to result
posts, posts_sponsored, photos, total_posts = search_helpers.get_influencer_posts_v2(
influencer_ids=influencer_id,
parameters=es_query,
page_size=posts_per_page,
include_photos=True,
request=request
)
if posts:
data['posts'] = posts
if posts_sponsored:
data['posts_sponsored'] = posts_sponsored
if photos:
data['photos'] = photos
data['total_posts'] = total_posts
# caching this data result
# cache.set(cache_key, data)
data = json.dumps(data, cls=DjangoJSONEncoder)
return HttpResponse(data, content_type="application/json")
blogger_posts_json_public = public_influencer_view(blogger_posts_json)
blogger_posts_json = login_required(blogger_posts_json)
def blogger_items_json(request, influencer_id):
"""
This view serves the AJAX request for the influencer's items on the influencer's PROFILE.
params: influencer_id -- id of the desired influencer.
"""
# retrieving the influencer's object
es_query = urllib.unquote(request.GET.get('q', '{}')) # passed query parameters
items_per_page = 25
data = {} # resulting dict
# if no cached data, then obtaining it from ES and DB
if not data:
# jsonifying request
try:
es_query = json.loads(es_query)
except ValueError:
mail_admins(
"Request jsonify problem",
"es_query: {}\n, request: {}".format(es_query, request)
)
es_query = {}
data = {}
es_query['page'] = 1
# getting items for influencer and adding it to result
items, total_items = search_helpers.get_influencer_products_v2(
influencer_ids=influencer_id,
parameters=es_query,
page_size=items_per_page
)
items = items if len(items) > 2 else []
if items:
data['items'] = items
data['total_items'] = total_items if len(items) > 2 else 0
# caching this data result
# cache.set(cache_key, data)
data = json.dumps(data, cls=DjangoJSONEncoder)
return HttpResponse(data, content_type="application/json")
blogger_items_json_public = public_influencer_view(blogger_items_json)
blogger_items_json = login_required(blogger_items_json)
def blogger_stats_json(request, influencer_id):
from debra.models import Influencer
cache_key = "bij_%s_stats" % (influencer_id,)
def get_data():
influencer = Influencer.objects.get(id=influencer_id)
return search_helpers.get_popularity_stats(influencer)
# data = get_or_set_cache(cache_key, get_data)
data = get_data()
if request.is_ajax():
data = json.dumps(data, cls=DjangoJSONEncoder)
return HttpResponse(data, content_type="application/json")
else:
data = json.dumps(data, cls=DjangoJSONEncoder, indent=4)
return HttpResponse("<body>%s</body>" % data)
blogger_stats_json_public = public_influencer_view(blogger_stats_json)
blogger_stats_json = login_required(blogger_stats_json)
def blogger_brand_mentions_json(request, influencer_id):
from debra.models import Influencer
esquery = urllib.unquote(request.GET.get('q', '{}'))
as_json = urllib.unquote(request.GET.get('json', '')) or False
if as_json:
esquery = json.loads(esquery)
bf_raw = []
eskw = []
if 'keyword' in esquery and esquery['keyword'] is not None:
eskw = esquery['keyword']
if 'filters' in esquery and 'brand' in esquery['filters']:
bf_raw = [b['value'] for b in esquery['filters']['brand']]
else:
eskw = [esquery]
#brand filters normalization
bf_raw = urllib.unquote(request.GET.get('brands', '')).split(',')
brands_filter = sorted(filter(None, [bf.strip() for bf in set(bf_raw)]))
brands_filter = ",".join(brands_filter)
cache_key = "bij_%s_%s_%s_brand_mentions" % (influencer_id, esquery, brands_filter,)
cache_key = "".join([ord(ch) > 32 and ord(ch) < 129 and ch or str(ord(ch)) for ch in cache_key])
data = cache.get(cache_key)
if not data:
# First variant
# brands = map(search_helpers.extract_brand_name, brands_filter.split(',') + eskw)
# brands = filter(None, brands)
# Variant with multiple urls at once
brands = search_helpers.extract_brand_names(brands_filter.split(',') + eskw)
brand_names = {brand.name.title().strip() for brand in brands}
influencer = Influencer.objects.get(id=influencer_id)
data = search_helpers.get_brand_mentions(influencer, exclude_itself=True)
mentions_notsponsored = search_helpers\
.additional_brand_mentions_filtering(
data["mentions_notsponsored"], brand_names)
if len(mentions_notsponsored) < 3:
mentions_notsponsored = []
data['mentions_notsponsored'] = {
'name': 'main',
'children': [{
'name': item['name'].lower(),
'children':[{
'name': item['name'],
'size': item['count'],
'data': {
# 'domain': if item.get('domain_name'),
'url': reverse('debra.blogger_views.blogger_generic_posts',
kwargs=dict(
section='all',
influencer_id=influencer_id,
brand_domain=item.get('domain_name'),
)
) if item.get('domain_name') else None
},
}]
} for item in mentions_notsponsored]
}
data['mentions_min'] = min(x['count']
for x in mentions_notsponsored) if mentions_notsponsored else 0
data['mentions_max'] = max(x['count']
for x in mentions_notsponsored) if mentions_notsponsored else 0
data['brand_names'] = list(set(x['name']
for x in mentions_notsponsored))
# data["mentions_notsponsored"] = search_helpers.additional_brand_mentions_filtering(data["mentions_notsponsored"], brand_names)
cache.set(cache_key, data)
if request.is_ajax():
data = json.dumps(data, cls=DjangoJSONEncoder)
return HttpResponse(data, content_type="application/json")
else:
data = json.dumps(data, cls=DjangoJSONEncoder, indent=4)
return HttpResponse("<body>%s</body>" % data)
blogger_brand_mentions_json_public = public_influencer_view(
blogger_brand_mentions_json)
blogger_brand_mentions_json = login_required(blogger_brand_mentions_json)
def blogger_post_counts_json(request, influencer_id):
from debra.models import Influencer
post_type = request.GET.get('post_type')
influencer = Influencer.objects.get(id=influencer_id)
POST_TYPES = ['photos', 'pins', 'tweets', 'videos', 'blog_posts']
if post_type not in POST_TYPES:
return HttpResponseBadRequest()
def get_page_url(influencer, post_type):
field = {
'blog_posts': 'posts_page',
'photos': 'photos_page',
'tweets': 'tweets_page',
'pins': 'pins_page',
'videos': 'youtube_page',
}[post_type]
return getattr(influencer, field)
def get_count(influencer, post_type):
# return getattr(influencer, '{}_count'.format(post_type))
return influencer.get_posts_section_count(post_type)
def get_post_type_verbose(post_type):
return ' '.join([w.capitalize() for w in post_type.split('_')])
data = {
'count': get_count(influencer, post_type),
'post_type': post_type,
'post_type_verbose': get_post_type_verbose(post_type),
'page_url': get_page_url(influencer, post_type),
}
if request.is_ajax():
data = json.dumps(data, cls=DjangoJSONEncoder)
return HttpResponse(data, content_type="application/json")
else:
data = json.dumps(data, cls=DjangoJSONEncoder, indent=4)
return HttpResponse("<body>%s</body>" % data)
blogger_post_counts_json_public = public_influencer_view(
blogger_post_counts_json)
blogger_post_counts_json = login_required(blogger_post_counts_json)
#####-----</ Similar Web Views />-----#####
def blogger_monthly_visits(request, influencer_id):
''' blogger_monthly_visits returns monthly visit data for an influencer.
@request: django request object
@influencer_id: the id of the influencer to get data for
@results: c3.js-style columns -- an 'x' series of dates and a 'visits' series of counts
'''
from debra.models import Influencer
brand = request.visitor["brand"]
def get_compete_data():
influencer = Influencer.objects.get(id=influencer_id)
return influencer.get_monthly_visits_compete(
brand.flag_compete_api_key)
def get_data():
influencer = Influencer.objects.get(id=influencer_id)
return influencer.get_monthly_visits()
use_compete = brand and brand.flag_compete_api_key_available
getter, cache_key = (get_compete_data, "bmv_{}_compete".format(
influencer_id)) if use_compete else (get_data, "bmv_{}".format(
influencer_id))
data = get_or_set_cache(cache_key, getter)
data = {
'columns': [
['x'] + [item['date'] for item in data],
['visits'] + [item['count'] for item in data],
]
}
if request.is_ajax():
data = json.dumps(data, cls=DjangoJSONEncoder)
return HttpResponse(data, content_type='application/json')
else:
data = json.dumps(data, cls=DjangoJSONEncoder, indent=4)
return HttpResponse('<body>%s</body>' % data)
blogger_monthly_visits_public = public_influencer_view(blogger_monthly_visits)
blogger_monthly_visits = login_required(blogger_monthly_visits)
def blogger_traffic_shares(request, influencer_id):
cache_key = "bts_{}".format(influencer_id)
def get_data():
influencer = Influencer.objects.get(id=influencer_id)
return influencer.get_traffic_shares()
data = get_or_set_cache(cache_key, get_data)
if request.is_ajax():
data = json.dumps(data, cls=DjangoJSONEncoder)
return HttpResponse(data, content_type='application/json')
else:
data = json.dumps(data, cls=DjangoJSONEncoder, indent=4)
return HttpResponse('<body>%s</body>' % data)
blogger_traffic_shares_public = public_influencer_view(blogger_traffic_shares)
blogger_traffic_shares = login_required(blogger_traffic_shares)
def blogger_top_country_shares(request, influencer_id):
cache_key = "bcs_{}".format(influencer_id)
def get_data():
influencer = Influencer.objects.get(id=influencer_id)
return influencer.get_top_country_shares()
data = get_or_set_cache(cache_key, get_data)
if request.is_ajax():
data = json.dumps(data, cls=DjangoJSONEncoder)
return HttpResponse(data, content_type='application/json')
else:
data = json.dumps(data, cls=DjangoJSONEncoder, indent=4)
return HttpResponse('<body>%s</body>' % data)
blogger_top_country_shares_public = public_influencer_view(blogger_top_country_shares)
blogger_top_country_shares = login_required(blogger_top_country_shares)
#####-----</ End Similar Web Views />-----#####
def blogger_info_json(request, influencer_id):
"""
this view returns the details JSON for the influencer given as a parameter
request can contain GET query with:
- q - elastic search query that matches posts content, brands, etc and highlights results
- brands - elastic search query that matches brands and highlights its related items/posts
"""
t0 = time.time()
mongo_utils.track_visit(request)
esquery = urllib.unquote(request.GET.get('q', '{}'))
influencer_id = int(influencer_id)
t_bf = time.time()
#brand filters normalization
bf_raw = urllib.unquote(request.GET.get('brands', '')).split(',')
brands_filter = sorted(filter(None, [bf.strip() for bf in set(bf_raw)]))
brands_filter = ",".join(brands_filter)
print 'Brand filters normalization', time.time() - t_bf
auth_user = request.visitor["auth_user"]
t_mongo = time.time()
mongo_utils.track_query("brand-clicked-blogger-detail-panel", {
"influencer_id": influencer_id,
"post_filter": esquery,
"brand_filter": brands_filter
}, {"user_id": auth_user.id if auth_user else None })
print 'Mongo track', time.time() - t_mongo
t_b = time.time()
brand = request.visitor["brand"]
base_brand = request.visitor["base_brand"]
    print 'Brand/BaseBrand retrieve', time.time() - t_b
try:
parameters = json.loads(esquery)
except ValueError:
mail_admins(
"Request jsonify problem",
"es_query: {}\n, request: {}".format(esquery, request)
)
parameters = {}
t_cache = time.time()
def get_cache_key(influencer_id):
return 'bij_%s_%i_%i_%i' % (
parameters.get('sub_tab', 'main_search'),
influencer_id,
int(bool(base_brand.flag_show_dummy_data if base_brand else False)),
int(request.user.is_authenticated())
)
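    # The key varies on sub-tab, influencer id, the dummy-data flag, and the
    # viewer's auth state, so each combination caches its own JSON variant.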
# get base influencer json from cache or create one
# cache_key = get_cache_key(influencer_id)
# data = cache.get(cache_key)
# print 'Retrieve data from cache', time.time() - t_cache
print 'Pre-influencer stage', time.time() - t0
    if True:  # caching disabled above; always rebuild the influencer JSON
_t0 = time.time()
influencer = Influencer.objects.prefetch_related(
'platform_set',
'shelf_user__userprofile',
'demographics_locality',
# 'mails__candidate_mapping__job',
# 'group_mapping__jobs__job'
).get(id=influencer_id)
influencer.for_search = True
print 'Influencer prefetch', time.time() - _t0
t0 = time.time()
data = search_helpers.get_influencer_json(
influencer,
with_posts=False,
with_items=False,
with_stats=False,
with_brand_mentions=False,
parameters=parameters,
request=request
)
print 'Get influencer json', time.time() - t0
# cache.set(cache_key, data)
t0 = time.time()
# some brand related variables
if base_brand and base_brand.is_subscribed and base_brand.stripe_plan in STRIPE_EMAIL_PLANS:
if base_brand.stripe_plan in STRIPE_COLLECTION_PLANS:
data["can_favorite"] = True
# data["is_favoriting"] = brand.influencer_groups.filter(
# influencers_mapping__influencer__id=influencer_id).exists()
data["email"] = False
if 'influencer' not in locals():
influencer = Influencer.objects.get(id=influencer_id)
emails = influencer.email
if emails:
splited = emails.split()
if splited:
data["email"] = splited[0]
else:
data["can_favorite"] = False
# invited_to list (moved from serializer to prevent being cached)
    if False and brand:  # job-invite lookup temporarily disabled
job_ids = InfluencerJobMapping.objects.with_influencer(
influencer
).filter(
Q(mailbox__brand_id=brand.id) | \
Q(mapping__group__owner_brand_id=brand.id)
).distinct('job').values_list('job', flat=True)
else:
job_ids = []
if brand:
from debra.models import InfluencerBrandMapping
from debra.serializers import InfluencerBrandMappingSerializer
brand_mapping, _ = InfluencerBrandMapping.objects.get_or_create(
influencer_id=influencer.id, brand_id=brand.id)
data['brand_custom_data'] = InfluencerBrandMappingSerializer(
brand_mapping).data
data["invited_to"] = list(job_ids)
# if request.user.is_authenticated():
# mapping, _ = InfluencerBrandUserMapping.objects.get_or_create(
# user=request.user, influencer=influencer)
# data['note'] = mapping.notes
# tracking
account_helpers.intercom_track_event(request, "brand-clicked-blogger-detail-panel", {
'blog_url': data["profile"]["blog_page"]
})
print 'Post influencer stage', time.time() - t0
    # finally, serialize and send back
if request.is_ajax():
data = json.dumps(data, cls=DjangoJSONEncoder)
return HttpResponse(data, content_type="application/json")
else:
data = json.dumps(data, cls=DjangoJSONEncoder, indent=4)
return HttpResponse("<body>%s</body>" % data)
blogger_info_json_public = public_influencer_view(blogger_info_json)
blogger_info_json = login_required_json(blogger_info_json)
@login_required
def search_brand_json(request):
"""
this view returns brand names and urls matching GET query *term*
"""
mongo_utils.track_visit(request)
if request.is_ajax():
brand_term = urllib.unquote(request.GET.get('term'))
brand_term = utils.domain_from_url(brand_term)
if not brand_term:
return HttpResponse()
brands = BrandMentions.objects.filter(brand__blacklisted=False, brand__domain_name__icontains=brand_term)
brands = brands.distinct('brand__name').only(
'brand__name', "brand__domain_name").values('brand__name', "brand__domain_name")
data = [{"name": x["brand__name"], "url": x["brand__domain_name"]} for x in brands[:100]]
data = json.dumps(data, cls=DjangoJSONEncoder)
return HttpResponse(data, content_type="application/json")
return HttpResponse()
@login_required
def autocomplete(request):
"""
this view returns any items matching GET *query*
"""
base_brand = request.visitor["base_brand"]
if not base_brand or not base_brand.is_subscribed:
return HttpResponseForbidden()
endpoint = "/" + ELASTICSEARCH_INDEX + "/_suggest"
url = ELASTICSEARCH_URL
query = {
"text": urllib.unquote(request.GET.get('query')),
"blogger_name": {
"completion": {
"field": "_suggest_name",
"fuzzy": True
}
},
"blog_name": {
"completion": {
"field": "_suggest_blogname",
"fuzzy": True
}
},
"blog_url": {
"completion": {
"field": "_suggest_blogurl",
"fuzzy": True
}
},
"brand_name": {
"completion": {
"field": "_suggest_brandname",
"fuzzy": True
}
},
"brand_url": {
"completion": {
"field": "_suggest_brandurl",
"fuzzy": True
}
}
}
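    # The payload above targets Elasticsearch's legacy _suggest endpoint:
    # each top-level key other than "text" names an independent completion
    # suggester bound to a completion-mapped field, and "fuzzy" lets the
    # suggester absorb small typos in the typed prefix.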
rq = make_es_get_request(
es_url=url + endpoint,
es_query_string=json.dumps(query)
)
options = []
if rq.status_code == 200:
resp = rq.json()
        # Collect the options of each suggester into its UI bucket; the five
        # loops this replaces were identical except for the key names.
        suggester_to_bucket = [
            ("blogger_name", "name"),
            ("blog_name", "blogname"),
            ("blog_url", "blogurl"),
            ("brand_name", "brand"),
            ("brand_url", "brandurl"),
        ]
        options = dict((bucket, []) for _, bucket in suggester_to_bucket)
        for suggester, bucket in suggester_to_bucket:
            for suggestion in resp.get(suggester, []):
                for option in suggestion.get("options", []):
                    options[bucket].append({
                        'value': option["text"],
                        'label': option["text"],
                    })
influencer_q = []
for option in options["name"]:
influencer_q.append(Q(name=option["value"]))
for option in options["blogname"]:
influencer_q.append(Q(blogname=option["value"]))
for option in options["blogurl"]:
influencer_q.append(Q(blog_url=option["value"]))
brand_q = []
for option in options["brand"]:
brand_q.append(Q(name=option["value"]))
for option in options["brandurl"]:
brand_q.append(Q(domain_name=option["value"]))
old_options = options
options = {}
options["name"] = []
options["blogname"] = []
options["blogurl"] = []
options["brand"] = []
options["brandurl"] = []
if influencer_q:
influencers = Influencer.objects
influencers = influencers.prefetch_related('shelf_user__userprofile', 'shelf_user')
influencers = influencers.filter(reduce(lambda a, b: a | b, influencer_q))
influencers = influencers.only('name', 'blogname', 'blog_url', 'profile_pic_url', 'shelf_user')
unique_influencer_labels = set()
for option in old_options["name"]:
option_influencer = None
for influencer in influencers:
if influencer.name == option["value"]:
option_influencer = influencer
break
if not option_influencer:
continue
option["label"] = option_influencer.name
option["photo"] = option_influencer.profile_pic
if option["label"] in unique_influencer_labels:
continue
unique_influencer_labels.add(option["label"])
options["name"].append(option)
for option in old_options["blogname"]:
option_influencer = None
for influencer in influencers:
if influencer.blogname == option["value"]:
option_influencer = influencer
break
if not option_influencer:
continue
option["label"] = option_influencer.name
option["photo"] = option_influencer.profile_pic
if option["label"] in unique_influencer_labels:
continue
unique_influencer_labels.add(option["label"])
options["blogname"].append(option)
for option in old_options["blogurl"]:
option_influencer = None
for influencer in influencers:
if influencer.blog_url and option["value"] in influencer.blog_url:
option_influencer = influencer
break
if not option_influencer:
continue
option["label"] = option_influencer.name
option["photo"] = option_influencer.profile_pic
if option["label"] in unique_influencer_labels:
continue
unique_influencer_labels.add(option["label"])
options["blogurl"].append(option)
else:
options["name"] = old_options["name"]
options["blogname"] = old_options["blogname"]
options["blogurl"] = old_options["blogurl"]
if brand_q:
brands = Brands.objects
brands = brands.filter(reduce(lambda a, b: a | b, brand_q))
brands = brands.only('name', 'domain_name')
unique_brand_labels = set()
for option in old_options["brand"]:
option_brand = None
for brand in brands:
if brand.name == option["value"]:
option_brand = brand
break
if not option_brand:
continue
option["label"] = option_brand.name
if option["label"] in unique_brand_labels:
continue
unique_brand_labels.add(option["label"])
options["brand"].append(option)
for option in old_options["brandurl"]:
option_brand = None
for brand in brands:
if brand.domain_name == option["value"]:
option_brand = brand
break
if not option_brand:
continue
option["label"] = option_brand.name
if option["label"] in unique_brand_labels:
continue
unique_brand_labels.add(option["label"])
options["brandurl"].append(option)
else:
options["brandurl"] = old_options["brandurl"]
options["brand"] = old_options["brand"]
if request.is_ajax():
return HttpResponse(json.dumps(options), content_type="application/json")
else:
return HttpResponse("<body></body>")
@login_required
def autocomplete_with_type(request):
"""
returns items of given *type* for *query*
"""
base_brand = request.visitor["base_brand"]
if not base_brand or not base_brand.is_subscribed:
return HttpResponseForbidden()
endpoint = "/" + ELASTICSEARCH_INDEX + "/_suggest"
url = ELASTICSEARCH_URL
    suggest_type = urllib.unquote(request.GET.get('type'))
    query = {
        "text": urllib.unquote(request.GET.get('query'))
    }
    # Map the requested suggestion type to its completion field; an unknown
    # type simply produces a query without a "suggestions" clause, exactly as
    # the original if/elif chain did.
    completion_fields = {
        "name": "_suggest_name",
        "blogname": "_suggest_blogname",
        "blogurl": "_suggest_blogurl",
        "location": "_suggest_location",
    }
    if suggest_type in completion_fields:
        query["suggestions"] = {
            "completion": {
                "field": completion_fields[suggest_type],
                # "fuzzy": True,
                "size": 10,
            }
        }
if settings.DEBUG:
print json.dumps(query, indent=4)
rq = make_es_get_request(
es_url=url + endpoint,
es_query_string=json.dumps(query)
)
options = []
if rq.status_code == 200:
resp = rq.json()
print(' *** %s' % resp)
options = []
for suggestion in resp.get("suggestions", []):
for option in suggestion.get("options", []):
data = {
'value': unescape(option["text"]),
'label': unescape(option["text"])
}
options.append(data)
if request.is_ajax():
return HttpResponse(json.dumps(options), content_type="application/json")
else:
return HttpResponse("<body></body>")
@login_required
def autocomplete_brand(request):
"""
returns brands names matching *query*
"""
base_brand = request.visitor["base_brand"]
if not base_brand or not base_brand.is_subscribed:
return HttpResponseForbidden()
endpoint = "/" + ELASTICSEARCH_INDEX + "/_suggest"
url = ELASTICSEARCH_URL
query = {
"text": urllib.unquote(request.GET.get('query')),
"suggestions": {
"completion": {
"field": "_suggest_brandname",
"fuzzy": True,
"size": 50,
}
}
}
if settings.DEBUG:
print json.dumps(query, indent=4)
rq = make_es_get_request(
es_url=url + endpoint,
es_query_string=json.dumps(query)
)
options = []
if rq.status_code == 200:
resp = rq.json()
names = []
for suggestion in resp.get("suggestions", []):
for option in suggestion.get("options", []):
names.append(option["text"])
if names:
brands = Brands.objects.filter(name__in=names).only('name', 'domain_name').values('name', 'domain_name')
for brand in brands:
options.append({"text": brand["name"], "value": brand["domain_name"]})
if request.is_ajax():
return HttpResponse(json.dumps(options), content_type="application/json")
else:
return HttpResponse("<body></body>")
def saved_views_tags(request):
brand = request.visitor["base_brand"]
groups = request.visitor["brand"].influencer_groups.exclude(
archived=True
).filter(
creator_brand=brand,
system_collection=False
).prefetch_related(
'influencers_mapping__influencer__shelf_user__userprofile'
)
for group in groups:
group.imgs = []
# for influencer in list(group.influencers.all())[:4]:
for influencer in group.influencers[:NUM_OF_IMAGES_PER_BOX]:
group.imgs.append(influencer.profile_pic)
campaigns = request.visitor["brand"].job_posts.filter(oryg_creator=brand)
context = {
'search_page': True,
'type': 'followed',
'sub_page': 'tags',
'selected_tab': 'tags_and_searches',
'shelf_user': request.user.userprofile,
'groups': groups,
'campaign_list': campaigns,
}
return render(request, 'pages/search/saved_views_tags.html', context)
def post_analytics_collections(request):
brand = request.visitor["base_brand"]
existing = brand.created_post_analytics_collections.filter(
system_collection=False
).exclude(
archived=True
).prefetch_related(
'postanalytics_set__post'
).order_by('name', '-created_date')
if request.is_ajax():
data = [{'value': x.id, 'text': x.name} for x in existing]
data = json.dumps(data, cls=DjangoJSONEncoder)
return HttpResponse(data, content_type="application/json")
else:
context = {
'search_page': True,
'type': 'followed',
'sub_page': 'post_analytics_collections',
'selected_tab': 'competitors',
'shelf_user': request.user.userprofile,
'groups': existing,
}
return render(
request, 'pages/search/post_analytics_collections.html', context)
def edit_post_analytics(request, post_analytics_id):
from debra.serializers import PostAnalyticsTableSerializer
from debra.models import (
InfluencerEditHistory, PlatformDataOp, InfluencerAnalytics)
from debra import admin_helpers
from debra import brand_helpers
if request.method == 'POST':
body = json.loads(request.body)
if body.get('is_blogger_approval'):
del body['is_blogger_approval']
pa = InfluencerAnalytics.objects.get(id=post_analytics_id)
collection = None
else:
pa = PostAnalytics.objects.get(id=post_analytics_id)
collection = pa.collection
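        # Each recognized key below updates the matching influencer/post
        # field and, for post-level edits, records a PlatformDataOp row as an
        # audit trail of customer-made changes.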
for key, value in body.items():
if key == 'blog_name' and pa.influencer.blogname != value:
pa.influencer.blogname = value
pa.influencer.append_validated_on("customer")
InfluencerEditHistory.commit_change(
pa.influencer, 'blogname', value)
pa.influencer.save()
elif key == 'influencer_name' and pa.influencer.name != value:
pa.influencer.name = value
pa.influencer.append_validated_on("customer")
InfluencerEditHistory.commit_change(
pa.influencer, 'name', value)
pa.influencer.save()
elif key == 'post_create_date' and value:
pa.post.create_date = datetime.datetime.strptime(value, '%Y-%m-%d')
pa.post.save()
PlatformDataOp.objects.create(
post=pa.post, operation='customer_updated_createdate')
elif key == 'post_title' and value:
pa.post.title = value
pa.post.save()
PlatformDataOp.objects.create(
post=pa.post, operation="customer_updated_post_title")
elif key == 'post_url' and value and pa.post_url != value:
# pa.post.url = value
# pa.post.save()
# PlatformDataOp.objects.create(
# post=pa.post, operation="customer_updated_post_url")
collection.remove(pa.post_url)
brand_helpers.handle_post_analytics_urls(
[value], refresh=True, collection=collection)
elif key == 'post_num_comments':
pa.post_comments = value
pa.save()
PlatformDataOp.objects.create(
post=pa.post, operation="customer_updated_post_num_comments")
elif key == 'tw_url':
pa.influencer.tw_url = value
pa.influencer.save()
admin_helpers.handle_social_handle_updates(pa.influencer, 'tw_url', value)
elif key == 'insta_url':
pa.influencer.insta_url = value
pa.influencer.save()
admin_helpers.handle_social_handle_updates(pa.influencer, 'insta_url', value)
elif key == 'fb_url':
pa.influencer.fb_url = value
pa.influencer.save()
admin_helpers.handle_social_handle_updates(pa.influencer, 'fb_url', value)
elif key == 'pin_url':
pa.influencer.pin_url = value
pa.influencer.save()
admin_helpers.handle_social_handle_updates(pa.influencer, 'pin_url', value)
elif key == 'youtube_url':
pa.influencer.youtube_url = value
pa.influencer.save()
admin_helpers.handle_social_handle_updates(pa.influencer, 'youtube_url', value)
elif key in ['count_gplus_plusone', 'count_pins', 'count_tweets']:
            setattr(pa, key, value)
pa.save()
PlatformDataOp.objects.create(
post=pa.post, operation='customer_updated_' + key)
elif key in ['count_fb']:
pa.count_fb_shares = value
pa.count_fb_likes = 0
pa.count_fb_comments = 0
pa.save()
PlatformDataOp.objects.create(
post=pa.post, operation='customer_updated_count_fb')
return HttpResponse()
return HttpResponseBadRequest()
def del_post_analytics(request, post_analytics_id):
if request.method == 'POST':
# actual post analytics
pa = PostAnalytics.objects.get(id=post_analytics_id)
# but we should remove all the instances
        # with the same url in a collection
pa.collection.remove(pa.post_url)
return HttpResponse()
return HttpResponseBadRequest()
def refresh_post_analytics_collection(request, collection_id):
collection = PostAnalyticsCollection.objects.get(id=collection_id)
if request.method == 'POST':
if not collection.updated:
            data = {
                'error': """We're still calculating, so this operation
                is not possible at this time. Please try again later
                (we'll send you an email when it's complete)."""
            }
        elif collection.is_updated_recently:
            data = {
                'error': """We calculated the results in the last few hours,
                so please try again tomorrow."""
            }
else:
data = {}
collection.refresh()
data = json.dumps(data, cls=DjangoJSONEncoder)
return HttpResponse(data, content_type='application/json')
return HttpResponseBadRequest()
# ANALYTICS DETAILS
@login_required
def post_analytics_collection(request, collection_id):
from debra.serializers import PostAnalyticsTableSerializer, count_totals
collection = PostAnalyticsCollection.objects.get(id=collection_id)
qs = collection.get_unique_post_analytics().with_counters()
context = search_helpers.generic_reporting_table_context(
request,
queryset=qs,
serializer_class=PostAnalyticsTableSerializer,
total_with_fields=True,
annotation_fields={
'post_num_comments': 'agr_num_comments',
'count_total': 'agr_count_total',
'count_fb': 'agr_count_fb',
}
)
context.update({
'sub_page': 'post_analytics_collections',
'collection': collection,
'table_id': 'post_analytics_collection_table',
})
return render(
request,
'pages/search/post_analytics_collection_details.html', context)
@login_required
def post_analytics_collection_edit(request, collection_id):
from debra.serializers import PostAnalyticsUrlsSerializer
collection = PostAnalyticsCollection.objects.get(id=collection_id)
qs = collection.get_unique_post_analytics()
if request.is_ajax():
data = PostAnalyticsUrlsSerializer(qs, many=True).data
data = json.dumps(data, cls=DjangoJSONEncoder)
return HttpResponse(data, content_type='application/json')
context = {
'collection': collection,
'data_count': qs.count(),
'search_page': True,
'type': 'followed',
'sub_page': 'post_analytics_collections',
'selected_tab': 'competitors',
'shelf_user': request.user.userprofile,
}
return render(
request,
'pages/search/post_analytics_collection_edit.html', context)
@login_required
def post_analytics_collection_create(request):
context = {
'search_page': True,
'type': 'followed',
'sub_page': 'post_analytics_collections',
'selected_tab': 'competitors',
'shelf_user': request.user.userprofile,
}
return render(
request,
'pages/search/post_analytics_collection_create.html', context)
@login_required
def roi_prediction_report_edit(request, report_id):
from debra.serializers import PostAnalyticsUrlsSerializer
report = ROIPredictionReport.objects.get(id=report_id)
collection = report.post_collection
qs = collection.get_unique_post_analytics()
infs = qs.exclude(
post__influencer__isnull=True
).values_list('post__influencer', flat=True).distinct()
if request.is_ajax():
data = PostAnalyticsUrlsSerializer(qs, many=True).data
data = json.dumps(data, cls=DjangoJSONEncoder)
return HttpResponse(data, content_type='application/json')
context = {
'report': report,
'collection': collection,
'data_count': infs.count(),
'search_page': True,
'type': 'followed',
# 'sub_page': 'influencer_stats',
'sub_page': 'roi_prediction_reports',
'selected_tab': 'competitors',
'shelf_user': request.user.userprofile,
'request': request,
}
return render(
request,
'pages/search/roi_prediction_report_edit.html', context)
@login_required
def roi_prediction_report_create(request):
context = {
'search_page': True,
'type': 'followed',
# 'sub_page': 'influencer_stats',
'sub_page': 'roi_prediction_reports',
'selected_tab': 'competitors',
'shelf_user': request.user.userprofile,
'request': request,
}
return render(
request,
'pages/search/roi_prediction_report_create.html', context)
def saved_views_favorites(request):
    # TODO: not implemented; returning None from a Django view raises an
    # error, so this endpoint should not be routed until it is implemented.
    pass
def saved_views_posts(request):
context = {
'search_page': True,
'type': 'followed',
'sub_page': 'saved_posts',
'selected_tab': 'tags_and_searches',
'shelf_user': request.user.userprofile
}
return render(request, 'pages/search/saved_views_posts.html', context)
def saved_views_searches(request):
brand = request.visitor["base_brand"]
saved_queries = brand.saved_queries.exclude(
name__isnull=True
).exclude(
archived=True
)
for query in saved_queries:
if not query.result:
query.num_results = 0
query.imgs = []
continue
result = json.loads(query.result)
query.imgs = [x['pic'] for x in result['results'][:NUM_OF_IMAGES_PER_BOX]]
query.num_results = result['total']
if settings.DEBUG:
print('*** QUERY RESULT ***')
print(result)
context = {
'search_page': True,
'type': 'followed',
'sub_page': 'saved_searches',
'selected_tab': 'tags_and_searches',
'shelf_user': request.user.userprofile,
'saved_queries': saved_queries,
}
return render(request, 'pages/search/saved_views_searches.html', context)
def save_search(request):
if request.method == 'POST':
brand = request.visitor["base_brand"]
try:
query_id = int(request.POST.get('query_id'))
except ValueError:
query_id = None
query_string = request.POST.get('query')
try:
if query_string in [None, 'null']:
raise ValueError
json.loads(query_string)
except ValueError:
            mail_admins(
                "Saving search with incorrect query",
                "query string: {}\n, request: {}".format(query_string, request)
            )
if query_id is not None:
q = brand.saved_queries.get(id=query_id)
q.name = request.POST.get('name')
q.query = request.POST.get('query')
q.result = request.POST.get('result')
q.save()
else:
sq = brand.saved_queries.create(
user=request.user,
name=request.POST.get('name'),
query=request.POST.get('query'),
result=request.POST.get('result')
)
query_id = sq.id
data = json.dumps({'id': query_id}, cls=DjangoJSONEncoder)
return HttpResponse(data, content_type="application/json")
return HttpResponseBadRequest()
@login_required_json
def saved_search_details(request, query_id=None, section=None, **kwargs):
    from debra.models import (
        SearchQueryArchive, Influencer, Posts, ProductModelShelfMap)
    SECTIONS = ['all', 'influencers', 'blog_posts', 'instagrams', 'tweets',
                'pins', 'youtube', 'products', 'facebook']
    POST_SECTIONS = ['all', 'blog_posts', 'instagrams', 'tweets', 'pins',
                     'facebook', 'youtube']
    PRODUCT_SECTIONS = ['products']
    if section not in SECTIONS:
        raise Http404
if query_id:
query = SearchQueryArchive.objects.get(id=query_id)
else:
query = None
count_only = request.GET.get('count_only')
    filter_mapping = {
        'all': 'all',
        'blog_posts': 'blog',
        'instagrams': 'photos',
        'tweets': 'tweets',
        'pins': 'pins',
        'youtube': 'youtube',
        'products': 'products',
        'facebook': 'facebook',
    }
default_posts_mapping = {
'all': 'about_all',
'pins': 'about_pins',
'tweets': 'about_tweets',
'instagrams': 'about_insta',
'youtube': 'about_youtube',
'facebook': 'about_facebook',
}
if request.method == 'POST':
        if section not in POST_SECTIONS and section not in PRODUCT_SECTIONS:
            return HttpResponseBadRequest()
front_end_query = search_helpers.query_from_request(request)
front_end_filters = front_end_query.get('filters')
if query:
result, esquery = query.result_json, search_helpers.query_from_request(request, source=query.query_json)
else:
result, esquery = None, None
if front_end_filters and esquery:
esquery['filters']['time_range'] = front_end_filters.get('time_range')
feed_json = feeds_helpers.get_feed_handler(section)
feed_params = dict(no_cache=True, limit_size=30, count_only=count_only)
# for_influencer=influencer, default_posts="about_pins"
if front_end_query.get('influencer'):
try:
influencer = Influencer.objects.get(
id=int(front_end_query.get('influencer')))
            except (ValueError, Influencer.DoesNotExist):
                raise Http404()
else:
feed_params.update({
'for_influencer': influencer,
'default_posts': default_posts_mapping.get(section, 'about'),
'with_parameters': True,
'parameters': esquery if query_id else front_end_query,
})
else:
feed_params.update({
'with_parameters': True,
'parameters': esquery if query_id else front_end_query,
})
data = feed_json(request, **feed_params)
data = json.dumps(data, cls=DjangoJSONEncoder)
return HttpResponse(data, content_type="application/json")
elif not request.is_ajax():
context = {
'search_page': True,
'type': 'followed',
'sub_page': section,
'selected_tab': 'tags_and_searches',
'shelf_user': request.user.userprofile,
'sections': SECTIONS,
'section': section,
'platform_filter': filter_mapping.get(section),
'query': query
}
return render(request, 'pages/search/saved_search_details_{}.html'.format(
'influencers' if section == 'influencers' else 'posts'), context)
@login_required
def get_saved_searches(request, query_id):
mongo_utils.track_visit(request)
shelf_user = request.user.userprofile
brand = request.visitor["base_brand"]
if not brand:
return redirect('/')
    if brand.stripe_plan not in constants.STRIPE_COLLECTION_PLANS:
return redirect('/')
saved_search = get_object_or_404(SearchQueryArchive,
id=query_id, brand=brand)
data = {
'id': saved_search.id,
'name': saved_search.name,
'query': saved_search.query_json,
'and_or_filter_on': saved_search.query_json.get(
'and_or_filter_on', False)
}
data = json.dumps(data, cls=DjangoJSONEncoder)
return HttpResponse(data, content_type="application/json")
@login_required
def edit_saved_searches(request):
mongo_utils.track_visit(request)
shelf_user = request.user.userprofile
brand = request.visitor["base_brand"]
if not brand:
return redirect('/')
    if brand.stripe_plan not in constants.STRIPE_COLLECTION_PLANS:
return redirect('/')
try:
data = json.loads(request.body)
except ValueError:
return HttpResponseBadRequest()
saved_search = get_object_or_404(SearchQueryArchive,
id=data.get('id'), brand=brand)
if brand.saved_queries.exclude(archived=True).filter(
name=data.get('name')).exists():
return HttpResponseBadRequest(
"Saved Search with such name already exists",
content_type="application/json")
saved_search.name = data.get('name')
saved_search.save()
return HttpResponse()
@login_required
def delete_saved_search(request):
mongo_utils.track_visit(request)
shelf_user = request.user.userprofile
brand = request.visitor["base_brand"]
if not brand:
return redirect('/')
    if brand.stripe_plan not in constants.STRIPE_COLLECTION_PLANS:
return redirect('/')
try:
data = json.loads(request.body)
except ValueError:
return HttpResponseBadRequest()
saved_search = get_object_or_404(SearchQueryArchive,
id=data.get('id'), brand=brand)
mongo_utils.track_query("brand-delete-saved-search", {
'saved_search_name': saved_search.name,
}, {"user_id": request.visitor["auth_user"].id})
account_helpers.intercom_track_event(request, "brand-delete-saved-search", {
'saved_search_name': saved_search.name,
})
saved_search.archived = True
saved_search.save()
return HttpResponse()
def roi_prediction_reports(request):
from aggregate_if import Count
brand = request.visitor["base_brand"]
existing = brand.created_roi_prediction_reports.filter(
post_collection__system_collection=False
).exclude(
archived=True
).prefetch_related(
'post_collection__postanalytics_set__post'
).annotate(
influencers_number=Count(
'post_collection__postanalytics__post__influencer', distinct=True)
).order_by('name', '-created_date')
if request.is_ajax():
data = [{'value': x.id, 'text': x.name} for x in existing]
data = json.dumps(data, cls=DjangoJSONEncoder)
return HttpResponse(data, content_type="application/json")
else:
context = {
'search_page': True,
'type': 'followed',
'sub_page': 'roi_prediction_reports',
'selected_tab': 'competitors',
'shelf_user': request.user.userprofile,
'groups': existing,
}
return render(
request, 'pages/search/roi_prediction_reports.html', context)
# ANALYTICS DETAILS
def roi_prediction_report(request, report_id):
    inf_partial, context = roi_prediction_report_influencer_stats_partial(
        request, report_id)
context['influencer_stats_partial_content'] = inf_partial
return render(
request, 'pages/search/roi_prediction_report_details.html', context)
# ANALYTICS DETAILS
def blogger_approval_report(request, report_id):
from debra.serializers import InfluencerApprovalReportTableSerializer
from debra.helpers import PageSectionSwitcher, name_to_underscore
report = ROIPredictionReport.objects.get(id=report_id)
collection = report.influencer_collection
campaign = report.campaign
if campaign is not None:
        return redirect(
            'debra.job_posts_views.campaign_approval', campaign.id)
    # A report without a campaign cannot have the pre-outreach approval flow
    # enabled (assumption; the original would crash on a None campaign here).
    pre_outreach_enabled = campaign.info_json.get(
        'approval_report_enabled', False) if campaign else False
    if not pre_outreach_enabled:
        raise Http404()
if request.method == 'POST':
if request.GET.get('delete_pending'):
collection.influenceranalytics_set.filter(
approve_status=InfluencerAnalytics.APPROVE_STATUS_PENDING
).delete()
return HttpResponse()
qs = collection.influenceranalytics_set.prefetch_related(
'influencer__platform_set',
'influencer__shelf_user__userprofile',
)
status_counts = dict(Counter(qs.values_list('approve_status', flat=True)))
approve_status = int(request.GET.get(
'approve_status', -1 if status_counts.get(-1, 0) > 0 else 0
))
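    # Default tab: the -1 status bucket when it has rows, otherwise status 0.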
statuses = []
for status, name in InfluencerAnalytics.APPROVE_STATUS[:-1]:
statuses.append({
'value': status,
'count': status_counts.get(status, 0),
'name': name,
            'visible': not (status_counts.get(status, 0) == 0 and status in [-1, 0]),
'class': '{}_approval'.format(name_to_underscore(name)),
})
print '* counts distribution:', status_counts
if approve_status is not None:
qs = qs.filter(
approve_status=approve_status
)
def pre_serialize_processor(paginated_qs):
brand_user_mapping = {
x.influencer_id:x
for x in InfluencerBrandUserMapping.objects.filter(
influencer__in=[p.influencer for p in paginated_qs],
user=request.user
)
}
for p in paginated_qs:
p.agr_brand_user_mapping = brand_user_mapping.get(
p.influencer.id)
if p.agr_brand_user_mapping:
p.agr_notes = p.agr_brand_user_mapping.notes
else:
p.agr_notes = None
context = search_helpers.generic_reporting_table_context(
request,
queryset=qs,
serializer_class=InfluencerApprovalReportTableSerializer,
include_total=False,
pre_serialize_processor=pre_serialize_processor,
        # Note: the trailing conditional governs the whole concatenated list,
        # so no columns are hidden unless the collection has been submitted.
        hidden_fields=(report.info_json.get(
            'blogger_approval_report_columns_hidden', []) +
            ['approve_info', 'remove_info']
        ) if collection.approval_status == collection.APPROVAL_STATUS_SUBMITTED else [],
    )
print context['fields_hidden']
campaign = report.campaign
context.update({
'table_id': 'blogger_approval_report_table',
'sub_page': 'roi_prediction_reports',
'collection': collection,
'approve_status': approve_status,
'report': report,
'brand_id': request.visitor["base_brand"].id,
'user_id': request.user.id,
'statuses': statuses,
'total_count': sum(status_counts.values()),
'public_link': report.get_public_url(request.user),
'status_counts': status_counts,
'campaign': campaign,
})
if campaign is not None:
context.update({
'campaign_switcher': PageSectionSwitcher(
constants.CAMPAIGN_SECTIONS, 'influencer_approval',
url_args=(campaign.id,),
extra_url_args={'influencer_approval': (campaign.report_id,)},
hidden=[] if pre_outreach_enabled else ['influencer_approval'],
),
})
return render(
request, 'pages/search/blogger_approval_report_details.html', context)
from debra.pipeline_views import PublicBloggerApprovalView
blogger_approval_report_public = PublicBloggerApprovalView.as_view()
def approve_report_update(request):
if request.method == 'POST':
data = json.loads(request.body)
brand = Brands.objects.get(id=int(data.get('brand_id')))
if data.get('approve_status'):
for ia_id, status in reversed(data.get('approve_status').items()):
try:
ia = InfluencerAnalytics.objects.get(id=ia_id)
except InfluencerAnalytics.DoesNotExist:
continue
else:
if ia.approve_status != int(status):
ia.approve_status = int(status)
ia.save()
print ia_id, 'status changed to', int(status)
else:
print ia_id, 'status remains', int(status)
if data.get('notes'):
for ia_id, note in reversed(data.get('notes').items()):
try:
ia = InfluencerAnalytics.objects.get(id=ia_id)
except InfluencerAnalytics.DoesNotExist:
continue
else:
if note and ia.notes != note:
ia.notes = note
ia.save()
print ia_id, 'note changed'
else:
print ia_id, 'note remains the same'
return HttpResponse()
else:
return HttpResponseBadRequest()
def public_approval_report_submit(request):
from debra import helpers, mail_proxy
from debra.constants import MAIN_DOMAIN, BLOG_DOMAIN
data = json.loads(request.body)
brand_id = data.get('brand_id')
report_id = data.get('report_id')
user_id = data.get('user_id')
report = ROIPredictionReport.objects.get(id=report_id)
user = User.objects.get(id=user_id)
public_link = report.get_public_url(user)
inner_link = "{}{}".format(MAIN_DOMAIN, reverse(
'debra.search_views.blogger_approval_report',
args=(report_id,))
)
# subject = "{}. Client approval report submitted".format(inner_link)
# body = "".join([
# "<p>Public link: {}</p>",
# "<p>Inner link: {}</p>",
# ]).format(public_link, inner_link)
# helpers.send_admin_email_via_mailsnake(
# subject,
# body,
# ["[email protected]", "[email protected]", "[email protected]"]
# )
rendered_message = render_to_string(
'mailchimp_templates/approval_report_submitted_email.txt', {
'user': user.userprofile,
'campaign': report.campaign,
'blog_domain': BLOG_DOMAIN,
'main_domain': MAIN_DOMAIN,
}
).encode('utf-8')
mandrill_message = {
'html': rendered_message,
'subject': "The Influencer Approval Form for {} has been submitted.".format(report.campaign.title),
'from_email': '[email protected]',
'from_name': 'Lauren',
'to': [{
'email': user.email,
'name': user.userprofile.name if user.userprofile else user.email
}],
}
print mandrill_message
mail_proxy.mailsnake_send(mandrill_message)
report.influencer_collection.approval_status = InfluencerAnalyticsCollection.APPROVAL_STATUS_SUBMITTED
report.influencer_collection.save()
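    # On submit: backfill any unset provisional statuses to PENDING, promote
    # the provisional (tmp) statuses to the real approve_status, then merge
    # the approved candidates into the campaign asynchronously.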
if report.campaign:
report.campaign.influencer_collection.influenceranalytics_set.filter(
tmp_approve_status__isnull=True
).update(
tmp_approve_status=InfluencerAnalytics.APPROVE_STATUS_PENDING
)
report.campaign.influencer_collection.influenceranalytics_set.update(
approve_status=F('tmp_approve_status'))
report.campaign.merge_approved_candidates(celery=True)
return HttpResponse()
def blogger_approval_status_change(request, brand_id, report_id, user_id):
from debra import helpers
from debra import mail_proxy
data = json.loads(request.body)
report = ROIPredictionReport.objects.get(id=report_id)
user = User.objects.get(id=user_id)
collection = report.influencer_collection
if collection.approval_status > int(data.get('status')):
# if False:
# to_list = []
# permissiions = report.creator_brand.related_user_profiles.prefetch_related(
# 'user_profile__user').all()
# for permissiion in permissiions:
# profile = permissiion.user_profile
# if profile and profile.user and profile.user.email:
# to_list.append({
# 'email': profile.user.email,
# 'name': profile.name
# })
to_list = [{
'email': user.email,
'name': user.userprofile.name if user.userprofile else user.email
}]
mandrill_message = {
'html': "One of your clients wants to make more changes on '{}' blogger approval report".format(report.name),
'subject': "More edits for '{}' blogger approval report".format(report.name),
'from_email': '[email protected]',
'from_name': 'Atul',
'to': to_list,
}
print mandrill_message
mail_proxy.mailsnake_send(mandrill_message)
collection.approval_status = int(data.get('status'))
collection.save()
return HttpResponse()
def client_approval_invite_send(request, report_id):
from debra import helpers
from debra import mail_proxy
from debra.brand_helpers import send_approval_report_to_client
report = ROIPredictionReport.objects.get(id=report_id)
data = json.loads(request.body)
send_approval_report_to_client.apply_async(
[report.campaign.id], queue="blogger_approval_report")
# send_approval_report_to_client(report.campaign.id)
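    # The from_email below packs the sender's mailbox prefix, brand id, and
    # user id into a reply.theshelf.com address, presumably so inbound
    # replies can be routed back to the right brand/user thread.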
mandrill_message = {
'html': data.get('body'),
'subject': data.get('subject'),
# 'from_email': request.user.email,
'from_email': '{}_b_{}_id_{}@reply.theshelf.com'.format(
request.user.email.split('@')[0],
request.visitor['base_brand'].id,
request.user.id),
'from_name': request.visitor["user"].name,
'to': data.get('toList', [{
'email': data.get('toEmail'),
'name': data.get('toName')
}]),
}
print mandrill_message
mail_proxy.mailsnake_send(mandrill_message)
report.influencer_collection.approval_status = 1
report.influencer_collection.save()
return HttpResponse()
# ANALYTICS DETAILS
def roi_prediction_report_influencer_stats_partial(request, report_id):
from debra.serializers import InfluencerReportTableSerializer
report = ROIPredictionReport.objects.get(id=report_id)
collection = report.post_collection
qs = collection.get_unique_post_analytics().exclude(
post__influencer__isnull=True
).with_counters()
    # Keep one representative PostAnalytics row per influencer so the report
    # table shows a single line for each blogger.
    group_by_influencers = defaultdict(list)
    for x in qs:
        group_by_influencers[x.post.influencer_id].append(x)
    representative_ids = [x[0].id for x in group_by_influencers.values() if x]
    qs = qs.filter(id__in=representative_ids)
    collection.agr_post_analytics_set = qs
context = search_helpers.generic_reporting_table_context(
request,
queryset=qs,
serializer_class=InfluencerReportTableSerializer,
include_total=False,
serializer_context={
'virality_scores': collection.virality_score_values_for_influencers,
'group_by_influencers': group_by_influencers,
}
)
context.update({
'report': report,
'collection': collection,
'sub_page': 'roi_prediction_reports',
'table_id': 'influencer_roi_prediction_report_table',
})
partial = render_to_string(
'pages/search/roi_prediction_report_influencer_stats_details_partial.html',
context)
return partial, context
# ANALYTICS DETAILS
def roi_prediction_report_influencer_stats(request, report_id):
    partial, context = roi_prediction_report_influencer_stats_partial(
        request, report_id)
context['influencer_stats_partial_content'] = partial
return render(
request,
'pages/search/roi_prediction_report_influencer_stats_details.html',
context
)
def influencer_posts_info(request):
from debra.serializers import InfluencerReportTableSerializer
pa_id = int(request.GET.get('pa_id'))
pa = PostAnalytics.objects.get(id=pa_id)
context = InfluencerReportTableSerializer(context={
'brand': request.visitor["base_brand"],
}).get_posts_info(pa)
partial = render_to_string(
context.get('include_template'), {'data': context})
    return HttpResponse(partial)


# ---- /Slateo-web/Slateo_Admin/urls/adminGradeUrls.py (repo: kumar109-beep/Examination_Panel) ----
from django.urls import path
from ..views.admingradeView import *
urlpatterns = [
path('create-grade' , create_individualGrade, name='individualGrade'),
path('chainedGradeCourses' , chainedGradeCourses, name='chainedGradeCourses'),
path('grade-list' , grade_list, name='grade_list'),
path('edit-grade' , edit_grade, name='edit_grade'),
]


# ---- /Chapters/Unit5probs/findbuckets.py (repo: siddeshbb/searchengine) ----
# Define a procedure, hashtable_get_bucket,
# that takes two inputs - a hashtable, and
# a keyword, and returns the bucket where the
# keyword could occur.
def hashtable_get_bucket(htable, keyword):
    return htable[hash_string(keyword, len(htable))]
def hash_string(keyword,buckets):
out = 0
for s in keyword:
out = (out + ord(s)) % buckets
return out
def make_hashtable(nbuckets):
table = []
for unused in range(0,nbuckets):
table.append([])
return table
table = [[['Francis', 13], ['Ellis', 11]], [], [['Bill', 17],
['Zoe', 14]], [['Coach', 4]], [['Louis', 29], ['Rochelle', 4], ['Nick', 2]]]
print hashtable_get_bucket(table, "Zoe")
#>>> [['Bill', 17], ['Zoe', 14]]
print hashtable_get_bucket(table, "Brick")
#>>> []
print hashtable_get_bucket(table, "Lilith")
#>>> [['Louis', 29], ['Rochelle', 4], ['Nick', 2]]
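
# Worked example: hash_string keeps a running sum of character codes modulo
# the bucket count, which equals the plain total modulo the bucket count:
#   hash_string("Zoe", 5) == (ord('Z') + ord('o') + ord('e')) % 5
#                         == (90 + 111 + 101) % 5 == 302 % 5 == 2
# which is why "Zoe" lands in bucket index 2 in the lookups above.
print hash_string("Zoe", 5)
#>>> 2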


# ---- /pardus/tags/2007.1/programming/libs/tcltk/actions.py (repo: aligulle1/kuller) ----
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import autotools
from pisi.actionsapi import get
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
WorkDir="tk8.5a5/unix"
def setup():
autotools.configure("--with-encoding=utf-8 \
--enable-threads \
--enable-man-compression=gzip \
--enable-man-symlinks \
--enable-64bit \
--with-x \
--enable-xft")
def build():
autotools.make()
def install():
shelltools.system("make DESTDIR=%s install" % get.installDIR())
# Collect private headers, 3rd party apps like Tile depends on this
shelltools.cd("..")
pisitools.dodir("/usr/include/tk-private/generic")
pisitools.dodir("/usr/include/tk-private/unix")
shelltools.copy("unix/*.h","%s/usr/include/tk-private/unix" % get.installDIR())
shelltools.copy("generic/*.h", "%s/usr/include/tk-private/generic" % get.installDIR())
# Remove duplicated headers
pisitools.remove("/usr/include/tk-private/generic/tk.h")
pisitools.remove("/usr/include/tk-private/generic/tkDecls.h")
pisitools.remove("/usr/include/tk-private/generic/tkPlatDecls.h")
# Remove tmp path from tclConfig.sh
pisitools.dosed("%s/usr/lib/tkConfig.sh" % get.installDIR(),"%s/unix" % get.curDIR() ,"/usr/lib/")
pisitools.dosed("%s/usr/lib/tkConfig.sh" % get.installDIR(),"%s" % get.curDIR() ,"/usr/include/tk-private")
pisitools.dosym("/usr/bin/wish8.5","/usr/bin/wish")


# ---- /troposphere_mate/emr.py (repo: MacHu-GWU/troposphere_mate-project, license: MIT) ----
# -*- coding: utf-8 -*-
"""
This code is auto generated from troposphere_mate.code_generator.__init__.py scripts.
"""
import sys
if sys.version_info >= (3, 5):  # pragma: no cover
from typing import Union, List, Any
import troposphere.emr
from troposphere.emr import (
Application as _Application,
AutoScalingPolicy as _AutoScalingPolicy,
BootstrapActionConfig as _BootstrapActionConfig,
CloudWatchAlarmDefinition as _CloudWatchAlarmDefinition,
Configuration as _Configuration,
EbsBlockDeviceConfigs as _EbsBlockDeviceConfigs,
EbsConfiguration as _EbsConfiguration,
HadoopJarStepConfig as _HadoopJarStepConfig,
InstanceFleetConfigProperty as _InstanceFleetConfigProperty,
InstanceFleetProvisioningSpecifications as _InstanceFleetProvisioningSpecifications,
InstanceGroupConfigProperty as _InstanceGroupConfigProperty,
InstanceTypeConfig as _InstanceTypeConfig,
JobFlowInstancesConfig as _JobFlowInstancesConfig,
KerberosAttributes as _KerberosAttributes,
KeyValue as _KeyValue,
PlacementType as _PlacementType,
ScalingAction as _ScalingAction,
ScalingConstraints as _ScalingConstraints,
ScalingRule as _ScalingRule,
ScalingTrigger as _ScalingTrigger,
ScriptBootstrapActionConfig as _ScriptBootstrapActionConfig,
SimpleScalingPolicyConfiguration as _SimpleScalingPolicyConfiguration,
SpotProvisioningSpecification as _SpotProvisioningSpecification,
StepConfig as _StepConfig,
Tags as _Tags,
VolumeSpecification as _VolumeSpecification,
)
from troposphere import Template, AWSHelperFn
from troposphere_mate.core.mate import preprocess_init_kwargs, Mixin
from troposphere_mate.core.sentiel import REQUIRED, NOTHING
class KeyValue(troposphere.emr.KeyValue, Mixin):
def __init__(self,
title=None,
Key=REQUIRED, # type: Union[str, AWSHelperFn]
Value=REQUIRED, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
Key=Key,
Value=Value,
**kwargs
)
super(KeyValue, self).__init__(**processed_kwargs)
class SecurityConfiguration(troposphere.emr.SecurityConfiguration, Mixin):
def __init__(self,
title, # type: str
template=None, # type: Template
validation=True, # type: bool
SecurityConfiguration=REQUIRED, # type: dict
Name=NOTHING, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
template=template,
validation=validation,
SecurityConfiguration=SecurityConfiguration,
Name=Name,
**kwargs
)
super(SecurityConfiguration, self).__init__(**processed_kwargs)
class Application(troposphere.emr.Application, Mixin):
def __init__(self,
title=None,
AdditionalInfo=NOTHING, # type: Any
Args=NOTHING, # type: List[Union[str, AWSHelperFn]]
Name=NOTHING, # type: Union[str, AWSHelperFn]
Version=NOTHING, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
AdditionalInfo=AdditionalInfo,
Args=Args,
Name=Name,
Version=Version,
**kwargs
)
super(Application, self).__init__(**processed_kwargs)
class ScriptBootstrapActionConfig(troposphere.emr.ScriptBootstrapActionConfig, Mixin):
def __init__(self,
title=None,
Path=REQUIRED, # type: Union[str, AWSHelperFn]
Args=NOTHING, # type: List[Union[str, AWSHelperFn]]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
Path=Path,
Args=Args,
**kwargs
)
super(ScriptBootstrapActionConfig, self).__init__(**processed_kwargs)
class BootstrapActionConfig(troposphere.emr.BootstrapActionConfig, Mixin):
def __init__(self,
title=None,
Name=REQUIRED, # type: Union[str, AWSHelperFn]
ScriptBootstrapAction=REQUIRED, # type: _ScriptBootstrapActionConfig
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
Name=Name,
ScriptBootstrapAction=ScriptBootstrapAction,
**kwargs
)
super(BootstrapActionConfig, self).__init__(**processed_kwargs)
class Configuration(troposphere.emr.Configuration, Mixin):
def __init__(self,
title=None,
Classification=NOTHING, # type: Union[str, AWSHelperFn]
ConfigurationProperties=NOTHING, # type: Any
Configurations=NOTHING, # type: List[_Configuration]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
Classification=Classification,
ConfigurationProperties=ConfigurationProperties,
Configurations=Configurations,
**kwargs
)
super(Configuration, self).__init__(**processed_kwargs)
class VolumeSpecification(troposphere.emr.VolumeSpecification, Mixin):
def __init__(self,
title=None,
SizeInGB=REQUIRED, # type: int
VolumeType=REQUIRED, # type: Any
Iops=NOTHING, # type: int
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
SizeInGB=SizeInGB,
VolumeType=VolumeType,
Iops=Iops,
**kwargs
)
super(VolumeSpecification, self).__init__(**processed_kwargs)
class EbsBlockDeviceConfigs(troposphere.emr.EbsBlockDeviceConfigs, Mixin):
def __init__(self,
title=None,
VolumeSpecification=REQUIRED, # type: _VolumeSpecification
VolumesPerInstance=NOTHING, # type: int
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
VolumeSpecification=VolumeSpecification,
VolumesPerInstance=VolumesPerInstance,
**kwargs
)
super(EbsBlockDeviceConfigs, self).__init__(**processed_kwargs)
class EbsConfiguration(troposphere.emr.EbsConfiguration, Mixin):
def __init__(self,
title=None,
EbsBlockDeviceConfigs=NOTHING, # type: List[_EbsBlockDeviceConfigs]
EbsOptimized=NOTHING, # type: bool
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
EbsBlockDeviceConfigs=EbsBlockDeviceConfigs,
EbsOptimized=EbsOptimized,
**kwargs
)
super(EbsConfiguration, self).__init__(**processed_kwargs)
class ScalingConstraints(troposphere.emr.ScalingConstraints, Mixin):
def __init__(self,
title=None,
MinCapacity=REQUIRED, # type: int
MaxCapacity=REQUIRED, # type: int
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
MinCapacity=MinCapacity,
MaxCapacity=MaxCapacity,
**kwargs
)
super(ScalingConstraints, self).__init__(**processed_kwargs)
class CloudWatchAlarmDefinition(troposphere.emr.CloudWatchAlarmDefinition, Mixin):
def __init__(self,
title=None,
ComparisonOperator=REQUIRED, # type: Union[str, AWSHelperFn]
MetricName=REQUIRED, # type: Union[str, AWSHelperFn]
Period=REQUIRED, # type: int
Threshold=REQUIRED, # type: int
Dimensions=NOTHING, # type: List[_KeyValue]
EvaluationPeriods=NOTHING, # type: int
Namespace=NOTHING, # type: Union[str, AWSHelperFn]
Statistic=NOTHING, # type: Union[str, AWSHelperFn]
Unit=NOTHING, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
ComparisonOperator=ComparisonOperator,
MetricName=MetricName,
Period=Period,
Threshold=Threshold,
Dimensions=Dimensions,
EvaluationPeriods=EvaluationPeriods,
Namespace=Namespace,
Statistic=Statistic,
Unit=Unit,
**kwargs
)
super(CloudWatchAlarmDefinition, self).__init__(**processed_kwargs)
class ScalingTrigger(troposphere.emr.ScalingTrigger, Mixin):
def __init__(self,
title=None,
CloudWatchAlarmDefinition=REQUIRED, # type: _CloudWatchAlarmDefinition
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
CloudWatchAlarmDefinition=CloudWatchAlarmDefinition,
**kwargs
)
super(ScalingTrigger, self).__init__(**processed_kwargs)
class SimpleScalingPolicyConfiguration(troposphere.emr.SimpleScalingPolicyConfiguration, Mixin):
def __init__(self,
title=None,
ScalingAdjustment=REQUIRED, # type: Any
AdjustmentType=NOTHING, # type: Union[str, AWSHelperFn]
CoolDown=NOTHING, # type: int
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
ScalingAdjustment=ScalingAdjustment,
AdjustmentType=AdjustmentType,
CoolDown=CoolDown,
**kwargs
)
super(SimpleScalingPolicyConfiguration, self).__init__(**processed_kwargs)
class ScalingAction(troposphere.emr.ScalingAction, Mixin):
def __init__(self,
title=None,
SimpleScalingPolicyConfiguration=REQUIRED, # type: _SimpleScalingPolicyConfiguration
Market=NOTHING, # type: Any
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
SimpleScalingPolicyConfiguration=SimpleScalingPolicyConfiguration,
Market=Market,
**kwargs
)
super(ScalingAction, self).__init__(**processed_kwargs)
class ScalingRule(troposphere.emr.ScalingRule, Mixin):
def __init__(self,
title=None,
Action=REQUIRED, # type: _ScalingAction
Name=REQUIRED, # type: Union[str, AWSHelperFn]
Trigger=REQUIRED, # type: _ScalingTrigger
Description=NOTHING, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
Action=Action,
Name=Name,
Trigger=Trigger,
Description=Description,
**kwargs
)
super(ScalingRule, self).__init__(**processed_kwargs)
class AutoScalingPolicy(troposphere.emr.AutoScalingPolicy, Mixin):
def __init__(self,
title=None,
Constraints=REQUIRED, # type: _ScalingConstraints
Rules=NOTHING, # type: List[_ScalingRule]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
Constraints=Constraints,
Rules=Rules,
**kwargs
)
super(AutoScalingPolicy, self).__init__(**processed_kwargs)
class InstanceGroupConfigProperty(troposphere.emr.InstanceGroupConfigProperty, Mixin):
def __init__(self,
title=None,
InstanceCount=REQUIRED, # type: int
InstanceType=REQUIRED, # type: Union[str, AWSHelperFn]
AutoScalingPolicy=NOTHING, # type: _AutoScalingPolicy
BidPrice=NOTHING, # type: Union[str, AWSHelperFn]
Configurations=NOTHING, # type: List[_Configuration]
EbsConfiguration=NOTHING, # type: _EbsConfiguration
Market=NOTHING, # type: Any
Name=NOTHING, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
InstanceCount=InstanceCount,
InstanceType=InstanceType,
AutoScalingPolicy=AutoScalingPolicy,
BidPrice=BidPrice,
Configurations=Configurations,
EbsConfiguration=EbsConfiguration,
Market=Market,
Name=Name,
**kwargs
)
super(InstanceGroupConfigProperty, self).__init__(**processed_kwargs)
class SpotProvisioningSpecification(troposphere.emr.SpotProvisioningSpecification, Mixin):
def __init__(self,
title=None,
TimeoutAction=REQUIRED, # type: Union[str, AWSHelperFn]
TimeoutDurationMinutes=REQUIRED, # type: int
BlockDurationMinutes=NOTHING, # type: int
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
TimeoutAction=TimeoutAction,
TimeoutDurationMinutes=TimeoutDurationMinutes,
BlockDurationMinutes=BlockDurationMinutes,
**kwargs
)
super(SpotProvisioningSpecification, self).__init__(**processed_kwargs)
class InstanceFleetProvisioningSpecifications(troposphere.emr.InstanceFleetProvisioningSpecifications, Mixin):
def __init__(self,
title=None,
SpotSpecification=REQUIRED, # type: _SpotProvisioningSpecification
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
SpotSpecification=SpotSpecification,
**kwargs
)
super(InstanceFleetProvisioningSpecifications, self).__init__(**processed_kwargs)
class InstanceTypeConfig(troposphere.emr.InstanceTypeConfig, Mixin):
def __init__(self,
title=None,
InstanceType=REQUIRED, # type: Union[str, AWSHelperFn]
BidPrice=NOTHING, # type: Union[str, AWSHelperFn]
BidPriceAsPercentageOfOnDemandPrice=NOTHING, # type: Union[str, AWSHelperFn]
Configurations=NOTHING, # type: List[_Configuration]
EbsConfiguration=NOTHING, # type: _EbsConfiguration
WeightedCapacity=NOTHING, # type: int
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
InstanceType=InstanceType,
BidPrice=BidPrice,
BidPriceAsPercentageOfOnDemandPrice=BidPriceAsPercentageOfOnDemandPrice,
Configurations=Configurations,
EbsConfiguration=EbsConfiguration,
WeightedCapacity=WeightedCapacity,
**kwargs
)
super(InstanceTypeConfig, self).__init__(**processed_kwargs)
class InstanceFleetConfigProperty(troposphere.emr.InstanceFleetConfigProperty, Mixin):
def __init__(self,
title=None,
InstanceTypeConfigs=NOTHING, # type: List[_InstanceTypeConfig]
LaunchSpecifications=NOTHING, # type: _InstanceFleetProvisioningSpecifications
Name=NOTHING, # type: Union[str, AWSHelperFn]
TargetOnDemandCapacity=NOTHING, # type: int
TargetSpotCapacity=NOTHING, # type: int
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
InstanceTypeConfigs=InstanceTypeConfigs,
LaunchSpecifications=LaunchSpecifications,
Name=Name,
TargetOnDemandCapacity=TargetOnDemandCapacity,
TargetSpotCapacity=TargetSpotCapacity,
**kwargs
)
super(InstanceFleetConfigProperty, self).__init__(**processed_kwargs)
class PlacementType(troposphere.emr.PlacementType, Mixin):
def __init__(self,
title=None,
AvailabilityZone=REQUIRED, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
AvailabilityZone=AvailabilityZone,
**kwargs
)
super(PlacementType, self).__init__(**processed_kwargs)
class JobFlowInstancesConfig(troposphere.emr.JobFlowInstancesConfig, Mixin):
def __init__(self,
title=None,
AdditionalMasterSecurityGroups=NOTHING, # type: List[Union[str, AWSHelperFn]]
AdditionalSlaveSecurityGroups=NOTHING, # type: List[Union[str, AWSHelperFn]]
CoreInstanceFleet=NOTHING, # type: _InstanceFleetConfigProperty
CoreInstanceGroup=NOTHING, # type: _InstanceGroupConfigProperty
Ec2KeyName=NOTHING, # type: Union[str, AWSHelperFn]
Ec2SubnetId=NOTHING, # type: Union[str, AWSHelperFn]
Ec2SubnetIds=NOTHING, # type: List[Union[str, AWSHelperFn]]
EmrManagedMasterSecurityGroup=NOTHING, # type: Union[str, AWSHelperFn]
EmrManagedSlaveSecurityGroup=NOTHING, # type: Union[str, AWSHelperFn]
HadoopVersion=NOTHING, # type: Union[str, AWSHelperFn]
KeepJobFlowAliveWhenNoSteps=NOTHING, # type: bool
MasterInstanceFleet=NOTHING, # type: _InstanceFleetConfigProperty
MasterInstanceGroup=NOTHING, # type: _InstanceGroupConfigProperty
Placement=NOTHING, # type: _PlacementType
ServiceAccessSecurityGroup=NOTHING, # type: Union[str, AWSHelperFn]
TerminationProtected=NOTHING, # type: bool
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
AdditionalMasterSecurityGroups=AdditionalMasterSecurityGroups,
AdditionalSlaveSecurityGroups=AdditionalSlaveSecurityGroups,
CoreInstanceFleet=CoreInstanceFleet,
CoreInstanceGroup=CoreInstanceGroup,
Ec2KeyName=Ec2KeyName,
Ec2SubnetId=Ec2SubnetId,
Ec2SubnetIds=Ec2SubnetIds,
EmrManagedMasterSecurityGroup=EmrManagedMasterSecurityGroup,
EmrManagedSlaveSecurityGroup=EmrManagedSlaveSecurityGroup,
HadoopVersion=HadoopVersion,
KeepJobFlowAliveWhenNoSteps=KeepJobFlowAliveWhenNoSteps,
MasterInstanceFleet=MasterInstanceFleet,
MasterInstanceGroup=MasterInstanceGroup,
Placement=Placement,
ServiceAccessSecurityGroup=ServiceAccessSecurityGroup,
TerminationProtected=TerminationProtected,
**kwargs
)
super(JobFlowInstancesConfig, self).__init__(**processed_kwargs)
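

# Illustrative sketch (an addition, not part of the original generated module):
# master and core instance groups composed into a JobFlowInstancesConfig.
# Instance types, counts, and the subnet id are hypothetical placeholders, and
# the InstanceCount/InstanceType parameters are assumed to mirror
# troposphere.emr.InstanceGroupConfigProperty.
def _example_instances_config():  # hypothetical helper, for illustration only
    return JobFlowInstancesConfig(
        MasterInstanceGroup=InstanceGroupConfigProperty(
            InstanceCount=1,
            InstanceType='m4.large',
            Market='ON_DEMAND',
            Name='Master',
        ),
        CoreInstanceGroup=InstanceGroupConfigProperty(
            InstanceCount=2,
            InstanceType='m4.large',
            Market='ON_DEMAND',
            Name='Core',
        ),
        Ec2SubnetId='subnet-12345678',  # placeholder
        KeepJobFlowAliveWhenNoSteps=False,
    )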
class KerberosAttributes(troposphere.emr.KerberosAttributes, Mixin):
def __init__(self,
title=None,
KdcAdminPassword=REQUIRED, # type: Union[str, AWSHelperFn]
Realm=REQUIRED, # type: Union[str, AWSHelperFn]
ADDomainJoinPassword=NOTHING, # type: Union[str, AWSHelperFn]
ADDomainJoinUser=NOTHING, # type: Union[str, AWSHelperFn]
CrossRealmTrustPrincipalPassword=NOTHING, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
KdcAdminPassword=KdcAdminPassword,
Realm=Realm,
ADDomainJoinPassword=ADDomainJoinPassword,
ADDomainJoinUser=ADDomainJoinUser,
CrossRealmTrustPrincipalPassword=CrossRealmTrustPrincipalPassword,
**kwargs
)
        super(KerberosAttributes, self).__init__(**processed_kwargs)


class HadoopJarStepConfig(troposphere.emr.HadoopJarStepConfig, Mixin):
def __init__(self,
title=None,
Jar=REQUIRED, # type: Union[str, AWSHelperFn]
Args=NOTHING, # type: List[Union[str, AWSHelperFn]]
MainClass=NOTHING, # type: Union[str, AWSHelperFn]
StepProperties=NOTHING, # type: List[_KeyValue]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
Jar=Jar,
Args=Args,
MainClass=MainClass,
StepProperties=StepProperties,
**kwargs
)
        super(HadoopJarStepConfig, self).__init__(**processed_kwargs)


class StepConfig(troposphere.emr.StepConfig, Mixin):
def __init__(self,
title=None,
HadoopJarStep=REQUIRED, # type: _HadoopJarStepConfig
Name=REQUIRED, # type: Union[str, AWSHelperFn]
ActionOnFailure=NOTHING, # type: Any
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
HadoopJarStep=HadoopJarStep,
Name=Name,
ActionOnFailure=ActionOnFailure,
**kwargs
)
super(StepConfig, self).__init__(**processed_kwargs)
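

# Illustrative sketch (an addition, not part of the original generated module):
# a StepConfig that runs EMR's command-runner jar. The step name and arguments
# are hypothetical placeholders.
def _example_step_config():  # hypothetical helper, for illustration only
    return StepConfig(
        Name='setup-hadoop-debugging',
        ActionOnFailure='CONTINUE',  # e.g. CONTINUE or TERMINATE_CLUSTER
        HadoopJarStep=HadoopJarStepConfig(
            Jar='command-runner.jar',
            Args=['state-pusher-script'],
        ),
    )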
class Cluster(troposphere.emr.Cluster, Mixin):
def __init__(self,
title, # type: str
template=None, # type: Template
validation=True, # type: bool
Instances=REQUIRED, # type: _JobFlowInstancesConfig
JobFlowRole=REQUIRED, # type: Union[str, AWSHelperFn]
Name=REQUIRED, # type: Union[str, AWSHelperFn]
ServiceRole=REQUIRED, # type: Union[str, AWSHelperFn]
AdditionalInfo=NOTHING, # type: dict
Applications=NOTHING, # type: List[_Application]
AutoScalingRole=NOTHING, # type: Union[str, AWSHelperFn]
BootstrapActions=NOTHING, # type: List[_BootstrapActionConfig]
Configurations=NOTHING, # type: List[_Configuration]
CustomAmiId=NOTHING, # type: Union[str, AWSHelperFn]
EbsRootVolumeSize=NOTHING, # type: int
KerberosAttributes=NOTHING, # type: _KerberosAttributes
LogUri=NOTHING, # type: Union[str, AWSHelperFn]
ReleaseLabel=NOTHING, # type: Union[str, AWSHelperFn]
ScaleDownBehavior=NOTHING, # type: Union[str, AWSHelperFn]
SecurityConfiguration=NOTHING, # type: Union[str, AWSHelperFn]
Steps=NOTHING, # type: List[_StepConfig]
Tags=NOTHING, # type: Union[_Tags, list]
VisibleToAllUsers=NOTHING, # type: bool
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
template=template,
validation=validation,
Instances=Instances,
JobFlowRole=JobFlowRole,
Name=Name,
ServiceRole=ServiceRole,
AdditionalInfo=AdditionalInfo,
Applications=Applications,
AutoScalingRole=AutoScalingRole,
BootstrapActions=BootstrapActions,
Configurations=Configurations,
CustomAmiId=CustomAmiId,
EbsRootVolumeSize=EbsRootVolumeSize,
KerberosAttributes=KerberosAttributes,
LogUri=LogUri,
ReleaseLabel=ReleaseLabel,
ScaleDownBehavior=ScaleDownBehavior,
SecurityConfiguration=SecurityConfiguration,
Steps=Steps,
Tags=Tags,
VisibleToAllUsers=VisibleToAllUsers,
**kwargs
)
super(Cluster, self).__init__(**processed_kwargs)
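

# Illustrative sketch (an addition, not part of the original generated module):
# a minimal Cluster resource wired from the instance-config helper sketched
# earlier. The role names are the stock EMR defaults; the cluster name,
# release label, and `template` argument are hypothetical placeholders.
def _example_cluster(template):  # hypothetical helper, for illustration only
    return Cluster(
        'DemoEmrCluster',  # logical resource id (title)
        template=template,
        Name='demo-cluster',
        ReleaseLabel='emr-5.30.0',
        JobFlowRole='EMR_EC2_DefaultRole',
        ServiceRole='EMR_DefaultRole',
        Instances=_example_instances_config(),
        VisibleToAllUsers=True,
    )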
class InstanceFleetConfig(troposphere.emr.InstanceFleetConfig, Mixin):
def __init__(self,
title, # type: str
template=None, # type: Template
validation=True, # type: bool
ClusterId=REQUIRED, # type: Union[str, AWSHelperFn]
InstanceFleetType=REQUIRED, # type: Union[str, AWSHelperFn]
InstanceTypeConfigs=NOTHING, # type: List[_InstanceTypeConfig]
LaunchSpecifications=NOTHING, # type: _InstanceFleetProvisioningSpecifications
Name=NOTHING, # type: Union[str, AWSHelperFn]
TargetOnDemandCapacity=NOTHING, # type: int
TargetSpotCapacity=NOTHING, # type: int
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
template=template,
validation=validation,
ClusterId=ClusterId,
InstanceFleetType=InstanceFleetType,
InstanceTypeConfigs=InstanceTypeConfigs,
LaunchSpecifications=LaunchSpecifications,
Name=Name,
TargetOnDemandCapacity=TargetOnDemandCapacity,
TargetSpotCapacity=TargetSpotCapacity,
**kwargs
)
        super(InstanceFleetConfig, self).__init__(**processed_kwargs)


class InstanceGroupConfig(troposphere.emr.InstanceGroupConfig, Mixin):
def __init__(self,
title, # type: str
template=None, # type: Template
validation=True, # type: bool
InstanceCount=REQUIRED, # type: int
InstanceRole=REQUIRED, # type: Union[str, AWSHelperFn]
InstanceType=REQUIRED, # type: Union[str, AWSHelperFn]
JobFlowId=REQUIRED, # type: Union[str, AWSHelperFn]
AutoScalingPolicy=NOTHING, # type: _AutoScalingPolicy
BidPrice=NOTHING, # type: Union[str, AWSHelperFn]
Configurations=NOTHING, # type: List[_Configuration]
EbsConfiguration=NOTHING, # type: _EbsConfiguration
Market=NOTHING, # type: Any
Name=NOTHING, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
template=template,
validation=validation,
InstanceCount=InstanceCount,
InstanceRole=InstanceRole,
InstanceType=InstanceType,
JobFlowId=JobFlowId,
AutoScalingPolicy=AutoScalingPolicy,
BidPrice=BidPrice,
Configurations=Configurations,
EbsConfiguration=EbsConfiguration,
Market=Market,
Name=Name,
**kwargs
)
        super(InstanceGroupConfig, self).__init__(**processed_kwargs)


class Step(troposphere.emr.Step, Mixin):
def __init__(self,
title, # type: str
template=None, # type: Template
validation=True, # type: bool
ActionOnFailure=REQUIRED, # type: Any
HadoopJarStep=REQUIRED, # type: _HadoopJarStepConfig
JobFlowId=REQUIRED, # type: Union[str, AWSHelperFn]
Name=REQUIRED, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
template=template,
validation=validation,
ActionOnFailure=ActionOnFailure,
HadoopJarStep=HadoopJarStep,
JobFlowId=JobFlowId,
Name=Name,
**kwargs
)
super(Step, self).__init__(**processed_kwargs)
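

# Illustrative sketch (an addition, not part of the original generated module):
# a standalone Step resource submitted against an existing cluster. The
# JobFlowId and the spark-submit arguments are hypothetical placeholders; in a
# real template the id would usually come from a Ref to the Cluster resource.
def _example_step(template):  # hypothetical helper, for illustration only
    return Step(
        'DemoStep',
        template=template,
        Name='demo-step',
        ActionOnFailure='CONTINUE',
        JobFlowId='j-XXXXXXXXXXXXX',  # placeholder cluster id
        HadoopJarStep=HadoopJarStepConfig(
            Jar='command-runner.jar',
            Args=['spark-submit', '--deploy-mode', 'cluster', 's3://bucket/app.py'],
        ),
    )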
# ---------------------------------------------------------------------------
# File: bandmaker/bandmaker/urls.py
# Repo: ChibaUnppluged/BandMaker-ogi (Python, UTF-8, no license)
# ---------------------------------------------------------------------------
# coding:utf-8
"""bandmaker URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin

urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^accounts/', include('accounts.urls')),
    url(r'^mypage/', include('mypage.urls')),
    url(r'^userlist/', include('userlist.urls')),
    url(r'^fileupload/', include('fileupload.urls')),
]
# ---------------------------------------------------------------------------
# File: Desert/GoldenMirage.py
# Repo: kiwiapple87/CodeCombat-1 (Python, UTF-8, no license)
# ---------------------------------------------------------------------------
# https://codecombat.com/play/level/golden-mirage
# Collect 7 real coins.
# Real coins have a unique value in each group of coins.
# If a coin has the same value as another coin in the group, then it's a fake.
# Fake coins will transform into venomous creatures to hurt the player!
def find_real_coin(coins):
    # A coin is real only when its value appears exactly once in the group.
    for coin1 in coins:
        count = 0
        for coin2 in coins:
            if coin1.value == coin2.value:
                count += 1
        if count == 1:
            return coin1
    return None  # every visible value is duplicated, so every coin is fake

while True:
    coins = self.findItems()
    if coins and len(coins):
        # The following code will help you debug:
        coin = find_real_coin(coins)
        if coin:  # guard: find_real_coin() returns None when no coin is real
            self.say(coin.value)
            self.moveXY(coin.pos.x, coin.pos.y)
# When ready, delete the previous code and solve.
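
# Alternative sketch (an addition, not part of the original level code): the
# same uniqueness test in a single counting pass with a plain dict, instead of
# the O(n^2) nested loop above. Plain Python only, since the CodeCombat
# runtime may not expose the full standard library.
def find_real_coin_fast(coins):
    counts = {}
    for c in coins:
        counts[c.value] = counts.get(c.value, 0) + 1
    for c in coins:
        if counts[c.value] == 1:
            return c  # unique value, so this coin is real
    return None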
# ---------------------------------------------------------------------------
# File: exam/1_three-dimensional_atomic_system/dump/phasetrans/temp152_6000.py
# Repo: scheuclu/atom_class (UTF-8, no license; tagged Python, but the content
# is a LAMMPS-style text dump of atom coordinates)
# ---------------------------------------------------------------------------
ITEM: TIMESTEP
6000
ITEM: NUMBER OF ATOMS
2048
ITEM: BOX BOUNDS pp pp pp
-2.5726909140137622e+00 4.9772690914007427e+01
-2.5726909140137622e+00 4.9772690914007427e+01
-2.5726909140137622e+00 4.9772690914007427e+01
ITEM: ATOMS id type xs ys zs
1452 1 0.0679786 0.118804 0.0641003
1030 1 0.142102 0.194386 0.166033
1678 1 0.475409 0.0260847 0.125235
713 1 0.122508 0.0723027 0.0554368
1540 1 0.0339797 0.194481 0.0630296
947 1 0.0419305 0.0839625 0.149234
1047 1 0.0792433 0.0182886 0.11606
538 1 0.0509577 0.298082 0.401764
1323 1 0.304533 0.737371 0.107246
340 1 0.10577 0.177405 0.0330761
396 1 0.285847 0.224836 0.0833281
1465 1 0.182899 0.200705 0.0469069
927 1 0.176531 0.123433 0.177294
739 1 0.348897 0.186594 0.092119
699 1 0.281981 0.0886665 0.0159159
1882 1 0.148379 0.151265 0.101777
1699 1 0.169574 0.130373 0.0392097
730 1 0.441018 0.505453 -0.00112544
302 1 0.40068 0.149013 0.162528
1600 1 0.350938 0.0973099 0.0943016
793 1 0.206036 0.0642489 0.205496
295 1 0.404839 0.0735347 0.146308
815 1 0.466502 0.00355565 0.194433
1490 1 0.379276 0.996111 0.0109819
1375 1 0.378895 0.964216 0.442821
804 1 0.29887 0.94776 0.00358565
961 1 0.115301 0.322111 0.0912442
1220 1 0.056716 0.30712 0.130669
1840 1 0.0661846 0.275541 0.0470648
1437 1 0.377987 0.608569 0.0682874
1972 1 0.0711956 0.219388 0.124368
170 1 0.0971767 0.391595 0.249691
930 1 0.100363 0.346465 0.184784
1 1 0.4352 0.650494 0.476417
1988 1 0.208922 0.798526 0.0155257
1387 1 0.064599 0.376386 0.106682
497 1 0.203797 0.25965 0.0914506
1230 1 0.248777 0.17233 0.232963
1615 1 0.197936 0.252721 0.163668
639 1 0.230579 0.266358 0.228527
564 1 0.343031 0.274511 0.0554367
506 1 0.00878439 0.0540763 0.386424
1407 1 0.352909 0.138903 0.0299987
2016 1 0.385351 0.313143 0.100429
1103 1 0.129482 0.244046 0.0802081
1444 1 0.264085 0.156545 0.0428052
1748 1 0.325106 0.125002 0.181291
227 1 0.300371 0.193304 0.147206
1715 1 0.293409 0.266216 0.457555
374 1 0.173895 0.591083 0.0103423
1556 1 0.397985 0.189033 0.0319115
720 1 0.472131 0.274313 0.225173
1751 1 0.377948 0.872572 0.432444
118 1 0.399688 0.23682 0.107347
5 1 0.405551 0.0464095 0.0796547
399 1 0.11353 0.774355 0.46691
48 1 0.0958454 0.491768 0.495104
1359 1 0.133118 0.307595 0.0156448
311 1 0.272277 0.413448 0.0154659
1931 1 0.136597 0.384798 0.058647
806 1 0.0177728 0.265107 0.341781
945 1 0.0681868 0.419238 0.0458286
333 1 0.481014 0.268461 0.380583
637 1 0.263202 0.287467 0.14975
1569 1 0.136091 0.385237 0.13147
29 1 0.285959 0.406278 0.169438
445 1 0.287123 0.324849 0.0858592
1108 1 0.210585 0.32909 0.0617882
1402 1 0.198462 0.341746 0.147432
1520 1 0.350245 0.382065 0.120158
390 1 0.413714 0.476683 0.178393
992 1 0.461492 0.245724 0.140887
1294 1 0.472422 0.320299 0.114956
1768 1 0.407072 0.373825 0.0598611
1937 1 0.436137 0.35853 0.173173
523 1 0.439731 0.44624 0.102374
1046 1 0.495224 0.206813 0.422199
1086 1 0.0329302 0.965224 0.165802
1194 1 0.470169 0.151969 0.204946
1592 1 0.450716 0.379962 0.00284623
595 1 0.243412 0.909781 0.491846
1158 1 0.0490584 0.442221 0.109106
1788 1 0.183272 0.294204 0.493355
1733 1 0.0561841 0.504575 0.0567462
1949 1 0.13056 0.663662 0.158763
1477 1 0.481845 0.936029 0.148703
1196 1 0.0623765 0.501528 0.146073
1940 1 0.0459518 0.677295 0.475831
1155 1 0.480435 0.300655 0.451865
916 1 0.131702 0.490769 0.100703
1651 1 0.335062 0.468754 0.150981
813 1 0.259882 0.611954 0.127243
1878 1 0.275071 0.514609 0.0716287
802 1 0.290898 0.432506 0.0968674
1793 1 0.182237 0.446092 0.0706185
1152 1 0.350701 0.745425 0.0317018
621 1 0.200046 0.498741 0.121527
211 1 0.190162 0.510127 0.0259527
274 1 0.343751 0.485124 0.0648898
840 1 0.431597 0.424232 0.224852
498 1 0.418618 0.541557 0.215448
1803 1 0.420508 0.802569 0.430863
1266 1 0.499643 0.580287 0.100626
1497 1 0.336111 0.348584 0.0246239
624 1 0.101321 0.565683 0.155582
926 1 0.208538 0.563401 0.0791887
1401 1 0.0434187 0.571634 0.0757136
462 1 0.163653 0.696088 0.0354824
1758 1 0.0405914 0.62347 0.130986
408 1 0.0711224 0.632792 0.196526
563 1 0.13467 0.524918 0.210547
598 1 0.0766909 0.703124 0.178027
1744 1 0.169763 0.718938 0.137542
1924 1 0.418621 0.0610387 0.454715
1063 1 0.298717 0.573325 0.177609
1039 1 0.232136 0.688097 0.140947
1879 1 0.159745 0.611962 0.194368
342 1 0.229206 0.643699 0.0595958
1895 1 0.0467466 0.0396711 0.476212
1501 1 0.341489 0.559579 0.101868
1607 1 0.407358 0.668649 0.125411
1942 1 0.486522 0.49661 0.206773
1280 1 0.419732 0.537999 0.124268
751 1 0.318229 0.53342 0.0135341
597 1 0.347874 0.667088 0.191327
66 1 0.450914 0.615939 0.014938
1709 1 0.0190116 0.55139 0.346061
601 1 0.0147579 0.754092 0.137611
1928 1 0.105502 0.85784 0.100193
76 1 0.17767 0.67926 0.21539
1403 1 0.0792722 0.70011 0.0921339
1027 1 0.136202 0.740532 0.208147
25 1 0.035408 0.817034 0.179641
599 1 0.0336002 0.802489 0.0452146
272 1 0.247882 0.895106 0.13134
1289 1 0.187822 0.865544 0.0705306
1969 1 0.253677 0.788121 0.142501
669 1 0.0859558 0.775419 0.130923
1956 1 0.32298 0.826655 0.109895
134 1 0.191201 0.781519 0.087699
215 1 0.28535 0.601607 0.0589382
949 1 0.262021 0.719737 0.223508
1842 1 0.312849 0.758499 0.18151
1150 1 0.250021 0.739476 0.0271153
771 1 0.224807 0.929407 0.0743913
620 1 0.38411 0.74467 0.12666
1051 1 0.0431958 0.0382307 0.27641
1656 1 0.397806 0.782379 0.202265
80 1 0.283167 0.677057 0.0818621
191 1 0.429036 0.709099 0.0586853
1292 1 0.412533 0.818379 0.1114
1487 1 0.00606093 0.366791 0.146867
1495 1 0.490305 0.906668 0.0763128
933 1 0.286156 0.0420824 0.0849532
347 1 0.164192 0.00367568 0.0743641
1918 1 0.164257 0.927813 0.113607
954 1 0.112751 0.938677 0.0498585
777 1 0.494216 0.792407 0.440918
1310 1 0.0858021 0.939789 0.118444
141 1 0.130064 0.46226 0.0299408
130 1 0.252689 0.0738391 0.143093
859 1 0.489174 0.844363 0.12702
768 1 0.283949 0.841806 0.200968
566 1 0.180618 0.0459833 0.135004
419 1 0.238716 0.990842 0.132125
210 1 0.0206415 0.519919 0.432424
585 1 0.371146 0.809476 0.053478
1620 1 0.197901 0.925265 0.17887
1829 1 0.351729 0.996523 0.0931783
391 1 0.33594 0.0180112 0.170126
1970 1 0.467021 0.796307 0.0723448
1657 1 0.275867 0.342937 0.495564
653 1 0.427341 0.962564 0.0815682
32 1 0.25142 0.270153 0.0286909
1005 1 0.00302914 0.684399 0.11861
711 1 0.414369 0.890751 0.102416
829 1 0.463529 0.0193358 0.0410474
1105 1 0.419667 0.91206 0.174327
1596 1 0.238677 0.879547 0.0210886
628 1 0.449439 0.894359 0.437005
1116 1 0.0408916 0.153608 0.133774
1838 1 0.104238 0.991766 0.246754
1229 1 0.11664 0.0651951 0.21182
643 1 0.113228 0.120613 0.324908
1305 1 0.137042 0.182611 0.36463
1767 1 0.0579381 0.104065 0.222373
1973 1 0.108832 0.123048 0.153134
1737 1 0.385685 0.12802 0.228266
756 1 0.285002 0.0577038 0.296357
528 1 0.219643 0.192226 0.129765
1412 1 0.131489 0.0611579 0.281751
225 1 0.249863 0.0411246 0.444317
1291 1 0.291781 0.114864 0.360169
1429 1 0.165971 0.173699 0.295226
1663 1 0.143245 0.0669231 0.362307
1028 1 0.251304 0.124519 0.286456
544 1 0.355047 0.10819 0.402916
1914 1 0.353525 0.0579145 0.249124
1587 1 0.35152 0.0677921 0.33356
2031 1 0.442331 0.937203 0.307732
737 1 0.277702 0.0749566 0.21288
1187 1 0.393833 0.977094 0.150944
1897 1 0.406123 0.00147202 0.242879
1611 1 0.417429 0.078588 0.30275
335 1 0.106184 0.905224 0.484123
543 1 0.0826094 0.00222404 0.0336506
1435 1 0.105804 0.23133 0.225198
1677 1 0.0567764 0.240897 0.278891
1256 1 0.125028 0.140796 0.237667
965 1 0.0953373 0.187904 0.301653
1376 1 0.121592 0.262933 0.157564
589 1 0.127618 0.248742 0.298374
658 1 0.304905 0.260632 0.217268
526 1 0.176121 0.211998 0.232579
810 1 0.323536 0.119192 0.263504
307 1 0.257267 0.249121 0.296149
964 1 0.195713 0.241627 0.344267
1352 1 0.15245 0.365465 0.284786
1871 1 0.314366 0.176768 0.318276
694 1 0.199322 0.151549 0.355435
275 1 0.365563 0.238138 0.256651
821 1 0.332496 0.273655 0.125742
1681 1 0.454152 0.071734 0.220577
1661 1 0.422334 0.221295 0.18524
1139 1 0.440831 0.292501 0.0535361
1222 1 0.497022 0.951988 0.365132
257 1 0.0197613 0.343265 0.282032
1954 1 0.422922 0.281222 0.287352
1753 1 0.0136992 0.875301 0.0466452
931 1 0.069347 0.412568 0.175671
1771 1 0.129338 0.549271 0.0583798
710 1 0.0194481 0.407954 0.352435
1308 1 0.199822 0.422891 0.31817
1542 1 0.099815 0.427487 0.322356
1870 1 0.000905802 0.9099 0.450906
1057 1 0.256598 0.342677 0.208152
266 1 0.255093 0.335977 0.307278
2020 1 0.326984 0.342614 0.196962
1904 1 0.188334 0.360736 0.221261
1686 1 0.190284 0.301875 0.304945
1078 1 0.238926 0.380172 0.380992
719 1 0.279356 0.301968 0.365177
1804 1 0.209126 0.412948 0.144488
753 1 0.384261 0.277185 0.34887
605 1 0.412251 0.34063 0.24437
1252 1 0.339834 0.316112 0.270156
1703 1 0.376383 0.285938 0.18832
2009 1 0.463929 0.124946 0.48516
1440 1 0.399427 0.404051 0.480608
15 1 0.273687 0.410856 0.322432
415 1 0.403364 0.376516 0.313027
1959 1 0.0166349 0.919878 0.361139
74 1 0.0368518 0.377066 0.423398
1107 1 0.126071 0.626924 0.492571
1276 1 0.458538 0.789094 0.266896
1036 1 0.105537 0.385577 0.394765
1010 1 0.205623 0.714699 0.470459
1534 1 0.325935 0.537901 0.447044
1360 1 0.137576 0.461212 0.176453
1279 1 0.313094 0.464249 0.221954
1098 1 0.199986 0.570004 0.250774
1099 1 0.243852 0.422384 0.251944
2027 1 0.233242 0.47946 0.19018
736 1 0.155634 0.436882 0.254703
153 1 0.0276878 0.421682 0.282189
124 1 0.0897208 0.520469 0.404012
646 1 0.170168 0.480996 0.372648
1886 1 0.328115 0.355103 0.33945
1119 1 0.275136 0.523917 0.232354
89 1 0.368421 0.39838 0.250751
1185 1 0.248762 0.54362 0.303116
309 1 0.371537 0.467219 0.269779
545 1 0.327974 0.212258 0.0127422
940 1 0.431884 0.522848 0.299412
1325 1 0.396792 0.472113 0.365948
1071 1 0.296404 0.4917 0.296817
1183 1 0.088136 0.529254 0.328235
1233 1 0.062805 0.484185 0.248723
1718 1 0.117298 0.586344 0.257844
16 1 0.358732 0.534428 0.182404
1754 1 0.218545 0.557885 0.166734
1114 1 0.237246 0.682477 0.413922
1163 1 0.238951 0.595559 0.381778
477 1 0.187492 0.592806 0.323204
1812 1 0.135884 0.654785 0.323556
1739 1 0.185105 0.617178 0.125791
1301 1 0.295089 0.680795 0.272978
1947 1 0.246751 0.647098 0.310943
1286 1 0.306577 0.604063 0.340022
128 1 0.424773 0.635785 0.19653
1197 1 0.358561 0.652804 0.265114
492 1 0.28551 0.602991 0.270845
243 1 0.411965 0.749111 0.348924
397 1 0.376091 0.602949 0.144336
1576 1 0.118923 0.622624 0.0967765
1493 1 0.437562 0.601699 0.260299
1113 1 0.318674 0.872264 0.313944
839 1 0.00177234 0.446413 0.0462862
1929 1 0.061644 0.63754 0.265812
1795 1 0.210136 0.763003 0.264059
1946 1 0.12618 0.843459 0.211016
913 1 0.124271 0.689814 0.260655
452 1 0.262803 0.636104 0.204943
1126 1 0.29416 0.764529 0.280273
242 1 0.193844 0.698782 0.298348
203 1 0.059505 0.782494 0.237231
1485 1 0.254185 0.741444 0.336467
1557 1 0.267883 0.844569 0.267258
1987 1 0.202698 0.83223 0.23051
1548 1 0.337129 0.813059 0.238347
136 1 0.251938 0.755206 0.413248
1312 1 0.36615 0.720672 0.244689
198 1 0.373953 0.821537 0.376256
852 1 0.338251 0.913022 0.104502
1148 1 0.475884 0.717549 0.325559
1450 1 0.422159 0.850464 0.242089
944 1 0.377187 0.800342 0.300016
4 1 0.456079 0.749833 0.136242
1910 1 0.00435848 0.0282384 0.0852113
147 1 0.193233 0.826716 0.154289
1144 1 0.0375427 0.898109 0.170195
1242 1 0.0545641 0.0283103 0.201109
1120 1 0.264917 0.821624 0.0588344
167 1 0.384621 0.668811 0.0199348
507 1 0.207512 0.760773 0.190462
1635 1 0.118058 0.916469 0.179355
458 1 0.0783831 0.910644 0.237116
1692 1 0.135368 0.993546 0.162818
1344 1 0.18834 0.976381 0.257945
820 1 0.125839 0.98408 0.323844
500 1 0.336336 0.930015 0.174951
527 1 0.260494 0.00892074 0.221949
1261 1 0.180756 0.107749 0.257276
1106 1 0.15527 0.992463 0.386768
1044 1 0.234955 0.0139308 0.333193
666 1 0.155527 0.879947 0.270318
1001 1 0.315441 0.968354 0.241193
1939 1 0.296627 0.94209 0.31424
1077 1 0.302682 0.834245 0.385916
254 1 0.252697 0.882441 0.355435
111 1 0.36893 0.977181 0.353824
2028 1 0.369113 0.90303 0.365339
1782 1 0.274086 0.916481 0.203817
14 1 0.0514312 0.634728 0.0321686
1371 1 0.47026 0.937036 0.225586
1380 1 0.43044 0.868748 0.338442
1026 1 0.366197 0.863404 0.173051
1025 1 0.0300254 0.980217 0.418831
355 1 0.078161 0.0569307 0.336132
30 1 0.0581957 0.147577 0.374896
332 1 0.0765605 0.0781412 0.411226
579 1 0.034246 0.999096 0.344245
270 1 0.136817 0.128313 0.41601
667 1 0.101529 0.990392 0.428087
794 1 0.262773 0.485161 0.441801
146 1 0.273189 0.0929633 0.491041
474 1 0.00852718 0.374656 0.2204
104 1 0.0338654 0.550356 0.270347
1975 1 0.329002 0.0523567 0.449506
1212 1 0.244994 0.112869 0.420426
1708 1 0.337902 0.147023 0.465992
1506 1 0.164554 0.130461 0.478888
1599 1 0.387434 0.0327147 0.398345
1528 1 0.428618 0.137489 0.375857
1475 1 0.413523 0.169066 0.449157
1346 1 0.346963 0.997969 0.497035
765 1 0.461148 0.1286 0.291273
343 1 0.182495 0.942199 0.0090299
1173 1 0.382641 0.895125 0.0435948
1388 1 0.438457 0.0115704 0.340504
1207 1 0.0149646 0.180693 0.333715
617 1 0.402414 0.442509 0.0411436
1357 1 0.0976789 0.322402 0.306141
1672 1 0.0852481 0.25167 0.355797
557 1 0.149728 0.30077 0.23926
570 1 0.0369564 0.110489 0.304769
232 1 0.285926 0.96644 0.0840768
386 1 0.0395479 0.221807 0.449272
524 1 0.0480745 0.586607 0.460639
1643 1 0.040882 0.838806 0.112986
1683 1 0.245642 0.561362 0.0130655
319 1 0.145723 0.293389 0.366537
2038 1 0.198695 0.178595 0.435731
1841 1 0.21898 0.0835302 0.334555
442 1 0.349296 0.250331 0.407783
843 1 0.100339 0.167343 0.469364
938 1 0.417049 0.247754 0.480808
1851 1 0.386217 0.14559 0.307975
1124 1 0.418437 0.229868 0.403759
246 1 0.0113004 0.119379 0.434866
1893 1 0.438909 0.331204 0.369191
1912 1 0.483744 0.0151739 0.476996
878 1 0.287313 0.184532 0.388739
616 1 0.36169 0.179308 0.38913
177 1 0.394388 0.316269 0.43433
1288 1 0.11221 0.691115 0.4514
1316 1 0.0776637 0.30594 0.24369
303 1 0.00618519 0.290497 0.475356
115 1 0.295107 0.348772 0.420073
473 1 0.126685 0.420663 0.446761
1705 1 0.436637 0.557669 0.0553739
1670 1 0.0318225 0.180006 0.234727
1854 1 0.468779 0.16703 0.0404406
196 1 0.343569 0.356138 0.472463
1209 1 0.220561 0.251492 0.419058
409 1 0.276616 0.409886 0.454875
1433 1 0.350911 0.425551 0.327815
535 1 0.374808 0.376453 0.38752
998 1 0.453809 0.820321 0.189677
1298 1 0.0612851 0.863202 0.310041
1031 1 0.471112 0.700299 0.419747
1986 1 0.463199 0.44472 0.444636
345 1 0.305723 0.899125 0.443476
822 1 0.465154 0.370645 0.441147
169 1 0.438599 0.413764 0.375503
1264 1 0.469075 0.162195 0.128031
230 1 0.211782 0.915126 0.424168
963 1 0.236273 0.970315 0.396598
123 1 0.206416 0.062226 0.0463334
935 1 0.00562349 0.369278 0.0502202
689 1 0.188845 0.62001 0.448344
1721 1 0.193245 0.413106 0.429147
1702 1 0.0588812 0.459385 0.386981
1857 1 0.493439 0.221863 0.313981
702 1 0.118525 0.559259 0.459166
181 1 0.345235 0.84775 0.492092
1811 1 0.259639 0.56723 0.452376
112 1 0.495573 0.503116 0.108376
1889 1 0.485143 0.592288 0.169515
1322 1 0.354971 0.51842 0.322494
1616 1 0.255525 0.464853 0.374055
999 1 0.25955 0.639239 0.467489
779 1 0.344972 0.463302 0.414245
1759 1 0.177263 0.507527 0.439768
1920 1 0.439225 0.973594 0.405242
721 1 0.485399 0.525212 0.34971
1558 1 0.213429 0.330726 0.436464
1791 1 0.399943 0.480627 0.466877
1519 1 0.444455 0.520332 0.419368
644 1 0.0326506 0.944586 0.00641149
1888 1 0.151161 0.717936 0.401385
798 1 0.105778 0.591036 0.334789
1049 1 0.022286 0.725564 0.270161
1837 1 0.118835 0.623601 0.41753
33 1 0.386583 0.915709 0.260538
836 1 0.0345127 0.629854 0.39644
896 1 0.284899 0.529344 0.379152
1405 1 0.427938 0.572703 0.473038
1326 1 0.346507 0.643795 0.467131
870 1 0.164085 0.566709 0.389444
224 1 0.371087 0.662209 0.397036
443 1 0.297608 0.0221316 0.360807
857 1 0.308474 0.610047 0.41461
1054 1 0.0160796 0.843196 0.2416
1369 1 0.475631 0.678425 0.146024
1314 1 0.180375 0.764081 0.341535
2005 1 0.100367 0.791952 0.36576
1267 1 0.482701 0.365575 0.322789
278 1 0.136688 0.229172 0.440464
1869 1 0.460576 0.607703 0.397031
1354 1 0.379914 0.621979 0.332996
814 1 0.0981269 0.693553 0.00335357
946 1 0.386921 0.574868 0.402561
1916 1 0.279318 0.968353 0.468235
1633 1 0.0267596 0.790797 0.461418
663 1 0.218217 0.942616 0.323968
1824 1 0.173894 0.869606 0.465455
217 1 0.139768 0.786672 0.270448
1598 1 0.115655 0.724082 0.321054
607 1 0.0263994 0.70906 0.351385
608 1 0.239226 0.99079 0.0358167
465 1 0.0630323 0.732954 0.417579
1145 1 0.00623388 0.925909 0.232829
1050 1 0.176921 0.647452 0.378199
1935 1 0.218728 0.818327 0.391576
1115 1 0.322618 0.745917 0.383775
346 1 0.166135 0.847988 0.334119
1849 1 0.443407 0.443541 0.307405
241 1 0.243632 0.846218 0.449514
1609 1 0.301592 0.694572 0.439535
1070 1 0.288123 0.752512 0.485029
1448 1 0.44011 0.949685 0.00968692
63 1 0.13391 0.0622744 0.450359
1016 1 0.486437 0.379508 0.24028
267 1 0.339794 0.791897 0.438009
1941 1 0.380768 0.718803 0.435033
1680 1 0.477628 0.812983 0.361612
1676 1 0.0439116 0.329187 0.00323625
69 1 0.327777 0.685847 0.342737
682 1 0.0497524 0.942366 0.298168
791 1 0.0772795 0.884899 0.386382
890 1 0.252316 0.213764 0.485066
1189 1 0.122766 0.906896 0.330014
1640 1 0.140125 0.915795 0.413488
1561 1 0.0805261 0.951181 0.368462
457 1 0.0482366 0.786342 0.312895
1328 1 0.202436 0.978465 0.466829
1919 1 0.00144723 0.570418 0.00711277
59 1 0.457801 0.638567 0.0896453
986 1 0.434619 0.946473 0.491279
693 1 0.417396 0.71209 0.18931
1269 1 0.0468539 0.259979 0.183135
96 1 0.465383 0.0963047 0.0564028
1340 1 0.0509691 0.960109 0.481866
1084 1 0.35585 0.0638747 0.010125
1564 1 0.113762 0.833736 0.422613
1460 1 0.442651 0.74459 0.47619
808 1 0.478221 0.441035 0.0297572
1480 1 0.213309 0.786934 0.492749
734 1 0.484006 0.741175 0.20227
511 1 0.325992 0.466034 0.496154
334 1 0.0718459 0.561497 0.00291042
862 1 0.178068 0.333996 0.90767
974 1 0.0732828 0.0470483 0.632944
379 1 0.127074 0.087088 0.568651
2006 1 0.110249 0.0101695 0.571268
1319 1 0.163057 0.21209 0.506782
1926 1 0.0949828 0.103902 0.680628
648 1 0.0162171 0.355116 0.561293
1284 1 0.00488109 0.0792404 0.649405
1860 1 0.210894 0.145451 0.618976
2042 1 0.313732 0.140509 0.640442
1566 1 0.152873 0.0381382 0.655824
520 1 0.193873 0.0819982 0.585745
365 1 0.20826 0.166965 0.547472
1404 1 0.307012 0.0744723 0.724931
2019 1 0.244527 0.0421704 0.676892
1549 1 0.489197 0.0972099 0.986293
293 1 0.265271 0.00153036 0.598017
451 1 0.259428 0.0878345 0.609589
1999 1 0.384637 0.148325 0.524327
1743 1 0.469224 0.0630463 0.614631
1191 1 0.432567 0.917665 0.631172
1476 1 0.256683 0.446354 0.515809
1278 1 0.133662 0.192044 0.607802
1469 1 0.381543 0.823864 0.985933
517 1 0.102389 0.166429 0.537978
1337 1 0.0278658 0.135034 0.676446
920 1 0.0284327 0.0932138 0.576512
1449 1 0.0210148 0.313427 0.644772
148 1 0.443163 0.581585 0.556098
1927 1 0.39053 0.78664 0.516468
854 1 0.19023 0.258426 0.657977
1091 1 0.193712 0.231646 0.583351
1662 1 0.298802 0.241336 0.541636
38 1 0.485899 0.315226 0.994271
1800 1 0.357368 0.275569 0.598005
1826 1 0.270361 0.188465 0.60061
1808 1 0.318732 0.154866 0.718823
680 1 0.197152 0.872772 0.535413
512 1 0.399508 0.269102 0.659967
1734 1 0.381053 0.205053 0.593908
577 1 0.436433 0.337776 0.780844
761 1 0.449556 0.21517 0.623829
238 1 0.411498 0.0854397 0.582298
823 1 0.0682053 0.191647 0.896129
439 1 0.296144 0.13863 0.548571
1905 1 0.498009 0.812774 0.879289
508 1 0.138667 0.280251 0.580146
1864 1 0.434618 0.878297 0.872171
613 1 0.124138 0.331065 0.520513
1831 1 0.365406 0.236857 0.525936
1589 1 0.114417 0.428884 0.640066
208 1 0.284545 0.323285 0.569291
300 1 0.285602 0.26124 0.620792
569 1 0.351409 0.333405 0.543403
360 1 0.0781235 0.615967 0.811276
1580 1 0.155845 0.399821 0.56753
291 1 0.184831 0.299983 0.725902
1281 1 0.23508 0.326391 0.650516
1991 1 0.48382 0.480596 0.789638
264 1 0.453225 0.351944 0.568366
1335 1 0.389251 0.336702 0.622206
1559 1 0.329966 0.455709 0.986527
1630 1 0.460531 0.282955 0.625753
233 1 0.441064 0.195209 0.533129
936 1 0.429106 0.271844 0.558216
1200 1 0.488436 0.745492 0.863711
1003 1 0.114896 0.532998 0.713056
98 1 0.0227914 0.961381 0.828366
1034 1 0.0954264 0.517265 0.628564
1570 1 0.291028 0.594395 0.512847
1820 1 0.180458 0.691054 0.911712
1601 1 0.374909 0.423465 0.556262
236 1 0.0733666 0.554191 0.548975
271 1 0.135043 0.482547 0.579501
871 1 0.236193 0.412321 0.583006
11 1 0.300399 0.446394 0.577266
406 1 0.310247 0.92648 0.518286
2032 1 0.32577 0.515532 0.574276
948 1 0.225122 0.720927 0.698997
1411 1 0.239364 0.585299 0.581353
262 1 0.366469 0.458989 0.619474
279 1 0.329462 0.381627 0.602251
1953 1 0.0011682 0.0732613 0.931263
1995 1 0.438702 0.44765 0.596661
416 1 0.455115 0.574561 0.648555
370 1 0.397334 0.526769 0.618586
1426 1 0.376309 0.595553 0.584748
469 1 0.422047 0.504012 0.545665
962 1 0.00531527 0.853815 0.750845
444 1 0.365166 0.551983 0.522789
744 1 0.145854 0.814745 0.518791
651 1 0.108907 0.256497 0.512373
941 1 0.0308469 0.127712 0.873118
1243 1 0.020697 0.724248 0.681183
54 1 0.462992 0.404216 0.501092
1362 1 0.451711 0.0511823 0.836975
1427 1 0.0593172 0.635275 0.548644
1979 1 0.0618545 0.470508 0.883192
1056 1 0.162913 0.708743 0.615064
556 1 0.193302 0.394211 0.993289
1350 1 0.316116 0.509119 0.638526
1932 1 0.0243198 0.38969 0.88159
687 1 0.144399 0.662235 0.55824
301 1 0.349977 0.663474 0.626218
855 1 0.309595 0.586078 0.597122
1000 1 0.26631 0.654843 0.603902
892 1 0.218387 0.719223 0.548166
1175 1 0.432744 0.638746 0.603976
1691 1 0.0681951 0.380318 0.656695
581 1 0.403301 0.646312 0.700734
1890 1 0.482821 0.630836 0.67747
325 1 0.313386 0.748603 0.586759
1018 1 0.030663 0.0328004 0.543881
1994 1 0.294333 0.562175 0.67943
1738 1 0.327626 0.663163 0.71391
73 1 0.140097 0.592762 0.661183
1900 1 0.127485 0.681512 0.672161
352 1 0.0583559 0.798899 0.626738
743 1 0.154068 0.731366 0.716549
1330 1 0.115209 0.779253 0.572979
402 1 0.137909 0.234744 0.994575
1966 1 0.0269228 0.240236 0.598181
532 1 0.0263993 0.561694 0.692218
212 1 0.447532 0.52161 0.898886
668 1 0.00608851 0.25267 0.899267
31 1 0.337227 0.81186 0.562922
479 1 0.237624 0.746834 0.630462
722 1 0.193032 0.805775 0.635506
1093 1 0.409468 0.704045 0.629735
1943 1 0.21932 0.795114 0.571412
632 1 0.126528 0.78227 0.655549
1544 1 0.283561 0.82481 0.612405
980 1 0.140266 0.87389 0.987383
120 1 0.270949 0.688485 0.757546
1915 1 0.485582 0.935914 0.818726
82 1 0.293221 0.729268 0.684345
1731 1 0.438669 0.776848 0.639805
1408 1 0.366217 0.878872 0.603242
1948 1 0.352533 0.800111 0.642459
1634 1 0.38911 0.756552 0.579978
685 1 0.40921 0.842276 0.642987
1867 1 0.370386 0.736034 0.677146
1169 1 0.0403411 0.787044 0.932274
1511 1 0.451948 0.137892 0.918012
8 1 0.473745 0.833643 0.696769
190 1 0.0806304 0.952529 0.557153
400 1 0.0215375 0.956129 0.619392
939 1 0.0984284 0.976771 0.64408
1366 1 0.11637 0.841629 0.604112
1206 1 0.065157 0.476434 0.989443
1667 1 0.0495902 0.859287 0.577173
1747 1 0.149147 0.735627 0.520088
509 1 0.333734 0.973485 0.591859
377 1 0.118202 0.889617 0.549399
366 1 0.160049 0.953854 0.530197
88 1 0.232537 0.939505 0.56736
1516 1 0.163901 0.941675 0.604226
1760 1 0.200086 0.974421 0.656594
1014 1 0.324496 0.0052699 0.672834
362 1 0.205465 0.882562 0.61828
1253 1 0.172558 0.863073 0.687064
578 1 0.180872 0.0109385 0.598606
341 1 0.269905 0.836641 0.520049
1141 1 0.382913 0.893129 0.714171
1736 1 0.313614 0.865843 0.69994
58 1 0.412796 0.930504 0.562267
1379 1 0.415214 0.835944 0.571562
1925 1 0.347838 0.93041 0.657121
519 1 0.0209113 0.317484 0.844668
172 1 0.218909 0.169251 0.964434
42 1 0.395014 0.983481 0.629893
1168 1 0.0976631 0.0245985 0.702858
1160 1 0.0870716 0.135671 0.605562
1980 1 0.0982125 0.0790921 0.760308
1985 1 0.0153153 0.0262468 0.710672
156 1 0.157632 0.0380538 0.805236
674 1 0.104997 0.0600526 0.892009
1218 1 0.105844 0.979145 0.870873
1186 1 0.167073 0.881815 0.903797
1470 1 0.209078 0.0475263 0.867372
1613 1 0.170052 0.111302 0.828292
1097 1 0.249966 0.121901 0.682917
1157 1 0.306393 0.135273 0.797904
1578 1 0.086682 0.184737 0.673867
494 1 0.210448 0.141264 0.760394
1172 1 0.246892 0.0834387 0.819974
433 1 0.466014 0.0602732 0.756339
1581 1 0.174217 0.106326 0.677084
1002 1 0.452535 0.0103756 0.683451
1887 1 0.395588 0.141328 0.625772
1567 1 0.343659 0.0533645 0.615905
1290 1 0.410531 0.0685963 0.670658
27 1 0.396139 0.118707 0.803024
847 1 0.423255 0.977277 0.83816
1687 1 0.364344 0.0482169 0.843269
1996 1 0.0281397 0.509247 0.516914
1568 1 0.0350384 0.616411 0.640859
388 1 0.313654 0.393126 0.531511
1174 1 0.13979 0.146626 0.746371
1836 1 0.17851 0.189786 0.689761
572 1 0.0653561 0.254252 0.730355
967 1 0.491148 0.132608 0.565578
552 1 0.105418 0.208215 0.757279
1383 1 0.0321577 0.230095 0.668585
849 1 0.117178 0.249639 0.666561
573 1 0.0318449 0.178638 0.796254
1178 1 0.200194 0.288261 0.800559
1529 1 0.130916 0.268848 0.788663
1666 1 0.265115 0.260855 0.786767
717 1 0.204508 0.222525 0.759103
199 1 0.295704 0.201467 0.774998
989 1 0.126177 0.232153 0.922962
356 1 0.407673 0.190683 0.687877
437 1 0.203331 0.361021 0.524698
1766 1 0.30895 0.933314 0.920562
943 1 0.416201 0.178796 0.843319
868 1 0.374354 0.193188 0.770085
1177 1 0.373377 0.356313 0.696438
1347 1 0.407402 0.88486 0.506491
373 1 0.448794 0.191426 0.747176
973 1 0.409194 0.262567 0.735635
742 1 0.478661 0.144696 0.690064
127 1 0.0577357 0.286587 0.553232
695 1 0.16906 0.502053 0.657903
1642 1 0.118969 0.339249 0.730815
1021 1 0.0892622 0.308304 0.630707
1317 1 0.157773 0.328135 0.644852
1632 1 0.182574 0.0598422 0.510389
99 1 0.427056 0.13552 0.990513
1329 1 0.0413796 0.489294 0.708837
1188 1 0.15323 0.465528 0.743516
575 1 0.256359 0.189868 0.666458
1062 1 0.20423 0.31291 0.577653
1397 1 0.265483 0.34464 0.737106
6 1 0.141142 0.422632 0.828169
44 1 0.353642 0.317493 0.772608
216 1 0.307824 0.398068 0.845782
1381 1 0.243815 0.236243 0.853964
93 1 0.251784 0.274711 0.70747
1072 1 0.319089 0.255793 0.728692
106 1 0.0277027 0.531394 0.613645
960 1 0.301377 0.379232 0.675612
1696 1 0.327037 0.30234 0.658961
1825 1 0.487041 0.362163 0.632315
1304 1 0.455593 0.441156 0.870108
683 1 0.0380098 0.406507 0.795981
850 1 0.420143 0.290942 0.837803
952 1 0.249017 0.420837 0.709647
36 1 0.0922753 0.368088 0.580262
260 1 0.0536974 0.112167 0.505539
2029 1 0.439177 0.996739 0.759702
819 1 0.173841 0.555995 0.581104
975 1 0.0647055 0.538428 0.770105
880 1 0.0218973 0.628559 0.772205
741 1 0.303013 0.404649 0.770559
1272 1 0.23446 0.392277 0.795584
732 1 0.290338 0.445986 0.6514
582 1 0.245047 0.518186 0.642185
273 1 0.200658 0.41692 0.651403
395 1 0.236976 0.491433 0.716028
1176 1 0.183615 0.642327 0.647551
1963 1 0.240759 0.562835 0.821166
284 1 0.387599 0.414725 0.85337
1151 1 0.186369 0.480702 0.811265
2035 1 0.424517 0.420476 0.676776
1481 1 0.486792 0.251387 0.510187
1127 1 0.399136 0.395042 0.749889
1671 1 0.368018 0.522141 0.689591
1192 1 0.320785 0.513904 0.742338
848 1 0.430651 0.498605 0.715066
1582 1 0.385862 0.543592 0.771251
1012 1 0.405203 0.479038 0.797483
1806 1 0.0579624 0.463538 0.585182
723 1 0.331165 0.993066 0.951738
1992 1 0.0931039 0.656557 0.611979
117 1 0.0370256 0.732958 0.888245
1198 1 0.427424 0.69494 0.550691
62 1 0.178494 0.595964 0.779101
1962 1 0.0564168 0.139923 0.747837
1006 1 0.256464 0.55071 0.737981
1735 1 0.259117 0.815606 0.680793
240 1 0.203519 0.651036 0.83009
2014 1 0.0953528 0.547985 0.846842
603 1 0.196735 0.70029 0.774804
827 1 0.281081 0.624277 0.783228
1538 1 0.324883 0.683878 0.826383
1762 1 0.156821 0.532651 0.520007
1468 1 0.344552 0.587753 0.727607
784 1 0.235284 0.700772 0.870527
837 1 0.329007 0.576232 0.802557
1353 1 0.367976 0.705104 0.763915
1138 1 0.444094 0.67806 0.896475
879 1 0.450028 0.576271 0.748582
1668 1 0.0272499 0.714219 0.968043
797 1 0.451441 0.70485 0.720361
1917 1 0.491973 0.285101 0.762079
478 1 0.410417 0.722257 0.859923
979 1 0.441841 0.33315 0.685305
256 1 0.0814967 0.734368 0.713345
1674 1 0.173482 0.80342 0.72353
1898 1 0.00526459 0.695639 0.607963
1447 1 0.0516734 0.910466 0.775541
1628 1 0.165613 0.653022 0.720922
562 1 0.0829246 0.829815 0.714326
183 1 0.0839084 0.770899 0.835958
1190 1 0.0502631 0.840078 0.812607
1554 1 0.0652584 0.712529 0.782969
1395 1 0.0270225 0.778099 0.739408
1500 1 0.305622 0.680089 1.00014
1080 1 0.244869 0.763365 0.749716
922 1 0.197419 0.775334 0.888729
917 1 0.262131 0.78753 0.855414
1154 1 0.193721 0.785989 0.814341
101 1 0.115096 0.809252 0.780603
629 1 0.243161 0.886583 0.686724
289 1 0.275281 0.914341 0.782352
417 1 0.265144 0.836976 0.75428
1772 1 0.222283 0.857814 0.846663
1356 1 0.335281 0.0620846 0.519952
420 1 0.320013 0.780089 0.799633
1515 1 0.391704 0.769114 0.789076
1101 1 0.359407 0.795839 0.713092
214 1 0.46424 0.762166 0.792774
460 1 0.343235 0.860646 0.776662
1274 1 0.170439 0.00576946 0.718857
339 1 0.0245196 0.585974 0.930037
664 1 0.0955799 0.720857 0.631694
1547 1 0.097937 0.90568 0.670336
86 1 0.181015 0.957396 0.782192
463 1 0.109833 0.962582 0.777687
881 1 0.0547362 0.014648 0.793693
681 1 0.230519 0.0745165 0.747491
1701 1 0.207237 0.877831 0.762739
491 1 0.159519 0.932264 0.714431
1535 1 0.314927 0.977343 0.816694
1248 1 0.300211 0.862729 0.868147
955 1 0.239485 0.977759 0.719129
1863 1 0.162533 0.963627 0.939309
143 1 0.272874 0.905483 0.625065
1419 1 0.240703 0.00457034 0.803135
1302 1 0.383765 0.978438 0.703631
1807 1 0.376561 0.0681418 0.748156
467 1 0.331343 1.00055 0.746242
1489 1 0.426323 0.90784 0.781894
175 1 0.111605 0.00212697 0.503083
77 1 0.298283 0.936274 0.714192
882 1 0.445311 0.827682 0.776407
1563 1 0.359841 0.934704 0.777405
1809 1 0.182332 0.946515 0.863454
1770 1 0.105516 0.131505 0.869144
845 1 0.0530216 0.0238627 0.936212
1228 1 0.400182 0.01807 0.562598
1830 1 0.0706363 0.118075 0.929285
1076 1 0.121372 0.574042 0.918235
1333 1 0.389343 0.877937 0.921459
219 1 0.0995021 0.90777 0.842473
924 1 0.268846 0.0938049 0.885136
1283 1 0.307574 0.0708089 0.951018
1133 1 0.0937566 0.0805304 0.983613
1816 1 0.152821 0.0140576 0.994732
326 1 0.161899 0.17853 0.882548
1902 1 0.204309 0.108272 0.911311
1652 1 0.171841 0.0888507 0.969534
832 1 0.473378 0.0588884 0.532536
828 1 0.0848002 0.768081 0.978647
1510 1 0.482985 0.37638 0.93831
1883 1 0.45925 0.218251 0.992526
673 1 0.344582 0.120325 0.882314
1976 1 0.36452 0.264055 0.978951
7 1 0.477036 0.713806 0.604025
1361 1 0.127111 0.830049 0.93426
1868 1 0.470152 0.245463 0.696579
1679 1 0.404833 0.992915 0.929578
1602 1 0.235402 0.94971 0.937016
1257 1 0.00176641 0.271145 0.793343
1603 1 0.0566747 0.309347 0.909917
630 1 0.0934958 0.380067 0.911438
911 1 0.101324 0.340554 0.849063
860 1 0.218196 0.500939 0.545157
410 1 0.366246 0.868033 0.842518
429 1 0.136158 0.143734 0.94629
560 1 0.00186611 0.681574 0.823996
10 1 0.125343 0.310559 0.944162
281 1 0.0653673 0.30936 0.783778
1146 1 0.30467 0.0643238 0.790289
1997 1 0.120996 0.195211 0.827945
193 1 0.22658 0.158253 0.837187
1123 1 0.391111 0.245216 0.89211
315 1 0.350864 0.132583 0.955462
317 1 0.282765 0.178409 0.884929
1341 1 0.197287 0.225194 0.931116
209 1 0.287284 0.164138 0.97146
790 1 0.301063 0.231883 0.934906
1024 1 0.00421335 0.830801 0.983076
747 1 0.27828 0.68459 0.519182
1541 1 0.499288 0.0793325 0.677327
1193 1 0.345288 0.184499 0.841834
1374 1 0.473375 0.302427 0.892544
1698 1 0.486003 0.506847 0.668674
590 1 0.0130818 0.931165 0.689856
1232 1 0.407076 0.193798 0.951007
1165 1 0.46211 0.223725 0.883168
159 1 0.126684 0.442627 0.959313
381 1 0.472479 0.679373 0.982893
1950 1 0.175609 0.351894 0.833616
105 1 0.0386111 0.957679 0.90033
220 1 0.0400639 0.414547 0.961674
1983 1 0.159688 0.501358 0.885738
1234 1 0.239794 0.344707 0.854862
1373 1 0.203972 0.456817 0.94943
1156 1 0.297605 0.31939 0.828674
1786 1 0.157879 0.278182 0.863513
893 1 0.266245 0.294624 0.913781
1410 1 0.306103 0.371401 0.912394
553 1 0.336829 0.29839 0.907468
1707 1 0.258452 0.326933 0.988282
1313 1 0.227987 0.379482 0.931114
389 1 0.195129 0.293821 0.974508
1250 1 0.413103 0.436433 0.972934
1104 1 0.47104 0.825614 0.497954
912 1 0.45314 0.536509 0.82838
655 1 0.349695 0.2536 0.811288
977 1 0.455935 0.3591 0.847347
1498 1 0.413775 0.352548 0.910885
752 1 0.492976 0.22729 0.80251
1861 1 0.402637 0.0761291 0.938993
1166 1 0.155654 0.586157 0.848202
18 1 0.0763762 0.632237 0.714345
188 1 0.445994 0.26375 0.946656
349 1 0.141444 0.537583 0.785336
1843 1 0.120893 0.514959 0.981276
372 1 0.236774 0.493553 0.88561
441 1 0.456838 0.919494 0.932747
587 1 0.0043627 0.771221 0.54395
1260 1 0.260982 0.472827 0.799785
1238 1 0.184396 0.536757 0.941906
866 1 0.234106 0.424214 0.866524
1235 1 0.290679 0.451732 0.887022
1749 1 0.102206 0.481067 0.804525
1170 1 0.0818421 0.259641 0.856711
1134 1 0.12303 0.639135 0.884728
1365 1 0.36322 0.675508 0.898395
1690 1 0.378981 0.632642 0.77819
482 1 0.27613 0.996926 0.875911
2011 1 0.313128 0.594687 0.982323
1436 1 0.359001 0.420194 0.925445
759 1 0.364114 0.332138 0.843784
1275 1 0.378242 0.485799 0.910754
1153 1 0.378217 0.523168 0.84471
459 1 0.474569 0.686304 0.803449
1239 1 0.335024 0.52887 0.940704
163 1 0.362822 0.920246 0.971618
1131 1 0.146601 0.634313 0.954048
414 1 0.141077 0.713767 0.838759
811 1 0.202288 0.60877 0.905036
1623 1 0.0147547 0.0838295 0.778406
2026 1 0.0736056 0.639679 0.957249
1653 1 0.0491016 0.66293 0.874136
394 1 0.492957 0.397005 0.784644
885 1 0.298907 0.52044 0.864699
863 1 0.296783 0.726872 0.898973
606 1 0.470223 0.0081557 0.955842
35 1 0.274777 0.655323 0.922943
1318 1 0.364759 0.593656 0.907854
891 1 0.269321 0.56859 0.913581
1664 1 0.220291 0.644172 0.541862
1850 1 0.308299 0.62134 0.869618
1575 1 0.484946 0.893206 0.523409
2003 1 0.44314 0.595883 0.890477
1464 1 0.411262 0.631957 0.949851
640 1 0.00358078 0.582747 0.846483
706 1 0.39367 0.555203 0.97358
1631 1 0.476364 0.742494 0.935054
436 1 0.358578 0.689286 0.53136
1486 1 0.132915 0.893266 0.771255
1550 1 0.103102 0.379879 0.990489
1776 1 0.0163549 0.884895 0.939296
1885 1 0.473832 0.563227 0.959589
1432 1 0.0753644 0.844031 0.877085
1894 1 0.0652903 0.707941 0.547321
762 1 0.150966 0.837578 0.839801
1069 1 0.312229 0.842309 0.93896
299 1 0.108383 0.718266 0.89983
923 1 0.247426 0.742393 0.948095
1378 1 0.372032 0.933507 0.869342
1993 1 0.307416 0.776812 0.983121
1011 1 0.0241379 0.155604 0.996403
1817 1 0.0670506 0.836976 0.504116
1796 1 0.431495 0.868047 1.00068
1655 1 0.430767 0.810399 0.846093
435 1 0.464233 0.776113 0.559011
1958 1 0.45394 0.909752 0.709908
294 1 0.390529 0.719185 0.968125
1453 1 0.363063 0.779804 0.859304
750 1 0.438295 0.813777 0.927749
1204 1 0.0378351 0.868385 0.647943
1478 1 0.369584 0.780815 0.936491
40 1 0.42742 0.774355 0.726404
1865 1 0.094159 0.966252 0.950775
1913 1 0.00333765 0.891361 0.857907
252 1 0.0617465 0.226114 0.976942
1132 1 0.236831 0.638117 0.985004
1033 1 0.465522 0.996579 0.890521
2041 1 0.233485 0.823498 0.945223
774 1 0.413666 0.322968 0.984131
1574 1 0.4754 0.978906 0.562713
654 1 0.244484 0.0238089 0.522023
884 1 0.175729 0.761797 0.95889
1202 1 0.2745 0.486811 0.944775
1899 1 0.492205 0.452548 0.946966
1484 1 0.0091085 0.826806 0.876675
1179 1 0.422154 0.333921 0.506495
1368 1 0.432469 0.771374 0.997826
422 1 0.168439 0.450815 0.505292
872 1 0.0708055 0.38892 0.502678
807 1 0.0636615 0.866512 0.999153
253 1 0.562383 0.99411 0.159836
1083 1 0.607566 0.063553 0.107981
1092 1 0.524357 0.977301 0.0806633
662 1 0.978363 0.948417 0.308666
625 1 0.533058 0.0821363 0.103008
786 1 0.697853 0.984635 0.0669892
1244 1 0.499888 0.561353 0.452783
1693 1 0.714686 0.0816285 0.106498
1848 1 0.786031 0.0719166 0.06784
1446 1 0.859088 0.0940826 0.0433195
510 1 0.950726 0.142303 0.489222
1675 1 0.533629 0.16819 0.0158367
1647 1 0.858787 0.971579 0.142955
1610 1 0.856282 0.124997 0.112846
984 1 0.922452 0.0819307 0.205725
430 1 0.93612 0.00318153 0.169766
164 1 0.915089 0.135075 0.172975
90 1 0.88832 0.466024 0.0193171
206 1 0.827207 0.919992 0.0284904
1297 1 0.872971 0.166882 0.0370149
493 1 0.905895 0.53261 0.487779
476 1 0.745532 0.258579 0.458237
495 1 0.92911 0.0137043 0.0985804
809 1 0.978787 0.221228 0.0158079
1059 1 0.611393 0.272445 0.0340435
2043 1 0.987971 0.451231 0.225889
676 1 0.624748 0.197905 0.488602
1423 1 0.503479 0.690239 0.0606475
1331 1 0.997503 0.62478 0.23527
158 1 0.593075 0.228357 0.149959
1066 1 0.620935 0.280867 0.111907
623 1 0.739716 0.306179 0.0252956
697 1 0.654917 0.10214 0.138893
763 1 0.670998 0.249765 0.0722354
152 1 0.742054 0.143126 0.0682705
1439 1 0.810349 0.142677 0.0667549
1459 1 0.945028 0.39689 0.322157
631 1 0.81069 0.211294 0.026077
586 1 0.852517 0.23257 0.0842234
95 1 0.931462 0.231782 0.071572
835 1 0.503834 0.416304 0.0917392
418 1 0.956152 0.151838 0.0232588
46 1 0.528081 0.741963 0.107464
52 1 0.52081 0.21591 0.218185
795 1 0.53746 0.678051 0.338447
659 1 0.985563 0.584582 0.422405
898 1 0.573977 0.204543 0.0714752
929 1 0.744792 0.192982 0.0119814
760 1 0.694362 0.31019 0.0905643
1981 1 0.636889 0.371065 0.0667631
783 1 0.576033 0.355749 0.0136003
1794 1 0.546785 0.813669 0.0853543
17 1 0.511226 0.101121 0.250852
1752 1 0.708331 0.405035 0.103575
1565 1 0.78237 0.378339 0.0944983
1684 1 0.634505 0.10008 0.0349948
867 1 0.725537 0.515498 0.124269
2036 1 0.805018 0.348461 0.16419
874 1 0.764914 0.420187 0.170709
2008 1 0.873376 0.37722 0.123344
864 1 0.754365 0.460929 0.0714694
434 1 0.966403 0.757719 0.304978
412 1 0.508007 0.75813 0.0157443
1621 1 0.968731 0.120876 0.0883178
1726 1 0.977324 0.990273 0.243391
764 1 0.953218 0.386029 0.103532
991 1 0.996165 0.758282 0.390214
2034 1 0.97641 0.286801 0.0436274
1823 1 0.994173 0.500742 0.00111705
1978 1 0.9271 0.397213 0.0269156
261 1 0.936565 0.0648334 0.0316849
925 1 0.87202 0.998559 0.0363089
1214 1 0.622858 0.423729 0.00687215
39 1 0.696548 0.668593 0.0606117
1015 1 0.503145 0.499325 0.49242
64 1 0.627782 0.560691 0.0456708
1400 1 0.553333 0.881666 0.0375153
23 1 0.63733 0.561364 0.143142
78 1 0.580497 0.469108 0.110636
471 1 0.514326 0.456843 0.154149
357 1 0.708318 0.487606 0.195131
642 1 0.521415 0.873775 0.459807
304 1 0.651558 0.503973 0.0929887
1293 1 0.507585 0.944685 0.480094
425 1 0.688749 0.592907 0.0236704
1967 1 0.926367 0.386039 0.241691
1955 1 0.679803 0.54833 0.200388
1090 1 0.77038 0.586447 0.0200897
1171 1 0.884426 0.474132 0.124766
229 1 0.959045 0.981638 0.0352284
1605 1 0.831976 0.282052 0.0288234
213 1 0.852761 0.535056 0.0962416
990 1 0.93774 0.941216 0.435631
359 1 0.831518 0.689308 0.227054
456 1 0.933837 0.455796 0.0733674
487 1 0.834308 0.527853 0.0182035
842 1 0.929349 0.668598 0.138544
401 1 0.971406 0.595472 0.150066
1128 1 0.677755 0.167448 0.0507832
298 1 0.645612 0.613684 0.0944787
846 1 0.582207 0.693559 0.110108
647 1 0.646417 0.722789 0.087647
1428 1 0.673452 0.676218 0.132074
2001 1 0.571822 0.601451 0.0968929
788 1 0.759626 0.0719146 0.462389
745 1 0.78688 0.651792 0.0618948
1901 1 0.980423 0.630461 0.31031
378 1 0.824859 0.778502 0.0930535
1713 1 0.7311 0.714397 0.127956
740 1 0.726133 0.616414 0.169425
1143 1 0.681811 0.771424 0.160872
276 1 0.796829 0.71822 0.0304685
1859 1 0.678667 0.261256 0.490072
778 1 0.911117 0.652634 0.0379991
1650 1 0.569382 0.936419 0.43357
1461 1 0.989034 0.728338 0.487837
1779 1 0.894976 0.721996 0.0873189
1530 1 0.892276 0.749143 0.276049
851 1 0.87428 0.59811 0.0350301
265 1 0.564904 0.731389 0.0415744
1625 1 0.665721 0.938234 0.387459
985 1 0.625538 0.721514 0.171322
2030 1 0.509246 0.371891 0.171053
1064 1 0.500397 0.498711 0.286041
1706 1 0.621459 0.802661 0.0477108
899 1 0.537703 0.790225 0.172554
1756 1 0.812099 0.747658 0.408218
968 1 0.995686 0.273228 0.127372
259 1 0.559211 0.353535 0.259267
292 1 0.823739 0.883355 0.431985
207 1 0.744923 0.80559 0.144346
34 1 0.82982 0.831372 0.176087
2045 1 0.687566 0.780275 0.0898934
1532 1 0.691087 0.921184 0.198699
1358 1 0.765065 0.757365 0.188062
976 1 0.684661 0.764649 0.229016
160 1 0.929921 0.642638 0.420117
1727 1 0.852299 0.762067 0.164572
2004 1 0.514175 0.505808 0.419941
665 1 0.977467 0.858898 0.167524
2018 1 0.865267 0.839679 0.0696646
887 1 0.829262 0.776564 0.0129465
179 1 0.971215 0.802527 0.10092
468 1 0.507057 0.134806 0.392685
1445 1 0.95155 0.88224 0.0919092
733 1 0.636939 -6.69266e-05 0.313499
521 1 0.950113 0.883062 0.244575
800 1 0.616082 0.17671 0.00352667
1644 1 0.722808 0.570977 0.415765
26 1 0.594168 0.655048 0.0398501
1597 1 0.916721 0.695553 0.49983
932 1 0.599128 0.00681374 0.0647757
1725 1 0.690007 0.873732 0.116571
675 1 0.56143 0.80147 0.483888
1262 1 0.751386 0.925433 0.0889583
447 1 0.627334 0.807357 0.119717
1594 1 0.998251 0.332596 0.358423
47 1 0.614729 0.930639 0.0402471
1418 1 0.79785 0.987759 0.0851208
1654 1 0.978436 0.628554 0.0431023
728 1 0.903856 0.444771 0.467513
1180 1 0.748182 0.0139683 0.124678
1125 1 0.792037 0.94446 0.157358
1533 1 0.837827 0.903442 0.109517
902 1 0.771568 0.864404 0.0925024
1933 1 0.731793 0.87656 0.42784
1513 1 0.975786 0.944097 0.102042
602 1 0.897149 0.813882 0.224224
1833 1 0.942069 0.757266 0.00904559
1309 1 0.83364 0.899368 0.187631
1414 1 0.866098 0.948444 0.436659
883 1 0.904652 0.0731546 0.133531
192 1 0.708468 0.79979 0.494666
515 1 0.895405 0.940489 0.0680957
1938 1 0.774518 0.517937 0.472277
245 1 0.556321 0.279076 0.19581
1467 1 0.535524 0.10084 0.170879
660 1 0.599329 0.0561422 0.215329
1082 1 0.60782 0.157654 0.178018
1251 1 0.602546 0.279854 0.254676
57 1 0.711721 0.0632926 0.179096
61 1 0.68246 0.0829077 0.346434
195 1 0.510891 0.038573 0.364406
350 1 0.803151 0.0462776 0.148122
622 1 0.890599 0.0257022 0.24424
1624 1 0.853337 0.101523 0.184114
982 1 0.664586 0.0618143 0.256121
958 1 0.764214 0.932937 0.259257
670 1 0.758414 0.996539 0.193057
1306 1 0.746207 0.115644 0.226896
125 1 0.539968 0.304467 0.0491569
392 1 0.558998 0.0856849 0.0204355
1930 1 0.822244 0.131749 0.240783
1998 1 0.952145 0.198552 0.195183
174 1 0.850382 0.201323 0.308175
650 1 0.923656 0.263269 0.171373
1512 1 0.918642 0.329791 0.473967
453 1 0.866612 0.141998 0.498139
561 1 0.580425 0.392156 0.198502
244 1 0.663244 0.129671 0.226852
1307 1 0.740107 0.185761 0.243508
1682 1 0.541944 0.221513 0.367693
283 1 0.745672 0.23138 0.0927803
2037 1 0.701537 0.193336 0.159045
1984 1 0.975819 0.0502582 0.461734
1765 1 0.704473 0.266951 0.152236
785 1 0.77456 0.119801 0.137968
2033 1 0.777924 0.259882 0.22607
1386 1 0.807215 0.202506 0.258852
176 1 0.762399 0.291868 0.123105
805 1 0.836516 0.309226 0.100764
709 1 0.94184 0.186933 0.123025
1866 1 0.922753 0.321405 0.107923
634 1 0.79892 0.351305 0.018792
384 1 0.99584 0.242364 0.234659
1689 1 0.864089 0.183277 0.185853
1068 1 0.781037 0.173691 0.185011
221 1 0.841897 0.253295 0.161325
1896 1 0.96033 0.213002 0.292137
312 1 0.907366 0.016699 0.415885
1862 1 0.529391 0.27456 0.116896
1121 1 0.527865 0.437762 0.228844
1742 1 0.578857 0.34133 0.149409
1763 1 0.866817 0.702455 0.00261283
1839 1 0.646383 0.357532 0.150488
1853 1 0.606205 0.233427 0.306704
1472 1 0.538739 0.419143 0.298193
539 1 0.531949 0.431659 0.439793
895 1 0.580986 0.39716 0.11246
1149 1 0.63654 0.283694 0.188404
171 1 0.828937 0.438244 0.0969401
1065 1 0.718964 0.344694 0.186554
1110 1 0.703623 0.269891 0.2287
2013 1 0.809106 0.414192 0.233712
1334 1 0.782815 0.487928 0.15144
919 1 0.853671 0.469708 0.212036
1660 1 0.678298 0.314844 0.309847
1724 1 0.845921 0.313793 0.207161
1096 1 0.574269 0.507371 0.468029
237 1 0.873085 0.343617 0.0407267
614 1 0.934988 0.347665 0.184475
251 1 0.922139 0.425518 0.176265
1741 1 0.905743 0.315139 0.27007
248 1 0.964307 0.293561 0.293247
234 1 0.588604 0.471864 0.184627
1219 1 0.886296 0.267389 0.466935
1215 1 0.600602 0.422152 0.26482
380 1 0.569336 0.493348 0.256348
1694 1 0.645559 0.436545 0.130843
1471 1 0.684288 0.59025 0.261842
638 1 0.606471 0.555119 0.215937
2017 1 0.617696 0.553831 0.285515
1273 1 0.753566 0.885125 0.022463
909 1 0.628759 0.350332 0.237661
285 1 0.685812 0.420331 0.269351
354 1 0.759106 0.344733 0.249079
1880 1 0.748674 0.463795 0.267206
576 1 0.67322 0.402044 0.19695
1422 1 0.703532 0.52035 0.269999
324 1 0.848423 0.565877 0.204168
226 1 0.641517 0.47224 0.23096
296 1 0.794202 0.508426 0.228956
1112 1 0.825639 0.461331 0.294388
1035 1 0.858574 0.608004 0.143619
812 1 0.968932 0.522521 0.225255
1720 1 0.879368 0.399795 0.292706
858 1 0.915744 0.506962 0.18088
1255 1 0.915847 0.459862 0.246323
1226 1 0.640375 0.182702 0.11136
1482 1 0.872074 0.602415 0.272311
1259 1 0.875508 0.921072 0.369604
755 1 0.52447 0.714415 0.264323
688 1 0.599726 0.526035 0.36405
1300 1 0.518682 0.572963 0.278362
1858 1 0.597591 0.636606 0.151816
323 1 0.852364 0.475633 0.414672
371 1 0.611596 0.688457 0.322339
2047 1 0.726412 0.609642 0.0881365
928 1 0.646324 0.617225 0.201992
1856 1 0.792749 0.571159 0.1003
1792 1 0.827171 0.681532 0.116466
1832 1 0.700866 0.688179 0.282179
45 1 0.58388 0.696874 0.23693
131 1 0.767249 0.66402 0.206555
1343 1 0.659478 0.639629 0.33592
92 1 0.751771 0.549701 0.183584
1142 1 0.633638 0.647229 0.270339
337 1 0.686211 0.692304 0.202854
1618 1 0.879338 0.0529439 0.481103
1213 1 0.912381 0.671267 0.21721
1022 1 0.907804 0.634496 0.354401
978 1 0.909234 0.556824 0.131948
1345 1 0.930639 0.553883 0.28141
600 1 0.859228 0.67426 0.312845
145 1 0.910118 0.560284 0.354595
1659 1 0.790247 0.652799 0.283041
1764 1 0.529575 0.642983 0.182364
1425 1 0.633045 0.794364 0.456973
1590 1 0.824154 0.977158 0.385215
1390 1 0.577363 0.96117 0.252528
155 1 0.623296 0.903877 0.231794
1525 1 0.534175 0.796246 0.244926
749 1 0.823306 0.776769 0.229151
1723 1 0.607154 0.795182 0.196419
1517 1 0.752514 0.876501 0.173499
2 1 0.747586 0.811657 0.231212
1553 1 0.962961 0.769563 0.228014
1906 1 0.771686 0.722139 0.265045
1757 1 0.649986 0.898022 0.328035
970 1 0.827297 0.848539 0.256552
1746 1 0.901057 0.82437 0.302008
1629 1 0.880453 0.861849 0.464031
1009 1 0.826486 0.842429 0.368132
263 1 0.724753 0.785368 0.303182
937 1 0.587455 0.891716 0.489531
1790 1 0.973048 0.821551 0.343209
197 1 0.826222 0.784746 0.296441
551 1 0.908835 0.843228 0.13689
841 1 0.560729 0.882398 0.122747
645 1 0.7373 0.750297 0.0525952
635 1 0.682752 0.949769 0.134997
1399 1 0.524233 0.0258493 0.23127
782 1 0.661893 0.0207081 0.151957
830 1 0.559978 0.920936 0.196875
708 1 0.611006 0.944287 0.106636
180 1 0.544892 0.857457 0.22073
789 1 0.794513 0.452238 0.478993
201 1 0.634027 0.881958 0.153765
888 1 0.718744 0.979034 0.344048
71 1 0.759934 0.0114814 0.268808
1463 1 0.693061 0.00596165 0.211438
877 1 0.688707 0.836358 0.194032
1268 1 0.645629 0.833045 0.295674
1911 1 0.608238 0.381636 0.493136
81 1 0.939047 0.761827 0.156507
504 1 0.863078 0.899401 0.280465
592 1 0.905325 0.952459 0.272322
1873 1 0.82503 0.972266 0.292577
1588 1 0.906808 0.936648 0.191617
686 1 0.846359 0.97374 0.218192
1773 1 0.943247 0.893019 0.349448
1221 1 0.905636 0.989951 0.344441
2022 1 0.531142 0.716742 0.458938
135 1 0.707321 0.374622 0.0289161
529 1 0.696857 0.109837 0.426085
1240 1 0.904934 0.847738 0.392841
997 1 0.948412 0.835267 0.0352664
701 1 0.573669 0.161622 0.418658
615 1 0.546663 0.0249318 0.427089
277 1 0.606109 0.103831 0.305895
450 1 0.98346 0.0726622 0.254227
1844 1 0.58959 0.0938582 0.420937
1774 1 0.812937 0.0599412 0.227377
1704 1 0.626592 0.00127237 0.420714
514 1 0.646096 0.93604 0.458931
894 1 0.696061 0.128248 0.290369
1583 1 0.763712 0.0421524 0.345474
1845 1 0.814762 0.0261443 0.445364
1391 1 0.700563 0.0315024 0.410348
60 1 0.769323 0.124436 0.37777
1311 1 0.968145 0.981787 0.37456
1129 1 0.944291 0.525296 0.421183
725 1 0.819301 0.126332 0.435446
297 1 0.758833 0.98107 0.409456
661 1 0.892123 0.109003 0.416392
499 1 0.84589 0.208101 0.46588
921 1 0.593005 0.208282 0.232559
187 1 0.955642 0.0306012 0.310426
969 1 0.97334 0.139251 0.274356
133 1 0.899605 0.0605353 0.355395
288 1 0.844026 0.121978 0.346294
1073 1 0.82929 0.0589955 0.390338
1457 1 0.836828 0.0567251 0.309459
385 1 0.608982 0.279327 0.463786
1377 1 0.559197 0.0455904 0.307986
1058 1 0.894985 0.473761 0.307582
1483 1 0.839908 0.35058 0.488314
757 1 0.666526 0.211219 0.22596
726 1 0.989519 0.526758 0.156204
1216 1 0.63527 0.107371 0.481912
672 1 0.559813 0.152523 0.327803
75 1 0.7767 0.119238 0.304917
140 1 0.713142 0.17789 0.386389
202 1 0.759242 0.178239 0.462795
20 1 0.738518 0.269055 0.300465
1745 1 0.63647 0.134644 0.386119
1874 1 0.68184 0.194621 0.446766
1944 1 0.758112 0.822032 0.354109
897 1 0.617973 0.224649 0.402089
1951 1 0.672125 0.196767 0.300604
518 1 0.679529 0.633852 0.431977
1020 1 0.783685 0.201855 0.334294
1417 1 0.872047 0.246547 0.236296
971 1 0.559029 0.990073 0.498105
1079 1 0.872231 0.211063 0.392315
1327 1 0.923007 0.158826 0.330882
1351 1 0.553394 0.586735 0.389225
873 1 0.956917 0.221131 0.372883
2021 1 0.620709 0.35721 0.400687
1085 1 0.605427 0.308866 0.346442
525 1 0.977249 0.849177 0.409493
1846 1 0.558793 0.280039 0.414517
1205 1 0.658015 0.285588 0.411361
56 1 0.545436 0.382838 0.364906
1492 1 0.957152 0.910989 0.008985
194 1 0.754745 0.385706 0.469696
1415 1 0.583501 0.447557 0.389939
157 1 0.790339 0.218138 0.399061
1075 1 0.784806 0.322401 0.458354
1799 1 0.903866 0.270639 0.333938
1246 1 0.719062 0.334593 0.408994
1420 1 0.617336 0.870751 0.0772888
1195 1 0.993058 0.125476 0.192956
1604 1 0.819903 0.366441 0.373319
1847 1 0.840761 0.275252 0.298991
1348 1 0.713481 0.245407 0.38412
503 1 0.827295 0.35059 0.287503
1908 1 0.544761 0.531084 0.0312845
1008 1 0.925441 0.374874 0.416061
1122 1 0.847866 0.303382 0.391989
981 1 0.852684 0.406366 0.432245
1277 1 0.900497 0.346213 0.35327
1023 1 0.949989 0.291749 0.403792
186 1 0.579015 0.890108 0.2969
9 1 0.989825 0.0587416 0.185859
1881 1 0.567372 0.577904 0.477332
1945 1 0.612967 0.377545 0.318395
19 1 0.858115 0.626045 0.453652
24 1 0.643842 0.48087 0.438351
801 1 0.696961 0.455778 0.0221039
1957 1 0.969461 0.472608 0.303597
1182 1 0.597837 0.460426 0.323033
772 1 0.848883 0.41003 0.000597057
1389 1 0.904323 0.104772 0.283743
149 1 0.757959 0.520588 0.318967
567 1 0.707014 0.490616 0.474921
94 1 0.751754 0.398348 0.300677
1577 1 0.919451 0.18935 0.443316
1136 1 0.682951 0.41379 0.470159
1102 1 0.738536 0.446979 0.420343
1100 1 0.678144 0.514516 0.375864
1161 1 0.785968 0.576237 0.265579
218 1 0.998793 0.43048 0.432103
1504 1 0.884966 0.419352 0.358326
308 1 0.797208 0.446521 0.373372
1719 1 0.708415 0.427354 0.349652
13 1 0.952822 0.45506 0.376846
2044 1 0.979952 0.458904 0.129954
1225 1 0.529631 0.649577 0.436869
269 1 0.585756 0.613011 0.319
488 1 0.59624 0.725581 0.47045
249 1 0.569619 0.613376 0.237156
185 1 0.53653 0.751595 0.387221
2007 1 0.929597 0.547528 0.0486958
611 1 0.518123 0.453212 0.363184
1164 1 0.538066 0.281598 0.32726
1466 1 0.714413 0.577566 0.339211
1591 1 0.78311 0.605927 0.348036
2024 1 0.612399 0.589059 0.426243
1227 1 0.785853 0.616903 0.421597
987 1 0.754057 0.873574 0.297948
438 1 0.664659 0.479126 0.311139
536 1 0.594102 0.654971 0.386922
1438 1 0.736319 0.652042 0.370337
305 1 0.767951 0.921854 0.358858
55 1 0.790508 0.531999 0.38808
455 1 0.778551 0.73272 0.334764
387 1 0.84794 0.573293 0.400015
1658 1 0.857245 0.681103 0.396023
2025 1 0.846673 0.523556 0.330135
1903 1 0.556963 0.335697 0.467889
769 1 0.865348 0.52815 0.25677
1555 1 0.56998 0.9754 0.35221
1217 1 0.769865 1.00046 0.0171721
1199 1 0.582723 0.823145 0.400601
684 1 0.665908 0.72426 0.445235
530 1 0.651654 0.86171 0.418028
407 1 0.68105 0.813271 0.370755
336 1 0.906106 0.255006 0.0109724
652 1 0.637723 0.536208 0.495448
966 1 0.641041 0.741741 0.37974
485 1 0.810483 0.809474 0.453434
691 1 0.642151 0.758477 0.284791
1818 1 0.967165 0.120962 0.374364
132 1 0.544524 0.889477 0.389076
748 1 0.691522 0.875859 0.255753
580 1 0.905984 0.776084 0.351192
1527 1 0.937142 0.790691 0.454782
983 1 0.897697 0.737762 0.428479
1287 1 0.984671 0.68515 0.196011
1802 1 0.557078 0.545136 0.137054
464 1 0.95008 0.698742 0.383232
1320 1 0.980792 0.505829 0.0814321
313 1 0.778798 0.984905 0.489339
68 1 0.787119 0.664241 0.488258
320 1 0.506949 0.603143 0.340285
475 1 0.503216 0.989442 0.303678
87 1 0.506308 0.240983 0.0429711
1537 1 0.820906 0.271598 0.485415
1968 1 0.848413 0.695746 0.464899
934 1 0.928535 0.624616 0.486823
1750 1 0.501185 0.868076 0.286347
1203 1 0.987649 0.308985 0.211846
1855 1 0.999592 0.716513 0.0484309
690 1 0.786787 0.591678 0.494485
490 1 0.64193 0.87181 0.00645147
534 1 0.656139 0.86345 0.489733
363 1 0.707501 0.096157 0.00324741
636 1 0.783269 0.111877 0.00122307
235 1 0.674544 0.243754 0.0025731
1496 1 0.678924 0.804753 0.00344766
2023 1 0.562585 0.808006 0.00419732
1118 1 0.97649 0.368943 0.497008
255 1 0.709934 0.0221529 0.520141
901 1 0.992985 0.281856 0.720039
2000 1 0.616722 0.0735783 0.617502
151 1 0.548779 0.0961107 0.577003
776 1 0.506696 0.332026 0.518196
65 1 0.589236 0.961261 0.969296
1546 1 0.589506 0.502258 0.783259
231 1 0.600774 -0.000109502 0.585759
1622 1 0.93129 0.694205 0.967649
1821 1 0.536314 0.103571 0.738668
1711 1 0.828369 0.0355208 0.707864
1875 1 0.82798 0.945965 0.626887
594 1 0.537408 0.224099 0.745659
834 1 0.824769 0.0889735 0.545709
330 1 0.882767 0.787828 0.501943
1324 1 0.829699 0.0156983 0.632522
656 1 0.772702 0.0170681 0.560251
1514 1 0.92893 0.209851 0.512136
282 1 0.695855 0.90439 0.675726
1052 1 0.536248 0.610396 0.991318
559 1 0.943189 0.0685958 0.528181
1509 1 0.921563 0.0633661 0.60852
376 1 0.930532 0.990426 0.588795
548 1 0.529561 0.883725 0.899585
1029 1 0.867162 0.111289 0.598202
110 1 0.960663 0.0118347 0.655494
574 1 0.650495 0.328161 0.997601
1210 1 0.72195 0.181541 0.527644
1813 1 0.852677 0.10418 0.929937
107 1 0.549506 0.762219 0.812051
1111 1 0.950723 0.941693 0.650227
1491 1 0.561556 0.278873 0.521389
1451 1 0.648729 0.243612 0.554851
770 1 0.669444 0.175499 0.57856
1543 1 0.568792 0.111631 0.654432
1785 1 0.793927 0.14923 0.523867
489 1 0.665353 0.10101 0.566802
712 1 0.516053 0.968465 0.641574
1521 1 0.741799 0.078842 0.563529
1061 1 0.751383 0.182719 0.674825
1140 1 0.849205 0.352155 0.562346
1539 1 0.792542 0.219573 0.551612
1852 1 0.994042 0.501544 0.840841
1038 1 0.730662 0.135534 0.60586
767 1 0.791528 0.31524 0.653271
596 1 0.989783 0.568373 0.501278
1971 1 0.79572 0.65266 0.984705
1370 1 0.867284 0.200255 0.559502
287 1 0.868609 0.286268 0.539485
618 1 0.923649 0.151633 0.554059
1095 1 0.921491 0.123417 0.66053
1416 1 0.972351 0.188361 0.647839
799 1 0.812263 0.235019 0.618174
1249 1 0.525556 0.246297 0.971472
1208 1 0.979595 0.384524 0.972544
1236 1 0.53529 0.408159 0.514275
995 1 0.60444 0.38361 0.577385
393 1 0.610471 0.309053 0.723478
2040 1 0.601326 0.33157 0.637833
1434 1 0.591864 0.47013 0.588068
411 1 0.597378 0.264554 0.605074
1648 1 0.805507 0.290576 0.567031
1396 1 0.751596 0.334045 0.794522
367 1 0.775769 0.468461 0.629167
37 1 0.736838 0.268714 0.541219
1710 1 0.738087 0.209784 0.595759
1784 1 0.99906 0.438853 0.510521
876 1 0.944489 0.214819 0.586751
405 1 0.512008 0.714511 0.674703
1722 1 0.942325 0.344043 0.554806
1805 1 0.867198 0.412953 0.521428
1296 1 0.935181 0.438284 0.546511
1608 1 0.540153 0.0221844 0.703235
1037 1 0.885071 0.280247 0.63014
1876 1 0.827705 0.426101 0.584259
344 1 0.66445 0.419562 0.544611
754 1 0.622071 0.478471 0.664028
568 1 0.957558 0.342371 0.661824
633 1 0.586075 0.617016 0.570039
1922 1 0.668034 0.698666 0.570552
972 1 0.970895 0.725819 0.757654
1159 1 0.777176 0.345374 0.531219
1780 1 0.612323 0.453555 0.509912
1907 1 0.648244 0.429533 0.615087
440 1 0.972122 0.966987 0.95805
1245 1 0.634358 0.527366 0.610943
1231 1 0.700588 0.310613 0.642252
692 1 0.675189 0.496579 0.553206
2002 1 0.683038 0.564352 0.657395
364 1 0.787523 0.383833 0.62218
1224 1 0.715246 0.407868 0.636443
516 1 0.738325 0.456028 0.71499
1488 1 0.732004 0.531993 0.581552
1421 1 0.977716 0.497983 0.567246
816 1 0.795107 0.487504 0.551064
1965 1 0.955371 0.598625 0.640561
223 1 0.542052 0.0393272 0.626524
1336 1 0.997712 0.592661 0.575134
584 1 0.506876 0.554212 0.550606
322 1 0.883168 0.566973 0.659453
1406 1 0.829209 0.525114 0.624174
910 1 0.687653 0.642935 0.619341
542 1 0.524046 0.898766 0.744109
959 1 0.923343 0.565577 0.555497
481 1 0.977431 0.190884 0.934392
1781 1 0.695798 0.714256 0.65547
705 1 0.521862 0.565413 0.682936
1524 1 0.783512 0.453505 0.980827
1499 1 0.759172 0.549466 0.652786
641 1 0.553382 0.0751622 0.502491
1755 1 0.824913 0.496178 0.701741
522 1 0.723508 0.569952 0.503418
696 1 0.667127 0.579379 0.550754
22 1 0.944012 0.173473 0.714089
1801 1 0.719477 0.699075 0.995671
484 1 0.868433 0.499177 0.558436
258 1 0.720656 0.115937 0.508573
865 1 0.856359 0.789665 0.670556
1265 1 0.775458 0.640011 0.669257
290 1 0.852715 0.638096 0.611832
2010 1 0.886184 0.683791 0.667139
368 1 0.538717 0.189836 0.541339
382 1 0.544959 0.784296 0.55808
200 1 0.510068 0.716932 0.533986
53 1 0.54003 0.855698 0.54228
114 1 0.588039 0.83969 0.595393
1442 1 0.646954 0.776976 0.551641
1094 1 0.768725 0.733506 0.513615
1814 1 0.750327 0.690204 0.582891
1040 1 0.502827 0.62301 0.926604
831 1 0.712979 0.757713 0.594073
113 1 0.715506 0.658404 0.526427
781 1 0.751976 0.797406 0.549468
1626 1 0.713351 0.594346 0.745035
1728 1 0.79912 0.813122 0.616025
383 1 0.853715 0.847178 0.589525
1649 1 0.962188 0.206491 0.781827
1990 1 0.629137 0.308431 0.527931
1349 1 0.986244 0.774958 0.614397
1712 1 0.560774 0.412549 0.962014
833 1 0.781954 0.912018 0.502948
369 1 0.774557 0.923687 0.956463
904 1 0.926292 0.739427 0.558469
571 1 0.596671 0.700587 0.54728
1798 1 0.63319 0.871661 0.650233
79 1 0.543722 0.930452 0.566416
1551 1 0.526835 0.500492 0.737618
1536 1 0.635288 0.94493 0.627024
1392 1 0.610171 0.01787 0.666262
1494 1 0.908025 0.848078 0.656807
375 1 0.678521 0.0131526 0.588456
1117 1 0.706641 0.944445 0.603624
49 1 0.71411 0.940142 0.520363
314 1 0.751653 0.884103 0.635747
1455 1 0.783331 0.866067 0.573559
178 1 0.777196 0.787592 0.693067
546 1 0.855565 0.805232 0.886132
1665 1 0.779474 0.943284 0.565686
432 1 0.977505 0.0257273 0.773757
306 1 0.963728 0.855942 0.584494
1081 1 0.630273 0.969502 0.526382
1989 1 0.903029 0.913369 0.602658
1552 1 0.856334 0.946727 0.562024
1087 1 0.910825 0.90531 0.525092
102 1 0.789007 0.715702 0.942431
583 1 0.708582 0.869226 0.546837
853 1 0.752266 0.0551646 0.816273
72 1 0.555116 0.00987973 0.795804
1797 1 0.634935 0.0516718 0.74213
555 1 0.607553 0.116905 0.723468
703 1 0.856148 0.310511 0.96128
318 1 0.608886 0.0427306 0.847125
1775 1 0.689621 0.0670106 0.633711
1695 1 0.675682 0.0934306 0.69921
1815 1 0.779719 0.108816 0.653575
348 1 0.848101 0.0164711 0.562767
1089 1 0.769883 0.987718 0.774537
533 1 0.834655 0.0664571 0.800127
12 1 0.814609 0.12574 0.763652
328 1 0.800996 0.158905 0.595613
715 1 0.903125 0.0554367 0.743167
184 1 0.534574 0.502833 0.847355
746 1 0.886932 0.976018 0.65824
738 1 0.850091 0.984224 0.835371
1454 1 0.893225 0.0445054 0.671536
1043 1 0.994212 0.046145 0.851463
619 1 0.938415 0.986605 0.857711
1891 1 0.681297 0.176376 0.710208
593 1 0.579423 0.275988 0.792885
1921 1 0.635865 0.142698 0.641797
1685 1 0.664746 0.103347 0.844827
100 1 0.628549 0.2234 0.75415
1936 1 0.602498 0.250532 0.681465
1424 1 0.978 0.560258 0.785688
351 1 0.574064 0.187767 0.799992
239 1 0.826404 0.189066 0.686864
826 1 0.89358 0.196743 0.648405
1032 1 0.680576 0.16874 0.78785
161 1 0.710013 0.246699 0.769227
139 1 0.875445 0.240547 0.836934
612 1 0.847407 0.181994 0.808077
1508 1 0.743364 0.149 0.738319
875 1 0.584992 0.817183 0.773223
700 1 0.96793 0.775417 0.939817
1167 1 0.967564 0.120135 0.838398
446 1 0.864608 0.11188 0.697921
1637 1 0.934646 0.257876 0.673129
1518 1 0.91679 0.280049 0.780913
1295 1 0.850762 0.285737 0.73698
609 1 0.893162 0.141121 0.759615
1332 1 0.546227 0.347304 0.754322
918 1 0.783019 0.23925 0.73489
869 1 0.577967 0.521613 0.547987
1827 1 0.892591 0.535672 0.960064
2048 1 0.532278 0.29235 0.676216
950 1 0.727069 0.382148 0.717336
1571 1 0.750829 0.25306 0.6702
1247 1 0.789592 0.396598 0.760024
1697 1 0.65578 0.383089 0.764419
1013 1 0.720996 0.404272 0.79751
1579 1 0.734522 0.367847 0.861754
338 1 0.672633 0.249904 0.702492
1042 1 0.891528 0.353015 0.625954
1443 1 0.724374 0.305106 0.724288
1299 1 0.876611 0.384907 0.686163
2039 1 0.860571 0.44369 0.649044
43 1 0.798181 0.329112 0.724653
1716 1 0.810298 0.399982 0.68883
1053 1 0.910469 0.309187 0.710859
993 1 0.89809 0.517542 0.801072
1730 1 0.809445 0.730814 0.867042
1612 1 0.880434 0.221584 0.746581
1382 1 0.794448 0.244416 0.816688
205 1 0.943787 0.446414 0.972354
678 1 0.66202 0.393968 0.687505
310 1 0.583571 0.423103 0.795926
1909 1 0.591074 0.385349 0.708652
472 1 0.558071 0.392347 0.637791
825 1 0.559967 0.462519 0.691765
1355 1 0.702 0.486652 0.645644
704 1 0.561205 0.564704 0.75186
1688 1 0.627573 0.44666 0.739931
1004 1 0.625184 0.506321 0.856521
122 1 0.667574 0.46571 0.808986
426 1 0.701699 0.430986 0.869401
1384 1 0.86238 0.556786 0.729119
1456 1 0.739743 0.663932 0.795278
1130 1 0.936082 0.644236 0.55076
103 1 0.758265 0.49587 0.81031
731 1 0.711689 0.513335 0.876816
716 1 0.651159 0.530692 0.731579
1505 1 0.789838 0.557148 0.752926
905 1 0.699383 0.544125 0.808281
166 1 0.722467 0.469056 0.94342
1254 1 0.906789 0.462266 0.731648
1562 1 0.536912 0.342984 0.588097
787 1 0.850813 0.423347 0.740662
1372 1 0.935302 0.407656 0.771872
1638 1 0.937936 0.535027 0.702439
803 1 0.553674 0.520615 0.62285
1593 1 0.869619 0.677357 0.784166
316 1 0.995925 0.433851 0.704024
41 1 0.907256 0.589806 0.781686
565 1 0.998413 0.217883 0.523765
707 1 0.591162 0.549067 0.682902
1531 1 0.621717 0.684503 0.622608
250 1 0.530075 0.796696 0.725262
138 1 0.616713 0.611327 0.647756
604 1 0.586379 0.688114 0.712566
28 1 0.565165 0.639344 0.791279
558 1 0.629577 0.611282 0.731813
331 1 0.598121 0.68557 0.838659
915 1 0.636668 0.742362 0.70533
1960 1 0.721922 0.523026 0.738644
1201 1 0.800798 0.665576 0.758132
162 1 0.78486 0.580142 0.587436
142 1 0.692864 0.643042 0.695581
735 1 0.861513 0.627503 0.727755
1641 1 0.850781 0.725391 0.611527
21 1 0.585822 0.137236 0.527897
988 1 0.927953 0.718085 0.814091
1507 1 0.514638 0.134635 0.813346
1430 1 0.81215 0.721418 0.716856
1673 1 0.833678 0.601397 0.799886
1045 1 0.939962 0.654899 0.778113
144 1 0.944187 0.6079 0.721179
1017 1 0.893686 0.860534 0.740604
1783 1 0.529124 0.784062 0.651919
824 1 0.584356 0.815642 0.891031
1964 1 0.512288 0.580256 0.806457
758 1 0.654603 0.829889 0.714252
1835 1 0.668929 0.78753 0.788207
501 1 0.603025 0.858301 0.828644
119 1 0.626608 0.792436 0.634806
1019 1 0.612216 0.503697 0.995155
1321 1 0.577537 0.742211 0.609739
470 1 0.665877 0.722515 0.830776
1184 1 0.759487 0.724749 0.640716
718 1 0.829816 0.737386 0.787896
1523 1 0.79486 0.840118 0.905329
502 1 0.735252 0.708229 0.722833
1884 1 0.637999 0.716155 0.766965
1714 1 0.757985 0.74882 0.797138
449 1 0.668725 0.82243 0.861149
126 1 0.558071 0.668766 0.644164
1834 1 0.891376 0.786716 0.816781
1315 1 0.919982 0.757198 0.660434
1645 1 0.928028 0.7937 0.747088
1285 1 0.887165 0.731705 0.741747
466 1 0.574768 0.94517 0.781913
1271 1 0.855532 0.856447 0.831448
404 1 0.739242 0.0678984 0.744451
1572 1 0.629119 0.895499 0.565275
461 1 0.701125 0.989085 0.679911
1394 1 0.635763 0.982036 0.812844
182 1 0.530186 0.411229 0.89659
956 1 0.642292 0.648573 0.977988
908 1 0.572981 0.844718 0.700148
108 1 0.617728 0.979524 0.731815
505 1 0.627107 0.899767 0.737048
286 1 0.840943 0.804376 0.741515
1055 1 0.75676 0.0430244 0.671168
1789 1 0.680751 0.945972 0.734394
413 1 0.770291 0.960339 0.683934
541 1 0.71012 0.804041 0.653486
1147 1 0.764398 0.991848 0.866539
1211 1 0.835161 0.903657 0.766692
886 1 0.715411 0.776324 0.736305
1560 1 0.747644 0.878233 0.732499
1181 1 0.725888 0.908488 0.805293
2046 1 0.908329 0.958294 0.786978
610 1 0.867743 0.870274 0.925626
1462 1 0.876865 0.91266 0.69444
1545 1 0.936008 0.985047 0.718559
1109 1 0.807438 0.875309 0.689602
1162 1 0.856804 0.980863 0.742338
168 1 0.887616 0.924102 0.854128
766 1 0.682994 0.821213 0.592484
1777 1 0.919152 0.35223 0.827132
627 1 0.546162 -0.000961171 0.891311
906 1 0.552233 0.756233 0.891468
1441 1 0.602202 0.0355437 0.950687
1458 1 0.618625 0.102559 0.903575
173 1 0.593897 0.106837 0.800287
247 1 0.982554 0.665327 0.715388
1342 1 0.944157 0.277361 0.520998
1892 1 0.95714 0.890188 0.696782
1398 1 0.515297 0.459284 0.566939
550 1 0.937152 0.100175 0.962859
942 1 0.891294 0.388227 0.949991
1338 1 0.736963 0.0388 0.92186
121 1 0.666696 0.0388695 0.889219
957 1 0.510826 0.855354 0.978377
398 1 0.862394 0.0477925 0.888665
775 1 0.824646 0.116333 0.85683
165 1 0.846971 0.00631648 0.95813
85 1 0.930915 0.0221454 0.947512
480 1 0.76223 0.799723 1.0005
818 1 0.900695 0.0825781 0.830104
1479 1 0.571648 0.1622 0.947832
280 1 0.703808 0.335344 0.517879
1385 1 0.534311 0.17855 0.869505
537 1 0.862397 0.212992 0.909804
729 1 0.897 0.94083 0.974233
889 1 0.885862 0.972005 0.908543
1237 1 0.664903 0.224954 0.916006
591 1 0.535624 0.324383 0.936128
1585 1 0.575028 0.229282 0.898052
1778 1 0.65843 0.230298 0.832518
1669 1 0.729427 0.182517 0.929843
1952 1 0.61516 0.172803 0.871415
588 1 0.705243 0.104548 0.912739
1740 1 0.655518 0.15526 0.942387
856 1 0.779072 0.129181 0.918294
137 1 0.782172 0.1935 0.869671
838 1 0.827305 0.166348 0.964581
486 1 0.686952 0.164785 0.872138
1522 1 0.750818 0.419083 0.550242
1614 1 0.709711 0.312823 0.952065
1367 1 0.812839 0.256709 0.915882
97 1 0.934111 0.0782649 0.89427
549 1 0.914738 0.252673 0.929491
423 1 0.94703 0.277753 0.852033
1573 1 0.618382 0.36947 0.939776
1639 1 0.928339 0.177723 0.876272
1041 1 0.513993 0.392127 0.696843
1258 1 0.576282 0.360035 0.872583
91 1 0.975308 0.483769 0.759221
792 1 0.527999 0.334507 0.821188
900 1 0.676287 0.307774 0.809299
1503 1 0.902558 0.177564 0.961667
189 1 0.61236 0.340023 0.801775
1828 1 0.63791 0.631153 0.509249
677 1 0.955534 0.285494 0.60986
1241 1 0.656867 0.380316 0.850634
671 1 0.692112 0.298182 0.880292
2012 1 0.776636 0.326602 0.911122
448 1 0.612523 0.295865 0.914746
1431 1 0.752436 0.187522 0.807769
861 1 0.888181 0.416073 0.840152
698 1 0.836399 0.360104 0.866116
1961 1 0.856719 0.327641 0.802504
1263 1 0.984313 0.327169 0.917036
83 1 0.726891 0.250823 0.845192
1761 1 0.904911 0.995297 0.522011
547 1 0.820424 0.392965 0.923471
428 1 0.970408 0.425651 0.849775
1717 1 0.668926 0.341361 0.583511
1732 1 0.691065 0.924349 0.989304
1636 1 0.963485 0.106759 0.724815
1526 1 0.607012 0.422756 0.881523
1270 1 0.514767 0.654142 0.739629
154 1 0.654905 0.458615 0.925433
431 1 0.593735 0.624969 0.90553
1473 1 0.538224 0.57153 0.880775
1982 1 0.546701 0.0754418 0.852342
1787 1 0.592449 0.49867 0.925395
361 1 0.720524 0.552357 0.939203
1822 1 0.790395 0.439654 0.829759
657 1 0.686044 0.407981 0.948912
353 1 0.752957 0.382688 0.967634
1700 1 0.779187 0.535987 0.882364
67 1 0.856197 0.473157 0.867268
1627 1 0.762935 0.59571 0.823386
1769 1 0.727026 0.617769 0.973216
421 1 0.828538 0.525238 0.799447
531 1 0.865225 0.721239 0.919501
1223 1 0.91376 0.478567 0.91487
1595 1 0.796079 0.464645 0.912999
1934 1 0.569579 0.905108 0.625224
844 1 0.874543 0.542168 0.891825
554 1 0.934093 0.550005 0.85143
1474 1 0.517413 0.601368 0.605687
424 1 0.503413 0.961557 0.729219
649 1 0.533135 0.682932 0.888265
773 1 0.612137 0.696317 0.917894
914 1 0.972268 0.271857 0.963843
222 1 0.925156 0.839276 0.87742
1872 1 0.59461 0.5677 0.826372
1729 1 0.648286 0.581008 0.959415
727 1 0.80813 0.564874 0.953946
129 1 0.768211 0.264395 0.977634
403 1 0.680917 0.629351 0.918587
327 1 0.651588 0.605402 0.819791
1617 1 0.747503 0.698095 0.89176
626 1 0.649436 0.57067 0.887552
427 1 0.525522 0.637028 0.522668
204 1 0.57306 0.881105 0.969436
1619 1 0.792019 0.639513 0.913115
1586 1 0.690862 0.658474 0.858894
329 1 0.91367 0.605503 0.90516
50 1 0.835356 0.59331 0.881277
1409 1 0.867256 0.640122 0.962687
1303 1 0.976163 0.708394 0.912158
2015 1 0.869218 0.659999 0.859084
780 1 0.961212 0.617497 0.969849
1067 1 0.617443 0.897149 0.91036
1584 1 0.628037 0.752514 0.986429
84 1 0.542012 0.69467 0.978344
1364 1 0.975859 0.930975 0.561209
907 1 0.627458 0.762059 0.873665
1413 1 0.648283 0.821586 0.940561
996 1 0.819807 0.852915 0.984342
1048 1 0.803865 0.799137 0.825838
483 1 0.698252 0.732428 0.937635
540 1 0.736892 0.778775 0.892898
116 1 0.886889 0.785019 0.952608
817 1 0.804629 0.656105 0.838576
1007 1 0.986904 0.788814 0.806131
951 1 0.941982 0.766711 0.865454
714 1 0.954419 0.854608 0.806099
454 1 0.9778 0.947469 0.774691
1363 1 0.979687 0.638257 0.900921
679 1 0.543121 0.535636 0.952185
1137 1 0.740687 0.125677 0.848949
724 1 0.571883 0.929712 0.860191
1606 1 0.660111 -0.000474458 0.991586
1502 1 0.524856 0.175622 0.634703
109 1 0.719889 0.879861 0.905838
150 1 0.517676 0.868645 0.816554
1923 1 0.624351 0.977208 0.90139
1282 1 0.964192 0.553856 0.911778
1819 1 0.70185 0.962962 0.925249
1646 1 0.817845 0.930724 0.887279
358 1 0.735505 0.841355 0.830142
1810 1 0.938697 0.86085 0.955276
1977 1 0.587696 0.174745 0.591133
268 1 0.567427 0.945931 0.684473
796 1 0.66582 0.919003 0.860095
321 1 0.944501 0.9132 0.89788
1074 1 0.504879 0.0883335 0.919375
1060 1 0.992362 0.158935 0.578609
70 1 0.997035 0.19234 0.861497
994 1 0.985188 0.344221 0.762687
1393 1 0.925047 0.480255 0.618248
3 1 0.980234 0.984184 0.50437
1088 1 0.95442 0.843816 0.512026
1135 1 0.980428 0.495837 0.660357
1974 1 0.986764 0.81967 0.685055
513 1 0.504606 0.844251 0.605971
903 1 0.993099 0.472746 0.92664
953 1 0.618309 0.0494925 0.533552
1339 1 0.508691 0.891362 0.663955
496 1 0.97192 0.404585 0.603122
51 1 0.49947 0.446633 0.636693
1877 1 0.919521 0.328942 0.98449
228 1 0.733659 0.512667 0.997843
| [
"[email protected]"
] | |
26a3eb6017784f83e4890b5450ca6e887d90b6f5 | 3b9b4049a8e7d38b49e07bb752780b2f1d792851 | /src/third_party/skia/tools/dm_flags.py | 9672849ee344fc28ca0dbce6a1404595890e3fd0 | [
"BSD-3-Clause",
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-public-domain",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT"
] | permissive | webosce/chromium53 | f8e745e91363586aee9620c609aacf15b3261540 | 9171447efcf0bb393d41d1dc877c7c13c46d8e38 | refs/heads/webosce | 2020-03-26T23:08:14.416858 | 2018-08-23T08:35:17 | 2018-09-20T14:25:18 | 145,513,343 | 0 | 2 | Apache-2.0 | 2019-08-21T22:44:55 | 2018-08-21T05:52:31 | null | UTF-8 | Python | false | false | 11,724 | py | #
# Copyright 2015 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
#!/usr/bin/env python
usage = '''
Write extra flags to outfile for DM based on the bot name:
$ python dm_flags.py outfile Test-Ubuntu-GCC-GCE-CPU-AVX2-x86-Debug
Or run self-tests:
$ python dm_flags.py test
'''
import inspect
import json
import os
import sys
def lineno():
caller = inspect.stack()[1] # Up one level to our caller.
return inspect.getframeinfo(caller[0]).lineno
cov_start = lineno()+1 # We care about coverage starting just past this def.
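# Note: self_test() below uses cov_start/cov_end to restrict the coverage
# report to the body of get_args(), so any branch of get_args() that the
# bot-name test cases leave unexercised makes the self-test fail.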
def get_args(bot):
args = []
# 32-bit desktop bots tend to run out of memory, because they have relatively
# far more cores than RAM (e.g. 32 cores, 3G RAM). Hold them back a bit.
if '-x86-' in bot and not 'NexusPlayer' in bot:
args.extend('--threads 4'.split(' '))
# These are the canonical configs that we would ideally run on all bots. We
# may opt out or substitute some below for specific bots
configs = ['565', '8888', 'gpu', 'gpusrgb', 'pdf']
# Add in either msaa4 or msaa16 to the canonical set of configs to run
if 'Android' in bot or 'iOS' in bot:
configs.append('msaa4')
else:
configs.append('msaa16')
  # The S4 crashes and the NP produces a long error stream when we run with
  # MSAA. The Tegra2 and Tegra3 just don't support it. There is no record of
  # why we're not running msaa on iOS; it probably started with the gpu config
  # and we just haven't tried.
if ('GalaxyS4' in bot or
'NexusPlayer' in bot or
'Tegra3' in bot or
'iOS' in bot):
configs = [x for x in configs if 'msaa' not in x]
# Runs out of memory on Android bots and Daisy. Everyone else seems fine.
if 'Android' in bot or 'Daisy' in bot:
configs.remove('pdf')
if '-GCE-' in bot:
configs.extend(['f16', 'srgb']) # Gamma-correct formats.
configs.extend(['sp-8888', '2ndpic-8888']) # Test niche uses of SkPicture.
if '-TSAN' not in bot:
if ('TegraK1' in bot or
'GTX550Ti' in bot or
'GTX660' in bot or
'GT610' in bot):
if 'Android' in bot:
configs.append('nvprdit4')
else:
configs.append('nvprdit16')
  # We want to test the OpenGL config, not the GLES config, on the X1
if 'TegraX1' in bot:
configs = [x.replace('gpu', 'gl') for x in configs]
configs = [x.replace('msaa', 'glmsaa') for x in configs]
# NP is running out of RAM when we run all these modes. skia:3255
if 'NexusPlayer' not in bot:
configs.extend(mode + '-8888' for mode in
['serialize', 'tiles_rt', 'pic'])
if 'ANGLE' in bot:
configs.append('angle')
  # We want to run gpudft on at least the Mali 400
if 'GalaxyS3' in bot:
configs.append('gpudft')
# CommandBuffer bot *only* runs the command_buffer config.
if 'CommandBuffer' in bot:
configs = ['commandbuffer']
# Vulkan bot *only* runs the vk config.
if 'Vulkan' in bot:
configs = ['vk']
args.append('--config')
args.extend(configs)
# Run tests, gms, and image decoding tests everywhere.
args.extend('--src tests gm image'.split(' '))
if 'GalaxyS' in bot:
args.extend(('--threads', '0'))
blacklist = []
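  # Blacklist entries are appended as (config, source type, options, name)
  # quadruples; '_' appears to act as a wildcard for a field. For example,
  # 'f16 _ _ dstreadshuffle' skips the dstreadshuffle test on the f16 config
  # for every source type.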
# TODO: ???
blacklist.extend('f16 _ _ dstreadshuffle'.split(' '))
blacklist.extend('f16 image _ _'.split(' '))
blacklist.extend('srgb image _ _'.split(' '))
blacklist.extend('gpusrgb image _ _'.split(' '))
  # Certain gm's on the Win7 gpu and pdf configs never finish, keeping the
  # test runner going forever.
if 'Win7' in bot:
blacklist.extend('msaa16 gm _ colorwheelnative'.split(' '))
blacklist.extend('pdf gm _ fontmgr_iter_factory'.split(' '))
if 'Valgrind' in bot:
# These take 18+ hours to run.
blacklist.extend('pdf gm _ fontmgr_iter'.split(' '))
blacklist.extend('pdf _ _ PANO_20121023_214540.jpg'.split(' '))
blacklist.extend('pdf skp _ worldjournal'.split(' '))
blacklist.extend('pdf skp _ desk_baidu.skp'.split(' '))
blacklist.extend('pdf skp _ desk_wikipedia.skp'.split(' '))
if 'iOS' in bot:
blacklist.extend('gpu skp _ _ msaa skp _ _'.split(' '))
blacklist.extend('msaa16 gm _ tilemodesProcess'.split(' '))
if 'Mac' in bot or 'iOS' in bot:
# CG fails on questionable bmps
blacklist.extend('_ image gen_platf rgba32abf.bmp'.split(' '))
blacklist.extend('_ image gen_platf rgb24prof.bmp'.split(' '))
blacklist.extend('_ image gen_platf rgb24lprof.bmp'.split(' '))
blacklist.extend('_ image gen_platf 8bpp-pixeldata-cropped.bmp'.split(' '))
blacklist.extend('_ image gen_platf 4bpp-pixeldata-cropped.bmp'.split(' '))
blacklist.extend('_ image gen_platf 32bpp-pixeldata-cropped.bmp'.split(' '))
blacklist.extend('_ image gen_platf 24bpp-pixeldata-cropped.bmp'.split(' '))
# CG has unpredictable behavior on this questionable gif
# It's probably using uninitialized memory
blacklist.extend('_ image gen_platf frame_larger_than_image.gif'.split(' '))
# WIC fails on questionable bmps
if 'Win' in bot:
blacklist.extend('_ image gen_platf rle8-height-negative.bmp'.split(' '))
blacklist.extend('_ image gen_platf rle4-height-negative.bmp'.split(' '))
blacklist.extend('_ image gen_platf pal8os2v2.bmp'.split(' '))
blacklist.extend('_ image gen_platf pal8os2v2-16.bmp'.split(' '))
blacklist.extend('_ image gen_platf rgba32abf.bmp'.split(' '))
blacklist.extend('_ image gen_platf rgb24prof.bmp'.split(' '))
blacklist.extend('_ image gen_platf rgb24lprof.bmp'.split(' '))
blacklist.extend('_ image gen_platf 8bpp-pixeldata-cropped.bmp'.split(' '))
blacklist.extend('_ image gen_platf 4bpp-pixeldata-cropped.bmp'.split(' '))
blacklist.extend('_ image gen_platf 32bpp-pixeldata-cropped.bmp'.split(' '))
blacklist.extend('_ image gen_platf 24bpp-pixeldata-cropped.bmp'.split(' '))
if 'x86_64' in bot and 'CPU' in bot:
# This GM triggers a SkSmallAllocator assert.
blacklist.extend('_ gm _ composeshader_bitmap'.split(' '))
if 'Android' in bot or 'iOS' in bot:
# This test crashes the N9 (perhaps because of large malloc/frees). It also
# is fairly slow and not platform-specific. So we just disable it on all of
# Android and iOS. skia:5438
blacklist.extend('_ test _ GrShape'.split(' '))
# skia:4095
for test in ['bleed_image',
'c_gms',
'colortype',
'colortype_xfermodes',
'drawfilter',
'fontmgr_bounds_0.75_0',
'fontmgr_bounds_1_-0.25',
'fontmgr_bounds',
'fontmgr_match',
'fontmgr_iter']:
blacklist.extend(['serialize-8888', 'gm', '_', test])
if 'Mac' not in bot:
for test in ['bleed_alpha_image', 'bleed_alpha_image_shader']:
blacklist.extend(['serialize-8888', 'gm', '_', test])
# It looks like we skip these only for out-of-memory concerns.
if 'Win' in bot or 'Android' in bot:
for test in ['verylargebitmap', 'verylarge_picture_image']:
blacklist.extend(['serialize-8888', 'gm', '_', test])
# skia:4769
for test in ['drawfilter']:
blacklist.extend([ 'sp-8888', 'gm', '_', test])
blacklist.extend([ 'pic-8888', 'gm', '_', test])
blacklist.extend(['2ndpic-8888', 'gm', '_', test])
# skia:4703
for test in ['image-cacherator-from-picture',
'image-cacherator-from-raster',
'image-cacherator-from-ctable']:
blacklist.extend([ 'sp-8888', 'gm', '_', test])
blacklist.extend([ 'pic-8888', 'gm', '_', test])
blacklist.extend([ '2ndpic-8888', 'gm', '_', test])
blacklist.extend(['serialize-8888', 'gm', '_', test])
# Extensions for RAW images
r = ["arw", "cr2", "dng", "nef", "nrw", "orf", "raf", "rw2", "pef", "srw",
"ARW", "CR2", "DNG", "NEF", "NRW", "ORF", "RAF", "RW2", "PEF", "SRW"]
# skbug.com/4888
# Blacklist RAW images (and a few large PNGs) on GPU bots
# until we can resolve failures
if 'GPU' in bot:
blacklist.extend('_ image _ interlaced1.png'.split(' '))
blacklist.extend('_ image _ interlaced2.png'.split(' '))
blacklist.extend('_ image _ interlaced3.png'.split(' '))
for raw_ext in r:
blacklist.extend(('_ image _ .%s' % raw_ext).split(' '))
# Large image that overwhelms older Mac bots
if 'MacMini4.1-GPU' in bot:
blacklist.extend('_ image _ abnormal.wbmp'.split(' '))
blacklist.extend(['msaa16', 'gm', '_', 'blurcircles'])
match = []
if 'Valgrind' in bot: # skia:3021
match.append('~Threaded')
if 'GalaxyS3' in bot: # skia:1699
match.append('~WritePixels')
if 'AndroidOne' in bot: # skia:4711
match.append('~WritePixels')
if 'NexusPlayer' in bot:
match.append('~ResourceCache')
if 'GalaxyS4' in bot: # skia:4079
match.append('~imagefiltersclipped')
match.append('~imagefilterscropexpand')
match.append('~scaled_tilemodes_npot')
match.append('~bleed_image') # skia:4367
match.append('~ReadPixels') # skia:4368
if 'ANGLE' in bot and 'Debug' in bot:
match.append('~GLPrograms') # skia:4717
if 'MSAN' in bot:
match.extend(['~Once', '~Shared']) # Not sure what's up with these tests.
if 'TSAN' in bot:
match.extend(['~ReadWriteAlpha']) # Flaky on TSAN-covered on nvidia bots.
if blacklist:
args.append('--blacklist')
args.extend(blacklist)
if match:
args.append('--match')
args.extend(match)
# These bots run out of memory running RAW codec tests. Do not run them in
# parallel
if ('NexusPlayer' in bot or 'Nexus5' in bot or 'Nexus9' in bot
or 'Win8-MSVC-ShuttleB' in bot):
args.append('--noRAW_threading')
return args
cov_end = lineno() # Don't care about code coverage past here.
def self_test():
import coverage # This way the bots don't need coverage.py to be installed.
args = {}
cases = [
'Pretend-iOS-Bot',
'Test-Android-GCC-AndroidOne-GPU-Mali400MP2-Arm7-Release',
'Test-Android-GCC-GalaxyS3-GPU-Mali400-Arm7-Debug',
'Test-Android-GCC-GalaxyS4-GPU-SGX544-Arm7-Release',
'Test-Android-GCC-Nexus7-GPU-Tegra3-Arm7-Release',
'Test-Android-GCC-Nexus9-GPU-TegraK1-Arm64-Debug',
'Test-Android-GCC-NexusPlayer-CPU-SSSE3-x86-Release',
'Test-Android-GCC-NVIDIA_Shield-GPU-TegraX1-Arm64-Release',
'Test-Mac-Clang-MacMini4.1-GPU-GeForce320M-x86_64-Release',
'Test-Mac-Clang-MacMini6.2-GPU-HD4000-x86_64-Debug-CommandBuffer',
'Test-Mac10.8-Clang-MacMini4.1-CPU-SSE4-x86_64-Release',
'Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Debug-MSAN',
'Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-TSAN',
'Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-Valgrind',
'Test-Ubuntu-GCC-ShuttleA-GPU-GTX550Ti-x86_64-Release-Valgrind',
'Test-Win-MSVC-GCE-CPU-AVX2-x86_64-Debug',
'Test-Win10-MSVC-ShuttleA-GPU-GTX660-x86_64-Debug-Vulkan',
'Test-Win7-MSVC-ShuttleA-GPU-HD2000-x86-Debug-ANGLE',
]
cov = coverage.coverage()
cov.start()
for case in cases:
args[case] = get_args(case)
cov.stop()
this_file = os.path.basename(__file__)
_, _, not_run, _ = cov.analysis(this_file)
filtered = [line for line in not_run if line > cov_start and line < cov_end]
if filtered:
print 'Lines not covered by test cases: ', filtered
sys.exit(1)
golden = this_file.replace('.py', '.json')
with open(os.path.join(os.path.dirname(__file__), golden), 'w') as f:
json.dump(args, f, indent=2, sort_keys=True)
if __name__ == '__main__':
if len(sys.argv) == 2 and sys.argv[1] == 'test':
self_test()
sys.exit(0)
if len(sys.argv) != 3:
print usage
sys.exit(1)
with open(sys.argv[1], 'w') as out:
json.dump(get_args(sys.argv[2]), out)
| [
"[email protected]"
] | |
68d1e6d7b2e4f62368c66cc12f92988222360a43 | b1962b701230e8fe3235676b6a9a659b1ad921ef | /app/route/route1/models.py | bb9c977cd8cfd4a7d72eeda587a6f38d85ac85ae | [] | no_license | volgoweb/wt | 8e08dc0ff83ac120992bd81c209a420b207df966 | 3a88b8d7d6e1f925b363bfecb94008e14d15943f | refs/heads/master | 2021-01-23T09:29:42.209757 | 2016-02-05T21:24:10 | 2016-02-05T21:24:10 | 42,570,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,063 | py | # -*- coding: utf-8 -*-
"""
A route for the forum.
The content manager creates a news hook.
The author writes a headline and text for the news hook.
The content manager selects the images.
The proofreader checks and corrects errors.
The content manager publishes.
"""
import json
import datetime
from django.db import models
from django.core.urlresolvers import reverse
from app.route.models import BaseRoute, BaseStep
from app.task.models import (
Task
)
# TODO: extract the duplicated functionality into separate methods
class Step1(BaseStep):
class Meta:
proxy = True
def run(self, **kwargs):
super(Step1, self).run(**kwargs)
task = Task(
            title=u'Find the goods and inform the client about shipping dates.',
            desc=u"""Check the availability of every ordered item and tell the client: 1) which items are in stock; 2) the price of each item; 3) the delivery time for the items that are out of stock; 4) an alternative for the items that are out of stock. \n Client request description: {0}""".format(kwargs['application_desc']),
performer=kwargs['manager'],
step=self,
)
task.save()
self.task = task
self.save()
def end(self, **kwargs):
super(Step1, self).end(**kwargs)
next_step = self.route.get_step(name=Route.STEP_CREATE_ORDER)
next_step.run()
class Step2(BaseStep):
class Meta:
proxy = True
def run(self, **kwargs):
super(Step2, self).run(**kwargs)
task = Task(
            title=u'Create the order in 1C',
            desc=u'Create a shipment order for the goods that are in stock.',
performer=self.route.manager,
step=self,
)
task.save()
task = Task(
            title=u'Order the out-of-stock goods from the suppliers',
            desc=u'Create a shipment order for the goods that are in stock.',
performer=self.route.manager,
step=self,
)
task.save()
def end(self, **kwargs):
super(Step2, self).end(**kwargs)
next_step = self.route.get_step(name=Route.STEP_CREATE_ORDER)
next_step.run()
self.save()
class Route(BaseRoute):
STEP_FIRST = 'first'
STEP_CREATE_ORDER = 'create_order'
application_desc = models.CharField(max_length=20000, blank=True, null=True)
manager = models.ForeignKey('account.Account', blank=True, null=True)
# class Meta:
# proxy = True
def save(self, *args, **kwargs):
is_new = False if self.pk else True
super(Route, self).save(*args, **kwargs)
if is_new:
s1 = Step1(
name=self.STEP_FIRST,
route=self,
)
s1.save()
s2 = Step2(
name=self.STEP_CREATE_ORDER,
route=self,
)
s2.save()
# s3 = Step3(
# name=self.STEP_CHECK_BY_CORRECTOR,
# route=self,
# )
# s3.save()
# s4 = Step4(
# name=self.STEP_GIVE_IMAGES,
# route=self,
# )
# s4.save()
# s5 = Step5(
# name=self.STEP_PUBLISH,
# route=self,
# )
# s5.save()
def run(self):
self.article = Article()
self.article.save()
        step = self.steps.get(name=self.STEP_FIRST)
step.run()
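# Illustrative usage sketch (hypothetical values; BaseRoute/BaseStep behaviour
# is assumed from app.route.models):
#
#   route = Route(application_desc=u'10 units of product X')
#   route.save()  # implicitly creates the Step1 and Step2 records
#   first_step = route.steps.get(name=Route.STEP_FIRST)
#   first_step.run(manager=some_manager_account,
#                  application_desc=route.application_desc)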
| [
"[email protected]"
] | |
956895fdcf39007fb48577a2b7f41af5e435c1e2 | 4e353bf7035eec30e5ad861e119b03c5cafc762d | /QtGui/QPainterPath.py | 7998d10a8e9049e3c3657e20e5a30cef878cfee4 | [] | no_license | daym/PyQt4-Stubs | fb79f54d5c9a7fdb42e5f2506d11aa1181f3b7d5 | 57d880c0d453641e31e1e846be4087865fe793a9 | refs/heads/master | 2022-02-11T16:47:31.128023 | 2017-10-06T15:32:21 | 2017-10-06T15:32:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,689 | py | # encoding: utf-8
# module PyQt4.QtGui
# from C:\Python27\lib\site-packages\PyQt4\QtGui.pyd
# by generator 1.145
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
class QPainterPath(): # skipped bases: <type 'sip.simplewrapper'>
"""
QPainterPath()
QPainterPath(QPointF)
QPainterPath(QPainterPath)
"""
def addEllipse(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
QPainterPath.addEllipse(QRectF)
QPainterPath.addEllipse(float, float, float, float)
QPainterPath.addEllipse(QPointF, float, float)
"""
pass
def addPath(self, QPainterPath): # real signature unknown; restored from __doc__
""" QPainterPath.addPath(QPainterPath) """
pass
def addPolygon(self, QPolygonF): # real signature unknown; restored from __doc__
""" QPainterPath.addPolygon(QPolygonF) """
pass
def addRect(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
QPainterPath.addRect(QRectF)
QPainterPath.addRect(float, float, float, float)
"""
pass
def addRegion(self, QRegion): # real signature unknown; restored from __doc__
""" QPainterPath.addRegion(QRegion) """
pass
def addRoundedRect(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
QPainterPath.addRoundedRect(QRectF, float, float, Qt.SizeMode mode=Qt.AbsoluteSize)
QPainterPath.addRoundedRect(float, float, float, float, float, float, Qt.SizeMode mode=Qt.AbsoluteSize)
"""
pass
def addRoundRect(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
QPainterPath.addRoundRect(QRectF, int, int)
QPainterPath.addRoundRect(float, float, float, float, int, int)
QPainterPath.addRoundRect(QRectF, int)
QPainterPath.addRoundRect(float, float, float, float, int)
"""
pass
def addText(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
QPainterPath.addText(QPointF, QFont, QString)
QPainterPath.addText(float, float, QFont, QString)
"""
pass
def angleAtPercent(self, p_float): # real signature unknown; restored from __doc__
""" QPainterPath.angleAtPercent(float) -> float """
return 0.0
def arcMoveTo(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
QPainterPath.arcMoveTo(QRectF, float)
QPainterPath.arcMoveTo(float, float, float, float, float)
"""
pass
def arcTo(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
QPainterPath.arcTo(QRectF, float, float)
QPainterPath.arcTo(float, float, float, float, float, float)
"""
pass
def boundingRect(self): # real signature unknown; restored from __doc__
""" QPainterPath.boundingRect() -> QRectF """
pass
def closeSubpath(self): # real signature unknown; restored from __doc__
""" QPainterPath.closeSubpath() """
pass
def connectPath(self, QPainterPath): # real signature unknown; restored from __doc__
""" QPainterPath.connectPath(QPainterPath) """
pass
def contains(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
QPainterPath.contains(QPointF) -> bool
QPainterPath.contains(QRectF) -> bool
QPainterPath.contains(QPainterPath) -> bool
"""
return False
def controlPointRect(self): # real signature unknown; restored from __doc__
""" QPainterPath.controlPointRect() -> QRectF """
pass
def cubicTo(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
QPainterPath.cubicTo(QPointF, QPointF, QPointF)
QPainterPath.cubicTo(float, float, float, float, float, float)
"""
pass
def currentPosition(self): # real signature unknown; restored from __doc__
""" QPainterPath.currentPosition() -> QPointF """
pass
def elementAt(self, p_int): # real signature unknown; restored from __doc__
""" QPainterPath.elementAt(int) -> QPainterPath.Element """
pass
def elementCount(self): # real signature unknown; restored from __doc__
""" QPainterPath.elementCount() -> int """
return 0
def fillRule(self): # real signature unknown; restored from __doc__
""" QPainterPath.fillRule() -> Qt.FillRule """
pass
def intersected(self, QPainterPath): # real signature unknown; restored from __doc__
""" QPainterPath.intersected(QPainterPath) -> QPainterPath """
return QPainterPath
def intersects(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
QPainterPath.intersects(QRectF) -> bool
QPainterPath.intersects(QPainterPath) -> bool
"""
return False
def isEmpty(self): # real signature unknown; restored from __doc__
""" QPainterPath.isEmpty() -> bool """
return False
def length(self): # real signature unknown; restored from __doc__
""" QPainterPath.length() -> float """
return 0.0
def lineTo(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
QPainterPath.lineTo(QPointF)
QPainterPath.lineTo(float, float)
"""
pass
def moveTo(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
QPainterPath.moveTo(QPointF)
QPainterPath.moveTo(float, float)
"""
pass
def percentAtLength(self, p_float): # real signature unknown; restored from __doc__
""" QPainterPath.percentAtLength(float) -> float """
return 0.0
def pointAtPercent(self, p_float): # real signature unknown; restored from __doc__
""" QPainterPath.pointAtPercent(float) -> QPointF """
pass
def quadTo(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
QPainterPath.quadTo(QPointF, QPointF)
QPainterPath.quadTo(float, float, float, float)
"""
pass
def setElementPositionAt(self, p_int, p_float, p_float_1): # real signature unknown; restored from __doc__
""" QPainterPath.setElementPositionAt(int, float, float) """
pass
def setFillRule(self, Qt_FillRule): # real signature unknown; restored from __doc__
""" QPainterPath.setFillRule(Qt.FillRule) """
pass
def simplified(self): # real signature unknown; restored from __doc__
""" QPainterPath.simplified() -> QPainterPath """
return QPainterPath
def slopeAtPercent(self, p_float): # real signature unknown; restored from __doc__
""" QPainterPath.slopeAtPercent(float) -> float """
return 0.0
def subtracted(self, QPainterPath): # real signature unknown; restored from __doc__
""" QPainterPath.subtracted(QPainterPath) -> QPainterPath """
return QPainterPath
def subtractedInverted(self, QPainterPath): # real signature unknown; restored from __doc__
""" QPainterPath.subtractedInverted(QPainterPath) -> QPainterPath """
return QPainterPath
def swap(self, QPainterPath): # real signature unknown; restored from __doc__
""" QPainterPath.swap(QPainterPath) """
pass
def toFillPolygon(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
QPainterPath.toFillPolygon(QMatrix matrix=QMatrix()) -> QPolygonF
QPainterPath.toFillPolygon(QTransform) -> QPolygonF
"""
return QPolygonF
def toFillPolygons(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
QPainterPath.toFillPolygons(QMatrix matrix=QMatrix()) -> list-of-QPolygonF
QPainterPath.toFillPolygons(QTransform) -> list-of-QPolygonF
"""
pass
def toReversed(self): # real signature unknown; restored from __doc__
""" QPainterPath.toReversed() -> QPainterPath """
return QPainterPath
def toSubpathPolygons(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
QPainterPath.toSubpathPolygons(QMatrix matrix=QMatrix()) -> list-of-QPolygonF
QPainterPath.toSubpathPolygons(QTransform) -> list-of-QPolygonF
"""
pass
def translate(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
QPainterPath.translate(float, float)
QPainterPath.translate(QPointF)
"""
pass
def translated(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
"""
QPainterPath.translated(float, float) -> QPainterPath
QPainterPath.translated(QPointF) -> QPainterPath
"""
return QPainterPath
def united(self, QPainterPath): # real signature unknown; restored from __doc__
""" QPainterPath.united(QPainterPath) -> QPainterPath """
return QPainterPath
def __add__(self, y): # real signature unknown; restored from __doc__
""" x.__add__(y) <==> x+y """
pass
def __and__(self, y): # real signature unknown; restored from __doc__
""" x.__and__(y) <==> x&y """
pass
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __iadd__(self, y): # real signature unknown; restored from __doc__
""" x.__iadd__(y) <==> x+=y """
pass
def __iand__(self, y): # real signature unknown; restored from __doc__
""" x.__iand__(y) <==> x&=y """
pass
def __init__(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
pass
def __ior__(self, y): # real signature unknown; restored from __doc__
""" x.__ior__(y) <==> x|=y """
pass
def __isub__(self, y): # real signature unknown; restored from __doc__
""" x.__isub__(y) <==> x-=y """
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
def __mul__(self, y): # real signature unknown; restored from __doc__
""" x.__mul__(y) <==> x*y """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
def __or__(self, y): # real signature unknown; restored from __doc__
""" x.__or__(y) <==> x|y """
pass
def __radd__(self, y): # real signature unknown; restored from __doc__
""" x.__radd__(y) <==> y+x """
pass
def __rand__(self, y): # real signature unknown; restored from __doc__
""" x.__rand__(y) <==> y&x """
pass
def __rmul__(self, y): # real signature unknown; restored from __doc__
""" x.__rmul__(y) <==> y*x """
pass
def __ror__(self, y): # real signature unknown; restored from __doc__
""" x.__ror__(y) <==> y|x """
pass
def __rsub__(self, y): # real signature unknown; restored from __doc__
""" x.__rsub__(y) <==> y-x """
pass
def __sub__(self, y): # real signature unknown; restored from __doc__
""" x.__sub__(y) <==> x-y """
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
CurveToDataElement = 3
CurveToElement = 2
LineToElement = 1
MoveToElement = 0
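# A minimal usage sketch (illustrative only; assumes the real PyQt4 bindings
# rather than this generated stub):
#
#   path = QPainterPath()
#   path.moveTo(0.0, 0.0)
#   path.lineTo(100.0, 0.0)
#   path.cubicTo(120.0, 30.0, 80.0, 60.0, 100.0, 100.0)
#   path.closeSubpath()
#   # elementCount() now covers the MoveTo/LineTo/CurveTo elements above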
| [
"[email protected]"
] | |
2a00ed6c5e049ee4620344d4fb310bfc0654281d | 74912c10f66e90195bf87fd71e9a78fa09f017ec | /execroot/syntaxnet/bazel-out/local-opt/bin/dragnn/tools/segmenter_trainer.runfiles/org_tensorflow/tensorflow/contrib/grid_rnn/python/ops/__init__.py | fe1f5c47cb4acf5a8849d044c967729708723048 | [] | no_license | koorukuroo/821bda42e7dedbfae9d936785dd2d125- | 1f0b8f496da8380c6e811ed294dc39a357a5a8b8 | 237fcc152ff436f32b2b5a3752a4181d279b3a57 | refs/heads/master | 2020-03-17T03:39:31.972750 | 2018-05-13T14:35:24 | 2018-05-13T14:35:24 | 133,244,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | /root/.cache/bazel/_bazel_root/821bda42e7dedbfae9d936785dd2d125/external/org_tensorflow/tensorflow/contrib/grid_rnn/python/ops/__init__.py | [
"k"
] | k |
d7db9754a8b6bbf84b7a4de1c90f1a7ec627a1f8 | 3a9f76cda884152ab083ca713b57570d5d195a79 | /locations/choices.py | 7d3c412a3970317af58e06134cf6b1d703bc0ef9 | [
"MIT"
] | permissive | MahmoudFarid/django-locations | 9b9790dfef5de0214169e04640e9a4f0ab6c0961 | af25d0e4492fd3476b6be1c7f1ef8471be13751c | refs/heads/master | 2020-07-17T11:47:38.806163 | 2015-11-14T23:41:25 | 2015-11-14T23:41:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,069 | py | from localflavor.us import us_states
STATE_CHOICES = us_states.US_STATES
# ISO 3166-1 country names and codes adapted from http://opencountrycodes.appspot.com/python/
COUNTRY_CHOICES = (
('GB', 'United Kingdom'),
('AF', 'Afghanistan'),
('AX', 'Aland Islands'),
('AL', 'Albania'),
('DZ', 'Algeria'),
('AS', 'American Samoa'),
('AD', 'Andorra'),
('AO', 'Angola'),
('AI', 'Anguilla'),
('AQ', 'Antarctica'),
('AG', 'Antigua and Barbuda'),
('AR', 'Argentina'),
('AM', 'Armenia'),
('AW', 'Aruba'),
('AU', 'Australia'),
('AT', 'Austria'),
('AZ', 'Azerbaijan'),
('BS', 'Bahamas'),
('BH', 'Bahrain'),
('BD', 'Bangladesh'),
('BB', 'Barbados'),
('BY', 'Belarus'),
('BE', 'Belgium'),
('BZ', 'Belize'),
('BJ', 'Benin'),
('BM', 'Bermuda'),
('BT', 'Bhutan'),
('BO', 'Bolivia'),
('BA', 'Bosnia and Herzegovina'),
('BW', 'Botswana'),
('BV', 'Bouvet Island'),
('BR', 'Brazil'),
('IO', 'British Indian Ocean Territory'),
('BN', 'Brunei Darussalam'),
('BG', 'Bulgaria'),
('BF', 'Burkina Faso'),
('BI', 'Burundi'),
('KH', 'Cambodia'),
('CM', 'Cameroon'),
('CA', 'Canada'),
('CV', 'Cape Verde'),
('KY', 'Cayman Islands'),
('CF', 'Central African Republic'),
('TD', 'Chad'),
('CL', 'Chile'),
('CN', 'China'),
('CX', 'Christmas Island'),
('CC', 'Cocos (Keeling) Islands'),
('CO', 'Colombia'),
('KM', 'Comoros'),
('CG', 'Congo'),
('CD', 'Congo, The Democratic Republic of the'),
('CK', 'Cook Islands'),
('CR', 'Costa Rica'),
('CI', 'Cote d\'Ivoire'),
('HR', 'Croatia'),
('CU', 'Cuba'),
('CY', 'Cyprus'),
('CZ', 'Czech Republic'),
('DK', 'Denmark'),
('DJ', 'Djibouti'),
('DM', 'Dominica'),
('DO', 'Dominican Republic'),
('EC', 'Ecuador'),
('EG', 'Egypt'),
('SV', 'El Salvador'),
('GQ', 'Equatorial Guinea'),
('ER', 'Eritrea'),
('EE', 'Estonia'),
('ET', 'Ethiopia'),
('FK', 'Falkland Islands (Malvinas)'),
('FO', 'Faroe Islands'),
('FJ', 'Fiji'),
('FI', 'Finland'),
('FR', 'France'),
('GF', 'French Guiana'),
('PF', 'French Polynesia'),
('TF', 'French Southern Territories'),
('GA', 'Gabon'),
('GM', 'Gambia'),
('GE', 'Georgia'),
('DE', 'Germany'),
('GH', 'Ghana'),
('GI', 'Gibraltar'),
('GR', 'Greece'),
('GL', 'Greenland'),
('GD', 'Grenada'),
('GP', 'Guadeloupe'),
('GU', 'Guam'),
('GT', 'Guatemala'),
('GG', 'Guernsey'),
('GN', 'Guinea'),
('GW', 'Guinea-Bissau'),
('GY', 'Guyana'),
('HT', 'Haiti'),
('HM', 'Heard Island and McDonald Islands'),
('VA', 'Holy See (Vatican City State)'),
('HN', 'Honduras'),
('HK', 'Hong Kong'),
('HU', 'Hungary'),
('IS', 'Iceland'),
('IN', 'India'),
('ID', 'Indonesia'),
('IR', 'Iran, Islamic Republic of'),
('IQ', 'Iraq'),
('IE', 'Ireland'),
('IM', 'Isle of Man'),
('IL', 'Israel'),
('IT', 'Italy'),
('JM', 'Jamaica'),
('JP', 'Japan'),
('JE', 'Jersey'),
('JO', 'Jordan'),
('KZ', 'Kazakhstan'),
('KE', 'Kenya'),
('KI', 'Kiribati'),
('KP', 'Korea, Democratic People\'s Republic of'),
('KR', 'Korea, Republic of'),
('KW', 'Kuwait'),
('KG', 'Kyrgyzstan'),
('LA', 'Lao People\'s Democratic Republic'),
('LV', 'Latvia'),
('LB', 'Lebanon'),
('LS', 'Lesotho'),
('LR', 'Liberia'),
('LY', 'Libyan Arab Jamahiriya'),
('LI', 'Liechtenstein'),
('LT', 'Lithuania'),
('LU', 'Luxembourg'),
('MO', 'Macao'),
('MK', 'Macedonia, The Former Yugoslav Republic of'),
('MG', 'Madagascar'),
('MW', 'Malawi'),
('MY', 'Malaysia'),
('MV', 'Maldives'),
('ML', 'Mali'),
('MT', 'Malta'),
('MH', 'Marshall Islands'),
('MQ', 'Martinique'),
('MR', 'Mauritania'),
('MU', 'Mauritius'),
('YT', 'Mayotte'),
('MX', 'Mexico'),
('FM', 'Micronesia, Federated States of'),
('MD', 'Moldova'),
('MC', 'Monaco'),
('MN', 'Mongolia'),
('ME', 'Montenegro'),
('MS', 'Montserrat'),
('MA', 'Morocco'),
('MZ', 'Mozambique'),
('MM', 'Myanmar'),
('NA', 'Namibia'),
('NR', 'Nauru'),
('NP', 'Nepal'),
('NL', 'Netherlands'),
('AN', 'Netherlands Antilles'),
('NC', 'New Caledonia'),
('NZ', 'New Zealand'),
('NI', 'Nicaragua'),
('NE', 'Niger'),
('NG', 'Nigeria'),
('NU', 'Niue'),
('NF', 'Norfolk Island'),
('MP', 'Northern Mariana Islands'),
('NO', 'Norway'),
('OM', 'Oman'),
('PK', 'Pakistan'),
('PW', 'Palau'),
('PS', 'Palestinian Territory, Occupied'),
('PA', 'Panama'),
('PG', 'Papua New Guinea'),
('PY', 'Paraguay'),
('PE', 'Peru'),
('PH', 'Philippines'),
('PN', 'Pitcairn'),
('PL', 'Poland'),
('PT', 'Portugal'),
('PR', 'Puerto Rico'),
('QA', 'Qatar'),
('RE', 'Reunion'),
('RO', 'Romania'),
('RU', 'Russian Federation'),
('RW', 'Rwanda'),
('BL', 'Saint Barthelemy'),
('SH', 'Saint Helena'),
('KN', 'Saint Kitts and Nevis'),
('LC', 'Saint Lucia'),
('MF', 'Saint Martin'),
('PM', 'Saint Pierre and Miquelon'),
('VC', 'Saint Vincent and the Grenadines'),
('WS', 'Samoa'),
('SM', 'San Marino'),
('ST', 'Sao Tome and Principe'),
('SA', 'Saudi Arabia'),
('SN', 'Senegal'),
('RS', 'Serbia'),
('SC', 'Seychelles'),
('SL', 'Sierra Leone'),
('SG', 'Singapore'),
('SK', 'Slovakia'),
('SI', 'Slovenia'),
('SB', 'Solomon Islands'),
('SO', 'Somalia'),
('ZA', 'South Africa'),
('GS', 'South Georgia and the South Sandwich Islands'),
('ES', 'Spain'),
('LK', 'Sri Lanka'),
('SD', 'Sudan'),
('SR', 'Suriname'),
('SJ', 'Svalbard and Jan Mayen'),
('SZ', 'Swaziland'),
('SE', 'Sweden'),
('CH', 'Switzerland'),
('SY', 'Syrian Arab Republic'),
('TW', 'Taiwan, Province of China'),
('TJ', 'Tajikistan'),
('TZ', 'Tanzania, United Republic of'),
('TH', 'Thailand'),
('TL', 'Timor-Leste'),
('TG', 'Togo'),
('TK', 'Tokelau'),
('TO', 'Tonga'),
('TT', 'Trinidad and Tobago'),
('TN', 'Tunisia'),
('TR', 'Turkey'),
('TM', 'Turkmenistan'),
('TC', 'Turks and Caicos Islands'),
('TV', 'Tuvalu'),
('UG', 'Uganda'),
('UA', 'Ukraine'),
('AE', 'United Arab Emirates'),
('US', 'United States'),
('UM', 'United States Minor Outlying Islands'),
('UY', 'Uruguay'),
('UZ', 'Uzbekistan'),
('VU', 'Vanuatu'),
('VE', 'Venezuela'),
('VN', 'Viet Nam'),
('VG', 'Virgin Islands, British'),
('VI', 'Virgin Islands, U.S.'),
('WF', 'Wallis and Futuna'),
('EH', 'Western Sahara'),
('YE', 'Yemen'),
('ZM', 'Zambia'),
('ZW', 'Zimbabwe'),
) | [
"[email protected]"
] | |
2ab00653756f1a974149ea20746f1d28e975f1aa | bfdab27f224d9cac02e319fe55b53172fbf8d1a2 | /motion_editor_core/data/atlas_old/positions/leg/vi4_leg8r.py | 5e747dc2e1385126fc1654323f987d7f36d3526c | [] | no_license | tu-darmstadt-ros-pkg/motion_editor | c18294b4f035f737ff33d1dcbdfa87d4bb4e6f71 | 178a7564b18420748e1ca4413849a44965823655 | refs/heads/master | 2020-04-06T12:37:30.763325 | 2016-09-15T14:11:48 | 2016-09-15T14:11:48 | 35,028,245 | 2 | 3 | null | 2015-05-05T13:20:27 | 2015-05-04T10:18:22 | Python | UTF-8 | Python | false | false | 64 | py | { 'vi4_leg8r': [0.3032, 0.3181, -1.745, 2.447, -0.848, 0.0711]}
| [
"[email protected]"
] | |
4cbd58be9c0a75640734503160b3eb437bf7da9c | a1de302f76b3b5bf3e4c29e4c31984a8bf081820 | /autobahn/autobahn/websocket.py | cef09eb844e6ac6f837ba77d200f08e2ebb9e956 | [
"Apache-2.0"
] | permissive | jhargis/AutobahnPython | ce4d085e40b1e71823aef6387dd568f01c6288c1 | 30aed5538059ef5df9e0793938eaabee47d3c569 | refs/heads/master | 2021-04-15T05:47:01.777634 | 2012-07-17T19:13:43 | 2012-07-17T19:13:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144,851 | py | ###############################################################################
##
## Copyright 2011,2012 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
## The Python urlparse module currently does not contain the ws/wss
## schemes, so we add those dynamically (which is a hack of course).
##
import urlparse
wsschemes = ["ws", "wss"]
urlparse.uses_relative.extend(wsschemes)
urlparse.uses_netloc.extend(wsschemes)
urlparse.uses_params.extend(wsschemes)
urlparse.uses_query.extend(wsschemes)
urlparse.uses_fragment.extend(wsschemes)
from twisted.internet import reactor, protocol
from twisted.python import log
import urllib
import binascii
import hashlib
import base64
import struct
import random
import os
from array import array
from collections import deque
from utf8validator import Utf8Validator
from xormasker import XorMaskerNull, XorMaskerSimple, XorMaskerShifted1
from httpstatus import *
import autobahn # need autobahn.version
def createWsUrl(hostname, port = None, isSecure = False, path = None, params = None):
"""
Create a WebSocket URL from components.
:param hostname: WebSocket server hostname.
:type hostname: str
:param port: WebSocket service port or None (to select default ports 80/443 depending on isSecure).
:type port: int
:param isSecure: Set True for secure WebSockets ("wss" scheme).
:type isSecure: bool
:param path: Path component of addressed resource (will be properly URL escaped).
:type path: str
:param params: A dictionary of key-values to construct the query component of the addressed resource (will be properly URL escaped).
:type params: dict
:returns str -- Constructed WebSocket URL.
"""
if port is not None:
netloc = "%s:%d" % (hostname, port)
else:
if isSecure:
netloc = "%s:443" % hostname
else:
netloc = "%s:80" % hostname
if isSecure:
scheme = "wss"
else:
scheme = "ws"
if path is not None:
ppath = urllib.quote(path)
else:
ppath = "/"
if params is not None:
query = urllib.urlencode(params)
else:
query = None
return urlparse.urlunparse((scheme, netloc, ppath, None, query, None))
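## Usage sketch (illustrative values, not from the original source):
##
## createWsUrl("localhost", port = 9000, path = "/chat")
## => "ws://localhost:9000/chat"
##
## createWsUrl("example.com", isSecure = True, params = {"token": "abc"})
## => "wss://example.com:443/?token=abc"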
def parseWsUrl(url):
"""
Parses a WebSocket URL into its components and returns a tuple (isSecure, host, port, resource, path, params).
isSecure is a flag which is True for wss URLs.
host is the hostname or IP from the URL.
port is the port from the URL or the standard port derived from the scheme (ws = 80, wss = 443).
resource is the /resource name/ from the URL, the /path/ together with the (optional) /query/ component.
path is the /path/ component properly unescaped.
params is the /query/ component properly unescaped and returned as a dictionary.
:param url: A valid WebSocket URL, e.g. ws://localhost:9000/myresource?param1=23&param2=666
:type url: str
:returns: tuple -- A tuple (isSecure, host, port, resource, path, params)
"""
parsed = urlparse.urlparse(url)
if parsed.scheme not in ["ws", "wss"]:
raise Exception("invalid WebSocket scheme '%s'" % parsed.scheme)
if parsed.port is None or parsed.port == "":
if parsed.scheme == "ws":
port = 80
else:
port = 443
else:
port = int(parsed.port)
if parsed.fragment is not None and parsed.fragment != "":
raise Exception("invalid WebSocket URL: non-empty fragment '%s" % parsed.fragment)
if parsed.path is not None and parsed.path != "":
ppath = parsed.path
path = urllib.unquote(ppath)
else:
ppath = "/"
path = ppath
if parsed.query is not None and parsed.query != "":
resource = ppath + "?" + parsed.query
params = urlparse.parse_qs(parsed.query)
else:
resource = ppath
params = {}
return (parsed.scheme == "wss", parsed.hostname, port, resource, path, params)
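## Usage sketch (illustrative values, not from the original source):
##
## parseWsUrl("ws://localhost:9000/myresource?param1=23")
## => (False, 'localhost', 9000, '/myresource?param1=23', '/myresource',
## {'param1': ['23']})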
def connectWS(factory, contextFactory = None, timeout = 30, bindAddress = None):
"""
Establish WebSockets connection to a server. The connection parameters like target
host, port, resource and others are provided via the factory.
:param factory: The WebSockets protocol factory to be used for creating client protocol instances.
:type factory: An :class:`autobahn.websocket.WebSocketClientFactory` instance.
:param contextFactory: SSL context factory, required for secure WebSockets connections ("wss").
:type contextFactory: A twisted.internet.ssl.ClientContextFactory instance.
:param timeout: Number of seconds to wait before assuming the connection has failed.
:type timeout: int
:param bindAddress: A (host, port) tuple of local address to bind to, or None.
:type bindAddress: tuple
:returns: obj -- An object which provides twisted.interface.IConnector.
"""
if factory.isSecure:
if contextFactory is None:
# create default client SSL context factory when none given
from twisted.internet import ssl
contextFactory = ssl.ClientContextFactory()
conn = reactor.connectSSL(factory.host, factory.port, factory, contextFactory, timeout, bindAddress)
else:
conn = reactor.connectTCP(factory.host, factory.port, factory, timeout, bindAddress)
return conn
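## Minimal usage sketch (assumes a WebSocketClientProtocol subclass named
## EchoClientProtocol and a factory constructed from a WebSocket URL; both
## names are illustrative, not from this code section):
##
## factory = WebSocketClientFactory("ws://localhost:9000")
## factory.protocol = EchoClientProtocol
## connectWS(factory)
## reactor.run()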
def listenWS(factory, contextFactory = None, backlog = 50, interface = ''):
"""
Listen for incoming WebSocket connections from clients. The connection parameters like
listening port and others are provided via the factory.
:param factory: The WebSockets protocol factory to be used for creating server protocol instances.
:type factory: An :class:`autobahn.websocket.WebSocketServerFactory` instance.
:param contextFactory: SSL context factory, required for secure WebSockets connections ("wss").
:type contextFactory: A twisted.internet.ssl.ContextFactory.
:param backlog: Size of the listen queue.
:type backlog: int
:param interface: The interface (derived from hostname given) to bind to, defaults to '' (all).
:type interface: str
:returns: obj -- An object that provides twisted.interface.IListeningPort.
"""
if factory.isSecure:
if contextFactory is None:
raise Exception("Secure WebSocket listen requested, but no SSL context factory given")
listener = reactor.listenSSL(factory.port, factory, contextFactory, backlog, interface)
else:
listener = reactor.listenTCP(factory.port, factory, backlog, interface)
return listener
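## Minimal usage sketch (assumes a WebSocketServerProtocol subclass named
## EchoServerProtocol; names are illustrative, not from this code section):
##
## factory = WebSocketServerFactory("ws://localhost:9000")
## factory.protocol = EchoServerProtocol
## listenWS(factory)
## reactor.run()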
class FrameHeader:
"""
Thin wrapper for storing WebSockets frame metadata.
FOR INTERNAL USE ONLY!
"""
def __init__(self, opcode, fin, rsv, length, mask):
"""
Constructor.
:param opcode: Frame opcode (0-15).
:type opcode: int
:param fin: Frame FIN flag.
:type fin: bool
:param rsv: Frame reserved flags (0-7).
:type rsv: int
:param length: Frame payload length.
:type length: int
:param mask: Frame mask (binary string) or None.
:type mask: str
"""
self.opcode = opcode
self.fin = fin
self.rsv = rsv
self.length = length
self.mask = mask
class HttpException():
"""
Throw an instance of this class to deny a WebSockets connection
during handshake in :meth:`autobahn.websocket.WebSocketServerProtocol.onConnect`.
You can find definitions of HTTP status codes in module :mod:`autobahn.httpstatus`.
"""
def __init__(self, code, reason):
"""
Constructor.
:param code: HTTP error code.
:type code: int
:param reason: HTTP error reason.
:type reason: str
"""
self.code = code
self.reason = reason
class ConnectionRequest():
"""
Thin wrapper for WebSockets connection request information
provided in :meth:`autobahn.websocket.WebSocketServerProtocol.onConnect` when a WebSockets
client establishes a connection to a WebSockets server.
"""
def __init__(self, peer, peerstr, headers, host, path, params, version, origin, protocols, extensions):
"""
Constructor.
:param peer: IP address/port of the connecting client.
:type peer: object
:param peerstr: IP address/port of the connecting client as string.
:type peerstr: str
:param headers: HTTP headers from opening handshake request.
:type headers: dict
:param host: Host from opening handshake HTTP header.
:type host: str
:param path: Path from requested HTTP resource URI. For example, a resource URI of "/myservice?foo=23&foo=66&bar=2" will be parsed to "/myservice".
:type path: str
:param params: Query parameters (if any) from requested HTTP resource URI. For example, a resource URI of "/myservice?foo=23&foo=66&bar=2" will be parsed to {'foo': ['23', '66'], 'bar': ['2']}.
:type params: dict of arrays of strings
:param version: The WebSockets protocol version the client announced (and will be spoken, when connection is accepted).
:type version: int
:param origin: The WebSockets origin header or None. Note that this is only a reliable source of information for browser clients!
:type origin: str
:param protocols: The WebSockets (sub)protocols the client announced. You must select and return one of those (or None) in :meth:`autobahn.websocket.WebSocketServerProtocol.onConnect`.
:type protocols: array of strings
:param extensions: The WebSockets extensions the client requested and the server accepted (and thus will be spoken, when WS connection is established).
:type extensions: array of strings
"""
self.peer = peer
self.peerstr = peerstr
self.headers = headers
self.host = host
self.path = path
self.params = params
self.version = version
self.origin = origin
self.protocols = protocols
self.extensions = extensions
class ConnectionResponse():
"""
Thin wrapper for WebSockets connection response information
provided in :meth:`autobahn.websocket.WebSocketClientProtocol.onConnect` when a WebSockets
client has established a connection to a WebSockets server.
"""
def __init__(self, peer, peerstr, headers, version, protocol, extensions):
"""
Constructor.
:param peer: IP address/port of the connected server.
:type peer: object
:param peerstr: IP address/port of the connected server as string.
:type peerstr: str
:param headers: HTTP headers from opening handshake response.
:type headers: dict
:param version: The WebSockets protocol version that is spoken.
:type version: int
:param protocol: The WebSockets (sub)protocol in use.
:type protocol: str
:param extensions: The WebSockets extensions in use.
:type extensions: array of strings
"""
self.peer = peer
self.peerstr = peerstr
self.headers = headers
self.version = version
self.protocol = protocol
self.extensions = extensions
def parseHttpHeader(data):
"""
Parses the beginning of an HTTP request header (the data up to the \n\n line) into a triple
of status line, HTTP headers dictionary and a dictionary of per-header occurrence counts.
Header keys are normalized to all-lower-case.
FOR INTERNAL USE ONLY!
:param data: The HTTP header data up to the \n\n line.
:type data: str
"""
raw = data.splitlines()
http_status_line = raw[0].strip()
http_headers = {}
http_headers_cnt = {}
for h in raw[1:]:
i = h.find(":")
if i > 0:
## HTTP header keys are case-insensitive
key = h[:i].strip().lower()
## not sure if UTF-8 is allowed for HTTP header values..
value = h[i+1:].strip().decode("utf-8")
## handle HTTP headers split across multiple lines
if http_headers.has_key(key):
http_headers[key] += ", %s" % value
http_headers_cnt[key] += 1
else:
http_headers[key] = value
http_headers_cnt[key] = 1
else:
# skip bad HTTP header
pass
return (http_status_line, http_headers, http_headers_cnt)
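## Example sketch (illustrative, not from the original source): for the raw
## header data
##
## "GET /chat HTTP/1.1\r\nHost: server.example.com\r\nUpgrade: websocket\r\n"
##
## this returns the status line "GET /chat HTTP/1.1", the headers dictionary
## {'host': u'server.example.com', 'upgrade': u'websocket'} (keys lower-cased)
## and the occurrence counts {'host': 1, 'upgrade': 1}.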
class WebSocketProtocol(protocol.Protocol):
"""
A Twisted Protocol class for WebSockets. This class is used by both the WebSocket
client and server protocol implementations. It is unusable standalone; for example,
the WebSockets initial handshake is implemented differently in the derived classes
for clients and servers.
"""
SUPPORTED_SPEC_VERSIONS = [0, 10, 11, 12, 13, 14, 15, 16, 17, 18]
"""
WebSockets protocol spec (draft) versions supported by this implementation.
Use of version 18 indicates RFC6455. Use of versions < 18 indicates actual
draft spec versions (Hybi-Drafts). Use of version 0 indicates Hixie-76.
"""
SUPPORTED_PROTOCOL_VERSIONS = [0, 8, 13]
"""
WebSocket protocol versions supported by this implementation. For Hixie-76,
there is no protocol version announced in HTTP header, and we just use the
draft version (0) in this case.
"""
SPEC_TO_PROTOCOL_VERSION = {0: 0, 10: 8, 11: 8, 12: 8, 13: 13, 14: 13, 15: 13, 16: 13, 17: 13, 18: 13}
"""
Mapping from protocol spec (draft) version to protocol version. For Hixie-76,
there is no protocol version announced in HTTP header, and we just use the
pseudo protocol version 0 in this case.
"""
PROTOCOL_TO_SPEC_VERSION = {0: 0, 8: 12, 13: 18}
"""
Mapping from protocol version to the latest protocol spec (draft) version
using that protocol version. For Hixie-76, there is no protocol version
announced in HTTP header, and we just use the draft version (0) in this case.
"""
DEFAULT_SPEC_VERSION = 10
"""
Default WebSockets protocol spec version this implementation speaks.
We use Hybi-10, since this is what is currently targeted by widely distributed
browsers (namely Firefox 8 and the like).
"""
DEFAULT_ALLOW_HIXIE76 = False
"""
By default, this implementation will not allow speaking the obsolete
Hixie-76 protocol version. That protocol version has security issues, but
is still spoken by some clients. Enable at your own risk! Enabling can be
done by using setProtocolOptions() on the factories for clients and servers.
"""
WS_MAGIC = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
"""
Protocol defined magic used during WebSocket handshake (used in Hybi-drafts
and final RFC6455).
"""
QUEUED_WRITE_DELAY = 0.00001
"""For synched/chopped writes, this is the reactor reentry delay in seconds."""
PAYLOAD_LEN_XOR_BREAKEVEN = 128
"""Tuning parameter which chooses XORer used for masking/unmasking based on
payload length."""
MESSAGE_TYPE_TEXT = 1
"""WebSockets text message type (UTF-8 payload)."""
MESSAGE_TYPE_BINARY = 2
"""WebSockets binary message type (arbitrary binary payload)."""
## WebSockets protocol state:
## STATE_CONNECTING => STATE_OPEN => STATE_CLOSING => STATE_CLOSED
##
STATE_CLOSED = 0
STATE_CONNECTING = 1
STATE_CLOSING = 2
STATE_OPEN = 3
## Streaming Send State
SEND_STATE_GROUND = 0
SEND_STATE_MESSAGE_BEGIN = 1
SEND_STATE_INSIDE_MESSAGE = 2
SEND_STATE_INSIDE_MESSAGE_FRAME = 3
## WebSockets protocol close codes
##
CLOSE_STATUS_CODE_NORMAL = 1000
"""Normal close of connection."""
CLOSE_STATUS_CODE_GOING_AWAY = 1001
"""Going away."""
CLOSE_STATUS_CODE_PROTOCOL_ERROR = 1002
"""Protocol error."""
CLOSE_STATUS_CODE_UNSUPPORTED_DATA = 1003
"""Unsupported data."""
CLOSE_STATUS_CODE_RESERVED1 = 1004
"""RESERVED"""
CLOSE_STATUS_CODE_NULL = 1005 # MUST NOT be set in close frame!
"""No status received. (MUST NOT be used as status code when sending a close)."""
CLOSE_STATUS_CODE_ABNORMAL_CLOSE = 1006 # MUST NOT be set in close frame!
"""Abnormal close of connection. (MUST NOT be used as status code when sending a close)."""
CLOSE_STATUS_CODE_INVALID_PAYLOAD = 1007
"""Invalid frame payload data."""
CLOSE_STATUS_CODE_POLICY_VIOLATION = 1008
"""Policy violation."""
CLOSE_STATUS_CODE_MESSAGE_TOO_BIG = 1009
"""Message too big."""
CLOSE_STATUS_CODE_MANDATORY_EXTENSION = 1010
"""Mandatory extension."""
CLOSE_STATUS_CODE_INTERNAL_ERROR = 1011
"""The peer encountered an unexpected condition or internal error."""
CLOSE_STATUS_CODE_TLS_HANDSHAKE_FAILED = 1015 # MUST NOT be set in close frame!
"""TLS handshake failed, i.e. server certificate could not be verified. (MUST NOT be used as status code when sending a close)."""
CLOSE_STATUS_CODES_ALLOWED = [CLOSE_STATUS_CODE_NORMAL,
CLOSE_STATUS_CODE_GOING_AWAY,
CLOSE_STATUS_CODE_PROTOCOL_ERROR,
CLOSE_STATUS_CODE_UNSUPPORTED_DATA,
CLOSE_STATUS_CODE_INVALID_PAYLOAD,
CLOSE_STATUS_CODE_POLICY_VIOLATION,
CLOSE_STATUS_CODE_MESSAGE_TOO_BIG,
CLOSE_STATUS_CODE_MANDATORY_EXTENSION,
CLOSE_STATUS_CODE_INTERNAL_ERROR]
"""Status codes allowed to send in close."""
def onOpen(self):
"""
Callback when initial WebSockets handshake was completed. Now you may send messages.
Default implementation does nothing. Override in derived class.
Modes: Hybi, Hixie
"""
if self.debugCodePaths:
log.msg("WebSocketProtocol.onOpen")
def onMessageBegin(self, opcode):
"""
Callback when receiving a new message has begun. Default implementation will
prepare to buffer message frames. Override in derived class.
Modes: Hybi, Hixie
:param opcode: Opcode of message.
:type opcode: int
"""
self.message_opcode = opcode
self.message_data = []
self.message_data_total_length = 0
def onMessageFrameBegin(self, length, reserved):
"""
Callback when receiving a new message frame has begun. Default implementation will
prepare to buffer message frame data. Override in derived class.
Modes: Hybi
:param length: Payload length of message frame which is to be received.
:type length: int
:param reserved: Reserved bits set in frame (an integer from 0 to 7).
:type reserved: int
"""
self.frame_length = length
self.frame_reserved = reserved
self.frame_data = []
self.message_data_total_length += length
if not self.failedByMe:
if self.maxMessagePayloadSize > 0 and self.message_data_total_length > self.maxMessagePayloadSize:
self.wasMaxMessagePayloadSizeExceeded = True
self.failConnection(WebSocketProtocol.CLOSE_STATUS_CODE_MESSAGE_TOO_BIG, "message exceeds payload limit of %d octets" % self.maxMessagePayloadSize)
elif self.maxFramePayloadSize > 0 and length > self.maxFramePayloadSize:
self.wasMaxFramePayloadSizeExceeded = True
self.failConnection(WebSocketProtocol.CLOSE_STATUS_CODE_POLICY_VIOLATION, "frame exceeds payload limit of %d octets" % self.maxFramePayloadSize)
def onMessageFrameData(self, payload):
"""
Callback when receiving data within a message frame. Default implementation will
buffer data for frame. Override in derived class.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, this method is slightly misnamed for historic reasons.
:param payload: Partial payload for message frame.
:type payload: str
"""
if not self.failedByMe:
if self.websocket_version == 0:
self.message_data_total_length += len(payload)
if self.maxMessagePayloadSize > 0 and self.message_data_total_length > self.maxMessagePayloadSize:
self.wasMaxMessagePayloadSizeExceeded = True
self.failConnection(WebSocketProtocol.CLOSE_STATUS_CODE_MESSAGE_TOO_BIG, "message exceeds payload limit of %d octets" % self.maxMessagePayloadSize)
self.message_data.append(payload)
else:
self.frame_data.append(payload)
def onMessageFrameEnd(self):
"""
Callback when a message frame has been completely received. Default implementation
will flatten the buffered frame data and callback onMessageFrame. Override
in derived class.
Modes: Hybi
"""
if not self.failedByMe:
self.onMessageFrame(self.frame_data, self.frame_reserved)
self.frame_data = None
def onMessageFrame(self, payload, reserved):
"""
Callback fired when complete message frame has been received. Default implementation
will buffer frame for message. Override in derived class.
Modes: Hybi
:param payload: Message frame payload.
:type payload: list of str
:param reserved: Reserved bits set in frame (an integer from 0 to 7).
:type reserved: int
"""
if not self.failedByMe:
self.message_data.extend(payload)
def onMessageEnd(self):
"""
Callback when a message has been completely received. Default implementation
will flatten the buffered frames and callback onMessage. Override
in derived class.
Modes: Hybi, Hixie
"""
if not self.failedByMe:
payload = ''.join(self.message_data)
self.onMessage(payload, self.message_opcode == WebSocketProtocol.MESSAGE_TYPE_BINARY)
self.message_data = None
def onMessage(self, payload, binary):
"""
Callback when a complete message was received. Default implementation does nothing.
Override in derived class.
Modes: Hybi, Hixie
:param payload: Message payload (UTF-8 encoded text string or binary string). Can also be an empty string, when message contained no payload.
:type payload: str
:param binary: If True, payload is binary, otherwise text.
:type binary: bool
"""
if self.debug:
log.msg("WebSocketProtocol.onMessage")
def onPing(self, payload):
"""
Callback when Ping was received. Default implementation responds
with a Pong. Override in derived class.
Modes: Hybi
:param payload: Payload of Ping, when there was any. Can be arbitrary, up to 125 octets.
:type payload: str
"""
if self.debug:
log.msg("WebSocketProtocol.onPing")
if self.state == WebSocketProtocol.STATE_OPEN:
self.sendPong(payload)
def onPong(self, payload):
"""
Callback when Pong was received. Default implementation does nothing.
Override in derived class.
Modes: Hybi
:param payload: Payload of Pong, when there was any. Can be arbitrary, up to 125 octets.
"""
if self.debug:
log.msg("WebSocketProtocol.onPong")
def onClose(self, wasClean, code, reason):
"""
Callback when the connection has been closed. Override in derived class.
Modes: Hybi, Hixie
:param wasClean: True, iff the connection was closed cleanly.
:type wasClean: bool
:param code: None or close status code (sent by peer), if there was one (:class:`WebSocketProtocol`.CLOSE_STATUS_CODE_*).
:type code: int
:param reason: None or close reason (sent by peer) (when present, a status code MUST also have been present).
:type reason: str
"""
if self.debugCodePaths:
s = "WebSocketProtocol.onClose:\n"
s += "wasClean=%s\n" % wasClean
s += "code=%s\n" % code
s += "reason=%s\n" % reason
s += "self.closedByMe=%s\n" % self.closedByMe
s += "self.failedByMe=%s\n" % self.failedByMe
s += "self.droppedByMe=%s\n" % self.droppedByMe
s += "self.wasClean=%s\n" % self.wasClean
s += "self.wasNotCleanReason=%s\n" % self.wasNotCleanReason
s += "self.localCloseCode=%s\n" % self.localCloseCode
s += "self.localCloseReason=%s\n" % self.localCloseReason
s += "self.remoteCloseCode=%s\n" % self.remoteCloseCode
s += "self.remoteCloseReason=%s\n" % self.remoteCloseReason
log.msg(s)
def onCloseFrame(self, code, reasonRaw):
"""
Callback when a Close frame was received. The default implementation answers by
sending a Close when no Close was sent before. Otherwise it drops
the TCP connection either immediately (when we are a server) or after a timeout
(when we are a client and expect the server to drop the TCP).
Modes: Hybi, Hixie
Notes:
- For Hixie mode, this method is slightly misnamed for historic reasons.
- For Hixie mode, code and reasonRaw are silently ignored.
:param code: None or close status code, if there was one (:class:`WebSocketProtocol`.CLOSE_STATUS_CODE_*).
:type code: int
:param reasonRaw: None or close reason (when present, a status code MUST also have been present).
:type reasonRaw: str
"""
if self.debugCodePaths:
log.msg("WebSocketProtocol.onCloseFrame")
self.remoteCloseCode = code
self.remoteCloseReason = reasonRaw
## reserved close codes: 0-999, 1004, 1005, 1006, 1012-2999, >= 5000
##
if code is not None and (code < 1000 or (code >= 1000 and code <= 2999 and code not in WebSocketProtocol.CLOSE_STATUS_CODES_ALLOWED) or code >= 5000):
if self.protocolViolation("invalid close code %d" % code):
return True
## closing reason
##
if reasonRaw is not None:
## we use our own UTF-8 validator to get consistent and fully conformant
## UTF-8 validation behavior
u = Utf8Validator()
val = u.validate(reasonRaw)
if not val[0]:
if self.invalidPayload("invalid close reason (non-UTF-8 payload)"):
return True
if self.state == WebSocketProtocol.STATE_CLOSING:
## We already initiated the closing handshake, so this
## is the peer's reply to our close frame.
## cancel any closing HS timer if present
##
if self.closeHandshakeTimeoutCall is not None:
if self.debugCodePaths:
log.msg("closeHandshakeTimeoutCall.cancel")
self.closeHandshakeTimeoutCall.cancel()
self.closeHandshakeTimeoutCall = None
self.wasClean = True
if self.isServer:
## When we are a server, we immediately drop the TCP.
self.dropConnection(abort = True)
else:
## When we are a client, the server should drop the TCP
## If that doesn't happen, we do. And that will set wasClean = False.
if self.serverConnectionDropTimeout > 0:
self.serverConnectionDropTimeoutCall = reactor.callLater(self.serverConnectionDropTimeout, self.onServerConnectionDropTimeout)
elif self.state == WebSocketProtocol.STATE_OPEN:
## The peer initiates a closing handshake, so we reply
## by sending close frame.
self.wasClean = True
if self.websocket_version == 0:
self.sendCloseFrame(isReply = True)
else:
## Either reply with same code/reason, or code == NORMAL/reason=None
if self.echoCloseCodeReason:
## echo back the close reason as received (reasonRaw is the raw UTF-8 payload)
self.sendCloseFrame(code = code, reasonUtf8 = reasonRaw, isReply = True)
else:
self.sendCloseFrame(code = WebSocketProtocol.CLOSE_STATUS_CODE_NORMAL, isReply = True)
if self.isServer:
## When we are a server, we immediately drop the TCP.
self.dropConnection(abort = False)
else:
## When we are a client, we expect the server to drop the TCP,
## and when the server fails to do so, a timeout in sendCloseFrame()
## will set wasClean = False back again.
pass
else:
## STATE_CONNECTING, STATE_CLOSED
raise Exception("logic error")
def onServerConnectionDropTimeout(self):
"""
We (a client) expected the peer (a server) to drop the connection,
but it didn't (in time self.serverConnectionDropTimeout).
So we drop the connection, but set self.wasClean = False.
Modes: Hybi, Hixie
"""
self.serverConnectionDropTimeoutCall = None
if self.state != WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
log.msg("onServerConnectionDropTimeout")
self.wasClean = False
self.wasNotCleanReason = "server did not drop TCP connection (in time)"
self.wasServerConnectionDropTimeout = True
self.dropConnection(abort = True)
else:
if self.debugCodePaths:
log.msg("skipping onServerConnectionDropTimeout since connection is already closed")
def onOpenHandshakeTimeout(self):
"""
We expected the peer to complete the opening handshake with us.
It didn't do so (in time self.openHandshakeTimeout).
So we drop the connection, but set self.wasClean = False.
Modes: Hybi, Hixie
"""
self.openHandshakeTimeoutCall = None
if self.state == WebSocketProtocol.STATE_CONNECTING:
if self.debugCodePaths:
log.msg("onOpenHandshakeTimeout fired")
self.wasClean = False
self.wasNotCleanReason = "peer did not finish (in time) the opening handshake"
self.wasOpenHandshakeTimeout = True
self.dropConnection(abort = True)
elif self.state == WebSocketProtocol.STATE_OPEN:
if self.debugCodePaths:
log.msg("skipping onOpenHandshakeTimeout since WebSocket connection is open (opening handshake already finished)")
elif self.state == WebSocketProtocol.STATE_CLOSING:
if self.debugCodePaths:
log.msg("skipping onOpenHandshakeTimeout since WebSocket connection is closing")
elif self.state == WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
log.msg("skipping onOpenHandshakeTimeout since WebSocket connection already closed")
else:
# should not arrive here
raise Exception("logic error")
def onCloseHandshakeTimeout(self):
"""
We expected the peer to respond to us initiating a close handshake. It didn't
respond (in time self.closeHandshakeTimeout) with a close response frame though.
So we drop the connection, but set self.wasClean = False.
Modes: Hybi, Hixie
"""
self.closeHandshakeTimeoutCall = None
if self.state != WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
log.msg("onCloseHandshakeTimeout fired")
self.wasClean = False
self.wasNotCleanReason = "peer did not respond (in time) in closing handshake"
self.wasCloseHandshakeTimeout = True
self.dropConnection(abort = True)
else:
if self.debugCodePaths:
log.msg("skipping onCloseHandshakeTimeout since connection is already closed")
def dropConnection(self, abort = False):
"""
Drop the underlying TCP connection. For abort parameter, see:
* http://twistedmatrix.com/documents/current/core/howto/servers.html#auto2
* https://github.com/tavendo/AutobahnPython/issues/96
Modes: Hybi, Hixie
"""
if self.state != WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
log.msg("dropping connection")
self.droppedByMe = True
self.state = WebSocketProtocol.STATE_CLOSED
if abort:
self.transport.abortConnection()
else:
self.transport.loseConnection()
else:
if self.debugCodePaths:
log.msg("skipping dropConnection since connection is already closed")
def failConnection(self, code = CLOSE_STATUS_CODE_GOING_AWAY, reason = "Going Away"):
"""
Fails the WebSockets connection.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, the code and reason are silently ignored.
"""
if self.state != WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
log.msg("Failing connection : %s - %s" % (code, reason))
self.failedByMe = True
if self.failByDrop:
## brutally drop the TCP connection
self.wasClean = False
self.wasNotCleanReason = "I failed the WebSocket connection by dropping the TCP connection"
self.dropConnection(abort = True)
else:
## perform WebSockets closing handshake
if self.state != WebSocketProtocol.STATE_CLOSING:
self.sendCloseFrame(code = code, reasonUtf8 = reason.encode("UTF-8"), isReply = False)
else:
if self.debugCodePaths:
log.msg("skipping failConnection since connection is already closing")
else:
if self.debugCodePaths:
log.msg("skipping failConnection since connection is already closed")
def protocolViolation(self, reason):
"""
Fired when a WebSockets protocol violation/error occurs.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, reason is silently ignored.
:param reason: Protocol violation that was encountered (human readable).
:type reason: str
:returns: bool -- True, when any further processing should be discontinued.
"""
if self.debugCodePaths:
log.msg("Protocol violation : %s" % reason)
self.failConnection(WebSocketProtocol.CLOSE_STATUS_CODE_PROTOCOL_ERROR, reason)
if self.failByDrop:
return True
else:
## if we don't immediately drop the TCP, we need to skip the invalid frame
## to continue to later receive the closing handshake reply
return False
def invalidPayload(self, reason):
"""
Fired when an invalid payload is encountered. Currently, this only happens
for text messages when the payload is invalid UTF-8, or for close frames with
a close reason that is invalid UTF-8.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, reason is silently ignored.
:param reason: What was invalid for the payload (human readable).
:type reason: str
:returns: bool -- True, when any further processing should be discontinued.
"""
if self.debugCodePaths:
log.msg("Invalid payload : %s" % reason)
self.failConnection(WebSocketProtocol.CLOSE_STATUS_CODE_INVALID_PAYLOAD, reason)
if self.failByDrop:
return True
else:
## if we don't immediately drop the TCP, we need to skip the invalid frame
## to continue to later receive the closing handshake reply
return False
def connectionMade(self):
"""
This is called by Twisted framework when a new TCP connection has been established
and handed over to a Protocol instance (an instance of this class).
Modes: Hybi, Hixie
"""
## copy default options from factory (so we are not affected by changes on those)
##
self.debug = self.factory.debug
self.debugCodePaths = self.factory.debugCodePaths
self.logOctets = self.factory.logOctets
self.logFrames = self.factory.logFrames
self.allowHixie76 = self.factory.allowHixie76
self.utf8validateIncoming = self.factory.utf8validateIncoming
self.applyMask = self.factory.applyMask
self.maxFramePayloadSize = self.factory.maxFramePayloadSize
self.maxMessagePayloadSize = self.factory.maxMessagePayloadSize
self.autoFragmentSize = self.factory.autoFragmentSize
self.failByDrop = self.factory.failByDrop
self.echoCloseCodeReason = self.factory.echoCloseCodeReason
self.openHandshakeTimeout = self.factory.openHandshakeTimeout
self.closeHandshakeTimeout = self.factory.closeHandshakeTimeout
self.tcpNoDelay = self.factory.tcpNoDelay
if self.isServer:
self.versions = self.factory.versions
self.webStatus = self.factory.webStatus
self.requireMaskedClientFrames = self.factory.requireMaskedClientFrames
self.maskServerFrames = self.factory.maskServerFrames
else:
self.version = self.factory.version
self.acceptMaskedServerFrames = self.factory.acceptMaskedServerFrames
self.maskClientFrames = self.factory.maskClientFrames
self.serverConnectionDropTimeout = self.factory.serverConnectionDropTimeout
## set the TCP no-delay ("Nagle") socket option
self.transport.setTcpNoDelay(self.tcpNoDelay)
## the peer we are connected to
self.peer = self.transport.getPeer()
self.peerstr = "%s:%d" % (self.peer.host, self.peer.port)
## initial state
self.state = WebSocketProtocol.STATE_CONNECTING
self.send_state = WebSocketProtocol.SEND_STATE_GROUND
self.data = ""
## for chopped/synched sends, we need to queue to maintain
## ordering when recalling the reactor to actually "force"
## the octets to wire (see test/trickling in the repo)
self.send_queue = deque()
self.triggered = False
## incremental UTF8 validator
self.utf8validator = Utf8Validator()
## track when frame/message payload sizes (incoming) were exceeded
self.wasMaxFramePayloadSizeExceeded = False
self.wasMaxMessagePayloadSizeExceeded = False
## the following vars are related to connection close handling/tracking
# True, iff I have initiated closing HS (that is, did send close first)
self.closedByMe = False
# True, iff I have failed the WS connection (i.e. due to protocol error)
# Failing can be either by initiating close HS or brutal drop (this is
# controlled by failByDrop option)
self.failedByMe = False
# True, iff I dropped the TCP connection (called transport.loseConnection())
self.droppedByMe = False
# True, iff full WebSockets closing handshake was performed (close frame sent
# and received) _and_ the server dropped the TCP (which is its responsibility)
self.wasClean = False
# When self.wasClean = False, the reason (what happened)
self.wasNotCleanReason = None
# When we are a client, and we expected the server to drop the TCP, but that
# didn't happen in time, this gets True
self.wasServerConnectionDropTimeout = False
# When the initial WebSocket opening handshake times out, this gets True
self.wasOpenHandshakeTimeout = False
# When we initiated a closing handshake, but the peer did not respond in
# time, this gets True
self.wasCloseHandshakeTimeout = False
# The close code I sent in close frame (if any)
self.localCloseCode = None
# The close reason I sent in close frame (if any)
self.localCloseReason = None
# The close code the peer sent me in close frame (if any)
self.remoteCloseCode = None
# The close reason the peer sent me in close frame (if any)
self.remoteCloseReason = None
# timers, which might get set up later, and remembered here to get canceled
# when appropriate
if not self.isServer:
self.serverConnectionDropTimeoutCall = None
self.openHandshakeTimeoutCall = None
self.closeHandshakeTimeoutCall = None
# set opening handshake timeout handler
if self.openHandshakeTimeout > 0:
self.openHandshakeTimeoutCall = reactor.callLater(self.openHandshakeTimeout, self.onOpenHandshakeTimeout)
def connectionLost(self, reason):
"""
This is called by Twisted framework when a TCP connection was lost.
Modes: Hybi, Hixie
"""
## cancel any server connection drop timer if present
##
if not self.isServer and self.serverConnectionDropTimeoutCall is not None:
if self.debugCodePaths:
log.msg("serverConnectionDropTimeoutCall.cancel")
self.serverConnectionDropTimeoutCall.cancel()
self.serverConnectionDropTimeoutCall = None
self.state = WebSocketProtocol.STATE_CLOSED
if not self.wasClean:
if not self.droppedByMe and self.wasNotCleanReason is None:
self.wasNotCleanReason = "peer dropped the TCP connection without previous WebSocket closing handshake"
self.onClose(self.wasClean, WebSocketProtocol.CLOSE_STATUS_CODE_ABNORMAL_CLOSE, "connection was closed uncleanly (%s)" % self.wasNotCleanReason)
else:
self.onClose(self.wasClean, self.remoteCloseCode, self.remoteCloseReason)
def logRxOctets(self, data):
"""
Hook fired right after raw octets have been received, but only when self.logOctets == True.
Modes: Hybi, Hixie
"""
log.msg("RX Octets from %s : octets = %s" % (self.peerstr, binascii.b2a_hex(data)))
def logTxOctets(self, data, sync):
"""
Hook fired right after raw octets have been sent, but only when self.logOctets == True.
Modes: Hybi, Hixie
"""
log.msg("TX Octets to %s : sync = %s, octets = %s" % (self.peerstr, sync, binascii.b2a_hex(data)))
def logRxFrame(self, frameHeader, payload):
"""
Hook fired right after WebSocket frame has been received and decoded, but only when self.logFrames == True.
Modes: Hybi
"""
data = ''.join(payload)
info = (self.peerstr,
frameHeader.fin,
frameHeader.rsv,
frameHeader.opcode,
binascii.b2a_hex(frameHeader.mask) if frameHeader.mask else "-",
frameHeader.length,
data if frameHeader.opcode == 1 else binascii.b2a_hex(data))
log.msg("RX Frame from %s : fin = %s, rsv = %s, opcode = %s, mask = %s, length = %s, payload = %s" % info)
def logTxFrame(self, frameHeader, payload, repeatLength, chopsize, sync):
"""
Hook fired right after WebSocket frame has been encoded and sent, but only when self.logFrames == True.
Modes: Hybi
"""
info = (self.peerstr,
frameHeader.fin,
frameHeader.rsv,
frameHeader.opcode,
binascii.b2a_hex(frameHeader.mask) if frameHeader.mask else "-",
frameHeader.length,
repeatLength,
chopsize,
sync,
payload if frameHeader.opcode == 1 else binascii.b2a_hex(payload))
log.msg("TX Frame to %s : fin = %s, rsv = %s, opcode = %s, mask = %s, length = %s, repeat_length = %s, chopsize = %s, sync = %s, payload = %s" % info)
def dataReceived(self, data):
"""
This is called by Twisted framework upon receiving data on TCP connection.
Modes: Hybi, Hixie
"""
if self.logOctets:
self.logRxOctets(data)
self.data += data
self.consumeData()
def consumeData(self):
"""
Consume buffered (incoming) data.
Modes: Hybi, Hixie
"""
## WebSocket is open (handshake was completed) or close was sent
##
if self.state == WebSocketProtocol.STATE_OPEN or self.state == WebSocketProtocol.STATE_CLOSING:
## process until no more buffered data left or WS was closed
##
while self.processData() and self.state != WebSocketProtocol.STATE_CLOSED:
pass
## WebSocket needs handshake
##
elif self.state == WebSocketProtocol.STATE_CONNECTING:
## the implementation of processHandshake() in derived
## class needs to perform client or server handshake
## from other party here ..
##
self.processHandshake()
## we failed the connection .. don't process any more data!
##
elif self.state == WebSocketProtocol.STATE_CLOSED:
## ignore any data received after WS was closed
##
if self.debugCodePaths:
log.msg("received data in STATE_CLOSED")
## should not arrive here (invalid state)
##
else:
raise Exception("invalid state")
def processHandshake(self):
"""
Process WebSockets handshake.
Modes: Hybi, Hixie
"""
raise Exception("must implement handshake (client or server) in derived class")
def registerProducer(self, producer, streaming):
"""
Register a Twisted producer with this protocol.
Modes: Hybi, Hixie
:param producer: A Twisted push or pull producer.
:type producer: object
:param streaming: Producer type.
:type streaming: bool
"""
self.transport.registerProducer(producer, streaming)
def _trigger(self):
"""
Trigger sending stuff from send queue (which is only used for chopped/synched writes).
Modes: Hybi, Hixie
"""
if not self.triggered:
self.triggered = True
self._send()
def _send(self):
"""
Send out stuff from send queue. For details how this works, see test/trickling
in the repo.
Modes: Hybi, Hixie
"""
if len(self.send_queue) > 0:
e = self.send_queue.popleft()
if self.state != WebSocketProtocol.STATE_CLOSED:
self.transport.write(e[0])
if self.logOctets:
self.logTxOctets(e[0], e[1])
else:
if self.debugCodePaths:
log.msg("skipped delayed write, since connection is closed")
# we need to reenter the reactor to make the latter
# reenter the OS network stack, so that octets
# can get on the wire. Note: this is a "heuristic",
# since there is no (easy) way to really force out
# octets from the OS network stack to wire.
reactor.callLater(WebSocketProtocol.QUEUED_WRITE_DELAY, self._send)
else:
self.triggered = False
def sendData(self, data, sync = False, chopsize = None):
"""
Wrapper for self.transport.write which allows giving a chopsize.
When asked to chop up writing to the TCP stream, we write only chopsize octets
and then give up control to select() in the underlying reactor so that bytes
get onto the wire immediately. Note that this is different from and unrelated
to WebSockets data message fragmentation. Note that this is also different
from the TcpNoDelay option which can be set on the socket.
Modes: Hybi, Hixie
"""
if chopsize and chopsize > 0:
i = 0
n = len(data)
done = False
while not done:
j = i + chopsize
if j >= n:
done = True
j = n
self.send_queue.append((data[i:j], True))
i += chopsize
self._trigger()
else:
if sync or len(self.send_queue) > 0:
self.send_queue.append((data, sync))
self._trigger()
else:
self.transport.write(data)
if self.logOctets:
self.logTxOctets(data, False)
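## Usage sketch (illustrative, not from the original source):
##
## self.sendData(octets) # plain write
## self.sendData(octets, sync = True) # queued write, forces reactor reentry
## self.sendData(octets, chopsize = 1) # write octet by octet (for testing)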
def sendPreparedMessage(self, preparedMsg):
"""
Send a message that was previously prepared with
WebSocketFactory.prepareMessage().
Modes: Hybi, Hixie
"""
if self.websocket_version == 0:
self.sendData(preparedMsg.payloadHixie)
else:
self.sendData(preparedMsg.payloadHybi)
def processData(self):
"""
After WebSockets handshake has been completed, this procedure will do all
subsequent processing of incoming bytes.
Modes: Hybi, Hixie
"""
if self.websocket_version == 0:
return self.processDataHixie76()
else:
return self.processDataHybi()
def processDataHixie76(self):
"""
Hixie-76 incoming data processing.
Modes: Hixie
"""
buffered_len = len(self.data)
## outside a message, that is we are awaiting data which starts a new message
##
if not self.inside_message:
if buffered_len >= 2:
## new message
##
if self.data[0] == '\x00':
self.inside_message = True
if self.utf8validateIncoming:
self.utf8validator.reset()
self.utf8validateIncomingCurrentMessage = True
self.utf8validateLast = (True, True, 0, 0)
else:
self.utf8validateIncomingCurrentMessage = False
self.data = self.data[1:]
self.onMessageBegin(1)
## Hixie close from peer received
##
elif self.data[0] == '\xff' and self.data[1] == '\x00':
self.onCloseFrame()
self.data = self.data[2:]
# stop receiving/processing after having received close!
return False
## malformed data
##
else:
if self.protocolViolation("malformed data received"):
return False
else:
## need more data
return False
end_index = self.data.find('\xff')
if end_index > 0:
payload = self.data[:end_index]
self.data = self.data[end_index + 1:]
else:
payload = self.data
self.data = ''
## incrementally validate UTF-8 payload
##
if self.utf8validateIncomingCurrentMessage:
self.utf8validateLast = self.utf8validator.validate(payload)
if not self.utf8validateLast[0]:
if self.invalidPayload("encountered invalid UTF-8 while processing text message at payload octet index %d" % self.utf8validateLast[3]):
return False
self.onMessageFrameData(payload)
if end_index > 0:
self.inside_message = False
self.onMessageEnd()
return len(self.data) > 0
def processDataHybi(self):
"""
RFC6455/Hybi-Drafts incoming data processing.
Modes: Hybi
"""
buffered_len = len(self.data)
## outside a frame, that is we are awaiting data which starts a new frame
##
if self.current_frame is None:
## need a minimum of 2 octets for a new frame
##
if buffered_len >= 2:
## FIN, RSV, OPCODE
##
b = ord(self.data[0])
frame_fin = (b & 0x80) != 0
frame_rsv = (b & 0x70) >> 4
frame_opcode = b & 0x0f
## MASK, PAYLOAD LEN 1
##
b = ord(self.data[1])
frame_masked = (b & 0x80) != 0
frame_payload_len1 = b & 0x7f
## MUST be 0 when no extension defining
## the semantics of RSV has been negotiated
##
if frame_rsv != 0:
if self.protocolViolation("RSV != 0 and no extension negotiated"):
return False
## all client-to-server frames MUST be masked
##
if self.isServer and self.requireMaskedClientFrames and not frame_masked:
if self.protocolViolation("unmasked client-to-server frame"):
return False
## all server-to-client frames MUST NOT be masked
##
if not self.isServer and not self.acceptMaskedServerFrames and frame_masked:
if self.protocolViolation("masked server-to-client frame"):
return False
## check frame
##
if frame_opcode > 7: # control frame (have MSB in opcode set)
## control frames MUST NOT be fragmented
##
if not frame_fin:
if self.protocolViolation("fragmented control frame"):
return False
## control frames MUST have payload 125 octets or less
##
if frame_payload_len1 > 125:
if self.protocolViolation("control frame with payload length > 125 octets"):
return False
## check for reserved control frame opcodes
##
if frame_opcode not in [8, 9, 10]:
if self.protocolViolation("control frame using reserved opcode %d" % frame_opcode):
return False
## close frame : if there is a body, the first two bytes of the body MUST be a 2-byte
## unsigned integer (in network byte order) representing a status code
##
if frame_opcode == 8 and frame_payload_len1 == 1:
if self.protocolViolation("received close control frame with payload len 1"):
return False
else: # data frame
## check for reserved data frame opcodes
##
if frame_opcode not in [0, 1, 2]:
if self.protocolViolation("data frame using reserved opcode %d" % frame_opcode):
return False
## check opcode vs message fragmentation state 1/2
##
if not self.inside_message and frame_opcode == 0:
if self.protocolViolation("received continuation data frame outside fragmented message"):
return False
## check opcode vs message fragmentation state 2/2
##
if self.inside_message and frame_opcode != 0:
if self.protocolViolation("received non-continuation data frame while inside fragmented message"):
return False
## compute complete header length
##
if frame_masked:
mask_len = 4
else:
mask_len = 0
if frame_payload_len1 < 126:
frame_header_len = 2 + mask_len
elif frame_payload_len1 == 126:
frame_header_len = 2 + 2 + mask_len
elif frame_payload_len1 == 127:
frame_header_len = 2 + 8 + mask_len
else:
raise Exception("logic error")
## only proceed when we have enough data buffered for complete
## frame header (which includes extended payload len + mask)
##
if buffered_len >= frame_header_len:
## minimum frame header length (already consumed)
##
i = 2
## extract extended payload length
##
if frame_payload_len1 == 126:
frame_payload_len = struct.unpack("!H", self.data[i:i+2])[0]
if frame_payload_len < 126:
if self.protocolViolation("invalid data frame length (not using minimal length encoding)"):
return False
i += 2
elif frame_payload_len1 == 127:
frame_payload_len = struct.unpack("!Q", self.data[i:i+8])[0]
if frame_payload_len > 0x7FFFFFFFFFFFFFFF: # 2**63 - 1
if self.protocolViolation("invalid data frame length (>= 2^63)"):
return False
if frame_payload_len < 65536:
if self.protocolViolation("invalid data frame length (not using minimal length encoding)"):
return False
i += 8
else:
frame_payload_len = frame_payload_len1
## when payload is masked, extract frame mask
##
frame_mask = None
if frame_masked:
frame_mask = self.data[i:i+4]
i += 4
if frame_masked and frame_payload_len > 0 and self.applyMask:
if frame_payload_len < WebSocketProtocol.PAYLOAD_LEN_XOR_BREAKEVEN:
self.current_frame_masker = XorMaskerSimple(frame_mask)
else:
self.current_frame_masker = XorMaskerShifted1(frame_mask)
else:
self.current_frame_masker = XorMaskerNull()
## remember rest (payload of current frame after header and everything thereafter)
##
self.data = self.data[i:]
## ok, got complete frame header
##
self.current_frame = FrameHeader(frame_opcode,
frame_fin,
frame_rsv,
frame_payload_len,
frame_mask)
## process begin on new frame
##
self.onFrameBegin()
## reprocess when frame has no payload or there is buffered data left
##
return frame_payload_len == 0 or len(self.data) > 0
else:
return False # need more data
else:
return False # need more data
## inside a started frame
##
else:
## cut out rest of frame payload
##
rest = self.current_frame.length - self.current_frame_masker.pointer()
if buffered_len >= rest:
data = self.data[:rest]
length = rest
self.data = self.data[rest:]
else:
data = self.data
length = buffered_len
self.data = ""
if length > 0:
## unmask payload
##
payload = self.current_frame_masker.process(data)
## process frame data
##
fr = self.onFrameData(payload)
if fr == False:
return False
## fire frame end handler when frame payload is complete
##
if self.current_frame_masker.pointer() == self.current_frame.length:
fr = self.onFrameEnd()
if fr == False:
return False
## reprocess when no error occurred and buffered data left
##
return len(self.data) > 0
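## Worked example (the masked "Hello" frame from RFC6455, Section 5.7; added
## here for illustration): the wire octets
##
## 0x81 0x85 0x37 0xfa 0x21 0x3d 0x7f 0x9f 0x4d 0x51 0x58
##
## decode above as b0 = 0x81 (FIN set, RSV = 0, opcode = 1, text), b1 = 0x85
## (MASK set, payload length 5), masking key 0x37 0xfa 0x21 0x3d, and the
## remaining 5 octets XORed with the key recover the payload "Hello".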
def onFrameBegin(self):
"""
Begin receiving a new frame.
Modes: Hybi
"""
if self.current_frame.opcode > 7:
self.control_frame_data = []
else:
## new message started
##
if not self.inside_message:
self.inside_message = True
if self.current_frame.opcode == WebSocketProtocol.MESSAGE_TYPE_TEXT and self.utf8validateIncoming:
self.utf8validator.reset()
self.utf8validateIncomingCurrentMessage = True
self.utf8validateLast = (True, True, 0, 0)
else:
self.utf8validateIncomingCurrentMessage = False
self.onMessageBegin(self.current_frame.opcode)
self.onMessageFrameBegin(self.current_frame.length, self.current_frame.rsv)
def onFrameData(self, payload):
"""
New data received within frame.
Modes: Hybi
"""
if self.current_frame.opcode > 7:
self.control_frame_data.append(payload)
else:
## incrementally validate UTF-8 payload
##
if self.utf8validateIncomingCurrentMessage:
self.utf8validateLast = self.utf8validator.validate(payload)
if not self.utf8validateLast[0]:
if self.invalidPayload("encountered invalid UTF-8 while processing text message at payload octet index %d" % self.utf8validateLast[3]):
return False
self.onMessageFrameData(payload)
def onFrameEnd(self):
"""
End of frame received.
Modes: Hybi
"""
if self.current_frame.opcode > 7:
if self.logFrames:
self.logRxFrame(self.current_frame, self.control_frame_data)
self.processControlFrame()
else:
if self.logFrames:
self.logRxFrame(self.current_frame, self.frame_data)
self.onMessageFrameEnd()
if self.current_frame.fin:
if self.utf8validateIncomingCurrentMessage:
if not self.utf8validateLast[1]:
if self.invalidPayload("UTF-8 text message payload ended within Unicode code point at payload octet index %d" % self.utf8validateLast[3]):
return False
self.onMessageEnd()
self.inside_message = False
self.current_frame = None
def processControlFrame(self):
"""
Process a completely received control frame.
Modes: Hybi
"""
payload = ''.join(self.control_frame_data)
self.control_frame_data = None
## CLOSE frame
##
if self.current_frame.opcode == 8:
code = None
reasonRaw = None
ll = len(payload)
if ll > 1:
code = struct.unpack("!H", payload[0:2])[0]
if ll > 2:
reasonRaw = payload[2:]
if self.onCloseFrame(code, reasonRaw):
return False
## PING frame
##
elif self.current_frame.opcode == 9:
self.onPing(payload)
## PONG frame
##
elif self.current_frame.opcode == 10:
self.onPong(payload)
else:
## we might arrive here, when protocolViolation
## wants us to continue anyway
pass
return True
def sendFrame(self, opcode, payload = "", fin = True, rsv = 0, mask = None, payload_len = None, chopsize = None, sync = False):
"""
Send out frame. Normally only used internally via sendMessage(), sendPing(), sendPong() and sendClose().
This method deliberately allows sending invalid frames (that is, frames invalid
per-se, or frames invalid because of protocol state). Other than fuzzing servers,
calling methods will ensure that no invalid frames are sent.
In addition, this method supports explicit specification of payload length.
When payload_len is given, it will always write that many octets to the stream.
It'll wrap within payload, resending parts of that when more octets were requested.
The use case is again fuzzing servers which want to send increasing amounts
of payload data to peers without having to construct potentially large messages
themselves.
Modes: Hybi
"""
if self.websocket_version == 0:
raise Exception("function not supported in Hixie-76 mode")
if payload_len is not None:
if len(payload) < 1:
raise Exception("cannot construct repeated payload with length %d from payload of length %d" % (payload_len, len(payload)))
l = payload_len
pl = ''.join([payload for k in range(payload_len / len(payload))]) + payload[:payload_len % len(payload)]
else:
l = len(payload)
pl = payload
## first byte
##
b0 = 0
if fin:
b0 |= (1 << 7)
b0 |= (rsv % 8) << 4
b0 |= opcode % 128
## second byte, payload len bytes and mask
##
b1 = 0
if mask or (not self.isServer and self.maskClientFrames) or (self.isServer and self.maskServerFrames):
b1 |= 1 << 7
if not mask:
mask = struct.pack("!I", random.getrandbits(32))
mv = mask
else:
mv = ""
## mask frame payload
##
if l > 0 and self.applyMask:
if l < WebSocketProtocol.PAYLOAD_LEN_XOR_BREAKEVEN:
masker = XorMaskerSimple(mask)
else:
masker = XorMaskerShifted1(mask)
plm = masker.process(pl)
else:
plm = pl
else:
mv = ""
plm = pl
el = ""
if l <= 125:
b1 |= l
elif l <= 0xFFFF:
b1 |= 126
el = struct.pack("!H", l)
elif l <= 0x7FFFFFFFFFFFFFFF:
b1 |= 127
el = struct.pack("!Q", l)
else:
raise Exception("invalid payload length")
raw = ''.join([chr(b0), chr(b1), el, mv, plm])
if self.logFrames:
frameHeader = FrameHeader(opcode, fin, rsv, l, mask)
self.logTxFrame(frameHeader, payload, payload_len, chopsize, sync)
## send frame octets
##
self.sendData(raw, sync, chopsize)
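## Worked example (the unmasked "Hello" frame from RFC6455, Section 5.7; added
## here for illustration): sendFrame(opcode = 1, payload = "Hello") on a server
## (which does not mask frames by default) produces
##
## 0x81 0x05 0x48 0x65 0x6c 0x6c 0x6f
##
## i.e. b0 = 0x81 (FIN + text opcode), b1 = 0x05 (no mask, length 5), followed
## by the raw payload octets.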
def sendPing(self, payload = None):
"""
Send out Ping to peer. A peer is expected to Pong back the payload as soon
as "practical". When more than 1 Ping is outstanding at a peer, the peer may
elect to respond only to the last Ping.
Modes: Hybi
:param payload: An optional, arbitrary payload of length < 126 octets.
:type payload: str
"""
if self.websocket_version == 0:
raise Exception("function not supported in Hixie-76 mode")
if self.state != WebSocketProtocol.STATE_OPEN:
return
if payload:
l = len(payload)
if l > 125:
raise Exception("invalid payload for PING (payload length must be <= 125, was %d)" % l)
self.sendFrame(opcode = 9, payload = payload)
else:
self.sendFrame(opcode = 9)
def sendPong(self, payload = None):
"""
Send out Pong to peer. A Pong frame MAY be sent unsolicited.
This serves as a unidirectional heartbeat. A response to an unsolicited pong is "not expected".
Modes: Hybi
:param payload: An optional, arbitrary payload of length < 126 octets.
:type payload: str
"""
if self.websocket_version == 0:
raise Exception("function not supported in Hixie-76 mode")
if self.state != WebSocketProtocol.STATE_OPEN:
return
if payload:
l = len(payload)
if l > 125:
raise Exception("invalid payload for PONG (payload length must be <= 125, was %d)" % l)
self.sendFrame(opcode = 10, payload = payload)
else:
self.sendFrame(opcode = 10)
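## Usage sketch (illustrative, not from the original source): inside an open
## protocol instance
##
## self.sendPing("keepalive")
##
## sends a Ping which a conforming peer answers with a Pong carrying the same
## payload, delivered here via onPong("keepalive"); unsolicited heartbeats can
## likewise be sent with self.sendPong().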
def sendCloseFrame(self, code = None, reasonUtf8 = None, isReply = False):
"""
Send a close frame and update protocol state. Note that this is
an internal method which deliberately allows sending a close
frame with an invalid payload.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, this method is slightly misnamed for historic reasons.
- For Hixie mode, code and reasonUtf8 will be silently ignored.
"""
if self.state == WebSocketProtocol.STATE_CLOSING:
if self.debugCodePaths:
log.msg("ignoring sendCloseFrame since connection is closing")
elif self.state == WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
log.msg("ignoring sendCloseFrame since connection already closed")
elif self.state == WebSocketProtocol.STATE_CONNECTING:
raise Exception("cannot close a connection not yet connected")
elif self.state == WebSocketProtocol.STATE_OPEN:
if self.websocket_version == 0:
self.sendData("\xff\x00")
else:
## construct Hybi close frame payload and send frame
payload = ""
if code is not None:
payload += struct.pack("!H", code)
if reasonUtf8 is not None:
payload += reasonUtf8
self.sendFrame(opcode = 8, payload = payload)
## update state
self.state = WebSocketProtocol.STATE_CLOSING
self.closedByMe = not isReply
## remember payload of close frame we sent
self.localCloseCode = code
self.localCloseReason = reasonUtf8
## drop connection when timeout on receiving close handshake reply
if self.closedByMe and self.closeHandshakeTimeout > 0:
self.closeHandshakeTimeoutCall = reactor.callLater(self.closeHandshakeTimeout, self.onCloseHandshakeTimeout)
else:
raise Exception("logic error")
def sendClose(self, code = None, reason = None):
"""
Starts a closing handshake.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, code and reason will be silently ignored.
:param code: An optional close status code (:class:`WebSocketProtocol`.CLOSE_STATUS_CODE_NORMAL or 3000-4999).
:type code: int
:param reason: An optional close reason (a string; when present, a status code MUST also be present).
:type reason: str
"""
if code is not None:
if type(code) != int:
raise Exception("invalid type %s for close code" % type(code))
if code != 1000 and not (code >= 3000 and code <= 4999):
raise Exception("invalid close code %d" % code)
if reason is not None:
if code is None:
raise Exception("close reason without close code")
if type(reason) not in [str, unicode]:
raise Exception("invalid type %s for close reason" % type(reason))
reasonUtf8 = reason.encode("UTF-8")
if len(reasonUtf8) + 2 > 125:
raise Exception("close reason too long (%d)" % len(reasonUtf8))
else:
reasonUtf8 = None
self.sendCloseFrame(code = code, reasonUtf8 = reasonUtf8, isReply = False)
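## Usage sketch (illustrative): a clean, application-initiated shutdown
##
## self.sendClose(WebSocketProtocol.CLOSE_STATUS_CODE_NORMAL, "goodbye")
##
## sends a close frame with status code 1000 and reason "goodbye" and moves
## the protocol into STATE_CLOSING until the peer replies or the close
## handshake times out.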
def beginMessage(self, opcode = MESSAGE_TYPE_TEXT):
"""
Begin sending new message.
Modes: Hybi, Hixie
:param opcode: Message type, normally either WebSocketProtocol.MESSAGE_TYPE_TEXT (default) or
WebSocketProtocol.MESSAGE_TYPE_BINARY (only Hybi mode).
"""
if self.state != WebSocketProtocol.STATE_OPEN:
return
## check if sending state is valid for this method
##
if self.send_state != WebSocketProtocol.SEND_STATE_GROUND:
raise Exception("WebSocketProtocol.beginMessage invalid in current sending state")
if self.websocket_version == 0:
if opcode != 1:
raise Exception("cannot send non-text message in Hixie mode")
self.sendData('\x00')
self.send_state = WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE
else:
if opcode not in [1, 2]:
raise Exception("use of reserved opcode %d" % opcode)
## remember opcode for later (when sending first frame)
##
self.send_message_opcode = opcode
self.send_state = WebSocketProtocol.SEND_STATE_MESSAGE_BEGIN
def beginMessageFrame(self, length, reserved = 0, mask = None):
"""
Begin sending new message frame.
Modes: Hybi
      :param length: Length of the frame which is started. Must be >= 0 and <= 2^63 - 1.
      :type length: int
      :param reserved: Reserved bits for frame (an integer from 0 to 7). Note that reserved != 0 is only legal when an extension has been negotiated which defines semantics.
:type reserved: int
:param mask: Optional frame mask. When given, this is used. When None and the peer is a client, a mask will be internally generated. For servers None is default.
:type mask: str
"""
if self.websocket_version == 0:
raise Exception("function not supported in Hixie-76 mode")
if self.state != WebSocketProtocol.STATE_OPEN:
return
## check if sending state is valid for this method
##
if self.send_state not in [WebSocketProtocol.SEND_STATE_MESSAGE_BEGIN, WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE]:
raise Exception("WebSocketProtocol.beginMessageFrame invalid in current sending state")
      if type(length) not in [int, long] or length < 0 or length > 0x7FFFFFFFFFFFFFFF: # 2**63 - 1
raise Exception("invalid value for message frame length")
if type(reserved) is not int or reserved < 0 or reserved > 7:
raise Exception("invalid value for reserved bits")
self.send_message_frame_length = length
if mask:
## explicit mask given
##
assert type(mask) == str
assert len(mask) == 4
self.send_message_frame_mask = mask
elif (not self.isServer and self.maskClientFrames) or (self.isServer and self.maskServerFrames):
## automatic mask:
## - client-to-server masking (if not deactivated)
## - server-to-client masking (if activated)
##
self.send_message_frame_mask = struct.pack("!I", random.getrandbits(32))
else:
## no mask
##
self.send_message_frame_mask = None
## payload masker
##
if self.send_message_frame_mask and length > 0 and self.applyMask:
if length < WebSocketProtocol.PAYLOAD_LEN_XOR_BREAKEVEN:
self.send_message_frame_masker = XorMaskerSimple(self.send_message_frame_mask)
else:
self.send_message_frame_masker = XorMaskerShifted1(self.send_message_frame_mask)
else:
self.send_message_frame_masker = XorMaskerNull()
## first byte
##
b0 = (reserved % 8) << 4 # FIN = false .. since with streaming, we don't know when message ends
if self.send_state == WebSocketProtocol.SEND_STATE_MESSAGE_BEGIN:
self.send_state = WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE
b0 |= self.send_message_opcode % 128
else:
pass # message continuation frame
## second byte, payload len bytes and mask
##
b1 = 0
if self.send_message_frame_mask:
b1 |= 1 << 7
mv = self.send_message_frame_mask
else:
mv = ""
el = ""
if length <= 125:
b1 |= length
elif length <= 0xFFFF:
b1 |= 126
el = struct.pack("!H", length)
elif length <= 0x7FFFFFFFFFFFFFFF:
b1 |= 127
el = struct.pack("!Q", length)
else:
raise Exception("invalid payload length")
## write message frame header
##
header = ''.join([chr(b0), chr(b1), el, mv])
self.sendData(header)
## now we are inside message frame ..
##
self.send_state = WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE_FRAME
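   ## Worked example (illustrative): for the first frame of a binary message
   ## (opcode 2), reserved = 0, a 3 octet payload and no mask, the two header
   ## octets computed above are
   ##
   ##    b0 = ((0 % 8) << 4) | 2 = 0x02   (FIN not set - streaming)
   ##    b1 = 0 | 3              = 0x03   (no mask bit, payload length 3)
   ##
   ## so the header sent is '\x02\x03', followed by the 3 payload octets.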
def sendMessageFrameData(self, payload, sync = False):
"""
Send out data when within message frame (message was begun, frame was begun).
      Note that the frame is automatically ended when enough data has been sent;
      that is, there is no endMessageFrame, since you have begun the frame specifying
the frame length, which implicitly defined the frame end. This is different from
messages, which you begin and end, since a message can contain an unlimited number
of frames.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, this method is slightly misnamed for historic reasons.
:param payload: Data to send.
      :returns: int -- Hybi mode: when the frame is still incomplete, returns the number of outstanding octets; when the frame is complete, returns <= 0, where a value < 0 gives the amount of unconsumed data in the payload argument. Hixie mode: returns None.
"""
if self.state != WebSocketProtocol.STATE_OPEN:
return
if self.websocket_version == 0:
## Hixie Mode
##
if self.send_state != WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE:
raise Exception("WebSocketProtocol.sendMessageFrameData invalid in current sending state")
self.sendData(payload, sync = sync)
return None
else:
## Hybi Mode
##
if self.send_state != WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE_FRAME:
raise Exception("WebSocketProtocol.sendMessageFrameData invalid in current sending state")
rl = len(payload)
if self.send_message_frame_masker.pointer() + rl > self.send_message_frame_length:
l = self.send_message_frame_length - self.send_message_frame_masker.pointer()
rest = -(rl - l)
pl = payload[:l]
else:
l = rl
rest = self.send_message_frame_length - self.send_message_frame_masker.pointer() - l
pl = payload
## mask frame payload
##
plm = self.send_message_frame_masker.process(pl)
## send frame payload
##
self.sendData(plm, sync = sync)
## if we are done with frame, move back into "inside message" state
##
if self.send_message_frame_masker.pointer() >= self.send_message_frame_length:
self.send_state = WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE
## when =0 : frame was completed exactly
      ## when >0 : frame is still incomplete and that many octets are still left to complete the frame
## when <0 : frame was completed and there was this much unconsumed data in payload argument
##
return rest
def endMessage(self):
"""
End a previously begun message. No more frames may be sent (for that message). You have to
begin a new message before sending again.
Modes: Hybi, Hixie
"""
if self.state != WebSocketProtocol.STATE_OPEN:
return
## check if sending state is valid for this method
##
if self.send_state != WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE:
raise Exception("WebSocketProtocol.endMessage invalid in current sending state [%d]" % self.send_state)
if self.websocket_version == 0:
self.sendData('\x00')
else:
self.sendFrame(opcode = 0, fin = True)
self.send_state = WebSocketProtocol.SEND_STATE_GROUND
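   ## Streaming usage sketch (illustrative): sending a message of known total
   ## size in two chunks via the frame-based API:
   ##
   ##    self.beginMessage(opcode = WebSocketProtocol.MESSAGE_TYPE_BINARY)
   ##    self.beginMessageFrame(6)
   ##    self.sendMessageFrameData("abc")   # 3 octets outstanding -> returns 3
   ##    self.sendMessageFrameData("def")   # frame complete -> returns 0
   ##    self.endMessage()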
def sendMessageFrame(self, payload, reserved = 0, mask = None, sync = False):
"""
When a message has begun, send a complete message frame in one go.
Modes: Hybi
"""
      if self.websocket_version == 0:
         raise Exception("function not supported in Hixie-76 mode")
      if self.state != WebSocketProtocol.STATE_OPEN:
         return
self.beginMessageFrame(len(payload), reserved, mask)
self.sendMessageFrameData(payload, sync)
def sendMessage(self, payload, binary = False, payload_frag_size = None, sync = False):
"""
Send out a message in one go.
      You can send a text or binary message, and optionally specify a payload fragment size.
When the latter is given, the payload will be split up into frames with
payload <= the payload_frag_size given.
Modes: Hybi, Hixie
"""
if self.state != WebSocketProtocol.STATE_OPEN:
return
if self.websocket_version == 0:
if binary:
raise Exception("cannot send binary message in Hixie76 mode")
if payload_frag_size:
raise Exception("cannot fragment messages in Hixie76 mode")
self.sendMessageHixie76(payload, sync)
else:
self.sendMessageHybi(payload, binary, payload_frag_size, sync)
def sendMessageHixie76(self, payload, sync = False):
"""
Hixie76-Variant of sendMessage().
Modes: Hixie
"""
self.sendData('\x00' + payload + '\xff', sync = sync)
def sendMessageHybi(self, payload, binary = False, payload_frag_size = None, sync = False):
"""
Hybi-Variant of sendMessage().
Modes: Hybi
"""
## (initial) frame opcode
##
if binary:
opcode = 2
else:
opcode = 1
## explicit payload_frag_size arguments overrides autoFragmentSize setting
##
if payload_frag_size is not None:
pfs = payload_frag_size
else:
if self.autoFragmentSize > 0:
pfs = self.autoFragmentSize
else:
pfs = None
## send unfragmented
##
if pfs is None or len(payload) <= pfs:
self.sendFrame(opcode = opcode, payload = payload, sync = sync)
## send data message in fragments
##
else:
if pfs < 1:
raise Exception("payload fragment size must be at least 1 (was %d)" % pfs)
n = len(payload)
i = 0
done = False
first = True
while not done:
j = i + pfs
if j > n:
done = True
j = n
if first:
self.sendFrame(opcode = opcode, payload = payload[i:j], fin = done, sync = sync)
first = False
else:
self.sendFrame(opcode = 0, payload = payload[i:j], fin = done, sync = sync)
i += pfs
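   ## Worked example (illustrative): sendMessageHybi("0123456789", binary = False,
   ## payload_frag_size = 4) produces three frames:
   ##
   ##    frame 1: opcode 1 (text), payload "0123", fin = False
   ##    frame 2: opcode 0 (cont), payload "4567", fin = False
   ##    frame 3: opcode 0 (cont), payload "89",   fin = True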
class PreparedMessage:
"""
Encapsulates a prepared message to be sent later once or multiple
times. This is used for optimizing Broadcast/PubSub.
The message serialization formats currently created internally are:
* Hybi
* Hixie
The construction of different formats is needed, since we support
mixed clients (speaking different protocol versions).
It will also be the place to add a 3rd format, when we support
the deflate extension, since then, the clients will be mixed
between Hybi-Deflate-Unsupported, Hybi-Deflate-Supported and Hixie.
"""
def __init__(self, payload, binary, masked):
self.initHixie(payload, binary)
self.initHybi(payload, binary, masked)
def initHixie(self, payload, binary):
if binary:
         ## binary payloads are not representable in Hixie mode, so they are
         ## silently dropped here (base64 encoding would be an alternative)
         self.payloadHixie = ''
else:
self.payloadHixie = '\x00' + payload + '\xff'
def initHybi(self, payload, binary, masked):
l = len(payload)
## first byte
##
b0 = ((1 << 7) | 2) if binary else ((1 << 7) | 1)
## second byte, payload len bytes and mask
##
if masked:
b1 = 1 << 7
mask = struct.pack("!I", random.getrandbits(32))
if l == 0:
plm = payload
elif l < WebSocketProtocol.PAYLOAD_LEN_XOR_BREAKEVEN:
plm = XorMaskerSimple(mask).process(payload)
else:
plm = XorMaskerShifted1(mask).process(payload)
else:
b1 = 0
mask = ""
plm = payload
## payload extended length
##
el = ""
if l <= 125:
b1 |= l
elif l <= 0xFFFF:
b1 |= 126
el = struct.pack("!H", l)
elif l <= 0x7FFFFFFFFFFFFFFF:
b1 |= 127
el = struct.pack("!Q", l)
else:
raise Exception("invalid payload length")
## raw WS message (single frame)
##
self.payloadHybi = ''.join([chr(b0), chr(b1), el, mask, plm])
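   ## Example (illustrative): an unmasked text message with payload "Hi" gives
   ##
   ##    b0 = (1 << 7) | 1 = 0x81   (FIN + text opcode)
   ##    b1 = 2                     (no mask bit, payload length 2)
   ##
   ## so payloadHybi == '\x81\x02Hi'.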
class WebSocketFactory:
"""
Mixin for WebSocketClientFactory and WebSocketServerFactory.
"""
def prepareMessage(self, payload, binary = False, masked = None):
"""
Prepare a WebSocket message. This can be later used on multiple
instances of WebSocketProtocol using sendPreparedMessage().
By doing so, you can avoid the (small) overhead of framing the
_same_ payload into WS messages when that payload is to be sent
out on multiple connections.
Modes: Hybi, Hixie
Caveats:
      1) Only use when you know what you are doing. I.e. calling
      sendPreparedMessage() on the _same_ protocol instance multiple
      times with the same prepared message might break the spec,
      since e.g. the frame mask will be the same!
2) Treat the object returned as opaque. It may change!
"""
if masked is None:
masked = not self.isServer
return PreparedMessage(payload, binary, masked)
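   ## Broadcast usage sketch (illustrative; the client bookkeeping below is an
   ## assumption, not part of this class): given a factory that tracks its
   ## connected protocol instances in self.clients, a payload can be framed
   ## once and written out to every connection:
   ##
   ##    msg = self.prepareMessage("tick", binary = False)
   ##    for c in self.clients:
   ##       c.sendPreparedMessage(msg)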
class WebSocketServerProtocol(WebSocketProtocol):
"""
A Twisted protocol for WebSockets servers.
"""
def onConnect(self, connectionRequest):
"""
Callback fired during WebSocket opening handshake when new WebSocket client
connection is about to be established.
Throw HttpException when you don't want to accept the WebSocket
connection request. For example, throw a
HttpException(httpstatus.HTTP_STATUS_CODE_UNAUTHORIZED[0], "You are not authorized for this!").
      When you want to accept the connection, return the accepted protocol
      from the list of WebSocket (sub)protocols provided by the client, or None
      to speak no specific subprotocol (or when the client list was empty).
:param connectionRequest: WebSocket connection request information.
:type connectionRequest: instance of :class:`autobahn.websocket.ConnectionRequest`
"""
return None
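   ## Override sketch (illustrative; assumes ConnectionRequest exposes the
   ## announced subprotocols as a list attribute `protocols`): accept only
   ## clients offering the 'chat' subprotocol, deny everything else:
   ##
   ##    def onConnect(self, connectionRequest):
   ##       if "chat" in connectionRequest.protocols:
   ##          return "chat"
   ##       raise HttpException(HTTP_STATUS_CODE_BAD_REQUEST[0],
   ##                           "subprotocol 'chat' required")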
def connectionMade(self):
"""
Called by Twisted when new TCP connection from client was accepted. Default
implementation will prepare for initial WebSocket opening handshake.
When overriding in derived class, make sure to call this base class
implementation _before_ your code.
"""
self.isServer = True
WebSocketProtocol.connectionMade(self)
self.factory.countConnections += 1
if self.debug:
log.msg("connection accepted from peer %s" % self.peerstr)
def connectionLost(self, reason):
"""
Called by Twisted when established TCP connection from client was lost. Default
implementation will tear down all state properly.
When overriding in derived class, make sure to call this base class
implementation _after_ your code.
"""
WebSocketProtocol.connectionLost(self, reason)
self.factory.countConnections -= 1
if self.debug:
log.msg("connection from %s lost" % self.peerstr)
def parseHixie76Key(self, key):
return int(filter(lambda x: x.isdigit(), key)) / key.count(" ")
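   ## Worked example (illustrative): for the header value "12 3 4" the digits
   ## concatenate to 1234 and the value contains 2 spaces, so the parsed
   ## Hixie-76 key is 1234 / 2 == 617.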
def processHandshake(self):
"""
Process WebSockets opening handshake request from client.
"""
## only proceed when we have fully received the HTTP request line and all headers
##
end_of_header = self.data.find("\x0d\x0a\x0d\x0a")
if end_of_header >= 0:
self.http_request_data = self.data[:end_of_header + 4]
if self.debug:
log.msg("received HTTP request:\n\n%s\n\n" % self.http_request_data)
## extract HTTP status line and headers
##
(self.http_status_line, self.http_headers, http_headers_cnt) = parseHttpHeader(self.http_request_data)
## validate WebSocket opening handshake client request
##
if self.debug:
log.msg("received HTTP status line in opening handshake : %s" % str(self.http_status_line))
log.msg("received HTTP headers in opening handshake : %s" % str(self.http_headers))
## HTTP Request line : METHOD, VERSION
##
rl = self.http_status_line.split()
if len(rl) != 3:
return self.failHandshake("Bad HTTP request status line '%s'" % self.http_status_line)
if rl[0].strip() != "GET":
return self.failHandshake("HTTP method '%s' not allowed" % rl[0], HTTP_STATUS_CODE_METHOD_NOT_ALLOWED[0])
vs = rl[2].strip().split("/")
if len(vs) != 2 or vs[0] != "HTTP" or vs[1] not in ["1.1"]:
return self.failHandshake("Unsupported HTTP version '%s'" % rl[2], HTTP_STATUS_CODE_UNSUPPORTED_HTTP_VERSION[0])
## HTTP Request line : REQUEST-URI
##
self.http_request_uri = rl[1].strip()
try:
(scheme, netloc, path, params, query, fragment) = urlparse.urlparse(self.http_request_uri)
## FIXME: check that if absolute resource URI is given,
## the scheme/netloc matches the server
if scheme != "" or netloc != "":
pass
## Fragment identifiers are meaningless in the context of WebSocket
## URIs, and MUST NOT be used on these URIs.
if fragment != "":
return self.failHandshake("HTTP requested resource contains a fragment identifier '%s'" % fragment)
## resource path and query parameters .. this will get forwarded
## to onConnect()
self.http_request_path = path
self.http_request_params = urlparse.parse_qs(query)
except:
return self.failHandshake("Bad HTTP request resource - could not parse '%s'" % rl[1].strip())
## Host
##
if not self.http_headers.has_key("host"):
return self.failHandshake("HTTP Host header missing in opening handshake request")
if http_headers_cnt["host"] > 1:
return self.failHandshake("HTTP Host header appears more than once in opening handshake request")
self.http_request_host = self.http_headers["host"].strip()
if self.http_request_host.find(":") >= 0:
(h, p) = self.http_request_host.split(":")
try:
port = int(str(p.strip()))
except:
return self.failHandshake("invalid port '%s' in HTTP Host header '%s'" % (str(p.strip()), str(self.http_request_host)))
if port != self.factory.port:
return self.failHandshake("port %d in HTTP Host header '%s' does not match server listening port %s" % (port, str(self.http_request_host), self.factory.port))
self.http_request_host = h
else:
if not ((self.factory.isSecure and self.factory.port == 443) or (not self.factory.isSecure and self.factory.port == 80)):
return self.failHandshake("missing port in HTTP Host header '%s' and server runs on non-standard port %d (wss = %s)" % (str(self.http_request_host), self.factory.port, self.factory.isSecure))
## Upgrade
##
if not self.http_headers.has_key("upgrade"):
## When no WS upgrade, render HTML server status page
##
if self.webStatus:
if self.http_request_params.has_key('redirect') and len(self.http_request_params['redirect']) > 0:
               ## To specify a URL for redirection, encode the URL, i.e. from JavaScript:
##
## var url = encodeURIComponent("http://autobahn.ws/python");
##
## and append the encoded string as a query parameter 'redirect'
##
## http://localhost:9000?redirect=http%3A%2F%2Fautobahn.ws%2Fpython
## https://localhost:9000?redirect=https%3A%2F%2Ftwitter.com%2F
##
## This will perform an immediate HTTP-303 redirection. If you provide
## an additional parameter 'after' (int >= 0), the redirection happens
## via Meta-Refresh in the rendered HTML status page, i.e.
##
## https://localhost:9000/?redirect=https%3A%2F%2Ftwitter.com%2F&after=3
##
url = self.http_request_params['redirect'][0]
if self.http_request_params.has_key('after') and len(self.http_request_params['after']) > 0:
after = int(self.http_request_params['after'][0])
if self.debugCodePaths:
log.msg("HTTP Upgrade header missing : render server status page and meta-refresh-redirecting to %s after %d seconds" % (url, after))
self.sendServerStatus(url, after)
else:
if self.debugCodePaths:
log.msg("HTTP Upgrade header missing : 303-redirecting to %s" % url)
self.sendRedirect(url)
else:
if self.debugCodePaths:
log.msg("HTTP Upgrade header missing : render server status page")
self.sendServerStatus()
self.dropConnection(abort = False)
return
else:
return self.failHandshake("HTTP Upgrade header missing", HTTP_STATUS_CODE_UPGRADE_REQUIRED[0])
upgradeWebSocket = False
for u in self.http_headers["upgrade"].split(","):
if u.strip().lower() == "websocket":
upgradeWebSocket = True
break
if not upgradeWebSocket:
return self.failHandshake("HTTP Upgrade headers do not include 'websocket' value (case-insensitive) : %s" % self.http_headers["upgrade"])
## Connection
##
if not self.http_headers.has_key("connection"):
return self.failHandshake("HTTP Connection header missing")
connectionUpgrade = False
for c in self.http_headers["connection"].split(","):
if c.strip().lower() == "upgrade":
connectionUpgrade = True
break
if not connectionUpgrade:
return self.failHandshake("HTTP Connection headers do not include 'upgrade' value (case-insensitive) : %s" % self.http_headers["connection"])
## Sec-WebSocket-Version PLUS determine mode: Hybi or Hixie
##
if not self.http_headers.has_key("sec-websocket-version"):
if self.debugCodePaths:
log.msg("Hixie76 protocol detected")
if self.allowHixie76:
version = 0
else:
return self.failHandshake("WebSocket connection denied - Hixie76 protocol mode disabled.")
else:
if self.debugCodePaths:
log.msg("Hybi protocol detected")
if http_headers_cnt["sec-websocket-version"] > 1:
return self.failHandshake("HTTP Sec-WebSocket-Version header appears more than once in opening handshake request")
try:
version = int(self.http_headers["sec-websocket-version"])
except:
return self.failHandshake("could not parse HTTP Sec-WebSocket-Version header '%s' in opening handshake request" % self.http_headers["sec-websocket-version"])
if version not in self.versions:
## respond with list of supported versions (descending order)
##
sv = sorted(self.versions)
sv.reverse()
svs = ','.join([str(x) for x in sv])
return self.failHandshake("WebSocket version %d not supported (supported versions: %s)" % (version, svs),
HTTP_STATUS_CODE_BAD_REQUEST[0],
[("Sec-WebSocket-Version", svs)])
else:
## store the protocol version we are supposed to talk
self.websocket_version = version
## Sec-WebSocket-Protocol
##
if self.http_headers.has_key("sec-websocket-protocol"):
protocols = [str(x.strip()) for x in self.http_headers["sec-websocket-protocol"].split(",")]
# check for duplicates in protocol header
pp = {}
for p in protocols:
if pp.has_key(p):
return self.failHandshake("duplicate protocol '%s' specified in HTTP Sec-WebSocket-Protocol header" % p)
else:
pp[p] = 1
# ok, no duplicates, save list in order the client sent it
self.websocket_protocols = protocols
else:
self.websocket_protocols = []
## Origin / Sec-WebSocket-Origin
## http://tools.ietf.org/html/draft-ietf-websec-origin-02
##
if self.websocket_version < 13 and self.websocket_version != 0:
# Hybi, but only < Hybi-13
websocket_origin_header_key = 'sec-websocket-origin'
else:
# RFC6455, >= Hybi-13 and Hixie
websocket_origin_header_key = "origin"
self.websocket_origin = None
if self.http_headers.has_key(websocket_origin_header_key):
if http_headers_cnt[websocket_origin_header_key] > 1:
return self.failHandshake("HTTP Origin header appears more than once in opening handshake request")
self.websocket_origin = self.http_headers[websocket_origin_header_key].strip()
else:
# non-browser clients are allowed to omit this header
pass
## Sec-WebSocket-Extensions
##
## extensions requested by client
self.websocket_extensions = []
## extensions selected by server
self.websocket_extensions_in_use = []
if self.http_headers.has_key("sec-websocket-extensions"):
if self.websocket_version == 0:
return self.failHandshake("Sec-WebSocket-Extensions header specified for Hixie-76")
extensions = [x.strip() for x in self.http_headers["sec-websocket-extensions"].split(',')]
if len(extensions) > 0:
self.websocket_extensions = extensions
if self.debug:
log.msg("client requested extensions we don't support (%s)" % str(extensions))
## Sec-WebSocket-Key (Hybi) or Sec-WebSocket-Key1/Sec-WebSocket-Key2 (Hixie-76)
##
if self.websocket_version == 0:
for kk in ['Sec-WebSocket-Key1', 'Sec-WebSocket-Key2']:
k = kk.lower()
if not self.http_headers.has_key(k):
return self.failHandshake("HTTP %s header missing" % kk)
if http_headers_cnt[k] > 1:
return self.failHandshake("HTTP %s header appears more than once in opening handshake request" % kk)
try:
key1 = self.parseHixie76Key(self.http_headers["sec-websocket-key1"].strip())
key2 = self.parseHixie76Key(self.http_headers["sec-websocket-key2"].strip())
except:
return self.failHandshake("could not parse Sec-WebSocket-Key1/2")
else:
if not self.http_headers.has_key("sec-websocket-key"):
return self.failHandshake("HTTP Sec-WebSocket-Key header missing")
if http_headers_cnt["sec-websocket-key"] > 1:
return self.failHandshake("HTTP Sec-WebSocket-Key header appears more than once in opening handshake request")
key = self.http_headers["sec-websocket-key"].strip()
if len(key) != 24: # 16 bytes => (ceil(128/24)*24)/6 == 24
return self.failHandshake("bad Sec-WebSocket-Key (length must be 24 ASCII chars) '%s'" % key)
if key[-2:] != "==": # 24 - ceil(128/6) == 2
return self.failHandshake("bad Sec-WebSocket-Key (invalid base64 encoding) '%s'" % key)
for c in key[:-2]:
if c not in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+/":
return self.failHandshake("bad character '%s' in Sec-WebSocket-Key (invalid base64 encoding) '%s'" (c, key))
## For Hixie-76, we need 8 octets of HTTP request body to complete HS!
##
if self.websocket_version == 0:
if len(self.data) < end_of_header + 4 + 8:
return
else:
key3 = self.data[end_of_header + 4:end_of_header + 4 + 8]
## Ok, got complete HS input, remember rest (if any)
##
if self.websocket_version == 0:
self.data = self.data[end_of_header + 4 + 8:]
else:
self.data = self.data[end_of_header + 4:]
## WebSocket handshake validated => produce opening handshake response
## Now fire onConnect() on derived class, to give that class a chance to accept or deny
## the connection. onConnect() may throw, in which case the connection is denied, or it
## may return a protocol from the protocols provided by client or None.
##
try:
connectionRequest = ConnectionRequest(self.peer,
self.peerstr,
self.http_headers,
self.http_request_host,
self.http_request_path,
self.http_request_params,
self.websocket_version,
self.websocket_origin,
self.websocket_protocols,
self.websocket_extensions)
## onConnect() will return the selected subprotocol or None
## or raise an HttpException
##
protocol = self.onConnect(connectionRequest)
            if protocol is not None and protocol not in self.websocket_protocols:
raise Exception("protocol accepted must be from the list client sent or None")
self.websocket_protocol_in_use = protocol
except HttpException, e:
return self.failHandshake(e.reason, e.code)
#return self.sendHttpRequestFailure(e.code, e.reason)
except Exception, e:
log.msg("Exception raised in onConnect() - %s" % str(e))
return self.failHandshake("Internal Server Error", HTTP_STATUS_CODE_INTERNAL_SERVER_ERROR[0])
## build response to complete WebSocket handshake
##
response = "HTTP/1.1 %d Switching Protocols\x0d\x0a" % HTTP_STATUS_CODE_SWITCHING_PROTOCOLS[0]
if self.factory.server is not None and self.factory.server != "":
response += "Server: %s\x0d\x0a" % self.factory.server.encode("utf-8")
response += "Upgrade: WebSocket\x0d\x0a"
response += "Connection: Upgrade\x0d\x0a"
if self.websocket_protocol_in_use is not None:
response += "Sec-WebSocket-Protocol: %s\x0d\x0a" % str(self.websocket_protocol_in_use)
if self.websocket_version == 0:
if self.websocket_origin:
               ## browser clients provide the header and expect it to be echoed back
response += "Sec-WebSocket-Origin: %s\x0d\x0a" % str(self.websocket_origin)
if self.debugCodePaths:
log.msg('factory isSecure = %s port = %s' % (self.factory.isSecure, self.factory.port))
if (self.factory.isSecure and self.factory.port != 443) or ((not self.factory.isSecure) and self.factory.port != 80):
if self.debugCodePaths:
log.msg('factory running on non-default port')
response_port = ':' + str(self.factory.port)
else:
if self.debugCodePaths:
log.msg('factory running on default port')
response_port = ''
## FIXME: check this! But see below ..
if False:
response_host = str(self.factory.host)
response_path = str(self.factory.path)
else:
response_host = str(self.http_request_host)
response_path = str(self.http_request_uri)
location = "%s://%s%s%s" % ('wss' if self.factory.isSecure else 'ws', response_host, response_port, response_path)
# Safari is very picky about this one
response += "Sec-WebSocket-Location: %s\x0d\x0a" % location
## end of HTTP response headers
response += "\x0d\x0a"
## compute accept body
##
accept_val = struct.pack(">II", key1, key2) + key3
accept = hashlib.md5(accept_val).digest()
response_body = str(accept)
else:
## compute Sec-WebSocket-Accept
##
sha1 = hashlib.sha1()
sha1.update(key + WebSocketProtocol.WS_MAGIC)
sec_websocket_accept = base64.b64encode(sha1.digest())
response += "Sec-WebSocket-Accept: %s\x0d\x0a" % sec_websocket_accept
if len(self.websocket_extensions_in_use) > 0:
response += "Sec-WebSocket-Extensions: %s\x0d\x0a" % ','.join(self.websocket_extensions_in_use)
## end of HTTP response headers
response += "\x0d\x0a"
response_body = ''
if self.debug:
log.msg("sending HTTP response:\n\n%s%s\n\n" % (response, binascii.b2a_hex(response_body)))
## save and send out opening HS data
##
self.http_response_data = response + response_body
self.sendData(self.http_response_data)
## opening handshake completed, move WebSockets connection into OPEN state
##
self.state = WebSocketProtocol.STATE_OPEN
## cancel any opening HS timer if present
##
if self.openHandshakeTimeoutCall is not None:
if self.debugCodePaths:
log.msg("openHandshakeTimeoutCall.cancel")
self.openHandshakeTimeoutCall.cancel()
self.openHandshakeTimeoutCall = None
## init state
##
self.inside_message = False
if self.websocket_version != 0:
self.current_frame = None
## fire handler on derived class
##
self.onOpen()
## process rest, if any
##
if len(self.data) > 0:
self.consumeData()
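   ## Worked example for the Hybi accept computation above; the key/accept
   ## pair is the well-known sample from RFC 6455, assuming WS_MAGIC is the
   ## GUID "258EAFA5-E914-47DA-95CA-C5AB0DC85B11":
   ##
   ##    key    = "dGhlIHNhbXBsZSBub25jZQ=="
   ##    accept = base64.b64encode(hashlib.sha1(key + WebSocketProtocol.WS_MAGIC).digest())
   ##           = "s3pPLMBiTxaQ9kYGzzhZRbK+xOo="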
def failHandshake(self, reason, code = HTTP_STATUS_CODE_BAD_REQUEST[0], responseHeaders = []):
"""
      During the opening handshake the client request was invalid; we send an HTTP
      error response and then drop the connection.
"""
if self.debug:
log.msg("failing WebSockets opening handshake ('%s')" % reason)
self.sendHttpErrorResponse(code, reason, responseHeaders)
self.dropConnection(abort = False)
def sendHttpErrorResponse(self, code, reason, responseHeaders = []):
"""
Send out HTTP error response.
"""
response = "HTTP/1.1 %d %s\x0d\x0a" % (code, reason.encode("utf-8"))
for h in responseHeaders:
response += "%s: %s\x0d\x0a" % (h[0], h[1].encode("utf-8"))
response += "\x0d\x0a"
self.sendData(response)
def sendHtml(self, html):
"""
Send HTML page HTTP response.
"""
raw = html.encode("utf-8")
response = "HTTP/1.1 %d %s\x0d\x0a" % (HTTP_STATUS_CODE_OK[0], HTTP_STATUS_CODE_OK[1])
if self.factory.server is not None and self.factory.server != "":
response += "Server: %s\x0d\x0a" % self.factory.server.encode("utf-8")
response += "Content-Type: text/html; charset=UTF-8\x0d\x0a"
response += "Content-Length: %d\x0d\x0a" % len(raw)
response += "\x0d\x0a"
response += raw
self.sendData(response)
def sendRedirect(self, url):
"""
Send HTTP Redirect (303) response.
"""
response = "HTTP/1.1 %d\x0d\x0a" % HTTP_STATUS_CODE_SEE_OTHER[0]
#if self.factory.server is not None and self.factory.server != "":
# response += "Server: %s\x0d\x0a" % self.factory.server.encode("utf-8")
response += "Location: %s\x0d\x0a" % url.encode("utf-8")
response += "\x0d\x0a"
self.sendData(response)
def sendServerStatus(self, redirectUrl = None, redirectAfter = 0):
"""
      Used to send out server status/version upon receiving an HTTP/GET without
      a WebSocket upgrade header (and the webStatus option is True).
"""
if redirectUrl:
redirect = """<meta http-equiv="refresh" content="%d;URL='%s'">""" % (redirectAfter, redirectUrl)
else:
redirect = ""
html = """
<!DOCTYPE html>
<html>
<head>
%s
<style>
body {
color: #fff;
background-color: #027eae;
font-family: "Segoe UI", "Lucida Grande", "Helvetica Neue", Helvetica, Arial, sans-serif;
font-size: 16px;
}
a, a:visited, a:hover {
color: #fff;
}
</style>
</head>
<body>
<h1>AutobahnPython %s</h1>
<p>
            I am not a Web server, but a WebSocket endpoint.
You can talk to me using the WebSocket <a href="http://tools.ietf.org/html/rfc6455">protocol</a>.
</p>
<p>
For more information, please visit <a href="http://autobahn.ws/python">my homepage</a>.
</p>
</body>
</html>
""" % (redirect, autobahn.version)
self.sendHtml(html)
class WebSocketServerFactory(protocol.ServerFactory, WebSocketFactory):
"""
A Twisted factory for WebSockets server protocols.
"""
protocol = WebSocketServerProtocol
"""
The protocol to be spoken. Must be derived from :class:`autobahn.websocket.WebSocketServerProtocol`.
"""
def __init__(self,
                ## WebSocket session parameters
url = None,
protocols = [],
server = "AutobahnPython/%s" % autobahn.version,
## debugging
debug = False,
debugCodePaths = False):
"""
Create instance of WebSocket server factory.
Note that you MUST set URL either here or using setSessionParameters() _before_ the factory is started.
      :param url: WebSocket listening URL - ("ws:" | "wss:") "//" host [ ":" port ].
:type url: str
:param protocols: List of subprotocols the server supports. The subprotocol used is the first from the list of subprotocols announced by the client that is contained in this list.
:type protocols: list of strings
      :param server: Server as announced in HTTP response header during opening handshake or None (default: "AutobahnPython/x.x.x").
:type server: str
:param debug: Debug mode (default: False).
:type debug: bool
:param debugCodePaths: Debug code paths mode (default: False).
:type debugCodePaths: bool
"""
self.debug = debug
self.debugCodePaths = debugCodePaths
self.logOctets = debug
self.logFrames = debug
self.isServer = True
## seed RNG which is used for WS frame masks generation
random.seed()
## default WS session parameters
##
self.setSessionParameters(url, protocols, server)
## default WebSocket protocol options
##
self.resetProtocolOptions()
## number of currently connected clients
##
self.countConnections = 0
def setSessionParameters(self, url = None, protocols = [], server = None):
"""
Set WebSocket session parameters.
:param url: WebSocket listening URL - ("ws:" | "wss:") "//" host [ ":" port ].
:type url: str
:param protocols: List of subprotocols the server supports. The subprotocol used is the first from the list of subprotocols announced by the client that is contained in this list.
:type protocols: list of strings
:param server: Server as announced in HTTP response header during opening handshake.
:type server: str
"""
if url is not None:
## parse WebSocket URI into components
(isSecure, host, port, resource, path, params) = parseWsUrl(url)
if path != "/":
raise Exception("path specified for server WebSocket URL")
if len(params) > 0:
raise Exception("query parameters specified for server WebSocket URL")
self.url = url
self.isSecure = isSecure
self.host = host
self.port = port
else:
self.url = None
self.isSecure = None
self.host = None
self.port = None
self.protocols = protocols
self.server = server
def resetProtocolOptions(self):
"""
Reset all WebSocket protocol options to defaults.
"""
self.versions = WebSocketProtocol.SUPPORTED_PROTOCOL_VERSIONS
self.allowHixie76 = WebSocketProtocol.DEFAULT_ALLOW_HIXIE76
self.webStatus = True
self.utf8validateIncoming = True
self.requireMaskedClientFrames = True
self.maskServerFrames = False
self.applyMask = True
self.maxFramePayloadSize = 0
self.maxMessagePayloadSize = 0
self.autoFragmentSize = 0
self.failByDrop = True
self.echoCloseCodeReason = False
self.openHandshakeTimeout = 5
self.closeHandshakeTimeout = 1
self.tcpNoDelay = True
def setProtocolOptions(self,
versions = None,
allowHixie76 = None,
webStatus = None,
utf8validateIncoming = None,
maskServerFrames = None,
requireMaskedClientFrames = None,
applyMask = None,
maxFramePayloadSize = None,
maxMessagePayloadSize = None,
autoFragmentSize = None,
failByDrop = None,
echoCloseCodeReason = None,
openHandshakeTimeout = None,
closeHandshakeTimeout = None,
tcpNoDelay = None):
"""
Set WebSocket protocol options used as defaults for new protocol instances.
:param versions: The WebSockets protocol versions accepted by the server (default: WebSocketProtocol.SUPPORTED_PROTOCOL_VERSIONS).
:type versions: list of ints
:param allowHixie76: Allow to speak Hixie76 protocol version.
:type allowHixie76: bool
:param webStatus: Return server status/version on HTTP/GET without WebSocket upgrade header (default: True).
:type webStatus: bool
:param utf8validateIncoming: Validate incoming UTF-8 in text message payloads (default: True).
:type utf8validateIncoming: bool
:param maskServerFrames: Mask server-to-client frames (default: False).
:type maskServerFrames: bool
:param requireMaskedClientFrames: Require client-to-server frames to be masked (default: True).
:type requireMaskedClientFrames: bool
:param applyMask: Actually apply mask to payload when mask it present. Applies for outgoing and incoming frames (default: True).
:type applyMask: bool
:param maxFramePayloadSize: Maximum frame payload size that will be accepted when receiving or 0 for unlimited (default: 0).
:type maxFramePayloadSize: int
:param maxMessagePayloadSize: Maximum message payload size (after reassembly of fragmented messages) that will be accepted when receiving or 0 for unlimited (default: 0).
:type maxMessagePayloadSize: int
:param autoFragmentSize: Automatic fragmentation of outgoing data messages (when using the message-based API) into frames with payload length <= this size or 0 for no auto-fragmentation (default: 0).
:type autoFragmentSize: int
      :param failByDrop: Fail connections by dropping the TCP connection without performing a closing handshake (default: True).
      :type failByDrop: bool
:param echoCloseCodeReason: Iff true, when receiving a close, echo back close code/reason. Otherwise reply with code == NORMAL, reason = "" (default: False).
:type echoCloseCodeReason: bool
      :param openHandshakeTimeout: Opening WebSocket handshake timeout, timeout in seconds or 0 to deactivate (default: 5).
:type openHandshakeTimeout: float
:param closeHandshakeTimeout: When we expect to receive a closing handshake reply, timeout in seconds (default: 1).
:type closeHandshakeTimeout: float
:param tcpNoDelay: TCP NODELAY ("Nagle") socket option (default: True).
:type tcpNoDelay: bool
"""
if allowHixie76 is not None and allowHixie76 != self.allowHixie76:
self.allowHixie76 = allowHixie76
if versions is not None:
for v in versions:
if v not in WebSocketProtocol.SUPPORTED_PROTOCOL_VERSIONS:
raise Exception("invalid WebSockets protocol version %s (allowed values: %s)" % (v, str(WebSocketProtocol.SUPPORTED_PROTOCOL_VERSIONS)))
if v == 0 and not self.allowHixie76:
raise Exception("use of Hixie-76 requires allowHixie76 == True")
if set(versions) != set(self.versions):
self.versions = versions
if webStatus is not None and webStatus != self.webStatus:
self.webStatus = webStatus
if utf8validateIncoming is not None and utf8validateIncoming != self.utf8validateIncoming:
self.utf8validateIncoming = utf8validateIncoming
if requireMaskedClientFrames is not None and requireMaskedClientFrames != self.requireMaskedClientFrames:
self.requireMaskedClientFrames = requireMaskedClientFrames
if maskServerFrames is not None and maskServerFrames != self.maskServerFrames:
self.maskServerFrames = maskServerFrames
if applyMask is not None and applyMask != self.applyMask:
self.applyMask = applyMask
if maxFramePayloadSize is not None and maxFramePayloadSize != self.maxFramePayloadSize:
self.maxFramePayloadSize = maxFramePayloadSize
if maxMessagePayloadSize is not None and maxMessagePayloadSize != self.maxMessagePayloadSize:
self.maxMessagePayloadSize = maxMessagePayloadSize
if autoFragmentSize is not None and autoFragmentSize != self.autoFragmentSize:
self.autoFragmentSize = autoFragmentSize
if failByDrop is not None and failByDrop != self.failByDrop:
self.failByDrop = failByDrop
if echoCloseCodeReason is not None and echoCloseCodeReason != self.echoCloseCodeReason:
self.echoCloseCodeReason = echoCloseCodeReason
if openHandshakeTimeout is not None and openHandshakeTimeout != self.openHandshakeTimeout:
self.openHandshakeTimeout = openHandshakeTimeout
if closeHandshakeTimeout is not None and closeHandshakeTimeout != self.closeHandshakeTimeout:
self.closeHandshakeTimeout = closeHandshakeTimeout
if tcpNoDelay is not None and tcpNoDelay != self.tcpNoDelay:
self.tcpNoDelay = tcpNoDelay
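   ## Usage sketch (illustrative): tightening limits on a server factory
   ## before it starts listening:
   ##
   ##    factory.setProtocolOptions(maxFramePayloadSize = 65536,
   ##                               maxMessagePayloadSize = 1048576,
   ##                               openHandshakeTimeout = 10)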
def getConnectionCount(self):
"""
Get number of currently connected clients.
:returns: int -- Number of currently connected clients.
"""
return self.countConnections
def startFactory(self):
"""
Called by Twisted before starting to listen on port for incoming connections.
Default implementation does nothing. Override in derived class when appropriate.
"""
pass
def stopFactory(self):
"""
Called by Twisted before stopping to listen on port for incoming connections.
Default implementation does nothing. Override in derived class when appropriate.
"""
pass
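## Server wiring sketch (illustrative; EchoServerProtocol is an assumed
## subclass of WebSocketServerProtocol, and the Twisted reactor drives it):
##
##    factory = WebSocketServerFactory("ws://localhost:9000")
##    factory.protocol = EchoServerProtocol
##    reactor.listenTCP(9000, factory)
##    reactor.run()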
class WebSocketClientProtocol(WebSocketProtocol):
"""
Client protocol for WebSockets.
"""
def onConnect(self, connectionResponse):
"""
Callback fired directly after WebSocket opening handshake when new WebSocket server
connection was established.
:param connectionResponse: WebSocket connection response information.
:type connectionResponse: instance of :class:`autobahn.websocket.ConnectionResponse`
"""
pass
def connectionMade(self):
"""
Called by Twisted when new TCP connection to server was established. Default
implementation will start the initial WebSocket opening handshake.
When overriding in derived class, make sure to call this base class
implementation _before_ your code.
"""
self.isServer = False
WebSocketProtocol.connectionMade(self)
if self.debug:
log.msg("connection to %s established" % self.peerstr)
self.startHandshake()
def connectionLost(self, reason):
"""
Called by Twisted when established TCP connection to server was lost. Default
implementation will tear down all state properly.
When overriding in derived class, make sure to call this base class
implementation _after_ your code.
"""
WebSocketProtocol.connectionLost(self, reason)
if self.debug:
log.msg("connection to %s lost" % self.peerstr)
def createHixieKey(self):
"""
Supposed to implement the crack smoker algorithm below. Well, crack
probably wasn't the stuff they smoked - dog poo?
http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76#page-21
Items 16 - 22
"""
spaces1 = random.randint(1, 12)
max1 = int(4294967295L / spaces1)
number1 = random.randint(0, max1)
product1 = number1 * spaces1
key1 = str(product1)
rchars = filter(lambda x: (x >= 0x21 and x <= 0x2f) or (x >= 0x3a and x <= 0x7e), range(0,127))
for i in xrange(random.randint(1, 12)):
p = random.randint(0, len(key1) - 1)
key1 = key1[:p] + chr(random.choice(rchars)) + key1[p:]
for i in xrange(spaces1):
p = random.randint(1, len(key1) - 2)
key1 = key1[:p] + ' ' + key1[p:]
return (key1, number1)
def startHandshake(self):
"""
Start WebSockets opening handshake.
"""
## construct WS opening handshake HTTP header
##
request = "GET %s HTTP/1.1\x0d\x0a" % self.factory.resource.encode("utf-8")
if self.factory.useragent is not None and self.factory.useragent != "":
request += "User-Agent: %s\x0d\x0a" % self.factory.useragent.encode("utf-8")
request += "Host: %s:%d\x0d\x0a" % (self.factory.host.encode("utf-8"), self.factory.port)
request += "Upgrade: WebSocket\x0d\x0a"
request += "Connection: Upgrade\x0d\x0a"
## handshake random key
##
if self.version == 0:
(self.websocket_key1, number1) = self.createHixieKey()
(self.websocket_key2, number2) = self.createHixieKey()
self.websocket_key3 = os.urandom(8)
accept_val = struct.pack(">II", number1, number2) + self.websocket_key3
self.websocket_expected_challenge_response = hashlib.md5(accept_val).digest()
request += "Sec-WebSocket-Key1: %s\x0d\x0a" % self.websocket_key1
request += "Sec-WebSocket-Key2: %s\x0d\x0a" % self.websocket_key2
else:
self.websocket_key = base64.b64encode(os.urandom(16))
request += "Sec-WebSocket-Key: %s\x0d\x0a" % self.websocket_key
## optional origin announced
##
if self.factory.origin:
         if self.version > 10 or self.version == 0:
            request += "Origin: %s\x0d\x0a" % self.factory.origin.encode("utf-8")
         else:
            request += "Sec-WebSocket-Origin: %s\x0d\x0a" % self.factory.origin.encode("utf-8")
## optional list of WS subprotocols announced
##
if len(self.factory.protocols) > 0:
request += "Sec-WebSocket-Protocol: %s\x0d\x0a" % ','.join(self.factory.protocols)
## set WS protocol version depending on WS spec version
##
if self.version != 0:
request += "Sec-WebSocket-Version: %d\x0d\x0a" % WebSocketProtocol.SPEC_TO_PROTOCOL_VERSION[self.version]
request += "\x0d\x0a"
if self.version == 0:
request += self.websocket_key3
self.http_request_data = request
if self.debug:
log.msg(self.http_request_data)
self.sendData(self.http_request_data)
def processHandshake(self):
"""
Process WebSockets opening handshake response from server.
"""
      ## only proceed when we have fully received the HTTP response line and all headers
##
end_of_header = self.data.find("\x0d\x0a\x0d\x0a")
if end_of_header >= 0:
self.http_response_data = self.data[:end_of_header + 4]
if self.debug:
log.msg("received HTTP response:\n\n%s\n\n" % self.http_response_data)
## extract HTTP status line and headers
##
(self.http_status_line, self.http_headers, http_headers_cnt) = parseHttpHeader(self.http_response_data)
## validate WebSocket opening handshake server response
##
if self.debug:
log.msg("received HTTP status line in opening handshake : %s" % str(self.http_status_line))
log.msg("received HTTP headers in opening handshake : %s" % str(self.http_headers))
## Response Line
##
sl = self.http_status_line.split()
if len(sl) < 2:
return self.failHandshake("Bad HTTP response status line '%s'" % self.http_status_line)
## HTTP version
##
http_version = sl[0].strip()
if http_version != "HTTP/1.1":
return self.failHandshake("Unsupported HTTP version ('%s')" % http_version)
## HTTP status code
##
try:
status_code = int(sl[1].strip())
except:
return self.failHandshake("Bad HTTP status code ('%s')" % sl[1].strip())
if status_code != HTTP_STATUS_CODE_SWITCHING_PROTOCOLS[0]:
## FIXME: handle redirects
## FIXME: handle authentication required
if len(sl) > 2:
reason = " - %s" % sl[2].strip()
else:
reason = ""
return self.failHandshake("WebSockets connection upgrade failed (%d%s)" % (status_code, reason))
## Upgrade
##
if not self.http_headers.has_key("upgrade"):
return self.failHandshake("HTTP Upgrade header missing")
if self.http_headers["upgrade"].strip().lower() != "websocket":
return self.failHandshake("HTTP Upgrade header different from 'websocket' (case-insensitive) : %s" % self.http_headers["upgrade"])
## Connection
##
if not self.http_headers.has_key("connection"):
return self.failHandshake("HTTP Connection header missing")
connectionUpgrade = False
for c in self.http_headers["connection"].split(","):
if c.strip().lower() == "upgrade":
connectionUpgrade = True
break
if not connectionUpgrade:
return self.failHandshake("HTTP Connection header does not include 'upgrade' value (case-insensitive) : %s" % self.http_headers["connection"])
## compute Sec-WebSocket-Accept
##
if self.version != 0:
if not self.http_headers.has_key("sec-websocket-accept"):
return self.failHandshake("HTTP Sec-WebSocket-Accept header missing in opening handshake reply")
else:
if http_headers_cnt["sec-websocket-accept"] > 1:
return self.failHandshake("HTTP Sec-WebSocket-Accept header appears more than once in opening handshake reply")
sec_websocket_accept_got = self.http_headers["sec-websocket-accept"].strip()
sha1 = hashlib.sha1()
sha1.update(self.websocket_key + WebSocketProtocol.WS_MAGIC)
sec_websocket_accept = base64.b64encode(sha1.digest())
if sec_websocket_accept_got != sec_websocket_accept:
return self.failHandshake("HTTP Sec-WebSocket-Accept bogus value : expected %s / got %s" % (sec_websocket_accept, sec_websocket_accept_got))
## handle "extensions in use" - if any
##
self.websocket_extensions_in_use = []
if self.version != 0:
if self.http_headers.has_key("sec-websocket-extensions"):
if http_headers_cnt["sec-websocket-extensions"] > 1:
return self.failHandshake("HTTP Sec-WebSocket-Extensions header appears more than once in opening handshake reply")
exts = self.http_headers["sec-websocket-extensions"].strip()
##
               ## we don't support any extension, but if we did, we would need
               ## to set self.websocket_extensions_in_use here and not fail the handshake
##
return self.failHandshake("server wants to use extensions (%s), but no extensions implemented" % exts)
## handle "subprotocol in use" - if any
##
self.websocket_protocol_in_use = None
if self.http_headers.has_key("sec-websocket-protocol"):
if http_headers_cnt["sec-websocket-protocol"] > 1:
return self.failHandshake("HTTP Sec-WebSocket-Protocol header appears more than once in opening handshake reply")
sp = str(self.http_headers["sec-websocket-protocol"].strip())
if sp != "":
if sp not in self.factory.protocols:
return self.failHandshake("subprotocol selected by server (%s) not in subprotocol list requested by client (%s)" % (sp, str(self.factory.protocols)))
else:
## ok, subprotocol in use
##
self.websocket_protocol_in_use = sp
      ## For Hixie-76, we need 16 octets of HTTP response body to complete the HS!
##
if self.version == 0:
if len(self.data) < end_of_header + 4 + 16:
return
else:
challenge_response = self.data[end_of_header + 4:end_of_header + 4 + 16]
if challenge_response != self.websocket_expected_challenge_response:
return self.failHandshake("invalid challenge response received from server (Hixie-76)")
## Ok, got complete HS input, remember rest (if any)
##
if self.version == 0:
self.data = self.data[end_of_header + 4 + 16:]
else:
self.data = self.data[end_of_header + 4:]
## opening handshake completed, move WebSockets connection into OPEN state
##
self.state = WebSocketProtocol.STATE_OPEN
self.inside_message = False
if self.version != 0:
self.current_frame = None
self.websocket_version = self.version
         ## we handle this symmetrically to the server side .. that is, give
         ## the client a chance to bail out .. e.g. when no subprotocol was
         ## selected by the server
try:
connectionResponse = ConnectionResponse(self.peer,
self.peerstr,
self.http_headers,
None, # FIXME
self.websocket_protocol_in_use,
self.websocket_extensions_in_use)
self.onConnect(connectionResponse)
except Exception, e:
## immediately close the WS connection
##
self.failConnection(1000, str(e))
else:
## fire handler on derived class
##
self.onOpen()
## process rest, if any
##
if len(self.data) > 0:
self.consumeData()
def failHandshake(self, reason):
"""
      During the opening handshake the server response was invalid; we drop the
      connection.
"""
if self.debug:
log.msg("failing WebSockets opening handshake ('%s')" % reason)
self.dropConnection(abort = True)
class WebSocketClientFactory(protocol.ClientFactory, WebSocketFactory):
"""
A Twisted factory for WebSockets client protocols.
"""
protocol = WebSocketClientProtocol
"""
The protocol to be spoken. Must be derived from :class:`autobahn.websocket.WebSocketClientProtocol`.
"""
def __init__(self,
                ## WebSocket session parameters
url = None,
origin = None,
protocols = [],
useragent = "AutobahnPython/%s" % autobahn.version,
## debugging
debug = False,
debugCodePaths = False):
"""
Create instance of WebSocket client factory.
Note that you MUST set URL either here or using setSessionParameters() _before_ the factory is started.
:param url: WebSocket URL to connect to - ("ws:" | "wss:") "//" host [ ":" port ] path [ "?" query ].
:type url: str
:param origin: The origin to be sent in WebSockets opening handshake or None (default: None).
:type origin: str
:param protocols: List of subprotocols the client should announce in WebSockets opening handshake (default: []).
:type protocols: list of strings
      :param useragent: User agent as announced in HTTP request header or None (default: "AutobahnPython/x.x.x").
:type useragent: str
:param debug: Debug mode (default: False).
:type debug: bool
:param debugCodePaths: Debug code paths mode (default: False).
:type debugCodePaths: bool
"""
self.debug = debug
self.debugCodePaths = debugCodePaths
self.logOctets = debug
self.logFrames = debug
self.isServer = False
## seed RNG which is used for WS opening handshake key and WS frame masks generation
random.seed()
## default WS session parameters
##
self.setSessionParameters(url, origin, protocols, useragent)
## default WebSocket protocol options
##
self.resetProtocolOptions()
def setSessionParameters(self, url = None, origin = None, protocols = [], useragent = None):
"""
Set WebSocket session parameters.
:param url: WebSocket URL to connect to - ("ws:" | "wss:") "//" host [ ":" port ] path [ "?" query ].
:type url: str
:param origin: The origin to be sent in opening handshake.
:type origin: str
:param protocols: List of WebSocket subprotocols the client should announce in opening handshake.
:type protocols: list of strings
:param useragent: User agent as announced in HTTP request header during opening handshake.
:type useragent: str
"""
if url is not None:
## parse WebSocket URI into components
(isSecure, host, port, resource, path, params) = parseWsUrl(url)
self.url = url
self.isSecure = isSecure
self.host = host
self.port = port
self.resource = resource
self.path = path
self.params = params
else:
self.url = None
self.isSecure = None
self.host = None
self.port = None
self.resource = None
self.path = None
self.params = None
self.origin = origin
self.protocols = protocols
self.useragent = useragent
def resetProtocolOptions(self):
"""
Reset all WebSocket protocol options to defaults.
"""
self.version = WebSocketProtocol.DEFAULT_SPEC_VERSION
self.allowHixie76 = WebSocketProtocol.DEFAULT_ALLOW_HIXIE76
self.utf8validateIncoming = True
self.acceptMaskedServerFrames = False
self.maskClientFrames = True
self.applyMask = True
self.maxFramePayloadSize = 0
self.maxMessagePayloadSize = 0
self.autoFragmentSize = 0
self.failByDrop = True
self.echoCloseCodeReason = False
self.serverConnectionDropTimeout = 1
self.openHandshakeTimeout = 5
self.closeHandshakeTimeout = 1
self.tcpNoDelay = True
def setProtocolOptions(self,
version = None,
allowHixie76 = None,
utf8validateIncoming = None,
acceptMaskedServerFrames = None,
maskClientFrames = None,
applyMask = None,
maxFramePayloadSize = None,
maxMessagePayloadSize = None,
autoFragmentSize = None,
failByDrop = None,
echoCloseCodeReason = None,
serverConnectionDropTimeout = None,
openHandshakeTimeout = None,
closeHandshakeTimeout = None,
tcpNoDelay = None):
"""
Set WebSocket protocol options used as defaults for _new_ protocol instances.
:param version: The WebSockets protocol spec (draft) version to be used (default: WebSocketProtocol.DEFAULT_SPEC_VERSION).
:type version: int
:param allowHixie76: Allow to speak Hixie76 protocol version.
:type allowHixie76: bool
:param utf8validateIncoming: Validate incoming UTF-8 in text message payloads (default: True).
:type utf8validateIncoming: bool
:param acceptMaskedServerFrames: Accept masked server-to-client frames (default: False).
:type acceptMaskedServerFrames: bool
:param maskClientFrames: Mask client-to-server frames (default: True).
:type maskClientFrames: bool
:param applyMask: Actually apply mask to payload when mask it present. Applies for outgoing and incoming frames (default: True).
:type applyMask: bool
:param maxFramePayloadSize: Maximum frame payload size that will be accepted when receiving or 0 for unlimited (default: 0).
:type maxFramePayloadSize: int
:param maxMessagePayloadSize: Maximum message payload size (after reassembly of fragmented messages) that will be accepted when receiving or 0 for unlimited (default: 0).
:type maxMessagePayloadSize: int
:param autoFragmentSize: Automatic fragmentation of outgoing data messages (when using the message-based API) into frames with payload length <= this size or 0 for no auto-fragmentation (default: 0).
:type autoFragmentSize: int
:param failByDrop: Fail connections by dropping the TCP connection without performing closing handshake (default: True).
      :type failByDrop: bool
:param echoCloseCodeReason: Iff true, when receiving a close, echo back close code/reason. Otherwise reply with code == NORMAL, reason = "" (default: False).
:type echoCloseCodeReason: bool
:param serverConnectionDropTimeout: When the client expects the server to drop the TCP, timeout in seconds (default: 1).
:type serverConnectionDropTimeout: float
      :param openHandshakeTimeout: Opening WebSocket handshake timeout, timeout in seconds or 0 to deactivate (default: 5).
:type openHandshakeTimeout: float
:param closeHandshakeTimeout: When we expect to receive a closing handshake reply, timeout in seconds (default: 1).
:type closeHandshakeTimeout: float
:param tcpNoDelay: TCP NODELAY ("Nagle") socket option (default: True).
:type tcpNoDelay: bool
"""
if allowHixie76 is not None and allowHixie76 != self.allowHixie76:
self.allowHixie76 = allowHixie76
if version is not None:
if version not in WebSocketProtocol.SUPPORTED_SPEC_VERSIONS:
raise Exception("invalid WebSockets draft version %s (allowed values: %s)" % (version, str(WebSocketProtocol.SUPPORTED_SPEC_VERSIONS)))
if version == 0 and not self.allowHixie76:
raise Exception("use of Hixie-76 requires allowHixie76 == True")
if version != self.version:
self.version = version
if utf8validateIncoming is not None and utf8validateIncoming != self.utf8validateIncoming:
self.utf8validateIncoming = utf8validateIncoming
if acceptMaskedServerFrames is not None and acceptMaskedServerFrames != self.acceptMaskedServerFrames:
self.acceptMaskedServerFrames = acceptMaskedServerFrames
if maskClientFrames is not None and maskClientFrames != self.maskClientFrames:
self.maskClientFrames = maskClientFrames
if applyMask is not None and applyMask != self.applyMask:
self.applyMask = applyMask
if maxFramePayloadSize is not None and maxFramePayloadSize != self.maxFramePayloadSize:
self.maxFramePayloadSize = maxFramePayloadSize
if maxMessagePayloadSize is not None and maxMessagePayloadSize != self.maxMessagePayloadSize:
self.maxMessagePayloadSize = maxMessagePayloadSize
if autoFragmentSize is not None and autoFragmentSize != self.autoFragmentSize:
self.autoFragmentSize = autoFragmentSize
if failByDrop is not None and failByDrop != self.failByDrop:
self.failByDrop = failByDrop
if echoCloseCodeReason is not None and echoCloseCodeReason != self.echoCloseCodeReason:
self.echoCloseCodeReason = echoCloseCodeReason
if serverConnectionDropTimeout is not None and serverConnectionDropTimeout != self.serverConnectionDropTimeout:
self.serverConnectionDropTimeout = serverConnectionDropTimeout
if openHandshakeTimeout is not None and openHandshakeTimeout != self.openHandshakeTimeout:
self.openHandshakeTimeout = openHandshakeTimeout
if closeHandshakeTimeout is not None and closeHandshakeTimeout != self.closeHandshakeTimeout:
self.closeHandshakeTimeout = closeHandshakeTimeout
if tcpNoDelay is not None and tcpNoDelay != self.tcpNoDelay:
self.tcpNoDelay = tcpNoDelay
def clientConnectionFailed(self, connector, reason):
"""
Called by Twisted when the connection to server has failed. Default implementation
does nothing. Override in derived class when appropriate.
"""
pass
def clientConnectionLost(self, connector, reason):
"""
Called by Twisted when the connection to the server was lost. Default implementation
does nothing. Override in derived class when appropriate.
"""
pass
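# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# The method above is the body of an Autobahn-style setProtocolOptions(). Assuming
# an Autobahn WebSocketClientFactory is available (import path per modern Autobahn;
# older releases exposed it from autobahn.websocket), the options documented in the
# docstring can be tuned like this. The URL and option values are illustrative.
from autobahn.twisted.websocket import WebSocketClientFactory

factory = WebSocketClientFactory("ws://127.0.0.1:9000")
factory.setProtocolOptions(
    version=13,                 # RFC 6455 wire protocol
    utf8validateIncoming=True,  # validate UTF-8 in incoming text frames
    autoFragmentSize=65536,     # auto-fragment outgoing messages above 64 KiB
    failByDrop=True,            # fail connections by dropping the TCP connection
    openHandshakeTimeout=5.0,   # seconds; 0 deactivates the timeout
    tcpNoDelay=True,
)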
| [
"[email protected]"
] | |
09c4d503aed61e80d4d4a09cddc15e187782154b | 3cd1246ff58f26329021f2d13caa62221c91d5a4 | /testdata/python/binary/and_.py | 2e3721fee8e99a5e1844d9ed4aba6a89bdea90f4 | [] | no_license | mwkmwkmwk/unpyc | 0929e15fb37599496930299d7ced0bf1bedd7e99 | 000fdaec159050c94b7ecf6ab57be3950676f778 | refs/heads/master | 2020-12-01T14:01:57.592806 | 2016-03-21T14:11:43 | 2016-03-21T14:12:01 | 230,650,936 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10 | py | x = b & 2
| [
"[email protected]"
] | |
ccefb9800777cb3c74d7e87d7736daf68f3b4d36 | 0a3da7d8ee3453beec7798a62003014187b714b1 | /data/ai/99_others/nlp/text-classifier/text-classifier-rule-resume/cv-filter.py | 2b008e6920d75a57b8bf8f0c003819bffd2f9aba | [] | no_license | atfly/atlib | 635d1b0804af01c090a92048ed09758cb03820bf | 2c15fa47b2b915c1d998e89bbd69ff24dba52137 | refs/heads/master | 2021-05-16T12:51:50.157911 | 2017-11-16T06:50:33 | 2017-11-16T06:50:33 | 105,330,432 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,299 | py | #!/usr/bin/env python
# _*_ coding:utf-8 _*_
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
import xlrd # needed by the __main__ block below, which reads an .xlsx sample file
cv_basic_patterns = {"name": ["姓名"], "phone": ["手机", "电话"], "email": ["邮箱", "e-mail"],"age": ["年龄"], "address": ["通讯地址"],"location": ["居住地"], "hukou": ["户口"], "gender": ["性别", "男", "女"]}
cv_edu_patterns = {"university": ["毕业院校", "教育背景"], "major": ["专业"], "degree": ["学历", "大专", "专科", "硕士", "博士", "研究生"]}
cv_job_patterns = {"evaluation": ["个人描述", "自我评价", "个人情况", "兴趣"], "career": ["求职意向", "应聘职位", "求职类型", "职位"], "work": ["工作经历", "工作经验", "工作职责", "工作地点", "工作地区"], "project": ["项目经历", "项目"]}
cv_include_keys = {"cv": ["岗位职责", "任职要求", "任职资格", "能力要求", "基本要求", "职责描述", "岗位要求", "岗位描述", "岗位名称", "职位描述"]}
jd_include_keys = {"jd": ["求职意向", "求职状态", "教育背景", "教育经历"]}
# Grouped view of the pattern dicts above; isCV() below looks patterns up
# under these three names.
cv_patterns = {
    "base_info": [p for v in cv_basic_patterns.values() for p in v],
    "education_info": [p for v in cv_edu_patterns.values() for p in v],
    "job_info": [p for v in cv_job_patterns.values() for p in v],
}
def cvMatchFlow(content):
cv_basic_matches = {}
cv_edu_matches = {}
cv_job_matches = {}
cv_key_matches = {}
jd_key_matches = {}
for k,v in cv_basic_patterns.items():
cv_basic_matches[k]= [content.find(eachv) for eachv in v]
for k,v in cv_edu_patterns.items():
cv_edu_matches[k]= [content.find(eachv) for eachv in v]
for k,v in cv_job_patterns.items():
cv_job_matches[k]= [content.find(eachv) for eachv in v]
for k,v in cv_include_keys.items():
cv_key_matches[k]= [content.find(eachv) for eachv in v]
for k,v in jd_include_keys.items():
jd_key_matches[k]= [content.find(eachv) for eachv in v]
return cv_basic_matches,cv_edu_matches,cv_job_matches,cv_key_matches,jd_key_matches
def cvRecognition(content):
cv_basic_matches,cv_edu_matches,cv_job_matches,cv_key_matches,jd_key_matches=cvMatchFlow(content)
# The original statement here was truncated ("cv_basic_matches.items()." was
# left dangling); return the raw match positions so callers can apply their
# own classification rules.
return cv_basic_matches,cv_edu_matches,cv_job_matches,cv_key_matches,jd_key_matches
def isNotCV(content):
for key in jd_include_keys["jd"]:
if key in content:
return True
return False
def isCV(content):
base_info_match = []
education_info_match = []
job_info_match = []
base_info_list = []
education_info_list = []
job_info_list = []
other_info_list = []
for k, v in cv_patterns.items():
if k == "base_info":
base_info_list = [content.find(eachv) for eachv in v]
elif k == "education_info":
education_info_list = [content.find(eachv) for eachv in v]
elif k == "job_info":
job_info_list = [content.find(eachv) for eachv in v]
else:
pass
base_info_match = [ v for v in base_info_list if v != -1]
education_info_match = [v for v in education_info_list if v != -1]
job_info_match = [v for v in job_info_list if v != -1]
print base_info_match
print job_info_match
print education_info_match
if len(base_info_match) > 0 and len(job_info_match) > 0:
if min(base_info_match) <= min(job_info_match) and min(base_info_match) < len(content)/2:
return True
if len(education_info_match) > 0 and min(education_info_match) < len(content)/2 and min(base_info_match) < min(education_info_match):
return True
for key in cv_include_keys["cv"]:
if key in content:
return True
return False
if len(job_info_match) > 0 and len(education_info_match) > 0:
for key in cv_include_keys["cv"]:
if key in content:
return True
if len(base_info_match) >= 2 and len(job_info_match) == 0 and len(education_info_match) > 0:
return True
return False
if __name__ == "__main__":
path = "Sample_2.xlsx"
descPos = 2
data = xlrd.open_workbook(path)
tableSample = data.sheets()[1]
nrows = tableSample.nrows
datav = []
for row in range(nrows):
if row != 0:
datav.append(tableSample.row_values(row)[descPos].lower())
f = open("sample_2_res.txt", "w")
for line in datav:
if isNotCV(line):
f.write("other\n")
continue
if isCV(line):
f.write("cv\n")
else:
f.write("other\n")
f.close()
| [
"[email protected]"
] | |
d0d76f63c0475f78d1062683c3de2370e4c53de6 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_152/ch4_2020_09_11_17_51_58_429714.py | e3254130811b41145b6e3da216c09d9b3ffa527c | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | def classifica_idade(idade):
if idade >= 18:
return 'adulto'
elif 12 <= idade <= 17:
return 'adolescente'
else:
return 'crianca' | [
"[email protected]"
] | |
074fea474ada199da2341663a3361ad9c806139c | 1ff9adfdb9d559e6f81ed9470467bab25e93b5ab | /src/ta_lib/_vendor/tigerml/core/utils/stats.py | dc81eb055a7defec79beb1790a276d8acedfe3f6 | [] | no_license | Seemant-tiger/housing-price-prediction | a39dbefcb11bc460edeeee92e6becf77d35ff3a8 | be5d8cca769c7e267cfee1932eb82b70c2855bc1 | refs/heads/main | 2023-06-24T00:25:49.776720 | 2021-07-18T16:44:28 | 2021-07-18T16:44:28 | 387,222,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,242 | py | import itertools
import numpy as np
import pandas as pd
def woe_info_value(target_series, idv_series, target_counts=None):
"""Compute Information Value (IV) from WOE (weight of evidence).
Parameters
----------
target_series: pd.Series of target variable
idv_series: pd.Series of categorical variable
target_counts: optional pd.DataFrame with columns ["target", "target_counts"];
computed from target_series when not supplied
Returns
-------
information value of the categorical feature
"""
if target_counts is None:
target_counts = target_series.value_counts().reset_index()
target_counts.columns = ["target", "target_counts"]
df = pd.DataFrame({"target": target_series.values, "idv": idv_series.values})
col_target_counts = df.groupby(["idv", "target"]).size().reset_index()
col_target_counts.columns = ["idv", "target", "col_target_counts"]
# Handle Zero Event/ Non-Event
# AdjustedWOE = ln(((Number of non-events in a group + 0.5) /
# Number of non-events) /
# ((Number of events in a group + 0.5) / Number of events)))
# https://www.listendata.com/2015/03/weight-of-evidence-woe-and-information.html
exhaustive_combinations = list(
itertools.product(
col_target_counts.idv.unique(), col_target_counts.target.unique()
)
)
exhaustive_combinations_df = pd.DataFrame(
exhaustive_combinations, columns=["idv", "target"]
)
col_target_counts = pd.merge(
exhaustive_combinations_df, col_target_counts, how="outer", on=["idv", "target"]
)
col_target_counts["col_target_counts"].fillna(0.5, inplace=True)
col_target_counts = col_target_counts.merge(target_counts, on="target")
col_target_counts["col_target_per"] = (
col_target_counts["col_target_counts"] / col_target_counts["target_counts"]
)
col_target_per = col_target_counts.pivot_table(
index="idv", columns="target", values="col_target_per", fill_value=0
)
col_target_per.columns = ["NonEvent", "Event"]
col_target_per["WoE"] = np.log(col_target_per["NonEvent"] / col_target_per["Event"])
col_target_per["IV"] = (
col_target_per["NonEvent"] - col_target_per["Event"]
) * col_target_per["WoE"]
return col_target_per["IV"].sum()
def correlation_ratio(categories, measurements):
"""Compute correlation ratio η (eta).
Parameters
----------
categories: pd.Series of categorical variable
measurements: pd.Series of continuous variable
Returns
-------
correlation ratio: float
"""
fcat, _ = pd.factorize(categories)
cat_num = np.max(fcat) + 1
y_avg_array = np.zeros(cat_num)
n_array = np.zeros(cat_num)
for i in range(0, cat_num):
cat_measures = measurements[np.argwhere(fcat == i).flatten()]
n_array[i] = len(cat_measures)
y_avg_array[i] = np.average(cat_measures)
y_total_avg = np.sum(np.multiply(y_avg_array, n_array)) / np.sum(n_array)
numerator = np.sum(
np.multiply(n_array, np.power(np.subtract(y_avg_array, y_total_avg), 2))
)
denominator = np.sum(np.power(np.subtract(measurements, y_total_avg), 2))
if numerator == 0:
eta = 0.0
else:
eta = np.sqrt(numerator / denominator)
return eta
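# Hedged usage sketch (added for illustration): toy data exercising both
# helpers above. The column values are made up purely to show the call shape.
if __name__ == "__main__":
    target = pd.Series([1, 0, 1, 1, 0, 0, 1, 0])
    segment = pd.Series(["a", "a", "b", "b", "a", "b", "a", "b"])
    spend = pd.Series([10.0, 12.0, 30.0, 28.0, 11.0, 27.0, 9.0, 31.0])
    print("IV of segment vs. target:", woe_info_value(target, segment))
    print("eta of segment vs. spend:", correlation_ratio(segment, spend))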
| [
"[email protected]"
] | |
2eb0e04cbf98eb39a6c920a04901fe777673aa45 | 31c77e4002bea7b8a3b09eef1c96b0925b8f3490 | /src/spaceone/inventory/connector/aws_ecr_connector/__init__.py | 52d970d20f7f7f9ead201c388d6dfede55d095f4 | [
"Apache-2.0"
] | permissive | Jeoungseungho/plugin-aws-cloud-services | 109e33440ac9ec0932bd71339849e13f15b4b0fa | 2a4df525599dcc77bd67c8b677b7cc93dc177439 | refs/heads/master | 2023-03-10T07:11:27.789611 | 2021-02-24T02:39:33 | 2021-02-24T02:39:33 | 330,849,261 | 0 | 0 | Apache-2.0 | 2021-02-04T08:15:02 | 2021-01-19T03:08:10 | Python | UTF-8 | Python | false | false | 82 | py | from spaceone.inventory.connector.aws_ecr_connector.connector import ECRConnector
| [
"[email protected]"
] | |
785823daf0c6021b57e5de394a425498b30194d3 | 4d332c45578246847ef2cdcdeb827ca29ab06090 | /modules/Bio/NeuralNetwork/BackPropagation/Network.py | 7c9b53f69f6bef0be388469fab4eaa6abe4a286f | [
"MIT"
] | permissive | prateekgupta3991/justforlearn | 616cc297a2a6119fa959b9337a5e91c77a11ebf7 | 3984c64063b356cf89003e17a914272983b6cf48 | refs/heads/master | 2021-03-12T22:09:12.184638 | 2014-01-28T10:37:07 | 2014-01-28T10:37:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64 | py | /usr/share/pyshared/Bio/NeuralNetwork/BackPropagation/Network.py | [
"[email protected]"
] | |
02786c59d2cf9e90a7fb6700ed31bc99cfca740b | bebacae90aa17ad2ab4c9111a2e5cfa0f8cf13a6 | /Python-3/basic_examples/python_breakpoint_examples.py | 82779156f60abb7de9db164c20fc32e1fa246595 | [
"MIT"
] | permissive | ayanakshi/journaldev | 5b0d73c53bc9a5292a8629c6c0320196abeab76e | a61cba22232e8cc9c40264c31aaba0bd17ff2522 | refs/heads/master | 2020-03-27T21:52:15.081736 | 2018-08-31T11:51:28 | 2018-08-31T11:51:28 | 147,182,378 | 1 | 0 | MIT | 2018-09-03T09:28:38 | 2018-09-03T09:28:38 | null | UTF-8 | Python | false | false | 388 | py | x = 10
y = 'Hi'
z = 'Hello'
print(y)
# breakpoint() is introduced in Python 3.7
breakpoint()
print(z)
# Execution Steps
# Default:
# $python3.7 python_breakpoint_examples.py
# Disable Breakpoint:
# $PYTHONBREAKPOINT=0 python3.7 python_breakpoint_examples.py
# Using Other Debugger (for example web-pdb):
# $PYTHONBREAKPOINT=web_pdb.set_trace python3.7 python_breakpoint_examples.py
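# Hedged extra sketch (added for illustration): breakpoint() dispatches through
# sys.breakpointhook, so a custom hook can replace pdb entirely without touching
# the environment. The hook below is an assumption for demonstration only.
import sys

def log_only_hook(*args, **kwargs):
    # Instead of dropping into a debugger, just report where we are.
    frame = sys._getframe(1)
    print("breakpoint hit at {0}:{1}".format(frame.f_code.co_filename, frame.f_lineno))

sys.breakpointhook = log_only_hook
breakpoint()  # now prints a location instead of starting pdb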
| [
"[email protected]"
] | |
7d51ad00952c385a402ae18745b0c2224c9cd731 | b8c65f30cd80f1ca3400ff88e02d6e92d83f4137 | /eve/exceptions.py | fc0361299e5eabe4201f5841e6de2014895c5ae2 | [
"BSD-3-Clause"
] | permissive | cnsoft/eve | e3429d5dade3cac2190ef0a56143e3a033107ea5 | f624855c0e83668aa2db4f9c482da38847c699f4 | refs/heads/master | 2021-01-09T06:49:11.827912 | 2013-07-25T08:13:35 | 2013-07-25T08:13:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | # -*- coding: utf-8 -*-
"""
eve.exceptions
~~~~~~~~~~~~~~
This module implements Eve custom exceptions.
:copyright: (c) 2012 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
class ConfigException(Exception):
""" Raised when errors are found in the configuration settings (usually
`settings.py`).
"""
pass
class SchemaException(ConfigException):
""" Raised when errors are found in a field schema definition """
pass
| [
"[email protected]"
] | |
770f6161c2b5791f1460733e6599363406134e67 | 41a672c9505b5b53c58a01d5455acc410949aa24 | /tests/aoutgoing/acceptance/messaging/p2p/context/C_15457.py | d782e225bd120f95750cafe463d46b88d56df246 | [] | no_license | Alexsorgo/mobile_iOS | b045a0ea058726841c88158be8407b7ae45e893e | 7e298f890b408cedad9db9d0aefeccd9c10d6002 | refs/heads/master | 2022-12-12T17:26:14.039876 | 2020-03-18T06:34:56 | 2020-03-18T06:34:56 | 248,154,882 | 0 | 0 | null | 2021-06-02T01:13:05 | 2020-03-18T06:25:17 | Python | UTF-8 | Python | false | false | 1,322 | py | import pytest
from configs import config
from enums import context_enums
from screens.chats.chat_list_screen import ChatListScreen
from screens.chats.chat_screen import ChatScreen
from screens.chats.location_screen import LocationScreen
from controls.menu import Menu
from tests.aoutgoing.base_test import BaseTest
from utils.logs import log
from utils.verify import Verify
@pytest.mark.skip
class TestC15457(BaseTest):
"""
Check context menu items on location message in p2p chat
"""
FRIEND = config.AMERICA_FIRSTNAME + ' ' + config.AMERICA_LASTNAME
def test_c15457(self):
log.info("Check context menu items on location message in p2p chat")
menu = Menu(self.driver)
chat = ChatScreen(self.driver)
chat_list = ChatListScreen(self.driver)
location = LocationScreen(self.driver)
menu.go_to(menu.wenums.CHATS, [menu.wenums.ALL])
chat_list.tap_user(self.FRIEND)
menu.go_to(menu.wenums.ACTIONS, [menu.wenums.LOCATION, menu.wenums.SEND_LOCATION], menu.wenums.CHATS)
location.tap_send_location()
chat.open_context_menu_last_bubble()
log.info("Verify context menu items")
Verify.equals(context_enums.LOCATION_CONTEXT_MENU_ITEMS, chat.get_context_options(),
"Wrong context menu items")
| [
"[email protected]"
] | |
d106059d970e58822acb60ca0f9d2965aa8d056b | 8eb5008ad4ab98f72666c54e3f83a8b17ac096f6 | /UIPackage/LoginMainWindow.py | 24f8c8e4d9b92506b4e5f5c80ca2d2e029873bb5 | [] | no_license | caojiaju-2017/HSShareKowledge | ff60d28a894807462de9402a2fdd28a1723c0ddf | e511439444959c518a4962ea4b6590f7c28bd112 | refs/heads/master | 2020-03-30T08:45:40.620897 | 2018-11-11T12:34:04 | 2018-11-11T12:34:04 | 151,037,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,612 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.Qt import *
from ByPlatform.Base.OutPutHelper import *
import sys
from UIDesigner.LoginUI import Ui_MainWindow
from tinydb import TinyDB, Query
from ByPlatform.Base.TimeHelper import *
from UIPackage.WaitWindow import WaitWindow # used by LoginSystem() below
from UIPackage.KnowlegeForm import KnowlegeForm
class LoginMainWindow(QMainWindow,Ui_MainWindow):
"""docstring for myDialog"""
def __init__(self, arg=None):
super(LoginMainWindow, self).__init__(arg)
self.setupUi(self)
self.setWindowIcon(QIcon(r'Res\logo.png'))
self.setWindowTitle("超级智慧终端")
self.setMinimumWidth(1000)
self.setMinimumHeight(580)
# set the login window background
window_pale = QPalette()
window_pale.setBrush(self.backgroundRole(),QBrush(QPixmap(r"Res\loginback.jpg")))
self.setPalette(window_pale)
self.setWindowFlags(Qt.CustomizeWindowHint)
self.setWindowFlags(Qt.FramelessWindowHint)
self.setStyleSheet("venus--TitleBar {border-radius:10px;}")
# window-drag state
self.m_flag = False
self.m_Position = None
# initialize the title
self.initTitle()
# initialize the input controls
self.initInputButton()
# load saved account info
self.accountSet = None
self.loadAccount()
self.mainWindow = None
def loadAccount(self):
configQuery = Query()
db = TinyDB('config.json')
table = db.table('config')
result = table.all()
if len(result) <= 0:
pass
else:
self.accountSet = result[0]
def mousePressEvent(self, event):
if event.button() == Qt.LeftButton:
self.m_flag = True
self.m_Position = event.globalPos() - self.pos() # mouse position relative to the window
event.accept()
self.setCursor(QCursor(Qt.OpenHandCursor)) # change the cursor icon
def mouseMoveEvent(self, QMouseEvent):
if Qt.LeftButton and self.m_flag:
self.move(QMouseEvent.globalPos() - self.m_Position) # move the window
QMouseEvent.accept()
def mouseReleaseEvent(self, QMouseEvent):
self.m_flag = False
self.setCursor(QCursor(Qt.ArrowCursor))
def initTitle(self):
# open an image with QPixmap and keep it in the variable png
png = QPixmap(r'Res\wordtitle.png')
# call setPixmap on the label so it displays the png image loaded above
self.loginTitle.setPixmap(png)
labWd = self.width() * 0.28
labHd = int(labWd *60/340.0)
self.loginTitle.setMinimumWidth(labWd)
self.loginTitle.setMinimumHeight(labHd)
startX = (self.width() - labWd) / 2
startY = int(self.height()*0.3)
self.loginTitle.setGeometry(startX,startY,labWd,labHd)
self.loginTitle.setScaledContents(True) # scale the image to the label size
self.pbLogin.clicked.connect(self.LoginSystem)
pass
def LoginSystem(self):
'''
Handle the login button click.
:return:
'''
self.waitDlg = WaitWindow()
# center the dialog on the screen
frmX = self.waitDlg.width()
frmY = self.waitDlg.height()
deskWidth = QDesktopWidget().width()
deskHeight = QDesktopWidget().height()
movePoint = QPoint(deskWidth / 2 - frmX / 2, deskHeight / 2 - frmY / 2)
# movePoint = QPoint(0,0)
self.waitDlg.move(movePoint)
# self.waitDlg.setModal(True)
# self.waitDlg.createLabel()
self.waitDlg.update()
self.waitDlg.exec_()
OutPutHelper.consolePrint("loginsystem")
userName = self.userName.text()
userPassword = self.userPassword.text()
MESSAGE = "账号或密码错误"
if self.accountSet:
if userName == self.accountSet["account"] and userPassword == self.accountSet["password"]:
pass
else:
reply = QMessageBox.information(self, "信息", MESSAGE)
if reply == QMessageBox.Ok:
pass
else:
pass
return
elif userName == "root" and userPassword == "123456":
self.accountSet = {"account": userName, "password": userPassword,
"logintime": TimeHelper.getCurrentTime()}
else:
reply = QMessageBox.information(self, "信息", MESSAGE)
if reply == QMessageBox.Ok:
pass
else:
pass
return
db = TinyDB('config.json')
table = db.table('config')
table.purge()
table.insert(self.accountSet)
# NOTE: SuperSmartWindow is not imported in this file; it is assumed to be
# provided elsewhere in the application (e.g. its main-window module).
self.mainWindow = SuperSmartWindow()
self.mainWindow.show()
self.hide()
def initInputButton(self):
palette = self.palette()
# palette.setColor(palette.Window, QColor(210, 210, 210))
#
# self.loginpannel.setAutoFillBackground(True)
# self.loginpannel.setPalette(palette)
self.loginpannel.setWindowOpacity(0.6)
# setWindowOpacity
self.loginpannel.setStyleSheet("#loginpannel{border:0px groove gray;border-radius:10px;padding:2px 4px;background-color: #ffffff;color: #000000;}")
self.pbLogin.setStyleSheet(
"#pbLogin{border-radius:6px; background:rgba(65, 168, 200,0.8); color:white;}" + "#pbLogin:hover{background:rgb(255,128,64);}")
self.pbLogin.setCursor(Qt.PointingHandCursor)
self.userName.setStyleSheet(
"#userName{border:2px groove gray;border-radius:4px; background:rgba(255, 255, 255,1); color:black;}" + "#userName:hover{background:rgb(255, 255, 255);}")
self.userPassword.setStyleSheet(
"#userPassword{border:2px groove gray;border-radius:4px; background:rgba(255, 255, 255,1); color:black;}" + "#userPassword:hover{background:rgb(255, 255, 255);}")
panelWd = self.loginpannel.width()
panelHd = self.loginpannel.height()
startX = (self.width() - panelWd) / 2
startY = (self.height() - panelHd)*3.0 / 5
self.loginpannel.setGeometry(startX, startY, panelWd, panelHd)
self.userName.setText("root")
self.userPassword.setEchoMode(QLineEdit.Password)
self.userPassword.setText("123456")
def keyPressEvent(self, event):
if event.key() == Qt.Key_Return:
self.pbLogin.click() | [
"[email protected]"
] | |
ba9a9f6ba60a81293d7b7eaa9ce97f4b6b1d919b | e02506da0c661c8241fed00efdd0d6b2f8b147df | /textattack/constraints/overlap/levenshtein_edit_distance.py | 8e7863c9daeb4621e5bbc89f9b080d68255b60b1 | [
"MIT"
] | permissive | SatoshiRobatoFujimoto/TextAttack | 2592a828f128fd8bf0b8ce5578e9488df5b2ac97 | a809a9bddddff9f41750949e26edde26c8af6cfa | refs/heads/master | 2022-07-11T02:10:24.536157 | 2020-05-14T13:29:44 | 2020-05-14T13:29:44 | 263,941,825 | 1 | 0 | MIT | 2020-05-14T14:43:47 | 2020-05-14T14:43:46 | null | UTF-8 | Python | false | false | 719 | py | import editdistance
from textattack.constraints import Constraint
class LevenshteinEditDistance(Constraint):
""" A constraint on edit distance (Levenshtein Distance).
"""
def __init__(self, max_edit_distance):
if not isinstance(max_edit_distance, int):
raise TypeError('max_edit_distance must be an int')
self.max_edit_distance = max_edit_distance
def __call__(self, x, x_adv, original_text=None):
if not original_text:
return True
edit_distance = editdistance.eval(original_text.text, x_adv.text)
return edit_distance <= self.max_edit_distance
def extra_repr_keys(self):
return ['max_edit_distance']
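# Hedged usage sketch (added for illustration): exercising the constraint with
# stand-in objects. TextAttack's real tokenized-text type is assumed away here;
# anything with a .text attribute satisfies what __call__ reads.
if __name__ == "__main__":
    class _Text:
        def __init__(self, text):
            self.text = text

    constraint = LevenshteinEditDistance(max_edit_distance=2)
    original = _Text("the quick brown fox")
    perturbed = _Text("the quick brown fix")  # one substitution
    print(constraint(None, perturbed, original_text=original))  # True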
| [
"[email protected]"
] | |
5c1948278e75e3ff8b2fba43c5b2c56bff5ce1f9 | 0793a634ce31b3c2370ba0f945993ee80bf1a8b4 | /mirage/thesisPresentation.py | 96edde9b9e908147fbf31dcb8cb3e0476b311c96 | [
"MIT"
] | permissive | ruizmic0100/Mirage | a5079793124f54052257af1d084b0390e421c43f | 33ad0d07322953ac6fc5c26b4f6fe7d17e4784dd | refs/heads/master | 2023-07-30T10:52:57.956239 | 2021-09-12T04:15:56 | 2021-09-12T04:15:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,498 | py | import numpy as np
from mirage import lens_analysis as la
def overlay_sizes(result,num):
from matplotlib import pyplot as plt
fig = plt.figure()
for res in result:
x,y = res.lightcurves[num].plottable("uas")
label_sz = r"%.3f $\theta_E$" % res.parameters.quasar.radius.to(res.parameters.theta_E).value
plt.plot(x,y,label=label_sz)
plt.legend()
return fig
def export_vid(infile,outfile):
from imageio import get_writer
from matplotlib import cm
from matplotlib.colors import Normalize
norm = Normalize(vmin=-4,vmax=4)
writer = get_writer(outfile,"mp4",fps=10)
cmap = cm.BuPu_r
data = la.load(infile)
for i in range(data[0].simulation.num_trials):
mm = data[i].magmap.data
normald = cmap(norm(mm))
normald = (normald*255).astype(np.uint8)
writer.append_data(normald)
writer.close()
print("Video exported to %s" % outfile)
#Things I want to show:
#Start a simulation and let it run while I talk about background.
#Show magnification maps, and how they vary as a function of quasar size.
#Show lightcurves, and how variable they are.
#Show how caustic events shift as a function of quasar size.
#Highlight the peaks with a + to show them more clearly and how they shift.
#Possible question of interest - how far apart are doublets, typically? Can we
#constrain the speed of the quasar because of that?
#Give an example of a fold and a cusp, and analyze the differences.
| [
"[email protected]"
] | |
c380835831f8c41526e64814112f5f23d2c0673b | ae10b60cb92a69146bfb05ef5dde735a0aa45d4b | /examples/Extended Application/sklearn/examples/calibration/plot_compare_calibration.py | 2740807f47b521cb56cd402223342f85c841602d | [
"MIT"
] | permissive | kantel/nodebox-pyobjc | 471cea4c5d7f1c239c490323186458a74edcc214 | 068ba64c87d607522a240ab60c3ba14f869f6222 | refs/heads/master | 2021-08-14T18:32:57.995445 | 2017-11-16T13:42:23 | 2017-11-16T13:42:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,821 | py | """
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subsetting." As a result, the calibration curve shows a characteristic
sigmoid shape, indicating that the classifier could trust its "intuition"
more and return probabilities closer to 0 or 1 typically.
* Support Vector Classification (SVC) shows an even more sigmoid curve as
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2)
train_samples = 100 # Samples used for training the models
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)
# #############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name, ))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# plt.show()
pltshow(plt)
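# Hedged follow-up sketch (added; not part of the upstream scikit-learn example):
# the sigmoid-shaped reliability curves above can be corrected post hoc with
# CalibratedClassifierCV. This reuses X_train/y_train/X_test from this script;
# the method and cv values are illustrative choices.
from sklearn.calibration import CalibratedClassifierCV

calibrated_svc = CalibratedClassifierCV(LinearSVC(C=1.0), method="sigmoid", cv=3)
calibrated_svc.fit(X_train, y_train)
calibrated_probs = calibrated_svc.predict_proba(X_test)[:, 1]
print("calibrated SVC probabilities, first five:", calibrated_probs[:5])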
| [
"[email protected]"
] | |
e0cc017891024f3f9aaf43b29c5b45d31bb2ad5c | 0b193f4da7547d95b7c50fbc1b81276da8163372 | /actions/models.py | 9f242b017204d32441280c09d1083d6557d8d2a2 | [] | no_license | jzxyouok/bookmarks | 4b071023af57a2b87fb4fcb034affd5a16719e85 | c1bf5ce731f20c8771f6ff5038839c938a2562d8 | refs/heads/master | 2020-06-06T15:22:37.096495 | 2019-04-08T03:51:17 | 2019-04-08T03:51:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | from django.db import models
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
# Create your models here.
class Action(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, models.CASCADE, 'actions', db_index=True)
verb = models.CharField(max_length=255)
created = models.DateTimeField(auto_now_add=True, db_index=True)
target_ct = models.ForeignKey(ContentType, models.CASCADE, 'target_obj', blank=True, null=True)
target_id = models.PositiveIntegerField(blank=True, null=True, db_index=True)
target = GenericForeignKey('target_ct', 'target_id')
class Meta:
ordering = ('-created',) | [
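# Hedged usage sketch (added for illustration): a thin helper for recording
# actions. The helper name is an assumption; any saved model instance works as
# `target`, because the GenericForeignKey stores its ContentType plus primary key.
def create_action(user, verb, target=None):
    action = Action(user=user, verb=verb, target=target)
    action.save()
    return action

# e.g. create_action(request.user, 'bookmarked image', image) # names illustrative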
"[email protected]"
] | |
c7f7e4aa1efc2446b503bd9c8ed99a72da2ce9c9 | d2b53b3568890dd805575035d09635c422c6bc4d | /rllib/execution/rollout_ops.py | c199e15b9b41dca7c3828a3af4f362e9c6b5d70b | [
"Apache-2.0",
"MIT"
] | permissive | mehrdadn/ray | 939deda7099eb30371cbb920a9725b314c58c0b5 | 3506910c5da257215d38d02f424acc4f419ddbaf | refs/heads/master | 2020-09-03T15:33:35.578248 | 2020-07-31T21:33:27 | 2020-07-31T21:33:27 | 219,498,150 | 2 | 1 | Apache-2.0 | 2019-11-04T12:37:23 | 2019-11-04T12:37:22 | null | UTF-8 | Python | false | false | 8,816 | py | import logging
from typing import List, Tuple
import time
from ray.util.iter import from_actors, LocalIterator
from ray.util.iter_metrics import SharedMetrics
from ray.rllib.evaluation.metrics import get_learner_stats
from ray.rllib.evaluation.rollout_worker import get_global_worker
from ray.rllib.evaluation.worker_set import WorkerSet
from ray.rllib.execution.common import STEPS_SAMPLED_COUNTER, LEARNER_INFO, \
SAMPLE_TIMER, GRAD_WAIT_TIMER, _check_sample_batch_type, \
_get_shared_metrics
from ray.rllib.policy.sample_batch import SampleBatch, DEFAULT_POLICY_ID, \
MultiAgentBatch
from ray.rllib.utils.sgd import standardized
from ray.rllib.utils.types import PolicyID, SampleBatchType, ModelGradients
logger = logging.getLogger(__name__)
def ParallelRollouts(workers: WorkerSet, *, mode="bulk_sync",
num_async=1) -> LocalIterator[SampleBatch]:
"""Operator to collect experiences in parallel from rollout workers.
If there are no remote workers, experiences will be collected serially from
the local worker instance instead.
Arguments:
workers (WorkerSet): set of rollout workers to use.
mode (str): One of {'async', 'bulk_sync', 'raw'}.
- In 'async' mode, batches are returned as soon as they are
computed by rollout workers with no order guarantees.
- In 'bulk_sync' mode, we collect one batch from each worker
and concatenate them together into a large batch to return.
- In 'raw' mode, the ParallelIterator object is returned directly
and the caller is responsible for implementing gather and
updating the timesteps counter.
num_async (int): In async mode, the max number of async
requests in flight per actor.
Returns:
A local iterator over experiences collected in parallel.
Examples:
>>> rollouts = ParallelRollouts(workers, mode="async")
>>> batch = next(rollouts)
>>> print(batch.count)
50 # config.rollout_fragment_length
>>> rollouts = ParallelRollouts(workers, mode="bulk_sync")
>>> batch = next(rollouts)
>>> print(batch.count)
200 # config.rollout_fragment_length * config.num_workers
Updates the STEPS_SAMPLED_COUNTER counter in the local iterator context.
"""
# Ensure workers are initially in sync.
workers.sync_weights()
def report_timesteps(batch):
metrics = _get_shared_metrics()
metrics.counters[STEPS_SAMPLED_COUNTER] += batch.count
return batch
if not workers.remote_workers():
# Handle the serial sampling case.
def sampler(_):
while True:
yield workers.local_worker().sample()
return (LocalIterator(sampler, SharedMetrics())
.for_each(report_timesteps))
# Create a parallel iterator over generated experiences.
rollouts = from_actors(workers.remote_workers())
if mode == "bulk_sync":
return rollouts \
.batch_across_shards() \
.for_each(lambda batches: SampleBatch.concat_samples(batches)) \
.for_each(report_timesteps)
elif mode == "async":
return rollouts.gather_async(
num_async=num_async).for_each(report_timesteps)
elif mode == "raw":
return rollouts
else:
raise ValueError("mode must be one of 'bulk_sync', 'async', 'raw', "
"got '{}'".format(mode))
def AsyncGradients(
workers: WorkerSet) -> LocalIterator[Tuple[ModelGradients, int]]:
"""Operator to compute gradients in parallel from rollout workers.
Arguments:
workers (WorkerSet): set of rollout workers to use.
Returns:
A local iterator over policy gradients computed on rollout workers.
Examples:
>>> grads_op = AsyncGradients(workers)
>>> print(next(grads_op))
{"var_0": ..., ...}, 50 # grads, batch count
Updates the STEPS_SAMPLED_COUNTER counter and LEARNER_INFO field in the
local iterator context.
"""
# Ensure workers are initially in sync.
workers.sync_weights()
# This function will be applied remotely on the workers.
def samples_to_grads(samples):
return get_global_worker().compute_gradients(samples), samples.count
# Record learner metrics and pass through (grads, count).
class record_metrics:
def _on_fetch_start(self):
self.fetch_start_time = time.perf_counter()
def __call__(self, item):
(grads, info), count = item
metrics = _get_shared_metrics()
metrics.counters[STEPS_SAMPLED_COUNTER] += count
metrics.info[LEARNER_INFO] = get_learner_stats(info)
metrics.timers[GRAD_WAIT_TIMER].push(time.perf_counter() -
self.fetch_start_time)
return grads, count
rollouts = from_actors(workers.remote_workers())
grads = rollouts.for_each(samples_to_grads)
return grads.gather_async().for_each(record_metrics())
class ConcatBatches:
"""Callable used to merge batches into larger batches for training.
This should be used with the .combine() operator.
Examples:
>>> rollouts = ParallelRollouts(...)
>>> rollouts = rollouts.combine(ConcatBatches(min_batch_size=10000))
>>> print(next(rollouts).count)
10000
"""
def __init__(self, min_batch_size: int):
self.min_batch_size = min_batch_size
self.buffer = []
self.count = 0
self.batch_start_time = None
def _on_fetch_start(self):
if self.batch_start_time is None:
self.batch_start_time = time.perf_counter()
def __call__(self, batch: SampleBatchType) -> List[SampleBatchType]:
_check_sample_batch_type(batch)
self.buffer.append(batch)
self.count += batch.count
if self.count >= self.min_batch_size:
if self.count > self.min_batch_size * 2:
logger.info("Collected more training samples than expected "
"(actual={}, expected={}). ".format(
self.count, self.min_batch_size) +
"This may be because you have many workers or "
"long episodes in 'complete_episodes' batch mode.")
out = SampleBatch.concat_samples(self.buffer)
timer = _get_shared_metrics().timers[SAMPLE_TIMER]
timer.push(time.perf_counter() - self.batch_start_time)
timer.push_units_processed(self.count)
self.batch_start_time = None
self.buffer = []
self.count = 0
return [out]
return []
class SelectExperiences:
"""Callable used to select experiences from a MultiAgentBatch.
This should be used with the .for_each() operator.
Examples:
>>> rollouts = ParallelRollouts(...)
>>> rollouts = rollouts.for_each(SelectExperiences(["pol1", "pol2"]))
>>> print(next(rollouts).policy_batches.keys())
{"pol1", "pol2"}
"""
def __init__(self, policy_ids: List[PolicyID]):
assert isinstance(policy_ids, list), policy_ids
self.policy_ids = policy_ids
def __call__(self, samples: SampleBatchType) -> SampleBatchType:
_check_sample_batch_type(samples)
if isinstance(samples, MultiAgentBatch):
samples = MultiAgentBatch({
k: v
for k, v in samples.policy_batches.items()
if k in self.policy_ids
}, samples.count)
return samples
class StandardizeFields:
"""Callable used to standardize fields of batches.
This should be used with the .for_each() operator. Note that the input
may be mutated by this operator for efficiency.
Examples:
>>> rollouts = ParallelRollouts(...)
>>> rollouts = rollouts.for_each(StandardizeFields(["advantages"]))
>>> print(np.std(next(rollouts)["advantages"]))
1.0
"""
def __init__(self, fields: List[str]):
self.fields = fields
def __call__(self, samples: SampleBatchType) -> SampleBatchType:
_check_sample_batch_type(samples)
wrapped = False
if isinstance(samples, SampleBatch):
samples = MultiAgentBatch({
DEFAULT_POLICY_ID: samples
}, samples.count)
wrapped = True
for policy_id in samples.policy_batches:
batch = samples.policy_batches[policy_id]
for field in self.fields:
batch[field] = standardized(batch[field])
if wrapped:
samples = samples.policy_batches[DEFAULT_POLICY_ID]
return samples
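# Hedged composition sketch (added for illustration): how the operators above
# are meant to chain into a single experience stream. `workers` is assumed to
# be an already-constructed WorkerSet; the batch size and field name are
# illustrative values.
def simple_training_stream(workers: WorkerSet) -> LocalIterator[SampleBatchType]:
    rollouts = ParallelRollouts(workers, mode="bulk_sync")
    return rollouts \
        .combine(ConcatBatches(min_batch_size=4000)) \
        .for_each(StandardizeFields(["advantages"]))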
| [
"[email protected]"
] | |
585d3ceb6d4b6de1c0a332107e0e397f3be944fa | f30783d6087509ba8afaf60f66fc2d824ae5b511 | /backend/mastersoftastemobile_1636/urls.py | 0dca63503fb39a6b80820ec4f500fc0e7e8b95df | [] | no_license | crowdbotics-apps/mastersoftastemobile-1636 | fd75f2ad60e4ebf46789d91675e5ebfb360c44fd | 93c241332ad323246c73b5115f1c3bf8e8e9ddbb | refs/heads/master | 2022-04-15T07:15:30.033047 | 2020-02-10T19:03:28 | 2020-02-10T19:03:28 | 239,589,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,000 | py | """mastersoftastemobile_1636 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "MastersOfTasteMobileApp"
admin.site.site_title = "MastersOfTasteMobileApp Admin Portal"
admin.site.index_title = "MastersOfTasteMobileApp Admin"
# swagger
schema_view = get_schema_view(
openapi.Info(
title="MastersOfTasteMobileApp API",
default_version="v1",
description="API documentation for MastersOfTasteMobileApp App",
),
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
| [
"[email protected]"
] |